url = https://github.com/rust-lang/libc.git
[submodule "src/doc/nomicon"]
path = src/doc/nomicon
- url = https://github.com/rust-lang-nursery/nomicon
+ url = https://github.com/rust-lang-nursery/nomicon.git
[submodule "src/tools/cargo"]
path = cargo
- url = https://github.com/rust-lang/cargo
+ url = https://github.com/rust-lang/cargo.git
[submodule "reference"]
path = src/doc/reference
url = https://github.com/rust-lang-nursery/reference.git
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
SCCACHE_ERROR_LOG=/tmp/sccache.log
RUST_LOG=sccache=debug
+ MACOSX_DEPLOYMENT_TARGET=10.8
+ MACOSX_STD_DEPLOYMENT_TARGET=10.7
os: osx
osx_image: xcode8.2
install: &osx_install_sccache >
- travis_retry curl -o /usr/local/bin/sccache https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-apple-darwin &&
+ travis_retry curl -o /usr/local/bin/sccache https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-apple-darwin &&
chmod +x /usr/local/bin/sccache
- env: >
RUST_CHECK_TARGET=check
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
SCCACHE_ERROR_LOG=/tmp/sccache.log
RUST_LOG=sccache=debug
+ MACOSX_DEPLOYMENT_TARGET=10.8
+ MACOSX_STD_DEPLOYMENT_TARGET=10.7
os: osx
osx_image: xcode8.2
install: *osx_install_sccache
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
SCCACHE_ERROR_LOG=/tmp/sccache.log
RUST_LOG=sccache=debug
+ MACOSX_DEPLOYMENT_TARGET=10.8
+ MACOSX_STD_DEPLOYMENT_TARGET=10.7
os: osx
osx_image: xcode8.2
install: >
- travis_retry curl -o /usr/local/bin/sccache https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-apple-darwin &&
+ travis_retry curl -o /usr/local/bin/sccache https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-apple-darwin &&
chmod +x /usr/local/bin/sccache
- env: >
RUST_CHECK_TARGET=dist
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
SCCACHE_ERROR_LOG=/tmp/sccache.log
RUST_LOG=sccache=debug
+ MACOSX_DEPLOYMENT_TARGET=10.8
+ MACOSX_STD_DEPLOYMENT_TARGET=10.7
os: osx
osx_image: xcode8.2
install: *osx_install_sccache
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
SCCACHE_ERROR_LOG=/tmp/sccache.log
RUST_LOG=sccache=debug
+ MACOSX_DEPLOYMENT_TARGET=10.8
+ MACOSX_STD_DEPLOYMENT_TARGET=10.7
os: osx
osx_image: xcode8.2
install: *osx_install_sccache
$ ./x.py doc
```
-The generated documentation will appear in a top-level `doc` directory,
-created by the `make` rule.
+The generated documentation will appear under `doc` in the `build` directory for
+the ABI used. I.e., if the ABI was `x86_64-pc-windows-msvc`, the directory will be
+`build\x86_64-pc-windows-msvc\doc`.
## Notes
Language
--------
-* Lifetimes in statics and consts default to `'static`. [RFC 1623]
* [The compiler's `dead_code` lint now accounts for type aliases][38051].
* [Uninhabitable enums (those without any variants) no longer permit wildcard
match patterns][38069]
* Compiler works with the following configurations:
* Linux: x86 and x86_64 hosts and targets
- * MacOS: x86 and x86_64 hosts and targets
+ * macOS: x86 and x86_64 hosts and targets
* Windows: x86 hosts and targets
* Cross compilation / multi-target configuration supported.
- set PATH=C:\Python27;%PATH%
# Download and install sccache
- - appveyor-retry appveyor DownloadFile https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-pc-windows-msvc
- - mv 2017-02-25-sccache-x86_64-pc-windows-msvc sccache
+ - appveyor-retry appveyor DownloadFile https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-pc-windows-msvc
+ - mv 2017-03-16-sccache-x86_64-pc-windows-msvc sccache
- set PATH=%PATH%;%CD%
# Install InnoSetup to get `iscc` used to produce installers
-Subproject commit 4a3c0a63b07e9a4feb41cb11de37c92a09db5a60
+Subproject commit c995e9eb5acf3976ae8674a0dc6d9e958053d9fd
opt inject-std-version 1 "inject the current compiler version of libstd into programs"
opt llvm-version-check 1 "check if the LLVM version is supported, build anyway"
opt codegen-tests 1 "run the src/test/codegen tests"
+opt save-analysis 0 "save API analysis data"
opt option-checking 1 "complain about unrecognized options in this configure script"
opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)"
opt locked-deps 0 "force Cargo.lock to be up to date"
if env::var("RUSTC_RPATH") == Ok("true".to_string()) {
let rpath = if target.contains("apple") {
- // Note that we need to take one extra step on OSX to also pass
+ // Note that we need to take one extra step on macOS to also pass
// `-Wl,-instal_name,@rpath/...` to get things to work right. To
// do that we pass a weird flag to the compiler to get it to do
// so. Note that this is definitely a hack, and we should likely
cmd.arg("--docck-python").arg(build.python());
if build.config.build.ends_with("apple-darwin") {
- // Force /usr/bin/python on OSX for LLDB tests because we're loading the
+ // Force /usr/bin/python on macOS for LLDB tests because we're loading the
// LLDB plugin's compiled module which only works with the system python
// (namely not Homebrew-installed python)
cmd.arg("--lldb-python").arg("/usr/bin/python");
use Build;
pub fn clean(build: &Build) {
- rm_rf(build, "tmp".as_ref());
- rm_rf(build, &build.out.join("tmp"));
- rm_rf(build, &build.out.join("dist"));
+ rm_rf("tmp".as_ref());
+ rm_rf(&build.out.join("tmp"));
+ rm_rf(&build.out.join("dist"));
for host in build.config.host.iter() {
let entries = match build.out.join(host).read_dir() {
continue
}
let path = t!(entry.path().canonicalize());
- rm_rf(build, &path);
+ rm_rf(&path);
}
}
}
-fn rm_rf(build: &Build, path: &Path) {
+fn rm_rf(path: &Path) {
if !path.exists() {
return
}
let file = t!(file).path();
if file.is_dir() {
- rm_rf(build, &file);
+ rm_rf(&file);
} else {
// On windows we can't remove a readonly file, and git will
// often clone files as readonly. As a result, we have some
use std::fs::{self, File};
use std::path::{Path, PathBuf};
use std::process::Command;
+use std::env;
use build_helper::{output, mtime, up_to_date};
use filetime::FileTime;
build.clear_if_dirty(&out_dir, &build.compiler_path(compiler));
let mut cargo = build.cargo(compiler, Mode::Libstd, target, "build");
let mut features = build.std_features();
+
+ if let Ok(target) = env::var("MACOSX_STD_DEPLOYMENT_TARGET") {
+ cargo.env("MACOSX_DEPLOYMENT_TARGET", target);
+ }
+
// When doing a local rebuild we tell cargo that we're stage1 rather than
// stage0. This works fine if the local rust and being-built rust have the
// same view of what the default allocator is, but fails otherwise. Since
let out_dir = build.cargo_out(compiler, Mode::Libtest, target);
build.clear_if_dirty(&out_dir, &libstd_stamp(build, compiler, target));
let mut cargo = build.cargo(compiler, Mode::Libtest, target, "build");
+ if let Ok(target) = env::var("MACOSX_STD_DEPLOYMENT_TARGET") {
+ cargo.env("MACOSX_DEPLOYMENT_TARGET", target);
+ }
cargo.arg("--manifest-path")
.arg(build.src.join("src/libtest/Cargo.toml"));
build.run(&mut cargo);
cargo.env("CFG_LLVM_ROOT", s);
}
// Building with a static libstdc++ is only supported on linux right now,
- // not for MSVC or OSX
+ // not for MSVC or macOS
if build.config.llvm_static_stdcpp &&
!target.contains("windows") &&
!target.contains("apple") {
pub rustc_default_ar: Option<String>,
pub rust_optimize_tests: bool,
pub rust_debuginfo_tests: bool,
+ pub rust_save_analysis: bool,
pub rust_dist_src: bool,
pub build: String,
optimize_tests: Option<bool>,
debuginfo_tests: Option<bool>,
codegen_tests: Option<bool>,
+ save_analysis: Option<bool>,
}
/// TOML representation of how each build target is configured.
set(&mut config.rust_optimize_tests, rust.optimize_tests);
set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests);
set(&mut config.codegen_tests, rust.codegen_tests);
+ set(&mut config.rust_save_analysis, rust.save_analysis);
set(&mut config.rust_rpath, rust.rpath);
set(&mut config.debug_jemalloc, rust.debug_jemalloc);
set(&mut config.use_jemalloc, rust.use_jemalloc);
("LOCAL_REBUILD", self.local_rebuild),
("NINJA", self.ninja),
("CODEGEN_TESTS", self.codegen_tests),
+ ("SAVE_ANALYSIS", self.rust_save_analysis),
("LOCKED_DEPS", self.locked_deps),
("VENDOR", self.vendor),
("FULL_BOOTSTRAP", self.full_bootstrap),
# saying that the FileCheck executable is missing, you may want to disable this.
#codegen-tests = true
+# Flag indicating whether the API analysis data should be saved.
+#save-analysis = false
+
# =============================================================================
# Options for specific targets
#
/// Creates a tarball of save-analysis metadata, if available.
pub fn analysis(build: &Build, compiler: &Compiler, target: &str) {
+ if !build.config.rust_save_analysis {
+ return
+ }
+
println!("Dist analysis");
- if build.config.channel != "nightly" {
- println!("\tskipping - not on nightly channel");
- return;
- }
if compiler.host != build.config.build {
- println!("\tskipping - not a build host");
- return
- }
- if compiler.stage != 2 {
- println!("\tskipping - not stage2");
+ println!("\tskipping, not a build host");
return
}
install_sh(&build, "docs", "rust-docs", stage, host, &prefix,
&docdir, &libdir, &mandir, &empty_dir);
}
+ if build.config.rust_save_analysis {
+ install_sh(&build, "analysis", "rust-analysis", stage, host, &prefix,
+ &docdir, &libdir, &mandir, &empty_dir);
+ }
install_sh(&build, "std", "rust-std", stage, host, &prefix,
&docdir, &libdir, &mandir, &empty_dir);
install_sh(&build, "rustc", "rustc", stage, host, &prefix,
.env(format!("CFLAGS_{}", target), self.cflags(target).join(" "));
}
- if self.config.channel == "nightly" && compiler.is_final_stage(self) {
+ if self.config.rust_save_analysis && compiler.is_final_stage(self) {
cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string());
}
.filter(|s| !s.starts_with("-O") && !s.starts_with("/O"))
.collect::<Vec<_>>();
- // If we're compiling on OSX then we add a few unconditional flags
+ // If we're compiling on macOS then we add a few unconditional flags
// indicating that we want libc++ (more filled out than libstdc++) and
// we want to compile for 10.7. This way we can ensure that
// LLVM/jemalloc/etc are all properly compiled.
let tarball = out.join(&name);
if !tarball.exists() {
let tmp = tarball.with_extension("tmp");
- build.run(Command::new("curl")
- .arg("-o").arg(&tmp)
- .arg(format!("https://www.openssl.org/source/{}", name)));
+ // originally from https://www.openssl.org/source/...
+ let url = format!("https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/{}",
+ name);
+ let mut ok = false;
+ for _ in 0..3 {
+ let status = Command::new("curl")
+ .arg("-o").arg(&tmp)
+ .arg(&url)
+ .status()
+ .expect("failed to spawn curl");
+ if status.success() {
+ ok = true;
+ break
+ }
+ }
+ if !ok {
+ panic!("failed to download openssl source")
+ }
let mut shasum = if target.contains("apple") {
let mut cmd = Command::new("shasum");
cmd.arg("-a").arg("256");
}
for target in build.config.target.iter() {
- // Can't compile for iOS unless we're on OSX
+ // Can't compile for iOS unless we're on macOS
if target.contains("apple-ios") &&
!build.config.build.contains("apple-darwin") {
- panic!("the iOS target is only supported on OSX");
+ panic!("the iOS target is only supported on macOS");
}
// Make sure musl-root is valid if specified
RUN curl -O http://ftp.nl.debian.org/debian/dists/jessie/main/installer-armhf/current/images/device-tree/vexpress-v2p-ca15-tc1.dtb
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
ENV TARGETS=arm-linux-androideabi
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
ENV \
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
ENV \
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
ENV RUST_CONFIGURE_ARGS \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
WORKDIR /tmp
COPY patches/ /tmp/patches/
-COPY powerpc64-linux-gnu.config build-powerpc64-toolchain.sh /tmp/
+COPY shared.sh powerpc64-linux-gnu.config build-powerpc64-toolchain.sh /tmp/
RUN ./build-powerpc64-toolchain.sh
USER root
set -ex
-hide_output() {
- set +x
- on_err="
-echo ERROR: An error was encountered with the build.
-cat /tmp/build.log
-exit 1
-"
- trap "$on_err" ERR
- bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
- PING_LOOP_PID=$!
- $@ &> /tmp/build.log
- rm /tmp/build.log
- trap - ERR
- kill $PING_LOOP_PID
- set -x
-}
+source shared.sh
mkdir build
cd build
set -ex
+source shared.sh
+
BINUTILS=2.25.1
GCC=5.3.0
TARGET=powerpc64le-linux-gnu
curl https://ftp.gnu.org/gnu/binutils/binutils-$BINUTILS.tar.bz2 | tar xjf -
mkdir binutils-build
cd binutils-build
-../binutils-$BINUTILS/configure --target=$TARGET --with-sysroot=$SYSROOT
-make -j10
-make install
+hide_output ../binutils-$BINUTILS/configure --target=$TARGET --with-sysroot=$SYSROOT
+hide_output make -j10
+hide_output make install
popd
rm -rf binutils-$TARGET
pushd gcc-$TARGET
curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf -
cd gcc-$GCC
-./contrib/download_prerequisites
+hide_output ./contrib/download_prerequisites
mkdir ../gcc-build
cd ../gcc-build
-../gcc-$GCC/configure \
+hide_output ../gcc-$GCC/configure \
--enable-languages=c,c++ \
--target=$TARGET \
--with-cpu=power8 \
--disable-libsanitizer \
--disable-libquadmath-support \
--disable-lto
-make -j10
-make install
+hide_output make -j10
+hide_output make install
popd
rm -rf gcc-$TARGET
--- /dev/null
+# Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+# file at the top-level directory of this distribution and at
+# http://rust-lang.org/COPYRIGHT.
+#
+# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+# option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+hide_output() {
+ set +x
+ on_err="
+echo ERROR: An error was encountered with the build.
+cat /tmp/build.log
+exit 1
+"
+ trap "$on_err" ERR
+ bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
+ PING_LOOP_PID=$!
+ $@ &> /tmp/build.log
+ rm /tmp/build.log
+ trap - ERR
+ kill $PING_LOOP_PID
+ set -x
+}
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
ENTRYPOINT ["/rustroot/dumb-init", "--"]
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
ENV HOSTS=i686-unknown-linux-gnu
--enable-extended \
--enable-sanitizers
ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS
+
+# This is the only builder which will create source tarballs
ENV DIST_SRC 1
+
+# When we build cargo in this container, we don't want it to use the system
+# libcurl, instead it should compile its own.
+ENV LIBCURL_NO_PKG_CONFIG 1
ENTRYPOINT ["/usr/bin/dumb-init", "--"]
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
ENV RUST_CONFIGURE_ARGS \
lib32stdc++6
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
pkg-config
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
xz-utils
RUN curl -o /usr/local/bin/sccache \
- https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-02-25-sccache-x86_64-unknown-linux-musl && \
+ https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror/2017-03-16-sccache-x86_64-unknown-linux-musl && \
chmod +x /usr/local/bin/sccache
RUN curl -OL https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
if [ "$DEPLOY$DEPLOY_ALT" != "" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --release-channel=nightly"
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-llvm-static-stdcpp"
+ RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --enable-save-analysis"
if [ "$NO_LLVM_ASSERTIONS" = "1" ]; then
RUST_CONFIGURE_ARGS="$RUST_CONFIGURE_ARGS --disable-llvm-assertions"
# fn foo() {}
```
-Let's discuss the details of these code blocks.
-
#### Code block annotations
To write some Rust code in a comment, use the triple graves:
# fn foo() {}
```
-If you want something that's not Rust code, you can add an annotation:
-
-```rust
-/// ```c
-/// printf("Hello, world\n");
-/// ```
-# fn foo() {}
-```
-
-This will highlight according to whatever language you're showing off.
-If you're only showing plain text, choose `text`.
-
-It's important to choose the correct annotation here, because `rustdoc` uses it
-in an interesting way: It can be used to actually test your examples in a
-library crate, so that they don't get out of date. If you have some C code but
-`rustdoc` thinks it's Rust because you left off the annotation, `rustdoc` will
-complain when trying to generate the documentation.
+This will add code highlighting. If you are only showing plain text, put `text`
+instead of `rust` after the triple graves (see below).
## Documentation as tests
It’s important to be mindful of `panic!`s when working with FFI. A `panic!`
across an FFI boundary is undefined behavior. If you’re writing code that may
-panic, you should run it in a closure with [`catch_unwind()`]:
+panic, you should run it in a closure with [`catch_unwind`]:
```rust
use std::panic::catch_unwind;
fn main() {}
```
-Please note that [`catch_unwind()`] will only catch unwinding panics, not
-those who abort the process. See the documentation of [`catch_unwind()`]
+Please note that [`catch_unwind`] will only catch unwinding panics, not
+those that abort the process. See the documentation of [`catch_unwind`]
for more information.
-[`catch_unwind()`]: ../std/panic/fn.catch_unwind.html
+[`catch_unwind`]: ../std/panic/fn.catch_unwind.html
# Representing opaque structs
.read_line(&mut guess)
```
-Here, we call the [`read_line()`][read_line] method on our handle.
+Here, we call the [`read_line`][read_line] method on our handle.
[Methods][method] are like associated functions, but are only available on a
particular instance of a type, rather than the type itself. We’re also passing
one argument to `read_line()`: `&mut guess`.
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured
```
-We also get a non-zero status code. We can use `$?` on OS X and Linux:
+We also get a non-zero status code. We can use `$?` on macOS and Linux:
```bash
$ echo $?
Learning] project collects documentation from the community, and [Docs.rs]
builds documentation for individual Rust packages.
-## API Documentation
+# API Documentation
Rust provides a standard library with a number of features; [we host its
documentation here][api].
-## Extended Error Documentation
+# Extended Error Documentation
Many of Rust's errors come with error codes, and you can request extended
diagnostics from the compiler on those errors. We also [have the text of those
extended errors on the web][err], if you prefer to read them that way.
-## The Rust Bookshelf
+# The Rust Bookshelf
Rust provides a number of book-length sets of documentation, collectively
nicknamed 'The Rust Bookshelf.'
- [repr_simd](repr-simd.md)
- [rustc_attrs](rustc-attrs.md)
- [rustc_diagnostic_macros](rustc-diagnostic-macros.md)
+- [rvalue_static_promotion](rvalue-static-promotion.md)
- [sanitizer_runtime](sanitizer-runtime.md)
- [simd](simd.md)
- [simd_ffi](simd-ffi.md)
- [slice_patterns](slice-patterns.md)
+- [sort_unstable](sort-unstable.md)
- [specialization](specialization.md)
- [staged_api](staged-api.md)
- [start](start.md)
------------------------
+The `concat_idents` feature adds a macro for concatenating multiple identifiers
+into one identifier.
+## Examples
+```rust
+#![feature(concat_idents)]
+
+fn main() {
+ fn foobar() -> u32 { 23 }
+ let f = concat_idents!(foo, bar);
+ assert_eq!(f(), 23);
+}
+```
\ No newline at end of file
------------------------
+The `conservative_impl_trait` feature allows a conservative form of abstract
+return types.
+Abstract return types allow a function to hide a concrete return type behind a
+trait interface similar to trait objects, while still generating the same
+statically dispatched code as with concrete types.
+## Examples
+
+```rust
+#![feature(conservative_impl_trait)]
+
+fn even_iter() -> impl Iterator<Item=u32> {
+ (0..).map(|n| n * 2)
+}
+
+fn main() {
+ let first_four_even_numbers = even_iter().take(4).collect::<Vec<_>>();
+ assert_eq!(first_four_even_numbers, vec![0, 2, 4, 6]);
+}
+```
+
+## Background
+
+In today's Rust, you can write function signatures like:
+
+````rust,ignore
+fn consume_iter_static<I: Iterator<u8>>(iter: I) { }
+
+fn consume_iter_dynamic(iter: Box<Iterator<u8>>) { }
+````
+
+In both cases, the function does not depend on the exact type of the argument.
+The type held is "abstract", and is assumed only to satisfy a trait bound.
+
+* In the `_static` version using generics, each use of the function is
+ specialized to a concrete, statically-known type, giving static dispatch,
+ inline layout, and other performance wins.
+* In the `_dynamic` version using trait objects, the concrete argument type is
+ only known at runtime using a vtable.
+
+On the other hand, while you can write:
+
+````rust,ignore
+fn produce_iter_dynamic() -> Box<Iterator<u8>> { }
+````
+
+you _cannot_ write something like:
+
+````rust,ignore
+fn produce_iter_static() -> Iterator<u8> { }
+````
+
+That is, in today's Rust, abstract return types can only be written using trait
+objects, which can be a significant performance penalty. This RFC proposes
+"unboxed abstract types" as a way of achieving signatures like
+`produce_iter_static`. Like generics, unboxed abstract types guarantee static
+dispatch and inline data layout.
------------------------
+The `const_fn` feature allows marking free functions and inherent methods as
+`const`, enabling them to be called in constant contexts, with constant
+arguments.
+## Examples
+```rust
+#![feature(const_fn)]
+
+const fn double(x: i32) -> i32 {
+ x * 2
+}
+
+const FIVE: i32 = 5;
+const TEN: i32 = double(FIVE);
+
+fn main() {
+ assert_eq!(5, FIVE);
+ assert_eq!(10, TEN);
+}
+```
------------------------
+The `const_indexing` feature allows the constant evaluation of index operations
+on constant arrays and repeat expressions.
+## Examples
+```rust
+#![feature(const_indexing)]
+
+const ARR: [usize; 5] = [1, 2, 3, 4, 5];
+const ARR2: [usize; ARR[1]] = [42, 99];
+```
\ No newline at end of file
------------------------
+The `i128_type` feature adds support for 128 bit signed and unsigned integer
+types.
+```rust
+#![feature(i128_type)]
+
+fn main() {
+ assert_eq!(1u128 + 1u128, 2u128);
+ assert_eq!(u128::min_value(), 0);
+ assert_eq!(u128::max_value(), 340282366920938463463374607431768211455);
+
+ assert_eq!(1i128 - 2i128, -1i128);
+ assert_eq!(i128::min_value(), -170141183460469231731687303715884105728);
+ assert_eq!(i128::max_value(), 170141183460469231731687303715884105727);
+}
+```
------------------------
+The `non_ascii_idents` feature adds support for non-ASCII identifiers.
+## Examples
+```rust
+#![feature(non_ascii_idents)]
+
+const ε: f64 = 0.00001f64;
+const Π: f64 = 3.14f64;
+```
\ No newline at end of file
--- /dev/null
+# `rvalue_static_promotion`
+
+The tracking issue for this feature is: [#38865]
+
+[#38865]: https://github.com/rust-lang/rust/issues/38865
+
+------------------------
+
+The `rvalue_static_promotion` feature allows directly creating `'static` references to
+constant `rvalue`s, which in particular allows for more concise code in the common case
+in which a `'static` reference is all that's needed.
+
+
+## Examples
+
+```rust
+#![feature(rvalue_static_promotion)]
+
+fn main() {
+ let DEFAULT_VALUE: &'static u32 = &42;
+ assert_eq!(*DEFAULT_VALUE, 42);
+}
+```
--- /dev/null
+# `sort_unstable`
+
+The tracking issue for this feature is: [#40585]
+
+[#40585]: https://github.com/rust-lang/rust/issues/40585
+
+------------------------
+
+
/// # Examples
///
/// ```
- /// #![feature(rc_raw)]
- ///
/// use std::sync::Arc;
///
/// let x = Arc::new(10);
/// let x_ptr = Arc::into_raw(x);
/// assert_eq!(unsafe { *x_ptr }, 10);
/// ```
- #[unstable(feature = "rc_raw", issue = "37197")]
- pub fn into_raw(this: Self) -> *mut T {
- let ptr = unsafe { &mut (**this.ptr).data as *mut _ };
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub fn into_raw(this: Self) -> *const T {
+ let ptr = unsafe { &(**this.ptr).data as *const _ };
mem::forget(this);
ptr
}
/// # Examples
///
/// ```
- /// #![feature(rc_raw)]
- ///
/// use std::sync::Arc;
///
/// let x = Arc::new(10);
///
/// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
/// ```
- #[unstable(feature = "rc_raw", issue = "37197")]
- pub unsafe fn from_raw(ptr: *mut T) -> Self {
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
// To find the corresponding pointer to the `ArcInner` we need to subtract the offset of the
// `data` field from the pointer.
- Arc { ptr: Shared::new((ptr as *mut u8).offset(-offset_of!(ArcInner<T>, data)) as *mut _) }
+ let ptr = (ptr as *const u8).offset(-offset_of!(ArcInner<T>, data));
+ Arc {
+ ptr: Shared::new(ptr as *const _),
+ }
}
}
// Non-inlined part of `drop`.
#[inline(never)]
unsafe fn drop_slow(&mut self) {
- let ptr = *self.ptr;
+ let ptr = self.ptr.as_mut_ptr();
// Destroy the data at this time, even though we may not free the box
// allocation itself (there may still be weak pointers lying around).
}
#[inline]
- #[unstable(feature = "ptr_eq",
- reason = "newly added",
- issue = "36497")]
+ #[stable(feature = "ptr_eq", since = "1.17.0")]
/// Returns true if the two `Arc`s point to the same value (not
/// just values that compare as equal).
///
/// # Examples
///
/// ```
- /// #![feature(ptr_eq)]
- ///
/// use std::sync::Arc;
///
/// let five = Arc::new(5);
// As with `get_mut()`, the unsafety is ok because our reference was
// either unique to begin with, or became one upon cloning the contents.
unsafe {
- let inner = &mut **this.ptr;
+ let inner = &mut *this.ptr.as_mut_ptr();
&mut inner.data
}
}
// the Arc itself to be `mut`, so we're returning the only possible
// reference to the inner data.
unsafe {
- let inner = &mut **this.ptr;
+ let inner = &mut *this.ptr.as_mut_ptr();
Some(&mut inner.data)
}
} else {
//! Single-threaded reference-counting pointers.
//!
//! The type [`Rc<T>`][`Rc`] provides shared ownership of a value of type `T`,
-//! allocated in the heap. Invoking [`clone()`][clone] on [`Rc`] produces a new
+//! allocated in the heap. Invoking [`clone`][clone] on [`Rc`] produces a new
//! pointer to the same value in the heap. When the last [`Rc`] pointer to a
//! given value is destroyed, the pointed-to value is also destroyed.
//!
//! threads. If you need multi-threaded, atomic reference counting, use
//! [`sync::Arc`][arc].
//!
-//! The [`downgrade()`][downgrade] method can be used to create a non-owning
+//! The [`downgrade`][downgrade] method can be used to create a non-owning
//! [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
//! to an [`Rc`], but this will return [`None`] if the value has
//! already been dropped.
/// # Examples
///
/// ```
- /// #![feature(rc_raw)]
- ///
/// use std::rc::Rc;
///
/// let x = Rc::new(10);
/// let x_ptr = Rc::into_raw(x);
/// assert_eq!(unsafe { *x_ptr }, 10);
/// ```
- #[unstable(feature = "rc_raw", issue = "37197")]
- pub fn into_raw(this: Self) -> *mut T {
- let ptr = unsafe { &mut (**this.ptr).value as *mut _ };
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub fn into_raw(this: Self) -> *const T {
+ let ptr = unsafe { &mut (*this.ptr.as_mut_ptr()).value as *const _ };
mem::forget(this);
ptr
}
/// # Examples
///
/// ```
- /// #![feature(rc_raw)]
- ///
/// use std::rc::Rc;
///
/// let x = Rc::new(10);
///
/// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
/// ```
- #[unstable(feature = "rc_raw", issue = "37197")]
- pub unsafe fn from_raw(ptr: *mut T) -> Self {
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
// To find the corresponding pointer to the `RcBox` we need to subtract the offset of the
// `value` field from the pointer.
- Rc { ptr: Shared::new((ptr as *mut u8).offset(-offset_of!(RcBox<T>, value)) as *mut _) }
+ Rc { ptr: Shared::new((ptr as *const u8).offset(-offset_of!(RcBox<T>, value)) as *const _) }
}
}
#[stable(feature = "rc_unique", since = "1.4.0")]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if Rc::is_unique(this) {
- let inner = unsafe { &mut **this.ptr };
+ let inner = unsafe { &mut *this.ptr.as_mut_ptr() };
Some(&mut inner.value)
} else {
None
}
#[inline]
- #[unstable(feature = "ptr_eq",
- reason = "newly added",
- issue = "36497")]
+ #[stable(feature = "ptr_eq", since = "1.17.0")]
/// Returns true if the two `Rc`s point to the same value (not
/// just values that compare as equal).
///
/// # Examples
///
/// ```
- /// #![feature(ptr_eq)]
- ///
/// use std::rc::Rc;
///
/// let five = Rc::new(5);
// reference count is guaranteed to be 1 at this point, and we required
// the `Rc<T>` itself to be `mut`, so we're returning the only possible
// reference to the inner value.
- let inner = unsafe { &mut **this.ptr };
+ let inner = unsafe { &mut *this.ptr.as_mut_ptr() };
&mut inner.value
}
}
/// ```
fn drop(&mut self) {
unsafe {
- let ptr = *self.ptr;
+ let ptr = self.ptr.as_mut_ptr();
self.dec_strong();
if self.strong() == 0 {
mod imp {
use libc::{c_int, c_void, size_t};
- // Note that the symbols here are prefixed by default on OSX and Windows (we
+ // Note that the symbols here are prefixed by default on macOS and Windows (we
// don't explicitly request it), and on Android and DragonFly we explicitly
// request it as unprefixing cause segfaults (mismatches in allocators).
extern "C" {
data: Vec<T>,
}
-/// A container object that represents the result of the [`peek_mut()`] method
+/// A container object that represents the result of the [`peek_mut`] method
/// on `BinaryHeap`. See its documentation for details.
///
-/// [`peek_mut()`]: struct.BinaryHeap.html#method.peek_mut
+/// [`peek_mut`]: struct.BinaryHeap.html#method.peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
pub struct PeekMut<'a, T: 'a + Ord> {
heap: &'a mut BinaryHeap<T>,
}
/// An iterator over a sub-range of BTreeMap's entries.
+#[stable(feature = "btree_range", since = "1.17.0")]
pub struct Range<'a, K: 'a, V: 'a> {
front: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
back: Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge>,
}
/// A mutable iterator over a sub-range of BTreeMap's entries.
+#[stable(feature = "btree_range", since = "1.17.0")]
pub struct RangeMut<'a, K: 'a, V: 'a> {
front: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
back: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
/// Basic usage:
///
/// ```
- /// #![feature(btree_range, collections_bound)]
- ///
/// use std::collections::BTreeMap;
/// use std::collections::Bound::Included;
///
/// }
/// assert_eq!(Some((&5, &"b")), map.range(4..).next());
/// ```
- #[unstable(feature = "btree_range",
- reason = "matches collection reform specification, waiting for dust to settle",
- issue = "27787")]
+ #[stable(feature = "btree_range", since = "1.17.0")]
pub fn range<T: ?Sized, R>(&self, range: R) -> Range<K, V>
where T: Ord, K: Borrow<T>, R: RangeArgument<T>
{
/// Basic usage:
///
/// ```
- /// #![feature(btree_range)]
- ///
/// use std::collections::BTreeMap;
///
/// let mut map: BTreeMap<&str, i32> = ["Alice", "Bob", "Carol", "Cheryl"].iter()
/// println!("{} => {}", name, balance);
/// }
/// ```
- #[unstable(feature = "btree_range",
- reason = "matches collection reform specification, waiting for dust to settle",
- issue = "27787")]
+ #[stable(feature = "btree_range", since = "1.17.0")]
pub fn range_mut<T: ?Sized, R>(&mut self, range: R) -> RangeMut<K, V>
where T: Ord, K: Borrow<T>, R: RangeArgument<T>
{
/// [`BTreeSet`]: struct.BTreeSet.html
/// [`range`]: struct.BTreeSet.html#method.range
#[derive(Debug)]
+#[stable(feature = "btree_range", since = "1.17.0")]
pub struct Range<'a, T: 'a> {
iter: ::btree_map::Range<'a, T, ()>,
}
/// # Examples
///
/// ```
- /// #![feature(btree_range, collections_bound)]
- ///
/// use std::collections::BTreeSet;
/// use std::collections::Bound::Included;
///
/// }
/// assert_eq!(Some(&5), set.range(4..).next());
/// ```
- #[unstable(feature = "btree_range",
- reason = "matches collection reform specification, waiting for dust to settle",
- issue = "27787")]
+ #[stable(feature = "btree_range", since = "1.17.0")]
pub fn range<K: ?Sized, R>(&self, range: R) -> Range<T>
where K: Ord, T: Borrow<K>, R: RangeArgument<K>
{
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
-//! format_string := <text> [ format <text> ] *
+//! format_string := <text> [ maybe-format <text> ] *
+//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
+//! Note that padding zeroes are always placed after the sign (if any)
+//! and before the digits. When used together with the `#` flag, a similar
+//! rule applies: padding zeroes are inserted after the prefix but before
+//! the digits.
//!
//! ## Width
//!
}
/// An endpoint of a range of keys.
-#[unstable(feature = "collections_bound", issue = "27787")]
+#[stable(feature = "collections_bound", since = "1.17.0")]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum Bound<T> {
/// An inclusive bound.
+ #[stable(feature = "collections_bound", since = "1.17.0")]
Included(T),
/// An exclusive bound.
+ #[stable(feature = "collections_bound", since = "1.17.0")]
Excluded(T),
/// An infinite endpoint. Indicates that there is no bound in this direction.
+ #[stable(feature = "collections_bound", since = "1.17.0")]
Unbounded,
}
match self.head {
None => self.tail = node,
- Some(head) => (**head).prev = node,
+ Some(head) => (*head.as_mut_ptr()).prev = node,
}
self.head = node;
#[inline]
fn pop_front_node(&mut self) -> Option<Box<Node<T>>> {
self.head.map(|node| unsafe {
- let node = Box::from_raw(*node);
+ let node = Box::from_raw(node.as_mut_ptr());
self.head = node.next;
match self.head {
None => self.tail = None,
- Some(head) => (**head).prev = None,
+ Some(head) => (*head.as_mut_ptr()).prev = None,
}
self.len -= 1;
match self.tail {
None => self.head = node,
- Some(tail) => (**tail).next = node,
+ Some(tail) => (*tail.as_mut_ptr()).next = node,
}
self.tail = node;
#[inline]
fn pop_back_node(&mut self) -> Option<Box<Node<T>>> {
self.tail.map(|node| unsafe {
- let node = Box::from_raw(*node);
+ let node = Box::from_raw(node.as_mut_ptr());
self.tail = node.prev;
match self.tail {
None => self.head = None,
- Some(tail) => (**tail).next = None,
+ Some(tail) => (*tail.as_mut_ptr()).next = None,
}
self.len -= 1;
Some(tail) => {
if let Some(other_head) = other.head.take() {
unsafe {
- (**tail).next = Some(other_head);
- (**other_head).prev = Some(tail);
+ (*tail.as_mut_ptr()).next = Some(other_head);
+ (*other_head.as_mut_ptr()).prev = Some(tail);
}
self.tail = other.tail.take();
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
- self.head.map(|node| unsafe { &mut (**node).element })
+ self.head.map(|node| unsafe { &mut (*node.as_mut_ptr()).element })
}
/// Provides a reference to the back element, or `None` if the list is
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
- self.tail.map(|node| unsafe { &mut (**node).element })
+ self.tail.map(|node| unsafe { &mut (*node.as_mut_ptr()).element })
}
/// Adds an element first in the list.
let second_part_head;
unsafe {
- second_part_head = (**split_node.unwrap()).next.take();
+ second_part_head = (*split_node.unwrap().as_mut_ptr()).next.take();
if let Some(head) = second_part_head {
- (**head).prev = None;
+ (*head.as_mut_ptr()).prev = None;
}
}
None
} else {
self.head.map(|node| unsafe {
- let node = &mut **node;
+ let node = &mut *node.as_mut_ptr();
self.len -= 1;
self.head = node.next;
&mut node.element
None
} else {
self.tail.map(|node| unsafe {
- let node = &mut **node;
+ let node = &mut *node.as_mut_ptr();
self.len -= 1;
self.tail = node.prev;
&mut node.element
element: element,
})));
- (**prev).next = node;
- (**head).prev = node;
+ (*prev.as_mut_ptr()).next = node;
+ (*head.as_mut_ptr()).prev = node;
self.list.len += 1;
},
if self.len == 0 {
None
} else {
- self.head.map(|node| unsafe { &mut (**node).element })
+ self.head.map(|node| unsafe { &mut (*node.as_mut_ptr()).element })
}
}
}
/// ```
/// #![feature(collections)]
/// #![feature(collections_range)]
- /// #![feature(collections_bound)]
///
/// extern crate collections;
///
/// ```
/// #![feature(collections)]
/// #![feature(collections_range)]
- /// #![feature(collections_bound)]
///
/// extern crate collections;
///
//! the element type of the slice is `i32`, the element type of the iterator is
//! `&mut i32`.
//!
-//! * [`.iter()`] and [`.iter_mut()`] are the explicit methods to return the default
+//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
//! iterators.
-//! * Further methods that return iterators are [`.split()`], [`.splitn()`],
-//! [`.chunks()`], [`.windows()`] and more.
+//! * Further methods that return iterators are [`.split`], [`.splitn`],
+//! [`.chunks`], [`.windows`] and more.
//!
//! *[See also the slice primitive type](../../std/primitive.slice.html).*
//!
//! [`Ord`]: ../../std/cmp/trait.Ord.html
//! [`Iter`]: struct.Iter.html
//! [`Hash`]: ../../std/hash/trait.Hash.html
-//! [`.iter()`]: ../../std/primitive.slice.html#method.iter
-//! [`.iter_mut()`]: ../../std/primitive.slice.html#method.iter_mut
-//! [`.split()`]: ../../std/primitive.slice.html#method.split
-//! [`.splitn()`]: ../../std/primitive.slice.html#method.splitn
-//! [`.chunks()`]: ../../std/primitive.slice.html#method.chunks
-//! [`.windows()`]: ../../std/primitive.slice.html#method.windows
+//! [`.iter`]: ../../std/primitive.slice.html#method.iter
+//! [`.iter_mut`]: ../../std/primitive.slice.html#method.iter_mut
+//! [`.split`]: ../../std/primitive.slice.html#method.split
+//! [`.splitn`]: ../../std/primitive.slice.html#method.splitn
+//! [`.chunks`]: ../../std/primitive.slice.html#method.chunks
+//! [`.windows`]: ../../std/primitive.slice.html#method.windows
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
core_slice::SliceExt::first_mut(self)
}
- /// Returns the first and all the rest of the elements of a slice.
+ /// Returns the first and all the rest of the elements of a slice, or `None` if it is empty.
///
/// # Examples
///
core_slice::SliceExt::split_first(self)
}
- /// Returns the first and all the rest of the elements of a slice.
+ /// Returns the first and all the rest of the elements of a slice, or `None` if it is empty.
///
/// # Examples
///
core_slice::SliceExt::split_first_mut(self)
}
- /// Returns the last and all the rest of the elements of a slice.
+ /// Returns the last and all the rest of the elements of a slice, or `None` if it is empty.
///
/// # Examples
///
}
- /// Returns the last and all the rest of the elements of a slice.
+ /// Returns the last and all the rest of the elements of a slice, or `None` if it is empty.
///
/// # Examples
///
}
/// Returns a mutable reference to an element or subslice depending on the
- /// type of index (see [`get()`]) or `None` if the index is out of bounds.
+ /// type of index (see [`get`]) or `None` if the index is out of bounds.
///
- /// [`get()`]: #method.get
+ /// [`get`]: #method.get
///
/// # Examples
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
- /// Modifying the slice may cause its buffer to be reallocated, which
- /// would also make any pointers to it invalid.
+ /// Modifying the container referenced by this slice may cause its buffer
+ /// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
- /// Modifying the slice may cause its buffer to be reallocated, which
- /// would also make any pointers to it invalid.
+ /// Modifying the container referenced by this slice may cause its buffer
+ /// to be reallocated, which would also make any pointers to it invalid.
///
/// # Examples
///
/// excluding `end`.
///
/// To get a mutable string slice instead, see the
- /// [`slice_mut_unchecked()`] method.
+ /// [`slice_mut_unchecked`] method.
///
- /// [`slice_mut_unchecked()`]: #method.slice_mut_unchecked
+ /// [`slice_mut_unchecked`]: #method.slice_mut_unchecked
///
/// # Safety
///
/// excluding `end`.
///
/// To get an immutable string slice instead, see the
- /// [`slice_unchecked()`] method.
+ /// [`slice_unchecked`] method.
///
- /// [`slice_unchecked()`]: #method.slice_unchecked
+ /// [`slice_unchecked`]: #method.slice_unchecked
///
/// # Safety
///
/// The two slices returned go from the start of the string slice to `mid`,
/// and from `mid` to the end of the string slice.
///
- /// To get mutable string slices instead, see the [`split_at_mut()`]
+ /// To get mutable string slices instead, see the [`split_at_mut`]
/// method.
///
- /// [`split_at_mut()`]: #method.split_at_mut
+ /// [`split_at_mut`]: #method.split_at_mut
///
/// # Panics
///
/// The two slices returned go from the start of the string slice to `mid`,
/// and from `mid` to the end of the string slice.
///
- /// To get immutable string slices instead, see the [`split_at()`] method.
+ /// To get immutable string slices instead, see the [`split_at`] method.
///
- /// [`split_at()`]: #method.split_at
+ /// [`split_at`]: #method.split_at
///
/// # Panics
///
/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html
///
/// If the pattern allows a reverse search but its results might differ
- /// from a forward search, the [`rsplit()`] method can be used.
+ /// from a forward search, the [`rsplit`] method can be used.
///
/// [`char`]: primitive.char.html
- /// [`rsplit()`]: #method.rsplit
+ /// [`rsplit`]: #method.rsplit
///
/// # Examples
///
/// assert_eq!(d, &["a", "b", "c"]);
/// ```
///
- /// Use [`split_whitespace()`] for this behavior.
+ /// Use [`split_whitespace`] for this behavior.
///
- /// [`split_whitespace()`]: #method.split_whitespace
+ /// [`split_whitespace`]: #method.split_whitespace
#[stable(feature = "rust1", since = "1.0.0")]
pub fn split<'a, P: Pattern<'a>>(&'a self, pat: P) -> Split<'a, P> {
core_str::StrExt::split(self, pat)
///
/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html
///
- /// For iterating from the front, the [`split()`] method can be used.
+ /// For iterating from the front, the [`split`] method can be used.
///
- /// [`split()`]: #method.split
+ /// [`split`]: #method.split
///
/// # Examples
///
/// The pattern can be a `&str`, [`char`], or a closure that determines the
/// split.
///
- /// Equivalent to [`split()`], except that the trailing substring
+ /// Equivalent to [`split`], except that the trailing substring
/// is skipped if empty.
///
- /// [`split()`]: #method.split
+ /// [`split`]: #method.split
///
/// This method can be used for string data that is _terminated_,
/// rather than _separated_ by a pattern.
/// [`char`]: primitive.char.html
///
/// If the pattern allows a reverse search but its results might differ
- /// from a forward search, the [`rsplit_terminator()`] method can be used.
+ /// from a forward search, the [`rsplit_terminator`] method can be used.
///
- /// [`rsplit_terminator()`]: #method.rsplit_terminator
+ /// [`rsplit_terminator`]: #method.rsplit_terminator
///
/// # Examples
///
///
/// [`char`]: primitive.char.html
///
- /// Equivalent to [`split()`], except that the trailing substring is
+ /// Equivalent to [`split`], except that the trailing substring is
/// skipped if empty.
///
- /// [`split()`]: #method.split
+ /// [`split`]: #method.split
///
/// This method can be used for string data that is _terminated_,
/// rather than _separated_ by a pattern.
/// reverse search, and it will be double ended if a forward/reverse
/// search yields the same elements.
///
- /// For iterating from the front, the [`split_terminator()`] method can be
+ /// For iterating from the front, the [`split_terminator`] method can be
/// used.
///
- /// [`split_terminator()`]: #method.split_terminator
+ /// [`split_terminator`]: #method.split_terminator
///
/// # Examples
///
/// The returned iterator will not be double ended, because it is
/// not efficient to support.
///
- /// If the pattern allows a reverse search, the [`rsplitn()`] method can be
+ /// If the pattern allows a reverse search, the [`rsplitn`] method can be
/// used.
///
- /// [`rsplitn()`]: #method.rsplitn
+ /// [`rsplitn`]: #method.rsplitn
///
/// # Examples
///
/// The returned iterator will not be double ended, because it is not
/// efficient to support.
///
- /// For splitting from the front, the [`splitn()`] method can be used.
+ /// For splitting from the front, the [`splitn`] method can be used.
///
- /// [`splitn()`]: #method.splitn
+ /// [`splitn`]: #method.splitn
///
/// # Examples
///
/// [`char`]: primitive.char.html
///
/// If the pattern allows a reverse search but its results might differ
- /// from a forward search, the [`rmatches()`] method can be used.
+ /// from a forward search, the [`rmatches`] method can be used.
///
- /// [`rmatches()`]: #method.rmatches
+ /// [`rmatches`]: #method.rmatches
///
/// # Examples
///
///
/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html
///
- /// For iterating from the front, the [`matches()`] method can be used.
+ /// For iterating from the front, the [`matches`] method can be used.
///
- /// [`matches()`]: #method.matches
+ /// [`matches`]: #method.matches
///
/// # Examples
///
/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html
///
/// If the pattern allows a reverse search but its results might differ
- /// from a forward search, the [`rmatch_indices()`] method can be used.
+ /// from a forward search, the [`rmatch_indices`] method can be used.
///
- /// [`rmatch_indices()`]: #method.rmatch_indices
+ /// [`rmatch_indices`]: #method.rmatch_indices
///
/// # Examples
///
///
/// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html
///
- /// For iterating from the front, the [`match_indices()`] method can be used.
+ /// For iterating from the front, the [`match_indices`] method can be used.
///
- /// [`match_indices()`]: #method.match_indices
+ /// [`match_indices`]: #method.match_indices
///
/// # Examples
///
/// let hello = String::from("Hello, world!");
/// ```
///
-/// You can append a [`char`] to a `String` with the [`push()`] method, and
-/// append a [`&str`] with the [`push_str()`] method:
+/// You can append a [`char`] to a `String` with the [`push`] method, and
+/// append a [`&str`] with the [`push_str`] method:
///
/// ```
/// let mut hello = String::from("Hello, ");
/// ```
///
/// [`char`]: ../../std/primitive.char.html
-/// [`push()`]: #method.push
-/// [`push_str()`]: #method.push_str
+/// [`push`]: #method.push
+/// [`push_str`]: #method.push_str
///
/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
-/// the [`from_utf8()`] method:
+/// the [`from_utf8`] method:
///
/// ```
/// // some bytes, in a vector
/// assert_eq!("💖", sparkle_heart);
/// ```
///
-/// [`from_utf8()`]: #method.from_utf8
+/// [`from_utf8`]: #method.from_utf8
///
/// # UTF-8
///
/// Indexing is intended to be a constant-time operation, but UTF-8 encoding
/// does not allow us to do this. Furthermore, it's not clear what sort of
/// thing the index should return: a byte, a codepoint, or a grapheme cluster.
-/// The [`bytes()`] and [`chars()`] methods return iterators over the first
+/// The [`bytes`] and [`chars`] methods return iterators over the first
/// two, respectively.
///
-/// [`bytes()`]: #method.bytes
-/// [`chars()`]: #method.chars
+/// [`bytes`]: #method.bytes
+/// [`chars`]: #method.chars
///
/// # Deref
///
///
/// This buffer is always stored on the heap.
///
-/// You can look at these with the [`as_ptr()`], [`len()`], and [`capacity()`]
+/// You can look at these with the [`as_ptr`], [`len`], and [`capacity`]
/// methods:
///
/// ```
/// assert_eq!(String::from("Once upon a time..."), s);
/// ```
///
-/// [`as_ptr()`]: #method.as_ptr
-/// [`len()`]: #method.len
-/// [`capacity()`]: #method.capacity
+/// [`as_ptr`]: #method.as_ptr
+/// [`len`]: #method.len
+/// [`capacity`]: #method.capacity
///
/// If a `String` has enough capacity, adding elements to it will not
/// re-allocate. For example, consider this program:
///
/// At first, we have no memory allocated at all, but as we append to the
/// string, it increases its capacity appropriately. If we instead use the
-/// [`with_capacity()`] method to allocate the correct capacity initially:
+/// [`with_capacity`] method to allocate the correct capacity initially:
///
/// ```
/// let mut s = String::with_capacity(25);
/// }
/// ```
///
-/// [`with_capacity()`]: #method.with_capacity
+/// [`with_capacity`]: #method.with_capacity
///
/// We end up with a different output:
///
/// A possible error value when converting a `String` from a UTF-8 byte vector.
///
-/// This type is the error type for the [`from_utf8()`] method on [`String`]. It
+/// This type is the error type for the [`from_utf8`] method on [`String`]. It
/// is designed in such a way to carefully avoid reallocations: the
-/// [`into_bytes()`] method will give back the byte vector that was used in the
+/// [`into_bytes`] method will give back the byte vector that was used in the
/// conversion attempt.
///
-/// [`from_utf8()`]: struct.String.html#method.from_utf8
+/// [`from_utf8`]: struct.String.html#method.from_utf8
/// [`String`]: struct.String.html
-/// [`into_bytes()`]: struct.FromUtf8Error.html#method.into_bytes
+/// [`into_bytes`]: struct.FromUtf8Error.html#method.into_bytes
///
/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
-/// through the [`utf8_error()`] method.
+/// through the [`utf8_error`] method.
///
/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html
/// [`std::str`]: ../../std/str/index.html
/// [`u8`]: ../../std/primitive.u8.html
/// [`&str`]: ../../std/primitive.str.html
-/// [`utf8_error()`]: #method.utf8_error
+/// [`utf8_error`]: #method.utf8_error
///
/// # Examples
///
/// A possible error value when converting a `String` from a UTF-16 byte slice.
///
-/// This type is the error type for the [`from_utf16()`] method on [`String`].
+/// This type is the error type for the [`from_utf16`] method on [`String`].
///
-/// [`from_utf16()`]: struct.String.html#method.from_utf16
+/// [`from_utf16`]: struct.String.html#method.from_utf16
/// [`String`]: struct.String.html
///
/// # Examples
/// buffer. While that means that this initial operation is very
/// inexpensive, but may cause excessive allocation later, when you add
/// data. If you have an idea of how much data the `String` will hold,
- /// consider the [`with_capacity()`] method to prevent excessive
+ /// consider the [`with_capacity`] method to prevent excessive
/// re-allocation.
///
- /// [`with_capacity()`]: #method.with_capacity
+ /// [`with_capacity`]: #method.with_capacity
///
/// # Examples
///
/// Creates a new empty `String` with a particular capacity.
///
/// `String`s have an internal buffer to hold their data. The capacity is
- /// the length of that buffer, and can be queried with the [`capacity()`]
+ /// the length of that buffer, and can be queried with the [`capacity`]
/// method. This method creates an empty `String`, but one with an initial
/// buffer that can hold `capacity` bytes. This is useful when you may be
/// appending a bunch of data to the `String`, reducing the number of
/// reallocations it needs to do.
///
- /// [`capacity()`]: #method.capacity
+ /// [`capacity`]: #method.capacity
///
/// If the given capacity is `0`, no allocation will occur, and this method
- /// is identical to the [`new()`] method.
+ /// is identical to the [`new`] method.
///
- /// [`new()`]: #method.new
+ /// [`new`]: #method.new
///
/// # Examples
///
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the validity check, there is an unsafe version
- /// of this function, [`from_utf8_unchecked()`], which has the same behavior
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the check.
///
- /// [`from_utf8_unchecked()`]: struct.String.html#method.from_utf8_unchecked
+ /// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
///
/// This method will take care to not copy the vector, for efficiency's
/// sake.
///
/// If you need a `&str` instead of a `String`, consider
- /// [`str::from_utf8()`].
+ /// [`str::from_utf8`].
///
- /// [`str::from_utf8()`]: ../../std/str/fn.from_utf8.html
+ /// [`str::from_utf8`]: ../../std/str/fn.from_utf8.html
///
/// The inverse of this method is [`as_bytes`].
///
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want
/// to incur the overhead of the conversion, there is an unsafe version
- /// of this function, [`from_utf8_unchecked()`], which has the same behavior
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
/// but skips the checks.
///
- /// [`from_utf8_unchecked()`]: struct.String.html#method.from_utf8_unchecked
+ /// [`from_utf8_unchecked`]: struct.String.html#method.from_utf8_unchecked
///
/// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
/// UTF-8, then we need to insert the replacement characters, which will
/// Converts a vector of bytes to a `String` without checking that the
/// string contains valid UTF-8.
///
- /// See the safe version, [`from_utf8()`], for more details.
+ /// See the safe version, [`from_utf8`], for more details.
///
- /// [`from_utf8()`]: struct.String.html#method.from_utf8
+ /// [`from_utf8`]: struct.String.html#method.from_utf8
///
/// # Safety
///
/// The capacity may be increased by more than `additional` bytes if it
/// chooses, to prevent frequent reallocations.
///
- /// If you do not want this "at least" behavior, see the [`reserve_exact()`]
+ /// If you do not want this "at least" behavior, see the [`reserve_exact`]
/// method.
///
- /// [`reserve_exact()`]: #method.reserve_exact
+ /// [`reserve_exact`]: #method.reserve_exact
///
/// # Panics
///
/// Ensures that this `String`'s capacity is `additional` bytes
/// larger than its length.
///
- /// Consider using the [`reserve()`] method unless you absolutely know
+ /// Consider using the [`reserve`] method unless you absolutely know
/// better than the allocator.
///
- /// [`reserve()`]: #method.reserve
+ /// [`reserve`]: #method.reserve
///
/// # Panics
///
/// Implements the `+=` operator for appending to a `String`.
///
-/// This has the same behavior as the [`push_str()`] method.
+/// This has the same behavior as the [`push_str`] method.
///
-/// [`push_str()`]: struct.String.html#method.push_str
+/// [`push_str`]: struct.String.html#method.push_str
#[stable(feature = "stringaddassign", since = "1.12.0")]
impl<'a> AddAssign<&'a str> for String {
#[inline]
///
/// This `enum` is slightly awkward: it will never actually exist. This error is
/// part of the type signature of the implementation of [`FromStr`] on
-/// [`String`]. The return type of [`from_str()`], requires that an error be
+/// [`String`]. The return type of [`from_str`], requires that an error be
/// defined, but, given that a [`String`] can always be made into a new
/// [`String`] without error, this type will never actually be returned. As
/// such, it is only here to satisfy said signature, and is useless otherwise.
///
/// [`FromStr`]: ../../std/str/trait.FromStr.html
/// [`String`]: struct.String.html
-/// [`from_str()`]: ../../std/str/trait.FromStr.html#tymethod.from_str
+/// [`from_str`]: ../../std/str/trait.FromStr.html#tymethod.from_str
#[stable(feature = "str_parse_error", since = "1.5.0")]
#[derive(Copy)]
pub enum ParseError {}
}
}
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "string_from_box", since = "1.17.0")]
+impl From<Box<str>> for String {
+ fn from(s: Box<str>) -> String {
+ s.into_string()
+ }
+}
+
+#[stable(feature = "box_from_str", since = "1.17.0")]
+impl Into<Box<str>> for String {
+ fn into(self) -> Box<str> {
+ self.into_boxed_str()
+ }
+}
+
#[stable(feature = "string_from_cow_str", since = "1.14.0")]
impl<'a> From<Cow<'a, str>> for String {
fn from(s: Cow<'a, str>) -> String {
/// A draining iterator for `String`.
///
-/// This struct is created by the [`drain()`] method on [`String`]. See its
+/// This struct is created by the [`drain`] method on [`String`]. See its
/// documentation for more.
///
-/// [`drain()`]: struct.String.html#method.drain
+/// [`drain`]: struct.String.html#method.drain
/// [`String`]: struct.String.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a> {
//!
//! # Examples
//!
-//! You can explicitly create a [`Vec<T>`] with [`new()`]:
+//! You can explicitly create a [`Vec<T>`] with [`new`]:
//!
//! ```
//! let v: Vec<i32> = Vec::new();
//! ```
//!
//! [`Vec<T>`]: ../../std/vec/struct.Vec.html
-//! [`new()`]: ../../std/vec/struct.Vec.html#method.new
+//! [`new`]: ../../std/vec/struct.Vec.html#method.new
//! [`push`]: ../../std/vec/struct.Vec.html#method.push
//! [`Index`]: ../../std/ops/trait.Index.html
//! [`IndexMut`]: ../../std/ops/trait.IndexMut.html
/// The pointer will never be null, so this type is null-pointer-optimized.
///
/// However, the pointer may not actually point to allocated memory. In particular,
-/// if you construct a `Vec` with capacity 0 via [`Vec::new()`], [`vec![]`][`vec!`],
-/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit()`]
+/// if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`],
+/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`]
/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
-/// the `Vec` may not report a [`capacity()`] of 0*. `Vec` will allocate if and only
-/// if [`mem::size_of::<T>()`]` * capacity() > 0`. In general, `Vec`'s allocation
+/// the `Vec` may not report a [`capacity`] of 0*. `Vec` will allocate if and only
+/// if [`mem::size_of::<T>`]` * capacity() > 0`. In general, `Vec`'s allocation
/// details are subtle enough that it is strongly recommended that you only
/// free memory allocated by a `Vec` by creating a new `Vec` and dropping it.
///
/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap
/// (as defined by the allocator Rust is configured to use by default), and its
-/// pointer points to [`len()`] initialized elements in order (what you would see
-/// if you coerced it to a slice), followed by [`capacity()`]` - `[`len()`]
+/// pointer points to [`len`] initialized elements in order (what you would see
+/// if you coerced it to a slice), followed by [`capacity`]` - `[`len`]
/// logically uninitialized elements.
///
/// `Vec` will never perform a "small optimization" where elements are actually
///
/// `Vec` will never automatically shrink itself, even if completely empty. This
/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec`
-/// and then filling it back up to the same [`len()`] should incur no calls to
+/// and then filling it back up to the same [`len`] should incur no calls to
/// the allocator. If you wish to free up unused memory, use
-/// [`shrink_to_fit`][`shrink_to_fit()`].
+/// [`shrink_to_fit`].
///
/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is
/// sufficient. [`push`] and [`insert`] *will* (re)allocate if
-/// [`len()`]` == `[`capacity()`]. That is, the reported capacity is completely
+/// [`len`]` == `[`capacity`]. That is, the reported capacity is completely
/// accurate, and can be relied on. It can even be used to manually free the memory
/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even
/// when not necessary.
///
/// `vec![x; n]`, `vec![a, b, c, d]`, and
/// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec`
-/// with exactly the requested capacity. If [`len()`]` == `[`capacity()`],
+/// with exactly the requested capacity. If [`len`]` == `[`capacity`],
/// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted to
/// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements.
///
/// [`String`]: ../../std/string/struct.String.html
/// [`&str`]: ../../std/primitive.str.html
/// [`Vec::with_capacity`]: ../../std/vec/struct.Vec.html#method.with_capacity
-/// [`Vec::new()`]: ../../std/vec/struct.Vec.html#method.new
-/// [`shrink_to_fit()`]: ../../std/vec/struct.Vec.html#method.shrink_to_fit
-/// [`capacity()`]: ../../std/vec/struct.Vec.html#method.capacity
-/// [`mem::size_of::<T>()`]: ../../std/mem/fn.size_of.html
-/// [`len()`]: ../../std/vec/struct.Vec.html#method.len
+/// [`Vec::new`]: ../../std/vec/struct.Vec.html#method.new
+/// [`shrink_to_fit`]: ../../std/vec/struct.Vec.html#method.shrink_to_fit
+/// [`capacity`]: ../../std/vec/struct.Vec.html#method.capacity
+/// [`mem::size_of::<T>`]: ../../std/mem/fn.size_of.html
+/// [`len`]: ../../std/vec/struct.Vec.html#method.len
/// [`push`]: ../../std/vec/struct.Vec.html#method.push
/// [`insert`]: ../../std/vec/struct.Vec.html#method.insert
/// [`reserve`]: ../../std/vec/struct.Vec.html#method.reserve
/// Converts the vector into [`Box<[T]>`][owned slice].
///
/// Note that this will drop any excess capacity. Calling this and
- /// converting back to a vector with [`into_vec()`] is equivalent to calling
- /// [`shrink_to_fit()`].
+ /// converting back to a vector with [`into_vec`] is equivalent to calling
+ /// [`shrink_to_fit`].
///
/// [owned slice]: ../../std/boxed/struct.Box.html
- /// [`into_vec()`]: ../../std/primitive.slice.html#method.into_vec
- /// [`shrink_to_fit()`]: #method.shrink_to_fit
+ /// [`into_vec`]: ../../std/primitive.slice.html#method.into_vec
+ /// [`shrink_to_fit`]: #method.shrink_to_fit
///
/// # Examples
///
self.dedup_by(|a, b| key(a) == key(b))
}
- /// Removes consecutive elements in the vector that resolve to the same key.
+ /// Removes consecutive elements in the vector according to a predicate.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the vector, and
+ /// returns `true` if the elements compare equal, or `false` if they do not. Only the first
+ /// of adjacent equal items is kept.
///
/// If the vector is sorted, this removes all duplicates.
///
}
}
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "vec_from_box", since = "1.17.0")]
+impl<T> From<Box<[T]>> for Vec<T> {
+ fn from(s: Box<[T]>) -> Vec<T> {
+ s.into_vec()
+ }
+}
+
+#[stable(feature = "box_from_vec", since = "1.17.0")]
+impl<T> Into<Box<[T]>> for Vec<T> {
+ fn into(self) -> Box<[T]> {
+ self.into_boxed_slice()
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> From<&'a str> for Vec<u8> {
fn from(s: &'a str) -> Vec<u8> {
for _x in self.by_ref() {}
// RawVec handles deallocation
- let _ = unsafe { RawVec::from_raw_parts(*self.buf, self.cap) };
+ let _ = unsafe { RawVec::from_raw_parts(self.buf.as_mut_ptr(), self.cap) };
}
}
if self.tail_len > 0 {
unsafe {
- let source_vec = &mut **self.vec;
+ let source_vec = &mut *self.vec.as_mut_ptr();
// memmove back untouched tail, update to new length
let start = source_vec.len();
let tail = self.tail_start;
fn drop(&mut self) {
for _ in self.by_ref() {}
- let source_deque = unsafe { &mut **self.deque };
+ let source_deque = unsafe { &mut *self.deque.as_mut_ptr() };
// T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
//
#![feature(binary_heap_extras)]
#![feature(binary_heap_peek_mut_pop)]
#![feature(box_syntax)]
-#![feature(btree_range)]
#![feature(inclusive_range_syntax)]
#![feature(collection_placement)]
#![feature(collections)]
-#![feature(collections_bound)]
#![feature(const_fn)]
#![feature(exact_size_is_empty)]
#![feature(pattern)]
#![feature(test)]
#![feature(unboxed_closures)]
#![feature(unicode)]
+#![feature(utf8_error_error_len)]
extern crate collections;
extern crate test;
}
}
+#[test]
+fn from_utf8_error() {
+ macro_rules! test {
+ ($input: expr, $expected_valid_up_to: expr, $expected_error_len: expr) => {
+ let error = from_utf8($input).unwrap_err();
+ assert_eq!(error.valid_up_to(), $expected_valid_up_to);
+ assert_eq!(error.error_len(), $expected_error_len);
+ }
+ }
+ test!(b"A\xC3\xA9 \xFF ", 4, Some(1));
+ test!(b"A\xC3\xA9 \x80 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC1 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC1", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC2", 4, None);
+ test!(b"A\xC3\xA9 \xC2 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC2\xC0", 4, Some(1));
+ test!(b"A\xC3\xA9 \xE0", 4, None);
+ test!(b"A\xC3\xA9 \xE0\x9F", 4, Some(1));
+ test!(b"A\xC3\xA9 \xE0\xA0", 4, None);
+ test!(b"A\xC3\xA9 \xE0\xA0\xC0", 4, Some(2));
+ test!(b"A\xC3\xA9 \xE0\xA0 ", 4, Some(2));
+ test!(b"A\xC3\xA9 \xED\xA0\x80 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xF1", 4, None);
+ test!(b"A\xC3\xA9 \xF1\x80", 4, None);
+ test!(b"A\xC3\xA9 \xF1\x80\x80", 4, None);
+ test!(b"A\xC3\xA9 \xF1 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xF1\x80 ", 4, Some(2));
+ test!(b"A\xC3\xA9 \xF1\x80\x80 ", 4, Some(3));
+}
+
#[test]
fn test_as_bytes() {
// no null
/// # Examples
///
/// ```
- /// #![feature(move_cell)]
/// use std::cell::Cell;
///
/// let c1 = Cell::new(5i32);
/// assert_eq!(5, c2.get());
/// ```
#[inline]
- #[unstable(feature = "move_cell", issue = "39264")]
+ #[stable(feature = "move_cell", since = "1.17.0")]
pub fn swap(&self, other: &Self) {
if ptr::eq(self, other) {
return;
/// # Examples
///
/// ```
- /// #![feature(move_cell)]
/// use std::cell::Cell;
///
/// let c = Cell::new(5);
///
/// assert_eq!(5, old);
/// ```
- #[unstable(feature = "move_cell", issue = "39264")]
+ #[stable(feature = "move_cell", since = "1.17.0")]
pub fn replace(&self, val: T) -> T {
mem::replace(unsafe { &mut *self.value.get() }, val)
}
/// # Examples
///
/// ```
- /// #![feature(move_cell)]
/// use std::cell::Cell;
///
/// let c = Cell::new(5);
///
/// assert_eq!(five, 5);
/// ```
- #[unstable(feature = "move_cell", issue = "39264")]
+ #[stable(feature = "move_cell", since = "1.17.0")]
pub fn into_inner(self) -> T {
unsafe { self.value.into_inner() }
}
/// # Examples
///
/// ```
- /// #![feature(move_cell)]
/// use std::cell::Cell;
///
/// let c = Cell::new(5);
/// assert_eq!(five, 5);
/// assert_eq!(c.into_inner(), 0);
/// ```
- #[unstable(feature = "move_cell", issue = "39264")]
+ #[stable(feature = "move_cell", since = "1.17.0")]
pub fn take(&self) -> T {
self.replace(Default::default())
}
/// [`as`]: ../../book/casting-between-types.html#as
///
/// For an unsafe version of this function which ignores these checks, see
-/// [`from_u32_unchecked()`].
+/// [`from_u32_unchecked`].
///
-/// [`from_u32_unchecked()`]: fn.from_u32_unchecked.html
+/// [`from_u32_unchecked`]: fn.from_u32_unchecked.html
///
/// # Examples
///
///
/// This function is unsafe, as it may construct invalid `char` values.
///
-/// For a safe version of this function, see the [`from_u32()`] function.
+/// For a safe version of this function, see the [`from_u32`] function.
///
-/// [`from_u32()`]: fn.from_u32.html
+/// [`from_u32`]: fn.from_u32.html
///
/// # Examples
///
#[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<u32> for char {
- type Err = CharTryFromError;
+ type Error = CharTryFromError;
#[inline]
- fn try_from(i: u32) -> Result<Self, Self::Err> {
+ fn try_from(i: u32) -> Result<Self, Self::Error> {
if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) {
Err(CharTryFromError(()))
} else {
/// Returns an iterator that yields the hexadecimal Unicode escape of a
/// character, as `char`s.
///
-/// This `struct` is created by the [`escape_unicode()`] method on [`char`]. See
+/// This `struct` is created by the [`escape_unicode`] method on [`char`]. See
/// its documentation for more.
///
-/// [`escape_unicode()`]: ../../std/primitive.char.html#method.escape_unicode
+/// [`escape_unicode`]: ../../std/primitive.char.html#method.escape_unicode
/// [`char`]: ../../std/primitive.char.html
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that yields the literal escape code of a `char`.
///
-/// This `struct` is created by the [`escape_default()`] method on [`char`]. See
+/// This `struct` is created by the [`escape_default`] method on [`char`]. See
/// its documentation for more.
///
-/// [`escape_default()`]: ../../std/primitive.char.html#method.escape_default
+/// [`escape_default`]: ../../std/primitive.char.html#method.escape_default
/// [`char`]: ../../std/primitive.char.html
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that yields the literal escape code of a `char`.
///
-/// This `struct` is created by the [`escape_debug()`] method on [`char`]. See its
+/// This `struct` is created by the [`escape_debug`] method on [`char`]. See its
/// documentation for more.
///
-/// [`escape_debug()`]: ../../std/primitive.char.html#method.escape_debug
+/// [`escape_debug`]: ../../std/primitive.char.html#method.escape_debug
/// [`char`]: ../../std/primitive.char.html
#[unstable(feature = "char_escape_debug", issue = "35068")]
#[derive(Clone, Debug)]
/// ## Derivable
///
/// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d
-/// implementation of [`clone()`] calls [`clone()`] on each field.
+/// implementation of [`clone`] calls [`clone`] on each field.
///
/// ## How can I implement `Clone`?
///
/// `Clone` cannot be `derive`d, but can be implemented as:
///
/// [`Copy`]: ../../std/marker/trait.Copy.html
-/// [`clone()`]: trait.Clone.html#tymethod.clone
+/// [`clone`]: trait.Clone.html#tymethod.clone
///
/// ```
/// #[derive(Copy)]
/// # Examples
///
/// ```
- /// #![feature(ordering_chaining)]
- ///
/// use std::cmp::Ordering;
///
/// let result = Ordering::Equal.then(Ordering::Less);
///
/// assert_eq!(result, Ordering::Less);
/// ```
- #[unstable(feature = "ordering_chaining", issue = "37053")]
+ #[inline]
+ #[stable(feature = "ordering_chaining", since = "1.17.0")]
pub fn then(self, other: Ordering) -> Ordering {
match self {
Equal => other,
/// # Examples
///
/// ```
- /// #![feature(ordering_chaining)]
- ///
/// use std::cmp::Ordering;
///
/// let result = Ordering::Equal.then_with(|| Ordering::Less);
///
/// assert_eq!(result, Ordering::Less);
/// ```
- #[unstable(feature = "ordering_chaining", issue = "37053")]
+ #[inline]
+ #[stable(feature = "ordering_chaining", since = "1.17.0")]
pub fn then_with<F: FnOnce() -> Ordering>(self, f: F) -> Ordering {
match self {
Equal => f(),
#![stable(feature = "rust1", since = "1.0.0")]
+use str::FromStr;
+
/// A cheap, reference-to-reference conversion.
///
/// `AsRef` is very similar to, but different than, [`Borrow`]. See
/// # Generic Impls
///
/// - [`From<T>`][From]` for U` implies `Into<U> for T`
-/// - [`into()`] is reflexive, which means that `Into<T> for T` is implemented
+/// - [`into`] is reflexive, which means that `Into<T> for T` is implemented
///
/// [`TryInto`]: trait.TryInto.html
/// [`Option<T>`]: ../../std/option/enum.Option.html
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`String`]: ../../std/string/struct.String.html
/// [From]: trait.From.html
-/// [`into()`]: trait.Into.html#tymethod.into
+/// [`into`]: trait.Into.html#tymethod.into
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Into<T>: Sized {
/// Performs the conversion.
/// # Generic impls
///
/// - `From<T> for U` implies [`Into<U>`]` for T`
-/// - [`from()`] is reflexive, which means that `From<T> for T` is implemented
+/// - [`from`] is reflexive, which means that `From<T> for T` is implemented
///
/// [`TryFrom`]: trait.TryFrom.html
/// [`Option<T>`]: ../../std/option/enum.Option.html
/// [`Result<T, E>`]: ../../std/result/enum.Result.html
/// [`String`]: ../../std/string/struct.String.html
/// [`Into<U>`]: trait.Into.html
-/// [`from()`]: trait.From.html#tymethod.from
+/// [`from`]: trait.From.html#tymethod.from
#[stable(feature = "rust1", since = "1.0.0")]
pub trait From<T>: Sized {
/// Performs the conversion.
#[unstable(feature = "try_from", issue = "33417")]
pub trait TryInto<T>: Sized {
/// The type returned in the event of a conversion error.
- type Err;
+ type Error;
/// Performs the conversion.
- fn try_into(self) -> Result<T, Self::Err>;
+ fn try_into(self) -> Result<T, Self::Error>;
}
/// Attempt to construct `Self` via a conversion.
#[unstable(feature = "try_from", issue = "33417")]
pub trait TryFrom<T>: Sized {
/// The type returned in the event of a conversion error.
- type Err;
+ type Error;
/// Performs the conversion.
- fn try_from(value: T) -> Result<Self, Self::Err>;
+ fn try_from(value: T) -> Result<Self, Self::Error>;
}
////////////////////////////////////////////////////////////////////////////////
// TryFrom implies TryInto
#[unstable(feature = "try_from", issue = "33417")]
impl<T, U> TryInto<U> for T where U: TryFrom<T> {
- type Err = U::Err;
+ type Error = U::Error;
- fn try_into(self) -> Result<U, U::Err> {
+ fn try_into(self) -> Result<U, U::Error> {
U::try_from(self)
}
}
self
}
}
+
+// FromStr implies TryFrom<&str>
+#[unstable(feature = "try_from", issue = "33417")]
+impl<'a, T> TryFrom<&'a str> for T where T: FromStr {
+ type Error = <T as FromStr>::Err;
+
+ fn try_from(s: &'a str) -> Result<T, Self::Error> {
+ FromStr::from_str(s)
+ }
+}
// is zero
Some(min) if self.sign_aware_zero_pad() => {
self.fill = '0';
+ self.align = rt::v1::Alignment::Right;
write_prefix(self)?;
self.with_padding(min - width, rt::v1::Alignment::Right, |f| {
f.buf.write_str(buf)
// for the sign-aware zero padding, we render the sign first and
// behave as if we had no sign from the beginning.
let mut formatted = formatted.clone();
- let mut align = self.align;
let old_fill = self.fill;
+ let old_align = self.align;
+ let mut align = old_align;
if self.sign_aware_zero_pad() {
// a sign always goes first
let sign = unsafe { str::from_utf8_unchecked(formatted.sign) };
width = if width < sign.len() { 0 } else { width - sign.len() };
align = rt::v1::Alignment::Right;
self.fill = '0';
+ self.align = rt::v1::Alignment::Right;
}
// remaining parts go through the ordinary padding process.
})
};
self.fill = old_fill;
+ self.align = old_align;
ret
} else {
// this is the common case and we take a shortcut
//! # Examples
//!
//! ```rust
-//! use std::hash::{Hash, SipHasher, Hasher};
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
//!
//! #[derive(Hash)]
//! struct Person {
//! phone: u64,
//! }
//!
-//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
-//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
//!
-//! assert!(hash(&person1) != hash(&person2));
+//! assert!(calculate_hash(&person1) != calculate_hash(&person2));
//!
-//! fn hash<T: Hash>(t: &T) -> u64 {
-//! let mut s = SipHasher::new();
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
//! t.hash(&mut s);
//! s.finish()
//! }
//! [`Hash`]: trait.Hash.html
//!
//! ```rust
-//! use std::hash::{Hash, Hasher, SipHasher};
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
//!
//! struct Person {
//! id: u32,
-//! # #[allow(dead_code)]
+//! # #[allow(dead_code)]
//! name: String,
//! phone: u64,
//! }
//! }
//! }
//!
-//! let person1 = Person { id: 5, name: "Janet".to_string(), phone: 555_666_7777 };
-//! let person2 = Person { id: 5, name: "Bob".to_string(), phone: 555_666_7777 };
+//! let person1 = Person {
+//! id: 5,
+//! name: "Janet".to_string(),
+//! phone: 555_666_7777,
+//! };
+//! let person2 = Person {
+//! id: 5,
+//! name: "Bob".to_string(),
+//! phone: 555_666_7777,
+//! };
//!
-//! assert_eq!(hash(&person1), hash(&person2));
+//! assert_eq!(calculate_hash(&person1), calculate_hash(&person2));
//!
-//! fn hash<T: Hash>(t: &T) -> u64 {
-//! let mut s = SipHasher::new();
+//! fn calculate_hash<T: Hash>(t: &T) -> u64 {
+//! let mut s = DefaultHasher::new();
//! t.hash(&mut s);
//! s.finish()
//! }
///
/// This trait can be used with `#[derive]` if all fields implement `Hash`.
/// When `derive`d, the resulting hash will be the combination of the values
-/// from calling [`.hash()`] on each field.
+/// from calling [`.hash`] on each field.
///
/// ## How can I implement `Hash`?
///
/// [`Eq`]: ../../std/cmp/trait.Eq.html
/// [`HashMap`]: ../../std/collections/struct.HashMap.html
/// [`HashSet`]: ../../std/collections/struct.HashSet.html
-/// [`.hash()`]: #tymethod.hash
+/// [`.hash`]: #tymethod.hash
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Hash {
/// Feeds this value into the state given, updating the hasher as necessary.
/// Consumes the iterator, counting the number of iterations and returning it.
///
- /// This method will evaluate the iterator until its [`next()`] returns
+ /// This method will evaluate the iterator until its [`next`] returns
/// [`None`]. Once [`None`] is encountered, `count()` returns the number of
- /// times it called [`next()`].
+ /// times it called [`next`].
///
- /// [`next()`]: #tymethod.next
+ /// [`next`]: #tymethod.next
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Overflow Behavior
///
/// In other words, it zips two iterators together, into a single one.
///
- /// When either iterator returns [`None`], all further calls to [`next()`]
+ /// When either iterator returns [`None`], all further calls to [`next`]
/// will return [`None`].
///
/// # Examples
///
/// `zip()` is often used to zip an infinite iterator to a finite one.
/// This works because the finite iterator will eventually return [`None`],
- /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate()`]:
+ /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate`]:
///
/// ```
/// let enumerate: Vec<_> = "foo".chars().enumerate().collect();
/// assert_eq!((2, 'o'), zipper[2]);
/// ```
///
- /// [`enumerate()`]: trait.Iterator.html#method.enumerate
- /// [`next()`]: ../../std/iter/trait.Iterator.html#tymethod.next
+ /// [`enumerate`]: trait.Iterator.html#method.enumerate
+ /// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next
/// [`None`]: ../../std/option/enum.Option.html#variant.None
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
/// closure returns [`None`], it will try again, and call the closure on the
/// next element, seeing if it will return [`Some`].
///
- /// Why `filter_map()` and not just [`filter()`].[`map()`]? The key is in this
+ /// Why `filter_map()` and not just [`filter`].[`map`]? The key is in this
/// part:
///
- /// [`filter()`]: #method.filter
- /// [`map()`]: #method.map
+ /// [`filter`]: #method.filter
+ /// [`map`]: #method.map
///
/// > If the closure returns [`Some(element)`][`Some`], then that element is returned.
///
/// assert_eq!(iter.next(), None);
/// ```
///
- /// Here's the same example, but with [`filter()`] and [`map()`]:
+ /// Here's the same example, but with [`filter`] and [`map`]:
///
/// ```
/// let a = ["1", "2", "lol"];
/// iterator.
///
/// `enumerate()` keeps its count as a [`usize`]. If you want to count by a
- /// different sized integer, the [`zip()`] function provides similar
+ /// different sized integer, the [`zip`] function provides similar
/// functionality.
///
/// # Overflow Behavior
///
/// [`usize::MAX`]: ../../std/usize/constant.MAX.html
/// [`usize`]: ../../std/primitive.usize.html
- /// [`zip()`]: #method.zip
+ /// [`zip`]: #method.zip
///
/// # Examples
///
/// Creates an iterator which can use `peek` to look at the next element of
/// the iterator without consuming it.
///
- /// Adds a [`peek()`] method to an iterator. See its documentation for
+ /// Adds a [`peek`] method to an iterator. See its documentation for
/// more information.
///
- /// Note that the underlying iterator is still advanced when [`peek()`] is
+ /// Note that the underlying iterator is still advanced when [`peek`] is
/// called for the first time: In order to retrieve the next element,
- /// [`next()`] is called on the underlying iterator, hence any side effects of
- /// the [`next()`] method will occur.
+ /// [`next`] is called on the underlying iterator, hence any side effects of
+ /// the [`next`] method will occur.
///
- /// [`peek()`]: struct.Peekable.html#method.peek
- /// [`next()`]: ../../std/iter/trait.Iterator.html#tymethod.next
+ /// [`peek`]: struct.Peekable.html#method.peek
+ /// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next
///
/// # Examples
///
Peekable{iter: self, peeked: None}
}
- /// Creates an iterator that [`skip()`]s elements based on a predicate.
+ /// Creates an iterator that [`skip`]s elements based on a predicate.
///
- /// [`skip()`]: #method.skip
+ /// [`skip`]: #method.skip
///
/// `skip_while()` takes a closure as an argument. It will call this
/// closure on each element of the iterator, and ignore elements
Take{iter: self, n: n}
}
- /// An iterator adaptor similar to [`fold()`] that holds internal state and
+ /// An iterator adaptor similar to [`fold`] that holds internal state and
/// produces a new iterator.
///
- /// [`fold()`]: #method.fold
+ /// [`fold`]: #method.fold
///
/// `scan()` takes two arguments: an initial value which seeds the internal
/// state, and a closure with two arguments, the first being a mutable
/// Creates an iterator that works like map, but flattens nested structure.
///
- /// The [`map()`] adapter is very useful, but only when the closure
+ /// The [`map`] adapter is very useful, but only when the closure
/// argument produces values. If it produces an iterator instead, there's
/// an extra layer of indirection. `flat_map()` will remove this extra layer
/// on its own.
///
- /// Another way of thinking about `flat_map()`: [`map()`]'s closure returns
+ /// Another way of thinking about `flat_map()`: [`map`]'s closure returns
/// one item for each element, and `flat_map()`'s closure returns an
/// iterator for each element.
///
- /// [`map()`]: #method.map
+ /// [`map`]: #method.map
///
/// # Examples
///
/// library, used in a variety of contexts.
///
/// The most basic pattern in which `collect()` is used is to turn one
- /// collection into another. You take a collection, call [`iter()`] on it,
+ /// collection into another. You take a collection, call [`iter`] on it,
/// do a bunch of transformations, and then `collect()` at the end.
///
/// One of the keys to `collect()`'s power is that many things you might
/// assert_eq!(Ok(vec![1, 3]), result);
/// ```
///
- /// [`iter()`]: ../../std/iter/trait.Iterator.html#tymethod.next
+ /// [`iter`]: ../../std/iter/trait.Iterator.html#tymethod.next
/// [`String`]: ../../std/string/struct.String.html
/// [`char`]: ../../std/primitive.char.html
/// [`Result`]: ../../std/result/enum.Result.html
/// collections: one from the left elements of the pairs, and one
/// from the right elements.
///
- /// This function is, in some sense, the opposite of [`zip()`].
+ /// This function is, in some sense, the opposite of [`zip`].
///
- /// [`zip()`]: #method.zip
+ /// [`zip`]: #method.zip
///
/// # Examples
///
(ts, us)
}
- /// Creates an iterator which [`clone()`]s all of its elements.
+ /// Creates an iterator which [`clone`]s all of its elements.
///
/// This is useful when you have an iterator over `&T`, but you need an
/// iterator over `T`.
///
- /// [`clone()`]: ../../std/clone/trait.Clone.html#tymethod.clone
+ /// [`clone`]: ../../std/clone/trait.Clone.html#tymethod.clone
///
/// # Examples
///
//! }
//! ```
//!
-//! An iterator has a method, [`next()`], which when called, returns an
-//! [`Option`]`<Item>`. [`next()`] will return `Some(Item)` as long as there
+//! An iterator has a method, [`next`], which when called, returns an
+//! [`Option`]`<Item>`. [`next`] will return `Some(Item)` as long as there
//! are elements, and once they've all been exhausted, will return `None` to
//! indicate that iteration is finished. Individual iterators may choose to
-//! resume iteration, and so calling [`next()`] again may or may not eventually
+//! resume iteration, and so calling [`next`] again may or may not eventually
//! start returning `Some(Item)` again at some point.
//!
//! [`Iterator`]'s full definition includes a number of other methods as well,
-//! but they are default methods, built on top of [`next()`], and so you get
+//! but they are default methods, built on top of [`next`], and so you get
//! them for free.
//!
//! Iterators are also composable, and it's common to chain them together to do
//! below for more details.
//!
//! [`Iterator`]: trait.Iterator.html
-//! [`next()`]: trait.Iterator.html#tymethod.next
+//! [`next`]: trait.Iterator.html#tymethod.next
//! [`Option`]: ../../std/option/enum.Option.html
//!
//! # The three forms of iteration
//! produce an iterator. What gives?
//!
//! There's a trait in the standard library for converting something into an
-//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter()`],
+//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter`],
//! which converts the thing implementing [`IntoIterator`] into an iterator.
//! Let's take a look at that `for` loop again, and what the compiler converts
//! it into:
//!
//! [`IntoIterator`]: trait.IntoIterator.html
-//! [`into_iter()`]: trait.IntoIterator.html#tymethod.into_iter
+//! [`into_iter`]: trait.IntoIterator.html#tymethod.into_iter
//!
//! ```
//! let values = vec![1, 2, 3, 4, 5];
//! ```
//!
//! First, we call `into_iter()` on the value. Then, we match on the iterator
-//! that returns, calling [`next()`] over and over until we see a `None`. At
+//! that returns, calling [`next`] over and over until we see a `None`. At
//! that point, we `break` out of the loop, and we're done iterating.
//!
//! There's one more subtle bit here: the standard library contains an
//! often called 'iterator adapters', as they're a form of the 'adapter
//! pattern'.
//!
-//! Common iterator adapters include [`map()`], [`take()`], and [`filter()`].
+//! Common iterator adapters include [`map`], [`take`], and [`filter`].
//! For more, see their documentation.
//!
-//! [`map()`]: trait.Iterator.html#method.map
-//! [`take()`]: trait.Iterator.html#method.take
-//! [`filter()`]: trait.Iterator.html#method.filter
+//! [`map`]: trait.Iterator.html#method.map
+//! [`take`]: trait.Iterator.html#method.take
+//! [`filter`]: trait.Iterator.html#method.filter
//!
//! # Laziness
//!
//! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that
//! just creating an iterator doesn't _do_ a whole lot. Nothing really happens
-//! until you call [`next()`]. This is sometimes a source of confusion when
-//! creating an iterator solely for its side effects. For example, the [`map()`]
+//! until you call [`next`]. This is sometimes a source of confusion when
+//! creating an iterator solely for its side effects. For example, the [`map`]
//! method calls a closure on each element it iterates over:
//!
//! ```
//! do nothing unless consumed
//! ```
//!
-//! The idiomatic way to write a [`map()`] for its side effects is to use a
+//! The idiomatic way to write a [`map`] for its side effects is to use a
//! `for` loop instead:
//!
//! ```
//! }
//! ```
//!
-//! [`map()`]: trait.Iterator.html#method.map
+//! [`map`]: trait.Iterator.html#method.map
//!
//! The two most common ways to evaluate an iterator are to use a `for` loop
-//! like this, or using the [`collect()`] method to produce a new collection.
+//! like this, or using the [`collect`] method to produce a new collection.
//!
-//! [`collect()`]: trait.Iterator.html#method.collect
+//! [`collect`]: trait.Iterator.html#method.collect
//!
//! # Infinity
//!
//! let numbers = 0..;
//! ```
//!
-//! It is common to use the [`take()`] iterator adapter to turn an infinite
+//! It is common to use the [`take`] iterator adapter to turn an infinite
//! iterator into a finite one:
//!
//! ```
//!
//! This will print the numbers `0` through `4`, each on their own line.
//!
-//! [`take()`]: trait.Iterator.html#method.take
+//! [`take`]: trait.Iterator.html#method.take
#![stable(feature = "rust1", since = "1.0.0")]
/// A double-ended iterator with the direction inverted.
///
-/// This `struct` is created by the [`rev()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`rev()`]: trait.Iterator.html#method.rev
+/// [`rev`]: trait.Iterator.html#method.rev
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that clones the elements of an underlying iterator.
///
-/// This `struct` is created by the [`cloned()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`cloned()`]: trait.Iterator.html#method.cloned
+/// [`cloned`]: trait.Iterator.html#method.cloned
/// [`Iterator`]: trait.Iterator.html
#[stable(feature = "iter_cloned", since = "1.1.0")]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that repeats endlessly.
///
-/// This `struct` is created by the [`cycle()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`cycle()`]: trait.Iterator.html#method.cycle
+/// [`cycle`]: trait.Iterator.html#method.cycle
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that strings two iterators together.
///
-/// This `struct` is created by the [`chain()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`chain`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`chain()`]: trait.Iterator.html#method.chain
+/// [`chain`]: trait.Iterator.html#method.chain
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that iterates two other iterators simultaneously.
///
-/// This `struct` is created by the [`zip()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`zip`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`zip()`]: trait.Iterator.html#method.zip
+/// [`zip`]: trait.Iterator.html#method.zip
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that maps the values of `iter` with `f`.
///
-/// This `struct` is created by the [`map()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`map`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`map()`]: trait.Iterator.html#method.map
+/// [`map`]: trait.Iterator.html#method.map
/// [`Iterator`]: trait.Iterator.html
///
/// # Notes about side effects
///
-/// The [`map()`] iterator implements [`DoubleEndedIterator`], meaning that
-/// you can also [`map()`] backwards:
+/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that
+/// you can also [`map`] backwards:
///
/// ```rust
/// let v: Vec<i32> = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect();
/// An iterator that filters the elements of `iter` with `predicate`.
///
-/// This `struct` is created by the [`filter()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`filter()`]: trait.Iterator.html#method.filter
+/// [`filter`]: trait.Iterator.html#method.filter
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that uses `f` to both filter and map elements from `iter`.
///
-/// This `struct` is created by the [`filter_map()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`filter_map()`]: trait.Iterator.html#method.filter_map
+/// [`filter_map`]: trait.Iterator.html#method.filter_map
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that yields the current count and the element during iteration.
///
-/// This `struct` is created by the [`enumerate()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`enumerate()`]: trait.Iterator.html#method.enumerate
+/// [`enumerate`]: trait.Iterator.html#method.enumerate
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator with a `peek()` that returns an optional reference to the next
/// element.
///
-/// This `struct` is created by the [`peekable()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`peekable()`]: trait.Iterator.html#method.peekable
+/// [`peekable`]: trait.Iterator.html#method.peekable
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
impl<I: Iterator> Peekable<I> {
/// Returns a reference to the next() value without advancing the iterator.
///
- /// Like [`next()`], if there is a value, it is wrapped in a `Some(T)`.
+ /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`.
/// But if the iteration is over, `None` is returned.
///
- /// [`next()`]: trait.Iterator.html#tymethod.next
+ /// [`next`]: trait.Iterator.html#tymethod.next
///
/// Because `peek()` returns a reference, and many iterators iterate over
/// references, there can be a possibly confusing situation where the
/// An iterator that rejects elements while `predicate` is true.
///
-/// This `struct` is created by the [`skip_while()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`skip_while()`]: trait.Iterator.html#method.skip_while
+/// [`skip_while`]: trait.Iterator.html#method.skip_while
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that only accepts elements while `predicate` is true.
///
-/// This `struct` is created by the [`take_while()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`take_while()`]: trait.Iterator.html#method.take_while
+/// [`take_while`]: trait.Iterator.html#method.take_while
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that skips over `n` elements of `iter`.
///
-/// This `struct` is created by the [`skip()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`skip()`]: trait.Iterator.html#method.skip
+/// [`skip`]: trait.Iterator.html#method.skip
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that only iterates over the first `n` iterations of `iter`.
///
-/// This `struct` is created by the [`take()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`take`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`take()`]: trait.Iterator.html#method.take
+/// [`take`]: trait.Iterator.html#method.take
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator to maintain state while iterating another iterator.
///
-/// This `struct` is created by the [`scan()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`scan()`]: trait.Iterator.html#method.scan
+/// [`scan`]: trait.Iterator.html#method.scan
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that maps each element to an iterator, and yields the elements
/// of the produced iterators.
///
-/// This `struct` is created by the [`flat_map()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`flat_map`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`flat_map()`]: trait.Iterator.html#method.flat_map
+/// [`flat_map`]: trait.Iterator.html#method.flat_map
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that yields `None` forever after the underlying iterator
/// yields `None` once.
///
-/// This `struct` is created by the [`fuse()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`fuse()`]: trait.Iterator.html#method.fuse
+/// [`fuse`]: trait.Iterator.html#method.fuse
/// [`Iterator`]: trait.Iterator.html
#[derive(Clone, Debug)]
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
/// An iterator that calls a function with a reference to each element before
/// yielding it.
///
-/// This `struct` is created by the [`inspect()`] method on [`Iterator`]. See its
+/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its
/// documentation for more.
///
-/// [`inspect()`]: trait.Iterator.html#method.inspect
+/// [`inspect`]: trait.Iterator.html#method.inspect
/// [`Iterator`]: trait.Iterator.html
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
/// An iterator that repeats an element endlessly.
///
-/// This `struct` is created by the [`repeat()`] function. See its documentation for more.
+/// This `struct` is created by the [`repeat`] function. See its documentation for more.
///
-/// [`repeat()`]: fn.repeat.html
+/// [`repeat`]: fn.repeat.html
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Repeat<A> {
/// over and over and 🔁.
///
/// Infinite iterators like `repeat()` are often used with adapters like
-/// [`take()`], in order to make them finite.
+/// [`take`], in order to make them finite.
///
-/// [`take()`]: trait.Iterator.html#method.take
+/// [`take`]: trait.Iterator.html#method.take
///
/// # Examples
///
/// assert_eq!(Some(4), fours.next());
/// ```
///
-/// Going finite with [`take()`]:
+/// Going finite with [`take`]:
///
/// ```
/// use std::iter;
/// An iterator that yields nothing.
///
-/// This `struct` is created by the [`empty()`] function. See its documentation for more.
+/// This `struct` is created by the [`empty`] function. See its documentation for more.
///
-/// [`empty()`]: fn.empty.html
+/// [`empty`]: fn.empty.html
#[stable(feature = "iter_empty", since = "1.2.0")]
pub struct Empty<T>(marker::PhantomData<T>);
/// An iterator that yields an element exactly once.
///
-/// This `struct` is created by the [`once()`] function. See its documentation for more.
+/// This `struct` is created by the [`once`] function. See its documentation for more.
///
-/// [`once()`]: fn.once.html
+/// [`once`]: fn.once.html
#[derive(Clone, Debug)]
#[stable(feature = "iter_once", since = "1.2.0")]
pub struct Once<T> {
/// Creates an iterator that yields an element exactly once.
///
-/// This is commonly used to adapt a single value into a [`chain()`] of other
+/// This is commonly used to adapt a single value into a [`chain`] of other
/// kinds of iteration. Maybe you have an iterator that covers almost
/// everything, but you need an extra special case. Maybe you have a function
/// which works on iterators, but you only need to process one value.
///
-/// [`chain()`]: trait.Iterator.html#method.chain
+/// [`chain`]: trait.Iterator.html#method.chain
///
/// # Examples
///
/// created from an iterator. This is common for types which describe a
/// collection of some kind.
///
-/// `FromIterator`'s [`from_iter()`] is rarely called explicitly, and is instead
-/// used through [`Iterator`]'s [`collect()`] method. See [`collect()`]'s
+/// `FromIterator`'s [`from_iter`] is rarely called explicitly, and is instead
+/// used through [`Iterator`]'s [`collect`] method. See [`collect`]'s
/// documentation for more examples.
///
-/// [`from_iter()`]: #tymethod.from_iter
+/// [`from_iter`]: #tymethod.from_iter
/// [`Iterator`]: trait.Iterator.html
-/// [`collect()`]: trait.Iterator.html#method.collect
+/// [`collect`]: trait.Iterator.html#method.collect
///
/// See also: [`IntoIterator`].
///
/// assert_eq!(v, vec![5, 5, 5, 5, 5]);
/// ```
///
-/// Using [`collect()`] to implicitly use `FromIterator`:
+/// Using [`collect`] to implicitly use `FromIterator`:
///
/// ```
/// let five_fives = std::iter::repeat(5).take(5);
/// backwards, a good start is to know where the end is.
///
/// When implementing an `ExactSizeIterator`, you must also implement
-/// [`Iterator`]. When doing so, the implementation of [`size_hint()`] *must*
+/// [`Iterator`]. When doing so, the implementation of [`size_hint`] *must*
/// return the exact size of the iterator.
///
/// [`Iterator`]: trait.Iterator.html
-/// [`size_hint()`]: trait.Iterator.html#method.size_hint
+/// [`size_hint`]: trait.Iterator.html#method.size_hint
///
-/// The [`len()`] method has a default implementation, so you usually shouldn't
+/// The [`len`] method has a default implementation, so you usually shouldn't
/// implement it. However, you may be able to provide a more performant
/// implementation than the default, so overriding it in this case makes sense.
///
-/// [`len()`]: #method.len
+/// [`len`]: #method.len
///
/// # Examples
///
/// implementation, you can do so. See the [trait-level] docs for an
/// example.
///
- /// This function has the same safety guarantees as the [`size_hint()`]
+ /// This function has the same safety guarantees as the [`size_hint`]
/// function.
///
/// [trait-level]: trait.ExactSizeIterator.html
- /// [`size_hint()`]: trait.Iterator.html#method.size_hint
+ /// [`size_hint`]: trait.Iterator.html#method.size_hint
///
/// # Examples
///
/// Trait to represent types that can be created by summing up an iterator.
///
-/// This trait is used to implement the [`sum()`] method on iterators. Types which
-/// implement the trait can be generated by the [`sum()`] method. Like
+/// This trait is used to implement the [`sum`] method on iterators. Types which
+/// implement the trait can be generated by the [`sum`] method. Like
/// [`FromIterator`] this trait should rarely be called directly and instead
-/// interacted with through [`Iterator::sum()`].
+/// interacted with through [`Iterator::sum`].
///
-/// [`sum()`]: ../../std/iter/trait.Sum.html#tymethod.sum
+/// [`sum`]: ../../std/iter/trait.Sum.html#tymethod.sum
/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html
-/// [`Iterator::sum()`]: ../../std/iter/trait.Iterator.html#method.sum
+/// [`Iterator::sum`]: ../../std/iter/trait.Iterator.html#method.sum
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
pub trait Sum<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// Trait to represent types that can be created by multiplying elements of an
/// iterator.
///
-/// This trait is used to implement the [`product()`] method on iterators. Types
-/// which implement the trait can be generated by the [`product()`] method. Like
+/// This trait is used to implement the [`product`] method on iterators. Types
+/// which implement the trait can be generated by the [`product`] method. Like
/// [`FromIterator`] this trait should rarely be called directly and instead
-/// interacted with through [`Iterator::product()`].
+/// interacted with through [`Iterator::product`].
///
-/// [`product()`]: ../../std/iter/trait.Product.html#tymethod.product
+/// [`product`]: ../../std/iter/trait.Product.html#tymethod.product
/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html
-/// [`Iterator::product()`]: ../../std/iter/trait.Iterator.html#method.product
+/// [`Iterator::product`]: ../../std/iter/trait.Iterator.html#method.product
#[stable(feature = "iter_arith_traits", since = "1.12.0")]
pub trait Product<A = Self>: Sized {
/// Method which takes an iterator and generates `Self` from the elements by
/// that behave this way because it allows for some significant optimizations.
///
/// Note: In general, you should not use `FusedIterator` in generic bounds if
-/// you need a fused iterator. Instead, you should just call [`Iterator::fuse()`]
+/// you need a fused iterator. Instead, you should just call [`Iterator::fuse`]
/// on the iterator. If the iterator is already fused, the additional [`Fuse`]
/// wrapper will be a no-op with no performance penalty.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
-/// [`Iterator::fuse()`]: ../../std/iter/trait.Iterator.html#method.fuse
+/// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse
/// [`Fuse`]: ../../std/iter/struct.Fuse.html
#[unstable(feature = "fused", issue = "35602")]
pub trait FusedIterator: Iterator {}
/// # Safety
///
/// This trait must only be implemented when the contract is upheld.
-/// Consumers of this trait must inspect [`.size_hint()`]’s upper bound.
+/// Consumers of this trait must inspect [`.size_hint`]’s upper bound.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
/// [`usize::MAX`]: ../../std/usize/constant.MAX.html
-/// [`.size_hint()`]: ../../std/iter/trait.Iterator.html#method.size_hint
+/// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint
#[unstable(feature = "trusted_len", issue = "37572")]
pub unsafe trait TrustedLen : Iterator {}
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
///
-/// Like [`assert!()`], this macro has a second version, where a custom
+/// Like [`assert!`], this macro has a second version, where a custom
/// panic message can be provided.
///
-/// [`assert!()`]: macro.assert.html
+/// [`assert!`]: macro.assert.html
///
/// # Examples
///
/// [`String`]'s buffer, leading to a double free.
///
/// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's
-/// managing some resource besides its own [`size_of::<T>()`] bytes.
+/// managing some resource besides its own [`size_of::<T>`] bytes.
///
/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get
/// the error [E0204].
/// [`Vec<T>`]: ../../std/vec/struct.Vec.html
/// [`String`]: ../../std/string/struct.String.html
/// [`Drop`]: ../../std/ops/trait.Drop.html
-/// [`size_of::<T>()`]: ../../std/mem/fn.size_of.html
+/// [`size_of::<T>`]: ../../std/mem/fn.size_of.html
/// [`Clone`]: ../clone/trait.Clone.html
/// [`String`]: ../../std/string/struct.String.html
/// [`i32`]: ../../std/primitive.i32.html
/// the contained value.
///
/// This function will unsafely assume the pointer `src` is valid for
-/// [`size_of::<U>()`][size_of] bytes by transmuting `&T` to `&U` and then reading
+/// [`size_of::<U>`][size_of] bytes by transmuting `&T` to `&U` and then reading
/// the `&U`. It will also unsafely create a copy of the contained value instead of
/// moving out of `src`.
///
/// A classification of floating point numbers.
///
-/// This `enum` is used as the return type for [`f32::classify()`] and [`f64::classify()`]. See
+/// This `enum` is used as the return type for [`f32::classify`] and [`f64::classify`]. See
/// their documentation for more.
///
-/// [`f32::classify()`]: ../../std/primitive.f32.html#method.classify
-/// [`f64::classify()`]: ../../std/primitive.f64.html#method.classify
+/// [`f32::classify`]: ../../std/primitive.f32.html#method.classify
+/// [`f64::classify`]: ../../std/primitive.f64.html#method.classify
///
/// # Examples
///
($storage:ty, $target:ty, $($source:ty),*) => {$(
#[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<$source> for $target {
- type Err = TryFromIntError;
+ type Error = TryFromIntError;
fn try_from(u: $source) -> Result<$target, TryFromIntError> {
let min = <$target as FromStrRadixHelper>::min_value() as $storage;
($unsigned:ty, $($signed:ty),*) => {$(
#[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<$unsigned> for $signed {
- type Err = TryFromIntError;
+ type Error = TryFromIntError;
fn try_from(u: $unsigned) -> Result<$signed, TryFromIntError> {
let max = <$signed as FromStrRadixHelper>::max_value() as u128;
#[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<$signed> for $unsigned {
- type Err = TryFromIntError;
+ type Error = TryFromIntError;
fn try_from(u: $signed) -> Result<$unsigned, TryFromIntError> {
let max = <$unsigned as FromStrRadixHelper>::max_value() as u128;
/// An error which can be returned when parsing an integer.
///
/// This error is used as the error type for the `from_str_radix()` functions
-/// on the primitive integer types, such as [`i8::from_str_radix()`].
+/// on the primitive integer types, such as [`i8::from_str_radix`].
///
-/// [`i8::from_str_radix()`]: ../../std/primitive.i8.html#method.from_str_radix
+/// [`i8::from_str_radix`]: ../../std/primitive.i8.html#method.from_str_radix
#[derive(Debug, Clone, PartialEq, Eq)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ParseIntError {
//! contexts involving built-in types, this is usually not a problem.
//! However, using these operators in generic code, requires some
//! attention if values have to be reused as opposed to letting the operators
-//! consume them. One option is to occasionally use [`clone()`].
+//! consume them. One option is to occasionally use [`clone`].
//! Another option is to rely on the types involved providing additional
//! operator implementations for references. For example, for a user-defined
//! type `T` which is supposed to support addition, it is probably a good
//! [`FnOnce`]: trait.FnOnce.html
//! [`Add`]: trait.Add.html
//! [`Sub`]: trait.Sub.html
-//! [`clone()`]: ../clone/trait.Clone.html#tymethod.clone
+//! [`clone`]: ../clone/trait.Clone.html#tymethod.clone
#![stable(feature = "rust1", since = "1.0.0")]
/// A (half-open) range which is bounded at both ends: { x | start <= x < end }.
/// Use `start..end` (two dots) for its shorthand.
///
-/// See the [`contains()`](#method.contains) method for its characterization.
+/// See the [`contains`](#method.contains) method for its characterization.
///
/// # Examples
///
/// A range which is only bounded below: { x | start <= x }.
/// Use `start..` for its shorthand.
///
-/// See the [`contains()`](#method.contains) method for its characterization.
+/// See the [`contains`](#method.contains) method for its characterization.
///
/// Note: Currently, no overflow checking is done for the iterator
/// implementation; if you use an integer range and the integer overflows, it
/// A range which is only bounded above: { x | x < end }.
/// Use `..end` (two dots) for its shorthand.
///
-/// See the [`contains()`](#method.contains) method for its characterization.
+/// See the [`contains`](#method.contains) method for its characterization.
///
/// It cannot serve as an iterator because it doesn't have a starting point.
///
/// An inclusive range which is bounded at both ends: { x | start <= x <= end }.
/// Use `start...end` (three dots) for its shorthand.
///
-/// See the [`contains()`](#method.contains) method for its characterization.
+/// See the [`contains`](#method.contains) method for its characterization.
///
/// # Examples
///
/// An inclusive range which is only bounded above: { x | x <= end }.
/// Use `...end` (three dots) for its shorthand.
///
-/// See the [`contains()`](#method.contains) method for its characterization.
+/// See the [`contains`](#method.contains) method for its characterization.
///
/// It cannot serve as an iterator because it doesn't have a starting point.
///
/// Basic usage:
///
/// ```
-/// #![feature(ptr_unaligned)]
-///
/// let x = 12;
/// let y = &x as *const i32;
///
/// }
/// ```
#[inline(always)]
-#[unstable(feature = "ptr_unaligned", issue = "37955")]
+#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src as *const u8,
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
-/// It does not immediately drop the contents of `src` either; it is rather
-/// *moved* into the memory location `dst` and will be dropped whenever that
-/// location goes out of scope.
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
/// allocations or resources, so care must be taken not to overwrite an object
/// that should be dropped.
///
+/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
+/// location pointed to by `dst`.
+///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
///
/// Basic usage:
///
/// ```
-/// #![feature(ptr_unaligned)]
-///
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
/// }
/// ```
#[inline]
-#[unstable(feature = "ptr_unaligned", issue = "37955")]
+#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
copy_nonoverlapping(&src as *const T as *const u8,
dst as *mut u8,
/// # Examples
///
/// ```
-/// #![feature(ptr_eq)]
/// use std::ptr;
///
/// let five = 5;
/// assert!(ptr::eq(five_ref, same_five_ref));
/// assert!(!ptr::eq(five_ref, other_five_ref));
/// ```
-#[unstable(feature = "ptr_eq", reason = "newly added", issue = "36497")]
+#[stable(feature = "ptr_eq", since = "1.17.0")]
#[inline]
pub fn eq<T: ?Sized>(a: *const T, b: *const T) -> bool {
a == b
/// # Safety
///
/// `ptr` must be non-null.
- pub unsafe fn new(ptr: *mut T) -> Self {
+ pub unsafe fn new(ptr: *const T) -> Self {
Shared { pointer: NonZero::new(ptr), _marker: PhantomData }
}
}
+#[unstable(feature = "shared", issue = "27730")]
+impl<T: ?Sized> Shared<T> {
+ /// Acquires the underlying pointer as a `*mut` pointer.
+ pub unsafe fn as_mut_ptr(&self) -> *mut T {
+ **self as _
+ }
+}
+
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Clone for Shared<T> {
fn clone(&self) -> Self {
#[unstable(feature = "shared", issue = "27730")]
impl<T: ?Sized> Deref for Shared<T> {
- type Target = *mut T;
+ type Target = *const T;
#[inline]
- fn deref(&self) -> &*mut T {
+ fn deref(&self) -> &*const T {
unsafe { mem::transmute(&*self.pointer) }
}
}
/// Basic usage:
///
/// ```{.should_panic}
- /// # #![feature(result_expect_err)]
/// let x: Result<u32, &str> = Ok(10);
/// x.expect_err("Testing expect_err"); // panics with `Testing expect_err: 10`
/// ```
#[inline]
- #[unstable(feature = "result_expect_err", issue = "39041")]
+ #[stable(feature = "result_expect_err", since = "1.17.0")]
pub fn expect_err(self, msg: &str) -> E {
match self {
Ok(t) => unwrap_failed(msg, t),
//! Slice management and manipulation
//!
-//! For more details `std::slice`.
+//! For more details see [`std::slice`].
+//!
+//! [`std::slice`]: ../../std/slice/index.html
#![stable(feature = "rust1", since = "1.0.0")]
use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher};
use char;
+use convert::TryFrom;
use fmt;
use iter::{Map, Cloned, FusedIterator};
use mem;
/// A trait to abstract the idea of creating a new instance of a type from a
/// string.
///
-/// `FromStr`'s [`from_str()`] method is often used implicitly, through
-/// [`str`]'s [`parse()`] method. See [`parse()`]'s documentation for examples.
+/// `FromStr`'s [`from_str`] method is often used implicitly, through
+/// [`str`]'s [`parse`] method. See [`parse`]'s documentation for examples.
///
-/// [`from_str()`]: #tymethod.from_str
+/// [`from_str`]: #tymethod.from_str
/// [`str`]: ../../std/primitive.str.html
-/// [`parse()`]: ../../std/primitive.str.html#method.parse
+/// [`parse`]: ../../std/primitive.str.html#method.parse
#[stable(feature = "rust1", since = "1.0.0")]
pub trait FromStr: Sized {
/// The associated error which can be returned from parsing.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Utf8Error {
valid_up_to: usize,
+ error_len: Option<u8>,
}
impl Utf8Error {
/// Returns the index in the given string up to which valid UTF-8 was
/// verified.
///
- /// It is the maximum index such that `from_utf8(input[..index])`
+ /// It is the maximum index such that `from_utf8(&input[..index])`
/// would return `Ok(_)`.
///
/// # Examples
/// ```
#[stable(feature = "utf8_error", since = "1.5.0")]
pub fn valid_up_to(&self) -> usize { self.valid_up_to }
+
+ /// Provide more information about the failure:
+ ///
+ /// * `None`: the end of the input was reached unexpectedly.
+ /// `self.valid_up_to()` is 1 to 3 bytes from the end of the input.
+ /// If a byte stream (such as a file or a network socket) is being decoded incrementally,
+ /// this could be a valid `char` whose UTF-8 byte sequence is spanning multiple chunks.
+ ///
+ /// * `Some(len)`: an unexpected byte was encountered.
+ /// The length provided is that of the invalid byte sequence
+ /// that starts at the index given by `valid_up_to()`.
+ /// Decoding should resume after that sequence
+ /// (after inserting a U+FFFD REPLACEMENT CHARACTER) in case of lossy decoding.
+    #[unstable(feature = "utf8_error_error_len", reason = "new", issue = "40494")]
+ pub fn error_len(&self) -> Option<usize> {
+ self.error_len.map(|len| len as usize)
+ }
}
/// Converts a slice of bytes to a string slice.
///
/// If you are sure that the byte slice is valid UTF-8, and you don't want to
/// incur the overhead of the validity check, there is an unsafe version of
-/// this function, [`from_utf8_unchecked()`][fromutf8u], which has the same
+/// this function, [`from_utf8_unchecked`][fromutf8u], which has the same
/// behavior but skips the check.
///
/// [fromutf8u]: fn.from_utf8_unchecked.html
///
/// If you need a `String` instead of a `&str`, consider
-/// [`String::from_utf8()`][string].
+/// [`String::from_utf8`][string].
///
/// [string]: ../../std/string/struct.String.html#method.from_utf8
///
/// Converts a slice of bytes to a string slice without checking
/// that the string contains valid UTF-8.
///
-/// See the safe version, [`from_utf8()`][fromutf8], for more information.
+/// See the safe version, [`from_utf8`][fromutf8], for more information.
///
/// [fromutf8]: fn.from_utf8.html
///
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Display for Utf8Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "invalid utf-8: invalid byte near index {}", self.valid_up_to)
+ if let Some(error_len) = self.error_len {
+ write!(f, "invalid utf-8 sequence of {} bytes from index {}",
+ error_len, self.valid_up_to)
+ } else {
+ write!(f, "incomplete utf-8 byte sequence from index {}", self.valid_up_to)
+ }
}
}
/// Iterator for the char (representing *Unicode Scalar Values*) of a string
///
-/// Created with the method [`chars()`].
+/// Created with the method [`chars`].
///
-/// [`chars()`]: ../../std/primitive.str.html#method.chars
+/// [`chars`]: ../../std/primitive.str.html#method.chars
#[derive(Clone, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chars<'a> {
/// External iterator for a string's bytes.
/// Use with the `std::iter` module.
///
-/// Created with the method [`bytes()`].
+/// Created with the method [`bytes`].
///
-/// [`bytes()`]: ../../std/primitive.str.html#method.bytes
+/// [`bytes`]: ../../std/primitive.str.html#method.bytes
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone, Debug)]
pub struct Bytes<'a>(Cloned<slice::Iter<'a, u8>>);
generate_pattern_iterators! {
forward:
- /// Created with the method [`split()`].
+ /// Created with the method [`split`].
///
- /// [`split()`]: ../../std/primitive.str.html#method.split
+ /// [`split`]: ../../std/primitive.str.html#method.split
struct Split;
reverse:
- /// Created with the method [`rsplit()`].
+ /// Created with the method [`rsplit`].
///
- /// [`rsplit()`]: ../../std/primitive.str.html#method.rsplit
+ /// [`rsplit`]: ../../std/primitive.str.html#method.rsplit
struct RSplit;
stability:
#[stable(feature = "rust1", since = "1.0.0")]
generate_pattern_iterators! {
forward:
- /// Created with the method [`split_terminator()`].
+ /// Created with the method [`split_terminator`].
///
- /// [`split_terminator()`]: ../../std/primitive.str.html#method.split_terminator
+ /// [`split_terminator`]: ../../std/primitive.str.html#method.split_terminator
struct SplitTerminator;
reverse:
- /// Created with the method [`rsplit_terminator()`].
+ /// Created with the method [`rsplit_terminator`].
///
- /// [`rsplit_terminator()`]: ../../std/primitive.str.html#method.rsplit_terminator
+ /// [`rsplit_terminator`]: ../../std/primitive.str.html#method.rsplit_terminator
struct RSplitTerminator;
stability:
#[stable(feature = "rust1", since = "1.0.0")]
generate_pattern_iterators! {
forward:
- /// Created with the method [`splitn()`].
+ /// Created with the method [`splitn`].
///
- /// [`splitn()`]: ../../std/primitive.str.html#method.splitn
+ /// [`splitn`]: ../../std/primitive.str.html#method.splitn
struct SplitN;
reverse:
- /// Created with the method [`rsplitn()`].
+ /// Created with the method [`rsplitn`].
///
- /// [`rsplitn()`]: ../../std/primitive.str.html#method.rsplitn
+ /// [`rsplitn`]: ../../std/primitive.str.html#method.rsplitn
struct RSplitN;
stability:
#[stable(feature = "rust1", since = "1.0.0")]
generate_pattern_iterators! {
forward:
- /// Created with the method [`match_indices()`].
+ /// Created with the method [`match_indices`].
///
- /// [`match_indices()`]: ../../std/primitive.str.html#method.match_indices
+ /// [`match_indices`]: ../../std/primitive.str.html#method.match_indices
struct MatchIndices;
reverse:
- /// Created with the method [`rmatch_indices()`].
+ /// Created with the method [`rmatch_indices`].
///
- /// [`rmatch_indices()`]: ../../std/primitive.str.html#method.rmatch_indices
+ /// [`rmatch_indices`]: ../../std/primitive.str.html#method.rmatch_indices
struct RMatchIndices;
stability:
#[stable(feature = "str_match_indices", since = "1.5.0")]
generate_pattern_iterators! {
forward:
- /// Created with the method [`matches()`].
+ /// Created with the method [`matches`].
///
- /// [`matches()`]: ../../std/primitive.str.html#method.matches
+ /// [`matches`]: ../../std/primitive.str.html#method.matches
struct Matches;
reverse:
- /// Created with the method [`rmatches()`].
+ /// Created with the method [`rmatches`].
///
- /// [`rmatches()`]: ../../std/primitive.str.html#method.rmatches
+ /// [`rmatches`]: ../../std/primitive.str.html#method.rmatches
struct RMatches;
stability:
#[stable(feature = "str_matches", since = "1.2.0")]
delegate double ended;
}
-/// Created with the method [`lines()`].
+/// Created with the method [`lines`].
///
-/// [`lines()`]: ../../std/primitive.str.html#method.lines
+/// [`lines`]: ../../std/primitive.str.html#method.lines
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone, Debug)]
pub struct Lines<'a>(Map<SplitTerminator<'a, char>, LinesAnyMap>);
#[unstable(feature = "fused", issue = "35602")]
impl<'a> FusedIterator for Lines<'a> {}
-/// Created with the method [`lines_any()`].
+/// Created with the method [`lines_any`].
///
-/// [`lines_any()`]: ../../std/primitive.str.html#method.lines_any
+/// [`lines_any`]: ../../std/primitive.str.html#method.lines_any
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_deprecated(since = "1.4.0", reason = "use lines()/Lines instead now")]
#[derive(Clone, Debug)]
while index < len {
let old_offset = index;
- macro_rules! err { () => {{
- return Err(Utf8Error {
- valid_up_to: old_offset
- })
- }}}
+ macro_rules! err {
+ ($error_len: expr) => {
+ return Err(Utf8Error {
+ valid_up_to: old_offset,
+ error_len: $error_len,
+ })
+ }
+ }
macro_rules! next { () => {{
index += 1;
// we needed data, but there was none: error!
if index >= len {
- err!()
+ err!(None)
}
v[index]
}}}
let first = v[index];
if first >= 128 {
let w = UTF8_CHAR_WIDTH[first as usize];
- let second = next!();
// 2-byte encoding is for codepoints \u{0080} to \u{07ff}
// first C2 80 last DF BF
// 3-byte encoding is for codepoints \u{0800} to \u{ffff}
// UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
// %xF4 %x80-8F 2( UTF8-tail )
match w {
- 2 => if second & !CONT_MASK != TAG_CONT_U8 {err!()},
+ 2 => if next!() & !CONT_MASK != TAG_CONT_U8 {
+ err!(Some(1))
+ },
3 => {
- match (first, second, next!() & !CONT_MASK) {
- (0xE0 , 0xA0 ... 0xBF, TAG_CONT_U8) |
- (0xE1 ... 0xEC, 0x80 ... 0xBF, TAG_CONT_U8) |
- (0xED , 0x80 ... 0x9F, TAG_CONT_U8) |
- (0xEE ... 0xEF, 0x80 ... 0xBF, TAG_CONT_U8) => {}
- _ => err!()
+ match (first, next!()) {
+ (0xE0 , 0xA0 ... 0xBF) |
+ (0xE1 ... 0xEC, 0x80 ... 0xBF) |
+ (0xED , 0x80 ... 0x9F) |
+ (0xEE ... 0xEF, 0x80 ... 0xBF) => {}
+ _ => err!(Some(1))
+ }
+ if next!() & !CONT_MASK != TAG_CONT_U8 {
+ err!(Some(2))
}
}
4 => {
- match (first, second, next!() & !CONT_MASK, next!() & !CONT_MASK) {
- (0xF0 , 0x90 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
- (0xF1 ... 0xF3, 0x80 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
- (0xF4 , 0x80 ... 0x8F, TAG_CONT_U8, TAG_CONT_U8) => {}
- _ => err!()
+ match (first, next!()) {
+ (0xF0 , 0x90 ... 0xBF) |
+ (0xF1 ... 0xF3, 0x80 ... 0xBF) |
+ (0xF4 , 0x80 ... 0x8F) => {}
+ _ => err!(Some(1))
+ }
+ if next!() & !CONT_MASK != TAG_CONT_U8 {
+ err!(Some(2))
+ }
+ if next!() & !CONT_MASK != TAG_CONT_U8 {
+ err!(Some(3))
}
}
- _ => err!()
+ _ => err!(Some(1))
}
index += 1;
} else {
#[stable(feature = "core", since = "1.6.0")]
fn is_empty(&self) -> bool;
#[stable(feature = "core", since = "1.6.0")]
- fn parse<T: FromStr>(&self) -> Result<T, T::Err>;
+ fn parse<'a, T: TryFrom<&'a str>>(&'a self) -> Result<T, T::Error>;
}
// truncate `&str` to length at most equal to `max`
fn is_empty(&self) -> bool { self.len() == 0 }
#[inline]
- fn parse<T: FromStr>(&self) -> Result<T, T::Err> { FromStr::from_str(self) }
+ fn parse<'a, T>(&'a self) -> Result<T, T::Error> where T: TryFrom<&'a str> {
+ T::try_from(self)
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
#![feature(nonzero)]
#![feature(rand)]
#![feature(raw)]
-#![feature(result_expect_err)]
#![feature(sip_hash_13)]
#![feature(slice_patterns)]
#![feature(step_by)]
#![feature(try_from)]
#![feature(unicode)]
#![feature(unique)]
-#![feature(ordering_chaining)]
-#![feature(ptr_unaligned)]
-#![feature(move_cell)]
#![feature(fmt_internals)]
extern crate core;
tables: tables,
graph: graph,
fn_exit: fn_exit,
- loop_scopes: Vec::new()
+ loop_scopes: Vec::new(),
};
body_exit = cfg_builder.expr(&body.value, entry);
cfg_builder.add_contained_edge(body_exit, fn_exit);
- let CFGBuilder {graph, ..} = cfg_builder;
- CFG {graph: graph,
- entry: entry,
- exit: fn_exit}
+ let CFGBuilder { graph, .. } = cfg_builder;
+ CFG {
+ graph: graph,
+ entry: entry,
+ exit: fn_exit,
+ }
}
impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
self.add_ast_node(id, &[exit])
}
- hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) => {
+ hir::StmtExpr(ref expr, id) |
+ hir::StmtSemi(ref expr, id) => {
let exit = self.expr(&expr, pred);
self.add_ast_node(id, &[exit])
}
self.pat(&local.pat, init_exit)
}
- hir::DeclItem(_) => {
- pred
- }
+ hir::DeclItem(_) => pred,
}
}
PatKind::Path(_) |
PatKind::Lit(..) |
PatKind::Range(..) |
- PatKind::Wild => {
- self.add_ast_node(pat.id, &[pred])
- }
+ PatKind::Wild => self.add_ast_node(pat.id, &[pred]),
PatKind::Box(ref subpat) |
PatKind::Ref(ref subpat, _) |
}
PatKind::Struct(_, ref subpats, _) => {
- let pats_exit =
- self.pats_all(subpats.iter().map(|f| &f.node.pat), pred);
+ let pats_exit = self.pats_all(subpats.iter().map(|f| &f.node.pat), pred);
self.add_ast_node(pat.id, &[pats_exit])
}
let method_call = ty::MethodCall::expr(call_expr.id);
let fn_ty = match self.tables.method_map.get(&method_call) {
Some(method) => method.ty,
- None => self.tables.expr_ty_adjusted(func_or_rcvr)
+ None => self.tables.expr_ty_adjusted(func_or_rcvr),
};
let func_or_rcvr_exit = self.expr(func_or_rcvr, pred);
from_index: CFGIndex,
to_loop: LoopScope,
to_index: CFGIndex) {
- let mut data = CFGEdgeData {exiting_scopes: vec![] };
+ let mut data = CFGEdgeData { exiting_scopes: vec![] };
let mut scope = self.tcx.region_maps.node_extent(from_expr.id);
let target_scope = self.tcx.region_maps.node_extent(to_loop.loop_id);
while scope != target_scope {
}
span_bug!(expr.span, "no loop scope for id {}", loop_id);
}
- Err(err) => span_bug!(expr.span, "loop scope error: {}", err)
+ Err(err) => span_bug!(expr.span, "loop scope error: {}", err),
}
}
}
DepTrackingMap {
phantom: PhantomData,
graph: graph,
- map: FxHashMap()
+ map: FxHashMap(),
}
}
struct TrackingVisitor<'visit, 'tcx: 'visit, F: 'visit, V: 'visit> {
tcx: TyCtxt<'visit, 'tcx, 'tcx>,
dep_node_fn: &'visit mut F,
- visitor: &'visit mut V
+ visitor: &'visit mut V,
}
impl<'visit, 'tcx, F, V> ItemLikeVisitor<'tcx> for TrackingVisitor<'visit, 'tcx, F, V>
let mut tracking_visitor = TrackingVisitor {
tcx: tcx,
dep_node_fn: &mut dep_node_fn,
- visitor: visitor
+ visitor: visitor,
};
krate.visit_all_item_likes(&mut tracking_visitor)
}
pub fn visit_all_bodies_in_krate<'a, 'tcx, C>(tcx: TyCtxt<'a, 'tcx, 'tcx>, callback: C)
- where C: Fn(/* body_owner */ DefId, /* body id */ hir::BodyId),
+ where C: Fn(/* body_owner */
+ DefId,
+ /* body id */
+ hir::BodyId)
{
let krate = tcx.hir.krate();
for &body_id in &krate.body_ids {
}
fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
- let name: &str = &attr.name().as_str();
- match name {
- "inline" => self.check_inline(attr, target),
- "repr" => self.check_repr(attr, target),
- _ => (),
+ if let Some(name) = attr.name() {
+ match &*name.as_str() {
+ "inline" => self.check_inline(attr, target),
+ "repr" => self.check_repr(attr, target),
+ _ => (),
+ }
}
}
}
let attrs = self.lower_attrs(&i.attrs);
let mut vis = self.lower_visibility(&i.vis);
if let ItemKind::MacroDef(ref tts) = i.node {
- if i.attrs.iter().any(|attr| attr.name() == "macro_export") {
+ if i.attrs.iter().any(|attr| attr.path == "macro_export") {
self.exported_macros.push(hir::MacroDef {
name: name, attrs: attrs, id: i.id, span: i.span, body: tts.clone().into(),
});
let (span, msg) = self;
let mut diagnostic = Diagnostic::new(errors::Level::Warning, msg);
diagnostic.set_span(span);
- EarlyLint { id: id, diagnostic: diagnostic }
+ EarlyLint {
+ id: id,
+ diagnostic: diagnostic,
+ }
}
}
impl IntoEarlyLint for Diagnostic {
fn into_early_lint(self, id: LintId) -> EarlyLint {
- EarlyLint { id: id, diagnostic: self }
+ EarlyLint {
+ id: id,
+ diagnostic: self,
+ }
}
}
enum FindLintError {
NotFound,
- Removed
+ Removed,
}
impl LintStore {
pub fn gather_attr(attr: &ast::Attribute) -> Vec<Result<(ast::Name, Level, Span), Span>> {
let mut out = vec![];
- let level = match Level::from_str(&attr.name().as_str()) {
+ let level = match attr.name().and_then(|name| Level::from_str(&name.as_str())) {
None => return out,
Some(lvl) => lvl,
};
+ let meta = unwrap_or!(attr.meta(), return out);
attr::mark_used(attr);
- let meta = &attr.value;
let metas = if let Some(metas) = meta.meta_item_list() {
metas
} else {
NoLint,
// The lint is either renamed or removed. This is the warning
// message.
- Warning(String)
+ Warning(String),
}
/// Checks the name of a lint for its existence, and whether it was
pub enum NativeLibraryKind {
NativeStatic, // native static library (.a archive)
NativeStaticNobundle, // native static library, which doesn't get bundled into .rlibs
- NativeFramework, // OSX-specific
+ NativeFramework, // macOS-specific
NativeUnknown, // default way to specify a dynamic library
}
pub type cmt<'tcx> = Rc<cmt_<'tcx>>;
impl<'tcx> cmt_<'tcx> {
+ pub fn get_def(&self) -> Option<ast::NodeId> {
+ match self.cat {
+ Categorization::Deref(ref cmt, ..) |
+ Categorization::Interior(ref cmt, _) |
+ Categorization::Downcast(ref cmt, _) => {
+ if let Categorization::Local(nid) = cmt.cat {
+ Some(nid)
+ } else {
+ None
+ }
+ }
+ _ => None
+ }
+ }
+
pub fn get_field(&self, name: ast::Name) -> Option<DefId> {
match self.cat {
Categorization::Deref(ref cmt, ..) |
let promotable = self.tcx().rvalue_promotable_to_static.borrow().get(&id).cloned()
.unwrap_or(false);
- // Only promote `[T; 0]` before an RFC for rvalue promotions
- // is accepted.
+ // When the corresponding feature isn't toggled, only promote `[T; 0]`.
let promotable = match expr_ty.sty {
ty::TyArray(_, 0) => true,
- _ => promotable & false
+ _ => promotable && self.tcx().sess.features.borrow().rvalue_static_promotion,
};
// Compute maximum lifetime of this rvalue. This is 'static if
} else {
// Emit errors for non-staged-api crates.
for attr in attrs {
- let tag = attr.name();
+ let tag = unwrap_or!(attr.name(), continue);
if tag == "unstable" || tag == "stable" || tag == "rustc_deprecated" {
attr::mark_used(attr);
self.tcx.sess.span_err(attr.span(), "stability attributes may not be used \
let mut is_staged_api = false;
for attr in &krate.attrs {
- if attr.name() == "stable" || attr.name() == "unstable" {
+ if attr.path == "stable" || attr.path == "unstable" {
is_staged_api = true;
break
}
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum SizeKind { Exact, Min }
+pub enum SizeKind {
+ Exact,
+ Min,
+}
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct FieldInfo {
always_encode_mir: bool = (false, parse_bool, [TRACKED],
"encode MIR of all functions into the crate metadata"),
osx_rpath_install_name: bool = (false, parse_bool, [TRACKED],
- "pass `-install_name @rpath/...` to the OSX linker"),
+ "pass `-install_name @rpath/...` to the macOS linker"),
sanitizer: Option<Sanitizer> = (None, parse_sanitizer, [TRACKED],
"Use a sanitizer"),
}
.filter(|a| a.check_name("rustc_on_unimplemented"))
.next()
{
- let err_sp = item.meta().span.substitute_dummy(span);
+ let err_sp = item.span.substitute_dummy(span);
let trait_str = self.tcx.item_path_str(trait_ref.def_id);
if let Some(istring) = item.value_str() {
let istring = &*istring.as_str();
pub struct OverlapError {
pub with_impl: DefId,
pub trait_desc: String,
- pub self_desc: Option<String>
+ pub self_desc: Option<String>,
}
/// Given a subst for the requested impl, translate it to a subst
}
pub struct SpecializesCache {
- map: FxHashMap<(DefId, DefId), bool>
+ map: FxHashMap<(DefId, DefId), bool>,
}
impl SpecializesCache {
#[derive(Clone, Copy, Debug)]
pub struct ExpectedFound<T> {
pub expected: T,
- pub found: T
+ pub found: T,
}
// Data structures used in type unification
fn new(root_mode: RootMode) -> LocalPathBuffer {
LocalPathBuffer {
root_mode: root_mode,
- str: String::new()
+ str: String::new(),
}
}
fn into_string(self) -> String {
self.str
}
-
}
impl ItemPathBuffer for LocalPathBuffer {
pub struct CycleError<'a> {
span: Span,
- cycle: RefMut<'a, [(Span, Query)]>
+ cycle: RefMut<'a, [(Span, Query)]>,
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
/// at least as big as the scope `fr.scope`".
pub struct FreeRegion {
pub scope: region::CodeExtent,
- pub bound_region: BoundRegion
+ pub bound_region: BoundRegion,
}
#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash,
// Anonymous region for the implicit env pointer parameter
// to a closure
- BrEnv
+ BrEnv,
}
/// When a region changed from late-bound to early-bound when #32330
pub fn principal(&self) -> Option<ExistentialTraitRef<'tcx>> {
match self.get(0) {
Some(&ExistentialPredicate::Trait(tr)) => Some(tr),
- _ => None
+ _ => None,
}
}
ty::Binder(&self.0)
}
- pub fn map_bound_ref<F,U>(&self, f: F) -> Binder<U>
+ pub fn map_bound_ref<F, U>(&self, f: F) -> Binder<U>
where F: FnOnce(&T) -> U
{
self.as_ref().map_bound(f)
}
- pub fn map_bound<F,U>(self, f: F) -> Binder<U>
+ pub fn map_bound<F, U>(self, f: F) -> Binder<U>
where F: FnOnce(T) -> U
{
ty::Binder(f(self.0))
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct IntVid {
- pub index: u32
+ pub index: u32,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct FloatVid {
- pub index: u32
+ pub index: u32,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
pub struct RegionVid {
- pub index: u32
+ pub index: u32,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct SkolemizedRegionVid {
- pub index: u32
+ pub index: u32,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
/// `infer::freshen` for more details.
FreshTy(u32),
FreshIntTy(u32),
- FreshFloatTy(u32)
+ FreshFloatTy(u32),
}
/// A `ProjectionPredicate` for an `ExistentialTraitRef`.
pub struct ExistentialProjection<'tcx> {
pub trait_ref: ExistentialTraitRef<'tcx>,
pub item_name: Name,
- pub ty: Ty<'tcx>
+ pub ty: Ty<'tcx>,
}
pub type PolyExistentialProjection<'tcx> = Binder<ExistentialProjection<'tcx>>;
ty::ProjectionPredicate {
projection_ty: ty::ProjectionTy {
trait_ref: self.trait_ref.with_self_ty(tcx, self_ty),
- item_name: self.item_name
+ item_name: self.item_name,
},
- ty: self.ty
+ ty: self.ty,
}
}
}
match *self {
ty::ReEarlyBound(..) => true,
ty::ReLateBound(..) => true,
- _ => false
+ _ => false,
}
}
pub fn is_nil(&self) -> bool {
match self.sty {
TyTuple(ref tys, _) => tys.is_empty(),
- _ => false
+ _ => false,
}
}
pub fn is_ty_var(&self) -> bool {
match self.sty {
TyInfer(TyVar(_)) => true,
- _ => false
+ _ => false,
}
}
pub fn is_self(&self) -> bool {
match self.sty {
TyParam(ref p) => p.is_self(),
- _ => false
+ _ => false,
}
}
pub fn is_structural(&self) -> bool {
match self.sty {
TyAdt(..) | TyTuple(..) | TyArray(..) | TyClosure(..) => true,
- _ => self.is_slice() | self.is_trait()
+ _ => self.is_slice() | self.is_trait(),
}
}
pub fn is_simd(&self) -> bool {
match self.sty {
TyAdt(def, _) => def.repr.simd,
- _ => false
+ _ => false,
}
}
pub fn is_region_ptr(&self) -> bool {
match self.sty {
TyRef(..) => true,
- _ => false
+ _ => false,
}
}
pub fn is_unsafe_ptr(&self) -> bool {
match self.sty {
TyRawPtr(_) => return true,
- _ => return false
+ _ => return false,
}
}
pub fn is_trait(&self) -> bool {
match self.sty {
TyDynamic(..) => true,
- _ => false
+ _ => false,
}
}
TyInfer(FreshTy(_)) => true,
TyInfer(FreshIntTy(_)) => true,
TyInfer(FreshFloatTy(_)) => true,
- _ => false
+ _ => false,
}
}
pub fn is_char(&self) -> bool {
match self.sty {
TyChar => true,
- _ => false
+ _ => false,
}
}
pub fn is_signed(&self) -> bool {
match self.sty {
TyInt(_) => true,
- _ => false
+ _ => false,
}
}
match self.sty {
TyInt(ast::IntTy::Is) | TyUint(ast::UintTy::Us) => false,
TyInt(..) | TyUint(..) | TyFloat(..) => true,
- _ => false
+ _ => false,
}
}
},
TyRef(_, mt) => Some(mt),
TyRawPtr(mt) if explicit => Some(mt),
- _ => None
+ _ => None,
}
}
pub fn builtin_index(&self) -> Option<Ty<'tcx>> {
match self.sty {
TyArray(ty, _) | TySlice(ty) => Some(ty),
- _ => None
+ _ => None,
}
}
pub fn is_fn(&self) -> bool {
match self.sty {
TyFnDef(..) | TyFnPtr(_) => true,
- _ => false
+ _ => false,
}
}
TyDynamic(ref tt, ..) => tt.principal().map(|p| p.def_id()),
TyAdt(def, _) => Some(def.did),
TyClosure(id, _) => Some(id),
- _ => None
+ _ => None,
}
}
pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> {
match self.sty {
TyAdt(adt, _) => Some(adt),
- _ => None
+ _ => None,
}
}
use hir::map as hir_map;
use traits::{self, Reveal};
use ty::{self, Ty, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable};
-use ty::{ParameterEnvironment};
+use ty::ParameterEnvironment;
use ty::fold::TypeVisitor;
use ty::layout::{Layout, LayoutError};
use ty::TypeVariants::*;
type Disr = ConstInt;
- pub trait IntTypeExt {
+pub trait IntTypeExt {
fn to_ty<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>;
fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option<Disr>)
-> Option<Disr>;
fn assert_ty_matches(&self, val: Disr);
fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr;
- }
+}
macro_rules! typed_literal {
pub enum CopyImplementationError<'tcx> {
InfrigingField(&'tcx ty::FieldDef),
NotAnAdt,
- HasDestructor
+ HasDestructor,
}
/// Describes whether a type is representable. For types that are not
tcx.infer_ctxt(self.clone(), Reveal::UserFacing).enter(|infcx| {
let (adt, substs) = match self_type.sty {
ty::TyAdt(adt, substs) => (adt, substs),
- _ => return Err(CopyImplementationError::NotAnAdt)
+ _ => return Err(CopyImplementationError::NotAnAdt),
};
let field_implements_copy = |field: &ty::FieldDef| {
let cause = traits::ObligationCause::dummy();
match traits::fully_normalize(&infcx, cause, &field.ty(tcx, substs)) {
Ok(ty) => !infcx.type_moves_by_default(ty, span),
- Err(..) => false
+ Err(..) => false,
}
};
}
}
}
- _ => ()
+ _ => (),
}
false
}
adt.variants[0].fields.get(i).map(|f| f.ty(self, substs))
}
(&TyTuple(ref v, _), None) => v.get(i).cloned(),
- _ => None
+ _ => None,
}
}
pub fn struct_tail(self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
while let TyAdt(def, substs) = ty.sty {
if !def.is_struct() {
- break
+ break;
}
match def.struct_variant().fields.last() {
Some(f) => ty = f.ty(self, substs),
- None => break
+ None => break,
}
}
ty
let (mut a, mut b) = (source, target);
while let (&TyAdt(a_def, a_substs), &TyAdt(b_def, b_substs)) = (&a.sty, &b.sty) {
if a_def != b_def || !a_def.is_struct() {
- break
+ break;
}
match a_def.struct_variant().fields.last() {
Some(f) => {
a = f.ty(self, a_substs);
b = f.ty(self, b_substs);
}
- _ => break
+ _ => break,
}
}
(a, b)
let dtor_did = match dtor_did {
Some(dtor) => dtor,
- None => return None
+ None => return None,
};
// RFC 1238: if the destructor method is tagged with the
substs_a.types().zip(substs_b.types()).all(|(a, b)| same_type(a, b))
}
- _ => {
- a == b
- }
+ _ => a == b,
}
}
if val == 0 {
groups.push(format!("{}", group));
- break
+ break;
} else {
groups.push(format!("{:03}", group));
}
type HANDLE = *mut u8;
use libc::size_t;
use std::mem;
- #[repr(C)] #[allow(non_snake_case)]
+ #[repr(C)]
+ #[allow(non_snake_case)]
struct PROCESS_MEMORY_COUNTERS {
cb: DWORD,
PageFaultCount: DWORD,
}
pub struct Indenter {
- _cannot_construct_outside_of_this_module: ()
+ _cannot_construct_outside_of_this_module: (),
}
impl Drop for Indenter {
}
}
}
-
-// Like std::fs::create_dir_all, except handles concurrent calls among multiple
-// threads or processes.
-pub fn create_dir_racy(path: &Path) -> io::Result<()> {
- match fs::create_dir(path) {
- Ok(()) => return Ok(()),
- Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()),
- Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
- Err(e) => return Err(e),
- }
- match path.parent() {
- Some(p) => try!(create_dir_racy(p)),
- None => return Err(io::Error::new(io::ErrorKind::Other, "failed to create whole tree")),
- }
- match fs::create_dir(path) {
- Ok(()) => Ok(()),
- Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()),
- Err(e) => Err(e),
- }
-}
use target::TargetOptions;
pub fn opts() -> TargetOptions {
- // ELF TLS is only available in OSX 10.7+. If you try to compile for 10.6
+ // ELF TLS is only available in macOS 10.7+. If you try to compile for 10.6
// either the linker will complain if it is used or the binary will end up
- // segfaulting at runtime when run on 10.6. Rust by default supports OSX
+ // segfaulting at runtime when run on 10.6. Rust by default supports macOS
// 10.7+, but there is a standard environment variable,
// MACOSX_DEPLOYMENT_TARGET, which is used to signal targeting older
- // versions of OSX. For example compiling on 10.10 with
+ // versions of macOS. For example compiling on 10.10 with
// MACOSX_DEPLOYMENT_TARGET set to 10.6 will cause the linker to generate
// warnings about the usage of ELF TLS.
//
}).unwrap_or((10, 7));
TargetOptions {
- // OSX has -dead_strip, which doesn't rely on function_sections
+ // macOS has -dead_strip, which doesn't rely on function_sections
function_sections: false,
dynamic_linking: true,
executables: true,
/// Whether the target toolchain is like OpenBSD's.
/// Only useful for compiling against OpenBSD, for configuring abi when returning a struct.
pub is_like_openbsd: bool,
- /// Whether the target toolchain is like OSX's. Only useful for compiling against iOS/OS X, in
- /// particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
+ /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
+ /// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
pub is_like_osx: bool,
/// Whether the target toolchain is like Solaris's.
/// Only useful for compiling against Illumos/Solaris,
pub fn bckerr_to_diag(&self, err: &BckError<'tcx>) -> DiagnosticBuilder<'a> {
let span = err.span.clone();
let mut immutable_field = None;
+ let mut local_def = None;
let msg = &match err.code {
err_mutbl => {
}
None
});
+ local_def = err.cmt.get_def()
+ .and_then(|nid| {
+ if !self.tcx.hir.is_argument(nid) {
+ Some(self.tcx.hir.span(nid))
+ } else {
+ None
+ }
+ });
format!("cannot borrow {} as mutable", descr)
}
if let Some((span, msg)) = immutable_field {
db.span_label(span, &msg);
}
+ if let Some(let_span) = local_def {
+ if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(let_span) {
+ db.span_label(let_span, &format!("consider changing this to `mut {}`", snippet));
+ }
+ }
db
}
} else {
db.span_label(*error_span, &format!("cannot borrow mutably"));
}
+ } else if let Categorization::Interior(ref cmt, _) = err.cmt.cat {
+ if let mc::MutabilityCategory::McImmutable = cmt.mutbl {
+ db.span_label(*error_span,
+ &"cannot mutably borrow immutable field");
+ }
}
}
}
if self.tail_len > 0 {
unsafe {
- let source_array_vec = &mut **self.array_vec;
+ let source_array_vec = &mut *self.array_vec.as_mut_ptr();
// memmove back untouched tail, update to new length
let start = source_array_vec.len();
let tail = self.tail_start;
ManuallyDrop::new()
}
}
-
#![feature(shared)]
#![feature(collections_range)]
-#![feature(collections_bound)]
#![cfg_attr(stage0,feature(field_init_shorthand))]
#![feature(nonzero)]
#![feature(rustc_private)]
use syntax::ast::{self, Name, NodeId};
use syntax::attr;
use syntax::parse::token;
-use syntax::symbol::{Symbol, InternedString};
+use syntax::symbol::InternedString;
use syntax_pos::{Span, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos};
use syntax::tokenstream;
use rustc::hir;
use rustc::hir::*;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
-use rustc::hir::intravisit as visit;
+use rustc::hir::intravisit::{self as visit, Visitor};
use rustc::ty::TyCtxt;
-use rustc_data_structures::fnv;
use std::hash::{Hash, Hasher};
use super::def_path_hash::DefPathHashes;
});
}
-impl<'a, 'hash, 'tcx> visit::Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> {
+impl<'a, 'hash, 'tcx> Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> visit::NestedVisitorMap<'this, 'tcx> {
if self.hash_bodies {
visit::NestedVisitorMap::OnlyBodies(&self.tcx.hir)
}
}
- fn hash_meta_item(&mut self, meta_item: &ast::MetaItem) {
- debug!("hash_meta_item: st={:?}", self.st);
-
- // ignoring span information, it doesn't matter here
- self.hash_discriminant(&meta_item.node);
- meta_item.name.as_str().len().hash(self.st);
- meta_item.name.as_str().hash(self.st);
-
- match meta_item.node {
- ast::MetaItemKind::Word => {}
- ast::MetaItemKind::NameValue(ref lit) => saw_lit(lit).hash(self.st),
- ast::MetaItemKind::List(ref items) => {
- // Sort subitems so the hash does not depend on their order
- let indices = self.indices_sorted_by(&items, |p| {
- (p.name().map(Symbol::as_str), fnv::hash(&p.literal().map(saw_lit)))
- });
- items.len().hash(self.st);
- for (index, &item_index) in indices.iter().enumerate() {
- index.hash(self.st);
- let nested_meta_item: &ast::NestedMetaItemKind = &items[item_index].node;
- self.hash_discriminant(nested_meta_item);
- match *nested_meta_item {
- ast::NestedMetaItemKind::MetaItem(ref meta_item) => {
- self.hash_meta_item(meta_item);
- }
- ast::NestedMetaItemKind::Literal(ref lit) => {
- saw_lit(lit).hash(self.st);
- }
- }
- }
- }
- }
- }
-
pub fn hash_attributes(&mut self, attributes: &[ast::Attribute]) {
debug!("hash_attributes: st={:?}", self.st);
let indices = self.indices_sorted_by(attributes, |attr| attr.name());
for i in indices {
let attr = &attributes[i];
- if !attr.is_sugared_doc &&
- !IGNORED_ATTRIBUTES.contains(&&*attr.value.name().as_str()) {
+ match attr.name() {
+ Some(name) if IGNORED_ATTRIBUTES.contains(&&*name.as_str()) => continue,
+ _ => {}
+ };
+ if !attr.is_sugared_doc {
SawAttribute(attr.style).hash(self.st);
- self.hash_meta_item(&attr.value);
+ for segment in &attr.path.segments {
+ SawIdent(segment.identifier.name.as_str()).hash(self.st);
+ }
+ for tt in attr.tokens.trees() {
+ self.hash_token_tree(&tt);
+ }
}
}
}
impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> {
fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode<DefId> {
- for item in attr.meta_item_list().unwrap_or(&[]) {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
if item.check_name(LABEL) {
- let value = expect_associated_value(self.tcx, item);
+ let value = expect_associated_value(self.tcx, &item);
match DepNode::from_label_string(&value.as_str(), def_id) {
Ok(def_id) => return def_id,
Err(()) => {
debug!("check_config(attr={:?})", attr);
let config = &tcx.sess.parse_sess.config;
debug!("check_config: config={:?}", config);
- for item in attr.meta_item_list().unwrap_or(&[]) {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
if item.check_name(CFG) {
- let value = expect_associated_value(tcx, item);
+ let value = expect_associated_value(tcx, &item);
debug!("check_config: searching for cfg {:?}", value);
return config.contains(&(value, None));
}
}
fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(),()> {
- match fs_util::create_dir_racy(path) {
+ match std_fs::create_dir_all(path) {
Ok(()) => {
debug!("{} directory created successfully", dir_tag);
Ok(())
}
}
- let has_doc = attrs.iter().any(|a| a.is_value_str() && a.name() == "doc");
+ let has_doc = attrs.iter().any(|a| a.is_value_str() && a.check_name("doc"));
if !has_doc {
cx.span_lint(MISSING_DOCS,
sp,
impl EarlyLintPass for DeprecatedAttr {
fn check_attribute(&mut self, cx: &EarlyContext, attr: &ast::Attribute) {
- let name = attr.name();
+ let name = unwrap_or!(attr.name(), return);
for &&(n, _, ref g) in &self.depr_attrs {
if name == n {
if let &AttributeGate::Gated(Stability::Deprecated(link),
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnstableFeatures {
fn check_attribute(&mut self, ctx: &LateContext, attr: &ast::Attribute) {
- if attr.meta().check_name("feature") {
- if let Some(items) = attr.meta().meta_item_list() {
+ if attr.check_name("feature") {
+ if let Some(items) = attr.meta_item_list() {
for item in items {
ctx.span_lint(UNSTABLE_FEATURES, item.span(), "unstable feature");
}
#![feature(slice_patterns)]
#![feature(staged_api)]
+#[macro_use]
extern crate syntax;
#[macro_use]
extern crate rustc;
impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedAttributes {
fn check_attribute(&mut self, cx: &LateContext, attr: &ast::Attribute) {
debug!("checking attribute: {:?}", attr);
+ let name = unwrap_or!(attr.name(), return);
// Note that check_name() marks the attribute as used if it matches.
for &(ref name, ty, _) in BUILTIN_ATTRIBUTES {
cx.span_lint(UNUSED_ATTRIBUTES, attr.span, "unused attribute");
// Is it a builtin attribute that must be used at the crate level?
let known_crate = BUILTIN_ATTRIBUTES.iter()
- .find(|&&(name, ty, _)| attr.name() == name && ty == AttributeType::CrateLevel)
+ .find(|&&(builtin, ty, _)| name == builtin && ty == AttributeType::CrateLevel)
.is_some();
// Has a plugin registered this attribute as one which must be used at
// the crate level?
let plugin_crate = plugin_attributes.iter()
- .find(|&&(ref x, t)| attr.name() == &**x && AttributeType::CrateLevel == t)
+ .find(|&&(ref x, t)| name == &**x && AttributeType::CrateLevel == t)
.is_some();
if known_crate || plugin_crate {
let msg = match attr.style {
}
let is_osx = sess.target.target.options.is_like_osx;
if lib.kind == cstore::NativeFramework && !is_osx {
- let msg = "native frameworks are only available on OSX targets";
+ let msg = "native frameworks are only available on macOS targets";
match span {
Some(span) => span_err!(sess, span, E0455, "{}", msg),
None => sess.err(msg),
impl<'a> CrateLoader<'a> {
pub fn preprocess(&mut self, krate: &ast::Crate) {
- for attr in krate.attrs.iter().filter(|m| m.name() == "link_args") {
- if let Some(linkarg) = attr.value_str() {
- self.cstore.add_used_link_args(&linkarg.as_str());
+ for attr in &krate.attrs {
+ if attr.path == "link_args" {
+ if let Some(linkarg) = attr.value_str() {
+ self.cstore.add_used_link_args(&linkarg.as_str());
+ }
}
}
}
}
pub fn is_staged_api(&self) -> bool {
- self.get_item_attrs(CRATE_DEF_INDEX)
- .iter()
- .any(|attr| attr.name() == "stable" || attr.name() == "unstable")
+ for attr in self.get_item_attrs(CRATE_DEF_INDEX) {
+ if attr.path == "stable" || attr.path == "unstable" {
+ return true;
+ }
+ }
+ false
}
pub fn is_allocator(&self) -> bool {
"##,
E0455: r##"
-Linking with `kind=framework` is only supported when targeting OS X,
+Linking with `kind=framework` is only supported when targeting macOS,
as frameworks are specific to that operating system.
Erroneous code example:
//
// And here we run into yet another obscure archive bug: in which metadata
// loaded from archives may have trailing garbage bytes. Awhile back one of
-// our tests was failing sporadically on the OSX 64-bit builders (both nopt
+// our tests was failing sporadically on the macOS 64-bit builders (both nopt
// and opt) by having ebml generate an out-of-bounds panic when looking at
// metadata.
//
ItemKind::Mod(_) => {
// Ensure that `path` attributes on modules are recorded as used (c.f. #35584).
attr::first_attr_value_str_by_name(&item.attrs, "path");
- if let Some(attr) =
- item.attrs.iter().find(|attr| attr.name() == "warn_directory_ownership") {
+ if item.attrs.iter().any(|attr| attr.check_name("warn_directory_ownership")) {
let lint = lint::builtin::LEGACY_DIRECTORY_OWNERSHIP;
let msg = "cannot declare a new module at this location";
self.session.add_lint(lint, item.id, item.span, msg.to_string());
- attr::mark_used(attr);
}
}
ItemKind::Union(ref vdata, _) => {
use rustc::middle::cstore::LoadedMacro;
use rustc::hir::def::*;
-use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId};
+use rustc::hir::def_id::{CrateNum, BUILTIN_MACROS_CRATE, CRATE_DEF_INDEX, DefId};
use rustc::ty;
use std::cell::Cell;
let def_id = self.macro_defs[&expansion];
if let Some(id) = self.definitions.as_local_node_id(def_id) {
self.local_macro_def_scopes[&id]
+ } else if def_id.krate == BUILTIN_MACROS_CRATE {
+ // FIXME(jseyfried): This happens when `include!()`ing a `$crate::` path, cf. #40469.
+ self.graph_root
} else {
let module_def_id = ty::DefIdTree::parent(&*self, def_id).unwrap();
self.get_extern_crate_root(module_def_id.krate)
privacy_errors: Vec<PrivacyError<'a>>,
ambiguity_errors: Vec<AmbiguityError<'a>>,
+ gated_errors: FxHashSet<Span>,
disallowed_shadowing: Vec<&'a LegacyBinding<'a>>,
arenas: &'a ResolverArenas<'a>,
privacy_errors: Vec::new(),
ambiguity_errors: Vec::new(),
+ gated_errors: FxHashSet(),
disallowed_shadowing: Vec::new(),
arenas: arenas,
if self.proc_macro_enabled { return; }
for attr in attrs {
- let maybe_binding = self.builtin_macros.get(&attr.name()).cloned().or_else(|| {
- let ident = Ident::with_empty_ctxt(attr.name());
+ let name = unwrap_or!(attr.name(), continue);
+ let maybe_binding = self.builtin_macros.get(&name).cloned().or_else(|| {
+ let ident = Ident::with_empty_ctxt(name);
self.resolve_lexical_macro_path_segment(ident, MacroNS, None).ok()
});
use syntax::ext::tt::macro_rules;
use syntax::feature_gate::{self, emit_feature_err, GateIssue};
use syntax::fold::{self, Folder};
+use syntax::parse::parser::PathStyle;
+use syntax::parse::token::{self, Token};
use syntax::ptr::P;
use syntax::symbol::{Symbol, keywords};
+use syntax::tokenstream::{TokenStream, TokenTree, Delimited};
use syntax::util::lev_distance::find_best_match_for_name;
use syntax_pos::{Span, DUMMY_SP};
fn find_legacy_attr_invoc(&mut self, attrs: &mut Vec<ast::Attribute>)
-> Option<ast::Attribute> {
for i in 0..attrs.len() {
+ let name = unwrap_or!(attrs[i].name(), continue);
+
if self.session.plugin_attributes.borrow().iter()
- .any(|&(ref attr_nm, _)| attrs[i].name() == &**attr_nm) {
+ .any(|&(ref attr_nm, _)| name == &**attr_nm) {
attr::mark_known(&attrs[i]);
}
- match self.builtin_macros.get(&attrs[i].name()).cloned() {
+ match self.builtin_macros.get(&name).cloned() {
Some(binding) => match *binding.get_macro(self) {
MultiModifier(..) | MultiDecorator(..) | SyntaxExtension::AttrProcMacro(..) => {
return Some(attrs.remove(i))
// Check for legacy derives
for i in 0..attrs.len() {
- if attrs[i].name() == "derive" {
- let mut traits = match attrs[i].meta_item_list() {
- Some(traits) if !traits.is_empty() => traits.to_owned(),
- _ => continue,
+ let name = unwrap_or!(attrs[i].name(), continue);
+
+ if name == "derive" {
+ let result = attrs[i].parse_list(&self.session.parse_sess,
+ |parser| parser.parse_path(PathStyle::Mod));
+ let mut traits = match result {
+ Ok(traits) => traits,
+ Err(mut e) => {
+ e.cancel();
+ continue
+ }
};
for j in 0..traits.len() {
- let legacy_name = Symbol::intern(&match traits[j].word() {
- Some(..) => format!("derive_{}", traits[j].name().unwrap()),
- None => continue,
- });
+ if traits[j].segments.len() > 1 {
+ continue
+ }
+ let trait_name = traits[j].segments[0].identifier.name;
+ let legacy_name = Symbol::intern(&format!("derive_{}", trait_name));
if !self.builtin_macros.contains_key(&legacy_name) {
continue
}
if traits.is_empty() {
attrs.remove(i);
} else {
- attrs[i].value = ast::MetaItem {
- name: attrs[i].name(),
- span: attrs[i].span,
- node: ast::MetaItemKind::List(traits),
- };
+ let mut tokens = Vec::new();
+ for (i, path) in traits.iter().enumerate() {
+ if i > 0 {
+ tokens.push(TokenTree::Token(attrs[i].span, Token::Comma).into());
+ }
+ for (j, segment) in path.segments.iter().enumerate() {
+ if j > 0 {
+ tokens.push(TokenTree::Token(path.span, Token::ModSep).into());
+ }
+ let tok = Token::Ident(segment.identifier);
+ tokens.push(TokenTree::Token(path.span, tok).into());
+ }
+ }
+ attrs[i].tokens = TokenTree::Delimited(attrs[i].span, Delimited {
+ delim: token::Paren,
+ tts: TokenStream::concat(tokens).into(),
+ }).into();
}
return Some(ast::Attribute {
- value: ast::MetaItem {
- name: legacy_name,
- span: span,
- node: ast::MetaItemKind::Word,
- },
+ path: ast::Path::from_ident(span, Ident::with_empty_ctxt(legacy_name)),
+ tokens: TokenStream::empty(),
id: attr::mk_attr_id(),
style: ast::AttrStyle::Outer,
is_sugared_doc: false,
InvocationKind::Bang { ref mac, .. } => {
return self.resolve_macro_to_def(scope, &mac.node.path, MacroKind::Bang, force);
}
- InvocationKind::Derive { name, span, .. } => {
- let path = ast::Path::from_ident(span, Ident::with_empty_ctxt(name));
- return self.resolve_macro_to_def(scope, &path, MacroKind::Derive, force);
+ InvocationKind::Derive { ref path, .. } => {
+ return self.resolve_macro_to_def(scope, path, MacroKind::Derive, force);
}
};
- let (attr_name, path) = {
- let attr = attr.as_ref().unwrap();
- (attr.name(), ast::Path::from_ident(attr.span, Ident::with_empty_ctxt(attr.name())))
- };
- let mut determined = true;
+ let path = attr.as_ref().unwrap().path.clone();
+ let mut determinacy = Determinacy::Determined;
match self.resolve_macro_to_def(scope, &path, MacroKind::Attr, force) {
Ok(def) => return Ok(def),
- Err(Determinacy::Undetermined) => determined = false,
+ Err(Determinacy::Undetermined) => determinacy = Determinacy::Undetermined,
Err(Determinacy::Determined) if force => return Err(Determinacy::Determined),
Err(Determinacy::Determined) => {}
}
- for &(name, span) in traits {
- let path = ast::Path::from_ident(span, Ident::with_empty_ctxt(name));
- match self.resolve_macro(scope, &path, MacroKind::Derive, force) {
+ let attr_name = match path.segments.len() {
+ 1 => path.segments[0].identifier.name,
+ _ => return Err(determinacy),
+ };
+ for path in traits {
+ match self.resolve_macro(scope, path, MacroKind::Derive, force) {
Ok(ext) => if let SyntaxExtension::ProcMacroDerive(_, ref inert_attrs) = *ext {
if inert_attrs.contains(&attr_name) {
// FIXME(jseyfried) Avoid `mem::replace` here.
}
return Err(Determinacy::Undetermined);
},
- Err(Determinacy::Undetermined) => determined = false,
+ Err(Determinacy::Undetermined) => determinacy = Determinacy::Undetermined,
Err(Determinacy::Determined) => {}
}
}
- Err(if determined { Determinacy::Determined } else { Determinacy::Undetermined })
+ Err(determinacy)
}
fn resolve_macro_to_def(&mut self, scope: Mark, path: &ast::Path, kind: MacroKind, force: bool)
self.current_module = invocation.module.get();
if path.len() > 1 {
- if !self.use_extern_macros {
+ if !self.use_extern_macros && self.gated_errors.insert(span) {
let msg = "non-ident macro paths are experimental";
let feature = "use_extern_macros";
emit_feature_err(&self.session.parse_sess, feature, span, GateIssue::Language, msg);
use syntax::ast::{self, NodeId};
use syntax::codemap::CodeMap;
use syntax::print::pprust;
-use syntax::symbol::Symbol;
use syntax_pos::Span;
use data::{self, Visibility, SigElement};
type Target = Vec<Attribute>;
fn lower(self, tcx: TyCtxt) -> Vec<Attribute> {
- let doc = Symbol::intern("doc");
self.into_iter()
// Only retain real attributes. Doc comments are lowered separately.
- .filter(|attr| attr.name() != doc)
+ .filter(|attr| attr.path != "doc")
.map(|mut attr| {
// Remove the surrounding '#[..]' or '#![..]' of the pretty printed
// attribute. First normalize all inner attribute (#![..]) to outer
use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID};
use syntax::parse::lexer::comments::strip_doc_comment_decoration;
use syntax::parse::token;
-use syntax::symbol::{Symbol, keywords};
+use syntax::symbol::keywords;
use syntax::visit::{self, Visitor};
use syntax::print::pprust::{ty_to_string, arg_to_string};
use syntax::codemap::MacroAttribute;
}
fn docs_for_attrs(attrs: &[Attribute]) -> String {
- let doc = Symbol::intern("doc");
let mut result = String::new();
for attr in attrs {
- if attr.name() == doc {
+ if attr.check_name("doc") {
if let Some(val) = attr.value_str() {
if attr.is_sugared_doc {
result.push_str(&strip_doc_comment_decoration(&val.as_str()));
},
};
- if let Err(e) = rustc::util::fs::create_dir_racy(&root_path) {
+ if let Err(e) = std::fs::create_dir_all(&root_path) {
tcx.sess.err(&format!("Could not create directory {}: {}",
root_path.display(),
e));
}
fn field(&self, attr: &ast::Attribute, name: &str) -> ast::Name {
- for item in attr.meta_item_list().unwrap_or(&[]) {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
if item.check_name(name) {
if let Some(value) = item.value_str() {
return value;
}
// After adding all files to the archive, we need to update the
- // symbol table of the archive. This currently dies on OSX (see
+ // symbol table of the archive. This currently dies on macOS (see
// #11162), and isn't necessary there anyway
if !sess.target.target.options.is_like_osx {
ab.update_symbols();
// pain to land PRs when they spuriously fail due to a segfault.
//
// The issue #38878 has some more debugging information on it as well, but
- // this unfortunately looks like it's just a race condition in OSX's linker
+ // this unfortunately looks like it's just a race condition in macOS's linker
// with some thread pool working in the background. It seems that no one
// currently knows a fix for this so in the meantime we're left with this...
info!("{:?}", &cmd);
}
- // On OSX, debuggers need this utility to get run to do some munging of
+ // On macOS, debuggers need this utility to get run to do some munging of
// the symbols
if sess.target.target.options.is_like_osx && sess.opts.debuginfo != NoDebugInfo {
match Command::new("dsymutil").arg(out_filename).output() {
.arg("-l").arg(lib)
.arg("-Wl,--no-whole-archive");
} else {
- // -force_load is the OSX equivalent of --whole-archive, but it
+ // -force_load is the macOS equivalent of --whole-archive, but it
// involves passing the full path to the library to link.
let mut v = OsString::from("-Wl,-force_load,");
v.push(&archive::find_library(lib, search_path, &self.sess));
// Follow C++ namespace-mangling style, see
// http://en.wikipedia.org/wiki/Name_mangling for more info.
//
- // It turns out that on OSX you can actually have arbitrary symbols in
+ // It turns out that on macOS you can actually have arbitrary symbols in
// function names (at least when given to LLVM), but this is not possible
// when using unix's linker. Perhaps one day when we just use a linker from LLVM
// we won't need to do this name mangling. The problem with name mangling is
llvm::LLVMRustDIBuilderFinalize(DIB(cx));
llvm::LLVMRustDIBuilderDispose(DIB(cx));
// Debuginfo generation in LLVM by default uses a higher
- // version of dwarf than OS X currently understands. We can
+ // version of dwarf than macOS currently understands. We can
// instruct LLVM to emit an older version of dwarf, however,
- // for OS X to understand. For more info see #11352
+ // for macOS to understand. For more info see #11352
// This can be overridden using --llvm-opts -dwarf-version,N.
// Android has the same issue (#22398)
if cx.sess().target.target.options.is_like_osx ||
// Call the generic checker.
let expected_arg_tys =
- self.expected_types_for_fn_args(call_expr.span,
+ self.expected_inputs_for_expected_output(call_expr.span,
expected,
fn_sig.output(),
fn_sig.inputs());
// do know the types expected for each argument and the return
// type.
- let expected_arg_tys = self.expected_types_for_fn_args(call_expr.span,
+ let expected_arg_tys = self.expected_inputs_for_expected_output(call_expr.span,
expected,
fn_sig.output().clone(),
fn_sig.inputs());
match method_fn_ty.sty {
ty::TyFnDef(def_id, .., ref fty) => {
// HACK(eddyb) ignore self in the definition (see above).
- let expected_arg_tys = self.expected_types_for_fn_args(
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
sp,
expected,
fty.0.output(),
TypeAndSubsts { substs: substs, ty: substd_ty }
}
- /// Unifies the return type with the expected type early, for more coercions
- /// and forward type information on the argument expressions.
- fn expected_types_for_fn_args(&self,
- call_span: Span,
- expected_ret: Expectation<'tcx>,
- formal_ret: Ty<'tcx>,
- formal_args: &[Ty<'tcx>])
- -> Vec<Ty<'tcx>> {
+ /// Unifies the output type with the expected type early, for more coercions
+ /// and forward type information on the input expressions.
+ fn expected_inputs_for_expected_output(&self,
+ call_span: Span,
+ expected_ret: Expectation<'tcx>,
+ formal_ret: Ty<'tcx>,
+ formal_args: &[Ty<'tcx>])
+ -> Vec<Ty<'tcx>> {
let expected_args = expected_ret.only_has_type(self).and_then(|ret_ty| {
self.fudge_regions_if_ok(&RegionVariableOrigin::Coercion(call_span), || {
// Attempt to apply a subtyping relationship between the formal
}).collect())
}).ok()
}).unwrap_or(vec![]);
- debug!("expected_types_for_fn_args(formal={:?} -> {:?}, expected={:?} -> {:?})",
+ debug!("expected_inputs_for_expected_output(formal={:?} -> {:?}, expected={:?} -> {:?})",
formal_args, formal_ret,
expected_args, expected_ret);
expected_args
fn check_expr_struct_fields(&self,
adt_ty: Ty<'tcx>,
+ expected: Expectation<'tcx>,
expr_id: ast::NodeId,
span: Span,
variant: &'tcx ty::VariantDef,
ast_fields: &'gcx [hir::Field],
check_completeness: bool) {
let tcx = self.tcx;
- let (substs, adt_kind, kind_name) = match adt_ty.sty {
- ty::TyAdt(adt, substs) => (substs, adt.adt_kind(), adt.variant_descr()),
+
+ let adt_ty_hint =
+ self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty])
+ .get(0).cloned().unwrap_or(adt_ty);
+
+ let (substs, hint_substs, adt_kind, kind_name) = match (&adt_ty.sty, &adt_ty_hint.sty) {
+ (&ty::TyAdt(adt, substs), &ty::TyAdt(_, hint_substs)) => {
+ (substs, hint_substs, adt.adt_kind(), adt.variant_descr())
+ }
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
};
// Typecheck each field.
for field in ast_fields {
- let expected_field_type;
+ let final_field_type;
+ let field_type_hint;
if let Some(v_field) = remaining_fields.remove(&field.name.node) {
- expected_field_type = self.field_ty(field.span, v_field, substs);
+ final_field_type = self.field_ty(field.span, v_field, substs);
+ field_type_hint = self.field_ty(field.span, v_field, hint_substs);
seen_fields.insert(field.name.node, field.span);
}
} else {
error_happened = true;
- expected_field_type = tcx.types.err;
+ final_field_type = tcx.types.err;
+ field_type_hint = tcx.types.err;
if let Some(_) = variant.find_field_named(field.name.node) {
let mut err = struct_span_err!(self.tcx.sess,
field.name.span,
// Make sure to give a type to the field even if there's
// an error, so we can continue typechecking
- self.check_expr_coercable_to_type(&field.expr, expected_field_type);
+ let ty = self.check_expr_with_hint(&field.expr, field_type_hint);
+ self.demand_coerce(&field.expr, ty, final_field_type);
}
// Make sure the programmer specified correct number of fields.
fn check_expr_struct(&self,
expr: &hir::Expr,
+ expected: Expectation<'tcx>,
qpath: &hir::QPath,
fields: &'gcx [hir::Field],
base_expr: &'gcx Option<P<hir::Expr>>) -> Ty<'tcx>
hir::QPath::TypeRelative(ref qself, _) => qself.span
};
- self.check_expr_struct_fields(struct_ty, expr.id, path_span, variant, fields,
+ self.check_expr_struct_fields(struct_ty, expected, expr.id, path_span, variant, fields,
base_expr.is_none());
if let &Some(ref base_expr) = base_expr {
self.check_expr_has_type(base_expr, struct_ty);
}
}
hir::ExprStruct(ref qpath, ref fields, ref base_expr) => {
- self.check_expr_struct(expr, qpath, fields, base_expr)
+ self.check_expr_struct(expr, expected, qpath, fields, base_expr)
}
hir::ExprField(ref base, ref field) => {
self.check_field(expr, lvalue_pref, &base, field)
use rustc::hir;
+use std::{mem, slice, vec};
use std::path::PathBuf;
use std::rc::Rc;
-use std::slice;
use std::sync::Arc;
use std::u32;
-use std::mem;
use core::DocContext;
use doctree;
pub struct ListAttributesIter<'a> {
attrs: slice::Iter<'a, ast::Attribute>,
- current_list: slice::Iter<'a, ast::NestedMetaItem>,
+ current_list: vec::IntoIter<ast::NestedMetaItem>,
name: &'a str
}
impl<'a> Iterator for ListAttributesIter<'a> {
- type Item = &'a ast::NestedMetaItem;
+ type Item = ast::NestedMetaItem;
fn next(&mut self) -> Option<Self::Item> {
if let Some(nested) = self.current_list.next() {
}
for attr in &mut self.attrs {
- if let Some(ref list) = attr.meta_item_list() {
+ if let Some(list) = attr.meta_item_list() {
if attr.check_name(self.name) {
- self.current_list = list.iter();
+ self.current_list = list.into_iter();
if let Some(nested) = self.current_list.next() {
return Some(nested);
}
fn lists<'a>(&'a self, name: &'a str) -> ListAttributesIter<'a> {
ListAttributesIter {
attrs: self.iter(),
- current_list: [].iter(),
+ current_list: Vec::new().into_iter(),
name: name
}
}
fn has_word(self, &str) -> bool;
}
-impl<'a, I: IntoIterator<Item=&'a ast::NestedMetaItem>> NestedAttributesExt for I {
+impl<I: IntoIterator<Item=ast::NestedMetaItem>> NestedAttributesExt for I {
fn has_word(self, word: &str) -> bool {
self.into_iter().any(|attr| attr.is_word() && attr.check_name(word))
}
decl: decl,
abi: sig.abi(),
- // trait methods canot (currently, at least) be const
+ // trait methods cannot (currently, at least) be const
constness: hir::Constness::NotConst,
})
} else {
// #[doc(no_inline)] attribute is present.
// Don't inline doc(hidden) imports so they can be stripped at a later stage.
let denied = self.vis != hir::Public || self.attrs.iter().any(|a| {
- a.name() == "doc" && match a.meta_item_list() {
- Some(l) => attr::list_contains_name(l, "no_inline") ||
- attr::list_contains_name(l, "hidden"),
+ a.name().unwrap() == "doc" && match a.meta_item_list() {
+ Some(l) => attr::list_contains_name(&l, "no_inline") ||
+ attr::list_contains_name(&l, "hidden"),
None => false,
}
});
use rustc::middle::stability;
use rustc::hir;
use rustc::util::nodemap::{FxHashMap, FxHashSet};
+use rustc::session::config::nightly_options::is_nightly_build;
use rustc_data_structures::flock;
use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability};
}
};
// FIXME(#24111): remove when `const_fn` is stabilized
- let vis_constness = match UnstableFeatures::from_environment() {
- UnstableFeatures::Allow => constness,
- _ => hir::Constness::NotConst
+ let vis_constness = if is_nightly_build() {
+ constness
+ } else {
+ hir::Constness::NotConst
};
let prefix = format!("{}{}{:#}fn {}{:#}",
ConstnessSpace(vis_constness),
let mut attrs = String::new();
for attr in &it.attrs.other_attrs {
- let name = attr.name();
+ let name = attr.name().unwrap();
if !ATTRIBUTE_WHITELIST.contains(&&name.as_str()[..]) {
continue;
}
- if let Some(s) = render_attribute(attr.meta()) {
+ if let Some(s) = render_attribute(&attr.meta().unwrap()) {
attrs.push_str(&format!("#[{}]\n", s));
}
}
position: absolute;
left: 0;
top: 0;
- min-height: 100vh;
+ min-height: 100%;
}
.sidebar .current {
/// Load a plugin with the given name.
///
/// Turns `name` into the proper dynamic library filename for the given
- /// platform. On windows, it turns into name.dll, on OS X, name.dylib, and
+ /// platform. On windows, it turns into name.dll, on macOS, name.dylib, and
/// elsewhere, libname.so.
pub fn load_plugin(&mut self, name: String) {
let x = self.prefix.join(libname(name));
attrs: Vec::new(),
};
- let attrs = krate.attrs.iter()
- .filter(|a| a.check_name("doc"))
- .filter_map(|a| a.meta_item_list())
- .flat_map(|l| l)
- .filter(|a| a.check_name("test"))
- .filter_map(|a| a.meta_item_list())
- .flat_map(|l| l);
+ let test_attrs: Vec<_> = krate.attrs.iter()
+ .filter(|a| a.check_name("doc"))
+ .flat_map(|a| a.meta_item_list().unwrap_or_else(Vec::new))
+ .filter(|a| a.check_name("test"))
+ .collect();
+ let attrs = test_attrs.iter().flat_map(|a| a.meta_item_list().unwrap_or(&[]));
+
for attr in attrs {
if attr.check_name("no_crate_inject") {
opts.no_crate_inject = true;
if item.vis == hir::Public && self.inside_public_path {
let please_inline = item.attrs.iter().any(|item| {
match item.meta_item_list() {
- Some(list) if item.check_name("doc") => {
+ Some(ref list) if item.check_name("doc") => {
list.iter().any(|i| i.check_name("inline"))
}
_ => false,
/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
/// reasonable best-effort is made to generate this seed from a high quality,
/// secure source of randomness provided by the host without blocking the
-/// program. Because of this, the randomness of the seed is dependant on the
-/// quality of the system's random number generator at the time it is created.
+/// program. Because of this, the randomness of the seed depends on the output
+/// quality of the system's random number generator when the seed is created.
/// In particular, seeds generated when the system's entropy pool is abnormally
/// low such as during system boot may be of a lower quality.
///
fn next(&mut self) -> Option<(SafeHash, K, V)> {
self.iter.next().map(|bucket| {
unsafe {
- (**self.table).size -= 1;
+ (*self.table.as_mut_ptr()).size -= 1;
let (k, v) = ptr::read(bucket.pair);
(SafeHash { hash: ptr::replace(bucket.hash, EMPTY_BUCKET) }, k, v)
}
}
/// Converts this `CString` into a boxed `CStr`.
- #[unstable(feature = "into_boxed_c_str", issue = "0")]
+ #[unstable(feature = "into_boxed_c_str", issue = "40380")]
pub fn into_boxed_c_str(self) -> Box<CStr> {
unsafe { mem::transmute(self.into_inner()) }
}
}
}
+#[stable(feature = "c_string_from_box", since = "1.17.0")]
+impl From<Box<CStr>> for CString {
+ fn from(s: Box<CStr>) -> CString {
+ s.into_c_string()
+ }
+}
+
+#[stable(feature = "box_from_c_string", since = "1.17.0")]
+impl Into<Box<CStr>> for CString {
+ fn into(self) -> Box<CStr> {
+ self.into_boxed_c_str()
+ }
+}
+
#[stable(feature = "default_box_extra", since = "1.17.0")]
impl Default for Box<CStr> {
fn default() -> Box<CStr> {
pub fn to_string_lossy(&self) -> Cow<str> {
String::from_utf8_lossy(self.to_bytes())
}
+
+ /// Converts a `Box<CStr>` into a `CString` without copying or allocating.
+ #[unstable(feature = "into_boxed_c_str", issue = "40380")]
+ pub fn into_c_string(self: Box<CStr>) -> CString {
+ unsafe { mem::transmute(self) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
fn into_boxed() {
let orig: &[u8] = b"Hello, world!\0";
let cstr = CStr::from_bytes_with_nul(orig).unwrap();
- let cstring = cstr.to_owned();
- let box1: Box<CStr> = Box::from(cstr);
- let box2 = cstring.into_boxed_c_str();
- assert_eq!(cstr, &*box1);
- assert_eq!(box1, box2);
- assert_eq!(&*box2, cstr);
+ let boxed: Box<CStr> = Box::from(cstr);
+ let cstring = cstr.to_owned().into_boxed_c_str().into_c_string();
+ assert_eq!(cstr, &*boxed);
+ assert_eq!(&*boxed, &*cstring);
+ assert_eq!(&*cstring, cstr);
}
#[test]
/// in the given `OsString`.
///
/// The collection may reserve more space to avoid frequent reallocations.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::new();
+ /// s.reserve(10);
+ /// assert!(s.capacity() >= 10);
+ /// ```
#[stable(feature = "osstring_simple_functions", since = "1.9.0")]
pub fn reserve(&mut self, additional: usize) {
self.inner.reserve(additional)
/// Note that the allocator may give the collection more space than it
/// requests. Therefore capacity can not be relied upon to be precisely
/// minimal. Prefer reserve if future insertions are expected.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::new();
+ /// s.reserve_exact(10);
+ /// assert!(s.capacity() >= 10);
+ /// ```
#[stable(feature = "osstring_simple_functions", since = "1.9.0")]
pub fn reserve_exact(&mut self, additional: usize) {
self.inner.reserve_exact(additional)
}
/// Shrinks the capacity of the `OsString` to match its length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(osstring_shrink_to_fit)]
+ ///
+ /// use std::ffi::OsString;
+ ///
+ /// let mut s = OsString::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to_fit();
+ /// assert_eq!(3, s.capacity());
+ /// ```
#[unstable(feature = "osstring_shrink_to_fit", issue = "40421")]
pub fn shrink_to_fit(&mut self) {
self.inner.shrink_to_fit()
}
/// Converts this `OsString` into a boxed `OsStr`.
- #[unstable(feature = "into_boxed_os_str", issue = "0")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(into_boxed_os_str)]
+ ///
+ /// use std::ffi::{OsString, OsStr};
+ ///
+ /// let s = OsString::from("hello");
+ ///
+ /// let b: Box<OsStr> = s.into_boxed_os_str();
+ /// ```
+ #[unstable(feature = "into_boxed_os_str", issue = "40380")]
pub fn into_boxed_os_str(self) -> Box<OsStr> {
unsafe { mem::transmute(self.inner.into_box()) }
}
/// Copies the slice into an owned [`OsString`].
///
/// [`OsString`]: struct.OsString.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{OsStr, OsString};
+ ///
+ /// let os_str = OsStr::new("foo");
+ /// let os_string = os_str.to_os_string();
+ /// assert_eq!(os_string, OsString::from("foo"));
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_os_string(&self) -> OsString {
OsString { inner: self.inner.to_owned() }
self.inner.inner.len()
}
+ /// Converts a `Box<OsStr>` into an `OsString` without copying or allocating.
+ #[unstable(feature = "into_boxed_os_str", issue = "40380")]
+ pub fn into_os_string(self: Box<OsStr>) -> OsString {
+ let inner: Box<Slice> = unsafe { mem::transmute(self) };
+ OsString { inner: Buf::from_box(inner) }
+ }
+
/// Gets the underlying byte representation.
///
/// Note: it is *crucial* that this API is private, to avoid
}
}
+#[stable(feature = "os_string_from_box", since = "1.17.0")]
+impl<'a> From<Box<OsStr>> for OsString {
+ fn from(boxed: Box<OsStr>) -> OsString {
+ boxed.into_os_string()
+ }
+}
+
+#[stable(feature = "box_from_c_string", since = "1.17.0")]
+impl Into<Box<OsStr>> for OsString {
+ fn into(self) -> Box<OsStr> {
+ self.into_boxed_os_str()
+ }
+}
+
#[stable(feature = "box_default_extra", since = "1.17.0")]
impl Default for Box<OsStr> {
fn default() -> Box<OsStr> {
fn into_boxed() {
let orig = "Hello, world!";
let os_str = OsStr::new(orig);
- let os_string = os_str.to_owned();
- let box1: Box<OsStr> = Box::from(os_str);
- let box2 = os_string.into_boxed_os_str();
- assert_eq!(os_str, &*box1);
- assert_eq!(box1, box2);
- assert_eq!(&*box2, os_str);
+ let boxed: Box<OsStr> = Box::from(os_str);
+ let os_string = os_str.to_owned().into_boxed_os_str().into_os_string();
+ assert_eq!(os_str, &*boxed);
+ assert_eq!(&*boxed, &*os_string);
+ assert_eq!(&*os_string, os_str);
}
#[test]
/// [`File::open`]: struct.File.html#method.open
/// [`File::create`]: struct.File.html#method.create
///
-/// Generally speaking, when using `OpenOptions`, you'll first call [`new()`],
-/// then chain calls to methods to set each option, then call [`open()`],
+/// Generally speaking, when using `OpenOptions`, you'll first call [`new`],
+/// then chain calls to methods to set each option, then call [`open`],
/// passing the path of the file you're trying to open. This will give you a
/// [`io::Result`][result] with a [`File`][file] inside that you can further
/// operate on.
///
-/// [`new()`]: struct.OpenOptions.html#method.new
-/// [`open()`]: struct.OpenOptions.html#method.open
+/// [`new`]: struct.OpenOptions.html#method.new
+/// [`open`]: struct.OpenOptions.html#method.open
/// [result]: ../io/type.Result.html
/// [file]: struct.File.html
///
/// error conditions for when a directory is being created (after it is
/// determined to not exist) are outlined by `fs::create_dir`.
///
+/// A notable exception is made for situations where any of the directories
+/// specified in `path` could not be created because another thread or process
+/// created it concurrently. Such cases are considered successful. In other
+/// words, calling `create_dir_all` concurrently from multiple threads or
+/// processes is guaranteed not to fail due to a race with itself.
+///
/// # Examples
///
/// ```
}
fn create_dir_all(&self, path: &Path) -> io::Result<()> {
- if path == Path::new("") || path.is_dir() { return Ok(()) }
- if let Some(p) = path.parent() {
- self.create_dir_all(p)?
+ if path == Path::new("") {
+ return Ok(())
+ }
+
+ match self.inner.mkdir(path) {
+ Ok(()) => return Ok(()),
+ Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
+ Err(_) if path.is_dir() => return Ok(()),
+ Err(e) => return Err(e),
+ }
+ match path.parent() {
+ Some(p) => try!(self.create_dir_all(p)),
+ None => return Err(io::Error::new(io::ErrorKind::Other, "failed to create whole tree")),
+ }
+ match self.inner.mkdir(path) {
+ Ok(()) => Ok(()),
+ Err(_) if path.is_dir() => Ok(()),
+ Err(e) => Err(e),
}
- self.inner.mkdir(path)
}
}
use rand::{StdRng, Rng};
use str;
use sys_common::io::test::{TempDir, tmpdir};
+ use thread;
#[cfg(windows)]
use os::windows::fs::{symlink_dir, symlink_file};
assert!(result.is_err());
}
+ #[test]
+ fn concurrent_recursive_mkdir() {
+ for _ in 0..100 {
+ let dir = tmpdir();
+ let mut dir = dir.join("a");
+ for _ in 0..40 {
+ dir = dir.join("a");
+ }
+ let mut join = vec!();
+ for _ in 0..8 {
+ let dir = dir.clone();
+ join.push(thread::spawn(move || {
+ check!(fs::create_dir_all(&dir));
+ }))
+ }
+
+ // No `Display` on result of `join()`
+ join.drain(..).map(|join| join.join().unwrap()).count();
+ }
+ }
+
#[test]
fn recursive_mkdir_slash() {
check!(fs::create_dir_all(&Path::new("/")));
}
+ #[test]
+ fn recursive_mkdir_dot() {
+ check!(fs::create_dir_all(&Path::new(".")));
+ }
+
+ #[test]
+ fn recursive_mkdir_empty() {
+ check!(fs::create_dir_all(&Path::new("")));
+ }
+
#[test]
fn recursive_rmdir() {
let tmpdir = tmpdir();
#[stable(feature = "rust1", since = "1.0.0")]
TimedOut,
/// An error returned when an operation could not be completed because a
- /// call to [`write()`] returned [`Ok(0)`].
+ /// call to [`write`] returned [`Ok(0)`].
///
/// This typically means that an operation could only succeed if it wrote a
/// particular number of bytes but only a smaller number of bytes could be
/// written.
///
- /// [`write()`]: ../../std/io/trait.Write.html#tymethod.write
+ /// [`write`]: ../../std/io/trait.Write.html#tymethod.write
/// [`Ok(0)`]: ../../std/io/type.Result.html
#[stable(feature = "rust1", since = "1.0.0")]
WriteZero,
//! of other types, and you can implement them for your types too. As such,
//! you'll see a few different types of I/O throughout the documentation in
//! this module: [`File`]s, [`TcpStream`]s, and sometimes even [`Vec<T>`]s. For
-//! example, [`Read`] adds a [`read()`] method, which we can use on `File`s:
+//! example, [`Read`] adds a [`read`] method, which we can use on `File`s:
//!
//! ```
//! use std::io;
//! ```
//!
//! [`BufWriter`] doesn't add any new ways of writing; it just buffers every call
-//! to [`write()`]:
+//! to [`write`]:
//!
//! ```
//! use std::io;
//! # }
//! ```
//!
-//! Of course, using [`io::stdout()`] directly is less common than something like
+//! Of course, using [`io::stdout`] directly is less common than something like
//! [`println!`].
//!
//! ## Iterator types
//! [`Vec<T>`]: ../vec/struct.Vec.html
//! [`BufReader`]: struct.BufReader.html
//! [`BufWriter`]: struct.BufWriter.html
-//! [`write()`]: trait.Write.html#tymethod.write
-//! [`io::stdout()`]: fn.stdout.html
+//! [`write`]: trait.Write.html#tymethod.write
+//! [`io::stdout`]: fn.stdout.html
//! [`println!`]: ../macro.println.html
//! [`Lines`]: struct.Lines.html
//! [`io::Result`]: type.Result.html
//! [`?` operator]: ../../book/syntax-index.html
-//! [`read()`]: trait.Read.html#tymethod.read
+//! [`read`]: trait.Read.html#tymethod.read
#![stable(feature = "rust1", since = "1.0.0")]
/// If the data in this stream is *not* valid UTF-8 then an error is
/// returned and `buf` is unchanged.
///
- /// See [`read_to_end()`][readtoend] for other error semantics.
+ /// See [`read_to_end`][readtoend] for other error semantics.
///
/// [readtoend]: #method.read_to_end
///
///
/// Implementors of the `Write` trait are sometimes called 'writers'.
///
-/// Writers are defined by two required methods, [`write()`] and [`flush()`]:
+/// Writers are defined by two required methods, [`write`] and [`flush`]:
///
-/// * The [`write()`] method will attempt to write some data into the object,
+/// * The [`write`] method will attempt to write some data into the object,
/// returning how many bytes were successfully written.
///
-/// * The [`flush()`] method is useful for adaptors and explicit buffers
+/// * The [`flush`] method is useful for adaptors and explicit buffers
/// themselves for ensuring that all buffered data has been pushed out to the
/// 'true sink'.
///
/// throughout [`std::io`] take and provide types which implement the `Write`
/// trait.
///
-/// [`write()`]: #tymethod.write
-/// [`flush()`]: #tymethod.flush
+/// [`write`]: #tymethod.write
+/// [`flush`]: #tymethod.flush
/// [`std::io`]: index.html
///
/// # Examples
///
/// For example, reading line-by-line is inefficient without using a buffer, so
/// if you want to read by line, you'll need `BufRead`, which includes a
-/// [`read_line()`] method as well as a [`lines()`] iterator.
+/// [`read_line`] method as well as a [`lines`] iterator.
///
/// # Examples
///
///
/// [`BufReader`]: struct.BufReader.html
/// [`File`]: ../fs/struct.File.html
-/// [`read_line()`]: #method.read_line
-/// [`lines()`]: #method.lines
+/// [`read_line`]: #method.read_line
+/// [`lines`]: #method.lines
/// [`Read`]: trait.Read.html
///
/// ```
/// Fills the internal buffer of this object, returning the buffer contents.
///
/// This function is a lower-level call. It needs to be paired with the
- /// [`consume()`] method to function properly. When calling this
+ /// [`consume`] method to function properly. When calling this
/// method, none of the contents will be "read" in the sense that later
- /// calling `read` may return the same contents. As such, [`consume()`] must
+ /// calling `read` may return the same contents. As such, [`consume`] must
/// be called with the number of bytes that are consumed from this buffer to
/// ensure that the bytes are never returned twice.
///
- /// [`consume()`]: #tymethod.consume
+ /// [`consume`]: #tymethod.consume
///
/// An empty buffer returned indicates that the stream has reached EOF.
///
/// so they should no longer be returned in calls to `read`.
///
/// This function is a lower-level call. It needs to be paired with the
- /// [`fill_buf()`] method to function properly. This function does
+ /// [`fill_buf`] method to function properly. This function does
/// not perform any I/O, it simply informs this object that some amount of
- /// its buffer, returned from [`fill_buf()`], has been consumed and should
+ /// its buffer, returned from [`fill_buf`], has been consumed and should
/// no longer be returned. As such, this function may do odd things if
- /// [`fill_buf()`] isn't called before calling it.
+ /// [`fill_buf`] isn't called before calling it.
///
/// The `amt` must be `<=` the number of bytes in the buffer returned by
- /// [`fill_buf()`].
+ /// [`fill_buf`].
///
/// # Examples
///
- /// Since `consume()` is meant to be used with [`fill_buf()`],
+ /// Since `consume()` is meant to be used with [`fill_buf`],
/// that method's example includes an example of `consume()`.
///
- /// [`fill_buf()`]: #tymethod.fill_buf
+ /// [`fill_buf`]: #tymethod.fill_buf
#[stable(feature = "rust1", since = "1.0.0")]
fn consume(&mut self, amt: usize);
/// # Errors
///
/// This function will ignore all instances of [`ErrorKind::Interrupted`] and
- /// will otherwise return any errors returned by [`fill_buf()`].
+ /// will otherwise return any errors returned by [`fill_buf`].
///
/// If an I/O error is encountered then all bytes read so far will be
/// present in `buf` and its length will have been adjusted appropriately.
/// A locked standard input implements `BufRead`. In this example, we'll
/// read from standard input until we see an `a` byte.
///
- /// [`fill_buf()`]: #tymethod.fill_buf
+ /// [`fill_buf`]: #tymethod.fill_buf
/// [`ErrorKind::Interrupted`]: enum.ErrorKind.html#variant.Interrupted
///
/// ```
///
/// # Errors
///
- /// This function has the same error semantics as [`read_until()`] and will
+ /// This function has the same error semantics as [`read_until`] and will
/// also return an error if the read bytes are not valid UTF-8. If an I/O
/// error is encountered then `buf` may contain some bytes already read in
/// the event that all data read so far was valid UTF-8.
///
/// A locked standard input implements `BufRead`. In this example, we'll
/// read all of the lines from standard input. If we were to do this in
- /// an actual project, the [`lines()`] method would be easier, of
+ /// an actual project, the [`lines`] method would be easier, of
/// course.
///
- /// [`lines()`]: #method.lines
- /// [`read_until()`]: #method.read_until
+ /// [`lines`]: #method.lines
+ /// [`read_until`]: #method.read_until
///
/// ```
/// use std::io;
/// [`io::Result`]`<`[`Vec<u8>`]`>`. Each vector returned will *not* have
/// the delimiter byte at the end.
///
- /// This function will yield errors whenever [`read_until()`] would have
+ /// This function will yield errors whenever [`read_until`] would have
/// also yielded an error.
///
/// # Examples
///
/// [`io::Result`]: type.Result.html
/// [`Vec<u8>`]: ../vec/struct.Vec.html
- /// [`read_until()`]: #method.read_until
+ /// [`read_until`]: #method.read_until
///
/// ```
/// use std::io;
///
/// # Errors
///
- /// Each line of the iterator has the same error semantics as [`BufRead::read_line()`].
+ /// Each line of the iterator has the same error semantics as [`BufRead::read_line`].
///
- /// [`BufRead::read_line()`]: trait.BufRead.html#method.read_line
+ /// [`BufRead::read_line`]: trait.BufRead.html#method.read_line
#[stable(feature = "rust1", since = "1.0.0")]
fn lines(self) -> Lines<Self> where Self: Sized {
Lines { buf: self }
/// Adaptor to chain together two readers.
///
-/// This struct is generally created by calling [`chain()`] on a reader.
-/// Please see the documentation of [`chain()`] for more details.
+/// This struct is generally created by calling [`chain`] on a reader.
+/// Please see the documentation of [`chain`] for more details.
///
-/// [`chain()`]: trait.Read.html#method.chain
+/// [`chain`]: trait.Read.html#method.chain
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Chain<T, U> {
first: T,
/// Reader adaptor which limits the bytes read from an underlying reader.
///
-/// This struct is generally created by calling [`take()`] on a reader.
-/// Please see the documentation of [`take()`] for more details.
+/// This struct is generally created by calling [`take`] on a reader.
+/// Please see the documentation of [`take`] for more details.
///
-/// [`take()`]: trait.Read.html#method.take
+/// [`take`]: trait.Read.html#method.take
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Take<T> {
/// An iterator over `u8` values of a reader.
///
-/// This struct is generally created by calling [`bytes()`] on a reader.
-/// Please see the documentation of [`bytes()`] for more details.
+/// This struct is generally created by calling [`bytes`] on a reader.
+/// Please see the documentation of [`bytes`] for more details.
///
-/// [`bytes()`]: trait.Read.html#method.bytes
+/// [`bytes`]: trait.Read.html#method.bytes
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Bytes<R> {
/// An iterator over the `char`s of a reader.
///
-/// This struct is generally created by calling [`chars()`][chars] on a reader.
+/// This struct is generally created by calling [`chars`][chars] on a reader.
/// Please see the documentation of `chars()` for more details.
///
/// [chars]: trait.Read.html#method.chars
/// An iterator over the contents of an instance of `BufRead` split on a
/// particular byte.
///
-/// This struct is generally created by calling [`split()`][split] on a
+/// This struct is generally created by calling [`split`][split] on a
/// `BufRead`. Please see the documentation of `split()` for more details.
///
/// [split]: trait.BufRead.html#method.split
/// An iterator over the lines of an instance of `BufRead`.
///
-/// This struct is generally created by calling [`lines()`][lines] on a
+/// This struct is generally created by calling [`lines`][lines] on a
/// `BufRead`. Please see the documentation of `lines()` for more details.
///
/// [lines]: trait.BufRead.html#method.lines
///
/// Each handle shares a global buffer of data to be written to the standard
/// output stream. Access is also synchronized via a lock and explicit control
-/// over locking is available via the [`lock()`] method.
+/// over locking is available via the [`lock`] method.
///
/// Created by the [`io::stdout`] method.
///
-/// [`lock()`]: #method.lock
+/// [`lock`]: #method.lock
/// [`io::stdout`]: fn.stdout.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stdout {
/// A reader which is always at EOF.
///
-/// This struct is generally created by calling [`empty()`][empty]. Please see
+/// This struct is generally created by calling [`empty`][empty]. Please see
/// the documentation of `empty()` for more details.
///
/// [empty]: fn.empty.html
/// A reader which yields one byte over and over and over and over and over and...
///
-/// This struct is generally created by calling [`repeat()`][repeat]. Please
+/// This struct is generally created by calling [`repeat`][repeat]. Please
/// see the documentation of `repeat()` for more details.
///
/// [repeat]: fn.repeat.html
/// A writer which will move data into the void.
///
-/// This struct is generally created by calling [`sink()`][sink]. Please
+/// This struct is generally created by calling [`sink`][sink]. Please
/// see the documentation of `sink()` for more details.
///
/// [sink]: fn.sink.html
//! contained an `extern crate std;` import at the [crate root]. Therefore the
//! standard library can be accessed in [`use`] statements through the path
//! `std`, as in [`use std::env`], or in expressions through the absolute path
-//! `::std`, as in [`::std::env::args()`].
+//! `::std`, as in [`::std::env::args`].
//!
//! # How to read this documentation
//!
//! [TCP]: net/struct.TcpStream.html
//! [The Rust Prelude]: prelude/index.html
//! [UDP]: net/struct.UdpSocket.html
-//! [`::std::env::args()`]: env/fn.args.html
+//! [`::std::env::args`]: env/fn.args.html
//! [`Arc`]: sync/struct.Arc.html
//! [owned slice]: boxed/index.html
//! [`Cell`]: cell/struct.Cell.html
#![feature(char_escape_debug)]
#![feature(char_internals)]
#![feature(collections)]
-#![feature(collections_bound)]
#![feature(collections_range)]
#![feature(compiler_builtins_lib)]
#![feature(const_fn)]
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<Ipv4Addr> for u32 {
+ /// Performs the conversion in network byte order (big-endian).
fn from(ip: Ipv4Addr) -> u32 {
let ip = ip.octets();
((ip[0] as u32) << 24) + ((ip[1] as u32) << 16) + ((ip[2] as u32) << 8) + (ip[3] as u32)
#[stable(feature = "ip_u32", since = "1.1.0")]
impl From<u32> for Ipv4Addr {
+ /// Performs the conversion in network byte order (big-endian).
fn from(ip: u32) -> Ipv4Addr {
Ipv4Addr::new((ip >> 24) as u8, (ip >> 16) as u8, (ip >> 8) as u8, ip as u8)
}
/// Sets the read timeout to the timeout specified.
///
- /// If the value specified is [`None`], then [`read()`] calls will block
+ /// If the value specified is [`None`], then [`read`] calls will block
/// indefinitely. It is an error to pass the zero `Duration` to this
/// method.
///
/// error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`read()`]: ../../std/io/trait.Read.html#tymethod.read
+ /// [`read`]: ../../std/io/trait.Read.html#tymethod.read
/// [`WouldBlock`]: ../../std/io/enum.ErrorKind.html#variant.WouldBlock
/// [`TimedOut`]: ../../std/io/enum.ErrorKind.html#variant.TimedOut
///
/// Sets the write timeout to the timeout specified.
///
- /// If the value specified is [`None`], then [`write()`] calls will block
+ /// If the value specified is [`None`], then [`write`] calls will block
/// indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// an error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`write()`]: ../../std/io/trait.Write.html#tymethod.write
+ /// [`write`]: ../../std/io/trait.Write.html#tymethod.write
/// [`Duration`]: ../../std/time/struct.Duration.html
/// [`WouldBlock`]: ../../std/io/enum.ErrorKind.html#variant.WouldBlock
/// [`TimedOut`]: ../../std/io/enum.ErrorKind.html#variant.TimedOut
/// Returns the read timeout of this socket.
///
- /// If the timeout is [`None`], then [`read()`] calls will block indefinitely.
+ /// If the timeout is [`None`], then [`read`] calls will block indefinitely.
///
/// # Note
///
/// Some platforms do not provide access to the current timeout.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`read()`]: ../../std/io/trait.Read.html#tymethod.read
+ /// [`read`]: ../../std/io/trait.Read.html#tymethod.read
///
/// # Examples
///
/// Returns the write timeout of this socket.
///
- /// If the timeout is [`None`], then [`write()`] calls will block indefinitely.
+ /// If the timeout is [`None`], then [`write`] calls will block indefinitely.
///
/// # Note
///
/// Some platforms do not provide access to the current timeout.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`write()`]: ../../std/io/trait.Write.html#tymethod.write
+ /// [`write`]: ../../std/io/trait.Write.html#tymethod.write
///
/// # Examples
///
/// Gets the value of the `IP_TTL` option for this socket.
///
- /// For more information about this option, see [`set_ttl()`][link].
+ /// For more information about this option, see [`set_ttl`][link].
///
/// [link]: #method.set_ttl
///
/// Sets the read timeout to the timeout specified.
///
- /// If the value specified is [`None`], then [`read()`] calls will block
+ /// If the value specified is [`None`], then [`read`] calls will block
/// indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`read()`]: ../../std/io/trait.Read.html#tymethod.read
+ /// [`read`]: ../../std/io/trait.Read.html#tymethod.read
/// [`Duration`]: ../../std/time/struct.Duration.html
/// [`WouldBlock`]: ../../std/io/enum.ErrorKind.html#variant.WouldBlock
/// [`TimedOut`]: ../../std/io/enum.ErrorKind.html#variant.TimedOut
/// Sets the write timeout to the timeout specified.
///
- /// If the value specified is [`None`], then [`write()`] calls will block
+ /// If the value specified is [`None`], then [`write`] calls will block
/// indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// an error of the kind [`WouldBlock`], but Windows may return [`TimedOut`].
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`write()`]: ../../std/io/trait.Write.html#tymethod.write
+ /// [`write`]: ../../std/io/trait.Write.html#tymethod.write
/// [`Duration`]: ../../std/time/struct.Duration.html
/// [`WouldBlock`]: ../../std/io/enum.ErrorKind.html#variant.WouldBlock
/// [`TimedOut`]: ../../std/io/enum.ErrorKind.html#variant.TimedOut
/// Returns the read timeout of this socket.
///
- /// If the timeout is [`None`], then [`read()`] calls will block indefinitely.
+ /// If the timeout is [`None`], then [`read`] calls will block indefinitely.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`read()`]: ../../std/io/trait.Read.html#tymethod.read
+ /// [`read`]: ../../std/io/trait.Read.html#tymethod.read
///
/// # Examples
///
/// Returns the write timeout of this socket.
///
- /// If the timeout is [`None`], then [`write()`] calls will block indefinitely.
+ /// If the timeout is [`None`], then [`write`] calls will block indefinitely.
///
/// [`None`]: ../../std/option/enum.Option.html#variant.None
- /// [`write()`]: ../../std/io/trait.Write.html#tymethod.write
+ /// [`write`]: ../../std/io/trait.Write.html#tymethod.write
///
/// # Examples
///
/// Sends data on the socket to the remote address to which it is connected.
///
- /// The [`connect()`] method will connect this socket to a remote address. This
+ /// The [`connect`] method will connect this socket to a remote address. This
/// method will fail if the socket is not connected.
///
- /// [`connect()`]: #method.connect
+ /// [`connect`]: #method.connect
///
/// # Examples
///
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! MacOS-specific definitions
+//! macOS-specific definitions
#![stable(feature = "raw_ext", since = "1.1.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! MacOS-specific raw type definitions
+//! macOS-specific raw type definitions
#![stable(feature = "raw_ext", since = "1.1.0")]
#![rustc_deprecated(since = "1.8.0",
/// A struct providing information about a panic.
///
-/// `PanicInfo` structure is passed to a panic hook set by the [`set_hook()`]
+/// `PanicInfo` structure is passed to a panic hook set by the [`set_hook`]
/// function.
///
-/// [`set_hook()`]: ../../std/panic/fn.set_hook.html
+/// [`set_hook`]: ../../std/panic/fn.set_hook.html
///
/// # Examples
///
/// A struct containing information about the location of a panic.
///
-/// This structure is created by the [`location()`] method of [`PanicInfo`].
+/// This structure is created by the [`location`] method of [`PanicInfo`].
///
-/// [`location()`]: ../../std/panic/struct.PanicInfo.html#method.location
+/// [`location`]: ../../std/panic/struct.PanicInfo.html#method.location
/// [`PanicInfo`]: ../../std/panic/struct.PanicInfo.html
///
/// # Examples
self.inner.push(path);
}
- /// Truncate `self` to [`self.parent()`].
+ /// Truncate `self` to [`self.parent`].
///
- /// Returns false and does nothing if [`self.file_name()`] is `None`.
+ /// Returns false and does nothing if [`self.file_name`] is `None`.
/// Otherwise, returns `true`.
///
- /// [`self.parent()`]: struct.PathBuf.html#method.parent
- /// [`self.file_name()`]: struct.PathBuf.html#method.file_name
+ /// [`self.parent`]: struct.PathBuf.html#method.parent
+ /// [`self.file_name`]: struct.PathBuf.html#method.file_name
///
/// # Examples
///
}
}
- /// Updates [`self.file_name()`] to `file_name`.
+ /// Updates [`self.file_name`] to `file_name`.
///
- /// If [`self.file_name()`] was [`None`], this is equivalent to pushing
+ /// If [`self.file_name`] was [`None`], this is equivalent to pushing
/// `file_name`.
///
- /// [`self.file_name()`]: struct.PathBuf.html#method.file_name
+ /// [`self.file_name`]: struct.PathBuf.html#method.file_name
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
self.push(file_name);
}
- /// Updates [`self.extension()`] to `extension`.
+ /// Updates [`self.extension`] to `extension`.
///
- /// If [`self.file_name()`] is `None`, does nothing and returns `false`.
+ /// If [`self.file_name`] is `None`, does nothing and returns `false`.
///
- /// Otherwise, returns `true`; if [`self.extension()`] is [`None`], the
+ /// Otherwise, returns `true`; if [`self.extension`] is [`None`], the
/// extension is added; otherwise it is replaced.
///
- /// [`self.file_name()`]: struct.PathBuf.html#method.file_name
- /// [`self.extension()`]: struct.PathBuf.html#method.extension
+ /// [`self.file_name`]: struct.PathBuf.html#method.file_name
+ /// [`self.extension`]: struct.PathBuf.html#method.extension
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
}
/// Converts this `PathBuf` into a boxed `Path`.
- #[unstable(feature = "into_boxed_path", issue = "0")]
+ #[unstable(feature = "into_boxed_path", issue = "40380")]
pub fn into_boxed_path(self) -> Box<Path> {
unsafe { mem::transmute(self.inner.into_boxed_os_str()) }
}
}
}
-#[stable(feature = "box_default_extra", since = "1.17.0")]
-impl Default for Box<Path> {
- fn default() -> Box<Path> {
- let boxed: Box<OsStr> = Default::default();
- unsafe { mem::transmute(boxed) }
+#[stable(feature = "path_buf_from_box", since = "1.17.0")]
+impl<'a> From<Box<Path>> for PathBuf {
+ fn from(boxed: Box<Path>) -> PathBuf {
+ boxed.into_path_buf()
+ }
+}
+
+#[stable(feature = "box_from_path_buf", since = "1.17.0")]
+impl Into<Box<Path>> for PathBuf {
+ fn into(self) -> Box<Path> {
+ self.into_boxed_path()
}
}
/// assert_eq!(path.to_string_lossy(), "foo.txt");
/// ```
///
- /// Had `os_str` contained invalid unicode, the `to_string_lossy` call might
+ /// Had `path` contained invalid unicode, the `to_string_lossy` call might
/// have returned `"fo�.txt"`.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_string_lossy(&self) -> Cow<str> {
iter_after(self.components().rev(), child.components().rev()).is_some()
}
- /// Extracts the stem (non-extension) portion of [`self.file_name()`].
+ /// Extracts the stem (non-extension) portion of [`self.file_name`].
///
- /// [`self.file_name()`]: struct.Path.html#method.file_name
+ /// [`self.file_name`]: struct.Path.html#method.file_name
///
/// The stem is:
///
self.file_name().map(split_file_at_dot).and_then(|(before, after)| before.or(after))
}
- /// Extracts the extension of [`self.file_name()`], if possible.
+ /// Extracts the extension of [`self.file_name`], if possible.
///
/// The extension is:
///
/// * [`None`], if the file name begins with `.` and has no other `.`s within;
/// * Otherwise, the portion of the file name after the final `.`
///
- /// [`self.file_name()`]: struct.Path.html#method.file_name
+ /// [`self.file_name`]: struct.Path.html#method.file_name
/// [`None`]: ../../std/option/enum.Option.html#variant.None
///
/// # Examples
pub fn is_dir(&self) -> bool {
fs::metadata(self).map(|m| m.is_dir()).unwrap_or(false)
}
+
+ /// Converts a `Box<Path>` into a `PathBuf` without copying or allocating.
+ #[unstable(feature = "into_boxed_path", issue = "40380")]
+ pub fn into_path_buf(self: Box<Path>) -> PathBuf {
+ let inner: Box<OsStr> = unsafe { mem::transmute(self) };
+ PathBuf { inner: OsString::from(inner) }
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
fn into_boxed() {
let orig: &str = "some/sort/of/path";
let path = Path::new(orig);
- let path_buf = path.to_owned();
- let box1: Box<Path> = Box::from(path);
- let box2 = path_buf.into_boxed_path();
- assert_eq!(path, &*box1);
- assert_eq!(box1, box2);
- assert_eq!(&*box2, path);
- }
-
- #[test]
- fn boxed_default() {
- let boxed = <Box<Path>>::default();
- assert!(boxed.as_os_str().is_empty());
+ let boxed: Box<Path> = Box::from(path);
+ let path_buf = path.to_owned().into_boxed_path().into_path_buf();
+ assert_eq!(path, &*boxed);
+ assert_eq!(&*boxed, &*path_buf);
+ assert_eq!(&*path_buf, path);
}
}
//! value.
//! * [`std::boxed`]::[`Box`], a way to allocate values on the heap.
//! * [`std::borrow`]::[`ToOwned`], The conversion trait that defines
-//! [`to_owned()`], the generic method for creating an owned type from a
+//! [`to_owned`], the generic method for creating an owned type from a
//! borrowed type.
-//! * [`std::clone`]::[`Clone`], the ubiquitous trait that defines [`clone()`],
+//! * [`std::clone`]::[`Clone`], the ubiquitous trait that defines [`clone`],
//! the method for producing a copy of a value.
//! * [`std::cmp`]::{[`PartialEq`], [`PartialOrd`], [`Eq`], [`Ord`] }. The
//! comparison traits, which implement the comparison operators and are often
//! [`ToOwned`]: ../borrow/trait.ToOwned.html
//! [`ToString`]: ../string/trait.ToString.html
//! [`Vec`]: ../vec/struct.Vec.html
-//! [`clone()`]: ../clone/trait.Clone.html#tymethod.clone
+//! [`clone`]: ../clone/trait.Clone.html#tymethod.clone
//! [`drop`]: ../mem/fn.drop.html
//! [`std::borrow`]: ../borrow/index.html
//! [`std::boxed`]: ../boxed/index.html
//! [`std::slice`]: ../slice/index.html
//! [`std::string`]: ../string/index.html
//! [`std::vec`]: ../vec/index.html
-//! [`to_owned()`]: ../borrow/trait.ToOwned.html#tymethod.to_owned
+//! [`to_owned`]: ../borrow/trait.ToOwned.html#tymethod.to_owned
//! [book-closures]: ../../book/closures.html
//! [book-dtor]: ../../book/drop.html
//! [book-enums]: ../../book/enums.html
/// # Representation
///
/// A `&str` is made up of two components: a pointer to some bytes, and a
-/// length. You can look at these with the [`.as_ptr()`] and [`len()`] methods:
+/// length. You can look at these with the [`.as_ptr`] and [`len`] methods:
///
/// ```
/// use std::slice;
/// assert_eq!(s, Ok(story));
/// ```
///
-/// [`.as_ptr()`]: #method.as_ptr
-/// [`len()`]: #method.len
+/// [`.as_ptr`]: #method.as_ptr
+/// [`len`]: #method.len
///
/// Note: This example shows the internals of `&str`. `unsafe` should not be
/// used to get a string slice under normal circumstances. Use `.as_slice()`
/// will be run. If a clean shutdown is needed it is recommended to only call
/// this function at a known point where there are no more destructors left
/// to run.
-#[unstable(feature = "process_abort", issue = "37838")]
+#[stable(feature = "process_abort", since = "1.17.0")]
pub fn abort() -> ! {
unsafe { ::sys::abort_internal() };
}
//! If an application does not have `getrandom` and likely to be run soon after first booting,
//! or on a system with very few entropy sources, one should consider using `/dev/random` via
//! `ReaderRng`.
-//! - On some systems (e.g. FreeBSD, OpenBSD and Mac OS X) there is no difference
+//! - On some systems (e.g. FreeBSD, OpenBSD and macOS) there is no difference
//! between the two sources. (Also note that, on some systems e.g. FreeBSD, both `/dev/random`
//! and `/dev/urandom` may block once if the CSPRNG has not seeded yet.)
/// A random number generator that retrieves randomness straight from
/// the operating system. Platform sources:
///
-/// - Unix-like systems (Linux, Android, Mac OSX): read directly from
+/// - Unix-like systems (Linux, Android, macOS): read directly from
/// `/dev/urandom`, or from `getrandom(2)` system call if available.
/// - Windows: calls `CryptGenRandom`, using the default cryptographic
/// service provider with the `PROV_RSA_FULL` type.
use panic;
use sys;
use sys_common;
- use sys_common::thread_info::{self, NewThread};
+ use sys_common::thread_info;
use thread::Thread;
sys::init();
// created. Note that this isn't necessary in general for new threads,
// but we just do this to name the main thread and to give it correct
// info about the stack bounds.
- let thread: Thread = NewThread::new(Some("main".to_owned()));
+ let thread = Thread::new(Some("main".to_owned()));
thread_info::set(main_guard, thread);
// Store our args if necessary in a squirreled away location
/// A result returned from wait.
///
-/// Currently this opaque structure only has one method, [`.is_leader()`]. Only
+/// Currently this opaque structure only has one method, [`.is_leader`]. Only
/// one thread will receive a result that will return `true` from this function.
///
-/// [`.is_leader()`]: #method.is_leader
+/// [`.is_leader`]: #method.is_leader
///
/// # Examples
///
///
/// This function will atomically unlock the mutex specified (represented by
/// `guard`) and block the current thread. This means that any calls
- /// to [`notify_one()`] or [`notify_all()`] which happen logically after the
+ /// to [`notify_one`] or [`notify_all`] which happen logically after the
/// mutex is unlocked are candidates to wake this thread up. When this
/// function call returns, the lock specified will have been re-acquired.
///
///
/// # Panics
///
- /// This function will [`panic!()`] if it is used with more than one mutex
+ /// This function will [`panic!`] if it is used with more than one mutex
/// over time. Each condition variable is dynamically bound to exactly one
/// mutex to ensure defined behavior across platforms. If this functionality
/// is not desired, then unsafe primitives in `sys` are provided.
///
- /// [`notify_one()`]: #method.notify_one
- /// [`notify_all()`]: #method.notify_all
+ /// [`notify_one`]: #method.notify_one
+ /// [`notify_all`]: #method.notify_all
/// [poisoning]: ../sync/struct.Mutex.html#poisoning
/// [`Mutex`]: ../sync/struct.Mutex.html
- /// [`panic!()`]: ../../std/macro.panic.html
+ /// [`panic!`]: ../../std/macro.panic.html
///
/// # Examples
///
/// be woken up from its call to [`wait`] or [`wait_timeout`]. Calls to
/// `notify_one` are not buffered in any way.
///
- /// To wake up all threads, see [`notify_all()`].
+ /// To wake up all threads, see [`notify_all`].
///
/// [`wait`]: #method.wait
/// [`wait_timeout`]: #method.wait_timeout
- /// [`notify_all()`]: #method.notify_all
+ /// [`notify_all`]: #method.notify_all
///
/// # Examples
///
/// variable are awoken. Calls to `notify_all()` are not buffered in any
/// way.
///
- /// To wake up only one thread, see [`notify_one()`].
+ /// To wake up only one thread, see [`notify_one`].
///
- /// [`notify_one()`]: #method.notify_one
+ /// [`notify_one`]: #method.notify_one
///
/// # Examples
///
/// All data sent on the sender will become available on the receiver, and no
/// send will block the calling thread (this channel has an "infinite buffer").
///
-/// If the [`Receiver`] is disconnected while trying to [`send()`] with the
-/// [`Sender`], the [`send()`] method will return an error.
+/// If the [`Receiver`] is disconnected while trying to [`send`] with the
+/// [`Sender`], the [`send`] method will return an error.
///
-/// [`send()`]: ../../../std/sync/mpsc/struct.Sender.html#method.send
+/// [`send`]: ../../../std/sync/mpsc/struct.Sender.html#method.send
/// [`Sender`]: ../../../std/sync/mpsc/struct.Sender.html
/// [`Receiver`]: ../../../std/sync/mpsc/struct.Receiver.html
///
/// `bound` specifies the buffer size. When the internal buffer becomes full,
/// future sends will *block* waiting for the buffer to open up. Note that a
/// buffer size of 0 is valid, in which case this becomes "rendezvous channel"
-/// where each [`send()`] will not return until a recv is paired with it.
+/// where each [`send`] will not return until a recv is paired with it.
///
/// Like asynchronous channels, if the [`Receiver`] is disconnected while
-/// trying to [`send()`] with the [`SyncSender`], the [`send()`] method will
+/// trying to [`send`] with the [`SyncSender`], the [`send`] method will
/// return an error.
///
-/// [`send()`]: ../../../std/sync/mpsc/struct.SyncSender.html#method.send
+/// [`send`]: ../../../std/sync/mpsc/struct.SyncSender.html#method.send
/// [`SyncSender`]: ../../../std/sync/mpsc/struct.SyncSender.html
/// [`Receiver`]: ../../../std/sync/mpsc/struct.Receiver.html
///
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
-/// The data protected by the mutex can be access through this guard via its
+/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
-/// This structure is created by the [`lock()`] and [`try_lock()`] methods on
+/// This structure is created by the [`lock`] and [`try_lock`] methods on
/// [`Mutex`].
///
/// [`Deref`]: ../../std/ops/trait.Deref.html
/// [`DerefMut`]: ../../std/ops/trait.DerefMut.html
-/// [`lock()`]: struct.Mutex.html#method.lock
-/// [`try_lock()`]: struct.Mutex.html#method.try_lock
+/// [`lock`]: struct.Mutex.html#method.lock
+/// [`try_lock`]: struct.Mutex.html#method.try_lock
/// [`Mutex`]: struct.Mutex.html
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
-/// This structure is created by the [`read()`] and [`try_read()`] methods on
+/// This structure is created by the [`read`] and [`try_read`] methods on
/// [`RwLock`].
///
-/// [`read()`]: struct.RwLock.html#method.read
-/// [`try_read()`]: struct.RwLock.html#method.try_read
+/// [`read`]: struct.RwLock.html#method.read
+/// [`try_read`]: struct.RwLock.html#method.try_read
/// [`RwLock`]: struct.RwLock.html
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
-/// This structure is created by the [`write()`] and [`try_write()`] methods
+/// This structure is created by the [`write`] and [`try_write`] methods
/// on [`RwLock`].
///
-/// [`write()`]: struct.RwLock.html#method.write
-/// [`try_write()`]: struct.RwLock.html#method.try_write
+/// [`write`]: struct.RwLock.html#method.write
+/// [`try_write`]: struct.RwLock.html#method.try_write
/// [`RwLock`]: struct.RwLock.html
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
// `None`.
(*ptr).dtor_running.set(true);
- // The OSX implementation of TLS apparently had an odd aspect to it
+ // The macOS implementation of TLS apparently had an odd aspect to it
// where the pointer we have may be overwritten while this destructor
// is running. Specifically if a TLS destructor re-accesses TLS it may
// trigger a re-initialization of all TLS variables, paving over at
// least some destroyed ones with initial values.
//
- // This means that if we drop a TLS value in place on OSX that we could
+ // This means that if we drop a TLS value in place on macOS that we could
// revert the value to its original state halfway through the
// destructor, which would be bad!
//
- // Hence, we use `ptr::read` on OSX (to move to a "safe" location)
+ // Hence, we use `ptr::read` on macOS (to move to a "safe" location)
// instead of drop_in_place.
if cfg!(target_os = "macos") {
ptr::read((*ptr).inner.get());
pub fn into_box(self) -> Box<Slice> {
unsafe { mem::transmute(self.inner.into_boxed_slice()) }
}
+
+ #[inline]
+ pub fn from_box(boxed: Box<Slice>) -> Buf {
+ let inner: Box<[u8]> = unsafe { mem::transmute(boxed) };
+ Buf { inner: inner.into_vec() }
+ }
}
impl Slice {
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
- // deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
+ // deadlock on both macOS and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
/// Some methods of getting a backtrace:
///
/// * The backtrace() functions on unix. It turns out this doesn't work very
-/// well for green threads on OSX, and the address to symbol portion of it
+/// well for green threads on macOS, and the address to symbol portion of it
/// suffers problems that are described below.
///
/// * Using libunwind. This is more difficult than it sounds because libunwind
///
/// * Use dladdr(). The original backtrace()-based idea actually uses dladdr()
/// behind the scenes to translate, and this is why backtrace() was not used.
-/// Conveniently, this method works fantastically on OSX. It appears dladdr()
+/// Conveniently, this method works fantastically on macOS. It appears dladdr()
/// uses magic to consult the local symbol table, or we're putting everything
-/// in the dynamic symbol table anyway. Regardless, for OSX, this is the
+/// in the dynamic symbol table anyway. Regardless, for macOS, this is the
/// method used for translation. It's provided by the system and easy to do.
///
/// Sadly, all other systems have a dladdr() implementation that does not
/// * Use `libbacktrace`. It turns out that this is a small library bundled in
/// the gcc repository which provides backtrace and symbol translation
/// functionality. All we really need from it is the backtrace functionality,
-/// and we only really need this on everything that's not OSX, so this is the
+/// and we only really need this on everything that's not macOS, so this is the
/// chosen route for now.
///
/// In summary, the current situation uses libgcc_s to get a trace of stack
let len = self.len as usize - sun_path_offset();
let path = unsafe { mem::transmute::<&[libc::c_char], &[u8]>(&self.addr.sun_path) };
- // OSX seems to return a len of 16 and a zeroed sun_path for unnamed addresses
+ // macOS seems to return a len of 16 and a zeroed sun_path for unnamed addresses
if len == 0 || (cfg!(not(target_os = "linux")) && self.addr.sun_path[0] == 0) {
AddressKind::Unnamed
} else if self.addr.sun_path[0] == 0 {
/// Sets the read timeout for the socket.
///
- /// If the provided value is [`None`], then [`read()`] calls will block
+ /// If the provided value is [`None`], then [`read`] calls will block
/// indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// [`None`]: ../../../../std/option/enum.Option.html#variant.None
- /// [`read()`]: ../../../../std/io/trait.Read.html#tymethod.read
+ /// [`read`]: ../../../../std/io/trait.Read.html#tymethod.read
/// [`Duration`]: ../../../../std/time/struct.Duration.html
///
/// # Examples
/// Sets the write timeout for the socket.
///
- /// If the provided value is [`None`], then [`write()`] calls will block
+ /// If the provided value is [`None`], then [`write`] calls will block
/// indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// [`None`]: ../../../../std/option/enum.Option.html#variant.None
- /// [`read()`]: ../../../../std/io/trait.Write.html#tymethod.write
+ /// [`write`]: ../../../../std/io/trait.Write.html#tymethod.write
/// [`Duration`]: ../../../../std/time/struct.Duration.html
///
/// # Examples
/// Connects the socket to the specified address.
///
- /// The [`send()`] method may be used to send data to the specified address.
- /// [`recv()`] and [`recv_from()`] will only receive data from that address.
+ /// The [`send`] method may be used to send data to the specified address.
+ /// [`recv`] and [`recv_from`] will only receive data from that address.
///
- /// [`send()`]: #method.send
- /// [`recv()`]: #method.recv
- /// [`recv_from()`]: #method.recv_from
+ /// [`send`]: #method.send
+ /// [`recv`]: #method.recv
+ /// [`recv_from`]: #method.recv_from
///
/// # Examples
///
/// Returns the address of this socket's peer.
///
- /// The [`connect()`] method will connect the socket to a peer.
+ /// The [`connect`] method will connect the socket to a peer.
///
- /// [`connect()`]: #method.connect
+ /// [`connect`]: #method.connect
///
/// # Examples
///
/// Sets the read timeout for the socket.
///
- /// If the provided value is [`None`], then [`recv()`] and [`recv_from()`] calls will
+ /// If the provided value is [`None`], then [`recv`] and [`recv_from`] calls will
/// block indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// [`None`]: ../../../../std/option/enum.Option.html#variant.None
- /// [`recv()`]: #method.recv
- /// [`recv_from()`]: #method.recv_from
+ /// [`recv`]: #method.recv
+ /// [`recv_from`]: #method.recv_from
/// [`Duration`]: ../../../../std/time/struct.Duration.html
///
/// # Examples
/// Sets the write timeout for the socket.
///
- /// If the provided value is [`None`], then [`send()`] and [`send_to()`] calls will
+ /// If the provided value is [`None`], then [`send`] and [`send_to`] calls will
/// block indefinitely. It is an error to pass the zero [`Duration`] to this
/// method.
///
/// [`None`]: ../../../../std/option/enum.Option.html#variant.None
- /// [`send()`]: #method.send
- /// [`send_to()`]: #method.send_to
+ /// [`send`]: #method.send
+ /// [`send_to`]: #method.send_to
/// [`Duration`]: ../../../../std/time/struct.Duration.html
///
/// # Examples
register_dtor_fallback(t, dtor);
}
-// OSX's analog of the above linux function is this _tlv_atexit function.
+// macOS's analog of the above linux function is this _tlv_atexit function.
// The disassembly of thread_local globals in C++ (at least produced by
// clang) will have this show up in the output.
#[cfg(target_os = "macos")]
// `None`.
(*ptr).dtor_running.set(true);
- // The OSX implementation of TLS apparently had an odd aspect to it
+ // The macOS implementation of TLS apparently had an odd aspect to it
// where the pointer we have may be overwritten while this destructor
// is running. Specifically if a TLS destructor re-accesses TLS it may
// trigger a re-initialization of all TLS variables, paving over at
// least some destroyed ones with initial values.
//
- // This means that if we drop a TLS value in place on OSX that we could
+ // This means that if we drop a TLS value in place on macOS that we could
// revert the value to its original state halfway through the
// destructor, which would be bad!
//
- // Hence, we use `ptr::read` on OSX (to move to a "safe" location)
+ // Hence, we use `ptr::read` on macOS (to move to a "safe" location)
// instead of drop_in_place.
if cfg!(target_os = "macos") {
ptr::read((*ptr).inner.get());
// with the man page quoting that if the count of bytes to read is
// greater than `SSIZE_MAX` the result is "unspecified".
//
- // On OSX, however, apparently the 64-bit libc is either buggy or
+ // On macOS, however, apparently the 64-bit libc is either buggy or
// intentionally showing odd behavior by rejecting any read with a size
// larger than or equal to INT_MAX. To handle both of these the read
// size is capped on both platforms.
// Linux kernel then the flag is just ignored by the OS, so we continue
// to explicitly ask for a CLOEXEC fd here.
//
- // The CLOEXEC flag, however, is supported on versions of OSX/BSD/etc
+ // The CLOEXEC flag, however, is supported on versions of macOS/BSD/etc
// that we support, so we only do this on Linux currently.
if cfg!(target_os = "linux") {
fd.set_cloexec()?;
#[cfg(target_os = "macos")]
fn get_path(fd: c_int) -> Option<PathBuf> {
// FIXME: The use of PATH_MAX is generally not encouraged, but it
- // is inevitable in this case because OS X defines `fcntl` with
+ // is inevitable in this case because macOS defines `fcntl` with
// `F_GETPATH` in terms of `MAXPATHLEN`, and there are no
// alternatives. If a better method is invented, it should be used
// instead.
pub fn into_box(self) -> Box<Slice> {
unsafe { mem::transmute(self.inner.into_boxed_slice()) }
}
+
+ #[inline]
+ pub fn from_box(boxed: Box<Slice>) -> Buf {
+ let inner: Box<[u8]> = unsafe { mem::transmute(boxed) };
+ Buf { inner: inner.into_vec() }
+ }
}
impl Slice {
}
// See #14232 for more information, but it appears that signal delivery to a
- // newly spawned process may just be raced in the OSX, so to prevent this
- // test from being flaky we ignore it on OSX.
+ // newly spawned process may just be raced on macOS, so to prevent this
+ // test from being flaky we ignore it on macOS.
#[test]
#[cfg_attr(target_os = "macos", ignore)]
#[cfg_attr(target_os = "nacl", ignore)] // no signals on NaCl.
// mutex, and then after the fork they unlock it.
//
// Despite this information, libnative's spawn has been witnessed to
- // deadlock on both OSX and FreeBSD. I'm not entirely sure why, but
+ // deadlock on both macOS and FreeBSD. I'm not entirely sure why, but
// all collected backtraces point at malloc/free traffic in the
// child spawned process.
//
let stack = libc::stack_t {
ss_sp: ptr::null_mut(),
ss_flags: SS_DISABLE,
- // Workaround for bug in MacOS implementation of sigaltstack
+ // Workaround for bug in macOS implementation of sigaltstack
// UNIX2003 which returns ENOMEM when disabling a stack while
// passing ss_size smaller than MINSIGSTKSZ. According to POSIX
// both ss_sp and ss_size should be ignored in this case.
pub fn into_box(self) -> Box<Slice> {
unsafe { mem::transmute(self.inner.into_box()) }
}
+
+ #[inline]
+ pub fn from_box(boxed: Box<Slice>) -> Buf {
+ let inner: Box<Wtf8> = unsafe { mem::transmute(boxed) };
+ Buf { inner: Wtf8Buf::from_box(inner) }
+ }
}
impl Slice {
if c.borrow().is_none() {
*c.borrow_mut() = Some(ThreadInfo {
stack_guard: None,
- thread: NewThread::new(None),
+ thread: Thread::new(None),
})
}
Some(f(c.borrow_mut().as_mut().unwrap()))
thread: thread,
}));
}
-
-// a hack to get around privacy restrictions; implemented by `std::thread`
-pub trait NewThread {
- fn new(name: Option<String>) -> Self;
-}
pub fn into_box(self) -> Box<Wtf8> {
unsafe { mem::transmute(self.bytes.into_boxed_slice()) }
}
+
+ /// Converts a `Box<Wtf8>` into a `Wtf8Buf`.
+ pub fn from_box(boxed: Box<Wtf8>) -> Wtf8Buf {
+ let bytes: Box<[u8]> = unsafe { mem::transmute(boxed) };
+ Wtf8Buf { bytes: bytes.into_vec() }
+ }
}
/// Create a new WTF-8 string from an iterator of code points.
/// destroyed, but not all platforms have this guard. Those platforms that do
/// not guard typically have a synthetic limit after which point no more
/// destructors are run.
-/// 3. On OSX, initializing TLS during destruction of other TLS slots can
+/// 3. On macOS, initializing TLS during destruction of other TLS slots can
/// sometimes cancel *all* destructors for the current thread, whether or not
/// the slots have already had their destructors run or not.
#[stable(feature = "rust1", since = "1.0.0")]
}
// Note that this test will deadlock if TLS destructors aren't run (this
- // requires the destructor to be run to pass the test). OSX has a known bug
+ // requires the destructor to be run to pass the test). macOS has a known bug
// where dtors-in-dtors may cancel other destructors, so we just ignore this
- // test on OSX.
+ // test on macOS.
#[test]
#[cfg_attr(target_os = "macos", ignore)]
fn dtors_in_dtors_in_dtors() {
//! two ways:
//!
//! * By spawning a new thread, e.g. using the [`thread::spawn`][`spawn`]
-//! function, and calling [`thread()`] on the [`JoinHandle`].
-//! * By requesting the current thread, using the [`thread::current()`] function.
+//! function, and calling [`thread`] on the [`JoinHandle`].
+//! * By requesting the current thread, using the [`thread::current`] function.
//!
-//! The [`thread::current()`] function is available even for threads not spawned
+//! The [`thread::current`] function is available even for threads not spawned
//! by the APIs of this module.
//!
//! ## Blocking support: park and unpark
//!
//! Every thread is equipped with some basic low-level blocking support, via the
-//! [`thread::park()`][`park()`] function and [`thread::Thread::unpark()`][`unpark()`]
-//! method. [`park()`] blocks the current thread, which can then be resumed from
-//! another thread by calling the [`unpark()`] method on the blocked thread's handle.
+//! [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`]
+//! method. [`park`] blocks the current thread, which can then be resumed from
+//! another thread by calling the [`unpark`] method on the blocked thread's handle.
//!
//! Conceptually, each [`Thread`] handle has an associated token, which is
//! initially not present:
//!
-//! * The [`thread::park()`][`park()`] function blocks the current thread unless or until
+//! * The [`thread::park`][`park`] function blocks the current thread unless or until
//! the token is available for its thread handle, at which point it atomically
//! consumes the token. It may also return *spuriously*, without consuming the
-//! token. [`thread::park_timeout()`] does the same, but allows specifying a
+//! token. [`thread::park_timeout`] does the same, but allows specifying a
//! maximum time to block the thread for.
//!
-//! * The [`unpark()`] method on a [`Thread`] atomically makes the token available
+//! * The [`unpark`] method on a [`Thread`] atomically makes the token available
//! if it wasn't already.
//!
//! In other words, each [`Thread`] acts a bit like a semaphore with initial count
//! The API is typically used by acquiring a handle to the current thread,
//! placing that handle in a shared data structure so that other threads can
//! find it, and then `park`ing. When some desired condition is met, another
-//! thread calls [`unpark()`] on the handle.
+//! thread calls [`unpark`] on the handle.
//!
//! The motivation for this design is twofold:
//!
//! [`Arc`]: ../../std/sync/struct.Arc.html
//! [`spawn`]: ../../std/thread/fn.spawn.html
//! [`JoinHandle`]: ../../std/thread/struct.JoinHandle.html
-//! [`thread()`]: ../../std/thread/struct.JoinHandle.html#method.thread
+//! [`thread`]: ../../std/thread/struct.JoinHandle.html#method.thread
//! [`join`]: ../../std/thread/struct.JoinHandle.html#method.join
//! [`Result`]: ../../std/result/enum.Result.html
//! [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
//! [`Err`]: ../../std/result/enum.Result.html#variant.Err
//! [`panic!`]: ../../std/macro.panic.html
//! [`Builder`]: ../../std/thread/struct.Builder.html
-//! [`thread::current()`]: ../../std/thread/fn.spawn.html
+//! [`thread::current`]: ../../std/thread/fn.current.html
//! [`Thread`]: ../../std/thread/struct.Thread.html
-//! [`park()`]: ../../std/thread/fn.park.html
-//! [`unpark()`]: ../../std/thread/struct.Thread.html#method.unpark
-//! [`thread::park_timeout()`]: ../../std/thread/fn.park_timeout.html
+//! [`park`]: ../../std/thread/fn.park.html
+//! [`unpark`]: ../../std/thread/struct.Thread.html#method.unpark
+//! [`thread::park_timeout`]: ../../std/thread/fn.park_timeout.html
//! [`Cell`]: ../cell/struct.Cell.html
//! [`RefCell`]: ../cell/struct.RefCell.html
//! [`thread_local!`]: ../macro.thread_local.html
/// Blocks unless or until the current thread's token is made available.
///
/// Every thread is equipped with some basic low-level blocking support, via
-/// the `park()` function and the [`unpark()`][unpark] method. These can be
+/// the `park()` function and the [`unpark`][unpark] method. These can be
/// used as a more CPU-efficient implementation of a spinlock.
///
/// [unpark]: struct.Thread.html#method.unpark
impl Thread {
// Used only internally to construct a thread object without spawning
- fn new(name: Option<String>) -> Thread {
+ pub(crate) fn new(name: Option<String>) -> Thread {
let cname = name.map(|n| {
CString::new(n).expect("thread name may not contain interior null bytes")
});
}
}
-// a hack to get around privacy restrictions
-impl thread_info::NewThread for Thread {
- fn new(name: Option<String>) -> Thread { Thread::new(name) }
-}
-
////////////////////////////////////////////////////////////////////////////////
// JoinHandle
////////////////////////////////////////////////////////////////////////////////
/// Returns an iterator that yields the lowercase equivalent of a `char`.
///
-/// This `struct` is created by the [`to_lowercase()`] method on [`char`]. See
+/// This `struct` is created by the [`to_lowercase`] method on [`char`]. See
/// its documentation for more.
///
-/// [`to_lowercase()`]: ../../std/primitive.char.html#method.to_lowercase
+/// [`to_lowercase`]: ../../std/primitive.char.html#method.to_lowercase
/// [`char`]: ../../std/primitive.char.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ToLowercase(CaseMappingIter);
/// Returns an iterator that yields the uppercase equivalent of a `char`.
///
-/// This `struct` is created by the [`to_uppercase()`] method on [`char`]. See
+/// This `struct` is created by the [`to_uppercase`] method on [`char`]. See
/// its documentation for more.
///
-/// [`to_uppercase()`]: ../../std/primitive.char.html#method.to_uppercase
+/// [`to_uppercase`]: ../../std/primitive.char.html#method.to_uppercase
/// [`char`]: ../../std/primitive.char.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct ToUppercase(CaseMappingIter);
/// * `a-z`
/// * `A-Z`
///
- /// For a more comprehensive understanding of 'digit', see [`is_numeric()`][is_numeric].
+ /// For a more comprehensive understanding of 'digit', see [`is_numeric`][is_numeric].
///
/// [is_numeric]: #method.is_numeric
///
/// Returns the number of 16-bit code units this `char` would need if
/// encoded in UTF-16.
///
- /// See the documentation for [`len_utf8()`] for more explanation of this
+ /// See the documentation for [`len_utf8`] for more explanation of this
/// concept. This function is a mirror, but for UTF-16 instead of UTF-8.
///
- /// [`len_utf8()`]: #method.len_utf8
+ /// [`len_utf8`]: #method.len_utf8
///
/// # Examples
///
/// // Sometimes the result is more than one character:
/// assert_eq!('İ'.to_lowercase().to_string(), "i\u{307}");
///
- /// // Japanese scripts do not have case, and so:
+ /// // Characters that do not have both uppercase and lowercase
+ /// // convert into themselves.
/// assert_eq!('山'.to_lowercase().to_string(), "山");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
/// // Sometimes the result is more than one character:
/// assert_eq!('ß'.to_uppercase().to_string(), "SS");
///
- /// // Japanese does not have case, and so:
+ /// // Characters that do not have both uppercase and lowercase
+ /// // convert into themselves.
/// assert_eq!('山'.to_uppercase().to_string(), "山");
/// ```
///
pub segments: Vec<PathSegment>,
}
+impl<'a> PartialEq<&'a str> for Path {
+ fn eq(&self, string: &&'a str) -> bool {
+ self.segments.len() == 1 && self.segments[0].identifier.name == *string
+ }
+}
+
impl fmt::Debug for Path {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "path({})", pprust::path_to_string(self))
pub struct Attribute {
pub id: AttrId,
pub style: AttrStyle,
- pub value: MetaItem,
+ pub path: Path,
+ pub tokens: TokenStream,
pub is_sugared_doc: bool,
pub span: Span,
}
pub use self::IntType::*;
use ast;
-use ast::{AttrId, Attribute, Name};
+use ast::{AttrId, Attribute, Name, Ident};
use ast::{MetaItem, MetaItemKind, NestedMetaItem, NestedMetaItemKind};
-use ast::{Lit, Expr, Item, Local, Stmt, StmtKind};
+use ast::{Lit, LitKind, Expr, ExprKind, Item, Local, Stmt, StmtKind};
use codemap::{Spanned, spanned, dummy_spanned, mk_sp};
use syntax_pos::{Span, BytePos, DUMMY_SP};
use errors::Handler;
use feature_gate::{Features, GatedCfg};
use parse::lexer::comments::{doc_comment_style, strip_doc_comment_decoration};
-use parse::ParseSess;
+use parse::parser::Parser;
+use parse::{self, ParseSess, PResult};
+use parse::token::{self, Token};
use ptr::P;
use symbol::Symbol;
+use tokenstream::{TokenStream, TokenTree, Delimited};
use util::ThinVec;
use std::cell::{RefCell, Cell};
+use std::iter;
thread_local! {
static USED_ATTRS: RefCell<Vec<u64>> = RefCell::new(Vec::new());
impl Attribute {
pub fn check_name(&self, name: &str) -> bool {
- let matches = self.name() == name;
+ let matches = self.path == name;
if matches {
mark_used(self);
}
matches
}
- pub fn name(&self) -> Name { self.meta().name() }
+ pub fn name(&self) -> Option<Name> {
+ match self.path.segments.len() {
+ 1 => Some(self.path.segments[0].identifier.name),
+ _ => None,
+ }
+ }
pub fn value_str(&self) -> Option<Symbol> {
- self.meta().value_str()
+ self.meta().and_then(|meta| meta.value_str())
}
- pub fn meta_item_list(&self) -> Option<&[NestedMetaItem]> {
- self.meta().meta_item_list()
+ pub fn meta_item_list(&self) -> Option<Vec<NestedMetaItem>> {
+ match self.meta() {
+ Some(MetaItem { node: MetaItemKind::List(list), .. }) => Some(list),
+ _ => None
+ }
}
- pub fn is_word(&self) -> bool { self.meta().is_word() }
+ pub fn is_word(&self) -> bool {
+ self.path.segments.len() == 1 && self.tokens.is_empty()
+ }
- pub fn span(&self) -> Span { self.meta().span }
+ pub fn span(&self) -> Span {
+ self.span
+ }
pub fn is_meta_item_list(&self) -> bool {
self.meta_item_list().is_some()
match self.node {
MetaItemKind::NameValue(ref v) => {
match v.node {
- ast::LitKind::Str(ref s, _) => Some((*s).clone()),
+ LitKind::Str(ref s, _) => Some((*s).clone()),
_ => None,
}
},
impl Attribute {
/// Extract the MetaItem from inside this Attribute.
- pub fn meta(&self) -> &MetaItem {
- &self.value
+ pub fn meta(&self) -> Option<MetaItem> {
+ let mut tokens = self.tokens.trees().peekable();
+ Some(MetaItem {
+ name: match self.path.segments.len() {
+ 1 => self.path.segments[0].identifier.name,
+ _ => return None,
+ },
+ node: if let Some(node) = MetaItemKind::from_tokens(&mut tokens) {
+ if tokens.peek().is_some() {
+ return None;
+ }
+ node
+ } else {
+ return None;
+ },
+ span: self.span,
+ })
+ }
+
+ pub fn parse<'a, T, F>(&self, sess: &'a ParseSess, mut f: F) -> PResult<'a, T>
+ where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ {
+ let mut parser = Parser::new(sess, self.tokens.clone(), None, false);
+ let result = f(&mut parser)?;
+ if parser.token != token::Eof {
+ parser.unexpected()?;
+ }
+ Ok(result)
+ }
+
+ pub fn parse_list<'a, T, F>(&self, sess: &'a ParseSess, mut f: F) -> PResult<'a, Vec<T>>
+ where F: FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ {
+ if self.tokens.is_empty() {
+ return Ok(Vec::new());
+ }
+ self.parse(sess, |parser| {
+ parser.expect(&token::OpenDelim(token::Paren))?;
+ let mut list = Vec::new();
+ while !parser.eat(&token::CloseDelim(token::Paren)) {
+ list.push(f(parser)?);
+ if !parser.eat(&token::Comma) {
+ parser.expect(&token::CloseDelim(token::Paren))?;
+ break
+ }
+ }
+ Ok(list)
+ })
+ }
+
+ pub fn parse_meta<'a>(&self, sess: &'a ParseSess) -> PResult<'a, MetaItem> {
+ if self.path.segments.len() > 1 {
+ sess.span_diagnostic.span_err(self.path.span, "expected ident, found path");
+ }
+
+ Ok(MetaItem {
+ name: self.path.segments.last().unwrap().identifier.name,
+ node: self.parse(sess, |parser| parser.parse_meta_item_kind())?,
+ span: self.span,
+ })
}
/// Convert self to a normal #[doc="foo"] comment, if it is a
/* Constructors */
pub fn mk_name_value_item_str(name: Name, value: Symbol) -> MetaItem {
- let value_lit = dummy_spanned(ast::LitKind::Str(value, ast::StrStyle::Cooked));
+ let value_lit = dummy_spanned(LitKind::Str(value, ast::StrStyle::Cooked));
mk_spanned_name_value_item(DUMMY_SP, name, value_lit)
}
Attribute {
id: id,
style: ast::AttrStyle::Inner,
- value: item,
+ path: ast::Path::from_ident(item.span, ast::Ident::with_empty_ctxt(item.name)),
+ tokens: item.node.tokens(item.span),
is_sugared_doc: false,
span: sp,
}
Attribute {
id: id,
style: ast::AttrStyle::Outer,
- value: item,
+ path: ast::Path::from_ident(item.span, ast::Ident::with_empty_ctxt(item.name)),
+ tokens: item.node.tokens(item.span),
is_sugared_doc: false,
span: sp,
}
pub fn mk_sugared_doc_attr(id: AttrId, text: Symbol, lo: BytePos, hi: BytePos)
-> Attribute {
let style = doc_comment_style(&text.as_str());
- let lit = spanned(lo, hi, ast::LitKind::Str(text, ast::StrStyle::Cooked));
+ let lit = spanned(lo, hi, LitKind::Str(text, ast::StrStyle::Cooked));
Attribute {
id: id,
style: style,
- value: MetaItem {
- span: mk_sp(lo, hi),
- name: Symbol::intern("doc"),
- node: MetaItemKind::NameValue(lit),
- },
+ path: ast::Path::from_ident(mk_sp(lo, hi), ast::Ident::from_str("doc")),
+ tokens: MetaItemKind::NameValue(lit).tokens(mk_sp(lo, hi)),
is_sugared_doc: true,
span: mk_sp(lo, hi),
}
}
pub fn list_contains_name(items: &[NestedMetaItem], name: &str) -> bool {
- debug!("attr::list_contains_name (name={})", name);
items.iter().any(|item| {
- debug!(" testing: {:?}", item.name());
item.check_name(name)
})
}
pub fn contains_name(attrs: &[Attribute], name: &str) -> bool {
- debug!("attr::contains_name (name={})", name);
attrs.iter().any(|item| {
- debug!(" testing: {}", item.name());
item.check_name(name)
})
}
/// Determine what `#[inline]` attribute is present in `attrs`, if any.
pub fn find_inline_attr(diagnostic: Option<&Handler>, attrs: &[Attribute]) -> InlineAttr {
attrs.iter().fold(InlineAttr::None, |ia, attr| {
- match attr.value.node {
- _ if attr.value.name != "inline" => ia,
+ if attr.path != "inline" {
+ return ia;
+ }
+ let meta = match attr.meta() {
+ Some(meta) => meta.node,
+ None => return ia,
+ };
+ match meta {
MetaItemKind::Word => {
mark_used(attr);
InlineAttr::Hint
let mut rustc_depr: Option<RustcDeprecation> = None;
'outer: for attr in attrs_iter {
- let tag = attr.name();
- if tag != "rustc_deprecated" && tag != "unstable" && tag != "stable" {
+ if attr.path != "rustc_deprecated" && attr.path != "unstable" && attr.path != "stable" {
continue // not a stability level
}
mark_used(attr);
- if let Some(metas) = attr.meta_item_list() {
+ let meta = attr.meta();
+ if let Some(MetaItem { node: MetaItemKind::List(ref metas), .. }) = meta {
+ let meta = meta.as_ref().unwrap();
let get = |meta: &MetaItem, item: &mut Option<Symbol>| {
if item.is_some() {
handle_errors(diagnostic, meta.span, AttrError::MultipleItem(meta.name()));
}
};
- match &*tag.as_str() {
+ match &*meta.name.as_str() {
"rustc_deprecated" => {
if rustc_depr.is_some() {
span_err!(diagnostic, item_sp, E0540,
let mut depr: Option<Deprecation> = None;
'outer: for attr in attrs_iter {
- if attr.name() != "deprecated" {
+ if attr.path != "deprecated" {
continue
}
/// structure layout, and `packed` to remove padding.
pub fn find_repr_attrs(diagnostic: &Handler, attr: &Attribute) -> Vec<ReprAttr> {
let mut acc = Vec::new();
- match attr.value.node {
- ast::MetaItemKind::List(ref items) if attr.value.name == "repr" => {
+ if attr.path == "repr" {
+ if let Some(items) = attr.meta_item_list() {
mark_used(attr);
for item in items {
if !item.is_meta_item() {
}
}
}
- // Not a "repr" hint: ignore.
- _ => { }
}
acc
}
}
}
+impl MetaItem {
+ fn tokens(&self) -> TokenStream {
+ let ident = TokenTree::Token(self.span, Token::Ident(Ident::with_empty_ctxt(self.name)));
+ TokenStream::concat(vec![ident.into(), self.node.tokens(self.span)])
+ }
+
+ fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<MetaItem>
+ where I: Iterator<Item = TokenTree>,
+ {
+ let (mut span, name) = match tokens.next() {
+ Some(TokenTree::Token(span, Token::Ident(ident))) => (span, ident.name),
+ Some(TokenTree::Token(_, Token::Interpolated(ref nt))) => return match **nt {
+ token::Nonterminal::NtMeta(ref meta) => Some(meta.clone()),
+ _ => None,
+ },
+ _ => return None,
+ };
+ let node = match MetaItemKind::from_tokens(tokens) {
+ Some(node) => node,
+ _ => return None,
+ };
+ if let Some(last_span) = node.last_span() {
+ span.hi = last_span.hi;
+ }
+ Some(MetaItem { name: name, span: span, node: node })
+ }
+}
+
+impl MetaItemKind {
+ fn last_span(&self) -> Option<Span> {
+ match *self {
+ MetaItemKind::Word => None,
+ MetaItemKind::List(ref list) => list.last().map(NestedMetaItem::span),
+ MetaItemKind::NameValue(ref lit) => Some(lit.span),
+ }
+ }
+
+ pub fn tokens(&self, span: Span) -> TokenStream {
+ match *self {
+ MetaItemKind::Word => TokenStream::empty(),
+ MetaItemKind::NameValue(ref lit) => {
+ TokenStream::concat(vec![TokenTree::Token(span, Token::Eq).into(), lit.tokens()])
+ }
+ MetaItemKind::List(ref list) => {
+ let mut tokens = Vec::new();
+ for (i, item) in list.iter().enumerate() {
+ if i > 0 {
+ tokens.push(TokenTree::Token(span, Token::Comma).into());
+ }
+ tokens.push(item.node.tokens());
+ }
+ TokenTree::Delimited(span, Delimited {
+ delim: token::Paren,
+ tts: TokenStream::concat(tokens).into(),
+ }).into()
+ }
+ }
+ }
+
+ fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<MetaItemKind>
+ where I: Iterator<Item = TokenTree>,
+ {
+ let delimited = match tokens.peek().cloned() {
+ Some(TokenTree::Token(_, token::Eq)) => {
+ tokens.next();
+ return if let Some(TokenTree::Token(span, token)) = tokens.next() {
+ LitKind::from_token(token)
+ .map(|lit| MetaItemKind::NameValue(Spanned { node: lit, span: span }))
+ } else {
+ None
+ };
+ }
+ Some(TokenTree::Delimited(_, ref delimited)) if delimited.delim == token::Paren => {
+ tokens.next();
+ delimited.stream()
+ }
+ _ => return Some(MetaItemKind::Word),
+ };
+
+ let mut tokens = delimited.into_trees().peekable();
+ let mut result = Vec::new();
+ while let Some(..) = tokens.peek() {
+ match NestedMetaItemKind::from_tokens(&mut tokens) {
+ Some(item) => result.push(Spanned { span: item.span(), node: item }),
+ None => return None,
+ }
+ match tokens.next() {
+ None | Some(TokenTree::Token(_, Token::Comma)) => {}
+ _ => return None,
+ }
+ }
+ Some(MetaItemKind::List(result))
+ }
+}
+
+impl NestedMetaItemKind {
+ fn span(&self) -> Span {
+ match *self {
+ NestedMetaItemKind::MetaItem(ref item) => item.span,
+ NestedMetaItemKind::Literal(ref lit) => lit.span,
+ }
+ }
+
+ fn tokens(&self) -> TokenStream {
+ match *self {
+ NestedMetaItemKind::MetaItem(ref item) => item.tokens(),
+ NestedMetaItemKind::Literal(ref lit) => lit.tokens(),
+ }
+ }
+
+ fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<NestedMetaItemKind>
+ where I: Iterator<Item = TokenTree>,
+ {
+ if let Some(TokenTree::Token(span, token)) = tokens.peek().cloned() {
+ if let Some(node) = LitKind::from_token(token) {
+ tokens.next();
+ return Some(NestedMetaItemKind::Literal(Spanned { node: node, span: span }));
+ }
+ }
+
+ MetaItem::from_tokens(tokens).map(NestedMetaItemKind::MetaItem)
+ }
+}
+
+impl Lit {
+ fn tokens(&self) -> TokenStream {
+ TokenTree::Token(self.span, self.node.token()).into()
+ }
+}
+
+impl LitKind {
+ fn token(&self) -> Token {
+ use std::ascii;
+
+ match *self {
+ LitKind::Str(string, ast::StrStyle::Cooked) => {
+ let mut escaped = String::new();
+ for ch in string.as_str().chars() {
+ escaped.extend(ch.escape_unicode());
+ }
+ Token::Literal(token::Lit::Str_(Symbol::intern(&escaped)), None)
+ }
+ LitKind::Str(string, ast::StrStyle::Raw(n)) => {
+ Token::Literal(token::Lit::StrRaw(string, n), None)
+ }
+ LitKind::ByteStr(ref bytes) => {
+ let string = bytes.iter().cloned().flat_map(ascii::escape_default)
+ .map(Into::<char>::into).collect::<String>();
+ Token::Literal(token::Lit::ByteStr(Symbol::intern(&string)), None)
+ }
+ LitKind::Byte(byte) => {
+ let string: String = ascii::escape_default(byte).map(Into::<char>::into).collect();
+ Token::Literal(token::Lit::Byte(Symbol::intern(&string)), None)
+ }
+ LitKind::Char(ch) => {
+ let string: String = ch.escape_default().map(Into::<char>::into).collect();
+ Token::Literal(token::Lit::Char(Symbol::intern(&string)), None)
+ }
+ LitKind::Int(n, ty) => {
+ let suffix = match ty {
+ ast::LitIntType::Unsigned(ty) => Some(Symbol::intern(ty.ty_to_string())),
+ ast::LitIntType::Signed(ty) => Some(Symbol::intern(ty.ty_to_string())),
+ ast::LitIntType::Unsuffixed => None,
+ };
+ Token::Literal(token::Lit::Integer(Symbol::intern(&n.to_string())), suffix)
+ }
+ LitKind::Float(symbol, ty) => {
+ Token::Literal(token::Lit::Float(symbol), Some(Symbol::intern(ty.ty_to_string())))
+ }
+ LitKind::FloatUnsuffixed(symbol) => Token::Literal(token::Lit::Float(symbol), None),
+ LitKind::Bool(value) => Token::Ident(Ident::with_empty_ctxt(Symbol::intern(match value {
+ true => "true",
+ false => "false",
+ }))),
+ }
+ }
+
+ fn from_token(token: Token) -> Option<LitKind> {
+ match token {
+ Token::Ident(ident) if ident.name == "true" => Some(LitKind::Bool(true)),
+ Token::Ident(ident) if ident.name == "false" => Some(LitKind::Bool(false)),
+ Token::Interpolated(ref nt) => match **nt {
+ token::NtExpr(ref v) => match v.node {
+ ExprKind::Lit(ref lit) => Some(lit.node.clone()),
+ _ => None,
+ },
+ _ => None,
+ },
+ Token::Literal(lit, suf) => {
+ let (suffix_illegal, result) = parse::lit_token(lit, suf, None);
+ if suffix_illegal && suf.is_some() {
+ return None;
+ }
+ result
+ }
+ _ => None,
+ }
+ }
+}
+
pub trait HasAttrs: Sized {
fn attrs(&self) -> &[ast::Attribute];
fn map_attrs<F: FnOnce(Vec<ast::Attribute>) -> Vec<ast::Attribute>>(self, f: F) -> Self;
use {fold, attr};
use ast;
use codemap::Spanned;
-use parse::ParseSess;
-use ptr::P;
+use parse::{token, ParseSess};
+use syntax_pos::Span;
+use ptr::P;
use util::small_vector::SmallVector;
/// A folder that strips out items that do not belong in the current configuration.
return Some(attr);
}
- let attr_list = match attr.meta_item_list() {
- Some(attr_list) => attr_list,
- None => {
- let msg = "expected `#[cfg_attr(<cfg pattern>, <attr>)]`";
- self.sess.span_diagnostic.span_err(attr.span, msg);
- return None;
- }
- };
-
- let (cfg, mi) = match (attr_list.len(), attr_list.get(0), attr_list.get(1)) {
- (2, Some(cfg), Some(mi)) => (cfg, mi),
- _ => {
- let msg = "expected `#[cfg_attr(<cfg pattern>, <attr>)]`";
- self.sess.span_diagnostic.span_err(attr.span, msg);
+ let (cfg, path, tokens, span) = match attr.parse(self.sess, |parser| {
+ parser.expect(&token::OpenDelim(token::Paren))?;
+ let cfg = parser.parse_meta_item()?;
+ parser.expect(&token::Comma)?;
+ let lo = parser.span.lo;
+ let (path, tokens) = parser.parse_path_and_tokens()?;
+ parser.expect(&token::CloseDelim(token::Paren))?;
+ Ok((cfg, path, tokens, Span { lo: lo, ..parser.prev_span }))
+ }) {
+ Ok(result) => result,
+ Err(mut e) => {
+ e.emit();
return None;
}
};
- use attr::cfg_matches;
- match (cfg.meta_item(), mi.meta_item()) {
- (Some(cfg), Some(mi)) =>
- if cfg_matches(&cfg, self.sess, self.features) {
- self.process_cfg_attr(ast::Attribute {
- id: attr::mk_attr_id(),
- style: attr.style,
- value: mi.clone(),
- is_sugared_doc: false,
- span: mi.span,
- })
- } else {
- None
- },
- _ => {
- let msg = "unexpected literal(s) in `#[cfg_attr(<cfg pattern>, <attr>)]`";
- self.sess.span_diagnostic.span_err(attr.span, msg);
- None
- }
+ if attr::cfg_matches(&cfg, self.sess, self.features) {
+ self.process_cfg_attr(ast::Attribute {
+ id: attr::mk_attr_id(),
+ style: attr.style,
+ path: path,
+ tokens: tokens,
+ is_sugared_doc: false,
+ span: span,
+ })
+ } else {
+ None
}
}
return false;
}
- let mis = match attr.value.node {
- ast::MetaItemKind::List(ref mis) if is_cfg(&attr) => mis,
- _ => return true
+ let mis = if !is_cfg(&attr) {
+ return true;
+ } else if let Some(mis) = attr.meta_item_list() {
+ mis
+ } else {
+ return true;
};
if mis.len() != 1 {
use {ast, codemap};
use ext::base::ExtCtxt;
use ext::build::AstBuilder;
+use parse::parser::PathStyle;
use symbol::Symbol;
use syntax_pos::Span;
-pub fn collect_derives(cx: &mut ExtCtxt, attrs: &mut Vec<ast::Attribute>) -> Vec<(Symbol, Span)> {
+pub fn collect_derives(cx: &mut ExtCtxt, attrs: &mut Vec<ast::Attribute>) -> Vec<ast::Path> {
let mut result = Vec::new();
attrs.retain(|attr| {
- if attr.name() != "derive" {
+ if attr.path != "derive" {
return true;
}
- if attr.value_str().is_some() {
- cx.span_err(attr.span, "unexpected value in `derive`");
- return false;
- }
-
- let traits = attr.meta_item_list().unwrap_or(&[]).to_owned();
- if traits.is_empty() {
- cx.span_warn(attr.span, "empty trait list in `derive`");
- return false;
- }
-
- for titem in traits {
- if titem.word().is_none() {
- cx.span_err(titem.span, "malformed `derive` entry");
- return false;
+ match attr.parse_list(cx.parse_sess, |parser| parser.parse_path(PathStyle::Mod)) {
+ Ok(ref traits) if traits.is_empty() => {
+ cx.span_warn(attr.span, "empty trait list in `derive`");
+ false
+ }
+ Ok(traits) => {
+ result.extend(traits);
+ true
+ }
+ Err(mut e) => {
+ e.emit();
+ false
}
- result.push((titem.name().unwrap(), titem.span));
}
-
- true
});
result
}
}
}
-pub fn add_derived_markers<T: HasAttrs>(cx: &mut ExtCtxt, traits: &[(Symbol, Span)], item: T) -> T {
+pub fn add_derived_markers<T: HasAttrs>(cx: &mut ExtCtxt, traits: &[ast::Path], item: T) -> T {
let span = match traits.get(0) {
- Some(&(_, span)) => span,
+ Some(path) => path.span,
None => return item,
};
item.map_attrs(|mut attrs| {
- if traits.iter().any(|&(name, _)| name == "PartialEq") &&
- traits.iter().any(|&(name, _)| name == "Eq") {
+ if traits.iter().any(|path| *path == "PartialEq") &&
+ traits.iter().any(|path| *path == "Eq") {
let span = allow_unstable(cx, span, "derive(PartialEq, Eq)");
let meta = cx.meta_word(span, Symbol::intern("structural_match"));
attrs.push(cx.attribute(span, meta));
}
- if traits.iter().any(|&(name, _)| name == "Copy") &&
- traits.iter().any(|&(name, _)| name == "Clone") {
+ if traits.iter().any(|path| *path == "Copy") &&
+ traits.iter().any(|path| *path == "Clone") {
let span = allow_unstable(cx, span, "derive(Copy, Clone)");
let meta = cx.meta_word(span, Symbol::intern("rustc_copy_clone_marker"));
attrs.push(cx.attribute(span, meta));
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use ast::{self, Block, Ident, PatKind};
-use ast::{Name, MacStmtStyle, StmtKind, ItemKind};
+use ast::{self, Block, Ident, PatKind, Path};
+use ast::{MacStmtStyle, StmtKind, ItemKind};
use attr::{self, HasAttrs};
use codemap::{ExpnInfo, NameAndSpan, MacroBang, MacroAttribute};
use config::{is_test_or_bench, StripUnconfigured};
use std_inject;
use symbol::Symbol;
use symbol::keywords;
-use syntax_pos::{self, Span, ExpnId};
+use syntax_pos::{Span, ExpnId, DUMMY_SP};
use tokenstream::TokenStream;
use util::small_vector::SmallVector;
use visit::Visitor;
},
Attr {
attr: Option<ast::Attribute>,
- traits: Vec<(Symbol, Span)>,
+ traits: Vec<Path>,
item: Annotatable,
},
Derive {
- name: Symbol,
- span: Span,
+ path: Path,
item: Annotatable,
},
}
match self.kind {
InvocationKind::Bang { span, .. } => span,
InvocationKind::Attr { attr: Some(ref attr), .. } => attr.span,
- InvocationKind::Attr { attr: None, .. } => syntax_pos::DUMMY_SP,
- InvocationKind::Derive { span, .. } => span,
+ InvocationKind::Attr { attr: None, .. } => DUMMY_SP,
+ InvocationKind::Derive { ref path, .. } => path.span,
}
}
}
self.collect_invocations(expansion, &[])
} else if let InvocationKind::Attr { attr: None, traits, item } = invoc.kind {
let item = item
- .map_attrs(|mut attrs| { attrs.retain(|a| a.name() != "derive"); attrs });
+ .map_attrs(|mut attrs| { attrs.retain(|a| a.path != "derive"); attrs });
let item_with_markers =
add_derived_markers(&mut self.cx, &traits, item.clone());
let derives = derives.entry(invoc.expansion_data.mark).or_insert_with(Vec::new);
- for &(name, span) in &traits {
+ for path in &traits {
let mark = Mark::fresh();
derives.push(mark);
- let path = ast::Path::from_ident(span, Ident::with_empty_ctxt(name));
let item = match self.cx.resolver.resolve_macro(
- Mark::root(), &path, MacroKind::Derive, false) {
+ Mark::root(), path, MacroKind::Derive, false) {
Ok(ext) => match *ext {
SyntaxExtension::BuiltinDerive(..) => item_with_markers.clone(),
_ => item.clone(),
_ => item.clone(),
};
invocations.push(Invocation {
- kind: InvocationKind::Derive { name: name, span: span, item: item },
+ kind: InvocationKind::Derive { path: path.clone(), item: item },
expansion_kind: invoc.expansion_kind,
expansion_data: ExpansionData {
mark: mark,
};
attr::mark_used(&attr);
- let name = attr.name();
self.cx.bt_push(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
- format: MacroAttribute(name),
+ format: MacroAttribute(Symbol::intern(&format!("{}", attr.path))),
span: Some(attr.span),
allow_internal_unstable: false,
}
match *ext {
MultiModifier(ref mac) => {
- let item = mac.expand(self.cx, attr.span, &attr.value, item);
+ let meta = panictry!(attr.parse_meta(&self.cx.parse_sess));
+ let item = mac.expand(self.cx, attr.span, &meta, item);
kind.expect_from_annotatables(item)
}
MultiDecorator(ref mac) => {
let mut items = Vec::new();
- mac.expand(self.cx, attr.span, &attr.value, &item,
- &mut |item| items.push(item));
+ let meta = panictry!(attr.parse_meta(&self.cx.parse_sess));
+ mac.expand(self.cx, attr.span, &meta, &item, &mut |item| items.push(item));
items.push(item);
kind.expect_from_annotatables(items)
}
SyntaxExtension::AttrProcMacro(ref mac) => {
- let attr_toks = stream_for_attr_args(&attr, &self.cx.parse_sess);
let item_toks = stream_for_item(&item, &self.cx.parse_sess);
let span = Span {
expn_id: self.cx.codemap().record_expansion(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
- format: MacroAttribute(name),
+ format: MacroAttribute(Symbol::intern(&format!("{}", attr.path))),
span: None,
allow_internal_unstable: false,
},
..attr.span
};
- let tok_result = mac.expand(self.cx, attr.span, attr_toks, item_toks);
- self.parse_expansion(tok_result, kind, name, span)
+ let tok_result = mac.expand(self.cx, attr.span, attr.tokens.clone(), item_toks);
+ self.parse_expansion(tok_result, kind, &attr.path, span)
}
SyntaxExtension::ProcMacroDerive(..) | SyntaxExtension::BuiltinDerive(..) => {
- self.cx.span_err(attr.span, &format!("`{}` is a derive mode", name));
+ self.cx.span_err(attr.span, &format!("`{}` is a derive mode", attr.path));
kind.dummy(attr.span)
}
_ => {
- let msg = &format!("macro `{}` may not be used in attributes", name);
+ let msg = &format!("macro `{}` may not be used in attributes", attr.path);
self.cx.span_err(attr.span, &msg);
kind.dummy(attr.span)
}
};
let path = &mac.node.path;
- let extname = path.segments.last().unwrap().identifier.name;
let ident = ident.unwrap_or(keywords::Invalid.ident());
let marked_tts =
noop_fold_tts(mac.node.stream(), &mut Marker { mark: mark, expn_id: None });
NormalTT(ref expandfun, exp_span, allow_internal_unstable) => {
if ident.name != keywords::Invalid.name() {
let msg =
- format!("macro {}! expects no ident argument, given '{}'", extname, ident);
+ format!("macro {}! expects no ident argument, given '{}'", path, ident);
self.cx.span_err(path.span, &msg);
return kind.dummy(span);
}
self.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
- format: MacroBang(extname),
+ format: MacroBang(Symbol::intern(&format!("{}", path))),
span: exp_span,
allow_internal_unstable: allow_internal_unstable,
},
IdentTT(ref expander, tt_span, allow_internal_unstable) => {
if ident.name == keywords::Invalid.name() {
self.cx.span_err(path.span,
- &format!("macro {}! expects an ident argument", extname));
+ &format!("macro {}! expects an ident argument", path));
return kind.dummy(span);
};
self.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
- format: MacroBang(extname),
+ format: MacroBang(Symbol::intern(&format!("{}", path))),
span: tt_span,
allow_internal_unstable: allow_internal_unstable,
}
MultiDecorator(..) | MultiModifier(..) | SyntaxExtension::AttrProcMacro(..) => {
self.cx.span_err(path.span,
- &format!("`{}` can only be used in attributes", extname));
+ &format!("`{}` can only be used in attributes", path));
return kind.dummy(span);
}
SyntaxExtension::ProcMacroDerive(..) | SyntaxExtension::BuiltinDerive(..) => {
- self.cx.span_err(path.span, &format!("`{}` is a derive mode", extname));
+ self.cx.span_err(path.span, &format!("`{}` is a derive mode", path));
return kind.dummy(span);
}
SyntaxExtension::ProcMacro(ref expandfun) => {
if ident.name != keywords::Invalid.name() {
let msg =
- format!("macro {}! expects no ident argument, given '{}'", extname, ident);
+ format!("macro {}! expects no ident argument, given '{}'", path, ident);
self.cx.span_err(path.span, &msg);
return kind.dummy(span);
}
self.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
- format: MacroBang(extname),
+ format: MacroBang(Symbol::intern(&format!("{}", path))),
// FIXME procedural macros do not have proper span info
// yet, when they do, we should use it here.
span: None,
});
let tok_result = expandfun.expand(self.cx, span, marked_tts);
- Some(self.parse_expansion(tok_result, kind, extname, span))
+ Some(self.parse_expansion(tok_result, kind, path, span))
}
};
/// Expand a derive invocation. Returns the result of expansion.
fn expand_derive_invoc(&mut self, invoc: Invocation, ext: Rc<SyntaxExtension>) -> Expansion {
let Invocation { expansion_kind: kind, .. } = invoc;
- let (name, span, item) = match invoc.kind {
- InvocationKind::Derive { name, span, item } => (name, span, item),
+ let (path, item) = match invoc.kind {
+ InvocationKind::Derive { path, item } => (path, item),
_ => unreachable!(),
};
- let mitem = ast::MetaItem { name: name, span: span, node: ast::MetaItemKind::Word };
- let pretty_name = Symbol::intern(&format!("derive({})", name));
+ let pretty_name = Symbol::intern(&format!("derive({})", path));
+ let span = path.span;
+ let attr = ast::Attribute {
+ path: path, tokens: TokenStream::empty(), span: span,
+ // irrelevant:
+ id: ast::AttrId(0), style: ast::AttrStyle::Outer, is_sugared_doc: false,
+ };
self.cx.bt_push(ExpnInfo {
call_site: span,
callee: NameAndSpan {
format: MacroAttribute(pretty_name),
- span: Some(span),
+ span: None,
allow_internal_unstable: false,
}
});
}),
..span
};
- return kind.expect_from_annotatables(ext.expand(self.cx, span, &mitem, item));
+ let dummy = ast::MetaItem { // FIXME(jseyfried) avoid this
+ name: keywords::Invalid.name(),
+ span: DUMMY_SP,
+ node: ast::MetaItemKind::Word,
+ };
+ return kind.expect_from_annotatables(ext.expand(self.cx, span, &dummy, item));
}
SyntaxExtension::BuiltinDerive(func) => {
let span = Span {
..span
};
let mut items = Vec::new();
- func(self.cx, span, &mitem, &item, &mut |a| {
- items.push(a)
- });
+ func(self.cx, span, &attr.meta().unwrap(), &item, &mut |a| items.push(a));
return kind.expect_from_annotatables(items);
}
_ => {
- let msg = &format!("macro `{}` may not be used for derive attributes", name);
+ let msg = &format!("macro `{}` may not be used for derive attributes", attr.path);
self.cx.span_err(span, &msg);
kind.dummy(span)
}
}
}
- fn parse_expansion(&mut self, toks: TokenStream, kind: ExpansionKind, name: Name, span: Span)
+ fn parse_expansion(&mut self, toks: TokenStream, kind: ExpansionKind, path: &Path, span: Span)
-> Expansion {
let mut parser = self.cx.new_parser_from_tts(&toks.into_trees().collect::<Vec<_>>());
let expansion = match parser.parse_expansion(kind, false) {
return kind.dummy(span);
}
};
- parser.ensure_complete_parse(name, kind.name(), span);
+ parser.ensure_complete_parse(path, kind.name(), span);
// FIXME better span info
expansion.fold_with(&mut ChangeSpan { span: span })
}
})
}
- pub fn ensure_complete_parse(&mut self, macro_name: ast::Name, kind_name: &str, span: Span) {
+ pub fn ensure_complete_parse(&mut self, macro_path: &Path, kind_name: &str, span: Span) {
if self.token != token::Eof {
let msg = format!("macro expansion ignores token `{}` and any following",
self.this_token_to_string());
let mut err = self.diagnostic().struct_span_err(self.span, &msg);
let msg = format!("caused by the macro expansion here; the usage \
of `{}!` is likely invalid in {} context",
- macro_name, kind_name);
+ macro_path, kind_name);
err.span_note(span, &msg).emit();
}
}
fn collect_attr(&mut self,
attr: Option<ast::Attribute>,
- traits: Vec<(Symbol, Span)>,
+ traits: Vec<Path>,
item: Annotatable,
kind: ExpansionKind)
-> Expansion {
if !traits.is_empty() &&
(kind == ExpansionKind::TraitItems || kind == ExpansionKind::ImplItems) {
- self.cx.span_err(traits[0].1, "`derive` can be only be applied to items");
+ self.cx.span_err(traits[0].span, "`derive` can be only be applied to items");
return kind.expect_from_annotatables(::std::iter::once(item));
}
self.collect(kind, InvocationKind::Attr { attr: attr, traits: traits, item: item })
}
// If `item` is an attr invocation, remove and return the macro attribute.
- fn classify_item<T>(&mut self, mut item: T) -> (Option<ast::Attribute>, Vec<(Symbol, Span)>, T)
+ fn classify_item<T>(&mut self, mut item: T) -> (Option<ast::Attribute>, Vec<Path>, T)
where T: HasAttrs,
{
let (mut attr, mut traits) = (None, Vec::new());
string_to_stream(text, parse_sess)
}
-fn stream_for_attr_args(attr: &ast::Attribute, parse_sess: &ParseSess) -> TokenStream {
- use ast::MetaItemKind::*;
- use print::pp::Breaks;
- use print::pprust::PrintState;
-
- let token_string = match attr.value.node {
- // For `#[foo]`, an empty token
- Word => return TokenStream::empty(),
- // For `#[foo(bar, baz)]`, returns `(bar, baz)`
- List(ref items) => pprust::to_string(|s| {
- s.popen()?;
- s.commasep(Breaks::Consistent,
- &items[..],
- |s, i| s.print_meta_list_item(&i))?;
- s.pclose()
- }),
- // For `#[foo = "bar"]`, returns `= "bar"`
- NameValue(ref lit) => pprust::to_string(|s| {
- s.word_space("=")?;
- s.print_literal(lit)
- }),
- };
-
- string_to_stream(token_string, parse_sess)
-}
-
fn string_to_stream(text: String, parse_sess: &ParseSess) -> TokenStream {
let filename = String::from("<macro expansion>");
filemap_to_stream(parse_sess, parse_sess.codemap().new_filemap(filename, None, text))
// Detect if this is an inline module (`mod m { ... }` as opposed to `mod m;`).
// In the non-inline case, `inner` is never the dummy span (c.f. `parse_item_mod`).
// Thus, if `inner` is the dummy span, we know the module is inline.
- let inline_module = item.span.contains(inner) || inner == syntax_pos::DUMMY_SP;
+ let inline_module = item.span.contains(inner) || inner == DUMMY_SP;
if inline_module {
if let Some(path) = attr::first_attr_value_str_by_name(&item.attrs, "path") {
}
impl ToTokens for ast::Attribute {
- fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
+ fn to_tokens(&self, _cx: &ExtCtxt) -> Vec<TokenTree> {
let mut r = vec![];
// FIXME: The spans could be better
r.push(TokenTree::Token(self.span, token::Pound));
if self.style == ast::AttrStyle::Inner {
r.push(TokenTree::Token(self.span, token::Not));
}
+ let mut inner = Vec::new();
+ for (i, segment) in self.path.segments.iter().enumerate() {
+ if i > 0 {
+ inner.push(TokenTree::Token(self.span, token::Colon).into());
+ }
+ inner.push(TokenTree::Token(self.span, token::Ident(segment.identifier)).into());
+ }
+ inner.push(self.tokens.clone());
+
r.push(TokenTree::Delimited(self.span, tokenstream::Delimited {
- delim: token::Bracket,
- tts: self.value.to_tokens(cx).into_iter().collect::<TokenStream>().into(),
+ delim: token::Bracket, tts: TokenStream::concat(inner).into()
}));
r
}
fn parse_nt<'a>(p: &mut Parser<'a>, sp: Span, name: &str) -> Nonterminal {
match name {
"tt" => {
- return token::NtTT(panictry!(p.parse_token_tree()));
+ return token::NtTT(p.parse_token_tree());
}
_ => {}
}
}
// Make sure we don't have any tokens left to parse so we don't silently drop anything.
- parser.ensure_complete_parse(macro_ident.name, kind.name(), site_span);
+ let path = ast::Path::from_ident(site_span, macro_ident);
+ parser.ensure_complete_parse(&path, kind.name(), site_span);
expansion
}
}
// Allows the `catch {...}` expression
(active, catch_expr, "1.17.0", Some(31436)),
+
+ // See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work.
+ (active, rvalue_static_promotion, "1.15.1", Some(38865)),
);
declare_features! (
impl<'a> Context<'a> {
fn check_attribute(&self, attr: &ast::Attribute, is_macro: bool) {
debug!("check_attribute(attr = {:?})", attr);
- let name = &*attr.name().as_str();
+ let name = unwrap_or!(attr.name(), return);
+
for &(n, ty, ref gateage) in BUILTIN_ATTRIBUTES {
- if n == name {
+ if name == n {
if let &Gated(_, ref name, ref desc, ref has_feature) = gateage {
gate_feature_fn!(self, has_feature, attr.span, name, desc);
}
- debug!("check_attribute: {:?} is builtin, {:?}, {:?}", name, ty, gateage);
+ debug!("check_attribute: {:?} is builtin, {:?}, {:?}", attr.path, ty, gateage);
return;
}
}
for &(ref n, ref ty) in self.plugin_attributes {
- if n == name {
+ if attr.path == &**n {
// Plugins can't gate attributes, so we don't check for it
// unlike the code above; we only use this loop to
// short-circuit to avoid the checks below
- debug!("check_attribute: {:?} is registered by a plugin, {:?}", name, ty);
+ debug!("check_attribute: {:?} is registered by a plugin, {:?}", attr.path, ty);
return;
}
}
- if name.starts_with("rustc_") {
+ if name.as_str().starts_with("rustc_") {
gate_feature!(self, rustc_attrs, attr.span,
"unless otherwise specified, attributes \
with the prefix `rustc_` \
are reserved for internal compiler diagnostics");
- } else if name.starts_with("derive_") {
+ } else if name.as_str().starts_with("derive_") {
gate_feature!(self, custom_derive, attr.span, EXPLAIN_DERIVE_UNDERSCORE);
- } else if attr::is_known(attr) {
- debug!("check_attribute: {:?} is known", name);
- } else {
+ } else if !attr::is_known(attr) {
// Only run the custom attribute lint during regular
// feature gate checking. Macro gating runs
// before the plugin attributes are registered
unknown to the compiler and \
may have meaning \
added to it in the future",
- name));
+ attr.path));
}
}
}
self.context.check_attribute(attr, false);
}
- if contains_novel_literal(&attr.value) {
+ if self.context.features.proc_macro && attr::is_known(attr) {
+ return
+ }
+
+ let meta = panictry!(attr.parse_meta(&self.context.parse_sess));
+ if contains_novel_literal(&meta) {
gate_feature_post!(&self, attr_literals, attr.span,
"non-string literals in attributes, or string \
literals in top-level positions, are experimental");
`#[repr(simd)]` instead");
}
for attr in &i.attrs {
- if attr.name() == "repr" {
- for item in attr.meta_item_list().unwrap_or(&[]) {
+ if attr.path == "repr" {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
if item.check_name("simd") {
gate_feature_post!(&self, repr_simd, i.span,
"SIMD types are experimental \
Some(Attribute {
id: attr.id,
style: attr.style,
- value: fld.fold_meta_item(attr.value),
+ path: fld.fold_path(attr.path),
+ tokens: fld.fold_tts(attr.tokens),
is_sugared_doc: attr.is_sugared_doc,
span: fld.new_span(attr.span),
})
token::NtExpr(expr) => token::NtExpr(fld.fold_expr(expr)),
token::NtTy(ty) => token::NtTy(fld.fold_ty(ty)),
token::NtIdent(id) => token::NtIdent(Spanned::<Ident>{node: fld.fold_ident(id.node), ..id}),
- token::NtMeta(meta_item) => token::NtMeta(fld.fold_meta_item(meta_item)),
+ token::NtMeta(meta) => token::NtMeta(fld.fold_meta_item(meta)),
token::NtPath(path) => token::NtPath(fld.fold_path(path)),
token::NtTT(tt) => token::NtTT(fld.fold_tt(tt)),
token::NtArm(arm) => token::NtArm(fld.fold_arm(arm)),
matches_codepattern,
"matches_codepattern",
pprust::to_string(|s| fake_print_crate(s, &folded_crate)),
- "#[a]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string());
+ "#[zz]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string());
}
// even inside macro defs....
})
}
+#[macro_export]
+macro_rules! unwrap_or {
+ ($opt:expr, $default:expr) => {
+ match $opt {
+ Some(x) => x,
+ None => $default,
+ }
+ }
+}
+
#[macro_use]
pub mod diagnostics {
#[macro_use]
use codemap::spanned;
use parse::common::SeqSep;
use parse::PResult;
-use parse::token;
-use parse::parser::{Parser, TokenType};
+use parse::token::{self, Nonterminal};
+use parse::parser::{Parser, TokenType, PathStyle};
+use tokenstream::TokenStream;
#[derive(PartialEq, Eq, Debug)]
enum InnerAttributeParsePolicy<'a> {
debug!("parse_attribute_with_inner_parse_policy: inner_parse_policy={:?} self.token={:?}",
inner_parse_policy,
self.token);
- let (span, value, mut style) = match self.token {
+ let (span, path, tokens, mut style) = match self.token {
token::Pound => {
let lo = self.span.lo;
self.bump();
};
self.expect(&token::OpenDelim(token::Bracket))?;
- let meta_item = self.parse_meta_item()?;
+ let (path, tokens) = self.parse_path_and_tokens()?;
self.expect(&token::CloseDelim(token::Bracket))?;
let hi = self.prev_span.hi;
- (mk_sp(lo, hi), meta_item, style)
+ (mk_sp(lo, hi), path, tokens, style)
}
_ => {
let token_str = self.this_token_to_string();
Ok(ast::Attribute {
id: attr::mk_attr_id(),
style: style,
- value: value,
+ path: path,
+ tokens: tokens,
is_sugared_doc: false,
span: span,
})
}
+ pub fn parse_path_and_tokens(&mut self) -> PResult<'a, (ast::Path, TokenStream)> {
+ let meta = match self.token {
+ token::Interpolated(ref nt) => match **nt {
+ Nonterminal::NtMeta(ref meta) => Some(meta.clone()),
+ _ => None,
+ },
+ _ => None,
+ };
+ Ok(if let Some(meta) = meta {
+ self.bump();
+ (ast::Path::from_ident(meta.span, ast::Ident::with_empty_ctxt(meta.name)),
+ meta.node.tokens(meta.span))
+ } else {
+ (self.parse_path(PathStyle::Mod)?, self.parse_tokens())
+ })
+ }
+
/// Parse attributes that appear after the opening of an item. These should
/// be preceded by an exclamation mark, but we accept and warn about one
/// terminated by a semicolon.
let lo = self.span.lo;
let ident = self.parse_ident()?;
- let node = if self.eat(&token::Eq) {
+ let node = self.parse_meta_item_kind()?;
+ let hi = self.prev_span.hi;
+ Ok(ast::MetaItem { name: ident.name, node: node, span: mk_sp(lo, hi) })
+ }
+
+ pub fn parse_meta_item_kind(&mut self) -> PResult<'a, ast::MetaItemKind> {
+ Ok(if self.eat(&token::Eq) {
ast::MetaItemKind::NameValue(self.parse_unsuffixed_lit()?)
} else if self.token == token::OpenDelim(token::Paren) {
ast::MetaItemKind::List(self.parse_meta_seq()?)
} else {
+ self.eat(&token::OpenDelim(token::Paren));
ast::MetaItemKind::Word
- };
- let hi = self.prev_span.hi;
- Ok(ast::MetaItem { name: ident.name, node: node, span: mk_sp(lo, hi) })
+ })
}
/// matches meta_item_inner : (meta_item | UNSUFFIXED_LIT) ;
base = 16;
num_digits = self.scan_digits(16, 16);
}
- '0'...'9' | '_' | '.' => {
+ '0'...'9' | '_' | '.' | 'e' | 'E' => {
num_digits = self.scan_digits(10, 10) + 1;
}
_ => {
s[1..].chars().all(|c| '0' <= c && c <= '9')
}
-fn filtered_float_lit(data: Symbol, suffix: Option<Symbol>, sd: &Handler, sp: Span)
- -> ast::LitKind {
+macro_rules! err {
+ ($opt_diag:expr, |$span:ident, $diag:ident| $($body:tt)*) => {
+ match $opt_diag {
+ Some(($span, $diag)) => { $($body)* }
+ None => return None,
+ }
+ }
+}
+
+pub fn lit_token(lit: token::Lit, suf: Option<Symbol>, diag: Option<(Span, &Handler)>)
+ -> (bool /* suffix illegal? */, Option<ast::LitKind>) {
+ use ast::LitKind;
+
+ match lit {
+ token::Byte(i) => (true, Some(LitKind::Byte(byte_lit(&i.as_str()).0))),
+ token::Char(i) => (true, Some(LitKind::Char(char_lit(&i.as_str()).0))),
+
+ // There are some valid suffixes for integer and float literals,
+ // so all the handling is done internally.
+ token::Integer(s) => (false, integer_lit(&s.as_str(), suf, diag)),
+ token::Float(s) => (false, float_lit(&s.as_str(), suf, diag)),
+
+ token::Str_(s) => {
+ let s = Symbol::intern(&str_lit(&s.as_str()));
+ (true, Some(LitKind::Str(s, ast::StrStyle::Cooked)))
+ }
+ token::StrRaw(s, n) => {
+ let s = Symbol::intern(&raw_str_lit(&s.as_str()));
+ (true, Some(LitKind::Str(s, ast::StrStyle::Raw(n))))
+ }
+ token::ByteStr(i) => {
+ (true, Some(LitKind::ByteStr(byte_str_lit(&i.as_str()))))
+ }
+ token::ByteStrRaw(i, _) => {
+ (true, Some(LitKind::ByteStr(Rc::new(i.to_string().into_bytes()))))
+ }
+ }
+}
+
+fn filtered_float_lit(data: Symbol, suffix: Option<Symbol>, diag: Option<(Span, &Handler)>)
+ -> Option<ast::LitKind> {
debug!("filtered_float_lit: {}, {:?}", data, suffix);
let suffix = match suffix {
Some(suffix) => suffix,
- None => return ast::LitKind::FloatUnsuffixed(data),
+ None => return Some(ast::LitKind::FloatUnsuffixed(data)),
};
- match &*suffix.as_str() {
+ Some(match &*suffix.as_str() {
"f32" => ast::LitKind::Float(data, ast::FloatTy::F32),
"f64" => ast::LitKind::Float(data, ast::FloatTy::F64),
suf => {
- if suf.len() >= 2 && looks_like_width_suffix(&['f'], suf) {
- // if it looks like a width, lets try to be helpful.
- sd.struct_span_err(sp, &format!("invalid width `{}` for float literal", &suf[1..]))
- .help("valid widths are 32 and 64")
- .emit();
- } else {
- sd.struct_span_err(sp, &format!("invalid suffix `{}` for float literal", suf))
- .help("valid suffixes are `f32` and `f64`")
- .emit();
- }
+ err!(diag, |span, diag| {
+ if suf.len() >= 2 && looks_like_width_suffix(&['f'], suf) {
+                    // if it looks like a width, let's try to be helpful.
+ let msg = format!("invalid width `{}` for float literal", &suf[1..]);
+ diag.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit()
+ } else {
+ let msg = format!("invalid suffix `{}` for float literal", suf);
+ diag.struct_span_err(span, &msg)
+ .help("valid suffixes are `f32` and `f64`")
+ .emit();
+ }
+ });
ast::LitKind::FloatUnsuffixed(data)
}
- }
+ })
}
-pub fn float_lit(s: &str, suffix: Option<Symbol>, sd: &Handler, sp: Span) -> ast::LitKind {
+pub fn float_lit(s: &str, suffix: Option<Symbol>, diag: Option<(Span, &Handler)>)
+ -> Option<ast::LitKind> {
debug!("float_lit: {:?}, {:?}", s, suffix);
// FIXME #2252: bounds checking float literals is deferred until trans
let s = s.chars().filter(|&c| c != '_').collect::<String>();
- filtered_float_lit(Symbol::intern(&s), suffix, sd, sp)
+ filtered_float_lit(Symbol::intern(&s), suffix, diag)
}
/// Parse a string representing a byte literal into its final form. Similar to `char_lit`
Rc::new(res)
}
-pub fn integer_lit(s: &str, suffix: Option<Symbol>, sd: &Handler, sp: Span) -> ast::LitKind {
+pub fn integer_lit(s: &str, suffix: Option<Symbol>, diag: Option<(Span, &Handler)>)
+ -> Option<ast::LitKind> {
// s can only be ascii, byte indexing is fine
let s2 = s.chars().filter(|&c| c != '_').collect::<String>();
// 1f64 and 2f32 etc. are valid float literals.
if let Some(suf) = suffix {
if looks_like_width_suffix(&['f'], &suf.as_str()) {
- match base {
- 16 => sd.span_err(sp, "hexadecimal float literal is not supported"),
- 8 => sd.span_err(sp, "octal float literal is not supported"),
- 2 => sd.span_err(sp, "binary float literal is not supported"),
- _ => ()
+ let err = match base {
+ 16 => Some("hexadecimal float literal is not supported"),
+ 8 => Some("octal float literal is not supported"),
+ 2 => Some("binary float literal is not supported"),
+ _ => None,
+ };
+ if let Some(err) = err {
+ err!(diag, |span, diag| diag.span_err(span, err));
}
- return filtered_float_lit(Symbol::intern(&s), Some(suf), sd, sp)
+ return filtered_float_lit(Symbol::intern(&s), Some(suf), diag)
}
}
}
if let Some(suf) = suffix {
- if suf.as_str().is_empty() { sd.span_bug(sp, "found empty literal suffix in Some")}
+ if suf.as_str().is_empty() {
+ err!(diag, |span, diag| diag.span_bug(span, "found empty literal suffix in Some"));
+ }
ty = match &*suf.as_str() {
"isize" => ast::LitIntType::Signed(ast::IntTy::Is),
"i8" => ast::LitIntType::Signed(ast::IntTy::I8),
suf => {
// i<digits> and u<digits> look like widths, so lets
// give an error message along those lines
- if looks_like_width_suffix(&['i', 'u'], suf) {
- sd.struct_span_err(sp, &format!("invalid width `{}` for integer literal",
- &suf[1..]))
- .help("valid widths are 8, 16, 32, 64 and 128")
- .emit();
- } else {
- sd.struct_span_err(sp, &format!("invalid suffix `{}` for numeric literal", suf))
- .help("the suffix must be one of the integral types \
- (`u32`, `isize`, etc)")
- .emit();
- }
+ err!(diag, |span, diag| {
+ if looks_like_width_suffix(&['i', 'u'], suf) {
+ let msg = format!("invalid width `{}` for integer literal", &suf[1..]);
+ diag.struct_span_err(span, &msg)
+ .help("valid widths are 8, 16, 32, 64 and 128")
+ .emit();
+ } else {
+ let msg = format!("invalid suffix `{}` for numeric literal", suf);
+ diag.struct_span_err(span, &msg)
+ .help("the suffix must be one of the integral types \
+ (`u32`, `isize`, etc)")
+ .emit();
+ }
+ });
ty
}
debug!("integer_lit: the type is {:?}, base {:?}, the new string is {:?}, the original \
string was {:?}, the original suffix was {:?}", ty, base, s, orig, suffix);
- match u128::from_str_radix(s, base) {
+ Some(match u128::from_str_radix(s, base) {
Ok(r) => ast::LitKind::Int(r, ty),
Err(_) => {
// small bases are lexed as if they were base 10, e.g, the string
s.chars().any(|c| c.to_digit(10).map_or(false, |d| d >= base));
if !already_errored {
- sd.span_err(sp, "int literal is too large");
+ err!(diag, |span, diag| diag.span_err(span, "int literal is too large"));
}
ast::LitKind::Int(0, ty)
}
- }
+ })
}
#[cfg(test)]
let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string();
let item = parse_item_from_source_str(name.clone(), source, &sess)
.unwrap().unwrap();
- let docs = item.attrs.iter().filter(|a| a.name() == "doc")
+ let docs = item.attrs.iter().filter(|a| a.path == "doc")
.map(|a| a.value_str().unwrap().to_string()).collect::<Vec<_>>();
let b: &[_] = &["/// doc comment".to_string(), "/// line 2".to_string()];
assert_eq!(&docs[..], b);
use std::collections::HashSet;
use std::{cmp, mem, slice};
use std::path::{Path, PathBuf};
-use std::rc::Rc;
bitflags! {
flags Restrictions: u8 {
self.parse_seq_to_before_tokens(kets,
SeqSep::none(),
- |p| p.parse_token_tree(),
+ |p| Ok(p.parse_token_tree()),
|mut e| handler.cancel(&mut e));
}
break;
}
token::OpenDelim(token::Brace) => {
- self.parse_token_tree()?;
+ self.parse_token_tree();
break;
}
_ => self.bump(),
_ => { return self.unexpected_last(&self.token); }
},
token::Literal(lit, suf) => {
- let (suffix_illegal, out) = match lit {
- token::Byte(i) => (true, LitKind::Byte(parse::byte_lit(&i.as_str()).0)),
- token::Char(i) => (true, LitKind::Char(parse::char_lit(&i.as_str()).0)),
-
- // there are some valid suffixes for integer and
- // float literals, so all the handling is done
- // internally.
- token::Integer(s) => {
- let diag = &self.sess.span_diagnostic;
- (false, parse::integer_lit(&s.as_str(), suf, diag, self.span))
- }
- token::Float(s) => {
- let diag = &self.sess.span_diagnostic;
- (false, parse::float_lit(&s.as_str(), suf, diag, self.span))
- }
-
- token::Str_(s) => {
- let s = Symbol::intern(&parse::str_lit(&s.as_str()));
- (true, LitKind::Str(s, ast::StrStyle::Cooked))
- }
- token::StrRaw(s, n) => {
- let s = Symbol::intern(&parse::raw_str_lit(&s.as_str()));
- (true, LitKind::Str(s, ast::StrStyle::Raw(n)))
- }
- token::ByteStr(i) => {
- (true, LitKind::ByteStr(parse::byte_str_lit(&i.as_str())))
- }
- token::ByteStrRaw(i, _) => {
- (true, LitKind::ByteStr(Rc::new(i.to_string().into_bytes())))
- }
- };
+ let diag = Some((self.span, &self.sess.span_diagnostic));
+ let (suffix_illegal, result) = parse::lit_token(lit, suf, diag);
if suffix_illegal {
let sp = self.span;
self.expect_no_suffix(sp, &format!("{} literal", lit.short_name()), suf)
}
- out
+ result.unwrap()
}
_ => { return self.unexpected_last(&self.token); }
};
fn expect_delimited_token_tree(&mut self) -> PResult<'a, (token::DelimToken, ThinTokenStream)> {
match self.token {
- token::OpenDelim(delim) => self.parse_token_tree().map(|tree| match tree {
- TokenTree::Delimited(_, delimited) => (delim, delimited.stream().into()),
+ token::OpenDelim(delim) => match self.parse_token_tree() {
+ TokenTree::Delimited(_, delimited) => Ok((delim, delimited.stream().into())),
_ => unreachable!(),
- }),
+ },
_ => Err(self.fatal("expected open delimiter")),
}
}
}
/// parse a single token tree from the input.
- pub fn parse_token_tree(&mut self) -> PResult<'a, TokenTree> {
+ pub fn parse_token_tree(&mut self) -> TokenTree {
match self.token {
token::OpenDelim(..) => {
let frame = mem::replace(&mut self.token_cursor.frame,
self.token_cursor.stack.pop().unwrap());
self.span = frame.span;
self.bump();
- return Ok(TokenTree::Delimited(frame.span, Delimited {
+ TokenTree::Delimited(frame.span, Delimited {
delim: frame.delim,
tts: frame.tree_cursor.original_stream().into(),
- }));
+ })
},
token::CloseDelim(_) | token::Eof => unreachable!(),
_ => {
let token = mem::replace(&mut self.token, token::Underscore);
- let res = Ok(TokenTree::Token(self.span, token));
self.bump();
- res
+ TokenTree::Token(self.prev_span, token)
}
}
}
pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> {
let mut tts = Vec::new();
while self.token != token::Eof {
- tts.push(self.parse_token_tree()?);
+ tts.push(self.parse_token_tree());
}
Ok(tts)
}
+ pub fn parse_tokens(&mut self) -> TokenStream {
+ let mut result = Vec::new();
+ loop {
+ match self.token {
+ token::Eof | token::CloseDelim(..) => break,
+ _ => result.push(self.parse_token_tree().into()),
+ }
+ }
+ TokenStream::concat(result)
+ }
+
/// Parse a prefix-unary-operator expr
pub fn parse_prefix_expr(&mut self,
already_parsed_attrs: Option<ThinVec<Attribute>>)
let op_span = mk_sp(op.span.lo, self.span.hi);
let mut err = self.diagnostic().struct_span_err(op_span,
"chained comparison operators require parentheses");
- if op.node == BinOpKind::Lt && *outer_op == AssocOp::Greater {
+ if op.node == BinOpKind::Lt &&
+ *outer_op == AssocOp::Less || // Include `<` to provide this recommendation
+ *outer_op == AssocOp::Greater // even in a case like the following:
+ { // Foo<Bar<Baz<Qux, ()>>>
err.help(
"use `::<...>` instead of `<...>` if you meant to specify type arguments");
}
let attr = ast::Attribute {
id: attr::mk_attr_id(),
style: ast::AttrStyle::Outer,
- value: ast::MetaItem {
- name: Symbol::intern("warn_directory_ownership"),
- node: ast::MetaItemKind::Word,
- span: syntax_pos::DUMMY_SP,
- },
+ path: ast::Path::from_ident(syntax_pos::DUMMY_SP,
+ Ident::from_str("warn_directory_ownership")),
+ tokens: TokenStream::empty(),
is_sugared_doc: false,
span: syntax_pos::DUMMY_SP,
};
use ast::{self};
use ptr::P;
use symbol::keywords;
-use tokenstream;
+use tokenstream::TokenTree;
use std::fmt;
use std::rc::Rc;
/// Stuff inside brackets for attributes
NtMeta(ast::MetaItem),
NtPath(ast::Path),
- NtTT(tokenstream::TokenTree),
+ NtTT(TokenTree),
// These are not exposed to macros, but are used by quasiquote.
NtArm(ast::Arm),
NtImplItem(ast::ImplItem),
use std_inject;
use symbol::{Symbol, keywords};
use syntax_pos::DUMMY_SP;
-use tokenstream::{self, TokenTree};
+use tokenstream::{self, TokenStream, TokenTree};
use std::ascii;
use std::io::{self, Write, Read};
to_string(|s| s.print_tts(tts.iter().cloned().collect()))
}
+pub fn tokens_to_string(tokens: TokenStream) -> String {
+ to_string(|s| s.print_tts(tokens))
+}
+
pub fn stmt_to_string(stmt: &ast::Stmt) -> String {
to_string(|s| s.print_stmt(stmt))
}
ast::AttrStyle::Inner => word(self.writer(), "#![")?,
ast::AttrStyle::Outer => word(self.writer(), "#[")?,
}
- self.print_meta_item(&attr.meta())?;
+ if let Some(mi) = attr.meta() {
+ self.print_meta_item(&mi)?
+ } else {
+ for (i, segment) in attr.path.segments.iter().enumerate() {
+ if i > 0 {
+ word(self.writer(), "::")?
+ }
+ if segment.identifier.name != keywords::CrateRoot.name() &&
+ segment.identifier.name != "$crate" {
+ word(self.writer(), &segment.identifier.name.as_str())?;
+ }
+ }
+ space(self.writer())?;
+ self.print_tts(attr.tokens.clone())?;
+ }
word(self.writer(), "]")
}
}
self.end()
}
+ /// This doesn't deserve to be called "pretty" printing, but it should be
+ /// meaning-preserving. A quick hack that might help would be to look at the
+ /// spans embedded in the TTs to decide where to put spaces and newlines.
+ /// But it'd be better to parse these according to the grammar of the
+ /// appropriate macro, transcribe back into the grammar we just parsed from,
+ /// and then pretty-print the resulting AST nodes (so, e.g., we print
+ /// expression arguments as expressions). It can be done! I think.
+ fn print_tt(&mut self, tt: tokenstream::TokenTree) -> io::Result<()> {
+ match tt {
+ TokenTree::Token(_, ref tk) => {
+ word(self.writer(), &token_to_string(tk))?;
+ match *tk {
+ parse::token::DocComment(..) => {
+ hardbreak(self.writer())
+ }
+ _ => Ok(())
+ }
+ }
+ TokenTree::Delimited(_, ref delimed) => {
+ word(self.writer(), &token_to_string(&delimed.open_token()))?;
+ space(self.writer())?;
+ self.print_tts(delimed.stream())?;
+ space(self.writer())?;
+ word(self.writer(), &token_to_string(&delimed.close_token()))
+ },
+ }
+ }
+
+ fn print_tts(&mut self, tts: tokenstream::TokenStream) -> io::Result<()> {
+ self.ibox(0)?;
+ for (i, tt) in tts.into_trees().enumerate() {
+ if i != 0 {
+ space(self.writer())?;
+ }
+ self.print_tt(tt)?;
+ }
+ self.end()
+ }
+
fn space_if_not_bol(&mut self) -> io::Result<()> {
if !self.is_bol() { space(self.writer())?; }
Ok(())
}
}
- /// This doesn't deserve to be called "pretty" printing, but it should be
- /// meaning-preserving. A quick hack that might help would be to look at the
- /// spans embedded in the TTs to decide where to put spaces and newlines.
- /// But it'd be better to parse these according to the grammar of the
- /// appropriate macro, transcribe back into the grammar we just parsed from,
- /// and then pretty-print the resulting AST nodes (so, e.g., we print
- /// expression arguments as expressions). It can be done! I think.
- pub fn print_tt(&mut self, tt: tokenstream::TokenTree) -> io::Result<()> {
- match tt {
- TokenTree::Token(_, ref tk) => {
- word(&mut self.s, &token_to_string(tk))?;
- match *tk {
- parse::token::DocComment(..) => {
- hardbreak(&mut self.s)
- }
- _ => Ok(())
- }
- }
- TokenTree::Delimited(_, ref delimed) => {
- word(&mut self.s, &token_to_string(&delimed.open_token()))?;
- space(&mut self.s)?;
- self.print_tts(delimed.stream())?;
- space(&mut self.s)?;
- word(&mut self.s, &token_to_string(&delimed.close_token()))
- },
- }
- }
-
- pub fn print_tts(&mut self, tts: tokenstream::TokenStream) -> io::Result<()> {
- self.ibox(0)?;
- for (i, tt) in tts.into_trees().enumerate() {
- if i != 0 {
- space(&mut self.s)?;
- }
- self.print_tt(tt)?;
- }
- self.end()
- }
-
pub fn print_variant(&mut self, v: &ast::Variant) -> io::Result<()> {
self.head("")?;
let generics = ast::Generics::default();
use codemap::{self, ExpnInfo, NameAndSpan, MacroAttribute};
use parse::ParseSess;
use ptr::P;
+use tokenstream::TokenStream;
/// Craft a span that will be ignored by the stability lint's
/// call to codemap's is_internal check.
krate.module.items.insert(0, P(ast::Item {
attrs: vec![ast::Attribute {
style: ast::AttrStyle::Outer,
- value: ast::MetaItem {
- name: Symbol::intern("prelude_import"),
- node: ast::MetaItemKind::Word,
- span: span,
- },
+ path: ast::Path::from_ident(span, ast::Ident::from_str("prelude_import")),
+ tokens: TokenStream::empty(),
id: attr::mk_attr_id(),
is_sugared_doc: false,
span: span,
impl fmt::Display for TokenStream {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str(&pprust::tts_to_string(&self.trees().collect::<Vec<_>>()))
+ f.write_str(&pprust::tokens_to_string(self.clone()))
}
}
impl<'a> Visitor<'a> for MarkAttrs<'a> {
fn visit_attribute(&mut self, attr: &Attribute) {
- if self.0.contains(&attr.name()) {
- mark_used(attr);
- mark_known(attr);
+ if let Some(name) = attr.name() {
+ if self.0.contains(&name) {
+ mark_used(attr);
+ mark_known(attr);
+ }
}
}
attrs.extend(item.attrs
.iter()
.filter(|a| {
- match &*a.name().as_str() {
+ a.name().is_some() && match &*a.name().unwrap().as_str() {
"allow" | "warn" | "deny" | "forbid" | "stable" | "unstable" => true,
_ => false,
}
fn visit_item(&mut self, item: &'a ast::Item) {
if let ast::ItemKind::MacroDef(..) = item.node {
if self.is_proc_macro_crate &&
- item.attrs.iter().any(|attr| attr.name() == "macro_export") {
+ item.attrs.iter().any(|attr| attr.path == "macro_export") {
let msg =
"cannot export macro_rules! macros from a `proc-macro` crate type currently";
self.handler.span_err(item.span, msg);
for attr in &item.attrs {
if is_proc_macro_attr(&attr) {
if let Some(prev_attr) = found_attr {
- let msg = if attr.name() == prev_attr.name() {
+ let msg = if attr.path == prev_attr.path {
format!("Only one `#[{}]` attribute is allowed on any given function",
- attr.name())
+ attr.path)
} else {
format!("`#[{}]` and `#[{}]` attributes cannot both be applied \
- to the same function", attr.name(), prev_attr.name())
+ to the same function", attr.path, prev_attr.path)
};
self.handler.struct_span_err(attr.span(), &msg)
if !is_fn {
let msg = format!("the `#[{}]` attribute may only be used on bare functions",
- attr.name());
+ attr.path);
self.handler.span_err(attr.span(), &msg);
return;
if !self.is_proc_macro_crate {
let msg = format!("the `#[{}]` attribute is only usable with crates of the \
- `proc-macro` crate type", attr.name());
+ `proc-macro` crate type", attr.path);
self.handler.span_err(attr.span(), &msg);
return;
p.pop();
// on some installations the dir is named after the hex of the char
- // (e.g. OS X)
+ // (e.g. macOS)
p.push(&format!("{:x}", first_char as usize));
p.push(term);
if fs::metadata(&p).is_ok() {
#[ignore(reason = "buildbots don't have ncurses installed and I can't mock everything I need")]
fn test_get_dbpath_for_term() {
// woefully inadequate test coverage
- // note: current tests won't work with non-standard terminfo hierarchies (e.g. OS X's)
+ // note: current tests won't work with non-standard terminfo hierarchies (e.g. macOS's)
use std::env;
// FIXME (#9639): This needs to handle non-utf8 paths
fn x(t: &str) -> String {
#[C] //~ ERROR: The attribute `C` is currently unknown to the compiler
#[B(D)]
#[B(E = "foo")]
+#[B arbitrary tokens] //~ expected one of `(` or `=`, found `arbitrary`
struct B;
fn main() {}
// that this just passes on those platforms we link in some other allocator to
// ensure we get the same error.
//
-// So long as we CI linux/OSX we should be good.
+// So long as we CI linux/macOS we should be good.
#[cfg(any(target_os = "linux", target_os = "macos"))]
extern crate alloc_jemalloc;
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
// that this just passes on those platforms we link in some other allocator to
// ensure we get the same error.
//
-// So long as we CI linux/OSX we should be good.
+// So long as we CI linux/macOS we should be good.
#[cfg(any(all(target_os = "linux", any(target_arch = "x86", target_arch = "x86_64")),
target_os = "macos"))]
extern crate alloc_system;
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[allow(unused_variables)]
+fn main() {
+ let x: &'static u32 = &42; //~ error: does not live long enough
+ let y: &'static Option<u32> = &None; //~ error: does not live long enough
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[doc = $not_there] //~ error: unexpected token: `$`
+fn main() { }
globnar::brotz!(); //~ ERROR non-ident macro paths are experimental
::foo!(); //~ ERROR non-ident macro paths are experimental
foo::<T>!(); //~ ERROR type parameters are not allowed on macros
+ #[derive(foo::Bar)] struct T; //~ ERROR non-ident macro paths are experimental
}
// except according to those terms.
#[derive(Copy(Bad))]
-//~^ ERROR malformed `derive` entry
+//~^ ERROR expected one of `)`, `,`, or `::`, found `(`
struct Test1;
#[derive(Copy="bad")]
-//~^ ERROR malformed `derive` entry
+//~^ ERROR expected one of `)`, `,`, or `::`, found `=`
struct Test2;
#[derive()]
// ignore-macos
// ignore-ios
// compile-flags:-l framework=foo
-// error-pattern: native frameworks are only available on OSX targets
+// error-pattern: native frameworks are only available on macOS targets
fn main() {
}
#[link(name = "foo", kind = "framework")]
extern {}
-//~^^ ERROR: native frameworks are only available on OSX
+//~^^ ERROR: native frameworks are only available on macOS
fn main() {
}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(attr_literals)]
+
+#[path = 1usize] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1u8] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1u16] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1u32] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1u64] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1isize] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1i8] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1i16] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1i32] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1i64] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1.0f32] //~ ERROR: suffixed literals are not allowed in attributes
+#[path = 1.0f64] //~ ERROR: suffixed literals are not allowed in attributes
+fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// compile-flags: -Z parse-only
-
-// error-pattern:expected one of `=` or `]`
-
// asterisk is bogus
-#[attr*]
+#[path*] //~ ERROR expected one of `(` or `=`
mod m {}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z parse-only
-
-#[doc = $not_there] //~ error: unexpected token: `$`
-fn main() { }
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// compile-flags: -Z parse-only
-
-#[foo = 1usize] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1u8] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1u16] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1u32] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1u64] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1isize] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1i8] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1i16] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1i32] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1i64] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1.0f32] //~ ERROR: suffixed literals are not allowed in attributes
-#[foo = 1.0f64] //~ ERROR: suffixed literals are not allowed in attributes
-fn main() { }
#[attr_with_args(text = "Hello, world!")]
fn foo() {}
-fn main() {
- assert_eq!(foo(), "Hello, world!");
-}
+#[::attr_args::identity
+ fn main() { assert_eq!(foo(), "Hello, world!"); }]
+struct Dummy;
fn foo() -> &'static str { "Hello, world!" }
"#.parse().unwrap()
}
+
+#[proc_macro_attribute]
+pub fn identity(attr_args: TokenStream, _: TokenStream) -> TokenStream {
+ attr_args
+}
#[proc_macro_derive(B, attributes(B, C))]
pub fn derive(input: TokenStream) -> TokenStream {
let input = input.to_string();
- assert!(input.contains("#[B]"));
+ assert!(input.contains("#[B arbitrary tokens]"));
assert!(input.contains("struct B {"));
assert!(input.contains("#[C]"));
"".parse().unwrap()
// aux-build:derive-b.rs
// ignore-stage1
-#[macro_use]
+#![feature(proc_macro)]
+
extern crate derive_b;
-#[derive(Debug, PartialEq, B, Eq, Copy, Clone)]
-#[B]
+#[derive(Debug, PartialEq, derive_b::B, Eq, Copy, Clone)]
+#[cfg_attr(all(), B arbitrary tokens)]
struct B {
#[C]
a: u64
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! m { () => { $crate::main(); } }
t!(format!("{:?}", -0.0), "-0");
t!(format!("{:?}", 0.0), "0");
+ // sign aware zero padding
+ t!(format!("{:<3}", 1), "1 ");
+ t!(format!("{:>3}", 1), " 1");
+ t!(format!("{:^3}", 1), " 1 ");
+ t!(format!("{:03}", 1), "001");
+ t!(format!("{:<03}", 1), "001");
+ t!(format!("{:>03}", 1), "001");
+ t!(format!("{:^03}", 1), "001");
+ t!(format!("{:+03}", 1), "+01");
+ t!(format!("{:<+03}", 1), "+01");
+ t!(format!("{:>+03}", 1), "+01");
+ t!(format!("{:^+03}", 1), "+01");
+ t!(format!("{:#05x}", 1), "0x001");
+ t!(format!("{:<#05x}", 1), "0x001");
+ t!(format!("{:>#05x}", 1), "0x001");
+ t!(format!("{:^#05x}", 1), "0x001");
+ t!(format!("{:05}", 1.2), "001.2");
+ t!(format!("{:<05}", 1.2), "001.2");
+ t!(format!("{:>05}", 1.2), "001.2");
+ t!(format!("{:^05}", 1.2), "001.2");
+ t!(format!("{:05}", -1.2), "-01.2");
+ t!(format!("{:<05}", -1.2), "-01.2");
+ t!(format!("{:>05}", -1.2), "-01.2");
+ t!(format!("{:^05}", -1.2), "-01.2");
+ t!(format!("{:+05}", 1.2), "+01.2");
+ t!(format!("{:<+05}", 1.2), "+01.2");
+ t!(format!("{:>+05}", 1.2), "+01.2");
+ t!(format!("{:^+05}", 1.2), "+01.2");
// Ergonomic format_args!
t!(format!("{0:x} {0:X}", 15), "f F");
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Struct<K: 'static> {
+ pub field: K,
+}
+
+// Partial fix for #31260, doesn't work without {...}.
+static STRUCT: Struct<&'static [u8]> = Struct {
+ field: {&[1]}
+};
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[repr(u8)]
+enum Foo {
+ Foo(u8),
+}
+
+fn main() {
+ match Foo::Foo(1) {
+ _ => ()
+ }
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ println!("{}", 0E+10);
+ println!("{}", 0e+10);
+ println!("{}", 00e+10);
+ println!("{}", 00E+10);
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-pretty issue #37195
+
+#![allow(dead_code)]
+
+include!("auxiliary/issue_40469.rs");
+fn f() { m!(); }
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rvalue_static_promotion)]
+
+#[allow(unused_variables)]
+fn main() {
+ let x: &'static u32 = &42;
+ let y: &'static Option<u32> = &None;
+}
// (E.g. negative float to unsigned integer goes through a
// library routine on the default i686 platforms, and the
// implementation of that routine differs on e.g. Linux
- // vs. OSX, resulting in different answers.)
+ // vs. macOS, resulting in different answers.)
if $from::is_float() {
if !$to::in_range(A) { from.0 = 0 as $to; to.0 = 0 as $to; }
if !$to::in_range(B) { from.1 = 0 as $to; to.1 = 0 as $to; }
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="lib"]
+
+#![feature(const_fn)]
+
+pub struct Foo;
+
+impl Foo {
+ // @has const/struct.Foo.html '//*[@id="new.v"]//code' 'const unsafe fn new'
+ pub const unsafe fn new() -> Foo {
+ Foo
+ }
+}
error: cannot borrow immutable field `z.x` as mutable
--> $DIR/issue-39544.rs:21:18
|
+20 | let z = Z { x: X::Y };
+ | - consider changing this to `mut z`
21 | let _ = &mut z.x;
- | ^^^
+ | ^^^ cannot mutably borrow immutable field
error: aborting due to previous error
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo() {
+ println!("{:?}", (0..13).collect<Vec<i32>>());
+}
+
+fn bar() {
+ println!("{:?}", Vec<i32>::new());
+}
+
+fn qux() {
+ println!("{:?}", (0..13).collect<Vec<i32>());
+}
+
+fn main() {}
--- /dev/null
+error: chained comparison operators require parentheses
+ --> $DIR/issue-40396.rs:12:37
+ |
+12 | println!("{:?}", (0..13).collect<Vec<i32>>());
+ | ^^^^^^^^
+ |
+ = help: use `::<...>` instead of `<...>` if you meant to specify type arguments
+
+error: chained comparison operators require parentheses
+ --> $DIR/issue-40396.rs:16:25
+ |
+16 | println!("{:?}", Vec<i32>::new());
+ | ^^^^^^^
+ |
+ = help: use `::<...>` instead of `<...>` if you meant to specify type arguments
+
+error: chained comparison operators require parentheses
+ --> $DIR/issue-40396.rs:20:37
+ |
+20 | println!("{:?}", (0..13).collect<Vec<i32>());
+ | ^^^^^^^^
+ |
+ = help: use `::<...>` instead of `<...>` if you meant to specify type arguments
+
+error: chained comparison operators require parentheses
+ --> $DIR/issue-40396.rs:20:41
+ |
+20 | println!("{:?}", (0..13).collect<Vec<i32>());
+ | ^^^^^^
+ |
+ = help: use `::<...>` instead of `<...>` if you meant to specify type arguments
+
+error: aborting due to 4 previous errors
+
--> $DIR/E0536.rs:11:7
|
11 | #[cfg(not())] //~ ERROR E0536
- | ^^^^^
+ | ^^^
error: aborting due to previous error
--> $DIR/E0537.rs:11:7
|
11 | #[cfg(unknown())] //~ ERROR E0537
- | ^^^^^^^^^
+ | ^^^^^^^
error: aborting due to previous error
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_consts)]
+
+trait Tr {
+ const C: Self;
+}
+
+fn main() {
+ let a: u8 = Tr::C; //~ ERROR the trait bound `u8: Tr` is not satisfied
+}
--- /dev/null
+error[E0277]: the trait bound `u8: Tr` is not satisfied
+ --> $DIR/issue-29595.rs:18:17
+ |
+18 | let a: u8 = Tr::C; //~ ERROR the trait bound `u8: Tr` is not satisfied
+ | ^^^^^ the trait `Tr` is not implemented for `u8`
+ |
+ = note: required by `Tr::C`
+
+error: aborting due to previous error
+
let mut components = Vec::new();
let mut extensions = Vec::new();
- // rustc/rust-std/cargo are all required, and so is rust-mingw if it's
- // available for the target.
+ // rustc/rust-std/cargo/docs are all required, and so is rust-mingw
+ // if it's available for the target.
components.extend(vec![
Component { pkg: "rustc".to_string(), target: host.to_string() },
Component { pkg: "rust-std".to_string(), target: host.to_string() },
Component { pkg: "cargo".to_string(), target: host.to_string() },
+ Component { pkg: "rust-docs".to_string(), target: host.to_string() },
]);
if host.contains("pc-windows-gnu") {
components.push(Component {
});
}
- // Docs, other standard libraries, and the source package are all
- // optional.
- extensions.push(Component {
- pkg: "rust-docs".to_string(),
- target: host.to_string(),
- });
for target in TARGETS {
if target != host {
extensions.push(Component {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/// darwin_fd_limit exists to work around an issue where launchctl on Mac OS X
+/// darwin_fd_limit exists to work around an issue where launchctl on macOS
/// defaults the rlimit maxfiles to 256/unlimited. The default soft limit of 256
/// ends up being far too low for our multithreaded scheduler testing, depending
/// on the number of cores available.
use std::collections::HashSet;
use std::env;
use std::fmt;
-use std::fs::{self, File};
+use std::fs::{self, File, create_dir_all};
use std::io::prelude::*;
use std::io::{self, BufReader};
use std::path::{Path, PathBuf};
let out_dir = self.output_base_name().with_extension("pretty-out");
let _ = fs::remove_dir_all(&out_dir);
- self.create_dir_racy(&out_dir);
+ create_dir_all(&out_dir).unwrap();
// FIXME (#9639): This needs to handle non-utf8 paths
let mut args = vec!["-".to_owned(),
fn compose_and_run_compiler(&self, args: ProcArgs, input: Option<String>) -> ProcRes {
if !self.props.aux_builds.is_empty() {
- self.create_dir_racy(&self.aux_output_dir_name());
+ create_dir_all(&self.aux_output_dir_name()).unwrap();
}
let aux_dir = self.aux_output_dir_name();
input)
}
- // Like std::fs::create_dir_all, except handles concurrent calls among multiple
- // threads or processes.
- fn create_dir_racy(&self, path: &Path) {
- match fs::create_dir(path) {
- Ok(()) => return,
- Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return,
- Err(ref e) if e.kind() == io::ErrorKind::NotFound => {}
- Err(e) => panic!("failed to create dir {:?}: {}", path, e),
- }
- self.create_dir_racy(path.parent().unwrap());
- match fs::create_dir(path) {
- Ok(()) => {}
- Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {}
- Err(e) => panic!("failed to create dir {:?}: {}", path, e),
- }
- }
fn compose_and_run(&self,
ProcArgs{ args, prog }: ProcArgs,
let mir_dump_dir = self.get_mir_dump_dir();
- self.create_dir_racy(mir_dump_dir.as_path());
+ create_dir_all(mir_dump_dir.as_path()).unwrap();
let mut dir_opt = "dump-mir-dir=".to_string();
dir_opt.push_str(mir_dump_dir.to_str().unwrap());
debug!("dir_opt: {:?}", dir_opt);
let out_dir = self.output_base_name();
let _ = fs::remove_dir_all(&out_dir);
- self.create_dir_racy(&out_dir);
+ create_dir_all(&out_dir).unwrap();
let proc_res = self.document(&out_dir);
if !proc_res.status.success() {
if tmpdir.exists() {
self.aggressive_rm_rf(&tmpdir).unwrap();
}
- self.create_dir_racy(&tmpdir);
+ create_dir_all(&tmpdir).unwrap();
let host = &self.config.host;
let make = if host.contains("bitrig") || host.contains("dragonfly") ||