# version that we're using, 8.2, cannot compile LLVM for OSX 10.7.
- env: >
RUST_CHECK_TARGET=check
- RUST_CONFIGURE_ARGS=--build=x86_64-apple-darwin
+ RUST_CONFIGURE_ARGS="--build=x86_64-apple-darwin --enable-sanitizers"
SRC=.
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
SCCACHE_ERROR_LOG=/tmp/sccache.log
install: *osx_install_sccache
- env: >
RUST_CHECK_TARGET=dist
- RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-extended"
+ RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-extended --enable-sanitizers"
SRC=.
DEPLOY=1
RUSTC_RETRY_LINKER_ON_SEGFAULT=1
# 32/64-bit MinGW builds.
#
- # The MinGW builds unfortunately have to both download a custom toolchain and
- # avoid the one installed by AppVeyor by default. Interestingly, though, for
- # different reasons!
+ # We are using MinGW with posix threads since LLVM does not compile with
+ # the win32 threads version due to missing support for C++'s std::thread.
#
- # For 32-bit the installed gcc toolchain on AppVeyor uses the pthread
- # threading model. This is unfortunately not what we want, and if we compile
- # with it then there's lots of link errors in the standard library (undefined
- # references to pthread symbols).
- #
- # For 64-bit the installed gcc toolchain is currently 5.3.0 which
- # unfortunately segfaults on Windows with --enable-llvm-assertions (segfaults
- # in LLVM). See rust-lang/rust#28445 for more information, but to work around
- # this we go back in time to 4.9.2 specifically.
+ # Instead of relying on the MinGW version installed on appveyor we download
+ # and install one ourselves so we won't be surprised by changes to appveyor's
+ # build image.
#
# Finally, note that the downloads below are all in the `rust-lang-ci` S3
# bucket, but they clearly didn't originate there! The downloads originally
# came from the mingw-w64 SourceForge download site. Unfortunately
# SourceForge is notoriously flaky, so we mirror it on our own infrastructure.
- #
- # And as a final point of note, the 32-bit MinGW build using the makefiles do
- # *not* use debug assertions and llvm assertions. This is because they take
- # too long on appveyor and this is tested by rustbuild below.
- MSYS_BITS: 32
RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-ninja
SCRIPT: python x.py test
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
- MINGW_ARCHIVE: i686-6.2.0-release-win32-dwarf-rt_v5-rev1.7z
+ MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z
MINGW_DIR: mingw32
- MSYS_BITS: 64
SCRIPT: python x.py test
RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-ninja
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
- MINGW_ARCHIVE: x86_64-6.2.0-release-win32-seh-rt_v5-rev1.7z
+ MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z
MINGW_DIR: mingw64
# 32/64 bit MSVC and GNU deployment
RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-extended --enable-ninja
SCRIPT: python x.py dist
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
- MINGW_ARCHIVE: i686-6.2.0-release-win32-dwarf-rt_v5-rev1.7z
+ MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z
MINGW_DIR: mingw32
DEPLOY: 1
- MSYS_BITS: 64
SCRIPT: python x.py dist
RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-extended --enable-ninja
MINGW_URL: https://s3.amazonaws.com/rust-lang-ci/rust-ci-mirror
- MINGW_ARCHIVE: x86_64-6.2.0-release-win32-seh-rt_v5-rev1.7z
+ MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z
MINGW_DIR: mingw64
DEPLOY: 1
- if defined MINGW_URL 7z x -y %MINGW_ARCHIVE% > nul
- if defined MINGW_URL set PATH=%CD%\%MINGW_DIR%\bin;C:\msys64\usr\bin;%PATH%
+ # Here we do a pretty heinous thing which is to mangle the MinGW installation
+ # we just had above. Currently, as of this writing, we're using MinGW-w64
+ # builds of gcc, and that's currently at 6.3.0. We use 6.3.0 as it appears to
+ # be the first version which contains a fix for #40546, builds randomly
+ # failing during LLVM due to ar.exe/ranlib.exe failures.
+ #
+ # Unfortunately, though, 6.3.0 *also* is the first version of MinGW-w64 builds
+ # to contain a regression in gdb (#40184). As a result if we were to use the
+ # gdb provided (7.11.1) then we would fail all debuginfo tests.
+ #
+ # In order to fix spurious failures (pretty high priority) we use 6.3.0. To
+ # avoid disabling gdb tests we download an *old* version of gdb, specifically
+ # that found inside the 6.2.0 distribution. We then overwrite the 6.3.0 gdb
+ # with the 6.2.0 gdb to get tests passing.
+ #
+ # Note that we don't literally overwrite the gdb.exe binary because it appears
+ # to just use gdborig.exe, so that's the binary we deal with instead.
+ - if defined MINGW_URL appveyor-retry appveyor DownloadFile %MINGW_URL%/2017-04-20-%MSYS_BITS%bit-gdborig.exe
+ - if defined MINGW_URL mv 2017-04-20-%MSYS_BITS%bit-gdborig.exe %MINGW_DIR%\bin\gdborig.exe
+
# Otherwise pull in the MinGW installed on appveyor
- if NOT defined MINGW_URL set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH%
-Subproject commit c416fb60b11ecfd2a1ba0fb8567c9a92590b5d28
+Subproject commit 03efb7fc8b0dbb54973ee1b6188f3faf14fffe36
valopt arm-linux-androideabi-ndk "" "arm-linux-androideabi NDK standalone path"
valopt armv7-linux-androideabi-ndk "" "armv7-linux-androideabi NDK standalone path"
valopt aarch64-linux-android-ndk "" "aarch64-linux-android NDK standalone path"
+valopt x86_64-linux-android-ndk "" "x86_64-linux-android NDK standalone path"
valopt nacl-cross-path "" "NaCl SDK path (Pepper Canary is recommended). Must be absolute!"
valopt musl-root "/usr/local" "MUSL root installation directory (deprecated)"
valopt musl-root-x86_64 "" "x86_64-unknown-linux-musl install directory"
putvar CFG_ARM_LINUX_ANDROIDEABI_NDK
putvar CFG_ARMV7_LINUX_ANDROIDEABI_NDK
putvar CFG_I686_LINUX_ANDROID_NDK
+putvar CFG_X86_64_LINUX_ANDROID_NDK
putvar CFG_NACL_CROSS_PATH
putvar CFG_MANDIR
putvar CFG_DOCDIR
-Subproject commit 016cbc514cf44a2bd3fe806e8afa6b9c50287373
+Subproject commit 6ecff95fdc3ee7ceed2b9b0cc1a3a64876860bce
# The goal here is to come up with the same triple as LLVM would,
# at least for the subset of platforms we're willing to target.
if ostype == 'Linux':
- ostype = 'unknown-linux-gnu'
+ os = subprocess.check_output(['uname', '-o']).strip().decode(default_encoding)
+ if os == 'Android':
+ ostype = 'linux-android'
+ else:
+ ostype = 'unknown-linux-gnu'
elif ostype == 'FreeBSD':
ostype = 'unknown-freebsd'
elif ostype == 'DragonFly':
cputype = 'i686'
elif cputype in {'xscale', 'arm'}:
cputype = 'arm'
+ if ostype == 'linux-android':
+ ostype = 'linux-androideabi'
elif cputype == 'armv6l':
cputype = 'arm'
- ostype += 'eabihf'
+ if ostype == 'linux-android':
+ ostype = 'linux-androideabi'
+ else:
+ ostype += 'eabihf'
elif cputype in {'armv7l', 'armv8l'}:
cputype = 'armv7'
- ostype += 'eabihf'
- elif cputype == 'aarch64':
- cputype = 'aarch64'
- elif cputype == 'arm64':
+ if ostype == 'linux-android':
+ ostype = 'linux-androideabi'
+ else:
+ ostype += 'eabihf'
+ elif cputype in {'aarch64', 'arm64'}:
cputype = 'aarch64'
elif cputype == 'mips':
if sys.byteorder == 'big':
pub fn cargotest(build: &Build, stage: u32, host: &str) {
let ref compiler = Compiler::new(stage, host);
- // Configure PATH to find the right rustc. NB. we have to use PATH
- // and not RUSTC because the Cargo test suite has tests that will
- // fail if rustc is not spelled `rustc`.
- let path = build.sysroot(compiler).join("bin");
- let old_path = ::std::env::var("PATH").expect("");
- let sep = if cfg!(windows) { ";" } else {":" };
- let ref newpath = format!("{}{}{}", path.display(), sep, old_path);
-
// Note that this is a short, cryptic, and not scoped directory name. This
// is currently to minimize the length of path on Windows where we otherwise
// quickly run into path name limit constraints.
let _time = util::timeit();
let mut cmd = Command::new(build.tool(&Compiler::new(0, host), "cargotest"));
build.prepare_tool_cmd(compiler, &mut cmd);
- build.run(cmd.env("PATH", newpath)
- .arg(&build.cargo)
- .arg(&out_dir));
+ build.run(cmd.arg(&build.cargo)
+ .arg(&out_dir)
+ .env("RUSTC", build.compiler_path(compiler))
+ .env("RUSTDOC", build.rustdoc(compiler)))
+}
+
+/// Runs `cargo test` for `cargo` packaged with Rust.
+pub fn cargo(build: &Build, stage: u32, host: &str) {
+ let ref compiler = Compiler::new(stage, host);
+
+ // Configure PATH to find the right rustc. NB. we have to use PATH
+ // and not RUSTC because the Cargo test suite has tests that will
+ // fail if rustc is not spelled `rustc`.
+ let path = build.sysroot(compiler).join("bin");
+ let old_path = ::std::env::var("PATH").expect("");
+ let sep = if cfg!(windows) { ";" } else {":" };
+ let ref newpath = format!("{}{}{}", path.display(), sep, old_path);
+
+ let mut cargo = build.cargo(compiler, Mode::Tool, host, "test");
+ cargo.arg("--manifest-path").arg(build.src.join("cargo/Cargo.toml"));
+
+ // Don't build tests dynamically, just a pain to work with
+ cargo.env("RUSTC_NO_PREFER_DYNAMIC", "1");
+
+ // Don't run cross-compile tests, we may not have cross-compiled libstd libs
+ // available.
+ cargo.env("CFG_DISABLE_CROSS_TESTS", "1");
+
+ build.run(cargo.env("PATH", newpath));
}
/// Runs the `tidy` tool as compiled in `stage` by the `host` compiler.
if target.contains("musl") && !target.contains("mips") {
copy_musl_third_party_objects(build, target, &libdir);
}
+
+ if build.config.sanitizers && compiler.stage != 0 && target == "x86_64-apple-darwin" {
+ // The sanitizers are only built in stage1 or above, so the dylibs will
+ // be missing in stage0 and cause a panic. See the `std()` function above
+ // for reason why the sanitizers are not built in stage0.
+ copy_apple_sanitizer_dylibs(&build.native_dir(target), "osx", &libdir);
+ }
}
/// Copies the crt(1,i,n).o startup objects
}
}
+fn copy_apple_sanitizer_dylibs(native_dir: &Path, platform: &str, into: &Path) {
+ for &sanitizer in &["asan", "tsan"] {
+ let filename = format!("libclang_rt.{}_{}_dynamic.dylib", sanitizer, platform);
+ let mut src_path = native_dir.join(sanitizer);
+ src_path.push("build");
+ src_path.push("lib");
+ src_path.push("darwin");
+ src_path.push(&filename);
+ copy(&src_path, &into.join(filename));
+ }
+}
+
/// Build and prepare startup objects like rsbegin.o and rsend.o
///
/// These are primarily used on Windows right now for linking executables/dlls.
if !up_to_date(src_file, dst_file) {
let mut cmd = Command::new(&compiler_path);
build.run(cmd.env("RUSTC_BOOTSTRAP", "1")
+ .arg("--cfg").arg(format!("stage{}", compiler.stage))
.arg("--target").arg(target)
.arg("--emit=obj")
.arg("--out-dir").arg(dst_dir)
.or_insert(Target::default());
target.ndk = Some(parse_configure_path(value));
}
+ "CFG_X86_64_LINUX_ANDROID_NDK" if value.len() > 0 => {
+ let target = "x86_64-linux-android".to_string();
+ let target = self.target_config.entry(target)
+ .or_insert(Target::default());
+ target.ndk = Some(parse_configure_path(value));
+ }
"CFG_LOCAL_RUST_ROOT" if value.len() > 0 => {
let path = parse_configure_path(value);
self.rustc = Some(push_exe_path(path.clone(), &["bin", "rustc"]));
install(&build.src.join("src/etc/").join(file), &dst, 0o644);
};
if host.contains("windows-msvc") {
- // no debugger scripts
+ // windbg debugger scripts
+ install(&build.src.join("src/etc/rust-windbg.cmd"), &sysroot.join("bin"),
+ 0o755);
+
+ cp_debugger_script("natvis/libcore.natvis");
+ cp_debugger_script("natvis/libcollections.natvis");
} else {
cp_debugger_script("debugger_pretty_printers_common.py");
cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string());
}
+ // When being built Cargo will at some point call `nmake.exe` on Windows
+ // MSVC. Unfortunately `nmake` will read these two environment variables
+ // below and try to interpret them. We're likely being run, however, from
+ // MSYS `make` which uses the same variables.
+ //
+ // As a result, to prevent confusion and errors, we remove these
+ // variables from our environment to prevent passing MSYS make flags to
+ // nmake, causing it to blow up.
+ if cfg!(target_env = "msvc") {
+ cargo.env_remove("MAKE");
+ cargo.env_remove("MAKEFLAGS");
+ }
+
// Environment variables *required* needed throughout the build
//
// FIXME: should update code to not require this env var
if target.contains("apple-darwin") {
base.push("-stdlib=libc++".into());
}
+
+ // Work around an apparently bad MinGW / GCC optimization,
+ // See: http://lists.llvm.org/pipermail/cfe-dev/2016-December/051980.html
+ // See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78936
+ if target == "i686-pc-windows-gnu" {
+ base.push("-fno-omit-frame-pointer".into());
+ }
return base
}
// the dependency graph and what `-p` arguments there are.
let mut cargo = Command::new(&build.cargo);
cargo.arg("metadata")
+ .arg("--format-version").arg("1")
.arg("--manifest-path").arg(build.src.join(krate).join("Cargo.toml"));
let output = output(&mut cargo);
let output: Output = json::decode(&output).unwrap();
check-aux:
$(Q)$(BOOTSTRAP) test \
src/tools/cargotest \
+ cargo \
src/test/pretty \
src/test/run-pass/pretty \
src/test/run-fail/pretty \
.dep(|s| s.name("librustc"))
.host(true)
.run(move |s| check::cargotest(build, s.stage, s.target));
+ rules.test("check-cargo", "cargo")
+ .dep(|s| s.name("tool-cargo"))
+ .host(true)
+ .run(move |s| check::cargo(build, s.stage, s.target));
rules.test("check-tidy", "src/tools/tidy")
.dep(|s| s.name("tool-tidy").stage(0))
.default(true)
let out_dir = env::var_os("RUSTBUILD_NATIVE_DIR").unwrap_or(env::var_os("OUT_DIR").unwrap());
let out_dir = PathBuf::from(out_dir).join(out_name);
t!(create_dir_racy(&out_dir));
- println!("cargo:rustc-link-lib=static={}", link_name);
+ if link_name.contains('=') {
+ println!("cargo:rustc-link-lib={}", link_name);
+ } else {
+ println!("cargo:rustc-link-lib=static={}", link_name);
+ }
println!("cargo:rustc-link-search=native={}", out_dir.join(search_subdir).display());
let timestamp = out_dir.join("rustbuild.timestamp");
}
}
+pub fn sanitizer_lib_boilerplate(sanitizer_name: &str) -> Result<NativeLibBoilerplate, ()> {
+ let (link_name, search_path) = match &*env::var("TARGET").unwrap() {
+ "x86_64-unknown-linux-gnu" => (
+ format!("clang_rt.{}-x86_64", sanitizer_name),
+ "build/lib/linux",
+ ),
+ "x86_64-apple-darwin" => (
+ format!("dylib=clang_rt.{}_osx_dynamic", sanitizer_name),
+ "build/lib/darwin",
+ ),
+ _ => return Err(()),
+ };
+ native_lib_boilerplate("compiler-rt", sanitizer_name, &link_name, search_path)
+}
+
fn dir_up_to_date(src: &Path, threshold: &FileTime) -> bool {
t!(fs::read_dir(src)).map(|e| t!(e)).all(|e| {
let meta = t!(e.metadata());
chmod +x /usr/local/bin/sccache
ENV TARGETS=arm-linux-androideabi
+ENV TARGETS=$TARGETS,armv7-linux-androideabi
ENV TARGETS=$TARGETS,i686-linux-android
ENV TARGETS=$TARGETS,aarch64-linux-android
-ENV TARGETS=$TARGETS,armv7-linux-androideabi
+ENV TARGETS=$TARGETS,x86_64-linux-android
ENV RUST_CONFIGURE_ARGS \
--target=$TARGETS \
--arm-linux-androideabi-ndk=/android/ndk-arm-9 \
--armv7-linux-androideabi-ndk=/android/ndk-arm-9 \
--i686-linux-android-ndk=/android/ndk-x86-9 \
- --aarch64-linux-android-ndk=/android/ndk-aarch64
+ --aarch64-linux-android-ndk=/android/ndk-arm64-21 \
+ --x86_64-linux-android-ndk=/android/ndk-x86_64-21
ENV SCRIPT python2.7 ../x.py dist --target $TARGETS
bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \
--platform=android-21 \
--toolchain=aarch64-linux-android-4.9 \
- --install-dir=/android/ndk-aarch64 \
+ --install-dir=/android/ndk-arm64-21 \
--ndk-dir=/android/android-ndk-r11c \
--arch=arm64
bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \
--install-dir=/android/ndk-x86-9 \
--ndk-dir=/android/android-ndk-r11c \
--arch=x86
+bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \
+ --platform=android-21 \
+ --toolchain=x86_64-4.9 \
+ --install-dir=/android/ndk-x86_64-21 \
+ --ndk-dir=/android/android-ndk-r11c \
+ --arch=x86_64
rm -rf ./android-ndk-r11c-linux-x86_64.zip ./android-ndk-r11c
COPY build-emscripten.sh /tmp/
RUN ./build-emscripten.sh
ENV PATH=$PATH:/tmp/emsdk_portable
-ENV PATH=$PATH:/tmp/emsdk_portable/clang/tag-e1.37.1/build_tag-e1.37.1_32/bin
+ENV PATH=$PATH:/tmp/emsdk_portable/clang/tag-e1.37.10/build_tag-e1.37.10_32/bin
ENV PATH=$PATH:/tmp/emsdk_portable/node/4.1.1_32bit/bin
-ENV PATH=$PATH:/tmp/emsdk_portable/emscripten/tag-1.37.1
-ENV EMSCRIPTEN=/tmp/emsdk_portable/emscripten/tag-1.37.1
+ENV PATH=$PATH:/tmp/emsdk_portable/emscripten/tag-1.37.10
+ENV EMSCRIPTEN=/tmp/emsdk_portable/emscripten/tag-1.37.10
ENV RUST_CONFIGURE_ARGS --target=asmjs-unknown-emscripten
source emsdk_portable/emsdk_env.sh
hide_output emsdk update
-hide_output emsdk install --build=Release sdk-tag-1.37.1-32bit
-hide_output emsdk activate --build=Release sdk-tag-1.37.1-32bit
+hide_output emsdk install --build=Release sdk-tag-1.37.10-32bit
+hide_output emsdk activate --build=Release sdk-tag-1.37.10-32bit
-Subproject commit d30da544a8afc5d78391dee270bdf40e74a215d3
+Subproject commit c8a8767c56ad3d3f4eb45c87b95026936fb9aa35
### Type parameter bounds
```antlr
+bound-list := bound | bound '+' bound-list '+' ?
bound := ty_bound | lt_bound
lt_bound := lifetime
-ty_bound := [?] [ for<lt_param_defs> ] simple_path
-bound-list := bound | bound '+' bound-list '+' ?
+ty_bound := ty_bound_noparen | (ty_bound_noparen)
+ty_bound_noparen := [?] [ for<lt_param_defs> ] simple_path
```
### Self types
* [The Rustonomicon][nomicon] is your guidebook to the dark arts of unsafe Rust.
* [The Reference][ref] is not a formal spec, but is more detailed and comprehensive than the book.
+Initially, documentation lands in the Unstable Book, and then, as part of the
+stabilization process, is moved into the Book, Nomicon, or Reference.
+
Another few words about the reference: it is guaranteed to be accurate, but not
-complete. We now have a policy that all new features must be included in the
-reference before stabilization; however, we are still back-filling things that
-landed before then. That work is being tracked [here][38643].
+complete. We have a policy that features must have documentation to be stabilized,
+but we did not always have this policy, and so there are some stable things that
+are not yet in the reference. We're working on back-filling things that landed
+before this policy was put into place. That work is being tracked
+[here][refchecklist].
[Rust Learning]: https://github.com/ctjhoa/rust-learning
[Docs.rs]: https://docs.rs/
[api]: std/index.html
[ref]: reference/index.html
-[38643]: https://github.com/rust-lang/rust/issues/38643
+[refchecklist]: https://github.com/rust-lang-nursery/reference/issues/9
[err]: error-index.html
[book]: book/index.html
[nomicon]: nomicon/index.html
- [proc_macro](language-features/proc-macro.md)
- [quote](language-features/quote.md)
- [relaxed_adts](language-features/relaxed-adts.md)
+ - [repr_align](language-features/repr-align.md)
- [repr_simd](language-features/repr-simd.md)
- [rustc_attrs](language-features/rustc-attrs.md)
- [rustc_diagnostic_macros](language-features/rustc-diagnostic-macros.md)
- [alloc_system](library-features/alloc-system.md)
- [alloc](library-features/alloc.md)
- [as_c_str](library-features/as-c-str.md)
- - [as_unsafe_cell](library-features/as-unsafe-cell.md)
- [ascii_ctype](library-features/ascii-ctype.md)
- - [binary_heap_extras](library-features/binary-heap-extras.md)
- [binary_heap_peek_mut_pop](library-features/binary-heap-peek-mut-pop.md)
- - [borrow_state](library-features/borrow-state.md)
- [box_heap](library-features/box-heap.md)
- [c_void_variant](library-features/c-void-variant.md)
- [char_escape_debug](library-features/char-escape-debug.md)
- [derive_clone_copy](library-features/derive-clone-copy.md)
- [derive_eq](library-features/derive-eq.md)
- [discriminant_value](library-features/discriminant-value.md)
- - [enumset](library-features/enumset.md)
- [error_type_id](library-features/error-type-id.md)
- [exact_size_is_empty](library-features/exact-size-is-empty.md)
- [fd](library-features/fd.md)
- [fd_read](library-features/fd-read.md)
- [fixed_size_array](library-features/fixed-size-array.md)
- [float_bits_conv](library-features/float-bits-conv.md)
- - [float_extras](library-features/float-extras.md)
- [flt2dec](library-features/flt2dec.md)
- [fmt_flags_align](library-features/fmt-flags-align.md)
- [fmt_internals](library-features/fmt-internals.md)
- [io_error_internals](library-features/io-error-internals.md)
- [io](library-features/io.md)
- [ip](library-features/ip.md)
- - [is_unique](library-features/is-unique.md)
- [iter_rfind](library-features/iter-rfind.md)
- [libstd_io_internals](library-features/libstd-io-internals.md)
- [libstd_sys_internals](library-features/libstd-sys-internals.md)
- [linked_list_extras](library-features/linked-list-extras.md)
- [lookup_host](library-features/lookup-host.md)
- [manually_drop](library-features/manually-drop.md)
- - [map_entry_recover_keys](library-features/map-entry-recover-keys.md)
+ - [more_io_inner_methods](library-features/more-io-inner-methods.md)
- [mpsc_select](library-features/mpsc-select.md)
- [n16](library-features/n16.md)
- [never_type_impls](library-features/never-type-impls.md)
- [rand](library-features/rand.md)
- [range_contains](library-features/range-contains.md)
- [raw](library-features/raw.md)
- - [rc_would_unwrap](library-features/rc-would-unwrap.md)
- [retain_hash_collection](library-features/retain-hash-collection.md)
- [reverse_cmp_key](library-features/reverse-cmp-key.md)
- [rt](library-features/rt.md)
- [slice_rsplit](library-features/slice-rsplit.md)
- [sort_internals](library-features/sort-internals.md)
- [sort_unstable](library-features/sort-unstable.md)
+ - [splice](library-features/splice.md)
- [step_by](library-features/step-by.md)
- [step_trait](library-features/step-trait.md)
- [str_checked_slicing](library-features/str-checked-slicing.md)
- [str_escape](library-features/str-escape.md)
- [str_internals](library-features/str-internals.md)
+ - [str_box_extras](library-features/str-box-extras.md)
- [str_mut_extras](library-features/str-mut-extras.md)
- [test](library-features/test.md)
- [thread_id](library-features/thread-id.md)
- [windows_handle](library-features/windows-handle.md)
- [windows_net](library-features/windows-net.md)
- [windows_stdio](library-features/windows-stdio.md)
- - [zero_one](library-features/zero-one.md)
->>>>>> Add top level sections to the Unstable Book.
--- /dev/null
+# `repr_align`
+
+The tracking issue for this feature is: [#33626]
+
+[#33626]: https://github.com/rust-lang/rust/issues/33626
+
+------------------------
+
+
+
+
+++ /dev/null
-# `as_unsafe_cell`
-
-The tracking issue for this feature is: [#27708]
-
-[#27708]: https://github.com/rust-lang/rust/issues/27708
-
-------------------------
+++ /dev/null
-# `binary_heap_extras`
-
-The tracking issue for this feature is: [#28147]
-
-[#28147]: https://github.com/rust-lang/rust/issues/28147
-
-------------------------
+++ /dev/null
-# `borrow_state`
-
-The tracking issue for this feature is: [#27733]
-
-[#27733]: https://github.com/rust-lang/rust/issues/27733
-
-------------------------
+++ /dev/null
-# `enumset`
-
-The tracking issue for this feature is: [#37966]
-
-[#37966]: https://github.com/rust-lang/rust/issues/37966
-
-------------------------
+++ /dev/null
-# `float_extras`
-
-The tracking issue for this feature is: [#27752]
-
-[#27752]: https://github.com/rust-lang/rust/issues/27752
-
-------------------------
+++ /dev/null
-# `is_unique`
-
-The tracking issue for this feature is: [#28356]
-
-[#28356]: https://github.com/rust-lang/rust/issues/28356
-
-------------------------
+++ /dev/null
-# `map_entry_recover_keys`
-
-The tracking issue for this feature is: [#34285]
-
-[#34285]: https://github.com/rust-lang/rust/issues/34285
--- /dev/null
+# `more_io_inner_methods`
+
+The tracking issue for this feature is: [#41519]
+
+[#41519]: https://github.com/rust-lang/rust/issues/41519
+
+------------------------
+
+This feature enables several internal accessor methods on structures in
+`std::io` including `Take::{get_ref, get_mut}` and `Chain::{into_inner, get_ref,
+get_mut}`.
+++ /dev/null
-# `rc_would_unwrap`
-
-The tracking issue for this feature is: [#28356]
-
-[#28356]: https://github.com/rust-lang/rust/issues/28356
--- /dev/null
+# `splice`
+
+The tracking issue for this feature is: [#32310]
+
+[#32310]: https://github.com/rust-lang/rust/issues/32310
+
+------------------------
+
+The `splice()` method on `Vec` and `String` allows you to replace a range
+of values in a vector or string with another range of values, and returns
+the replaced values.
+
+A simple example:
+
+```rust
+#![feature(splice)]
+let mut s = String::from("α is alpha, β is beta");
+let beta_offset = s.find('β').unwrap_or(s.len());
+
+// Replace the range up until the β from the string
+let t: String = s.splice(..beta_offset, "Α is capital alpha; ").collect();
+assert_eq!(t, "α is alpha, ");
+assert_eq!(s, "Α is capital alpha; β is beta");
+```
\ No newline at end of file
--- /dev/null
+# `str_box_extras`
+
+The tracking issue for this feature is: [#41119]
+
+[#41119]: https://github.com/rust-lang/rust/issues/41119
+
+------------------------
+
+
+++ /dev/null
-# `zero_one`
-
-The tracking issue for this feature is: [#27739]
-
-[#27739]: https://github.com/rust-lang/rust/issues/27739
-
-------------------------
GDB_PYTHON_MODULE_DIRECTORY="$RUSTC_SYSROOT/lib/rustlib/etc"
# Run GDB with the additional arguments that load the pretty printers
-PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" gdb \
+# Set the environment variable `RUST_GDB` to overwrite the call to a
+# different/specific command (defaults to `gdb`).
+RUST_GDB="${RUST_GDB:-gdb}"
+PYTHONPATH="$PYTHONPATH:$GDB_PYTHON_MODULE_DIRECTORY" ${RUST_GDB} \
-d "$GDB_PYTHON_MODULE_DIRECTORY" \
-iex "add-auto-load-safe-path $GDB_PYTHON_MODULE_DIRECTORY" \
"$@"
--- /dev/null
+@echo off
+setlocal
+
+REM Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+REM file at the top-level directory of this distribution and at
+REM http://rust-lang.org/COPYRIGHT.
+REM
+REM Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+REM http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+REM <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+REM option. This file may not be copied, modified, or distributed
+REM except according to those terms.
+
+for /f "delims=" %%i in ('rustc --print=sysroot') do set rustc_sysroot=%%i
+
+set rust_etc=%rustc_sysroot%\lib\rustlib\etc
+
+windbg -c ".nvload %rust_etc%\libcore.natvis;.nvload %rust_etc%\libcollections.natvis;" %*
\ No newline at end of file
use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer};
use core::ptr::{self, Unique};
use core::convert::From;
+use str::from_boxed_utf8_unchecked;
/// A value that represents the heap. This is the default place that the `box`
/// keyword allocates into when no place is supplied.
#[stable(feature = "default_box_extra", since = "1.17.0")]
impl Default for Box<str> {
fn default() -> Box<str> {
- let default: Box<[u8]> = Default::default();
- unsafe { mem::transmute(default) }
+ unsafe { from_boxed_utf8_unchecked(Default::default()) }
}
}
let buf = RawVec::with_capacity(len);
unsafe {
ptr::copy_nonoverlapping(self.as_ptr(), buf.ptr(), len);
- mem::transmute(buf.into_box()) // bytes to str ~magic
+ from_boxed_utf8_unchecked(buf.into_box())
}
}
}
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<'a> From<&'a str> for Box<str> {
fn from(s: &'a str) -> Box<str> {
- let boxed: Box<[u8]> = Box::from(s.as_bytes());
- unsafe { mem::transmute(boxed) }
+ unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
+ }
+}
+
+#[stable(feature = "boxed_str_conv", since = "1.18.0")]
+impl From<Box<str>> for Box<[u8]> {
+ fn from(s: Box<str>) -> Self {
+ unsafe {
+ mem::transmute(s)
+ }
}
}
issue = "27700")]
use core::{isize, usize};
-#[cfg(not(test))]
use core::intrinsics::{min_align_of_val, size_of_val};
#[allow(improper_ctypes)]
}
}
-#[cfg(not(test))]
-#[lang = "box_free"]
+#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+pub(crate) unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
let size = size_of_val(&*ptr);
let align = min_align_of_val(&*ptr);
// We do not allocate for Box<T> when T is ZST, so deallocation is also not necessary.
#![feature(needs_allocator)]
#![feature(optin_builtin_traits)]
#![feature(placement_in_syntax)]
+#![cfg_attr(stage0, feature(pub_restricted))]
#![feature(shared)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
pub mod arc;
pub mod rc;
pub mod raw_vec;
+#[unstable(feature = "str_box_extras", issue = "41119")]
+pub mod str;
pub mod oom;
pub use oom::oom;
use core::ptr::{self, Shared};
use core::convert::From;
-use heap::deallocate;
+use heap::{allocate, deallocate, box_free};
use raw_vec::RawVec;
struct RcBox<T: ?Sized> {
value: T,
}
-
/// A single-threaded reference-counting pointer.
///
/// See the [module-level documentation](./index.html) for more details.
}
}
- /// Checks whether [`Rc::try_unwrap`][try_unwrap] would return
- /// [`Ok`].
- ///
- /// [try_unwrap]: struct.Rc.html#method.try_unwrap
- /// [`Ok`]: ../../std/result/enum.Result.html#variant.Ok
- #[unstable(feature = "rc_would_unwrap",
- reason = "just added for niche usecase",
- issue = "28356")]
- #[rustc_deprecated(since = "1.15.0", reason = "too niche; use `strong_count` instead")]
- pub fn would_unwrap(this: &Self) -> bool {
- Rc::strong_count(&this) == 1
- }
-
/// Consumes the `Rc`, returning the wrapped pointer.
///
/// To avoid a memory leak the pointer must be converted back to an `Rc` using
}
}
+impl<T> Rc<[T]> {
+ /// Constructs a new `Rc<[T]>` from a `Box<[T]>`.
+ #[doc(hidden)]
+ #[unstable(feature = "rustc_private",
+ reason = "for internal use in rustc",
+ issue = "0")]
+ pub fn __from_array(value: Box<[T]>) -> Rc<[T]> {
+ unsafe {
+ let ptr: *mut RcBox<[T]> =
+ mem::transmute([mem::align_of::<RcBox<[T; 1]>>(), value.len()]);
+ // FIXME(custom-DST): creating this invalid &[T] is dubiously defined,
+ // we should have a better way of getting the size/align
+ // of a DST from its unsized part.
+ let ptr = allocate(size_of_val(&*ptr), align_of_val(&*ptr));
+ let ptr: *mut RcBox<[T]> = mem::transmute([ptr as usize, value.len()]);
+
+ // Initialize the new RcBox.
+ ptr::write(&mut (*ptr).strong, Cell::new(1));
+ ptr::write(&mut (*ptr).weak, Cell::new(1));
+ ptr::copy_nonoverlapping(
+ value.as_ptr(),
+ &mut (*ptr).value as *mut [T] as *mut T,
+ value.len());
+
+ // Free the original allocation without freeing its (moved) contents.
+ box_free(Box::into_raw(value));
+
+ Rc { ptr: Shared::new(ptr as *const _) }
+ }
+ }
+}
+
impl<T: ?Sized> Rc<T> {
/// Creates a new [`Weak`][weak] pointer to this value.
///
///
/// [weak]: struct.Weak.html
#[inline]
- #[unstable(feature = "is_unique", reason = "uniqueness has unclear meaning",
- issue = "28356")]
- #[rustc_deprecated(since = "1.15.0",
- reason = "too niche; use `strong_count` and `weak_count` instead")]
- pub fn is_unique(this: &Self) -> bool {
+ fn is_unique(this: &Self) -> bool {
Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1
}
--- /dev/null
+// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Methods for dealing with boxed strings.
+use core::mem;
+
+use boxed::Box;
+
+/// Converts a boxed slice of bytes to a boxed string slice without checking
+/// that the string contains valid UTF-8.
+#[unstable(feature = "str_box_extras", issue = "41119")]
+pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
+ mem::transmute(v)
+}
self.sift_up(0, old_len);
}
- /// Pushes an item onto the binary heap, then pops the greatest item off the queue in
- /// an optimized fashion.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_extras)]
- /// #![allow(deprecated)]
- ///
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.push(1);
- /// heap.push(5);
- ///
- /// assert_eq!(heap.push_pop(3), 5);
- /// assert_eq!(heap.push_pop(9), 9);
- /// assert_eq!(heap.len(), 2);
- /// assert_eq!(heap.peek(), Some(&3));
- /// ```
- #[unstable(feature = "binary_heap_extras",
- reason = "needs to be audited",
- issue = "28147")]
- #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")]
- pub fn push_pop(&mut self, mut item: T) -> T {
- match self.data.get_mut(0) {
- None => return item,
- Some(top) => {
- if *top > item {
- swap(&mut item, top);
- } else {
- return item;
- }
- }
- }
-
- self.sift_down(0);
- item
- }
-
- /// Pops the greatest item off the binary heap, then pushes an item onto the queue in
- /// an optimized fashion. The push is done regardless of whether the binary heap
- /// was empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_extras)]
- /// #![allow(deprecated)]
- ///
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- ///
- /// assert_eq!(heap.replace(1), None);
- /// assert_eq!(heap.replace(3), Some(1));
- /// assert_eq!(heap.len(), 1);
- /// assert_eq!(heap.peek(), Some(&3));
- /// ```
- #[unstable(feature = "binary_heap_extras",
- reason = "needs to be audited",
- issue = "28147")]
- #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")]
- pub fn replace(&mut self, mut item: T) -> Option<T> {
- if !self.is_empty() {
- swap(&mut item, &mut self.data[0]);
- self.sift_down(0);
- Some(item)
- } else {
- self.push(item);
- None
- }
- }
-
/// Consumes the `BinaryHeap` and returns the underlying vector
/// in arbitrary order.
///
/// An owning iterator over the elements of a `BinaryHeap`.
///
-/// This `struct` is created by the [`into_iter`] method on [`BinaryHeap`]
+/// This `struct` is created by the [`into_iter`] method on [`BinaryHeap`][`BinaryHeap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.BinaryHeap.html#method.into_iter
/// An owning iterator over the entries of a `BTreeMap`.
///
-/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
+/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`][`BTreeMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.BTreeMap.html#method.into_iter
self.handle.reborrow().into_kv().0
}
- /// Deprecated, renamed to `remove_entry`
- #[unstable(feature = "map_entry_recover_keys", issue = "34285")]
- #[rustc_deprecated(since = "1.12.0", reason = "renamed to `remove_entry`")]
- pub fn remove_pair(self) -> (K, V) {
- self.remove_entry()
- }
-
/// Take ownership of the key and value from the map.
///
/// # Examples
/// An owning iterator over the items of a `BTreeSet`.
///
-/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`]
+/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`][`BTreeSet`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`BTreeSet`]: struct.BTreeSet.html
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A structure for holding a set of enum variants.
-//!
-//! This module defines a container which uses an efficient bit mask
-//! representation to hold C-like enum variants.
-
-#![unstable(feature = "enumset",
- reason = "matches collection reform specification, \
- waiting for dust to settle",
- issue = "37966")]
-#![rustc_deprecated(since = "1.16.0", reason = "long since replaced")]
-#![allow(deprecated)]
-
-use core::marker;
-use core::fmt;
-use core::iter::{FromIterator, FusedIterator};
-use core::ops::{Sub, BitOr, BitAnd, BitXor};
-
-// FIXME(contentions): implement union family of methods? (general design may be
-// wrong here)
-
-/// A specialized set implementation to use enum types.
-///
-/// It is a logic error for an item to be modified in such a way that the
-/// transformation of the item to or from a `usize`, as determined by the
-/// `CLike` trait, changes while the item is in the set. This is normally only
-/// possible through `Cell`, `RefCell`, global state, I/O, or unsafe code.
-#[derive(PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct EnumSet<E> {
- // We must maintain the invariant that no bits are set
- // for which no variant exists
- bits: usize,
- marker: marker::PhantomData<E>,
-}
-
-impl<E> Copy for EnumSet<E> {}
-
-impl<E> Clone for EnumSet<E> {
- fn clone(&self) -> EnumSet<E> {
- *self
- }
-}
-
-impl<E: CLike + fmt::Debug> fmt::Debug for EnumSet<E> {
- fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- fmt.debug_set().entries(self).finish()
- }
-}
-
-/// An interface for casting C-like enum to usize and back.
-/// A typically implementation is as below.
-///
-/// ```{rust,ignore}
-/// #[repr(usize)]
-/// enum Foo {
-/// A, B, C
-/// }
-///
-/// impl CLike for Foo {
-/// fn to_usize(&self) -> usize {
-/// *self as usize
-/// }
-///
-/// fn from_usize(v: usize) -> Foo {
-/// unsafe { mem::transmute(v) }
-/// }
-/// }
-/// ```
-pub trait CLike {
- /// Converts a C-like enum to a `usize`.
- fn to_usize(&self) -> usize;
- /// Converts a `usize` to a C-like enum.
- fn from_usize(usize) -> Self;
-}
-
-fn bit<E: CLike>(e: &E) -> usize {
- use core::mem;
- let value = e.to_usize();
- let bits = mem::size_of::<usize>() * 8;
- assert!(value < bits,
- "EnumSet only supports up to {} variants.",
- bits - 1);
- 1 << value
-}
-
-impl<E: CLike> EnumSet<E> {
- /// Returns an empty `EnumSet`.
- pub fn new() -> EnumSet<E> {
- EnumSet {
- bits: 0,
- marker: marker::PhantomData,
- }
- }
-
- /// Returns the number of elements in the given `EnumSet`.
- pub fn len(&self) -> usize {
- self.bits.count_ones() as usize
- }
-
- /// Returns `true` if the `EnumSet` is empty.
- pub fn is_empty(&self) -> bool {
- self.bits == 0
- }
-
- pub fn clear(&mut self) {
- self.bits = 0;
- }
-
- /// Returns `false` if the `EnumSet` contains any enum of the given `EnumSet`.
- pub fn is_disjoint(&self, other: &EnumSet<E>) -> bool {
- (self.bits & other.bits) == 0
- }
-
- /// Returns `true` if a given `EnumSet` is included in this `EnumSet`.
- pub fn is_superset(&self, other: &EnumSet<E>) -> bool {
- (self.bits & other.bits) == other.bits
- }
-
- /// Returns `true` if this `EnumSet` is included in the given `EnumSet`.
- pub fn is_subset(&self, other: &EnumSet<E>) -> bool {
- other.is_superset(self)
- }
-
- /// Returns the union of both `EnumSets`.
- pub fn union(&self, e: EnumSet<E>) -> EnumSet<E> {
- EnumSet {
- bits: self.bits | e.bits,
- marker: marker::PhantomData,
- }
- }
-
- /// Returns the intersection of both `EnumSets`.
- pub fn intersection(&self, e: EnumSet<E>) -> EnumSet<E> {
- EnumSet {
- bits: self.bits & e.bits,
- marker: marker::PhantomData,
- }
- }
-
- /// Adds an enum to the `EnumSet`, and returns `true` if it wasn't there before
- pub fn insert(&mut self, e: E) -> bool {
- let result = !self.contains(&e);
- self.bits |= bit(&e);
- result
- }
-
- /// Removes an enum from the EnumSet
- pub fn remove(&mut self, e: &E) -> bool {
- let result = self.contains(e);
- self.bits &= !bit(e);
- result
- }
-
- /// Returns `true` if an `EnumSet` contains a given enum.
- pub fn contains(&self, e: &E) -> bool {
- (self.bits & bit(e)) != 0
- }
-
- /// Returns an iterator over an `EnumSet`.
- pub fn iter(&self) -> Iter<E> {
- Iter::new(self.bits)
- }
-}
-
-impl<E: CLike> Sub for EnumSet<E> {
- type Output = EnumSet<E>;
-
- fn sub(self, e: EnumSet<E>) -> EnumSet<E> {
- EnumSet {
- bits: self.bits & !e.bits,
- marker: marker::PhantomData,
- }
- }
-}
-
-impl<E: CLike> BitOr for EnumSet<E> {
- type Output = EnumSet<E>;
-
- fn bitor(self, e: EnumSet<E>) -> EnumSet<E> {
- EnumSet {
- bits: self.bits | e.bits,
- marker: marker::PhantomData,
- }
- }
-}
-
-impl<E: CLike> BitAnd for EnumSet<E> {
- type Output = EnumSet<E>;
-
- fn bitand(self, e: EnumSet<E>) -> EnumSet<E> {
- EnumSet {
- bits: self.bits & e.bits,
- marker: marker::PhantomData,
- }
- }
-}
-
-impl<E: CLike> BitXor for EnumSet<E> {
- type Output = EnumSet<E>;
-
- fn bitxor(self, e: EnumSet<E>) -> EnumSet<E> {
- EnumSet {
- bits: self.bits ^ e.bits,
- marker: marker::PhantomData,
- }
- }
-}
-
-/// An iterator over an `EnumSet`
-pub struct Iter<E> {
- index: usize,
- bits: usize,
- marker: marker::PhantomData<E>,
-}
-
-impl<E: fmt::Debug> fmt::Debug for Iter<E> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_tuple("Iter")
- .field(&self.index)
- .field(&self.bits)
- .finish()
- }
-}
-
-// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
-impl<E> Clone for Iter<E> {
- fn clone(&self) -> Iter<E> {
- Iter {
- index: self.index,
- bits: self.bits,
- marker: marker::PhantomData,
- }
- }
-}
-
-impl<E: CLike> Iter<E> {
- fn new(bits: usize) -> Iter<E> {
- Iter {
- index: 0,
- bits: bits,
- marker: marker::PhantomData,
- }
- }
-}
-
-impl<E: CLike> Iterator for Iter<E> {
- type Item = E;
-
- fn next(&mut self) -> Option<E> {
- if self.bits == 0 {
- return None;
- }
-
- while (self.bits & 1) == 0 {
- self.index += 1;
- self.bits >>= 1;
- }
- let elem = CLike::from_usize(self.index);
- self.index += 1;
- self.bits >>= 1;
- Some(elem)
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = self.bits.count_ones() as usize;
- (exact, Some(exact))
- }
-}
-
-#[unstable(feature = "fused", issue = "35602")]
-impl<E: CLike> FusedIterator for Iter<E> {}
-
-impl<E: CLike> FromIterator<E> for EnumSet<E> {
- fn from_iter<I: IntoIterator<Item = E>>(iter: I) -> EnumSet<E> {
- let mut ret = EnumSet::new();
- ret.extend(iter);
- ret
- }
-}
-
-impl<'a, E> IntoIterator for &'a EnumSet<E>
- where E: CLike
-{
- type Item = E;
- type IntoIter = Iter<E>;
-
- fn into_iter(self) -> Iter<E> {
- self.iter()
- }
-}
-
-impl<E: CLike> Extend<E> for EnumSet<E> {
- fn extend<I: IntoIterator<Item = E>>(&mut self, iter: I) {
- for element in iter {
- self.insert(element);
- }
- }
-}
-
-impl<'a, E: 'a + CLike + Copy> Extend<&'a E> for EnumSet<E> {
- fn extend<I: IntoIterator<Item = &'a E>>(&mut self, iter: I) {
- self.extend(iter.into_iter().cloned());
- }
-}
#![feature(specialization)]
#![feature(staged_api)]
#![feature(str_internals)]
+#![feature(str_box_extras)]
#![feature(str_mut_extras)]
#![feature(trusted_len)]
#![feature(unicode)]
#[doc(no_inline)]
pub use linked_list::LinkedList;
#[doc(no_inline)]
-#[allow(deprecated)]
-pub use enum_set::EnumSet;
-#[doc(no_inline)]
pub use vec_deque::VecDeque;
#[doc(no_inline)]
pub use string::String;
pub mod binary_heap;
mod btree;
pub mod borrow;
-pub mod enum_set;
pub mod fmt;
pub mod linked_list;
pub mod range;
/// An owning iterator over the elements of a `LinkedList`.
///
-/// This `struct` is created by the [`into_iter`] method on [`LinkedList`]
+/// This `struct` is created by the [`into_iter`] method on [`LinkedList`][`LinkedList`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.LinkedList.html#method.into_iter
self.to_vec()
}
- // HACK(japaric): with cfg(test) the inherent `[T]::to_vec`, which is required for this method
- // definition, is not available. Since we don't require this method for testing purposes, I'll
- // just stub it
- // NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
fn to_owned(&self) -> Vec<T> {
- panic!("not available with cfg(test)")
+ hack::to_vec(self)
}
fn clone_into(&self, target: &mut Vec<T>) {
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{MatchIndices, RMatchIndices};
#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::str::{from_utf8, Chars, CharIndices, Bytes};
+pub use core::str::{from_utf8, from_utf8_mut, Chars, CharIndices, Bytes};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
+#[unstable(feature = "str_box_extras", issue = "41119")]
+pub use alloc::str::from_boxed_utf8_unchecked;
#[stable(feature = "rust1", since = "1.0.0")]
pub use std_unicode::str::SplitWhitespace;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::pattern;
+
#[unstable(feature = "slice_concat_ext",
reason = "trait should not have to exist",
issue = "27747")]
core_str::StrExt::parse(self)
}
+ /// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
+ #[unstable(feature = "str_box_extras", issue = "41119")]
+ pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
+ self.into()
+ }
+
/// Replaces all matches of a pattern with another string.
///
/// `replace` creates a new [`String`], and copies the data from this string slice into it.
#![stable(feature = "rust1", since = "1.0.0")]
+use alloc::str as alloc_str;
+
use core::fmt;
use core::hash;
use core::iter::{FromIterator, FusedIterator};
-use core::mem;
use core::ops::{self, Add, AddAssign, Index, IndexMut};
use core::ptr;
use core::str as core_str;
self.vec.clear()
}
- /// Create a draining iterator that removes the specified range in the string
+ /// Creates a draining iterator that removes the specified range in the string
/// and yields the removed chars.
///
/// Note: The element range is removed even if the iterator is not
}
}
+ /// Creates a splicing iterator that removes the specified range in the string,
+    /// replaces it with the given string, and yields the removed chars.
+ /// The given string doesn’t need to be the same length as the range.
+ ///
+ /// Note: The element range is removed when the `Splice` is dropped,
+ /// even if the iterator is not consumed until the end.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// [`char`]: ../../std/primitive.char.html
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(splice)]
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Replace the range up until the β from the string
+ /// let t: String = s.splice(..beta_offset, "Α is capital alpha; ").collect();
+ /// assert_eq!(t, "α is alpha, ");
+ /// assert_eq!(s, "Α is capital alpha; β is beta");
+ /// ```
+ #[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+ pub fn splice<'a, 'b, R>(&'a mut self, range: R, replace_with: &'b str) -> Splice<'a, 'b>
+ where R: RangeArgument<usize>
+ {
+ // Memory safety
+ //
+ // The String version of Splice does not have the memory safety issues
+ // of the vector version. The data is just plain bytes.
+ // Because the range removal happens in Drop, if the Splice iterator is leaked,
+ // the removal will not happen.
+ let len = self.len();
+ let start = match range.start() {
+ Included(&n) => n,
+ Excluded(&n) => n + 1,
+ Unbounded => 0,
+ };
+ let end = match range.end() {
+ Included(&n) => n + 1,
+ Excluded(&n) => n,
+ Unbounded => len,
+ };
+
+ // Take out two simultaneous borrows. The &mut String won't be accessed
+ // until iteration is over, in Drop.
+ let self_ptr = self as *mut _;
+ // slicing does the appropriate bounds checks
+ let chars_iter = self[start..end].chars();
+
+ Splice {
+ start: start,
+ end: end,
+ iter: chars_iter,
+ string: self_ptr,
+ replace_with: replace_with
+ }
+ }
+
/// Converts this `String` into a `Box<str>`.
///
/// This will drop any excess capacity.
#[stable(feature = "box_str", since = "1.4.0")]
pub fn into_boxed_str(self) -> Box<str> {
let slice = self.vec.into_boxed_slice();
- unsafe { mem::transmute::<Box<[u8]>, Box<str>>(slice) }
+ unsafe { alloc_str::from_boxed_utf8_unchecked(slice) }
}
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a> FusedIterator for Drain<'a> {}
+
+/// A splicing iterator for `String`.
+///
+/// This struct is created by the [`splice()`] method on [`String`]. See its
+/// documentation for more.
+///
+/// [`splice()`]: struct.String.html#method.splice
+/// [`String`]: struct.String.html
+#[derive(Debug)]
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+pub struct Splice<'a, 'b> {
+ /// Will be used as &'a mut String in the destructor
+ string: *mut String,
+ /// Start of part to remove
+ start: usize,
+ /// End of part to remove
+ end: usize,
+ /// Current remaining range to remove
+ iter: Chars<'a>,
+ replace_with: &'b str,
+}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+unsafe impl<'a, 'b> Sync for Splice<'a, 'b> {}
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+unsafe impl<'a, 'b> Send for Splice<'a, 'b> {}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, 'b> Drop for Splice<'a, 'b> {
+ fn drop(&mut self) {
+ unsafe {
+ let vec = (*self.string).as_mut_vec();
+ vec.splice(self.start..self.end, self.replace_with.bytes());
+ }
+ }
+}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, 'b> Iterator for Splice<'a, 'b> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, 'b> DoubleEndedIterator for Splice<'a, 'b> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ self.iter.next_back()
+ }
+}
assert!(*heap.peek().unwrap() == box 103);
}
-#[test]
-#[allow(deprecated)]
-fn test_push_pop() {
- let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.push_pop(6), 6);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.push_pop(0), 5);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.push_pop(4), 5);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.push_pop(1), 4);
- assert_eq!(heap.len(), 5);
-}
-
-#[test]
-#[allow(deprecated)]
-fn test_replace() {
- let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.replace(6).unwrap(), 5);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.replace(0).unwrap(), 6);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.replace(4).unwrap(), 5);
- assert_eq!(heap.len(), 5);
- assert_eq!(heap.replace(1).unwrap(), 4);
- assert_eq!(heap.len(), 5);
-}
-
fn check_to_vec(mut data: Vec<i32>) {
let heap = BinaryHeap::from(data.clone());
let mut v = heap.clone().into_vec();
assert!(empty.peek_mut().is_none());
}
-#[test]
-#[allow(deprecated)]
-fn test_empty_replace() {
- let mut heap = BinaryHeap::new();
- assert!(heap.replace(5).is_none());
-}
-
#[test]
fn test_from_iter() {
let xs = vec![9, 8, 7, 6, 5, 4, 3, 2, 1];
#![deny(warnings)]
-#![feature(binary_heap_extras)]
#![feature(binary_heap_peek_mut_pop)]
#![feature(box_syntax)]
#![feature(inclusive_range_syntax)]
#![feature(pattern)]
#![feature(placement_in_syntax)]
#![feature(rand)]
+#![feature(splice)]
#![feature(step_by)]
#![feature(str_escape)]
#![feature(test)]
assert_eq!(t, "");
}
+#[test]
+fn test_splice() {
+ let mut s = "Hello, world!".to_owned();
+ let t: String = s.splice(7..12, "世界").collect();
+ assert_eq!(s, "Hello, 世界!");
+ assert_eq!(t, "world");
+}
+
+#[test]
+#[should_panic]
+fn test_splice_char_boundary() {
+ let mut s = "Hello, 世界!".to_owned();
+ s.splice(..8, "");
+}
+
+#[test]
+fn test_splice_inclusive_range() {
+ let mut v = String::from("12345");
+ let t: String = v.splice(2...3, "789").collect();
+ assert_eq!(v, "127895");
+ assert_eq!(t, "34");
+ let t2: String = v.splice(1...2, "A").collect();
+ assert_eq!(v, "1A895");
+ assert_eq!(t2, "27");
+}
+
+#[test]
+#[should_panic]
+fn test_splice_out_of_bounds() {
+ let mut s = String::from("12345");
+ s.splice(5..6, "789");
+}
+
+#[test]
+#[should_panic]
+fn test_splice_inclusive_out_of_bounds() {
+ let mut s = String::from("12345");
+ s.splice(5...5, "789");
+}
+
+#[test]
+fn test_splice_empty() {
+ let mut s = String::from("12345");
+ let t: String = s.splice(1..2, "").collect();
+ assert_eq!(s, "1345");
+ assert_eq!(t, "2");
+}
+
+#[test]
+fn test_splice_unbounded() {
+ let mut s = String::from("12345");
+ let t: String = s.splice(.., "").collect();
+ assert_eq!(s, "");
+ assert_eq!(t, "12345");
+}
+
+#[test]
+fn test_splice_forget() {
+ let mut s = String::from("12345");
+ ::std::mem::forget(s.splice(2..4, "789"));
+ assert_eq!(s, "12345");
+}
+
#[test]
fn test_extend_ref() {
let mut a = "foo".to_string();
v.drain(5...5);
}
+#[test]
+fn test_splice() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ v.splice(2..4, a.iter().cloned());
+ assert_eq!(v, &[1, 2, 10, 11, 12, 5]);
+ v.splice(1..3, Some(20));
+ assert_eq!(v, &[1, 20, 11, 12, 5]);
+}
+
+#[test]
+fn test_splice_inclusive_range() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ let t1: Vec<_> = v.splice(2...3, a.iter().cloned()).collect();
+ assert_eq!(v, &[1, 2, 10, 11, 12, 5]);
+ assert_eq!(t1, &[3, 4]);
+ let t2: Vec<_> = v.splice(1...2, Some(20)).collect();
+ assert_eq!(v, &[1, 20, 11, 12, 5]);
+ assert_eq!(t2, &[2, 10]);
+}
+
+#[test]
+#[should_panic]
+fn test_splice_out_of_bounds() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ v.splice(5..6, a.iter().cloned());
+}
+
+#[test]
+#[should_panic]
+fn test_splice_inclusive_out_of_bounds() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ v.splice(5...5, a.iter().cloned());
+}
+
+#[test]
+fn test_splice_items_zero_sized() {
+ let mut vec = vec![(), (), ()];
+ let vec2 = vec![];
+ let t: Vec<_> = vec.splice(1..2, vec2.iter().cloned()).collect();
+ assert_eq!(vec, &[(), ()]);
+ assert_eq!(t, &[()]);
+}
+
+#[test]
+fn test_splice_unbounded() {
+ let mut vec = vec![1, 2, 3, 4, 5];
+ let t: Vec<_> = vec.splice(.., None).collect();
+ assert_eq!(vec, &[]);
+ assert_eq!(t, &[1, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_splice_forget() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ ::std::mem::forget(v.splice(2..4, a.iter().cloned()));
+ assert_eq!(v, &[1, 2]);
+}
+
#[test]
fn test_into_boxed_slice() {
let xs = vec![1, 2, 3];
/// removed data to be erased for security purposes. Even if you drop a `Vec`, its
/// buffer may simply be reused by another `Vec`. Even if you zero a `Vec`'s memory
/// first, that may not actually happen because the optimizer does not consider
-/// this a side-effect that must be preserved.
+/// this a side-effect that must be preserved. There is one case which we will
+/// not break, however: using `unsafe` code to write to the excess capacity,
+/// and then increasing the length to match, is always valid.
///
/// `Vec` does not currently guarantee the order in which elements are dropped
/// (the order has changed in the past, and may change again).
self.len += count;
}
- /// Create a draining iterator that removes the specified range in the vector
+ /// Creates a draining iterator that removes the specified range in the vector
/// and yields the removed items.
///
/// Note 1: The element range is removed even if the iterator is only
/// partially consumed or not consumed at all.
///
- /// Note 2: It is unspecified how many elements are removed from the vector,
+ /// Note 2: It is unspecified how many elements are removed from the vector
/// if the `Drain` value is leaked.
///
/// # Panics
self.truncate(0)
}
- /// Returns the number of elements in the vector.
+ /// Returns the number of elements in the vector, also referred to
+ /// as its 'length'.
///
/// # Examples
///
}
}
}
+
+ /// Creates a splicing iterator that replaces the specified range in the vector
+ /// with the given `replace_with` iterator and yields the removed items.
+ /// `replace_with` does not need to be the same length as `range`.
+ ///
+ /// Note 1: The element range is removed even if the iterator is not
+ /// consumed until the end.
+ ///
+    /// Note 2: It is unspecified how many elements are removed from the vector
+ /// if the `Splice` value is leaked.
+ ///
+ /// Note 3: The input iterator `replace_with` is only consumed
+ /// when the `Splice` value is dropped.
+ ///
+ /// Note 4: This is optimal if:
+ ///
+ /// * The tail (elements in the vector after `range`) is empty,
+ /// * or `replace_with` yields fewer elements than `range`’s length
+ /// * or the lower bound of its `size_hint()` is exact.
+ ///
+ /// Otherwise, a temporary vector is allocated and the tail is moved twice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(splice)]
+ /// let mut v = vec![1, 2, 3];
+ /// let new = [7, 8];
+ /// let u: Vec<_> = v.splice(..2, new.iter().cloned()).collect();
+ /// assert_eq!(v, &[7, 8, 3]);
+ /// assert_eq!(u, &[1, 2]);
+ /// ```
+ #[inline]
+ #[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<I::IntoIter>
+ where R: RangeArgument<usize>, I: IntoIterator<Item=T>
+ {
+ Splice {
+ drain: self.drain(range),
+ replace_with: replace_with.into_iter(),
+ }
+ }
+
}
#[stable(feature = "extend_ref", since = "1.2.0")]
}
}
+#[stable(feature = "vec_from_mut", since = "1.21.0")]
+impl<'a, T: Clone> From<&'a mut [T]> for Vec<T> {
+ #[cfg(not(test))]
+ fn from(s: &'a mut [T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &'a mut [T]) -> Vec<T> {
+ ::slice::to_vec(s)
+ }
+}
+
#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
impl<'a, T> From<Cow<'a, [T]>> for Vec<T> where [T]: ToOwned<Owned=Vec<T>> {
fn from(s: Cow<'a, [T]>) -> Vec<T> {
&mut *ptr
}
}
+
+
+/// A splicing iterator for `Vec`.
+///
+/// This struct is created by the [`splice()`] method on [`Vec`]. See its
+/// documentation for more.
+///
+/// [`splice()`]: struct.Vec.html#method.splice
+/// [`Vec`]: struct.Vec.html
+#[derive(Debug)]
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+pub struct Splice<'a, I: Iterator + 'a> {
+ drain: Drain<'a, I::Item>,
+ replace_with: I,
+}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, I: Iterator> Iterator for Splice<'a, I> {
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.drain.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.drain.size_hint()
+ }
+}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, I: Iterator> DoubleEndedIterator for Splice<'a, I> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.drain.next_back()
+ }
+}
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, I: Iterator> ExactSizeIterator for Splice<'a, I> {}
+
+
+#[unstable(feature = "splice", reason = "recently added", issue = "32310")]
+impl<'a, I: Iterator> Drop for Splice<'a, I> {
+ fn drop(&mut self) {
+ // exhaust drain first
+ while let Some(_) = self.drain.next() {}
+
+
+ unsafe {
+ if self.drain.tail_len == 0 {
+ let vec = &mut *self.drain.vec.as_mut_ptr();
+ vec.extend(self.replace_with.by_ref());
+ return
+ }
+
+ // First fill the range left by drain().
+ if !self.drain.fill(&mut self.replace_with) {
+ return
+ }
+
+ // There may be more elements. Use the lower bound as an estimate.
+ // FIXME: Is the upper bound a better guess? Or something else?
+ let (lower_bound, _upper_bound) = self.replace_with.size_hint();
+ if lower_bound > 0 {
+ self.drain.move_tail(lower_bound);
+ if !self.drain.fill(&mut self.replace_with) {
+ return
+ }
+ }
+
+ // Collect any remaining elements.
+ // This is a zero-length vector which does not allocate if `lower_bound` was exact.
+ let mut collected = self.replace_with.by_ref().collect::<Vec<I::Item>>().into_iter();
+ // Now we have an exact count.
+ if collected.len() > 0 {
+ self.drain.move_tail(collected.len());
+ let filled = self.drain.fill(&mut collected);
+ debug_assert!(filled);
+ debug_assert_eq!(collected.len(), 0);
+ }
+ }
+ // Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
+ }
+}
+
+/// Private helper methods for `Splice::drop`
+impl<'a, T> Drain<'a, T> {
+ /// The range from `self.vec.len` to `self.tail_start` contains elements
+ /// that have been moved out.
+ /// Fill that range as much as possible with new elements from the `replace_with` iterator.
+ /// Return whether we filled the entire range. (`replace_with.next()` didn’t return `None`.)
+ unsafe fn fill<I: Iterator<Item=T>>(&mut self, replace_with: &mut I) -> bool {
+ let vec = &mut *self.vec.as_mut_ptr();
+ let range_start = vec.len;
+ let range_end = self.tail_start;
+ let range_slice = slice::from_raw_parts_mut(
+ vec.as_mut_ptr().offset(range_start as isize),
+ range_end - range_start);
+
+ for place in range_slice {
+ if let Some(new_item) = replace_with.next() {
+ ptr::write(place, new_item);
+ vec.len += 1;
+ } else {
+ return false
+ }
+ }
+ true
+ }
+
+ /// Make room for inserting more elements before the tail.
+ unsafe fn move_tail(&mut self, extra_capacity: usize) {
+ let vec = &mut *self.vec.as_mut_ptr();
+ let used_capacity = self.tail_start + self.tail_len;
+ vec.buf.reserve(used_capacity, extra_capacity);
+
+ let new_tail_start = self.tail_start + extra_capacity;
+ let src = vec.as_ptr().offset(self.tail_start as isize);
+ let dst = vec.as_mut_ptr().offset(new_tail_start as isize);
+ ptr::copy(src, dst, self.tail_len);
+ self.tail_start = new_tail_start;
+ }
+}
/// An owning iterator over the elements of a `VecDeque`.
///
-/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
+/// This `struct` is created by the [`into_iter`] method on [`VecDeque`][`VecDeque`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.VecDeque.html#method.into_iter
}
if target.contains("arm") && !target.contains("ios") {
+ // (At least) udivsi3.S is broken for Thumb 1 which our gcc uses by
+ // default, we don't want Thumb 2 since it isn't supported on some
+ // devices, so disable thumb entirely.
+ // Upstream bug: https://bugs.llvm.org/show_bug.cgi?id=32492
+ cfg.define("__ARM_ARCH_ISA_THUMB", Some("0"));
+
sources.extend(&["arm/aeabi_cdcmp.S",
"arm/aeabi_cdcmpeq_check_nan.c",
"arm/aeabi_cfcmp.S",
}
}
- /// Returns a reference to the underlying `UnsafeCell`.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(as_unsafe_cell)]
- ///
- /// use std::cell::Cell;
- ///
- /// let c = Cell::new(5);
- ///
- /// let uc = c.as_unsafe_cell();
- /// ```
- #[inline]
- #[unstable(feature = "as_unsafe_cell", issue = "27708")]
- #[rustc_deprecated(since = "1.12.0", reason = "renamed to as_ptr")]
- pub fn as_unsafe_cell(&self) -> &UnsafeCell<T> {
- &self.value
- }
-
/// Returns a raw pointer to the underlying data in this cell.
///
/// # Examples
value: UnsafeCell<T>,
}
-/// An enumeration of values returned from the `state` method on a `RefCell<T>`.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-#[unstable(feature = "borrow_state", issue = "27733")]
-#[rustc_deprecated(since = "1.15.0", reason = "use `try_borrow` instead")]
-#[allow(deprecated)]
-pub enum BorrowState {
- /// The cell is currently being read, there is at least one active `borrow`.
- Reading,
- /// The cell is currently being written to, there is an active `borrow_mut`.
- Writing,
- /// There are no outstanding borrows on this cell.
- Unused,
-}
-
/// An error returned by [`RefCell::try_borrow`](struct.RefCell.html#method.try_borrow).
#[stable(feature = "try_borrow", since = "1.13.0")]
pub struct BorrowError {
}
impl<T: ?Sized> RefCell<T> {
- /// Query the current state of this `RefCell`
- ///
- /// The returned value can be dispatched on to determine if a call to
- /// `borrow` or `borrow_mut` would succeed.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(borrow_state)]
- ///
- /// use std::cell::{BorrowState, RefCell};
- ///
- /// let c = RefCell::new(5);
- ///
- /// match c.borrow_state() {
- /// BorrowState::Writing => println!("Cannot be borrowed"),
- /// BorrowState::Reading => println!("Cannot be borrowed mutably"),
- /// BorrowState::Unused => println!("Can be borrowed (mutably as well)"),
- /// }
- /// ```
- #[unstable(feature = "borrow_state", issue = "27733")]
- #[rustc_deprecated(since = "1.15.0", reason = "use `try_borrow` instead")]
- #[allow(deprecated)]
- #[inline]
- pub fn borrow_state(&self) -> BorrowState {
- match self.borrow.get() {
- WRITING => BorrowState::Writing,
- UNUSED => BorrowState::Unused,
- _ => BorrowState::Reading,
- }
- }
-
/// Immutably borrows the wrapped value.
///
/// The borrow lasts until the returned `Ref` exits scope. Multiple
}
}
- /// Returns a reference to the underlying `UnsafeCell`.
- ///
- /// This can be used to circumvent `RefCell`'s safety checks.
- ///
- /// This function is `unsafe` because `UnsafeCell`'s field is public.
- ///
- /// # Examples
- ///
- /// ```
- /// #![feature(as_unsafe_cell)]
- ///
- /// use std::cell::RefCell;
- ///
- /// let c = RefCell::new(5);
- /// let c = unsafe { c.as_unsafe_cell() };
- /// ```
- #[inline]
- #[unstable(feature = "as_unsafe_cell", issue = "27708")]
- #[rustc_deprecated(since = "1.12.0", reason = "renamed to as_ptr")]
- pub unsafe fn as_unsafe_cell(&self) -> &UnsafeCell<T> {
- &self.value
- }
-
/// Returns a raw pointer to the underlying data in this cell.
///
/// # Examples
/// This call borrows `RefCell` mutably (at compile-time) so there is no
/// need for dynamic checks.
///
+ /// However be cautious: this method expects `self` to be mutable, which is
+ /// generally not the case when using a `RefCell`. Take a look at the
+ /// [`borrow_mut`] method instead if `self` isn't mutable.
+ ///
+ /// Also, please be aware that this method is only for special circumstances and is usually
+    /// not what you want. In case of doubt, use [`borrow_mut`] instead.
+ ///
+ /// [`borrow_mut`]: #method.borrow_mut
+ ///
/// # Examples
///
/// ```
// FIXME: #6220 Implement floating point formatting
use fmt;
-use num::Zero;
use ops::{Div, Rem, Sub};
use str;
use slice;
use mem;
#[doc(hidden)]
-trait Int: Zero + PartialEq + PartialOrd + Div<Output=Self> + Rem<Output=Self> +
+trait Int: PartialEq + PartialOrd + Div<Output=Self> + Rem<Output=Self> +
Sub<Output=Self> + Copy {
+ fn zero() -> Self;
fn from_u8(u: u8) -> Self;
fn to_u8(&self) -> u8;
fn to_u16(&self) -> u16;
macro_rules! doit {
($($t:ident)*) => ($(impl Int for $t {
+ fn zero() -> $t { 0 }
fn from_u8(u: u8) -> $t { u as $t }
fn to_u8(&self) -> u8 { *self as u8 }
fn to_u16(&self) -> u16 { *self as u16 }
///
/// Note that the underlying iterator is still advanced when [`peek`] is
/// called for the first time: In order to retrieve the next element,
- /// [`next`] is called on the underlying iterator, hence any side effects of
- /// the [`next`] method will occur.
+ /// [`next`] is called on the underlying iterator, hence any side effects (i.e.
+ /// anything other than fetching the next value) of the [`next`] method
+ /// will occur.
///
/// [`peek`]: struct.Peekable.html#method.peek
/// [`next`]: ../../std/iter/trait.Iterator.html#tymethod.next
#![stable(feature = "rust1", since = "1.0.0")]
+use cell::UnsafeCell;
use cmp;
use hash::Hash;
use hash::Hasher;
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<'a, T: Send + ?Sized> Send for &'a mut T {}
}
+
+/// Compiler-internal trait used to determine whether a type contains
+/// any `UnsafeCell` internally, but not through an indirection.
+/// This affects, for example, whether a `static` of that type is
+/// placed in read-only static memory or writable static memory.
+#[cfg_attr(not(stage0), lang = "freeze")]
+unsafe trait Freeze {}
+
+unsafe impl Freeze for .. {}
+
+impl<T: ?Sized> !Freeze for UnsafeCell<T> {}
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<'a, T: ?Sized> Freeze for &'a T {}
+unsafe impl<'a, T: ?Sized> Freeze for &'a mut T {}
const NAN: Self;
const ZERO: Self;
- // suffix of "2" because Float::integer_decode is deprecated
- #[allow(deprecated)]
- fn integer_decode2(self) -> (u64, i16, i8) {
- Float::integer_decode(self)
- }
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8);
/// Get the raw binary representation of the float.
fn transmute(self) -> u64;
const ZERO_CUTOFF: i64 = -48;
other_constants!(f32);
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits: u32 = unsafe { transmute(self) };
+ let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0x7fffff) << 1
+ } else {
+ (bits & 0x7fffff) | 0x800000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 127 + 23;
+ (mantissa as u64, exponent, sign)
+ }
+
fn transmute(self) -> u64 {
let bits: u32 = unsafe { transmute(self) };
bits as u64
}
fn unpack(self) -> Unpacked {
- let (sig, exp, _sig) = self.integer_decode2();
+ let (sig, exp, _sig) = self.integer_decode();
Unpacked::new(sig, exp)
}
const ZERO_CUTOFF: i64 = -326;
other_constants!(f64);
+ /// Returns the mantissa, exponent and sign as integers.
+ fn integer_decode(self) -> (u64, i16, i8) {
+ let bits: u64 = unsafe { transmute(self) };
+ let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
+ let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
+ let mantissa = if exponent == 0 {
+ (bits & 0xfffffffffffff) << 1
+ } else {
+ (bits & 0xfffffffffffff) | 0x10000000000000
+ };
+ // Exponent bias + mantissa shift
+ exponent -= 1023 + 52;
+ (mantissa, exponent, sign)
+ }
+
fn transmute(self) -> u64 {
let bits: u64 = unsafe { transmute(self) };
bits
}
fn unpack(self) -> Unpacked {
- let (sig, exp, _sig) = self.integer_decode2();
+ let (sig, exp, _sig) = self.integer_decode();
Unpacked::new(sig, exp)
}
reason = "stable interface is via `impl f{32,64}` in later crates",
issue = "32110")]
impl Float for f32 {
- #[inline]
- fn nan() -> f32 {
- NAN
- }
-
- #[inline]
- fn infinity() -> f32 {
- INFINITY
- }
-
- #[inline]
- fn neg_infinity() -> f32 {
- NEG_INFINITY
- }
-
- #[inline]
- fn zero() -> f32 {
- 0.0
- }
-
- #[inline]
- fn neg_zero() -> f32 {
- -0.0
- }
-
- #[inline]
- fn one() -> f32 {
- 1.0
- }
-
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool {
}
}
- /// Returns the mantissa, exponent and sign as integers.
- fn integer_decode(self) -> (u64, i16, i8) {
- let bits: u32 = unsafe { mem::transmute(self) };
- let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
- let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
- let mantissa = if exponent == 0 {
- (bits & 0x7fffff) << 1
- } else {
- (bits & 0x7fffff) | 0x800000
- };
- // Exponent bias + mantissa shift
- exponent -= 127 + 23;
- (mantissa as u64, exponent, sign)
- }
-
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
reason = "stable interface is via `impl f{32,64}` in later crates",
issue = "32110")]
impl Float for f64 {
- #[inline]
- fn nan() -> f64 {
- NAN
- }
-
- #[inline]
- fn infinity() -> f64 {
- INFINITY
- }
-
- #[inline]
- fn neg_infinity() -> f64 {
- NEG_INFINITY
- }
-
- #[inline]
- fn zero() -> f64 {
- 0.0
- }
-
- #[inline]
- fn neg_zero() -> f64 {
- -0.0
- }
-
- #[inline]
- fn one() -> f64 {
- 1.0
- }
-
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool {
}
}
- /// Returns the mantissa, exponent and sign as integers.
- fn integer_decode(self) -> (u64, i16, i8) {
- let bits: u64 = unsafe { mem::transmute(self) };
- let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
- let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
- let mantissa = if exponent == 0 {
- (bits & 0xfffffffffffff) << 1
- } else {
- (bits & 0xfffffffffffff) | 0x10000000000000
- };
- // Exponent bias + mantissa shift
- exponent -= 1023 + 52;
- (mantissa, exponent, sign)
- }
-
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
/// Returns a sign (true when negative) and `FullDecoded` value
/// from given floating point number.
pub fn decode<T: DecodableFloat>(v: T) -> (/*negative?*/ bool, FullDecoded) {
- let (mant, exp, sign) = v.integer_decode2();
+ let (mant, exp, sign) = v.integer_decode();
let even = (mant & 1) == 0;
let decoded = match v.classify() {
FpCategory::Nan => FullDecoded::Nan,
exp: exp, inclusive: even })
}
FpCategory::Normal => {
- let minnorm = <T as DecodableFloat>::min_pos_norm_value().integer_decode2();
+ let minnorm = <T as DecodableFloat>::min_pos_norm_value().integer_decode();
if mant == minnorm.0 {
// neighbors: (maxmant, exp - 1) -- (minnormmant, exp) -- (minnormmant + 1, exp)
// where maxmant = minnormmant * 2 - 1
pub mod bignum;
pub mod diy_float;
-/// Types that have a "zero" value.
-///
-/// This trait is intended for use in conjunction with `Add`, as an identity:
-/// `x + T::zero() == x`.
-#[unstable(feature = "zero_one",
- reason = "unsure of placement, wants to use associated constants",
- issue = "27739")]
-#[rustc_deprecated(since = "1.11.0", reason = "no longer used for \
- Iterator::sum")]
-pub trait Zero: Sized {
- /// The "zero" (usually, additive identity) for this type.
- fn zero() -> Self;
-}
-
-/// Types that have a "one" value.
-///
-/// This trait is intended for use in conjunction with `Mul`, as an identity:
-/// `x * T::one() == x`.
-#[unstable(feature = "zero_one",
- reason = "unsure of placement, wants to use associated constants",
- issue = "27739")]
-#[rustc_deprecated(since = "1.11.0", reason = "no longer used for \
- Iterator::product")]
-pub trait One: Sized {
- /// The "one" (usually, multiplicative identity) for this type.
- fn one() -> Self;
-}
-
-macro_rules! zero_one_impl {
- ($($t:ty)*) => ($(
- #[unstable(feature = "zero_one",
- reason = "unsure of placement, wants to use associated constants",
- issue = "27739")]
- #[allow(deprecated)]
- impl Zero for $t {
- #[inline]
- fn zero() -> Self { 0 }
- }
- #[unstable(feature = "zero_one",
- reason = "unsure of placement, wants to use associated constants",
- issue = "27739")]
- #[allow(deprecated)]
- impl One for $t {
- #[inline]
- fn one() -> Self { 1 }
- }
- )*)
-}
-zero_one_impl! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 i128 isize }
-
-macro_rules! zero_one_impl_float {
- ($($t:ty)*) => ($(
- #[unstable(feature = "zero_one",
- reason = "unsure of placement, wants to use associated constants",
- issue = "27739")]
- #[allow(deprecated)]
- impl Zero for $t {
- #[inline]
- fn zero() -> Self { 0.0 }
- }
- #[unstable(feature = "zero_one",
- reason = "unsure of placement, wants to use associated constants",
- issue = "27739")]
- #[allow(deprecated)]
- impl One for $t {
- #[inline]
- fn one() -> Self { 1.0 }
- }
- )*)
-}
-zero_one_impl_float! { f32 f64 }
-
macro_rules! checked_op {
($U:ty, $op:path, $x:expr, $y:expr) => {{
let (result, overflowed) = unsafe { $op($x as $U, $y as $U) };
reason = "stable interface is via `impl f{32,64}` in later crates",
issue = "32110")]
pub trait Float: Sized {
- /// Returns the NaN value.
- #[unstable(feature = "float_extras", reason = "needs removal",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn nan() -> Self;
- /// Returns the infinite value.
- #[unstable(feature = "float_extras", reason = "needs removal",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn infinity() -> Self;
- /// Returns the negative infinite value.
- #[unstable(feature = "float_extras", reason = "needs removal",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn neg_infinity() -> Self;
- /// Returns -0.0.
- #[unstable(feature = "float_extras", reason = "needs removal",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn neg_zero() -> Self;
- /// Returns 0.0.
- #[unstable(feature = "float_extras", reason = "needs removal",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn zero() -> Self;
- /// Returns 1.0.
- #[unstable(feature = "float_extras", reason = "needs removal",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn one() -> Self;
-
/// Returns `true` if this value is NaN and false otherwise.
#[stable(feature = "core", since = "1.6.0")]
fn is_nan(self) -> bool;
#[stable(feature = "core", since = "1.6.0")]
fn classify(self) -> FpCategory;
- /// Returns the mantissa, exponent and sign as integers, respectively.
- #[unstable(feature = "float_extras", reason = "signature is undecided",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- fn integer_decode(self) -> (u64, i16, i8);
-
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[stable(feature = "core", since = "1.6.0")]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use std::f32;
use std::f64;
-use std::mem;
use core::num::diy_float::Fp;
use core::num::dec2flt::rawfp::{fp_to_float, prev_float, next_float, round_normal};
+use core::num::dec2flt::rawfp::RawFloat;
fn integer_decode(f: f64) -> (u64, i16, i8) {
- let bits: u64 = unsafe { mem::transmute(f) };
- let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 };
- let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16;
- let mantissa = if exponent == 0 {
- (bits & 0xfffffffffffff) << 1
- } else {
- (bits & 0xfffffffffffff) | 0x10000000000000
- };
- // Exponent bias + mantissa shift
- exponent -= 1023 + 52;
- (mantissa, exponent, sign)
+ RawFloat::integer_decode(f)
}
#[test]
}
assert!(x > 0.5);
}
+
+#[test]
+fn test_f32_integer_decode() {
+ assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1));
+ assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1));
+ assert_eq!(2f32.powf(100.0).integer_decode(), (8388608, 77, 1));
+ assert_eq!(0f32.integer_decode(), (0, -150, 1));
+ assert_eq!((-0f32).integer_decode(), (0, -150, -1));
+ assert_eq!(f32::INFINITY.integer_decode(), (8388608, 105, 1));
+ assert_eq!(f32::NEG_INFINITY.integer_decode(), (8388608, 105, -1));
+
+ // Ignore the "sign" (quiet / signalling flag) of NAN.
+ // It can vary between runtime operations and LLVM folding.
+ let (nan_m, nan_e, _nan_s) = f32::NAN.integer_decode();
+ assert_eq!((nan_m, nan_e), (12582912, 105));
+}
+
+#[test]
+fn test_f64_integer_decode() {
+ assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1));
+ assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1));
+ assert_eq!(2f64.powf(100.0).integer_decode(), (4503599627370496, 48, 1));
+ assert_eq!(0f64.integer_decode(), (0, -1075, 1));
+ assert_eq!((-0f64).integer_decode(), (0, -1075, -1));
+ assert_eq!(f64::INFINITY.integer_decode(), (4503599627370496, 972, 1));
+ assert_eq!(f64::NEG_INFINITY.integer_decode(), (4503599627370496, 972, -1));
+
+ // Ignore the "sign" (quiet / signalling flag) of NAN.
+ // It can vary between runtime operations and LLVM folding.
+ let (nan_m, nan_e, _nan_s) = f64::NAN.integer_decode();
+ assert_eq!((nan_m, nan_e), (6755399441055744, 972));
+}
-Subproject commit 05a2d197356ef253dfd985166576619ac9b6947f
+Subproject commit c34a802d1eb037b44c5252078c7270b5472e0f65
// Find the tables for this body.
let owner_def_id = tcx.hir.local_def_id(tcx.hir.body_owner(body.id()));
- let tables = tcx.item_tables(owner_def_id);
+ let tables = tcx.typeck_tables_of(owner_def_id);
let mut cfg_builder = CFGBuilder {
tcx: tcx,
// predicates for an item wind up in `ItemSignature`).
AssociatedItems(D),
ItemSignature(D),
+ IsForeignItem(D),
TypeParamPredicates((D, D)),
SizedConstraint(D),
+ DtorckConstraint(D),
AdtDestructor(D),
AssociatedItemDefIds(D),
InherentImpls(D),
TypeckBodiesKrate,
TypeckTables(D),
UsedTraitImports(D),
- MonomorphicConstEval(D),
+ ConstEval(D),
// The set of impls for a given trait. Ultimately, it would be
// nice to get more fine-grained here (e.g., to include a
TransCrateItem,
AssociatedItems,
ItemSignature,
+ IsForeignItem,
AssociatedItemDefIds,
InherentImpls,
TypeckTables,
TransInlinedItem(ref d) => op(d).map(TransInlinedItem),
AssociatedItems(ref d) => op(d).map(AssociatedItems),
ItemSignature(ref d) => op(d).map(ItemSignature),
+ IsForeignItem(ref d) => op(d).map(IsForeignItem),
TypeParamPredicates((ref item, ref param)) => {
Some(TypeParamPredicates((try_opt!(op(item)), try_opt!(op(param)))))
}
SizedConstraint(ref d) => op(d).map(SizedConstraint),
+ DtorckConstraint(ref d) => op(d).map(DtorckConstraint),
AdtDestructor(ref d) => op(d).map(AdtDestructor),
AssociatedItemDefIds(ref d) => op(d).map(AssociatedItemDefIds),
InherentImpls(ref d) => op(d).map(InherentImpls),
TypeckTables(ref d) => op(d).map(TypeckTables),
UsedTraitImports(ref d) => op(d).map(UsedTraitImports),
- MonomorphicConstEval(ref d) => op(d).map(MonomorphicConstEval),
+ ConstEval(ref d) => op(d).map(ConstEval),
TraitImpls(ref d) => op(d).map(TraitImpls),
TraitItems(ref d) => op(d).map(TraitItems),
ReprHints(ref d) => op(d).map(ReprHints),
E0314, // closure outlives stack frame
E0315, // cannot invoke closure outside of its lifetime
E0316, // nested quantification of lifetimes
+ E0320, // recursive overflow during dropck
E0473, // dereference of reference outside its lifetime
E0474, // captured variable `..` does not outlive the enclosing closure
E0475, // index of slice outside its lifetime
E0489, // type/lifetime parameter not in scope here
E0490, // a value of type `..` is borrowed for too long
E0495, // cannot infer an appropriate lifetime due to conflicting requirements
- E0566 // conflicting representation hints
+ E0566, // conflicting representation hints
+ E0587, // conflicting packed and align representation hints
}
};
let mut conflicting_reprs = 0;
+ let mut found_packed = false;
+ let mut found_align = false;
+
for word in words {
let name = match word.name() {
("attribute should be applied to struct or union",
"a struct or union")
} else {
+ found_packed = true;
continue
}
}
continue
}
}
+ "align" => {
+ found_align = true;
+ if target != Target::Struct {
+ ("attribute should be applied to struct",
+ "a struct")
+ } else {
+ continue
+ }
+ }
"i8" | "u8" | "i16" | "u16" |
"i32" | "u32" | "i64" | "u64" |
"isize" | "usize" => {
span_warn!(self.sess, attr.span, E0566,
"conflicting representation hints");
}
+ if found_align && found_packed {
+ struct_span_err!(self.sess, attr.span, E0587,
+ "conflicting packed and align representation hints").emit();
+ }
}
fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
ConstVal::Bool(value) => {
value.hash_stable(hcx, hasher);
}
+ ConstVal::Char(value) => {
+ value.hash_stable(hcx, hasher);
+ }
+ ConstVal::Variant(def_id) => {
+ def_id.hash_stable(hcx, hasher);
+ }
ConstVal::Function(def_id, substs) => {
def_id.hash_stable(hcx, hasher);
substs.hash_stable(hcx, hasher);
value.hash_stable(hcx, hasher);
times.hash_stable(hcx, hasher);
}
- ConstVal::Char(value) => {
- value.hash_stable(hcx, hasher);
- }
}
}
}
ty::ReEmpty |
ty::ReErased => {
// replace all free regions with 'erased
- self.tcx().mk_region(ty::ReErased)
+ self.tcx().types.re_erased
}
}
}
Option<ty::TypeckTables<'tcx>>,
Option<ty::ParameterEnvironment<'tcx>>) {
let item_id = tcx.hir.body_owner(self);
- (Some(tcx.item_tables(tcx.hir.local_def_id(item_id))),
+ (Some(tcx.typeck_tables_of(tcx.hir.local_def_id(item_id))),
None,
Some(ty::ParameterEnvironment::for_item(tcx, item_id)))
}
substs: &[Kind<'tcx>])
-> Ty<'tcx> {
let default = if def.has_default {
- let default = self.tcx.item_type(def.def_id);
+ let default = self.tcx.type_of(def.def_id);
Some(type_variable::Default {
ty: default.subst_spanned(self.tcx, substs, Some(span)),
origin_span: span,
} else {
// otherwise, we don't know what the free region is,
// so we must conservatively say the LUB is static:
- self.tcx.mk_region(ReStatic)
+ self.tcx.types.re_static
}
}
if a == b {
a
} else {
- self.tcx.mk_region(ReStatic)
+ self.tcx.types.re_static
}
}
}
fn construct_var_data(&self) -> Vec<VarValue<'tcx>> {
(0..self.num_vars() as usize)
- .map(|_| Value(self.tcx.mk_region(ty::ReEmpty)))
+ .map(|_| Value(self.tcx.types.re_empty))
.collect()
}
-> &'tcx ty::Region {
match values[rid.index as usize] {
Value(r) => r,
- ErrorValue => tcx.mk_region(ReStatic), // Previously reported error.
+ ErrorValue => tcx.types.re_static, // Previously reported error.
}
}
use syntax::attr;
use syntax::ast;
use syntax::symbol::Symbol;
-use syntax_pos::{DUMMY_SP, MultiSpan, Span};
+use syntax_pos::{MultiSpan, Span};
use errors::{self, Diagnostic, DiagnosticBuilder};
use hir;
use hir::def_id::LOCAL_CRATE;
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let _task = tcx.dep_graph.in_task(DepNode::LateLintCheck);
- let access_levels = &ty::queries::privacy_access_levels::get(tcx, DUMMY_SP, LOCAL_CRATE);
+ let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
let krate = tcx.hir.krate();
use hir;
use hir::def::Def;
use hir::def_id::DefId;
-use ty::{self, TyCtxt};
+use ty::TyCtxt;
use ty::subst::Substs;
use util::common::ErrorReported;
use rustc_const_math::*;
Str(InternedString),
ByteStr(Rc<Vec<u8>>),
Bool(bool),
+ Char(char),
+ Variant(DefId),
Function(DefId, &'tcx Substs<'tcx>),
Struct(BTreeMap<ast::Name, ConstVal<'tcx>>),
Tuple(Vec<ConstVal<'tcx>>),
Array(Vec<ConstVal<'tcx>>),
Repeat(Box<ConstVal<'tcx>>, u64),
- Char(char),
}
impl<'tcx> ConstVal<'tcx> {
Str(_) => "string literal",
ByteStr(_) => "byte string literal",
Bool(_) => "boolean",
+ Char(..) => "char",
+ Variant(_) => "enum variant",
Struct(_) => "struct",
Tuple(_) => "tuple",
Function(..) => "function definition",
Array(..) => "array",
Repeat(..) => "repeat",
- Char(..) => "char",
}
}
MissingStructField,
NegateOn(ConstVal<'tcx>),
NotOn(ConstVal<'tcx>),
- CallOn(ConstVal<'tcx>),
NonConstPath,
UnimplementedConstVal(&'static str),
CannotCast => simple!("can't cast this type"),
NegateOn(ref const_val) => simple!("negate on {}", const_val.description()),
NotOn(ref const_val) => simple!("not on {}", const_val.description()),
- CallOn(ref const_val) => simple!("call on {}", const_val.description()),
MissingStructField => simple!("nonexistent struct field"),
NonConstPath => simple!("non-constant path in constant expression"),
{
let count_expr = &tcx.hir.body(count).value;
let count_def_id = tcx.hir.body_owner_def_id(count);
- match ty::queries::monomorphic_const_eval::get(tcx, count_expr.span, count_def_id) {
+ let substs = Substs::empty();
+ match tcx.at(count_expr.span).const_eval((count_def_id, substs)) {
Ok(Integral(Usize(count))) => {
let val = count.as_u64(tcx.sess.target.uint_type);
assert_eq!(val as usize as u64, val);
fn visibility(&self, def: DefId) -> ty::Visibility;
fn visible_parent_map<'a>(&'a self) -> ::std::cell::Ref<'a, DefIdMap<DefId>>;
fn item_generics_cloned(&self, def: DefId) -> ty::Generics;
- fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute>;
+ fn item_attrs(&self, def_id: DefId) -> Rc<[ast::Attribute]>;
fn fn_arg_names(&self, did: DefId) -> Vec<ast::Name>;
// trait info
fn implementations_of_trait(&self, filter: Option<DefId>) -> Vec<DefId>;
// impl info
- fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity;
fn impl_parent(&self, impl_def_id: DefId) -> Option<DefId>;
// trait/impl-item info
fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro;
// misc. metadata
- fn maybe_get_item_body<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
- -> Option<&'tcx hir::Body>;
+ fn item_body<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> &'tcx hir::Body;
fn item_body_nested_bodies(&self, def: DefId) -> BTreeMap<hir::BodyId, hir::Body>;
fn const_is_rvalue_promotable_to_static(&self, def: DefId) -> bool;
}
fn item_generics_cloned(&self, def: DefId) -> ty::Generics
{ bug!("item_generics_cloned") }
- fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute> { bug!("item_attrs") }
+ fn item_attrs(&self, def_id: DefId) -> Rc<[ast::Attribute]> { bug!("item_attrs") }
fn fn_arg_names(&self, did: DefId) -> Vec<ast::Name> { bug!("fn_arg_names") }
// trait info
fn implementations_of_trait(&self, filter: Option<DefId>) -> Vec<DefId> { vec![] }
// impl info
- fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity { bug!("impl_polarity") }
fn impl_parent(&self, def: DefId) -> Option<DefId> { bug!("impl_parent") }
// trait/impl-item info
fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro { bug!("load_macro") }
// misc. metadata
- fn maybe_get_item_body<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
- -> Option<&'tcx hir::Body> {
- bug!("maybe_get_item_body")
+ fn item_body<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
+ -> &'tcx hir::Body {
+ bug!("item_body")
}
fn item_body_nested_bodies(&self, def: DefId) -> BTreeMap<hir::BodyId, hir::Body> {
bug!("item_body_nested_bodies")
use syntax::{ast, codemap};
use syntax::attr;
-use syntax::codemap::DUMMY_SP;
use syntax_pos;
// Any local node that may call something in its body block should be
match item.node {
hir::ItemStruct(..) | hir::ItemUnion(..) => {
let def_id = self.tcx.hir.local_def_id(item.id);
- let def = self.tcx.lookup_adt_def(def_id);
+ let def = self.tcx.adt_def(def_id);
self.struct_has_extern_repr = def.repr.c();
intravisit::walk_item(self, &item);
}
fn should_warn_about_field(&mut self, field: &hir::StructField) -> bool {
- let field_type = self.tcx.item_type(self.tcx.hir.local_def_id(field.id));
+ let field_type = self.tcx.type_of(self.tcx.hir.local_def_id(field.id));
let is_marker_field = match field_type.ty_to_def_id() {
Some(def_id) => self.tcx.lang_items.items().iter().any(|item| *item == Some(def_id)),
_ => false
}
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- let access_levels = &ty::queries::privacy_access_levels::get(tcx, DUMMY_SP, LOCAL_CRATE);
+ let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
let krate = tcx.hir.krate();
let live_symbols = find_live(tcx, access_levels, krate);
let mut visitor = DeadVisitor { tcx: tcx, live_symbols: live_symbols };
hir::ExprMatch(ref discr, ref arms, _) => {
let discr_cmt = return_if_err!(self.mc.cat_expr(&discr));
- let r = self.tcx().mk_region(ty::ReEmpty);
+ let r = self.tcx().types.re_empty;
self.borrow_expr(&discr, r, ty::ImmBorrow, MatchDiscriminant);
// treatment of the discriminant is handled while walking the arms.
Def::Variant(variant_did) |
Def::VariantCtor(variant_did, ..) => {
let enum_did = tcx.parent_def_id(variant_did).unwrap();
- let downcast_cmt = if tcx.lookup_adt_def(enum_did).is_univariant() {
+ let downcast_cmt = if tcx.adt_def(enum_did).is_univariant() {
cmt_pat
} else {
let cmt_pat_ty = cmt_pat.ty;
impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
- let intrinsic = match self.infcx.tcx.item_type(def_id).sty {
+ let intrinsic = match self.infcx.tcx.type_of(def_id).sty {
ty::TyFnDef(.., bfty) => bfty.abi() == RustIntrinsic,
_ => return false
};
pub fn extract(attrs: &[ast::Attribute]) -> Option<Symbol> {
for attribute in attrs {
- match attribute.value_str() {
- Some(value) if attribute.check_name("lang") => return Some(value),
- _ => {}
+ if attribute.check_name("lang") {
+ if let Some(value) = attribute.value_str() {
+ return Some(value)
+ }
}
}
UnsizeTraitLangItem, "unsize", unsize_trait;
CopyTraitLangItem, "copy", copy_trait;
SyncTraitLangItem, "sync", sync_trait;
+ FreezeTraitLangItem, "freeze", freeze_trait;
DropTraitLangItem, "drop", drop_trait;
entry_ln: LiveNode,
body: &hir::Body)
{
- let fn_ty = self.ir.tcx.item_type(self.ir.tcx.hir.local_def_id(id));
+ let fn_ty = self.ir.tcx.type_of(self.ir.tcx.hir.local_def_id(id));
let fn_sig = match fn_ty.sty {
ty::TyClosure(closure_def_id, substs) => {
self.ir.tcx.closure_type(closure_def_id)
// we can promote to a constant, otherwise equal to enclosing temp
// lifetime.
let (re, old_re) = if promotable {
- (self.tcx().mk_region(ty::ReStatic),
- self.tcx().mk_region(ty::ReStatic))
+ (self.tcx().types.re_static,
+ self.tcx().types.re_static)
} else {
self.temporary_scope(id)
};
Def::VariantCtor(variant_did, ..) => {
// univariant enums do not need downcasts
let enum_did = self.tcx().parent_def_id(variant_did).unwrap();
- if !self.tcx().lookup_adt_def(enum_did).is_univariant() {
+ if !self.tcx().adt_def(enum_did).is_univariant() {
self.cat_downcast(pat, cmt.clone(), cmt.ty, variant_did)
} else {
cmt
let expected_len = match def {
Def::VariantCtor(def_id, CtorKind::Fn) => {
let enum_def = self.tcx().parent_def_id(def_id).unwrap();
- self.tcx().lookup_adt_def(enum_def).variant_with_id(def_id).fields.len()
+ self.tcx().adt_def(enum_def).variant_with_id(def_id).fields.len()
}
Def::StructCtor(_, CtorKind::Fn) => {
match self.pat_ty(&pat)?.sty {
use hir::map as hir_map;
use hir::def::Def;
use hir::def_id::{DefId, CrateNum};
+use std::rc::Rc;
use ty::{self, TyCtxt};
use ty::maps::Providers;
use middle::privacy;
use syntax::abi::Abi;
use syntax::ast;
use syntax::attr;
-use syntax::codemap::DUMMY_SP;
use hir;
use hir::def_id::LOCAL_CRATE;
use hir::intravisit::{Visitor, NestedVisitorMap};
}
}
-pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> NodeSet {
- ty::queries::reachable_set::get(tcx, DUMMY_SP, LOCAL_CRATE)
+pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Rc<NodeSet> {
+ tcx.reachable_set(LOCAL_CRATE)
}
-fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> NodeSet {
+fn reachable_set<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> Rc<NodeSet> {
debug_assert!(crate_num == LOCAL_CRATE);
- let access_levels = &ty::queries::privacy_access_levels::get(tcx, DUMMY_SP, LOCAL_CRATE);
+ let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| {
*ty == config::CrateTypeRlib || *ty == config::CrateTypeDylib ||
reachable_context.propagate();
// Return the set of reachable symbols.
- reachable_context.reachable_symbols
+ Rc::new(reachable_context.reachable_symbols)
}
pub fn provide(providers: &mut Providers) {
pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
let sess = &tcx.sess;
- let access_levels = &ty::queries::privacy_access_levels::get(tcx, DUMMY_SP, LOCAL_CRATE);
+ let access_levels = &tcx.privacy_access_levels(LOCAL_CRATE);
if tcx.stability.borrow().staged_api[&LOCAL_CRATE] && tcx.sess.features.borrow().staged_api {
let krate = tcx.hir.krate();
) -> Self {
Operand::Constant(Constant {
span: span,
- ty: tcx.item_type(def_id).subst(tcx, substs),
+ ty: tcx.type_of(def_id).subst(tcx, substs),
literal: Literal::Value { value: ConstVal::Function(def_id, substs) },
})
}
write!(fmt, "b\"{}\"", escaped)
}
Bool(b) => write!(fmt, "{:?}", b),
+ Char(c) => write!(fmt, "{:?}", c),
+ Variant(def_id) |
Function(def_id, _) => write!(fmt, "{}", item_path_str(def_id)),
Struct(_) | Tuple(_) | Array(_) | Repeat(..) =>
bug!("ConstVal `{:?}` should not be in MIR", const_val),
- Char(c) => write!(fmt, "{:?}", c),
}
}
)
}
AggregateKind::Adt(def, _, substs, _) => {
- tcx.item_type(def.did).subst(tcx, substs)
+ tcx.type_of(def.did).subst(tcx, substs)
}
AggregateKind::Closure(did, substs) => {
tcx.mk_closure_from_closure_substs(did, substs)
pub uint_type: UintTy,
}
-#[derive(Clone, Hash)]
+#[derive(Clone, Hash, Debug)]
pub enum Sanitizer {
Address,
Leak,
let mut self_match_impls = vec![];
let mut fuzzy_match_impls = vec![];
- self.tcx.lookup_trait_def(trait_ref.def_id)
+ self.tcx.trait_def(trait_ref.def_id)
.for_each_relevant_impl(self.tcx, trait_self_ty, |def_id| {
let impl_substs = self.fresh_substs_for_item(obligation.cause.span, def_id);
let impl_trait_ref = tcx
let trait_str = self.tcx.item_path_str(trait_ref.def_id);
if let Some(istring) = item.value_str() {
let istring = &*istring.as_str();
- let generics = self.tcx.item_generics(trait_ref.def_id);
+ let generics = self.tcx.generics_of(trait_ref.def_id);
let generic_map = generics.types.iter().map(|param| {
(param.name.as_str().to_string(),
trait_ref.substs.type_for_def(param).to_string())
trait_ref.skip_binder().self_ty(),
true);
let mut impl_candidates = Vec::new();
- let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id());
+ let trait_def = self.tcx.trait_def(trait_ref.def_id());
match simp {
Some(simp) => trait_def.for_each_impl(self.tcx, |def_id| {
// Otherwise, we have something of the form
// `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`.
Some(t_a) => {
- let r_static = selcx.tcx().mk_region(ty::ReStatic);
+ let r_static = selcx.tcx().types.re_static;
register_region_obligation(t_a, r_static,
obligation.cause.clone(),
region_obligations);
mod select;
mod specialize;
mod structural_impls;
+pub mod trans;
mod util;
/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for
// the method may have some early-bound lifetimes, add
// regions for those
let substs = Substs::for_item(tcx, def_id,
- |_, _| tcx.mk_region(ty::ReErased),
+ |_, _| tcx.types.re_erased,
|def, _| trait_ref.substs().type_for_def(def));
// the trait type may have higher-ranked lifetimes in it;
// do not hold for this particular set of type parameters.
// Note that this method could then never be called, so we
// do not want to try and trans it, in that case (see #23435).
- let predicates = tcx.item_predicates(def_id).instantiate_own(tcx, substs);
+ let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
if !normalize_and_test_predicates(tcx, predicates.predicates) {
debug!("get_vtable_methods: predicates do not hold");
return None;
/// Method has something illegal
Method(ast::Name, MethodViolationCode),
+
+ /// Associated const
+ AssociatedConst(ast::Name),
}
impl ObjectSafetyViolation {
in its arguments or return type", name).into(),
ObjectSafetyViolation::Method(name, MethodViolationCode::Generic) =>
format!("method `{}` has generic type parameters", name).into(),
+ ObjectSafetyViolation::AssociatedConst(name) =>
+ format!("the trait cannot contain associated consts like `{}`", name).into(),
}
}
}
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn is_object_safe(self, trait_def_id: DefId) -> bool {
// Because we query yes/no results frequently, we keep a cache:
- let def = self.lookup_trait_def(trait_def_id);
+ let def = self.trait_def(trait_def_id);
let result = def.object_safety().unwrap_or_else(|| {
let result = self.object_safety_violations(trait_def_id).is_empty();
violations.push(ObjectSafetyViolation::SupertraitSelf);
}
+ violations.extend(self.associated_items(trait_def_id)
+ .filter(|item| item.kind == ty::AssociatedKind::Const)
+ .map(|item| ObjectSafetyViolation::AssociatedConst(item.name)));
+
debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
trait_def_id,
violations);
substs: Substs::identity_for_item(self, trait_def_id)
});
let predicates = if supertraits_only {
- self.item_super_predicates(trait_def_id)
+ self.super_predicates_of(trait_def_id)
} else {
- self.item_predicates(trait_def_id)
+ self.predicates_of(trait_def_id)
};
predicates
.predicates
// Search for a predicate like `Self : Sized` amongst the trait bounds.
let free_substs = self.construct_free_substs(def_id,
self.region_maps.node_extent(ast::DUMMY_NODE_ID));
- let predicates = self.item_predicates(def_id);
+ let predicates = self.predicates_of(def_id);
let predicates = predicates.instantiate(self, free_substs).predicates;
elaborate_predicates(self, predicates)
.any(|predicate| {
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
- let ref sig = self.item_type(method.def_id).fn_sig();
+ let ref sig = self.type_of(method.def_id).fn_sig();
for input_ty in &sig.skip_binder().inputs()[1..] {
if self.contains_illegal_self_type_reference(trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
// We can't monomorphize things like `fn foo<A>(...)`.
- if !self.item_generics(method.def_id).types.is_empty() {
+ if !self.generics_of(method.def_id).types.is_empty() {
return Some(MethodViolationCode::Generic);
}
ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => { // (*)
// Only normalize `impl Trait` after type-checking, usually in trans.
if self.selcx.projection_mode() == Reveal::All {
- let generic_ty = self.tcx().item_type(def_id);
+ let generic_ty = self.tcx().type_of(def_id);
let concrete_ty = generic_ty.subst(self.tcx(), substs);
self.fold_ty(concrete_ty)
} else {
};
// If so, extract what we know from the trait and try to come up with a good answer.
- let trait_predicates = selcx.tcx().item_predicates(def_id);
+ let trait_predicates = selcx.tcx().predicates_of(def_id);
let bounds = trait_predicates.instantiate(selcx.tcx(), substs);
let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates);
assemble_candidates_from_predicates(selcx,
obligation.predicate.trait_ref);
tcx.types.err
} else {
- tcx.item_type(node_item.item.def_id)
+ tcx.type_of(node_item.item.def_id)
};
let substs = translate_substs(selcx.infcx(), impl_def_id, substs, node_item.node);
Progress {
-> Option<specialization_graph::NodeItem<ty::AssociatedItem>>
{
let trait_def_id = selcx.tcx().impl_trait_ref(impl_def_id).unwrap().def_id;
- let trait_def = selcx.tcx().lookup_trait_def(trait_def_id);
+ let trait_def = selcx.tcx().trait_def(trait_def_id);
if !trait_def.is_complete(selcx.tcx()) {
let impl_node = specialization_graph::Node::Impl(impl_def_id);
fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>)
-> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
if let ImplCandidate(def_id) = candidate {
- if self.tcx().trait_impl_polarity(def_id) == hir::ImplPolarity::Negative {
+ if self.tcx().impl_polarity(def_id) == hir::ImplPolarity::Negative {
return Err(Unimplemented)
}
}
debug!("Retaining candidate #{}/{}: {:?}",
i, candidates.len(), candidates[i]);
i += 1;
+
+ // If there are *STILL* multiple candidates, give up
+ // and report ambiguity.
+ if i > 1 {
+ debug!("multiple matches, ambig");
+ return Ok(None);
+ }
}
}
}
- // If there are *STILL* multiple candidates, give up and
- // report ambiguity.
- if candidates.len() > 1 {
- debug!("multiple matches, ambig");
- return Ok(None);
- }
-
// If there are *NO* candidates, then there are no impls --
// that we know of, anyway. Note that in the case where there
// are unbound type variables within the obligation, it might
def_id={:?}, substs={:?}",
def_id, substs);
- let item_predicates = self.tcx().item_predicates(def_id);
- let bounds = item_predicates.instantiate(self.tcx(), substs);
+ let predicates_of = self.tcx().predicates_of(def_id);
+ let bounds = predicates_of.instantiate(self.tcx(), substs);
debug!("match_projection_obligation_against_definition_bounds: \
bounds={:?}",
bounds);
.iter()
.filter_map(|o| o.to_opt_poly_trait_ref());
+ // micro-optimization: filter out predicates relating to different
+ // traits.
+ let matching_bounds =
+ all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());
+
let matching_bounds =
- all_bounds.filter(
+ matching_bounds.filter(
|bound| self.evaluate_where_clause(stack, bound.clone()).may_apply());
let param_candidates =
{
debug!("assemble_candidates_from_impls(obligation={:?})", obligation);
- let def = self.tcx().lookup_trait_def(obligation.predicate.def_id());
+ let def = self.tcx().trait_def(obligation.predicate.def_id());
def.for_each_relevant_impl(
self.tcx(),
ty::TyAdt(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
- Where(ty::Binder(match sized_crit.sty {
- ty::TyTuple(tys, _) => tys.to_vec().subst(self.tcx(), substs),
- ty::TyBool => vec![],
- _ => vec![sized_crit.subst(self.tcx(), substs)]
- }))
+ Where(ty::Binder(
+ sized_crit.iter().map(|ty| ty.subst(self.tcx(), substs)).collect()
+ ))
}
ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None,
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
- vec![self.tcx().item_type(def_id).subst(self.tcx(), substs)]
+ vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
}
}
}
(&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => {
let fields = def
.all_fields()
- .map(|f| tcx.item_type(f.did))
+ .map(|f| tcx.type_of(f.did))
.collect::<Vec<_>>();
// The last field of the structure has to exist and contain type parameters.
// obligation will normalize to `<$0 as Iterator>::Item = $1` and
// `$1: Copy`, so we must ensure the obligations are emitted in
// that order.
- let predicates = tcx.item_predicates(def_id);
+ let predicates = tcx.predicates_of(def_id);
assert_eq!(predicates.parent, None);
let predicates = predicates.predicates.iter().flat_map(|predicate| {
let predicate = normalize_with_depth(self, cause.clone(), recursion_depth,
assert!(!substs.needs_infer());
let trait_def_id = tcx.trait_id_of_impl(impl_data.impl_def_id).unwrap();
- let trait_def = tcx.lookup_trait_def(trait_def_id);
+ let trait_def = tcx.trait_def(trait_def_id);
let ancestors = trait_def.ancestors(impl_data.impl_def_id);
match ancestors.defs(tcx, item.name, item.kind).next() {
// See RFC 1210 for more details and justification.
// Currently we do not allow e.g. a negative impl to specialize a positive one
- if tcx.trait_impl_polarity(impl1_def_id) != tcx.trait_impl_polarity(impl2_def_id) {
+ if tcx.impl_polarity(impl1_def_id) != tcx.impl_polarity(impl2_def_id) {
return false;
}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This file contains various trait resolution methods used by trans.
+// They all assume regions can be erased and monomorphic types. It
+// seems likely that they should eventually be merged into more
+// general routines.
+
+use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
+use hir::def_id::DefId;
+use infer::TransNormalize;
+use std::cell::RefCell;
+use std::marker::PhantomData;
+use syntax::ast;
+use syntax_pos::Span;
+use traits::{FulfillmentContext, Obligation, ObligationCause, Reveal, SelectionContext, Vtable};
+use ty::{self, Ty, TyCtxt};
+use ty::subst::{Subst, Substs};
+use ty::fold::{TypeFoldable, TypeFolder};
+use util::common::MemoizationMap;
+
+impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
+ /// Attempts to resolve an obligation to a vtable. The result is
+ /// a shallow vtable resolution -- meaning that we do not
+ /// (necessarily) resolve all nested obligations on the impl. Note
+ /// that type check should guarantee to us that all nested
+ /// obligations *could be* resolved if we wanted to.
+ pub fn trans_fulfill_obligation(self,
+ span: Span,
+ trait_ref: ty::PolyTraitRef<'tcx>)
+ -> Vtable<'tcx, ()>
+ {
+ // Remove any references to regions; this helps improve caching.
+ let trait_ref = self.erase_regions(&trait_ref);
+
+ self.trans_trait_caches.trait_cache.memoize(trait_ref, || {
+ debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
+ trait_ref, trait_ref.def_id());
+
+ // Do the initial selection for the obligation. This yields the
+ // shallow result we are looking for -- that is, what specific impl.
+ self.infer_ctxt((), Reveal::All).enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+
+ let obligation_cause = ObligationCause::misc(span,
+ ast::DUMMY_NODE_ID);
+ let obligation = Obligation::new(obligation_cause,
+ trait_ref.to_poly_trait_predicate());
+
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(selection)) => selection,
+ Ok(None) => {
+ // Ambiguity can happen when monomorphizing during trans
+ // expands to some humongo type that never occurred
+ // statically -- this humongo type can then overflow,
+ // leading to an ambiguous result. So report this as an
+ // overflow bug, since I believe this is the only case
+ // where ambiguity can result.
+ debug!("Encountered ambiguity selecting `{:?}` during trans, \
+ presuming due to overflow",
+ trait_ref);
+ self.sess.span_fatal(span,
+ "reached the recursion limit during monomorphization \
+ (selection ambiguity)");
+ }
+ Err(e) => {
+ span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
+ e, trait_ref)
+ }
+ };
+
+ debug!("fulfill_obligation: selection={:?}", selection);
+
+ // Currently, we use a fulfillment context to completely resolve
+ // all nested obligations. This is because they can inform the
+ // inference of the impl's type parameters.
+ let mut fulfill_cx = FulfillmentContext::new();
+ let vtable = selection.map(|predicate| {
+ debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ });
+ let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
+
+ info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
+ vtable
+ })
+ })
+ }
+
+ /// Monomorphizes a type from the AST by first applying the in-scope
+ /// substitutions and then normalizing any associated types.
+ pub fn trans_apply_param_substs<T>(self,
+ param_substs: &Substs<'tcx>,
+ value: &T)
+ -> T
+ where T: TransNormalize<'tcx>
+ {
+ debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
+ let substituted = value.subst(self, param_substs);
+ let substituted = self.erase_regions(&substituted);
+ AssociatedTypeNormalizer::new(self).fold(&substituted)
+ }
+}
+
+struct AssociatedTypeNormalizer<'a, 'gcx: 'a> {
+ tcx: TyCtxt<'a, 'gcx, 'gcx>,
+}
+
+impl<'a, 'gcx> AssociatedTypeNormalizer<'a, 'gcx> {
+ fn new(tcx: TyCtxt<'a, 'gcx, 'gcx>) -> Self {
+ AssociatedTypeNormalizer { tcx }
+ }
+
+ fn fold<T:TypeFoldable<'gcx>>(&mut self, value: &T) -> T {
+ if !value.has_projection_types() {
+ value.clone()
+ } else {
+ value.fold_with(self)
+ }
+ }
+}
+
+impl<'a, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'gcx> {
+ fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> {
+ if !ty.has_projection_types() {
+ ty
+ } else {
+ self.tcx.trans_trait_caches.project_cache.memoize(ty, || {
+ debug!("AssociatedTypeNormalizer: ty={:?}", ty);
+ self.tcx.normalize_associated_type(&ty)
+ })
+ }
+ }
+}
+
+/// Specialized caches used in trans -- in particular, they assume all
+/// types are fully monomorphized and that free regions can be erased.
+pub struct TransTraitCaches<'tcx> {
+ trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
+ project_cache: RefCell<DepTrackingMap<ProjectionCache<'tcx>>>,
+}
+
+impl<'tcx> TransTraitCaches<'tcx> {
+ pub fn new(graph: DepGraph) -> Self {
+ TransTraitCaches {
+ trait_cache: RefCell::new(DepTrackingMap::new(graph.clone())),
+ project_cache: RefCell::new(DepTrackingMap::new(graph)),
+ }
+ }
+}
+
+// Implement DepTrackingMapConfig for `trait_cache`
+pub struct TraitSelectionCache<'tcx> {
+ data: PhantomData<&'tcx ()>
+}
+
+impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
+ type Key = ty::PolyTraitRef<'tcx>;
+ type Value = Vtable<'tcx, ()>;
+ fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
+ key.to_poly_trait_predicate().dep_node()
+ }
+}
+
+// # Global Cache
+
+pub struct ProjectionCache<'gcx> {
+ data: PhantomData<&'gcx ()>
+}
+
+impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> {
+ type Key = Ty<'gcx>;
+ type Value = Ty<'gcx>;
+ fn to_dep_node(key: &Self::Key) -> DepNode<DefId> {
+ // Ideally, we'd just put `key` into the dep-node, but we
+ // can't put full types in there. So just collect up all the
+ // def-ids of structs/enums as well as any traits that we
+ // project out of. It doesn't matter so much what we do here,
+ // except that if we are too coarse, we'll create overly
+ // coarse edges between impls and the trans. For example, if
+ // we just used the def-id of things we are projecting out of,
+ // then the key for `<Foo as SomeTrait>::T` and `<Bar as
+ // SomeTrait>::T` would both share a dep-node
+ // (`TraitSelect(SomeTrait)`), and hence the impls for both
+ // `Foo` and `Bar` would be considered inputs. So a change to
+ // `Bar` would affect things that just normalized `Foo`.
+ // Anyway, this heuristic is not ideal, but better than
+ // nothing.
+ let def_ids: Vec<DefId> =
+ key.walk()
+ .filter_map(|t| match t.sty {
+ ty::TyAdt(adt_def, _) => Some(adt_def.did),
+ ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
+ _ => None,
+ })
+ .collect();
+
+ DepNode::ProjectionCache { def_ids: def_ids }
+ }
+}
+
match *predicate {
ty::Predicate::Trait(ref data) => {
// Predicates declared on the trait.
- let predicates = tcx.item_super_predicates(data.def_id());
+ let predicates = tcx.super_predicates_of(data.def_id());
let mut predicates: Vec<_> =
predicates.predicates
None => { return None; }
};
- let predicates = self.tcx.item_super_predicates(def_id);
+ let predicates = self.tcx.super_predicates_of(def_id);
let visited = &mut self.visited;
self.stack.extend(
predicates.predicates
let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
super::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref);
- let predicates = selcx.tcx().item_predicates(impl_def_id);
+ let predicates = selcx.tcx().predicates_of(impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
let Normalized { value: predicates, obligations: normalization_obligations2 } =
super::normalize(selcx, ObligationCause::dummy(), &predicates);
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use hir::def_id::{DefId};
-use ty::{self, Ty, TyCtxt};
-use util::common::MemoizationMap;
-use util::nodemap::FxHashMap;
-
-use std::fmt;
-use std::ops;
-
-use syntax::ast;
-
-/// Type contents is how the type checker reasons about kinds.
-/// They track what kinds of things are found within a type. You can
-/// think of them as kind of an "anti-kind". They track the kinds of values
-/// and thinks that are contained in types. Having a larger contents for
-/// a type tends to rule that type *out* from various kinds. For example,
-/// a type that contains a reference is not sendable.
-///
-/// The reason we compute type contents and not kinds is that it is
-/// easier for me (nmatsakis) to think about what is contained within
-/// a type than to think about what is *not* contained within a type.
-#[derive(Clone, Copy)]
-pub struct TypeContents {
- pub bits: u64
-}
-
-macro_rules! def_type_content_sets {
- (mod $mname:ident { $($name:ident = $bits:expr),+ }) => {
- #[allow(non_snake_case)]
- mod $mname {
- use super::TypeContents;
- $(
- #[allow(non_upper_case_globals)]
- pub const $name: TypeContents = TypeContents { bits: $bits };
- )+
- }
- }
-}
-
-def_type_content_sets! {
- mod TC {
- None = 0b0000_0000__0000_0000__0000,
-
- // Things that are interior to the value (first nibble):
- InteriorUnsafe = 0b0000_0000__0000_0000__0010,
- InteriorParam = 0b0000_0000__0000_0000__0100,
- // InteriorAll = 0b00000000__00000000__1111,
-
- // Things that are owned by the value (second and third nibbles):
- OwnsDtor = 0b0000_0000__0000_0010__0000,
- // OwnsAll = 0b0000_0000__1111_1111__0000,
-
- // All bits
- All = 0b1111_1111__1111_1111__1111
- }
-}
-
-impl TypeContents {
- pub fn when(&self, cond: bool) -> TypeContents {
- if cond {*self} else {TC::None}
- }
-
- pub fn intersects(&self, tc: TypeContents) -> bool {
- (self.bits & tc.bits) != 0
- }
-
- pub fn interior_param(&self) -> bool {
- self.intersects(TC::InteriorParam)
- }
-
- pub fn interior_unsafe(&self) -> bool {
- self.intersects(TC::InteriorUnsafe)
- }
-
- pub fn needs_drop(&self, _: TyCtxt) -> bool {
- self.intersects(TC::OwnsDtor)
- }
-
- pub fn union<I, T, F>(v: I, mut f: F) -> TypeContents where
- I: IntoIterator<Item=T>,
- F: FnMut(T) -> TypeContents,
- {
- v.into_iter().fold(TC::None, |tc, ty| tc | f(ty))
- }
-}
-
-impl ops::BitOr for TypeContents {
- type Output = TypeContents;
-
- fn bitor(self, other: TypeContents) -> TypeContents {
- TypeContents {bits: self.bits | other.bits}
- }
-}
-
-impl ops::BitAnd for TypeContents {
- type Output = TypeContents;
-
- fn bitand(self, other: TypeContents) -> TypeContents {
- TypeContents {bits: self.bits & other.bits}
- }
-}
-
-impl ops::Sub for TypeContents {
- type Output = TypeContents;
-
- fn sub(self, other: TypeContents) -> TypeContents {
- TypeContents {bits: self.bits & !other.bits}
- }
-}
-
-impl fmt::Debug for TypeContents {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "TypeContents({:b})", self.bits)
- }
-}
-
-impl<'a, 'tcx> ty::TyS<'tcx> {
- pub fn type_contents(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> TypeContents {
- return tcx.tc_cache.memoize(self, || tc_ty(tcx, self, &mut FxHashMap()));
-
- fn tc_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- ty: Ty<'tcx>,
- cache: &mut FxHashMap<Ty<'tcx>, TypeContents>) -> TypeContents
- {
- // Subtle: Note that we are *not* using tcx.tc_cache here but rather a
- // private cache for this walk. This is needed in the case of cyclic
- // types like:
- //
- // struct List { next: Box<Option<List>>, ... }
- //
- // When computing the type contents of such a type, we wind up deeply
- // recursing as we go. So when we encounter the recursive reference
- // to List, we temporarily use TC::None as its contents. Later we'll
- // patch up the cache with the correct value, once we've computed it
- // (this is basically a co-inductive process, if that helps). So in
- // the end we'll compute TC::OwnsOwned, in this case.
- //
- // The problem is, as we are doing the computation, we will also
- // compute an *intermediate* contents for, e.g., Option<List> of
- // TC::None. This is ok during the computation of List itself, but if
- // we stored this intermediate value into tcx.tc_cache, then later
- // requests for the contents of Option<List> would also yield TC::None
- // which is incorrect. This value was computed based on the crutch
- // value for the type contents of list. The correct value is
- // TC::OwnsOwned. This manifested as issue #4821.
- if let Some(tc) = cache.get(&ty) {
- return *tc;
- }
- // Must check both caches!
- if let Some(tc) = tcx.tc_cache.borrow().get(&ty) {
- return *tc;
- }
- cache.insert(ty, TC::None);
-
- let result = match ty.sty {
- // usize and isize are ffi-unsafe
- ty::TyUint(ast::UintTy::Us) | ty::TyInt(ast::IntTy::Is) => {
- TC::None
- }
-
- // Scalar and unique types are sendable, and durable
- ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
- ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever |
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar => {
- TC::None
- }
-
- ty::TyDynamic(..) => {
- TC::All - TC::InteriorParam
- }
-
- ty::TyRawPtr(_) => {
- TC::None
- }
-
- ty::TyRef(..) => {
- TC::None
- }
-
- ty::TyArray(ty, _) => {
- tc_ty(tcx, ty, cache)
- }
-
- ty::TySlice(ty) => {
- tc_ty(tcx, ty, cache)
- }
- ty::TyStr => TC::None,
-
- ty::TyClosure(def_id, ref substs) => {
- TypeContents::union(
- substs.upvar_tys(def_id, tcx),
- |ty| tc_ty(tcx, &ty, cache))
- }
-
- ty::TyTuple(ref tys, _) => {
- TypeContents::union(&tys[..],
- |ty| tc_ty(tcx, *ty, cache))
- }
-
- ty::TyAdt(def, substs) => {
- let mut res =
- TypeContents::union(&def.variants, |v| {
- TypeContents::union(&v.fields, |f| {
- tc_ty(tcx, f.ty(tcx, substs), cache)
- })
- });
-
- if def.is_union() {
- // unions don't have destructors regardless of the child types
- res = res - TC::OwnsDtor;
- }
-
- if def.has_dtor(tcx) {
- res = res | TC::OwnsDtor;
- }
-
- apply_lang_items(tcx, def.did, res)
- }
-
- ty::TyProjection(..) |
- ty::TyParam(_) |
- ty::TyAnon(..) => {
- TC::All
- }
-
- ty::TyInfer(_) |
- ty::TyError => {
- bug!("asked to compute contents of error type");
- }
- };
-
- cache.insert(ty, result);
- result
- }
-
- fn apply_lang_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- did: DefId, tc: TypeContents)
- -> TypeContents {
- if Some(did) == tcx.lang_items.unsafe_cell_type() {
- tc | TC::InteriorUnsafe
- } else {
- tc
- }
- }
- }
-}
pub f64: Ty<'tcx>,
pub never: Ty<'tcx>,
pub err: Ty<'tcx>,
+
+ pub re_empty: &'tcx Region,
+ pub re_static: &'tcx Region,
+ pub re_erased: &'tcx Region,
}
#[derive(RustcEncodable, RustcDecodable)]
impl<'tcx> CommonTypes<'tcx> {
fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> {
let mk = |sty| interners.intern_ty(sty, None);
+ let mk_region = |r| {
+ if let Some(r) = interners.region.borrow().get(&r) {
+ return r.0;
+ }
+ let r = interners.arena.alloc(r);
+ interners.region.borrow_mut().insert(Interned(r));
+ &*r
+ };
CommonTypes {
bool: mk(TyBool),
char: mk(TyChar),
u128: mk(TyUint(ast::UintTy::U128)),
f32: mk(TyFloat(ast::FloatTy::F32)),
f64: mk(TyFloat(ast::FloatTy::F64)),
+
+ re_empty: mk_region(Region::ReEmpty),
+ re_static: mk_region(Region::ReStatic),
+ re_erased: mk_region(Region::ReErased),
}
}
}
pub specializes_cache: RefCell<traits::SpecializesCache>,
+ pub trans_trait_caches: traits::trans::TransTraitCaches<'tcx>,
+
pub dep_graph: DepGraph,
/// Common types, pre-interned for your convenience.
// Internal cache for metadata decoding. No need to track deps on this.
pub rcache: RefCell<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
- // Cache for the type-contents routine. FIXME -- track deps?
- pub tc_cache: RefCell<FxHashMap<Ty<'tcx>, ty::contents::TypeContents>>,
-
// FIXME dep tracking -- should be harmless enough
pub normalized_cache: RefCell<FxHashMap<Ty<'tcx>, Ty<'tcx>>>,
providers[LOCAL_CRATE] = local_providers;
tls::enter_global(GlobalCtxt {
sess: s,
+ trans_trait_caches: traits::trans::TransTraitCaches::new(dep_graph.clone()),
specializes_cache: RefCell::new(traits::SpecializesCache::new()),
global_arenas: arenas,
global_interners: interners,
freevars: RefCell::new(resolutions.freevars),
maybe_unused_trait_imports: resolutions.maybe_unused_trait_imports,
rcache: RefCell::new(FxHashMap()),
- tc_cache: RefCell::new(FxHashMap()),
normalized_cache: RefCell::new(FxHashMap()),
inhabitedness_cache: RefCell::new(FxHashMap()),
lang_items: lang_items,
}
pub fn mk_static_str(self) -> Ty<'tcx> {
- self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str())
+ self.mk_imm_ref(self.types.re_static, self.mk_str())
}
pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
let def_id = self.require_lang_item(lang_items::OwnedBoxLangItem);
- let adt_def = self.lookup_adt_def(def_id);
+ let adt_def = self.adt_def(def_id);
let substs = self.mk_substs(iter::once(Kind::from(ty)));
self.mk_ty(TyAdt(adt_def, substs))
}
pub fn erase_late_bound_regions<T>(self, value: &Binder<T>) -> T
where T : TypeFoldable<'tcx>
{
- self.replace_late_bound_regions(value, |_| self.mk_region(ty::ReErased)).0
+ self.replace_late_bound_regions(value, |_| self.types.re_erased).0
}
/// Rewrite any late-bound regions so that they are anonymous. Region numbers are
// whenever a substitution occurs.
match *r {
ty::ReLateBound(..) => r,
- _ => self.tcx().mk_region(ty::ReErased)
+ _ => self.tcx().types.re_erased
}
}
}
}
}
+pub fn shift_region_ref<'a, 'gcx, 'tcx>(
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ region: &'tcx ty::Region,
+ amount: u32)
+ -> &'tcx ty::Region
+{
+ match region {
+ &ty::ReLateBound(debruijn, br) if amount > 0 => {
+ tcx.mk_region(ty::ReLateBound(debruijn.shifted(amount), br))
+ }
+ _ => {
+ region
+ }
+ }
+}
+
pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
amount: u32, value: &T) -> T
where T: TypeFoldable<'tcx>
value, amount);
value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| {
- tcx.mk_region(shift_region(*region, amount))
+ shift_region_ref(tcx, region, amount)
}))
}
use ty::{self, Ty, TypeFoldable, Substs};
use util::ppaux;
-use std::borrow::Cow;
use std::fmt;
-use syntax::ast;
-
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct Instance<'tcx> {
#[inline]
pub fn def_ty<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
- tcx.item_type(self.def_id())
+ tcx.type_of(self.def_id())
}
#[inline]
- pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Cow<'tcx, [ast::Attribute]> {
+ pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> ty::Attributes<'tcx> {
tcx.get_attrs(self.def_id())
}
// for local crates, check whether type info is
// available; typeck might not have completed yet
self.maps.impl_trait_ref.borrow().contains_key(&impl_def_id) &&
- self.maps.ty.borrow().contains_key(&impl_def_id)
+ self.maps.type_of.borrow().contains_key(&impl_def_id)
};
if !use_types {
// users may find it useful. Currently, we omit the parent if
// the impl is either in the same module as the self-type or
// as the trait.
- let self_ty = self.item_type(impl_def_id);
+ let self_ty = self.type_of(impl_def_id);
let in_self_mod = match characteristic_def_id_of_type(self_ty) {
None => false,
Some(ty_def_id) => self.parent_def_id(ty_def_id) == Some(parent_def_id),
/// A structure, a product type in ADT terms.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Struct {
+ /// Maximum alignment of the fields, raised further by any repr(align) attribute.
pub align: Align,
+ /// Alignment determined by the field types alone, ignoring any repr(align) attribute.
+ pub primitive_align: Align,
+
/// If true, no alignment padding is used.
pub packed: bool,
fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
repr: &ReprOptions, kind: StructKind,
scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
- let packed = repr.packed();
+ if repr.packed() && repr.align > 0 {
+ bug!("Struct cannot be packed and aligned");
+ }
+
+ let align = if repr.packed() {
+ dl.i8_align
+ } else {
+ dl.aggregate_align
+ };
+
let mut ret = Struct {
- align: if packed { dl.i8_align } else { dl.aggregate_align },
- packed: packed,
+ align: align,
+ primitive_align: align,
+ packed: repr.packed(),
sized: true,
offsets: vec![],
memory_index: vec![],
// Invariant: offset < dl.obj_size_bound() <= 1<<61
if !ret.packed {
let align = field.align(dl);
+ let primitive_align = field.primitive_align(dl);
ret.align = ret.align.max(align);
+ ret.primitive_align = ret.primitive_align.max(primitive_align);
offset = offset.abi_align(align);
}
.map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
}
+ if repr.align > 0 {
+ let repr_align = repr.align as u64;
+ ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
+ debug!("Struct::new repr_align: {:?}", repr_align);
+ }
debug!("Struct::new min_size: {:?}", offset);
ret.min_size = offset;
}
(_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
- let normalized = normalize_associated_type(infcx, ty);
+ let normalized = infcx.normalize_projections(ty);
if ty == normalized {
return Ok(None);
}
}
Ok(None)
}
+
+ pub fn over_align(&self) -> Option<u32> {
+ let align = self.align.abi();
+ let primitive_align = self.primitive_align.abi();
+ if align > primitive_align {
+ Some(align as u32)
+ } else {
+ None
+ }
+ }
}
/// An untagged union.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Union {
pub align: Align,
+ pub primitive_align: Align,
pub min_size: Size,
impl<'a, 'gcx, 'tcx> Union {
fn new(dl: &TargetDataLayout, packed: bool) -> Union {
+ let align = if packed { dl.i8_align } else { dl.aggregate_align };
Union {
- align: if packed { dl.i8_align } else { dl.aggregate_align },
+ align: align,
+ primitive_align: align,
min_size: Size::from_bytes(0),
packed: packed,
}
if !self.packed {
self.align = self.align.max(field.align(dl));
+ self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
}
self.min_size = cmp::max(self.min_size, field.size(dl));
}
pub fn stride(&self) -> Size {
self.min_size.abi_align(self.align)
}
+
+ pub fn over_align(&self) -> Option<u32> {
+ let align = self.align.abi();
+ let primitive_align = self.primitive_align.abi();
+ if align > primitive_align {
+ Some(align as u32)
+ } else {
+ None
+ }
+ }
}
/// The first half of a fat pointer.
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
align: Align,
+ primitive_align: Align,
element_size: Size,
count: u64
},
discr: Integer,
variants: Vec<Struct>,
size: Size,
- align: Align
+ align: Align,
+ primitive_align: Align,
},
/// Two cases distinguished by a nullable pointer: the case with discriminant
}
}
-/// Helper function for normalizing associated types in an inference context.
-fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>,
- ty: Ty<'gcx>)
- -> Ty<'gcx> {
- if !ty.has_projection_types() {
- return ty;
- }
-
- let mut selcx = traits::SelectionContext::new(infcx);
- let cause = traits::ObligationCause::dummy();
- let traits::Normalized { value: result, obligations } =
- traits::normalize(&mut selcx, cause, &ty);
-
- let mut fulfill_cx = traits::FulfillmentContext::new();
-
- for obligation in obligations {
- fulfill_cx.register_predicate_obligation(infcx, obligation);
- }
-
- infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
-}
-
impl<'a, 'gcx, 'tcx> Layout {
pub fn compute_uncached(ty: Ty<'gcx>,
infcx: &InferCtxt<'a, 'gcx, 'tcx>)
let ptr_layout = |pointee: Ty<'gcx>| {
let non_zero = !ty.is_unsafe_ptr();
- let pointee = normalize_associated_type(infcx, pointee);
+ let pointee = infcx.normalize_projections(pointee);
if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) {
Ok(Scalar { value: Pointer, non_zero: non_zero })
} else {
Array {
sized: true,
align: element.align(dl),
+ primitive_align: element.primitive_align(dl),
element_size: element_size,
count: count
}
Array {
sized: false,
align: element.align(dl),
+ primitive_align: element.primitive_align(dl),
element_size: element.size(dl),
count: 0
}
Array {
sized: false,
align: dl.i8_align,
+ primitive_align: dl.i8_align,
element_size: Size::from_bytes(1),
count: 0
}
assert!(discr_max >= 0);
let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
let mut align = dl.aggregate_align;
+ let mut primitive_align = dl.aggregate_align;
let mut size = Size::from_bytes(0);
// We're interested in the smallest alignment, so start large.
}
size = cmp::max(size, st.min_size);
align = align.max(st.align);
+ primitive_align = primitive_align.max(st.primitive_align);
Ok(st)
}).collect::<Result<Vec<_>, _>>()?;
discr: ity,
variants: variants,
size: size,
- align: align
+ align: align,
+ primitive_align: primitive_align
}
}
// Types with no meaningful known layout.
ty::TyProjection(_) | ty::TyAnon(..) => {
- let normalized = normalize_associated_type(infcx, ty);
+ let normalized = infcx.normalize_projections(ty);
if ty == normalized {
return Err(LayoutError::Unknown(ty));
}
}
}
+ /// Returns the alignment of this layout before any repr(align) attribute is applied.
+ pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
+ match *self {
+ Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
+ Univariant { ref variant, .. } |
+ StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+ variant.primitive_align
+ },
+
+ _ => self.align(dl)
+ }
+ }
+
+ /// Returns the overall alignment (as a u32) when it exceeds the primitive alignment, i.e. when a repr(align) attribute raised it; `None` otherwise.
+ pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
+ let align = self.align(dl);
+ let primitive_align = self.primitive_align(dl);
+ if align.abi() > primitive_align.abi() {
+ Some(align.abi() as u32)
+ } else {
+ None
+ }
+ }
+
pub fn field_offset<C: HasDataLayout>(&self,
cx: C,
i: usize,
}
ty::TyProjection(_) | ty::TyAnon(..) => {
- let normalized = normalize_associated_type(infcx, ty);
+ let normalized = infcx.normalize_projections(ty);
if ty == normalized {
Err(err)
} else {
type TyLayout;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
+ fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
}
impl<'a, 'gcx, 'tcx> LayoutTyper<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
type TyLayout = Result<TyLayout<'gcx>, LayoutError<'gcx>>;
fn layout_of(self, ty: Ty<'gcx>) -> Self::TyLayout {
- let ty = normalize_associated_type(self, ty);
+ let ty = self.normalize_projections(ty);
Ok(TyLayout {
ty: ty,
variant_index: None
})
}
+
+ fn normalize_projections(self, ty: Ty<'gcx>) -> Ty<'gcx> {
+ if !ty.has_projection_types() {
+ return ty;
+ }
+
+ let mut selcx = traits::SelectionContext::new(self);
+ let cause = traits::ObligationCause::dummy();
+ let traits::Normalized { value: result, obligations } =
+ traits::normalize(&mut selcx, cause, &ty);
+
+ let mut fulfill_cx = traits::FulfillmentContext::new();
+
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(self, obligation);
+ }
+
+ self.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result)
+ }
}
impl<'a, 'tcx> TyLayout<'tcx> {
}
pub fn field<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> C::TyLayout {
- cx.layout_of(self.field_type(cx, i))
+ cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
}
}
use dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig};
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use hir;
use middle::const_val;
use middle::privacy::AccessLevels;
use mir;
use session::CompileResult;
use ty::{self, CrateInherentImpls, Ty, TyCtxt};
+use ty::subst::Substs;
use util::nodemap::NodeSet;
use rustc_data_structures::indexed_vec::IndexVec;
use std::cell::{RefCell, RefMut};
+use std::ops::Deref;
use std::rc::Rc;
use syntax_pos::{Span, DUMMY_SP};
}
}
+impl<'tcx> Key for (DefId, &'tcx Substs<'tcx>) {
+ fn map_crate(&self) -> CrateNum {
+ self.0.krate
+ }
+ fn default_span(&self, tcx: TyCtxt) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
trait Value<'tcx>: Sized {
fn from_cycle_error<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self;
}
}
}
+
+impl<'tcx> Value<'tcx> for ty::DtorckConstraint<'tcx> {
+ fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
+ Self::empty()
+ }
+}
+
pub struct CycleError<'a, 'tcx: 'a> {
span: Span,
cycle: RefMut<'a, [(Span, Query<'tcx>)]>,
}
}
-impl<'tcx> QueryDescription for queries::super_predicates<'tcx> {
+impl<'tcx> QueryDescription for queries::super_predicates_of<'tcx> {
fn describe(tcx: TyCtxt, def_id: DefId) -> String {
format!("computing the supertraits of `{}`",
tcx.item_path_str(def_id))
}
}
+impl<'tcx> QueryDescription for queries::const_eval<'tcx> {
+ fn describe(tcx: TyCtxt, (def_id, _): (DefId, &'tcx Substs<'tcx>)) -> String {
+ format!("const-evaluating `{}`",
+ tcx.item_path_str(def_id))
+ }
+}
+
macro_rules! define_maps {
(<$tcx:tt>
$($(#[$attr:meta])*
Self::try_get_with(tcx, span, key, Clone::clone)
}
- $(#[$attr])*
- pub fn get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) -> $V {
- Self::try_get(tcx, span, key).unwrap_or_else(|e| {
- tcx.report_cycle(e);
- Value::from_cycle_error(tcx.global_tcx())
- })
- }
-
pub fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) {
// FIXME(eddyb) Move away from using `DepTrackingMap`
// so we don't have to explicitly ignore a false edge:
}
})*
+ #[derive(Copy, Clone)]
+ pub struct TyCtxtAt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+ pub tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ pub span: Span,
+ }
+
+ impl<'a, 'gcx, 'tcx> Deref for TyCtxtAt<'a, 'gcx, 'tcx> {
+ type Target = TyCtxt<'a, 'gcx, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.tcx
+ }
+ }
+
+ impl<'a, $tcx, 'lcx> TyCtxt<'a, $tcx, 'lcx> {
+ /// Return a transparent wrapper for `TyCtxt` which uses
+ /// `span` as the location of queries performed through it.
+ pub fn at(self, span: Span) -> TyCtxtAt<'a, $tcx, 'lcx> {
+ TyCtxtAt {
+ tcx: self,
+ span
+ }
+ }
+
+ $($(#[$attr])*
+ pub fn $name(self, key: $K) -> $V {
+ self.at(DUMMY_SP).$name(key)
+ })*
+ }
+
+ impl<'a, $tcx, 'lcx> TyCtxtAt<'a, $tcx, 'lcx> {
+ $($(#[$attr])*
+ pub fn $name(self, key: $K) -> $V {
+ queries::$name::try_get(self.tcx, self.span, key).unwrap_or_else(|e| {
+ self.report_cycle(e);
+ Value::from_cycle_error(self.global_tcx())
+ })
+ })*
+ }
+
pub struct Providers<$tcx> {
$(pub $name: for<'a> fn(TyCtxt<'a, $tcx, $tcx>, $K) -> $V),*
}
// the driver creates (using several `rustc_*` crates).
define_maps! { <'tcx>
/// Records the type of every item.
- pub ty: ItemSignature(DefId) -> Ty<'tcx>,
+ pub type_of: ItemSignature(DefId) -> Ty<'tcx>,
/// Maps from the def-id of an item (trait/struct/enum/fn) to its
/// associated generics and predicates.
- pub generics: ItemSignature(DefId) -> &'tcx ty::Generics,
- pub predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx>,
+ pub generics_of: ItemSignature(DefId) -> &'tcx ty::Generics,
+ pub predicates_of: ItemSignature(DefId) -> ty::GenericPredicates<'tcx>,
/// Maps from the def-id of a trait to the list of
/// super-predicates. This is a subset of the full list of
/// evaluate them even during type conversion, often before the
/// full predicates are available (note that supertraits have
/// additional acyclicity requirements).
- pub super_predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx>,
+ pub super_predicates_of: ItemSignature(DefId) -> ty::GenericPredicates<'tcx>,
/// To avoid cycles within the predicates of a single item we compute
/// per-type-parameter predicates for resolving `T::AssocTy`.
pub trait_def: ItemSignature(DefId) -> &'tcx ty::TraitDef,
pub adt_def: ItemSignature(DefId) -> &'tcx ty::AdtDef,
pub adt_destructor: AdtDestructor(DefId) -> Option<ty::Destructor>,
- pub adt_sized_constraint: SizedConstraint(DefId) -> Ty<'tcx>,
+ pub adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>],
+ pub adt_dtorck_constraint: DtorckConstraint(DefId) -> ty::DtorckConstraint<'tcx>,
+
+ /// True if this is a foreign item (i.e., linked via `extern { ... }`).
+ pub is_foreign_item: IsForeignItem(DefId) -> bool,
/// Maps from def-id of a type or region parameter to its
/// (inferred) variance.
- pub variances: ItemSignature(DefId) -> Rc<Vec<ty::Variance>>,
+ pub variances_of: ItemSignature(DefId) -> Rc<Vec<ty::Variance>>,
/// Maps from an impl/trait def-id to a list of the def-ids of its items
pub associated_item_def_ids: AssociatedItemDefIds(DefId) -> Rc<Vec<DefId>>,
pub associated_item: AssociatedItems(DefId) -> ty::AssociatedItem,
pub impl_trait_ref: ItemSignature(DefId) -> Option<ty::TraitRef<'tcx>>,
+ pub impl_polarity: ItemSignature(DefId) -> hir::ImplPolarity,
/// Maps a DefId of a type to a list of its inherent impls.
/// Contains implementations of methods that are inherent to a type.
pub typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult,
- pub typeck_tables: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>,
+ pub typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>,
pub coherent_trait: coherent_trait_dep_node((CrateNum, DefId)) -> (),
/// (Defined only for LOCAL_CRATE)
pub crate_inherent_impls_overlap_check: crate_inherent_impls_dep_node(CrateNum) -> (),
- /// Results of evaluating monomorphic constants embedded in
- /// other items, such as enum variant explicit discriminants.
- pub monomorphic_const_eval: MonomorphicConstEval(DefId) -> const_val::EvalResult<'tcx>,
+ /// Results of evaluating const items or constants embedded in
+ /// other items (such as enum variant explicit discriminants).
+ pub const_eval: const_eval_dep_node((DefId, &'tcx Substs<'tcx>))
+ -> const_val::EvalResult<'tcx>,
/// Performs the privacy check and computes "access levels".
pub privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Rc<AccessLevels>,
- pub reachable_set: reachability_dep_node(CrateNum) -> NodeSet,
+ pub reachable_set: reachability_dep_node(CrateNum) -> Rc<NodeSet>,
- pub mir_shims: mir_shim(ty::InstanceDef<'tcx>) -> &'tcx RefCell<mir::Mir<'tcx>>
+ pub mir_shims: mir_shim_dep_node(ty::InstanceDef<'tcx>) -> &'tcx RefCell<mir::Mir<'tcx>>
}
fn coherent_trait_dep_node((_, def_id): (CrateNum, DefId)) -> DepNode<DefId> {
DepNode::Reachability
}
-fn mir_shim(instance: ty::InstanceDef) -> DepNode<DefId> {
+fn mir_shim_dep_node(instance: ty::InstanceDef) -> DepNode<DefId> {
instance.dep_node()
}
fn typeck_item_bodies_dep_node(_: CrateNum) -> DepNode<DefId> {
DepNode::TypeckBodiesKrate
}
+
+fn const_eval_dep_node((def_id, _): (DefId, &Substs)) -> DepNode<DefId> {
+ DepNode::ConstEval(def_id)
+}
use ty::subst::{Subst, Substs};
use ty::util::IntTypeExt;
use ty::walk::TypeWalker;
-use util::nodemap::{NodeSet, DefIdMap, FxHashMap};
+use util::common::ErrorReported;
+use util::nodemap::{NodeSet, DefIdMap, FxHashMap, FxHashSet};
use serialize::{self, Encodable, Encoder};
-use std::borrow::Cow;
use std::cell::{Cell, RefCell, Ref};
use std::collections::BTreeMap;
+use std::cmp;
use std::hash::{Hash, Hasher};
+use std::iter::FromIterator;
use std::ops::Deref;
use std::rc::Rc;
use std::slice;
pub use self::sty::Region::*;
pub use self::sty::TypeVariants::*;
-pub use self::contents::TypeContents;
pub use self::context::{TyCtxt, GlobalArenas, tls};
pub use self::context::{Lift, TypeckTables};
pub mod wf;
pub mod util;
-mod contents;
mod context;
mod flags;
mod instance;
#[derive(Clone)]
pub struct CrateAnalysis {
pub access_levels: Rc<AccessLevels>,
- pub reachable: NodeSet,
+ pub reachable: Rc<NodeSet>,
pub name: String,
pub glob_map: Option<hir::GlobMap>,
}
let header = ImplHeader {
impl_def_id: impl_def_id,
- self_ty: tcx.item_type(impl_def_id),
+ self_ty: tcx.type_of(impl_def_id),
trait_ref: tcx.impl_trait_ref(impl_def_id),
- predicates: tcx.item_predicates(impl_def_id).predicates
+ predicates: tcx.predicates_of(impl_def_id).predicates
}.subst(tcx, impl_substs);
let traits::Normalized { value: mut header, obligations } =
const IS_SIZED = 1 << 17,
const MOVENESS_CACHED = 1 << 18,
const MOVES_BY_DEFAULT = 1 << 19,
+ const FREEZENESS_CACHED = 1 << 20,
+ const IS_FREEZE = 1 << 21,
+ const NEEDS_DROP_CACHED = 1 << 22,
+ const NEEDS_DROP = 1 << 23,
}
}
instantiated: &mut InstantiatedPredicates<'tcx>,
substs: &Substs<'tcx>) {
if let Some(def_id) = self.parent {
- tcx.item_predicates(def_id).instantiate_into(tcx, instantiated, substs);
+ tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
}
instantiated.predicates.extend(self.predicates.iter().map(|p| p.subst(tcx, substs)))
}
/// A cache for `type_is_sized`
pub is_sized_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
+
+ /// A cache for `type_is_freeze`
+ pub is_freeze_cache: RefCell<FxHashMap<Ty<'tcx>, bool>>,
}
impl<'a, 'tcx> ParameterEnvironment<'tcx> {
free_id_outlive: self.free_id_outlive,
is_copy_cache: RefCell::new(FxHashMap()),
is_sized_cache: RefCell::new(FxHashMap()),
+ is_freeze_cache: RefCell::new(FxHashMap()),
}
}
pub struct Destructor {
/// The def-id of the destructor method
pub did: DefId,
- /// Invoking the destructor of a dtorck type during usual cleanup
- /// (e.g. the glue emitted for stack unwinding) requires all
- /// lifetimes in the type-structure of `adt` to strictly outlive
- /// the adt value itself.
- ///
- /// If `adt` is not dtorck, then the adt's destructor can be
- /// invoked even when there are lifetimes in the type-structure of
- /// `adt` that do not strictly outlive the adt value itself.
- /// (This allows programs to make cyclic structures without
- /// resorting to unsafe means; see RFCs 769 and 1238).
- pub is_dtorck: bool,
}
bitflags! {
#[derive(Copy, Clone, Eq, PartialEq, RustcEncodable, RustcDecodable, Default)]
pub struct ReprOptions {
pub int: Option<attr::IntType>,
+ pub align: u16,
pub flags: ReprFlags,
}
impl_stable_hash_for!(struct ReprOptions {
+ align,
int,
flags
});
pub fn new(tcx: TyCtxt, did: DefId) -> ReprOptions {
let mut flags = ReprFlags::empty();
let mut size = None;
-
+ let mut max_align = 0;
for attr in tcx.get_attrs(did).iter() {
for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) {
flags.insert(match r {
size = Some(i);
ReprFlags::empty()
},
+ attr::ReprAlign(align) => {
+ max_align = cmp::max(align, max_align);
+ ReprFlags::empty()
+ },
});
}
}
if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.item_path_str(did))) {
flags.insert(ReprFlags::IS_LINEAR);
}
- ReprOptions { int: size, flags: flags }
+ ReprOptions { int: size, align: max_align, flags: flags }
}
#[inline]
}
}
- /// Returns whether this is a dtorck type. If this returns
- /// true, this type being safe for destruction requires it to be
- /// alive; Otherwise, only the contents are required to be.
- #[inline]
- pub fn is_dtorck(&'gcx self, tcx: TyCtxt) -> bool {
- self.destructor(tcx).map_or(false, |d| d.is_dtorck)
- }
-
/// Returns whether this type is #[fundamental] for the purposes
/// of coherence checking.
#[inline]
#[inline]
pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> GenericPredicates<'gcx> {
- tcx.item_predicates(self.did)
+ tcx.predicates_of(self.did)
}
/// Returns an iterator over all fields contained
}
}
+ #[inline]
pub fn discriminants(&'a self, tcx: TyCtxt<'a, 'gcx, 'tcx>)
-> impl Iterator<Item=ConstInt> + 'a {
let repr_type = self.repr.discr_type();
self.variants.iter().map(move |v| {
let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr());
if let VariantDiscr::Explicit(expr_did) = v.discr {
- match queries::monomorphic_const_eval::get(tcx, DUMMY_SP, expr_did) {
+ let substs = Substs::empty();
+ match tcx.const_eval((expr_did, substs)) {
Ok(ConstVal::Integral(v)) => {
discr = v;
}
- _ => {}
+ err => {
+ if !expr_did.is_local() {
+ span_bug!(tcx.def_span(expr_did),
+ "variant discriminant evaluation succeeded \
+ in its crate but failed locally: {:?}", err);
+ }
+ }
}
}
prev_discr = Some(discr);
explicit_index -= distance;
}
ty::VariantDiscr::Explicit(expr_did) => {
- match queries::monomorphic_const_eval::get(tcx, DUMMY_SP, expr_did) {
+ let substs = Substs::empty();
+ match tcx.const_eval((expr_did, substs)) {
Ok(ConstVal::Integral(v)) => {
explicit_value = v;
break;
}
- _ => {
+ err => {
+ if !expr_did.is_local() {
+ span_bug!(tcx.def_span(expr_did),
+ "variant discriminant evaluation succeeded \
+ in its crate but failed locally: {:?}", err);
+ }
+ if explicit_index == 0 {
+ break;
+ }
explicit_index -= 1;
}
}
}
pub fn destructor(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option<Destructor> {
- queries::adt_destructor::get(tcx, DUMMY_SP, self.did)
+ tcx.adt_destructor(self.did)
}
- /// Returns a simpler type such that `Self: Sized` if and only
+ /// Returns a list of types such that `Self: Sized` if and only
/// if that type is Sized, or `TyErr` if this type is recursive.
///
- /// HACK: instead of returning a list of types, this function can
- /// return a tuple. In that case, the result is Sized only if
- /// all elements of the tuple are Sized.
- ///
- /// This is generally the `struct_tail` if this is a struct, or a
- /// tuple of them if this is an enum.
- ///
/// Oddly enough, checking that the sized-constraint is Sized is
/// actually more expressive than checking all members:
/// the Sized trait is inductive, so an associated type that references
///
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer, e.g. issue #31299.
- pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> {
+ pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx [Ty<'tcx>] {
match queries::adt_sized_constraint::try_get(tcx, DUMMY_SP, self.did) {
- Ok(ty) => ty,
+ Ok(tys) => tys,
Err(_) => {
debug!("adt_sized_constraint: {:?} is recursive", self);
// This should be reported as an error by `check_representable`.
//
// Consider the type as Sized in the meanwhile to avoid
// further errors.
- tcx.types.err
+ tcx.intern_type_list(&[tcx.types.err])
}
}
}
TyAdt(adt, substs) => {
// recursive case
- let adt_ty =
- adt.sized_constraint(tcx)
- .subst(tcx, substs);
+ let adt_tys = adt.sized_constraint(tcx);
debug!("sized_constraint_for_ty({:?}) intermediate = {:?}",
- ty, adt_ty);
- if let ty::TyTuple(ref tys, _) = adt_ty.sty {
- tys.iter().flat_map(|ty| {
- self.sized_constraint_for_ty(tcx, ty)
- }).collect()
- } else {
- self.sized_constraint_for_ty(tcx, adt_ty)
- }
+ ty, adt_tys);
+ adt_tys.iter()
+ .map(|ty| ty.subst(tcx, substs))
+ .flat_map(|ty| self.sized_constraint_for_ty(tcx, ty))
+ .collect()
}
TyProjection(..) | TyAnon(..) => {
def_id: sized_trait,
substs: tcx.mk_substs_trait(ty, &[])
}).to_predicate();
- let predicates = tcx.item_predicates(self.did).predicates;
+ let predicates = tcx.predicates_of(self.did).predicates;
if predicates.into_iter().any(|p| p == sized_predicate) {
vec![]
} else {
impl<'a, 'gcx, 'tcx> FieldDef {
pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> {
- tcx.item_type(self.did).subst(tcx, subst)
+ tcx.type_of(self.did).subst(tcx, subst)
}
}
}
}
-impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
- pub fn body_tables(self, body: hir::BodyId) -> &'gcx TypeckTables<'gcx> {
- self.item_tables(self.hir.body_owner_def_id(body))
+#[derive(Debug, Clone)]
+pub enum Attributes<'gcx> {
+ Owned(Rc<[ast::Attribute]>),
+ Borrowed(&'gcx [ast::Attribute])
+}
+
+impl<'gcx> ::std::ops::Deref for Attributes<'gcx> {
+ type Target = [ast::Attribute];
+
+ fn deref(&self) -> &[ast::Attribute] {
+ match self {
+ &Attributes::Owned(ref data) => &data,
+ &Attributes::Borrowed(data) => data
+ }
}
+}
- pub fn item_tables(self, def_id: DefId) -> &'gcx TypeckTables<'gcx> {
- queries::typeck_tables::get(self, DUMMY_SP, def_id)
+impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
+ pub fn body_tables(self, body: hir::BodyId) -> &'gcx TypeckTables<'gcx> {
+ self.typeck_tables_of(self.hir.body_owner_def_id(body))
}
pub fn expr_span(self, id: NodeId) -> Span {
.collect()
}
- pub fn trait_impl_polarity(self, id: DefId) -> hir::ImplPolarity {
- if let Some(id) = self.hir.as_local_node_id(id) {
- match self.hir.expect_item(id).node {
- hir::ItemImpl(_, polarity, ..) => polarity,
- ref item => bug!("trait_impl_polarity: {:?} not an impl", item)
- }
- } else {
- self.sess.cstore.impl_polarity(id)
- }
- }
-
pub fn trait_relevant_for_never(self, did: DefId) -> bool {
self.associated_items(did).any(|item| {
item.relevant_for_never()
})
}
- pub fn coerce_unsized_info(self, did: DefId) -> adjustment::CoerceUnsizedInfo {
- queries::coerce_unsized_info::get(self, DUMMY_SP, did)
- }
-
- pub fn associated_item(self, def_id: DefId) -> AssociatedItem {
- queries::associated_item::get(self, DUMMY_SP, def_id)
- }
-
fn associated_item_from_trait_item_ref(self,
parent_def_id: DefId,
+ parent_vis: &hir::Visibility,
trait_item_ref: &hir::TraitItemRef)
-> AssociatedItem {
let def_id = self.hir.local_def_id(trait_item_ref.id.node_id);
AssociatedItem {
name: trait_item_ref.name,
kind: kind,
- vis: Visibility::from_hir(&hir::Inherited, trait_item_ref.id.node_id, self),
+ // Visibility of trait items is inherited from their traits.
+ vis: Visibility::from_hir(parent_vis, trait_item_ref.id.node_id, self),
defaultness: trait_item_ref.defaultness,
def_id: def_id,
container: TraitContainer(parent_def_id),
fn associated_item_from_impl_item_ref(self,
parent_def_id: DefId,
- from_trait_impl: bool,
impl_item_ref: &hir::ImplItemRef)
-> AssociatedItem {
let def_id = self.hir.local_def_id(impl_item_ref.id.node_id);
hir::AssociatedItemKind::Type => (ty::AssociatedKind::Type, false),
};
- // Trait impl items are always public.
- let public = hir::Public;
- let vis = if from_trait_impl { &public } else { &impl_item_ref.vis };
-
ty::AssociatedItem {
name: impl_item_ref.name,
kind: kind,
- vis: ty::Visibility::from_hir(vis, impl_item_ref.id.node_id, self),
+ // Visibility of trait impl items doesn't matter.
+ vis: ty::Visibility::from_hir(&impl_item_ref.vis, impl_item_ref.id.node_id, self),
defaultness: impl_item_ref.defaultness,
def_id: def_id,
container: ImplContainer(parent_def_id),
}
}
- pub fn associated_item_def_ids(self, def_id: DefId) -> Rc<Vec<DefId>> {
- queries::associated_item_def_ids::get(self, DUMMY_SP, def_id)
- }
-
#[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait.
pub fn associated_items(self, def_id: DefId)
-> impl Iterator<Item = ty::AssociatedItem> + 'a {
(0..def_ids.len()).map(move |i| self.associated_item(def_ids[i]))
}
- /// Returns the trait-ref corresponding to a given impl, or None if it is
- /// an inherent impl.
- pub fn impl_trait_ref(self, id: DefId) -> Option<TraitRef<'gcx>> {
- queries::impl_trait_ref::get(self, DUMMY_SP, id)
- }
-
/// Returns true if the impls are the same polarity and are implementing
/// a trait which contains no items
pub fn impls_are_allowed_to_overlap(self, def_id1: DefId, def_id2: DefId) -> bool {
.map_or(false, |trait_ref| {
self.associated_item_def_ids(trait_ref.def_id).is_empty()
});
- self.trait_impl_polarity(def_id1) == self.trait_impl_polarity(def_id2)
+ self.impl_polarity(def_id1) == self.impl_polarity(def_id2)
&& trait1_is_empty
&& trait2_is_empty
}
match def {
Def::Variant(did) | Def::VariantCtor(did, ..) => {
let enum_did = self.parent_def_id(did).unwrap();
- self.lookup_adt_def(enum_did).variant_with_id(did)
+ self.adt_def(enum_did).variant_with_id(did)
}
Def::Struct(did) | Def::Union(did) => {
- self.lookup_adt_def(did).struct_variant()
+ self.adt_def(did).struct_variant()
}
Def::StructCtor(ctor_did, ..) => {
let did = self.parent_def_id(ctor_did).expect("struct ctor has no parent");
- self.lookup_adt_def(did).struct_variant()
+ self.adt_def(did).struct_variant()
}
_ => bug!("expect_variant_def used with unexpected def {:?}", def)
}
}
}
- // If the given item is in an external crate, looks up its type and adds it to
- // the type cache. Returns the type parameters and type.
- pub fn item_type(self, did: DefId) -> Ty<'gcx> {
- queries::ty::get(self, DUMMY_SP, did)
- }
-
- /// Given the did of a trait, returns its canonical trait ref.
- pub fn lookup_trait_def(self, did: DefId) -> &'gcx TraitDef {
- queries::trait_def::get(self, DUMMY_SP, did)
- }
-
- /// Given the did of an ADT, return a reference to its definition.
- pub fn lookup_adt_def(self, did: DefId) -> &'gcx AdtDef {
- queries::adt_def::get(self, DUMMY_SP, did)
- }
-
- /// Given the did of an item, returns its generics.
- pub fn item_generics(self, did: DefId) -> &'gcx Generics {
- queries::generics::get(self, DUMMY_SP, did)
- }
-
- /// Given the did of an item, returns its full set of predicates.
- pub fn item_predicates(self, did: DefId) -> GenericPredicates<'gcx> {
- queries::predicates::get(self, DUMMY_SP, did)
- }
-
- /// Given the did of a trait, returns its superpredicates.
- pub fn item_super_predicates(self, did: DefId) -> GenericPredicates<'gcx> {
- queries::super_predicates::get(self, DUMMY_SP, did)
- }
-
/// Given the did of an item, returns its MIR, borrowed immutably.
pub fn item_mir(self, did: DefId) -> Ref<'gcx, Mir<'gcx>> {
- queries::mir::get(self, DUMMY_SP, did).borrow()
+ self.mir(did).borrow()
}
/// Return the possibly-auto-generated MIR of a (DefId, Subst) pair.
{
match instance {
ty::InstanceDef::Item(did) if true => self.item_mir(did),
- _ => queries::mir_shims::get(self, DUMMY_SP, instance).borrow(),
+ _ => self.mir_shims(instance).borrow(),
}
}
Some(self.item_mir(did))
}
- /// If `type_needs_drop` returns true, then `ty` is definitely
- /// non-copy and *might* have a destructor attached; if it returns
- /// false, then `ty` definitely has no destructor (i.e. no drop glue).
- ///
- /// (Note that this implies that if `ty` has a destructor attached,
- /// then `type_needs_drop` will definitely return `true` for `ty`.)
- pub fn type_needs_drop_given_env(self,
- ty: Ty<'gcx>,
- param_env: &ty::ParameterEnvironment<'gcx>) -> bool {
- // Issue #22536: We first query type_moves_by_default. It sees a
- // normalized version of the type, and therefore will definitely
- // know whether the type implements Copy (and thus needs no
- // cleanup/drop/zeroing) ...
- let tcx = self.global_tcx();
- let implements_copy = !ty.moves_by_default(tcx, param_env, DUMMY_SP);
-
- if implements_copy { return false; }
-
- // ... (issue #22536 continued) but as an optimization, still use
- // prior logic of asking if the `needs_drop` bit is set; we need
- // not zero non-Copy types if they have no destructor.
-
- // FIXME(#22815): Note that calling `ty::type_contents` is a
- // conservative heuristic; it may report that `needs_drop` is set
- // when actual type does not actually have a destructor associated
- // with it. But since `ty` absolutely did not have the `Copy`
- // bound attached (see above), it is sound to treat it as having a
- // destructor (e.g. zero its memory on move).
-
- let contents = ty.type_contents(tcx);
- debug!("type_needs_drop ty={:?} contents={:?}", ty, contents);
- contents.needs_drop(tcx)
- }
-
/// Get the attributes of a definition.
- pub fn get_attrs(self, did: DefId) -> Cow<'gcx, [ast::Attribute]> {
+ pub fn get_attrs(self, did: DefId) -> Attributes<'gcx> {
if let Some(id) = self.hir.as_local_node_id(did) {
- Cow::Borrowed(self.hir.attrs(id))
+ Attributes::Borrowed(self.hir.attrs(id))
} else {
- Cow::Owned(self.sess.cstore.item_attrs(did))
+ Attributes::Owned(self.sess.cstore.item_attrs(did))
}
}
self.get_attrs(did).iter().any(|item| item.check_name(attr))
}
- pub fn item_variances(self, item_id: DefId) -> Rc<Vec<ty::Variance>> {
- queries::variances::get(self, DUMMY_SP, item_id)
- }
-
pub fn trait_has_default_impl(self, trait_def_id: DefId) -> bool {
- let def = self.lookup_trait_def(trait_def_id);
+ let def = self.trait_def(trait_def_id);
def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL)
}
// metadata and don't need to track edges.
let _ignore = self.dep_graph.in_ignore();
- let def = self.lookup_trait_def(trait_id);
+ let def = self.trait_def(trait_id);
if def.flags.get().intersects(TraitFlags::HAS_REMOTE_IMPLS) {
return;
}
def.flags.set(def.flags.get() | TraitFlags::HAS_REMOTE_IMPLS);
}
- pub fn closure_kind(self, def_id: DefId) -> ty::ClosureKind {
- queries::closure_kind::get(self, DUMMY_SP, def_id)
- }
-
- pub fn closure_type(self, def_id: DefId) -> ty::PolyFnSig<'tcx> {
- queries::closure_type::get(self, DUMMY_SP, def_id)
- }
-
/// Given the def_id of an impl, return the def_id of the trait it implements.
/// If it implements no trait, return `None`.
pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
/// Construct a parameter environment suitable for static contexts or other contexts where there
/// are no free type/lifetime parameters in scope.
pub fn empty_parameter_environment(self) -> ParameterEnvironment<'tcx> {
-
- // for an empty parameter environment, there ARE no free
- // regions, so it shouldn't matter what we use for the free id
- let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID);
ty::ParameterEnvironment {
free_substs: self.intern_substs(&[]),
caller_bounds: Vec::new(),
- implicit_region_bound: self.mk_region(ty::ReEmpty),
- free_id_outlive: free_id_outlive,
+ implicit_region_bound: self.types.re_empty,
+ // for an empty parameter environment, there ARE no free
+ // regions, so it shouldn't matter what we use for the free id
+ free_id_outlive: ROOT_CODE_EXTENT,
is_copy_cache: RefCell::new(FxHashMap()),
is_sized_cache: RefCell::new(FxHashMap()),
+ is_freeze_cache: RefCell::new(FxHashMap()),
}
}
//
let tcx = self.global_tcx();
- let generic_predicates = tcx.item_predicates(def_id);
+ let generic_predicates = tcx.predicates_of(def_id);
let bounds = generic_predicates.instantiate(tcx, free_substs);
let bounds = tcx.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds));
let predicates = bounds.predicates;
free_id_outlive: free_id_outlive,
is_copy_cache: RefCell::new(FxHashMap()),
is_sized_cache: RefCell::new(FxHashMap()),
+ is_freeze_cache: RefCell::new(FxHashMap()),
};
let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps));
let parent_def_id = tcx.hir.local_def_id(parent_id);
let parent_item = tcx.hir.expect_item(parent_id);
match parent_item.node {
- hir::ItemImpl(.., ref impl_trait_ref, _, ref impl_item_refs) => {
+ hir::ItemImpl(.., ref impl_item_refs) => {
if let Some(impl_item_ref) = impl_item_refs.iter().find(|i| i.id.node_id == id) {
- let assoc_item =
- tcx.associated_item_from_impl_item_ref(parent_def_id,
- impl_trait_ref.is_some(),
- impl_item_ref);
+ let assoc_item = tcx.associated_item_from_impl_item_ref(parent_def_id,
+ impl_item_ref);
debug_assert_eq!(assoc_item.def_id, def_id);
return assoc_item;
}
hir::ItemTrait(.., ref trait_item_refs) => {
if let Some(trait_item_ref) = trait_item_refs.iter().find(|i| i.id.node_id == id) {
- let assoc_item =
- tcx.associated_item_from_trait_item_ref(parent_def_id, trait_item_ref);
+ let assoc_item = tcx.associated_item_from_trait_item_ref(parent_def_id,
+ &parent_item.vis,
+ trait_item_ref);
debug_assert_eq!(assoc_item.def_id, def_id);
return assoc_item;
}
/// Calculates the Sized-constraint.
///
-/// As the Sized-constraint of enums can be a *set* of types,
-/// the Sized-constraint may need to be a set also. Because introducing
-/// a new type of IVar is currently a complex affair, the Sized-constraint
-/// may be a tuple.
-///
-/// In fact, there are only a few options for the constraint:
-/// - `bool`, if the type is always Sized
+/// In fact, there are only a few options for the types in the constraint:
/// - an obviously-unsized type
/// - a type parameter or projection whose Sizedness can't be known
/// - a tuple of type parameters or projections, if there are multiple
/// check should catch this case.
fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
- -> Ty<'tcx> {
- let def = tcx.lookup_adt_def(def_id);
+ -> &'tcx [Ty<'tcx>] {
+ let def = tcx.adt_def(def_id);
- let tys: Vec<_> = def.variants.iter().flat_map(|v| {
+ let result = tcx.intern_type_list(&def.variants.iter().flat_map(|v| {
v.fields.last()
}).flat_map(|f| {
- let ty = tcx.item_type(f.did);
- def.sized_constraint_for_ty(tcx, ty)
- }).collect();
-
- let ty = match tys.len() {
- _ if tys.references_error() => tcx.types.err,
- 0 => tcx.types.bool,
- 1 => tys[0],
- _ => tcx.intern_tup(&tys[..], false)
- };
+ def.sized_constraint_for_ty(tcx, tcx.type_of(f.did))
+ }).collect::<Vec<_>>());
+
+ debug!("adt_sized_constraint: {:?} => {:?}", def, result);
+
+ result
+}
+
+/// Calculates the dtorck constraint for a type.
+fn adt_dtorck_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> DtorckConstraint<'tcx> {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ debug!("dtorck_constraint: {:?}", def);
+
+ if def.is_phantom_data() {
+ let result = DtorckConstraint {
+ outlives: vec![],
+ dtorck_types: vec![
+ tcx.mk_param_from_def(&tcx.generics_of(def_id).types[0])
+ ]
+ };
+ debug!("dtorck_constraint: {:?} => {:?}", def, result);
+ return result;
+ }
+
+ let mut result = def.all_fields()
+ .map(|field| tcx.type_of(field.did))
+ .map(|fty| tcx.dtorck_constraint_for_ty(span, fty, 0, fty))
+ .collect::<Result<DtorckConstraint, ErrorReported>>()
+ .unwrap_or(DtorckConstraint::empty());
+ result.outlives.extend(tcx.destructor_constraints(def));
+ result.dedup();
- debug!("adt_sized_constraint: {:?} => {:?}", def, ty);
+ debug!("dtorck_constraint: {:?} => {:?}", def, result);
- ty
+ result
}
fn associated_item_def_ids<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
associated_item,
associated_item_def_ids,
adt_sized_constraint,
+ adt_dtorck_constraint,
..*providers
};
}
pub fn provide_extern(providers: &mut ty::maps::Providers) {
*providers = ty::maps::Providers {
adt_sized_constraint,
+ adt_dtorck_constraint,
..*providers
};
}
/// A map for the local crate mapping each type to a vector of its
/// inherent impls. This is not meant to be used outside of coherence;
/// rather, you should request the vector for a specific type via
-/// `ty::queries::inherent_impls::get(def_id)` so as to minimize your
-/// dependencies (constructing this map requires touching the entire
-/// crate).
+/// `tcx.inherent_impls(def_id)` so as to minimize your dependencies
+/// (constructing this map requires touching the entire crate).
#[derive(Clone, Debug)]
pub struct CrateInherentImpls {
pub inherent_impls: DefIdMap<Rc<Vec<DefId>>>,
}
+/// A set of constraints that need to be satisfied in order for
+/// a type to be valid for destruction.
+#[derive(Clone, Debug)]
+pub struct DtorckConstraint<'tcx> {
+ /// Types that are required to be alive in order for this
+ /// type to be valid for destruction.
+ pub outlives: Vec<ty::subst::Kind<'tcx>>,
+ /// Types that could not be resolved: projections and params.
+ pub dtorck_types: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> FromIterator<DtorckConstraint<'tcx>> for DtorckConstraint<'tcx>
+{
+ fn from_iter<I: IntoIterator<Item=DtorckConstraint<'tcx>>>(iter: I) -> Self {
+ let mut result = Self::empty();
+
+ for constraint in iter {
+ result.outlives.extend(constraint.outlives);
+ result.dtorck_types.extend(constraint.dtorck_types);
+ }
+
+ result
+ }
+}
+
+
+impl<'tcx> DtorckConstraint<'tcx> {
+ fn empty() -> DtorckConstraint<'tcx> {
+ DtorckConstraint {
+ outlives: vec![],
+ dtorck_types: vec![]
+ }
+ }
+
+ fn dedup<'a>(&mut self) {
+ let mut outlives = FxHashSet();
+ let mut dtorck_types = FxHashSet();
+
+ self.outlives.retain(|&val| outlives.replace(val).is_none());
+ self.dtorck_types.retain(|&val| dtorck_types.replace(val).is_none());
+ }
+}
let variances;
let opt_variances = if relation.tcx().variance_computed.get() {
- variances = relation.tcx().item_variances(item_def_id);
+ variances = relation.tcx().variances_of(item_def_id);
Some(&*variances)
} else {
None
pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'acx>) ->
impl Iterator<Item=Ty<'tcx>> + 'tcx
{
- let generics = tcx.item_generics(def_id);
+ let generics = tcx.generics_of(def_id);
self.substs[self.substs.len()-generics.own_count()..].iter().map(
|t| t.as_type().expect("unexpected region in upvars"))
}
(Trait(_), Trait(_)) => Ordering::Equal,
(Projection(ref a), Projection(ref b)) => a.sort_key(tcx).cmp(&b.sort_key(tcx)),
(AutoTrait(ref a), AutoTrait(ref b)) =>
- tcx.lookup_trait_def(*a).def_path_hash.cmp(&tcx.lookup_trait_def(*b).def_path_hash),
+ tcx.trait_def(*a).def_path_hash.cmp(&tcx.trait_def(*b).def_path_hash),
(Trait(_), _) => Ordering::Less,
(Projection(_), Trait(_)) => Ordering::Greater,
(Projection(_), _) => Ordering::Less,
// We want something here that is stable across crate boundaries.
// The DefId isn't but the `deterministic_hash` of the corresponding
// DefPath is.
- let trait_def = tcx.lookup_trait_def(self.trait_ref.def_id);
+ let trait_def = tcx.trait_def(self.trait_ref.def_id);
let def_path_hash = trait_def.def_path_hash;
// An `ast::Name` is also not stable (it's just an index into an
-> &'tcx Substs<'tcx>
where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> &'tcx ty::Region,
FT: FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx> {
- let defs = tcx.item_generics(def_id);
+ let defs = tcx.generics_of(def_id);
let mut substs = Vec::with_capacity(defs.count());
Substs::fill_item(&mut substs, tcx, defs, &mut mk_region, &mut mk_type);
tcx.intern_substs(&substs)
where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> &'tcx ty::Region,
FT: FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx>
{
- let defs = tcx.item_generics(def_id);
+ let defs = tcx.generics_of(def_id);
let mut result = Vec::with_capacity(defs.count());
result.extend(self[..].iter().cloned());
Substs::fill_single(&mut result, defs, &mut mk_region, &mut mk_type);
FT: FnMut(&ty::TypeParameterDef, &[Kind<'tcx>]) -> Ty<'tcx> {
if let Some(def_id) = defs.parent {
- let parent_defs = tcx.item_generics(def_id);
+ let parent_defs = tcx.generics_of(def_id);
Substs::fill_item(substs, tcx, parent_defs, mk_region, mk_type);
}
Substs::fill_single(substs, defs, mk_region, mk_type)
source_ancestor: DefId,
target_substs: &Substs<'tcx>)
-> &'tcx Substs<'tcx> {
- let defs = tcx.item_generics(source_ancestor);
+ let defs = tcx.generics_of(source_ancestor);
tcx.mk_substs(target_substs.iter().chain(&self[defs.own_count()..]).cloned())
}
}
fn shift_region_through_binders(&self, region: &'tcx ty::Region) -> &'tcx ty::Region {
+ if self.region_binders_passed == 0 || !region.has_escaping_regions() {
+ return region;
+ }
self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed))
}
}
trait_id: DefId,
substs: &Substs<'tcx>)
-> ty::TraitRef<'tcx> {
- let defs = tcx.item_generics(trait_id);
+ let defs = tcx.generics_of(trait_id);
ty::TraitRef {
def_id: trait_id,
use ty::ParameterEnvironment;
use ty::fold::TypeVisitor;
use ty::layout::{Layout, LayoutError};
+use ty::subst::{Subst, Kind};
use ty::TypeVariants::*;
use util::common::ErrorReported;
-use util::nodemap::FxHashMap;
+use util::nodemap::{FxHashMap, FxHashSet};
use middle::lang_items;
use rustc_const_math::{ConstInt, ConstIsize, ConstUsize};
return None;
};
- ty::queries::coherent_trait::get(self, DUMMY_SP, (LOCAL_CRATE, drop_trait));
+ self.coherent_trait((LOCAL_CRATE, drop_trait));
let mut dtor_did = None;
- let ty = self.item_type(adt_did);
- self.lookup_trait_def(drop_trait).for_each_relevant_impl(self, ty, |impl_did| {
+ let ty = self.type_of(adt_did);
+ self.trait_def(drop_trait).for_each_relevant_impl(self, ty, |impl_did| {
if let Some(item) = self.associated_items(impl_did).next() {
if let Ok(()) = validate(self, impl_did) {
dtor_did = Some(item.def_id);
None => return None,
};
+ Some(ty::Destructor { did: dtor_did })
+ }
+
+ /// Return the set of types that are required to be alive in
+ /// order to run the destructor of `def` (see RFCs 769 and
+ /// 1238).
+ ///
+ /// Note that this returns only the constraints for the
+ /// destructor of `def` itself. For the destructors of the
+ /// contents, you need `adt_dtorck_constraint`.
+ pub fn destructor_constraints(self, def: &'tcx ty::AdtDef)
+ -> Vec<ty::subst::Kind<'tcx>>
+ {
+ let dtor = match def.destructor(self) {
+ None => {
+ debug!("destructor_constraints({:?}) - no dtor", def.did);
+ return vec![]
+ }
+ Some(dtor) => dtor.did
+ };
+
// RFC 1238: if the destructor method is tagged with the
// attribute `unsafe_destructor_blind_to_params`, then the
// compiler is being instructed to *assume* that the
// Such access can be in plain sight (e.g. dereferencing
// `*foo.0` of `Foo<'a>(&'a u32)`) or indirectly hidden
// (e.g. calling `foo.0.clone()` of `Foo<T:Clone>`).
- let is_dtorck = !self.has_attr(dtor_did, "unsafe_destructor_blind_to_params");
- Some(ty::Destructor { did: dtor_did, is_dtorck: is_dtorck })
+ if self.has_attr(dtor, "unsafe_destructor_blind_to_params") {
+ debug!("destructor_constraint({:?}) - blind", def.did);
+ return vec![];
+ }
+
+ let impl_def_id = self.associated_item(dtor).container.id();
+ let impl_generics = self.generics_of(impl_def_id);
+
+ // We have a destructor - all the parameters that are not
+    // pure_wrt_drop (i.e., don't have a #[may_dangle] attribute)
+ // must be live.
+
+    // We need to return the list of parameters from the ADT's
+ // generics/substs that correspond to impure parameters on the
+ // impl's generics. This is a bit ugly, but conceptually simple:
+ //
+ // Suppose our ADT looks like the following
+ //
+ // struct S<X, Y, Z>(X, Y, Z);
+ //
+ // and the impl is
+ //
+ // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
+ //
+ // We want to return the parameters (X, Y). For that, we match
+ // up the item-substs <X, Y, Z> with the substs on the impl ADT,
+ // <P1, P2, P0>, and then look up which of the impl substs refer to
+ // parameters marked as pure.
+
+ let impl_substs = match self.type_of(impl_def_id).sty {
+ ty::TyAdt(def_, substs) if def_ == def => substs,
+ _ => bug!()
+ };
+
+ let item_substs = match self.type_of(def.did).sty {
+ ty::TyAdt(def_, substs) if def_ == def => substs,
+ _ => bug!()
+ };
+
+ let result = item_substs.iter().zip(impl_substs.iter())
+ .filter(|&(_, &k)| {
+ if let Some(&ty::Region::ReEarlyBound(ref ebr)) = k.as_region() {
+ !impl_generics.region_param(ebr).pure_wrt_drop
+ } else if let Some(&ty::TyS {
+ sty: ty::TypeVariants::TyParam(ref pt), ..
+ }) = k.as_type() {
+ !impl_generics.type_param(pt).pure_wrt_drop
+ } else {
+ // not a type or region param - this should be reported
+ // as an error.
+ false
+ }
+ }).map(|(&item_param, _)| item_param).collect();
+ debug!("destructor_constraint({:?}) = {:?}", def.did, result);
+ result
+ }
+
+ /// Return a set of constraints that needs to be satisfied in
+ /// order for `ty` to be valid for destruction.
+ pub fn dtorck_constraint_for_ty(self,
+ span: Span,
+ for_ty: Ty<'tcx>,
+ depth: usize,
+ ty: Ty<'tcx>)
+ -> Result<ty::DtorckConstraint<'tcx>, ErrorReported>
+ {
+ debug!("dtorck_constraint_for_ty({:?}, {:?}, {:?}, {:?})",
+ span, for_ty, depth, ty);
+
+ if depth >= self.sess.recursion_limit.get() {
+ let mut err = struct_span_err!(
+ self.sess, span, E0320,
+ "overflow while adding drop-check rules for {}", for_ty);
+ err.note(&format!("overflowed on {}", ty));
+ err.emit();
+ return Err(ErrorReported);
+ }
+
+ let result = match ty.sty {
+ ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
+ ty::TyFloat(_) | ty::TyStr | ty::TyNever |
+ ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyFnDef(..) | ty::TyFnPtr(_) => {
+ // these types never have a destructor
+ Ok(ty::DtorckConstraint::empty())
+ }
+
+ ty::TyArray(ety, _) | ty::TySlice(ety) => {
+ // single-element containers, behave like their element
+ self.dtorck_constraint_for_ty(span, for_ty, depth+1, ety)
+ }
+
+ ty::TyTuple(tys, _) => {
+ tys.iter().map(|ty| {
+ self.dtorck_constraint_for_ty(span, for_ty, depth+1, ty)
+ }).collect()
+ }
+
+ ty::TyClosure(def_id, substs) => {
+ substs.upvar_tys(def_id, self).map(|ty| {
+ self.dtorck_constraint_for_ty(span, for_ty, depth+1, ty)
+ }).collect()
+ }
+
+ ty::TyAdt(def, substs) => {
+ let ty::DtorckConstraint {
+ dtorck_types, outlives
+ } = self.at(span).adt_dtorck_constraint(def.did);
+ Ok(ty::DtorckConstraint {
+    // FIXME: we can try to recursively `dtorck_constraint_for_ty`
+ // there, but that needs some way to handle cycles.
+ dtorck_types: dtorck_types.subst(self, substs),
+ outlives: outlives.subst(self, substs)
+ })
+ }
+
+ // Objects must be alive in order for their destructor
+ // to be called.
+ ty::TyDynamic(..) => Ok(ty::DtorckConstraint {
+ outlives: vec![Kind::from(ty)],
+ dtorck_types: vec![],
+ }),
+
+ // Types that can't be resolved. Pass them forward.
+ ty::TyProjection(..) | ty::TyAnon(..) | ty::TyParam(..) => {
+ Ok(ty::DtorckConstraint {
+ outlives: vec![],
+ dtorck_types: vec![ty],
+ })
+ }
+
+ ty::TyInfer(..) | ty::TyError => {
+ self.sess.delay_span_bug(span, "unresolved type in dtorck");
+ Err(ErrorReported)
+ }
+ };
+
+ debug!("dtorck_constraint_for_ty({:?}) = {:?}", ty, result);
+ result
}
- pub fn closure_base_def_id(&self, def_id: DefId) -> DefId {
+ pub fn closure_base_def_id(self, def_id: DefId) -> DefId {
let mut def_id = def_id;
while self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr {
def_id = self.parent_def_id(def_id).unwrap_or_else(|| {
/// a suitable "empty substs" for it.
pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> &'tcx ty::Substs<'tcx> {
ty::Substs::for_item(self, item_def_id,
- |_, _| self.mk_region(ty::ReErased),
+ |_, _| self.types.re_erased,
|_, _| {
bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id)
})
result
}
+ /// Returns `true` if and only if there are no `UnsafeCell`s
+ /// nested within the type (ignoring `PhantomData` or pointers).
+ #[inline]
+ pub fn is_freeze(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool
+ {
+ if self.flags.get().intersects(TypeFlags::FREEZENESS_CACHED) {
+ return self.flags.get().intersects(TypeFlags::IS_FREEZE);
+ }
+
+ self.is_freeze_uncached(tcx, param_env, span)
+ }
+
+ fn is_freeze_uncached(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ParameterEnvironment<'tcx>,
+ span: Span) -> bool {
+ assert!(!self.needs_infer());
+
+ // Fast-path for primitive types
+ let result = match self.sty {
+ TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
+ TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) |
+ TyStr | TyNever => Some(true),
+
+ TyArray(..) | TySlice(_) |
+ TyTuple(..) | TyClosure(..) | TyAdt(..) |
+ TyDynamic(..) | TyProjection(..) | TyParam(..) |
+ TyInfer(..) | TyAnon(..) | TyError => None
+ }.unwrap_or_else(|| {
+ self.impls_bound(tcx, param_env, tcx.require_lang_item(lang_items::FreezeTraitLangItem),
+ ¶m_env.is_freeze_cache, span) });
+
+ if !self.has_param_types() && !self.has_self_ty() {
+ self.flags.set(self.flags.get() | if result {
+ TypeFlags::FREEZENESS_CACHED | TypeFlags::IS_FREEZE
+ } else {
+ TypeFlags::FREEZENESS_CACHED
+ });
+ }
+
+ result
+ }
+
+ /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
+ /// non-copy and *might* have a destructor attached; if it returns
+ /// `false`, then `ty` definitely has no destructor (i.e. no drop glue).
+ ///
+ /// (Note that this implies that if `ty` has a destructor attached,
+ /// then `needs_drop` will definitely return `true` for `ty`.)
+ #[inline]
+ pub fn needs_drop(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>) -> bool {
+ if self.flags.get().intersects(TypeFlags::NEEDS_DROP_CACHED) {
+ return self.flags.get().intersects(TypeFlags::NEEDS_DROP);
+ }
+
+ self.needs_drop_uncached(tcx, param_env, &mut FxHashSet())
+ }
+
+ fn needs_drop_inner(&'tcx self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>,
+ stack: &mut FxHashSet<Ty<'tcx>>)
+ -> bool {
+ if self.flags.get().intersects(TypeFlags::NEEDS_DROP_CACHED) {
+ return self.flags.get().intersects(TypeFlags::NEEDS_DROP);
+ }
+
+ // This should be reported as an error by `check_representable`.
+ //
+ // Consider the type as not needing drop in the meanwhile to avoid
+ // further errors.
+ if let Some(_) = stack.replace(self) {
+ return false;
+ }
+
+ let needs_drop = self.needs_drop_uncached(tcx, param_env, stack);
+
+ // "Pop" the cycle detection "stack".
+ stack.remove(self);
+
+ needs_drop
+ }
+
+ fn needs_drop_uncached(&'tcx self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: &ty::ParameterEnvironment<'tcx>,
+ stack: &mut FxHashSet<Ty<'tcx>>)
+ -> bool {
+ assert!(!self.needs_infer());
+
+ let result = match self.sty {
+ // Fast-path for primitive types
+ ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) |
+ ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever |
+ ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar |
+ ty::TyRawPtr(_) | ty::TyRef(..) | ty::TyStr => false,
+
+ // Issue #22536: We first query type_moves_by_default. It sees a
+ // normalized version of the type, and therefore will definitely
+ // know whether the type implements Copy (and thus needs no
+ // cleanup/drop/zeroing) ...
+ _ if !self.moves_by_default(tcx, param_env, DUMMY_SP) => false,
+
+ // ... (issue #22536 continued) but as an optimization, still use
+ // prior logic of asking for the structural "may drop".
+
+ // FIXME(#22815): Note that this is a conservative heuristic;
+ // it may report that the type "may drop" when actual type does
+ // not actually have a destructor associated with it. But since
+ // the type absolutely did not have the `Copy` bound attached
+ // (see above), it is sound to treat it as having a destructor.
+
+ // User destructors are the only way to have concrete drop types.
+ ty::TyAdt(def, _) if def.has_dtor(tcx) => true,
+
+ // Can refer to a type which may drop.
+ // FIXME(eddyb) check this against a ParameterEnvironment.
+ ty::TyDynamic(..) | ty::TyProjection(..) | ty::TyParam(_) |
+ ty::TyAnon(..) | ty::TyInfer(_) | ty::TyError => true,
+
+ // Structural recursion.
+ ty::TyArray(ty, _) | ty::TySlice(ty) => {
+ ty.needs_drop_inner(tcx, param_env, stack)
+ }
+
+ ty::TyClosure(def_id, ref substs) => {
+ substs.upvar_tys(def_id, tcx)
+ .any(|ty| ty.needs_drop_inner(tcx, param_env, stack))
+ }
+
+ ty::TyTuple(ref tys, _) => {
+ tys.iter().any(|ty| ty.needs_drop_inner(tcx, param_env, stack))
+ }
+
+ // unions don't have destructors regardless of the child types
+ ty::TyAdt(def, _) if def.is_union() => false,
+
+ ty::TyAdt(def, substs) => {
+ def.variants.iter().any(|v| {
+ v.fields.iter().any(|f| {
+ f.ty(tcx, substs).needs_drop_inner(tcx, param_env, stack)
+ })
+ })
+ }
+ };
+
+ if !self.has_param_types() && !self.has_self_ty() {
+ self.flags.set(self.flags.get() | if result {
+ TypeFlags::NEEDS_DROP_CACHED | TypeFlags::NEEDS_DROP
+ } else {
+ TypeFlags::NEEDS_DROP_CACHED
+ });
+ }
+
+ result
+ }
+
#[inline]
pub fn layout<'lcx>(&'tcx self, infcx: &InferCtxt<'a, 'tcx, 'lcx>)
-> Result<&'tcx Layout, LayoutError<'tcx>> {
-> Vec<traits::PredicateObligation<'tcx>>
{
let predicates =
- self.infcx.tcx.item_predicates(def_id)
+ self.infcx.tcx.predicates_of(def_id)
.instantiate(self.infcx.tcx, substs);
let cause = self.cause(traits::ItemObligation(def_id));
predicates.predicates
}
}
}
- let mut generics = tcx.item_generics(item_def_id);
+ let mut generics = tcx.generics_of(item_def_id);
let mut path_def_id = did;
verbose = tcx.sess.verbose();
has_self = generics.has_self;
// Methods.
assert!(is_value_path);
child_types = generics.types.len();
- generics = tcx.item_generics(def_id);
+ generics = tcx.generics_of(def_id);
num_regions = generics.regions.len();
num_types = generics.types.len();
if !def.has_default {
break;
}
- if tcx.item_type(def.def_id).subst(tcx, substs) != actual {
+ if tcx.type_of(def.def_id).subst(tcx, substs) != actual {
break;
}
num_supplied_defaults += 1;
ty::tls::with(|tcx| {
// Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
// by looking up the projections associated with the def_id.
- let item_predicates = tcx.item_predicates(def_id);
+ let predicates_of = tcx.predicates_of(def_id);
let substs = tcx.lift(&substs).unwrap_or_else(|| {
tcx.intern_substs(&[])
});
- let bounds = item_predicates.instantiate(tcx, substs);
+ let bounds = predicates_of.instantiate(tcx, substs);
let mut first = true;
let mut is_sized = false;
extern crate cmake;
use std::env;
-use build_helper::native_lib_boilerplate;
+use build_helper::sanitizer_lib_boilerplate;
use cmake::Config;
fn main() {
if let Some(llvm_config) = env::var_os("LLVM_CONFIG") {
- let native = match native_lib_boilerplate("compiler-rt", "asan", "clang_rt.asan-x86_64",
- "build/lib/linux") {
+ let native = match sanitizer_lib_boilerplate("asan") {
Ok(native) => native,
_ => return,
};
("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
("i686-linux-android", i686_linux_android),
+ ("x86_64-linux-android", x86_64_linux_android),
("arm-linux-androideabi", arm_linux_androideabi),
("armv7-linux-androideabi", armv7_linux_androideabi),
("aarch64-linux-android", aarch64_linux_android),
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use LinkerFlavor;
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+ let mut base = super::android_base::opts();
+ base.cpu = "x86-64".to_string();
+ // https://developer.android.com/ndk/guides/abis.html#86-64
+ base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".to_string();
+ base.max_atomic_width = Some(64);
+ base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
+
+ Ok(Target {
+ llvm_target: "x86_64-linux-android".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "64".to_string(),
+ data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
+ arch: "x86_64".to_string(),
+ target_os: "android".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "unknown".to_string(),
+ linker_flavor: LinkerFlavor::Gcc,
+ options: base,
+ })
+}
}
Categorization::StaticItem |
Categorization::Deref(.., mc::UnsafePtr(..)) => {
- self.bccx.tcx.mk_region(ty::ReStatic)
+ self.bccx.tcx.types.re_static
}
Categorization::Deref(.., mc::BorrowedPtr(_, r)) |
Categorization::Deref(.., mc::Implicit(_, r)) => {
let ty = lvalue.ty(mir, tcx).to_ty(tcx);
debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, lvalue, ty);
- if tcx.type_needs_drop_given_env(ty, &ctxt.param_env) {
+ if ty.needs_drop(tcx, &ctxt.param_env) {
each_child(child);
} else {
debug!("on_all_drop_children_bits - skipping")
use std::rc::Rc;
use std::hash::{Hash, Hasher};
use syntax::ast;
-use syntax_pos::{DUMMY_SP, MultiSpan, Span};
+use syntax_pos::{MultiSpan, Span};
use errors::DiagnosticBuilder;
use rustc::hir;
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
tcx.visit_all_bodies_in_krate(|body_owner_def_id, _body_id| {
- ty::queries::borrowck::get(tcx, DUMMY_SP, body_owner_def_id);
+ tcx.borrowck(body_owner_def_id);
});
}
let owner_id = tcx.hir.as_local_node_id(owner_def_id).unwrap();
let body_id = tcx.hir.body_owned_by(owner_id);
let attributes = tcx.get_attrs(owner_def_id);
- let tables = tcx.item_tables(owner_def_id);
+ let tables = tcx.typeck_tables_of(owner_def_id);
let mut bccx = &mut BorrowckCtxt {
tcx: tcx,
{
let owner_id = tcx.hir.body_owner(body_id);
let owner_def_id = tcx.hir.local_def_id(owner_id);
- let tables = tcx.item_tables(owner_def_id);
+ let tables = tcx.typeck_tables_of(owner_def_id);
let mut bccx = BorrowckCtxt {
tcx: tcx,
use rustc::hir::map as hir_map;
use rustc::hir::map::blocks::FnLikeNode;
use rustc::traits;
-use rustc::hir::def::Def;
+use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::maps::Providers;
use syntax::ast;
use rustc::hir::{self, Expr};
-use syntax_pos::{Span, DUMMY_SP};
+use syntax_pos::Span;
use std::cmp::Ordering;
}
}
-fn lookup_variant_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- variant_def: DefId)
- -> Option<(&'tcx Expr, &'a ty::TypeckTables<'tcx>)> {
- if let Some(variant_node_id) = tcx.hir.as_local_node_id(variant_def) {
- let enum_node_id = tcx.hir.get_parent(variant_node_id);
- if let Some(hir_map::NodeItem(it)) = tcx.hir.find(enum_node_id) {
- if let hir::ItemEnum(ref edef, _) = it.node {
- for variant in &edef.variants {
- if variant.node.data.id() == variant_node_id {
- return variant.node.disr_expr.map(|e| {
- let def_id = tcx.hir.body_owner_def_id(e);
- (&tcx.hir.body(e).value,
- tcx.item_tables(def_id))
- });
- }
- }
- }
- }
- }
- None
-}
-
/// * `def_id` is the id of the constant.
/// * `substs` is the monomorphized substitutions for the expression.
///
/// `substs` is optional and is used for associated constants.
/// This generally happens in late/trans const evaluation.
-pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId,
- substs: &'tcx Substs<'tcx>)
- -> Option<(&'tcx Expr,
- &'a ty::TypeckTables<'tcx>)> {
+pub fn lookup_const_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ substs: &'tcx Substs<'tcx>)
+ -> Option<(DefId, &'tcx Substs<'tcx>)> {
if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
match tcx.hir.find(node_id) {
- None => None,
- Some(hir_map::NodeItem(&hir::Item {
- node: hir::ItemConst(_, body), ..
- })) |
- Some(hir_map::NodeImplItem(&hir::ImplItem {
- node: hir::ImplItemKind::Const(_, body), ..
- })) => {
- Some((&tcx.hir.body(body).value,
- tcx.item_tables(def_id)))
+ Some(hir_map::NodeTraitItem(_)) => {
+ // If we have a trait item and the substitutions for it,
+ // `resolve_trait_associated_const` will select an impl
+ // or the default.
+ resolve_trait_associated_const(tcx, def_id, substs)
}
- Some(hir_map::NodeTraitItem(ti)) => match ti.node {
- hir::TraitItemKind::Const(_, default) => {
- // If we have a trait item and the substitutions for it,
- // `resolve_trait_associated_const` will select an impl
- // or the default.
- let trait_id = tcx.hir.get_parent(node_id);
- let trait_id = tcx.hir.local_def_id(trait_id);
- let default_value = default.map(|body| {
- (&tcx.hir.body(body).value,
- tcx.item_tables(def_id))
- });
- resolve_trait_associated_const(tcx, def_id, default_value, trait_id, substs)
- }
- _ => None
- },
- Some(_) => None
+ _ => Some((def_id, substs))
}
} else {
- let expr_and_tables = tcx.sess.cstore.maybe_get_item_body(tcx, def_id).map(|body| {
- (&body.value, tcx.item_tables(def_id))
- });
match tcx.sess.cstore.describe_def(def_id) {
Some(Def::AssociatedConst(_)) => {
- let trait_id = tcx.sess.cstore.trait_of_item(def_id);
// As mentioned in the comments above for in-crate
// constants, we only try to find the expression for a
// trait-associated const if the caller gives us the
// substitutions for the reference to it.
- if let Some(trait_id) = trait_id {
- resolve_trait_associated_const(tcx, def_id, expr_and_tables,
- trait_id, substs)
+ if tcx.sess.cstore.trait_of_item(def_id).is_some() {
+ resolve_trait_associated_const(tcx, def_id, substs)
} else {
- expr_and_tables
+ Some((def_id, substs))
}
- },
- Some(Def::Const(..)) => expr_and_tables,
- _ => None
- }
- }
-}
-
-fn lookup_const_fn_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId)
- -> Option<(&'tcx hir::Body, &'a ty::TypeckTables<'tcx>)>
-{
- if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
- FnLikeNode::from_node(tcx.hir.get(node_id)).and_then(|fn_like| {
- if fn_like.constness() == hir::Constness::Const {
- Some((tcx.hir.body(fn_like.body()),
- tcx.item_tables(def_id)))
- } else {
- None
}
- })
- } else {
- if tcx.sess.cstore.is_const_fn(def_id) {
- tcx.sess.cstore.maybe_get_item_body(tcx, def_id).map(|body| {
- (body, tcx.item_tables(def_id))
- })
- } else {
- None
+ _ => Some((def_id, substs))
}
}
}
}
}
hir::ExprCast(ref base, _) => {
- match cast_const(tcx, cx.eval(base)?, ety) {
- Ok(val) => val,
- Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }),
+ let base_val = cx.eval(base)?;
+ let base_ty = cx.tables.expr_ty(base);
+
+ // Avoid applying substitutions if they're empty, that'd ICE.
+ let base_ty = if cx.substs.is_empty() {
+ base_ty
+ } else {
+ base_ty.subst(tcx, cx.substs)
+ };
+ if ety == base_ty {
+ base_val
+ } else {
+ match cast_const(tcx, base_val, ety) {
+ Ok(val) => val,
+ Err(kind) => signal!(e, kind),
+ }
}
}
hir::ExprPath(ref qpath) => {
match cx.tables.qpath_def(qpath, e.id) {
Def::Const(def_id) |
Def::AssociatedConst(def_id) => {
- if let Some((expr, tables)) = lookup_const_by_id(tcx, def_id, substs) {
- let cx = ConstContext::with_tables(tcx, tables);
- match cx.eval(expr) {
- Ok(val) => val,
- Err(ConstEvalErr { kind: TypeckError, .. }) => {
- signal!(e, TypeckError);
- }
- Err(err) => {
- debug!("bad reference: {:?}, {:?}", err.description(), err.span);
- signal!(e, ErroneousReferencedConstant(box err))
- },
- }
- } else {
- signal!(e, TypeckError);
- }
+ match tcx.at(e.span).const_eval((def_id, substs)) {
+ Ok(val) => val,
+ Err(ConstEvalErr { kind: TypeckError, .. }) => {
+ signal!(e, TypeckError);
+ }
+ Err(err) => {
+ debug!("bad reference: {:?}, {:?}", err.description(), err.span);
+ signal!(e, ErroneousReferencedConstant(box err))
+ },
+ }
},
- Def::VariantCtor(variant_def, ..) => {
- if let Some((expr, tables)) = lookup_variant_by_id(tcx, variant_def) {
- let cx = ConstContext::with_tables(tcx, tables);
- match cx.eval(expr) {
- Ok(val) => val,
- Err(ConstEvalErr { kind: TypeckError, .. }) => {
- signal!(e, TypeckError);
- }
- Err(err) => {
- debug!("bad reference: {:?}, {:?}", err.description(), err.span);
- signal!(e, ErroneousReferencedConstant(box err))
- },
- }
- } else {
- signal!(e, UnimplementedConstVal("enum variants"));
- }
+ Def::VariantCtor(variant_def, CtorKind::Const) => {
+ Variant(variant_def)
+ }
+ Def::VariantCtor(_, CtorKind::Fn) => {
+ signal!(e, UnimplementedConstVal("enum variants"));
}
- Def::StructCtor(..) => {
+ Def::StructCtor(_, CtorKind::Const) => {
ConstVal::Struct(Default::default())
}
+ Def::StructCtor(_, CtorKind::Fn) => {
+ signal!(e, UnimplementedConstVal("tuple struct constructors"))
+ }
Def::Local(def_id) => {
debug!("Def::Local({:?}): {:?}", def_id, cx.fn_args);
if let Some(val) = cx.fn_args.as_ref().and_then(|args| args.get(&def_id)) {
}
}
hir::ExprCall(ref callee, ref args) => {
- let (did, substs) = match cx.eval(callee)? {
- Function(did, substs) => (did, substs),
- Struct(_) => signal!(e, UnimplementedConstVal("tuple struct constructors")),
- callee => signal!(e, CallOn(callee)),
+ let (def_id, substs) = match cx.eval(callee)? {
+ Function(def_id, substs) => (def_id, substs),
+ _ => signal!(e, TypeckError),
};
- let (body, tables) = match lookup_const_fn_by_id(tcx, did) {
- Some(x) => x,
- None => signal!(e, NonConstPath),
+
+ let body = if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
+ if let Some(fn_like) = FnLikeNode::from_node(tcx.hir.get(node_id)) {
+ if fn_like.constness() == hir::Constness::Const {
+ tcx.hir.body(fn_like.body())
+ } else {
+ signal!(e, TypeckError)
+ }
+ } else {
+ signal!(e, TypeckError)
+ }
+ } else {
+ if tcx.sess.cstore.is_const_fn(def_id) {
+ tcx.sess.cstore.item_body(tcx, def_id)
+ } else {
+ signal!(e, TypeckError)
+ }
};
let arg_defs = body.arguments.iter().map(|arg| match arg.pat.node {
debug!("const call({:?})", call_args);
let callee_cx = ConstContext {
tcx: tcx,
- tables: tables,
+ tables: tcx.typeck_tables_of(def_id),
substs: substs,
fn_args: Some(call_args)
};
Ok(result)
}
-fn resolve_trait_associated_const<'a, 'tcx: 'a>(
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- trait_item_id: DefId,
- default_value: Option<(&'tcx Expr, &'a ty::TypeckTables<'tcx>)>,
- trait_id: DefId,
- rcvr_substs: &'tcx Substs<'tcx>
-) -> Option<(&'tcx Expr, &'a ty::TypeckTables<'tcx>)>
-{
- let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, rcvr_substs));
+fn resolve_trait_associated_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ substs: &'tcx Substs<'tcx>)
+ -> Option<(DefId, &'tcx Substs<'tcx>)> {
+ let trait_item = tcx.associated_item(def_id);
+ let trait_id = trait_item.container.id();
+ let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, substs));
debug!("resolve_trait_associated_const: trait_ref={:?}",
trait_ref);
- tcx.populate_implementations_for_trait_if_necessary(trait_id);
tcx.infer_ctxt((), Reveal::UserFacing).enter(|infcx| {
let mut selcx = traits::SelectionContext::new(&infcx);
let obligation = traits::Obligation::new(traits::ObligationCause::dummy(),
// when constructing the inference context above.
match selection {
traits::VtableImpl(ref impl_data) => {
- let name = tcx.associated_item(trait_item_id).name;
+ let name = trait_item.name;
let ac = tcx.associated_items(impl_data.impl_def_id)
.find(|item| item.kind == ty::AssociatedKind::Const && item.name == name);
match ac {
- Some(ic) => lookup_const_by_id(tcx, ic.def_id, Substs::empty()),
- None => default_value,
+ // FIXME(eddyb) Use proper Instance resolution to
+ // get the correct Substs returned from here.
+ Some(ic) => Some((ic.def_id, Substs::empty())),
+ None => {
+ if trait_item.defaultness.has_value() {
+ Some((def_id, substs))
+ } else {
+ None
+ }
+ }
}
}
_ => {
U8(u) => Ok(Char(u as char)),
_ => bug!(),
},
- _ => bug!(),
+ _ => Err(CannotCast),
}
}
Bool(b) => cast_const_int(tcx, U8(b as u8), ty),
Float(f) => cast_const_float(tcx, f, ty),
Char(c) => cast_const_int(tcx, U32(c as u32), ty),
+ Variant(v) => {
+ let adt = tcx.adt_def(tcx.parent_def_id(v).unwrap());
+ let idx = adt.variant_index_with_id(v);
+ cast_const_int(tcx, adt.discriminant_for_variant(tcx, idx), ty)
+ }
Function(..) => Err(UnimplementedConstVal("casting fn pointers")),
ByteStr(b) => match ty.sty {
ty::TyRawPtr(_) => {
pub fn provide(providers: &mut Providers) {
*providers = Providers {
- monomorphic_const_eval,
+ const_eval,
..*providers
};
}
-fn monomorphic_const_eval<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> EvalResult<'tcx> {
- let cx = ConstContext::with_tables(tcx, tcx.item_tables(def_id));
+fn const_eval<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ (def_id, substs): (DefId, &'tcx Substs<'tcx>))
+ -> EvalResult<'tcx> {
+ let (def_id, substs) = if let Some(resolved) = lookup_const_by_id(tcx, def_id, substs) {
+ resolved
+ } else {
+ return Err(ConstEvalErr {
+ span: tcx.def_span(def_id),
+ kind: TypeckError
+ });
+ };
+
+ let cx = ConstContext {
+ tcx,
+ tables: tcx.typeck_tables_of(def_id),
+ substs: substs,
+ fn_args: None
+ };
let body = if let Some(id) = tcx.hir.as_local_node_id(def_id) {
- ty::queries::mir_const_qualif::get(tcx, DUMMY_SP, def_id);
+ tcx.mir_const_qualif(def_id);
tcx.hir.body(tcx.hir.body_owned_by(id))
} else {
- tcx.sess.cstore.maybe_get_item_body(tcx, def_id).unwrap()
+ tcx.sess.cstore.item_body(tcx, def_id)
};
cx.eval(&body.value)
}
ConstVal::ByteStr(ref b) => write!(f, "{:?}", &b[..]),
ConstVal::Bool(b) => write!(f, "{:?}", b),
ConstVal::Char(c) => write!(f, "{:?}", c),
+ ConstVal::Variant(_) |
ConstVal::Struct(_) |
ConstVal::Tuple(_) |
ConstVal::Function(..) |
match def {
Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => {
let enum_id = self.tcx.parent_def_id(variant_id).unwrap();
- let adt_def = self.tcx.lookup_adt_def(enum_id);
+ let adt_def = self.tcx.adt_def(enum_id);
if adt_def.variants.len() > 1 {
let substs = match ty.sty {
TypeVariants::TyAdt(_, substs) => substs,
let substs = self.tables.node_id_item_substs(id)
.unwrap_or_else(|| tcx.intern_substs(&[]));
match eval::lookup_const_by_id(tcx, def_id, substs) {
- Some((const_expr, const_tables)) => {
+ Some((def_id, _substs)) => {
// Enter the inlined constant's tables temporarily.
let old_tables = self.tables;
- self.tables = const_tables;
- let pat = self.lower_const_expr(const_expr, pat_id, span);
+ self.tables = tcx.typeck_tables_of(def_id);
+ let body = if let Some(id) = tcx.hir.as_local_node_id(def_id) {
+ tcx.hir.body(tcx.hir.body_owned_by(id))
+ } else {
+ tcx.sess.cstore.item_body(tcx, def_id)
+ };
+ let pat = self.lower_const_expr(&body.value, pat_id, span);
self.tables = old_tables;
return pat;
}
let const_cx = eval::ConstContext::with_tables(self.tcx.global_tcx(), self.tables);
match const_cx.eval(expr) {
Ok(value) => {
- PatternKind::Constant { value: value }
+ if let ConstVal::Variant(def_id) = value {
+ let ty = self.tables.expr_ty(expr);
+ self.lower_variant_or_leaf(Def::Variant(def_id), ty, vec![])
+ } else {
+ PatternKind::Constant { value: value }
+ }
}
Err(e) => {
self.errors.push(PatternError::ConstEval(e));
sess.code_stats.borrow().print_type_sizes();
}
+ if ::std::env::var("SKIP_LLVM").is_ok() { ::std::process::exit(0); }
+
let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs);
controller_entry_point!(after_llvm,
defs: resolver.definitions,
analysis: ty::CrateAnalysis {
access_levels: Rc::new(AccessLevels::default()),
- reachable: NodeSet(),
+ reachable: Rc::new(NodeSet()),
name: crate_name.to_string(),
glob_map: if resolver.make_glob_map { Some(resolver.glob_map) } else { None },
},
let index = stability::Index::new(&hir_map);
let mut local_providers = ty::maps::Providers::default();
+ borrowck::provide(&mut local_providers);
mir::provide(&mut local_providers);
+ reachable::provide(&mut local_providers);
rustc_privacy::provide(&mut local_providers);
- borrowck::provide(&mut local_providers);
typeck::provide(&mut local_providers);
ty::provide(&mut local_providers);
reachable::provide(&mut local_providers);
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(ty::ReStatic),
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.types.re_static,
self.tcx().types.isize)
}
pub fn t_rptr_empty(&self) -> Ty<'tcx> {
- self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(ty::ReEmpty),
+ self.infcx.tcx.mk_imm_ref(self.infcx.tcx.types.re_empty,
self.tcx().types.isize)
}
draw_col_separator(buffer, line_offset, width_offset - 2);
+ // Special case when there's only one annotation involved, it is the start of a multiline
+ // span and there's no text at the beginning of the code line. Instead of doing the whole
+ // graph:
+ //
+ // 2 | fn foo() {
+ // | _^
+ // 3 | |
+ // 4 | | }
+ // | |_^ test
+ //
+ // we simplify the output to:
+ //
+ // 2 | / fn foo() {
+ // 3 | |
+ // 4 | | }
+ // | |_^ test
+ if line.annotations.len() == 1 {
+ if let Some(ref ann) = line.annotations.get(0) {
+ if let AnnotationType::MultilineStart(depth) = ann.annotation_type {
+ if source_string[0..ann.start_col].trim() == "" {
+ let style = if ann.is_primary {
+ Style::UnderlinePrimary
+ } else {
+ Style::UnderlineSecondary
+ };
+ buffer.putc(line_offset,
+ width_offset + depth - 1,
+ '/',
+ style);
+ return vec![(depth, style)];
+ }
+ }
+ }
+ }
+
// We want to display like this:
//
// vec.push(vec.pop().unwrap());
for (i, annotation) in annotations.iter().enumerate() {
for (j, next) in annotations.iter().enumerate() {
if overlaps(next, annotation, 0) // This label overlaps with another one and both
- && !annotation.is_line() // take space (they have text and are not
- && !next.is_line() // multiline lines).
- && annotation.has_label()
- && j > i
+ && annotation.has_label() // take space (they have text and are not
+ && j > i // multiline lines).
&& p == 0 // We're currently on the first line, move the label one line down
{
// This annotation needs a new line in the output.
} else {
0
};
- if overlaps(next, annotation, l) // Do not allow two labels to be in the same
+ if (overlaps(next, annotation, l) // Do not allow two labels to be in the same
// line if they overlap including padding, to
// avoid situations like:
//
// | |
// fn_spanx_span
//
- && !annotation.is_line() // Do not add a new line if this annotation
- && !next.is_line() // or the next are vertical line placeholders.
&& annotation.has_label() // Both labels must have some text, otherwise
- && next.has_label() // they are not overlapping.
+ && next.has_label()) // they are not overlapping.
+ // Do not add a new line if this annotation
+ // or the next are vertical line placeholders.
+ || (annotation.takes_space() // If either this or the next annotation is
+ && next.has_label()) // multiline start/end, move it to a new line
+                       || (annotation.has_label()   // so as not to overlap the horizontal lines.
+ && next.takes_space())
+ || (annotation.takes_space()
+ && next.takes_space())
{
+ // This annotation needs a new line in the output.
p += 1;
break;
}
line_len = p;
}
}
+
if line_len != 0 {
line_len += 1;
}
};
let pos = pos + 1;
- if pos > 1 && annotation.has_label() {
+ if pos > 1 && (annotation.has_label() || annotation.takes_space()) {
for p in line_offset + 1..line_offset + pos + 1 {
buffer.putc(p,
code_offset + annotation.start_col,
// After this we will have:
//
// 2 | fn foo() {
- // | __________ starting here...
+ // | __________
// | |
// | something about `foo`
// 3 |
// 4 | }
- // | _ ...ending here: test
+ // | _ test
for &(pos, annotation) in &annotations_position {
let style = if annotation.is_primary {
Style::LabelPrimary
// After this we will have:
//
// 2 | fn foo() {
- // | ____-_____^ starting here...
+ // | ____-_____^
// | |
// | something about `foo`
// 3 |
// 4 | }
- // | _^ ...ending here: test
+ // | _^ test
for &(_, annotation) in &annotations_position {
let (underline, style) = if annotation.is_primary {
('^', Style::UnderlinePrimary)
start_col: self.start_col,
end_col: self.start_col + 1,
is_primary: self.is_primary,
- label: Some("starting here...".to_owned()),
+ label: None,
annotation_type: AnnotationType::MultilineStart(self.depth)
}
}
start_col: self.end_col - 1,
end_col: self.end_col,
is_primary: self.is_primary,
- label: match self.label {
- Some(ref label) => Some(format!("...ending here: {}", label)),
- None => Some("...ending here".to_owned()),
- },
+ label: self.label.clone(),
annotation_type: AnnotationType::MultilineEnd(self.depth)
}
}
// Each of these corresponds to one part of the following diagram:
//
// x | foo(1 + bar(x,
- // | _________^ starting here... < MultilineStart
- // x | | y), < MultilineLine
- // | |______________^ ...ending here: label < MultilineEnd
+ // | _________^ < MultilineStart
+ // x | | y), < MultilineLine
+ // | |______________^ label < MultilineEnd
// x | z);
/// Annotation marking the first character of a fully shown multiline span
MultilineStart(usize),
false
}
}
+
+ pub fn takes_space(&self) -> bool {
+ // Multiline annotations always have to keep vertical space.
+ match self.annotation_type {
+ AnnotationType::MultilineStart(_) |
+ AnnotationType::MultilineEnd(_) => true,
+ _ => false,
+ }
+ }
}
#[derive(Debug)]
hir::ItemStruct(..) |
hir::ItemUnion(..) => {
let def_id = cx.tcx.hir.local_def_id(it.id);
- self.check_heap_type(cx, it.span, cx.tcx.item_type(def_id))
+ self.check_heap_type(cx, it.span, cx.tcx.type_of(def_id))
}
_ => ()
}
for struct_field in struct_def.fields() {
let def_id = cx.tcx.hir.local_def_id(struct_field.id);
self.check_heap_type(cx, struct_field.span,
- cx.tcx.item_type(def_id));
+ cx.tcx.type_of(def_id));
}
}
_ => (),
if ast_generics.is_parameterized() {
return;
}
- let def = cx.tcx.lookup_adt_def(cx.tcx.hir.local_def_id(item.id));
+ let def = cx.tcx.adt_def(cx.tcx.hir.local_def_id(item.id));
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
hir::ItemUnion(_, ref ast_generics) => {
if ast_generics.is_parameterized() {
return;
}
- let def = cx.tcx.lookup_adt_def(cx.tcx.hir.local_def_id(item.id));
+ let def = cx.tcx.adt_def(cx.tcx.hir.local_def_id(item.id));
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
hir::ItemEnum(_, ref ast_generics) => {
if ast_generics.is_parameterized() {
return;
}
- let def = cx.tcx.lookup_adt_def(cx.tcx.hir.local_def_id(item.id));
+ let def = cx.tcx.adt_def(cx.tcx.hir.local_def_id(item.id));
(def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
}
_ => return,
};
if self.impling_types.is_none() {
- let debug_def = cx.tcx.lookup_trait_def(debug);
+ let debug_def = cx.tcx.trait_def(debug);
let mut impls = NodeSet();
debug_def.for_each_impl(cx.tcx, |d| {
- if let Some(ty_def) = cx.tcx.item_type(d).ty_to_def_id() {
+ if let Some(ty_def) = cx.tcx.type_of(d).ty_to_def_id() {
if let Some(node_id) = cx.tcx.hir.as_local_node_id(ty_def) {
impls.insert(node_id);
}
}
fn def_id_is_transmute(cx: &LateContext, def_id: DefId) -> bool {
- match cx.tcx.item_type(def_id).sty {
+ match cx.tcx.type_of(def_id).sty {
ty::TyFnDef(.., bfty) if bfty.abi() == RustIntrinsic => (),
_ => return false,
}
if let hir::ItemUnion(ref vdata, _) = item.node {
let param_env = &ty::ParameterEnvironment::for_item(ctx.tcx, item.id);
for field in vdata.fields() {
- let field_ty = ctx.tcx.item_type(ctx.tcx.hir.local_def_id(field.id));
- if ctx.tcx.type_needs_drop_given_env(field_ty, param_env) {
+ let field_ty = ctx.tcx.type_of(ctx.tcx.hir.local_def_id(field.id));
+ if field_ty.needs_drop(ctx.tcx, param_env) {
ctx.span_lint(UNIONS_WITH_DROP_FIELDS,
field.span,
"union contains a field with possibly non-trivial drop code, \
fn check_foreign_fn(&mut self, id: ast::NodeId, decl: &hir::FnDecl) {
let def_id = self.cx.tcx.hir.local_def_id(id);
- let sig = self.cx.tcx.item_type(def_id).fn_sig();
+ let sig = self.cx.tcx.type_of(def_id).fn_sig();
let sig = self.cx.tcx.erase_late_bound_regions(&sig);
for (input_ty, input_hir) in sig.inputs().iter().zip(&decl.inputs) {
fn check_foreign_static(&mut self, id: ast::NodeId, span: Span) {
let def_id = self.cx.tcx.hir.local_def_id(id);
- let ty = self.cx.tcx.item_type(def_id);
+ let ty = self.cx.tcx.type_of(def_id);
self.check_type_for_ffi_and_report_errors(span, ty);
}
}
if let hir::ItemEnum(ref enum_definition, ref gens) = it.node {
if gens.ty_params.is_empty() {
// sizes only make sense for non-generic types
- let t = cx.tcx.item_type(cx.tcx.hir.local_def_id(it.id));
+ let t = cx.tcx.type_of(cx.tcx.hir.local_def_id(it.id));
let layout = cx.tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
let ty = cx.tcx.erase_regions(&t);
ty.layout(&infcx).unwrap_or_else(|e| {
if target.contains("windows") {
println!("cargo:rustc-link-lib=ole32");
}
+ if target.contains("windows-gnu") {
+ println!("cargo:rustc-link-lib=static-nobundle=gcc_s");
+ println!("cargo:rustc-link-lib=static-nobundle=pthread");
+ }
}
#![feature(link_args)]
#![feature(staged_api)]
#![feature(rustc_private)]
+#![feature(static_nobundle)]
extern crate libc;
#[macro_use]
extern crate cmake;
use std::env;
-use build_helper::native_lib_boilerplate;
+use build_helper::sanitizer_lib_boilerplate;
use cmake::Config;
fn main() {
if let Some(llvm_config) = env::var_os("LLVM_CONFIG") {
- let native = match native_lib_boilerplate("compiler-rt", "lsan", "clang_rt.lsan-x86_64",
- "build/lib/linux") {
+ let native = match sanitizer_lib_boilerplate("lsan") {
Ok(native) => native,
_ => return,
};
cnum_map: RefCell::new(cnum_map),
cnum: cnum,
codemap_import_info: RefCell::new(vec![]),
+ attribute_cache: RefCell::new([Vec::new(), Vec::new()]),
dep_kind: Cell::new(dep_kind),
source: cstore::CrateSource {
dylib: dylib,
fn inject_sanitizer_runtime(&mut self) {
if let Some(ref sanitizer) = self.sess.opts.debugging_opts.sanitizer {
- // Sanitizers can only be used with x86_64 Linux executables linked
- // to `std`
- if self.sess.target.target.llvm_target != "x86_64-unknown-linux-gnu" {
- self.sess.err(&format!("Sanitizers only work with the \
- `x86_64-unknown-linux-gnu` target."));
+ // Sanitizers can only be used on some tested platforms with
+ // executables linked to `std`
+ const ASAN_SUPPORTED_TARGETS: &[&str] = &["x86_64-unknown-linux-gnu",
+ "x86_64-apple-darwin"];
+ const TSAN_SUPPORTED_TARGETS: &[&str] = &["x86_64-unknown-linux-gnu",
+ "x86_64-apple-darwin"];
+ const LSAN_SUPPORTED_TARGETS: &[&str] = &["x86_64-unknown-linux-gnu"];
+ const MSAN_SUPPORTED_TARGETS: &[&str] = &["x86_64-unknown-linux-gnu"];
+
+ let supported_targets = match *sanitizer {
+ Sanitizer::Address => ASAN_SUPPORTED_TARGETS,
+ Sanitizer::Thread => TSAN_SUPPORTED_TARGETS,
+ Sanitizer::Leak => LSAN_SUPPORTED_TARGETS,
+ Sanitizer::Memory => MSAN_SUPPORTED_TARGETS,
+ };
+ if !supported_targets.contains(&&*self.sess.target.target.llvm_target) {
+ self.sess.err(&format!("{:?}Sanitizer only works with the `{}` target",
+ sanitizer,
+ supported_targets.join("` or `")
+ ));
return
}
pub cnum_map: RefCell<CrateNumMap>,
pub cnum: CrateNum,
pub codemap_import_info: RefCell<Vec<ImportedFileMap>>,
+ pub attribute_cache: RefCell<[Vec<Option<Rc<[ast::Attribute]>>>; 2]>,
pub root: schema::CrateRoot,
}
pub fn is_staged_api(&self) -> bool {
- for attr in self.get_item_attrs(CRATE_DEF_INDEX) {
+ for attr in self.get_item_attrs(CRATE_DEF_INDEX).iter() {
if attr.path == "stable" || attr.path == "unstable" {
return true;
}
}
provide! { <'tcx> tcx, def_id, cdata
- ty => { cdata.get_type(def_id.index, tcx) }
- generics => { tcx.alloc_generics(cdata.get_generics(def_id.index)) }
- predicates => { cdata.get_predicates(def_id.index, tcx) }
- super_predicates => { cdata.get_super_predicates(def_id.index, tcx) }
+ type_of => { cdata.get_type(def_id.index, tcx) }
+ generics_of => { tcx.alloc_generics(cdata.get_generics(def_id.index)) }
+ predicates_of => { cdata.get_predicates(def_id.index, tcx) }
+ super_predicates_of => { cdata.get_super_predicates(def_id.index, tcx) }
trait_def => {
tcx.alloc_trait_def(cdata.get_trait_def(def_id.index))
}
let _ = cdata;
tcx.calculate_dtor(def_id, &mut |_,_| Ok(()))
}
- variances => { Rc::new(cdata.get_item_variances(def_id.index)) }
+ variances_of => { Rc::new(cdata.get_item_variances(def_id.index)) }
associated_item_def_ids => {
let mut result = vec![];
cdata.each_child_of_item(def_id.index, |child| result.push(child.def.def_id()));
}
associated_item => { cdata.get_associated_item(def_id.index) }
impl_trait_ref => { cdata.get_impl_trait(def_id.index, tcx) }
+ impl_polarity => { cdata.get_impl_polarity(def_id.index) }
coerce_unsized_info => {
cdata.get_coerce_unsized_info(def_id.index).unwrap_or_else(|| {
bug!("coerce_unsized_info: `{:?}` is missing its info", def_id);
mir
}
mir_const_qualif => { cdata.mir_const_qualif(def_id.index) }
- typeck_tables => { cdata.item_body_tables(def_id.index, tcx) }
+ typeck_tables_of => { cdata.item_body_tables(def_id.index, tcx) }
closure_kind => { cdata.closure_kind(def_id.index) }
closure_type => { cdata.closure_ty(def_id.index, tcx) }
inherent_impls => { Rc::new(cdata.get_inherent_implementations_for_type(def_id.index)) }
+ is_foreign_item => { cdata.is_foreign_item(def_id.index) }
}
impl CrateStore for cstore::CStore {
self.get_crate_data(def.krate).get_generics(def.index)
}
- fn item_attrs(&self, def_id: DefId) -> Vec<ast::Attribute>
+ fn item_attrs(&self, def_id: DefId) -> Rc<[ast::Attribute]>
{
self.dep_graph.read(DepNode::MetaData(def_id));
self.get_crate_data(def_id.krate).get_item_attrs(def_id.index)
result
}
- fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity
- {
- self.dep_graph.read(DepNode::MetaData(def));
- self.get_crate_data(def.krate).get_impl_polarity(def.index)
- }
-
fn impl_parent(&self, impl_def: DefId) -> Option<DefId> {
self.dep_graph.read(DepNode::MetaData(impl_def));
self.get_crate_data(impl_def.krate).get_parent_impl(impl_def.index)
// Mark the attrs as used
let attrs = data.get_item_attrs(id.index);
- for attr in &attrs {
+ for attr in attrs.iter() {
attr::mark_used(attr);
}
ident: ast::Ident::with_empty_ctxt(name),
id: ast::DUMMY_NODE_ID,
span: local_span,
- attrs: attrs,
+ attrs: attrs.iter().cloned().collect(),
node: ast::ItemKind::MacroDef(body.into()),
vis: ast::Visibility::Inherited,
})
}
- fn maybe_get_item_body<'a, 'tcx>(&self,
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> Option<&'tcx hir::Body>
- {
+ fn item_body<'a, 'tcx>(&self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> &'tcx hir::Body {
if let Some(cached) = tcx.hir.get_inlined_body(def_id) {
- return Some(cached);
+ return cached;
}
self.dep_graph.read(DepNode::MetaData(def_id));
- debug!("maybe_get_item_body({}): inlining item", tcx.item_path_str(def_id));
+ debug!("item_body({}): inlining item", tcx.item_path_str(def_id));
- self.get_crate_data(def_id.krate).maybe_get_item_body(tcx, def_id.index)
+ self.get_crate_data(def_id.krate).item_body(tcx, def_id.index)
}
fn item_body_nested_bodies(&self, def: DefId) -> BTreeMap<hir::BodyId, hir::Body> {
use std::collections::BTreeMap;
use std::io;
use std::mem;
+use std::rc::Rc;
use std::str;
use std::u32;
impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::AdtDef> for DecodeContext<'a, 'tcx> {
fn specialized_decode(&mut self) -> Result<&'tcx ty::AdtDef, Self::Error> {
let def_id = DefId::decode(self)?;
- Ok(self.tcx().lookup_adt_def(def_id))
+ Ok(self.tcx().adt_def(def_id))
}
}
}
}
- pub fn maybe_get_item_body(&self,
- tcx: TyCtxt<'a, 'tcx, 'tcx>,
- id: DefIndex)
- -> Option<&'tcx hir::Body> {
- if self.is_proc_macro(id) { return None; }
- self.entry(id).ast.map(|ast| {
- let def_id = self.local_def_id(id);
- let body = ast.decode(self).body.decode(self);
- tcx.hir.intern_inlined_body(def_id, body)
- })
+ pub fn item_body(&self,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ id: DefIndex)
+ -> &'tcx hir::Body {
+ assert!(!self.is_proc_macro(id));
+ let ast = self.entry(id).ast.unwrap();
+ let def_id = self.local_def_id(id);
+ let body = ast.decode(self).body.decode(self);
+ tcx.hir.intern_inlined_body(def_id, body)
}
pub fn item_body_tables(&self,
}
}
- pub fn get_item_attrs(&self, node_id: DefIndex) -> Vec<ast::Attribute> {
+ pub fn get_item_attrs(&self, node_id: DefIndex) -> Rc<[ast::Attribute]> {
+ let (node_as, node_index) =
+ (node_id.address_space().index(), node_id.as_array_index());
if self.is_proc_macro(node_id) {
- return Vec::new();
+ return Rc::new([]);
+ }
+
+ if let Some(&Some(ref val)) =
+ self.attribute_cache.borrow()[node_as].get(node_index) {
+ return val.clone();
}
+
// The attributes for a tuple struct are attached to the definition, not the ctor;
// we assume that someone passing in a tuple struct ctor is actually wanting to
// look at the definition
if def_key.disambiguated_data.data == DefPathData::StructCtor {
item = self.entry(def_key.parent.unwrap());
}
- self.get_attributes(&item)
+ let result = Rc::__from_array(self.get_attributes(&item).into_boxed_slice());
+ let vec_ = &mut self.attribute_cache.borrow_mut()[node_as];
+ if vec_.len() < node_index + 1 {
+ vec_.resize(node_index + 1, None);
+ }
+ vec_[node_index] = Some(result.clone());
+ result
}
pub fn get_struct_field_names(&self, id: DefIndex) -> Vec<ast::Name> {
use syntax::codemap::Spanned;
use syntax::attr;
use syntax::symbol::Symbol;
-use syntax_pos::{self, DUMMY_SP};
+use syntax_pos;
use rustc::hir::{self, PatKind};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
fn encode_item_variances(&mut self, def_id: DefId) -> LazySeq<ty::Variance> {
debug!("EntryBuilder::encode_item_variances({:?})", def_id);
let tcx = self.tcx;
- self.lazy_seq_from_slice(&tcx.item_variances(def_id))
+ self.lazy_seq_from_slice(&tcx.variances_of(def_id))
}
fn encode_item_type(&mut self, def_id: DefId) -> Lazy<Ty<'tcx>> {
let tcx = self.tcx;
- let ty = tcx.item_type(def_id);
+ let ty = tcx.type_of(def_id);
debug!("EntryBuilder::encode_item_type({:?}) => {:?}", def_id, ty);
self.lazy(&ty)
}
(enum_did, Untracked(index)): (DefId, Untracked<usize>))
-> Entry<'tcx> {
let tcx = self.tcx;
- let def = tcx.lookup_adt_def(enum_did);
+ let def = tcx.adt_def(enum_did);
let variant = &def.variants[index];
let def_id = variant.did;
debug!("EntryBuilder::encode_enum_variant_info({:?})", def_id);
impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> {
fn encode_fields(&mut self, adt_def_id: DefId) {
- let def = self.tcx.lookup_adt_def(adt_def_id);
+ let def = self.tcx.adt_def(adt_def_id);
for (variant_index, variant) in def.variants.iter().enumerate() {
for (field_index, field) in variant.fields.iter().enumerate() {
self.record(field.did,
usize)>))
-> Entry<'tcx> {
let tcx = self.tcx;
- let variant = &tcx.lookup_adt_def(adt_def_id).variants[variant_index];
+ let variant = &tcx.adt_def(adt_def_id).variants[variant_index];
let field = &variant.fields[field_index];
let def_id = field.did;
fn encode_struct_ctor(&mut self, (adt_def_id, def_id): (DefId, DefId)) -> Entry<'tcx> {
debug!("EntryBuilder::encode_struct_ctor({:?})", def_id);
let tcx = self.tcx;
- let variant = tcx.lookup_adt_def(adt_def_id).struct_variant();
+ let variant = tcx.adt_def(adt_def_id).struct_variant();
let data = VariantData {
ctor_kind: variant.ctor_kind,
fn encode_generics(&mut self, def_id: DefId) -> Lazy<ty::Generics> {
debug!("EntryBuilder::encode_generics({:?})", def_id);
let tcx = self.tcx;
- self.lazy(tcx.item_generics(def_id))
+ self.lazy(tcx.generics_of(def_id))
}
fn encode_predicates(&mut self, def_id: DefId) -> Lazy<ty::GenericPredicates<'tcx>> {
debug!("EntryBuilder::encode_predicates({:?})", def_id);
let tcx = self.tcx;
- self.lazy(&tcx.item_predicates(def_id))
+ self.lazy(&tcx.predicates_of(def_id))
}
fn encode_info_for_trait_item(&mut self, def_id: DefId) -> Entry<'tcx> {
let kind = match impl_item.kind {
ty::AssociatedKind::Const => {
EntryKind::AssociatedConst(container,
- ty::queries::mir_const_qualif::get(self.tcx, ast_item.span, def_id))
+ self.tcx.at(ast_item.span).mir_const_qualif(def_id))
}
ty::AssociatedKind::Method => {
let fn_data = if let hir::ImplItemKind::Method(ref sig, body) = ast_item.node {
let (ast, mir) = if let hir::ImplItemKind::Const(_, body) = ast_item.node {
(Some(body), true)
} else if let hir::ImplItemKind::Method(ref sig, body) = ast_item.node {
- let generics = self.tcx.item_generics(def_id);
+ let generics = self.tcx.generics_of(def_id);
let types = generics.parent_types as usize + generics.types.len();
let needs_inline = types > 0 || attr::requests_inline(&ast_item.attrs);
let is_const_fn = sig.constness == hir::Constness::Const;
hir::ItemStatic(_, hir::MutMutable, _) => EntryKind::MutStatic,
hir::ItemStatic(_, hir::MutImmutable, _) => EntryKind::ImmStatic,
hir::ItemConst(..) => {
- EntryKind::Const(ty::queries::mir_const_qualif::get(tcx, item.span, def_id))
+ EntryKind::Const(tcx.at(item.span).mir_const_qualif(def_id))
}
hir::ItemFn(_, _, constness, .., body) => {
let data = FnData {
hir::ItemTy(..) => EntryKind::Type,
hir::ItemEnum(..) => EntryKind::Enum(get_repr_options(&tcx, def_id)),
hir::ItemStruct(ref struct_def, _) => {
- let variant = tcx.lookup_adt_def(def_id).struct_variant();
+ let variant = tcx.adt_def(def_id).struct_variant();
// Encode def_ids for each field and method
// for methods, write all the stuff get_trait_method
}), repr_options)
}
hir::ItemUnion(..) => {
- let variant = tcx.lookup_adt_def(def_id).struct_variant();
+ let variant = tcx.adt_def(def_id).struct_variant();
let repr_options = get_repr_options(&tcx, def_id);
EntryKind::Union(self.lazy(&VariantData {
hir::ItemImpl(_, polarity, ..) => {
let trait_ref = tcx.impl_trait_ref(def_id);
let parent = if let Some(trait_ref) = trait_ref {
- let trait_def = tcx.lookup_trait_def(trait_ref.def_id);
+ let trait_def = tcx.trait_def(trait_ref.def_id);
trait_def.ancestors(def_id).skip(1).next().and_then(|node| {
match node {
specialization_graph::Node::Impl(parent) => Some(parent),
let coerce_unsized_info =
trait_ref.and_then(|t| {
if Some(t.def_id) == tcx.lang_items.coerce_unsized_trait() {
- Some(ty::queries::coerce_unsized_info::get(tcx, item.span, def_id))
+ Some(tcx.at(item.span).coerce_unsized_info(def_id))
} else {
None
}
EntryKind::Impl(self.lazy(&data))
}
hir::ItemTrait(..) => {
- let trait_def = tcx.lookup_trait_def(def_id);
+ let trait_def = tcx.trait_def(def_id);
let data = TraitData {
unsafety: trait_def.unsafety,
paren_sugar: trait_def.paren_sugar,
has_default_impl: tcx.trait_has_default_impl(def_id),
- super_predicates: self.lazy(&tcx.item_super_predicates(def_id)),
+ super_predicates: self.lazy(&tcx.super_predicates_of(def_id)),
};
EntryKind::Trait(self.lazy(&data))
.map(|foreign_item| tcx.hir.local_def_id(foreign_item.id).index))
}
hir::ItemEnum(..) => {
- let def = self.tcx.lookup_adt_def(def_id);
+ let def = self.tcx.adt_def(def_id);
self.lazy_seq(def.variants.iter().map(|v| {
assert!(v.did.is_local());
v.did.index
}
hir::ItemStruct(..) |
hir::ItemUnion(..) => {
- let def = self.tcx.lookup_adt_def(def_id);
+ let def = self.tcx.adt_def(def_id);
self.lazy_seq(def.struct_variant().fields.iter().map(|f| {
assert!(f.did.is_local());
f.did.index
hir::ItemEnum(..) => {
self.encode_fields(def_id);
- let def = self.tcx.lookup_adt_def(def_id);
+ let def = self.tcx.adt_def(def_id);
for (i, variant) in def.variants.iter().enumerate() {
self.record(variant.did,
EntryBuilder::encode_enum_variant_info,
let body = tcx.hir.body_owned_by(id);
Entry {
- kind: EntryKind::Const(ty::queries::mir_const_qualif::get(tcx, DUMMY_SP, def_id)),
+ kind: EntryKind::Const(tcx.mir_const_qualif(def_id)),
visibility: self.lazy(&ty::Visibility::Public),
span: self.lazy(&tcx.def_span(def_id)),
attributes: LazySeq::empty(),
}
pub fn get_repr_options<'a, 'tcx, 'gcx>(tcx: &TyCtxt<'a, 'tcx, 'gcx>, did: DefId) -> ReprOptions {
- let ty = tcx.item_type(did);
+ let ty = tcx.type_of(did);
match ty.sty {
ty::TyAdt(ref def, _) => return def.repr,
_ => bug!("{} is not an ADT", ty),
assert!(ty.is_slice());
let array_ty = tcx.mk_array(tcx.types.u8, bytes.len());
- let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty);
+ let array_ref = tcx.mk_imm_ref(tcx.types.re_static, array_ty);
let array = self.literal_operand(test.span, array_ref, Literal::Value {
value: value.clone()
});
TerminatorKind::Call {
func: Operand::Constant(Constant {
span: data.span,
- ty: tcx.item_type(free_func).subst(tcx, substs),
+ ty: tcx.type_of(free_func).subst(tcx, substs),
literal: Literal::Value {
value: ConstVal::Function(free_func, substs),
}
hir::ExprRepeat(ref v, count) => {
let c = &cx.tcx.hir.body(count).value;
let def_id = cx.tcx.hir.body_owner_def_id(count);
- let count = match ty::queries::monomorphic_const_eval::get(cx.tcx, c.span, def_id) {
+ let substs = Substs::empty();
+ let count = match cx.tcx.at(c.span).const_eval((def_id, substs)) {
Ok(ConstVal::Integral(ConstInt::Usize(u))) => u,
Ok(other) => bug!("constant evaluation of repeat count yielded {:?}", other),
Err(s) => cx.fatal_const_eval_err(&s, c.span, "expression")
let substs = self.tcx.mk_substs_trait(self_ty, params);
for item in self.tcx.associated_items(trait_def_id) {
if item.kind == ty::AssociatedKind::Method && item.name == method_name {
- let method_ty = self.tcx.item_type(item.def_id);
+ let method_ty = self.tcx.type_of(item.def_id);
let method_ty = method_ty.subst(self.tcx, substs);
return (method_ty,
Literal::Value {
type with inference types/regions",
ty);
});
- self.tcx.type_needs_drop_given_env(ty, &self.infcx.parameter_environment)
+ ty.needs_drop(self.tcx.global_tcx(), &self.infcx.parameter_environment)
}
pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> {
// types/lifetimes replaced)
let fn_sig = cx.tables().liberated_fn_sigs[&id].clone();
- let ty = tcx.item_type(tcx.hir.local_def_id(id));
+ let ty = tcx.type_of(tcx.hir.local_def_id(id));
let mut abi = fn_sig.abi;
let implicit_argument = if let ty::TyClosure(..) = ty.sty {
// HACK(eddyb) Avoid having RustCall on closures,
} else {
param_env.free_substs
};
- let fn_ty = tcx.item_type(def_id).subst(tcx, substs);
+ let fn_ty = tcx.type_of(def_id).subst(tcx, substs);
let sig = tcx.erase_late_bound_regions(&fn_ty.fn_sig());
let span = tcx.def_span(def_id);
call_kind={:?}, untuple_args={:?})",
def_id, rcvr_adjustment, call_kind, untuple_args);
- let fn_ty = tcx.item_type(def_id).subst(tcx, param_env.free_substs);
+ let fn_ty = tcx.type_of(def_id).subst(tcx, param_env.free_substs);
let sig = tcx.erase_late_bound_regions(&fn_ty.fn_sig());
let span = tcx.def_span(def_id);
Adjustment::Deref => Operand::Consume(rcvr_l.deref()),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
- let re_erased = tcx.mk_region(ty::ReErased);
let ref_rcvr = local_decls.push(temp_decl(
Mutability::Not,
- tcx.mk_ref(re_erased, ty::TypeAndMut {
+ tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty: sig.inputs()[0],
mutbl: hir::Mutability::MutMutable
}),
source_info: source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_rcvr),
- Rvalue::Ref(re_erased, BorrowKind::Mut, rcvr_l)
+ Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, rcvr_l)
)
});
Operand::Consume(Lvalue::Local(ref_rcvr))
CallKind::Direct(def_id) => (
Operand::Constant(Constant {
span: span,
- ty: tcx.item_type(def_id).subst(tcx, param_env.free_substs),
+ ty: tcx.type_of(def_id).subst(tcx, param_env.free_substs),
literal: Literal::Value {
value: ConstVal::Function(def_id, param_env.free_substs),
},
{
let tcx = infcx.tcx;
let def_id = tcx.hir.local_def_id(ctor_id);
- let sig = match tcx.item_type(def_id).sty {
+ let sig = match tcx.type_of(def_id).sty {
ty::TyFnDef(_, _, fty) => tcx.no_late_bound_regions(&fty)
.expect("LBR in ADT constructor signature"),
_ => bug!("unexpected type for ctor {:?}", def_id)
//! care erasing regions all over the place.
use rustc::ty::subst::Substs;
-use rustc::ty::{Ty, TyCtxt, ReErased, ClosureSubsts};
+use rustc::ty::{Ty, TyCtxt, ClosureSubsts};
use rustc::mir::*;
use rustc::mir::visit::MutVisitor;
use rustc::mir::transform::{MirPass, MirSource, Pass};
fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) {
match *rvalue {
Rvalue::Ref(ref mut r, _, _) => {
- *r = self.tcx.mk_region(ReErased);
+ *r = self.tcx.types.re_erased;
}
Rvalue::Use(..) |
Rvalue::Repeat(..) |
// a regular goto.
let ty = location.ty(&callee_mir, tcx).subst(tcx, callsite.substs);
let ty = ty.to_ty(tcx);
- if tcx.type_needs_drop_given_env(ty, ¶m_env) {
+ if ty.needs_drop(tcx, ¶m_env) {
cost += CALL_PENALTY;
if let Some(unwind) = unwind {
work_list.push(unwind);
let dest = if dest_needs_borrow(&destination.0) {
debug!("Creating temp for return destination");
let dest = Rvalue::Ref(
- self.tcx.mk_region(ty::ReErased),
+ self.tcx.types.re_erased,
BorrowKind::Mut,
destination.0);
fn cast_box_free_arg(&self, arg: Lvalue<'tcx>, ptr_ty: Ty<'tcx>,
callsite: &CallSite<'tcx>, caller_mir: &mut Mir<'tcx>) -> Operand<'tcx> {
let arg = Rvalue::Ref(
- self.tcx.mk_region(ty::ReErased),
+ self.tcx.types.re_erased,
BorrowKind::Mut,
arg.deref());
fn restrict(&mut self, ty: Ty<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: &ty::ParameterEnvironment<'tcx>) {
- if !ty.type_contents(tcx).interior_unsafe() {
+ if ty.is_freeze(tcx, param_env, DUMMY_SP) {
*self = *self - Qualif::MUTABLE_INTERIOR;
}
- if !tcx.type_needs_drop_given_env(ty, param_env) {
+ if !ty.needs_drop(tcx, param_env) {
*self = *self - Qualif::NEEDS_DROP;
}
}
let mut span = None;
self.tcx
- .lookup_trait_def(drop_trait_id)
+ .trait_def(drop_trait_id)
.for_each_relevant_impl(self.tcx, self.mir.return_ty, |impl_did| {
self.tcx.hir
.as_local_node_id(impl_did)
if substs.types().next().is_some() {
self.add_type(constant.ty);
} else {
- let bits = ty::queries::mir_const_qualif::get(self.tcx,
- constant.span,
- def_id);
+ let bits = self.tcx.at(constant.span).mir_const_qualif(def_id);
let qualif = Qualif::from_bits(bits).expect("invalid mir_const_qualif");
self.add(qualif);
let src = MirSource::from_node(tcx, id);
if let MirSource::Const(_) = src {
- ty::queries::mir_const_qualif::get(tcx, DUMMY_SP, def_id);
+ tcx.mir_const_qualif(def_id);
continue;
}
self.collapse_goto_chain(successor, &mut changed);
}
+ changed |= self.simplify_unwind(&mut terminator);
+
let mut new_stmts = vec![];
let mut inner_changed = true;
while inner_changed {
true
}
+ // Simplify an unwind edge: if this terminator's unwind/cleanup target is a
+ // block containing no statements whose terminator is just `Resume`, the
+ // explicit edge is redundant — unwinding straight to the landing pad is
+ // equivalent — so the `Some(block)` target is replaced with `None`.
+ //
+ // Returns true if the terminator was changed.
+ fn simplify_unwind(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
+ // Only these terminator kinds carry an unwind/cleanup edge.
+ let unwind = match terminator.kind {
+ TerminatorKind::Drop { ref mut unwind, .. } |
+ TerminatorKind::DropAndReplace { ref mut unwind, .. } |
+ TerminatorKind::Call { cleanup: ref mut unwind, .. } |
+ TerminatorKind::Assert { cleanup: ref mut unwind, .. } =>
+ unwind,
+ _ => return false
+ };
+
+ if let &mut Some(unwind_block) = unwind {
+ // A trivial resume block: empty statement list, `Resume` terminator.
+ let is_resume_block = match self.basic_blocks[unwind_block] {
+ BasicBlockData {
+ ref statements,
+ terminator: Some(Terminator {
+ kind: TerminatorKind::Resume, ..
+ }), ..
+ } if statements.is_empty() => true,
+ _ => false
+ };
+ if is_resume_block {
+ debug!("simplifying unwind to {:?} from {:?}",
+ unwind_block, terminator.source_info);
+ *unwind = None;
+ }
+ return is_resume_block;
+ }
+
+ false
+ }
+
fn strip_nops(&mut self) {
for blk in self.basic_blocks.iter_mut() {
blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind {
Lvalue::Local(index) => LvalueTy::Ty { ty: self.mir.local_decls[index].ty },
Lvalue::Static(box Static { def_id, ty: sty }) => {
let sty = self.sanitize_type(lvalue, sty);
- let ty = self.tcx().item_type(def_id);
+ let ty = self.tcx().type_of(def_id);
let ty = self.cx.normalize(&ty);
if let Err(terr) = self.cx.eq_types(self.last_span, ty, sty) {
span_mirbug!(
let mut fields = fields;
fields.retain(|&(ref lvalue, _)| {
- self.tcx().type_needs_drop_given_env(
- self.lvalue_ty(lvalue), self.elaborator.param_env())
+ self.lvalue_ty(lvalue).needs_drop(self.tcx(), self.elaborator.param_env())
});
debug!("drop_ladder - fields needing drop: {:?}", fields);
let ty = self.lvalue_ty(self.lvalue);
let substs = tcx.mk_substs(iter::once(Kind::from(ty)));
- let re_erased = tcx.mk_region(ty::ReErased);
- let ref_ty = tcx.mk_ref(re_erased, ty::TypeAndMut {
+ let ref_ty = tcx.mk_ref(tcx.types.re_erased, ty::TypeAndMut {
ty: ty,
mutbl: hir::Mutability::MutMutable
});
source_info: self.source_info,
kind: StatementKind::Assign(
Lvalue::Local(ref_lvalue),
- Rvalue::Ref(re_erased, BorrowKind::Mut, self.lvalue.clone())
+ Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut, self.lvalue.clone())
)
}],
terminator: Some(Terminator {
extern crate cmake;
use std::env;
-use build_helper::native_lib_boilerplate;
+use build_helper::sanitizer_lib_boilerplate;
use cmake::Config;
fn main() {
if let Some(llvm_config) = env::var_os("LLVM_CONFIG") {
- let native = match native_lib_boilerplate("compiler-rt", "msan", "clang_rt.msan-x86_64",
- "build/lib/linux") {
+ let native = match sanitizer_lib_boilerplate("msan") {
Ok(native) => native,
_ => return,
};
use rustc::hir::{self, PatKind, RangeEnd};
use syntax::ast;
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use std::collections::hash_map::Entry;
// Adds the worst effect out of all the values of one type.
fn add_type(&mut self, ty: Ty<'gcx>) {
- if ty.type_contents(self.tcx).interior_unsafe() {
+ if !ty.is_freeze(self.tcx, &self.param_env, DUMMY_SP) {
self.promotable = false;
}
- if self.tcx.type_needs_drop_given_env(ty, &self.param_env) {
+ if ty.needs_drop(self.tcx, &self.param_env) {
self.promotable = false;
}
}
};
let outer_tables = self.tables;
- self.tables = self.tcx.item_tables(self.tcx.hir.local_def_id(item_id));
+ self.tables = self.tcx.typeck_tables_of(self.tcx.hir.local_def_id(item_id));
let body = self.tcx.hir.body(body_id);
if !self.in_fn {
#![crate_type = "dylib"]
#![crate_type = "rlib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
- html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
- html_root_url = "https://doc.rust-lang.org/nightly/")]
+ html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
+ html_root_url = "https://doc.rust-lang.org/nightly/")]
#![deny(warnings)]
#![feature(rustc_diagnostic_macros)]
use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, CrateNum, DefId};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::hir::itemlikevisit::DeepVisitor;
-use rustc::hir::pat_util::EnumerateAndAdjustIterator;
use rustc::lint;
use rustc::middle::privacy::{AccessLevel, AccessLevels};
use rustc::ty::{self, TyCtxt, Ty, TypeFoldable};
use rustc::ty::maps::Providers;
use rustc::util::nodemap::NodeSet;
use syntax::ast;
-use syntax_pos::{DUMMY_SP, Span};
+use syntax_pos::Span;
use std::cmp;
use std::mem::replace;
impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> {
fn item_ty_level(&self, item_def_id: DefId) -> Option<AccessLevel> {
- let ty_def_id = match self.tcx.item_type(item_def_id).sty {
+ let ty_def_id = match self.tcx.type_of(item_def_id).sty {
ty::TyAdt(adt, _) => adt.did,
ty::TyDynamic(ref obj, ..) if obj.principal().is_some() =>
obj.principal().unwrap().def_id(),
hir::ItemConst(..) | hir::ItemStatic(..) |
hir::ItemFn(..) | hir::ItemTy(..) => {
if item_level.is_some() {
- self.reach(item.id).generics().predicates().item_type();
+ self.reach(item.id).generics().predicates().ty();
}
}
hir::ItemTrait(.., ref trait_item_refs) => {
!trait_item_ref.defaultness.has_value() {
// No type to visit.
} else {
- reach.item_type();
+ reach.ty();
}
}
}
for impl_item_ref in impl_item_refs {
let id = impl_item_ref.id.node_id;
if trait_ref.is_some() || self.get(id).is_some() {
- self.reach(id).generics().predicates().item_type();
+ self.reach(id).generics().predicates().ty();
}
}
}
for variant in &def.variants {
if self.get(variant.node.data.id()).is_some() {
for field in variant.node.data.fields() {
- self.reach(field.id).item_type();
+ self.reach(field.id).ty();
}
// Corner case: if the variant is reachable, but its
// enum is not, make the enum reachable as well.
hir::ItemForeignMod(ref foreign_mod) => {
for foreign_item in &foreign_mod.items {
if self.get(foreign_item.id).is_some() {
- self.reach(foreign_item.id).generics().predicates().item_type();
+ self.reach(foreign_item.id).generics().predicates().ty();
}
}
}
self.reach(item.id).generics().predicates();
for field in struct_def.fields() {
if self.get(field.id).is_some() {
- self.reach(field.id).item_type();
+ self.reach(field.id).ty();
}
}
}
if let hir::TyImplTrait(..) = ty.node {
if self.get(ty.id).is_some() {
// Reach the (potentially private) type and the API being exposed.
- self.reach(ty.id).item_type().predicates();
+ self.reach(ty.id).ty().predicates();
}
}
impl<'b, 'a, 'tcx> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> {
fn generics(&mut self) -> &mut Self {
- for def in &self.ev.tcx.item_generics(self.item_def_id).types {
+ for def in &self.ev.tcx.generics_of(self.item_def_id).types {
if def.has_default {
- self.ev.tcx.item_type(def.def_id).visit_with(self);
+ self.ev.tcx.type_of(def.def_id).visit_with(self);
}
}
self
}
fn predicates(&mut self) -> &mut Self {
- self.ev.tcx.item_predicates(self.item_def_id).visit_with(self);
+ self.ev.tcx.predicates_of(self.item_def_id).visit_with(self);
self
}
- fn item_type(&mut self) -> &mut Self {
- self.ev.tcx.item_type(self.item_def_id).visit_with(self);
+ fn ty(&mut self) -> &mut Self {
+ self.ev.tcx.type_of(self.item_def_id).visit_with(self);
self
}
}
}
-////////////////////////////////////////////////////////////////////////////////
-/// The privacy visitor, where privacy checks take place (violations reported)
-////////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////////////
+/// Name privacy visitor, checks privacy and reports violations.
+/// Most of name privacy checks are performed during the main resolution phase,
+/// or later in type checking when field accesses and associated items are resolved.
+/// This pass performs remaining checks for fields in struct expressions and patterns.
+//////////////////////////////////////////////////////////////////////////////////////
-struct PrivacyVisitor<'a, 'tcx: 'a> {
+struct NamePrivacyVisitor<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- curitem: DefId,
- in_foreign: bool,
tables: &'a ty::TypeckTables<'tcx>,
+ current_item: DefId,
}
-impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> {
- fn item_is_accessible(&self, did: DefId) -> bool {
- match self.tcx.hir.as_local_node_id(did) {
- Some(node_id) =>
- ty::Visibility::from_hir(&self.tcx.hir.expect_item(node_id).vis, node_id, self.tcx),
- None => self.tcx.sess.cstore.visibility(did),
- }.is_accessible_from(self.curitem, self.tcx)
- }
-
- // Checks that a field is in scope.
+impl<'a, 'tcx> NamePrivacyVisitor<'a, 'tcx> {
+ // Checks that a field is accessible.
fn check_field(&mut self, span: Span, def: &'tcx ty::AdtDef, field: &'tcx ty::FieldDef) {
- if !def.is_enum() && !field.vis.is_accessible_from(self.curitem, self.tcx) {
+ if !def.is_enum() && !field.vis.is_accessible_from(self.current_item, self.tcx) {
struct_span_err!(self.tcx.sess, span, E0451, "field `{}` of {} `{}` is private",
- field.name, def.variant_descr(), self.tcx.item_path_str(def.did))
+ field.name, def.variant_descr(), self.tcx.item_path_str(def.did))
.span_label(span, &format!("field `{}` is private", field.name))
.emit();
}
}
-
- // Checks that a method is in scope.
- fn check_method(&mut self, span: Span, method_def_id: DefId) {
- match self.tcx.associated_item(method_def_id).container {
- // Trait methods are always all public. The only controlling factor
- // is whether the trait itself is accessible or not.
- ty::TraitContainer(trait_def_id) if !self.item_is_accessible(trait_def_id) => {
- let msg = format!("source trait `{}` is private",
- self.tcx.item_path_str(trait_def_id));
- self.tcx.sess.span_err(span, &msg);
- }
- _ => {}
- }
- }
}
-impl<'a, 'tcx> Visitor<'tcx> for PrivacyVisitor<'a, 'tcx> {
+impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> {
/// We want to visit items in the context of their containing
/// module and so forth, so supply a crate for doing a deep walk.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
}
fn visit_nested_body(&mut self, body: hir::BodyId) {
- let old_tables = self.tables;
- self.tables = self.tcx.body_tables(body);
+ let orig_tables = replace(&mut self.tables, self.tcx.body_tables(body));
let body = self.tcx.hir.body(body);
self.visit_body(body);
- self.tables = old_tables;
+ self.tables = orig_tables;
}
fn visit_item(&mut self, item: &'tcx hir::Item) {
- let orig_curitem = replace(&mut self.curitem, self.tcx.hir.local_def_id(item.id));
+ let orig_current_item = replace(&mut self.current_item, self.tcx.hir.local_def_id(item.id));
intravisit::walk_item(self, item);
- self.curitem = orig_curitem;
+ self.current_item = orig_current_item;
}
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
match expr.node {
- hir::ExprMethodCall(..) => {
- let method_call = ty::MethodCall::expr(expr.id);
- let method = self.tables.method_map[&method_call];
- self.check_method(expr.span, method.def_id);
- }
- hir::ExprStruct(ref qpath, ref expr_fields, _) => {
+ hir::ExprStruct(ref qpath, ref fields, ref base) => {
let def = self.tables.qpath_def(qpath, expr.id);
let adt = self.tables.expr_ty(expr).ty_adt_def().unwrap();
let variant = adt.variant_of_def(def);
- // RFC 736: ensure all unmentioned fields are visible.
- // Rather than computing the set of unmentioned fields
- // (i.e. `all_fields - fields`), just check them all,
- // unless the ADT is a union, then unmentioned fields
- // are not checked.
- if adt.is_union() {
- for expr_field in expr_fields {
- self.check_field(expr.span, adt, variant.field_named(expr_field.name.node));
+ if let Some(ref base) = *base {
+ // If the expression uses FRU we need to make sure all the unmentioned fields
+ // are checked for privacy (RFC 736). Rather than computing the set of
+ // unmentioned fields, just check them all.
+ for variant_field in &variant.fields {
+ let field = fields.iter().find(|f| f.name.node == variant_field.name);
+ let span = if let Some(f) = field { f.span } else { base.span };
+ self.check_field(span, adt, variant_field);
}
} else {
- for field in &variant.fields {
- let expr_field = expr_fields.iter().find(|f| f.name.node == field.name);
- let span = if let Some(f) = expr_field { f.span } else { expr.span };
- self.check_field(span, adt, field);
+ for field in fields {
+ self.check_field(field.span, adt, variant.field_named(field.name.node));
}
}
}
intravisit::walk_expr(self, expr);
}
- fn visit_pat(&mut self, pattern: &'tcx hir::Pat) {
- // Foreign functions do not have their patterns mapped in the def_map,
- // and there's nothing really relevant there anyway, so don't bother
- // checking privacy. If you can name the type then you can pass it to an
- // external C function anyway.
- if self.in_foreign { return }
-
- match pattern.node {
+ fn visit_pat(&mut self, pat: &'tcx hir::Pat) {
+ match pat.node {
PatKind::Struct(ref qpath, ref fields, _) => {
- let def = self.tables.qpath_def(qpath, pattern.id);
- let adt = self.tables.pat_ty(pattern).ty_adt_def().unwrap();
+ let def = self.tables.qpath_def(qpath, pat.id);
+ let adt = self.tables.pat_ty(pat).ty_adt_def().unwrap();
let variant = adt.variant_of_def(def);
for field in fields {
self.check_field(field.span, adt, variant.field_named(field.node.name));
}
}
- PatKind::TupleStruct(_, ref fields, ddpos) => {
- match self.tables.pat_ty(pattern).sty {
- // enum fields have no privacy at this time
- ty::TyAdt(def, _) if !def.is_enum() => {
- let expected_len = def.struct_variant().fields.len();
- for (i, field) in fields.iter().enumerate_and_adjust(expected_len, ddpos) {
- if let PatKind::Wild = field.node {
- continue
- }
- self.check_field(field.span, def, &def.struct_variant().fields[i]);
- }
- }
- _ => {}
- }
- }
_ => {}
}
- intravisit::walk_pat(self, pattern);
- }
-
- fn visit_foreign_item(&mut self, fi: &'tcx hir::ForeignItem) {
- self.in_foreign = true;
- intravisit::walk_foreign_item(self, fi);
- self.in_foreign = false;
+ intravisit::walk_pat(self, pat);
}
}
impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> {
fn generics(&mut self) -> &mut Self {
- for def in &self.tcx.item_generics(self.item_def_id).types {
+ for def in &self.tcx.generics_of(self.item_def_id).types {
if def.has_default {
- self.tcx.item_type(def.def_id).visit_with(self);
+ self.tcx.type_of(def.def_id).visit_with(self);
}
}
self
}
fn predicates(&mut self) -> &mut Self {
- self.tcx.item_predicates(self.item_def_id).visit_with(self);
+ self.tcx.predicates_of(self.item_def_id).visit_with(self);
self
}
- fn item_type(&mut self) -> &mut Self {
- self.tcx.item_type(self.item_def_id).visit_with(self);
+ fn ty(&mut self) -> &mut Self {
+ self.tcx.type_of(self.item_def_id).visit_with(self);
self
}
// Subitems of these items have inherited publicity
hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) |
hir::ItemTy(..) => {
- self.check(item.id, item_visibility).generics().predicates().item_type();
+ self.check(item.id, item_visibility).generics().predicates().ty();
// Recurse for e.g. `impl Trait` (see `visit_ty`).
self.inner_visibility = item_visibility;
!trait_item_ref.defaultness.has_value() {
// No type to visit.
} else {
- check.item_type();
+ check.ty();
}
}
}
for variant in &def.variants {
for field in variant.node.data.fields() {
- self.check(field.id, item_visibility).item_type();
+ self.check(field.id, item_visibility).ty();
}
}
}
hir::ItemForeignMod(ref foreign_mod) => {
for foreign_item in &foreign_mod.items {
let vis = ty::Visibility::from_hir(&foreign_item.vis, item.id, tcx);
- self.check(foreign_item.id, vis).generics().predicates().item_type();
+ self.check(foreign_item.id, vis).generics().predicates().ty();
}
}
// Subitems of structs and unions have their own publicity
for field in struct_def.fields() {
let field_visibility = ty::Visibility::from_hir(&field.vis, item.id, tcx);
- self.check(field.id, min(item_visibility, field_visibility)).item_type();
+ self.check(field.id, min(item_visibility, field_visibility)).ty();
}
}
// The interface is empty
// Subitems of inherent impls have their own publicity
hir::ItemImpl(.., None, _, ref impl_item_refs) => {
let ty_vis =
- self.check(item.id, ty::Visibility::Invisible).item_type().min_visibility;
+ self.check(item.id, ty::Visibility::Invisible).ty().min_visibility;
self.check(item.id, ty_vis).generics().predicates();
for impl_item_ref in impl_item_refs {
let impl_item_vis =
ty::Visibility::from_hir(&impl_item.vis, item.id, tcx);
self.check(impl_item.id, min(impl_item_vis, ty_vis))
- .generics().predicates().item_type();
+ .generics().predicates().ty();
// Recurse for e.g. `impl Trait` (see `visit_ty`).
self.inner_visibility = impl_item_vis;
// Subitems of trait impls have inherited publicity
hir::ItemImpl(.., Some(_), _, ref impl_item_refs) => {
let vis = self.check(item.id, ty::Visibility::Invisible)
- .item_type().impl_trait_ref().min_visibility;
+ .ty().impl_trait_ref().min_visibility;
self.check(item.id, vis).generics().predicates();
for impl_item_ref in impl_item_refs {
let impl_item = self.tcx.hir.impl_item(impl_item_ref.id);
- self.check(impl_item.id, vis).generics().predicates().item_type();
+ self.check(impl_item.id, vis).generics().predicates().ty();
// Recurse for e.g. `impl Trait` (see `visit_ty`).
self.inner_visibility = vis;
// e.g. `impl Iterator<Item=T>` has two predicates,
// `X: Iterator` and `<X as Iterator>::Item == T`,
// where `X` is the `impl Iterator<Item=T>` itself,
- // stored in `item_predicates`, not in the `Ty` itself.
+ // stored in `predicates_of`, not in the `Ty` itself.
self.check(ty.id, self.inner_visibility).predicates();
}
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Rc<AccessLevels> {
tcx.dep_graph.with_ignore(|| { // FIXME
- ty::queries::privacy_access_levels::get(tcx, DUMMY_SP, LOCAL_CRATE)
+ tcx.privacy_access_levels(LOCAL_CRATE)
})
}
let krate = tcx.hir.krate();
- // Use the parent map to check the privacy of everything
- let mut visitor = PrivacyVisitor {
- curitem: DefId::local(CRATE_DEF_INDEX),
- in_foreign: false,
+ // Check privacy of names not checked in previous compilation stages.
+ let mut visitor = NamePrivacyVisitor {
tcx: tcx,
tables: &ty::TypeckTables::empty(),
+ current_item: DefId::local(CRATE_DEF_INDEX),
};
intravisit::walk_crate(&mut visitor, krate);
- tcx.sess.abort_if_errors();
-
// Build up a set of all exported items in the AST. This is a set of all
// items which are reachable from external crates based on visibility.
let mut visitor = EmbargoVisitor {
where F: FnOnce(&mut DumpVisitor<'l, 'tcx, 'll, D>)
{
let item_def_id = self.tcx.hir.local_def_id(item_id);
- match self.tcx.maps.typeck_tables.borrow().get(&item_def_id) {
+ match self.tcx.maps.typeck_tables_of.borrow().get(&item_def_id) {
Some(tables) => {
let old_tables = self.save_ctxt.tables;
self.save_ctxt.tables = tables;
let sub_span = self.span_utils.sub_span_before_token(field.span, token::Colon);
filter!(self.span_utils, sub_span, field.span, None);
let def_id = self.tcx.hir.local_def_id(field.id);
- let typ = self.tcx.item_type(def_id).to_string();
+ let typ = self.tcx.type_of(def_id).to_string();
let span = field.span;
let text = self.span_utils.snippet(field.span);
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
- let llscratch = bcx.alloca(ty, "abi_cast");
+ let llscratch = bcx.alloca(ty, "abi_cast", None);
base::Lifetime::Start.call(bcx, llscratch);
// ...where we first store the value...
// `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
// both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
// on memory dependencies rather than pointer equality
- let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
+ let is_freeze = ccx.shared().type_is_freeze(mt.ty);
- if mt.mutbl != hir::MutMutable && !interior_unsafe {
+ if mt.mutbl != hir::MutMutable && is_freeze {
arg.attrs.set(ArgAttribute::NoAlias);
}
- if mt.mutbl == hir::MutImmutable && !interior_unsafe {
+ if mt.mutbl == hir::MutImmutable && is_freeze {
arg.attrs.set(ArgAttribute::ReadOnly);
}
/// and fill in the actual contents in a second pass to prevent
/// unbounded recursion; see also the comments in `trans::type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
- generic_type_of(cx, t, None, false, false)
+ generic_type_of(cx, t, None)
}
pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>, name: &str) -> Type {
- generic_type_of(cx, t, Some(name), false, false)
+ generic_type_of(cx, t, Some(name))
}
pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
_ => unreachable!()
};
let fields = compute_fields(cx, t, nonnull_variant_index as usize, true);
- llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant, false, false),
+ llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant),
packed)
},
_ => bug!("This function cannot handle {} with layout {:#?}", t, l)
fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
- name: Option<&str>,
- sizing: bool,
- dst: bool) -> Type {
+ name: Option<&str>) -> Type {
let l = cx.layout_of(t);
- debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {} dst: {}",
- t, name, sizing, dst);
+ debug!("adt::generic_type_of t: {:?} name: {:?}", t, name);
match *l {
layout::CEnum { discr, .. } => Type::from_integer(cx, discr),
layout::RawNullablePointer { nndiscr, .. } => {
let fields = compute_fields(cx, t, nndiscr as usize, false);
match name {
None => {
- Type::struct_(cx, &struct_llfields(cx, &fields, nonnull, sizing, dst),
+ Type::struct_(cx, &struct_llfields(cx, &fields, nonnull),
nonnull.packed)
}
Some(name) => {
- assert_eq!(sizing, false);
Type::named_struct(cx, name)
}
}
let fields = compute_fields(cx, t, 0, true);
match name {
None => {
- let fields = struct_llfields(cx, &fields, &variant, sizing, dst);
+ let fields = struct_llfields(cx, &fields, &variant);
Type::struct_(cx, &fields, variant.packed)
}
Some(name) => {
// Hypothesis: named_struct's can never need a
// drop flag. (... needs validation.)
- assert_eq!(sizing, false);
Type::named_struct(cx, name)
}
}
}
}
}
- layout::General { discr, size, align, .. } => {
+ layout::General { discr, size, align, primitive_align, .. } => {
// We need a representation that has:
// * The alignment of the most-aligned field
// * The size of the largest variant (rounded up to that alignment)
// of the size.
let size = size.bytes();
let align = align.abi();
+ let primitive_align = primitive_align.abi();
assert!(align <= std::u32::MAX as u64);
let discr_ty = Type::from_integer(cx, discr);
let discr_size = discr.size().bytes();
let padded_discr_size = roundup(discr_size, align as u32);
let variant_part_size = size-padded_discr_size;
- let variant_fill = union_fill(cx, variant_part_size, align);
+ let variant_fill = union_fill(cx, variant_part_size, primitive_align);
- assert_eq!(machine::llalign_of_min(cx, variant_fill), align as u32);
+ assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32);
assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
let fields: Vec<Type> =
[discr_ty,
}
-fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>,
- variant: &layout::Struct,
- sizing: bool, _dst: bool) -> Vec<Type> {
- let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
- if sizing {
- bug!()
+// Double index to account for padding (FieldPath already uses `Struct::memory_index`)
+fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec<usize> {
+ discrfield.iter().map(|&i| (i as usize) << 1).collect::<Vec<_>>()
+}
+
+
+// Lookup `Struct::memory_index` and double it to account for padding
+pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
+ (variant.memory_index[index] as usize) << 1
+}
+
+
+pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty<'tcx>>,
+ variant: &layout::Struct) -> Vec<Type> {
+ debug!("struct_llfields: variant: {:?}", variant);
+ let mut first_field = true;
+ let mut min_offset = 0;
+ let mut result: Vec<Type> = Vec::with_capacity(field_tys.len() * 2);
+ let field_iter = variant.field_index_by_increasing_offset().map(|i| {
+ (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) });
+ for (index, ty, target_offset) in field_iter {
+ if first_field {
+ debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}",
+ index, ty, min_offset, target_offset);
+ first_field = false;
+ } else {
+ assert!(target_offset >= min_offset);
+ let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset };
+ result.push(Type::array(&Type::i8(cx), padding_bytes));
+ debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}",
+ index, ty, padding_bytes, min_offset, target_offset);
+ }
+ let llty = type_of::in_memory_type_of(cx, ty);
+ result.push(llty);
+ let layout = cx.layout_of(ty);
+ let target_size = layout.size(&cx.tcx().data_layout).bytes();
+ min_offset = target_offset + target_size;
+ }
+ if variant.sized && !field_tys.is_empty() {
+ if variant.stride().bytes() < min_offset {
+ bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(),
+ min_offset);
+ }
+ let padding_bytes = variant.stride().bytes() - min_offset;
+ debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n",
+ padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes());
+ result.push(Type::array(&Type::i8(cx), padding_bytes));
+ assert!(result.len() == (field_tys.len() * 2));
} else {
- fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
+ debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n",
+ min_offset, variant.min_size.bytes(), variant.stride().bytes());
}
+
+ result
}
pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
scrutinee: ValueRef,
alignment: Alignment,
) -> ValueRef {
- let llptrptr = bcx.gepi(scrutinee,
- &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>());
+ let path = struct_llfields_path(discrfield);
+ let llptrptr = bcx.gepi(scrutinee, &path);
let llptr = bcx.load(llptrptr, alignment.to_align());
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
- let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
+ let path = struct_llfields_path(discrfield);
let llptrptr = bcx.gepi(val, &path);
let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr, None);
cnum: CrateNum) {
let src = sess.cstore.used_crate_source(cnum);
let cratepath = &src.rlib.unwrap().0;
+
+ if sess.target.target.options.is_like_osx {
+ // On Apple platforms, the sanitizer is always built as a dylib, and
+ // LLVM will link to `@rpath/*.dylib`, so we need to specify an
+ // rpath to the library as well (the rpath should be absolute, see
+ // PR #41352 for details).
+ //
 +        // FIXME: Move this logic into librustc_*san once Cargo supports it
+ let rpath = cratepath.parent().unwrap();
+ let rpath = rpath.to_str().expect("non-utf8 component in path");
+ cmd.args(&["-Wl,-rpath".into(), "-Xlinker".into(), rpath.into()]);
+ }
+
let dst = tmpdir.join(cratepath.file_name().unwrap());
let cfg = archive_config(sess, &dst, Some(cratepath));
let mut archive = ArchiveBuilder::new(cfg);
scx.tcx().hir.local_def_id(node_id)
})
.map(|def_id| {
- let name = symbol_for_def_id(scx, def_id, symbol_map);
+ let name = symbol_for_def_id(scx.tcx(), def_id, symbol_map);
let export_level = export_level(scx, def_id);
debug!("EXPORTED SYMBOL (local): {} ({:?})", name, export_level);
(name, export_level)
.exported_symbols(cnum)
.iter()
.map(|&def_id| {
- let name = symbol_name(Instance::mono(scx.tcx(), def_id), scx);
+ let name = symbol_name(Instance::mono(scx.tcx(), def_id), scx.tcx());
let export_level = if special_runtime_crate {
// We can probably do better here by just ensuring that
// it has hidden visibility rather than public
}
}
-fn symbol_for_def_id<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
+fn symbol_for_def_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
symbol_map: &SymbolMap<'tcx>)
-> String {
// Just try to look things up in the symbol map. If nothing's there, we
// recompute.
- if let Some(node_id) = scx.tcx().hir.as_local_node_id(def_id) {
+ if let Some(node_id) = tcx.hir.as_local_node_id(def_id) {
if let Some(sym) = symbol_map.get(TransItem::Static(node_id)) {
return sym.to_owned();
}
}
- let instance = Instance::mono(scx.tcx(), def_id);
+ let instance = Instance::mono(tcx, def_id);
symbol_map.get(TransItem::Fn(instance))
.map(str::to_owned)
- .unwrap_or_else(|| symbol_name(instance, scx))
+ .unwrap_or_else(|| symbol_name(instance, tcx))
}
//! virtually impossible. Thus, symbol hash generation exclusively relies on
//! DefPaths which are much more robust in the face of changes to the code base.
-use common::SharedCrateContext;
use monomorphize::Instance;
use rustc::middle::weak_lang_items;
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
-use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::ty::fold::TypeVisitor;
use rustc::ty::item_path::{self, ItemPathBuffer, RootMode};
use rustc::ty::subst::Substs;
use rustc::util::common::record_time;
use syntax::attr;
-use syntax::symbol::{Symbol, InternedString};
-fn get_symbol_hash<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
+use std::fmt::Write;
+
+fn get_symbol_hash<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// the DefId of the item this name is for
def_id: Option<DefId>,
-> String {
debug!("get_symbol_hash(def_id={:?}, parameters={:?})", def_id, substs);
- let tcx = scx.tcx();
-
let mut hasher = ty::util::TypeIdHasher::<u64>::new(tcx);
record_time(&tcx.sess.perf_stats.symbol_hash_time, || {
// in case the same instances is emitted in two crates of the same
// project.
if substs.types().next().is_some() {
- hasher.hash(scx.tcx().crate_name.as_str());
- hasher.hash(scx.sess().local_crate_disambiguator().as_str());
+ hasher.hash(tcx.crate_name.as_str());
+ hasher.hash(tcx.sess.local_crate_disambiguator().as_str());
}
}
});
}
pub fn symbol_name<'a, 'tcx>(instance: Instance<'tcx>,
- scx: &SharedCrateContext<'a, 'tcx>) -> String {
+ tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String {
let def_id = instance.def_id();
let substs = instance.substs;
debug!("symbol_name(def_id={:?}, substs={:?})",
def_id, substs);
- let node_id = scx.tcx().hir.as_local_node_id(def_id);
+ let node_id = tcx.hir.as_local_node_id(def_id);
if let Some(id) = node_id {
- if scx.sess().plugin_registrar_fn.get() == Some(id) {
+ if tcx.sess.plugin_registrar_fn.get() == Some(id) {
let idx = def_id.index;
- let disambiguator = scx.sess().local_crate_disambiguator();
- return scx.sess().generate_plugin_registrar_symbol(disambiguator, idx);
+ let disambiguator = tcx.sess.local_crate_disambiguator();
+ return tcx.sess.generate_plugin_registrar_symbol(disambiguator, idx);
}
- if scx.sess().derive_registrar_fn.get() == Some(id) {
+ if tcx.sess.derive_registrar_fn.get() == Some(id) {
let idx = def_id.index;
- let disambiguator = scx.sess().local_crate_disambiguator();
- return scx.sess().generate_derive_registrar_symbol(disambiguator, idx);
+ let disambiguator = tcx.sess.local_crate_disambiguator();
+ return tcx.sess.generate_derive_registrar_symbol(disambiguator, idx);
}
}
// FIXME(eddyb) Precompute a custom symbol name based on attributes.
- let attrs = scx.tcx().get_attrs(def_id);
+ let attrs = tcx.get_attrs(def_id);
let is_foreign = if let Some(id) = node_id {
- match scx.tcx().hir.get(id) {
+ match tcx.hir.get(id) {
hir_map::NodeForeignItem(_) => true,
_ => false
}
} else {
- scx.sess().cstore.is_foreign_item(def_id)
+ tcx.sess.cstore.is_foreign_item(def_id)
};
if let Some(name) = weak_lang_items::link_name(&attrs) {
return name.to_string();
}
// Don't mangle foreign items.
- return scx.tcx().item_name(def_id).as_str().to_string();
+ return tcx.item_name(def_id).as_str().to_string();
}
- if let Some(name) = attr::find_export_name_attr(scx.sess().diagnostic(), &attrs) {
+ if let Some(name) = attr::find_export_name_attr(tcx.sess.diagnostic(), &attrs) {
// Use provided name
return name.to_string();
}
if attr::contains_name(&attrs, "no_mangle") {
// Don't mangle
- return scx.tcx().item_name(def_id).as_str().to_string();
+ return tcx.item_name(def_id).as_str().to_string();
}
// We want to compute the "type" of this item. Unfortunately, some
let mut ty_def_id = def_id;
let instance_ty;
loop {
- let key = scx.tcx().def_key(ty_def_id);
+ let key = tcx.def_key(ty_def_id);
match key.disambiguated_data.data {
DefPathData::TypeNs(_) |
DefPathData::ValueNs(_) => {
- instance_ty = scx.tcx().item_type(ty_def_id);
+ instance_ty = tcx.type_of(ty_def_id);
break;
}
_ => {
// Erase regions because they may not be deterministic when hashed
// and should not matter anyhow.
- let instance_ty = scx.tcx().erase_regions(&instance_ty);
-
- let hash = get_symbol_hash(scx, Some(def_id), instance_ty, Some(substs));
+ let instance_ty = tcx.erase_regions(&instance_ty);
- let mut buffer = SymbolPathBuffer {
- names: Vec::new()
- };
+ let hash = get_symbol_hash(tcx, Some(def_id), instance_ty, Some(substs));
+ let mut buffer = SymbolPathBuffer::new();
item_path::with_forced_absolute_paths(|| {
- scx.tcx().push_item_path(&mut buffer, def_id);
+ tcx.push_item_path(&mut buffer, def_id);
});
-
- mangle(buffer.names.into_iter(), &hash)
+ buffer.finish(&hash)
}
+// Follow C++ namespace-mangling style, see
+// http://en.wikipedia.org/wiki/Name_mangling for more info.
+//
+// It turns out that on macOS you can actually have arbitrary symbols in
+// function names (at least when given to LLVM), but this is not possible
 +// when using a Unix linker. Perhaps one day when we just use a linker from LLVM
+// we won't need to do this name mangling. The problem with name mangling is
+// that it seriously limits the available characters. For example we can't
+// have things like &T in symbol names when one would theoretically
+// want them for things like impls of traits on that type.
+//
+// To be able to work on all platforms and get *some* reasonable output, we
+// use C++ name-mangling.
struct SymbolPathBuffer {
- names: Vec<InternedString>,
+ result: String,
+ temp_buf: String
+}
+
+impl SymbolPathBuffer {
+ fn new() -> Self {
+ let mut result = SymbolPathBuffer {
+ result: String::with_capacity(64),
+ temp_buf: String::with_capacity(16)
+ };
+ result.result.push_str("_ZN"); // _Z == Begin name-sequence, N == nested
+ result
+ }
+
+ fn finish(mut self, hash: &str) -> String {
+ // end name-sequence
+ self.push(hash);
+ self.result.push('E');
+ self.result
+ }
}
impl ItemPathBuffer for SymbolPathBuffer {
}
fn push(&mut self, text: &str) {
- self.names.push(Symbol::intern(text).as_str());
+ self.temp_buf.clear();
+ let need_underscore = sanitize(&mut self.temp_buf, text);
+ let _ = write!(self.result, "{}", self.temp_buf.len() + (need_underscore as usize));
+ if need_underscore {
+ self.result.push('_');
+ }
+ self.result.push_str(&self.temp_buf);
}
}
-pub fn exported_name_from_type_and_prefix<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
+pub fn exported_name_from_type_and_prefix<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
t: Ty<'tcx>,
prefix: &str)
-> String {
- let hash = get_symbol_hash(scx, None, t, None);
- let path = [Symbol::intern(prefix).as_str()];
- mangle(path.iter().cloned(), &hash)
+ let hash = get_symbol_hash(tcx, None, t, None);
+ let mut buffer = SymbolPathBuffer::new();
+ buffer.push(prefix);
+ buffer.finish(&hash)
}
// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!
// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $
-pub fn sanitize(s: &str) -> String {
- let mut result = String::new();
+//
+// returns true if an underscore must be added at the start
+pub fn sanitize(result: &mut String, s: &str) -> bool {
for c in s.chars() {
match c {
// Escape these with $ sequences
}
// Underscore-qualify anything that didn't start as an ident.
- if !result.is_empty() &&
+ !result.is_empty() &&
result.as_bytes()[0] != '_' as u8 &&
- ! (result.as_bytes()[0] as char).is_xid_start() {
- return format!("_{}", result);
- }
-
- return result;
-}
-
-fn mangle<PI: Iterator<Item=InternedString>>(path: PI, hash: &str) -> String {
- // Follow C++ namespace-mangling style, see
- // http://en.wikipedia.org/wiki/Name_mangling for more info.
- //
- // It turns out that on macOS you can actually have arbitrary symbols in
- // function names (at least when given to LLVM), but this is not possible
- // when using unix's linker. Perhaps one day when we just use a linker from LLVM
- // we won't need to do this name mangling. The problem with name mangling is
- // that it seriously limits the available characters. For example we can't
- // have things like &T in symbol names when one would theoretically
- // want them for things like impls of traits on that type.
- //
- // To be able to work on all platforms and get *some* reasonable output, we
- // use C++ name-mangling.
-
- let mut n = String::from("_ZN"); // _Z == Begin name-sequence, N == nested
-
- fn push(n: &mut String, s: &str) {
- let sani = sanitize(s);
- n.push_str(&format!("{}{}", sani.len(), sani));
- }
-
- // First, connect each component with <len, name> pairs.
- for data in path {
- push(&mut n, &data);
- }
-
- push(&mut n, hash);
-
- n.push('E'); // End name-sequence.
- n
+ ! (result.as_bytes()[0] as char).is_xid_start()
}
use mir;
use monomorphize::{self, Instance};
use partitioning::{self, PartitioningStrategy, CodegenUnit};
+use symbol_cache::SymbolCache;
use symbol_map::SymbolMap;
use symbol_names_test;
use trans_item::{TransItem, DefPathBasedNames};
use libc::c_uint;
use std::ffi::{CStr, CString};
-use std::rc::Rc;
use std::str;
use std::i32;
use syntax_pos::Span;
/// in any other compilation unit. Give these symbols internal linkage.
fn internalize_symbols<'a, 'tcx>(sess: &Session,
scx: &SharedCrateContext<'a, 'tcx>,
+ translation_items: &FxHashSet<TransItem<'tcx>>,
llvm_modules: &[ModuleLlvm],
symbol_map: &SymbolMap<'tcx>,
exported_symbols: &ExportedSymbols) {
let mut locally_defined_symbols = FxHashSet();
let mut linkage_fixed_explicitly = FxHashSet();
- for trans_item in scx.translation_items().borrow().iter() {
+ for trans_item in translation_items {
let symbol_name = symbol_map.get_or_compute(scx, *trans_item);
if trans_item.explicit_linkage(tcx).is_some() {
linkage_fixed_explicitly.insert(symbol_name.clone());
///
/// This list is later used by linkers to determine the set of symbols needed to
/// be exposed from a dynamic library and it's also encoded into the metadata.
-pub fn find_exported_symbols(tcx: TyCtxt, reachable: NodeSet) -> NodeSet {
- reachable.into_iter().filter(|&id| {
+pub fn find_exported_symbols(tcx: TyCtxt, reachable: &NodeSet) -> NodeSet {
+ reachable.iter().cloned().filter(|&id| {
// Next, we want to ignore some FFI functions that are not exposed from
// this crate. Reachable FFI functions can be lumped into two
// categories:
hir_map::NodeImplItem(&hir::ImplItem {
node: hir::ImplItemKind::Method(..), .. }) => {
let def_id = tcx.hir.local_def_id(id);
- let generics = tcx.item_generics(def_id);
+ let generics = tcx.generics_of(def_id);
let attributes = tcx.get_attrs(def_id);
(generics.parent_types == 0 && generics.types.is_empty()) &&
// Functions marked with #[inline] are only ever translated
let krate = tcx.hir.krate();
let ty::CrateAnalysis { reachable, .. } = analysis;
- let exported_symbols = find_exported_symbols(tcx, reachable);
+ let exported_symbols = find_exported_symbols(tcx, &reachable);
let check_overflow = tcx.sess.overflow_checks();
// Run the translation item collector and partition the collected items into
// codegen units.
- let (codegen_units, symbol_map) = collect_and_partition_translation_items(&shared_ccx);
-
- let symbol_map = Rc::new(symbol_map);
+ let (translation_items, codegen_units, symbol_map) =
+ collect_and_partition_translation_items(&shared_ccx);
let mut all_stats = Stats::default();
let modules: Vec<ModuleTranslation> = codegen_units
let (stats, module) =
tcx.dep_graph.with_task(dep_node,
AssertDepGraphSafe(&shared_ccx),
- AssertDepGraphSafe((cgu, symbol_map.clone())),
+ AssertDepGraphSafe(cgu),
module_translation);
all_stats.extend(stats);
module
fn module_translation<'a, 'tcx>(
scx: AssertDepGraphSafe<&SharedCrateContext<'a, 'tcx>>,
- args: AssertDepGraphSafe<(CodegenUnit<'tcx>, Rc<SymbolMap<'tcx>>)>)
+ args: AssertDepGraphSafe<CodegenUnit<'tcx>>)
-> (Stats, ModuleTranslation)
{
// FIXME(#40304): We ought to be using the id as a key and some queries, I think.
let AssertDepGraphSafe(scx) = scx;
- let AssertDepGraphSafe((cgu, symbol_map)) = args;
+ let AssertDepGraphSafe(cgu) = args;
let cgu_name = String::from(cgu.name());
let cgu_id = cgu.work_product_id();
- let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &symbol_map);
+ let symbol_cache = SymbolCache::new(scx.tcx());
+ let symbol_name_hash = cgu.compute_symbol_name_hash(scx, &symbol_cache);
// Check whether there is a previous work-product we can
// re-use. Not only must the file exist, and the inputs not
}
// Instantiate translation items without filling out definitions yet...
- let lcx = LocalCrateContext::new(scx, cgu, symbol_map.clone());
+ let lcx = LocalCrateContext::new(scx, cgu, &symbol_cache);
let module = {
let ccx = CrateContext::new(scx, &lcx);
let trans_items = ccx.codegen_unit()
- .items_in_deterministic_order(ccx.tcx(), &symbol_map);
+ .items_in_deterministic_order(ccx.tcx(), &symbol_cache);
for &(trans_item, linkage) in &trans_items {
trans_item.predefine(&ccx, linkage);
}
assert_module_sources::assert_module_sources(tcx, &modules);
- symbol_names_test::report_symbol_names(&shared_ccx);
+ symbol_names_test::report_symbol_names(tcx);
if shared_ccx.sess().trans_stats() {
println!("--- trans stats ---");
time(shared_ccx.sess().time_passes(), "internalize symbols", || {
internalize_symbols(sess,
&shared_ccx,
+ &translation_items,
&llvm_modules,
&symbol_map,
&exported_symbols);
}
fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
- -> (Vec<CodegenUnit<'tcx>>, SymbolMap<'tcx>) {
+ -> (FxHashSet<TransItem<'tcx>>,
+ Vec<CodegenUnit<'tcx>>,
+ SymbolMap<'tcx>) {
let time_passes = scx.sess().time_passes();
let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items {
assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() ||
scx.tcx().sess.opts.debugging_opts.incremental.is_some());
- {
- let mut ccx_map = scx.translation_items().borrow_mut();
-
- for trans_item in items.iter().cloned() {
- ccx_map.insert(trans_item);
- }
- }
+ let translation_items: FxHashSet<TransItem<'tcx>> = items.iter().cloned().collect();
if scx.sess().opts.debugging_opts.print_trans_items.is_some() {
let mut item_to_cgus = FxHashMap();
}
}
- (codegen_units, symbol_map)
+ (translation_items, codegen_units, symbol_map)
}
}
}
- pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
let builder = Builder::with_ccx(self.ccx);
builder.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
});
- builder.dynamic_alloca(ty, name)
+ builder.dynamic_alloca(ty, name, align)
}
- pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
self.count_insn("alloca");
unsafe {
- if name.is_empty() {
+ let alloca = if name.is_empty() {
llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), noname())
} else {
let name = CString::new(name).unwrap();
llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(),
name.as_ptr())
+ };
+ if let Some(align) = align {
+ llvm::LLVMSetAlignment(alloca, align as c_uint);
}
+ alloca
}
}
//! and methods are represented as just a fn ptr and not a full
//! closure.
-use llvm::{self, ValueRef};
-use rustc::hir::def_id::DefId;
-use rustc::ty::subst::Substs;
use attributes;
use common::{self, CrateContext};
-use monomorphize;
use consts;
use declare;
-use monomorphize::Instance;
+use llvm::{self, ValueRef};
+use monomorphize::{self, Instance};
+use rustc::hir::def_id::DefId;
+use rustc::ty::TypeFoldable;
+use rustc::ty::subst::Substs;
use trans_item::TransItem;
use type_of;
-use rustc::ty::TypeFoldable;
/// Translates a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
return llfn;
}
- let sym = ccx.symbol_map().get_or_compute(ccx.shared(),
- TransItem::Fn(instance));
+ let sym = ccx.symbol_cache().get(TransItem::Fn(instance));
debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym);
// This is subtle and surprising, but sometimes we have to bitcast
let attrs = instance.def.attrs(ccx.tcx());
attributes::from_fn_attrs(ccx, &attrs, llfn);
- let is_local_def = ccx.shared().translation_items().borrow()
- .contains(&TransItem::Fn(instance));
- if is_local_def {
- // FIXME(eddyb) Doubt all extern fn should allow unwinding.
+ // Perhaps questionable, but we assume that anything defined
+ // *in Rust code* may unwind. Foreign items like `extern "C" {
+ // fn foo(); }` are assumed not to unwind **unless** they have
+ // a `#[unwind]` attribute.
+ if !tcx.is_foreign_item(instance.def_id()) {
attributes::unwind(llfn, true);
unsafe {
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
}
}
+
if ccx.use_dll_storage_attrs() &&
ccx.sess().cstore.is_dllimport_foreign_item(instance.def_id())
{
// have to instantiate all methods of the trait being cast to, so we
// can build the appropriate vtable.
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => {
- let target_ty = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &target_ty);
+ let target_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &target_ty);
let source_ty = operand.ty(self.mir, self.scx.tcx());
- let source_ty = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &source_ty);
+ let source_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &source_ty);
let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.scx,
source_ty,
target_ty);
}
mir::Rvalue::Cast(mir::CastKind::ReifyFnPointer, ref operand, _) => {
let fn_ty = operand.ty(self.mir, self.scx.tcx());
- let fn_ty = monomorphize::apply_param_substs(
- self.scx,
- self.param_substs,
- &fn_ty);
+ let fn_ty = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &fn_ty);
visit_fn_use(self.scx, fn_ty, false, &mut self.output);
}
mir::Rvalue::Cast(mir::CastKind::ClosureFnPointer, ref operand, _) => {
}
if let mir::Literal::Item { def_id, substs } = constant.literal {
- let substs = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &substs);
+ let substs = self.scx.tcx().trans_apply_param_substs(self.param_substs,
+ &substs);
let instance = monomorphize::resolve(self.scx, def_id, substs);
collect_neighbours(self.scx, instance, self.output);
}
match *kind {
mir::TerminatorKind::Call { ref func, .. } => {
let callee_ty = func.ty(self.mir, tcx);
- let callee_ty = monomorphize::apply_param_substs(
- self.scx, self.param_substs, &callee_ty);
+ let callee_ty = tcx.trans_apply_param_substs(self.param_substs, &callee_ty);
visit_fn_use(self.scx, callee_ty, true, &mut self.output);
}
mir::TerminatorKind::Drop { ref location, .. } |
mir::TerminatorKind::DropAndReplace { ref location, .. } => {
let ty = location.ty(self.mir, self.scx.tcx())
.to_ty(self.scx.tcx());
- let ty = monomorphize::apply_param_substs(self.scx,
- self.param_substs,
- &ty);
+ let ty = tcx.trans_apply_param_substs(self.param_substs, &ty);
visit_drop_use(self.scx, ty, true, self.output);
}
mir::TerminatorKind::Goto { .. } |
continue;
}
- if !tcx.item_generics(method.def_id).types.is_empty() {
+ if !tcx.generics_of(method.def_id).types.is_empty() {
continue;
}
let instance =
monomorphize::resolve(scx, method.def_id, callee_substs);
- let predicates = tcx.item_predicates(instance.def_id()).predicates
+ let predicates = tcx.predicates_of(instance.def_id()).predicates
.subst(tcx, instance.substs);
if !traits::normalize_and_test_predicates(tcx, predicates) {
continue;
substs: &'tcx Substs<'tcx>)
-> Ty<'tcx>
{
- let ty = shared.tcx().item_type(def_id);
- monomorphize::apply_param_substs(shared, substs, &ty)
+ let ty = shared.tcx().type_of(def_id);
+ shared.tcx().trans_apply_param_substs(substs, &ty)
}
/// Return the substituted type of an instance.
-> Ty<'tcx>
{
let ty = instance.def.def_ty(shared.tcx());
- monomorphize::apply_param_substs(shared, instance.substs, &ty)
+ shared.tcx().trans_apply_param_substs(instance.substs, &ty)
}
hir_map::NodeItem(&hir::Item {
ref attrs, span, node: hir::ItemStatic(..), ..
}) => {
- let sym = ccx.symbol_map()
- .get(TransItem::Static(id))
- .expect("Local statics should always be in the SymbolMap");
+ let sym = ccx.symbol_cache()
+ .get(TransItem::Static(id));
let defined_in_current_codegen_unit = ccx.codegen_unit()
.items()
.contains_key(&TransItem::Static(id));
assert!(!defined_in_current_codegen_unit);
- if declare::get_declared_value(ccx, sym).is_some() {
+ if declare::get_declared_value(ccx, &sym[..]).is_some() {
span_bug!(span, "trans: Conflicting symbol names for static?");
}
- let g = declare::define_global(ccx, sym, llty).unwrap();
+ let g = declare::define_global(ccx, &sym[..], llty).unwrap();
(g, attrs)
}
hir_map::NodeForeignItem(&hir::ForeignItem {
ref attrs, span, node: hir::ForeignItemStatic(..), ..
}) => {
- let sym = symbol_names::symbol_name(instance, ccx.shared());
+ let sym = symbol_names::symbol_name(instance, ccx.tcx());
let g = if let Some(name) =
attr::first_attr_value_str_by_name(&attrs, "linkage") {
// If this is a static with a linkage specified, then we need to handle
g
} else {
- let sym = symbol_names::symbol_name(instance, ccx.shared());
+ let sym = symbol_names::symbol_name(instance, ccx.tcx());
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
// FIXME(nagisa): investigate whether it can be changed into define_global
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if m != hir::MutMutable {
- let tcontents = ty.type_contents(ccx.tcx());
- if !tcontents.interior_unsafe() {
+ if ccx.shared().type_is_freeze(ty) {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
}
use llvm;
use llvm::{ContextRef, ModuleRef, ValueRef};
-use rustc::dep_graph::{DepGraph, DepGraphSafe, DepNode, DepTrackingMap, DepTrackingMapConfig};
+use rustc::dep_graph::{DepGraph, DepGraphSafe};
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::traits;
use monomorphize::Instance;
use partitioning::CodegenUnit;
-use trans_item::TransItem;
use type_::Type;
use rustc_data_structures::base_n;
use rustc::ty::subst::Substs;
use session::config::NoDebugInfo;
use session::Session;
use session::config;
-use symbol_map::SymbolMap;
-use util::nodemap::{NodeSet, DefIdMap, FxHashMap, FxHashSet};
+use symbol_cache::SymbolCache;
+use util::nodemap::{NodeSet, DefIdMap, FxHashMap};
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
-use std::marker::PhantomData;
use std::ptr;
use std::iter;
-use std::rc::Rc;
use std::str;
use syntax::ast;
use syntax::symbol::InternedString;
check_overflow: bool,
use_dll_storage_attrs: bool,
-
- translation_items: RefCell<FxHashSet<TransItem<'tcx>>>,
- trait_cache: RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>>,
- project_cache: RefCell<DepTrackingMap<ProjectionCache<'tcx>>>,
}
/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
/// per compilation unit. Each one has its own LLVM `ContextRef` so that
/// several compilation units may be optimized in parallel. All other LLVM
/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
-pub struct LocalCrateContext<'tcx> {
+pub struct LocalCrateContext<'a, 'tcx: 'a> {
llmod: ModuleRef,
llcx: ContextRef,
stats: Stats,
/// Depth of the current type-of computation - used to bail out
type_of_depth: Cell<usize>,
- symbol_map: Rc<SymbolMap<'tcx>>,
-
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
-}
-
-// Implement DepTrackingMapConfig for `trait_cache`
-pub struct TraitSelectionCache<'tcx> {
- data: PhantomData<&'tcx ()>
-}
-
-impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> {
- type Key = ty::PolyTraitRef<'tcx>;
- type Value = traits::Vtable<'tcx, ()>;
- fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode<DefId> {
- key.to_poly_trait_predicate().dep_node()
- }
-}
-
-// # Global Cache
-pub struct ProjectionCache<'gcx> {
- data: PhantomData<&'gcx ()>
-}
-
-impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> {
- type Key = Ty<'gcx>;
- type Value = Ty<'gcx>;
- fn to_dep_node(key: &Self::Key) -> DepNode<DefId> {
- // Ideally, we'd just put `key` into the dep-node, but we
- // can't put full types in there. So just collect up all the
- // def-ids of structs/enums as well as any traits that we
- // project out of. It doesn't matter so much what we do here,
- // except that if we are too coarse, we'll create overly
- // coarse edges between impls and the trans. For example, if
- // we just used the def-id of things we are projecting out of,
- // then the key for `<Foo as SomeTrait>::T` and `<Bar as
- // SomeTrait>::T` would both share a dep-node
- // (`TraitSelect(SomeTrait)`), and hence the impls for both
- // `Foo` and `Bar` would be considered inputs. So a change to
- // `Bar` would affect things that just normalized `Foo`.
- // Anyway, this heuristic is not ideal, but better than
- // nothing.
- let def_ids: Vec<DefId> =
- key.walk()
- .filter_map(|t| match t.sty {
- ty::TyAdt(adt_def, _) => Some(adt_def.did),
- ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
- _ => None,
- })
- .collect();
-
- DepNode::ProjectionCache { def_ids: def_ids }
- }
+ symbol_cache: &'a SymbolCache<'a, 'tcx>,
}
/// A CrateContext value binds together one LocalCrateContext with the
/// pass around (SharedCrateContext, LocalCrateContext) tuples all over trans.
pub struct CrateContext<'a, 'tcx: 'a> {
shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccx: &'a LocalCrateContext<'tcx>,
+ local_ccx: &'a LocalCrateContext<'a, 'tcx>,
}
impl<'a, 'tcx> CrateContext<'a, 'tcx> {
pub fn new(shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccx: &'a LocalCrateContext<'tcx>)
+ local_ccx: &'a LocalCrateContext<'a, 'tcx>)
-> Self {
CrateContext { shared, local_ccx }
}
tcx: tcx,
check_overflow: check_overflow,
use_dll_storage_attrs: use_dll_storage_attrs,
- translation_items: RefCell::new(FxHashSet()),
- trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
- project_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
}
}
pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
- self.tcx.type_needs_drop_given_env(ty, &self.empty_param_env)
+ ty.needs_drop(self.tcx, &self.empty_param_env)
}
pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
ty.is_sized(self.tcx, &self.empty_param_env, DUMMY_SP)
}
- pub fn exported_symbols<'a>(&'a self) -> &'a NodeSet {
- &self.exported_symbols
- }
-
- pub fn trait_cache(&self) -> &RefCell<DepTrackingMap<TraitSelectionCache<'tcx>>> {
- &self.trait_cache
+ pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_freeze(self.tcx, &self.empty_param_env, DUMMY_SP)
}
- pub fn project_cache(&self) -> &RefCell<DepTrackingMap<ProjectionCache<'tcx>>> {
- &self.project_cache
+ pub fn exported_symbols<'a>(&'a self) -> &'a NodeSet {
+ &self.exported_symbols
}
pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> {
pub fn use_dll_storage_attrs(&self) -> bool {
self.use_dll_storage_attrs
}
-
- pub fn translation_items(&self) -> &RefCell<FxHashSet<TransItem<'tcx>>> {
- &self.translation_items
- }
}
-impl<'tcx> LocalCrateContext<'tcx> {
- pub fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>,
- codegen_unit: CodegenUnit<'tcx>,
- symbol_map: Rc<SymbolMap<'tcx>>)
- -> LocalCrateContext<'tcx> {
+impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
+ pub fn new(shared: &SharedCrateContext<'a, 'tcx>,
+ codegen_unit: CodegenUnit<'tcx>,
+ symbol_cache: &'a SymbolCache<'a, 'tcx>)
+ -> LocalCrateContext<'a, 'tcx> {
unsafe {
// Append ".rs" to LLVM module identifier.
//
rust_try_fn: Cell::new(None),
intrinsics: RefCell::new(FxHashMap()),
type_of_depth: Cell::new(0),
- symbol_map: symbol_map,
local_gen_sym_counter: Cell::new(0),
+ symbol_cache: symbol_cache,
};
let (int_type, opaque_vec_type, str_slice_ty, mut local_ccx) = {
/// This is used in the `LocalCrateContext` constructor to allow calling
/// functions that expect a complete `CrateContext`, even before the local
/// portion is fully initialized and attached to the `SharedCrateContext`.
- fn dummy_ccx<'a>(shared: &'a SharedCrateContext<'a, 'tcx>,
- local_ccxs: &'a [LocalCrateContext<'tcx>])
- -> CrateContext<'a, 'tcx> {
+ fn dummy_ccx(shared: &'a SharedCrateContext<'a, 'tcx>,
+ local_ccxs: &'a [LocalCrateContext<'a, 'tcx>])
+ -> CrateContext<'a, 'tcx> {
assert!(local_ccxs.len() == 1);
CrateContext {
shared: shared,
self.shared
}
- fn local(&self) -> &'b LocalCrateContext<'tcx> {
+ fn local(&self) -> &'b LocalCrateContext<'b, 'tcx> {
self.local_ccx
}
self.shared.use_dll_storage_attrs()
}
- pub fn symbol_map(&self) -> &SymbolMap<'tcx> {
- &*self.local().symbol_map
- }
-
- pub fn translation_items(&self) -> &RefCell<FxHashSet<TransItem<'tcx>>> {
- &self.shared.translation_items
+ pub fn symbol_cache(&self) -> &'b SymbolCache<'b, 'tcx> {
+ self.local().symbol_cache
}
/// Given the def-id of some item that has no type parameters, make
type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+ if let Some(&layout) = self.tcx().layout_cache.borrow().get(&ty) {
+ return TyLayout { ty: ty, layout: layout, variant_index: None };
+ }
+
self.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| {
infcx.layout_of(ty).unwrap_or_else(|e| {
match e {
})
})
}
+
+ fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.tcx().normalize_associated_type(&ty)
+ }
}
impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> {
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.shared.layout_of(ty)
}
+
+ fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.shared.normalize_projections(ty)
+ }
}
-pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>);
+pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'a, 'tcx>);
impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> {
fn drop(&mut self) {
// Get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
- let generics = cx.tcx().item_generics(fn_def_id);
+ let generics = cx.tcx().generics_of(fn_def_id);
let substs = instance.substs.truncate_to(cx.tcx(), generics);
let template_parameters = get_template_parameters(cx,
&generics,
fn get_type_parameter_names(cx: &CrateContext, generics: &ty::Generics) -> Vec<ast::Name> {
let mut names = generics.parent.map_or(vec![], |def_id| {
- get_type_parameter_names(cx, cx.tcx().item_generics(def_id))
+ get_type_parameter_names(cx, cx.tcx().generics_of(def_id))
});
names.extend(generics.types.iter().map(|param| param.name));
names
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
- let slot = bcx.alloca(i64p, "slot");
+ let slot = bcx.alloca(i64p, "slot", None);
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
mod mir;
mod monomorphize;
mod partitioning;
+mod symbol_cache;
mod symbol_map;
mod symbol_names_test;
mod trans_item;
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir;
use abi::{Abi, FnType, ArgType};
+use adt;
use base::{self, Lifetime};
use callee;
use builder::Builder;
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
- let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret");
+ let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None);
self.store_operand(&bcx, llscratch, None, op);
llscratch
}
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
- let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
+ let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
self.store_operand(bcx, llscratch, None, op);
(llscratch, Alignment::AbiAligned, true)
} else {
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
- let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
+ let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
(llscratch, Alignment::AbiAligned, true)
}
bug!("Not a tuple.");
};
for (n, &ty) in arg_types.iter().enumerate() {
- let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize);
+ let mut elem = bcx.extract_value(
+ llval, adt::struct_llfields_index(v, n));
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
- let slot = bcx.alloca(llretty, "personalityslot");
+ let slot = bcx.alloca(llretty, "personalityslot", None);
self.llpersonalityslot = Some(slot);
slot
}
ConstVal::Integral(ref i) => return Const::from_constint(ccx, i),
ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"),
+ ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false),
+ ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)),
+ ConstVal::Variant(_) |
ConstVal::Struct(_) | ConstVal::Tuple(_) |
ConstVal::Array(..) | ConstVal::Repeat(..) => {
bug!("MIR must not use `{:?}` (aggregates are expanded to MIR rvalues)", cv)
}
- ConstVal::Function(..) => {
- let llty = type_of::type_of(ccx, ty);
- return Const::new(C_null(llty), ty);
- }
- ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false),
};
assert!(!ty.has_erasable_regions());
fn monomorphize<T>(&self, value: &T) -> T
where T: TransNormalize<'tcx>
{
- monomorphize::apply_param_substs(self.ccx.shared(),
- self.substs,
- value)
+ self.ccx.tcx().trans_apply_param_substs(self.substs, value)
}
fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
let tr_lvalue = self.const_lvalue(lvalue, span)?;
let ty = tr_lvalue.ty;
- let ref_ty = tcx.mk_ref(tcx.mk_region(ty::ReErased),
+ let ref_ty = tcx.mk_ref(tcx.types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() });
let base = match tr_lvalue.base {
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
- let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
+ let tmp = bcx.alloca(
+ type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty));
assert!(!ty.has_param_types());
Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
}
let alignment = self.alignment | Alignment::from_packed(st.packed);
+ let llfields = adt::struct_llfields(ccx, fields, st);
let ptr_val = if needs_cast {
- let fields = st.field_index_by_increasing_offset().map(|i| {
- type_of::in_memory_type_of(ccx, fields[i])
- }).collect::<Vec<_>>();
- let real_ty = Type::struct_(ccx, &fields[..], st.packed);
+ let real_ty = Type::struct_(ccx, &llfields[..], st.packed);
bcx.pointercast(self.llval, real_ty.ptr_to())
} else {
self.llval
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
bcx.ccx.shared().type_is_sized(fty) {
- return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
+ return (bcx.struct_gep(
+ ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
// If the type of the last field is [T] or str, then we don't need to do
// any adjusments
match fty.sty {
ty::TySlice(..) | ty::TyStr => {
- return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
+ return (bcx.struct_gep(
+ ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
_ => ()
}
if !self.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(ptr_val));
- return (bcx.struct_gep(ptr_val, ix), alignment);
+ return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
// We need to get the pointer manually now.
use builder::Builder;
use common::{self, CrateContext, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
-use monomorphize::{self, Instance};
+use monomorphize::Instance;
use abi::FnType;
use type_of;
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn monomorphize<T>(&self, value: &T) -> T
- where T: TransNormalize<'tcx> {
- monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value)
+ where T: TransNormalize<'tcx>
+ {
+ self.ccx.tcx().trans_apply_param_substs(self.param_substs, value)
}
pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) {
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
- let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr");
+ let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None);
bcx.store(llval, alloc, None);
alloc
} else {
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
+use adt;
use base;
use common::{self, CrateContext, C_null};
use builder::Builder;
if common::val_ty(elem) == Type::i1(bcx.ccx) {
elem = bcx.zext(elem, Type::i8(bcx.ccx));
}
+ let layout = bcx.ccx.layout_of(self.ty);
+ let i = if let Layout::Univariant { ref variant, .. } = *layout {
+ adt::struct_llfields_index(variant, i)
+ } else {
+ i
+ };
llpair = bcx.insert_value(llpair, elem, i);
}
self.val = OperandValue::Immediate(llpair);
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
- let f_align = match *bcx.ccx.layout_of(ty) {
- Layout::Univariant { ref variant, .. } =>
- Alignment::from_packed(variant.packed) | align,
- _ => align
+ let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) {
+ Layout::Univariant { ref variant, .. } => {
+ (adt::struct_llfields_index(variant, 0),
+ adt::struct_llfields_index(variant, 1),
+ Alignment::from_packed(variant.packed) | align)
+ },
+ _ => (0, 1, align)
};
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
- let a_ptr = bcx.struct_gep(llval, 0);
- let b_ptr = bcx.struct_gep(llval, 1);
+ let a_ptr = bcx.struct_gep(llval, ix0);
+ let b_ptr = bcx.struct_gep(llval, ix1);
OperandValue::Pair(
base::load_ty(bcx, a_ptr, f_align, a_ty),
bcx.store(base::from_immediate(bcx, s), lldest, align);
}
OperandValue::Pair(a, b) => {
- let f_align = match *bcx.ccx.layout_of(operand.ty) {
- Layout::Univariant { ref variant, .. } if variant.packed => {
- Some(1)
+ let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) {
+ Layout::Univariant { ref variant, .. } => {
+ (adt::struct_llfields_index(variant, 0),
+ adt::struct_llfields_index(variant, 1),
+ if variant.packed { Some(1) } else { None })
}
- _ => align
+ _ => (0, 1, align)
};
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
- bcx.store(a, bcx.struct_gep(lldest, 0), f_align);
- bcx.store(b, bcx.struct_gep(lldest, 1), f_align);
+ bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
+ bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
}
}
}
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
- let translation = if let Layout::Univariant { ref variant, .. } = *layout {
- Some(&variant.memory_index)
- } else {
- None
+ let get_memory_index = |i| {
+ if let Layout::Univariant { ref variant, .. } = *layout {
+ adt::struct_llfields_index(variant, i)
+ } else {
+ i
+ }
};
let alignment = dest.alignment;
for (i, operand) in operands.iter().enumerate() {
// Note: perhaps this should be StructGep, but
// note that in some cases the values here will
// not be structs but arrays.
- let i = if let Some(ref t) = translation {
- t[i] as usize
- } else {
- i
- };
+ let i = get_memory_index(i);
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, alignment.to_align(), op);
}
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
let ref_ty = bcx.tcx().mk_ref(
- bcx.tcx().mk_region(ty::ReErased),
+ bcx.tcx().types.re_erased,
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
use glue;
use rustc::hir::def_id::DefId;
-use rustc::infer::TransNormalize;
use rustc::middle::lang_items::DropInPlaceFnLangItem;
-use rustc::traits::{self, SelectionContext, Reveal};
+use rustc::traits;
use rustc::ty::adjustment::CustomCoerceUnsized;
-use rustc::ty::fold::{TypeFolder, TypeFoldable};
use rustc::ty::subst::{Kind, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::util::common::MemoizationMap;
-use syntax::ast;
-use syntax::codemap::{Span, DUMMY_SP};
+use syntax::codemap::DUMMY_SP;
pub use rustc::ty::Instance;
}
}
-/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
-/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
-/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
-fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
- span: Span,
- trait_ref: ty::PolyTraitRef<'tcx>)
- -> traits::Vtable<'tcx, ()>
-{
- let tcx = scx.tcx();
-
- // Remove any references to regions; this helps improve caching.
- let trait_ref = tcx.erase_regions(&trait_ref);
-
- scx.trait_cache().memoize(trait_ref, || {
- debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
- trait_ref, trait_ref.def_id());
-
- // Do the initial selection for the obligation. This yields the
- // shallow result we are looking for -- that is, what specific impl.
- tcx.infer_ctxt((), Reveal::All).enter(|infcx| {
- let mut selcx = SelectionContext::new(&infcx);
-
- let obligation_cause = traits::ObligationCause::misc(span,
- ast::DUMMY_NODE_ID);
- let obligation = traits::Obligation::new(obligation_cause,
- trait_ref.to_poly_trait_predicate());
-
- let selection = match selcx.select(&obligation) {
- Ok(Some(selection)) => selection,
- Ok(None) => {
- // Ambiguity can happen when monomorphizing during trans
- // expands to some humongo type that never occurred
- // statically -- this humongo type can then overflow,
- // leading to an ambiguous result. So report this as an
- // overflow bug, since I believe this is the only case
- // where ambiguity can result.
- debug!("Encountered ambiguity selecting `{:?}` during trans, \
- presuming due to overflow",
- trait_ref);
- tcx.sess.span_fatal(span,
- "reached the recursion limit during monomorphization \
- (selection ambiguity)");
- }
- Err(e) => {
- span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
- e, trait_ref)
- }
- };
-
- debug!("fulfill_obligation: selection={:?}", selection);
-
- // Currently, we use a fulfillment context to completely resolve
- // all nested obligations. This is because they can inform the
- // inference of the impl's type parameters.
- let mut fulfill_cx = traits::FulfillmentContext::new();
- let vtable = selection.map(|predicate| {
- debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
- fulfill_cx.register_predicate_obligation(&infcx, predicate);
- });
- let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
-
- info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
- vtable
- })
- })
-}
-
fn resolve_associated_item<'a, 'tcx>(
scx: &SharedCrateContext<'a, 'tcx>,
trait_item: &ty::AssociatedItem,
def_id, trait_id, rcvr_substs);
let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
- let vtbl = fulfill_obligation(scx, DUMMY_SP, ty::Binder(trait_ref));
+ let vtbl = tcx.trans_fulfill_obligation(DUMMY_SP, ty::Binder(trait_ref));
// Now that we know which impl is being used, we can dispatch to
// the actual function:
substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty])
});
- match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
+ match scx.tcx().trans_fulfill_obligation(DUMMY_SP, trait_ref) {
traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
scx.tcx().coerce_unsized_info(impl_def_id).custom_kind.unwrap()
}
}
}
-/// Monomorphizes a type from the AST by first applying the in-scope
-/// substitutions and then normalizing any associated types.
-pub fn apply_param_substs<'a, 'tcx, T>(scx: &SharedCrateContext<'a, 'tcx>,
- param_substs: &Substs<'tcx>,
- value: &T)
- -> T
- where T: TransNormalize<'tcx>
-{
- let tcx = scx.tcx();
- debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
- let substituted = value.subst(tcx, param_substs);
- let substituted = scx.tcx().erase_regions(&substituted);
- AssociatedTypeNormalizer::new(scx).fold(&substituted)
-}
-
/// Returns the normalized type of a struct field
pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_substs: &Substs<'tcx>,
tcx.normalize_associated_type(&f.ty(tcx, param_substs))
}
-struct AssociatedTypeNormalizer<'a, 'b: 'a, 'gcx: 'b> {
- shared: &'a SharedCrateContext<'b, 'gcx>,
-}
-
-impl<'a, 'b, 'gcx> AssociatedTypeNormalizer<'a, 'b, 'gcx> {
- fn new(shared: &'a SharedCrateContext<'b, 'gcx>) -> Self {
- AssociatedTypeNormalizer {
- shared: shared,
- }
- }
-
- fn fold<T:TypeFoldable<'gcx>>(&mut self, value: &T) -> T {
- if !value.has_projection_types() {
- value.clone()
- } else {
- value.fold_with(self)
- }
- }
-}
-
-impl<'a, 'b, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'b, 'gcx> {
- fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> {
- self.shared.tcx()
- }
-
- fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> {
- if !ty.has_projection_types() {
- ty
- } else {
- self.shared.project_cache().memoize(ty, || {
- debug!("AssociatedTypeNormalizer: ty={:?}", ty);
- self.shared.tcx().normalize_associated_type(&ty)
- })
- }
- }
-}
use std::cmp::Ordering;
use std::hash::Hash;
use std::sync::Arc;
-use symbol_map::SymbolMap;
+use symbol_cache::SymbolCache;
use syntax::ast::NodeId;
use syntax::symbol::{Symbol, InternedString};
use trans_item::{TransItem, InstantiationMode};
DepNode::WorkProduct(self.work_product_id())
}
- pub fn compute_symbol_name_hash(&self,
- scx: &SharedCrateContext,
- symbol_map: &SymbolMap) -> u64 {
+ pub fn compute_symbol_name_hash<'a>(&self,
+ scx: &SharedCrateContext<'a, 'tcx>,
+ symbol_cache: &SymbolCache<'a, 'tcx>)
+ -> u64 {
let mut state = IchHasher::new();
let exported_symbols = scx.exported_symbols();
- let all_items = self.items_in_deterministic_order(scx.tcx(), symbol_map);
+ let all_items = self.items_in_deterministic_order(scx.tcx(), symbol_cache);
for (item, _) in all_items {
- let symbol_name = symbol_map.get(item).unwrap();
+ let symbol_name = symbol_cache.get(item);
symbol_name.len().hash(&mut state);
symbol_name.hash(&mut state);
let exported = match item {
state.finish().to_smaller_hash()
}
- pub fn items_in_deterministic_order(&self,
- tcx: TyCtxt,
- symbol_map: &SymbolMap)
- -> Vec<(TransItem<'tcx>, llvm::Linkage)> {
+ pub fn items_in_deterministic_order<'a>(&self,
+ tcx: TyCtxt,
+ symbol_cache: &SymbolCache<'a, 'tcx>)
+ -> Vec<(TransItem<'tcx>, llvm::Linkage)> {
let mut items: Vec<(TransItem<'tcx>, llvm::Linkage)> =
self.items.iter().map(|(item, linkage)| (*item, *linkage)).collect();
match (node_id1, node_id2) {
(None, None) => {
- let symbol_name1 = symbol_map.get(trans_item1).unwrap();
- let symbol_name2 = symbol_map.get(trans_item2).unwrap();
- symbol_name1.cmp(symbol_name2)
+ let symbol_name1 = symbol_cache.get(trans_item1);
+ let symbol_name2 = symbol_cache.get(trans_item2);
+ symbol_name1.cmp(&symbol_name2)
}
// In the following two cases we can avoid looking up the symbol
(None, Some(_)) => Ordering::Less,
return ordering;
}
- let symbol_name1 = symbol_map.get(trans_item1).unwrap();
- let symbol_name2 = symbol_map.get(trans_item2).unwrap();
- symbol_name1.cmp(symbol_name2)
+ let symbol_name1 = symbol_cache.get(trans_item1);
+ let symbol_name2 = symbol_cache.get(trans_item2);
+ symbol_name1.cmp(&symbol_name2)
}
}
});
let mut initial_partitioning = place_root_translation_items(scx,
trans_items);
- debug_dump(scx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter());
+ debug_dump(tcx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter());
// If the partitioning should produce a fixed count of codegen units, merge
// until that count is reached.
if let PartitioningStrategy::FixedUnitCount(count) = strategy {
merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name.as_str());
- debug_dump(scx, "POST MERGING:", initial_partitioning.codegen_units.iter());
+ debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
}
// In the next step, we use the inlining map to determine which addtional
let post_inlining = place_inlined_translation_items(initial_partitioning,
inlining_map);
- debug_dump(scx, "POST INLINING:", post_inlining.0.iter());
+ debug_dump(tcx, "POST INLINING:", post_inlining.0.iter());
// Finally, sort by codegen unit name, so that we get deterministic results
let mut result = post_inlining.0;
Symbol::intern(&format!("{}{}{}", crate_name, NUMBERED_CODEGEN_UNIT_MARKER, index)).as_str()
}
-fn debug_dump<'a, 'b, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
+fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
label: &str,
cgus: I)
where I: Iterator<Item=&'b CodegenUnit<'tcx>>,
{
if cfg!(debug_assertions) {
debug!("{}", label);
+ let symbol_cache = SymbolCache::new(tcx);
for cgu in cgus {
- let symbol_map = SymbolMap::build(scx, cgu.items
- .iter()
- .map(|(&trans_item, _)| trans_item));
debug!("CodegenUnit {}:", cgu.name);
for (trans_item, linkage) in &cgu.items {
- let symbol_name = symbol_map.get_or_compute(scx, *trans_item);
+ let symbol_name = symbol_cache.get(*trans_item);
let symbol_hash_start = symbol_name.rfind('h');
let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..])
.unwrap_or("<no hash>");
debug!(" - {} [{:?}] [{}]",
- trans_item.to_string(scx.tcx()),
+ trans_item.to_string(tcx),
linkage,
symbol_hash);
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty::TyCtxt;
+use std::cell::RefCell;
+use syntax_pos::symbol::{InternedString, Symbol};
+use trans_item::TransItem;
+use util::nodemap::FxHashMap;
+
+// In the SymbolCache we collect the symbol names of translation items
+// and cache them for later reference. This is just a performance
+// optimization and the cache is populated lazily; symbol names of
+// translation items are deterministic and fully defined by the item.
+// Thus they can always be recomputed if needed.
+
+pub struct SymbolCache<'a, 'tcx: 'a> {
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ index: RefCell<FxHashMap<TransItem<'tcx>, Symbol>>,
+}
+
+impl<'a, 'tcx> SymbolCache<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
+ SymbolCache {
+ tcx: tcx,
+ index: RefCell::new(FxHashMap())
+ }
+ }
+
+ pub fn get(&self, trans_item: TransItem<'tcx>) -> InternedString {
+ let mut index = self.index.borrow_mut();
+ index.entry(trans_item)
+ .or_insert_with(|| Symbol::intern(&trans_item.compute_symbol_name(self.tcx)))
+ .as_str()
+ }
+}
where I: Iterator<Item=TransItem<'tcx>>
{
// Check for duplicate symbol names
+ let tcx = scx.tcx();
let mut symbols: Vec<_> = trans_items.map(|trans_item| {
- (trans_item, trans_item.compute_symbol_name(scx))
+ (trans_item, trans_item.compute_symbol_name(tcx))
}).collect();
(&mut symbols[..]).sort_by(|&(_, ref sym1), &(_, ref sym2)|{
if let Some(sym) = self.get(trans_item) {
Cow::from(sym)
} else {
- Cow::from(trans_item.compute_symbol_name(scx))
+ Cow::from(trans_item.compute_symbol_name(scx.tcx()))
}
}
}
use back::symbol_names;
use rustc::hir;
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
+use rustc::ty::TyCtxt;
use syntax::ast;
-use common::SharedCrateContext;
use monomorphize::Instance;
const SYMBOL_NAME: &'static str = "rustc_symbol_name";
const ITEM_PATH: &'static str = "rustc_item_path";
-pub fn report_symbol_names(scx: &SharedCrateContext) {
+pub fn report_symbol_names<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
// if the `rustc_attrs` feature is not enabled, then the
// attributes we are interested in cannot be present anyway, so
// skip the walk.
- let tcx = scx.tcx();
if !tcx.sess.features.borrow().rustc_attrs {
return;
}
let _ignore = tcx.dep_graph.in_ignore();
- let mut visitor = SymbolNamesTest { scx: scx };
+ let mut visitor = SymbolNamesTest { tcx: tcx };
// FIXME(#37712) could use ItemLikeVisitor if trait items were item-like
tcx.hir.krate().visit_all_item_likes(&mut visitor.as_deep_visitor());
}
struct SymbolNamesTest<'a, 'tcx:'a> {
- scx: &'a SharedCrateContext<'a, 'tcx>,
+ tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> {
fn process_attrs(&mut self,
node_id: ast::NodeId) {
- let tcx = self.scx.tcx();
+ let tcx = self.tcx;
let def_id = tcx.hir.local_def_id(node_id);
for attr in tcx.get_attrs(def_id).iter() {
if attr.check_name(SYMBOL_NAME) {
// for now, can only use on monomorphic names
let instance = Instance::mono(tcx, def_id);
- let name = symbol_names::symbol_name(instance, self.scx);
+ let name = symbol_names::symbol_name(instance, self.tcx);
tcx.sess.span_err(attr.span, &format!("symbol-name({})", name));
} else if attr.check_name(ITEM_PATH) {
let path = tcx.item_path_str(def_id);
use attributes;
use base;
use consts;
-use context::{CrateContext, SharedCrateContext};
+use context::CrateContext;
use common;
use declare;
use llvm;
self.to_raw_string(),
ccx.codegen_unit().name());
- let symbol_name = ccx.symbol_map()
- .get_or_compute(ccx.shared(), *self);
+ let symbol_name = ccx.symbol_cache().get(*self);
debug!("symbol {}", &symbol_name);
ccx.instances().borrow_mut().insert(instance, lldecl);
}
- pub fn compute_symbol_name(&self,
- scx: &SharedCrateContext<'a, 'tcx>) -> String {
+ pub fn compute_symbol_name(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String {
match *self {
- TransItem::Fn(instance) => symbol_names::symbol_name(instance, scx),
+ TransItem::Fn(instance) => symbol_names::symbol_name(instance, tcx),
TransItem::Static(node_id) => {
- let def_id = scx.tcx().hir.local_def_id(node_id);
- symbol_names::symbol_name(Instance::mono(scx.tcx(), def_id), scx)
+ let def_id = tcx.hir.local_def_id(node_id);
+ symbol_names::symbol_name(Instance::mono(tcx, def_id), tcx)
}
TransItem::GlobalAsm(node_id) => {
- let def_id = scx.tcx().hir.local_def_id(node_id);
+ let def_id = tcx.hir.local_def_id(node_id);
format!("global_asm_{:?}", def_id)
}
}
},
ty::TyClosure(def_id, ref closure_substs) => {
self.push_def_path(def_id, output);
- let generics = self.tcx.item_generics(self.tcx.closure_base_def_id(def_id));
+ let generics = self.tcx.generics_of(self.tcx.closure_base_def_id(def_id));
let substs = closure_substs.substs.truncate_to(self.tcx, generics);
self.push_type_params(substs, iter::empty(), output);
}
pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize {
self.layout_of(ty).size(self).bytes() as machine::llsize
}
+
+ pub fn over_align_of(&self, t: Ty<'tcx>)
+ -> Option<machine::llalign> {
+ let layout = self.layout_of(t);
+ if let Some(align) = layout.over_align(&self.tcx().data_layout) {
+ Some(align as machine::llalign)
+ } else {
+ None
+ }
+ }
}
fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
extern crate cmake;
use std::env;
-use build_helper::native_lib_boilerplate;
+use build_helper::sanitizer_lib_boilerplate;
use cmake::Config;
fn main() {
if let Some(llvm_config) = env::var_os("LLVM_CONFIG") {
- let native = match native_lib_boilerplate("compiler-rt", "tsan", "clang_rt.tsan-x86_64",
- "build/lib/linux") {
+ let native = match sanitizer_lib_boilerplate("tsan") {
Ok(native) => native,
_ => return,
};
let tcx = self.tcx();
let r = match tcx.named_region_map.defs.get(&lifetime.id) {
Some(&rl::Region::Static) => {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
}
Some(&rl::Region::LateBound(debruijn, id)) => {
.emit();
return Substs::for_item(tcx, def_id, |_, _| {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
}, |_, _| {
tcx.types.err
});
// If the type is parameterized by this region, then replace this
// region with the current anon region binding (in other words,
// whatever & would get replaced with).
- let decl_generics = tcx.item_generics(def_id);
+ let decl_generics = tcx.generics_of(def_id);
let expected_num_region_params = decl_generics.regions.len();
let supplied_num_region_params = lifetimes.len();
if expected_num_region_params != supplied_num_region_params {
let is_object = self_ty.map_or(false, |ty| ty.sty == TRAIT_OBJECT_DUMMY_SELF);
let default_needs_object_self = |p: &ty::TypeParameterDef| {
if is_object && p.has_default {
- if ty::queries::ty::get(tcx, span, p.def_id).has_self_ty() {
+ if tcx.at(span).type_of(p.def_id).has_self_ty() {
// There is no suitable inference default for a type parameter
// that references self, in an object type.
return true;
if let Some(lifetime) = lifetimes.get(i) {
self.ast_region_to_region(lifetime, Some(def))
} else {
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
}
}, |def, substs| {
let i = def.index as usize;
// This is a default type parameter.
self.normalize_ty(
span,
- ty::queries::ty::get(tcx, span, def.def_id)
+ tcx.at(span).type_of(def.def_id)
.subst_spanned(tcx, substs, Some(span))
)
}
debug!("create_substs_for_ast_trait_ref(trait_segment={:?})",
trait_segment);
- let trait_def = self.tcx().lookup_trait_def(trait_def_id);
+ let trait_def = self.tcx().trait_def(trait_def_id);
match trait_segment.parameters {
hir::AngleBracketedParameters(_) => {
let substs = self.ast_path_substs_for_ty(span, did, item_segment);
self.normalize_ty(
span,
- ty::queries::ty::get(self.tcx(), span, did).subst(self.tcx(), substs)
+ self.tcx().at(span).type_of(did).subst(self.tcx(), substs)
)
}
span_err!(tcx.sess, span, E0228,
"the lifetime bound for this object type cannot be deduced \
from context; please supply an explicit bound");
- tcx.mk_region(ty::ReStatic)
+ tcx.types.re_static
})
}
})
let ty = self.projected_ty_from_poly_trait_ref(span, bound, assoc_name);
let ty = self.normalize_ty(span, ty);
- let item = tcx.associated_items(trait_did).find(|i| i.name == assoc_name);
- let def_id = item.expect("missing associated type").def_id;
- tcx.check_stability(def_id, ref_id, span);
- (ty, Def::AssociatedTy(def_id))
+ let item = tcx.associated_items(trait_did).find(|i| i.name == assoc_name)
+ .expect("missing associated type");
+ let def = Def::AssociatedTy(item.def_id);
+ if !tcx.vis_is_accessible_from(item.vis, ref_id) {
+ let msg = format!("{} `{}` is private", def.kind_name(), assoc_name);
+ tcx.sess.span_err(span, &msg);
+ }
+ tcx.check_stability(item.def_id, ref_id, span);
+
+ (ty, def)
}
fn qpath_to_ty(&self,
let node_id = tcx.hir.as_local_node_id(did).unwrap();
let item_id = tcx.hir.get_parent_node(node_id);
let item_def_id = tcx.hir.local_def_id(item_id);
- let generics = tcx.item_generics(item_def_id);
+ let generics = tcx.generics_of(item_def_id);
let index = generics.type_param_to_index[&tcx.hir.local_def_id(node_id).index];
tcx.mk_param(index, tcx.hir.name(node_id))
}
assert_eq!(opt_self_ty, None);
self.prohibit_type_params(&path.segments);
- let ty = ty::queries::ty::get(tcx, span, def_id);
+ let ty = tcx.at(span).type_of(def_id);
if let Some(free_substs) = self.get_free_substs() {
ty.subst(tcx, free_substs)
} else {
// If any of the derived region bounds are 'static, that is always
// the best choice.
if derived_region_bounds.iter().any(|&r| ty::ReStatic == *r) {
- return Some(tcx.mk_region(ty::ReStatic));
+ return Some(tcx.types.re_static);
}
// Determine whether there is exactly one unique region in the set
let expected_ty = self.structurally_resolved_type(pat.span, expected);
if let ty::TyRef(_, mt) = expected_ty.sty {
if let ty::TySlice(_) = mt.ty.sty {
- pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ pat_ty = tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_slice(tcx.types.u8))
}
}
use syntax::abi;
use syntax::feature_gate;
use syntax::ptr::P;
+use syntax_pos;
use std::collections::VecDeque;
use std::ops::Deref;
Ok(target)
}
+ /// Same as `try_coerce()`, but without side-effects.
+ pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
+ let source = self.resolve_type_vars_with_obligations(expr_ty);
+ debug!("coercion::can({:?} -> {:?})", source, target);
+
+ let cause = self.cause(syntax_pos::DUMMY_SP, ObligationCauseCode::ExprAssignable);
+ let coerce = Coerce::new(self, cause);
+ self.probe(|_| coerce.coerce::<hir::Expr>(&[], source, target)).is_ok()
+ }
+
/// Given some expressions, their known unified type and another expression,
/// tries to unify the types, potentially inserting coercions on any of the
/// provided expressions and returns their LUB (aka "common supertype").
debug!("compare_impl_method: trait_to_skol_substs={:?}",
trait_to_skol_substs);
- let impl_m_generics = tcx.item_generics(impl_m.def_id);
- let trait_m_generics = tcx.item_generics(trait_m.def_id);
- let impl_m_predicates = tcx.item_predicates(impl_m.def_id);
- let trait_m_predicates = tcx.item_predicates(trait_m.def_id);
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
+ let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
// Check region bounds.
check_region_bounds_on_impl_method(tcx,
// environment. We can't just use `impl_env.caller_bounds`,
// however, because we want to replace all late-bound regions with
// region variables.
- let impl_predicates = tcx.item_predicates(impl_m_predicates.parent.unwrap());
+ let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
let mut hybrid_preds = impl_predicates.instantiate(tcx, impl_to_skol_substs);
debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);
let tcx = infcx.tcx;
let m_sig = |method: &ty::AssociatedItem| {
- match tcx.item_type(method.def_id).sty {
+ match tcx.type_of(method.def_id).sty {
ty::TyFnDef(_, _, f) => f,
_ => bug!()
}
ty::ImplContainer(_) => impl_trait_ref.self_ty(),
ty::TraitContainer(_) => tcx.mk_self_type()
};
- let method_ty = tcx.item_type(method.def_id);
+ let method_ty = tcx.type_of(method.def_id);
let self_arg_ty = *method_ty.fn_sig().input(0).skip_binder();
match ExplicitSelf::determine(untransformed_self_ty, self_arg_ty) {
ExplicitSelf::ByValue => "self".to_string(),
trait_m: &ty::AssociatedItem,
trait_item_span: Option<Span>)
-> Result<(), ErrorReported> {
- let impl_m_generics = tcx.item_generics(impl_m.def_id);
- let trait_m_generics = tcx.item_generics(trait_m.def_id);
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
let num_impl_m_type_params = impl_m_generics.types.len();
let num_trait_m_type_params = trait_m_generics.types.len();
if num_impl_m_type_params != num_trait_m_type_params {
trait_item_span: Option<Span>)
-> Result<(), ErrorReported> {
let m_fty = |method: &ty::AssociatedItem| {
- match tcx.item_type(method.def_id).sty {
+ match tcx.type_of(method.def_id).sty {
ty::TyFnDef(_, _, f) => f,
_ => bug!()
}
trait_to_skol_substs);
// Compute skolemized form of impl and trait const tys.
- let impl_ty = tcx.item_type(impl_c.def_id).subst(tcx, impl_to_skol_substs);
- let trait_ty = tcx.item_type(trait_c.def_id).subst(tcx, trait_to_skol_substs);
+ let impl_ty = tcx.type_of(impl_c.def_id).subst(tcx, impl_to_skol_substs);
+ let trait_ty = tcx.type_of(trait_c.def_id).subst(tcx, trait_to_skol_substs);
let mut cause = ObligationCause::misc(impl_c_span, impl_c_node_id);
// There is no "body" here, so just pass dummy id.
use check::FnCtxt;
-use rustc::ty::Ty;
-use rustc::infer::{InferOk};
+use rustc::infer::InferOk;
use rustc::traits::ObligationCause;
use syntax::ast;
use syntax_pos::{self, Span};
use rustc::hir;
use rustc::hir::def::Def;
-use rustc::ty::{self, AssociatedItem};
+use rustc::ty::{self, Ty, AssociatedItem};
use errors::DiagnosticBuilder;
use super::method::probe;
if let Err(e) = self.try_coerce(expr, checked_ty, self.diverges.get(), expected) {
let cause = self.misc(expr.span);
let expr_ty = self.resolve_type_vars_with_obligations(checked_ty);
- let mode = probe::Mode::MethodCall;
- let suggestions = self.probe_for_return_type(syntax_pos::DUMMY_SP,
- mode,
- expected,
- checked_ty,
- ast::DUMMY_NODE_ID);
let mut err = self.report_mismatched_types(&cause, expected, expr_ty, e);
- if suggestions.len() > 0 {
- err.help(&format!("here are some functions which \
- might fulfill your needs:\n{}",
- self.get_best_match(&suggestions).join("\n")));
- };
+ if let Some(suggestion) = self.check_ref(expr,
+ checked_ty,
+ expected) {
+ err.help(&suggestion);
+ } else {
+ let mode = probe::Mode::MethodCall;
+ let suggestions = self.probe_for_return_type(syntax_pos::DUMMY_SP,
+ mode,
+ expected,
+ checked_ty,
+ ast::DUMMY_NODE_ID);
+ if suggestions.len() > 0 {
+ err.help(&format!("here are some functions which \
+ might fulfill your needs:\n{}",
+ self.get_best_match(&suggestions).join("\n")));
+ }
+ }
err.emit();
}
}
fn has_no_input_arg(&self, method: &AssociatedItem) -> bool {
match method.def() {
Def::Method(def_id) => {
- match self.tcx.item_type(def_id).sty {
+ match self.tcx.type_of(def_id).sty {
ty::TypeVariants::TyFnDef(_, _, sig) => {
sig.inputs().skip_binder().len() == 1
}
_ => false,
}
}
+
+ /// This function detects potential "simple" improvements or user errors and
+ /// provides useful help for them. For example:
+ ///
+ /// ```
+ /// fn some_fn(s: &str) {}
+ ///
+ /// let x = "hey!".to_owned();
+ /// some_fn(x); // error
+ /// ```
+ ///
+ /// No need to find every potential function which could make a coercion to transform a
+ /// `String` into a `&str` since a `&` would do the trick!
+ ///
+ /// In addition to this check, it also compares the mutability of the references. If the
+ /// expected type is a mutable reference but the provided one isn't, we can suggest "Hey,
+ /// try with `&mut`!".
+ fn check_ref(&self,
+ expr: &hir::Expr,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>)
+ -> Option<String> {
+ match (&expected.sty, &checked_ty.sty) {
+ (&ty::TyRef(_, _), &ty::TyRef(_, _)) => None,
+ (&ty::TyRef(_, mutability), _) => {
+ // Check if it can work when put into a ref. For example:
+ //
+ // ```
+ // fn bar(x: &mut i32) {}
+ //
+ // let x = 0u32;
+ // bar(&x); // error, expected &mut
+ // ```
+ let ref_ty = match mutability.mutbl {
+ hir::Mutability::MutMutable => self.tcx.mk_mut_ref(
+ self.tcx.mk_region(ty::ReStatic),
+ checked_ty),
+ hir::Mutability::MutImmutable => self.tcx.mk_imm_ref(
+ self.tcx.mk_region(ty::ReStatic),
+ checked_ty),
+ };
+ if self.can_coerce(ref_ty, expected) {
+ if let Ok(src) = self.tcx.sess.codemap().span_to_snippet(expr.span) {
+ return Some(format!("try with `{}{}`",
+ match mutability.mutbl {
+ hir::Mutability::MutMutable => "&mut ",
+ hir::Mutability::MutImmutable => "&",
+ },
+ &src));
+ }
+ }
+ None
+ }
+ _ => None,
+ }
+ }
}
use rustc::infer::{self, InferOk};
use middle::region;
use rustc::ty::subst::{Subst, Substs};
-use rustc::ty::{self, AdtKind, Ty, TyCtxt};
+use rustc::ty::{self, Ty, TyCtxt};
use rustc::traits::{self, ObligationCause, Reveal};
use util::common::ErrorReported;
use util::nodemap::FxHashSet;
-use syntax::ast;
use syntax_pos::Span;
/// check_drop_impl confirms that the Drop implementation identfied by
pub fn check_drop_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
drop_impl_did: DefId)
-> Result<(), ErrorReported> {
- let dtor_self_type = tcx.item_type(drop_impl_did);
- let dtor_predicates = tcx.item_predicates(drop_impl_did);
+ let dtor_self_type = tcx.type_of(drop_impl_did);
+ let dtor_predicates = tcx.predicates_of(drop_impl_did);
match dtor_self_type.sty {
ty::TyAdt(adt_def, self_to_impl_substs) => {
ensure_drop_params_and_item_params_correspond(tcx,
let tcx = infcx.tcx;
let mut fulfillment_cx = traits::FulfillmentContext::new();
- let named_type = tcx.item_type(self_type_did);
+ let named_type = tcx.type_of(self_type_did);
let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs);
let drop_impl_span = tcx.def_span(drop_impl_did);
// We can assume the predicates attached to struct/enum definition
// hold.
- let generic_assumptions = tcx.item_predicates(self_type_did);
+ let generic_assumptions = tcx.predicates_of(self_type_did);
let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
///
pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>(
rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>,
- typ: ty::Ty<'tcx>,
+ ty: ty::Ty<'tcx>,
span: Span,
scope: region::CodeExtent)
+ -> Result<(), ErrorReported>
{
debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}",
- typ, scope);
+ ty, scope);
let parent_scope = match rcx.tcx.region_maps.opt_encl_scope(scope) {
- Some(parent_scope) => parent_scope,
- // If no enclosing scope, then it must be the root scope which cannot be outlived.
- None => return
+ Some(parent_scope) => parent_scope,
+ // If no enclosing scope, then it must be the root scope
+ // which cannot be outlived.
+ None => return Ok(())
};
-
- let result = iterate_over_potentially_unsafe_regions_in_type(
- &mut DropckContext {
- rcx: rcx,
- span: span,
- parent_scope: parent_scope,
- breadcrumbs: FxHashSet()
- },
- TypeContext::Root,
- typ,
- 0);
- match result {
- Ok(()) => {}
- Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => {
- let tcx = rcx.tcx;
- let mut err = struct_span_err!(tcx.sess, span, E0320,
- "overflow while adding drop-check rules for {}", typ);
- match *ctxt {
- TypeContext::Root => {
- // no need for an additional note if the overflow
- // was somehow on the root.
- }
- TypeContext::ADT { def_id, variant, field } => {
- let adt = tcx.lookup_adt_def(def_id);
- let variant_name = match adt.adt_kind() {
- AdtKind::Enum => format!("enum {} variant {}",
- tcx.item_path_str(def_id),
- variant),
- AdtKind::Struct => format!("struct {}",
- tcx.item_path_str(def_id)),
- AdtKind::Union => format!("union {}",
- tcx.item_path_str(def_id)),
- };
- span_note!(
- &mut err,
- span,
- "overflowed on {} field {} type: {}",
- variant_name,
- field,
- detected_on_typ);
+ let parent_scope = rcx.tcx.mk_region(ty::ReScope(parent_scope));
+ let origin = || infer::SubregionOrigin::SafeDestructor(span);
+
+ let ty = rcx.fcx.resolve_type_vars_if_possible(&ty);
+ let for_ty = ty;
+ let mut types = vec![(ty, 0)];
+ let mut known = FxHashSet();
+ while let Some((ty, depth)) = types.pop() {
+ let ty::DtorckConstraint {
+ dtorck_types, outlives
+ } = rcx.tcx.dtorck_constraint_for_ty(span, for_ty, depth, ty)?;
+
+ for ty in dtorck_types {
+ let ty = rcx.fcx.normalize_associated_types_in(span, &ty);
+ let ty = rcx.fcx.resolve_type_vars_with_obligations(ty);
+ let ty = rcx.fcx.resolve_type_and_region_vars_if_possible(&ty);
+ match ty.sty {
+ // All parameters live for the duration of the
+ // function.
+ ty::TyParam(..) => {}
+
+ // A projection that we couldn't resolve - it
+ // might have a destructor.
+ ty::TyProjection(..) | ty::TyAnon(..) => {
+ rcx.type_must_outlive(origin(), ty, parent_scope);
}
- }
- err.emit();
- }
- }
-}
-
-enum Error<'tcx> {
- Overflow(TypeContext, ty::Ty<'tcx>),
-}
-
-#[derive(Copy, Clone)]
-enum TypeContext {
- Root,
- ADT {
- def_id: DefId,
- variant: ast::Name,
- field: ast::Name,
- }
-}
-
-struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> {
- rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>,
- /// types that have already been traversed
- breadcrumbs: FxHashSet<Ty<'tcx>>,
- /// span for error reporting
- span: Span,
- /// the scope reachable dtorck types must outlive
- parent_scope: region::CodeExtent
-}
-
-// `context` is used for reporting overflow errors
-fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>(
- cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>,
- context: TypeContext,
- ty: Ty<'tcx>,
- depth: usize)
- -> Result<(), Error<'tcx>>
-{
- let tcx = cx.rcx.tcx;
- // Issue #22443: Watch out for overflow. While we are careful to
- // handle regular types properly, non-regular ones cause problems.
- let recursion_limit = tcx.sess.recursion_limit.get();
- if depth / 4 >= recursion_limit {
- // This can get into rather deep recursion, especially in the
- // presence of things like Vec<T> -> Unique<T> -> PhantomData<T> -> T.
- // use a higher recursion limit to avoid errors.
- return Err(Error::Overflow(context, ty))
- }
- // canoncialize the regions in `ty` before inserting - infinitely many
- // region variables can refer to the same region.
- let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty);
-
- if !cx.breadcrumbs.insert(ty) {
- debug!("iterate_over_potentially_unsafe_regions_in_type \
- {}ty: {} scope: {:?} - cached",
- (0..depth).map(|_| ' ').collect::<String>(),
- ty, cx.parent_scope);
- return Ok(()); // we already visited this type
- }
- debug!("iterate_over_potentially_unsafe_regions_in_type \
- {}ty: {} scope: {:?}",
- (0..depth).map(|_| ' ').collect::<String>(),
- ty, cx.parent_scope);
-
- // If `typ` has a destructor, then we must ensure that all
- // borrowed data reachable via `typ` must outlive the parent
- // of `scope`. This is handled below.
- //
- // However, there is an important special case: for any Drop
- // impl that is tagged as "blind" to their parameters,
- // we assume that data borrowed via such type parameters
- // remains unreachable via that Drop impl.
- //
- // For example, consider:
- //
- // ```rust
- // #[unsafe_destructor_blind_to_params]
- // impl<T> Drop for Vec<T> { ... }
- // ```
- //
- // which does have to be able to drop instances of `T`, but
- // otherwise cannot read data from `T`.
- //
- // Of course, for the type expression passed in for any such
- // unbounded type parameter `T`, we must resume the recursive
- // analysis on `T` (since it would be ignored by
- // type_must_outlive).
- let dropck_kind = has_dtor_of_interest(tcx, ty);
- debug!("iterate_over_potentially_unsafe_regions_in_type \
- ty: {:?} dropck_kind: {:?}", ty, dropck_kind);
- match dropck_kind {
- DropckKind::NoBorrowedDataAccessedInMyDtor => {
- // The maximally blind attribute.
- }
- DropckKind::BorrowedDataMustStrictlyOutliveSelf => {
- cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
- ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
- return Ok(());
- }
- DropckKind::RevisedSelf(revised_ty) => {
- cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span),
- revised_ty, tcx.mk_region(ty::ReScope(cx.parent_scope)));
- // Do not return early from this case; we want
- // to recursively process the internal structure of Self
- // (because even though the Drop for Self has been asserted
- // safe, the types instantiated for the generics of Self
- // may themselves carry dropck constraints.)
- }
- }
-
- debug!("iterate_over_potentially_unsafe_regions_in_type \
- {}ty: {} scope: {:?} - checking interior",
- (0..depth).map(|_| ' ').collect::<String>(),
- ty, cx.parent_scope);
-
- // We still need to ensure all referenced data is safe.
- match ty.sty {
- ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) |
- ty::TyFloat(_) | ty::TyStr | ty::TyNever => {
- // primitive - definitely safe
- Ok(())
- }
-
- ty::TyArray(ity, _) | ty::TySlice(ity) => {
- // single-element containers, behave like their element
- iterate_over_potentially_unsafe_regions_in_type(
- cx, context, ity, depth+1)
- }
-
- ty::TyAdt(def, substs) if def.is_phantom_data() => {
- // PhantomData<T> - behaves identically to T
- let ity = substs.type_at(0);
- iterate_over_potentially_unsafe_regions_in_type(
- cx, context, ity, depth+1)
- }
-
- ty::TyAdt(def, substs) => {
- let did = def.did;
- for variant in &def.variants {
- for field in variant.fields.iter() {
- let fty = field.ty(tcx, substs);
- let fty = cx.rcx.fcx.resolve_type_vars_with_obligations(
- cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty));
- iterate_over_potentially_unsafe_regions_in_type(
- cx,
- TypeContext::ADT {
- def_id: did,
- field: field.name,
- variant: variant.name,
- },
- fty,
- depth+1)?
+ _ => {
+ if let None = known.replace(ty) {
+ types.push((ty, depth+1));
+ }
}
}
- Ok(())
- }
-
- ty::TyClosure(def_id, substs) => {
- for ty in substs.upvar_tys(def_id, tcx) {
- iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
- }
- Ok(())
- }
-
- ty::TyTuple(tys, _) => {
- for ty in tys {
- iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)?
- }
- Ok(())
- }
-
- ty::TyRawPtr(..) | ty::TyRef(..) | ty::TyParam(..) => {
- // these always come with a witness of liveness (references
- // explicitly, pointers implicitly, parameters by the
- // caller).
- Ok(())
}
- ty::TyFnDef(..) | ty::TyFnPtr(_) => {
- // FIXME(#26656): this type is always destruction-safe, but
- // it implicitly witnesses Self: Fn, which can be false.
- Ok(())
- }
-
- ty::TyInfer(..) | ty::TyError => {
- tcx.sess.delay_span_bug(cx.span, "unresolved type in regionck");
- Ok(())
- }
-
- // these are always dtorck
- ty::TyDynamic(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(),
- }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-enum DropckKind<'tcx> {
- /// The "safe" kind; i.e. conservatively assume any borrow
- /// accessed by dtor, and therefore such data must strictly
- /// outlive self.
- ///
- /// Equivalent to RevisedTy with no change to the self type.
- BorrowedDataMustStrictlyOutliveSelf,
-
- /// The nearly completely-unsafe kind.
- ///
- /// Equivalent to RevisedSelf with *all* parameters remapped to ()
- /// (maybe...?)
- NoBorrowedDataAccessedInMyDtor,
-
- /// Assume all borrowed data access by dtor occurs as if Self has the
- /// type carried by this variant. In practice this means that some
- /// of the type parameters are remapped to `()` (and some lifetime
- /// parameters remapped to `'static`), because the developer has asserted
- /// that the destructor will not access their contents.
- RevisedSelf(Ty<'tcx>),
-}
-
-/// Returns the classification of what kind of check should be applied
-/// to `ty`, which may include a revised type where some of the type
-/// parameters are re-mapped to `()` to reflect the destructor's
-/// "purity" with respect to their actual contents.
-fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- ty: Ty<'tcx>)
- -> DropckKind<'tcx> {
- match ty.sty {
- ty::TyAdt(adt_def, substs) => {
- if !adt_def.is_dtorck(tcx) {
- return DropckKind::NoBorrowedDataAccessedInMyDtor;
+ for outlive in outlives {
+ if let Some(r) = outlive.as_region() {
+ rcx.sub_regions(origin(), parent_scope, r);
+ } else if let Some(ty) = outlive.as_type() {
+ rcx.type_must_outlive(origin(), ty, parent_scope);
}
-
- // Find the `impl<..> Drop for _` to inspect any
- // attributes attached to the impl's generics.
- let dtor_method = adt_def.destructor(tcx)
- .expect("dtorck type without destructor impossible");
- let method = tcx.associated_item(dtor_method.did);
- let impl_def_id = method.container.id();
- let revised_ty = revise_self_ty(tcx, adt_def, impl_def_id, substs);
- return DropckKind::RevisedSelf(revised_ty);
- }
- ty::TyDynamic(..) | ty::TyProjection(..) | ty::TyAnon(..) => {
- debug!("ty: {:?} isn't known, and therefore is a dropck type", ty);
- return DropckKind::BorrowedDataMustStrictlyOutliveSelf;
- },
- _ => {
- return DropckKind::NoBorrowedDataAccessedInMyDtor;
}
}
-}
-// Constructs new Ty just like the type defined by `adt_def` coupled
-// with `substs`, except each type and lifetime parameter marked as
-// `#[may_dangle]` in the Drop impl (identified by `impl_def_id`) is
-// respectively mapped to `()` or `'static`.
-//
-// For example: If the `adt_def` maps to:
-//
-// enum Foo<'a, X, Y> { ... }
-//
-// and the `impl_def_id` maps to:
-//
-// impl<#[may_dangle] 'a, X, #[may_dangle] Y> Drop for Foo<'a, X, Y> { ... }
-//
-// then revises input: `Foo<'r,i64,&'r i64>` to: `Foo<'static,i64,()>`
-fn revise_self_ty<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
- adt_def: &'tcx ty::AdtDef,
- impl_def_id: DefId,
- substs: &Substs<'tcx>)
- -> Ty<'tcx> {
- // Get generics for `impl Drop` to query for `#[may_dangle]` attr.
- let impl_bindings = tcx.item_generics(impl_def_id);
-
- // Get Substs attached to Self on `impl Drop`; process in parallel
- // with `substs`, replacing dangling entries as appropriate.
- let self_substs = {
- let impl_self_ty: Ty<'tcx> = tcx.item_type(impl_def_id);
- if let ty::TyAdt(self_adt_def, self_substs) = impl_self_ty.sty {
- assert_eq!(adt_def, self_adt_def);
- self_substs
- } else {
- bug!("Self in `impl Drop for _` must be an Adt.");
- }
- };
-
- // Walk `substs` + `self_substs`, build new substs appropriate for
- // `adt_def`; each non-dangling param reuses entry from `substs`.
- //
- // Note: The manner we map from a right-hand side (i.e. Region or
- // Ty) for a given `def` to generic parameter associated with that
- // right-hand side is tightly coupled to `Drop` impl constraints.
- //
- // E.g. we know such a Ty must be `TyParam`, because a destructor
- // for `struct Foo<X>` is defined via `impl<Y> Drop for Foo<Y>`,
- // and never by (for example) `impl<Z> Drop for Foo<Vec<Z>>`.
- let substs = Substs::for_item(
- tcx,
- adt_def.did,
- |def, _| {
- let r_orig = substs.region_for_def(def);
- let impl_self_orig = self_substs.region_for_def(def);
- let r = if let ty::Region::ReEarlyBound(ref ebr) = *impl_self_orig {
- if impl_bindings.region_param(ebr).pure_wrt_drop {
- tcx.mk_region(ty::ReStatic)
- } else {
- r_orig
- }
- } else {
- bug!("substs for an impl must map regions to ReEarlyBound");
- };
- debug!("has_dtor_of_interest mapping def {:?} orig {:?} to {:?}",
- def, r_orig, r);
- r
- },
- |def, _| {
- let t_orig = substs.type_for_def(def);
- let impl_self_orig = self_substs.type_for_def(def);
- let t = if let ty::TypeVariants::TyParam(ref pt) = impl_self_orig.sty {
- if impl_bindings.type_param(pt).pure_wrt_drop {
- tcx.mk_nil()
- } else {
- t_orig
- }
- } else {
- bug!("substs for an impl must map types to TyParam");
- };
- debug!("has_dtor_of_interest mapping def {:?} orig {:?} {:?} to {:?} {:?}",
- def, t_orig, t_orig.sty, t, t.sty);
- t
- });
-
- tcx.mk_adt(adt_def, &substs)
+ Ok(())
}
let def_id = tcx.hir.local_def_id(it.id);
let substs = Substs::for_item(tcx, def_id,
- |_, _| tcx.mk_region(ty::ReErased),
+ |_, _| tcx.types.re_erased,
|def, _| tcx.mk_param_from_def(def));
let fty = tcx.mk_fn_def(def_id, substs, ty::Binder(tcx.mk_fn_sig(
hir::Unsafety::Unsafe,
abi
)));
- let i_n_tps = tcx.item_generics(def_id).types.len();
+ let i_n_tps = tcx.generics_of(def_id).types.len();
if i_n_tps != n_tps {
let span = match it.node {
hir::ForeignItemFn(_, _, ref generics) => generics.span,
&ObligationCause::new(it.span,
it.id,
ObligationCauseCode::IntrinsicType),
- tcx.item_type(def_id),
+ tcx.type_of(def_id),
fty);
}
}
};
let def_id = tcx.hir.local_def_id(it.id);
- let i_n_tps = tcx.item_generics(def_id).types.len();
+ let i_n_tps = tcx.generics_of(def_id).types.len();
let name = it.name.as_str();
let (n_tps, inputs, output) = match &*name {
let mut structural_to_nomimal = FxHashMap();
- let sig = tcx.item_type(def_id).fn_sig();
+ let sig = tcx.type_of(def_id).fn_sig();
let sig = tcx.no_late_bound_regions(&sig).unwrap();
if intr.inputs.len() != sig.inputs().len() {
span_err!(tcx.sess, it.span, E0444,
// If they were not explicitly supplied, just construct fresh
// variables.
let num_supplied_types = supplied_method_types.len();
- let method_generics = self.tcx.item_generics(pick.item.def_id);
+ let method_generics = self.tcx.generics_of(pick.item.def_id);
let num_method_types = method_generics.types.len();
if num_supplied_types > 0 && num_supplied_types != num_method_types {
// type/early-bound-regions substitutions performed. There can
// be no late-bound regions appearing here.
let def_id = pick.item.def_id;
- let method_predicates = self.tcx.item_predicates(def_id)
+ let method_predicates = self.tcx.predicates_of(def_id)
.instantiate(self.tcx, all_substs);
let method_predicates = self.normalize_associated_types_in(self.span,
&method_predicates);
debug!("method_predicates after subst = {:?}", method_predicates);
- let sig = self.tcx.item_type(def_id).fn_sig();
+ let sig = self.tcx.type_of(def_id).fn_sig();
// Instantiate late-bound regions and substitute the trait
// parameters into the method type to get the actual method type.
let tcx = self.tcx;
let method_item = self.associated_item(trait_def_id, m_name).unwrap();
let def_id = method_item.def_id;
- let generics = tcx.item_generics(def_id);
+ let generics = tcx.generics_of(def_id);
assert_eq!(generics.types.len(), 0);
assert_eq!(generics.regions.len(), 0);
// NB: Instantiate late-bound regions first so that
// `instantiate_type_scheme` can normalize associated types that
// may reference those regions.
- let original_method_ty = tcx.item_type(def_id);
+ let original_method_ty = tcx.type_of(def_id);
let fn_sig = original_method_ty.fn_sig();
let fn_sig = self.replace_late_bound_regions_with_fresh_var(span,
infer::FnCall,
//
// Note that as the method comes from a trait, it should not have
// any late-bound regions appearing in its bounds.
- let bounds = self.tcx.item_predicates(def_id).instantiate(self.tcx, substs);
+ let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
let bounds = match self.normalize_associated_types_in_as_infer_ok(span, &bounds) {
InferOk { value, obligations: o } => {
obligations.extend(o);
}
let def = pick.item.def();
-
self.tcx.check_stability(def.def_id(), expr_id, span);
- if let probe::InherentImplPick = pick.kind {
- if !self.tcx.vis_is_accessible_from(pick.item.vis, self.body_id) {
- let msg = format!("{} `{}` is private", def.kind_name(), method_name);
- self.tcx.sess.span_err(span, &msg);
- }
- }
Ok(def)
}
///////////////////////////////////////////////////////////////////////////
// CANDIDATE ASSEMBLY
+ fn push_inherent_candidate(&mut self, xform_self_ty: Ty<'tcx>, item: ty::AssociatedItem,
+ kind: CandidateKind<'tcx>, import_id: Option<ast::NodeId>) {
+ if self.tcx.vis_is_accessible_from(item.vis, self.body_id) {
+ self.inherent_candidates.push(Candidate { xform_self_ty, item, kind, import_id });
+ } else if self.private_candidate.is_none() {
+ self.private_candidate = Some(item.def());
+ }
+ }
+
+ fn push_extension_candidate(&mut self, xform_self_ty: Ty<'tcx>, item: ty::AssociatedItem,
+ kind: CandidateKind<'tcx>, import_id: Option<ast::NodeId>) {
+ if self.tcx.vis_is_accessible_from(item.vis, self.body_id) {
+ self.extension_candidates.push(Candidate { xform_self_ty, item, kind, import_id });
+ } else if self.private_candidate.is_none() {
+ self.private_candidate = Some(item.def());
+ }
+ }
+
fn assemble_inherent_candidates(&mut self) {
let steps = self.steps.clone();
for step in steps.iter() {
}
fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) {
- let impl_def_ids = ty::queries::inherent_impls::get(self.tcx, self.span, def_id);
+ let impl_def_ids = self.tcx.at(self.span).inherent_impls(def_id);
for &impl_def_id in impl_def_ids.iter() {
self.assemble_inherent_impl_probe(impl_def_id);
}
continue
}
- if !self.tcx.vis_is_accessible_from(item.vis, self.body_id) {
- self.private_candidate = Some(item.def());
- continue
- }
-
let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
let impl_ty = impl_ty.subst(self.tcx, impl_substs);
debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}",
xform_self_ty);
- self.inherent_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item,
- kind: InherentImplCandidate(impl_substs, obligations),
- import_id: None,
- });
+ self.push_inherent_candidate(xform_self_ty, item,
+ InherentImplCandidate(impl_substs, obligations), None);
}
}
let xform_self_ty =
this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs);
- this.inherent_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item,
- kind: ObjectCandidate,
- import_id: None,
- });
+ this.push_inherent_candidate(xform_self_ty, item, ObjectCandidate, None);
});
}
// `WhereClausePick`.
assert!(!trait_ref.substs.needs_infer());
- this.inherent_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item,
- kind: WhereClauseCandidate(poly_trait_ref),
- import_id: None,
- });
+ this.push_inherent_candidate(xform_self_ty, item,
+ WhereClauseCandidate(poly_trait_ref), None);
});
}
expected: ty::Ty<'tcx>) -> bool {
match method.def() {
Def::Method(def_id) => {
- let fty = self.tcx.item_type(def_id).fn_sig();
+ let fty = self.tcx.type_of(def_id).fn_sig();
self.probe(|_| {
let substs = self.fresh_substs_for_item(self.span, method.def_id);
let output = fty.output().subst(self.tcx, substs);
import_id: Option<ast::NodeId>,
trait_def_id: DefId,
item: ty::AssociatedItem) {
- let trait_def = self.tcx.lookup_trait_def(trait_def_id);
+ let trait_def = self.tcx.trait_def(trait_def_id);
// FIXME(arielb1): can we use for_each_relevant_impl here?
trait_def.for_each_impl(self.tcx, |impl_def_id| {
debug!("xform_self_ty={:?}", xform_self_ty);
- self.extension_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item.clone(),
- kind: ExtensionImplCandidate(impl_def_id, impl_substs, obligations),
- import_id: import_id,
- });
+ self.push_extension_candidate(xform_self_ty, item,
+ ExtensionImplCandidate(impl_def_id, impl_substs, obligations), import_id);
});
}
}
};
- let impl_type = self.tcx.item_type(impl_def_id);
+ let impl_type = self.tcx.type_of(impl_def_id);
let impl_simplified_type =
match ty::fast_reject::simplify_type(self.tcx, impl_type, false) {
Some(simplified_type) => simplified_type,
});
let xform_self_ty = self.xform_self_ty(&item, step.self_ty, substs);
- self.inherent_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item.clone(),
- kind: TraitCandidate,
- import_id: import_id,
- });
+ self.push_inherent_candidate(xform_self_ty, item, TraitCandidate, import_id);
}
Ok(())
trait_def_id,
item);
- for step in self.steps.iter() {
+ for step in Rc::clone(&self.steps).iter() {
debug!("assemble_projection_candidates: step={:?}", step);
let (def_id, substs) = match step.self_ty.sty {
def_id,
substs);
- let trait_predicates = self.tcx.item_predicates(def_id);
+ let trait_predicates = self.tcx.predicates_of(def_id);
let bounds = trait_predicates.instantiate(self.tcx, substs);
let predicates = bounds.predicates;
debug!("assemble_projection_candidates: predicates={:?}",
bound,
xform_self_ty);
- self.extension_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item.clone(),
- kind: TraitCandidate,
- import_id: import_id,
- });
+ self.push_extension_candidate(xform_self_ty, item, TraitCandidate, import_id);
}
}
}
bound,
xform_self_ty);
- self.extension_candidates.push(Candidate {
- xform_self_ty: xform_self_ty,
- item: item.clone(),
- kind: WhereClauseCandidate(poly_bound),
- import_id: import_id,
- });
+ self.push_extension_candidate(xform_self_ty, item,
+ WhereClauseCandidate(poly_bound), import_id);
}
}
// In general, during probing we erase regions. See
// `impl_self_ty()` for an explanation.
- let region = tcx.mk_region(ty::ReErased);
+ let region = tcx.types.re_erased;
// Search through mutabilities in order to find one where pick works:
[hir::MutImmutable, hir::MutMutable]
let cause = traits::ObligationCause::misc(self.span, self.body_id);
// Check whether the impl imposes obligations we have to worry about.
- let impl_bounds = self.tcx.item_predicates(impl_def_id);
+ let impl_bounds = self.tcx.predicates_of(impl_def_id);
let impl_bounds = impl_bounds.instantiate(self.tcx, substs);
let traits::Normalized { value: impl_bounds, obligations: norm_obligations } =
traits::normalize(selcx, cause.clone(), &impl_bounds);
impl_ty: Ty<'tcx>,
substs: &Substs<'tcx>)
-> Ty<'tcx> {
- let self_ty = self.tcx.item_type(method).fn_sig().input(0);
+ let self_ty = self.tcx.type_of(method).fn_sig().input(0);
debug!("xform_self_ty(impl_ty={:?}, self_ty={:?}, substs={:?})",
impl_ty,
self_ty,
// are given do not include type/lifetime parameters for the
// method yet. So create fresh variables here for those too,
// if there are any.
- let generics = self.tcx.item_generics(method);
+ let generics = self.tcx.generics_of(method);
assert_eq!(substs.types().count(), generics.parent_types as usize);
assert_eq!(substs.regions().count(), generics.parent_regions as usize);
} else {
// In general, during probe we erase regions. See
// `impl_self_ty()` for an explanation.
- self.tcx.mk_region(ty::ReErased)
+ self.tcx.types.re_erased
}
}, |def, cur_substs| {
let i = def.index as usize;
/// Get the type of an impl and generate substitutions with placeholders.
fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, &'tcx Substs<'tcx>) {
- let impl_ty = self.tcx.item_type(impl_def_id);
+ let impl_ty = self.tcx.type_of(impl_def_id);
let substs = Substs::for_item(self.tcx,
impl_def_id,
- |_, _| self.tcx.mk_region(ty::ReErased),
+ |_, _| self.tcx.types.re_erased,
|_, _| self.next_ty_var(
TypeVariableOrigin::SubstitutionPlaceholder(
self.tcx.def_span(impl_def_id))));
}
pub fn check_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> CompileResult {
- ty::queries::typeck_item_bodies::get(tcx, DUMMY_SP, LOCAL_CRATE)
+ tcx.typeck_item_bodies(LOCAL_CRATE)
}
fn typeck_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) -> CompileResult {
debug_assert!(crate_num == LOCAL_CRATE);
tcx.sess.track_errors(|| {
tcx.visit_all_bodies_in_krate(|body_owner_def_id, _body_id| {
- tcx.item_tables(body_owner_def_id);
+ tcx.typeck_tables_of(body_owner_def_id);
});
})
}
pub fn provide(providers: &mut Providers) {
*providers = Providers {
typeck_item_bodies,
- typeck_tables,
+ typeck_tables_of,
closure_type,
closure_kind,
adt_destructor,
def_id: DefId)
-> ty::PolyFnSig<'tcx> {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
- tcx.item_tables(def_id).closure_tys[&node_id]
+ tcx.typeck_tables_of(def_id).closure_tys[&node_id]
}
fn closure_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> ty::ClosureKind {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
- tcx.item_tables(def_id).closure_kinds[&node_id]
+ tcx.typeck_tables_of(def_id).closure_kinds[&node_id]
}
fn adt_destructor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
tcx.calculate_dtor(def_id, &mut dropck::check_drop_impl)
}
-fn typeck_tables<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx ty::TypeckTables<'tcx> {
// Closures' tables come from their outermost function,
// as they are part of the same "inference environment".
let outer_def_id = tcx.closure_base_def_id(def_id);
if outer_def_id != def_id {
- return tcx.item_tables(outer_def_id);
+ return tcx.typeck_tables_of(outer_def_id);
}
let id = tcx.hir.as_local_node_id(def_id).unwrap();
Inherited::build(tcx, id).enter(|inh| {
let fcx = if let Some(decl) = fn_decl {
- let fn_sig = tcx.item_type(def_id).fn_sig();
+ let fn_sig = tcx.type_of(def_id).fn_sig();
check_abi(tcx, span, fn_sig.abi());
check_fn(&inh, fn_sig, decl, id, body)
} else {
let fcx = FnCtxt::new(&inh, body.value.id);
- let expected_type = tcx.item_type(def_id);
+ let expected_type = tcx.type_of(def_id);
let expected_type = fcx.normalize_associated_types_in(body.value.span, &expected_type);
fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
id: ast::NodeId,
span: Span) {
let def_id = tcx.hir.local_def_id(id);
- let def = tcx.lookup_adt_def(def_id);
+ let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
if def.repr.simd() {
check_simd(tcx, span, def_id);
}
+
+ // If the struct is `#[repr(packed)]` and carries no explicit alignment, check that
+ // none of its fields transitively contains a `#[repr(align)]` struct.
+ // Checks for combining packed and align attrs on a single struct are done elsewhere.
+ if tcx.adt_def(def_id).repr.packed() && tcx.adt_def(def_id).repr.align == 0 {
+ check_packed(tcx, span, def_id);
+ }
}
fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: ast::NodeId,
span: Span) {
let def_id = tcx.hir.local_def_id(id);
- let def = tcx.lookup_adt_def(def_id);
+ let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
}
// Consts can play a role in type-checking, so they are included here.
hir::ItemStatic(..) |
hir::ItemConst(..) => {
- tcx.item_tables(tcx.hir.local_def_id(it.id));
+ tcx.typeck_tables_of(tcx.hir.local_def_id(it.id));
}
hir::ItemEnum(ref enum_definition, _) => {
check_enum(tcx,
}
hir::ItemTy(_, ref generics) => {
let def_id = tcx.hir.local_def_id(it.id);
- let pty_ty = tcx.item_type(def_id);
+ let pty_ty = tcx.type_of(def_id);
check_bounds_are_used(tcx, generics, pty_ty);
}
hir::ItemForeignMod(ref m) => {
}
} else {
for item in &m.items {
- let generics = tcx.item_generics(tcx.hir.local_def_id(item.id));
+ let generics = tcx.generics_of(tcx.hir.local_def_id(item.id));
if !generics.types.is_empty() {
let mut err = struct_span_err!(tcx.sess, item.span, E0044,
"foreign items may not have type parameters");
fn check_on_unimplemented<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
item: &hir::Item) {
- let generics = tcx.item_generics(def_id);
+ let generics = tcx.generics_of(def_id);
if let Some(ref attr) = item.attrs.iter().find(|a| {
a.check_name("rustc_on_unimplemented")
}) {
if impl_trait_ref.references_error() { return; }
// Locate trait definition and items
- let trait_def = tcx.lookup_trait_def(impl_trait_ref.def_id);
+ let trait_def = tcx.trait_def(impl_trait_ref.def_id);
let mut overridden_associated_type = None;
let impl_items = || impl_item_refs.iter().map(|iiref| tcx.hir.impl_item(iiref.id));
let signature = |item: &ty::AssociatedItem| {
match item.kind {
ty::AssociatedKind::Method => {
- format!("{}", tcx.item_type(item.def_id).fn_sig().0)
+ format!("{}", tcx.type_of(item.def_id).fn_sig().0)
}
ty::AssociatedKind::Type => format!("type {};", item.name.to_string()),
ty::AssociatedKind::Const => {
- format!("const {}: {:?};", item.name.to_string(), tcx.item_type(item.def_id))
+ format!("const {}: {:?};", item.name.to_string(), tcx.type_of(item.def_id))
}
}
};
sp: Span,
item_def_id: DefId)
-> bool {
- let rty = tcx.item_type(item_def_id);
+ let rty = tcx.type_of(item_def_id);
// Check that it is possible to represent this type. This call identifies
// (1) types that contain themselves and (2) types that contain a different
}
pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
- let t = tcx.item_type(def_id);
+ let t = tcx.type_of(def_id);
match t.sty {
ty::TyAdt(def, substs) if def.is_struct() => {
let fields = &def.struct_variant().fields;
}
}
+fn check_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
+ if check_packed_inner(tcx, def_id, &mut Vec::new()) {
+ struct_span_err!(tcx.sess, sp, E0588,
+ "packed struct cannot transitively contain a `[repr(align)]` struct").emit();
+ }
+}
+
+fn check_packed_inner<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ stack: &mut Vec<DefId>) -> bool {
+ let t = tcx.type_of(def_id);
+ if stack.contains(&def_id) {
+ debug!("check_packed_inner: {:?} is recursive", t);
+ return false;
+ }
+ match t.sty {
+ ty::TyAdt(def, substs) if def.is_struct() => {
+ if tcx.adt_def(def.did).repr.align > 0 {
+ return true;
+ }
+ // Push this struct's def_id before recursing into its fields, so that
+ // recursive struct definitions are detected and terminate the walk.
+ stack.push(def_id);
+ for field in &def.struct_variant().fields {
+ let f = field.ty(tcx, substs);
+ match f.sty {
+ ty::TyAdt(def, _) => {
+ if check_packed_inner(tcx, def.did, stack) {
+ return true;
+ }
+ }
+ _ => ()
+ }
+ }
+ // Pop only when we did not return early; on an early `true` return the
+ // stack's remaining contents are irrelevant, since the error propagates up.
+ stack.pop();
+ }
+ _ => ()
+ }
+ false
+}
+
#[allow(trivial_numeric_casts)]
pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
vs: &'tcx [hir::Variant],
id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(id);
- let def = tcx.lookup_adt_def(def_id);
+ let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
if vs.is_empty() && tcx.has_attr(def_id, "repr") {
for v in vs {
if let Some(e) = v.node.disr_expr {
- tcx.item_tables(tcx.hir.local_def_id(e.node_id));
+ tcx.typeck_tables_of(tcx.hir.local_def_id(e.node_id));
}
}
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let item_id = tcx.hir.ty_param_owner(node_id);
let item_def_id = tcx.hir.local_def_id(item_id);
- let generics = tcx.item_generics(item_def_id);
+ let generics = tcx.generics_of(item_def_id);
let index = generics.type_param_to_index[&def_id.index];
ty::GenericPredicates {
parent: None,
/// generic type scheme.
fn instantiate_bounds(&self, span: Span, def_id: DefId, substs: &Substs<'tcx>)
-> ty::InstantiatedPredicates<'tcx> {
- let bounds = self.tcx.item_predicates(def_id);
+ let bounds = self.tcx.predicates_of(def_id);
let result = bounds.instantiate(self.tcx, substs);
let result = self.normalize_associated_types_in(span, &result);
debug!("instantiate_bounds(bounds={:?}, substs={:?}) = {:?}",
let ty_var = self.next_ty_var(TypeVariableOrigin::TypeInference(span));
self.anon_types.borrow_mut().insert(id, ty_var);
- let item_predicates = self.tcx.item_predicates(def_id);
- let bounds = item_predicates.instantiate(self.tcx, substs);
+ let predicates_of = self.tcx.predicates_of(def_id);
+ let bounds = predicates_of.instantiate(self.tcx, substs);
for predicate in bounds.predicates {
// Change the predicate to refer to the type variable,
//
// FIXME(#27579) all uses of this should be migrated to register_wf_obligation eventually
let cause = traits::ObligationCause::new(span, self.body_id, code);
- self.register_region_obligation(ty, self.tcx.mk_region(ty::ReEmpty), cause);
+ self.register_region_obligation(ty, self.tcx.types.re_empty, cause);
}
/// Registers obligations that all types appearing in `substs` are well-formed.
match lit.node {
ast::LitKind::Str(..) => tcx.mk_static_str(),
ast::LitKind::ByteStr(ref v) => {
- tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic),
+ tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_array(tcx.types.u8, v.len()))
}
ast::LitKind::Byte(_) => tcx.types.u8,
span: Span, // (potential) receiver for this impl
did: DefId)
-> TypeAndSubsts<'tcx> {
- let ity = self.tcx.item_type(did);
+ let ity = self.tcx.type_of(did);
debug!("impl_self_ty: ity={:?}", ity);
let substs = self.fresh_substs_for_item(span, did);
Def::VariantCtor(def_id, ..) => {
// Everything but the final segment should have no
// parameters at all.
- let mut generics = self.tcx.item_generics(def_id);
+ let mut generics = self.tcx.generics_of(def_id);
if let Some(def_id) = generics.parent {
// Variant and struct constructors use the
// generics of their parent type definition.
- generics = self.tcx.item_generics(def_id);
+ generics = self.tcx.generics_of(def_id);
}
type_segment = Some((segments.last().unwrap(), generics));
}
Def::Const(def_id) |
Def::Static(def_id, _) => {
fn_segment = Some((segments.last().unwrap(),
- self.tcx.item_generics(def_id)));
+ self.tcx.generics_of(def_id)));
}
// Case 3. Reference to a method or associated const.
ty::ImplContainer(_) => {}
}
- let generics = self.tcx.item_generics(def_id);
+ let generics = self.tcx.generics_of(def_id);
if segments.len() >= 2 {
- let parent_generics = self.tcx.item_generics(generics.parent.unwrap());
+ let parent_generics = self.tcx.generics_of(generics.parent.unwrap());
type_segment = Some((&segments[segments.len() - 2], parent_generics));
} else {
// `<T>::assoc` will end up here, and so can `T::assoc`.
self.to_ty(ast_ty)
} else if !infer_types && def.has_default {
// No type parameter provided, but a default exists.
- let default = self.tcx.item_type(def.def_id);
+ let default = self.tcx.type_of(def.def_id);
self.normalize_ty(
span,
default.subst_spanned(self.tcx, substs, Some(span))
// The things we are substituting into the type should not contain
// escaping late-bound regions, and nor should the base type scheme.
- let ty = self.tcx.item_type(def.def_id());
+ let ty = self.tcx.type_of(def.def_id());
assert!(!substs.has_escaping_regions());
assert!(!ty.has_escaping_regions());
// is inherent, there is no `Self` parameter, instead, the impl needs
// type parameters, which we can infer by unifying the provided `Self`
// with the substituted impl type.
- let ty = self.tcx.item_type(impl_def_id);
+ let ty = self.tcx.type_of(impl_def_id);
let impl_ty = self.instantiate_type_scheme(span, &substs, &ty);
match self.sub_types(false, &self.misc(span), self_ty, impl_ty) {
self.type_of_node_must_outlive(origin, id, var_region);
let typ = self.resolve_node_type(id);
- dropck::check_safety_of_destructor_if_necessary(self, typ, span, var_scope);
+ let _ = dropck::check_safety_of_destructor_if_necessary(
+ self, typ, span, var_scope);
})
}
}
match *region {
ty::ReScope(rvalue_scope) => {
let typ = self.resolve_type(cmt.ty);
- dropck::check_safety_of_destructor_if_necessary(self,
- typ,
- span,
- rvalue_scope);
+ let _ = dropck::check_safety_of_destructor_if_necessary(
+ self, typ, span, rvalue_scope);
}
ty::ReStatic => {}
_ => {
// ```
//
// we can thus deduce that `<T as SomeTrait<'a>>::SomeType : 'a`.
- let trait_predicates = self.tcx.item_predicates(projection_ty.trait_ref.def_id);
+ let trait_predicates = self.tcx.predicates_of(projection_ty.trait_ref.def_id);
assert_eq!(trait_predicates.parent, None);
let predicates = trait_predicates.predicates.as_slice().to_vec();
traits::elaborate_predicates(self.tcx, predicates)
let (mut implied_bounds, self_ty) = match item.container {
ty::TraitContainer(_) => (vec![], fcx.tcx.mk_self_type()),
ty::ImplContainer(def_id) => (fcx.impl_implied_bounds(def_id, span),
- fcx.tcx.item_type(def_id))
+ fcx.tcx.type_of(def_id))
};
match item.kind {
ty::AssociatedKind::Const => {
- let ty = fcx.tcx.item_type(item.def_id);
+ let ty = fcx.tcx.type_of(item.def_id);
let ty = fcx.instantiate_type_scheme(span, free_substs, &ty);
fcx.register_wf_obligation(ty, span, code.clone());
}
ty::AssociatedKind::Method => {
reject_shadowing_type_parameters(fcx.tcx, item.def_id);
- let method_ty = fcx.tcx.item_type(item.def_id);
+ let method_ty = fcx.tcx.type_of(item.def_id);
let method_ty = fcx.instantiate_type_scheme(span, free_substs, &method_ty);
let predicates = fcx.instantiate_bounds(span, item.def_id, free_substs);
let sig = method_ty.fn_sig();
}
ty::AssociatedKind::Type => {
if item.defaultness.has_value() {
- let ty = fcx.tcx.item_type(item.def_id);
+ let ty = fcx.tcx.type_of(item.def_id);
let ty = fcx.instantiate_type_scheme(span, free_substs, &ty);
fcx.register_wf_obligation(ty, span, code.clone());
}
//
// 3) that the trait definition does not have any type parameters
- let predicates = self.tcx.item_predicates(trait_def_id);
+ let predicates = self.tcx.predicates_of(trait_def_id);
// We must exclude the Self : Trait predicate contained by all
// traits.
}
});
- let has_ty_params = self.tcx.item_generics(trait_def_id).types.len() > 1;
+ let has_ty_params = self.tcx.generics_of(trait_def_id).types.len() > 1;
// We use an if-else here, since the generics will also trigger
// an extraneous error message when we find predicates like
self.for_item(item).with_fcx(|fcx, this| {
let free_substs = &fcx.parameter_environment.free_substs;
let def_id = fcx.tcx.hir.local_def_id(item.id);
- let ty = fcx.tcx.item_type(def_id);
+ let ty = fcx.tcx.type_of(def_id);
let item_ty = fcx.instantiate_type_scheme(item.span, free_substs, &ty);
let sig = item_ty.fn_sig();
debug!("check_item_type: {:?}", item);
self.for_item(item).with_fcx(|fcx, this| {
- let ty = fcx.tcx.item_type(fcx.tcx.hir.local_def_id(item.id));
+ let ty = fcx.tcx.type_of(fcx.tcx.hir.local_def_id(item.id));
let item_ty = fcx.instantiate_type_scheme(item.span,
&fcx.parameter_environment
.free_substs,
}
}
None => {
- let self_ty = fcx.tcx.item_type(item_def_id);
+ let self_ty = fcx.tcx.type_of(item_def_id);
let self_ty = fcx.instantiate_type_scheme(item.span, free_substs, &self_ty);
fcx.register_wf_obligation(self_ty, ast_self_ty.span, this.code.clone());
}
let span = method_sig.decl.inputs[0].span;
let free_substs = &fcx.parameter_environment.free_substs;
- let method_ty = fcx.tcx.item_type(method.def_id);
+ let method_ty = fcx.tcx.type_of(method.def_id);
let fty = fcx.instantiate_type_scheme(span, free_substs, &method_ty);
let sig = fcx.tcx.liberate_late_bound_regions(free_id_outlive, &fty.fn_sig());
ast_generics: &hir::Generics)
{
let item_def_id = self.tcx.hir.local_def_id(item.id);
- let ty = self.tcx.item_type(item_def_id);
+ let ty = self.tcx.type_of(item_def_id);
if self.tcx.has_error_field(ty) {
return;
}
- let ty_predicates = self.tcx.item_predicates(item_def_id);
+ let ty_predicates = self.tcx.predicates_of(item_def_id);
assert_eq!(ty_predicates.parent, None);
- let variances = self.tcx.item_variances(item_def_id);
+ let variances = self.tcx.variances_of(item_def_id);
let mut constrained_parameters: FxHashSet<_> =
variances.iter().enumerate()
}
fn reject_shadowing_type_parameters(tcx: TyCtxt, def_id: DefId) {
- let generics = tcx.item_generics(def_id);
- let parent = tcx.item_generics(generics.parent.unwrap());
+ let generics = tcx.generics_of(def_id);
+ let parent = tcx.generics_of(generics.parent.unwrap());
let impl_params: FxHashMap<_, _> = parent.types
.iter()
.map(|tp| (tp.name, tp.def_id))
let fields =
struct_def.fields().iter()
.map(|field| {
- let field_ty = self.tcx.item_type(self.tcx.hir.local_def_id(field.id));
+ let field_ty = self.tcx.type_of(self.tcx.hir.local_def_id(field.id));
let field_ty = self.instantiate_type_scheme(field.span,
&self.parameter_environment
.free_substs,
None => {
// Inherent impl: take implied bounds from the self type.
- let self_ty = self.tcx.item_type(impl_def_id);
+ let self_ty = self.tcx.type_of(impl_def_id);
let self_ty = self.instantiate_type_scheme(span, free_substs, &self_ty);
vec![self_ty]
}
let outside_ty = gcx.fold_regions(&inside_ty, &mut false, |r, _| {
match *r {
// 'static is valid everywhere.
- ty::ReStatic |
- ty::ReEmpty => gcx.mk_region(*r),
+ ty::ReStatic => gcx.types.re_static,
+ ty::ReEmpty => gcx.types.re_empty,
// Free regions that come from early-bound regions are valid.
ty::ReFree(ty::FreeRegion {
span_err!(self.tcx().sess, span, E0564,
"only named lifetimes are allowed in `impl Trait`, \
but `{}` was found in the type `{}`", r, inside_ty);
- gcx.mk_region(ty::ReStatic)
+ gcx.types.re_static
}
ty::ReVar(_) |
match self.infcx.fully_resolve(&r) {
Ok(r) => r,
Err(_) => {
- self.tcx.mk_region(ty::ReStatic)
+ self.tcx.types.re_static
}
}
}
let item_def_id = tcx.hir.local_def_id(item_id);
// this will have been written by the main typeck pass
- if let Some(tables) = tcx.maps.typeck_tables.borrow().get(&item_def_id) {
+ if let Some(tables) = tcx.maps.typeck_tables_of.borrow().get(&item_def_id) {
let imports = &tables.used_trait_imports;
debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
used_trait_imports.extend(imports);
fn visit_implementation_of_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
_drop_did: DefId,
impl_did: DefId) {
- match tcx.item_type(impl_did).sty {
+ match tcx.type_of(impl_did).sty {
ty::TyAdt(..) => {}
_ => {
// Destructors only work on nominal types.
return;
};
- let self_type = tcx.item_type(impl_did);
+ let self_type = tcx.type_of(impl_did);
debug!("visit_implementation_of_copy: self_type={:?} (bound)",
self_type);
// course.
if impl_did.is_local() {
let span = tcx.def_span(impl_did);
- ty::queries::coerce_unsized_info::get(tcx, span, impl_did);
+ tcx.at(span).coerce_unsized_info(impl_did);
}
}
bug!("coerce_unsized_info: invoked for non-local def-id {:?}", impl_did)
});
- let source = tcx.item_type(impl_did);
+ let source = tcx.type_of(impl_did);
let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
assert_eq!(trait_ref.def_id, coerce_unsized_trait);
let target = trait_ref.substs.type_at(1);
.filter_map(|(i, f)| {
let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
- if tcx.item_type(f.did).is_phantom_data() {
+ if tcx.type_of(f.did).is_phantom_data() {
// Ignore PhantomData fields
return None;
}
//! for any change, but it is very cheap to compute. In practice, most
//! code in the compiler never *directly* requests this map. Instead,
//! it requests the inherent impls specific to some type (via
-//! `ty::queries::inherent_impls::get(def_id)`). That value, however,
+//! `tcx.inherent_impls(def_id)`). That value, however,
//! is computed by selecting an idea from this table.
use rustc::dep_graph::DepNode;
use std::rc::Rc;
use syntax::ast;
-use syntax_pos::{DUMMY_SP, Span};
+use syntax_pos::Span;
/// On-demand query: yields a map containing all types mapped to their inherent impls.
pub fn crate_inherent_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// [the plan]: https://github.com/rust-lang/rust-roadmap/issues/4
let result = tcx.dep_graph.with_ignore(|| {
- let crate_map = ty::queries::crate_inherent_impls::get(tcx, DUMMY_SP, ty_def_id.krate);
+ let crate_map = tcx.crate_inherent_impls(ty_def_id.krate);
match crate_map.inherent_impls.get(&ty_def_id) {
Some(v) => v.clone(),
None => Rc::new(vec![]),
}
let def_id = self.tcx.hir.local_def_id(item.id);
- let self_ty = self.tcx.item_type(def_id);
+ let self_ty = self.tcx.type_of(def_id);
match self_ty.sty {
ty::TyAdt(def, _) => {
self.check_def_id(item, def.did);
use rustc::traits::{self, Reveal};
use rustc::ty::{self, TyCtxt};
-use syntax_pos::DUMMY_SP;
-
pub fn crate_inherent_impls_overlap_check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
crate_num: CrateNum) {
assert_eq!(crate_num, LOCAL_CRATE);
}
fn check_for_overlapping_inherent_impls(&self, ty_def_id: DefId) {
- let impls = ty::queries::inherent_impls::get(self.tcx, DUMMY_SP, ty_def_id);
+ let impls = self.tcx.inherent_impls(ty_def_id);
for (i, &impl1_def_id) in impls.iter().enumerate() {
for &impl2_def_id in &impls[(i + 1)..] {
// mappings. That mapping code resides here.
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
-use rustc::ty::{self, TyCtxt, TypeFoldable};
+use rustc::ty::{TyCtxt, TypeFoldable};
use rustc::ty::maps::Providers;
use syntax::ast;
-use syntax_pos::DUMMY_SP;
mod builtin;
mod inherent_impls;
}
enforce_trait_manually_implementable(tcx, impl_def_id, trait_ref.def_id);
- let trait_def = tcx.lookup_trait_def(trait_ref.def_id);
+ let trait_def = tcx.trait_def(trait_ref.def_id);
trait_def.record_local_impl(tcx, impl_def_id, trait_ref);
}
}
pub fn check_coherence<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
for &trait_def_id in tcx.hir.krate().trait_impls.keys() {
- ty::queries::coherent_trait::get(tcx, DUMMY_SP, (LOCAL_CRATE, trait_def_id));
+ tcx.coherent_trait((LOCAL_CRATE, trait_def_id));
}
unsafety::check(tcx);
overlap::check_default_impls(tcx);
// these queries are executed for side-effects (error reporting):
- ty::queries::crate_inherent_impls::get(tcx, DUMMY_SP, LOCAL_CRATE);
- ty::queries::crate_inherent_impls_overlap_check::get(tcx, DUMMY_SP, LOCAL_CRATE);
+ tcx.crate_inherent_impls(LOCAL_CRATE);
+ tcx.crate_inherent_impls_overlap_check(LOCAL_CRATE);
}
let _task =
tcx.dep_graph.in_task(DepNode::CoherenceOverlapCheck(trait_def_id));
- let def = tcx.lookup_trait_def(trait_def_id);
+ let def = tcx.trait_def(trait_def_id);
// attempt to insert into the specialization graph
let insert_result = def.add_impl_for_specialization(tcx, impl_def_id);
None => {}
Some(trait_ref) => {
- let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id);
+ let trait_def = self.tcx.trait_def(trait_ref.def_id);
let unsafe_attr = impl_generics.and_then(|g| g.carries_unsafe_attr());
match (trait_def.unsafety, unsafe_attr, unsafety, polarity) {
(_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative) => {
arbitrary interdependencies. So instead we generally convert things
lazilly and on demand, and include logic that checks for cycles.
Demand is driven by calls to `AstConv::get_item_type_scheme` or
-`AstConv::lookup_trait_def`.
+`AstConv::trait_def`.
Currently, we "convert" types and traits in two phases (note that
conversion only affects the types of items / enum variants / methods;
pub fn provide(providers: &mut Providers) {
*providers = Providers {
- ty,
- generics,
- predicates,
- super_predicates,
+ type_of,
+ generics_of,
+ predicates_of,
+ super_predicates_of,
type_param_predicates,
trait_def,
adt_def,
impl_trait_ref,
+ impl_polarity,
+ is_foreign_item,
..*providers
};
}
for param in &generics.ty_params {
if param.default.is_some() {
let def_id = self.tcx.hir.local_def_id(param.id);
- self.tcx.item_type(def_id);
+ self.tcx.type_of(def_id);
}
}
intravisit::walk_generics(self, generics);
fn visit_expr(&mut self, expr: &'tcx hir::Expr) {
if let hir::ExprClosure(..) = expr.node {
let def_id = self.tcx.hir.local_def_id(expr.id);
- self.tcx.item_generics(def_id);
- self.tcx.item_type(def_id);
+ self.tcx.generics_of(def_id);
+ self.tcx.type_of(def_id);
}
intravisit::walk_expr(self, expr);
}
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
if let hir::TyImplTrait(..) = ty.node {
let def_id = self.tcx.hir.local_def_id(ty.id);
- self.tcx.item_generics(def_id);
- self.tcx.item_predicates(def_id);
+ self.tcx.generics_of(def_id);
+ self.tcx.predicates_of(def_id);
}
intravisit::walk_ty(self, ty);
}
def_id: DefId)
-> ty::GenericPredicates<'tcx>
{
- ty::queries::type_param_predicates::get(self.tcx, span, (self.item_def_id, def_id))
+ self.tcx.at(span).type_param_predicates((self.item_def_id, def_id))
}
fn get_free_substs(&self) -> Option<&Substs<'tcx>> {
let param_id = tcx.hir.as_local_node_id(def_id).unwrap();
let param_owner = tcx.hir.ty_param_owner(param_id);
let param_owner_def_id = tcx.hir.local_def_id(param_owner);
- let generics = tcx.item_generics(param_owner_def_id);
+ let generics = tcx.generics_of(param_owner_def_id);
let index = generics.type_param_to_index[&def_id.index];
let ty = tcx.mk_param(index, tcx.hir.ty_param_name(param_id));
let parent = if item_def_id == param_owner_def_id {
None
} else {
- tcx.item_generics(item_def_id).parent
+ tcx.generics_of(item_def_id).parent
};
let mut result = parent.map_or(ty::GenericPredicates {
hir::ItemForeignMod(ref foreign_mod) => {
for item in &foreign_mod.items {
let def_id = tcx.hir.local_def_id(item.id);
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
}
hir::ItemEnum(ref enum_definition, _) => {
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
convert_enum_variant_types(tcx, def_id, &enum_definition.variants);
},
hir::ItemDefaultImpl(..) => {
tcx.impl_trait_ref(def_id);
}
hir::ItemImpl(..) => {
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
tcx.impl_trait_ref(def_id);
- tcx.item_predicates(def_id);
+ tcx.predicates_of(def_id);
},
hir::ItemTrait(..) => {
- tcx.item_generics(def_id);
- tcx.lookup_trait_def(def_id);
- ty::queries::super_predicates::get(tcx, it.span, def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.trait_def(def_id);
+ tcx.at(it.span).super_predicates_of(def_id);
+ tcx.predicates_of(def_id);
},
hir::ItemStruct(ref struct_def, _) |
hir::ItemUnion(ref struct_def, _) => {
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
for f in struct_def.fields() {
let def_id = tcx.hir.local_def_id(f.id);
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
if !struct_def.is_struct() {
},
hir::ItemTy(_, ref generics) => {
ensure_no_ty_param_bounds(tcx, it.span, generics, "type");
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) => {
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
}
}
fn convert_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item_id: ast::NodeId) {
let trait_item = tcx.hir.expect_trait_item(trait_item_id);
let def_id = tcx.hir.local_def_id(trait_item.id);
- tcx.item_generics(def_id);
+ tcx.generics_of(def_id);
match trait_item.node {
hir::TraitItemKind::Const(..) |
hir::TraitItemKind::Type(_, Some(_)) |
hir::TraitItemKind::Method(..) => {
- tcx.item_type(def_id);
+ tcx.type_of(def_id);
}
hir::TraitItemKind::Type(_, None) => {}
};
- tcx.item_predicates(def_id);
+ tcx.predicates_of(def_id);
}
fn convert_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item_id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(impl_item_id);
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
fn convert_variant_ctor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
ctor_id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(ctor_id);
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
fn convert_enum_variant_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
variants: &[hir::Variant]) {
- let def = tcx.lookup_adt_def(def_id);
+ let def = tcx.adt_def(def_id);
let repr_type = def.repr.discr_type();
let initial = repr_type.initial_discriminant(tcx);
let mut prev_discr = None::<ConstInt>;
let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr());
prev_discr = Some(if let Some(e) = variant.node.disr_expr {
let expr_did = tcx.hir.local_def_id(e.node_id);
- let result = ty::queries::monomorphic_const_eval::get(tcx, variant.span, expr_did);
+ let substs = Substs::empty();
+ let result = tcx.at(variant.span).const_eval((expr_did, substs));
// enum variant evaluation happens before the global constant check
// so we need to report the real error
for f in variant.node.data.fields() {
let def_id = tcx.hir.local_def_id(f.id);
- tcx.item_generics(def_id);
- tcx.item_type(def_id);
- tcx.item_predicates(def_id);
+ tcx.generics_of(def_id);
+ tcx.type_of(def_id);
+ tcx.predicates_of(def_id);
}
// Convert the ctor, if any. This also registers the variant as
/// Ensures that the super-predicates of the trait with def-id
/// trait_def_id are converted and stored. This also ensures that
/// the transitive super-predicates are converted;
-fn super_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- trait_def_id: DefId)
- -> ty::GenericPredicates<'tcx> {
+fn super_predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ trait_def_id: DefId)
+ -> ty::GenericPredicates<'tcx> {
debug!("super_predicates(trait_def_id={:?})", trait_def_id);
let trait_node_id = tcx.hir.as_local_node_id(trait_def_id).unwrap();
// Now require that immediate supertraits are converted,
// which will, in turn, reach indirect supertraits.
for bound in superbounds.iter().filter_map(|p| p.to_opt_poly_trait_ref()) {
- ty::queries::super_predicates::get(tcx, item.span, bound.def_id());
+ tcx.at(item.span).super_predicates_of(bound.def_id());
}
ty::GenericPredicates {
tcx.alloc_trait_def(def)
}
-fn generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> &'tcx ty::Generics {
+fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> &'tcx ty::Generics {
use rustc::hir::map::*;
use rustc::hir::*;
let mut parent_has_self = false;
let mut own_start = has_self as u32;
let (parent_regions, parent_types) = parent_def_id.map_or((0, 0), |def_id| {
- let generics = tcx.item_generics(def_id);
+ let generics = tcx.generics_of(def_id);
assert_eq!(has_self, false);
parent_has_self = generics.has_self;
own_start = generics.count() as u32;
})
}
-fn ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> Ty<'tcx> {
+fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> Ty<'tcx> {
use rustc::hir::map::*;
use rustc::hir::*;
ItemEnum(..) |
ItemStruct(..) |
ItemUnion(..) => {
- let def = tcx.lookup_adt_def(def_id);
+ let def = tcx.adt_def(def_id);
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_adt(def, substs)
}
NodeStructCtor(&ref def) |
NodeVariant(&Spanned { node: hir::Variant_ { data: ref def, .. }, .. }) => {
- let ty = tcx.item_type(tcx.hir.get_parent_did(node_id));
+ let ty = tcx.type_of(tcx.hir.get_parent_did(node_id));
match *def {
VariantData::Unit(..) | VariantData::Struct(..) => ty,
VariantData::Tuple(ref fields, _) => {
let inputs = fields.iter().map(|f| {
- tcx.item_type(tcx.hir.local_def_id(f.id))
+ tcx.type_of(tcx.hir.local_def_id(f.id))
});
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs, ty::Binder(tcx.mk_fn_sig(
NodeVariant(&Spanned { node: Variant_ { disr_expr: Some(e), .. }, .. })
if e.node_id == node_id => {
- tcx.lookup_adt_def(tcx.hir.get_parent_did(node_id))
+ tcx.adt_def(tcx.hir.get_parent_did(node_id))
.repr.discr_type().to_ty(tcx)
}
NodeTy(&hir::Ty { node: TyImplTrait(..), .. }) => {
let owner = tcx.hir.get_parent_did(node_id);
- tcx.item_tables(owner).node_id_to_type(node_id)
+ tcx.typeck_tables_of(owner).node_id_to_type(node_id)
}
x => {
}
hir::ItemImpl(.., ref opt_trait_ref, _, _) => {
opt_trait_ref.as_ref().map(|ast_trait_ref| {
- let selfty = tcx.item_type(def_id);
+ let selfty = tcx.type_of(def_id);
AstConv::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
})
}
}
}
+fn impl_polarity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> hir::ImplPolarity {
+ let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
+ match tcx.hir.expect_item(node_id).node {
+ hir::ItemImpl(_, polarity, ..) => polarity,
+ ref item => bug!("impl_polarity: {:?} not an impl", item)
+ }
+}
+
// Is it marked with ?Sized
fn is_unsized<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>,
ast_bounds: &[hir::TyParamBound],
.filter(move |l| !tcx.named_region_map.late_bound.contains(&l.lifetime.id))
}
-fn predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- def_id: DefId)
- -> ty::GenericPredicates<'tcx> {
+fn predicates_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> ty::GenericPredicates<'tcx> {
use rustc::hir::map::*;
use rustc::hir::*;
_ => &no_generics
};
- let generics = tcx.item_generics(def_id);
+ let generics = tcx.generics_of(def_id);
let parent_count = generics.parent_count() as u32;
let has_own_self = generics.has_self && parent_count == 0;
// on a trait we need to add in the supertrait bounds and bounds found on
// associated types.
if let Some((trait_ref, _)) = is_trait {
- predicates = tcx.item_super_predicates(def_id).predicates;
+ predicates = tcx.super_predicates_of(def_id).predicates;
// Add in a predicate that `Self:Trait` (where `Trait` is the
// current trait). This is needed for builtin bounds.
// in trait checking. See `setup_constraining_predicates`
// for details.
if let NodeItem(&Item { node: ItemImpl(..), .. }) = node {
- let self_ty = tcx.item_type(def_id);
+ let self_ty = tcx.type_of(def_id);
let trait_ref = tcx.impl_trait_ref(def_id);
ctp::setup_constraining_predicates(&mut predicates,
trait_ref,
let substs = Substs::identity_for_item(tcx, def_id);
tcx.mk_fn_def(def_id, substs, fty)
}
+
+fn is_foreign_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId)
+ -> bool {
+ match tcx.hir.get_if_local(def_id) {
+ Some(hir_map::NodeForeignItem(..)) => true,
+ Some(_) => false,
+ _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id)
+ }
+}
// E0248, // value used as a type, now reported earlier during resolution as E0412
// E0249,
// E0319, // trait impls for defaulted traits allowed just for structs/enums
- E0320, // recursive overflow during dropck
// E0372, // coherence not object safe
E0377, // the trait `CoerceUnsized` may only be implemented for a coercion
// between structures with the same definition
// but `{}` was found in the type `{}`
E0567, // auto traits can not have type parameters
E0568, // auto-traits can not have predicates,
+ E0588, // packed struct cannot transitively contain a `[repr(align)]` struct
E0592, // duplicate definitions with name `{}`
}
impl_item_refs: &[hir::ImplItemRef])
{
// Every lifetime used in an associated type must be constrained.
- let impl_self_ty = tcx.item_type(impl_def_id);
- let impl_generics = tcx.item_generics(impl_def_id);
- let impl_predicates = tcx.item_predicates(impl_def_id);
+ let impl_self_ty = tcx.type_of(impl_def_id);
+ let impl_generics = tcx.generics_of(impl_def_id);
+ let impl_predicates = tcx.predicates_of(impl_def_id);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
let mut input_parameters = ctp::parameters_for_impl(impl_self_ty, impl_trait_ref);
item.kind == ty::AssociatedKind::Type && item.defaultness.has_value()
})
.flat_map(|def_id| {
- ctp::parameters_for(&tcx.item_type(def_id), true)
+ ctp::parameters_for(&tcx.type_of(def_id), true)
}).collect();
for (ty_lifetime, lifetime) in impl_generics.regions.iter()
.zip(&impl_hir_generics.lifetimes)
main_id: ast::NodeId,
main_span: Span) {
let main_def_id = tcx.hir.local_def_id(main_id);
- let main_t = tcx.item_type(main_def_id);
+ let main_t = tcx.type_of(main_def_id);
match main_t.sty {
ty::TyFnDef(..) => {
match tcx.hir.find(main_id) {
start_id: ast::NodeId,
start_span: Span) {
let start_def_id = tcx.hir.local_def_id(start_id);
- let start_t = tcx.item_type(start_def_id);
+ let start_t = tcx.type_of(start_def_id);
match start_t.sty {
ty::TyFnDef(..) => {
match tcx.hir.find(start_id) {
hir::ItemEnum(..) |
hir::ItemStruct(..) |
hir::ItemUnion(..) => {
- let generics = tcx.item_generics(did);
+ let generics = tcx.generics_of(did);
// Not entirely obvious: constraints on structs/enums do not
// affect the variance of their type parameters. See discussion
//
// self.add_constraints_from_generics(generics);
- for field in tcx.lookup_adt_def(did).all_fields() {
+ for field in tcx.adt_def(did).all_fields() {
self.add_constraints_from_ty(generics,
- tcx.item_type(field.did),
+ tcx.type_of(field.did),
self.covariant);
}
}
hir::ItemTrait(..) => {
- let generics = tcx.item_generics(did);
+ let generics = tcx.generics_of(did);
let trait_ref = ty::TraitRef {
def_id: did,
substs: Substs::identity_for_item(tcx, did)
} else {
// Parameter on an item defined within another crate:
// variance already inferred, just look it up.
- let variances = self.tcx().item_variances(item_def_id);
+ let variances = self.tcx().variances_of(item_def_id);
self.constant_term(variances[index])
}
}
trait_ref,
variance);
- let trait_generics = self.tcx().item_generics(trait_ref.def_id);
+ let trait_generics = self.tcx().generics_of(trait_ref.def_id);
// This edge is actually implied by the call to
- // `lookup_trait_def`, but I'm trying to be future-proof. See
+ // `trait_def`, but I'm trying to be future-proof. See
// README.md for a discussion on dep-graph management.
self.tcx().dep_graph.read(VarianceDepNode(trait_ref.def_id));
}
ty::TyAdt(def, substs) => {
- let adt_generics = self.tcx().item_generics(def.did);
+ let adt_generics = self.tcx().generics_of(def.did);
// This edge is actually implied by the call to
- // `lookup_trait_def`, but I'm trying to be future-proof. See
+ // `trait_def`, but I'm trying to be future-proof. See
// README.md for a discussion on dep-graph management.
self.tcx().dep_graph.read(VarianceDepNode(def.did));
ty::TyProjection(ref data) => {
let trait_ref = &data.trait_ref;
- let trait_generics = self.tcx().item_generics(trait_ref.def_id);
+ let trait_generics = self.tcx().generics_of(trait_ref.def_id);
// This edge is actually implied by the call to
- // `lookup_trait_def`, but I'm trying to be future-proof. See
+ // `trait_def`, but I'm trying to be future-proof. See
// README.md for a discussion on dep-graph management.
self.tcx().dep_graph.read(VarianceDepNode(trait_ref.def_id));
item_variances);
}
- tcx.maps.variances
- .borrow_mut()
+ tcx.maps.variances_of.borrow_mut()
.insert(item_def_id, Rc::new(item_variances));
}
}
// parameters".
if self.num_inferred() == inferreds_on_entry {
let item_def_id = self.tcx.hir.local_def_id(item_id);
- self.tcx.maps.variances
- .borrow_mut()
+ self.tcx.maps.variances_of.borrow_mut()
.insert(item_def_id, self.empty_variances.clone());
}
}
use std::iter::once;
use syntax::ast;
-use syntax_pos::DUMMY_SP;
use rustc::hir;
use rustc::hir::def::{Def, CtorKind};
pub fn build_external_trait(cx: &DocContext, did: DefId) -> clean::Trait {
let trait_items = cx.tcx.associated_items(did).map(|item| item.clean(cx)).collect();
- let predicates = cx.tcx.item_predicates(did);
- let generics = (cx.tcx.item_generics(did), &predicates).clean(cx);
+ let predicates = cx.tcx.predicates_of(did);
+ let generics = (cx.tcx.generics_of(did), &predicates).clean(cx);
let generics = filter_non_trait_generics(did, generics);
let (generics, supertrait_bounds) = separate_supertrait_bounds(generics);
clean::Trait {
- unsafety: cx.tcx.lookup_trait_def(did).unsafety,
+ unsafety: cx.tcx.trait_def(did).unsafety,
generics: generics,
items: trait_items,
bounds: supertrait_bounds,
}
fn build_external_function(cx: &DocContext, did: DefId) -> clean::Function {
- let sig = cx.tcx.item_type(did).fn_sig();
+ let sig = cx.tcx.type_of(did).fn_sig();
let constness = if cx.tcx.sess.cstore.is_const_fn(did) {
hir::Constness::Const
hir::Constness::NotConst
};
- let predicates = cx.tcx.item_predicates(did);
+ let predicates = cx.tcx.predicates_of(did);
clean::Function {
decl: (did, sig).clean(cx),
- generics: (cx.tcx.item_generics(did), &predicates).clean(cx),
+ generics: (cx.tcx.generics_of(did), &predicates).clean(cx),
unsafety: sig.unsafety(),
constness: constness,
abi: sig.abi(),
}
fn build_enum(cx: &DocContext, did: DefId) -> clean::Enum {
- let predicates = cx.tcx.item_predicates(did);
+ let predicates = cx.tcx.predicates_of(did);
clean::Enum {
- generics: (cx.tcx.item_generics(did), &predicates).clean(cx),
+ generics: (cx.tcx.generics_of(did), &predicates).clean(cx),
variants_stripped: false,
- variants: cx.tcx.lookup_adt_def(did).variants.clean(cx),
+ variants: cx.tcx.adt_def(did).variants.clean(cx),
}
}
fn build_struct(cx: &DocContext, did: DefId) -> clean::Struct {
- let predicates = cx.tcx.item_predicates(did);
- let variant = cx.tcx.lookup_adt_def(did).struct_variant();
+ let predicates = cx.tcx.predicates_of(did);
+ let variant = cx.tcx.adt_def(did).struct_variant();
clean::Struct {
struct_type: match variant.ctor_kind {
CtorKind::Fn => doctree::Tuple,
CtorKind::Const => doctree::Unit,
},
- generics: (cx.tcx.item_generics(did), &predicates).clean(cx),
+ generics: (cx.tcx.generics_of(did), &predicates).clean(cx),
fields: variant.fields.clean(cx),
fields_stripped: false,
}
}
fn build_union(cx: &DocContext, did: DefId) -> clean::Union {
- let predicates = cx.tcx.item_predicates(did);
- let variant = cx.tcx.lookup_adt_def(did).struct_variant();
+ let predicates = cx.tcx.predicates_of(did);
+ let variant = cx.tcx.adt_def(did).struct_variant();
clean::Union {
struct_type: doctree::Plain,
- generics: (cx.tcx.item_generics(did), &predicates).clean(cx),
+ generics: (cx.tcx.generics_of(did), &predicates).clean(cx),
fields: variant.fields.clean(cx),
fields_stripped: false,
}
}
fn build_type_alias(cx: &DocContext, did: DefId) -> clean::Typedef {
- let predicates = cx.tcx.item_predicates(did);
+ let predicates = cx.tcx.predicates_of(did);
clean::Typedef {
- type_: cx.tcx.item_type(did).clean(cx),
- generics: (cx.tcx.item_generics(did), &predicates).clean(cx),
+ type_: cx.tcx.type_of(did).clean(cx),
+ generics: (cx.tcx.generics_of(did), &predicates).clean(cx),
}
}
let tcx = cx.tcx;
let mut impls = Vec::new();
- for &did in ty::queries::inherent_impls::get(tcx, DUMMY_SP, did).iter() {
+ for &did in tcx.inherent_impls(did).iter() {
build_impl(cx, did, &mut impls);
}
});
}
- let for_ = tcx.item_type(did).clean(cx);
+ let for_ = tcx.type_of(did).clean(cx);
// Only inline impl if the implementing type is
// reachable in rustdoc generated documentation
}
}
- let predicates = tcx.item_predicates(did);
+ let predicates = tcx.predicates_of(did);
let trait_items = tcx.associated_items(did).filter_map(|item| {
match item.kind {
ty::AssociatedKind::Const => {
Some(clean::Item {
name: Some(item.name.clean(cx)),
inner: clean::AssociatedConstItem(
- tcx.item_type(item.def_id).clean(cx),
+ tcx.type_of(item.def_id).clean(cx),
default,
),
source: tcx.def_span(item.def_id).clean(cx),
}
ty::AssociatedKind::Type => {
let typedef = clean::Typedef {
- type_: tcx.item_type(item.def_id).clean(cx),
+ type_: tcx.type_of(item.def_id).clean(cx),
generics: clean::Generics {
lifetimes: vec![],
type_params: vec![],
}
}
}).collect::<Vec<_>>();
- let polarity = tcx.trait_impl_polarity(did);
+ let polarity = tcx.impl_polarity(did);
let trait_ = associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(polyt, _) => polyt.trait_,
provided_trait_methods: provided,
trait_: trait_,
for_: for_,
- generics: (tcx.item_generics(did), &predicates).clean(cx),
+ generics: (tcx.generics_of(did), &predicates).clean(cx),
items: trait_items,
polarity: Some(polarity.clean(cx)),
}),
}
fn print_inlined_const(cx: &DocContext, did: DefId) -> String {
- let body = cx.tcx.sess.cstore.maybe_get_item_body(cx.tcx, did).unwrap();
+ let body = cx.tcx.sess.cstore.item_body(cx.tcx, did);
let inlined = InlinedConst {
nested_bodies: cx.tcx.sess.cstore.item_body_nested_bodies(did)
};
fn build_const(cx: &DocContext, did: DefId) -> clean::Constant {
clean::Constant {
- type_: cx.tcx.item_type(did).clean(cx),
+ type_: cx.tcx.type_of(did).clean(cx),
expr: print_inlined_const(cx, did)
}
}
fn build_static(cx: &DocContext, did: DefId, mutable: bool) -> clean::Static {
clean::Static {
- type_: cx.tcx.item_type(did).clean(cx),
+ type_: cx.tcx.type_of(did).clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
did: self.def_id,
bounds: vec![], // these are filled in from the where-clauses
default: if self.has_default {
- Some(cx.tcx.item_type(self.def_id).clean(cx))
+ Some(cx.tcx.type_of(self.def_id).clean(cx))
} else {
None
}
fn clean(&self, cx: &DocContext) -> Item {
let inner = match self.kind {
ty::AssociatedKind::Const => {
- let ty = cx.tcx.item_type(self.def_id);
+ let ty = cx.tcx.type_of(self.def_id);
AssociatedConstItem(ty.clean(cx), None)
}
ty::AssociatedKind::Method => {
- let generics = (cx.tcx.item_generics(self.def_id),
- &cx.tcx.item_predicates(self.def_id)).clean(cx);
- let sig = cx.tcx.item_type(self.def_id).fn_sig();
+ let generics = (cx.tcx.generics_of(self.def_id),
+ &cx.tcx.predicates_of(self.def_id)).clean(cx);
+ let sig = cx.tcx.type_of(self.def_id).fn_sig();
let mut decl = (self.def_id, sig).clean(cx);
if self.method_has_self_argument {
let self_ty = match self.container {
ty::ImplContainer(def_id) => {
- cx.tcx.item_type(def_id)
+ cx.tcx.type_of(def_id)
}
ty::TraitContainer(_) => cx.tcx.mk_self_type()
};
// are actually located on the trait/impl itself, so we need to load
// all of the generics from there and then look for bounds that are
// applied to this associated type in question.
- let predicates = cx.tcx.item_predicates(did);
- let generics = (cx.tcx.item_generics(did), &predicates).clean(cx);
+ let predicates = cx.tcx.predicates_of(did);
+ let generics = (cx.tcx.generics_of(did), &predicates).clean(cx);
generics.where_predicates.iter().filter_map(|pred| {
let (name, self_type, trait_, bounds) = match *pred {
WherePredicate::BoundPredicate {
}
let ty = if self.defaultness.has_value() {
- Some(cx.tcx.item_type(self.def_id))
+ Some(cx.tcx.type_of(self.def_id))
} else {
None
};
ty::TyAnon(def_id, substs) => {
// Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
// by looking up the projections associated with the def_id.
- let item_predicates = cx.tcx.item_predicates(def_id);
+ let predicates_of = cx.tcx.predicates_of(def_id);
let substs = cx.tcx.lift(&substs).unwrap();
- let bounds = item_predicates.instantiate(cx.tcx, substs);
+ let bounds = predicates_of.instantiate(cx.tcx, substs);
ImplTrait(bounds.predicates.into_iter().filter_map(|predicate| {
predicate.to_opt_poly_trait_ref().clean(cx)
}).collect())
stability: get_stability(cx, self.did),
deprecation: get_deprecation(cx, self.did),
def_id: self.did,
- inner: StructFieldItem(cx.tcx.item_type(self.did).clean(cx)),
+ inner: StructFieldItem(cx.tcx.type_of(self.did).clean(cx)),
}
}
}
CtorKind::Const => VariantKind::CLike,
CtorKind::Fn => {
VariantKind::Tuple(
- self.fields.iter().map(|f| cx.tcx.item_type(f.did).clean(cx)).collect()
+ self.fields.iter().map(|f| cx.tcx.type_of(f.did).clean(cx)).collect()
)
}
CtorKind::Fictive => {
def_id: field.did,
stability: get_stability(cx, field.did),
deprecation: get_deprecation(cx, field.did),
- inner: StructFieldItem(cx.tcx.item_type(field.did).clean(cx))
+ inner: StructFieldItem(cx.tcx.type_of(field.did).clean(cx))
}
}).collect()
})
if child == trait_ {
return true
}
- let predicates = cx.tcx.item_super_predicates(child).predicates;
+ let predicates = cx.tcx.super_predicates_of(child).predicates;
predicates.iter().filter_map(|pred| {
if let ty::Predicate::Trait(ref pred) = *pred {
if pred.0.trait_ref.self_ty().is_self() {
use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
use std::default::Default;
+use std::ffi::CString;
use std::fmt::{self, Write};
use std::str;
use syntax::feature_gate::UnstableFeatures;
use html::render::derive_id;
use html::toc::TocBuilder;
use html::highlight;
+use html::escape::Escape;
use test;
use pulldown_cmark::{html, Event, Tag, Parser};
use pulldown_cmark::{Options, OPTION_ENABLE_FOOTNOTES, OPTION_ENABLE_TABLES};
+#[derive(PartialEq, Debug, Clone, Copy)]
+pub enum RenderType {
+ Hoedown,
+ Pulldown,
+}
+
/// A unit struct which has the `fmt::Display` trait implemented. When
/// formatted, this struct will emit the HTML corresponding to the rendered
/// version of the contained markdown string.
// The second parameter is whether we need a shorter version or not.
-pub struct Markdown<'a>(pub &'a str);
+pub struct Markdown<'a>(pub &'a str, pub RenderType);
/// A unit struct like `Markdown`, that renders the markdown with a
/// table of contents.
-pub struct MarkdownWithToc<'a>(pub &'a str);
+pub struct MarkdownWithToc<'a>(pub &'a str, pub RenderType);
/// A unit struct like `Markdown`, that renders the markdown escaping HTML tags.
-pub struct MarkdownHtml<'a>(pub &'a str);
+pub struct MarkdownHtml<'a>(pub &'a str, pub RenderType);
/// A unit struct like `Markdown`, that renders only the first paragraph.
pub struct MarkdownSummaryLine<'a>(pub &'a str);
}
}
+/// Returns a new string with all consecutive whitespace collapsed into
+/// single spaces.
+///
+/// Any leading or trailing whitespace will be trimmed.
+fn collapse_whitespace(s: &str) -> String {
+ s.split_whitespace().collect::<Vec<_>>().join(" ")
+}
+
/// Convert chars from a title for an id.
///
/// "Hello, world!" -> "hello-world"
const HOEDOWN_EXT_STRIKETHROUGH: libc::c_uint = 1 << 4;
const HOEDOWN_EXT_SUPERSCRIPT: libc::c_uint = 1 << 8;
const HOEDOWN_EXT_FOOTNOTES: libc::c_uint = 1 << 2;
+const HOEDOWN_HTML_ESCAPE: libc::c_uint = 1 << 1;
const HOEDOWN_EXTENSIONS: libc::c_uint =
HOEDOWN_EXT_NO_INTRA_EMPHASIS | HOEDOWN_EXT_TABLES |
unit: libc::size_t,
}
+struct MyOpaque {
+ dfltblk: extern "C" fn(*mut hoedown_buffer, *const hoedown_buffer,
+ *const hoedown_buffer, *const hoedown_renderer_data,
+ libc::size_t),
+ toc_builder: Option<TocBuilder>,
+}
+
extern {
fn hoedown_html_renderer_new(render_flags: libc::c_uint,
nesting_level: libc::c_int)
fn hoedown_document_free(md: *mut hoedown_document);
fn hoedown_buffer_new(unit: libc::size_t) -> *mut hoedown_buffer;
+ fn hoedown_buffer_puts(b: *mut hoedown_buffer, c: *const libc::c_char);
fn hoedown_buffer_free(b: *mut hoedown_buffer);
}
}
}
+pub fn render(w: &mut fmt::Formatter,
+ s: &str,
+ print_toc: bool,
+ html_flags: libc::c_uint) -> fmt::Result {
+ extern fn block(ob: *mut hoedown_buffer, orig_text: *const hoedown_buffer,
+ lang: *const hoedown_buffer, data: *const hoedown_renderer_data,
+ line: libc::size_t) {
+ unsafe {
+ if orig_text.is_null() { return }
+
+ let opaque = (*data).opaque as *mut hoedown_html_renderer_state;
+ let my_opaque: &MyOpaque = &*((*opaque).opaque as *const MyOpaque);
+ let text = (*orig_text).as_bytes();
+ let origtext = str::from_utf8(text).unwrap();
+ let origtext = origtext.trim_left();
+ debug!("docblock: ==============\n{:?}\n=======", text);
+ let rendered = if lang.is_null() || origtext.is_empty() {
+ false
+ } else {
+ let rlang = (*lang).as_bytes();
+ let rlang = str::from_utf8(rlang).unwrap();
+ if !LangString::parse(rlang).rust {
+ (my_opaque.dfltblk)(ob, orig_text, lang,
+ opaque as *const hoedown_renderer_data,
+ line);
+ true
+ } else {
+ false
+ }
+ };
+
+ let lines = origtext.lines().filter(|l| {
+ stripped_filtered_line(*l).is_none()
+ });
+ let text = lines.collect::<Vec<&str>>().join("\n");
+ if rendered { return }
+ PLAYGROUND.with(|play| {
+ // insert newline to clearly separate it from the
+ // previous block so we can shorten the html output
+ let mut s = String::from("\n");
+ let playground_button = play.borrow().as_ref().and_then(|&(ref krate, ref url)| {
+ if url.is_empty() {
+ return None;
+ }
+ let test = origtext.lines().map(|l| {
+ stripped_filtered_line(l).unwrap_or(l)
+ }).collect::<Vec<&str>>().join("\n");
+ let krate = krate.as_ref().map(|s| &**s);
+ let test = test::maketest(&test, krate, false,
+ &Default::default());
+ let channel = if test.contains("#![feature(") {
+ "&version=nightly"
+ } else {
+ ""
+ };
+ // These characters don't need to be escaped in a URI.
+ // FIXME: use a library function for percent encoding.
+ fn dont_escape(c: u8) -> bool {
+ (b'a' <= c && c <= b'z') ||
+ (b'A' <= c && c <= b'Z') ||
+ (b'0' <= c && c <= b'9') ||
+ c == b'-' || c == b'_' || c == b'.' ||
+ c == b'~' || c == b'!' || c == b'\'' ||
+ c == b'(' || c == b')' || c == b'*'
+ }
+ let mut test_escaped = String::new();
+ for b in test.bytes() {
+ if dont_escape(b) {
+ test_escaped.push(char::from(b));
+ } else {
+ write!(test_escaped, "%{:02X}", b).unwrap();
+ }
+ }
+ Some(format!(
+ r#"<a class="test-arrow" target="_blank" href="{}?code={}{}">Run</a>"#,
+ url, test_escaped, channel
+ ))
+ });
+ s.push_str(&highlight::render_with_highlighting(
+ &text,
+ Some("rust-example-rendered"),
+ None,
+ playground_button.as_ref().map(String::as_str)));
+ let output = CString::new(s).unwrap();
+ hoedown_buffer_puts(ob, output.as_ptr());
+ })
+ }
+ }
+
+ extern fn header(ob: *mut hoedown_buffer, text: *const hoedown_buffer,
+ level: libc::c_int, data: *const hoedown_renderer_data,
+ _: libc::size_t) {
+ // hoedown does this, we may as well too
+ unsafe { hoedown_buffer_puts(ob, "\n\0".as_ptr() as *const _); }
+
+ // Extract the text provided
+ let s = if text.is_null() {
+ "".to_owned()
+ } else {
+ let s = unsafe { (*text).as_bytes() };
+ str::from_utf8(&s).unwrap().to_owned()
+ };
+
+ // Discard '<em>', '<code>' tags and some escaped characters,
+ // transform the contents of the header into a hyphenated string
+ // without non-alphanumeric characters other than '-' and '_'.
+ //
+ // This is a terrible hack working around how hoedown gives us rendered
+ // html for text rather than the raw text.
+ let mut id = s.clone();
+ let repl_sub = vec!["<em>", "</em>", "<code>", "</code>",
+ "<strong>", "</strong>",
+                                "&lt;", "&gt;", "&amp;", "&#39;", "&quot;"];
+ for sub in repl_sub {
+ id = id.replace(sub, "");
+ }
+ let id = id.chars().filter_map(|c| {
+ if c.is_alphanumeric() || c == '-' || c == '_' {
+ if c.is_ascii() {
+ Some(c.to_ascii_lowercase())
+ } else {
+ Some(c)
+ }
+ } else if c.is_whitespace() && c.is_ascii() {
+ Some('-')
+ } else {
+ None
+ }
+ }).collect::<String>();
+
+ let opaque = unsafe { (*data).opaque as *mut hoedown_html_renderer_state };
+ let opaque = unsafe { &mut *((*opaque).opaque as *mut MyOpaque) };
+
+ let id = derive_id(id);
+
+ let sec = opaque.toc_builder.as_mut().map_or("".to_owned(), |builder| {
+ format!("{} ", builder.push(level as u32, s.clone(), id.clone()))
+ });
+
+ // Render the HTML
+ let text = format!("<h{lvl} id='{id}' class='section-header'>\
+ <a href='#{id}'>{sec}{}</a></h{lvl}>",
+ s, lvl = level, id = id, sec = sec);
+
+ let text = CString::new(text).unwrap();
+ unsafe { hoedown_buffer_puts(ob, text.as_ptr()) }
+ }
+
+ extern fn codespan(
+ ob: *mut hoedown_buffer,
+ text: *const hoedown_buffer,
+ _: *const hoedown_renderer_data,
+ _: libc::size_t
+ ) -> libc::c_int {
+ let content = if text.is_null() {
+ "".to_owned()
+ } else {
+ let bytes = unsafe { (*text).as_bytes() };
+ let s = str::from_utf8(bytes).unwrap();
+ collapse_whitespace(s)
+ };
+
+ let content = format!("<code>{}</code>", Escape(&content));
+ let element = CString::new(content).unwrap();
+ unsafe { hoedown_buffer_puts(ob, element.as_ptr()); }
+ // Return anything except 0, which would mean "also print the code span verbatim".
+ 1
+ }
+
+ unsafe {
+ let ob = hoedown_buffer_new(DEF_OUNIT);
+ let renderer = hoedown_html_renderer_new(html_flags, 0);
+ let mut opaque = MyOpaque {
+ dfltblk: (*renderer).blockcode.unwrap(),
+ toc_builder: if print_toc {Some(TocBuilder::new())} else {None}
+ };
+ (*((*renderer).opaque as *mut hoedown_html_renderer_state)).opaque
+ = &mut opaque as *mut _ as *mut libc::c_void;
+ (*renderer).blockcode = Some(block);
+ (*renderer).header = Some(header);
+ (*renderer).codespan = Some(codespan);
+
+ let document = hoedown_document_new(renderer, HOEDOWN_EXTENSIONS, 16);
+ hoedown_document_render(document, ob, s.as_ptr(),
+ s.len() as libc::size_t);
+ hoedown_document_free(document);
+
+ hoedown_html_renderer_free(renderer);
+
+ let mut ret = opaque.toc_builder.map_or(Ok(()), |builder| {
+ write!(w, "<nav id=\"TOC\">{}</nav>", builder.into_toc())
+ });
+
+ if ret.is_ok() {
+ let buf = (*ob).as_bytes();
+ ret = w.write_str(str::from_utf8(buf).unwrap());
+ }
+ hoedown_buffer_free(ob);
+ ret
+ }
+}
+
pub fn old_find_testable_code(doc: &str, tests: &mut ::test::Collector, position: Span) {
extern fn block(_ob: *mut hoedown_buffer,
text: *const hoedown_buffer,
LangString::parse(s)
};
if !block_info.rust { return }
+ let text = (*text).as_bytes();
let opaque = (*data).opaque as *mut hoedown_html_renderer_state;
let tests = &mut *((*opaque).opaque as *mut ::test::Collector);
- let line = tests.get_line() + line;
+ let text = str::from_utf8(text).unwrap();
+ let lines = text.lines().map(|l| {
+ stripped_filtered_line(l).unwrap_or(l)
+ });
+ let text = lines.collect::<Vec<&str>>().join("\n");
let filename = tests.get_filename();
- tests.add_old_test(line, filename);
+
+ if tests.render_type == RenderType::Hoedown {
+ let line = tests.get_line() + line;
+ tests.add_test(text.to_owned(),
+ block_info.should_panic, block_info.no_run,
+ block_info.ignore, block_info.test_harness,
+ block_info.compile_fail, block_info.error_codes,
+ line, filename);
+ } else {
+ tests.add_old_test(text, filename);
+ }
}
}
}
tests.set_position(position);
-
unsafe {
let ob = hoedown_buffer_new(DEF_OUNIT);
let renderer = hoedown_html_renderer_new(0, 0);
impl<'a> fmt::Display for Markdown<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- let Markdown(md) = *self;
+ let Markdown(md, render_type) = *self;
+
// This is actually common enough to special-case
if md.is_empty() { return Ok(()) }
+ if render_type == RenderType::Hoedown {
+ render(fmt, md, false, 0)
+ } else {
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
- let mut opts = Options::empty();
- opts.insert(OPTION_ENABLE_TABLES);
- opts.insert(OPTION_ENABLE_FOOTNOTES);
-
- let p = Parser::new_ext(md, opts);
+ let p = Parser::new_ext(md, opts);
- let mut s = String::with_capacity(md.len() * 3 / 2);
+ let mut s = String::with_capacity(md.len() * 3 / 2);
- html::push_html(&mut s,
- Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
+ html::push_html(&mut s,
+ Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
- fmt.write_str(&s)
+ fmt.write_str(&s)
+ }
}
}
impl<'a> fmt::Display for MarkdownWithToc<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- let MarkdownWithToc(md) = *self;
+ let MarkdownWithToc(md, render_type) = *self;
- let mut opts = Options::empty();
- opts.insert(OPTION_ENABLE_TABLES);
- opts.insert(OPTION_ENABLE_FOOTNOTES);
+ if render_type == RenderType::Hoedown {
+ render(fmt, md, true, 0)
+ } else {
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
- let p = Parser::new_ext(md, opts);
+ let p = Parser::new_ext(md, opts);
- let mut s = String::with_capacity(md.len() * 3 / 2);
+ let mut s = String::with_capacity(md.len() * 3 / 2);
- let mut toc = TocBuilder::new();
+ let mut toc = TocBuilder::new();
- html::push_html(&mut s,
- Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, Some(&mut toc)))));
+ html::push_html(&mut s,
+ Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, Some(&mut toc)))));
- write!(fmt, "<nav id=\"TOC\">{}</nav>", toc.into_toc())?;
+ write!(fmt, "<nav id=\"TOC\">{}</nav>", toc.into_toc())?;
- fmt.write_str(&s)
+ fmt.write_str(&s)
+ }
}
}
impl<'a> fmt::Display for MarkdownHtml<'a> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
- let MarkdownHtml(md) = *self;
+ let MarkdownHtml(md, render_type) = *self;
+
// This is actually common enough to special-case
if md.is_empty() { return Ok(()) }
+ if render_type == RenderType::Hoedown {
+ render(fmt, md, false, HOEDOWN_HTML_ESCAPE)
+ } else {
+ let mut opts = Options::empty();
+ opts.insert(OPTION_ENABLE_TABLES);
+ opts.insert(OPTION_ENABLE_FOOTNOTES);
- let mut opts = Options::empty();
- opts.insert(OPTION_ENABLE_TABLES);
- opts.insert(OPTION_ENABLE_FOOTNOTES);
-
- let p = Parser::new_ext(md, opts);
+ let p = Parser::new_ext(md, opts);
- // Treat inline HTML as plain text.
- let p = p.map(|event| match event {
- Event::Html(text) | Event::InlineHtml(text) => Event::Text(text),
- _ => event
- });
+ // Treat inline HTML as plain text.
+ let p = p.map(|event| match event {
+ Event::Html(text) | Event::InlineHtml(text) => Event::Text(text),
+ _ => event
+ });
- let mut s = String::with_capacity(md.len() * 3 / 2);
+ let mut s = String::with_capacity(md.len() * 3 / 2);
- html::push_html(&mut s,
- Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
+ html::push_html(&mut s,
+ Footnotes::new(CodeBlocks::new(HeadingLinks::new(p, None))));
- fmt.write_str(&s)
+ fmt.write_str(&s)
+ }
}
}
mod tests {
use super::{LangString, Markdown, MarkdownHtml};
use super::plain_summary_line;
+ use super::RenderType;
use html::render::reset_ids;
#[test]
#[test]
fn issue_17736() {
let markdown = "# title";
- format!("{}", Markdown(markdown));
+ format!("{}", Markdown(markdown, RenderType::Pulldown));
reset_ids(true);
}
#[test]
fn test_header() {
fn t(input: &str, expect: &str) {
- let output = format!("{}", Markdown(input));
+ let output = format!("{}", Markdown(input, RenderType::Pulldown));
assert_eq!(output, expect, "original: {}", input);
reset_ids(true);
}
#[test]
fn test_header_ids_multiple_blocks() {
fn t(input: &str, expect: &str) {
- let output = format!("{}", Markdown(input));
+ let output = format!("{}", Markdown(input, RenderType::Pulldown));
assert_eq!(output, expect, "original: {}", input);
}
#[test]
fn test_markdown_html_escape() {
fn t(input: &str, expect: &str) {
- let output = format!("{}", MarkdownHtml(input));
+ let output = format!("{}", MarkdownHtml(input, RenderType::Pulldown));
assert_eq!(output, expect, "original: {}", input);
}
use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace};
use html::format::fmt_impl_for_trait_page;
use html::item_type::ItemType;
-use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine};
+use html::markdown::{self, Markdown, MarkdownHtml, MarkdownSummaryLine, RenderType};
use html::{highlight, layout};
/// A pair of name and its optional document.
/// publicly reused items to redirect to the right location.
pub render_redirect_pages: bool,
pub shared: Arc<SharedContext>,
+ pub render_type: RenderType,
}
pub struct SharedContext {
dst: PathBuf,
passes: FxHashSet<String>,
css_file_extension: Option<PathBuf>,
- renderinfo: RenderInfo) -> Result<(), Error> {
+ renderinfo: RenderInfo,
+ render_type: RenderType) -> Result<(), Error> {
let src_root = match krate.src.parent() {
Some(p) => p.to_path_buf(),
None => PathBuf::new(),
dst: dst,
render_redirect_pages: false,
shared: Arc::new(scx),
+ render_type: render_type,
};
// Crawl the crate to build various caches used for the output
fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result {
document_stability(w, cx, item)?;
- document_full(w, item)?;
+ document_full(w, item, cx.render_type)?;
Ok(())
}
-fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink) -> fmt::Result {
+fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink,
+ render_type: RenderType) -> fmt::Result {
if let Some(s) = item.doc_value() {
let markdown = if s.contains('\n') {
format!("{} [Read more]({})",
format!("{}", &plain_summary_line(Some(s)))
};
write!(w, "<div class='docblock'>{}</div>",
- Markdown(&markdown))?;
+ Markdown(&markdown, render_type))?;
}
Ok(())
}
}
}
-fn document_full(w: &mut fmt::Formatter, item: &clean::Item) -> fmt::Result {
+fn document_full(w: &mut fmt::Formatter, item: &clean::Item,
+ render_type: RenderType) -> fmt::Result {
if let Some(s) = get_doc_value(item) {
write!(w, "<div class='docblock'>{}</div>",
- Markdown(&format!("{}{}", md_render_assoc_item(item), s)))?;
+ Markdown(&format!("{}{}", md_render_assoc_item(item), s), render_type))?;
}
Ok(())
}
</tr>",
name = *myitem.name.as_ref().unwrap(),
stab_docs = stab_docs,
- docs = MarkdownSummaryLine(doc_value),
+ docs = if cx.render_type == RenderType::Hoedown {
+ format!("{}",
+ shorter(Some(&Markdown(doc_value,
+ RenderType::Hoedown).to_string())))
+ } else {
+ format!("{}", MarkdownSummaryLine(doc_value))
+ },
class = myitem.type_(),
stab = myitem.stability_class().unwrap_or("".to_string()),
unsafety_flag = unsafety_flag,
} else {
String::new()
};
- let text = format!("Deprecated{}{}", since, MarkdownHtml(&deprecated_reason));
+ let text = format!("Deprecated{}{}",
+ since,
+ MarkdownHtml(&deprecated_reason, cx.render_type));
stability.push(format!("<div class='stab deprecated'>{}</div>", text))
};
let text = format!("<summary><span class=microscope>🔬</span> \
This is a nightly-only experimental API. {}\
</summary>{}",
- unstable_extra, MarkdownHtml(&stab.unstable_reason));
+ unstable_extra,
+ MarkdownHtml(&stab.unstable_reason, cx.render_type));
stability.push(format!("<div class='stab unstable'><details>{}</details></div>",
text));
}
String::new()
};
- let text = format!("Deprecated{}{}", since, MarkdownHtml(¬e));
+ let text = format!("Deprecated{}{}", since, MarkdownHtml(¬e, cx.render_type));
stability.push(format!("<div class='stab deprecated'>{}</div>", text))
}
write!(w, "</span>")?;
write!(w, "</h3>\n")?;
if let Some(ref dox) = i.impl_item.doc_value() {
- write!(w, "<div class='docblock'>{}</div>", Markdown(dox))?;
+ write!(w, "<div class='docblock'>{}</div>", Markdown(dox, cx.render_type))?;
}
}
// because impls can't have a stability.
document_stability(w, cx, it)?;
if get_doc_value(item).is_some() {
- document_full(w, item)?;
+ document_full(w, item, cx.render_type)?;
} else {
// In case the item isn't documented,
// provide short documentation from the trait.
- document_short(w, it, link)?;
+ document_short(w, it, link, cx.render_type)?;
}
}
} else {
}
} else {
document_stability(w, cx, item)?;
- document_short(w, item, link)?;
+ document_short(w, item, link, cx.render_type)?;
}
}
Ok(())
#![feature(staged_api)]
#![feature(test)]
#![feature(unicode)]
+#![feature(vec_remove_item)]
extern crate arena;
extern crate getopts;
use clean::AttributesExt;
+use html::markdown::RenderType;
+
struct Output {
krate: clean::Crate,
renderinfo: html::render::RenderInfo,
"URL to send code snippets to, may be reset by --markdown-playground-url \
or `#![doc(html_playground_url=...)]`",
"URL")),
+ unstable(optflag("", "enable-commonmark", "to enable commonmark doc rendering/testing")),
]
}
let css_file_extension = matches.opt_str("e").map(|s| PathBuf::from(&s));
let cfgs = matches.opt_strs("cfg");
+ let render_type = if matches.opt_present("enable-commonmark") {
+ RenderType::Pulldown
+ } else {
+ RenderType::Hoedown
+ };
+
if let Some(ref p) = css_file_extension {
if !p.is_file() {
writeln!(
match (should_test, markdown_input) {
(true, true) => {
- return markdown::test(input, cfgs, libs, externs, test_args, maybe_sysroot)
+ return markdown::test(input, cfgs, libs, externs, test_args, maybe_sysroot, render_type)
}
(true, false) => {
- return test::run(input, cfgs, libs, externs, test_args, crate_name, maybe_sysroot)
+ return test::run(input, cfgs, libs, externs, test_args, crate_name, maybe_sysroot,
+ render_type)
}
(false, true) => return markdown::render(input,
output.unwrap_or(PathBuf::from("doc")),
&matches, &external_html,
- !matches.opt_present("markdown-no-toc")),
+ !matches.opt_present("markdown-no-toc"),
+ render_type),
(false, false) => {}
}
output.unwrap_or(PathBuf::from("doc")),
passes.into_iter().collect(),
css_file_extension,
- renderinfo)
+ renderinfo,
+ render_type)
.expect("failed to generate documentation");
0
}
use html::escape::Escape;
use html::markdown;
use html::markdown::{Markdown, MarkdownWithToc, find_testable_code, old_find_testable_code};
+use html::markdown::RenderType;
use test::{TestOptions, Collector};
/// Separate any lines at the start of the file that begin with `# ` or `%`.
/// Render `input` (e.g. "foo.md") into an HTML file in `output`
/// (e.g. output = "bar" => "bar/foo.html").
pub fn render(input: &str, mut output: PathBuf, matches: &getopts::Matches,
- external_html: &ExternalHtml, include_toc: bool) -> isize {
+ external_html: &ExternalHtml, include_toc: bool,
+ render_type: RenderType) -> isize {
let input_p = Path::new(input);
output.push(input_p.file_stem().unwrap());
output.set_extension("html");
reset_ids(false);
let rendered = if include_toc {
- format!("{}", MarkdownWithToc(text))
+ format!("{}", MarkdownWithToc(text, render_type))
} else {
- format!("{}", Markdown(text))
+ format!("{}", Markdown(text, render_type))
};
let err = write!(
/// Run any tests/code examples in the markdown file `input`.
pub fn test(input: &str, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
- mut test_args: Vec<String>, maybe_sysroot: Option<PathBuf>) -> isize {
+ mut test_args: Vec<String>, maybe_sysroot: Option<PathBuf>,
+ render_type: RenderType) -> isize {
let input_str = match load_string(input) {
Ok(s) => s,
Err(LoadStringError::ReadFail) => return 1,
opts.no_crate_inject = true;
let mut collector = Collector::new(input.to_string(), cfgs, libs, externs,
true, opts, maybe_sysroot, None,
- Some(input.to_owned()));
+ Some(input.to_owned()),
+ render_type);
old_find_testable_code(&input_str, &mut collector, DUMMY_SP);
find_testable_code(&input_str, &mut collector, DUMMY_SP);
test_args.insert(0, "rustdoctest".to_string());
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use std::collections::HashMap;
use std::env;
use std::ffi::OsString;
use std::io::prelude::*;
use errors::emitter::ColorConfig;
use clean::Attributes;
-use html::markdown;
+use html::markdown::{self, RenderType};
#[derive(Clone, Default)]
pub struct TestOptions {
externs: Externs,
mut test_args: Vec<String>,
crate_name: Option<String>,
- maybe_sysroot: Option<PathBuf>)
+ maybe_sysroot: Option<PathBuf>,
+ render_type: RenderType)
-> isize {
let input_path = PathBuf::from(input);
let input = config::Input::File(input_path.clone());
opts,
maybe_sysroot,
Some(codemap),
- None);
+ None,
+ render_type);
{
let dep_graph = DepGraph::new(false);
pub struct Collector {
pub tests: Vec<testing::TestDescAndFn>,
// to be removed when hoedown will be definitely gone
- pub old_tests: Vec<String>,
+ pub old_tests: HashMap<String, Vec<String>>,
names: Vec<String>,
cfgs: Vec<String>,
libs: SearchPaths,
position: Span,
codemap: Option<Rc<CodeMap>>,
filename: Option<String>,
+ // to be removed once hoedown is gone
+ pub render_type: RenderType,
}
impl Collector {
pub fn new(cratename: String, cfgs: Vec<String>, libs: SearchPaths, externs: Externs,
use_headers: bool, opts: TestOptions, maybe_sysroot: Option<PathBuf>,
- codemap: Option<Rc<CodeMap>>, filename: Option<String>) -> Collector {
+ codemap: Option<Rc<CodeMap>>, filename: Option<String>,
+ render_type: RenderType) -> Collector {
Collector {
tests: Vec::new(),
- old_tests: Vec::new(),
+ old_tests: HashMap::new(),
names: Vec::new(),
cfgs: cfgs,
libs: libs,
position: DUMMY_SP,
codemap: codemap,
filename: filename,
+ render_type: render_type,
}
}
}
}
- pub fn add_old_test(&mut self, line: usize, filename: String) {
- let name = self.generate_name(line, &filename);
- self.old_tests.push(name);
+ // to be removed once hoedown is gone
+ fn generate_name_beginning(&self, filename: &str) -> String {
+ if self.use_headers {
+ if let Some(ref header) = self.current_header {
+ format!("{} - {} (line", filename, header)
+ } else {
+ format!("{} - (line", filename)
+ }
+ } else {
+ format!("{} - {} (line", filename, self.names.join("::"))
+ }
+ }
+
+ pub fn add_old_test(&mut self, test: String, filename: String) {
+ let name_beg = self.generate_name_beginning(&filename);
+ let entry = self.old_tests.entry(name_beg)
+ .or_insert(Vec::new());
+ entry.push(test.trim().to_owned());
}
pub fn add_test(&mut self, test: String,
as_test_harness: bool, compile_fail: bool, error_codes: Vec<String>,
line: usize, filename: String) {
let name = self.generate_name(line, &filename);
- if self.old_tests.iter().find(|&x| x == &name).is_none() {
- let _ = writeln!(&mut io::stderr(),
- "WARNING: {} Code block is not currently run as a test, but will in \
- future versions of rustdoc. Please ensure this code block is a \
- runnable test, or use the `ignore` directive.",
- name);
- return
+ // to be removed once hoedown is gone
+ if self.render_type == RenderType::Pulldown {
+ let name_beg = self.generate_name_beginning(&filename);
+ let mut found = false;
+ let test = test.trim().to_owned();
+ if let Some(entry) = self.old_tests.get_mut(&name_beg) {
+ found = entry.remove_item(&test).is_some();
+ }
+ if !found {
+ let _ = writeln!(&mut io::stderr(),
+ "WARNING: {} Code block is not currently run as a test, but will \
+ in future versions of rustdoc. Please ensure this code block is \
+ a runnable test, or use the `ignore` directive.",
+ name);
+ return
+ }
}
let cfgs = self.cfgs.clone();
let libs = self.libs.clone();
attrs.unindent_doc_comments();
if let Some(doc) = attrs.doc_value() {
self.collector.cnt = 0;
- markdown::old_find_testable_code(doc, self.collector,
+ if self.collector.render_type == RenderType::Pulldown {
+ markdown::old_find_testable_code(doc, self.collector,
+ attrs.span.unwrap_or(DUMMY_SP));
+ markdown::find_testable_code(doc, self.collector,
attrs.span.unwrap_or(DUMMY_SP));
- markdown::find_testable_code(doc, self.collector,
- attrs.span.unwrap_or(DUMMY_SP));
+ } else {
+ markdown::old_find_testable_code(doc, self.collector,
+ attrs.span.unwrap_or(DUMMY_SP));
+ }
}
nested(self);
std_unicode = { path = "../libstd_unicode" }
unwind = { path = "../libunwind" }
+[target.x86_64-apple-darwin.dependencies]
+rustc_asan = { path = "../librustc_asan" }
+rustc_tsan = { path = "../librustc_tsan" }
+
[target.x86_64-unknown-linux-gnu.dependencies]
rustc_asan = { path = "../librustc_asan" }
rustc_lsan = { path = "../librustc_lsan" }
/// An owning iterator over the entries of a `HashMap`.
///
-/// This `struct` is created by the [`into_iter`] method on [`HashMap`]
+/// This `struct` is created by the [`into_iter`] method on [`HashMap`][`HashMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.HashMap.html#method.into_iter
self.elem.read().0
}
- /// Deprecated, renamed to `remove_entry`
- #[unstable(feature = "map_entry_recover_keys", issue = "34285")]
- #[rustc_deprecated(since = "1.12.0", reason = "renamed to `remove_entry`")]
- pub fn remove_pair(self) -> (K, V) {
- self.remove_entry()
- }
-
/// Take the ownership of the key and value from the map.
///
/// # Examples
/// An owning iterator over the items of a `HashSet`.
///
-/// This `struct` is created by the [`into_iter`] method on [`HashSet`]
+/// This `struct` is created by the [`into_iter`] method on [`HashSet`][`HashSet`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`HashSet`]: struct.HashSet.html
#[cfg(not(test))]
use intrinsics;
#[cfg(not(test))]
-use libc::c_int;
-#[cfg(not(test))]
use num::FpCategory;
pub fn atan2f(a: c_float, b: c_float) -> c_float;
pub fn atanf(n: c_float) -> c_float;
pub fn coshf(n: c_float) -> c_float;
- pub fn frexpf(n: c_float, value: &mut c_int) -> c_float;
- pub fn ldexpf(x: c_float, n: c_int) -> c_float;
pub fn sinhf(n: c_float) -> c_float;
pub fn tanf(n: c_float) -> c_float;
pub fn tanhf(n: c_float) -> c_float;
pub use self::shims::*;
#[cfg(target_env = "msvc")]
mod shims {
- use libc::{c_float, c_int};
+ use libc::c_float;
#[inline]
pub unsafe fn acosf(n: c_float) -> c_float {
f64::cosh(n as f64) as c_float
}
- #[inline]
- #[allow(deprecated)]
- pub unsafe fn frexpf(x: c_float, value: &mut c_int) -> c_float {
- let (a, b) = f64::frexp(x as f64);
- *value = b as c_int;
- a as c_float
- }
-
- #[inline]
- #[allow(deprecated)]
- pub unsafe fn ldexpf(x: c_float, n: c_int) -> c_float {
- f64::ldexp(x as f64, n as isize) as c_float
- }
-
#[inline]
pub unsafe fn sinhf(n: c_float) -> c_float {
f64::sinh(n as f64) as c_float
#[inline]
pub fn classify(self) -> FpCategory { num::Float::classify(self) }
- /// Returns the mantissa, base 2 exponent, and sign as integers, respectively.
- /// The original number can be recovered by `sign * mantissa * 2 ^ exponent`.
- /// The floating point encoding is documented in the [Reference][floating-point].
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// use std::f32;
- ///
- /// let num = 2.0f32;
- ///
- /// // (8388608, -22, 1)
- /// let (mantissa, exponent, sign) = num.integer_decode();
- /// let sign_f = sign as f32;
- /// let mantissa_f = mantissa as f32;
- /// let exponent_f = num.powf(exponent as f32);
- ///
- /// // 1 * 8388608 * 2^(-22) == 2
- /// let abs_difference = (sign_f * mantissa_f * exponent_f - num).abs();
- ///
- /// assert!(abs_difference <= f32::EPSILON);
- /// ```
- /// [floating-point]: ../reference/types.html#machine-types
- #[unstable(feature = "float_extras", reason = "signature is undecided",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- #[allow(deprecated)]
- pub fn integer_decode(self) -> (u64, i16, i8) {
- num::Float::integer_decode(self)
- }
-
/// Returns the largest integer less than or equal to a number.
///
/// ```
#[inline]
pub fn to_radians(self) -> f32 { num::Float::to_radians(self) }
- /// Constructs a floating point number of `x*2^exp`.
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// use std::f32;
- /// // 3*2^2 - 12 == 0
- /// let abs_difference = (f32::ldexp(3.0, 2) - 12.0).abs();
- ///
- /// assert!(abs_difference <= f32::EPSILON);
- /// ```
- #[unstable(feature = "float_extras",
- reason = "pending integer conventions",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- pub fn ldexp(x: f32, exp: isize) -> f32 {
- unsafe { cmath::ldexpf(x, exp as c_int) }
- }
-
- /// Breaks the number into a normalized fraction and a base-2 exponent,
- /// satisfying:
- ///
- /// * `self = x * 2^exp`
- /// * `0.5 <= abs(x) < 1.0`
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// use std::f32;
- ///
- /// let x = 4.0f32;
- ///
- /// // (1/2)*2^3 -> 1 * 8/2 -> 4.0
- /// let f = x.frexp();
- /// let abs_difference_0 = (f.0 - 0.5).abs();
- /// let abs_difference_1 = (f.1 as f32 - 3.0).abs();
- ///
- /// assert!(abs_difference_0 <= f32::EPSILON);
- /// assert!(abs_difference_1 <= f32::EPSILON);
- /// ```
- #[unstable(feature = "float_extras",
- reason = "pending integer conventions",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- pub fn frexp(self) -> (f32, isize) {
- unsafe {
- let mut exp = 0;
- let x = cmath::frexpf(self, &mut exp);
- (x, exp as isize)
- }
- }
-
- /// Returns the next representable floating-point value in the direction of
- /// `other`.
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// use std::f32;
- ///
- /// let x = 1.0f32;
- ///
- /// let abs_diff = (x.next_after(2.0) - 1.00000011920928955078125_f32).abs();
- ///
- /// assert!(abs_diff <= f32::EPSILON);
- /// ```
- #[unstable(feature = "float_extras",
- reason = "unsure about its place in the world",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- pub fn next_after(self, other: f32) -> f32 {
- unsafe { cmath::nextafterf(self, other) }
- }
-
/// Returns the maximum of the two numbers.
///
/// ```
assert_eq!(1e-38f32.classify(), Fp::Subnormal);
}
- #[test]
- #[allow(deprecated)]
- fn test_integer_decode() {
- assert_eq!(3.14159265359f32.integer_decode(), (13176795, -22, 1));
- assert_eq!((-8573.5918555f32).integer_decode(), (8779358, -10, -1));
- assert_eq!(2f32.powf(100.0).integer_decode(), (8388608, 77, 1));
- assert_eq!(0f32.integer_decode(), (0, -150, 1));
- assert_eq!((-0f32).integer_decode(), (0, -150, -1));
- assert_eq!(INFINITY.integer_decode(), (8388608, 105, 1));
- assert_eq!(NEG_INFINITY.integer_decode(), (8388608, 105, -1));
-
- // Ignore the "sign" (quiet / signalling flag) of NAN.
- // It can vary between runtime operations and LLVM folding.
- let (nan_m, nan_e, _nan_s) = NAN.integer_decode();
- assert_eq!((nan_m, nan_e), (12582912, 105));
- }
-
#[test]
fn test_floor() {
assert_approx_eq!(1.0f32.floor(), 1.0f32);
assert_eq!(neg_inf.to_radians(), neg_inf);
}
- #[test]
- #[allow(deprecated)]
- fn test_ldexp() {
- let f1 = 2.0f32.powi(-123);
- let f2 = 2.0f32.powi(-111);
- let f3 = 1.75 * 2.0f32.powi(-12);
- assert_eq!(f32::ldexp(1f32, -123), f1);
- assert_eq!(f32::ldexp(1f32, -111), f2);
- assert_eq!(f32::ldexp(1.75f32, -12), f3);
-
- assert_eq!(f32::ldexp(0f32, -123), 0f32);
- assert_eq!(f32::ldexp(-0f32, -123), -0f32);
-
- let inf: f32 = f32::INFINITY;
- let neg_inf: f32 = f32::NEG_INFINITY;
- let nan: f32 = f32::NAN;
- assert_eq!(f32::ldexp(inf, -123), inf);
- assert_eq!(f32::ldexp(neg_inf, -123), neg_inf);
- assert!(f32::ldexp(nan, -123).is_nan());
- }
-
- #[test]
- #[allow(deprecated)]
- fn test_frexp() {
- let f1 = 2.0f32.powi(-123);
- let f2 = 2.0f32.powi(-111);
- let f3 = 1.75 * 2.0f32.powi(-123);
- let (x1, exp1) = f1.frexp();
- let (x2, exp2) = f2.frexp();
- let (x3, exp3) = f3.frexp();
- assert_eq!((x1, exp1), (0.5f32, -122));
- assert_eq!((x2, exp2), (0.5f32, -110));
- assert_eq!((x3, exp3), (0.875f32, -122));
- assert_eq!(f32::ldexp(x1, exp1), f1);
- assert_eq!(f32::ldexp(x2, exp2), f2);
- assert_eq!(f32::ldexp(x3, exp3), f3);
-
- assert_eq!(0f32.frexp(), (0f32, 0));
- assert_eq!((-0f32).frexp(), (-0f32, 0));
- }
-
- #[test] #[cfg_attr(windows, ignore)] // FIXME #8755
- #[allow(deprecated)]
- fn test_frexp_nowin() {
- let inf: f32 = f32::INFINITY;
- let neg_inf: f32 = f32::NEG_INFINITY;
- let nan: f32 = f32::NAN;
- assert_eq!(match inf.frexp() { (x, _) => x }, inf);
- assert_eq!(match neg_inf.frexp() { (x, _) => x }, neg_inf);
- assert!(match nan.frexp() { (x, _) => x.is_nan() })
- }
-
#[test]
fn test_asinh() {
assert_eq!(0.0f32.asinh(), 0.0f32);
#[cfg(not(test))]
use intrinsics;
#[cfg(not(test))]
-use libc::c_int;
-#[cfg(not(test))]
use num::FpCategory;
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn classify(self) -> FpCategory { num::Float::classify(self) }
- /// Returns the mantissa, base 2 exponent, and sign as integers, respectively.
- /// The original number can be recovered by `sign * mantissa * 2 ^ exponent`.
- /// The floating point encoding is documented in the [Reference][floating-point].
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// let num = 2.0f64;
- ///
- /// // (8388608, -22, 1)
- /// let (mantissa, exponent, sign) = num.integer_decode();
- /// let sign_f = sign as f64;
- /// let mantissa_f = mantissa as f64;
- /// let exponent_f = num.powf(exponent as f64);
- ///
- /// // 1 * 8388608 * 2^(-22) == 2
- /// let abs_difference = (sign_f * mantissa_f * exponent_f - num).abs();
- ///
- /// assert!(abs_difference < 1e-10);
- /// ```
- /// [floating-point]: ../reference/types.html#machine-types
- #[unstable(feature = "float_extras", reason = "signature is undecided",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- #[allow(deprecated)]
- pub fn integer_decode(self) -> (u64, i16, i8) { num::Float::integer_decode(self) }
-
/// Returns the largest integer less than or equal to a number.
///
/// ```
#[inline]
pub fn to_radians(self) -> f64 { num::Float::to_radians(self) }
- /// Constructs a floating point number of `x*2^exp`.
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// // 3*2^2 - 12 == 0
- /// let abs_difference = (f64::ldexp(3.0, 2) - 12.0).abs();
- ///
- /// assert!(abs_difference < 1e-10);
- /// ```
- #[unstable(feature = "float_extras",
- reason = "pending integer conventions",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- pub fn ldexp(x: f64, exp: isize) -> f64 {
- unsafe { cmath::ldexp(x, exp as c_int) }
- }
-
- /// Breaks the number into a normalized fraction and a base-2 exponent,
- /// satisfying:
- ///
- /// * `self = x * 2^exp`
- /// * `0.5 <= abs(x) < 1.0`
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// let x = 4.0_f64;
- ///
- /// // (1/2)*2^3 -> 1 * 8/2 -> 4.0
- /// let f = x.frexp();
- /// let abs_difference_0 = (f.0 - 0.5).abs();
- /// let abs_difference_1 = (f.1 as f64 - 3.0).abs();
- ///
- /// assert!(abs_difference_0 < 1e-10);
- /// assert!(abs_difference_1 < 1e-10);
- /// ```
- #[unstable(feature = "float_extras",
- reason = "pending integer conventions",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- pub fn frexp(self) -> (f64, isize) {
- unsafe {
- let mut exp = 0;
- let x = cmath::frexp(self, &mut exp);
- (x, exp as isize)
- }
- }
-
- /// Returns the next representable floating-point value in the direction of
- /// `other`.
- ///
- /// ```
- /// #![feature(float_extras)]
- ///
- /// let x = 1.0f64;
- ///
- /// let abs_diff = (x.next_after(2.0) - 1.0000000000000002220446049250313_f64).abs();
- ///
- /// assert!(abs_diff < 1e-10);
- /// ```
- #[unstable(feature = "float_extras",
- reason = "unsure about its place in the world",
- issue = "27752")]
- #[rustc_deprecated(since = "1.11.0",
- reason = "never really came to fruition and easily \
- implementable outside the standard library")]
- #[inline]
- pub fn next_after(self, other: f64) -> f64 {
- unsafe { cmath::nextafter(self, other) }
- }
-
/// Returns the maximum of the two numbers.
///
/// ```
assert_eq!(1e-308f64.classify(), Fp::Subnormal);
}
- #[test]
- #[allow(deprecated)]
- fn test_integer_decode() {
- assert_eq!(3.14159265359f64.integer_decode(), (7074237752028906, -51, 1));
- assert_eq!((-8573.5918555f64).integer_decode(), (4713381968463931, -39, -1));
- assert_eq!(2f64.powf(100.0).integer_decode(), (4503599627370496, 48, 1));
- assert_eq!(0f64.integer_decode(), (0, -1075, 1));
- assert_eq!((-0f64).integer_decode(), (0, -1075, -1));
- assert_eq!(INFINITY.integer_decode(), (4503599627370496, 972, 1));
- assert_eq!(NEG_INFINITY.integer_decode(), (4503599627370496, 972, -1));
-
- // Ignore the "sign" (quiet / signalling flag) of NAN.
- // It can vary between runtime operations and LLVM folding.
- let (nan_m, nan_e, _nan_s) = NAN.integer_decode();
- assert_eq!((nan_m, nan_e), (6755399441055744, 972));
- }
-
#[test]
fn test_floor() {
assert_approx_eq!(1.0f64.floor(), 1.0f64);
assert_eq!(neg_inf.to_radians(), neg_inf);
}
- #[test]
- #[allow(deprecated)]
- fn test_ldexp() {
- let f1 = 2.0f64.powi(-123);
- let f2 = 2.0f64.powi(-111);
- let f3 = 1.75 * 2.0f64.powi(-12);
- assert_eq!(f64::ldexp(1f64, -123), f1);
- assert_eq!(f64::ldexp(1f64, -111), f2);
- assert_eq!(f64::ldexp(1.75f64, -12), f3);
-
- assert_eq!(f64::ldexp(0f64, -123), 0f64);
- assert_eq!(f64::ldexp(-0f64, -123), -0f64);
-
- let inf: f64 = INFINITY;
- let neg_inf: f64 = NEG_INFINITY;
- let nan: f64 = NAN;
- assert_eq!(f64::ldexp(inf, -123), inf);
- assert_eq!(f64::ldexp(neg_inf, -123), neg_inf);
- assert!(f64::ldexp(nan, -123).is_nan());
- }
-
- #[test]
- #[allow(deprecated)]
- fn test_frexp() {
- let f1 = 2.0f64.powi(-123);
- let f2 = 2.0f64.powi(-111);
- let f3 = 1.75 * 2.0f64.powi(-123);
- let (x1, exp1) = f1.frexp();
- let (x2, exp2) = f2.frexp();
- let (x3, exp3) = f3.frexp();
- assert_eq!((x1, exp1), (0.5f64, -122));
- assert_eq!((x2, exp2), (0.5f64, -110));
- assert_eq!((x3, exp3), (0.875f64, -122));
- assert_eq!(f64::ldexp(x1, exp1), f1);
- assert_eq!(f64::ldexp(x2, exp2), f2);
- assert_eq!(f64::ldexp(x3, exp3), f3);
-
- assert_eq!(0f64.frexp(), (0f64, 0));
- assert_eq!((-0f64).frexp(), (-0f64, 0));
- }
-
- #[test] #[cfg_attr(windows, ignore)] // FIXME #8755
- #[allow(deprecated)]
- fn test_frexp_nowin() {
- let inf: f64 = INFINITY;
- let neg_inf: f64 = NEG_INFINITY;
- let nan: f64 = NAN;
- assert_eq!(match inf.frexp() { (x, _) => x }, inf);
- assert_eq!(match neg_inf.frexp() { (x, _) => x }, neg_inf);
- assert!(match nan.frexp() { (x, _) => x.is_nan() })
- }
-
#[test]
fn test_asinh() {
assert_eq!(0.0f64.asinh(), 0.0f64);
/// variant will be returned. If an error is returned then it must be
/// guaranteed that no bytes were read.
///
+ /// An error of the `ErrorKind::Interrupted` kind is non-fatal and the read
+ /// operation should be retried if there is nothing else to do.
+ ///
/// # Examples
///
/// [`File`][file]s implement `Read`:
/// let mut f = File::open("foo.txt")?;
/// let mut buffer = [0; 10];
///
- /// // read 10 bytes
+ /// // read up to 10 bytes
/// f.read(&mut buffer[..])?;
/// # Ok(())
/// # }
/// It is **not** considered an error if the entire buffer could not be
/// written to this writer.
///
+ /// An error of the `ErrorKind::Interrupted` kind is non-fatal and the
+ /// write operation should be retried if there is nothing else to do.
+ ///
/// # Examples
///
/// ```
/// # fn foo() -> std::io::Result<()> {
/// let mut buffer = File::create("foo.txt")?;
///
+ /// // Writes some prefix of the byte string, not necessarily all of it.
/// buffer.write(b"some bytes")?;
/// # Ok(())
/// # }
/// Attempts to write an entire buffer into this write.
///
- /// This method will continuously call `write` while there is more data to
- /// write. This method will not return until the entire buffer has been
- /// successfully written or an error occurs. The first error generated from
- /// this method will be returned.
+ /// This method will continuously call `write` until there is no more data
+ /// to be written or an error of non-`ErrorKind::Interrupted` kind is
+ /// returned. This method will not return until the entire buffer has been
+ /// successfully written or such an error occurs. The first error that is
+ /// not of `ErrorKind::Interrupted` kind generated from this method will be
+ /// returned.
///
/// # Errors
///
- /// This function will return the first error that `write` returns.
+ /// This function will return the first error of
+ /// non-`ErrorKind::Interrupted` kind that `write` returns.
///
/// # Examples
///
done_first: bool,
}
+impl<T, U> Chain<T, U> {
+ /// Consumes the `Chain`, returning the wrapped readers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(more_io_inner_methods)]
+ ///
+ /// # use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut foo_file = File::open("foo.txt")?;
+ /// let mut bar_file = File::open("bar.txt")?;
+ ///
+ /// let chain = foo_file.chain(bar_file);
+ /// let (foo_file, bar_file) = chain.into_inner();
+ /// # Ok(())
+ /// # }
+ /// ```
+ #[unstable(feature = "more_io_inner_methods", issue="41519")]
+ pub fn into_inner(self) -> (T, U) {
+ (self.first, self.second)
+ }
+
+ /// Gets references to the underlying readers in this `Chain`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(more_io_inner_methods)]
+ ///
+ /// # use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut foo_file = File::open("foo.txt")?;
+ /// let mut bar_file = File::open("bar.txt")?;
+ ///
+ /// let chain = foo_file.chain(bar_file);
+ /// let (foo_file, bar_file) = chain.get_ref();
+ /// # Ok(())
+ /// # }
+ /// ```
+ #[unstable(feature = "more_io_inner_methods", issue="41519")]
+ pub fn get_ref(&self) -> (&T, &U) {
+ (&self.first, &self.second)
+ }
+
+ /// Gets mutable references to the underlying readers in this `Chain`.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying readers as doing so may corrupt the internal state of this
+ /// `Chain`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(more_io_inner_methods)]
+ ///
+ /// # use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut foo_file = File::open("foo.txt")?;
+ /// let mut bar_file = File::open("bar.txt")?;
+ ///
+ /// let mut chain = foo_file.chain(bar_file);
+ /// let (foo_file, bar_file) = chain.get_mut();
+ /// # Ok(())
+ /// # }
+ /// ```
+ #[unstable(feature = "more_io_inner_methods", issue="41519")]
+ pub fn get_mut(&mut self) -> (&mut T, &mut U) {
+ (&mut self.first, &mut self.second)
+ }
+}
+
#[stable(feature = "std_debug", since = "1.16.0")]
impl<T: fmt::Debug, U: fmt::Debug> fmt::Debug for Chain<T, U> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
pub fn into_inner(self) -> T {
self.inner
}
+
+ /// Gets a reference to the underlying reader.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(more_io_inner_methods)]
+ ///
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ ///
+ /// let mut buffer = [0; 5];
+ /// let mut handle = file.take(5);
+ /// handle.read(&mut buffer)?;
+ ///
+ /// let file = handle.get_ref();
+ /// # Ok(())
+ /// # }
+ /// ```
+ #[unstable(feature = "more_io_inner_methods", issue="41519")]
+ pub fn get_ref(&self) -> &T {
+ &self.inner
+ }
+
+ /// Gets a mutable reference to the underlying reader.
+ ///
+ /// Care should be taken to avoid modifying the internal I/O state of the
+ /// underlying reader as doing so may corrupt the internal limit of this
+ /// `Take`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(more_io_inner_methods)]
+ ///
+ /// use std::io;
+ /// use std::io::prelude::*;
+ /// use std::fs::File;
+ ///
+ /// # fn foo() -> io::Result<()> {
+ /// let mut file = File::open("foo.txt")?;
+ ///
+ /// let mut buffer = [0; 5];
+ /// let mut handle = file.take(5);
+ /// handle.read(&mut buffer)?;
+ ///
+ /// let file = handle.get_mut();
+ /// # Ok(())
+ /// # }
+ /// ```
+ #[unstable(feature = "more_io_inner_methods", issue="41519")]
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.inner
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
#![feature(allow_internal_unstable)]
#![feature(asm)]
#![feature(associated_consts)]
-#![feature(borrow_state)]
#![feature(box_syntax)]
#![feature(cfg_target_has_atomic)]
#![feature(cfg_target_thread_local)]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(exact_size_is_empty)]
-#![feature(float_extras)]
#![feature(float_from_str_radix)]
#![feature(fn_traits)]
#![feature(fnbox)]
#![feature(untagged_unions)]
#![feature(unwind_attributes)]
#![feature(vec_push_all)]
-#![feature(zero_one)]
#![cfg_attr(test, feature(update_panic_count))]
#![cfg_attr(stage0, feature(pub_restricted))]
#![cfg_attr(test, feature(float_bits_conv))]
#![stable(feature = "rust1", since = "1.0.0")]
#![allow(missing_docs)]
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
-pub use core::num::{Zero, One};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::num::{FpCategory, ParseIntError, ParseFloatError, TryFromIntError};
#[stable(feature = "rust1", since = "1.0.0")]
}
}
+#[cfg(target_arch = "x86_64")]
+mod arch {
+ use os::raw::{c_uint, c_long, c_ulong};
+ use os::unix::raw::{uid_t, gid_t};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type dev_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type mode_t = u32;
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_ulong,
+ __unused: [c_long; 3],
+ }
+}
+
//! Cross-platform path manipulation.
//!
-//! This module provides two types, [`PathBuf`] and [`Path`] (akin to [`String`]
+//! This module provides two types, [`PathBuf`] and [`Path`][`Path`] (akin to [`String`]
//! and [`str`]), for working with paths abstractly. These types are thin wrappers
//! around [`OsString`] and [`OsStr`] respectively, meaning that they work directly
//! on strings according to the local platform's path syntax.
/// If [`self.file_name`] was [`None`], this is equivalent to pushing
/// `file_name`.
///
+ /// Otherwise it is equivalent to calling [`pop`] and then pushing
+ /// `file_name`. The new path will be a sibling of the original path.
+ /// (That is, it will have the same parent.)
+ ///
/// [`self.file_name`]: struct.PathBuf.html#method.file_name
/// [`None`]: ../../std/option/enum.Option.html#variant.None
+ /// [`pop`]: struct.PathBuf.html#method.pop
///
/// # Examples
///
})
}
- /// Returns the final component of the `Path`, if it is a normal file.
+ /// Returns the final component of the `Path`, if there is one.
+ ///
+ /// If the path is a normal file, this is the file name. If it's the path of a directory, this
+ /// is the directory name.
///
/// Returns [`None`] If the path terminates in `..`.
///
/// use std::path::Path;
/// use std::ffi::OsStr;
///
- /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt").file_name());
+ /// assert_eq!(Some(OsStr::new("bin")), Path::new("/usr/bin/").file_name());
+ /// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("tmp/foo.txt").file_name());
/// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt/.").file_name());
/// assert_eq!(Some(OsStr::new("foo.txt")), Path::new("foo.txt/.//").file_name());
/// assert_eq!(None, Path::new("foo.txt/..").file_name());
+ /// assert_eq!(None, Path::new("/").file_name());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn file_name(&self) -> Option<&OsStr> {
///
/// let path = Path::new("/tmp/foo.txt");
/// assert_eq!(path.with_file_name("bar.txt"), PathBuf::from("/tmp/bar.txt"));
+ ///
+ /// let path = Path::new("/tmp");
+ /// assert_eq!(path.with_file_name("var"), PathBuf::from("/var"));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_file_name<S: AsRef<OsStr>>(&self, file_name: S) -> PathBuf {
/// An error returned from the [`recv`] function on a [`Receiver`].
///
/// The [`recv`] operation can only fail if the sending half of a
-/// [`channel`] (or [`sync_channel`]) is disconnected, implying that no further
+/// [`channel`][`channel`] (or [`sync_channel`]) is disconnected, implying that no further
/// messages will ever be received.
///
/// [`recv`]: struct.Receiver.html#method.recv
pub fn sub_duration(&self, other: &Duration) -> Instant {
Instant {
t: self.t.checked_sub(dur2intervals(other))
- .expect("overflow when adding duration to instant"),
+ .expect("overflow when subtracting duration from instant"),
}
}
}
self.meta_item().and_then(|meta_item| meta_item.value_str())
}
+    /// Returns a tuple of the MetaItem's name and its single literal value,
+    /// if the item has the form `name(lit)` with exactly one literal argument.
+ pub fn name_value_literal(&self) -> Option<(Name, &Lit)> {
+ self.meta_item().and_then(
+ |meta_item| meta_item.meta_item_list().and_then(
+ |meta_item_list| {
+ if meta_item_list.len() == 1 {
+ let nested_item = &meta_item_list[0];
+ if nested_item.is_literal() {
+ Some((meta_item.name(), nested_item.literal().unwrap()))
+ } else {
+ None
+ }
+ }
+ else {
+ None
+ }}))
+ }
+
/// Returns a MetaItem if self is a MetaItem with Kind Word.
pub fn word(&self) -> Option<&MetaItem> {
self.meta_item().and_then(|meta_item| if meta_item.is_word() {
continue
}
+ let mut recognised = false;
if let Some(mi) = item.word() {
let word = &*mi.name().as_str();
let hint = match word {
_ => match int_type_of_word(word) {
Some(ity) => Some(ReprInt(ity)),
None => {
- // Not a word we recognize
- span_err!(diagnostic, item.span, E0552,
- "unrecognized representation hint");
None
}
}
};
if let Some(h) = hint {
+ recognised = true;
acc.push(h);
}
- } else {
- span_err!(diagnostic, item.span, E0553,
- "unrecognized enum representation hint");
+ } else if let Some((name, value)) = item.name_value_literal() {
+ if name == "align" {
+ recognised = true;
+ let mut align_error = None;
+ if let ast::LitKind::Int(align, ast::LitIntType::Unsuffixed) = value.node {
+ if align.is_power_of_two() {
+ // rustc::ty::layout::Align restricts align to <= 32768
+ if align <= 32768 {
+ acc.push(ReprAlign(align as u16));
+ } else {
+ align_error = Some("larger than 32768");
+ }
+ } else {
+ align_error = Some("not a power of two");
+ }
+ } else {
+ align_error = Some("not an unsuffixed integer");
+ }
+ if let Some(align_error) = align_error {
+ span_err!(diagnostic, item.span, E0589,
+ "invalid `repr(align)` attribute: {}", align_error);
+ }
+ }
+ }
+ if !recognised {
+            // Neither a word we recognize nor a valid name-value hint (e.g. `align`)
+ span_err!(diagnostic, item.span, E0552,
+ "unrecognized representation hint");
}
}
}
ReprExtern,
ReprPacked,
ReprSimd,
+ ReprAlign(u16),
}
#[derive(Eq, Hash, PartialEq, Debug, RustcEncodable, RustcDecodable, Copy, Clone)]
E0550, // multiple deprecated attributes
E0551, // incorrect meta item
E0552, // unrecognized representation hint
- E0553, // unrecognized enum representation hint
E0554, // #[feature] may not be used on the [] release channel
E0555, // malformed feature attribute, expected #![feature(...)]
E0556, // malformed feature, expected just one word
E0557, // feature has been removed
E0584, // file for module `..` found at both .. and ..
+ E0589, // invalid `repr(align)` attribute
}
module.directory.pop();
self.cx.current_expansion.module = Rc::new(module);
+ let orig_mod_span = krate.module.inner;
+
let krate_item = Expansion::Items(SmallVector::one(P(ast::Item {
attrs: krate.attrs,
span: krate.span,
vis: ast::Visibility::Public,
})));
- match self.expand(krate_item).make_items().pop().unwrap().unwrap() {
- ast::Item { attrs, node: ast::ItemKind::Mod(module), .. } => {
+ match self.expand(krate_item).make_items().pop().map(P::unwrap) {
+ Some(ast::Item { attrs, node: ast::ItemKind::Mod(module), .. }) => {
krate.attrs = attrs;
krate.module = module;
},
+ None => {
+ // Resolution failed so we return an empty expansion
+ krate.attrs = vec![];
+ krate.module = ast::Mod {
+ inner: orig_mod_span,
+ items: vec![],
+ };
+ },
_ => unreachable!(),
};
// Allows the `catch {...}` expression
(active, catch_expr, "1.17.0", Some(31436)),
+ // Allows `repr(align(u16))` struct attribute (RFC 1358)
+ (active, repr_align, "1.17.0", Some(33626)),
+
// See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work.
(active, rvalue_static_promotion, "1.15.1", Some(38865)),
and possibly buggy");
}
+ if item.check_name("align") {
+ gate_feature_post!(&self, repr_align, i.span,
+ "the struct `#[repr(align(u16))]` attribute \
+ is experimental");
+ }
}
}
}
enum PrevTokenKind {
DocComment,
Comma,
+ Plus,
Interpolated,
Eof,
Other,
self.prev_token_kind = match self.token {
token::DocComment(..) => PrevTokenKind::DocComment,
token::Comma => PrevTokenKind::Comma,
+ token::BinOp(token::Plus) => PrevTokenKind::Plus,
token::Interpolated(..) => PrevTokenKind::Interpolated,
token::Eof => PrevTokenKind::Eof,
_ => PrevTokenKind::Other,
break;
}
}
+ let trailing_plus = self.prev_token_kind == PrevTokenKind::Plus;
self.expect(&token::CloseDelim(token::Paren))?;
if ts.len() == 1 && !last_comma {
let ty = ts.into_iter().nth(0).unwrap().unwrap();
+ let maybe_bounds = allow_plus && self.token == token::BinOp(token::Plus);
match ty.node {
- // Accept `(Trait1) + Trait2 + 'a` for backward compatibility (#39318).
- TyKind::Path(None, ref path)
- if allow_plus && self.token == token::BinOp(token::Plus) => {
- self.bump(); // `+`
- let pt = PolyTraitRef::new(Vec::new(), path.clone(), lo.to(self.prev_span));
- let mut bounds = vec![TraitTyParamBound(pt, TraitBoundModifier::None)];
- bounds.append(&mut self.parse_ty_param_bounds()?);
- TyKind::TraitObject(bounds)
+ // `(TY_BOUND_NOPAREN) + BOUND + ...`.
+ TyKind::Path(None, ref path) if maybe_bounds => {
+ self.parse_remaining_bounds(Vec::new(), path.clone(), lo, true)?
}
+ TyKind::TraitObject(ref bounds)
+ if maybe_bounds && bounds.len() == 1 && !trailing_plus => {
+ let path = match bounds[0] {
+ TraitTyParamBound(ref pt, ..) => pt.trait_ref.path.clone(),
+ _ => self.bug("unexpected lifetime bound"),
+ };
+ self.parse_remaining_bounds(Vec::new(), path, lo, true)?
+ }
+ // `(TYPE)`
_ => TyKind::Paren(P(ty))
}
} else {
// Just a type path or bound list (trait object type) starting with a trait.
// `Type`
// `Trait1 + Trait2 + 'a`
- if allow_plus && self.eat(&token::BinOp(token::Plus)) {
- let poly_trait = PolyTraitRef::new(Vec::new(), path, lo.to(self.prev_span));
- let mut bounds = vec![TraitTyParamBound(poly_trait, TraitBoundModifier::None)];
- bounds.append(&mut self.parse_ty_param_bounds()?);
- TyKind::TraitObject(bounds)
+ if allow_plus && self.check(&token::BinOp(token::Plus)) {
+ self.parse_remaining_bounds(Vec::new(), path, lo, true)?
} else {
TyKind::Path(None, path)
}
self.parse_ty_bare_fn(lifetime_defs)?
} else {
let path = self.parse_path(PathStyle::Type)?;
- let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
- let mut bounds = vec![TraitTyParamBound(poly_trait, TraitBoundModifier::None)];
- if allow_plus && self.eat(&token::BinOp(token::Plus)) {
- bounds.append(&mut self.parse_ty_param_bounds()?)
- }
- TyKind::TraitObject(bounds)
+ let parse_plus = allow_plus && self.check(&token::BinOp(token::Plus));
+ self.parse_remaining_bounds(lifetime_defs, path, lo, parse_plus)?
}
} else if self.eat_keyword(keywords::Impl) {
// FIXME: figure out priority of `+` in `impl Trait1 + Trait2` (#34511).
Ok(P(ty))
}
+ fn parse_remaining_bounds(&mut self, lifetime_defs: Vec<LifetimeDef>, path: ast::Path,
+ lo: Span, parse_plus: bool) -> PResult<'a, TyKind> {
+ let poly_trait_ref = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
+ let mut bounds = vec![TraitTyParamBound(poly_trait_ref, TraitBoundModifier::None)];
+ if parse_plus {
+ self.bump(); // `+`
+ bounds.append(&mut self.parse_ty_param_bounds()?);
+ }
+ Ok(TyKind::TraitObject(bounds))
+ }
+
fn maybe_recover_from_bad_type_plus(&mut self, allow_plus: bool, ty: &Ty) -> PResult<'a, ()> {
// Do not add `+` to expected tokens.
if !allow_plus || self.token != token::BinOp(token::Plus) {
// Parse bounds of a type parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
// BOUND = TY_BOUND | LT_BOUND
// LT_BOUND = LIFETIME (e.g. `'a`)
- // TY_BOUND = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
+ // TY_BOUND = TY_BOUND_NOPAREN | (TY_BOUND_NOPAREN)
+ // TY_BOUND_NOPAREN = [?] [for<LT_PARAM_DEFS>] SIMPLE_PATH (e.g. `?for<'a: 'b> m::Trait<'a>`)
fn parse_ty_param_bounds_common(&mut self, allow_plus: bool) -> PResult<'a, TyParamBounds> {
let mut bounds = Vec::new();
loop {
- let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
- if self.check_lifetime() {
- if let Some(question_span) = question {
- self.span_err(question_span,
- "`?` may only modify trait bounds, not lifetime bounds");
- }
- bounds.push(RegionTyParamBound(self.expect_lifetime()));
- } else if self.check_keyword(keywords::For) || self.check_path() {
- let lo = self.span;
- let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
- let path = self.parse_path(PathStyle::Type)?;
- let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
- let modifier = if question.is_some() {
- TraitBoundModifier::Maybe
+ let is_bound_start = self.check_path() || self.check_lifetime() ||
+ self.check(&token::Question) ||
+ self.check_keyword(keywords::For) ||
+ self.check(&token::OpenDelim(token::Paren));
+ if is_bound_start {
+ let has_parens = self.eat(&token::OpenDelim(token::Paren));
+ let question = if self.eat(&token::Question) { Some(self.prev_span) } else { None };
+ if self.token.is_lifetime() {
+ if let Some(question_span) = question {
+ self.span_err(question_span,
+ "`?` may only modify trait bounds, not lifetime bounds");
+ }
+ bounds.push(RegionTyParamBound(self.expect_lifetime()));
} else {
- TraitBoundModifier::None
- };
- bounds.push(TraitTyParamBound(poly_trait, modifier));
+ let lo = self.span;
+ let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
+ let path = self.parse_path(PathStyle::Type)?;
+ let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_span));
+ let modifier = if question.is_some() {
+ TraitBoundModifier::Maybe
+ } else {
+ TraitBoundModifier::None
+ };
+ bounds.push(TraitTyParamBound(poly_trait, modifier));
+ }
+ if has_parens {
+ self.expect(&token::CloseDelim(token::Paren))?;
+ if let Some(&RegionTyParamBound(..)) = bounds.last() {
+ self.span_err(self.prev_span,
+ "parenthesized lifetime bounds are not supported");
+ }
+ }
} else {
break
}
--> test.rs:2:10
|
2 | fn foo() {
- | __________^ starting here...
+ | __________^
3 | | }
- | |_^ ...ending here: test
+ | |_^ test
"#);
}
--> test.rs:2:10
|
2 | fn foo() {
- | __________^ starting here...
+ | __________^
3 | |
4 | |
5 | | }
- | |___^ ...ending here: test
+ | |___^ test
"#);
}
--> test.rs:3:3
|
3 | X0 Y0
- | ____^__- starting here...
+ | ____^__-
| | ___|
- | || starting here...
+ | ||
4 | || X1 Y1
5 | || X2 Y2
- | ||____^__- ...ending here: `Y` is a good letter too
+ | ||____^__- `Y` is a good letter too
| |____|
- | ...ending here: `X` is a good letter
+ | `X` is a good letter
"#);
}
--> test.rs:3:3
|
3 | X0 Y0
- | ____^__- starting here...
+ | ____^__-
| | ___|
- | || starting here...
+ | ||
4 | || Y1 X1
- | ||____-__^ ...ending here: `X` is a good letter
+ | ||____-__^ `X` is a good letter
| |_____|
- | ...ending here: `Y` is a good letter too
+ | `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |_________- starting here...
+ | |_________-
5 | || X2 Y2 Z2
- | ||____^ ...ending here: `X` is a good letter
+ | ||____^ `X` is a good letter
6 | | X3 Y3 Z3
- | |_____- ...ending here: `Y` is a good letter too
+ | |_____- `Y` is a good letter too
"#);
}
--> test.rs:3:3
|
3 | X0 Y0 Z0
- | _____^__-__- starting here...
+ | _____^__-__-
| | ____|__|
- | || ___| starting here...
- | ||| starting here...
+ | || ___|
+ | |||
4 | ||| X1 Y1 Z1
5 | ||| X2 Y2 Z2
- | |||____^__-__- ...ending here: `Z` label
+ | |||____^__-__- `Z` label
| ||____|__|
- | |____| ...ending here: `Y` is a good letter too
- | ...ending here: `X` is a good letter
+ | |____| `Y` is a good letter too
+ | `X` is a good letter
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |____^_- starting here...
+ | |____^_-
| ||____|
- | | ...ending here: `X` is a good letter
+ | | `X` is a good letter
5 | | X2 Y2 Z2
- | |____-______- ...ending here: `Y` is a good letter too
+ | |____-______- `Y` is a good letter too
| ____|
- | | starting here...
+ | |
6 | | X3 Y3 Z3
- | |________- ...ending here: `Z`
+ | |________- `Z`
"#);
}
vec![
SpanLabel {
start: Position {
- string: "Y0",
+ string: "X0",
count: 1,
},
end: Position {
],
r#"
error: foo
- --> test.rs:3:6
+ --> test.rs:3:3
|
-3 | X0 Y0 Z0
- | ______^ starting here...
+3 | / X0 Y0 Z0
4 | | X1 Y1 Z1
- | |____^ ...ending here: `X` is a good letter
+ | |____^ `X` is a good letter
5 | X2 Y2 Z2
- | ______- starting here...
+ | ______-
6 | | X3 Y3 Z3
- | |__________- ...ending here: `Y` is a good letter too
+ | |__________- `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |____^____- starting here...
+ | |____^____-
| ||____|
- | | ...ending here: `X` is a good letter
+ | | `X` is a good letter
5 | | X2 Y2 Z2
6 | | X3 Y3 Z3
- | |___________- ...ending here: `Y` is a good letter too
+ | |___________- `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | X1 Y1 Z1
- | |____^____- starting here...
+ | |____^____-
| ||____|
- | | ...ending here: `X` is a good letter
+ | | `X` is a good letter
5 | | 1
6 | | 2
7 | | 3
... |
15 | | X2 Y2 Z2
16 | | X3 Y3 Z3
- | |___________- ...ending here: `Y` is a good letter too
+ | |___________- `Y` is a good letter too
"#);
}
--> test.rs:3:6
|
3 | X0 Y0 Z0
- | ______^ starting here...
+ | ______^
4 | | 1
5 | | 2
6 | | 3
7 | | X1 Y1 Z1
- | |_________- starting here...
+ | |_________-
8 | || 4
9 | || 5
10 | || 6
11 | || X2 Y2 Z2
- | ||__________- ...ending here: `Z` is a good letter too
+ | ||__________- `Z` is a good letter too
... |
15 | | 10
16 | | X3 Y3 Z3
- | |_______^ ...ending here: `Y` is a good letter
+ | |_______^ `Y` is a good letter
"#);
}
for a in type_attrs {
for r in &attr::find_repr_attrs(diagnostic, a) {
repr_type_name = match *r {
- attr::ReprPacked | attr::ReprSimd => continue,
+ attr::ReprPacked | attr::ReprSimd | attr::ReprAlign(_) => continue,
attr::ReprExtern => "i32",
attr::ReprInt(attr::SignedInt(ast::IntTy::Is)) => "isize",
-Subproject commit 2e951c3ae354bcbd2e50b30798e232949a926b75
+Subproject commit a884d21cc5f0b23a1693d1e872fd8998a4fdd17f
// object (usually called `crtX.o), which then invokes initialization callbacks
// of other runtime components (registered via yet another special image section).
-#![feature(no_core, lang_items)]
+#![feature(no_core, lang_items, optin_builtin_traits)]
#![crate_type="rlib"]
#![no_core]
#![allow(non_camel_case_types)]
trait Sized {}
#[lang = "sync"]
trait Sync {}
+impl Sync for .. {}
#[lang = "copy"]
trait Copy {}
-impl<T> Sync for T {}
+#[cfg_attr(not(stage0), lang = "freeze")]
+trait Freeze {}
+impl Freeze for .. {}
#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))]
pub mod eh_frames {
// CHECK-NOT: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
-// CHECK: invoke{{.*}}drop{{.*}}SomeUniqueName
+// CHECK: call{{.*}}drop{{.*}}SomeUniqueName
// CHECK-NOT: {{(call|invoke).*}}drop{{.*}}SomeUniqueName
// The next line checks for the } that ends the function definition
// CHECK-LABEL: {{^[}]}}
// CHECK: bitcast{{.*}}personalityslot
// CHECK-NEXT: call void @llvm.lifetime.start
might_unwind();
+ let _t = S;
might_unwind();
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// force-host
+// no-prefer-dynamic
+
+#![crate_type = "proc-macro"]
+#![feature(proc_macro)]
+
+extern crate proc_macro;
+use proc_macro::TokenStream;
+
+#[proc_macro_attribute]
+pub fn emit_unchanged(_args: TokenStream, input: TokenStream) -> TokenStream {
+ input
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-41211.rs
+
+// FIXME: https://github.com/rust-lang/rust/issues/41430
+// This is a temporary regression test for the ICE reported in #41211
+
+#![feature(proc_macro)]
+#![emit_unchanged]
+//~^ ERROR: cannot find attribute macro `emit_unchanged` in this scope
+extern crate issue_41211;
+use issue_41211::emit_unchanged;
+
+fn main() {}
// except according to those terms.
#![allow(dead_code)]
+#![feature(attr_literals)]
#![feature(repr_simd)]
#[repr(C)] //~ ERROR: attribute should be applied to struct, enum or union
#[repr(C)]
enum EExtern { A, B }
+#[repr(align(8))] //~ ERROR: attribute should be applied to struct
+enum EAlign { A, B }
+
#[repr(packed)] //~ ERROR: attribute should be applied to struct
enum EPacked { A, B }
let _: &[i32] = [0];
//~^ ERROR mismatched types
//~| expected type `&[i32]`
- //~| found type `[{integer}; 1]`
//~| expected &[i32], found array of 1 elements
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
#![allow(dead_code)]
+#![feature(attr_literals)]
+#![feature(repr_align)]
#[repr(C)]
enum A { A }
#[repr(C, packed)]
struct E(i32);
-#[rustc_error]
-fn main() {} //~ ERROR compilation successful
+#[repr(packed, align(8))] //~ ERROR conflicting packed and align representation hints
+struct F(i32);
+
+fn main() {}
fn main() {
let _ = [0; f(2)];
//~^ ERROR calls in constants are limited to constant functions
- //~| ERROR constant evaluation error [E0080]
- //~| non-constant path in constant expression
}
#![feature(const_fn)]
+#[derive(PartialEq, Eq)]
enum Cake {
BlackForest,
Marmor,
}
use Cake::*;
-const BOO: (Cake, Cake) = (Marmor, BlackForest);
+struct Pair<A, B>(A, B);
+
+const BOO: Pair<Cake, Cake> = Pair(Marmor, BlackForest);
//~^ ERROR: constant evaluation error [E0080]
-//~| unimplemented constant expression: enum variants
+//~| unimplemented constant expression: tuple struct constructors
const FOO: Cake = BOO.1;
const fn foo() -> Cake {
Marmor
- //~^ ERROR: constant evaluation error [E0080]
- //~| unimplemented constant expression: enum variants
}
const WORKS: Cake = Marmor;
fn main() {
match BlackForest {
FOO => println!("hi"), //~ NOTE: for pattern here
- GOO => println!("meh"), //~ NOTE: for pattern here
+ GOO => println!("meh"),
WORKS => println!("möp"),
_ => println!("bye"),
}
pub fn main() {
let x: Box<Trait> = Box::new(Foo);
- let _y: &Trait = x; //~ ERROR mismatched types
+ let _y: &Trait = x; //~ ERROR E0308
//~| expected type `&Trait`
//~| found type `std::boxed::Box<Trait>`
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+
+#[repr(align(64))]
+struct Foo(u64, u64); //~ error: the struct `#[repr(align(u16))]` attribute is experimental
+
+fn main() {}
fn main() {
let mut c = for_stdin();
let mut v = Vec::new();
- c.read_to(v); //~ ERROR mismatched types
+ c.read_to(v); //~ ERROR E0308
}
fn main() {
check((3, 5));
//~^ ERROR mismatched types
+//~| HELP try with `&(3, 5)`
}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![deny(private_in_public)]
-
-pub use inner::C;
-
-mod inner {
- trait A {
- fn a(&self) { }
- }
-
- pub trait B {
- fn b(&self) { }
- }
-
- pub trait C: A + B { //~ ERROR private trait `inner::A` in public interface
- //~^ WARN will become a hard error
- fn c(&self) { }
- }
-
- impl A for i32 {}
- impl B for i32 {}
- impl C for i32 {}
-
-}
-
-fn main() {
- // A is private
- // B is pub, not reexported
- // C : A + B is pub, reexported
-
- // 0.a(); // can't call
- // 0.b(); // can't call
- 0.c(); // ok
-
- C::a(&0); // can call
- C::b(&0); // can call
- C::c(&0); // ok
-}
fn main() {
let array: [usize; Dim3::dim()]
//~^ ERROR calls in constants are limited to constant functions
- //~| ERROR constant evaluation error
- //~| non-constant path in constant expression
= [0; Dim3::dim()];
//~^ ERROR calls in constants are limited to constant functions
- //~| ERROR constant evaluation error
- //~| non-constant path in constant expression
}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+enum Foo {
+ A = "" + 1
+ //~^ ERROR binary operation `+` cannot be applied to type `&'static str`
+}
+
+enum Bar {
+ A = Foo::A as isize
+}
+
+fn main() {}
enum Bar { X }
mod foo {
- trait Bar {
+ pub trait Bar {
fn method(&self) {}
fn method2(&self) {}
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Note: This test is checking that we forbid a coding pattern that
-// Issue #5873 explicitly wants to allow.
-
-enum State { ST_NULL, ST_WHITESPACE }
-
-fn main() {
- [State::ST_NULL; (State::ST_WHITESPACE as usize)];
- //~^ ERROR constant evaluation error
- //~| unimplemented constant expression: enum variants
-}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that we correctly prevent users from making trait objects
+// from traits with associated consts.
+
+#![feature(associated_consts)]
+
+trait Bar {
+ const X: usize;
+}
+
+fn make_bar<T:Bar>(t: &T) -> &Bar {
+ //~^ ERROR E0038
+ //~| NOTE the trait cannot contain associated consts like `X`
+ //~| NOTE the trait `Bar` cannot be made into an object
+ t
+}
+
+fn main() {
+}
}
}
-fn main() {
+fn main() { unsafe {
let u = m::U { a: 0 }; // OK
let u = m::U { b: 0 }; // OK
let u = m::U { c: 0 }; //~ ERROR field `c` of union `m::U` is private
let m::U { a } = u; // OK
let m::U { b } = u; // OK
let m::U { c } = u; //~ ERROR field `c` of union `m::U` is private
-}
+}}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(dead_code)]
+#![feature(attr_literals)]
+#![feature(repr_align)]
+
+#[repr(align(16.0))] //~ ERROR: invalid `repr(align)` attribute: not an unsuffixed integer
+struct A(i32);
+
+#[repr(align(15))] //~ ERROR: invalid `repr(align)` attribute: not a power of two
+struct B(i32);
+
+#[repr(align(65536))] //~ ERROR: invalid `repr(align)` attribute: larger than 32768
+struct C(i32);
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+#![feature(repr_align)]
+#![allow(dead_code)]
+
+#[repr(align(16))]
+struct A(i32);
+
+struct B(A);
+
+#[repr(packed)]
+struct C(A); //~ ERROR: packed struct cannot transitively contain a `[repr(align)]` struct
+
+#[repr(packed)]
+struct D(B); //~ ERROR: packed struct cannot transitively contain a `[repr(align)]` struct
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_consts)]
+#![feature(associated_type_defaults)]
+
+struct S;
+
+mod method {
+ trait A {
+ fn a(&self) { }
+ }
+
+ pub trait B {
+ fn b(&self) { }
+ }
+
+ pub trait C: A + B {
+ fn c(&self) { }
+ }
+
+ impl A for ::S {}
+ impl B for ::S {}
+ impl C for ::S {}
+}
+
+mod assoc_const {
+ trait A {
+ const A: u8 = 0;
+ }
+
+ pub trait B {
+ const B: u8 = 0;
+ }
+
+ pub trait C: A + B {
+ const C: u8 = 0;
+ }
+
+ impl A for ::S {}
+ impl B for ::S {}
+ impl C for ::S {}
+}
+
+mod assoc_ty {
+ trait A {
+ type A = u8;
+ }
+
+ pub trait B {
+ type B = u8;
+ }
+
+ pub trait C: A + B {
+ type C = u8;
+ }
+
+ impl A for ::S {}
+ impl B for ::S {}
+ impl C for ::S {}
+}
+
+fn check_method() {
+ // A is private
+ // B is pub, not in scope
+ // C : A + B is pub, in scope
+ use method::C;
+
+ // Methods, method call
+ // a, b, c are resolved as trait items, their traits need to be in scope
+ S.a(); //~ ERROR no method named `a` found for type `S` in the current scope
+ S.b(); //~ ERROR no method named `b` found for type `S` in the current scope
+ S.c(); // OK
+ // a, b, c are resolved as inherent items, their traits don't need to be in scope
+ let c = &S as &C;
+ c.a(); //~ ERROR method `a` is private
+ c.b(); // OK
+ c.c(); // OK
+
+ // Methods, UFCS
+ // a, b, c are resolved as trait items, their traits need to be in scope
+ S::a(&S); //~ ERROR no associated item named `a` found for type `S` in the current scope
+ S::b(&S); //~ ERROR no associated item named `b` found for type `S` in the current scope
+ S::c(&S); // OK
+ // a, b, c are resolved as inherent items, their traits don't need to be in scope
+ C::a(&S); //~ ERROR method `a` is private
+ C::b(&S); // OK
+ C::c(&S); // OK
+}
+
+fn check_assoc_const() {
+ // A is private
+ // B is pub, not in scope
+ // C : A + B is pub, in scope
+ use assoc_const::C;
+
+ // Associated constants
+ // A, B, C are resolved as trait items, their traits need to be in scope
+ S::A; //~ ERROR no associated item named `A` found for type `S` in the current scope
+ S::B; //~ ERROR no associated item named `B` found for type `S` in the current scope
+ S::C; // OK
+ // A, B, C are resolved as inherent items, their traits don't need to be in scope
+ C::A; //~ ERROR associated constant `A` is private
+ //~^ ERROR the trait `assoc_const::C` cannot be made into an object
+ //~| ERROR the trait bound `assoc_const::C: assoc_const::A` is not satisfied
+ C::B; // ERROR the trait `assoc_const::C` cannot be made into an object
+ //~^ ERROR the trait bound `assoc_const::C: assoc_const::B` is not satisfied
+ C::C; // OK
+}
+
+fn check_assoc_ty<T: assoc_ty::C>() {
+ // A is private
+ // B is pub, not in scope
+ // C : A + B is pub, in scope
+ use assoc_ty::C;
+
+ // Associated types
+ // A, B, C are resolved as trait items, their traits need to be in scope, not implemented yet
+ let _: S::A; //~ ERROR ambiguous associated type
+ let _: S::B; //~ ERROR ambiguous associated type
+ let _: S::C; //~ ERROR ambiguous associated type
+ // A, B, C are resolved as inherent items, their traits don't need to be in scope
+ let _: T::A; //~ ERROR associated type `A` is private
+ let _: T::B; // OK
+ let _: T::C; // OK
+}
+
+fn main() {}
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-mod m {
- trait Priv {
- fn f(&self) {}
- }
- impl Priv for super::S {}
- pub trait Pub: Priv {}
-}
-
-struct S;
-impl m::Pub for S {}
-
-fn g<T: m::Pub>(arg: T) {
- arg.f(); //~ ERROR: source trait `m::Priv` is private
-}
-
-fn main() {
- g(S);
-}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn f<T: ?>() {} //~ ERROR expected identifier, found `>`
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only -Z continue-parse-after-error
+
+fn main() {
+ let _: Box<((Copy)) + Copy>;
+ //~^ ERROR expected a path on the left-hand side of `+`, not `((Copy))`
+ let _: Box<(Copy + Copy) + Copy>;
+ //~^ ERROR expected a path on the left-hand side of `+`, not `( Copy + Copy)`
+ let _: Box<(Copy +) + Copy>;
+ //~^ ERROR expected a path on the left-hand side of `+`, not `( Copy)`
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only -Z continue-parse-after-error
+
+fn f<T: Copy + ('a)>() {} //~ ERROR parenthesized lifetime bounds are not supported
+
+fn main() {
+ let _: Box<Copy + ('a)>; //~ ERROR parenthesized lifetime bounds are not supported
+ let _: Box<('a) + Copy>; //~ ERROR expected type, found `'a`
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z parse-only
+
+fn f<T: (Copy) + (?Sized) + (for<'a> Trait<'a>)>() {}
+
+fn main() {
+ let _: Box<(Copy) + (?Sized) + (for<'a> Trait<'a>)>;
+ let _: Box<(?Sized) + (for<'a> Trait<'a>) + (Copy)>;
+ let _: Box<(for<'a> Trait<'a>) + (Copy) + (?Sized)>;
+}
+
+FAIL //~ ERROR
-include ../tools.mk
-# NOTE the address sanitizer only supports x86_64 linux
-ifdef SANITIZER_SUPPORT
-all:
- $(RUSTC) -g -Z sanitizer=address -Z print-link-args overflow.rs | grep -q librustc_asan
- $(TMPDIR)/overflow 2>&1 | grep -q stack-buffer-overflow
+# NOTE the address sanitizer only supports x86_64 linux and macOS
+
+ifeq ($(TARGET),x86_64-apple-darwin)
+ASAN_SUPPORT=$(SANITIZER_SUPPORT)
+EXTRA_RUSTFLAG=-C rpath
else
-all:
+ifeq ($(TARGET),x86_64-unknown-linux-gnu)
+ASAN_SUPPORT=$(SANITIZER_SUPPORT)
+EXTRA_RUSTFLAG=
+endif
+endif
+all:
+ifeq ($(ASAN_SUPPORT),1)
+ $(RUSTC) -g -Z sanitizer=address -Z print-link-args $(EXTRA_RUSTFLAG) overflow.rs | grep -q librustc_asan
+ $(TMPDIR)/overflow 2>&1 | grep -q stack-buffer-overflow
endif
-include ../tools.mk
all:
- $(RUSTC) -Z sanitizer=leak --target i686-unknown-linux-gnu hello.rs 2>&1 | grep -q 'Sanitizers only work with the `x86_64-unknown-linux-gnu` target'
+ $(RUSTC) -Z sanitizer=leak --target i686-unknown-linux-gnu hello.rs 2>&1 | grep -q 'LeakSanitizer only works with the `x86_64-unknown-linux-gnu` target'
-include ../tools.mk
-ifdef SANITIZER_SUPPORT
all:
+ifeq ($(TARGET),x86_64-unknown-linux-gnu)
+ifdef SANITIZER_SUPPORT
$(RUSTC) -C opt-level=1 -g -Z sanitizer=leak -Z print-link-args leak.rs | grep -q librustc_lsan
$(TMPDIR)/leak 2>&1 | grep -q 'detected memory leaks'
-else
-all:
-
endif
+endif
+
-include ../tools.mk
-ifdef SANITIZER_SUPPORT
all:
+ifeq ($(TARGET),x86_64-unknown-linux-gnu)
+ifdef SANITIZER_SUPPORT
$(RUSTC) -g -Z sanitizer=memory -Z print-link-args uninit.rs | grep -q librustc_msan
$(TMPDIR)/uninit 2>&1 | grep -q use-of-uninitialized-value
-else
-all:
-
endif
+endif
+
#![crate_type = "lib"]
// we can compile to a variety of platforms, because we don't need
// cross-compiled standard libraries.
-#![feature(no_core)]
+#![feature(no_core, optin_builtin_traits)]
#![no_core]
#![feature(repr_simd, simd_ffi, link_llvm_intrinsics, lang_items)]
pub mod marker {
pub use Copy;
}
+
+#[lang = "freeze"]
+trait Freeze {}
+impl Freeze for .. {}
-include ../tools.mk
-# This is a whitelist of crates which are stable, we don't check for the
-# instability of these crates as they're all stable!
+# This is a whitelist of files which are stable crates or simply are not crates,
+# we don't check for the instability of these crates as they're all stable!
STABLE_CRATES := \
std \
core \
rsbegin.o \
rsend.o \
dllcrt2.o \
- crt2.o
+ crt2.o \
+ clang_rt.%_dynamic.dylib
# Generate a list of all crates in the sysroot. To do this we list all files in
# rustc's sysroot, look at the filename, strip everything after the `-`, and
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(lang_items, no_core)]
+#![feature(lang_items, no_core, optin_builtin_traits)]
#![no_core]
#[lang="copy"]
#[lang="sized"]
trait Sized { }
+#[lang = "freeze"]
+trait Freeze {}
+impl Freeze for .. {}
+
#[lang="start"]
fn start(_main: *const u8, _argc: isize, _argv: *const *const u8) -> isize { 0 }
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+#![feature(repr_align)]
+
+use std::mem;
+
+// Raising alignment
+#[repr(align(16))]
+struct Align16(i32);
+
+// Lowering has no effect
+#[repr(align(1))]
+struct Align1(i32);
+
+// Multiple attributes take the max
+#[repr(align(4))]
+#[repr(align(16))]
+#[repr(align(8))]
+struct AlignMany(i32);
+
+// Raising alignment may not alter size.
+#[repr(align(8))]
+#[allow(dead_code)]
+struct Align8Many {
+ a: i32,
+ b: i32,
+ c: i32,
+ d: u8,
+}
+
+enum Enum {
+ #[allow(dead_code)]
+ A(i32),
+ B(Align16)
+}
+
+// Nested alignment - use `#[repr(C)]` to suppress field reordering for sizeof test
+#[repr(C)]
+struct Nested {
+ a: i32,
+ b: i32,
+ c: Align16,
+ d: i8,
+}
+
+#[repr(packed)]
+struct Packed(i32);
+
+#[repr(align(16))]
+struct AlignContainsPacked {
+ a: Packed,
+ b: Packed,
+}
+
+impl Align16 {
+ // return aligned type
+ pub fn new(i: i32) -> Align16 {
+ Align16(i)
+ }
+ // pass aligned type
+ pub fn consume(a: Align16) -> i32 {
+ a.0
+ }
+}
+
+const CONST_ALIGN16: Align16 = Align16(7);
+static STATIC_ALIGN16: Align16 = Align16(8);
+
+// Check the actual address is aligned
+fn is_aligned_to<T>(p: &T, align: usize) -> bool {
+ let addr = p as *const T as usize;
+ (addr & (align - 1)) == 0
+}
+
+pub fn main() {
+ // check alignment and size by type and value
+ assert_eq!(mem::align_of::<Align16>(), 16);
+ assert_eq!(mem::size_of::<Align16>(), 16);
+
+ let a = Align16(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+
+ assert!(is_aligned_to(&a, 16));
+
+ // lowering should have no effect
+ assert_eq!(mem::align_of::<Align1>(), 4);
+ assert_eq!(mem::size_of::<Align1>(), 4);
+ let a = Align1(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 4);
+ assert_eq!(mem::size_of_val(&a), 4);
+ assert!(is_aligned_to(&a, 4));
+
+ // when multiple attributes are specified the max should be used
+ assert_eq!(mem::align_of::<AlignMany>(), 16);
+ assert_eq!(mem::size_of::<AlignMany>(), 16);
+ let a = AlignMany(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 16));
+
+ // raising alignment should not reduce size
+ assert_eq!(mem::align_of::<Align8Many>(), 8);
+ assert_eq!(mem::size_of::<Align8Many>(), 16);
+ let a = Align8Many { a: 1, b: 2, c: 3, d: 4 };
+ assert_eq!(a.a, 1);
+ assert_eq!(mem::align_of_val(&a), 8);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 8));
+
+ // return type
+ let a = Align16::new(1);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert_eq!(a.0, 1);
+ assert!(is_aligned_to(&a, 16));
+ assert_eq!(Align16::consume(a), 1);
+
+ // check const alignment, size and value
+ assert_eq!(mem::align_of_val(&CONST_ALIGN16), 16);
+ assert_eq!(mem::size_of_val(&CONST_ALIGN16), 16);
+ assert_eq!(CONST_ALIGN16.0, 7);
+ assert!(is_aligned_to(&CONST_ALIGN16, 16));
+
+ // check global static alignment, size and value
+ assert_eq!(mem::align_of_val(&STATIC_ALIGN16), 16);
+ assert_eq!(mem::size_of_val(&STATIC_ALIGN16), 16);
+ assert_eq!(STATIC_ALIGN16.0, 8);
+ assert!(is_aligned_to(&STATIC_ALIGN16, 16));
+
+ // Note that the size of Nested may change if struct field re-ordering is enabled
+ assert_eq!(mem::align_of::<Nested>(), 16);
+ assert_eq!(mem::size_of::<Nested>(), 48);
+ let a = Nested{ a: 1, b: 2, c: Align16(3), d: 4};
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::align_of_val(&a.b), 4);
+ assert_eq!(mem::align_of_val(&a.c), 16);
+ assert_eq!(mem::size_of_val(&a), 48);
+ assert!(is_aligned_to(&a, 16));
+ // check the correct fields are indexed
+ assert_eq!(a.a, 1);
+ assert_eq!(a.b, 2);
+ assert_eq!(a.c.0, 3);
+ assert_eq!(a.d, 4);
+
+ // enum should be aligned to max alignment
+ assert_eq!(mem::align_of::<Enum>(), 16);
+ assert_eq!(mem::align_of_val(&Enum::B(Align16(0))), 16);
+ let e = Enum::B(Align16(15));
+ match e {
+ Enum::B(ref a) => {
+ assert_eq!(a.0, 15);
+ assert_eq!(mem::align_of_val(a), 16);
+ assert_eq!(mem::size_of_val(a), 16);
+ },
+ _ => ()
+ }
+ assert!(is_aligned_to(&e, 16));
+
+ // arrays of aligned elements should also be aligned
+ assert_eq!(mem::align_of::<[Align16;2]>(), 16);
+ assert_eq!(mem::size_of::<[Align16;2]>(), 32);
+
+ let a = [Align16(0), Align16(1)];
+ assert_eq!(mem::align_of_val(&a[0]), 16);
+ assert_eq!(mem::align_of_val(&a[1]), 16);
+ assert!(is_aligned_to(&a, 16));
+
+ // check heap value is aligned
+ assert_eq!(mem::align_of_val(Box::new(Align16(0)).as_ref()), 16);
+
+ // check heap array is aligned
+ let a = vec!(Align16(0), Align16(1));
+ assert_eq!(mem::align_of_val(&a[0]), 16);
+ assert_eq!(mem::align_of_val(&a[1]), 16);
+
+ assert_eq!(mem::align_of::<AlignContainsPacked>(), 16);
+ assert_eq!(mem::size_of::<AlignContainsPacked>(), 16);
+ let a = AlignContainsPacked { a: Packed(1), b: Packed(2) };
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::align_of_val(&a.a), 1);
+ assert_eq!(mem::align_of_val(&a.b), 1);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 16));
+}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "lib"]
+
+#[repr(u32)]
+pub enum Foo {
+ Foo = Private::Variant as u32
+}
+
+#[repr(u8)]
+enum Private {
+ Variant = 42
+}
+
+#[inline(always)]
+pub fn foo() -> Foo {
+ Foo::Foo
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(const_fn)]
+
+#[derive(PartialEq, Eq)]
+enum Cake {
+ BlackForest,
+ Marmor,
+}
+use Cake::*;
+
+const BOO: (Cake, Cake) = (Marmor, BlackForest);
+const FOO: Cake = BOO.1;
+
+const fn foo() -> Cake {
+ Marmor
+}
+
+const WORKS: Cake = Marmor;
+
+const GOO: Cake = foo();
+
+fn main() {
+ match BlackForest {
+ FOO => println!("hi"),
+ GOO => println!("meh"),
+ WORKS => println!("möp"),
+ _ => println!("bye"),
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Note: This test was used to demonstrate #5873 (now #23898).
+
+enum State { ST_NULL, ST_WHITESPACE }
+
+fn main() {
+ [State::ST_NULL; (State::ST_WHITESPACE as usize)];
+}
// write_volatile causes an LLVM assert with composite types
+// ignore-emscripten See #41299: probably a bad optimization
+
#![feature(volatile)]
use std::ptr::{read_volatile, write_volatile};
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:issue-41394.rs
+
+extern crate issue_41394 as lib;
+
+fn main() {
+ assert_eq!(lib::foo() as u32, 42);
+}
// except according to those terms.
// ignore-emscripten no threads support
-#![feature(rustc_attrs, zero_one)]
+#![feature(rustc_attrs)]
-use std::num::Zero;
use std::thread;
+trait Int {
+ fn zero() -> Self;
+ fn one() -> Self;
+}
+macro_rules! doit {
+ ($($t:ident)*) => ($(impl Int for $t {
+ fn zero() -> $t { 0 }
+ fn one() -> $t { 1 }
+ })*)
+}
+doit! { i8 i16 i32 i64 isize }
+
macro_rules! check {
($($e:expr),*) => {
$(assert!(thread::spawn({
fn main() {
check![
- isize::min_value() / -1,
- i8::min_value() / -1,
- i16::min_value() / -1,
- i32::min_value() / -1,
- i64::min_value() / -1,
+ isize::min_value() / -isize::one(),
+ i8::min_value() / -i8::one(),
+ i16::min_value() / -i16::one(),
+ i32::min_value() / -i32::one(),
+ i64::min_value() / -i64::one(),
1isize / isize::zero(),
1i8 / i8::zero(),
1i16 / i16::zero(),
1i32 / i32::zero(),
1i64 / i64::zero(),
- isize::min_value() % -1,
- i8::min_value() % -1,
- i16::min_value() % -1,
- i32::min_value() % -1,
- i64::min_value() % -1,
+ isize::min_value() % -isize::one(),
+ i8::min_value() % -i8::one(),
+ i16::min_value() % -i16::one(),
+ i32::min_value() % -i32::one(),
+ i64::min_value() % -i64::one(),
1isize % isize::zero(),
1i8 % i8::zero(),
1i16 % i16::zero(),
exit_success_if_unwind::bar(do_panic);
}
}
- let s = Command::new(env::args_os().next().unwrap()).arg("foo").status();
+
+ let mut cmd = Command::new(env::args_os().next().unwrap());
+ cmd.arg("foo");
+
+
+ // ARMv6 hanges while printing the backtrace, see #41004
+ if cfg!(target_arch = "arm") && cfg!(target_env = "gnu") {
+ cmd.env("RUST_BACKTRACE", "0");
+ }
+
+ let s = cmd.status();
assert!(s.unwrap().code() != Some(0));
}
panic!("try to catch me");
}
}
- let s = Command::new(env::args_os().next().unwrap()).arg("foo").status();
+
+ let mut cmd = Command::new(env::args_os().next().unwrap());
+ cmd.arg("foo");
+
+ // ARMv6 hanges while printing the backtrace, see #41004
+ if cfg!(target_arch = "arm") && cfg!(target_env = "gnu") {
+ cmd.env("RUST_BACKTRACE", "0");
+ }
+
+ let s = cmd.status();
assert!(s.unwrap().code() != Some(0));
}
#![allow(warnings)]
#![feature(collections)]
-#![feature(drain, enumset, collections_bound, btree_range, vecmap)]
+#![feature(drain, collections_bound, btree_range, vecmap)]
extern crate collections;
use collections::BinaryHeap;
use collections::{BTreeMap, BTreeSet};
-use collections::EnumSet;
use collections::LinkedList;
use collections::String;
use collections::Vec;
use std::collections::HashSet;
use collections::Bound::Included;
-use collections::enum_set::CLike;
use std::mem;
fn is_sync<T>(_: T) where T: Sync {}
all_sync_send!(LinkedList::<usize>::new(), iter, iter_mut, into_iter);
- #[derive(Copy, Clone)]
- #[repr(usize)]
- #[allow(dead_code)]
- enum Foo { A, B, C }
- impl CLike for Foo {
- fn to_usize(&self) -> usize {
- *self as usize
- }
-
- fn from_usize(v: usize) -> Foo {
- unsafe { mem::transmute(v) }
- }
- }
- all_sync_send!(EnumSet::<Foo>::new(), iter);
-
all_sync_send!(VecDeque::<usize>::new(), iter, iter_mut, into_iter);
is_sync_send!(VecDeque::<usize>::new(), drain(..));
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(core_float)]
-#![feature(float_extras)]
#![feature(untagged_unions)]
extern crate core;
-use core::num::Float;
+use core::f32;
union U {
a: (u8, u8),
assert_eq!(u.a, (2, 2));
let mut w = W { a: 0b0_11111111_00000000000000000000000 };
- assert_eq!(w.b, f32::infinity());
- w.b = f32::neg_infinity();
+ assert_eq!(w.b, f32::INFINITY);
+ w.b = f32::NEG_INFINITY;
assert_eq!(w.a, 0b1_11111111_00000000000000000000000);
}
}
// except according to those terms.
-#![feature(binary_heap_extras)]
-
use std::collections::BinaryHeap;
fn make_pq() -> BinaryHeap<isize> {
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![crate_name = "foo"]
-
-// ignore-tidy-end-whitespace
-
-// @has foo/fn.f.html
-// @has - '<p>hard break:<br />'
-// @has - 'after hard break</p>'
-/// hard break:
-/// after hard break
-pub fn f() {}
+++ /dev/null
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![crate_name = "foo"]
-
-// ignore-tidy-linelength
-
-// @has foo/fn.f.html
-// @has - '<p>markdown test</p>'
-// @has - '<p>this is a <a href="https://example.com" title="this is a title">link</a>.</p>'
-// @has - '<hr />'
-// @has - '<p>a footnote<sup id="supref1"><a href="#ref1">1</a></sup>.</p>'
-// @has - '<p>another footnote<sup id="supref2"><a href="#ref2">2</a></sup>.</p>'
-// @has - '<p><img src="https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png" alt="Rust" /></p>'
-// @has - '<div class="footnotes"><hr><ol><li id="ref1">'
-// @has - '<p>Thing <a href="#supref1" rev="footnote">↩</a></p></li><li id="ref2">'
-// @has - '<p>Another Thing <a href="#supref2" rev="footnote">↩</a></p></li></ol></div>'
-/// markdown test
-///
-/// this is a [link].
-///
-/// [link]: https://example.com "this is a title"
-///
-/// -----------
-///
-/// a footnote[^footnote].
-///
-/// another footnote[^footnotebis].
-///
-/// [^footnote]: Thing
-///
-///
-/// [^footnotebis]: Another Thing
-///
-///
-/// ![Rust](https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png)
-pub fn f() {}
15 | fn renew<'b: 'a>(self) -> &'b mut [T];
| -------------------------------------- definition of `renew` from trait
...
-19 | fn renew<'b: 'a>(self) -> &'b mut [T] where 'a: 'b {
- | _____^ starting here...
+19 | / fn renew<'b: 'a>(self) -> &'b mut [T] where 'a: 'b {
20 | | //~^ ERROR E0276
21 | | &mut self[..]
22 | | }
- | |_____^ ...ending here: impl has extra requirement `'a: 'b`
+ | |_____^ impl has extra requirement `'a: 'b`
error: aborting due to previous error
19 | fn zip<B, U: Iterator<U>>(self, other: U) -> ZipIterator<Self, U>;
| ------------------------------------------------------------------ definition of `zip` from trait
...
-23 | fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
- | _____^ starting here...
+23 | / fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
24 | | //~^ ERROR E0276
25 | | ZipIterator{a: self, b: other}
26 | | }
- | |_____^ ...ending here: impl has extra requirement `U: Iterator<B>`
+ | |_____^ impl has extra requirement `U: Iterator<B>`
error: aborting due to previous error
--> $DIR/issue-40006.rs:11:9
|
11 | impl X {
- | _________^ starting here...
+ | _________^
12 | | Y
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: missing `fn`, `type`, or `const` for trait-item declaration
--> $DIR/issue-40006.rs:17:10
|
17 | trait X {
- | __________^ starting here...
+ | __________^
18 | | X() {}
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: expected `[`, found `#`
--> $DIR/issue-40006.rs:19:17
--> $DIR/issue-40006.rs:19:21
|
19 | fn xxx() { ### }
- | _____________________^ starting here...
+ | _____________________^
20 | | L = M;
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: missing `fn`, `type`, or `const` for trait-item declaration
--> $DIR/issue-40006.rs:20:11
|
20 | L = M;
- | ___________^ starting here...
+ | ___________^
21 | | Z = { 2 + 3 };
- | |____^ ...ending here: missing `fn`, `type`, or `const`
+ | |____^ missing `fn`, `type`, or `const`
error: expected one of `const`, `extern`, `fn`, `type`, `unsafe`, or `}`, found `;`
--> $DIR/issue-40006.rs:21:18
error[E0569]: requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
--> $DIR/dropck-eyepatch-implies-unsafe-impl.rs:32:1
|
-32 | impl<#[may_dangle] A, B: fmt::Debug> Drop for Pt<A, B> {
- | _^ starting here...
+32 | / impl<#[may_dangle] A, B: fmt::Debug> Drop for Pt<A, B> {
33 | | //~^ ERROR requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
34 | |
35 | | // (unsafe to access self.1 due to #[may_dangle] on A)
36 | | fn drop(&mut self) { println!("drop {} {:?}", self.0, self.2); }
37 | | }
- | |_^ ...ending here
+ | |_^
error[E0569]: requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
--> $DIR/dropck-eyepatch-implies-unsafe-impl.rs:38:1
|
-38 | impl<#[may_dangle] 'a, 'b, B: fmt::Debug> Drop for Pr<'a, 'b, B> {
- | _^ starting here...
+38 | / impl<#[may_dangle] 'a, 'b, B: fmt::Debug> Drop for Pr<'a, 'b, B> {
39 | | //~^ ERROR requires an `unsafe impl` declaration due to `#[may_dangle]` attribute
40 | |
41 | | // (unsafe to access self.1 due to #[may_dangle] on 'a)
42 | | fn drop(&mut self) { println!("drop {} {:?}", self.0, self.2); }
43 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to 2 previous errors
error: reached the type-length limit while instantiating `<T as Foo><(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(&(), &()), &(&()...`
--> $DIR/issue-37311.rs:23:5
|
-23 | fn recurse(&self) {
- | _____^ starting here...
+23 | / fn recurse(&self) {
24 | | (self, self).recurse();
25 | | }
- | |_____^ ...ending here
+ | |_____^
|
= note: consider adding a `#![type_length_limit="2097152"]` attribute to your crate
--> $DIR/ex1-return-one-existing-name-if-else.rs:11:44
|
11 | fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
12 | | if x > y { x } else { y }
13 | | }
- | |_^ ...ending here
+ | |_^
note: ...but the borrowed content is only valid for the anonymous lifetime #1 defined on the body at 11:43
--> $DIR/ex1-return-one-existing-name-if-else.rs:11:44
|
11 | fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
12 | | if x > y { x } else { y }
13 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to previous error
--> $DIR/ex2a-push-one-existing-name.rs:15:52
|
15 | fn foo<'a>(x: &mut Vec<Ref<'a, i32>>, y: Ref<i32>) {
- | ____________________________________________________^ starting here...
+ | ____________________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
note: ...does not necessarily outlive the lifetime 'a as defined on the body at 15:51
--> $DIR/ex2a-push-one-existing-name.rs:15:52
|
15 | fn foo<'a>(x: &mut Vec<Ref<'a, i32>>, y: Ref<i32>) {
- | ____________________________________________________^ starting here...
+ | ____________________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to previous error
--> $DIR/ex2b-push-no-existing-names.rs:15:44
|
15 | fn foo(x: &mut Vec<Ref<i32>>, y: Ref<i32>) {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
note: ...does not necessarily outlive the anonymous lifetime #2 defined on the body at 15:43
--> $DIR/ex2b-push-no-existing-names.rs:15:44
|
15 | fn foo(x: &mut Vec<Ref<i32>>, y: Ref<i32>) {
- | ____________________________________________^ starting here...
+ | ____________________________________________^
16 | | x.push(y);
17 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to previous error
--> $DIR/ex2c-push-inference-variable.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let z = Ref { data: y.data };
17 | | x.push(z);
18 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that reference does not outlive borrowed content
--> $DIR/ex2c-push-inference-variable.rs:16:25
|
--> $DIR/ex2c-push-inference-variable.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let z = Ref { data: y.data };
17 | | x.push(z);
18 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that expression is assignable (expected Ref<'b, _>, found Ref<'_, _>)
--> $DIR/ex2c-push-inference-variable.rs:17:12
|
--> $DIR/ex2d-push-inference-variable-2.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | a.push(b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that reference does not outlive borrowed content
--> $DIR/ex2d-push-inference-variable-2.rs:17:25
|
--> $DIR/ex2d-push-inference-variable-2.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | a.push(b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that expression is assignable (expected &mut std::vec::Vec<Ref<'_, i32>>, found &mut std::vec::Vec<Ref<'b, i32>>)
--> $DIR/ex2d-push-inference-variable-2.rs:16:33
|
--> $DIR/ex2e-push-inference-variable-3.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | Vec::push(a, b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that reference does not outlive borrowed content
--> $DIR/ex2e-push-inference-variable-3.rs:17:25
|
--> $DIR/ex2e-push-inference-variable-3.rs:15:67
|
15 | fn foo<'a, 'b, 'c>(x: &'a mut Vec<Ref<'b, i32>>, y: Ref<'c, i32>) {
- | ___________________________________________________________________^ starting here...
+ | ___________________________________________________________________^
16 | | let a: &mut Vec<Ref<i32>> = x;
17 | | let b = Ref { data: y.data };
18 | | Vec::push(a, b);
19 | | }
- | |_^ ...ending here
+ | |_^
note: ...so that expression is assignable (expected &mut std::vec::Vec<Ref<'_, i32>>, found &mut std::vec::Vec<Ref<'b, i32>>)
--> $DIR/ex2e-push-inference-variable-3.rs:16:33
|
error[E0308]: mismatched types
--> $DIR/abridged.rs:42:5
|
-42 | X {
- | _____^ starting here...
+42 | / X {
43 | | x: X {
44 | | x: "".to_string(),
45 | | y: 2,
46 | | },
47 | | y: 3,
48 | | }
- | |_____^ ...ending here: expected struct `std::string::String`, found integral variable
+ | |_____^ expected struct `std::string::String`, found integral variable
|
= note: expected type `X<X<_, std::string::String>, std::string::String>`
found type `X<X<_, {integer}>, {integer}>`
error[E0308]: mismatched types
--> $DIR/abridged.rs:52:5
|
-52 | X {
- | _____^ starting here...
+52 | / X {
53 | | x: X {
54 | | x: "".to_string(),
55 | | y: 2,
56 | | },
57 | | y: "".to_string(),
58 | | }
- | |_____^ ...ending here: expected struct `std::string::String`, found integral variable
+ | |_____^ expected struct `std::string::String`, found integral variable
|
= note: expected type `X<X<_, std::string::String>, _>`
found type `X<X<_, {integer}>, _>`
--> $DIR/main.rs:12:18
|
12 | let x: u32 = (
- | __________________^ starting here...
+ | __________________^
13 | | );
- | |_____^ ...ending here: expected u32, found ()
+ | |_____^ expected u32, found ()
|
= note: expected type `u32`
found type `()`
error[E0046]: not all trait items implemented, missing: `CONSTANT`, `Type`, `method`
--> $DIR/m2.rs:20:1
|
-20 | impl m1::X for X {
- | _^ starting here...
+20 | / impl m1::X for X {
21 | | }
- | |_^ ...ending here: missing `CONSTANT`, `Type`, `method` in implementation
+ | |_^ missing `CONSTANT`, `Type`, `method` in implementation
|
= note: `CONSTANT` from trait: `const CONSTANT: u32;`
= note: `Type` from trait: `type Type;`
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z print-type-sizes
+
+// This file illustrates how padding is handled: alignment
+// requirements can lead to the introduction of padding, either before
+// fields or at the end of the structure as a whole.
+//
+// It avoids using u64/i64 because on some targets that is only 4-byte
+// aligned (while on most it is 8-byte aligned) and so the resulting
+// padding and overall computed sizes can be quite different.
+#![feature(attr_literals)]
+#![feature(repr_align)]
+#![allow(dead_code)]
+
+#[repr(align(16))]
+#[derive(Default)]
+struct A(i32);
+
+enum E {
+ A(i32),
+ B(A)
+}
+
+#[derive(Default)]
+struct S {
+ a: i32,
+ b: i32,
+ c: A,
+ d: i8,
+}
+
+fn main() {
+ let _s: S = Default::default();
+}
--- /dev/null
+print-type-size type: `E`: 32 bytes, alignment: 16 bytes
+print-type-size discriminant: 4 bytes
+print-type-size variant `A`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size variant `B`: 28 bytes
+print-type-size padding: 12 bytes
+print-type-size field `.0`: 16 bytes, alignment: 16 bytes
+print-type-size type: `S`: 32 bytes, alignment: 16 bytes
+print-type-size field `.c`: 16 bytes
+print-type-size field `.a`: 4 bytes
+print-type-size field `.b`: 4 bytes
+print-type-size field `.d`: 1 bytes
+print-type-size end padding: 7 bytes
+print-type-size type: `A`: 16 bytes, alignment: 16 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size end padding: 12 bytes
//~| NOTE types differ in mutability
//~| NOTE expected type `&mut std::string::String`
//~| NOTE found type `&std::string::String`
- //~| HELP try with `&mut y`
test2(&y);
//~^ ERROR E0308
//~| NOTE types differ in mutability
|
= note: expected type `&str`
found type `std::string::String`
- = help: here are some functions which might fulfill your needs:
- - .as_str()
- - .trim()
- - .trim_left()
- - .trim_right()
+ = help: try with `&String::new()`
error[E0308]: mismatched types
--> $DIR/coerce-suggestions.rs:30:10
found type `&std::string::String`
error[E0308]: mismatched types
- --> $DIR/coerce-suggestions.rs:36:11
+ --> $DIR/coerce-suggestions.rs:35:11
|
-36 | test2(&y);
+35 | test2(&y);
| ^^ types differ in mutability
|
= note: expected type `&mut i32`
found type `&std::string::String`
error[E0308]: mismatched types
- --> $DIR/coerce-suggestions.rs:42:9
+ --> $DIR/coerce-suggestions.rs:41:9
|
-42 | f = box f;
+41 | f = box f;
| ^^^^^ cyclic type of infinite size
|
= note: expected type `_`
16 | fn bar(&self);
| -------------- `bar` from trait
...
-22 | impl Foo for FooConstForMethod {
- | _^ starting here...
+22 | / impl Foo for FooConstForMethod {
23 | | //~^ ERROR E0046
24 | | //~| NOTE missing `bar` in implementation
25 | | const bar: u64 = 1;
... |
28 | | const MY_CONST: u32 = 1;
29 | | }
- | |_^ ...ending here: missing `bar` in implementation
+ | |_^ missing `bar` in implementation
error[E0324]: item `MY_CONST` is an associated method, which doesn't match its trait `Foo`
--> $DIR/impl-wrong-item-for-trait.rs:37:5
17 | const MY_CONST: u32;
| -------------------- `MY_CONST` from trait
...
-33 | impl Foo for FooMethodForConst {
- | _^ starting here...
+33 | / impl Foo for FooMethodForConst {
34 | | //~^ ERROR E0046
35 | | //~| NOTE missing `MY_CONST` in implementation
36 | | fn bar(&self) {}
... |
39 | | //~| NOTE does not match trait
40 | | }
- | |_^ ...ending here: missing `MY_CONST` in implementation
+ | |_^ missing `MY_CONST` in implementation
error[E0325]: item `bar` is an associated type, which doesn't match its trait `Foo`
--> $DIR/impl-wrong-item-for-trait.rs:47:5
16 | fn bar(&self);
| -------------- `bar` from trait
...
-44 | impl Foo for FooTypeForMethod {
- | _^ starting here...
+44 | / impl Foo for FooTypeForMethod {
45 | | //~^ ERROR E0046
46 | | //~| NOTE missing `bar` in implementation
47 | | type bar = u64;
... |
50 | | const MY_CONST: u32 = 1;
51 | | }
- | |_^ ...ending here: missing `bar` in implementation
+ | |_^ missing `bar` in implementation
error[E0046]: not all trait items implemented, missing: `fmt`
--> $DIR/impl-wrong-item-for-trait.rs:53:1
|
-53 | impl Debug for FooTypeForMethod {
- | _^ starting here...
+53 | / impl Debug for FooTypeForMethod {
54 | | }
- | |_^ ...ending here: missing `fmt` in implementation
+ | |_^ missing `fmt` in implementation
|
= note: `fmt` from trait: `fn(&Self, &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error>`
error[E0046]: not all trait items implemented, missing: `Item`
--> $DIR/issue-23729.rs:20:9
|
-20 | impl Iterator for Recurrence {
- | _________^ starting here...
+20 | / impl Iterator for Recurrence {
21 | | //~^ ERROR E0046
22 | | //~| NOTE missing `Item` in implementation
23 | | //~| NOTE `Item` from trait: `type Item;`
... |
36 | | }
37 | | }
- | |_________^ ...ending here: missing `Item` in implementation
+ | |_________^ missing `Item` in implementation
|
= note: `Item` from trait: `type Item;`
error[E0046]: not all trait items implemented, missing: `Output`
--> $DIR/issue-23827.rs:36:1
|
-36 | impl<C: Component> FnOnce<(C,)> for Prototype {
- | _^ starting here...
+36 | / impl<C: Component> FnOnce<(C,)> for Prototype {
37 | | //~^ ERROR E0046
38 | | //~| NOTE missing `Output` in implementation
39 | | //~| NOTE `Output` from trait: `type Output;`
... |
42 | | }
43 | | }
- | |_^ ...ending here: missing `Output` in implementation
+ | |_^ missing `Output` in implementation
|
= note: `Output` from trait: `type Output;`
error[E0046]: not all trait items implemented, missing: `Target`
--> $DIR/issue-24356.rs:30:9
|
-30 | impl Deref for Thing {
- | _________^ starting here...
+30 | / impl Deref for Thing {
31 | | //~^ ERROR E0046
32 | | //~| NOTE missing `Target` in implementation
33 | | //~| NOTE `Target` from trait: `type Target;`
34 | | fn deref(&self) -> i8 { self.0 }
35 | | }
- | |_________^ ...ending here: missing `Target` in implementation
+ | |_________^ missing `Target` in implementation
|
= note: `Target` from trait: `type Target;`
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::net::TcpListener;
+use std::net::TcpStream;
+use std::io::{self, Read, Write};
+
+fn handle_client(stream: TcpStream) -> io::Result<()> {
+ stream.write_fmt(format!("message received"))
+}
+
+fn main() {
+ if let Ok(listener) = TcpListener::bind("127.0.0.1:8080") {
+ for incoming in listener.incoming() {
+ if let Ok(stream) = incoming {
+ handle_client(stream);
+ }
+ }
+ }
+}
--- /dev/null
+error[E0308]: mismatched types
+ --> $DIR/issue-33884.rs:16:22
+ |
+16 | stream.write_fmt(format!("message received"))
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected struct `std::fmt::Arguments`, found struct `std::string::String`
+ |
+ = note: expected type `std::fmt::Arguments<'_>`
+ found type `std::string::String`
+ = note: this error originates in a macro outside of the current crate
+
+error: aborting due to previous error
+
note: candidate #1 is defined in an impl for the type `Myisize`
--> $DIR/issue-7575.rs:51:5
|
-51 | fn fff(i: isize) -> isize { //~ NOTE candidate
- | _____^ starting here...
+51 | / fn fff(i: isize) -> isize { //~ NOTE candidate
52 | | i
53 | | }
- | |_____^ ...ending here
+ | |_____^
error: no method named `is_str` found for type `T` in the current scope
--> $DIR/issue-7575.rs:85:7
note: candidate #1 is defined in the trait `ManyImplTrait`
--> $DIR/issue-7575.rs:57:5
|
-57 | fn is_str() -> bool { //~ NOTE candidate
- | _____^ starting here...
+57 | / fn is_str() -> bool { //~ NOTE candidate
58 | | false
59 | | }
- | |_____^ ...ending here
+ | |_____^
= help: to disambiguate the method call, write `ManyImplTrait::is_str(t)` instead
= help: items from traits can only be used if the trait is implemented and in scope; the following trait defines an item `is_str`, perhaps you need to implement it:
= help: candidate #1: `ManyImplTrait`
error: unnecessary `unsafe` block
--> $DIR/lint-unused-unsafe.rs:33:9
|
-33 | unsafe { //~ ERROR: unnecessary `unsafe` block
- | _________^ starting here...
+33 | / unsafe { //~ ERROR: unnecessary `unsafe` block
34 | | unsf()
35 | | }
- | |_________^ ...ending here: unnecessary `unsafe` block
+ | |_________^ unnecessary `unsafe` block
|
note: because it's nested under this `unsafe` block
--> $DIR/lint-unused-unsafe.rs:32:5
|
-32 | unsafe { // don't put the warning here
- | _____^ starting here...
+32 | / unsafe { // don't put the warning here
33 | | unsafe { //~ ERROR: unnecessary `unsafe` block
34 | | unsf()
35 | | }
36 | | }
- | |_____^ ...ending here
+ | |_____^
error: unnecessary `unsafe` block
--> $DIR/lint-unused-unsafe.rs:39:5
|
-39 | unsafe { //~ ERROR: unnecessary `unsafe` block
- | _____^ starting here...
+39 | / unsafe { //~ ERROR: unnecessary `unsafe` block
40 | | unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
43 | | }
- | |_____^ ...ending here: unnecessary `unsafe` block
+ | |_____^ unnecessary `unsafe` block
|
note: because it's nested under this `unsafe` fn
--> $DIR/lint-unused-unsafe.rs:38:1
|
-38 | unsafe fn bad7() {
- | _^ starting here...
+38 | / unsafe fn bad7() {
39 | | unsafe { //~ ERROR: unnecessary `unsafe` block
40 | | unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
43 | | }
44 | | }
- | |_^ ...ending here
+ | |_^
error: unnecessary `unsafe` block
--> $DIR/lint-unused-unsafe.rs:40:9
|
-40 | unsafe { //~ ERROR: unnecessary `unsafe` block
- | _________^ starting here...
+40 | / unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
- | |_________^ ...ending here: unnecessary `unsafe` block
+ | |_________^ unnecessary `unsafe` block
|
note: because it's nested under this `unsafe` fn
--> $DIR/lint-unused-unsafe.rs:38:1
|
-38 | unsafe fn bad7() {
- | _^ starting here...
+38 | / unsafe fn bad7() {
39 | | unsafe { //~ ERROR: unnecessary `unsafe` block
40 | | unsafe { //~ ERROR: unnecessary `unsafe` block
41 | | unsf()
42 | | }
43 | | }
44 | | }
- | |_^ ...ending here
+ | |_^
error: aborting due to 8 previous errors
error[E0072]: recursive type `ListNode` has infinite size
--> $DIR/multiline-span-E0072.rs:12:1
|
-12 | struct
- | _^ starting here...
+12 | / struct
13 | | ListNode
14 | | {
15 | | head: u8,
16 | | tail: Option<ListNode>,
17 | | }
- | |_^ ...ending here: recursive type has infinite size
+ | |_^ recursive type has infinite size
|
= help: insert indirection (e.g., a `Box`, `Rc`, or `&`) at some point to make `ListNode` representable
--> $DIR/multiline-span-simple.rs:23:9
|
23 | foo(1 as u32 +
- | _________^ starting here...
+ | _________^
24 | |
25 | | bar(x,
26 | |
27 | | y),
- | |______________^ ...ending here: the trait `std::ops::Add<()>` is not implemented for `u32`
+ | |______________^ the trait `std::ops::Add<()>` is not implemented for `u32`
|
= note: no implementation for `u32 + ()`
error[E0282]: type annotations needed
--> $DIR/issue-40294.rs:15:1
|
-15 | fn foo<'a,'b,T>(x: &'a T, y: &'b T)
- | _^ starting here...
+15 | / fn foo<'a,'b,T>(x: &'a T, y: &'b T)
16 | | where &'a T : Foo,
17 | | &'b T : Foo
18 | | {
19 | | x.foo();
20 | | y.foo();
21 | | }
- | |_^ ...ending here: cannot infer type for `&'a T`
+ | |_^ cannot infer type for `&'a T`
error: aborting due to previous error
"s390x-unknown-linux-gnu",
"sparc64-unknown-linux-gnu",
"wasm32-unknown-emscripten",
+ "x86_64-linux-android",
"x86_64-apple-darwin",
"x86_64-apple-ios",
"x86_64-pc-windows-gnu",
}
const TEST_REPOS: &'static [Test] = &[
- Test {
- name: "cargo",
- repo: "https://github.com/rust-lang/cargo",
- sha: "0e1e34be7540bdaed4918457654fbf028cf69e56",
- lock: None,
- },
Test {
name: "iron",
repo: "https://github.com/iron/iron",
];
fn main() {
- // One of the projects being tested here is Cargo, and when being tested
- // Cargo will at some point call `nmake.exe` on Windows MSVC. Unfortunately
- // `nmake` will read these two environment variables below and try to
- // intepret them. We're likely being run, however, from MSYS `make` which
- // uses the same variables.
- //
- // As a result, to prevent confusion and errors, we remove these variables
- // from our environment to prevent passing MSYS make flags to nmake, causing
- // it to blow up.
- if cfg!(target_env = "msvc") {
- env::remove_var("MAKE");
- env::remove_var("MAKEFLAGS");
- }
-
let args = env::args().collect::<Vec<_>>();
let ref cargo = args[1];
let out_dir = Path::new(&args[2]);
use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap, ErrorMetadata};
-use rustdoc::html::markdown::{Markdown, PLAYGROUND};
+use rustdoc::html::markdown::{Markdown, PLAYGROUND, RenderType};
use rustc_serialize::json;
enum OutputFormat {
// Description rendered as markdown.
match info.description {
- Some(ref desc) => write!(output, "{}", Markdown(desc))?,
+ Some(ref desc) => write!(output, "{}", Markdown(desc, RenderType::Hoedown))?,
None => write!(output, "<p>No description.</p>\n")?,
}