+Version 1.12.0 (2016-09-29)
+===========================
+
+Highlights
+----------
+
+* [`rustc` translates code to LLVM IR via its own "middle" IR (MIR)]
+ (https://github.com/rust-lang/rust/pull/34096).
+ This translation pass is far simpler than the previous AST->LLVM pass, and
+ creates opportunities to perform new optimizations directly on the MIR. It
+ was previously described [on the Rust blog]
+ (https://blog.rust-lang.org/2016/04/19/MIR.html).
+* [`rustc` presents a new, more readable error format, along with
+ machine-readable JSON error output for use by IDEs]
+ (https://github.com/rust-lang/rust/pull/35401).
+ Most common editors supporting Rust have been updated to work with it. It was
+ previously described [on the Rust blog]
+ (https://blog.rust-lang.org/2016/08/10/Shape-of-errors-to-come.html).
+
+Compiler
+--------
+
+* [`rustc` translates code to LLVM IR via its own "middle" IR (MIR)]
+ (https://github.com/rust-lang/rust/pull/34096).
+ This translation pass is far simpler than the previous AST->LLVM pass, and
+ creates opportunities to perform new optimizations directly on the MIR. It
+ was previously described [on the Rust blog]
+ (https://blog.rust-lang.org/2016/04/19/MIR.html).
+* [Print the Rust target name, not the LLVM target name, with
+ `--print target-list`]
+ (https://github.com/rust-lang/rust/pull/35489)
+* [The computation of `TypeId` is correct in some cases where it was previously
+ producing inconsistent results]
+ (https://github.com/rust-lang/rust/pull/35267)
+* [The `mips-unknown-linux-gnu` target uses hardware floating point by default]
+ (https://github.com/rust-lang/rust/pull/34910)
+* [The `rustc` arguments, `--print target-cpus`, `--print target-features`,
+ `--print relocation-models`, and `--print code-models` print the available
+ options to the `-C target-cpu`, `-C target-feature`, `-C relocation-model` and
+ `-C code-model` code generation arguments]
+ (https://github.com/rust-lang/rust/pull/34845)
+* [`rustc` supports three new MUSL targets on ARM: `arm-unknown-linux-musleabi`,
+ `arm-unknown-linux-musleabihf`, and `armv7-unknown-linux-musleabihf`]
+ (https://github.com/rust-lang/rust/pull/35060).
+ These targets produce statically-linked binaries. There are no binary release
+ builds yet though.
+
+Diagnostics
+-----------
+
+* [`rustc` presents a new, more readable error format, along with
+ machine-readable JSON error output for use by IDEs]
+ (https://github.com/rust-lang/rust/pull/35401).
+ Most common editors supporting Rust have been updated to work with it. It was
+ previously described [on the Rust blog]
+ (https://blog.rust-lang.org/2016/08/10/Shape-of-errors-to-come.html).
+* [In error descriptions, references are now described in plain English,
+ instead of as "&-ptr"]
+ (https://github.com/rust-lang/rust/pull/35611)
+* [In error type descriptions, unknown numeric types are named `{integer}` or
+ `{float}` instead of `_`]
+ (https://github.com/rust-lang/rust/pull/35080)
+* [`rustc` emits a clearer error when inner attributes follow a doc comment]
+ (https://github.com/rust-lang/rust/pull/34676)
+
+Language
+--------
+
+* [`macro_rules!` invocations can be made within `macro_rules!` invocations]
+ (https://github.com/rust-lang/rust/pull/34925)
+* [`macro_rules!` meta-variables are hygienic]
+ (https://github.com/rust-lang/rust/pull/35453)
+* [`macro_rules!` `tt` matchers can be reparsed correctly, making them much more
+ useful]
+ (https://github.com/rust-lang/rust/pull/34908)
+* [`macro_rules!` `stmt` matchers correctly consume the entire contents when
+ inside non-braces invocations]
+ (https://github.com/rust-lang/rust/pull/34886)
+* [Semicolons are properly required as statement delimiters inside
+ `macro_rules!` invocations]
+ (https://github.com/rust-lang/rust/pull/34660)
+* [`cfg_attr` works on `path` attributes]
+ (https://github.com/rust-lang/rust/pull/34546)
+
+Stabilized APIs
+---------------
+
+* [`Cell::as_ptr`]
+ (https://doc.rust-lang.org/std/cell/struct.Cell.html#method.as_ptr)
+* [`RefCell::as_ptr`]
+ (https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.as_ptr)
+* [`IpAddr::is_unspecified`]
+ (https://doc.rust-lang.org/std/net/enum.IpAddr.html#method.is_unspecified)
+* [`IpAddr::is_loopback`]
+ (https://doc.rust-lang.org/std/net/enum.IpAddr.html#method.is_loopback)
+* [`IpAddr::is_multicast`]
+ (https://doc.rust-lang.org/std/net/enum.IpAddr.html#method.is_multicast)
+* [`Ipv4Addr::is_unspecified`]
+ (https://doc.rust-lang.org/std/net/struct.Ipv4Addr.html#method.is_unspecified)
+* [`Ipv6Addr::octets`]
+ (https://doc.rust-lang.org/std/net/struct.Ipv6Addr.html#method.octets)
+* [`LinkedList::contains`]
+ (https://doc.rust-lang.org/std/collections/linked_list/struct.LinkedList.html#method.contains)
+* [`VecDeque::contains`]
+ (https://doc.rust-lang.org/std/collections/vec_deque/struct.VecDeque.html#method.contains)
+* [`ExitStatusExt::from_raw`]
+ (https://doc.rust-lang.org/std/os/unix/process/trait.ExitStatusExt.html#tymethod.from_raw).
+ Both on Unix and Windows.
+* [`Receiver::recv_timeout`]
+ (https://doc.rust-lang.org/std/sync/mpsc/struct.Receiver.html#method.recv_timeout)
+* [`RecvTimeoutError`]
+ (https://doc.rust-lang.org/std/sync/mpsc/enum.RecvTimeoutError.html)
+* [`BinaryHeap::peek_mut`]
+ (https://doc.rust-lang.org/std/collections/binary_heap/struct.BinaryHeap.html#method.peek_mut)
+* [`PeekMut`]
+ (https://doc.rust-lang.org/std/collections/binary_heap/struct.PeekMut.html)
+* [`iter::Product`]
+ (https://doc.rust-lang.org/std/iter/trait.Product.html)
+* [`iter::Sum`]
+ (https://doc.rust-lang.org/std/iter/trait.Sum.html)
+* [`OccupiedEntry::remove_entry`]
+ (https://doc.rust-lang.org/std/collections/btree_map/struct.OccupiedEntry.html#method.remove_entry)
+* [`VacantEntry::into_key`]
+ (https://doc.rust-lang.org/std/collections/btree_map/struct.VacantEntry.html#method.into_key)
+
+Libraries
+---------
+
+* [The `format!` macro and friends now allow a single argument to be formatted
+ in multiple styles]
+ (https://github.com/rust-lang/rust/pull/33642)
+* [The lifetime bounds on `[T]::binary_search_by` and
+ `[T]::binary_search_by_key` have been adjusted to be more flexible]
+ (https://github.com/rust-lang/rust/pull/34762)
+* [`Option` implements `From` for its contained type]
+ (https://github.com/rust-lang/rust/pull/34828)
+* [`Cell`, `RefCell` and `UnsafeCell` implement `From` for their contained type]
+ (https://github.com/rust-lang/rust/pull/35392)
+* [`RwLock` panics if the reader count overflows]
+ (https://github.com/rust-lang/rust/pull/35378)
+* [`vec_deque::Drain`, `hash_map::Drain` and `hash_set::Drain` are covariant]
+ (https://github.com/rust-lang/rust/pull/35354)
+* [`vec::Drain` and `binary_heap::Drain` are covariant]
+ (https://github.com/rust-lang/rust/pull/34951)
+* [`Cow<str>` implements `FromIterator` for `char`, `&str` and `String`]
+ (https://github.com/rust-lang/rust/pull/35064)
+* [Sockets on Linux are correctly closed in subprocesses via `SOCK_CLOEXEC`]
+ (https://github.com/rust-lang/rust/pull/34946)
+* [`hash_map::Entry`, `hash_map::VacantEntry` and `hash_map::OccupiedEntry`
+ implement `Debug`]
+ (https://github.com/rust-lang/rust/pull/34937)
+* [`btree_map::Entry`, `btree_map::VacantEntry` and `btree_map::OccupiedEntry`
+ implement `Debug`]
+ (https://github.com/rust-lang/rust/pull/34885)
+* [`String` implements `AddAssign`]
+ (https://github.com/rust-lang/rust/pull/34890)
+* [Variadic `extern fn` pointers implement the `Clone`, `PartialEq`, `Eq`,
+ `PartialOrd`, `Ord`, `Hash`, `fmt::Pointer`, and `fmt::Debug` traits]
+ (https://github.com/rust-lang/rust/pull/34879)
+* [`FileType` implements `Debug`]
+ (https://github.com/rust-lang/rust/pull/34757)
+* [References to `Mutex` and `RwLock` are unwind-safe]
+ (https://github.com/rust-lang/rust/pull/34756)
+* [`mpsc::sync_channel` `Receiver`s return any available message before
+ reporting a disconnect]
+ (https://github.com/rust-lang/rust/pull/34731)
+* [Unicode definitions have been updated to 9.0]
+ (https://github.com/rust-lang/rust/pull/34599)
+* [`env` iterators implement `DoubleEndedIterator`]
+ (https://github.com/rust-lang/rust/pull/33312)
+
+Cargo
+-----
+
+* [Support local mirrors of registries]
+ (https://github.com/rust-lang/cargo/pull/2857)
+* [Add support for command aliases]
+ (https://github.com/rust-lang/cargo/pull/2679)
+* [Allow `opt-level="s"` / `opt-level="z"` in profile overrides]
+ (https://github.com/rust-lang/cargo/pull/3007)
+* [Make `cargo doc --open --target` work as expected]
+ (https://github.com/rust-lang/cargo/pull/2988)
+* [Speed up noop registry updates]
+ (https://github.com/rust-lang/cargo/pull/2974)
+* [Update OpenSSL]
+ (https://github.com/rust-lang/cargo/pull/2971)
+* [Fix `--panic=abort` with plugins]
+ (https://github.com/rust-lang/cargo/pull/2954)
+* [Always pass `-C metadata` to the compiler]
+ (https://github.com/rust-lang/cargo/pull/2946)
+* [Fix depending on git repos with workspaces]
+ (https://github.com/rust-lang/cargo/pull/2938)
+* [Add a `--lib` flag to `cargo new`]
+ (https://github.com/rust-lang/cargo/pull/2921)
+* [Add `http.cainfo` for custom certs]
+ (https://github.com/rust-lang/cargo/pull/2917)
+* [Indicate the compilation profile after compiling]
+ (https://github.com/rust-lang/cargo/pull/2909)
+* [Allow enabling features for dependencies with `--features`]
+ (https://github.com/rust-lang/cargo/pull/2876)
+* [Add `--jobs` flag to `cargo package`]
+ (https://github.com/rust-lang/cargo/pull/2867)
+* [Add `--dry-run` to `cargo publish`]
+ (https://github.com/rust-lang/cargo/pull/2849)
+* [Add support for `RUSTDOCFLAGS`]
+ (https://github.com/rust-lang/cargo/pull/2794)
+
+Performance
+-----------
+
+* [`panic::catch_unwind` is more optimized]
+ (https://github.com/rust-lang/rust/pull/35444)
+* [`panic::catch_unwind` no longer accesses thread-local storage on entry]
+ (https://github.com/rust-lang/rust/pull/34866)
+
+Tooling
+-------
+
+* [Test binaries now support a `--test-threads` argument to specify the number
+ of threads used to run tests, and which acts the same as the
+ `RUST_TEST_THREADS` environment variable]
+ (https://github.com/rust-lang/rust/pull/35414)
+* [The test runner now emits a warning when tests run over 60 seconds]
+ (https://github.com/rust-lang/rust/pull/35405)
+* [rustdoc: Fix methods in search results]
+ (https://github.com/rust-lang/rust/pull/34752)
+* [`rust-lldb` warns about unsupported versions of LLDB]
+ (https://github.com/rust-lang/rust/pull/34646)
+* [Rust releases now come with source packages that can be installed by rustup
+ via `rustup component add rust-src`]
+ (https://github.com/rust-lang/rust/pull/34366).
+ The resulting source code can be used by tools and IDEs, located in the
+ sysroot under `lib/rustlib/src`.
+
+Misc
+----
+
+* [The compiler can now be built against LLVM 3.9]
+ (https://github.com/rust-lang/rust/pull/35594)
+* Many minor improvements to the documentation.
+* [The Rust exception handling "personality" routine is now written in Rust]
+ (https://github.com/rust-lang/rust/pull/34832)
+
+Compatibility Notes
+-------------------
+
+* [When printing Windows `OsStr`s, unpaired surrogate codepoints are escaped
+ with the lowercase format instead of the uppercase]
+ (https://github.com/rust-lang/rust/pull/35084)
+* [When formatting strings, if "precision" is specified, the "fill",
+ "align" and "width" specifiers are no longer ignored]
+ (https://github.com/rust-lang/rust/pull/34544)
+* [The `Debug` impl for strings no longer escapes all non-ASCII characters]
+ (https://github.com/rust-lang/rust/pull/34485)
+
+
Version 1.11.0 (2016-08-18)
===========================
Performance
-----------
-* [The time complexity of comparing variables for equivalence during type
+* [The time complexity of comparing variables for equivalence during type
unification is reduced from _O_(_n_!) to _O_(_n_)][1.9tu]. This leads
to major compilation time improvement in some scenarios.
* [`ToString` is specialized for `str`, giving it the same performance
CFG_CPUTYPE=$(isainfo -n)
;;
+ Haiku)
+ CFG_OSTYPE=unknown-haiku
+ ;;
+
MINGW*)
# msys' `uname` does not print gcc configuration, but prints msys
# configuration. so we cannot believe `uname -m`:
CFG_CPUTYPE=x86_64
;;
+ BePC)
+ CFG_CPUTYPE=i686
+ ;;
+
*)
err "unknown CPU type: $CFG_CPUTYPE"
esac
valopt infodir "${CFG_PREFIX}/share/info" "install additional info"
valopt llvm-root "" "set LLVM root"
valopt python "" "set path to python"
-valopt nodejs "" "set path to nodejs"
valopt jemalloc-root "" "set directory where libjemalloc_pic.a is located"
valopt build "${DEFAULT_BUILD}" "GNUs ./configure syntax LLVM build triple"
valopt android-cross-path "" "Android NDK standalone path (deprecated)"
err "Found $python_version, but Python 2.7 is required"
fi
-# Checking for node, but not required
-probe CFG_NODEJS nodejs node
-
# If we have no git directory then we are probably a tarball distribution
# and shouldn't attempt to load submodules
if [ ! -e ${CFG_SRC_DIR}.git ]
--- /dev/null
+# i686-unknown-haiku configuration
+CROSS_PREFIX_i686-unknown-haiku=i586-pc-haiku-
+CC_i686-unknown-haiku=$(CC)
+CXX_i686-unknown-haiku=$(CXX)
+CPP_i686-unknown-haiku=$(CPP)
+AR_i686-unknown-haiku=$(AR)
+CFG_LIB_NAME_i686-unknown-haiku=lib$(1).so
+CFG_STATIC_LIB_NAME_i686-unknown-haiku=lib$(1).a
+CFG_LIB_GLOB_i686-unknown-haiku=lib$(1)-*.so
+CFG_LIB_DSYM_GLOB_i686-unknown-haiku=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_i686-unknown-haiku := -m32 $(CFLAGS)
+CFG_GCCISH_CFLAGS_i686-unknown-haiku := -Wall -Werror -g -fPIC -m32 $(CFLAGS)
+CFG_GCCISH_CXXFLAGS_i686-unknown-haiku := -fno-rtti $(CXXFLAGS)
+CFG_GCCISH_LINK_FLAGS_i686-unknown-haiku := -shared -fPIC -ldl -pthread -lrt -g -m32
+CFG_GCCISH_PRE_LIB_FLAGS_i686-unknown-haiku := -Wl,-whole-archive
+CFG_GCCISH_POST_LIB_FLAGS_i686-unknown-haiku := -Wl,-no-whole-archive
+CFG_DEF_SUFFIX_i686-unknown-haiku := .linux.def
+CFG_LLC_FLAGS_i686-unknown-haiku :=
+CFG_INSTALL_NAME_i686-unknown-haiku =
+CFG_EXE_SUFFIX_i686-unknown-haiku =
+CFG_WINDOWSY_i686-unknown-haiku :=
+CFG_UNIXY_i686-unknown-haiku := 1
+CFG_PATH_MUNGE_i686-unknown-haiku := true
+CFG_LDPATH_i686-unknown-haiku :=
+CFG_RUN_i686-unknown-haiku=$(2)
+CFG_RUN_TARG_i686-unknown-haiku=$(call CFG_RUN_i686-unknown-haiku,,$(2))
+CFG_GNU_TRIPLE_i686-unknown-haiku := i686-unknown-haiku
--- /dev/null
+# wasm32-unknown-emscripten configuration
+CC_wasm32-unknown-emscripten=emcc
+CXX_wasm32-unknown-emscripten=em++
+CPP_wasm32-unknown-emscripten=$(CPP)
+AR_wasm32-unknown-emscripten=emar
+CFG_LIB_NAME_wasm32-unknown-emscripten=lib$(1).so
+CFG_STATIC_LIB_NAME_wasm32-unknown-emscripten=lib$(1).a
+CFG_LIB_GLOB_wasm32-unknown-emscripten=lib$(1)-*.so
+CFG_LIB_DSYM_GLOB_wasm32-unknown-emscripten=lib$(1)-*.dylib.dSYM
+CFG_JEMALLOC_CFLAGS_wasm32-unknown-emscripten := -m32 $(CFLAGS)
+CFG_GCCISH_CFLAGS_wasm32-unknown-emscripten := -g -fPIC -m32 -s BINARYEN=1 $(CFLAGS)
+CFG_GCCISH_CXXFLAGS_wasm32-unknown-emscripten := -fno-rtti -s BINARYEN=1 $(CXXFLAGS)
+CFG_GCCISH_LINK_FLAGS_wasm32-unknown-emscripten := -shared -fPIC -ldl -pthread -lrt -g -m32 -s BINARYEN=1
+CFG_GCCISH_DEF_FLAG_wasm32-unknown-emscripten := -Wl,--export-dynamic,--dynamic-list=
+CFG_LLC_FLAGS_wasm32-unknown-emscripten :=
+CFG_INSTALL_NAME_wasm32-unknown-emscripten =
+CFG_EXE_SUFFIX_wasm32-unknown-emscripten =
+CFG_WINDOWSY_wasm32-unknown-emscripten :=
+CFG_UNIXY_wasm32-unknown-emscripten := 1
+CFG_LDPATH_wasm32-unknown-emscripten :=
+CFG_RUN_wasm32-unknown-emscripten=$(2)
+CFG_RUN_TARG_wasm32-unknown-emscripten=$(call CFG_RUN_wasm32-unknown-emscripten,,$(2))
+CFG_GNU_TRIPLE_wasm32-unknown-emscripten := wasm32-unknown-emscripten
+CFG_DISABLE_JEMALLOC_wasm32-unknown-emscripten := 1
--- /dev/null
+# x86_64-unknown-haiku configuration
+CROSS_PREFIX_x86_64-unknown-haiku=x86_64-unknown-haiku-
+CC_x86_64-unknown-haiku=$(CC)
+CXX_x86_64-unknown-haiku=$(CXX)
+CPP_x86_64-unknown-haiku=$(CPP)
+AR_x86_64-unknown-haiku=$(AR)
+CFG_LIB_NAME_x86_64-unknown-haiku=lib$(1).so
+CFG_STATIC_LIB_NAME_x86_64-unknown-haiku=lib$(1).a
+CFG_LIB_GLOB_x86_64-unknown-haiku=lib$(1)-*.so
+CFG_LIB_DSYM_GLOB_x86_64-unknown-haiku=lib$(1)-*.dylib.dSYM
+CFG_CFLAGS_x86_64-unknown-haiku := -m64 $(CFLAGS)
+CFG_GCCISH_CFLAGS_x86_64-unknown-haiku := -Wall -Werror -g -fPIC -m64 $(CFLAGS)
+CFG_GCCISH_CXXFLAGS_x86_64-unknown-haiku := -fno-rtti $(CXXFLAGS)
+CFG_GCCISH_LINK_FLAGS_x86_64-unknown-haiku := -shared -fPIC -ldl -pthread -lrt -g -m64
+CFG_GCCISH_PRE_LIB_FLAGS_x86_64-unknown-haiku := -Wl,-whole-archive
+CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-haiku := -Wl,-no-whole-archive
+CFG_DEF_SUFFIX_x86_64-unknown-haiku := .linux.def
+CFG_LLC_FLAGS_x86_64-unknown-haiku :=
+CFG_INSTALL_NAME_x86_64-unknown-haiku =
+CFG_EXE_SUFFIX_x86_64-unknown-haiku =
+CFG_WINDOWSY_x86_64-unknown-haiku :=
+CFG_UNIXY_x86_64-unknown-haiku := 1
+CFG_PATH_MUNGE_x86_64-unknown-haiku := true
+CFG_LDPATH_x86_64-unknown-haiku :=
+CFG_RUN_x86_64-unknown-haiku=$(2)
+CFG_RUN_TARG_x86_64-unknown-haiku=$(call CFG_RUN_x86_64-unknown-haiku,,$(2))
+CFG_GNU_TRIPLE_x86_64-unknown-haiku := x86_64-unknown-haiku
######################################################################
# The version number
-CFG_RELEASE_NUM=1.13.0
+CFG_RELEASE_NUM=1.14.0
# An optional number to put after the label, e.g. '.2' -> '-beta.2'
# NB Make sure it starts with a dot to conform to semver pre-release
# LLVM macros
######################################################################
-LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl systemz
+LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl systemz jsbackend
LLVM_REQUIRED_COMPONENTS=ipo bitreader bitwriter linker asmparser mcjit \
interpreter instrumentation
else
ifeq ($$(CFG_WINDOWSY_$(3)),1)
LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := PATH
+else
+ifeq ($$(OSTYPE_$(3)),unknown-haiku)
+ LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := LIBRARY_PATH
else
LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := LD_LIBRARY_PATH
endif
endif
+endif
LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3) := \
$$(CURDIR)/$$(HLIB$(1)_H_$(3)):$$(CFG_LLVM_INST_DIR_$(3))/lib
"build_helper 0.1.0",
"cmake 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "gcc 0.3.31 (git+https://github.com/alexcrichton/gcc-rs)",
+ "gcc 0.3.35 (git+https://github.com/alexcrichton/gcc-rs)",
"getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
"md5 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.1.73 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "gcc 0.3.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "gcc"
-version = "0.3.31"
-source = "git+https://github.com/alexcrichton/gcc-rs#b8e2400883f1a2749b323354dad372cdd1c838c7"
+version = "0.3.35"
+source = "git+https://github.com/alexcrichton/gcc-rs#8ff5360b6e0dc4f3c9d3f71036f1ff403c68469d"
[[package]]
name = "gcc"
-version = "0.3.31"
+version = "0.3.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "libc"
-version = "0.2.10"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num_cpus"
-version = "0.2.11"
+version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
[[package]]
name = "toml"
-version = "0.1.28"
+version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
[[package]]
name = "winapi"
-version = "0.2.6"
+version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
"checksum aho-corasick 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2b3fb52b09c1710b961acb35390d514be82e4ac96a9969a8e38565a29b878dc9"
"checksum cmake 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "dfcf5bcece56ef953b8ea042509e9dcbdfe97820b7e20d86beb53df30ed94978"
"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922"
-"checksum gcc 0.3.31 (git+https://github.com/alexcrichton/gcc-rs)" = "<none>"
-"checksum gcc 0.3.31 (registry+https://github.com/rust-lang/crates.io-index)" = "cfe877476e53690ebb0ce7325d0bf43e198d9500291b54b3c65e518de5039b07"
+"checksum gcc 0.3.35 (git+https://github.com/alexcrichton/gcc-rs)" = "<none>"
+"checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312"
"checksum getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
-"checksum libc 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "55f3730be7e803cf350d32061958171731c2395831fbd67a61083782808183e0"
+"checksum libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "23e3757828fa702a20072c37ff47938e9dd331b92fac6e223d26d4b7a55f7ee2"
"checksum md5 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a5539a8dee9b4ae308c9c406a379838b435a8f2c84cf9fedc6d5a576be9888db"
"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
-"checksum num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "51fedae97a05f7353612fe017ab705a37e6db8f4d67c5c6fe739a9e70d6eed09"
+"checksum num_cpus 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "cee7e88156f3f9e19bdd598f8d6c9db7bf4078f99f8381f43a55b09648d1a6e3"
"checksum regex 0.1.73 (registry+https://github.com/rust-lang/crates.io-index)" = "56b7ee9f764ecf412c6e2fff779bca4b22980517ae335a21aeaf4e32625a5df2"
"checksum regex-syntax 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "31040aad7470ad9d8c46302dcffba337bb4289ca5da2e3cd6e37b64109a85199"
"checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b"
"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03"
"checksum thread_local 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "55dd963dbaeadc08aa7266bf7f91c3154a7805e32bb94b820b769d2ef3b4744d"
-"checksum toml 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "fcd27a04ca509aff336ba5eb2abc58d456f52c4ff64d9724d88acb85ead560b6"
+"checksum toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "0590d72182e50e879c4da3b11c6488dae18fccb1ae0c7a3eda18e16795844796"
"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
-"checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4"
+"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
} else {
("RUSTC_REAL", "RUSTC_LIBDIR")
};
- let stage = env::var("RUSTC_STAGE").unwrap();
+ let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set");
- let rustc = env::var_os(rustc).unwrap();
- let libdir = env::var_os(libdir).unwrap();
+ let rustc = env::var_os(rustc).unwrap_or_else(|| panic!("{:?} was not set", rustc));
+ let libdir = env::var_os(libdir).unwrap_or_else(|| panic!("{:?} was not set", libdir));
let mut dylib_path = bootstrap::util::dylib_path();
dylib_path.insert(0, PathBuf::from(libdir));
if let Some(target) = target {
// The stage0 compiler has a special sysroot distinct from what we
// actually downloaded, so we just always pass the `--sysroot` option.
- cmd.arg("--sysroot").arg(env::var_os("RUSTC_SYSROOT").unwrap());
+ cmd.arg("--sysroot").arg(env::var_os("RUSTC_SYSROOT").expect("RUSTC_SYSROOT was not set"));
// When we build Rust dylibs they're all intended for intermediate
// usage, so make sure we pass the -Cprefer-dynamic flag instead of
let is_panic_abort = args.windows(2).any(|a| {
&*a[0] == "--crate-name" && &*a[1] == "panic_abort"
});
- // FIXME(stage0): remove this `stage != "0"` condition
- if is_panic_abort && stage != "0" {
+ if is_panic_abort {
cmd.arg("-C").arg("panic=abort");
}
fn main() {
let args = env::args_os().skip(1).collect::<Vec<_>>();
- let rustdoc = env::var_os("RUSTDOC_REAL").unwrap();
- let libdir = env::var_os("RUSTC_LIBDIR").unwrap();
+ let rustdoc = env::var_os("RUSTDOC_REAL").expect("RUSTDOC_REAL was not set");
+ let libdir = env::var_os("RUSTC_LIBDIR").expect("RUSTC_LIBDIR was not set");
+ let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set");
let mut dylib_path = bootstrap::util::dylib_path();
dylib_path.insert(0, PathBuf::from(libdir));
let mut cmd = Command::new(rustdoc);
cmd.args(&args)
- .arg("--cfg").arg(format!("stage{}", env::var("RUSTC_STAGE").unwrap()))
+ .arg("--cfg").arg(format!("stage{}", stage))
.arg("--cfg").arg("dox")
.env(bootstrap::util::dylib_path_var(),
env::join_paths(&dylib_path).unwrap());
Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e),
})
}
-
cmd.arg("--host").arg(compiler.host);
cmd.arg("--llvm-filecheck").arg(build.llvm_filecheck(&build.config.build));
+ if let Some(nodejs) = build.config.nodejs.as_ref() {
+ cmd.arg("--nodejs").arg(nodejs);
+ }
+
let mut flags = vec!["-Crpath".to_string()];
if build.config.rust_optimize_tests {
flags.push("-O".to_string());
if target.contains("android") {
build.run(cargo.arg("--no-run"));
krate_android(build, compiler, target, mode);
+ } else if target.contains("emscripten") {
+ build.run(cargo.arg("--no-run"));
+ krate_emscripten(build, compiler, target, mode);
} else {
cargo.args(&build.flags.args);
build.run(&mut cargo);
}
}
+/// Runs the already-built crate test binaries for an emscripten (wasm) target
+/// by executing each one under Node.js, since wasm artifacts cannot be run
+/// natively on the host.
+///
+/// Panics if Node.js was not configured in `build.config.nodejs`, or if any
+/// test binary exits with a non-success status.
+fn krate_emscripten(build: &Build,
+                    compiler: &Compiler,
+                    target: &str,
+                    mode: Mode) {
+    let mut tests = Vec::new();
+    let out_dir = build.cargo_out(compiler, mode, target);
+    // Test executables land both directly in the cargo output dir and in
+    // its `deps` subdirectory; collect from both.
+    find_tests(&out_dir, target, &mut tests);
+    find_tests(&out_dir.join("deps"), target, &mut tests);
+
+    for test in tests {
+        let test_file_name = test.to_string_lossy().into_owned();
+        println!("running {}", test_file_name);
+        // An emscripten target without a configured Node.js is a
+        // misconfiguration; fail loudly rather than silently skipping tests.
+        let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured");
+        let status = Command::new(nodejs)
+            .arg(&test_file_name)
+            // Forward the test's stderr so failures are visible in the
+            // bootstrap output.
+            .stderr(::std::process::Stdio::inherit())
+            .status();
+        match status {
+            Ok(status) => {
+                if !status.success() {
+                    panic!("some tests failed");
+                }
+            }
+            Err(e) => panic!(format!("failed to execute command: {}", e)),
+        };
+    }
+ }
+
+
fn find_tests(dir: &Path,
target: &str,
dst: &mut Vec<PathBuf>) {
}
let filename = e.file_name().into_string().unwrap();
if (target.contains("windows") && filename.ends_with(".exe")) ||
- (!target.contains("windows") && !filename.contains(".")) {
+ (!target.contains("windows") && !filename.contains(".")) ||
+ (target.contains("emscripten") && filename.contains(".js")){
dst.push(e.path());
}
}
use build_helper::output;
use filetime::FileTime;
-use util::{exe, staticlib, libdir, mtime, is_dylib, copy};
+use util::{exe, libdir, mtime, is_dylib, copy};
use {Build, Compiler, Mode};
/// Build the standard library.
let libdir = build.sysroot_libdir(compiler, target);
let _ = fs::remove_dir_all(&libdir);
t!(fs::create_dir_all(&libdir));
- // FIXME(stage0) remove this `if` after the next snapshot
- // The stage0 compiler still passes the `-lcompiler-rt` flag to the linker but now `bootstrap`
- // never builds a `libcopmiler-rt.a`! We'll fill the hole by simply copying stage0's
- // `libcompiler-rt.a` to where the stage1's one is expected (though we could as well just use
- // an empty `.a` archive). Note that the symbols of that stage0 `libcompiler-rt.a` won't make
- // it to the final binary because now `libcore.rlib` also contains the symbols that
- // `libcompiler-rt.a` provides. Since that rlib appears first in the linker arguments, its
- // symbols are used instead of `libcompiler-rt.a`'s.
- if compiler.stage == 0 {
- let rtlib = &staticlib("compiler-rt", target);
- let src = build.rustc.parent().unwrap().parent().unwrap().join("lib").join("rustlib")
- .join(target).join("lib").join(rtlib);
- copy(&src, &libdir.join(rtlib));
- }
// Some platforms have startup objects that may be required to produce the
// libstd dynamic library, for example.
add_to_sysroot(&out_dir, &libdir);
if target.contains("musl") && !target.contains("mips") {
- copy_musl_third_party_objects(build, &libdir);
+ copy_musl_third_party_objects(build, target, &libdir);
}
}
/// Copies the crt(1,i,n).o startup objects
///
/// Only required for musl targets that statically link to libc
-fn copy_musl_third_party_objects(build: &Build, into: &Path) {
+fn copy_musl_third_party_objects(build: &Build, target: &str, into: &Path) {
for &obj in &["crt1.o", "crti.o", "crtn.o"] {
- copy(&build.config.musl_root.as_ref().unwrap().join("lib").join(obj), &into.join(obj));
+ copy(&build.musl_root(target).unwrap().join("lib").join(obj), &into.join(obj));
}
}
// Fallback musl-root for all targets
pub musl_root: Option<PathBuf>,
pub prefix: Option<String>,
+ pub docdir: Option<String>,
+ pub libdir: Option<String>,
+ pub mandir: Option<String>,
pub codegen_tests: bool,
pub nodejs: Option<PathBuf>,
}
cc: Option<String>,
cxx: Option<String>,
android_ndk: Option<String>,
+ musl_root: Option<String>,
}
impl Config {
}
target.cxx = cfg.cxx.clone().map(PathBuf::from);
target.cc = cfg.cc.clone().map(PathBuf::from);
+ target.musl_root = cfg.musl_root.clone().map(PathBuf::from);
config.target_config.insert(triple.clone(), target);
}
"CFG_PREFIX" => {
self.prefix = Some(value.to_string());
}
+ "CFG_DOCDIR" => {
+ self.docdir = Some(value.to_string());
+ }
+ "CFG_LIBDIR" => {
+ self.libdir = Some(value.to_string());
+ }
+ "CFG_MANDIR" => {
+ self.mandir = Some(value.to_string());
+ }
"CFG_LLVM_ROOT" if value.len() > 0 => {
let target = self.target_config.entry(self.build.clone())
.or_insert(Target::default());
self.rustc = Some(PathBuf::from(value).join("bin/rustc"));
self.cargo = Some(PathBuf::from(value).join("bin/cargo"));
}
- "CFG_NODEJS" if value.len() > 0 => {
- self.nodejs = Some(PathBuf::from(value));
- }
_ => {}
}
}
use util::{cp_r, libdir, is_dylib, cp_filtered, copy};
use regex::{RegexSet, quote};
-fn package_vers(build: &Build) -> &str {
+pub fn package_vers(build: &Build) -> &str {
match &build.config.channel[..] {
"stable" => &build.release,
"beta" => "beta",
build.out.join("dist")
}
-fn tmpdir(build: &Build) -> PathBuf {
+pub fn tmpdir(build: &Build) -> PathBuf {
build.out.join("tmp/dist")
}
// We have to run a few shell scripts, which choke quite a bit on both `\`
// characters and on `C:\` paths, so normalize both of them away.
-fn sanitize_sh(path: &Path) -> String {
+pub fn sanitize_sh(path: &Path) -> String {
let path = path.to_str().unwrap().replace("\\", "/");
return change_drive(&path).unwrap_or(path);
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation of the install aspects of the compiler.
+//!
+//! This module is responsible for installing the standard library,
+//! compiler, and documentation.
+
+use std::fs;
+use std::borrow::Cow;
+use std::path::Path;
+use std::process::Command;
+
+use Build;
+use dist::{package_vers, sanitize_sh, tmpdir};
+
+/// Installs everything.
+pub fn install(build: &Build, stage: u32, host: &str) {
+ let prefix = build.config.prefix.as_ref().clone().map(|x| Path::new(x))
+ .unwrap_or(Path::new("/usr/local"));
+ let docdir = build.config.docdir.as_ref().clone().map(|x| Cow::Borrowed(Path::new(x)))
+ .unwrap_or(Cow::Owned(prefix.join("share/doc/rust")));
+ let libdir = build.config.libdir.as_ref().clone().map(|x| Cow::Borrowed(Path::new(x)))
+ .unwrap_or(Cow::Owned(prefix.join("lib")));
+ let mandir = build.config.mandir.as_ref().clone().map(|x| Cow::Borrowed(Path::new(x)))
+ .unwrap_or(Cow::Owned(prefix.join("share/man")));
+ let empty_dir = build.out.join("tmp/empty_dir");
+ t!(fs::create_dir_all(&empty_dir));
+ if build.config.docs {
+ install_sh(&build, "docs", "rust-docs", stage, host, prefix,
+ &docdir, &libdir, &mandir, &empty_dir);
+ }
+ install_sh(&build, "std", "rust-std", stage, host, prefix,
+ &docdir, &libdir, &mandir, &empty_dir);
+ install_sh(&build, "rustc", "rustc", stage, host, prefix,
+ &docdir, &libdir, &mandir, &empty_dir);
+ t!(fs::remove_dir_all(&empty_dir));
+}
+
+fn install_sh(build: &Build, package: &str, name: &str, stage: u32, host: &str,
+ prefix: &Path, docdir: &Path, libdir: &Path, mandir: &Path, empty_dir: &Path) {
+ println!("Install {} stage{} ({})", package, stage, host);
+ let package_name = format!("{}-{}-{}", name, package_vers(build), host);
+
+ let mut cmd = Command::new("sh");
+ cmd.current_dir(empty_dir)
+ .arg(sanitize_sh(&tmpdir(build).join(&package_name).join("install.sh")))
+ .arg(format!("--prefix={}", sanitize_sh(prefix)))
+ .arg(format!("--docdir={}", sanitize_sh(docdir)))
+ .arg(format!("--libdir={}", sanitize_sh(libdir)))
+ .arg(format!("--mandir={}", sanitize_sh(mandir)))
+ .arg("--disable-ldconfig");
+ build.run(&mut cmd);
+}
mod dist;
mod doc;
mod flags;
+mod install;
mod native;
mod sanity;
mod step;
// Almost all of these are simple one-liners that shell out to the
// corresponding functionality in the extra modules, where more
// documentation can be found.
- for target in step::all(self) {
+ let steps = step::all(self);
+
+ self.verbose("bootstrap build plan:");
+ for step in &steps {
+ self.verbose(&format!("{:?}", step));
+ }
+
+ for target in steps {
let doc_out = self.out.join(&target.target).join("doc");
match target.src {
Llvm { _dummy } => {
DistStd { compiler } => dist::std(self, &compiler, target.target),
DistSrc { _dummy } => dist::rust_src(self),
+ Install { stage } => install::install(self, stage, target.target),
+
DebuggerScripts { stage } => {
let compiler = Compiler::new(stage, target.target);
dist::debugger_scripts(self,
continue
}
+ // `submodule.path` is the relative path to a submodule (from the repository root)
+ // `submodule_path` is the path to a submodule from the cwd
+
+ // use `submodule.path` when e.g. executing a submodule specific command from the
+ // repository root
+ // use `submodule_path` when e.g. executing a normal git command for the submodule
+ // (set via `current_dir`)
+ let submodule_path = self.src.join(submodule.path);
+
match submodule.state {
State::MaybeDirty => {
// drop staged changes
- self.run(git().arg("-C").arg(submodule.path).args(&["reset", "--hard"]));
+ self.run(git().current_dir(&submodule_path)
+ .args(&["reset", "--hard"]));
// drops unstaged changes
- self.run(git().arg("-C").arg(submodule.path).args(&["clean", "-fdx"]));
+ self.run(git().current_dir(&submodule_path)
+ .args(&["clean", "-fdx"]));
},
State::NotInitialized => {
self.run(git_submodule().arg("init").arg(submodule.path));
State::OutOfSync => {
// drops submodule commits that weren't reported to the (outer) git repository
self.run(git_submodule().arg("update").arg(submodule.path));
- self.run(git().arg("-C").arg(submodule.path).args(&["reset", "--hard"]));
- self.run(git().arg("-C").arg(submodule.path).args(&["clean", "-fdx"]));
+ self.run(git().current_dir(&submodule_path)
+ .args(&["reset", "--hard"]));
+ self.run(git().current_dir(&submodule_path)
+ .args(&["clean", "-fdx"]));
},
}
}
.env(format!("CFLAGS_{}", target), self.cflags(target).join(" "));
}
- // If we're building for OSX, inform the compiler and the linker that
- // we want to build a compiler runnable on 10.7
- if target.contains("apple-darwin") {
- cargo.env("MACOSX_DEPLOYMENT_TARGET", "10.7");
- }
-
// Environment variables *required* needed throughout the build
//
// FIXME: should update code to not require this env var
// LLVM/jemalloc/etc are all properly compiled.
if target.contains("apple-darwin") {
base.push("-stdlib=libc++".into());
- base.push("-mmacosx-version-min=10.7".into());
}
// This is a hack, because newer binutils broke things on some vms/distros
// (i.e., linking against unknown relocs disabled by the following flag)
// than an entry here.
let mut base = Vec::new();
- if target != self.config.build && !target.contains("msvc") {
+ if target != self.config.build && !target.contains("msvc") &&
+ !target.contains("emscripten") {
base.push(format!("-Clinker={}", self.cc(target).display()));
}
return base
$(Q)$(BOOTSTRAP) --step check-cargotest
dist:
$(Q)$(BOOTSTRAP) --step dist
+install:
+ $(Q)$(BOOTSTRAP) --step install
tidy:
$(Q)$(BOOTSTRAP) --step check-tidy --stage 0
.out_dir(&dst)
.profile(if build.config.llvm_optimize {"Release"} else {"Debug"})
.define("LLVM_ENABLE_ASSERTIONS", assertions)
- .define("LLVM_TARGETS_TO_BUILD", "X86;ARM;AArch64;Mips;PowerPC;SystemZ")
+ .define("LLVM_TARGETS_TO_BUILD", "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend")
.define("LLVM_INCLUDE_EXAMPLES", "OFF")
.define("LLVM_INCLUDE_TESTS", "OFF")
.define("LLVM_INCLUDE_DOCS", "OFF")
panic!("PATH contains invalid character '\"'");
}
}
- let mut need_cmd = |cmd: &OsStr| {
- if !checked.insert(cmd.to_owned()) {
- return
- }
+ let have_cmd = |cmd: &OsStr| {
for path in env::split_paths(&path).map(|p| p.join(cmd)) {
if fs::metadata(&path).is_ok() ||
fs::metadata(path.with_extension("exe")).is_ok() {
- return
+ return Some(path);
}
}
- panic!("\n\ncouldn't find required command: {:?}\n\n", cmd);
+ return None;
+ };
+
+ let mut need_cmd = |cmd: &OsStr| {
+ if !checked.insert(cmd.to_owned()) {
+ return
+ }
+ if have_cmd(cmd).is_none() {
+ panic!("\n\ncouldn't find required command: {:?}\n\n", cmd);
+ }
};
// If we've got a git directory we're gona need git to update
need_cmd("python".as_ref());
- // If a manual nodejs was added to the config,
- // of if a nodejs install is detected through config, use it.
+ // Look for the nodejs command, needed for emscripten testing
+ if let Some(node) = have_cmd("node".as_ref()) {
+ build.config.nodejs = Some(node);
+ } else if let Some(node) = have_cmd("nodejs".as_ref()) {
+ build.config.nodejs = Some(node);
+ }
+
if let Some(ref s) = build.config.nodejs {
need_cmd(s.as_ref());
}
// We're gonna build some custom C code here and there, host triples
// also build some C++ shims for LLVM so we need a C++ compiler.
for target in build.config.target.iter() {
+ // On emscripten we don't actually need the C compiler to just
+ // build the target artifacts, only for testing. For the sake
+ // of easier bot configuration, just skip detection.
+ if target.contains("emscripten") {
+ continue;
+ }
+
need_cmd(build.cc(target).as_ref());
if let Some(ar) = build.ar(target) {
need_cmd(ar.as_ref());
need_cmd(build.cxx(host).as_ref());
}
+ // The msvc hosts don't use jemalloc, turn it off globally to
+ // avoid packaging the dummy liballoc_jemalloc on that platform.
+ for host in build.config.host.iter() {
+ if host.contains("msvc") {
+ build.config.use_jemalloc = false;
+ }
+ }
+
// Externally configured LLVM requires FileCheck to exist
let filecheck = build.llvm_filecheck(&build.config.build);
if !filecheck.starts_with(&build.out) && !filecheck.exists() && build.config.codegen_tests {
}
for target in build.config.target.iter() {
- // Either can't build or don't want to run jemalloc on these targets
- if target.contains("rumprun") ||
- target.contains("bitrig") ||
- target.contains("openbsd") ||
- target.contains("msvc") ||
- target.contains("emscripten") {
- build.config.use_jemalloc = false;
- }
-
// Can't compile for iOS unless we're on OSX
if target.contains("apple-ios") &&
!build.config.build.contains("apple-darwin") {
}
}
None => {
- panic!("when targeting MUSL either the build.musl-root \
- option or the target.$TARGET.musl-root one must \
+ panic!("when targeting MUSL either the rust.musl-root \
+ option or the target.$TARGET.musl-root option must \
be specified in config.toml")
}
}
(dist_std, DistStd { compiler: Compiler<'a> }),
(dist_src, DistSrc { _dummy: () }),
+ // install target
+ (install, Install { stage: u32 }),
+
// Misc targets
(android_copy_libs, AndroidCopyLibs { compiler: Compiler<'a> }),
}
/// into a topologically sorted list which when executed left-to-right will
/// correctly sequence the entire build.
pub fn all(build: &Build) -> Vec<Step> {
+ build.verbose("inferred build steps:");
+
let mut ret = Vec::new();
let mut all = HashSet::new();
for target in top_level(build) {
set: &mut HashSet<Step<'a>>) {
if set.insert(target.clone()) {
for dep in target.deps(build) {
+ build.verbose(&format!("{:?}\n -> {:?}", target, dep));
fill(build, &dep, ret, set);
}
ret.push(target.clone());
}
}
- return targets
-
+ targets
}
fn add_steps<'a>(build: &'a Build,
self.check_crate_std(compiler),
self.check_crate_test(compiler),
self.check_debuginfo(compiler),
- self.dist(stage),
];
// If we're testing the build triple, then we know we can
// misc
self.check_linkcheck(stage),
self.check_tidy(stage),
+
+ // can we make the distributables?
+ self.dist(stage),
]);
}
- return base
+ base
}
Source::CheckLinkcheck { stage } => {
vec![self.tool_linkchecker(stage), self.doc(stage)]
Source::CheckCodegenUnits { compiler } |
Source::CheckIncremental { compiler } |
Source::CheckUi { compiler } |
- Source::CheckRustdoc { compiler } |
Source::CheckPretty { compiler } |
Source::CheckCFail { compiler } |
Source::CheckRPassValgrind { compiler } |
self.debugger_scripts(compiler.stage),
]
}
+ Source::CheckRustdoc { compiler } |
Source::CheckRPassFull { compiler } |
Source::CheckRFailFull { compiler } |
Source::CheckCFailFull { compiler } |
base.push(target.dist_std(compiler));
}
}
- return base
+ base
+ }
+
+ Source::Install { stage } => {
+ vec![self.dist(stage)]
}
Source::AndroidCopyLibs { compiler } => {
-Subproject commit 8598065bd965d9713bfafb6c1e766d63a7b17b89
+Subproject commit f03ba5a4e8bf16dcf42dd742a4ce255c36321356
science, and are also a hot topic in industry today. Computers are gaining more
and more cores, yet many programmers aren't prepared to fully utilize them.
-Rust's memory safety features also apply to its concurrency story too. Even
+Rust's memory safety features also apply to its concurrency story. Even
concurrent Rust programs must be memory safe, having no data races. Rust's type
system is up to the task, and gives you powerful ways to reason about
concurrent code at compile time.
```
`Arc<T>` by default has immutable contents. It allows the _sharing_ of data
-between threads, but shared mutable data is unsafe and when threads are
-involved can cause data races!
+between threads, but shared mutable data is unsafe—and when threads are
+involved—can cause data races!
Usually when we wish to make something in an immutable position mutable, we use
`"C:\Program Files\Rust stable GNU 1.x\bin"`.
Rust does not do its own linking, and so you’ll need to have a linker
-installed. Doing so will depend on your specific system, consult its
-documentation for more details.
-
-If not, there are a number of places where we can get help. The easiest is
-[the #rust-beginners IRC channel on irc.mozilla.org][irc-beginners] and for
-general discussion [the #rust IRC channel on irc.mozilla.org][irc], which we
+installed. Doing so will depend on your specific system. For
+Linux-based systems, Rust will attempt to call `cc` for linking. On
+`windows-msvc` (Rust built on Windows with Microsoft Visual Studio),
+this depends on having [Microsoft Visual C++ Build Tools][msvbt]
+installed. These do not need to be in `%PATH%` as `rustc` will find
+them automatically. In general, if you have your linker in a
+non-traditional location, you can call
+`rustc -C linker=/path/to/cc`, where `/path/to/cc` should point to your linker path.
+
+[msvbt]: http://landinghub.visualstudio.com/visual-cpp-build-tools
+
+If you are still stuck, there are a number of places where we can get
+help. The easiest is
+[the #rust-beginners IRC channel on irc.mozilla.org][irc-beginners]
+and for general discussion
+[the #rust IRC channel on irc.mozilla.org][irc], which we
can access through [Mibbit][mibbit]. Then we'll be chatting with other
Rustaceans (a silly nickname we call ourselves) who can help us out. Other great
resources include [the user’s forum][users] and [Stack Overflow][stackoverflow].
## Writing and Running a Rust Program
-Next, make a new source file and call it *main.rs*. Rust files always end
-in a *.rs* extension. If you’re using more than one word in your filename, use
-an underscore to separate them; for example, you'd use *hello_world.rs* rather
-than *helloworld.rs*.
+We need to create a source file for our Rust program. Rust files always end
+in a *.rs* extension. If you are using more than one word in your filename,
+use an underscore to separate them; for example, you would use
+*my_program.rs* rather than *myprogram.rs*.
-Now open the *main.rs* file you just created, and type the following code:
+Now, make a new file and call it *main.rs*. Open the file and type
+the following code:
```rust
fn main() {
Hello, world!
```
+The `run` command comes in handy when you need to rapidly iterate on a
+project.
+
Notice that this example didn’t re-build the project. Cargo figured out that
the file hasn’t changed, and so it just ran the binary. If you'd modified your
source code, Cargo would have rebuilt the project before running it, and you
Excellent! Open up your `src/main.rs` again. We’ll be writing all of
our code in this file.
-Before we move on, let me show you one more Cargo command: `run`. `cargo run`
-is kind of like `cargo build`, but it also then runs the produced executable.
-Try it out:
+Remember the `run` command from last chapter? Try it out again here:
```bash
$ cargo run
Hello, world!
```
-Great! The `run` command comes in handy when you need to rapidly iterate on a
-project. Our game is such a project, we need to quickly test each
-iteration before moving on to the next one.
+Great! Our game is just the kind of project `run` is good for: we need
+to quickly test each iteration before moving on to the next one.
# Processing a Guess
return v.iter().fold(0, |a, &b| a + b);
}
// Borrow two vectors and sum them.
- // This kind of borrowing does not allow mutation to the borrowed.
+ // This kind of borrowing does not allow mutation through the borrowed reference.
fn foo(v1: &Vec<i32>, v2: &Vec<i32>) -> i32 {
// do stuff with v1 and v2
let s1 = sum_vec(v1);
* `-` (`- expr`): arithmetic negation. Overloadable (`Neg`).
* `-=` (`var -= expr`): arithmetic subtraction & assignment. Overloadable (`SubAssign`).
* `->` (`fn(…) -> type`, `|…| -> type`): function and closure return type. See [Functions], [Closures].
-* `-> !` (`fn(…) -> !`, `|…| -> !`): diverging function or closure. See [Diverging Functions].
* `.` (`expr.ident`): member access. See [Structs], [Method Syntax].
* `..` (`..`, `expr..`, `..expr`, `expr..expr`): right-exclusive range literal.
* `..` (`..expr`): struct literal update syntax. See [Structs (Update syntax)].
* `/*!…*/`: inner block doc comment. See [Comments].
* `/**…*/`: outer block doc comment. See [Comments].
+<!-- Special types -->
+
+* `!`: always empty Never type. See [Diverging Functions].
+
<!-- Various things involving parens and tuples -->
* `()`: empty tuple (*a.k.a.* unit), both literal and type.
Rust will not let us use a value that has not been initialized.
-Let take a minute to talk about this stuff we've added to `println!`.
+Let us take a minute to talk about this stuff we've added to `println!`.
If you include two curly braces (`{}`, some call them moustaches...) in your
string to print, Rust will interpret this as a request to interpolate some sort
bound := path | lifetime
```
+### Never type
+An empty type
+
+```antlr
+never_type : "!" ;
+```
+
### Object types
**FIXME:** grammar?
found, `.deref()` is called and the compiler continues to search for the method
implementation in the returned type `U`.
+## The `Send` trait
+
+The `Send` trait indicates that a value of this type is safe to send from one
+thread to another.
+
+## The `Sync` trait
+
+The `Sync` trait indicates that a value of this type is safe to share between
+multiple threads.
+
# Memory model
A Rust program's memory consists of a static set of *items* and a *heap*.
OS=`uname -s`
case $OS in
- ("Linux"|"FreeBSD"|"DragonFly"|"Bitrig"|"OpenBSD"|"SunOS")
+ ("Linux"|"FreeBSD"|"DragonFly"|"Bitrig"|"OpenBSD"|"SunOS"|"Haiku")
BIN_SUF=
LIB_SUF=.so
;;
/// }
/// ```
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Arc<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
/// nodes behind strong `Arc<T>` pointers, and then storing the parent pointers
/// as `Weak<T>` pointers.
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "arc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<ArcInner<T>>,
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn manually_share_arc() {
let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let arc_v = Arc::new(v);
/// the destructor of `T` and free the allocated memory. Since the
/// way `Box` allocates and releases memory is unspecified, the
/// only valid pointer to pass to this function is the one taken
- /// from another `Box` via the `Box::into_raw` function.
+ /// from another `Box` via the [`Box::into_raw`] function.
///
/// This function is unsafe because improper use may lead to
/// memory problems. For example, a double-free may occur if the
/// function is called twice on the same raw pointer.
///
+ /// [`Box::into_raw`]: struct.Box.html#method.into_raw
+ ///
/// # Examples
///
/// ```
/// memory previously managed by the `Box`. In particular, the
/// caller should properly destroy `T` and release the memory. The
/// proper way to do so is to convert the raw pointer back into a
- /// `Box` with the `Box::from_raw` function.
+ /// `Box` with the [`Box::from_raw`] function.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
/// is so that there is no conflict with a method on the inner type.
///
+ /// [`Box::from_raw`]: struct.Box.html#method.from_raw
+ ///
/// # Examples
///
/// ```
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unique)]
-#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![feature(unsize)]
#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol))]
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
pub struct RawVec<T> {
ptr: Unique<T>,
cap: usize,
/// that you have to call them as e.g. `Rc::get_mut(&value)` instead of
/// `value.get_mut()`. This avoids conflicts with methods of the inner
/// type `T`.
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Rc<T: ?Sized> {
ptr: Shared<RcBox<T>>,
///
/// [rc]: struct.Rc.html
/// [downgrade]: struct.Rc.html#method.downgrade
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rc_weak", since = "1.4.0")]
pub struct Weak<T: ?Sized> {
ptr: Shared<RcBox<T>>,
println!("cargo:rustc-cfg=cargobuild");
println!("cargo:rerun-if-changed=build.rs");
- let target = env::var("TARGET").unwrap();
- let host = env::var("HOST").unwrap();
+ let target = env::var("TARGET").expect("TARGET was not set");
+ let host = env::var("HOST").expect("HOST was not set");
let build_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
let src_dir = env::current_dir().unwrap();
+ // FIXME: This is a hack to support building targets that don't
+ // support jemalloc alongside hosts that do. The jemalloc build is
+ // controlled by a feature of the std crate, and if that feature
+ // changes between targets, it invalidates the fingerprint of
+ // std's build script (this is a cargo bug); so we must ensure
+ // that the feature set used by std is the same across all
+ // targets, which means we have to build the alloc_jemalloc crate
+ // for targets like emscripten, even if we don't use it.
+ if target.contains("rumprun") ||
+ target.contains("bitrig") ||
+ target.contains("openbsd") ||
+ target.contains("msvc") ||
+ target.contains("emscripten")
+ {
+ println!("cargo:rustc-cfg=dummy_jemalloc");
+ return;
+ }
+
if let Some(jemalloc) = env::var_os("JEMALLOC_OVERRIDE") {
let jemalloc = PathBuf::from(jemalloc);
println!("cargo:rustc-link-search=native={}",
.current_dir(&build_dir)
.arg("build_lib_static")
.arg("-j")
- .arg(env::var("NUM_JOBS").unwrap()));
+ .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")));
if target.contains("windows") {
println!("cargo:rustc-link-lib=static=jemalloc");
extern crate libc;
-use libc::{c_int, c_void, size_t};
+pub use imp::*;
-// Linkage directives to pull in jemalloc and its dependencies.
-//
-// On some platforms we need to be sure to link in `pthread` which jemalloc
-// depends on, and specifically on android we need to also link to libgcc.
-// Currently jemalloc is compiled with gcc which will generate calls to
-// intrinsics that are libgcc specific (e.g. those intrinsics aren't present in
-// libcompiler-rt), so link that in to get that support.
-#[link(name = "jemalloc", kind = "static")]
-#[cfg_attr(target_os = "android", link(name = "gcc"))]
-#[cfg_attr(all(not(windows),
- not(target_os = "android"),
- not(target_env = "musl")),
- link(name = "pthread"))]
-#[cfg(not(cargobuild))]
-extern "C" {}
-
-// Note that the symbols here are prefixed by default on OSX and Windows (we
-// don't explicitly request it), and on Android and DragonFly we explicitly
-// request it as unprefixing cause segfaults (mismatches in allocators).
-extern "C" {
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows"),
- link_name = "je_mallocx")]
- fn mallocx(size: size_t, flags: c_int) -> *mut c_void;
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows"),
- link_name = "je_rallocx")]
- fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows"),
- link_name = "je_xallocx")]
- fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows"),
- link_name = "je_sdallocx")]
- fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
- #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
- target_os = "dragonfly", target_os = "windows"),
- link_name = "je_nallocx")]
- fn nallocx(size: size_t, flags: c_int) -> size_t;
-}
+// See comments in build.rs for why we sometimes build a crate that does nothing
+#[cfg(not(dummy_jemalloc))]
+mod imp {
+ use libc::{c_int, c_void, size_t};
-// The minimum alignment guaranteed by the architecture. This value is used to
-// add fast paths for low alignment values. In practice, the alignment is a
-// constant at the call site and the branch will be optimized out.
-#[cfg(all(any(target_arch = "arm",
- target_arch = "mips",
- target_arch = "powerpc")))]
-const MIN_ALIGN: usize = 8;
-#[cfg(all(any(target_arch = "x86",
- target_arch = "x86_64",
- target_arch = "aarch64",
- target_arch = "powerpc64",
- target_arch = "mips64",
- target_arch = "s390x")))]
-const MIN_ALIGN: usize = 16;
-
-// MALLOCX_ALIGN(a) macro
-fn mallocx_align(a: usize) -> c_int {
- a.trailing_zeros() as c_int
-}
+ // Linkage directives to pull in jemalloc and its dependencies.
+ //
+ // On some platforms we need to be sure to link in `pthread` which jemalloc
+ // depends on, and specifically on android we need to also link to libgcc.
+ // Currently jemalloc is compiled with gcc which will generate calls to
+ // intrinsics that are libgcc specific (e.g. those intrinsics aren't present in
+ // libcompiler-rt), so link that in to get that support.
+ #[link(name = "jemalloc", kind = "static")]
+ #[cfg_attr(target_os = "android", link(name = "gcc"))]
+ #[cfg_attr(all(not(windows),
+ not(target_os = "android"),
+ not(target_env = "musl")),
+ link(name = "pthread"))]
+ #[cfg(not(cargobuild))]
+ extern "C" {}
+
+ // Note that the symbols here are prefixed by default on OSX and Windows (we
+ // don't explicitly request it), and on Android and DragonFly we explicitly
+ // request it as unprefixing cause segfaults (mismatches in allocators).
+ extern "C" {
+ #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+ target_os = "dragonfly", target_os = "windows"),
+ link_name = "je_mallocx")]
+ fn mallocx(size: size_t, flags: c_int) -> *mut c_void;
+ #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+ target_os = "dragonfly", target_os = "windows"),
+ link_name = "je_rallocx")]
+ fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
+ #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+ target_os = "dragonfly", target_os = "windows"),
+ link_name = "je_xallocx")]
+ fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
+ #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+ target_os = "dragonfly", target_os = "windows"),
+ link_name = "je_sdallocx")]
+ fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
+ #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios",
+ target_os = "dragonfly", target_os = "windows"),
+ link_name = "je_nallocx")]
+ fn nallocx(size: size_t, flags: c_int) -> size_t;
+ }
+
+ // The minimum alignment guaranteed by the architecture. This value is used to
+ // add fast paths for low alignment values. In practice, the alignment is a
+ // constant at the call site and the branch will be optimized out.
+ #[cfg(all(any(target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "powerpc")))]
+ const MIN_ALIGN: usize = 8;
+ #[cfg(all(any(target_arch = "x86",
+ target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "powerpc64",
+ target_arch = "mips64",
+ target_arch = "s390x")))]
+ const MIN_ALIGN: usize = 16;
+
+ // MALLOCX_ALIGN(a) macro
+ fn mallocx_align(a: usize) -> c_int {
+ a.trailing_zeros() as c_int
+ }
+
+ fn align_to_flags(align: usize) -> c_int {
+ if align <= MIN_ALIGN {
+ 0
+ } else {
+ mallocx_align(align)
+ }
+ }
+
+ #[no_mangle]
+ pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
+ let flags = align_to_flags(align);
+ unsafe { mallocx(size as size_t, flags) as *mut u8 }
+ }
+
+ #[no_mangle]
+ pub extern "C" fn __rust_reallocate(ptr: *mut u8,
+ _old_size: usize,
+ size: usize,
+ align: usize)
+ -> *mut u8 {
+ let flags = align_to_flags(align);
+ unsafe { rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 }
+ }
+
+ #[no_mangle]
+ pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
+ _old_size: usize,
+ size: usize,
+ align: usize)
+ -> usize {
+ let flags = align_to_flags(align);
+ unsafe { xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize }
+ }
-fn align_to_flags(align: usize) -> c_int {
- if align <= MIN_ALIGN {
+ #[no_mangle]
+ pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
+ let flags = align_to_flags(align);
+ unsafe { sdallocx(ptr as *mut c_void, old_size as size_t, flags) }
+ }
+
+ #[no_mangle]
+ pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
+ let flags = align_to_flags(align);
+ unsafe { nallocx(size as size_t, flags) as usize }
+ }
+
+ // These symbols are used by jemalloc on android but the really old android
+ // we're building on doesn't have them defined, so just make sure the symbols
+ // are available.
+ #[no_mangle]
+ #[cfg(target_os = "android")]
+ pub extern "C" fn pthread_atfork(_prefork: *mut u8,
+ _postfork_parent: *mut u8,
+ _postfork_child: *mut u8)
+ -> i32 {
0
- } else {
- mallocx_align(align)
}
}
-#[no_mangle]
-pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
- let flags = align_to_flags(align);
- unsafe { mallocx(size as size_t, flags) as *mut u8 }
-}
+#[cfg(dummy_jemalloc)]
+mod imp {
+ fn bogus() -> ! {
+ panic!("jemalloc is not implemented for this platform");
+ }
-#[no_mangle]
-pub extern "C" fn __rust_reallocate(ptr: *mut u8,
- _old_size: usize,
- size: usize,
- align: usize)
- -> *mut u8 {
- let flags = align_to_flags(align);
- unsafe { rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 }
-}
+ #[no_mangle]
+ pub extern "C" fn __rust_allocate(_size: usize, _align: usize) -> *mut u8 {
+ bogus()
+ }
-#[no_mangle]
-pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
- _old_size: usize,
- size: usize,
- align: usize)
- -> usize {
- let flags = align_to_flags(align);
- unsafe { xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize }
-}
+ #[no_mangle]
+ pub extern "C" fn __rust_reallocate(_ptr: *mut u8,
+ _old_size: usize,
+ _size: usize,
+ _align: usize)
+ -> *mut u8 {
+ bogus()
+ }
-#[no_mangle]
-pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
- let flags = align_to_flags(align);
- unsafe { sdallocx(ptr as *mut c_void, old_size as size_t, flags) }
-}
+ #[no_mangle]
+ pub extern "C" fn __rust_reallocate_inplace(_ptr: *mut u8,
+ _old_size: usize,
+ _size: usize,
+ _align: usize)
+ -> usize {
+ bogus()
+ }
-#[no_mangle]
-pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
- let flags = align_to_flags(align);
- unsafe { nallocx(size as size_t, flags) as usize }
-}
+ #[no_mangle]
+ pub extern "C" fn __rust_deallocate(_ptr: *mut u8, _old_size: usize, _align: usize) {
+ bogus()
+ }
-// These symbols are used by jemalloc on android but the really old android
-// we're building on doesn't have them defined, so just make sure the symbols
-// are available.
-#[no_mangle]
-#[cfg(target_os = "android")]
-pub extern "C" fn pthread_atfork(_prefork: *mut u8,
- _postfork_parent: *mut u8,
- _postfork_child: *mut u8)
- -> i32 {
- 0
+ #[no_mangle]
+ pub extern "C" fn __rust_usable_size(_size: usize, _align: usize) -> usize {
+ bogus()
+ }
}
target_arch = "mips",
target_arch = "powerpc",
target_arch = "powerpc64",
- target_arch = "asmjs")))]
+ target_arch = "asmjs",
+ target_arch = "wasm32")))]
const MIN_ALIGN: usize = 8;
#[cfg(all(any(target_arch = "x86_64",
target_arch = "aarch64",
///
/// ```
/// #![feature(binary_heap_extras)]
+ /// #![allow(deprecated)]
///
/// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::new();
#[unstable(feature = "binary_heap_extras",
reason = "needs to be audited",
issue = "28147")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")]
pub fn push_pop(&mut self, mut item: T) -> T {
match self.data.get_mut(0) {
None => return item,
///
/// ```
/// #![feature(binary_heap_extras)]
+ /// #![allow(deprecated)]
///
/// use std::collections::BinaryHeap;
/// let mut heap = BinaryHeap::new();
#[unstable(feature = "binary_heap_extras",
reason = "needs to be audited",
issue = "28147")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")]
pub fn replace(&mut self, mut item: T) -> Option<T> {
if !self.is_empty() {
swap(&mut item, &mut self.data[0]);
iter: vec::Drain<'a, T>,
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
type Item = T;
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
#[unstable(feature = "fused", issue = "35602")]
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
-use core::ops::Deref;
+use core::ops::{Add, AddAssign, Deref};
use fmt;
self
}
}
+
+#[stable(feature = "cow_add", since = "1.13.0")]
+impl<'a> Add<&'a str> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ fn add(self, rhs: &'a str) -> Self {
+ if self == "" {
+ Cow::Borrowed(rhs)
+ } else if rhs == "" {
+ self
+ } else {
+ Cow::Owned(self.into_owned() + rhs)
+ }
+ }
+}
+
+#[stable(feature = "cow_add", since = "1.13.0")]
+impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ fn add(self, rhs: Cow<'a, str>) -> Self {
+ if self == "" {
+ rhs
+ } else if rhs == "" {
+ self
+ } else {
+ Cow::Owned(self.into_owned() + rhs.borrow())
+ }
+ }
+}
+
+#[stable(feature = "cow_add", since = "1.13.0")]
+impl<'a> AddAssign<&'a str> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: &'a str) {
+ if rhs == "" { return; }
+ self.to_mut().push_str(rhs);
+ }
+}
+
+#[stable(feature = "cow_add", since = "1.13.0")]
+impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: Cow<'a, str>) {
+ if rhs == "" { return; }
+ self.to_mut().push_str(rhs.borrow());
+ }
+}
length: usize,
}
+#[stable(feature = "btree_drop", since = "1.7.0")]
impl<K, V> Drop for BTreeMap<K, V> {
#[unsafe_destructor_blind_to_params]
fn drop(&mut self) {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> {
fn clone(&self) -> BTreeMap<K, V> {
fn clone_subtree<K: Clone, V: Clone>(node: node::NodeRef<marker::Immut,
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> IntoIterator for &'a BTreeMap<K, V> {
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Iter<'a, K, V> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
if self.length == 0 {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> ExactSizeIterator for Iter<'a, K, V> {
fn len(&self) -> usize {
self.length
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Iter<'a, K, V> {
Iter {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> IntoIterator for &'a mut BTreeMap<K, V> {
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
if self.length == 0 {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> {
fn len(&self) -> usize {
self.length
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> IntoIterator for BTreeMap<K, V> {
type Item = (K, V);
type IntoIter = IntoIter<K, V>;
}
}
+#[stable(feature = "btree_drop", since = "1.7.0")]
impl<K, V> Drop for IntoIter<K, V> {
fn drop(&mut self) {
for _ in &mut *self {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> Iterator for IntoIter<K, V> {
type Item = (K, V);
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
fn next_back(&mut self) -> Option<(K, V)> {
if self.length == 0 {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K, V> ExactSizeIterator for IntoIter<K, V> {
fn len(&self) -> usize {
self.length
#[unstable(feature = "fused", issue = "35602")]
impl<K, V> FusedIterator for IntoIter<K, V> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Keys<'a, K, V> {
type Item = &'a K;
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
fn next_back(&mut self) -> Option<&'a K> {
self.inner.next_back().map(|(k, _)| k)
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Keys<'a, K, V> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Keys<'a, K, V> {
fn clone(&self) -> Keys<'a, K, V> {
Keys { inner: self.inner.clone() }
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
fn next_back(&mut self) -> Option<&'a V> {
self.inner.next_back().map(|(_, v)| v)
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
fn len(&self) -> usize {
self.inner.len()
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for Values<'a, K, V> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Clone for Values<'a, K, V> {
fn clone(&self) -> Values<'a, K, V> {
Values { inner: self.inner.clone() }
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> BTreeMap<K, V> {
let mut map = BTreeMap::new();
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> {
#[inline]
fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
}
}
+#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, K: Ord + Copy, V: Copy> Extend<(&'a K, &'a V)> for BTreeMap<K, V> {
fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Hash, V: Hash> Hash for BTreeMap<K, V> {
fn hash<H: Hasher>(&self, state: &mut H) {
for elt in self {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V> Default for BTreeMap<K, V> {
/// Creates an empty `BTreeMap<K, V>`.
fn default() -> BTreeMap<K, V> {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: PartialEq, V: PartialEq> PartialEq for BTreeMap<K, V> {
fn eq(&self, other: &BTreeMap<K, V>) -> bool {
self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b)
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Eq, V: Eq> Eq for BTreeMap<K, V> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: PartialOrd, V: PartialOrd> PartialOrd for BTreeMap<K, V> {
#[inline]
fn partial_cmp(&self, other: &BTreeMap<K, V>) -> Option<Ordering> {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Ord, V: Ord> Ord for BTreeMap<K, V> {
#[inline]
fn cmp(&self, other: &BTreeMap<K, V>) -> Ordering {
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<K: Debug, V: Debug> Debug for BTreeMap<K, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap<K, V>
where K: Borrow<Q>,
Q: Ord
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Iter<'a, T> {
Iter { iter: self.iter.clone() }
}
}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Difference<'a, T> {
fn clone(&self) -> Difference<'a, T> {
Difference {
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: Ord> FusedIterator for Difference<'a, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for SymmetricDifference<'a, T> {
fn clone(&self) -> SymmetricDifference<'a, T> {
SymmetricDifference {
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Intersection<'a, T> {
fn clone(&self) -> Intersection<'a, T> {
Intersection {
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Union<'a, T> {
fn clone(&self) -> Union<'a, T> {
Union {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
impl<E: CLike + fmt::Debug> fmt::Debug for EnumSet<E> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_set().entries(self).finish()
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E> IntoIterator for &'a EnumSet<E> where E: CLike
{
type Item = E;
}
}
-#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, E: 'a + CLike + Copy> Extend<&'a E> for EnumSet<E> {
fn extend<I: IntoIterator<Item = &'a E>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
#![feature(step_by)]
#![feature(unicode)]
#![feature(unique)]
-#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![cfg_attr(test, feature(rand, test))]
#![no_std]
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn test_send() {
let n = list_from(&[1, 2, 3]);
thread::spawn(move || {
}
/// Use the syntax described in `std::fmt` to create a value of type `String`.
-/// See `std::fmt` for more information.
+/// See [`std::fmt`][fmt] for more information.
+///
+/// [fmt]: ../std/fmt/index.html
///
/// # Examples
///
core_slice::SliceExt::len(self)
}
- /// Returns true if the slice has a length of 0
+ /// Returns true if the slice has a length of 0.
///
/// # Example
///
core_slice::SliceExt::get_unchecked_mut(self, index)
}
- /// Returns an raw pointer to the slice's buffer
+ /// Returns a raw pointer to the slice's buffer.
///
/// The caller must ensure that the slice outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// # Examples
///
- /// ```rust
+ /// ```
/// let mut v = ["a", "b", "c", "d"];
/// v.swap(1, 3);
/// assert!(v == ["a", "d", "c", "b"]);
///
/// # Example
///
- /// ```rust
+ /// ```
/// let mut v = [1, 2, 3];
/// v.reverse();
/// assert!(v == [3, 2, 1]);
}
/// Returns an iterator over `size` elements of the slice at a
- /// time. The chunks are slices and do not overlap. If `size` does not divide the
- /// length of the slice, then the last chunk will not have length
- /// `size`.
+ /// time. The chunks are slices and do not overlap. If `size` does
+ /// not divide the length of the slice, then the last chunk will
+ /// not have length `size`.
///
/// # Panics
///
///
/// # Examples
///
- /// ```rust
+ /// ```
/// let mut v = [1, 2, 3, 4, 5, 6];
///
/// // scoped to restrict the lifetime of the borrows
}
/// Returns an iterator over subslices separated by elements that match
- /// `pred`, limited to returning at most `n` items. The matched element is
+ /// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
}
/// Returns an iterator over subslices separated by elements that match
- /// `pred`, limited to returning at most `n` items. The matched element is
+ /// `pred`, limited to returning at most `n` items. The matched element is
/// not contained in the subslices.
///
/// The last element returned, if any, will contain the remainder of the
/// Returns an iterator over subslices separated by elements that match
/// `pred` limited to returning at most `n` items. This starts at the end of
- /// the slice and works backwards. The matched element is not contained in
+ /// the slice and works backwards. The matched element is not contained in
/// the subslices.
///
/// The last element returned, if any, will contain the remainder of the
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
- /// found; the fourth could match any position in `[1,4]`.
+ /// found; the fourth could match any position in `[1, 4]`.
///
- /// ```rust
+ /// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// assert_eq!(s.binary_search(&13), Ok(9));
///
/// Looks up a series of four elements. The first is found, with a
/// uniquely determined position; the second and third are not
- /// found; the fourth could match any position in `[1,4]`.
+ /// found; the fourth could match any position in `[1, 4]`.
///
- /// ```rust
+ /// ```
/// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
///
/// let seek = 13;
/// Binary search a sorted slice with a key extraction function.
///
/// Assumes that the slice is sorted by the key, for instance with
- /// `sort_by_key` using the same key extraction function.
+ /// [`sort_by_key`] using the same key extraction function.
///
/// If a matching value is found then returns `Ok`, containing the
/// index for the matched element; if no match is found then `Err`
/// is returned, containing the index where a matching element could
/// be inserted while maintaining sorted order.
///
+ /// [`sort_by_key`]: #method.sort_by_key
+ ///
/// # Examples
///
/// Looks up a series of four elements in a slice of pairs sorted by
/// their second elements. The first is found, with a uniquely
/// determined position; the second and third are not found; the
- /// fourth could match any position in `[1,4]`.
+ /// fourth could match any position in `[1, 4]`.
///
- /// ```rust
+ /// ```
/// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1),
/// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
/// (1, 21), (2, 34), (4, 55)];
///
/// # Examples
///
- /// ```rust
+ /// ```
/// let mut v = [-5, 4, 1, -3, 2];
///
/// v.sort();
///
/// # Examples
///
- /// ```rust
+ /// ```
/// let mut v = [-5i32, 4, 1, -3, 2];
///
/// v.sort_by_key(|k| k.abs());
///
/// # Examples
///
- /// ```rust
+ /// ```
/// let mut v = [5, 4, 1, 3, 2];
/// v.sort_by(|a, b| a.cmp(b));
/// assert!(v == [1, 2, 3, 4, 5]);
///
/// # Example
///
- /// ```rust
+ /// ```
/// let mut dst = [0, 0, 0];
/// let src = [1, 2, 3];
///
///
/// # Example
///
- /// ```rust
+ /// ```
/// let mut dst = [0, 0, 0];
/// let src = [1, 2, 3];
///
encoder: Utf16Encoder<Chars<'a>>,
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "encode_utf16", since = "1.8.0")]
impl<'a> Iterator for EncodeUtf16<'a> {
type Item = u16;
///
/// Basic usage:
///
- /// ```rust
+ /// ```
/// let bananas = "bananas";
///
/// assert!(bananas.ends_with("anas"));
///
/// It does _not_ give you:
///
- /// ```rust,ignore
+ /// ```ignore
/// assert_eq!(d, &["a", "b", "c"]);
/// ```
///
}
/// An iterator over substrings of the given string slice, separated by a
- /// pattern, restricted to returning at most `count` items.
+ /// pattern, restricted to returning at most `n` items.
///
- /// The last element returned, if any, will contain the remainder of the
- /// string slice.
+ /// If `n` substrings are returned, the last substring (the `n`th substring)
+ /// will contain the remainder of the string.
///
/// The pattern can be a `&str`, [`char`], or a closure that determines the
/// split.
/// assert_eq!(v, ["abc", "defXghi"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn splitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> SplitN<'a, P> {
- core_str::StrExt::splitn(self, count, pat)
+ pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> {
+ core_str::StrExt::splitn(self, n, pat)
}
/// An iterator over substrings of this string slice, separated by a
/// pattern, starting from the end of the string, restricted to returning
- /// at most `count` items.
+ /// at most `n` items.
///
- /// The last element returned, if any, will contain the remainder of the
- /// string slice.
+ /// If `n` substrings are returned, the last substring (the `n`th substring)
+ /// will contain the remainder of the string.
///
/// The pattern can be a `&str`, [`char`], or a closure that
/// determines the split.
/// assert_eq!(v, ["ghi", "abc1def"]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> RSplitN<'a, P>
+ pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> RSplitN<'a, P>
where P::Searcher: ReverseSearcher<'a>
{
- core_str::StrExt::rsplitn(self, count, pat)
+ core_str::StrExt::rsplitn(self, n, pat)
}
/// An iterator over the matches of a pattern within the given string
//!
//! There are multiple ways to create a new `String` from a string literal:
//!
-//! ```rust
+//! ```
//! let s = "Hello".to_string();
//!
//! let s = String::from("world");
//! You can create a new `String` from an existing one by concatenating with
//! `+`:
//!
-//! ```rust
+//! ```
//! let s = "Hello".to_string();
//!
//! let message = s + " world!";
//! If you have a vector of valid UTF-8 bytes, you can make a `String` out of
//! it. You can do the reverse too.
//!
-//! ```rust
+//! ```
//! let sparkle_heart = vec![240, 159, 146, 150];
//!
//! // We know these bytes are valid, so we'll use `unwrap()`.
pub fn push(&mut self, ch: char) {
match ch.len_utf8() {
1 => self.vec.push(ch as u8),
- _ => self.vec.extend_from_slice(ch.encode_utf8().as_slice()),
+ _ => self.vec.extend_from_slice(ch.encode_utf8(&mut [0;4]).as_bytes()),
}
}
let len = self.len();
assert!(idx <= len);
assert!(self.is_char_boundary(idx));
- let bits = ch.encode_utf8();
+ let mut bits = [0; 4];
+ let bits = ch.encode_utf8(&mut bits).as_bytes();
unsafe {
- self.insert_bytes(idx, bits.as_slice());
+ self.insert_bytes(idx, bits);
}
}
/// Vec does not currently guarantee the order in which elements are dropped
/// (the order has changed in the past, and may change again).
///
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Vec<T> {
buf: RawVec<T>,
end: *const T,
}
-#[stable(feature = "vec_intoiter_debug", since = "")]
+#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("IntoIter")
///
/// # Examples
///
- /// ```rust
+ /// ```
/// # #![feature(vec_into_iter_as_slice)]
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
///
/// # Examples
///
- /// ```rust
+ /// ```
/// # #![feature(vec_into_iter_as_slice)]
/// let vec = vec!['a', 'b', 'c'];
/// let mut into_iter = vec.into_iter();
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T> Iterator for Drain<'a, T> {
type Item = T;
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T> DoubleEndedIterator for Drain<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T> Drop for Drain<'a, T> {
fn drop(&mut self) {
// exhaust self first
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T> ExactSizeIterator for Drain<'a, T> {}
#[unstable(feature = "fused", issue = "35602")]
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
fn drop(&mut self) {
for _ in self.by_ref() {}
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
type Item = T;
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<T> {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
#[unstable(feature = "fused", issue = "35602")]
}
#[test]
+#[allow(deprecated)]
fn test_push_pop() {
let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]);
assert_eq!(heap.len(), 5);
}
#[test]
+#[allow(deprecated)]
fn test_replace() {
let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]);
assert_eq!(heap.len(), 5);
}
#[test]
+#[allow(deprecated)]
fn test_empty_replace() {
let mut heap = BinaryHeap::new();
assert!(heap.replace(5).is_none());
--- /dev/null
+// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::borrow::Cow;
+
+// check that Cow<'a, str> implements addition
+#[test]
+fn check_cow_add() {
+    let borrowed1 = Cow::Borrowed("Hello, ");
+    let borrowed2 = Cow::Borrowed("World!");
+    let borrow_empty = Cow::Borrowed("");
+
+    let owned1: Cow<str> = Cow::Owned("Hi, ".into());
+    let owned2: Cow<str> = Cow::Owned("Rustaceans!".into());
+    let owned_empty: Cow<str> = Cow::Owned("".into());
+
+    assert_eq!("Hello, World!", borrowed1.clone() + borrowed2.clone());
+    assert_eq!("Hello, Rustaceans!", borrowed1.clone() + owned2.clone());
+
+    assert_eq!("Hi, World!", owned1.clone() + borrowed2.clone());
+    assert_eq!("Hi, Rustaceans!", owned1.clone() + owned2.clone());
+
+    if let Cow::Owned(_) = borrowed1.clone() + borrow_empty.clone() {
+        panic!("Adding empty strings to a borrow should not allocate");
+    }
+    if let Cow::Owned(_) = borrow_empty.clone() + borrowed1.clone() {
+        panic!("Adding empty strings to a borrow should not allocate");
+    }
+    if let Cow::Owned(_) = borrowed1.clone() + owned_empty.clone() {
+        panic!("Adding empty strings to a borrow should not allocate");
+    }
+    if let Cow::Owned(_) = owned_empty + borrowed1 {
+        panic!("Adding empty strings to a borrow should not allocate");
+    }
+}
+
+// check that Cow<'a, str> implements add-assignment
+#[test]
+fn check_cow_add_assign() {
+    let mut borrowed1 = Cow::Borrowed("Hello, ");
+    let borrowed2 = Cow::Borrowed("World!");
+    let borrow_empty = Cow::Borrowed("");
+
+    let mut owned1: Cow<str> = Cow::Owned("Hi, ".into());
+    let owned2: Cow<str> = Cow::Owned("Rustaceans!".into());
+    let owned_empty: Cow<str> = Cow::Owned("".into());
+
+    let mut borrowed1clone = borrowed1.clone();
+    borrowed1clone += borrow_empty;
+    assert_eq!((&borrowed1clone).as_ptr(), (&borrowed1).as_ptr());
+
+    borrowed1clone += owned_empty;
+    assert_eq!((&borrowed1clone).as_ptr(), (&borrowed1).as_ptr());
+
+    owned1 += borrowed2;
+    borrowed1 += owned2;
+
+    assert_eq!("Hi, World!", owned1);
+    assert_eq!("Hello, Rustaceans!", borrowed1);
+}
extern crate test;
extern crate rustc_unicode;
-use std::hash::{Hash, Hasher, SipHasher};
+use std::hash::{Hash, Hasher};
+use std::collections::hash_map::DefaultHasher;
#[cfg(test)] #[macro_use] mod bench;
mod vec;
fn hash<T: Hash>(t: &T) -> u64 {
- let mut s = SipHasher::new();
+ let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
}
#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
fn test_box_slice_clone_panics() {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
#[test]
fn test_chars_decoding() {
+ let mut bytes = [0; 4];
for c in (0..0x110000).filter_map(::std::char::from_u32) {
- let bytes = c.encode_utf8();
- let s = ::std::str::from_utf8(bytes.as_slice()).unwrap();
+ let s = c.encode_utf8(&mut bytes);
if Some(c) != s.chars().next() {
panic!("character {:x}={} does not decode correctly", c as u32, c);
}
#[test]
fn test_chars_rev_decoding() {
+ let mut bytes = [0; 4];
for c in (0..0x110000).filter_map(::std::char::from_u32) {
- let bytes = c.encode_utf8();
- let s = ::std::str::from_utf8(bytes.as_slice()).unwrap();
+ let s = c.encode_utf8(&mut bytes);
if Some(c) != s.chars().rev().next() {
panic!("character {:x}={} does not decode correctly", c as u32, c);
}
}
fn main() {
- let target = env::var("TARGET").unwrap();
+ let target = env::var("TARGET").expect("TARGET was not set");
+
+ // Emscripten's runtime includes all the builtins
+ if target.contains("emscripten") {
+ return;
+ }
+
let cfg = &mut gcc::Config::new();
if target.contains("msvc") {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(not(stage0), feature(compiler_builtins))]
+#![feature(compiler_builtins)]
#![no_std]
-#![cfg_attr(not(stage0), compiler_builtins)]
+#![compiler_builtins]
#![unstable(feature = "compiler_builtins_lib",
reason = "internal implementation detail of rustc right now",
issue = "0")]
macro_rules! array_impls {
($($N:expr)+) => {
$(
+ #[stable(feature = "rust1", since = "1.0.0")]
impl<T> AsRef<[T]> for [T; $N] {
#[inline]
fn as_ref(&self) -> &[T] {
}
}
+ #[stable(feature = "rust1", since = "1.0.0")]
impl<T> AsMut<[T]> for [T; $N] {
#[inline]
fn as_mut(&mut self) -> &mut [T] {
use cmp::Ordering;
use fmt::{self, Debug, Display};
-use marker::{PhantomData, Unsize};
+use marker::Unsize;
use ops::{Deref, DerefMut, CoerceUnsized};
/// A mutable memory location that admits only `Copy` data.
}
/// An error returned by [`RefCell::try_borrow`](struct.RefCell.html#method.try_borrow).
-#[unstable(feature = "try_borrow", issue = "35070")]
-pub struct BorrowError<'a, T: 'a + ?Sized> {
- marker: PhantomData<&'a RefCell<T>>,
+#[stable(feature = "try_borrow", since = "1.13.0")]
+pub struct BorrowError {
+ _private: (),
}
-#[unstable(feature = "try_borrow", issue = "35070")]
-impl<'a, T: ?Sized> Debug for BorrowError<'a, T> {
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Debug for BorrowError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("BorrowError").finish()
}
}
-#[unstable(feature = "try_borrow", issue = "35070")]
-impl<'a, T: ?Sized> Display for BorrowError<'a, T> {
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Display for BorrowError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt("already mutably borrowed", f)
}
}
/// An error returned by [`RefCell::try_borrow_mut`](struct.RefCell.html#method.try_borrow_mut).
-#[unstable(feature = "try_borrow", issue = "35070")]
-pub struct BorrowMutError<'a, T: 'a + ?Sized> {
- marker: PhantomData<&'a RefCell<T>>,
+#[stable(feature = "try_borrow", since = "1.13.0")]
+pub struct BorrowMutError {
+ _private: (),
}
-#[unstable(feature = "try_borrow", issue = "35070")]
-impl<'a, T: ?Sized> Debug for BorrowMutError<'a, T> {
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Debug for BorrowMutError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("BorrowMutError").finish()
}
}
-#[unstable(feature = "try_borrow", issue = "35070")]
-impl<'a, T: ?Sized> Display for BorrowMutError<'a, T> {
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Display for BorrowMutError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt("already borrowed", f)
}
/// # Examples
///
/// ```
- /// #![feature(try_borrow)]
- ///
/// use std::cell::RefCell;
///
/// let c = RefCell::new(5);
/// assert!(c.try_borrow().is_ok());
/// }
/// ```
- #[unstable(feature = "try_borrow", issue = "35070")]
+ #[stable(feature = "try_borrow", since = "1.13.0")]
#[inline]
- pub fn try_borrow(&self) -> Result<Ref<T>, BorrowError<T>> {
+ pub fn try_borrow(&self) -> Result<Ref<T>, BorrowError> {
match BorrowRef::new(&self.borrow) {
Some(b) => Ok(Ref {
value: unsafe { &*self.value.get() },
borrow: b,
}),
- None => Err(BorrowError { marker: PhantomData }),
+ None => Err(BorrowError { _private: () }),
}
}
/// # Examples
///
/// ```
- /// #![feature(try_borrow)]
- ///
/// use std::cell::RefCell;
///
/// let c = RefCell::new(5);
///
/// assert!(c.try_borrow_mut().is_ok());
/// ```
- #[unstable(feature = "try_borrow", issue = "35070")]
+ #[stable(feature = "try_borrow", since = "1.13.0")]
#[inline]
- pub fn try_borrow_mut(&self) -> Result<RefMut<T>, BorrowMutError<T>> {
+ pub fn try_borrow_mut(&self) -> Result<RefMut<T>, BorrowMutError> {
match BorrowRefMut::new(&self.borrow) {
Some(b) => Ok(RefMut {
value: unsafe { &mut *self.value.get() },
borrow: b,
}),
- None => Err(BorrowMutError { marker: PhantomData }),
+ None => Err(BorrowMutError { _private: () }),
}
}
use char_private::is_printable;
use convert::TryFrom;
use fmt;
+use slice;
use iter::FusedIterator;
use mem::transmute;
#[stable(feature = "core", since = "1.6.0")]
fn len_utf16(self) -> usize;
#[unstable(feature = "unicode", issue = "27784")]
- fn encode_utf8(self) -> EncodeUtf8;
+ fn encode_utf8(self, dst: &mut [u8]) -> &mut str;
#[unstable(feature = "unicode", issue = "27784")]
- fn encode_utf16(self) -> EncodeUtf16;
+ fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16];
}
#[stable(feature = "core", since = "1.6.0")]
}
#[inline]
- fn encode_utf8(self) -> EncodeUtf8 {
+ fn encode_utf8(self, dst: &mut [u8]) -> &mut str {
let code = self as u32;
- let mut buf = [0; 4];
- let pos = if code < MAX_ONE_B {
- buf[3] = code as u8;
- 3
- } else if code < MAX_TWO_B {
- buf[2] = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
- buf[3] = (code & 0x3F) as u8 | TAG_CONT;
- 2
- } else if code < MAX_THREE_B {
- buf[1] = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
- buf[2] = (code >> 6 & 0x3F) as u8 | TAG_CONT;
- buf[3] = (code & 0x3F) as u8 | TAG_CONT;
- 1
- } else {
- buf[0] = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
- buf[1] = (code >> 12 & 0x3F) as u8 | TAG_CONT;
- buf[2] = (code >> 6 & 0x3F) as u8 | TAG_CONT;
- buf[3] = (code & 0x3F) as u8 | TAG_CONT;
- 0
- };
- EncodeUtf8 { buf: buf, pos: pos }
+ unsafe {
+ let len =
+ if code < MAX_ONE_B && !dst.is_empty() {
+ *dst.get_unchecked_mut(0) = code as u8;
+ 1
+ } else if code < MAX_TWO_B && dst.len() >= 2 {
+ *dst.get_unchecked_mut(0) = (code >> 6 & 0x1F) as u8 | TAG_TWO_B;
+ *dst.get_unchecked_mut(1) = (code & 0x3F) as u8 | TAG_CONT;
+ 2
+ } else if code < MAX_THREE_B && dst.len() >= 3 {
+ *dst.get_unchecked_mut(0) = (code >> 12 & 0x0F) as u8 | TAG_THREE_B;
+ *dst.get_unchecked_mut(1) = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ *dst.get_unchecked_mut(2) = (code & 0x3F) as u8 | TAG_CONT;
+ 3
+ } else if dst.len() >= 4 {
+ *dst.get_unchecked_mut(0) = (code >> 18 & 0x07) as u8 | TAG_FOUR_B;
+ *dst.get_unchecked_mut(1) = (code >> 12 & 0x3F) as u8 | TAG_CONT;
+ *dst.get_unchecked_mut(2) = (code >> 6 & 0x3F) as u8 | TAG_CONT;
+ *dst.get_unchecked_mut(3) = (code & 0x3F) as u8 | TAG_CONT;
+ 4
+ } else {
+ panic!("encode_utf8: need {} bytes to encode U+{:X}, but the buffer has {}",
+ from_u32_unchecked(code).len_utf8(),
+ code,
+ dst.len())
+ };
+ transmute(slice::from_raw_parts_mut(dst.as_mut_ptr(), len))
+ }
}
#[inline]
- fn encode_utf16(self) -> EncodeUtf16 {
- let mut buf = [0; 2];
+ fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] {
let mut code = self as u32;
- let pos = if (code & 0xFFFF) == code {
- // The BMP falls through (assuming non-surrogate, as it should)
- buf[1] = code as u16;
- 1
- } else {
- // Supplementary planes break into surrogates.
- code -= 0x1_0000;
- buf[0] = 0xD800 | ((code >> 10) as u16);
- buf[1] = 0xDC00 | ((code as u16) & 0x3FF);
- 0
- };
- EncodeUtf16 { buf: buf, pos: pos }
+ unsafe {
+ if (code & 0xFFFF) == code && !dst.is_empty() {
+ // The BMP falls through (assuming non-surrogate, as it should)
+ *dst.get_unchecked_mut(0) = code as u16;
+ slice::from_raw_parts_mut(dst.as_mut_ptr(), 1)
+ } else if dst.len() >= 2 {
+ // Supplementary planes break into surrogates.
+ code -= 0x1_0000;
+ *dst.get_unchecked_mut(0) = 0xD800 | ((code >> 10) as u16);
+ *dst.get_unchecked_mut(1) = 0xDC00 | ((code as u16) & 0x3FF);
+ slice::from_raw_parts_mut(dst.as_mut_ptr(), 2)
+ } else {
+ panic!("encode_utf16: need {} units to encode U+{:X}, but the buffer has {}",
+ from_u32_unchecked(code).len_utf16(),
+ code,
+ dst.len())
+ }
+ }
}
}
#[unstable(feature = "fused", issue = "35602")]
impl FusedIterator for EscapeDebug {}
-/// An iterator over `u8` entries represending the UTF-8 encoding of a `char`
-/// value.
-///
-/// Constructed via the `.encode_utf8()` method on `char`.
-#[unstable(feature = "unicode", issue = "27784")]
-#[derive(Debug)]
-pub struct EncodeUtf8 {
- buf: [u8; 4],
- pos: usize,
-}
-
-impl EncodeUtf8 {
- /// Returns the remaining bytes of this iterator as a slice.
- #[unstable(feature = "unicode", issue = "27784")]
- pub fn as_slice(&self) -> &[u8] {
- &self.buf[self.pos..]
- }
-}
-
-#[unstable(feature = "unicode", issue = "27784")]
-impl Iterator for EncodeUtf8 {
- type Item = u8;
-
- fn next(&mut self) -> Option<u8> {
- if self.pos == self.buf.len() {
- None
- } else {
- let ret = Some(self.buf[self.pos]);
- self.pos += 1;
- ret
- }
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.as_slice().iter().size_hint()
- }
-}
-
-#[unstable(feature = "fused", issue = "35602")]
-impl FusedIterator for EncodeUtf8 {}
-
-/// An iterator over `u16` entries represending the UTF-16 encoding of a `char`
-/// value.
-///
-/// Constructed via the `.encode_utf16()` method on `char`.
-#[unstable(feature = "unicode", issue = "27784")]
-#[derive(Debug)]
-pub struct EncodeUtf16 {
- buf: [u16; 2],
- pos: usize,
-}
-
-impl EncodeUtf16 {
- /// Returns the remaining bytes of this iterator as a slice.
- #[unstable(feature = "unicode", issue = "27784")]
- pub fn as_slice(&self) -> &[u16] {
- &self.buf[self.pos..]
- }
-}
-
-
-#[unstable(feature = "unicode", issue = "27784")]
-impl Iterator for EncodeUtf16 {
- type Item = u16;
-
- fn next(&mut self) -> Option<u16> {
- if self.pos == self.buf.len() {
- None
- } else {
- let ret = Some(self.buf[self.pos]);
- self.pos += 1;
- ret
- }
- }
-
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.as_slice().iter().size_hint()
- }
-}
-#[unstable(feature = "fused", issue = "35602")]
-impl FusedIterator for EncodeUtf16 {}
/// An iterator over an iterator of bytes of the characters the bytes represent
/// as UTF-8
reason = "deriving hack, should not be public",
issue = "0")]
pub struct AssertParamIsCopy<T: Copy + ?Sized> { _field: ::marker::PhantomData<T> }
-#[cfg(stage0)]
-#[doc(hidden)]
-#[inline(always)]
-#[unstable(feature = "derive_clone_copy",
- reason = "deriving hack, should not be public",
- issue = "0")]
-pub fn assert_receiver_is_clone<T: Clone + ?Sized>(_: &T) {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T: ?Sized> Clone for &'a T {
//! Functionality for ordering and comparison.
//!
-//! This module defines both `PartialOrd` and `PartialEq` traits which are used
+//! This module defines both [`PartialOrd`] and [`PartialEq`] traits which are used
//! by the compiler to implement comparison operators. Rust programs may
-//! implement `PartialOrd` to overload the `<`, `<=`, `>`, and `>=` operators,
-//! and may implement `PartialEq` to overload the `==` and `!=` operators.
+//! implement [`PartialOrd`] to overload the `<`, `<=`, `>`, and `>=` operators,
+//! and may implement [`PartialEq`] to overload the `==` and `!=` operators.
+//!
+//! [`PartialOrd`]: trait.PartialOrd.html
+//! [`PartialEq`]: trait.PartialEq.html
//!
//! # Examples
//!
/// This function will return an instance of `Error` on error.
#[stable(feature = "fmt_write_char", since = "1.1.0")]
fn write_char(&mut self, c: char) -> Result {
- self.write_str(unsafe {
- str::from_utf8_unchecked(c.encode_utf8().as_slice())
- })
+ self.write_str(c.encode_utf8(&mut [0; 4]))
}
/// Glue for usage of the `write!` macro with implementors of this trait.
/// assert_eq!(output, "Hello world!");
/// ```
///
-/// Please note that using [`write!`][write_macro] might be preferrable. Example:
+/// Please note that using [`write!`] might be preferable. Example:
///
/// ```
/// use std::fmt::Write;
/// assert_eq!(output, "Hello world!");
/// ```
///
-/// [write_macro]: ../../std/macro.write!.html
+/// [`write!`]: ../../std/macro.write.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn write(output: &mut Write, args: Arguments) -> Result {
let mut formatter = Formatter {
// Writes the sign if it exists, and then the prefix if it was requested
let write_prefix = |f: &mut Formatter| {
if let Some(c) = sign {
- f.buf.write_str(unsafe {
- str::from_utf8_unchecked(c.encode_utf8().as_slice())
- })?;
+ f.buf.write_str(c.encode_utf8(&mut [0; 4]))?;
}
if prefixed { f.buf.write_str(prefix) }
else { Ok(()) }
rt::v1::Alignment::Center => (padding / 2, (padding + 1) / 2),
};
- let fill = self.fill.encode_utf8();
- let fill = unsafe {
- str::from_utf8_unchecked(fill.as_slice())
- };
+ let mut fill = [0; 4];
+ let fill = self.fill.encode_utf8(&mut fill);
for _ in 0..pre_pad {
self.buf.write_str(fill)?;
if f.width.is_none() && f.precision.is_none() {
f.write_char(*self)
} else {
- f.pad(unsafe {
- str::from_utf8_unchecked(self.encode_utf8().as_slice())
- })
+ f.pad(self.encode_utf8(&mut [0; 4]))
}
}
}
// Implementation of Display/Debug for various core types
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Debug for *const T {
+impl<T: ?Sized> Debug for *const T {
fn fmt(&self, f: &mut Formatter) -> Result { Pointer::fmt(self, f) }
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Debug for *mut T {
+impl<T: ?Sized> Debug for *mut T {
fn fmt(&self, f: &mut Formatter) -> Result { Pointer::fmt(self, f) }
}
use mem;
#[stable(feature = "rust1", since = "1.0.0")]
+#[allow(deprecated)]
pub use self::sip::SipHasher;
#[unstable(feature = "sip_hash_13", issue = "29754")]
+#[allow(deprecated)]
pub use self::sip::{SipHasher13, SipHasher24};
mod sip;
//! An implementation of SipHash.
+#![allow(deprecated)]
+
use marker::PhantomData;
use ptr;
/// An implementation of SipHash 1-3.
///
+/// This is currently the default hashing function used by the standard library
+/// (e.g. `collections::HashMap` uses it by default).
+///
/// See: https://131002.net/siphash/
#[unstable(feature = "sip_hash_13", issue = "34767")]
+#[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
pub struct SipHasher13 {
hasher: Hasher<Sip13Rounds>,
///
/// See: https://131002.net/siphash/
#[unstable(feature = "sip_hash_13", issue = "34767")]
+#[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
pub struct SipHasher24 {
hasher: Hasher<Sip24Rounds>,
///
/// See: https://131002.net/siphash/
///
-/// This is currently the default hashing function used by standard library
-/// (eg. `collections::HashMap` uses it by default).
-///
/// SipHash is a general-purpose hashing function: it runs at a good
/// speed (competitive with Spooky and City) and permits strong _keyed_
/// hashing. This lets you key your hashtables from a strong RNG, such as
/// it is not intended for cryptographic purposes. As such, all
/// cryptographic uses of this implementation are _strongly discouraged_.
#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
#[derive(Debug, Clone, Default)]
pub struct SipHasher(SipHasher24);
data.to_le()
}
-macro_rules! rotl {
- ($x:expr, $b:expr) =>
- (($x << $b) | ($x >> (64_i32.wrapping_sub($b))))
-}
-
macro_rules! compress {
($state:expr) => ({
compress!($state.v0, $state.v1, $state.v2, $state.v3)
});
($v0:expr, $v1:expr, $v2:expr, $v3:expr) =>
({
- $v0 = $v0.wrapping_add($v1); $v1 = rotl!($v1, 13); $v1 ^= $v0;
- $v0 = rotl!($v0, 32);
- $v2 = $v2.wrapping_add($v3); $v3 = rotl!($v3, 16); $v3 ^= $v2;
- $v0 = $v0.wrapping_add($v3); $v3 = rotl!($v3, 21); $v3 ^= $v0;
- $v2 = $v2.wrapping_add($v1); $v1 = rotl!($v1, 17); $v1 ^= $v2;
- $v2 = rotl!($v2, 32);
+ $v0 = $v0.wrapping_add($v1); $v1 = $v1.rotate_left(13); $v1 ^= $v0;
+ $v0 = $v0.rotate_left(32);
+ $v2 = $v2.wrapping_add($v3); $v3 = $v3.rotate_left(16); $v3 ^= $v2;
+ $v0 = $v0.wrapping_add($v3); $v3 = $v3.rotate_left(21); $v3 ^= $v0;
+ $v2 = $v2.wrapping_add($v1); $v1 = $v1.rotate_left(17); $v1 ^= $v2;
+ $v2 = $v2.rotate_left(32);
});
}
/// Creates a new `SipHasher` with the two initial keys set to 0.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
pub fn new() -> SipHasher {
SipHasher::new_with_keys(0, 0)
}
/// Creates a `SipHasher` that is keyed off the provided keys.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher {
SipHasher(SipHasher24::new_with_keys(key0, key1))
}
}
-
impl SipHasher13 {
/// Creates a new `SipHasher13` with the two initial keys set to 0.
#[inline]
#[unstable(feature = "sip_hash_13", issue = "34767")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
pub fn new() -> SipHasher13 {
SipHasher13::new_with_keys(0, 0)
}
/// Creates a `SipHasher13` that is keyed off the provided keys.
#[inline]
#[unstable(feature = "sip_hash_13", issue = "34767")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 {
SipHasher13 {
hasher: Hasher::new_with_keys(key0, key1)
/// Creates a new `SipHasher24` with the two initial keys set to 0.
#[inline]
#[unstable(feature = "sip_hash_13", issue = "34767")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
pub fn new() -> SipHasher24 {
SipHasher24::new_with_keys(0, 0)
}
/// Creates a `SipHasher24` that is keyed off the provided keys.
#[inline]
#[unstable(feature = "sip_hash_13", issue = "34767")]
+ #[rustc_deprecated(since = "1.13.0", reason = "use `DefaultHasher` instead")]
pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher24 {
SipHasher24 {
hasher: Hasher::new_with_keys(key0, key1)
/// own, or if it does not enable any significant optimizations.
pub fn assume(b: bool);
- #[cfg(not(stage0))]
/// Hints to the compiler that branch condition is likely to be true.
/// Returns the value passed to it.
///
/// Any use other than with `if` statements will probably not have an effect.
pub fn likely(b: bool) -> bool;
- #[cfg(not(stage0))]
/// Hints to the compiler that branch condition is likely to be false.
/// Returns the value passed to it.
///
/// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
/// bytes of memory starting at `dst` to `val`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr;
+ ///
+ /// let mut vec = vec![0; 4];
+ /// unsafe {
+ /// let vec_ptr = vec.as_mut_ptr();
+ /// ptr::write_bytes(vec_ptr, b'a', 2);
+ /// }
+ /// assert_eq!(vec, [b'a', b'a', 0, 0]);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
it: I,
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> Iterator for Cloned<I>
where I: Iterator<Item=&'a T>, T: Clone
{
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> DoubleEndedIterator for Cloned<I>
where I: DoubleEndedIterator<Item=&'a T>, T: Clone
{
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "iter_cloned", since = "1.1.0")]
impl<'a, I, T: 'a> ExactSizeIterator for Cloned<I>
where I: ExactSizeIterator<Item=&'a T>, T: Clone
{}
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[unstable(feature = "step_by", reason = "recent addition",
+ issue = "27741")]
impl<A> Iterator for StepBy<A, ops::RangeFrom<A>> where
A: Clone,
for<'a> &'a A: Add<&'a A, Output = A>
impl<A> FusedIterator for StepBy<A, ops::RangeFrom<A>>
where A: Clone, for<'a> &'a A: Add<&'a A, Output = A> {}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[unstable(feature = "step_by", reason = "recent addition",
+ issue = "27741")]
impl<A: Step + Clone> Iterator for StepBy<A, ops::Range<A>> {
type Item = A;
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
impl ExactSizeIterator for ops::Range<$t> { }
+ )*)
+}
+macro_rules! range_incl_exact_iter_impl {
+ ($($t:ty)*) => ($(
#[unstable(feature = "inclusive_range",
reason = "recently added, follows RFC",
issue = "28237")]
}
}
-// Ranges of u64 and i64 are excluded because they cannot guarantee having
-// a length <= usize::MAX, which is required by ExactSizeIterator.
+// These macros generate `ExactSizeIterator` impls for various range types.
+// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded
+// because they cannot guarantee having a length <= usize::MAX, which is
+// required by ExactSizeIterator.
range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32);
+range_incl_exact_iter_impl!(u8 u16 i8 i16);
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Step + Clone> DoubleEndedIterator for ops::Range<A> where
#![stable(feature = "rust1", since = "1.0.0")]
+use clone;
+use cmp;
+use fmt;
+use hash;
use intrinsics;
+use marker::{Copy, PhantomData, Sized};
use ptr;
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
ptr::read(src as *const T as *const U)
}
+
+/// Opaque type representing the discriminant of an enum.
+///
+/// See the `discriminant` function in this module for more information.
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+pub struct Discriminant<T>(u64, PhantomData<*const T>);
+
+// N.B. These trait implementations cannot be derived because we don't want any bounds on T.
+
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+impl<T> Copy for Discriminant<T> {}
+
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+impl<T> clone::Clone for Discriminant<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+impl<T> cmp::PartialEq for Discriminant<T> {
+ fn eq(&self, rhs: &Self) -> bool {
+ self.0 == rhs.0
+ }
+}
+
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+impl<T> cmp::Eq for Discriminant<T> {}
+
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+impl<T> hash::Hash for Discriminant<T> {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ self.0.hash(state);
+ }
+}
+
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+impl<T> fmt::Debug for Discriminant<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.debug_tuple("Discriminant")
+ .field(&self.0)
+ .finish()
+ }
+}
+
+/// Returns a value uniquely identifying the enum variant in `v`.
+///
+/// If `T` is not an enum, calling this function will not result in undefined behavior, but the
+/// return value is unspecified.
+///
+/// # Stability
+///
+/// The discriminant of an enum variant may change if the enum definition changes. A discriminant
+/// of some variant will not change between compilations with the same compiler.
+///
+/// # Examples
+///
+/// This can be used to compare enums that carry data, while disregarding
+/// the actual data:
+///
+/// ```
+/// #![feature(discriminant_value)]
+/// use std::mem;
+///
+/// enum Foo { A(&'static str), B(i32), C(i32) }
+///
+/// assert!(mem::discriminant(&Foo::A("bar")) == mem::discriminant(&Foo::A("baz")));
+/// assert!(mem::discriminant(&Foo::B(1)) == mem::discriminant(&Foo::B(2)));
+/// assert!(mem::discriminant(&Foo::B(3)) != mem::discriminant(&Foo::C(3)));
+/// ```
+#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")]
+pub fn discriminant<T>(v: &T) -> Discriminant<T> {
+ unsafe {
+ Discriminant(intrinsics::discriminant_value(v), PhantomData)
+ }
+}
+
/// Basic usage:
///
/// ```
- /// # #![feature(no_panic_abs)]
- ///
/// use std::i32;
///
/// assert_eq!((-5i32).checked_abs(), Some(5));
/// assert_eq!(i32::MIN.checked_abs(), None);
/// ```
- #[unstable(feature = "no_panic_abs", issue = "35057")]
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
#[inline]
pub fn checked_abs(self) -> Option<Self> {
if self.is_negative() {
/// Basic usage:
///
/// ```
- /// # #![feature(no_panic_abs)]
- ///
/// assert_eq!(100i8.wrapping_abs(), 100);
/// assert_eq!((-100i8).wrapping_abs(), 100);
/// assert_eq!((-128i8).wrapping_abs(), -128);
/// assert_eq!((-128i8).wrapping_abs() as u8, 128);
/// ```
- #[unstable(feature = "no_panic_abs", issue = "35057")]
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
#[inline(always)]
pub fn wrapping_abs(self) -> Self {
if self.is_negative() {
/// Basic usage:
///
/// ```
- /// # #![feature(no_panic_abs)]
- ///
/// assert_eq!(10i8.overflowing_abs(), (10,false));
/// assert_eq!((-10i8).overflowing_abs(), (10,false));
/// assert_eq!((-128i8).overflowing_abs(), (-128,true));
/// ```
- #[unstable(feature = "no_panic_abs", issue = "35057")]
+ #[stable(feature = "no_panic_abs", since = "1.13.0")]
#[inline]
pub fn overflowing_abs(self) -> (Self, bool) {
if self.is_negative() {
macro_rules! same_sign_from_int_impl {
($storage:ty, $target:ty, $($source:ty),*) => {$(
- #[stable(feature = "rust1", since = "1.0.0")]
+ #[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<$source> for $target {
type Err = TryFromIntError;
macro_rules! cross_sign_from_int_impl {
($unsigned:ty, $($signed:ty),*) => {$(
- #[stable(feature = "rust1", since = "1.0.0")]
+ #[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<$unsigned> for $signed {
type Err = TryFromIntError;
}
}
- #[stable(feature = "rust1", since = "1.0.0")]
+ #[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<$signed> for $unsigned {
type Err = TryFromIntError;
}
}
- #[stable(feature = "wrapping_impls", since = "1.7.0")]
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
impl ShlAssign<$f> for Wrapping<$t> {
#[inline(always)]
fn shl_assign(&mut self, other: $f) {
}
}
- #[stable(feature = "wrapping_impls", since = "1.7.0")]
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
impl ShrAssign<$f> for Wrapping<$t> {
#[inline(always)]
fn shr_assign(&mut self, other: $f) {
}
}
- #[stable(feature = "wrapping_impls", since = "1.7.0")]
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
impl ShlAssign<$f> for Wrapping<$t> {
#[inline(always)]
fn shl_assign(&mut self, other: $f) {
}
}
- #[stable(feature = "wrapping_impls", since = "1.7.0")]
+ #[stable(feature = "op_assign_traits", since = "1.8.0")]
impl ShrAssign<$f> for Wrapping<$t> {
#[inline(always)]
fn shr_assign(&mut self, other: $f) {
//!
//! Some of these traits are imported by the prelude, so they are available in
//! every Rust program. Only operators backed by traits can be overloaded. For
-//! example, the addition operator (`+`) can be overloaded through the `Add`
+//! example, the addition operator (`+`) can be overloaded through the [`Add`]
//! trait, but since the assignment operator (`=`) has no backing trait, there
//! is no way of overloading its semantics. Additionally, this module does not
//! provide any mechanism to create new operators. If traitless overloading or
//! contexts involving built-in types, this is usually not a problem.
//! However, using these operators in generic code, requires some
//! attention if values have to be reused as opposed to letting the operators
-//! consume them. One option is to occasionally use `clone()`.
+//! consume them. One option is to occasionally use [`clone()`].
//! Another option is to rely on the types involved providing additional
//! operator implementations for references. For example, for a user-defined
//! type `T` which is supposed to support addition, it is probably a good
-//! idea to have both `T` and `&T` implement the traits `Add<T>` and `Add<&T>`
-//! so that generic code can be written without unnecessary cloning.
+//! idea to have both `T` and `&T` implement the traits [`Add<T>`][`Add`] and
+//! [`Add<&T>`][`Add`] so that generic code can be written without unnecessary
+//! cloning.
//!
//! # Examples
//!
-//! This example creates a `Point` struct that implements `Add` and `Sub`, and
-//! then demonstrates adding and subtracting two `Point`s.
+//! This example creates a `Point` struct that implements [`Add`] and [`Sub`],
+//! and then demonstrates adding and subtracting two `Point`s.
//!
//! ```rust
//! use std::ops::{Add, Sub};
//! See the documentation for each trait for an example implementation.
//!
//! The [`Fn`], [`FnMut`], and [`FnOnce`] traits are implemented by types that can be
-//! invoked like functions. Note that `Fn` takes `&self`, `FnMut` takes `&mut
-//! self` and `FnOnce` takes `self`. These correspond to the three kinds of
+//! invoked like functions. Note that [`Fn`] takes `&self`, [`FnMut`] takes `&mut
+//! self` and [`FnOnce`] takes `self`. These correspond to the three kinds of
//! methods that can be invoked on an instance: call-by-reference,
//! call-by-mutable-reference, and call-by-value. The most common use of these
//! traits is to act as bounds to higher-level functions that take functions or
//! closures as arguments.
//!
-//! [`Fn`]: trait.Fn.html
-//! [`FnMut`]: trait.FnMut.html
-//! [`FnOnce`]: trait.FnOnce.html
-//!
-//! Taking a `Fn` as a parameter:
+//! Taking a [`Fn`] as a parameter:
//!
//! ```rust
//! fn call_with_one<F>(func: F) -> usize
//! assert_eq!(call_with_one(double), 2);
//! ```
//!
-//! Taking a `FnMut` as a parameter:
+//! Taking a [`FnMut`] as a parameter:
//!
//! ```rust
//! fn do_twice<F>(mut func: F)
//! assert_eq!(x, 5);
//! ```
//!
-//! Taking a `FnOnce` as a parameter:
+//! Taking a [`FnOnce`] as a parameter:
//!
//! ```rust
//! fn consume_with_relish<F>(func: F)
//!
//! // `consume_and_return_x` can no longer be invoked at this point
//! ```
+//!
+//! [`Fn`]: trait.Fn.html
+//! [`FnMut`]: trait.FnMut.html
+//! [`FnOnce`]: trait.FnOnce.html
+//! [`Add`]: trait.Add.html
+//! [`Sub`]: trait.Sub.html
+//! [`clone()`]: ../clone/trait.Clone.html#tymethod.clone
#![stable(feature = "rust1", since = "1.0.0")]
shr_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
/// The `Index` trait is used to specify the functionality of indexing operations
-/// like `arr[idx]` when used in an immutable context.
+/// like `container[index]` when used in an immutable context.
///
/// # Examples
///
#[stable(feature = "rust1", since = "1.0.0")]
type Output: ?Sized;
- /// The method for the indexing (`Foo[Bar]`) operation
+ /// The method for the indexing (`container[index]`) operation
#[stable(feature = "rust1", since = "1.0.0")]
fn index(&self, index: Idx) -> &Self::Output;
}
/// The `IndexMut` trait is used to specify the functionality of indexing
-/// operations like `arr[idx]`, when used in a mutable context.
+/// operations like `container[index]`, when used in a mutable context.
///
/// # Examples
///
-/// A trivial implementation of `IndexMut`. When `Foo[Bar]` happens, it ends up
-/// calling `index_mut`, and therefore, `main` prints `Indexing!`.
+/// A trivial implementation of `IndexMut` for a type `Foo`. When `&mut Foo[2]`
+/// happens, it ends up calling `index_mut`, and therefore, `main` prints
+/// `Mutable indexing with 2!`.
///
/// ```
/// use std::ops::{Index, IndexMut};
///
/// #[derive(Copy, Clone)]
/// struct Foo;
-/// struct Bar;
///
-/// impl Index<Bar> for Foo {
+/// impl Index<usize> for Foo {
/// type Output = Foo;
///
-/// fn index<'a>(&'a self, _index: Bar) -> &'a Foo {
+/// fn index(&self, _index: usize) -> &Foo {
/// self
/// }
/// }
///
-/// impl IndexMut<Bar> for Foo {
-/// fn index_mut<'a>(&'a mut self, _index: Bar) -> &'a mut Foo {
-/// println!("Indexing!");
+/// impl IndexMut<usize> for Foo {
+/// fn index_mut(&mut self, index: usize) -> &mut Foo {
+/// println!("Mutable indexing with {}!", index);
/// self
/// }
/// }
///
/// fn main() {
-/// &mut Foo[Bar];
+/// &mut Foo[2];
/// }
/// ```
#[lang = "index_mut"]
#[rustc_on_unimplemented = "the type `{Self}` cannot be mutably indexed by `{Idx}`"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait IndexMut<Idx: ?Sized>: Index<Idx> {
- /// The method for the indexing (`Foo[Bar]`) operation
+ /// The method for the mutable indexing (`container[index]`) operation
#[stable(feature = "rust1", since = "1.0.0")]
fn index_mut(&mut self, index: Idx) -> &mut Self::Output;
}
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[unstable(feature = "unique", issue = "27730")]
impl<T> fmt::Pointer for Unique<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self.pointer, f)
#[cfg(target_has_atomic = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
impl Default for AtomicBool {
- /// Creates an `AtomicBool` initialised as false.
+ /// Creates an `AtomicBool` initialized to `false`.
fn default() -> Self {
Self::new(false)
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::char;
+use std::{char,str};
use std::convert::TryFrom;
#[test]
#[test]
fn test_encode_utf8() {
fn check(input: char, expect: &[u8]) {
- assert_eq!(input.encode_utf8().as_slice(), expect);
- for (a, b) in input.encode_utf8().zip(expect) {
- assert_eq!(a, *b);
- }
+ let mut buf = [0; 4];
+ let ptr = buf.as_ptr();
+ let s = input.encode_utf8(&mut buf);
+ assert_eq!(s.as_ptr() as usize, ptr as usize);
+ assert!(str::from_utf8(s.as_bytes()).is_ok());
+ assert_eq!(s.as_bytes(), expect);
}
check('x', &[0x78]);
#[test]
fn test_encode_utf16() {
fn check(input: char, expect: &[u16]) {
- assert_eq!(input.encode_utf16().as_slice(), expect);
- for (a, b) in input.encode_utf16().zip(expect) {
- assert_eq!(a, *b);
- }
+ let mut buf = [0; 2];
+ let ptr = buf.as_mut_ptr();
+ let b = input.encode_utf16(&mut buf);
+ assert_eq!(b.as_mut_ptr() as usize, ptr as usize);
+ assert_eq!(b, expect);
}
check('x', &[0x0078]);
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+
+#![allow(deprecated)]
+
use test::{Bencher, black_box};
use core::hash::{Hash, Hasher};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// FIXME https://github.com/kripken/emscripten/issues/4563
+// NB we have to actually not compile this test to avoid
+// an undefined symbol error
+#![cfg(not(target_os = "emscripten"))]
+
use core::num::flt2dec::estimator::*;
#[test]
-Subproject commit d4f6a19c55a03e3f9f6fb7377911b37ed807eb6c
+Subproject commit b474785561d58efbd27add9d22339dcabad742ad
"alloc 0.0.0",
"core 0.0.0",
"libc 0.0.0",
+ "unwind 0.0.0",
]
[[package]]
"core 0.0.0",
]
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "core 0.0.0",
+ "libc 0.0.0",
+]
+
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unwinding for emscripten
+//!
+//! Whereas Rust's usual unwinding implementation for Unix platforms
+//! calls into the libunwind APIs directly, on emscripten we instead
+//! call into the C++ unwinding APIs. This is just an expedience since
+//! emscripten's runtime always implements those APIs and does not
+//! implement libunwind.
+
+#![allow(private_no_mangle_fns)]
+
+use core::any::Any;
+use core::ptr;
+use alloc::boxed::Box;
+use libc::{self, c_int};
+use unwind as uw;
+use core::mem;
+
+pub fn payload() -> *mut u8 {
+ ptr::null_mut()
+}
+
+pub unsafe fn cleanup(ptr: *mut u8) -> Box<Any + Send> {
+ assert!(!ptr.is_null());
+ let ex = ptr::read(ptr as *mut _);
+ __cxa_free_exception(ptr as *mut _);
+ ex
+}
+
+pub unsafe fn panic(data: Box<Any + Send>) -> u32 {
+ let sz = mem::size_of_val(&data);
+ let exception = __cxa_allocate_exception(sz);
+ if exception == ptr::null_mut() {
+ return uw::_URC_FATAL_PHASE1_ERROR as u32;
+ }
+ let exception = exception as *mut Box<Any + Send>;
+ ptr::write(exception, data);
+ __cxa_throw(exception as *mut _, ptr::null_mut(), ptr::null_mut());
+
+ unreachable!()
+}
+
+#[lang = "eh_personality"]
+#[no_mangle]
+unsafe extern "C" fn rust_eh_personality(version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code {
+ __gxx_personality_v0(version, actions,
+ exception_class,
+ exception_object,
+ context)
+}
+
+extern {
+ fn __cxa_allocate_exception(thrown_size: libc::size_t) -> *mut libc::c_void;
+ fn __cxa_free_exception(thrown_exception: *mut libc::c_void);
+ fn __cxa_throw(thrown_exception: *mut libc::c_void,
+ tinfo: *mut libc::c_void,
+ dest: *mut libc::c_void);
+ fn __gxx_personality_v0(version: c_int,
+ actions: uw::_Unwind_Action,
+ exception_class: uw::_Unwind_Exception_Class,
+ exception_object: *mut uw::_Unwind_Exception,
+ context: *mut uw::_Unwind_Context)
+ -> uw::_Unwind_Reason_Code;
+}
mod imp;
// i686-pc-windows-gnu and all others
-#[cfg(any(unix, all(windows, target_arch = "x86", target_env = "gnu")))]
+#[cfg(any(all(unix, not(target_os = "emscripten")),
+ all(windows, target_arch = "x86", target_env = "gnu")))]
#[path = "gcc.rs"]
mod imp;
+// emscripten
+#[cfg(target_os = "emscripten")]
+#[path = "emcc.rs"]
+mod imp;
+
mod dwarf;
mod windows;
self.add_ast_node(pat.id, &[pats_exit])
}
- PatKind::Vec(ref pre, ref vec, ref post) => {
+ PatKind::Slice(ref pre, ref vec, ref post) => {
let pre_exit = self.pats_all(pre.iter(), pred);
let vec_exit = self.pats_all(vec.iter(), pre_exit);
let post_exit = self.pats_all(post.iter(), vec_exit);
self.add_unreachable_node()
}
- hir::ExprVec(ref elems) => {
+ hir::ExprArray(ref elems) => {
self.straightline(expr, pred, elems.iter().map(|e| &**e))
}
use syntax::ast;
use hir;
+#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
+pub enum CtorKind {
+ // Constructor function automatically created by a tuple struct/variant.
+ Fn,
+ // Constructor constant automatically created by a unit struct/variant.
+ Const,
+ // Unusable name in value namespace created by a struct variant.
+ Fictive,
+}
+
#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum Def {
- Fn(DefId),
- SelfTy(Option<DefId> /* trait */, Option<DefId> /* impl */),
+ // Type namespace
Mod(DefId),
- Static(DefId, bool /* is_mutbl */),
- Const(DefId),
- AssociatedConst(DefId),
- Local(DefId),
- Variant(DefId),
+ Struct(DefId), // DefId refers to NodeId of the struct itself
+ Union(DefId),
Enum(DefId),
+ Variant(DefId),
+ Trait(DefId),
TyAlias(DefId),
AssociatedTy(DefId),
- Trait(DefId),
PrimTy(hir::PrimTy),
TyParam(DefId),
- Upvar(DefId, // def id of closed over local
- usize, // index in the freevars list of the closure
- ast::NodeId), // expr node that creates the closure
+ SelfTy(Option<DefId> /* trait */, Option<DefId> /* impl */),
- // If Def::Struct lives in type namespace it denotes a struct item and its DefId refers
- // to NodeId of the struct itself.
- // If Def::Struct lives in value namespace (e.g. tuple struct, unit struct expressions)
- // it denotes a constructor and its DefId refers to NodeId of the struct's constructor.
- Struct(DefId),
- Union(DefId),
- Label(ast::NodeId),
+ // Value namespace
+ Fn(DefId),
+ Const(DefId),
+ Static(DefId, bool /* is_mutbl */),
+ StructCtor(DefId, CtorKind), // DefId refers to NodeId of the struct's constructor
+ VariantCtor(DefId, CtorKind),
Method(DefId),
+ AssociatedConst(DefId),
+ Local(DefId),
+ Upvar(DefId, // def id of closed over local
+ usize, // index in the freevars list of the closure
+ ast::NodeId), // expr node that creates the closure
+ Label(ast::NodeId),
+
+ // Both namespaces
Err,
}
#[derive(Copy, Clone, RustcEncodable, RustcDecodable)]
pub struct Export {
- pub name: ast::Name, // The name of the target.
- pub def_id: DefId, // The definition of the target.
+ pub name: ast::Name, // The name of the target.
+ pub def: Def, // The definition of the target.
+}
+
+impl CtorKind {
+ pub fn from_ast(vdata: &ast::VariantData) -> CtorKind {
+ match *vdata {
+ ast::VariantData::Tuple(..) => CtorKind::Fn,
+ ast::VariantData::Unit(..) => CtorKind::Const,
+ ast::VariantData::Struct(..) => CtorKind::Fictive,
+ }
+ }
+ pub fn from_hir(vdata: &hir::VariantData) -> CtorKind {
+ match *vdata {
+ hir::VariantData::Tuple(..) => CtorKind::Fn,
+ hir::VariantData::Unit(..) => CtorKind::Const,
+ hir::VariantData::Struct(..) => CtorKind::Fictive,
+ }
+ }
}
impl Def {
pub fn def_id(&self) -> DefId {
match *self {
Def::Fn(id) | Def::Mod(id) | Def::Static(id, _) |
- Def::Variant(id) | Def::Enum(id) | Def::TyAlias(id) | Def::AssociatedTy(id) |
- Def::TyParam(id) | Def::Struct(id) | Def::Union(id) | Def::Trait(id) |
- Def::Method(id) | Def::Const(id) | Def::AssociatedConst(id) |
- Def::Local(id) | Def::Upvar(id, ..) => {
+ Def::Variant(id) | Def::VariantCtor(id, ..) | Def::Enum(id) | Def::TyAlias(id) |
+ Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) |
+ Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) |
+ Def::AssociatedConst(id) | Def::Local(id) | Def::Upvar(id, ..) => {
id
}
Def::Mod(..) => "module",
Def::Static(..) => "static",
Def::Variant(..) => "variant",
+ Def::VariantCtor(.., CtorKind::Fn) => "tuple variant",
+ Def::VariantCtor(.., CtorKind::Const) => "unit variant",
+ Def::VariantCtor(.., CtorKind::Fictive) => "struct variant",
Def::Enum(..) => "enum",
- Def::TyAlias(..) => "type",
+ Def::TyAlias(..) => "type alias",
Def::AssociatedTy(..) => "associated type",
Def::Struct(..) => "struct",
+ Def::StructCtor(.., CtorKind::Fn) => "tuple struct",
+ Def::StructCtor(.., CtorKind::Const) => "unit struct",
+ Def::StructCtor(.., CtorKind::Fictive) => bug!("impossible struct constructor"),
Def::Union(..) => "union",
Def::Trait(..) => "trait",
Def::Method(..) => "method",
visitor.visit_id(typ.id);
match typ.node {
- TyVec(ref ty) => {
+ TySlice(ref ty) => {
visitor.visit_ty(ty)
}
TyPtr(ref mutable_type) => {
visitor.visit_ty(ty);
walk_list!(visitor, visit_ty_param_bound, bounds);
}
- TyFixedLengthVec(ref ty, ref expression) => {
+ TyArray(ref ty, ref expression) => {
visitor.visit_ty(ty);
visitor.visit_expr(expression)
}
visitor.visit_expr(upper_bound)
}
PatKind::Wild => (),
- PatKind::Vec(ref prepatterns, ref slice_pattern, ref postpatterns) => {
+ PatKind::Slice(ref prepatterns, ref slice_pattern, ref postpatterns) => {
walk_list!(visitor, visit_pat, prepatterns);
walk_list!(visitor, visit_pat, slice_pattern);
walk_list!(visitor, visit_pat, postpatterns);
ExprBox(ref subexpression) => {
visitor.visit_expr(subexpression)
}
- ExprVec(ref subexpressions) => {
+ ExprArray(ref subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
ExprRepeat(ref element, ref count) => {
use hir::map::Definitions;
use hir::map::definitions::DefPathData;
use hir::def_id::{DefIndex, DefId};
-use hir::def::{Def, PathResolution};
+use hir::def::{Def, CtorKind, PathResolution};
use session::Session;
+use lint;
use std::collections::BTreeMap;
use std::iter;
}
fn lower_ty(&mut self, t: &Ty) -> P<hir::Ty> {
- use syntax::ast::TyKind::*;
P(hir::Ty {
id: t.id,
node: match t.node {
- Infer | ImplicitSelf => hir::TyInfer,
- Vec(ref ty) => hir::TyVec(self.lower_ty(ty)),
- Ptr(ref mt) => hir::TyPtr(self.lower_mt(mt)),
- Rptr(ref region, ref mt) => {
+ TyKind::Infer | TyKind::ImplicitSelf => hir::TyInfer,
+ TyKind::Slice(ref ty) => hir::TySlice(self.lower_ty(ty)),
+ TyKind::Ptr(ref mt) => hir::TyPtr(self.lower_mt(mt)),
+ TyKind::Rptr(ref region, ref mt) => {
hir::TyRptr(self.lower_opt_lifetime(region), self.lower_mt(mt))
}
- BareFn(ref f) => {
+ TyKind::BareFn(ref f) => {
hir::TyBareFn(P(hir::BareFnTy {
lifetimes: self.lower_lifetime_defs(&f.lifetimes),
unsafety: self.lower_unsafety(f.unsafety),
decl: self.lower_fn_decl(&f.decl),
}))
}
- Never => hir::TyNever,
- Tup(ref tys) => hir::TyTup(tys.iter().map(|ty| self.lower_ty(ty)).collect()),
- Paren(ref ty) => {
+ TyKind::Never => hir::TyNever,
+ TyKind::Tup(ref tys) => {
+ hir::TyTup(tys.iter().map(|ty| self.lower_ty(ty)).collect())
+ }
+ TyKind::Paren(ref ty) => {
return self.lower_ty(ty);
}
- Path(ref qself, ref path) => {
+ TyKind::Path(ref qself, ref path) => {
let qself = qself.as_ref().map(|&QSelf { ref ty, position }| {
hir::QSelf {
ty: self.lower_ty(ty),
});
hir::TyPath(qself, self.lower_path(path))
}
- ObjectSum(ref ty, ref bounds) => {
+ TyKind::ObjectSum(ref ty, ref bounds) => {
hir::TyObjectSum(self.lower_ty(ty), self.lower_bounds(bounds))
}
- FixedLengthVec(ref ty, ref e) => {
- hir::TyFixedLengthVec(self.lower_ty(ty), self.lower_expr(e))
+ TyKind::Array(ref ty, ref e) => {
+ hir::TyArray(self.lower_ty(ty), self.lower_expr(e))
}
- Typeof(ref expr) => {
+ TyKind::Typeof(ref expr) => {
hir::TyTypeof(self.lower_expr(expr))
}
- PolyTraitRef(ref bounds) => {
+ TyKind::PolyTraitRef(ref bounds) => {
hir::TyPolyTraitRef(self.lower_bounds(bounds))
}
- ImplTrait(ref bounds) => {
+ TyKind::ImplTrait(ref bounds) => {
hir::TyImplTrait(self.lower_bounds(bounds))
}
- Mac(_) => panic!("TyMac should have been expanded by now."),
+ TyKind::Mac(_) => panic!("TyMac should have been expanded by now."),
},
span: t.span,
})
})
}
PatKind::Lit(ref e) => hir::PatKind::Lit(self.lower_expr(e)),
- PatKind::TupleStruct(ref pth, ref pats, ddpos) => {
- hir::PatKind::TupleStruct(self.lower_path(pth),
- pats.iter().map(|x| self.lower_pat(x)).collect(),
- ddpos)
+ PatKind::TupleStruct(ref path, ref pats, ddpos) => {
+ match self.resolver.get_resolution(p.id).map(|d| d.base_def) {
+ Some(def @ Def::StructCtor(_, CtorKind::Const)) |
+ Some(def @ Def::VariantCtor(_, CtorKind::Const)) => {
+ // Temporarily lower `UnitVariant(..)` into `UnitVariant`
+ // for backward compatibility.
+ let msg = format!("expected tuple struct/variant, found {} `{}`",
+ def.kind_name(), path);
+ self.sess.add_lint(
+ lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT,
+ p.id, p.span, msg
+ );
+ hir::PatKind::Path(None, self.lower_path(path))
+ }
+ _ => hir::PatKind::TupleStruct(self.lower_path(path),
+ pats.iter().map(|x| self.lower_pat(x)).collect(), ddpos)
+ }
}
PatKind::Path(ref opt_qself, ref path) => {
let opt_qself = opt_qself.as_ref().map(|qself| {
PatKind::Range(ref e1, ref e2) => {
hir::PatKind::Range(self.lower_expr(e1), self.lower_expr(e2))
}
- PatKind::Vec(ref before, ref slice, ref after) => {
- hir::PatKind::Vec(before.iter().map(|x| self.lower_pat(x)).collect(),
+ PatKind::Slice(ref before, ref slice, ref after) => {
+ hir::PatKind::Slice(before.iter().map(|x| self.lower_pat(x)).collect(),
slice.as_ref().map(|x| self.lower_pat(x)),
after.iter().map(|x| self.lower_pat(x)).collect())
}
}
ExprKind::Vec(ref exprs) => {
- hir::ExprVec(exprs.iter().map(|x| self.lower_expr(x)).collect())
+ hir::ExprArray(exprs.iter().map(|x| self.lower_expr(x)).collect())
}
ExprKind::Repeat(ref expr, ref count) => {
let expr = self.lower_expr(expr);
use middle::cstore::InlinedItem;
use syntax::ast::*;
+use syntax::ext::hygiene::Mark;
use syntax::visit;
-use syntax::parse::token;
+use syntax::parse::token::{self, keywords};
/// Creates def ids for nodes in the HIR.
-pub struct DefCollector<'ast> {
+pub struct DefCollector<'a> {
// If we are walking HIR (c.f., AST), we need to keep a reference to the
// crate.
- hir_crate: Option<&'ast hir::Crate>,
- definitions: &'ast mut Definitions,
+ hir_crate: Option<&'a hir::Crate>,
+ definitions: &'a mut Definitions,
parent_def: Option<DefIndex>,
+ pub visit_macro_invoc: Option<&'a mut FnMut(MacroInvocationData)>,
}
-impl<'ast> DefCollector<'ast> {
- pub fn new(definitions: &'ast mut Definitions) -> DefCollector<'ast> {
+pub struct MacroInvocationData {
+ pub mark: Mark,
+ pub def_index: DefIndex,
+ pub const_integer: bool,
+}
+
+impl<'a> DefCollector<'a> {
+ pub fn new(definitions: &'a mut Definitions) -> Self {
DefCollector {
hir_crate: None,
definitions: definitions,
parent_def: None,
+ visit_macro_invoc: None,
}
}
pub fn extend(parent_node: NodeId,
parent_def_path: DefPath,
parent_def_id: DefId,
- definitions: &'ast mut Definitions)
- -> DefCollector<'ast> {
+ definitions: &'a mut Definitions)
+ -> Self {
let mut collector = DefCollector::new(definitions);
assert_eq!(parent_def_path.krate, parent_def_id.krate);
self.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc);
}
- pub fn walk_item(&mut self, ii: &'ast InlinedItem, krate: &'ast hir::Crate) {
+ pub fn walk_item(&mut self, ii: &'a InlinedItem, krate: &'a hir::Crate) {
self.hir_crate = Some(krate);
ii.visit(self);
}
self.definitions.create_def_with_parent(parent, node_id, data)
}
- fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_def: DefIndex, f: F) {
+ pub fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_def: DefIndex, f: F) {
let parent = self.parent_def;
self.parent_def = Some(parent_def);
f(self);
self.parent_def = parent;
}
- fn visit_ast_const_integer(&mut self, expr: &Expr) {
- // Find the node which will be used after lowering.
- if let ExprKind::Paren(ref inner) = expr.node {
- return self.visit_ast_const_integer(inner);
- }
-
- // FIXME(eddyb) Closures should have separate
- // function definition IDs and expression IDs.
- if let ExprKind::Closure(..) = expr.node {
- return;
+ pub fn visit_ast_const_integer(&mut self, expr: &Expr) {
+ match expr.node {
+ // Find the node which will be used after lowering.
+ ExprKind::Paren(ref inner) => return self.visit_ast_const_integer(inner),
+ ExprKind::Mac(..) => return self.visit_macro_invoc(expr.id, true),
+ // FIXME(eddyb) Closures should have separate
+ // function definition IDs and expression IDs.
+ ExprKind::Closure(..) => return,
+ _ => {}
}
self.create_def(expr.id, DefPathData::Initializer);
}
- fn visit_hir_const_integer(&mut self, expr: &'ast hir::Expr) {
+ fn visit_hir_const_integer(&mut self, expr: &hir::Expr) {
// FIXME(eddyb) Closures should have separate
// function definition IDs and expression IDs.
if let hir::ExprClosure(..) = expr.node {
self.create_def(expr.id, DefPathData::Initializer);
}
+
+ fn visit_macro_invoc(&mut self, id: NodeId, const_integer: bool) {
+ if let Some(ref mut visit) = self.visit_macro_invoc {
+ visit(MacroInvocationData {
+ mark: Mark::from_placeholder_id(id),
+ const_integer: const_integer,
+ def_index: self.parent_def.unwrap(),
+ })
+ }
+ }
}
-impl<'ast> visit::Visitor for DefCollector<'ast> {
+impl<'a> visit::Visitor for DefCollector<'a> {
fn visit_item(&mut self, i: &Item) {
debug!("visit_item: {:?}", i);
ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) | ItemKind::Trait(..) |
ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | ItemKind::Ty(..) =>
DefPathData::TypeNs(i.ident.name.as_str()),
+ ItemKind::Mod(..) if i.ident == keywords::Invalid.ident() => {
+ return visit::walk_item(self, i);
+ }
ItemKind::Mod(..) => DefPathData::Module(i.ident.name.as_str()),
ItemKind::Static(..) | ItemKind::Const(..) | ItemKind::Fn(..) =>
DefPathData::ValueNs(i.ident.name.as_str()),
- ItemKind::Mac(..) => DefPathData::MacroDef(i.ident.name.as_str()),
+ ItemKind::Mac(..) if i.id == DUMMY_NODE_ID => return, // Scope placeholder
+ ItemKind::Mac(..) => return self.visit_macro_invoc(i.id, false),
ItemKind::Use(..) => DefPathData::Misc,
};
let def = self.create_def(i.id, def_data);
TraitItemKind::Method(..) | TraitItemKind::Const(..) =>
DefPathData::ValueNs(ti.ident.name.as_str()),
TraitItemKind::Type(..) => DefPathData::TypeNs(ti.ident.name.as_str()),
- TraitItemKind::Macro(..) => DefPathData::MacroDef(ti.ident.name.as_str()),
+ TraitItemKind::Macro(..) => return self.visit_macro_invoc(ti.id, false),
};
let def = self.create_def(ti.id, def_data);
ImplItemKind::Method(..) | ImplItemKind::Const(..) =>
DefPathData::ValueNs(ii.ident.name.as_str()),
ImplItemKind::Type(..) => DefPathData::TypeNs(ii.ident.name.as_str()),
- ImplItemKind::Macro(..) => DefPathData::MacroDef(ii.ident.name.as_str()),
+ ImplItemKind::Macro(..) => return self.visit_macro_invoc(ii.id, false),
};
let def = self.create_def(ii.id, def_data);
fn visit_pat(&mut self, pat: &Pat) {
let parent_def = self.parent_def;
- if let PatKind::Ident(_, id, _) = pat.node {
- let def = self.create_def(pat.id, DefPathData::Binding(id.node.name.as_str()));
- self.parent_def = Some(def);
+ match pat.node {
+ PatKind::Mac(..) => return self.visit_macro_invoc(pat.id, false),
+ PatKind::Ident(_, id, _) => {
+ let def = self.create_def(pat.id, DefPathData::Binding(id.node.name.as_str()));
+ self.parent_def = Some(def);
+ }
+ _ => {}
}
visit::walk_pat(self, pat);
fn visit_expr(&mut self, expr: &Expr) {
let parent_def = self.parent_def;
- if let ExprKind::Repeat(_, ref count) = expr.node {
- self.visit_ast_const_integer(count);
- }
-
- if let ExprKind::Closure(..) = expr.node {
- let def = self.create_def(expr.id, DefPathData::ClosureExpr);
- self.parent_def = Some(def);
+ match expr.node {
+ ExprKind::Mac(..) => return self.visit_macro_invoc(expr.id, false),
+ ExprKind::Repeat(_, ref count) => self.visit_ast_const_integer(count),
+ ExprKind::Closure(..) => {
+ let def = self.create_def(expr.id, DefPathData::ClosureExpr);
+ self.parent_def = Some(def);
+ }
+ _ => {}
}
visit::walk_expr(self, expr);
}
fn visit_ty(&mut self, ty: &Ty) {
- if let TyKind::FixedLengthVec(_, ref length) = ty.node {
- self.visit_ast_const_integer(length);
- }
- if let TyKind::ImplTrait(..) = ty.node {
- self.create_def(ty.id, DefPathData::ImplTrait);
+ match ty.node {
+ TyKind::Mac(..) => return self.visit_macro_invoc(ty.id, false),
+ TyKind::Array(_, ref length) => self.visit_ast_const_integer(length),
+ TyKind::ImplTrait(..) => {
+ self.create_def(ty.id, DefPathData::ImplTrait);
+ }
+ _ => {}
}
visit::walk_ty(self, ty);
}
fn visit_macro_def(&mut self, macro_def: &MacroDef) {
self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.ident.name.as_str()));
}
+
+ fn visit_stmt(&mut self, stmt: &Stmt) {
+ match stmt.node {
+ StmtKind::Mac(..) => self.visit_macro_invoc(stmt.id, false),
+ _ => visit::walk_stmt(self, stmt),
+ }
+ }
}
// We walk the HIR rather than the AST when reading items from metadata.
}
fn visit_ty(&mut self, ty: &'ast hir::Ty) {
- if let hir::TyFixedLengthVec(_, ref length) = ty.node {
+ if let hir::TyArray(_, ref length) = ty.node {
self.visit_hir_const_integer(length);
}
if let hir::TyImplTrait(..) = ty.node {
// except according to those terms.
use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE};
-use hir::map::def_collector::DefCollector;
use rustc_data_structures::fnv::FnvHashMap;
use std::fmt::Write;
-use std::hash::{Hash, Hasher, SipHasher};
-use syntax::{ast, visit};
+use std::hash::{Hash, Hasher};
+use std::collections::hash_map::DefaultHasher;
+use syntax::ast;
use syntax::parse::token::{self, InternedString};
use ty::TyCtxt;
use util::nodemap::NodeMap;
}
pub fn deterministic_hash(&self, tcx: TyCtxt) -> u64 {
- let mut state = SipHasher::new();
+ let mut state = DefaultHasher::new();
self.deterministic_hash_to(tcx, &mut state);
state.finish()
}
}
}
- pub fn collect(&mut self, krate: &ast::Crate) {
- let mut def_collector = DefCollector::new(self);
- def_collector.collect_root();
- visit::walk_crate(&mut def_collector, krate);
- }
-
/// Get the number of definitions.
pub fn len(&self) -> usize {
self.data.len()
pub use self::Node::*;
use self::MapEntry::*;
use self::collector::NodeCollector;
-use self::def_collector::DefCollector;
+pub use self::def_collector::{DefCollector, MacroInvocationData};
pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData,
DisambiguatedDefPathData, InlinedRootPath};
EntryVariant(p, _) |
EntryExpr(p, _) |
EntryStmt(p, _) |
- EntryTy(p, _) |
+ EntryTy(p, _) |
EntryLocal(p, _) |
EntryPat(p, _) |
EntryBlock(p, _) |
PatKind::Box(ref s) | PatKind::Ref(ref s, _) => {
s.walk_(it)
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
before.iter().all(|p| p.walk_(it)) &&
slice.iter().all(|p| p.walk_(it)) &&
after.iter().all(|p| p.walk_(it))
/// A range pattern, e.g. `1...2`
Range(P<Expr>, P<Expr>),
/// `[a, b, ..i, y, z]` is represented as:
- /// `PatKind::Vec(box [a, b], Some(i), box [y, z])`
- Vec(HirVec<P<Pat>>, Option<P<Pat>>, HirVec<P<Pat>>),
+ /// `PatKind::Slice(box [a, b], Some(i), box [y, z])`
+ Slice(HirVec<P<Pat>>, Option<P<Pat>>, HirVec<P<Pat>>),
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)]
/// A `box x` expression.
ExprBox(P<Expr>),
/// An array (`[a, b, c, d]`)
- ExprVec(HirVec<P<Expr>>),
+ ExprArray(HirVec<P<Expr>>),
/// A function call
///
/// The first field resolves to the function itself (usually an `ExprPath`),
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
/// The different kinds of types recognized by the compiler
pub enum Ty_ {
- /// A variable length array (`[T]`)
- TyVec(P<Ty>),
+ /// A variable length slice (`[T]`)
+ TySlice(P<Ty>),
/// A fixed length array (`[T; n]`)
- TyFixedLengthVec(P<Ty>, P<Expr>),
+ TyArray(P<Ty>, P<Expr>),
/// A raw pointer (`*const T` or `*mut T`)
TyPtr(MutTy),
/// A reference (`&'a T` or `&'a mut T`)
PatKind::Path(..) |
PatKind::Struct(..) => {
match dm.get(&pat.id).map(|d| d.full_def()) {
- Some(Def::Variant(..)) => true,
+ Some(Def::Variant(..)) | Some(Def::VariantCtor(..)) => true,
_ => false
}
}
- PatKind::Vec(..) => true,
+ PatKind::Slice(..) => true,
_ => false
}
}
PatKind::TupleStruct(..) |
PatKind::Path(..) |
PatKind::Struct(..) => {
- match dm.get(&p.id) {
- Some(&PathResolution { base_def: Def::Variant(id), .. }) => {
- variants.push(id);
- }
+ match dm.get(&p.id).map(|d| d.full_def()) {
+ Some(Def::Variant(id)) |
+ Some(Def::VariantCtor(id, ..)) => variants.push(id),
_ => ()
}
}
self.maybe_print_comment(ty.span.lo)?;
self.ibox(0)?;
match ty.node {
- hir::TyVec(ref ty) => {
+ hir::TySlice(ref ty) => {
word(&mut self.s, "[")?;
self.print_type(&ty)?;
word(&mut self.s, "]")?;
hir::TyImplTrait(ref bounds) => {
self.print_bounds("impl ", &bounds[..])?;
}
- hir::TyFixedLengthVec(ref ty, ref v) => {
+ hir::TyArray(ref ty, ref v) => {
word(&mut self.s, "[")?;
self.print_type(&ty)?;
word(&mut self.s, "; ")?;
self.word_space("box")?;
self.print_expr(expr)?;
}
- hir::ExprVec(ref exprs) => {
+ hir::ExprArray(ref exprs) => {
self.print_expr_vec(&exprs[..])?;
}
hir::ExprRepeat(ref element, ref count) => {
word(&mut self.s, "...")?;
self.print_expr(&end)?;
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
word(&mut self.s, "[")?;
self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&p))?;
if let Some(ref p) = *slice {
hir::TyPtr(ref mut_ty) => {
ty_queue.push(&mut_ty.ty);
}
- hir::TyVec(ref ty) |
- hir::TyFixedLengthVec(ref ty, _) => {
+ hir::TySlice(ref ty) |
+ hir::TyArray(ref ty, _) => {
ty_queue.push(&ty);
}
hir::TyTup(ref tys) => ty_queue.extend(tys.iter().map(|ty| &**ty)),
ty: build_to(mut_ty.ty, to),
})
}
- hir::TyVec(ty) => hir::TyVec(build_to(ty, to)),
- hir::TyFixedLengthVec(ty, e) => {
- hir::TyFixedLengthVec(build_to(ty, to), e)
+ hir::TySlice(ty) => hir::TySlice(build_to(ty, to)),
+ hir::TyArray(ty, e) => {
+ hir::TyArray(build_to(ty, to), e)
}
hir::TyTup(tys) => {
hir::TyTup(tys.into_iter().map(|ty| build_to(ty, to)).collect())
pub fn plug_leaks<T>(&self,
skol_map: SkolemizationMap<'tcx>,
snapshot: &CombinedSnapshot,
- value: &T) -> T
+ value: T) -> T
where T : TypeFoldable<'tcx>
{
debug!("plug_leaks(skol_map={:?}, value={:?})",
skol_map,
value);
+ if skol_map.is_empty() {
+ return value;
+ }
+
// Compute a mapping from the "taint set" of each skolemized
// region back to the `ty::BoundRegion` that it originally
// represented. Because `leak_check` passed, we know that
// Remove any instantiated type variables from `value`; those can hide
// references to regions from the `fold_regions` code below.
- let value = self.resolve_type_vars_if_possible(value);
+ let value = self.resolve_type_vars_if_possible(&value);
// Map any skolemization byproducts back to a late-bound
// region. Put that late-bound region at whatever the outermost
}
});
- debug!("plug_leaks: result={:?}",
- result);
-
self.pop_skolemized(skol_map, snapshot);
debug!("plug_leaks: result={:?}", result);
use mir::repr::Mir;
use mir::mir_map::MirMap;
use session::Session;
-use session::config::PanicStrategy;
use session::search_paths::PathKind;
use util::nodemap::{NodeSet, DefIdMap};
use std::path::PathBuf;
use rustc_back::target::Target;
use hir;
use hir::intravisit::Visitor;
+use rustc_back::PanicStrategy;
pub use self::NativeLibraryKind::{NativeStatic, NativeFramework, NativeUnknown};
-> Option<DefIndex>;
fn def_key(&self, def: DefId) -> hir_map::DefKey;
fn relative_def_path(&self, def: DefId) -> Option<hir_map::DefPath>;
- fn variant_kind(&self, def_id: DefId) -> Option<ty::VariantKind>;
- fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option<DefId>;
fn struct_field_names(&self, def: DefId) -> Vec<ast::Name>;
fn item_children(&self, did: DefId) -> Vec<def::Export>;
fn relative_def_path(&self, def: DefId) -> Option<hir_map::DefPath> {
bug!("relative_def_path")
}
- fn variant_kind(&self, def_id: DefId) -> Option<ty::VariantKind> { bug!("variant_kind") }
- fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option<DefId>
- { bug!("struct_ctor_def_id") }
fn struct_field_names(&self, def: DefId) -> Vec<ast::Name> { bug!("struct_field_names") }
fn item_children(&self, did: DefId) -> Vec<def::Export> { bug!("item_children") }
fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") }
}
-pub enum LoadedMacro {
+pub struct LoadedMacro {
+ pub import_site: Span,
+ pub kind: LoadedMacroKind,
+}
+
+pub enum LoadedMacroKind {
Def(ast::MacroDef),
CustomDerive(String, Rc<MultiItemModifier>),
}
self.check_def_id(def.def_id());
}
_ if self.ignore_non_const_paths => (),
- Def::PrimTy(_) => (),
- Def::SelfTy(..) => (),
- Def::Variant(variant_id) => {
+ Def::PrimTy(..) | Def::SelfTy(..) => (),
+ Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => {
if let Some(enum_id) = self.tcx.parent_def_id(variant_id) {
self.check_def_id(enum_id);
}
use hir::def_id::CrateNum;
use session;
-use session::config::{self, PanicStrategy};
+use session::config;
use middle::cstore::LinkagePreference::{self, RequireStatic, RequireDynamic};
use util::nodemap::FnvHashMap;
+use rustc_back::PanicStrategy;
/// A list of dependencies for a certain crate type.
///
// only one, but we perform validation here that all the panic strategy
// compilation modes for the whole DAG are valid.
if let Some((cnum, found_strategy)) = panic_runtime {
- let desired_strategy = sess.opts.cg.panic.clone();
+ let desired_strategy = sess.panic_strategy();
// First up, validate that our selected panic runtime is indeed exactly
// our same strategy.
}
}
- hir::ExprVec(ref exprs) => {
+ hir::ExprArray(ref exprs) => {
self.consume_exprs(exprs);
}
// the leaves of the pattern tree structure.
return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| {
match tcx.expect_def_or_none(pat.id) {
- Some(Def::Variant(variant_did)) => {
+ Some(Def::Variant(variant_did)) |
+ Some(Def::VariantCtor(variant_did, ..)) => {
let enum_did = tcx.parent_def_id(variant_did).unwrap();
let downcast_cmt = if tcx.lookup_adt_def(enum_did).is_univariant() {
cmt_pat
debug!("variant downcast_cmt={:?} pat={:?}", downcast_cmt, pat);
delegate.matched_pat(pat, downcast_cmt, match_mode);
}
- Some(Def::Struct(..)) | Some(Def::Union(..)) |
+ Some(Def::Struct(..)) | Some(Def::StructCtor(..)) | Some(Def::Union(..)) |
Some(Def::TyAlias(..)) | Some(Def::AssociatedTy(..)) => {
debug!("struct cmt_pat={:?} pat={:?}", cmt_pat, pat);
delegate.matched_pat(pat, cmt_pat, match_mode);
}
- _ => {}
+ None | Some(Def::Local(..)) |
+ Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {}
+ def => bug!("unexpected definition: {:?}", def)
}
}));
}
}
};
- span_err!(self.infcx.tcx.sess, span, E0512,
+ struct_span_err!(self.infcx.tcx.sess, span, E0512,
"transmute called with differently sized types: \
{} ({}) to {} ({})",
from, skeleton_string(from, sk_from),
- to, skeleton_string(to, sk_to));
+ to, skeleton_string(to, sk_to))
+ .span_label(span,
+ &format!("transmuting between {} and {}",
+ skeleton_string(from, sk_from),
+ skeleton_string(to, sk_to)))
+ .emit();
}
}
// otherwise, live nodes are not required:
hir::ExprIndex(..) | hir::ExprField(..) | hir::ExprTupField(..) |
- hir::ExprVec(..) | hir::ExprCall(..) | hir::ExprMethodCall(..) |
+ hir::ExprArray(..) | hir::ExprCall(..) | hir::ExprMethodCall(..) |
hir::ExprTup(..) | hir::ExprBinary(..) | hir::ExprAddrOf(..) |
hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprBreak(_) |
hir::ExprAgain(_) | hir::ExprLit(_) | hir::ExprRet(..) |
// Uninteresting cases: just propagate in rev exec order
- hir::ExprVec(ref exprs) => {
+ hir::ExprArray(ref exprs) => {
self.propagate_through_exprs(&exprs[..], succ)
}
hir::ExprCall(..) | hir::ExprMethodCall(..) | hir::ExprIf(..) |
hir::ExprMatch(..) | hir::ExprWhile(..) | hir::ExprLoop(..) |
hir::ExprIndex(..) | hir::ExprField(..) | hir::ExprTupField(..) |
- hir::ExprVec(..) | hir::ExprTup(..) | hir::ExprBinary(..) |
+ hir::ExprArray(..) | hir::ExprTup(..) | hir::ExprBinary(..) |
hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprRet(..) |
hir::ExprBreak(..) | hir::ExprAgain(..) | hir::ExprLit(_) |
hir::ExprBlock(..) | hir::ExprAddrOf(..) |
use hir::map as ast_map;
use infer::InferCtxt;
use middle::const_qualif::ConstQualif;
-use hir::def::Def;
+use hir::def::{Def, CtorKind};
use ty::adjustment;
use ty::{self, Ty, TyCtxt};
hir::ExprClosure(..) | hir::ExprRet(..) |
hir::ExprUnary(..) |
hir::ExprMethodCall(..) | hir::ExprCast(..) |
- hir::ExprVec(..) | hir::ExprTup(..) | hir::ExprIf(..) |
+ hir::ExprArray(..) | hir::ExprTup(..) | hir::ExprIf(..) |
hir::ExprBinary(..) | hir::ExprWhile(..) |
hir::ExprBlock(..) | hir::ExprLoop(..) | hir::ExprMatch(..) |
hir::ExprLit(..) | hir::ExprBreak(..) |
id, expr_ty, def);
match def {
- Def::Struct(..) | Def::Union(..) | Def::Variant(..) | Def::Const(..) |
+ Def::StructCtor(..) | Def::VariantCtor(..) | Def::Const(..) |
Def::AssociatedConst(..) | Def::Fn(..) | Def::Method(..) => {
Ok(self.cat_rvalue_node(id, span, expr_ty))
}
- Def::Mod(_) |
- Def::Trait(_) | Def::Enum(..) | Def::TyAlias(..) | Def::PrimTy(_) |
- Def::TyParam(..) |
- Def::Label(_) | Def::SelfTy(..) |
- Def::AssociatedTy(..) => {
- span_bug!(span, "Unexpected definition in \
- memory categorization: {:?}", def);
- }
-
Def::Static(_, mutbl) => {
Ok(Rc::new(cmt_ {
id:id,
}))
}
- Def::Err => bug!("Def::Err in memory categorization")
+ def => span_bug!(span, "unexpected definition in memory categorization: {:?}", def)
}
}
// alone) because PatKind::Struct can also refer to variants.
let cmt = match self.tcx().expect_def_or_none(pat.id) {
Some(Def::Err) => return Err(()),
- Some(Def::Variant(variant_did)) => {
+ Some(Def::Variant(variant_did)) |
+ Some(Def::VariantCtor(variant_did, ..)) => {
// univariant enums do not need downcasts
let enum_did = self.tcx().parent_def_id(variant_did).unwrap();
if !self.tcx().lookup_adt_def(enum_did).is_univariant() {
match pat.node {
PatKind::TupleStruct(_, ref subpats, ddpos) => {
let expected_len = match self.tcx().expect_def(pat.id) {
- Def::Variant(def_id) => {
+ Def::VariantCtor(def_id, CtorKind::Fn) => {
let enum_def = self.tcx().parent_def_id(def_id).unwrap();
self.tcx().lookup_adt_def(enum_def).variant_with_id(def_id).fields.len()
}
- Def::Struct(..) => {
+ Def::StructCtor(_, CtorKind::Fn) => {
match self.pat_ty(&pat)?.sty {
ty::TyAdt(adt_def, _) => {
adt_def.struct_variant().fields.len()
self.cat_pattern_(subcmt, &subpat, op)?;
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
let context = InteriorOffsetKind::Pattern;
let elt_cmt = self.cat_index(pat, cmt, context)?;
for before_pat in before {
field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat))
}
- PatKind::Vec(ref pats1, ref pats2, ref pats3) => {
+ PatKind::Slice(ref pats1, ref pats2, ref pats3) => {
pats1.iter().any(|p| is_binding_pat(&p)) ||
pats2.iter().any(|p| is_binding_pat(&p)) ||
pats3.iter().any(|p| is_binding_pat(&p))
visitor, &field.expr, blk_id);
}
}
- hir::ExprVec(ref subexprs) |
+ hir::ExprArray(ref subexprs) |
hir::ExprTup(ref subexprs) => {
for subexpr in subexprs {
record_rvalue_scope_if_borrow_expr(
&feature, &r),
None => format!("use of unstable library feature '{}'", &feature)
};
- emit_feature_err(&self.tcx.sess.parse_sess.span_diagnostic,
- &feature, span, GateIssue::Library(Some(issue)), &msg);
+ emit_feature_err(&self.tcx.sess.parse_sess, &feature, span,
+ GateIssue::Library(Some(issue)), &msg);
}
}
Some(&Stability { ref level, ref feature, .. }) => {
&Option<DeprecationEntry>)) {
// Paths in import prefixes may have no resolution.
match tcx.expect_def_or_none(id) {
- Some(Def::PrimTy(..)) => {}
- Some(Def::SelfTy(..)) => {}
- Some(def) => {
- maybe_do_stability_check(tcx, def.def_id(), path.span, cb);
- }
- None => {}
+ None | Some(Def::PrimTy(..)) | Some(Def::SelfTy(..)) => {}
+ Some(def) => maybe_do_stability_check(tcx, def.def_id(), path.span, cb)
}
}
cb: &mut FnMut(DefId, Span,
&Option<&Stability>,
&Option<DeprecationEntry>)) {
- match tcx.expect_def(item.node.id) {
- Def::PrimTy(..) => {}
- def => {
- maybe_do_stability_check(tcx, def.def_id(), item.span, cb);
- }
- }
+ maybe_do_stability_check(tcx, tcx.expect_def(item.node.id).def_id(), item.span, cb);
}
pub fn check_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pat: &hir::Pat,
//! Validity checking for weak lang items
-use session::config::{self, PanicStrategy};
+use session::config;
use session::Session;
use middle::lang_items;
+use rustc_back::PanicStrategy;
use syntax::ast;
use syntax::parse::token::InternedString;
use syntax_pos::Span;
// symbols. Other panic runtimes ensure that the relevant symbols are
// available to link things together, but they're never exercised.
let mut whitelisted = HashSet::new();
- if sess.opts.cg.panic != PanicStrategy::Unwind {
+ if sess.panic_strategy() != PanicStrategy::Unwind {
whitelisted.insert(lang_items::EhPersonalityLangItem);
whitelisted.insert(lang_items::EhUnwindResumeLangItem);
}
use rustc_data_structures::control_flow_graph::dominators::{Dominators, dominators};
use rustc_data_structures::control_flow_graph::{GraphPredecessors, GraphSuccessors};
use rustc_data_structures::control_flow_graph::ControlFlowGraph;
+use hir::def::CtorKind;
use hir::def_id::DefId;
use ty::subst::Substs;
use ty::{self, AdtDef, ClosureSubsts, Region, Ty};
/// Rvalues promoted from this function, such as borrows of constants.
/// Each of them is the Mir of a constant with the fn's type parameters
- /// in scope, but no vars or args and a separate set of temps.
+ /// in scope, but a separate set of locals.
pub promoted: IndexVec<Promoted, Mir<'tcx>>,
/// Return type of the function.
pub return_ty: Ty<'tcx>,
- /// Variables: these are stack slots corresponding to user variables. They may be
- /// assigned many times.
- pub var_decls: IndexVec<Var, VarDecl<'tcx>>,
-
- /// Args: these are stack slots corresponding to the input arguments.
- pub arg_decls: IndexVec<Arg, ArgDecl<'tcx>>,
+ /// Declarations of locals.
+ ///
+ /// The first local is the return value pointer, followed by `arg_count`
+ /// locals for the function arguments, followed by any user-declared
+ /// variables and temporaries.
+ pub local_decls: IndexVec<Local, LocalDecl<'tcx>>,
- /// Temp declarations: stack slots that for temporaries created by
- /// the compiler. These are assigned once, but they are not SSA
- /// values in that it is possible to borrow them and mutate them
- /// through the resulting reference.
- pub temp_decls: IndexVec<Temp, TempDecl<'tcx>>,
+ /// Number of arguments this function takes.
+ ///
+ /// Starting at local 1, `arg_count` locals will be provided by the caller
+ /// and can be assumed to be initialized.
+ ///
+ /// If this MIR was built for a constant, this will be 0.
+ pub arg_count: usize,
/// Names and capture modes of all the closure upvars, assuming
/// the first argument is either the closure or a reference to it.
pub upvar_decls: Vec<UpvarDecl>,
+ /// Mark an argument local (which must be a tuple) as getting passed as
+ /// its individual components at the LLVM level.
+ ///
+ /// This is used for the "rust-call" ABI.
+ pub spread_arg: Option<Local>,
+
/// A span representing this MIR, for error reporting
pub span: Span,
visibility_scopes: IndexVec<VisibilityScope, VisibilityScopeData>,
promoted: IndexVec<Promoted, Mir<'tcx>>,
return_ty: Ty<'tcx>,
- var_decls: IndexVec<Var, VarDecl<'tcx>>,
- arg_decls: IndexVec<Arg, ArgDecl<'tcx>>,
- temp_decls: IndexVec<Temp, TempDecl<'tcx>>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ arg_count: usize,
upvar_decls: Vec<UpvarDecl>,
span: Span) -> Self
{
+ // We need `arg_count` locals, and one for the return pointer
+ assert!(local_decls.len() >= arg_count + 1,
+ "expected at least {} locals, got {}", arg_count + 1, local_decls.len());
+ assert_eq!(local_decls[RETURN_POINTER].ty, return_ty);
+
Mir {
basic_blocks: basic_blocks,
visibility_scopes: visibility_scopes,
promoted: promoted,
return_ty: return_ty,
- var_decls: var_decls,
- arg_decls: arg_decls,
- temp_decls: temp_decls,
+ local_decls: local_decls,
+ arg_count: arg_count,
upvar_decls: upvar_decls,
+ spread_arg: None,
span: span,
cache: Cache::new()
}
dominators(self)
}
- /// Maps locals (Arg's, Var's, Temp's and ReturnPointer, in that order)
- /// to their index in the whole list of locals. This is useful if you
- /// want to treat all locals the same instead of repeating yourself.
- pub fn local_index(&self, lvalue: &Lvalue<'tcx>) -> Option<Local> {
- let idx = match *lvalue {
- Lvalue::Arg(arg) => arg.index(),
- Lvalue::Var(var) => {
- self.arg_decls.len() +
- var.index()
- }
- Lvalue::Temp(temp) => {
- self.arg_decls.len() +
- self.var_decls.len() +
- temp.index()
+ #[inline]
+ pub fn local_kind(&self, local: Local) -> LocalKind {
+ let index = local.0 as usize;
+ if index == 0 {
+ debug_assert!(self.local_decls[local].mutability == Mutability::Mut,
+ "return pointer should be mutable");
+
+ LocalKind::ReturnPointer
+ } else if index < self.arg_count + 1 {
+ LocalKind::Arg
+ } else if self.local_decls[local].name.is_some() {
+ LocalKind::Var
+ } else {
+ debug_assert!(self.local_decls[local].mutability == Mutability::Mut,
+ "temp should be mutable");
+
+ LocalKind::Temp
+ }
+ }
+
+ /// Returns an iterator over all temporaries.
+ #[inline]
+ pub fn temps_iter<'a>(&'a self) -> impl Iterator<Item=Local> + 'a {
+ (self.arg_count+1..self.local_decls.len()).filter_map(move |index| {
+ let local = Local::new(index);
+ if self.local_decls[local].source_info.is_none() {
+ Some(local)
+ } else {
+ None
}
- Lvalue::ReturnPointer => {
- self.arg_decls.len() +
- self.var_decls.len() +
- self.temp_decls.len()
+ })
+ }
+
+ /// Returns an iterator over all user-declared locals.
+ #[inline]
+ pub fn vars_iter<'a>(&'a self) -> impl Iterator<Item=Local> + 'a {
+ (self.arg_count+1..self.local_decls.len()).filter_map(move |index| {
+ let local = Local::new(index);
+ if self.local_decls[local].source_info.is_none() {
+ None
+ } else {
+ Some(local)
}
- Lvalue::Static(_) |
- Lvalue::Projection(_) => return None
- };
- Some(Local::new(idx))
+ })
}
- /// Counts the number of locals, such that local_index
- /// will always return an index smaller than this count.
- pub fn count_locals(&self) -> usize {
- self.arg_decls.len() +
- self.var_decls.len() +
- self.temp_decls.len() + 1
+ /// Returns an iterator over all function arguments.
+ #[inline]
+ pub fn args_iter(&self) -> impl Iterator<Item=Local> {
+ let arg_count = self.arg_count;
+ (1..arg_count+1).map(Local::new)
}
- pub fn format_local(&self, local: Local) -> String {
- let mut index = local.index();
- index = match index.checked_sub(self.arg_decls.len()) {
- None => return format!("{:?}", Arg::new(index)),
- Some(index) => index,
- };
- index = match index.checked_sub(self.var_decls.len()) {
- None => return format!("{:?}", Var::new(index)),
- Some(index) => index,
- };
- index = match index.checked_sub(self.temp_decls.len()) {
- None => return format!("{:?}", Temp::new(index)),
- Some(index) => index,
- };
- debug_assert!(index == 0);
- return "ReturnPointer".to_string()
+ /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all
+ /// locals that are neither arguments nor the return pointer).
+ #[inline]
+ pub fn vars_and_temps_iter(&self) -> impl Iterator<Item=Local> {
+ let arg_count = self.arg_count;
+ let local_count = self.local_decls.len();
+ (arg_count+1..local_count).map(Local::new)
}
/// Changes a statement to a nop. This is both faster than deleting instructions and avoids
///////////////////////////////////////////////////////////////////////////
// Variables and temps
-/// A "variable" is a binding declared by the user as part of the fn
-/// decl, a let, etc.
-#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
-pub struct VarDecl<'tcx> {
- /// `let mut x` vs `let x`
- pub mutability: Mutability,
-
- /// name that user gave the variable; not that, internally,
- /// mir references variables by index
- pub name: Name,
+newtype_index!(Local, "_");
- /// type inferred for this variable (`let x: ty = ...`)
- pub ty: Ty<'tcx>,
+pub const RETURN_POINTER: Local = Local(0);
- /// source information (span, scope, etc.) for the declaration
- pub source_info: SourceInfo,
-}
-
-/// A "temp" is a temporary that we place on the stack. They are
-/// anonymous, always mutable, and have only a type.
-#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
-pub struct TempDecl<'tcx> {
- pub ty: Ty<'tcx>,
+/// Classifies locals into categories. See `Mir::local_kind`.
+#[derive(PartialEq, Eq, Debug)]
+pub enum LocalKind {
+ /// User-declared variable binding
+ Var,
+ /// Compiler-introduced temporary
+ Temp,
+ /// Function argument
+ Arg,
+ /// Location of function's return value
+ ReturnPointer,
}
-/// A "arg" is one of the function's formal arguments. These are
-/// anonymous and distinct from the bindings that the user declares.
-///
-/// For example, in this function:
-///
-/// ```
-/// fn foo((x, y): (i32, u32)) { ... }
-/// ```
+/// A MIR local.
///
-/// there is only one argument, of type `(i32, u32)`, but two bindings
-/// (`x` and `y`).
+/// This can be a binding declared by the user, a temporary inserted by the compiler, a function
+/// argument, or the return pointer.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
-pub struct ArgDecl<'tcx> {
+pub struct LocalDecl<'tcx> {
+ /// `let mut x` vs `let x`.
+ ///
+ /// Temporaries and the return pointer are always mutable.
+ pub mutability: Mutability,
+
+ /// Type of this local.
pub ty: Ty<'tcx>,
- /// If true, this argument is a tuple after monomorphization,
- /// and has to be collected from multiple actual arguments.
- pub spread: bool,
+ /// Name of the local, used in debuginfo and pretty-printing.
+ ///
+ /// Note that function arguments can also have this set to `Some(_)`
+ /// to generate better debuginfo.
+ pub name: Option<Name>,
+
+ /// For user-declared variables, stores their source information.
+ ///
+ /// For temporaries, this is `None`.
+ ///
+ /// This is the primary way to differentiate between user-declared
+ /// variables and compiler-generated temporaries.
+ pub source_info: Option<SourceInfo>,
+}
+
+impl<'tcx> LocalDecl<'tcx> {
+ /// Create a new `LocalDecl` for a temporary.
+ #[inline]
+ pub fn new_temp(ty: Ty<'tcx>) -> Self {
+ LocalDecl {
+ mutability: Mutability::Mut,
+ ty: ty,
+ name: None,
+ source_info: None,
+ }
+ }
- /// Either keywords::Invalid or the name of a single-binding
- /// pattern associated with this argument. Useful for debuginfo.
- pub debug_name: Name
+ /// Builds a `LocalDecl` for the return pointer.
+ ///
+ /// This must be inserted into the `local_decls` list as the first local.
+ #[inline]
+ pub fn new_return_pointer(return_ty: Ty) -> LocalDecl {
+ LocalDecl {
+ mutability: Mutability::Mut,
+ ty: return_ty,
+ source_info: None,
+ name: None, // FIXME maybe we do want some name here?
+ }
+ }
}
/// A closure capture, with its name and mode.
/// continue. Emitted by build::scope::diverge_cleanup.
Resume,
- /// Indicates a normal return. The ReturnPointer lvalue should
+ /// Indicates a normal return. The return pointer lvalue should
/// have been filled in by now. This should occur at most once.
Return,
///////////////////////////////////////////////////////////////////////////
// Lvalues
-newtype_index!(Var, "var");
-newtype_index!(Temp, "tmp");
-newtype_index!(Arg, "arg");
-newtype_index!(Local, "local");
-
/// A path to a value; something that can be evaluated without
/// changing or disturbing program state.
#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
pub enum Lvalue<'tcx> {
- /// local variable declared by the user
- Var(Var),
-
- /// temporary introduced during lowering into MIR
- Temp(Temp),
-
- /// formal parameter of the function; note that these are NOT the
- /// bindings that the user declares, which are vars
- Arg(Arg),
+ /// local variable
+ Local(Local),
/// static or static mut variable
Static(DefId),
- /// the return pointer of the fn
- ReturnPointer,
-
/// projection out of an lvalue (access a field, deref a pointer, etc)
Projection(Box<LvalueProjection<'tcx>>),
}
elem: elem,
}))
}
-
- pub fn from_local(mir: &Mir<'tcx>, local: Local) -> Lvalue<'tcx> {
- let mut index = local.index();
- index = match index.checked_sub(mir.arg_decls.len()) {
- None => return Lvalue::Arg(Arg(index as u32)),
- Some(index) => index,
- };
- index = match index.checked_sub(mir.var_decls.len()) {
- None => return Lvalue::Var(Var(index as u32)),
- Some(index) => index,
- };
- index = match index.checked_sub(mir.temp_decls.len()) {
- None => return Lvalue::Temp(Temp(index as u32)),
- Some(index) => index,
- };
- debug_assert!(index == 0);
- Lvalue::ReturnPointer
- }
}
impl<'tcx> Debug for Lvalue<'tcx> {
use self::Lvalue::*;
match *self {
- Var(id) => write!(fmt, "{:?}", id),
- Arg(id) => write!(fmt, "{:?}", id),
- Temp(id) => write!(fmt, "{:?}", id),
+ Local(id) => write!(fmt, "{:?}", id),
Static(def_id) =>
write!(fmt, "{}", ty::tls::with(|tcx| tcx.item_path_str(def_id))),
- ReturnPointer =>
- write!(fmt, "return"),
Projection(ref data) =>
match data.elem {
ProjectionElem::Downcast(ref adt_def, index) =>
#[derive(Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
pub enum AggregateKind<'tcx> {
- Vec,
+ Array,
Tuple,
/// The second field is variant number (discriminant), it's equal to 0
/// for struct and union expressions. The fourth field is active field
}
Aggregate(ref kind, ref lvs) => {
- use self::AggregateKind::*;
-
fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result {
let mut tuple_fmt = fmt.debug_tuple("");
for lv in lvs {
}
match *kind {
- Vec => write!(fmt, "{:?}", lvs),
+ AggregateKind::Array => write!(fmt, "{:?}", lvs),
- Tuple => {
+ AggregateKind::Tuple => {
match lvs.len() {
0 => write!(fmt, "()"),
1 => write!(fmt, "({:?},)", lvs[0]),
}
}
- Adt(adt_def, variant, substs, _) => {
+ AggregateKind::Adt(adt_def, variant, substs, _) => {
let variant_def = &adt_def.variants[variant];
ppaux::parameterized(fmt, substs, variant_def.did,
ppaux::Ns::Value, &[])?;
- match variant_def.kind {
- ty::VariantKind::Unit => Ok(()),
- ty::VariantKind::Tuple => fmt_tuple(fmt, lvs),
- ty::VariantKind::Struct => {
+ match variant_def.ctor_kind {
+ CtorKind::Const => Ok(()),
+ CtorKind::Fn => fmt_tuple(fmt, lvs),
+ CtorKind::Fictive => {
let mut struct_fmt = fmt.debug_struct("");
for (field, lv) in variant_def.fields.iter().zip(lvs) {
struct_fmt.field(&field.name.as_str(), lv);
}
}
- Closure(def_id, _) => ty::tls::with(|tcx| {
+ AggregateKind::Closure(def_id, _) => ty::tls::with(|tcx| {
if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
let name = format!("[closure@{:?}]", tcx.map.span(node_id));
let mut struct_fmt = fmt.debug_struct(&name);
-> LvalueTy<'tcx>
{
match *elem {
- ProjectionElem::Deref =>
+ ProjectionElem::Deref => {
+ let ty = self.to_ty(tcx)
+ .builtin_deref(true, ty::LvaluePreference::NoPreference)
+ .unwrap_or_else(|| {
+ bug!("deref projection of non-dereferencable ty {:?}", self)
+ })
+ .ty;
LvalueTy::Ty {
- ty: self.to_ty(tcx).builtin_deref(true, ty::LvaluePreference::NoPreference)
- .unwrap()
- .ty
- },
+ ty: ty,
+ }
+ }
ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } =>
LvalueTy::Ty {
ty: self.to_ty(tcx).builtin_index().unwrap()
impl<'tcx> Lvalue<'tcx> {
pub fn ty<'a, 'gcx>(&self, mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> LvalueTy<'tcx> {
- match self {
- &Lvalue::Var(index) =>
- LvalueTy::Ty { ty: mir.var_decls[index].ty },
- &Lvalue::Temp(index) =>
- LvalueTy::Ty { ty: mir.temp_decls[index].ty },
- &Lvalue::Arg(index) =>
- LvalueTy::Ty { ty: mir.arg_decls[index].ty },
- &Lvalue::Static(def_id) =>
+ match *self {
+ Lvalue::Local(index) =>
+ LvalueTy::Ty { ty: mir.local_decls[index].ty },
+ Lvalue::Static(def_id) =>
LvalueTy::Ty { ty: tcx.lookup_item_type(def_id).ty },
- &Lvalue::ReturnPointer =>
- LvalueTy::Ty { ty: mir.return_ty },
- &Lvalue::Projection(ref proj) =>
+ Lvalue::Projection(ref proj) =>
proj.base.ty(mir, tcx).projection_ty(tcx, &proj.elem),
}
}
}
&Rvalue::Aggregate(ref ak, ref ops) => {
match *ak {
- AggregateKind::Vec => {
+ AggregateKind::Array => {
if let Some(operand) = ops.get(0) {
let ty = operand.ty(mir, tcx);
Some(tcx.mk_array(ty, ops.len()))
self.super_typed_const_val(val, location);
}
- fn visit_var_decl(&mut self,
- var_decl: & $($mutability)* VarDecl<'tcx>) {
- self.super_var_decl(var_decl);
- }
-
- fn visit_temp_decl(&mut self,
- temp_decl: & $($mutability)* TempDecl<'tcx>) {
- self.super_temp_decl(temp_decl);
- }
-
- fn visit_arg_decl(&mut self,
- arg_decl: & $($mutability)* ArgDecl<'tcx>) {
- self.super_arg_decl(arg_decl);
+ fn visit_local_decl(&mut self,
+ local_decl: & $($mutability)* LocalDecl<'tcx>) {
+ self.super_local_decl(local_decl);
}
fn visit_visibility_scope(&mut self,
self.visit_ty(&$($mutability)* mir.return_ty);
- for var_decl in &$($mutability)* mir.var_decls {
- self.visit_var_decl(var_decl);
- }
-
- for arg_decl in &$($mutability)* mir.arg_decls {
- self.visit_arg_decl(arg_decl);
- }
-
- for temp_decl in &$($mutability)* mir.temp_decls {
- self.visit_temp_decl(temp_decl);
+ for local_decl in &$($mutability)* mir.local_decls {
+ self.visit_local_decl(local_decl);
}
self.visit_span(&$($mutability)* mir.span);
Rvalue::Aggregate(ref $($mutability)* kind,
ref $($mutability)* operands) => {
match *kind {
- AggregateKind::Vec => {
+ AggregateKind::Array => {
}
AggregateKind::Tuple => {
}
context: LvalueContext<'tcx>,
location: Location) {
match *lvalue {
- Lvalue::Var(_) |
- Lvalue::Temp(_) |
- Lvalue::Arg(_) |
- Lvalue::ReturnPointer => {
+ Lvalue::Local(_) => {
}
Lvalue::Static(ref $($mutability)* def_id) => {
self.visit_def_id(def_id, location);
}
}
- fn super_var_decl(&mut self,
- var_decl: & $($mutability)* VarDecl<'tcx>) {
- let VarDecl {
+ fn super_local_decl(&mut self,
+ local_decl: & $($mutability)* LocalDecl<'tcx>) {
+ let LocalDecl {
mutability: _,
- name: _,
ref $($mutability)* ty,
+ name: _,
ref $($mutability)* source_info,
- } = *var_decl;
-
- self.visit_ty(ty);
- self.visit_source_info(source_info);
- }
-
- fn super_temp_decl(&mut self,
- temp_decl: & $($mutability)* TempDecl<'tcx>) {
- let TempDecl {
- ref $($mutability)* ty,
- } = *temp_decl;
-
- self.visit_ty(ty);
- }
-
- fn super_arg_decl(&mut self,
- arg_decl: & $($mutability)* ArgDecl<'tcx>) {
- let ArgDecl {
- ref $($mutability)* ty,
- spread: _,
- debug_name: _
- } = *arg_decl;
+ } = *local_decl;
self.visit_ty(ty);
+ if let Some(ref $($mutability)* info) = *source_info {
+ self.visit_source_info(info);
+ }
}
fn super_visibility_scope(&mut self,
use session::{early_error, early_warn, Session};
use session::search_paths::SearchPaths;
+use rustc_back::PanicStrategy;
use rustc_back::target::Target;
use lint;
use middle::cstore;
use std::collections::btree_map::Keys as BTreeMapKeysIter;
use std::collections::btree_map::Values as BTreeMapValuesIter;
-use std::env;
use std::fmt;
-use std::hash::{Hasher, SipHasher};
+use std::hash::Hasher;
+use std::collections::hash_map::DefaultHasher;
use std::iter::FromIterator;
use std::path::PathBuf;
$warn_text,
self.error_format)*]);
})*
- let mut hasher = SipHasher::new();
+ let mut hasher = DefaultHasher::new();
dep_tracking::stable_hash(sub_hashes,
&mut hasher,
self.error_format);
alt_std_name: Option<String> [TRACKED],
// Indicates how the compiler should treat unstable features
unstable_features: UnstableFeatures [TRACKED],
+
+ // Indicates whether this run of the compiler is actually rustdoc. This
+ // is currently just a hack and will be removed eventually, so please
+ // try to not rely on this too much.
+ actually_rustdoc: bool [TRACKED],
}
);
libs: Vec::new(),
unstable_features: UnstableFeatures::Disallow,
debug_assertions: true,
+ actually_rustdoc: false,
}
}
}
}
-#[derive(Clone, PartialEq, Hash, RustcEncodable, RustcDecodable)]
-pub enum PanicStrategy {
- Unwind,
- Abort,
-}
-
-impl PanicStrategy {
- pub fn desc(&self) -> &str {
- match *self {
- PanicStrategy::Unwind => "unwind",
- PanicStrategy::Abort => "abort",
- }
- }
-}
-
/// Declare a macro that will define all CodegenOptions/DebuggingOptions fields and parsers all
/// at once. The goal of this macro is to define an interface that can be
/// programmatically used by the option parser in order to initialize the struct
impl<'a> dep_tracking::DepTrackingHash for $struct_name {
- fn hash(&self, hasher: &mut SipHasher, error_format: ErrorOutputType) {
+ fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) {
let mut sub_hashes = BTreeMap::new();
$({
hash_option!($opt,
pub const parse_opt_bool: Option<&'static str> =
Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`");
pub const parse_string: Option<&'static str> = Some("a string");
+ pub const parse_string_push: Option<&'static str> = Some("a string");
pub const parse_opt_string: Option<&'static str> = Some("a string");
pub const parse_list: Option<&'static str> = Some("a space-separated list of strings");
pub const parse_opt_list: Option<&'static str> = Some("a space-separated list of strings");
#[allow(dead_code)]
mod $mod_set {
- use super::{$struct_name, Passes, SomePasses, AllPasses, PanicStrategy};
+ use super::{$struct_name, Passes, SomePasses, AllPasses};
+ use rustc_back::PanicStrategy;
$(
pub fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool {
}
}
+ fn parse_string_push(slot: &mut Vec<String>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => { slot.push(s.to_string()); true },
+ None => false,
+ }
+ }
+
fn parse_list(slot: &mut Vec<String>, v: Option<&str>)
-> bool {
match v {
}
}
- fn parse_panic_strategy(slot: &mut PanicStrategy, v: Option<&str>) -> bool {
+ fn parse_panic_strategy(slot: &mut Option<PanicStrategy>, v: Option<&str>) -> bool {
match v {
- Some("unwind") => *slot = PanicStrategy::Unwind,
- Some("abort") => *slot = PanicStrategy::Abort,
+ Some("unwind") => *slot = Some(PanicStrategy::Unwind),
+ Some("abort") => *slot = Some(PanicStrategy::Abort),
_ => return false
}
true
"tool to assemble archives with"),
linker: Option<String> = (None, parse_opt_string, [UNTRACKED],
"system linker to link outputs with"),
+ link_arg: Vec<String> = (vec![], parse_string_push, [UNTRACKED],
+ "a single extra argument to pass to the linker (can be used several times)"),
link_args: Option<Vec<String>> = (None, parse_opt_list, [UNTRACKED],
"extra arguments to pass to the linker (space separated)"),
link_dead_code: bool = (false, parse_bool, [UNTRACKED],
"explicitly enable the cfg(debug_assertions) directive"),
inline_threshold: Option<usize> = (None, parse_opt_uint, [TRACKED],
"set the inlining threshold for"),
- panic: PanicStrategy = (PanicStrategy::Unwind, parse_panic_strategy,
+ panic: Option<PanicStrategy> = (None, parse_panic_strategy,
[TRACKED], "panic strategy to compile crate with"),
}
let os = &sess.target.target.target_os;
let env = &sess.target.target.target_env;
let vendor = &sess.target.target.target_vendor;
- let max_atomic_width = sess.target.target.options.max_atomic_width;
+ let max_atomic_width = sess.target.target.max_atomic_width();
let fam = if let Some(ref fam) = sess.target.target.options.target_family {
intern(fam)
crate_name: crate_name,
alt_std_name: None,
libs: libs,
- unstable_features: get_unstable_features_setting(),
+ unstable_features: UnstableFeatures::from_environment(),
debug_assertions: debug_assertions,
+ actually_rustdoc: false,
},
cfg)
}
-pub fn get_unstable_features_setting() -> UnstableFeatures {
- // Whether this is a feature-staged build, i.e. on the beta or stable channel
- let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
- // The secret key needed to get through the rustc build itself by
- // subverting the unstable features lints
- let bootstrap_secret_key = option_env!("CFG_BOOTSTRAP_KEY");
- // The matching key to the above, only known by the build system
- let bootstrap_provided_key = env::var("RUSTC_BOOTSTRAP_KEY").ok();
- match (disable_unstable_features, bootstrap_secret_key, bootstrap_provided_key) {
- (_, Some(ref s), Some(ref p)) if s == p => UnstableFeatures::Cheat,
- (true, ..) => UnstableFeatures::Disallow,
- (false, ..) => UnstableFeatures::Allow
- }
-}
-
pub fn parse_crate_types_from_list(list_list: Vec<String>) -> Result<Vec<CrateType>, String> {
let mut crate_types: Vec<CrateType> = Vec::new();
for unparsed_crate_type in &list_list {
pub mod nightly_options {
use getopts;
use syntax::feature_gate::UnstableFeatures;
- use super::{ErrorOutputType, OptionStability, RustcOptGroup, get_unstable_features_setting};
+ use super::{ErrorOutputType, OptionStability, RustcOptGroup};
use session::{early_error, early_warn};
pub fn is_unstable_enabled(matches: &getopts::Matches) -> bool {
}
pub fn is_nightly_build() -> bool {
- match get_unstable_features_setting() {
- UnstableFeatures::Allow | UnstableFeatures::Cheat => true,
- _ => false,
- }
+ UnstableFeatures::from_environment().is_nightly_build()
}
pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) {
let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options");
- let really_allows_unstable_options = match get_unstable_features_setting() {
- UnstableFeatures::Disallow => false,
- _ => true,
- };
+ let really_allows_unstable_options = UnstableFeatures::from_environment()
+ .is_nightly_build();
for opt in flags.iter() {
if opt.stability == OptionStability::Stable {
use middle::cstore;
use session::search_paths::{PathKind, SearchPaths};
use std::collections::BTreeMap;
- use std::hash::{Hash, SipHasher};
+ use std::hash::Hash;
use std::path::PathBuf;
- use super::{Passes, PanicStrategy, CrateType, OptLevel, DebugInfoLevel,
+ use std::collections::hash_map::DefaultHasher;
+ use super::{Passes, CrateType, OptLevel, DebugInfoLevel,
OutputTypes, Externs, ErrorOutputType};
use syntax::feature_gate::UnstableFeatures;
+ use rustc_back::PanicStrategy;
pub trait DepTrackingHash {
- fn hash(&self, &mut SipHasher, ErrorOutputType);
+ fn hash(&self, &mut DefaultHasher, ErrorOutputType);
}
macro_rules! impl_dep_tracking_hash_via_hash {
($t:ty) => (
impl DepTrackingHash for $t {
- fn hash(&self, hasher: &mut SipHasher, _: ErrorOutputType) {
+ fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) {
Hash::hash(self, hasher);
}
}
macro_rules! impl_dep_tracking_hash_for_sortable_vec_of {
($t:ty) => (
impl DepTrackingHash for Vec<$t> {
- fn hash(&self, hasher: &mut SipHasher, error_format: ErrorOutputType) {
+ fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) {
let mut elems: Vec<&$t> = self.iter().collect();
elems.sort();
Hash::hash(&elems.len(), hasher);
impl_dep_tracking_hash_via_hash!(Option<bool>);
impl_dep_tracking_hash_via_hash!(Option<usize>);
impl_dep_tracking_hash_via_hash!(Option<String>);
+ impl_dep_tracking_hash_via_hash!(Option<PanicStrategy>);
impl_dep_tracking_hash_via_hash!(Option<lint::Level>);
impl_dep_tracking_hash_via_hash!(Option<PathBuf>);
impl_dep_tracking_hash_via_hash!(CrateType);
impl_dep_tracking_hash_for_sortable_vec_of!((String, cstore::NativeLibraryKind));
impl DepTrackingHash for SearchPaths {
- fn hash(&self, hasher: &mut SipHasher, _: ErrorOutputType) {
+ fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) {
let mut elems: Vec<_> = self
.iter(PathKind::All)
.collect();
where T1: DepTrackingHash,
T2: DepTrackingHash
{
- fn hash(&self, hasher: &mut SipHasher, error_format: ErrorOutputType) {
+ fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) {
Hash::hash(&0, hasher);
DepTrackingHash::hash(&self.0, hasher, error_format);
Hash::hash(&1, hasher);
// This is a stable hash because BTreeMap is a sorted container
pub fn stable_hash(sub_hashes: BTreeMap<&'static str, &DepTrackingHash>,
- hasher: &mut SipHasher,
+ hasher: &mut DefaultHasher,
error_format: ErrorOutputType) {
for (key, sub_hash) in sub_hashes {
// Using Hash::hash() instead of DepTrackingHash::hash() is fine for
use std::iter::FromIterator;
use std::path::PathBuf;
use std::rc::Rc;
- use super::{OutputType, OutputTypes, Externs, PanicStrategy};
+ use super::{OutputType, OutputTypes, Externs};
+ use rustc_back::PanicStrategy;
use syntax::{ast, attr};
use syntax::parse::token::InternedString;
use syntax::codemap::dummy_spanned;
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
opts = reference.clone();
- opts.cg.panic = PanicStrategy::Abort;
+ opts.cg.panic = Some(PanicStrategy::Abort);
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash());
}
use middle::cstore::CrateStore;
use middle::dependency_format;
use session::search_paths::PathKind;
-use session::config::{DebugInfoLevel, PanicStrategy};
+use session::config::DebugInfoLevel;
use ty::tls;
use util::nodemap::{NodeMap, FnvHashMap};
use util::common::duration_to_secs_str;
use syntax::feature_gate::AttributeType;
use syntax_pos::{Span, MultiSpan};
+use rustc_back::PanicStrategy;
use rustc_back::target::Target;
use rustc_data_structures::flock;
use llvm;
use std::collections::HashMap;
use std::env;
use std::ffi::CString;
+use std::io::Write;
use std::rc::Rc;
use std::fmt;
use std::time::Duration;
pub fn lto(&self) -> bool {
self.opts.cg.lto
}
+ /// Returns the panic strategy for this compile session. If the user explicitly selected one
+ /// using '-C panic', use that, otherwise use the panic strategy defined by the target.
+ pub fn panic_strategy(&self) -> PanicStrategy {
+ self.opts.cg.panic.unwrap_or(self.target.target.options.panic_strategy)
+ }
pub fn no_landing_pads(&self) -> bool {
- self.opts.debugging_opts.no_landing_pads ||
- self.opts.cg.panic == PanicStrategy::Abort
+ self.opts.debugging_opts.no_landing_pads || self.panic_strategy() == PanicStrategy::Abort
}
pub fn unstable_options(&self) -> bool {
self.opts.debugging_opts.unstable_options
local_crate_source_file,
registry,
cstore,
- Rc::new(codemap::CodeMap::new()))
+ Rc::new(codemap::CodeMap::new()),
+ None)
}
pub fn build_session_with_codemap(sopts: config::Options,
local_crate_source_file: Option<PathBuf>,
registry: errors::registry::Registry,
cstore: Rc<for<'a> CrateStore<'a>>,
- codemap: Rc<codemap::CodeMap>)
+ codemap: Rc<codemap::CodeMap>,
+ emitter_dest: Option<Box<Write + Send>>)
-> Session {
// FIXME: This is not general enough to make the warning lint completely override
// normal diagnostic warnings, since the warning lint can also be denied and changed
.unwrap_or(true);
let treat_err_as_bug = sopts.debugging_opts.treat_err_as_bug;
- let emitter: Box<Emitter> = match sopts.error_format {
- config::ErrorOutputType::HumanReadable(color_config) => {
+ let emitter: Box<Emitter> = match (sopts.error_format, emitter_dest) {
+ (config::ErrorOutputType::HumanReadable(color_config), None) => {
Box::new(EmitterWriter::stderr(color_config,
Some(codemap.clone())))
}
- config::ErrorOutputType::Json => {
+ (config::ErrorOutputType::HumanReadable(_), Some(dst)) => {
+ Box::new(EmitterWriter::new(dst,
+ Some(codemap.clone())))
+ }
+ (config::ErrorOutputType::Json, None) => {
Box::new(JsonEmitter::stderr(Some(registry), codemap.clone()))
}
+ (config::ErrorOutputType::Json, Some(dst)) => {
+ Box::new(JsonEmitter::new(dst, Some(registry), codemap.clone()))
+ }
};
let diagnostic_handler =
Ok(result) => {
let span = obligation.cause.span;
match infcx.leak_check(false, span, &skol_map, snapshot) {
- Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)),
+ Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, result)),
Err(e) => Err(MismatchedProjectionTypes { err: e }),
}
}
normalized_ty,
&[]);
obligations.push(skol_obligation);
- this.infcx().plug_leaks(skol_map, snapshot, &obligations)
+ this.infcx().plug_leaks(skol_map, snapshot, obligations)
})
}).collect()
}
predicate: predicate.value
}))
}).collect();
- self.infcx().plug_leaks(skol_map, snapshot, &predicates)
+ self.infcx().plug_leaks(skol_map, snapshot, predicates)
}
}
use hir::def::DefMap;
use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE};
use hir::map as ast_map;
-use hir::map::{DefKey, DefPath, DefPathData, DisambiguatedDefPathData};
+use hir::map::{DefKey, DefPathData, DisambiguatedDefPathData};
use middle::free_region::FreeRegionMap;
use middle::region::RegionMaps;
use middle::resolve_lifetime;
}
}
- pub fn retrace_path(self, path: &DefPath) -> Option<DefId> {
- debug!("retrace_path(path={:?}, krate={:?})", path, self.crate_name(path.krate));
+ pub fn retrace_path(self,
+ krate: CrateNum,
+ path_data: &[DisambiguatedDefPathData])
+ -> Option<DefId> {
+ debug!("retrace_path(path={:?}, krate={:?})", path_data, self.crate_name(krate));
let root_key = DefKey {
parent: None,
},
};
- let root_index = self.def_index_for_def_key(path.krate, root_key)
+ let root_index = self.def_index_for_def_key(krate, root_key)
.expect("no root key?");
debug!("retrace_path: root_index={:?}", root_index);
let mut index = root_index;
- for data in &path.data {
+ for data in path_data {
let key = DefKey { parent: Some(index), disambiguated_data: data.clone() };
debug!("retrace_path: key={:?}", key);
- match self.def_index_for_def_key(path.krate, key) {
+ match self.def_index_for_def_key(krate, key) {
Some(i) => index = i,
None => return None,
}
}
- Some(DefId { krate: path.krate, index: index })
+ Some(DefId { krate: krate, index: index })
}
pub fn type_parameter_def(self,
UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
AbiMismatch(ExpectedFound<abi::Abi>),
Mutability,
- BoxMutability,
- PtrMutability,
- RefMutability,
- VecMutability,
TupleSize(ExpectedFound<usize>),
FixedArraySize(ExpectedFound<usize>),
- TyParamSize(ExpectedFound<usize>),
ArgCount,
RegionsDoesNotOutlive(&'tcx Region, &'tcx Region),
RegionsNotSame(&'tcx Region, &'tcx Region),
RegionsInsufficientlyPolymorphic(BoundRegion, &'tcx Region),
RegionsOverlyPolymorphic(BoundRegion, &'tcx Region),
Sorts(ExpectedFound<Ty<'tcx>>),
- IntegerAsChar,
IntMismatch(ExpectedFound<ty::IntVarValue>),
FloatMismatch(ExpectedFound<ast::FloatTy>),
Traits(ExpectedFound<DefId>),
BuiltinBoundsMismatch(ExpectedFound<ty::BuiltinBounds>),
VariadicMismatch(ExpectedFound<bool>),
CyclicTy,
- ConvergenceMismatch(ExpectedFound<bool>),
ProjectionNameMismatched(ExpectedFound<Name>),
ProjectionBoundsLength(ExpectedFound<usize>),
TyParamDefaultMismatch(ExpectedFound<type_variable::Default<'tcx>>)
values.found)
}
Mutability => write!(f, "types differ in mutability"),
- BoxMutability => {
- write!(f, "boxed types differ in mutability")
- }
- VecMutability => write!(f, "vectors differ in mutability"),
- PtrMutability => write!(f, "pointers differ in mutability"),
- RefMutability => write!(f, "references differ in mutability"),
- TyParamSize(values) => {
- write!(f, "expected a type with {} type params, \
- found one with {} type params",
- values.expected,
- values.found)
- }
FixedArraySize(values) => {
write!(f, "expected an array with a fixed size of {} elements, \
found one with {} elements",
values.found)
}
}
- IntegerAsChar => {
- write!(f, "expected an integral type, found `char`")
- }
IntMismatch(ref values) => {
write!(f, "expected `{:?}`, found `{:?}`",
values.expected,
if values.expected { "variadic" } else { "non-variadic" },
if values.found { "variadic" } else { "non-variadic" })
}
- ConvergenceMismatch(ref values) => {
- write!(f, "expected {} fn, found {} function",
- if values.expected { "converging" } else { "diverging" },
- if values.found { "converging" } else { "diverging" })
- }
ProjectionNameMismatched(ref values) => {
write!(f, "expected {}, found {}",
values.expected,
FloatSimplifiedType(ast::FloatTy),
AdtSimplifiedType(DefId),
StrSimplifiedType,
- VecSimplifiedType,
+ ArraySimplifiedType,
PtrSimplifiedType,
NeverSimplifiedType,
TupleSimplifiedType(usize),
ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)),
ty::TyAdt(def, _) => Some(AdtSimplifiedType(def.did)),
ty::TyStr => Some(StrSimplifiedType),
- ty::TyArray(..) | ty::TySlice(_) => Some(VecSimplifiedType),
+ ty::TyArray(..) | ty::TySlice(_) => Some(ArraySimplifiedType),
ty::TyRawPtr(_) => Some(PtrSimplifiedType),
ty::TyTrait(ref trait_info) => {
Some(TraitSimplifiedType(trait_info.principal.def_id()))
}
&ty::TyProjection(ref data) => {
+ // currently we can't normalize projections that
+ // include bound regions, so track those separately.
+ if !data.has_escaping_regions() {
+ self.add_flags(TypeFlags::HAS_NORMALIZABLE_PROJECTION);
+ }
self.add_flags(TypeFlags::HAS_PROJECTION);
self.add_projection_ty(data);
}
TypeFlags::HAS_FREE_REGIONS |
TypeFlags::HAS_TY_INFER |
TypeFlags::HAS_PARAMS |
- TypeFlags::HAS_PROJECTION |
+ TypeFlags::HAS_NORMALIZABLE_PROJECTION |
TypeFlags::HAS_TY_ERR |
TypeFlags::HAS_SELF)
}
/// If true, the size is exact, otherwise it's only a lower bound.
pub sized: bool,
- /// Offsets for the first byte after each field.
- /// That is, field_offset(i) = offset_after_field[i - 1] and the
- /// whole structure's size is the last offset, excluding padding.
- // FIXME(eddyb) use small vector optimization for the common case.
- pub offset_after_field: Vec<Size>
+ /// Offsets for the first byte of each field.
+ /// FIXME(eddyb) use small vector optimization for the common case.
+ pub offsets: Vec<Size>,
+
+ pub min_size: Size,
}
impl<'a, 'gcx, 'tcx> Struct {
align: if packed { dl.i8_align } else { dl.aggregate_align },
packed: packed,
sized: true,
- offset_after_field: vec![]
+ offsets: vec![],
+ min_size: Size::from_bytes(0),
}
}
scapegoat: Ty<'gcx>)
-> Result<(), LayoutError<'gcx>>
where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
- self.offset_after_field.reserve(fields.size_hint().0);
+ self.offsets.reserve(fields.size_hint().0);
+
+ let mut offset = self.min_size;
for field in fields {
if !self.sized {
bug!("Struct::extend: field #{} of `{}` comes after unsized field",
- self.offset_after_field.len(), scapegoat);
+ self.offsets.len(), scapegoat);
}
let field = field?;
}
// Invariant: offset < dl.obj_size_bound() <= 1<<61
- let mut offset = if !self.packed {
+ if !self.packed {
let align = field.align(dl);
self.align = self.align.max(align);
- self.offset_after_field.last_mut().map_or(Size::from_bytes(0), |last| {
- *last = last.abi_align(align);
- *last
- })
- } else {
- self.offset_after_field.last().map_or(Size::from_bytes(0), |&last| last)
- };
+ offset = offset.abi_align(align);
+ }
+
+ self.offsets.push(offset);
+
offset = offset.checked_add(field.size(dl), dl)
.map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
-
- self.offset_after_field.push(offset);
}
+ self.min_size = offset;
+
Ok(())
}
/// Get the size without trailing alignment padding.
- pub fn min_size(&self) -> Size {
- self.offset_after_field.last().map_or(Size::from_bytes(0), |&last| last)
- }
/// Get the size with trailing alignment padding.
pub fn stride(&self) -> Size {
- self.min_size().abi_align(self.align)
+ self.min_size.abi_align(self.align)
}
/// Determine whether a structure would be zero-sized, given its fields.
}
Ok(None)
}
-
- pub fn offset_of_field(&self, index: usize) -> Size {
- assert!(index < self.offset_after_field.len());
- if index == 0 {
- Size::from_bytes(0)
- } else {
- self.offset_after_field[index-1]
- }
- }
}
/// An untagged union.
});
let mut st = Struct::new(dl, false);
st.extend(dl, discr.iter().map(Ok).chain(fields), ty)?;
- size = cmp::max(size, st.min_size());
+ size = cmp::max(size, st.min_size);
align = align.max(st.align);
Ok(st)
}).collect::<Result<Vec<_>, _>>()?;
let old_ity_size = Int(min_ity).size(dl);
let new_ity_size = Int(ity).size(dl);
for variant in &mut variants {
- for offset in &mut variant.offset_after_field {
+ for offset in &mut variant.offsets[1..] {
if *offset > old_ity_size {
break;
}
*offset = new_ity_size;
}
+ // We might be making the struct larger.
+ if variant.min_size <= old_ity_size {
+ variant.min_size = new_ity_size;
+ }
}
}
use dep_graph::{self, DepNode};
use hir::map as ast_map;
use middle;
-use hir::def::{Def, PathResolution, ExportMap};
+use hir::def::{Def, CtorKind, PathResolution, ExportMap};
use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE};
use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem};
use middle::region::{CodeExtent, ROOT_CODE_EXTENT};
// Only set for TyInfer other than Fresh.
const KEEP_IN_LOCAL_TCX = 1 << 11,
+ // Is there a projection that does not involve a bound region?
+ // Currently we can't normalize projections w/ bound regions.
+ const HAS_NORMALIZABLE_PROJECTION = 1 << 12,
+
const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits |
TypeFlags::HAS_SELF.bits |
TypeFlags::HAS_RE_EARLY_BOUND.bits,
pub name: Name, // struct's name if this is a struct
pub disr_val: Disr,
pub fields: Vec<FieldDefData<'tcx, 'container>>,
- pub kind: VariantKind,
+ pub ctor_kind: CtorKind,
}
pub struct FieldDefData<'tcx, 'container: 'tcx> {
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum AdtKind { Struct, Union, Enum }
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
-pub enum VariantKind { Struct, Tuple, Unit }
-
-impl VariantKind {
- pub fn from_variant_data(vdata: &hir::VariantData) -> Self {
- match *vdata {
- hir::VariantData::Struct(..) => VariantKind::Struct,
- hir::VariantData::Tuple(..) => VariantKind::Tuple,
- hir::VariantData::Unit(..) => VariantKind::Unit,
- }
- }
-}
-
impl<'a, 'gcx, 'tcx, 'container> AdtDefData<'gcx, 'container> {
fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>,
did: DefId,
pub fn variant_of_def(&self, def: Def) -> &VariantDefData<'gcx, 'container> {
match def {
- Def::Variant(vid) => self.variant_with_id(vid),
- Def::Struct(..) | Def::Union(..) |
+ Def::Variant(vid) | Def::VariantCtor(vid, ..) => self.variant_with_id(vid),
+ Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) |
Def::TyAlias(..) | Def::AssociatedTy(..) => self.struct_variant(),
_ => bug!("unexpected def {:?} in variant_of_def", def)
}
hir::ExprClosure(..) |
hir::ExprBlock(..) |
hir::ExprRepeat(..) |
- hir::ExprVec(..) |
+ hir::ExprArray(..) |
hir::ExprBreak(..) |
hir::ExprAgain(..) |
hir::ExprRet(..) |
// or variant or their constructors, panics otherwise.
pub fn expect_variant_def(self, def: Def) -> VariantDef<'tcx> {
match def {
- Def::Variant(did) => {
+ Def::Variant(did) | Def::VariantCtor(did, ..) => {
let enum_did = self.parent_def_id(did).unwrap();
self.lookup_adt_def(enum_did).variant_with_id(did)
}
- Def::Struct(did) | Def::Union(did) => {
+ Def::Struct(did) | Def::StructCtor(did, ..) | Def::Union(did) => {
self.lookup_adt_def(did).struct_variant()
}
_ => bug!("expect_variant_def used with unexpected def {:?}", def)
UnsafetyMismatch(x) => UnsafetyMismatch(x),
AbiMismatch(x) => AbiMismatch(x),
Mutability => Mutability,
- BoxMutability => BoxMutability,
- PtrMutability => PtrMutability,
- RefMutability => RefMutability,
- VecMutability => VecMutability,
TupleSize(x) => TupleSize(x),
FixedArraySize(x) => FixedArraySize(x),
- TyParamSize(x) => TyParamSize(x),
ArgCount => ArgCount,
RegionsDoesNotOutlive(a, b) => {
return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b))
RegionsOverlyPolymorphic(a, b) => {
return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b))
}
- IntegerAsChar => IntegerAsChar,
IntMismatch(x) => IntMismatch(x),
FloatMismatch(x) => FloatMismatch(x),
Traits(x) => Traits(x),
BuiltinBoundsMismatch(x) => BuiltinBoundsMismatch(x),
VariadicMismatch(x) => VariadicMismatch(x),
CyclicTy => CyclicTy,
- ConvergenceMismatch(x) => ConvergenceMismatch(x),
ProjectionNameMismatched(x) => ProjectionNameMismatched(x),
ProjectionBoundsLength(x) => ProjectionBoundsLength(x),
use hir::def_id::DefId;
use infer::InferCtxt;
+use hir::map as ast_map;
use hir::pat_util;
use traits::{self, Reveal};
use ty::{self, Ty, AdtKind, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable};
use rustc_const_math::{ConstInt, ConstIsize, ConstUsize};
use std::cmp;
-use std::hash::{Hash, SipHasher, Hasher};
+use std::hash::{Hash, Hasher};
+use std::collections::hash_map::DefaultHasher;
use std::intrinsics;
use syntax::ast::{self, Name};
use syntax::attr::{self, SignedInt, UnsignedInt};
/// Creates a hash of the type `Ty` which will be the same no matter what crate
/// context it's calculated within. This is used by the `type_id` intrinsic.
pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
- let mut hasher = TypeIdHasher::new(self, SipHasher::new());
+ let mut hasher = TypeIdHasher::new(self, DefaultHasher::default());
hasher.visit_ty(ty);
hasher.finish()
}
}
}
+// When hashing a type this ends up affecting properties like symbol names. We
+// want these symbol names to be calculated independent of other factors like
+// what architecture you're compiling *from*.
+//
+// The hashing just uses the standard `Hash` trait, but the implementations of
+// `Hash` for the `usize` and `isize` types are *not* architecture independent
+// (e.g. they hash 4 or 8 bytes). As a result we want to avoid `usize` and
+// `isize` completely when hashing. To ensure that these don't leak in we use a
+// custom hasher implementation here which inflates the size of these to a `u64`
+// and `i64`.
+struct WidenUsizeHasher<H> {
+ inner: H,
+}
+
+impl<H> WidenUsizeHasher<H> {
+ fn new(inner: H) -> WidenUsizeHasher<H> {
+ WidenUsizeHasher { inner: inner }
+ }
+}
+
+impl<H: Hasher> Hasher for WidenUsizeHasher<H> {
+ fn write(&mut self, bytes: &[u8]) {
+ self.inner.write(bytes)
+ }
+
+ fn finish(&self) -> u64 {
+ self.inner.finish()
+ }
+
+ fn write_u8(&mut self, i: u8) {
+ self.inner.write_u8(i)
+ }
+ fn write_u16(&mut self, i: u16) {
+ self.inner.write_u16(i)
+ }
+ fn write_u32(&mut self, i: u32) {
+ self.inner.write_u32(i)
+ }
+ fn write_u64(&mut self, i: u64) {
+ self.inner.write_u64(i)
+ }
+ fn write_usize(&mut self, i: usize) {
+ self.inner.write_u64(i as u64)
+ }
+ fn write_i8(&mut self, i: i8) {
+ self.inner.write_i8(i)
+ }
+ fn write_i16(&mut self, i: i16) {
+ self.inner.write_i16(i)
+ }
+ fn write_i32(&mut self, i: i32) {
+ self.inner.write_i32(i)
+ }
+ fn write_i64(&mut self, i: i64) {
+ self.inner.write_i64(i)
+ }
+ fn write_isize(&mut self, i: isize) {
+ self.inner.write_i64(i as i64)
+ }
+}
+
pub struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a, H> {
tcx: TyCtxt<'a, 'gcx, 'tcx>,
- state: H
+ state: WidenUsizeHasher<H>,
}
impl<'a, 'gcx, 'tcx, H: Hasher> TypeIdHasher<'a, 'gcx, 'tcx, H> {
pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, state: H) -> Self {
TypeIdHasher {
tcx: tcx,
- state: state
+ state: WidenUsizeHasher::new(state),
}
}
fn def_id(&mut self, did: DefId) {
// Hash the DefPath corresponding to the DefId, which is independent
// of compiler internal state.
- let tcx = self.tcx;
- let def_path = tcx.def_path(did);
- def_path.deterministic_hash_to(tcx, &mut self.state);
+ let path = self.tcx.def_path(did);
+ self.def_path(&path)
+ }
+
+ pub fn def_path(&mut self, def_path: &ast_map::DefPath) {
+ def_path.deterministic_hash_to(self.tcx, &mut self.state);
}
}
extern crate serialize;
#[macro_use] extern crate log;
+extern crate serialize as rustc_serialize; // used by deriving
+
pub mod tempdir;
pub mod sha2;
pub mod target;
pub mod slice;
pub mod dynamic_lib;
+
+use serialize::json::{Json, ToJson};
+
+#[derive(Clone, Copy, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)]
+pub enum PanicStrategy {
+ Unwind,
+ Abort,
+}
+
+impl PanicStrategy {
+ pub fn desc(&self) -> &str {
+ match *self {
+ PanicStrategy::Unwind => "unwind",
+ PanicStrategy::Abort => "abort",
+ }
+ }
+}
+
+impl ToJson for PanicStrategy {
+ fn to_json(&self) -> Json {
+ match *self {
+ PanicStrategy::Abort => "abort".to_json(),
+ PanicStrategy::Unwind => "unwind".to_json(),
+ }
+ }
+}
options: TargetOptions {
features: "+neon,+fp-armv8,+cyclone".to_string(),
eliminate_frame_pointer: false,
- max_atomic_width: 128,
+ max_atomic_width: Some(128),
.. base
},
})
pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
- base.max_atomic_width = 128;
+ base.max_atomic_width = Some(128);
// As documented in http://developer.android.com/ndk/guides/cpu-features.html
// the neon (ASIMD) and FP must exist on all android aarch64 targets.
base.features = "+neon,+fp-armv8".to_string();
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
- base.max_atomic_width = 128;
+ base.max_atomic_width = Some(128);
Ok(Target {
llvm_target: "aarch64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
base.features = "+v7,+vfp3,+d16".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "arm-linux-androideabi".to_string(),
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "arm-unknown-linux-gnueabi".to_string(),
target_endian: "little".to_string(),
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "arm-unknown-linux-gnueabihf".to_string(),
target_endian: "little".to_string(),
// Most of these settings are copied from the arm_unknown_linux_gnueabi
// target.
base.features = "+v6".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
// It's important we use "gnueabi" and not "musleabi" here. LLVM uses it
// to determine the calling convention and float ABI, and it doesn't
// Most of these settings are copied from the arm_unknown_linux_gnueabihf
// target.
base.features = "+v6,+vfp2".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
// It's important we use "gnueabihf" and not "musleabihf" here. LLVM
// uses it to determine the calling convention and float ABI, and it
target_vendor: "apple".to_string(),
options: TargetOptions {
features: "+v7,+vfp3,+neon".to_string(),
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
.. base
}
})
pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
base.features = "+v7,+thumb2,+vfp3,+d16".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "armv7-none-linux-android".to_string(),
options: TargetOptions {
// Info about features at https://wiki.debian.org/ArmHardFloatPort
- features: "+v7,+vfp3,+d16,+thumb2".to_string(),
+ features: "+v7,+vfp3,+d16,+thumb2,-neon".to_string(),
cpu: "generic".to_string(),
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
.. base
}
})
// target.
base.features = "+v7,+vfp3,+neon".to_string();
base.cpu = "cortex-a8".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
// It's important we use "gnueabihf" and not "musleabihf" here. LLVM
// uses it to determine the calling convention and float ABI, and LLVM
target_vendor: "apple".to_string(),
options: TargetOptions {
features: "+v7,+vfp4,+neon".to_string(),
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
.. base
}
})
linker_is_gnu: true,
allow_asm: false,
obj_is_bitcode: true,
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
+ post_link_args: vec!["-s".to_string(), "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()],
.. Default::default()
};
Ok(Target {
--- /dev/null
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::TargetOptions;
+use std::default::Default;
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ linker: "cc".to_string(),
+ dynamic_linking: true,
+ executables: true,
+ has_rpath: true,
+ linker_is_gnu: true,
+ .. Default::default()
+ }
+}
target_env: "".to_string(),
target_vendor: "apple".to_string(),
options: TargetOptions {
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
.. base
}
})
pub fn target() -> TargetResult {
let mut base = super::apple_base::opts();
base.cpu = "yonah".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m32".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::android_base::opts();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
// http://developer.android.com/ndk/guides/abis.html#x86
base.cpu = "pentiumpro".to_string();
pub fn target() -> TargetResult {
let mut base = super::windows_base::opts();
base.cpu = "pentium4".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
// Mark all dynamic libraries and executables as compatible with the larger 4GiB address
// space available to x86 Windows binaries on x86_64.
pub fn target() -> TargetResult {
let mut base = super::windows_msvc_base::opts();
base.cpu = "pentium4".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
// Mark all dynamic libraries and executables as compatible with the larger 4GiB address
// space available to x86 Windows binaries on x86_64.
pub fn target() -> TargetResult {
let mut base = super::dragonfly_base::opts();
base.cpu = "pentium4".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m32".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::freebsd_base::opts();
base.cpu = "pentium4".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m32".to_string());
Ok(Target {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::{Target, TargetResult};
+
+pub fn target() -> TargetResult {
+ let mut base = super::haiku_base::opts();
+ base.cpu = "pentium4".to_string();
+ base.max_atomic_width = Some(64);
+ base.pre_link_args.push("-m32".to_string());
+
+ Ok(Target {
+ llvm_target: "i686-unknown-haiku".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(),
+ arch: "x86".to_string(),
+ target_os: "haiku".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "unknown".to_string(),
+ options: base,
+ })
+}
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "pentium4".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m32".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::linux_musl_base::opts();
base.cpu = "pentium4".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m32".to_string());
base.pre_link_args.push("-Wl,-melf_i386".to_string());
exe_suffix: ".pexe".to_string(),
linker_is_gnu: true,
allow_asm: false,
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
.. Default::default()
};
Ok(Target {
llvm_target: "mips64-unknown-linux-gnuabi64".to_string(),
target_endian: "big".to_string(),
target_pointer_width: "64".to_string(),
- data_layout: "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
+ data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
arch: "mips64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
// NOTE(mips64r2) matches C toolchain
cpu: "mips64r2".to_string(),
features: "+mips64r2".to_string(),
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
..super::linux_base::opts()
},
})
llvm_target: "mips64el-unknown-linux-gnuabi64".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "64".to_string(),
- data_layout: "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(),
arch: "mips64".to_string(),
target_os: "linux".to_string(),
target_env: "gnu".to_string(),
// NOTE(mips64r2) matches C toolchain
cpu: "mips64r2".to_string(),
features: "+mips64r2".to_string(),
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
..super::linux_base::opts()
},
})
options: TargetOptions {
cpu: "mips32r2".to_string(),
features: "+mips32r2".to_string(),
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
..super::linux_base::opts()
},
})
options: TargetOptions {
cpu: "mips32r2".to_string(),
features: "+mips32r2,+soft-float".to_string(),
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
..super::linux_base::opts()
}
})
options: TargetOptions {
cpu: "mips32r2".to_string(),
features: "+mips32r2,+soft-float".to_string(),
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
..super::linux_base::opts()
},
})
options: TargetOptions {
cpu: "mips32".to_string(),
features: "+mips32".to_string(),
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
..super::linux_base::opts()
},
})
options: TargetOptions {
cpu: "mips32".to_string(),
features: "+mips32,+soft-float".to_string(),
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
..super::linux_base::opts()
}
})
options: TargetOptions {
cpu: "mips32".to_string(),
features: "+mips32,+soft-float".to_string(),
- max_atomic_width: 32,
+ max_atomic_width: Some(32),
..super::linux_base::opts()
},
})
use std::io::prelude::*;
use syntax::abi::Abi;
+use PanicStrategy;
+
mod android_base;
mod apple_base;
mod apple_ios_base;
mod bitrig_base;
mod dragonfly_base;
mod freebsd_base;
+mod haiku_base;
mod linux_base;
mod linux_musl_base;
mod openbsd_base;
mod solaris_base;
mod windows_base;
mod windows_msvc_base;
+mod thumb_base;
pub type TargetResult = Result<Target, String>;
macro_rules! supported_targets {
- ( $(($triple:expr, $module:ident)),+ ) => (
+ ( $(($triple:expr, $module:ident),)+ ) => (
$(mod $module;)*
/// List of supported targets
("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
("x86_64-rumprun-netbsd", x86_64_rumprun_netbsd),
+ ("i686-unknown-haiku", i686_unknown_haiku),
+ ("x86_64-unknown-haiku", x86_64_unknown_haiku),
+
("x86_64-apple-darwin", x86_64_apple_darwin),
("i686-apple-darwin", i686_apple_darwin),
("i586-pc-windows-msvc", i586_pc_windows_msvc),
("le32-unknown-nacl", le32_unknown_nacl),
- ("asmjs-unknown-emscripten", asmjs_unknown_emscripten)
+ ("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
+ ("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
+
+ ("thumbv6m-none-eabi", thumbv6m_none_eabi),
+ ("thumbv7m-none-eabi", thumbv7m_none_eabi),
+ ("thumbv7em-none-eabi", thumbv7em_none_eabi),
+ ("thumbv7em-none-eabihf", thumbv7em_none_eabihf),
}
/// Everything `rustc` knows about how to compile for a specific target.
// will 'just work'.
pub obj_is_bitcode: bool,
- /// Maximum integer size in bits that this target can perform atomic
- /// operations on.
- pub max_atomic_width: u64,
+ /// Don't use this field; instead use the `.max_atomic_width()` method.
+ pub max_atomic_width: Option<u64>,
+
+ /// Panic strategy: "unwind" or "abort"
+ pub panic_strategy: PanicStrategy,
}
impl Default for TargetOptions {
allow_asm: true,
has_elf_tls: false,
obj_is_bitcode: false,
- max_atomic_width: 0,
+ max_atomic_width: None,
+ panic_strategy: PanicStrategy::Unwind,
}
}
}
}
}
+ /// Maximum integer size in bits that this target can perform atomic
+ /// operations on.
+ pub fn max_atomic_width(&self) -> u64 {
+ self.options.max_atomic_width.unwrap_or(self.target_pointer_width.parse().unwrap())
+ }
+
/// Load a target descriptor from a JSON object.
pub fn from_json(obj: Json) -> TargetResult {
// While ugly, this code must remain this way to retain
options: Default::default(),
};
- // Default max-atomic-width to target-pointer-width
- base.options.max_atomic_width = base.target_pointer_width.parse().unwrap();
-
macro_rules! key {
($key_name:ident) => ( {
let name = (stringify!($key_name)).replace("_", "-");
.map(|o| o.as_boolean()
.map(|s| base.options.$key_name = s));
} );
- ($key_name:ident, u64) => ( {
+ ($key_name:ident, Option<u64>) => ( {
let name = (stringify!($key_name)).replace("_", "-");
obj.find(&name[..])
.map(|o| o.as_u64()
- .map(|s| base.options.$key_name = s));
+ .map(|s| base.options.$key_name = Some(s)));
+ } );
+ ($key_name:ident, PanicStrategy) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| {
+ match s {
+ "unwind" => base.options.$key_name = PanicStrategy::Unwind,
+ "abort" => base.options.$key_name = PanicStrategy::Abort,
+ _ => return Some(Err(format!("'{}' is not a valid value for \
+ panic-strategy. Use 'unwind' or 'abort'.",
+ s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
} );
($key_name:ident, list) => ( {
let name = (stringify!($key_name)).replace("_", "-");
key!(exe_allocation_crate);
key!(has_elf_tls, bool);
key!(obj_is_bitcode, bool);
- key!(max_atomic_width, u64);
+ key!(max_atomic_width, Option<u64>);
+ try!(key!(panic_strategy, PanicStrategy));
Ok(base)
}
target_option_val!(has_elf_tls);
target_option_val!(obj_is_bitcode);
target_option_val!(max_atomic_width);
+ target_option_val!(panic_strategy);
Json::Object(d)
}
let mut base = super::linux_base::opts();
base.cpu = "ppc64".to_string();
base.pre_link_args.push("-m64".to_string());
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
let mut base = super::linux_base::opts();
base.cpu = "ppc64le".to_string();
base.pre_link_args.push("-m64".to_string());
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.pre_link_args.push("-m32".to_string());
- base.max_atomic_width = 32;
+ base.max_atomic_width = Some(32);
Ok(Target {
llvm_target: "powerpc-unknown-linux-gnu".to_string(),
// cabi_s390x.rs are for now hard-coded to assume the no-vector ABI.
// Pass the -vector feature string to LLVM to respect this assumption.
base.features = "-vector".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "s390x-unknown-linux-gnu".to_string(),
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// These 4 `thumbv*` targets cover the ARM Cortex-M family of processors which are widely used in
+// microcontrollers. Namely, all these processors:
+//
+// - Cortex-M0
+// - Cortex-M0+
+// - Cortex-M1
+// - Cortex-M3
+// - Cortex-M4(F)
+// - Cortex-M7(F)
+//
+// We have opted for 4 targets instead of one target per processor (e.g. `cortex-m0`, `cortex-m3`,
+// etc) because the differences between some processors like the cortex-m0 and cortex-m1 are almost
+// non-existent from the POV of codegen so it doesn't make sense to have separate targets for them.
+// And if differences exist between two processors under the same target, rustc flags can be used to
+// optimize for one processor or the other.
+//
+// Also, we have not chosen a single target (`arm-none-eabi`) like GCC does because this makes
+// difficult to integrate Rust code and C code. Targeting the Cortex-M4 requires different gcc flags
+// than the ones you would use for the Cortex-M0 and with a single target it'd be impossible to
+// differentiate one processor from the other.
+//
+// About arm vs thumb in the name. The Cortex-M devices only support the Thumb instruction set,
+// which is more compact (higher code density), and not the ARM instruction set. That's why LLVM
+// triples use thumb instead of arm. We follow suit because having thumb in the name let us
+// differentiate these targets from our other `arm(v7)-*-*-gnueabi(hf)` targets in the context of
+// build scripts / gcc flags.
+
+use PanicStrategy;
+use std::default::Default;
+use target::TargetOptions;
+
+/// Base `TargetOptions` shared by all the bare-metal `thumbv*` (Cortex-M)
+/// targets; each concrete target overrides only what differs (features,
+/// `max_atomic_width`, ...) via struct-update syntax.
+pub fn opts() -> TargetOptions {
+ // See rust-lang/rfcs#1645 for a discussion about these defaults
+ TargetOptions {
+ executables: true,
+ // In 99%+ of cases, we want to use the `arm-none-eabi-gcc` compiler (there aren't many
+ // options around)
+ linker: "arm-none-eabi-gcc".to_string(),
+ // Because these devices have very little resources having an unwinder is too onerous so we
+ // default to "abort" because the "unwind" strategy is very rare.
+ panic_strategy: PanicStrategy::Abort,
+ // Similarly, one almost always never wants to use relocatable code because of the extra
+ // costs it involves.
+ relocation_model: "static".to_string(),
+ .. Default::default()
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Targets the Cortex-M0, Cortex-M0+ and Cortex-M1 processors (ARMv6-M architecture)
+
+use target::{Target, TargetOptions, TargetResult};
+
+/// Target specification for `thumbv6m-none-eabi`: Cortex-M0/M0+/M1
+/// (ARMv6-M), bare-metal (`target_os = "none"`), little-endian, 32-bit.
+pub fn target() -> TargetResult {
+ Ok(Target {
+ llvm_target: "thumbv6m-none-eabi".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+ arch: "arm".to_string(),
+ target_os: "none".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "".to_string(),
+
+ options: TargetOptions {
+ // The ARMv6-M architecture doesn't support unaligned loads/stores so we disable them
+ // with +strict-align.
+ features: "+strict-align".to_string(),
+ // There are no atomic instructions available in the instruction set of the ARMv6-M
+ // architecture
+ max_atomic_width: Some(0),
+ // Everything else (linker, panic=abort, static relocation) comes from the
+ // shared Cortex-M base options.
+ .. super::thumb_base::opts()
+ }
+ })
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Targets the Cortex-M4 and Cortex-M7 processors (ARMv7E-M)
+//
+// This target assumes that the device doesn't have a FPU (Floating Point Unit) and lowers all the
+// floating point operations to software routines (intrinsics).
+//
+// As such, this target uses the "soft" calling convention (ABI) where floating point values are
+// passed to/from subroutines via general purpose registers (R0, R1, etc.).
+//
+// To opt-in to hardware accelerated floating point operations, you can use, for example,
+// `-C target-feature=+vfp4` or `-C target-cpu=cortex-m4`.
+
+use target::{Target, TargetOptions, TargetResult};
+
+/// Target specification for `thumbv7em-none-eabi`: Cortex-M4/M7 (ARMv7E-M)
+/// with software floating point and the "soft" float ABI (see the file
+/// header comment above for the rationale).
+pub fn target() -> TargetResult {
+ Ok(Target {
+ llvm_target: "thumbv7em-none-eabi".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+ arch: "arm".to_string(),
+ target_os: "none".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "".to_string(),
+
+ options: TargetOptions {
+ // ARMv7(E)-M provides exclusive load/store (LDREX/STREX) for words up to
+ // 32 bits, but no 64-bit atomics.
+ max_atomic_width: Some(32),
+ .. super::thumb_base::opts()
+ },
+ })
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Targets the Cortex-M4F and Cortex-M7F processors (ARMv7E-M)
+//
+// This target assumes that the device does have a FPU (Floating Point Unit) and lowers all (single
+// precision) floating point operations to hardware instructions.
+//
+// Additionally, this target uses the "hard" floating convention (ABI) where floating point values
+// are passed to/from subroutines via FPU registers (S0, S1, D0, D1, etc.).
+//
+// To opt into double precision hardware support, use the `-C target-feature=-fp-only-sp` flag.
+
+use target::{Target, TargetOptions, TargetResult};
+
+/// Target specification for `thumbv7em-none-eabihf`: Cortex-M4F/M7F
+/// (ARMv7E-M with FPU), hardware single-precision float and the "hard"
+/// float ABI (FP values passed in FPU registers — see file header).
+pub fn target() -> TargetResult {
+ Ok(Target {
+ llvm_target: "thumbv7em-none-eabihf".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+ arch: "arm".to_string(),
+ target_os: "none".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "".to_string(),
+
+ options: TargetOptions {
+ // `+vfp4` is the lowest common denominator between the Cortex-M4 (vfp4-16) and the
+ // Cortex-M7 (vfp5)
+ // `+d16` both the Cortex-M4 and the Cortex-M7 only have 16 double-precision registers
+ // available
+ // `+fp-only-sp` The Cortex-M4 only supports single precision floating point operations
+ // whereas in the Cortex-M7 double precision is optional
+ //
+ // Reference:
+ // ARMv7-M Architecture Reference Manual - A2.5 The optional floating-point extension
+ features: "+vfp4,+d16,+fp-only-sp".to_string(),
+ // 32-bit atomics via LDREX/STREX; no 64-bit atomic instructions on ARMv7E-M.
+ max_atomic_width: Some(32),
+ .. super::thumb_base::opts()
+ }
+ })
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Targets the Cortex-M3 processor (ARMv7-M)
+
+use target::{Target, TargetOptions, TargetResult};
+
+/// Target specification for `thumbv7m-none-eabi`: Cortex-M3 (ARMv7-M),
+/// bare-metal, little-endian, 32-bit, no FPU.
+pub fn target() -> TargetResult {
+ Ok(Target {
+ llvm_target: "thumbv7m-none-eabi".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(),
+ arch: "arm".to_string(),
+ target_os: "none".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "".to_string(),
+
+ options: TargetOptions {
+ // ARMv7-M has 32-bit exclusive load/store (LDREX/STREX) but no 64-bit atomics.
+ max_atomic_width: Some(32),
+ .. super::thumb_base::opts()
+ },
+ })
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::{Target, TargetOptions};
+
+/// Target specification for the Emscripten/WebAssembly target
+/// (`asmjs-unknown-emscripten`, `arch = "wasm32"`).
+pub fn target() -> Result<Target, String> {
+ let opts = TargetOptions {
+ // Emscripten's compiler frontend and archiver drive the link step.
+ linker: "emcc".to_string(),
+ ar: "emar".to_string(),
+
+ dynamic_linking: false,
+ executables: true,
+ // Today emcc emits two files - a .js file to bootstrap and
+ // possibly interpret the wasm, and a .wasm file
+ exe_suffix: ".js".to_string(),
+ linker_is_gnu: true,
+ // Inline `asm!` cannot be lowered for this backend, so reject it up front.
+ allow_asm: false,
+ // Object files handed to emcc are LLVM bitcode rather than native objects.
+ obj_is_bitcode: true,
+ max_atomic_width: Some(32),
+ // BINARYEN=1 enables the wasm backend; undefined symbols are hard errors
+ // rather than emcc's default warn-at-link behavior.
+ post_link_args: vec!["-s".to_string(), "BINARYEN=1".to_string(),
+ "-s".to_string(), "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()],
+ .. Default::default()
+ };
+ Ok(Target {
+ llvm_target: "asmjs-unknown-emscripten".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ target_os: "emscripten".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "unknown".to_string(),
+ data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(),
+ arch: "wasm32".to_string(),
+ options: opts,
+ })
+}
pub fn target() -> TargetResult {
let mut base = super::apple_base::opts();
base.cpu = "core2".to_string();
- base.max_atomic_width = 128; // core2 support cmpxchg16b
+ base.max_atomic_width = Some(128); // core2 support cmpxchg16b
base.eliminate_frame_pointer = false;
base.pre_link_args.push("-m64".to_string());
target_env: "".to_string(),
target_vendor: "apple".to_string(),
options: TargetOptions {
- max_atomic_width: 64,
+ max_atomic_width: Some(64),
.. base
}
})
let mut base = super::windows_base::opts();
base.cpu = "x86-64".to_string();
base.pre_link_args.push("-m64".to_string());
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "x86_64-pc-windows-gnu".to_string(),
pub fn target() -> TargetResult {
let mut base = super::windows_msvc_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "x86_64-pc-windows-msvc".to_string(),
base.pre_link_args.push("-m64".to_string());
base.linker = "x86_64-rumprun-netbsd-gcc".to_string();
base.ar = "x86_64-rumprun-netbsd-ar".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.dynamic_linking = false;
base.has_rpath = false;
let mut base = super::solaris_base::opts();
base.pre_link_args.push("-m64".to_string());
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
Ok(Target {
llvm_target: "x86_64-pc-solaris".to_string(),
pub fn target() -> TargetResult {
let mut base = super::bitrig_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::dragonfly_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::freebsd_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::{Target, TargetResult};
+
+/// Target specification for `x86_64-unknown-haiku`: 64-bit Haiku OS,
+/// built on the shared Haiku base options with an x86-64 CPU baseline.
+pub fn target() -> TargetResult {
+ let mut base = super::haiku_base::opts();
+ base.cpu = "x86-64".to_string();
+ // cmpxchg is available up to 64 bits on baseline x86-64.
+ base.max_atomic_width = Some(64);
+ base.pre_link_args.push("-m64".to_string());
+
+ Ok(Target {
+ llvm_target: "x86_64-unknown-haiku".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "64".to_string(),
+ data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(),
+ arch: "x86_64".to_string(),
+ target_os: "haiku".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "unknown".to_string(),
+ options: base,
+ })
+}
pub fn target() -> TargetResult {
let mut base = super::linux_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::linux_musl_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::netbsd_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
pub fn target() -> TargetResult {
let mut base = super::openbsd_base::opts();
base.cpu = "x86-64".to_string();
- base.max_atomic_width = 64;
+ base.max_atomic_width = Some(64);
base.pre_link_args.push("-m64".to_string());
Ok(Target {
#[cfg(test)]
#[allow(non_upper_case_globals)]
mod tests {
- use std::hash::{Hash, Hasher, SipHasher};
+ use std::hash::{Hash, Hasher};
+ use std::collections::hash_map::DefaultHasher;
use std::option::Option::{None, Some};
bitflags! {
}
+// Test helper: hash `t` with the standard library's `DefaultHasher` and
+// return the 64-bit digest. `DefaultHasher` replaces the direct use of
+// `SipHasher` here; the test only needs a hash that is stable within one run.
fn hash<T: Hash>(t: &T) -> u64 {
- let mut s = SipHasher::new();
+ let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
};
assert!(args.len() == 1);
let peek_arg_lval = match args[0] {
- repr::Operand::Consume(ref lval @ repr::Lvalue::Temp(_)) => {
- lval
- }
- repr::Operand::Consume(_) |
- repr::Operand::Constant(_) => {
+ repr::Operand::Consume(ref lval @ repr::Lvalue::Local(_)) => Some(lval),
+ _ => None,
+ };
+
+ let peek_arg_lval = match peek_arg_lval {
+ Some(arg) => arg,
+ None => {
tcx.sess.diagnostic().span_err(
span, "dataflow::sanity_check cannot feed a non-temp to rustc_peek.");
return;
env: &'a MoveDataParamEnv<'tcx>,
flow_inits: DataflowResults<MaybeInitializedLvals<'a, 'tcx>>,
flow_uninits: DataflowResults<MaybeUninitializedLvals<'a, 'tcx>>,
- drop_flags: FnvHashMap<MovePathIndex, Temp>,
+ drop_flags: FnvHashMap<MovePathIndex, Local>,
patch: MirPatch<'tcx>,
}
}
fn drop_flag(&mut self, index: MovePathIndex) -> Option<Lvalue<'tcx>> {
- self.drop_flags.get(&index).map(|t| Lvalue::Temp(*t))
+ self.drop_flags.get(&index).map(|t| Lvalue::Local(*t))
}
/// create a patch that elaborates all drops in the input
statements.push(Statement {
source_info: c.source_info,
kind: StatementKind::Assign(
- Lvalue::Temp(flag),
+ Lvalue::Local(flag),
self.constant_bool(c.source_info.span, false)
)
});
}
let tcx = self.tcx;
- let unit_temp = Lvalue::Temp(self.patch.new_temp(tcx.mk_nil()));
+ let unit_temp = Lvalue::Local(self.patch.new_temp(tcx.mk_nil()));
let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
.unwrap_or_else(|e| tcx.sess.fatal(&e));
let substs = Substs::new(tcx, iter::once(Kind::from(ty)));
if let Some(&flag) = self.drop_flags.get(&path) {
let span = self.patch.source_info_for_location(self.mir, loc).span;
let val = self.constant_bool(span, val.value());
- self.patch.add_assign(loc, Lvalue::Temp(flag), val);
+ self.patch.add_assign(loc, Lvalue::Local(flag), val);
}
}
let span = self.patch.source_info_for_location(self.mir, loc).span;
let false_ = self.constant_bool(span, false);
for flag in self.drop_flags.values() {
- self.patch.add_assign(loc, Lvalue::Temp(*flag), false_.clone());
+ self.patch.add_assign(loc, Lvalue::Local(*flag), false_.clone());
}
}
/// Tables mapping from an l-value to its MovePathIndex.
#[derive(Debug)]
pub struct MovePathLookup<'tcx> {
- vars: IndexVec<Var, MovePathIndex>,
- temps: IndexVec<Temp, MovePathIndex>,
- args: IndexVec<Arg, MovePathIndex>,
-
- /// The move path representing the return value is constructed
- /// lazily when we first encounter it in the input MIR.
- return_ptr: Option<MovePathIndex>,
+ locals: IndexVec<Local, MovePathIndex>,
/// projections are made from a base-lvalue and a projection
/// elem. The base-lvalue will have a unique MovePathIndex; we use
moves: IndexVec::new(),
loc_map: LocationMap::new(mir),
rev_lookup: MovePathLookup {
- vars: mir.var_decls.indices().map(Lvalue::Var).map(|v| {
+ locals: mir.local_decls.indices().map(Lvalue::Local).map(|v| {
Self::new_move_path(&mut move_paths, &mut path_map, None, v)
}).collect(),
- temps: mir.temp_decls.indices().map(Lvalue::Temp).map(|t| {
- Self::new_move_path(&mut move_paths, &mut path_map, None, t)
- }).collect(),
- args: mir.arg_decls.indices().map(Lvalue::Arg).map(|a| {
- Self::new_move_path(&mut move_paths, &mut path_map, None, a)
- }).collect(),
- return_ptr: None,
projections: FnvHashMap(),
},
move_paths: move_paths,
{
debug!("lookup({:?})", lval);
match *lval {
- Lvalue::Var(var) => Ok(self.data.rev_lookup.vars[var]),
- Lvalue::Arg(arg) => Ok(self.data.rev_lookup.args[arg]),
- Lvalue::Temp(temp) => Ok(self.data.rev_lookup.temps[temp]),
+ Lvalue::Local(local) => Ok(self.data.rev_lookup.locals[local]),
// error: can't move out of a static
Lvalue::Static(..) => Err(MovePathError::IllegalMove),
- Lvalue::ReturnPointer => match self.data.rev_lookup.return_ptr {
- Some(ptr) => Ok(ptr),
- ref mut ptr @ None => {
- let path = Self::new_move_path(
- &mut self.data.move_paths,
- &mut self.data.path_map,
- None,
- lval.clone());
- *ptr = Some(path);
- Ok(path)
- }
- },
Lvalue::Projection(ref proj) => {
self.move_path_for_projection(lval, proj)
}
// parent.
pub fn find(&self, lval: &Lvalue<'tcx>) -> LookupResult {
match *lval {
- Lvalue::Var(var) => LookupResult::Exact(self.vars[var]),
- Lvalue::Temp(temp) => LookupResult::Exact(self.temps[temp]),
- Lvalue::Arg(arg) => LookupResult::Exact(self.args[arg]),
+ Lvalue::Local(local) => LookupResult::Exact(self.locals[local]),
Lvalue::Static(..) => LookupResult::Parent(None),
- Lvalue::ReturnPointer => LookupResult::Exact(self.return_ptr.unwrap()),
Lvalue::Projection(ref proj) => {
match self.find(&proj.base) {
LookupResult::Exact(base_path) => {
TerminatorKind::Unreachable => { }
TerminatorKind::Return => {
- self.gather_move(loc, &Lvalue::ReturnPointer);
+ self.gather_move(loc, &Lvalue::Local(RETURN_POINTER));
}
TerminatorKind::If { .. } |
where F: FnMut(MovePathIndex, DropFlagState)
{
let move_data = &ctxt.move_data;
- for (arg, _) in mir.arg_decls.iter_enumerated() {
- let lvalue = repr::Lvalue::Arg(arg);
+ for arg in mir.args_iter() {
+ let lvalue = repr::Lvalue::Local(arg);
let lookup_result = move_data.rev_lookup.find(&lvalue);
on_lookup_result_bits(tcx, mir, move_data,
lookup_result,
patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
new_blocks: Vec<BasicBlockData<'tcx>>,
new_statements: Vec<(Location, StatementKind<'tcx>)>,
- new_temps: Vec<TempDecl<'tcx>>,
+ new_locals: Vec<LocalDecl<'tcx>>,
resume_block: BasicBlock,
- next_temp: usize,
+ next_local: usize,
}
impl<'tcx> MirPatch<'tcx> {
let mut result = MirPatch {
patch_map: IndexVec::from_elem(None, mir.basic_blocks()),
new_blocks: vec![],
- new_temps: vec![],
new_statements: vec![],
- next_temp: mir.temp_decls.len(),
+ new_locals: vec![],
+ next_local: mir.local_decls.len(),
resume_block: START_BLOCK
};
}
}
- pub fn new_temp(&mut self, ty: Ty<'tcx>) -> Temp {
- let index = self.next_temp;
- self.next_temp += 1;
- self.new_temps.push(TempDecl { ty: ty });
- Temp::new(index as usize)
+ pub fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
+ let index = self.next_local;
+ self.next_local += 1;
+ self.new_locals.push(LocalDecl::new_temp(ty));
+ Local::new(index as usize)
}
pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
pub fn apply(self, mir: &mut Mir<'tcx>) {
debug!("MirPatch: {:?} new temps, starting from index {}: {:?}",
- self.new_temps.len(), mir.temp_decls.len(), self.new_temps);
+ self.new_locals.len(), mir.local_decls.len(), self.new_locals);
debug!("MirPatch: {} new blocks, starting from index {}",
self.new_blocks.len(), mir.basic_blocks().len());
mir.basic_blocks_mut().extend(self.new_blocks);
- mir.temp_decls.extend(self.new_temps);
+ mir.local_decls.extend(self.new_locals);
for (src, patch) in self.patch_map.into_iter_enumerated() {
if let Some(patch) = patch {
debug!("MirPatch: patching block {:?}", src);
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization::{cmt};
use rustc::hir::pat_util::*;
+use rustc::session::Session;
use rustc::traits::Reveal;
use rustc::ty::{self, Ty, TyCtxt};
+use rustc_errors::DiagnosticBuilder;
use std::cmp::Ordering;
use std::fmt;
use std::iter::{FromIterator, IntoIterator, repeat};
tcx.sess.abort_if_errors();
}
+// Shared constructor for E0004 ("non-exhaustive patterns") diagnostics so
+// every call site emits the same error code with a caller-provided message.
+// Returns the builder so callers can attach labels/help before `.emit()`.
+fn create_e0004<'a>(sess: &'a Session, sp: Span, error_message: String) -> DiagnosticBuilder<'a> {
+ struct_span_err!(sess, sp, E0004, "{}", &error_message)
+}
+
fn check_expr(cx: &mut MatchCheckCtxt, ex: &hir::Expr) {
intravisit::walk_expr(cx, ex);
match ex.node {
if inlined_arms.is_empty() {
if !pat_ty.is_uninhabited(cx.tcx) {
// We know the type is inhabited, so this must be wrong
- let mut err = struct_span_err!(cx.tcx.sess, ex.span, E0002,
- "non-exhaustive patterns: type {} is non-empty",
- pat_ty);
+ let mut err = create_e0004(cx.tcx.sess, ex.span,
+ format!("non-exhaustive patterns: type {} \
+ is non-empty",
+ pat_ty));
span_help!(&mut err, ex.span,
"Please ensure that all possible cases are being handled; \
possibly adding wildcards or more match arms.");
if edef.is_enum() {
if let Def::Local(..) = cx.tcx.expect_def(p.id) {
if edef.variants.iter().any(|variant| {
- variant.name == name.node && variant.kind == ty::VariantKind::Unit
+ variant.name == name.node && variant.ctor_kind == CtorKind::Const
}) {
let ty_path = cx.tcx.item_path_str(edef.did);
let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170,
1 => format!("pattern {} not covered", joined_patterns),
_ => format!("patterns {} not covered", joined_patterns)
};
- struct_span_err!(cx.tcx.sess, sp, E0004,
- "non-exhaustive patterns: {} not covered",
- joined_patterns
- ).span_label(sp, &label_text).emit();
+ create_e0004(cx.tcx.sess, sp,
+ format!("non-exhaustive patterns: {} not covered",
+ joined_patterns))
+ .span_label(sp, &label_text)
+ .emit();
},
}
}
}
PatKind::Box(inner) => PatKind::Box(self.fold_pat(inner)),
PatKind::Ref(inner, mutbl) => PatKind::Ref(self.fold_pat(inner), mutbl),
- PatKind::Vec(before, slice, after) => {
- PatKind::Vec(before.move_map(|x| self.fold_pat(x)),
- slice.map(|x| self.fold_pat(x)),
- after.move_map(|x| self.fold_pat(x)))
+ PatKind::Slice(before, slice, after) => {
+ PatKind::Slice(before.move_map(|x| self.fold_pat(x)),
+ slice.map(|x| self.fold_pat(x)),
+ after.move_map(|x| self.fold_pat(x)))
}
PatKind::Wild |
PatKind::Lit(_) |
ty::TyAdt(adt, _) => {
let v = ctor.variant_for_adt(adt);
- match v.kind {
- ty::VariantKind::Struct => {
+ match v.ctor_kind {
+ CtorKind::Fictive => {
let field_pats: hir::HirVec<_> = v.fields.iter()
.zip(pats)
.filter(|&(_, ref pat)| pat.node != PatKind::Wild)
let has_more_fields = field_pats.len() < pats_len;
PatKind::Struct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields)
}
- ty::VariantKind::Tuple => {
+ CtorKind::Fn => {
PatKind::TupleStruct(def_to_path(cx.tcx, v.did), pats.collect(), None)
}
- ty::VariantKind::Unit => {
+ CtorKind::Const => {
PatKind::Path(None, def_to_path(cx.tcx, v.did))
}
}
ty::TySlice(_) => match ctor {
&Slice(n) => {
assert_eq!(pats_len, n);
- PatKind::Vec(pats.collect(), None, hir::HirVec::new())
+ PatKind::Slice(pats.collect(), None, hir::HirVec::new())
},
_ => unreachable!()
},
ty::TyArray(_, len) => {
assert_eq!(pats_len, len);
- PatKind::Vec(pats.collect(), None, hir::HirVec::new())
+ PatKind::Slice(pats.collect(), None, hir::HirVec::new())
}
_ => {
};
let max_slice_length = rows.iter().filter_map(|row| match row[0].0.node {
- PatKind::Vec(ref before, _, ref after) => Some(before.len() + after.len()),
+ PatKind::Slice(ref before, _, ref after) => Some(before.len() + after.len()),
_ => None
}).max().map_or(0, |v| v + 1);
match pat.node {
PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) =>
match cx.tcx.expect_def(pat.id) {
- Def::Variant(id) => vec![Variant(id)],
- Def::Struct(..) | Def::Union(..) |
+ Def::Variant(id) | Def::VariantCtor(id, ..) => vec![Variant(id)],
+ Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) |
Def::TyAlias(..) | Def::AssociatedTy(..) => vec![Single],
Def::Const(..) | Def::AssociatedConst(..) =>
span_bug!(pat.span, "const pattern should've been rewritten"),
vec![ConstantValue(eval_const_expr(cx.tcx, &expr))],
PatKind::Range(ref lo, ref hi) =>
vec![ConstantRange(eval_const_expr(cx.tcx, &lo), eval_const_expr(cx.tcx, &hi))],
- PatKind::Vec(ref before, ref slice, ref after) =>
+ PatKind::Slice(ref before, ref slice, ref after) =>
match left_ty.sty {
ty::TyArray(..) => vec![Single],
ty::TySlice(_) if slice.is_some() => {
Def::Const(..) | Def::AssociatedConst(..) =>
span_bug!(pat_span, "const pattern should've \
been rewritten"),
- Def::Variant(id) if *constructor != Variant(id) => None,
- Def::Variant(..) | Def::Struct(..) => Some(Vec::new()),
- def => span_bug!(pat_span, "specialize: unexpected \
- definition {:?}", def),
+ Def::VariantCtor(id, CtorKind::Const) if *constructor != Variant(id) => None,
+ Def::VariantCtor(_, CtorKind::Const) |
+ Def::StructCtor(_, CtorKind::Const) => Some(Vec::new()),
+ def => span_bug!(pat_span, "specialize: unexpected definition: {:?}", def),
}
}
Def::Const(..) | Def::AssociatedConst(..) =>
span_bug!(pat_span, "const pattern should've \
been rewritten"),
- Def::Variant(id) if *constructor != Variant(id) => None,
- Def::Variant(..) | Def::Struct(..) => {
+ Def::VariantCtor(id, CtorKind::Fn) if *constructor != Variant(id) => None,
+ Def::VariantCtor(_, CtorKind::Fn) |
+ Def::StructCtor(_, CtorKind::Fn) => {
match ddpos {
Some(ddpos) => {
let mut pats: Vec<_> = args[..ddpos].iter().map(|p| {
None => Some(args.iter().map(|p| wpat(p)).collect())
}
}
- _ => None
+ def => span_bug!(pat_span, "specialize: unexpected definition: {:?}", def),
}
}
}
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
let pat_len = before.len() + after.len();
match *constructor {
Single => {
use rustc::hir::map::blocks::FnLikeNode;
use rustc::middle::cstore::InlinedItem;
use rustc::traits;
-use rustc::hir::def::{Def, PathResolution};
+use rustc::hir::def::{Def, CtorKind, PathResolution};
use rustc::hir::def_id::DefId;
use rustc::hir::pat_util::def_to_path;
use rustc::ty::{self, Ty, TyCtxt};
entry.insert(PathResolution::new(def));
}
let path = match def {
- Def::Struct(def_id) => def_to_path(tcx, def_id),
- Def::Variant(variant_did) => def_to_path(tcx, variant_did),
+ Def::StructCtor(def_id, CtorKind::Fn) |
+ Def::VariantCtor(def_id, CtorKind::Fn) => def_to_path(tcx, def_id),
Def::Fn(..) | Def::Method(..) => return Ok(P(hir::Pat {
id: expr.id,
node: PatKind::Lit(P(expr.clone())),
PatKind::Struct(path.clone(), field_pats, false)
}
- hir::ExprVec(ref exprs) => {
+ hir::ExprArray(ref exprs) => {
let pats = exprs.iter()
.map(|expr| const_expr_to_pat(tcx, &expr, pat_id, span))
.collect::<Result<_, _>>()?;
- PatKind::Vec(pats, None, hir::HirVec::new())
+ PatKind::Slice(pats, None, hir::HirVec::new())
}
hir::ExprPath(_, ref path) => {
match tcx.expect_def(expr.id) {
- Def::Struct(..) | Def::Variant(..) => PatKind::Path(None, path.clone()),
+ Def::StructCtor(_, CtorKind::Const) |
+ Def::VariantCtor(_, CtorKind::Const) => PatKind::Path(None, path.clone()),
Def::Const(def_id) | Def::AssociatedConst(def_id) => {
let substs = Some(tcx.node_id_item_substs(expr.id).substs);
let (expr, _ty) = lookup_const_by_id(tcx, def_id, substs).unwrap();
signal!(e, NonConstPath);
}
},
- Def::Variant(variant_def) => {
+ Def::VariantCtor(variant_def, ..) => {
if let Some(const_expr) = lookup_variant_by_id(tcx, variant_def) {
match eval_const_expr_partial(tcx, const_expr, ty_hint, None) {
Ok(val) => val,
signal!(e, UnimplementedConstVal("enum variants"));
}
}
- Def::Struct(..) => {
+ Def::StructCtor(..) => {
ConstVal::Struct(e.id)
}
Def::Local(def_id) => {
Array(_, n) if idx >= n => {
signal!(e, IndexOutOfBounds { len: n, index: idx })
}
- Array(v, n) => if let hir::ExprVec(ref v) = tcx.map.expect_expr(v).node {
+ Array(v, n) => if let hir::ExprArray(ref v) = tcx.map.expect_expr(v).node {
assert_eq!(n as usize as u64, n);
eval_const_expr_partial(tcx, &v[idx as usize], ty_hint, fn_args)?
} else {
_ => signal!(e, IndexedNonVec),
}
}
- hir::ExprVec(ref v) => Array(e.id, v.len() as u64),
+ hir::ExprArray(ref v) => Array(e.id, v.len() as u64),
hir::ExprRepeat(_, ref n) => {
let len_hint = ty_hint.checked_or(tcx.types.usize);
Repeat(
pub const F_SETLKW: libc::c_int = 9;
}
+ // Haiku definitions of the `flock` struct and fcntl locking constants,
+ // mirroring the per-OS `mod os` blocks below so the file-locking code
+ // compiles unchanged on Haiku.
+ #[cfg(target_os = "haiku")]
+ mod os {
+ use libc;
+
+ pub struct flock {
+ pub l_type: libc::c_short,
+ pub l_whence: libc::c_short,
+ pub l_start: libc::off_t,
+ pub l_len: libc::off_t,
+ pub l_pid: libc::pid_t,
+
+ // not actually here, but brings in line with freebsd
+ pub l_sysid: libc::c_int,
+ }
+
+ // NOTE(review): values taken from Haiku's fcntl.h — confirm against the
+ // platform headers if this target is updated.
+ pub const F_UNLCK: libc::c_short = 0x0200;
+ pub const F_WRLCK: libc::c_short = 0x0400;
+ pub const F_SETLK: libc::c_int = 0x0080;
+ pub const F_SETLKW: libc::c_int = 0x0100;
+ }
+
+
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod os {
use libc;
}
sess.track_errors(|| sess.lint_store.borrow_mut().process_command_line(sess))?;
+ // Currently, we ignore the name resolution data structures for the purposes of dependency
+ // tracking. Instead we will run name resolution and include its output in the hash of each
+ // item, much like we do for macro expansion. In other words, the hash reflects not just
+ // its contents but the results of name resolution on those contents. Hopefully we'll push
+ // this back at some point.
+ let _ignore = sess.dep_graph.in_ignore();
let mut crate_loader = CrateLoader::new(sess, &cstore, &krate, crate_name);
let resolver_arenas = Resolver::arenas();
let mut resolver =
sess.diagnostic())
});
- krate = time(time_passes, "maybe creating a macro crate", || {
- let crate_types = sess.crate_types.borrow();
- let is_rustc_macro_crate = crate_types.contains(&config::CrateTypeRustcMacro);
- let num_crate_types = crate_types.len();
- syntax_ext::rustc_macro_registrar::modify(&sess.parse_sess,
- &mut resolver,
- krate,
- is_rustc_macro_crate,
- num_crate_types,
- sess.diagnostic(),
- &sess.features.borrow())
- });
+ // If we're in rustdoc we're always compiling as an rlib, but that'll trip a
+ // bunch of checks in the `modify` function below. For now just skip this
+ // step entirely if we're rustdoc as it's not too useful anyway.
+ if !sess.opts.actually_rustdoc {
+ krate = time(time_passes, "maybe creating a macro crate", || {
+ let crate_types = sess.crate_types.borrow();
+ let num_crate_types = crate_types.len();
+ let is_rustc_macro_crate = crate_types.contains(&config::CrateTypeRustcMacro);
+ syntax_ext::rustc_macro_registrar::modify(&sess.parse_sess,
+ &mut resolver,
+ krate,
+ is_rustc_macro_crate,
+ num_crate_types,
+ sess.diagnostic(),
+ &sess.features.borrow())
+ });
+ }
if sess.opts.debugging_opts.input_stats {
println!("Post-expansion node count: {}", count_nodes(&krate));
})
})?;
- // Collect defintions for def ids.
- time(sess.time_passes(), "collecting defs", || resolver.definitions.collect(&krate));
-
time(sess.time_passes(),
"early lint checks",
|| lint::check_ast_crate(sess, &krate));
|| ast_validation::check_crate(sess, &krate));
time(sess.time_passes(), "name resolution", || -> CompileResult {
- // Currently, we ignore the name resolution data structures for the purposes of dependency
- // tracking. Instead we will run name resolution and include its output in the hash of each
- // item, much like we do for macro expansion. In other words, the hash reflects not just
- // its contents but the results of name resolution on those contents. Hopefully we'll push
- // this back at some point.
- let _ignore = sess.dep_graph.in_ignore();
- resolver.build_reduced_graph(&krate);
resolver.resolve_imports();
// Since import resolution will eventually happen in expansion,
use rustc::dep_graph::DepGraph;
use rustc::session::{self, config, Session, build_session, CompileResult};
use rustc::session::config::{Input, PrintRequest, OutputType, ErrorOutputType};
-use rustc::session::config::{get_unstable_features_setting, nightly_options};
+use rustc::session::config::nightly_options;
+use rustc::session::early_error;
use rustc::lint::Lint;
use rustc::lint;
use rustc_metadata::loader;
use std::sync::{Arc, Mutex};
use std::thread;
-use rustc::session::early_error;
-
use syntax::{ast, json};
use syntax::codemap::{CodeMap, FileLoader, RealFileLoader};
use syntax::feature_gate::{GatedCfg, UnstableFeatures};
}
}
-pub fn run(args: Vec<String>) -> isize {
+pub fn run<F>(run_compiler: F) -> isize
+ where F: FnOnce() -> (CompileResult, Option<Session>) + Send + 'static
+{
monitor(move || {
- let (result, session) = run_compiler(&args, &mut RustcDefaultCalls);
+ let (result, session) = run_compiler();
if let Err(err_count) = result {
if err_count > 0 {
match session {
Some(sess) => sess.fatal(&abort_msg(err_count)),
None => {
let emitter =
- errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto,
- None);
+ errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto, None);
let handler = errors::Handler::with_emitter(true, false, Box::new(emitter));
handler.emit(&MultiSpan::new(),
&abort_msg(err_count),
0
}
-pub fn run_compiler<'a>(args: &[String],
- callbacks: &mut CompilerCalls<'a>)
- -> (CompileResult, Option<Session>) {
- run_compiler_with_file_loader(args, callbacks, box RealFileLoader)
-}
-
// Parse args and run the compiler. This is the primary entry point for rustc.
// See comments on CompilerCalls below for details about the callbacks argument.
// The FileLoader provides a way to load files from sources other than the file system.
-pub fn run_compiler_with_file_loader<'a, L>(args: &[String],
- callbacks: &mut CompilerCalls<'a>,
- loader: Box<L>)
- -> (CompileResult, Option<Session>)
- where L: FileLoader + 'static {
+pub fn run_compiler<'a>(args: &[String],
+ callbacks: &mut CompilerCalls<'a>,
+ file_loader: Option<Box<FileLoader + 'static>>,
+ emitter_dest: Option<Box<Write + Send>>)
+ -> (CompileResult, Option<Session>)
+{
macro_rules! do_or_return {($expr: expr, $sess: expr) => {
match $expr {
Compilation::Stop => return (Ok(()), $sess),
let dep_graph = DepGraph::new(sopts.build_dep_graph());
let cstore = Rc::new(CStore::new(&dep_graph));
+
+ let loader = file_loader.unwrap_or(box RealFileLoader);
let codemap = Rc::new(CodeMap::with_file_loader(loader));
let sess = session::build_session_with_codemap(sopts,
&dep_graph,
input_file_path,
descriptions,
cstore.clone(),
- codemap);
+ codemap,
+ emitter_dest);
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess));
let mut cfg = config::build_configuration(&sess, cfg);
target_features::add_configuration(&mut cfg, &sess);
}
}
PrintRequest::Cfg => {
- let allow_unstable_cfg = match get_unstable_features_setting() {
- UnstableFeatures::Disallow => false,
- _ => true,
- };
+ let allow_unstable_cfg = UnstableFeatures::from_environment()
+ .is_nightly_build();
for cfg in cfg {
if !allow_unstable_cfg && GatedCfg::gate(&*cfg).is_some() {
}
let thread = cfg.spawn(move || {
- io::set_panic(box err);
+ io::set_panic(Some(box err));
f()
});
// Panic so the process returns a failure code, but don't pollute the
// output with some unnecessary panic messages, we've already
// printed everything that we needed to.
- io::set_panic(box io::sink());
+ io::set_panic(Some(box io::sink()));
panic!();
}
}
pub fn main() {
- let result = run(env::args().collect());
+ let result = run(|| run_compiler(&env::args().collect::<Vec<_>>(),
+ &mut RustcDefaultCalls,
+ None,
+ None));
process::exit(result as i32);
}
pub fn new(dst: Box<Write + Send>,
code_map: Option<Rc<CodeMapper>>)
-> EmitterWriter {
- EmitterWriter { dst: Raw(dst),
- cm: code_map}
+ EmitterWriter {
+ dst: Raw(dst),
+ cm: code_map,
+ }
}
fn preprocess_annotations(&self, msp: &MultiSpan) -> Vec<FileWithAnnotatedLines> {
use syntax::ast;
use std::cell::RefCell;
-use std::hash::{Hash, SipHasher, Hasher};
+use std::hash::{Hash, Hasher};
+use std::collections::hash_map::DefaultHasher;
use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
{
assert!(def_id.is_local());
debug!("HashItemsVisitor::calculate(def_id={:?})", def_id);
- // FIXME: this should use SHA1, not SipHash. SipHash is not
+ // FIXME: this should use SHA1, not DefaultHasher. DefaultHasher is not
// built to avoid collisions.
- let mut state = SipHasher::new();
+ let mut state = DefaultHasher::new();
walk_op(&mut StrictVersionHashVisitor::new(&mut state,
self.tcx,
&mut self.def_path_hashes,
fn compute_crate_hash(&mut self) {
let krate = self.tcx.map.krate();
- let mut crate_state = SipHasher::new();
+ let mut crate_state = DefaultHasher::new();
let crate_disambiguator = self.tcx.sess.local_crate_disambiguator();
"crate_disambiguator".hash(&mut crate_state);
use self::SawExprComponent::*;
use self::SawAbiComponent::*;
+use self::SawItemComponent::*;
+use self::SawPatComponent::*;
+use self::SawTyComponent::*;
+use self::SawTraitOrImplItemComponent::*;
+use syntax::abi::Abi;
use syntax::ast::{self, Name, NodeId};
use syntax::parse::token;
use syntax_pos::{Span, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos};
use rustc::hir::intravisit as visit;
use rustc::ty::TyCtxt;
use rustc_data_structures::fnv;
-use std::hash::{Hash, SipHasher};
+use std::hash::Hash;
+use std::collections::hash_map::DefaultHasher;
use super::def_path_hash::DefPathHashes;
use super::caching_codemap_view::CachingCodemapView;
pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> {
pub tcx: TyCtxt<'hash, 'tcx, 'tcx>,
- pub st: &'a mut SipHasher,
+ pub st: &'a mut DefaultHasher,
// collect a deterministic hash of def-ids that we have seen
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>,
hash_spans: bool,
}
impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> {
- pub fn new(st: &'a mut SipHasher,
+ pub fn new(st: &'a mut DefaultHasher,
tcx: TyCtxt<'hash, 'tcx, 'tcx>,
def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>,
codemap: &'a mut CachingCodemapView<'tcx>,
SawMod,
SawForeignItem,
- SawItem,
- SawTy,
+ SawItem(SawItemComponent),
+ SawTy(SawTyComponent),
SawGenerics,
- SawTraitItem,
- SawImplItem,
+ SawTraitItem(SawTraitOrImplItemComponent),
+ SawImplItem(SawTraitOrImplItemComponent),
SawStructField,
SawVariant,
SawPath(bool),
SawPathParameters,
SawPathListItem,
SawBlock,
- SawPat,
+ SawPat(SawPatComponent),
SawLocal,
SawArm,
SawExpr(SawExprComponent<'a>),
/// because the SVH is just a developer convenience; there is no
/// guarantee of collision-freedom, hash collisions are just
/// (hopefully) unlikely.)
+///
+/// The xxxComponent enums and saw_xxx functions for Item, Pat,
+/// Ty, TraitItem and ImplItem follow the same methodology.
#[derive(Hash)]
enum SawExprComponent<'a> {
SawExprAgain(Option<token::InternedString>),
SawExprBox,
- SawExprVec,
+ SawExprArray,
SawExprCall,
SawExprMethodCall,
SawExprTup,
fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> {
match *node {
ExprBox(..) => SawExprBox,
- ExprVec(..) => SawExprVec,
+ ExprArray(..) => SawExprArray,
ExprCall(..) => SawExprCall,
ExprMethodCall(..) => SawExprMethodCall,
ExprTup(..) => SawExprTup,
}
}
+#[derive(Hash)]
+enum SawItemComponent {
+ SawItemExternCrate,
+ SawItemUse,
+ SawItemStatic(Mutability),
+ SawItemConst,
+ SawItemFn(Unsafety, Constness, Abi),
+ SawItemMod,
+ SawItemForeignMod,
+ SawItemTy,
+ SawItemEnum,
+ SawItemStruct,
+ SawItemUnion,
+ SawItemTrait(Unsafety),
+ SawItemDefaultImpl(Unsafety),
+ SawItemImpl(Unsafety, ImplPolarity)
+}
+
+fn saw_item(node: &Item_) -> SawItemComponent {
+ match *node {
+ ItemExternCrate(..) => SawItemExternCrate,
+ ItemUse(..) => SawItemUse,
+ ItemStatic(_, mutability, _) => SawItemStatic(mutability),
+ ItemConst(..) =>SawItemConst,
+ ItemFn(_, unsafety, constness, abi, _, _) => SawItemFn(unsafety, constness, abi),
+ ItemMod(..) => SawItemMod,
+ ItemForeignMod(..) => SawItemForeignMod,
+ ItemTy(..) => SawItemTy,
+ ItemEnum(..) => SawItemEnum,
+ ItemStruct(..) => SawItemStruct,
+ ItemUnion(..) => SawItemUnion,
+ ItemTrait(unsafety, ..) => SawItemTrait(unsafety),
+ ItemDefaultImpl(unsafety, _) => SawItemDefaultImpl(unsafety),
+ ItemImpl(unsafety, implpolarity, ..) => SawItemImpl(unsafety, implpolarity)
+ }
+}
+
+#[derive(Hash)]
+enum SawPatComponent {
+ SawPatWild,
+ SawPatBinding(BindingMode),
+ SawPatStruct,
+ SawPatTupleStruct,
+ SawPatPath,
+ SawPatTuple,
+ SawPatBox,
+ SawPatRef(Mutability),
+ SawPatLit,
+ SawPatRange,
+ SawPatSlice
+}
+
+fn saw_pat(node: &PatKind) -> SawPatComponent {
+ match *node {
+ PatKind::Wild => SawPatWild,
+ PatKind::Binding(bindingmode, ..) => SawPatBinding(bindingmode),
+ PatKind::Struct(..) => SawPatStruct,
+ PatKind::TupleStruct(..) => SawPatTupleStruct,
+ PatKind::Path(..) => SawPatPath,
+ PatKind::Tuple(..) => SawPatTuple,
+ PatKind::Box(..) => SawPatBox,
+ PatKind::Ref(_, mutability) => SawPatRef(mutability),
+ PatKind::Lit(..) => SawPatLit,
+ PatKind::Range(..) => SawPatRange,
+ PatKind::Slice(..) => SawPatSlice
+ }
+}
+
+#[derive(Hash)]
+enum SawTyComponent {
+ SawTySlice,
+ SawTyArray,
+ SawTyPtr(Mutability),
+ SawTyRptr(Mutability),
+ SawTyBareFn(Unsafety, Abi),
+ SawTyNever,
+ SawTyTup,
+ SawTyPath,
+ SawTyObjectSum,
+ SawTyPolyTraitRef,
+ SawTyImplTrait,
+ SawTyTypeof,
+ SawTyInfer
+}
+
+fn saw_ty(node: &Ty_) -> SawTyComponent {
+ match *node {
+ TySlice(..) => SawTySlice,
+ TyArray(..) => SawTyArray,
+ TyPtr(ref mty) => SawTyPtr(mty.mutbl),
+ TyRptr(_, ref mty) => SawTyRptr(mty.mutbl),
+ TyBareFn(ref barefnty) => SawTyBareFn(barefnty.unsafety, barefnty.abi),
+ TyNever => SawTyNever,
+ TyTup(..) => SawTyTup,
+ TyPath(..) => SawTyPath,
+ TyObjectSum(..) => SawTyObjectSum,
+ TyPolyTraitRef(..) => SawTyPolyTraitRef,
+ TyImplTrait(..) => SawTyImplTrait,
+ TyTypeof(..) => SawTyTypeof,
+ TyInfer => SawTyInfer
+ }
+}
+
+#[derive(Hash)]
+enum SawTraitOrImplItemComponent {
+ SawTraitOrImplItemConst,
+ SawTraitOrImplItemMethod(Unsafety, Constness, Abi),
+ SawTraitOrImplItemType
+}
+
+fn saw_trait_item(ti: &TraitItem_) -> SawTraitOrImplItemComponent {
+ match *ti {
+ ConstTraitItem(..) => SawTraitOrImplItemConst,
+ MethodTraitItem(ref sig, _) =>
+ SawTraitOrImplItemMethod(sig.unsafety, sig.constness, sig.abi),
+ TypeTraitItem(..) => SawTraitOrImplItemType
+ }
+}
+
+fn saw_impl_item(ii: &ImplItemKind) -> SawTraitOrImplItemComponent {
+ match *ii {
+ ImplItemKind::Const(..) => SawTraitOrImplItemConst,
+ ImplItemKind::Method(ref sig, _) =>
+ SawTraitOrImplItemMethod(sig.unsafety, sig.constness, sig.abi),
+ ImplItemKind::Type(..) => SawTraitOrImplItemType
+ }
+}
+
#[derive(Clone, Copy, Hash, Eq, PartialEq)]
enum SawSpanExpnKind {
NoExpansion,
fn visit_item(&mut self, i: &'tcx Item) {
debug!("visit_item: {:?} st={:?}", i, self.st);
-
- SawItem.hash(self.st);
- // Hash the value of the discriminant of the Item variant.
- self.hash_discriminant(&i.node);
+ SawItem(saw_item(&i.node)).hash(self.st);
hash_span!(self, i.span);
hash_attrs!(self, &i.attrs);
visit::walk_item(self, i)
fn visit_ty(&mut self, t: &'tcx Ty) {
debug!("visit_ty: st={:?}", self.st);
- SawTy.hash(self.st);
+ SawTy(saw_ty(&t.node)).hash(self.st);
hash_span!(self, t.span);
visit::walk_ty(self, t)
}
fn visit_trait_item(&mut self, ti: &'tcx TraitItem) {
debug!("visit_trait_item: st={:?}", self.st);
- SawTraitItem.hash(self.st);
- self.hash_discriminant(&ti.node);
+ SawTraitItem(saw_trait_item(&ti.node)).hash(self.st);
hash_span!(self, ti.span);
hash_attrs!(self, &ti.attrs);
visit::walk_trait_item(self, ti)
fn visit_impl_item(&mut self, ii: &'tcx ImplItem) {
debug!("visit_impl_item: st={:?}", self.st);
- SawImplItem.hash(self.st);
- self.hash_discriminant(&ii.node);
+ SawImplItem(saw_impl_item(&ii.node)).hash(self.st);
hash_span!(self, ii.span);
hash_attrs!(self, &ii.attrs);
visit::walk_impl_item(self, ii)
fn visit_pat(&mut self, p: &'tcx Pat) {
debug!("visit_pat: st={:?}", self.st);
- SawPat.hash(self.st);
- self.hash_discriminant(&p.node);
+ SawPat(saw_pat(&p.node)).hash(self.st);
hash_span!(self, p.span);
visit::walk_pat(self, p)
}
Def::Mod(..) |
Def::Static(..) |
Def::Variant(..) |
+ Def::VariantCtor(..) |
Def::Enum(..) |
Def::TyAlias(..) |
Def::AssociatedTy(..) |
Def::TyParam(..) |
Def::Struct(..) |
+ Def::StructCtor(..) |
Def::Union(..) |
Def::Trait(..) |
Def::Method(..) |
use rustc::util::nodemap::DefIdMap;
use std::fmt::{self, Debug};
use std::iter::once;
+use std::collections::HashMap;
/// Index into the DefIdDirectory
#[derive(Copy, Clone, Debug, PartialOrd, Ord, Hash, PartialEq, Eq,
}
pub fn retrace(&self, tcx: TyCtxt) -> RetracedDefIdDirectory {
- let max_current_crate = self.max_current_crate(tcx);
+
+ fn make_key(name: &str, disambiguator: &str) -> String {
+ format!("{}/{}", name, disambiguator)
+ }
+
+ let new_krates: HashMap<_, _> =
+ once(LOCAL_CRATE)
+ .chain(tcx.sess.cstore.crates())
+ .map(|krate| (make_key(&tcx.crate_name(krate),
+ &tcx.crate_disambiguator(krate)), krate))
+ .collect();
let ids = self.paths.iter()
.map(|path| {
- if self.krate_still_valid(tcx, max_current_crate, path.krate) {
- tcx.retrace_path(path)
+ let old_krate_id = path.krate.as_usize();
+ assert!(old_krate_id < self.krates.len());
+ let old_crate_info = &self.krates[old_krate_id];
+ let old_crate_key = make_key(&old_crate_info.name,
+ &old_crate_info.disambiguator);
+ if let Some(&new_crate_key) = new_krates.get(&old_crate_key) {
+ tcx.retrace_path(new_crate_key, &path.data)
} else {
- debug!("crate {} changed from {:?} to {:?}/{:?}",
- path.krate,
- self.krates[path.krate.as_usize()],
- tcx.crate_name(path.krate),
- tcx.crate_disambiguator(path.krate));
+ debug!("crate {:?} no longer exists", old_crate_key);
None
}
})
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This module defines a generic file format that makes it possible to check
+//! whether a file generated by incremental compilation was produced by a compatible
+//! compiler version. This file format is used for the on-disk version of the
+//! dependency graph and the exported metadata hashes.
+//!
+//! In practice "compatible compiler version" means "exactly the same compiler
+//! version", since the header encodes the git commit hash of the compiler.
+//! Since we can always just ignore the incremental compilation cache and
+//! compiler versions don't change frequently for the typical user, being
+//! conservative here practically has no downside.
+
+use std::io::{self, Read};
+use std::path::Path;
+use std::fs::File;
+use std::env;
+
+use rustc::session::config::nightly_options;
+
+/// The first few bytes of files generated by incremental compilation
+const FILE_MAGIC: &'static [u8] = b"RSIC";
+
+/// Change this if the header format changes
+const HEADER_FORMAT_VERSION: u16 = 0;
+
+/// A version string that hopefully is always different for compiler versions
+/// with different encodings of incremental compilation artifacts. Contains
+/// the git commit hash.
+const RUSTC_VERSION: Option<&'static str> = option_env!("CFG_VERSION");
+
+pub fn write_file_header<W: io::Write>(stream: &mut W) -> io::Result<()> {
+ stream.write_all(FILE_MAGIC)?;
+ stream.write_all(&[(HEADER_FORMAT_VERSION >> 0) as u8,
+ (HEADER_FORMAT_VERSION >> 8) as u8])?;
+
+ let rustc_version = rustc_version();
+ assert_eq!(rustc_version.len(), (rustc_version.len() as u8) as usize);
+ stream.write_all(&[rustc_version.len() as u8])?;
+ stream.write_all(rustc_version.as_bytes())?;
+
+ Ok(())
+}
+
+/// Reads the contents of a file with a file header as defined in this module.
+///
+/// - Returns `Ok(Some(data))` if the file existed and was generated by a
+/// compatible compiler version. `data` is the entire contents of the file
+/// *after* the header.
+/// - Returns `Ok(None)` if the file did not exist or was generated by an
+/// incompatible version of the compiler.
+/// - Returns `Err(..)` if some kind of IO error occurred while reading the
+/// file.
+pub fn read_file(path: &Path) -> io::Result<Option<Vec<u8>>> {
+ if !path.exists() {
+ return Ok(None);
+ }
+
+ let mut file = File::open(path)?;
+
+ // Check FILE_MAGIC
+ {
+ debug_assert!(FILE_MAGIC.len() == 4);
+ let mut file_magic = [0u8; 4];
+ file.read_exact(&mut file_magic)?;
+ if file_magic != FILE_MAGIC {
+ return Ok(None)
+ }
+ }
+
+ // Check HEADER_FORMAT_VERSION
+ {
+ debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2);
+ let mut header_format_version = [0u8; 2];
+ file.read_exact(&mut header_format_version)?;
+ let header_format_version = (header_format_version[0] as u16) |
+ ((header_format_version[1] as u16) << 8);
+
+ if header_format_version != HEADER_FORMAT_VERSION {
+ return Ok(None)
+ }
+ }
+
+ // Check RUSTC_VERSION
+ {
+ let mut rustc_version_str_len = [0u8; 1];
+ file.read_exact(&mut rustc_version_str_len)?;
+ let rustc_version_str_len = rustc_version_str_len[0] as usize;
+ let mut buffer = Vec::with_capacity(rustc_version_str_len);
+ buffer.resize(rustc_version_str_len, 0);
+ file.read_exact(&mut buffer[..])?;
+
+ if &buffer[..] != rustc_version().as_bytes() {
+ return Ok(None);
+ }
+ }
+
+ let mut data = vec![];
+ file.read_to_end(&mut data)?;
+
+ Ok(Some(data))
+}
+
+fn rustc_version() -> String {
+ if nightly_options::is_nightly_build() {
+ if let Some(val) = env::var_os("RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER") {
+ return val.to_string_lossy().into_owned()
+ }
+ }
+
+ RUSTC_VERSION.expect("Cannot use rustc without explicit version for \
+ incremental compilation")
+ .to_string()
+}
let print_file_copy_stats = tcx.sess.opts.debugging_opts.incremental_info;
// Try copying over all files from the source directory
- if copy_files(&session_dir, &source_directory, print_file_copy_stats).is_ok() {
+ if let Ok(allows_links) = copy_files(&session_dir, &source_directory,
+ print_file_copy_stats) {
debug!("successfully copied data from: {}",
source_directory.display());
+ if !allows_links {
+ tcx.sess.warn(&format!("Hard linking files in the incremental \
+ compilation cache failed. Copying files \
+ instead. Consider moving the cache \
+ directory to a file system which supports \
+ hard linking in session dir `{}`",
+ session_dir.display())
+ );
+ }
+
tcx.sess.init_incr_comp_session(session_dir, directory_lock);
return Ok(true)
} else {
let _ = garbage_collect_session_directories(sess);
}
+pub fn delete_all_session_dir_contents(sess: &Session) -> io::Result<()> {
+ let sess_dir_iterator = sess.incr_comp_session_dir().read_dir()?;
+ for entry in sess_dir_iterator {
+ let entry = entry?;
+ safe_remove_file(&entry.path())?
+ }
+ Ok(())
+}
+
fn copy_files(target_dir: &Path,
source_dir: &Path,
print_stats_on_success: bool)
- -> Result<(), ()> {
+ -> Result<bool, ()> {
// We acquire a shared lock on the lock file of the directory, so that
// nobody deletes it out from under us while we are reading from it.
let lock_file_path = lock_file_path(source_dir);
println!("incr. comp. session directory: {} files copied", files_copied);
}
- Ok(())
+ Ok(files_linked > 0 || files_copied == 0)
}
/// Generate unique directory path of the form:
crate_name: &str,
crate_disambiguator: &str)
-> PathBuf {
- use std::hash::{SipHasher, Hasher, Hash};
+ use std::hash::{Hasher, Hash};
+ use std::collections::hash_map::DefaultHasher;
let incr_dir = sess.opts.incremental.as_ref().unwrap().clone();
// The full crate disambiguator is really long. A hash of it should be
// sufficient.
- let mut hasher = SipHasher::new();
+ let mut hasher = DefaultHasher::new();
crate_disambiguator.hash(&mut hasher);
let crate_name = format!("{}-{}", crate_name, encode_base_36(hasher.finish()));
use rustc_data_structures::flock;
use rustc_serialize::Decodable;
use rustc_serialize::opaque::Decoder;
-use std::io::{ErrorKind, Read};
-use std::fs::File;
use IncrementalHashesMap;
use super::data::*;
use super::fs::*;
+use super::file_format;
pub struct HashContext<'a, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
let hashes_file_path = metadata_hash_import_path(&session_dir);
- let mut data = vec![];
- match
- File::open(&hashes_file_path)
- .and_then(|mut file| file.read_to_end(&mut data))
+ match file_format::read_file(&hashes_file_path)
{
- Ok(_) => {
+ Ok(Some(data)) => {
match self.load_from_data(cnum, &data, svh) {
Ok(()) => { }
Err(err) => {
}
}
}
+ Ok(None) => {
+ // If the file is not found, that's ok.
+ }
Err(err) => {
- match err.kind() {
- ErrorKind::NotFound => {
- // If the file is not found, that's ok.
- }
- _ => {
- self.tcx.sess.err(
- &format!("could not load dep information from `{}`: {}",
- hashes_file_path.display(), err));
- return;
- }
- }
+ self.tcx.sess.err(
+ &format!("could not load dep information from `{}`: {}",
+ hashes_file_path.display(), err));
}
}
}
use rustc_data_structures::fnv::{FnvHashSet, FnvHashMap};
use rustc_serialize::Decodable as RustcDecodable;
use rustc_serialize::opaque::Decoder;
-use std::io::Read;
-use std::fs::{self, File};
+use std::fs;
use std::path::{Path};
use IncrementalHashesMap;
use super::dirty_clean;
use super::hash::*;
use super::fs::*;
+use super::file_format;
pub type DirtyNodes = FnvHashSet<DepNode<DefPathIndex>>;
}
fn load_data(sess: &Session, path: &Path) -> Option<Vec<u8>> {
- if !path.exists() {
- return None;
- }
-
- let mut data = vec![];
- match
- File::open(path)
- .and_then(|mut file| file.read_to_end(&mut data))
- {
- Ok(_) => {
- Some(data)
+ match file_format::read_file(path) {
+ Ok(Some(data)) => return Some(data),
+ Ok(None) => {
+ // The file either didn't exist or was produced by an incompatible
+ // compiler version. Neither is an error.
}
Err(err) => {
sess.err(
&format!("could not load dep-graph from `{}`: {}",
path.display(), err));
- None
}
}
+
+ if let Err(err) = delete_all_session_dir_contents(sess) {
+ sess.err(&format!("could not clear incompatible incremental \
+ compilation session directory `{}`: {}",
+ path.display(), err));
+ }
+
+ None
}
/// Decode the dep graph and load the edges/nodes that are still clean
debug!("load_prev_metadata_hashes() - File: {}", file_path.display());
- let mut data = vec![];
- if !File::open(&file_path)
- .and_then(|mut file| file.read_to_end(&mut data)).is_ok() {
- debug!("load_prev_metadata_hashes() - Couldn't read file containing \
- hashes at `{}`", file_path.display());
- return
- }
+ let data = match file_format::read_file(&file_path) {
+ Ok(Some(data)) => data,
+ Ok(None) => {
+ debug!("load_prev_metadata_hashes() - File produced by incompatible \
+ compiler version: {}", file_path.display());
+ return
+ }
+ Err(err) => {
+ debug!("load_prev_metadata_hashes() - Error reading file `{}`: {}",
+ file_path.display(), err);
+ return
+ }
+ };
debug!("load_prev_metadata_hashes() - Decoding hashes");
- let mut decoder = Decoder::new(&mut data, 0);
+ let mut decoder = Decoder::new(&data, 0);
let _ = Svh::decode(&mut decoder).unwrap();
let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder).unwrap();
debug!("load_prev_metadata_hashes() - successfully loaded {} hashes",
serialized_hashes.index_map.len());
}
+
mod preds;
mod save;
mod work_product;
+mod file_format;
pub use self::fs::finalize_session_directory;
pub use self::fs::in_incr_comp_dir;
use rustc_data_structures::fnv::FnvHashMap;
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::Encoder;
-use std::hash::{Hash, Hasher, SipHasher};
+use std::hash::{Hash, Hasher};
use std::io::{self, Cursor, Write};
use std::fs::{self, File};
use std::path::PathBuf;
+use std::collections::hash_map::DefaultHasher;
use IncrementalHashesMap;
use super::data::*;
use super::preds::*;
use super::fs::*;
use super::dirty_clean;
+use super::file_format;
pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
incremental_hashes_map: &IncrementalHashesMap,
// generate the data in a memory buffer
let mut wr = Cursor::new(Vec::new());
+ file_format::write_file_header(&mut wr).unwrap();
match encode(&mut Encoder::new(&mut wr)) {
Ok(()) => {}
Err(err) => {
.collect();
hashes.sort();
- let mut state = SipHasher::new();
+ let mut state = DefaultHasher::new();
hashes.hash(&mut state);
let hash = state.finish();
.zip(variants)
.map(|(variant, variant_layout)| {
// Subtract the size of the enum discriminant
- let bytes = variant_layout.min_size().bytes()
+ let bytes = variant_layout.min_size.bytes()
.saturating_sub(discr_size);
debug!("- variant `{}` is {} bytes large", variant.node.name, bytes);
fn main() {
println!("cargo:rustc-cfg=cargobuild");
- let target = env::var("TARGET").unwrap();
+ let target = env::var("TARGET").expect("TARGET was not set");
let llvm_config = env::var_os("LLVM_CONFIG")
.map(PathBuf::from)
.unwrap_or_else(|| {
// can't trust all the output of llvm-config becaues it might be targeted
// for the host rather than the target. As a result a bunch of blocks below
// are gated on `if !is_crossed`
- let target = env::var("TARGET").unwrap();
- let host = env::var("HOST").unwrap();
+ let target = env::var("TARGET").expect("TARGET was not set");
+ let host = env::var("HOST").expect("HOST was not set");
let is_crossed = target != host;
- let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl", "systemz"];
+ let optional_components = ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl", "systemz",
+ "jsbackend"];
// FIXME: surely we don't need all these components, right? Stuff like mcjit
// or interpreter the compiler itself never uses.
LLVMInitializeSystemZTargetMC,
LLVMInitializeSystemZAsmPrinter,
LLVMInitializeSystemZAsmParser);
+ init_target!(llvm_component = "jsbackend",
+ LLVMInitializeJSBackendTargetInfo,
+ LLVMInitializeJSBackendTarget,
+ LLVMInitializeJSBackendTargetMC);
}
pub fn last_error() -> Option<String> {
__internal::with_parse_sess(|sess| {
let src = src.to_string();
let cfg = Vec::new();
- let name = "rustc-macro source code".to_string();
+ let name = "<rustc-macro source code>".to_string();
let mut parser = parse::new_parser_from_source_str(sess, cfg, name,
src);
let mut ret = TokenStream { inner: Vec::new() };
use rustc::hir::svh::Svh;
use rustc::middle::cstore::LoadedMacro;
use rustc::session::{config, Session};
-use rustc::session::config::PanicStrategy;
+use rustc_back::PanicStrategy;
use rustc::session::search_paths::PathKind;
use rustc::middle;
use rustc::middle::cstore::{CrateStore, validate_crate_name, ExternCrate};
// The logic for finding the panic runtime here is pretty much the same
// as the allocator case with the only addition that the panic strategy
// compilation mode also comes into play.
- let desired_strategy = self.sess.opts.cg.panic.clone();
+ let desired_strategy = self.sess.panic_strategy();
let mut runtime_found = false;
let mut needs_panic_runtime = attr::contains_name(&krate.attrs,
"needs_panic_runtime");
use rustc::mir::repr::Mir;
use rustc::mir::mir_map::MirMap;
use rustc::util::nodemap::{NodeSet, DefIdMap};
-use rustc::session::config::PanicStrategy;
+use rustc_back::PanicStrategy;
use std::path::PathBuf;
use syntax::ast;
self.dep_graph.read(DepNode::MetaData(def_id));
let mut result = vec![];
self.get_crate_data(def_id.krate)
- .each_child_of_item(def_id.index, |child| result.push(child.def_id));
+ .each_child_of_item(def_id.index, |child| result.push(child.def.def_id()));
result
}
self.get_crate_data(def.krate).def_path(def.index)
}
- fn variant_kind(&self, def_id: DefId) -> Option<ty::VariantKind>
- {
- self.dep_graph.read(DepNode::MetaData(def_id));
- self.get_crate_data(def_id.krate).get_variant_kind(def_id.index)
- }
-
- fn struct_ctor_def_id(&self, struct_def_id: DefId) -> Option<DefId>
- {
- self.dep_graph.read(DepNode::MetaData(struct_def_id));
- self.get_crate_data(struct_def_id.krate).get_struct_ctor_def_id(struct_def_id.index)
- }
-
fn struct_field_names(&self, def: DefId) -> Vec<ast::Name>
{
self.dep_graph.read(DepNode::MetaData(def));
let mut bfs_queue = &mut VecDeque::new();
let mut add_child = |bfs_queue: &mut VecDeque<_>, child: def::Export, parent: DefId| {
- let child = child.def_id;
+ let child = child.def.def_id();
if self.visibility(child) != ty::Visibility::Public {
return;
use rustc::hir::map::DefKey;
use rustc::hir::svh::Svh;
use rustc::middle::cstore::ExternCrate;
-use rustc::session::config::PanicStrategy;
+use rustc_back::PanicStrategy;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet, DefIdMap, FnvHashSet};
use rustc::hir::intravisit::IdRange;
use rustc::middle::cstore::{InlinedItem, LinkagePreference};
-use rustc::hir::def::{self, Def};
+use rustc::hir::def::{self, Def, CtorKind};
use rustc::hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE};
use rustc::middle::lang_items;
use rustc::ty::{self, Ty, TyCtxt};
name: self.item_name(item),
fields: fields,
disr_val: ConstInt::Infer(data.disr),
- kind: data.kind,
+ ctor_kind: data.ctor_kind,
}, data.struct_ctor)
}
// FIXME(eddyb) Don't encode these in children.
EntryKind::ForeignMod => {
for child_index in child.children.decode(self) {
- callback(def::Export {
- def_id: self.local_def_id(child_index),
- name: self.item_name(&self.entry(child_index))
- });
+ if let Some(def) = self.get_def(child_index) {
+ callback(def::Export {
+ def: def,
+ name: self.item_name(&self.entry(child_index))
+ });
+ }
}
continue;
}
}
let def_key = child.def_key.decode(self);
- if let Some(name) = def_key.disambiguated_data.data.get_opt_name() {
- callback(def::Export {
- def_id: self.local_def_id(child_index),
- name: name
- });
+ if let (Some(def), Some(name)) = (self.get_def(child_index),
+ def_key.disambiguated_data.data.get_opt_name()) {
+ callback(def::Export { def: def, name: name });
+ // For non-reexport structs and variants add their constructors to children.
+ // Reexport lists automatically contain constructors when necessary.
+ match def {
+ Def::Struct(..) => {
+ if let Some(ctor_def_id) = self.get_struct_ctor_def_id(child_index) {
+ let ctor_kind = self.get_ctor_kind(child_index);
+ let ctor_def = Def::StructCtor(ctor_def_id, ctor_kind);
+ callback(def::Export { def: ctor_def, name: name });
+ }
+ }
+ Def::Variant(def_id) => {
+                        // Braced variants, unlike structs, generate unusable names in
+                        // the value namespace; they are reserved for possible future use.
+ let ctor_kind = self.get_ctor_kind(child_index);
+ let ctor_def = Def::VariantCtor(def_id, ctor_kind);
+ callback(def::Export { def: ctor_def, name: name });
+ }
+ _ => {}
+ }
}
}
}
self.entry(id).variances.decode(self).collect()
}
- pub fn get_variant_kind(&self, node_id: DefIndex) -> Option<ty::VariantKind> {
+ pub fn get_ctor_kind(&self, node_id: DefIndex) -> CtorKind {
match self.entry(node_id).kind {
EntryKind::Struct(data) |
EntryKind::Union(data) |
- EntryKind::Variant(data) => Some(data.decode(self).kind),
- _ => None
+ EntryKind::Variant(data) => data.decode(self).ctor_kind,
+ _ => CtorKind::Fictive,
}
}
```compile_fail,E0466
#[macro_use(a_macro(another_macro))] // error: invalid import declaration
-extern crate some_crate;
+extern crate core as some_crate;
#[macro_use(i_want = "some_macros")] // error: invalid import declaration
-extern crate another_crate;
+extern crate core as another_crate;
```
This is a syntax error at the level of attribute declarations. The proper
```compile_fail,E0467
#[macro_reexport] // error: no macros listed for export
-extern crate macros_for_good;
+extern crate core as macros_for_good;
#[macro_reexport(fun_macro = "foo")] // error: not a macro identifier
-extern crate other_macros_for_good;
+extern crate core as other_macros_for_good;
```
This is a syntax error at the level of attribute declarations.
```compile_fail,E0468
mod foo {
#[macro_use(helpful_macro)] // error: must be at crate root to import
- extern crate some_crate; // macros from another crate
- helpful_macro!(...)
+ extern crate core; // macros from another crate
+ helpful_macro!(...);
}
```
let def_id = variant.did;
let data = VariantData {
- kind: variant.kind,
+ ctor_kind: variant.ctor_kind,
disr: variant.disr_val.to_u64_unchecked(),
struct_ctor: None
};
fn encode_struct_ctor(&mut self, (adt_def_id, def_id): (DefId, DefId))
-> Entry<'tcx> {
- let variant = self.tcx.lookup_adt_def(adt_def_id).struct_variant();
+ let tcx = self.tcx;
+ let variant = tcx.lookup_adt_def(adt_def_id).struct_variant();
let data = VariantData {
- kind: variant.kind,
+ ctor_kind: variant.ctor_kind,
disr: variant.disr_val.to_u64_unchecked(),
struct_ctor: Some(def_id.index)
};
+ let struct_id = tcx.map.as_local_node_id(adt_def_id).unwrap();
+ let struct_vis = &tcx.map.expect_item(struct_id).vis;
+
Entry {
kind: EntryKind::Struct(self.lazy(&data)),
- visibility: ty::Visibility::Public,
+ visibility: struct_vis.simplify(),
def_key: self.encode_def_key(def_id),
attributes: LazySeq::empty(),
children: LazySeq::empty(),
None
};
EntryKind::Struct(self.lazy(&VariantData {
- kind: variant.kind,
+ ctor_kind: variant.ctor_kind,
disr: variant.disr_val.to_u64_unchecked(),
struct_ctor: struct_ctor
}))
let variant = tcx.lookup_adt_def(def_id).struct_variant();
EntryKind::Union(self.lazy(&VariantData {
- kind: variant.kind,
+ ctor_kind: variant.ctor_kind,
disr: variant.disr_val.to_u64_unchecked(),
struct_ctor: None
}))
hir::ItemStruct(ref struct_def, _) => {
self.encode_fields(def_id);
- // If this is a tuple-like struct, encode the type of the constructor.
- match self.tcx.lookup_adt_def(def_id).struct_variant().kind {
- ty::VariantKind::Struct => {
- // no value for structs like struct Foo { ... }
- }
- ty::VariantKind::Tuple | ty::VariantKind::Unit => {
- // there is a value for structs like `struct
- // Foo()` and `struct Foo`
- let ctor_def_id = self.tcx.map.local_def_id(struct_def.id());
- self.record(ctor_def_id,
- EncodeContext::encode_struct_ctor,
- (def_id, ctor_def_id));
- }
+ // If the struct has a constructor, encode it.
+ if !struct_def.is_struct() {
+ let ctor_def_id = self.tcx.map.local_def_id(struct_def.id());
+ self.record(ctor_def_id,
+ EncodeContext::encode_struct_ctor,
+ (def_id, ctor_def_id));
}
}
hir::ItemUnion(..) => {
let link_meta = self.link_meta;
let is_rustc_macro = tcx.sess.crate_types.borrow().contains(&CrateTypeRustcMacro);
let root = self.lazy(&CrateRoot {
- rustc_version: RUSTC_VERSION.to_string(),
+ rustc_version: rustc_version(),
name: link_meta.crate_name.clone(),
triple: tcx.sess.opts.target_triple.clone(),
hash: link_meta.crate_hash,
disambiguator: tcx.sess.local_crate_disambiguator().to_string(),
- panic_strategy: tcx.sess.opts.cg.panic.clone(),
+ panic_strategy: tcx.sess.panic_strategy(),
plugin_registrar_fn: tcx.sess.plugin_registrar_fn.get().map(|id| {
tcx.map.local_def_id(id).index
}),
//! metadata::loader or metadata::creader for all the juicy details!
use cstore::MetadataBlob;
-use schema::{METADATA_HEADER, RUSTC_VERSION};
+use schema::{METADATA_HEADER, rustc_version};
use rustc::hir::svh::Svh;
use rustc::session::Session;
}
if !self.rejected_via_version.is_empty() {
err.help(&format!("please recompile that crate using this compiler ({})",
- RUSTC_VERSION));
+ rustc_version()));
let mismatches = self.rejected_via_version.iter();
for (i, &CrateMismatch { ref path, ref got }) in mismatches.enumerate() {
err.note(&format!("crate `{}` path #{}: {} compiled by {:?}",
fn crate_matches(&mut self, metadata: &MetadataBlob, libpath: &Path) -> Option<Svh> {
let root = metadata.get_root();
- if root.rustc_version != RUSTC_VERSION {
+ let rustc_version = rustc_version();
+ if root.rustc_version != rustc_version {
info!("Rejecting via version: expected {} got {}",
- RUSTC_VERSION, root.rustc_version);
+ rustc_version, root.rustc_version);
self.rejected_via_version.push(CrateMismatch {
path: libpath.to_path_buf(),
got: root.rustc_version
use creader::{CrateLoader, Macros};
use rustc::hir::def_id::DefIndex;
-use rustc::middle::cstore::LoadedMacro;
+use rustc::middle::cstore::{LoadedMacro, LoadedMacroKind};
use rustc::session::Session;
use rustc::util::nodemap::FnvHashMap;
use rustc_back::dynamic_lib::DynamicLibrary;
use syntax::attr;
use syntax::parse::token;
use syntax_ext::deriving::custom::CustomDerive;
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};
pub fn call_bad_macro_reexport(a: &Session, b: Span) {
span_err!(a, b, E0467, "bad macro reexport");
pub type MacroSelection = FnvHashMap<token::InternedString, Span>;
+enum ImportSelection {
+ All(Span),
+ Some(MacroSelection),
+}
+
pub fn load_macros(loader: &mut CrateLoader, extern_crate: &ast::Item, allows_macros: bool)
-> Vec<LoadedMacro> {
loader.load_crate(extern_crate, allows_macros)
extern_crate: &ast::Item,
allows_macros: bool) -> Vec<LoadedMacro> {
// Parse the attributes relating to macros.
- let mut import = Some(FnvHashMap()); // None => load all
+ let mut import = ImportSelection::Some(FnvHashMap());
let mut reexport = FnvHashMap();
for attr in &extern_crate.attrs {
"macro_use" => {
let names = attr.meta_item_list();
if names.is_none() {
- // no names => load all
- import = None;
- }
- if let (Some(sel), Some(names)) = (import.as_mut(), names) {
- for attr in names {
+ import = ImportSelection::All(attr.span);
+ } else if let ImportSelection::Some(ref mut sel) = import {
+ for attr in names.unwrap() {
if let Some(word) = attr.word() {
sel.insert(word.name().clone(), attr.span());
} else {
fn load_macros<'b>(&mut self,
vi: &ast::Item,
allows_macros: bool,
- import: Option<MacroSelection>,
+ import: ImportSelection,
reexport: MacroSelection)
-> Vec<LoadedMacro> {
- if let Some(sel) = import.as_ref() {
+ if let ImportSelection::Some(ref sel) = import {
if sel.is_empty() && reexport.is_empty() {
return Vec::new();
}
for mut def in macros.macro_rules.drain(..) {
let name = def.ident.name.as_str();
- def.use_locally = match import.as_ref() {
- None => true,
- Some(sel) => sel.contains_key(&name),
+ let import_site = match import {
+ ImportSelection::All(span) => Some(span),
+ ImportSelection::Some(ref sel) => sel.get(&name).cloned()
};
+ def.use_locally = import_site.is_some();
def.export = reexport.contains_key(&name);
def.allow_internal_unstable = attr::contains_name(&def.attrs,
"allow_internal_unstable");
debug!("load_macros: loaded: {:?}", def);
- ret.push(LoadedMacro::Def(def));
+ ret.push(LoadedMacro {
+ kind: LoadedMacroKind::Def(def),
+ import_site: import_site.unwrap_or(DUMMY_SP),
+ });
seen.insert(name);
}
// exported macros, enforced elsewhere
assert_eq!(ret.len(), 0);
- if import.is_some() {
+ if let ImportSelection::Some(..) = import {
self.sess.span_err(vi.span, "`rustc-macro` crates cannot be \
selectively imported from, must \
use `#[macro_use]`");
self.load_derive_macros(vi.span, ¯os, index, &mut ret);
}
- if let Some(sel) = import.as_ref() {
+ if let ImportSelection::Some(sel) = import {
for (name, span) in sel {
if !seen.contains(&name) {
- span_err!(self.sess, *span, E0469,
+ span_err!(self.sess, span, E0469,
"imported macro not found");
}
}
mem::transmute::<*mut u8, fn(&mut Registry)>(sym)
};
- struct MyRegistrar<'a>(&'a mut Vec<LoadedMacro>);
+ struct MyRegistrar<'a>(&'a mut Vec<LoadedMacro>, Span);
impl<'a> Registry for MyRegistrar<'a> {
fn register_custom_derive(&mut self,
trait_name: &str,
expand: fn(TokenStream) -> TokenStream) {
let derive = Rc::new(CustomDerive::new(expand));
- self.0.push(LoadedMacro::CustomDerive(trait_name.to_string(), derive));
+ self.0.push(LoadedMacro {
+ kind: LoadedMacroKind::CustomDerive(trait_name.to_string(), derive),
+ import_site: self.1,
+ });
}
}
- registrar(&mut MyRegistrar(ret));
+ registrar(&mut MyRegistrar(ret, span));
// Intentionally leak the dynamic library. We can't ever unload it
// since the library can make things that will live arbitrarily long.
use index;
use rustc::hir;
-use rustc::hir::def;
+use rustc::hir::def::{self, CtorKind};
use rustc::hir::def_id::{DefIndex, DefId};
use rustc::middle::cstore::{LinkagePreference, NativeLibraryKind};
use rustc::middle::lang_items;
use rustc::mir;
use rustc::ty::{self, Ty};
-use rustc::session::config::PanicStrategy;
+use rustc_back::PanicStrategy;
use rustc_serialize as serialize;
use syntax::{ast, attr};
use std::marker::PhantomData;
-#[cfg(not(test))]
-pub const RUSTC_VERSION: &'static str = concat!("rustc ", env!("CFG_VERSION"));
-
-#[cfg(test)]
-pub const RUSTC_VERSION: &'static str = "rustc 0.0.0-unit-test";
+pub fn rustc_version() -> String {
+ format!("rustc {}", option_env!("CFG_VERSION").unwrap_or("unknown version"))
+}
/// Metadata encoding version.
/// NB: increment this if you change the format of metadata such that
#[derive(RustcEncodable, RustcDecodable)]
pub struct VariantData {
- pub kind: ty::VariantKind,
+ pub ctor_kind: CtorKind,
pub disr: u64,
/// If this is a struct's only variant, this
success.and(slice.index(idx))
}
ExprKind::SelfRef => {
- block.and(Lvalue::Arg(Arg::new(0)))
+ block.and(Lvalue::Local(Local::new(1)))
}
ExprKind::VarRef { id } => {
let index = this.var_indices[&id];
- block.and(Lvalue::Var(index))
+ block.and(Lvalue::Local(index))
}
ExprKind::StaticRef { id } => {
block.and(Lvalue::Static(id))
ExprKind::LogicalOp { .. } |
ExprKind::Box { .. } |
ExprKind::Cast { .. } |
+ ExprKind::Use { .. } |
ExprKind::NeverToAny { .. } |
ExprKind::ReifyFnPointer { .. } |
ExprKind::UnsafeFnPointer { .. } |
let source = unpack!(block = this.as_operand(block, source));
block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty))
}
+ ExprKind::Use { source } => {
+ let source = unpack!(block = this.as_operand(block, source));
+ block.and(Rvalue::Use(source))
+ }
ExprKind::ReifyFnPointer { source } => {
let source = unpack!(block = this.as_operand(block, source));
block.and(Rvalue::Cast(CastKind::ReifyFnPointer, source, expr.ty))
.map(|f| unpack!(block = this.as_operand(block, f)))
.collect();
- block.and(Rvalue::Aggregate(AggregateKind::Vec, fields))
+ block.and(Rvalue::Aggregate(AggregateKind::Array, fields))
}
ExprKind::Tuple { fields } => { // see (*) above
// first process the set of fields
ExprKind::Binary { .. } |
ExprKind::Box { .. } |
ExprKind::Cast { .. } |
+ ExprKind::Use { .. } |
ExprKind::ReifyFnPointer { .. } |
ExprKind::UnsafeFnPointer { .. } |
ExprKind::Unsize { .. } |
ExprKind::Binary { .. } |
ExprKind::Box { .. } |
ExprKind::Cast { .. } |
+ ExprKind::Use { .. } |
ExprKind::ReifyFnPointer { .. } |
ExprKind::UnsafeFnPointer { .. } |
ExprKind::Unsize { .. } |
}
ExprKind::Return { value } => {
block = match value {
- Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)),
+ Some(value) => {
+ unpack!(this.into(&Lvalue::Local(RETURN_POINTER), block, value))
+ }
None => {
- this.cfg.push_assign_unit(block, source_info, &Lvalue::ReturnPointer);
+ this.cfg.push_assign_unit(block,
+ source_info,
+ &Lvalue::Local(RETURN_POINTER));
block
}
};
var,
subpattern: None, .. } => {
self.storage_live_for_bindings(block, &irrefutable_pat);
- let lvalue = Lvalue::Var(self.var_indices[&var]);
+ let lvalue = Lvalue::Local(self.var_indices[&var]);
return self.into(&lvalue, block, initializer);
}
_ => {}
pattern: &Pattern<'tcx>) {
match *pattern.kind {
PatternKind::Binding { var, ref subpattern, .. } => {
- let lvalue = Lvalue::Var(self.var_indices[&var]);
+ let lvalue = Lvalue::Local(self.var_indices[&var]);
let source_info = self.source_info(pattern.span);
self.cfg.push(block, Statement {
source_info: source_info,
let source_info = self.source_info(binding.span);
self.cfg.push(block, Statement {
source_info: source_info,
- kind: StatementKind::StorageLive(Lvalue::Var(var_index))
+ kind: StatementKind::StorageLive(Lvalue::Local(var_index))
});
self.cfg.push_assign(block, source_info,
- &Lvalue::Var(var_index), rvalue);
+ &Lvalue::Local(var_index), rvalue);
}
}
name: Name,
var_id: NodeId,
var_ty: Ty<'tcx>)
- -> Var
+ -> Local
{
debug!("declare_binding(var_id={:?}, name={:?}, var_ty={:?}, source_info={:?})",
var_id, name, var_ty, source_info);
- let var = self.var_decls.push(VarDecl::<'tcx> {
- source_info: source_info,
+ let var = self.local_decls.push(LocalDecl::<'tcx> {
mutability: mutability,
- name: name,
ty: var_ty.clone(),
+ name: Some(name),
+ source_info: Some(source_info),
});
let extent = self.extent_of_innermost_scope();
- self.schedule_drop(source_info.span, extent, &Lvalue::Var(var), var_ty);
+ self.schedule_drop(source_info.span, extent, &Lvalue::Local(var), var_ty);
self.var_indices.insert(var_id, var);
debug!("declare_binding: var={:?}", var);
/// NB: **No cleanup is scheduled for this temporary.** You should
/// call `schedule_drop` once the temporary is initialized.
pub fn temp(&mut self, ty: Ty<'tcx>) -> Lvalue<'tcx> {
- let temp = self.temp_decls.push(TempDecl { ty: ty });
- let lvalue = Lvalue::Temp(temp);
+ let temp = self.local_decls.push(LocalDecl::new_temp(ty));
+ let lvalue = Lvalue::Local(temp);
debug!("temp: created temp {:?} with type {:?}",
- lvalue, self.temp_decls[temp].ty);
+ lvalue, self.local_decls[temp].ty);
lvalue
}
cfg: CFG<'tcx>,
fn_span: Span,
+ arg_count: usize,
/// the current set of scopes, updated as we traverse;
/// see the `scope` module for more details
visibility_scopes: IndexVec<VisibilityScope, VisibilityScopeData>,
visibility_scope: VisibilityScope,
- var_decls: IndexVec<Var, VarDecl<'tcx>>,
- var_indices: NodeMap<Var>,
- temp_decls: IndexVec<Temp, TempDecl<'tcx>>,
+ /// Maps node ids of variable bindings to the `Local`s created for them.
+ var_indices: NodeMap<Local>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
unit_temp: Option<Lvalue<'tcx>>,
/// cached block with the RESUME terminator; this is created
-> (Mir<'tcx>, ScopeAuxiliaryVec)
where A: Iterator<Item=(Ty<'gcx>, Option<&'gcx hir::Pat>)>
{
+ let arguments: Vec<_> = arguments.collect();
+
let tcx = hir.tcx();
let span = tcx.map.span(fn_id);
- let mut builder = Builder::new(hir, span);
+ let mut builder = Builder::new(hir, span, arguments.len(), return_ty);
let body_id = ast_block.id;
let call_site_extent =
tcx.region_maps.lookup_code_extent(
CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body_id });
let mut block = START_BLOCK;
- let mut arg_decls = unpack!(block = builder.in_scope(call_site_extent, block, |builder| {
- let arg_decls = unpack!(block = builder.in_scope(arg_extent, block, |builder| {
- builder.args_and_body(block, return_ty, arguments, arg_extent, ast_block)
+ unpack!(block = builder.in_scope(call_site_extent, block, |builder| {
+ unpack!(block = builder.in_scope(arg_extent, block, |builder| {
+ builder.args_and_body(block, return_ty, &arguments, arg_extent, ast_block)
}));
let source_info = builder.source_info(span);
TerminatorKind::Goto { target: return_block });
builder.cfg.terminate(return_block, source_info,
TerminatorKind::Return);
- return_block.and(arg_decls)
+ return_block.unit()
}));
assert_eq!(block, builder.return_block());
+ let mut spread_arg = None;
match tcx.node_id_to_type(fn_id).sty {
ty::TyFnDef(_, _, f) if f.abi == Abi::RustCall => {
// RustCall pseudo-ABI untuples the last argument.
- if let Some(last_arg) = arg_decls.last() {
- arg_decls[last_arg].spread = true;
- }
+ spread_arg = Some(Local::new(arguments.len()));
}
_ => {}
}
}).collect()
});
- builder.finish(upvar_decls, arg_decls, return_ty)
+ let (mut mir, aux) = builder.finish(upvar_decls, return_ty);
+ mir.spread_arg = spread_arg;
+ (mir, aux)
}
pub fn construct_const<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>,
ast_expr: &'tcx hir::Expr)
-> (Mir<'tcx>, ScopeAuxiliaryVec) {
let tcx = hir.tcx();
+ let ty = tcx.expr_ty_adjusted(ast_expr);
let span = tcx.map.span(item_id);
- let mut builder = Builder::new(hir, span);
+ let mut builder = Builder::new(hir, span, 0, ty);
let extent = tcx.region_maps.temporary_scope(ast_expr.id)
.unwrap_or(ROOT_CODE_EXTENT);
let mut block = START_BLOCK;
let _ = builder.in_scope(extent, block, |builder| {
let expr = builder.hir.mirror(ast_expr);
- unpack!(block = builder.into(&Lvalue::ReturnPointer, block, expr));
+ unpack!(block = builder.into(&Lvalue::Local(RETURN_POINTER), block, expr));
let source_info = builder.source_info(span);
let return_block = builder.return_block();
return_block.unit()
});
- let ty = tcx.expr_ty_adjusted(ast_expr);
- builder.finish(vec![], IndexVec::new(), ty)
+ builder.finish(vec![], ty)
}
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
- fn new(hir: Cx<'a, 'gcx, 'tcx>, span: Span) -> Builder<'a, 'gcx, 'tcx> {
+ fn new(hir: Cx<'a, 'gcx, 'tcx>,
+ span: Span,
+ arg_count: usize,
+ return_ty: Ty<'tcx>)
+ -> Builder<'a, 'gcx, 'tcx> {
let mut builder = Builder {
hir: hir,
cfg: CFG { basic_blocks: IndexVec::new() },
fn_span: span,
+ arg_count: arg_count,
scopes: vec![],
visibility_scopes: IndexVec::new(),
visibility_scope: ARGUMENT_VISIBILITY_SCOPE,
scope_auxiliary: IndexVec::new(),
loop_scopes: vec![],
- temp_decls: IndexVec::new(),
- var_decls: IndexVec::new(),
+ local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty), 1),
var_indices: NodeMap(),
unit_temp: None,
cached_resume_block: None,
fn finish(self,
upvar_decls: Vec<UpvarDecl>,
- arg_decls: IndexVec<Arg, ArgDecl<'tcx>>,
return_ty: Ty<'tcx>)
-> (Mir<'tcx>, ScopeAuxiliaryVec) {
for (index, block) in self.cfg.basic_blocks.iter().enumerate() {
self.visibility_scopes,
IndexVec::new(),
return_ty,
- self.var_decls,
- arg_decls,
- self.temp_decls,
+ self.local_decls,
+ self.arg_count,
upvar_decls,
self.fn_span
), self.scope_auxiliary)
}
- fn args_and_body<A>(&mut self,
- mut block: BasicBlock,
- return_ty: Ty<'tcx>,
- arguments: A,
- argument_extent: CodeExtent,
- ast_block: &'gcx hir::Block)
- -> BlockAnd<IndexVec<Arg, ArgDecl<'tcx>>>
- where A: Iterator<Item=(Ty<'gcx>, Option<&'gcx hir::Pat>)>
+ fn args_and_body(&mut self,
+ mut block: BasicBlock,
+ return_ty: Ty<'tcx>,
+ arguments: &[(Ty<'gcx>, Option<&'gcx hir::Pat>)],
+ argument_extent: CodeExtent,
+ ast_block: &'gcx hir::Block)
+ -> BlockAnd<()>
{
- // to start, translate the argument patterns and collect the argument types.
+ // Allocate locals for the function arguments
+ for &(ty, pattern) in arguments.iter() {
+ // If this is a simple binding pattern, give the local a nice name for debuginfo.
+ let mut name = None;
+ if let Some(pat) = pattern {
+ if let hir::PatKind::Binding(_, ref ident, _) = pat.node {
+ name = Some(ident.node);
+ }
+ }
+
+ self.local_decls.push(LocalDecl {
+ mutability: Mutability::Not,
+ ty: ty,
+ source_info: None,
+ name: name,
+ });
+ }
+
let mut scope = None;
- let arg_decls = arguments.enumerate().map(|(index, (ty, pattern))| {
- let lvalue = Lvalue::Arg(Arg::new(index));
+ // Bind the argument patterns
+ for (index, &(ty, pattern)) in arguments.iter().enumerate() {
+ // Function arguments always get the first Local indices after the return pointer
+ let lvalue = Lvalue::Local(Local::new(index + 1));
+
if let Some(pattern) = pattern {
let pattern = self.hir.irrefutable_pat(pattern);
scope = self.declare_bindings(scope, ast_block.span, &pattern);
self.schedule_drop(pattern.as_ref().map_or(ast_block.span, |pat| pat.span),
argument_extent, &lvalue, ty);
- let mut name = keywords::Invalid.name();
- if let Some(pat) = pattern {
- if let hir::PatKind::Binding(_, ref ident, _) = pat.node {
- name = ident.node;
- }
- }
-
- ArgDecl {
- ty: ty,
- spread: false,
- debug_name: name
- }
- }).collect();
+ }
// Enter the argument pattern bindings visibility scope, if it exists.
if let Some(visibility_scope) = scope {
// FIXME(#32959): temporary hack for the issue at hand
let return_is_unit = return_ty.is_nil();
// start the first basic block and translate the body
- unpack!(block = self.ast_block(&Lvalue::ReturnPointer, return_is_unit, block, ast_block));
+ unpack!(block = self.ast_block(&Lvalue::Local(RETURN_POINTER),
+ return_is_unit, block, ast_block));
- block.and(arg_decls)
+ block.unit()
}
fn get_unit_temp(&mut self) -> Lvalue<'tcx> {
For now, we keep a mapping from each `CodeExtent` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to
-them. Eventually, when we shift to non-lexical lifetimes, three should
+them. Eventually, when we shift to non-lexical lifetimes, there should
be no need to remember this mapping.
There is one additional wrinkle, actually, that I wanted to hide from
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits go to some
-other enclosing scope). `exit_scope` will record thid exit point and
+other enclosing scope). `exit_scope` will record this exit point and
also add all drops.
Panics are handled in a similar fashion, except that a panic always
self.diverge_cleanup();
let scope = self.scopes.pop().unwrap();
assert_eq!(scope.extent, extent);
- unpack!(block = build_scope_drops(&mut self.cfg, &scope, &self.scopes, block));
+ unpack!(block = build_scope_drops(&mut self.cfg,
+ &scope,
+ &self.scopes,
+ block,
+ self.arg_count));
self.scope_auxiliary[scope.id]
.postdoms
.push(self.cfg.current_location(block));
scope.cached_exits.insert((target, extent), b);
b
};
- unpack!(block = build_scope_drops(&mut self.cfg, scope, rest, block));
+ unpack!(block = build_scope_drops(&mut self.cfg,
+ scope,
+ rest,
+ block,
+ self.arg_count));
if let Some(ref free_data) = scope.free {
let next = self.cfg.start_new_block();
let free = build_free(self.hir.tcx(), &tmp, free_data, next);
} else {
// Only temps and vars need their storage dead.
match *lvalue {
- Lvalue::Temp(_) | Lvalue::Var(_) => DropKind::Storage,
+ Lvalue::Local(index) if index.index() > self.arg_count => DropKind::Storage,
_ => return
}
};
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
scope: &Scope<'tcx>,
earlier_scopes: &[Scope<'tcx>],
- mut block: BasicBlock)
+ mut block: BasicBlock,
+ arg_count: usize)
-> BlockAnd<()> {
let mut iter = scope.drops.iter().rev().peekable();
while let Some(drop_data) = iter.next() {
DropKind::Storage => {
// Only temps and vars need their storage dead.
match drop_data.location {
- Lvalue::Temp(_) | Lvalue::Var(_) => {}
+ Lvalue::Local(index) if index.index() > arg_count => {}
_ => continue
}
use rustc::mir::repr::{Local, Location, Lvalue, Mir};
use rustc::mir::visit::{LvalueContext, MutVisitor, Visitor};
-use rustc_data_structures::indexed_vec::{Idx, IndexVec};
+use rustc_data_structures::indexed_vec::IndexVec;
use std::marker::PhantomData;
use std::mem;
pub struct DefUseAnalysis<'tcx> {
info: IndexVec<Local, Info<'tcx>>,
- mir_summary: MirSummary,
}
#[derive(Clone)]
impl<'tcx> DefUseAnalysis<'tcx> {
pub fn new(mir: &Mir<'tcx>) -> DefUseAnalysis<'tcx> {
DefUseAnalysis {
- info: IndexVec::from_elem_n(Info::new(), mir.count_locals()),
- mir_summary: MirSummary::new(mir),
+ info: IndexVec::from_elem_n(Info::new(), mir.local_decls.len()),
}
}
pub fn analyze(&mut self, mir: &Mir<'tcx>) {
let mut finder = DefUseFinder {
info: mem::replace(&mut self.info, IndexVec::new()),
- mir_summary: self.mir_summary,
};
finder.visit_mir(mir);
self.info = finder.info
for lvalue_use in &self.info[local].defs_and_uses {
MutateUseVisitor::new(local,
&mut callback,
- self.mir_summary,
mir).visit_location(mir, lvalue_use.location)
}
}
struct DefUseFinder<'tcx> {
info: IndexVec<Local, Info<'tcx>>,
- mir_summary: MirSummary,
}
impl<'tcx> DefUseFinder<'tcx> {
fn lvalue_mut_info(&mut self, lvalue: &Lvalue<'tcx>) -> Option<&mut Info<'tcx>> {
let info = &mut self.info;
- self.mir_summary.local_index(lvalue).map(move |local| &mut info[local])
+
+ if let Lvalue::Local(local) = *lvalue {
+ Some(&mut info[local])
+ } else {
+ None
+ }
}
}
struct MutateUseVisitor<'tcx, F> {
query: Local,
callback: F,
- mir_summary: MirSummary,
phantom: PhantomData<&'tcx ()>,
}
impl<'tcx, F> MutateUseVisitor<'tcx, F> {
- fn new(query: Local, callback: F, mir_summary: MirSummary, _: &Mir<'tcx>)
+ fn new(query: Local, callback: F, _: &Mir<'tcx>)
-> MutateUseVisitor<'tcx, F>
where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) {
MutateUseVisitor {
query: query,
callback: callback,
- mir_summary: mir_summary,
phantom: PhantomData,
}
}
lvalue: &mut Lvalue<'tcx>,
context: LvalueContext<'tcx>,
location: Location) {
- if self.mir_summary.local_index(lvalue) == Some(self.query) {
- (self.callback)(lvalue, context, location)
- }
- self.super_lvalue(lvalue, context, location)
- }
-}
-
-/// A small structure that enables various metadata of the MIR to be queried
-/// without a reference to the MIR itself.
-#[derive(Clone, Copy)]
-pub struct MirSummary {
- arg_count: usize,
- var_count: usize,
- temp_count: usize,
-}
-
-impl MirSummary {
- pub fn new(mir: &Mir) -> MirSummary {
- MirSummary {
- arg_count: mir.arg_decls.len(),
- var_count: mir.var_decls.len(),
- temp_count: mir.temp_decls.len(),
- }
- }
-
- pub fn local_index<'tcx>(&self, lvalue: &Lvalue<'tcx>) -> Option<Local> {
- match *lvalue {
- Lvalue::Arg(arg) => Some(Local::new(arg.index())),
- Lvalue::Var(var) => Some(Local::new(var.index() + self.arg_count)),
- Lvalue::Temp(temp) => {
- Some(Local::new(temp.index() + self.arg_count + self.var_count))
+ if let Lvalue::Local(local) = *lvalue {
+ if local == self.query {
+ (self.callback)(lvalue, context, location)
}
- Lvalue::ReturnPointer => {
- Some(Local::new(self.arg_count + self.var_count + self.temp_count))
- }
- _ => None,
}
+ self.super_lvalue(lvalue, context, location)
}
}
-
write!(w, " label=<fn {}(", dot::escape_html(&tcx.node_path_str(nid)))?;
// fn argument types.
- for (i, arg) in mir.arg_decls.iter().enumerate() {
+ for (i, arg) in mir.args_iter().enumerate() {
if i > 0 {
write!(w, ", ")?;
}
- write!(w, "{:?}: {}", Lvalue::Arg(Arg::new(i)), escape(&arg.ty))?;
+ write!(w, "{:?}: {}", Lvalue::Local(arg), escape(&mir.local_decls[arg].ty))?;
}
write!(w, ") -> {}", escape(mir.return_ty))?;
write!(w, r#"<br align="left"/>"#)?;
- // User variable types (including the user's name in a comment).
- for (i, var) in mir.var_decls.iter().enumerate() {
+ for local in mir.vars_and_temps_iter() {
+ let decl = &mir.local_decls[local];
+
write!(w, "let ")?;
- if var.mutability == Mutability::Mut {
+ if decl.mutability == Mutability::Mut {
write!(w, "mut ")?;
}
- write!(w, r#"{:?}: {}; // {}<br align="left"/>"#,
- Lvalue::Var(Var::new(i)), escape(&var.ty), var.name)?;
- }
- // Compiler-introduced temporary types.
- for (i, temp) in mir.temp_decls.iter().enumerate() {
- write!(w, r#"let mut {:?}: {};<br align="left"/>"#,
- Lvalue::Temp(Temp::new(i)), escape(&temp.ty))?;
+ if let Some(name) = decl.name {
+ write!(w, r#"{:?}: {}; // {}<br align="left"/>"#,
+ Lvalue::Local(local), escape(&decl.ty), name)?;
+ } else {
+ write!(w, r#"let mut {:?}: {};<br align="left"/>"#,
+ Lvalue::Local(local), escape(&decl.ty))?;
+ }
}
writeln!(w, ">;")
use hair::cx::block;
use hair::cx::to_ref::ToRef;
use rustc::hir::map;
-use rustc::hir::def::Def;
+use rustc::hir::def::{Def, CtorKind};
use rustc::middle::const_val::ConstVal;
use rustc_const_eval as const_eval;
use rustc::middle::region::CodeExtent;
// Tuple-like ADTs are represented as ExprCall. We convert them here.
expr_ty.ty_adt_def().and_then(|adt_def|{
match cx.tcx.expect_def(fun.id) {
- Def::Variant(variant_id) => {
+ Def::VariantCtor(variant_id, CtorKind::Fn) => {
Some((adt_def, adt_def.variant_index_with_id(variant_id)))
},
- Def::Struct(..) => {
+ Def::StructCtor(_, CtorKind::Fn) => {
Some((adt_def, 0))
},
_ => None
// Check to see if this cast is a "coercion cast", where the cast is actually done
// using a coercion (or is a no-op).
if let Some(&TyCastKind::CoercionCast) = cx.tcx.cast_kinds.borrow().get(&source.id) {
- // Skip the actual cast itexpr, as it's now a no-op.
- return source.make_mirror(cx);
+ // Convert the lexpr to a vexpr.
+ ExprKind::Use { source: source.to_ref() }
} else {
ExprKind::Cast { source: source.to_ref() }
}
value: value.to_ref(),
value_extents: cx.tcx.region_maps.node_extent(value.id)
},
- hir::ExprVec(ref fields) =>
+ hir::ExprArray(ref fields) =>
ExprKind::Vec { fields: fields.to_ref() },
hir::ExprTup(ref fields) =>
ExprKind::Tuple { fields: fields.to_ref() },
// Otherwise there may be def_map borrow conflicts
let def = cx.tcx.expect_def(expr.id);
let def_id = match def {
- // A regular function.
- Def::Fn(def_id) | Def::Method(def_id) => def_id,
- Def::Struct(def_id) => match cx.tcx.node_id_to_type(expr.id).sty {
- // A tuple-struct constructor. Should only be reached if not called in the same
- // expression.
- ty::TyFnDef(..) => def_id,
- // A unit struct which is used as a value. We return a completely different ExprKind
- // here to account for this special case.
+ // A regular function, constructor function or a constant.
+ Def::Fn(def_id) | Def::Method(def_id) |
+ Def::StructCtor(def_id, CtorKind::Fn) |
+ Def::VariantCtor(def_id, CtorKind::Fn) |
+ Def::Const(def_id) | Def::AssociatedConst(def_id) => def_id,
+
+ Def::StructCtor(def_id, CtorKind::Const) |
+ Def::VariantCtor(def_id, CtorKind::Const) => match cx.tcx.node_id_to_type(expr.id).sty {
+ // A unit struct/variant which is used as a value.
+ // We return a completely different ExprKind here to account for this special case.
ty::TyAdt(adt_def, substs) => return ExprKind::Adt {
adt_def: adt_def,
- variant_index: 0,
+ variant_index: adt_def.variant_index_with_id(def_id),
substs: substs,
fields: vec![],
- base: None
- },
- ref sty => bug!("unexpected sty: {:?}", sty)
- },
- Def::Variant(variant_id) => match cx.tcx.node_id_to_type(expr.id).sty {
- // A variant constructor. Should only be reached if not called in the same
- // expression.
- ty::TyFnDef(..) => variant_id,
- // A unit variant, similar special case to the struct case above.
- ty::TyAdt(adt_def, substs) => {
- let index = adt_def.variant_index_with_id(variant_id);
- return ExprKind::Adt {
- adt_def: adt_def,
- substs: substs,
- variant_index: index,
- fields: vec![],
- base: None
- };
+ base: None,
},
ref sty => bug!("unexpected sty: {:?}", sty)
},
- Def::Const(def_id) |
- Def::AssociatedConst(def_id) => def_id,
Def::Static(node_id, _) => return ExprKind::StaticRef {
id: node_id,
PatternKind::Deref { subpattern: self.to_pattern(subpattern) }
}
- PatKind::Vec(ref prefix, ref slice, ref suffix) => {
+ PatKind::Slice(ref prefix, ref slice, ref suffix) => {
let ty = self.cx.tcx.node_id_to_type(pat.id);
match ty.sty {
ty::TyRef(_, mt) =>
subpatterns: Vec<FieldPattern<'tcx>>)
-> PatternKind<'tcx> {
match self.cx.tcx.expect_def(pat.id) {
- Def::Variant(variant_id) => {
+ Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => {
let enum_id = self.cx.tcx.parent_def_id(variant_id).unwrap();
let adt_def = self.cx.tcx.lookup_adt_def(enum_id);
if adt_def.variants.len() > 1 {
}
}
- Def::Struct(..) | Def::Union(..) |
+ Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) |
Def::TyAlias(..) | Def::AssociatedTy(..) => {
PatternKind::Leaf { subpatterns: subpatterns }
}
Cast {
source: ExprRef<'tcx>,
},
+ Use {
+ source: ExprRef<'tcx>,
+ }, // Use a lexpr to get a vexpr.
NeverToAny {
source: ExprRef<'tcx>,
},
// Array lengths, i.e. [T; constant].
fn visit_ty(&mut self, ty: &'tcx hir::Ty) {
- if let hir::TyFixedLengthVec(_, ref length) = ty.node {
+ if let hir::TyArray(_, ref length) = ty.node {
self.build_const_integer(length);
}
intravisit::walk_ty(self, ty);
format!("scope {} at {}", scope.index(), tcx.sess.codemap().span_to_string(span))
}
+/// Prints user-defined variables in a scope tree.
+///
+/// Returns the total number of variables printed.
fn write_scope_tree(tcx: TyCtxt,
mir: &Mir,
scope_tree: &FnvHashMap<VisibilityScope, Vec<VisibilityScope>>,
writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?;
// User variable types (including the user's name in a comment).
- for (id, var) in mir.var_decls.iter_enumerated() {
- // Skip if not declared in this scope.
- if var.source_info.scope != child {
+ for local in mir.vars_iter() {
+ let var = &mir.local_decls[local];
+ let (name, source_info) = if var.source_info.unwrap().scope == child {
+ (var.name.unwrap(), var.source_info.unwrap())
+ } else {
+ // Not a variable or not declared in this scope.
continue;
- }
+ };
let mut_str = if var.mutability == Mutability::Mut {
"mut "
INDENT,
indent,
mut_str,
- id,
+ local,
var.ty);
writeln!(w, "{0:1$} // \"{2}\" in {3}",
indented_var,
ALIGN,
- var.name,
- comment(tcx, var.source_info))?;
+ name,
+ comment(tcx, source_info))?;
}
write_scope_tree(tcx, mir, scope_tree, w, child, depth + 1)?;
}
}
+ // Print return pointer
+ let indented_retptr = format!("{}let mut {:?}: {};",
+ INDENT,
+ RETURN_POINTER,
+ mir.return_ty);
+ writeln!(w, "{0:1$} // return pointer",
+ indented_retptr,
+ ALIGN)?;
+
write_scope_tree(tcx, mir, &scope_tree, w, ARGUMENT_VISIBILITY_SCOPE, 1)?;
- write_mir_decls(mir, w)
+ write_temp_decls(mir, w)?;
+
+ // Add an empty line before the first block is printed.
+ writeln!(w, "")?;
+
+ Ok(())
}
fn write_mir_sig(tcx: TyCtxt, src: MirSource, mir: &Mir, w: &mut Write)
write!(w, "(")?;
// fn argument types.
- for (i, arg) in mir.arg_decls.iter_enumerated() {
- if i.index() != 0 {
+ for (i, arg) in mir.args_iter().enumerate() {
+ if i != 0 {
write!(w, ", ")?;
}
- write!(w, "{:?}: {}", Lvalue::Arg(i), arg.ty)?;
+ write!(w, "{:?}: {}", Lvalue::Local(arg), mir.local_decls[arg].ty)?;
}
write!(w, ") -> {}", mir.return_ty)
} else {
- assert!(mir.arg_decls.is_empty());
+ assert_eq!(mir.arg_count, 0);
write!(w, ": {} =", mir.return_ty)
}
}
-fn write_mir_decls(mir: &Mir, w: &mut Write) -> io::Result<()> {
+fn write_temp_decls(mir: &Mir, w: &mut Write) -> io::Result<()> {
// Compiler-introduced temporary types.
- for (id, temp) in mir.temp_decls.iter_enumerated() {
- writeln!(w, "{}let mut {:?}: {};", INDENT, id, temp.ty)?;
- }
-
- // Wrote any declaration? Add an empty line before the first block is printed.
- if !mir.var_decls.is_empty() || !mir.temp_decls.is_empty() {
- writeln!(w, "")?;
+ for temp in mir.temps_iter() {
+ writeln!(w, "{}let mut {:?}: {};", INDENT, temp, mir.local_decls[temp].ty)?;
}
Ok(())
//! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the
//! future.
-use def_use::{DefUseAnalysis, MirSummary};
+use def_use::DefUseAnalysis;
use rustc::mir::repr::{Constant, Local, Location, Lvalue, Mir, Operand, Rvalue, StatementKind};
use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc::mir::visit::MutVisitor;
use rustc::ty::TyCtxt;
-use rustc_data_structures::indexed_vec::Idx;
use transform::qualify_consts;
pub struct CopyPropagation;
def_use_analysis.analyze(mir);
let mut changed = false;
- for dest_local_index in 0..mir.count_locals() {
- let dest_local = Local::new(dest_local_index);
- debug!("Considering destination local: {}", mir.format_local(dest_local));
+ for dest_local in mir.local_decls.indices() {
+ debug!("Considering destination local: {:?}", dest_local);
let action;
let location;
let dest_use_info = def_use_analysis.local_info(dest_local);
let dest_def_count = dest_use_info.def_count_not_including_drop();
if dest_def_count == 0 {
- debug!(" Can't copy-propagate local: dest {} undefined",
- mir.format_local(dest_local));
+ debug!(" Can't copy-propagate local: dest {:?} undefined",
+ dest_local);
continue
}
if dest_def_count > 1 {
- debug!(" Can't copy-propagate local: dest {} defined {} times",
- mir.format_local(dest_local),
+ debug!(" Can't copy-propagate local: dest {:?} defined {} times",
+ dest_local,
dest_use_info.def_count());
continue
}
if dest_use_info.use_count() == 0 {
- debug!(" Can't copy-propagate local: dest {} unused",
- mir.format_local(dest_local));
+ debug!(" Can't copy-propagate local: dest {:?} unused",
+ dest_local);
continue
}
let dest_lvalue_def = dest_use_info.defs_and_uses.iter().filter(|lvalue_def| {
// That use of the source must be an assignment.
match statement.kind {
- StatementKind::Assign(ref dest_lvalue, Rvalue::Use(ref operand)) if
- Some(dest_local) == mir.local_index(dest_lvalue) => {
+ StatementKind::Assign(Lvalue::Local(local), Rvalue::Use(ref operand)) if
+ local == dest_local => {
let maybe_action = match *operand {
Operand::Consume(ref src_lvalue) => {
- Action::local_copy(mir, &def_use_analysis, src_lvalue)
+ Action::local_copy(&def_use_analysis, src_lvalue)
}
Operand::Constant(ref src_constant) => {
Action::constant(src_constant)
}
impl<'tcx> Action<'tcx> {
- fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_lvalue: &Lvalue<'tcx>)
+ fn local_copy(def_use_analysis: &DefUseAnalysis, src_lvalue: &Lvalue<'tcx>)
-> Option<Action<'tcx>> {
// The source must be a local.
- let src_local = match mir.local_index(src_lvalue) {
- Some(src_local) => src_local,
- None => {
- debug!(" Can't copy-propagate local: source is not a local");
- return None
- }
+ let src_local = if let Lvalue::Local(local) = *src_lvalue {
+ local
+ } else {
+ debug!(" Can't copy-propagate local: source is not a local");
+ return None;
};
// We're trying to copy propagate a local.
// First, remove all markers.
//
// FIXME(pcwalton): Don't do this. Merge live ranges instead.
- debug!(" Replacing all uses of {} with {} (local)",
- mir.format_local(dest_local),
- mir.format_local(src_local));
+ debug!(" Replacing all uses of {:?} with {:?} (local)",
+ dest_local,
+ src_local);
for lvalue_use in &def_use_analysis.local_info(dest_local).defs_and_uses {
if lvalue_use.context.is_storage_marker() {
mir.make_statement_nop(lvalue_use.location)
}
// Replace all uses of the destination local with the source local.
- let src_lvalue = Lvalue::from_local(mir, src_local);
+ let src_lvalue = Lvalue::Local(src_local);
def_use_analysis.replace_all_defs_and_uses_with(dest_local, mir, src_lvalue);
// Finally, zap the now-useless assignment instruction.
// First, remove all markers.
//
// FIXME(pcwalton): Don't do this. Merge live ranges instead.
- debug!(" Replacing all uses of {} with {:?} (constant)",
- mir.format_local(dest_local),
+ debug!(" Replacing all uses of {:?} with {:?} (constant)",
+ dest_local,
src_constant);
let dest_local_info = def_use_analysis.local_info(dest_local);
for lvalue_use in &dest_local_info.defs_and_uses {
}
// Replace all uses of the destination local with the constant.
- let mut visitor = ConstantPropagationVisitor::new(MirSummary::new(mir),
- dest_local,
+ let mut visitor = ConstantPropagationVisitor::new(dest_local,
src_constant);
for dest_lvalue_use in &dest_local_info.defs_and_uses {
visitor.visit_location(mir, dest_lvalue_use.location)
struct ConstantPropagationVisitor<'tcx> {
dest_local: Local,
constant: Constant<'tcx>,
- mir_summary: MirSummary,
uses_replaced: usize,
}
impl<'tcx> ConstantPropagationVisitor<'tcx> {
- fn new(mir_summary: MirSummary, dest_local: Local, constant: Constant<'tcx>)
+ fn new(dest_local: Local, constant: Constant<'tcx>)
-> ConstantPropagationVisitor<'tcx> {
ConstantPropagationVisitor {
dest_local: dest_local,
constant: constant,
- mir_summary: mir_summary,
uses_replaced: 0,
}
}
self.super_operand(operand, location);
match *operand {
- Operand::Consume(ref lvalue) => {
- if self.mir_summary.local_index(lvalue) != Some(self.dest_local) {
- return
- }
- }
- Operand::Constant(_) => return,
+ Operand::Consume(Lvalue::Local(local)) if local == self.dest_local => {}
+ _ => return,
}
*operand = Operand::Constant(self.constant.clone());
self.uses_replaced += 1
}
}
-
use rustc::mir::repr::*;
use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc_data_structures::indexed_vec::Idx;
-use rustc::ty::VariantKind;
pub struct Deaggregator;
}
debug!("getting variant {:?}", variant);
debug!("for adt_def {:?}", adt_def);
- let variant_def = &adt_def.variants[variant];
- if variant_def.kind == VariantKind::Struct {
- return Some(i);
- }
+ return Some(i);
};
None
}
//! Performs various peephole optimizations.
-use rustc::mir::repr::{Location, Lvalue, Mir, Operand, ProjectionElem, Rvalue};
+use rustc::mir::repr::{Location, Lvalue, Mir, Operand, ProjectionElem, Rvalue, Local};
use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc::mir::visit::{MutVisitor, Visitor};
use rustc::ty::TyCtxt;
use rustc::util::nodemap::FnvHashSet;
+use rustc_data_structures::indexed_vec::Idx;
use std::mem;
pub struct InstCombine {
debug!("Replacing `&*`: {:?}", rvalue);
let new_lvalue = match *rvalue {
Rvalue::Ref(_, _, Lvalue::Projection(ref mut projection)) => {
- mem::replace(&mut projection.base, Lvalue::ReturnPointer)
+ // Replace with dummy
+ mem::replace(&mut projection.base, Lvalue::Local(Local::new(0)))
}
_ => bug!("Detected `&*` but didn't find `&*`!"),
};
struct OptimizationList {
and_stars: FnvHashSet<Location>,
}
-
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
+use std::iter;
use std::mem;
use std::usize;
ShuffleIndices(BasicBlock)
}
-struct TempCollector {
- temps: IndexVec<Temp, TempState>,
- span: Span
+struct TempCollector<'tcx> {
+ temps: IndexVec<Local, TempState>,
+ span: Span,
+ mir: &'tcx Mir<'tcx>,
}
-impl<'tcx> Visitor<'tcx> for TempCollector {
- fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext, location: Location) {
+impl<'tcx> Visitor<'tcx> for TempCollector<'tcx> {
+ fn visit_lvalue(&mut self,
+ lvalue: &Lvalue<'tcx>,
+ context: LvalueContext<'tcx>,
+ location: Location) {
self.super_lvalue(lvalue, context, location);
- if let Lvalue::Temp(index) = *lvalue {
+ if let Lvalue::Local(index) = *lvalue {
+ // We're only interested in temporaries
+ if self.mir.local_kind(index) != LocalKind::Temp {
+ return;
+ }
+
// Ignore drops, if the temp gets promoted,
// then it's constant and thus drop is noop.
// Storage live ranges are also irrelevant.
}
}
-pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec<Temp, TempState> {
+pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec<Local, TempState> {
let mut collector = TempCollector {
- temps: IndexVec::from_elem(TempState::Undefined, &mir.temp_decls),
- span: mir.span
+ temps: IndexVec::from_elem(TempState::Undefined, &mir.local_decls),
+ span: mir.span,
+ mir: mir,
};
for (bb, data) in rpo {
collector.visit_basic_block_data(bb, data);
struct Promoter<'a, 'tcx: 'a> {
source: &'a mut Mir<'tcx>,
promoted: Mir<'tcx>,
- temps: &'a mut IndexVec<Temp, TempState>,
+ temps: &'a mut IndexVec<Local, TempState>,
/// If true, all nested temps are also kept in the
/// source MIR, not moved to the promoted MIR.
})
}
- fn assign(&mut self, dest: Lvalue<'tcx>, rvalue: Rvalue<'tcx>, span: Span) {
+ fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
let last = self.promoted.basic_blocks().last().unwrap();
let data = &mut self.promoted[last];
data.statements.push(Statement {
span: span,
scope: ARGUMENT_VISIBILITY_SCOPE
},
- kind: StatementKind::Assign(dest, rvalue)
+ kind: StatementKind::Assign(Lvalue::Local(dest), rvalue)
});
}
/// Copy the initialization of this temp to the
/// promoted MIR, recursing through temps.
- fn promote_temp(&mut self, temp: Temp) -> Temp {
+ fn promote_temp(&mut self, temp: Local) -> Local {
let old_keep_original = self.keep_original;
let (bb, stmt_idx) = match self.temps[temp] {
TempState::Defined {
});
}
- let new_temp = self.promoted.temp_decls.push(TempDecl {
- ty: self.source.temp_decls[temp].ty
- });
+ let new_temp = self.promoted.local_decls.push(
+ LocalDecl::new_temp(self.source.local_decls[temp].ty));
// Inject the Rvalue or Call into the promoted MIR.
if stmt_idx < no_stmts {
- self.assign(Lvalue::Temp(new_temp), rvalue.unwrap(), source_info.span);
+ self.assign(new_temp, rvalue.unwrap(), source_info.span);
} else {
let last = self.promoted.basic_blocks().last().unwrap();
let new_target = self.new_block();
let mut call = call.unwrap();
match call {
TerminatorKind::Call { ref mut destination, ..} => {
- *destination = Some((Lvalue::Temp(new_temp), new_target));
+ *destination = Some((Lvalue::Local(new_temp), new_target));
}
_ => bug!()
}
}
}
};
- self.visit_rvalue(&mut rvalue, Location{
+ self.visit_rvalue(&mut rvalue, Location {
block: BasicBlock::new(0),
statement_index: usize::MAX
});
- self.assign(Lvalue::ReturnPointer, rvalue, span);
+
+ self.assign(RETURN_POINTER, rvalue, span);
self.source.promoted.push(self.promoted);
}
}
lvalue: &mut Lvalue<'tcx>,
context: LvalueContext<'tcx>,
location: Location) {
- if let Lvalue::Temp(ref mut temp) = *lvalue {
- *temp = self.promote_temp(*temp);
+ if let Lvalue::Local(ref mut temp) = *lvalue {
+ if self.source.local_kind(*temp) == LocalKind::Temp {
+ *temp = self.promote_temp(*temp);
+ }
}
self.super_lvalue(lvalue, context, location);
}
pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
- mut temps: IndexVec<Temp, TempState>,
+ mut temps: IndexVec<Local, TempState>,
candidates: Vec<Candidate>) {
// Visit candidates in reverse, in case they're nested.
for candidate in candidates.into_iter().rev() {
"expected assignment to promote");
}
};
- if let Lvalue::Temp(index) = *dest {
+ if let Lvalue::Local(index) = *dest {
if temps[index] == TempState::PromotedOut {
// Already promoted.
continue;
}
};
+ // Declare return pointer local
+ let initial_locals = iter::once(LocalDecl::new_return_pointer(ty)).collect();
+
let mut promoter = Promoter {
- source: mir,
promoted: Mir::new(
IndexVec::new(),
Some(VisibilityScopeData {
}).into_iter().collect(),
IndexVec::new(),
ty,
- IndexVec::new(),
- IndexVec::new(),
- IndexVec::new(),
+ initial_locals,
+ 0,
vec![],
span
),
+ source: mir,
temps: &mut temps,
keep_original: false
};
}
// Eliminate assignments to, and drops of promoted temps.
- let promoted = |index: Temp| temps[index] == TempState::PromotedOut;
+ let promoted = |index: Local| temps[index] == TempState::PromotedOut;
for block in mir.basic_blocks_mut() {
block.statements.retain(|statement| {
match statement.kind {
- StatementKind::Assign(Lvalue::Temp(index), _) |
- StatementKind::StorageLive(Lvalue::Temp(index)) |
- StatementKind::StorageDead(Lvalue::Temp(index)) => {
+ StatementKind::Assign(Lvalue::Local(index), _) |
+ StatementKind::StorageLive(Lvalue::Local(index)) |
+ StatementKind::StorageDead(Lvalue::Local(index)) => {
!promoted(index)
}
_ => true
});
let terminator = block.terminator_mut();
match terminator.kind {
- TerminatorKind::Drop { location: Lvalue::Temp(index), target, .. } => {
+ TerminatorKind::Drop { location: Lvalue::Local(index), target, .. } => {
if promoted(index) {
terminator.kind = TerminatorKind::Goto {
target: target
param_env: ty::ParameterEnvironment<'tcx>,
qualif_map: &'a mut DefIdMap<Qualif>,
mir_map: Option<&'a MirMap<'tcx>>,
- temp_qualif: IndexVec<Temp, Option<Qualif>>,
+ temp_qualif: IndexVec<Local, Option<Qualif>>,
return_qualif: Option<Qualif>,
qualif: Qualif,
const_fn_arg_vars: BitVector,
- temp_promotion_state: IndexVec<Temp, TempState>,
+ temp_promotion_state: IndexVec<Local, TempState>,
promotion_candidates: Vec<Candidate>
}
param_env: param_env,
qualif_map: qualif_map,
mir_map: mir_map,
- temp_qualif: IndexVec::from_elem(None, &mir.temp_decls),
+ temp_qualif: IndexVec::from_elem(None, &mir.local_decls),
return_qualif: None,
qualif: Qualif::empty(),
- const_fn_arg_vars: BitVector::new(mir.var_decls.len()),
+ const_fn_arg_vars: BitVector::new(mir.local_decls.len()),
temp_promotion_state: temps,
promotion_candidates: vec![]
}
// Only handle promotable temps in non-const functions.
if self.mode == Mode::Fn {
- if let Lvalue::Temp(index) = *dest {
- if self.temp_promotion_state[index].is_promotable() {
+ if let Lvalue::Local(index) = *dest {
+ if self.mir.local_kind(index) == LocalKind::Temp
+ && self.temp_promotion_state[index].is_promotable() {
+ debug!("store to promotable temp {:?}", index);
store(&mut self.temp_qualif[index]);
}
}
}
match *dest {
- Lvalue::Temp(index) => store(&mut self.temp_qualif[index]),
- Lvalue::ReturnPointer => store(&mut self.return_qualif),
+ Lvalue::Local(index) if self.mir.local_kind(index) == LocalKind::Temp => {
+ debug!("store to temp {:?}", index);
+ store(&mut self.temp_qualif[index])
+ }
+ Lvalue::Local(index) if self.mir.local_kind(index) == LocalKind::ReturnPointer => {
+ debug!("store to return pointer {:?}", index);
+ store(&mut self.return_qualif)
+ }
Lvalue::Projection(box Projection {
- base: Lvalue::Temp(index),
+ base: Lvalue::Local(index),
elem: ProjectionElem::Deref
- }) if self.mir.temp_decls[index].ty.is_unique()
+ }) if self.mir.local_kind(index) == LocalKind::Temp
+ && self.mir.local_decls[index].ty.is_unique()
&& self.temp_qualif[index].map_or(false, |qualif| {
qualif.intersects(Qualif::NOT_CONST)
}) => {
/// Qualify a whole const, static initializer or const fn.
fn qualify_const(&mut self) -> Qualif {
+ debug!("qualifying {} {}", self.mode, self.tcx.item_path_str(self.def_id));
+
let mir = self.mir;
let mut seen_blocks = BitVector::new(mir.basic_blocks().len());
TerminatorKind::Return => {
// Check for unused values. This usually means
// there are extra statements in the AST.
- for temp in mir.temp_decls.indices() {
+ for temp in mir.temps_iter() {
if self.temp_qualif[temp].is_none() {
continue;
}
// Make sure there are no extra unassigned variables.
self.qualif = Qualif::NOT_CONST;
- for index in 0..mir.var_decls.len() {
- if !self.const_fn_arg_vars.contains(index) {
- self.assign(&Lvalue::Var(Var::new(index)), Location {
+ for index in mir.vars_iter() {
+ if !self.const_fn_arg_vars.contains(index.index()) {
+ debug!("unassigned variable {:?}", index);
+ self.assign(&Lvalue::Local(index), Location {
block: bb,
statement_index: usize::MAX,
});
context: LvalueContext<'tcx>,
location: Location) {
match *lvalue {
- Lvalue::Arg(_) => {
- self.add(Qualif::FN_ARGUMENT);
- }
- Lvalue::Var(_) => {
- self.add(Qualif::NOT_CONST);
- }
- Lvalue::Temp(index) => {
- if !self.temp_promotion_state[index].is_promotable() {
- self.add(Qualif::NOT_PROMOTABLE);
+ Lvalue::Local(local) => match self.mir.local_kind(local) {
+ LocalKind::ReturnPointer => {
+ self.not_const();
+ }
+ LocalKind::Arg => {
+ self.add(Qualif::FN_ARGUMENT);
+ }
+ LocalKind::Var => {
+ self.add(Qualif::NOT_CONST);
}
+ LocalKind::Temp => {
+ if !self.temp_promotion_state[local].is_promotable() {
+ self.add(Qualif::NOT_PROMOTABLE);
+ }
- if let Some(qualif) = self.temp_qualif[index] {
- self.add(qualif);
- } else {
- self.not_const();
+ if let Some(qualif) = self.temp_qualif[local] {
+ self.add(qualif);
+ } else {
+ self.not_const();
+ }
}
- }
+ },
Lvalue::Static(_) => {
self.add(Qualif::STATIC);
if self.mode == Mode::Const || self.mode == Mode::ConstFn {
a constant instead", self.mode);
}
}
- Lvalue::ReturnPointer => {
- self.not_const();
- }
Lvalue::Projection(ref proj) => {
self.nest(|this| {
this.super_lvalue(lvalue, context, location);
if self.mode == Mode::Fn || self.mode == Mode::ConstFn {
if !self.qualif.intersects(Qualif::NEVER_PROMOTE) {
// We can only promote direct borrows of temps.
- if let Lvalue::Temp(_) = *lvalue {
- self.promotion_candidates.push(candidate);
+ if let Lvalue::Local(local) = *lvalue {
+ if self.mir.local_kind(local) == LocalKind::Temp {
+ self.promotion_candidates.push(candidate);
+ }
}
}
}
self.visit_rvalue(rvalue, location);
// Check the allowed const fn argument forms.
- if let (Mode::ConstFn, &Lvalue::Var(index)) = (self.mode, dest) {
- if self.const_fn_arg_vars.insert(index.index()) {
+ if let (Mode::ConstFn, &Lvalue::Local(index)) = (self.mode, dest) {
+ if self.mir.local_kind(index) == LocalKind::Var &&
+ self.const_fn_arg_vars.insert(index.index()) {
+
// Direct use of an argument is permitted.
- if let Rvalue::Use(Operand::Consume(Lvalue::Arg(_))) = *rvalue {
- return;
+ if let Rvalue::Use(Operand::Consume(Lvalue::Local(local))) = *rvalue {
+ if self.mir.local_kind(local) == LocalKind::Arg {
+ return;
+ }
}
// Avoid a generic error for other uses of arguments.
if self.qualif.intersects(Qualif::FN_ARGUMENT) {
- let decl = &self.mir.var_decls[index];
- span_err!(self.tcx.sess, decl.source_info.span, E0022,
+ let decl = &self.mir.local_decls[index];
+ span_err!(self.tcx.sess, decl.source_info.unwrap().span, E0022,
"arguments of constant functions can only \
be immutable by-value bindings");
return;
impl<'l, 'tcx> MirPass<'tcx> for SimplifyCfg<'l> {
fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, mir: &mut Mir<'tcx>) {
+ debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, mir);
CfgSimplifier::new(mir).simplify();
remove_dead_blocks(mir);
// we can't use mir.predecessors() here because that counts
// dead blocks, which we don't want to.
+ pred_count[START_BLOCK] = 1;
+
for (_, data) in traversal::preorder(mir) {
if let Some(ref term) = data.terminator {
for &tgt in term.successors().iter() {
debug!("collapsing goto chain from {:?} to {:?}", *start, target);
*changed |= *start != target;
- self.pred_count[target] += 1;
- self.pred_count[*start] -= 1;
+
+ if self.pred_count[*start] == 1 {
+ // This is the last reference to *start, so the pred-count to
+ // to target is moved into the current block.
+ self.pred_count[*start] = 0;
+ } else {
+ self.pred_count[target] += 1;
+ self.pred_count[*start] -= 1;
+ }
+
*start = target;
}
fn visit_mir(&mut self, mir: &Mir<'tcx>) {
self.sanitize_type(&"return type", mir.return_ty);
- for var_decl in &mir.var_decls {
- self.sanitize_type(var_decl, var_decl.ty);
- }
- for (n, arg_decl) in mir.arg_decls.iter().enumerate() {
- self.sanitize_type(&(n, arg_decl), arg_decl.ty);
- }
- for (n, tmp_decl) in mir.temp_decls.iter().enumerate() {
- self.sanitize_type(&(n, tmp_decl), tmp_decl.ty);
+ for local_decl in &mir.local_decls {
+ self.sanitize_type(local_decl, local_decl.ty);
}
if self.errors_reported {
return;
fn sanitize_lvalue(&mut self, lvalue: &Lvalue<'tcx>, location: Location) -> LvalueTy<'tcx> {
debug!("sanitize_lvalue: {:?}", lvalue);
match *lvalue {
- Lvalue::Var(index) => LvalueTy::Ty { ty: self.mir.var_decls[index].ty },
- Lvalue::Temp(index) => LvalueTy::Ty { ty: self.mir.temp_decls[index].ty },
- Lvalue::Arg(index) => LvalueTy::Ty { ty: self.mir.arg_decls[index].ty },
+ Lvalue::Local(index) => LvalueTy::Ty { ty: self.mir.local_decls[index].ty },
Lvalue::Static(def_id) =>
LvalueTy::Ty { ty: self.tcx().lookup_item_type(def_id).ty },
- Lvalue::ReturnPointer => {
- LvalueTy::Ty { ty: self.mir.return_ty }
- }
Lvalue::Projection(ref proj) => {
let base_ty = self.sanitize_lvalue(&proj.base, location);
if let LvalueTy::Ty { ty } = base_ty {
StatementKind::StorageLive(ref lv) |
StatementKind::StorageDead(ref lv) => {
match *lv {
- Lvalue::Temp(_) | Lvalue::Var(_) => {}
+ Lvalue::Local(_) => {}
_ => {
- span_mirbug!(self, stmt, "bad lvalue: expected temp or var");
+ span_mirbug!(self, stmt, "bad lvalue: expected local");
}
}
}
impl<'tcx> MirPass<'tcx> for TypeckMir {
fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource, mir: &mut Mir<'tcx>) {
+ debug!("run_pass: {}", tcx.node_path_str(src.item_id()));
+
if tcx.sess.err_count() > 0 {
// compiling a broken program can obviously result in a
// broken MIR, so try not to report duplicate errors.
span,
E0449,
"unnecessary visibility qualifier");
+ if vis == &Visibility::Public {
+ err.span_label(span, &format!("`pub` not needed here"));
+ }
if let Some(note) = note {
- err.span_note(span, note);
+ err.note(note);
}
err.emit();
}
use rustc_const_eval::ErrKind::UnresolvedPath;
use rustc_const_eval::EvalHint::ExprTypeChecked;
use rustc_const_math::{ConstMathErr, Op};
-use rustc::hir::def::Def;
+use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
use rustc::middle::expr_use_visitor as euv;
use rustc::middle::mem_categorization as mc;
}
hir::ExprPath(..) => {
match v.tcx.expect_def(e.id) {
- Def::Variant(..) => {
- // Count the discriminator or function pointer.
- v.add_qualif(ConstQualif::NON_ZERO_SIZED);
- }
- Def::Struct(..) => {
- if let ty::TyFnDef(..) = node_ty.sty {
- // Count the function pointer.
- v.add_qualif(ConstQualif::NON_ZERO_SIZED);
- }
- }
- Def::Fn(..) | Def::Method(..) => {
- // Count the function pointer.
+ Def::VariantCtor(_, CtorKind::Const) => {
+ // Size is determined by the whole enum, may be non-zero.
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
}
+ Def::VariantCtor(..) | Def::StructCtor(..) |
+ Def::Fn(..) | Def::Method(..) => {}
Def::Static(..) => {
match v.mode {
Mode::Static | Mode::StaticMut => {}
}
// The callee is an arbitrary expression, it doesn't necessarily have a definition.
let is_const = match v.tcx.expect_def_or_none(callee.id) {
- Some(Def::Struct(..)) => true,
- Some(Def::Variant(..)) => {
- // Count the discriminator.
+ Some(Def::StructCtor(_, CtorKind::Fn)) |
+ Some(Def::VariantCtor(_, CtorKind::Fn)) => {
+ // `NON_ZERO_SIZED` is about the call result, not about the ctor itself.
v.add_qualif(ConstQualif::NON_ZERO_SIZED);
true
}
hir::ExprIndex(..) |
hir::ExprField(..) |
hir::ExprTupField(..) |
- hir::ExprVec(_) |
+ hir::ExprArray(_) |
hir::ExprType(..) |
hir::ExprTup(..) => {}
use rustc::dep_graph::DepNode;
use rustc::hir::map as ast_map;
use rustc::session::{CompileResult, Session};
-use rustc::hir::def::{Def, DefMap};
+use rustc::hir::def::{Def, CtorKind, DefMap};
use rustc::util::nodemap::NodeMap;
use syntax::ast;
});
if any_static {
if !self.sess.features.borrow().static_recursion {
- emit_feature_err(&self.sess.parse_sess.span_diagnostic,
+ emit_feature_err(&self.sess.parse_sess,
"static_recursion",
*self.root_span,
GateIssue::Language,
// affect the specific variant used, but we need to check
// the whole enum definition to see what expression that
// might be (if any).
- Some(Def::Variant(variant_id)) => {
+ Some(Def::VariantCtor(variant_id, CtorKind::Const)) => {
if let Some(variant_id) = self.ast_map.as_local_node_id(variant_id) {
let variant = self.ast_map.expect_variant(variant_id);
let enum_id = self.ast_map.get_parent(variant_id);
} else {
span_bug!(e.span,
"`check_static_recursion` found \
- non-enum in Def::Variant");
+ non-enum in Def::VariantCtor");
}
}
}
use rustc::dep_graph::DepNode;
use rustc::hir::{self, PatKind};
-use rustc::hir::def::{self, Def};
+use rustc::hir::def::{self, Def, CtorKind};
use rustc::hir::def_id::DefId;
use rustc::hir::intravisit::{self, Visitor};
use rustc::hir::pat_util::EnumerateAndAdjustIterator;
if self.prev_level.is_some() {
if let Some(exports) = self.export_map.get(&id) {
for export in exports {
- if let Some(node_id) = self.tcx.map.as_local_node_id(export.def_id) {
+ if let Some(node_id) = self.tcx.map.as_local_node_id(export.def.def_id()) {
self.update(node_id, Some(AccessLevel::Exported));
}
}
}
}
hir::ExprPath(..) => {
- if let Def::Struct(..) = self.tcx.expect_def(expr.id) {
- let expr_ty = self.tcx.expr_ty(expr);
- let def = match expr_ty.sty {
- ty::TyFnDef(.., &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
- output: ty, ..
- }), ..}) => ty,
- _ => expr_ty
- }.ty_adt_def().unwrap();
-
- let private_indexes : Vec<_> = def.struct_variant().fields.iter().enumerate()
- .filter(|&(_,f)| {
- !f.vis.is_accessible_from(self.curitem, &self.tcx.map)
- }).map(|(n,&_)|n).collect();
+ if let def @ Def::StructCtor(_, CtorKind::Fn) = self.tcx.expect_def(expr.id) {
+ let adt_def = self.tcx.expect_variant_def(def);
+ let private_indexes = adt_def.fields.iter().enumerate().filter(|&(_, field)| {
+ !field.vis.is_accessible_from(self.curitem, &self.tcx.map)
+ }).map(|(i, _)| i).collect::<Vec<_>>();
if !private_indexes.is_empty() {
-
let mut error = struct_span_err!(self.tcx.sess, expr.span, E0450,
"cannot invoke tuple struct constructor \
- with private fields");
+ with private fields");
error.span_label(expr.span,
&format!("cannot construct with a private field"));
- if let Some(def_id) = self.tcx.map.as_local_node_id(def.did) {
- if let Some(hir::map::NodeItem(node)) = self.tcx.map.find(def_id) {
- if let hir::Item_::ItemStruct(ref tuple_data, _) = node.node {
-
- for i in private_indexes {
- error.span_label(tuple_data.fields()[i].span,
- &format!("private field declared here"));
- }
+ if let Some(node_id) = self.tcx.map.as_local_node_id(adt_def.did) {
+ let node = self.tcx.map.find(node_id);
+ if let Some(hir::map::NodeStructCtor(vdata)) = node {
+ for i in private_indexes {
+ error.span_label(vdata.fields()[i].span,
+ &format!("private field declared here"));
}
}
}
if !vis.is_at_least(self.required_visibility, &self.tcx.map) {
if self.tcx.sess.features.borrow().pub_restricted ||
self.old_error_set.contains(&ty.id) {
- span_err!(self.tcx.sess, ty.span, E0446,
+ let mut err = struct_span_err!(self.tcx.sess, ty.span, E0446,
"private type in public interface");
+ err.span_label(ty.span, &format!("can't leak private type"));
+ err.emit();
} else {
self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC,
node_id,
//! Here we build the "reduced graph": the graph of the module tree without
//! any imports resolved.
+use macros;
use resolve_imports::ImportDirectiveSubclass::{self, GlobImport};
use {Module, ModuleS, ModuleKind};
use Namespace::{self, TypeNS, ValueNS};
use Resolver;
use {resolve_error, resolve_struct_error, ResolutionError};
+use rustc::middle::cstore::LoadedMacroKind;
use rustc::hir::def::*;
use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId};
-use rustc::hir::map::DefPathData;
use rustc::ty;
use std::cell::Cell;
+use std::rc::Rc;
use syntax::ast::Name;
use syntax::attr;
use syntax::parse::token;
-use syntax::ast::{Block, Crate};
-use syntax::ast::{ForeignItem, ForeignItemKind, Item, ItemKind};
+use syntax::ast::{self, Block, ForeignItem, ForeignItemKind, Item, ItemKind};
use syntax::ast::{Mutability, StmtKind, TraitItem, TraitItemKind};
use syntax::ast::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple};
+use syntax::ext::base::{MultiItemModifier, Resolver as SyntaxResolver};
+use syntax::ext::hygiene::Mark;
+use syntax::feature_gate::{self, emit_feature_err};
+use syntax::ext::tt::macro_rules;
use syntax::parse::token::keywords;
use syntax::visit::{self, Visitor};
}
impl<'b> Resolver<'b> {
- /// Constructs the reduced graph for the entire crate.
- pub fn build_reduced_graph(&mut self, krate: &Crate) {
- visit::walk_crate(&mut BuildReducedGraphVisitor { resolver: self }, krate);
- }
-
/// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined;
/// otherwise, reports an error.
fn define<T>(&mut self, parent: Module<'b>, name: Name, ns: Namespace, def: T)
fn block_needs_anonymous_module(&mut self, block: &Block) -> bool {
// If any statements are items, we need to create an anonymous module
block.stmts.iter().any(|statement| match statement.node {
- StmtKind::Item(_) => true,
+ StmtKind::Item(_) | StmtKind::Mac(_) => true,
_ => false,
})
}
- /// Constructs the reduced graph for one item.
- fn build_reduced_graph_for_item(&mut self, item: &Item) {
- self.crate_loader.process_item(item, &self.definitions);
+ fn insert_field_names(&mut self, def_id: DefId, field_names: Vec<Name>) {
+ if !field_names.is_empty() {
+ self.field_names.insert(def_id, field_names);
+ }
+ }
+ /// Constructs the reduced graph for one item.
+ fn build_reduced_graph_for_item(&mut self, item: &Item, expansion: Mark) {
let parent = self.current_module;
let name = item.ident.name;
let sp = item.span;
}
ItemKind::ExternCrate(_) => {
- // n.b. we don't need to look at the path option here, because cstore already
- // did
+ // We need to error on `#[macro_use] extern crate` when it isn't at the
+ // crate root, because `$crate` won't work properly.
+ let is_crate_root = self.current_module.parent.is_none();
+ for loaded_macro in self.crate_loader.load_macros(item, is_crate_root) {
+ match loaded_macro.kind {
+ LoadedMacroKind::Def(mut def) => {
+ let name = def.ident.name;
+ if def.use_locally {
+ let ext = macro_rules::compile(&self.session.parse_sess, &def);
+ let shadowing =
+ self.resolve_macro_name(Mark::root(), name, false).is_some();
+ self.expansion_data[&Mark::root()].module.macros.borrow_mut()
+ .insert(name, macros::NameBinding {
+ ext: Rc::new(ext),
+ expansion: expansion,
+ shadowing: shadowing,
+ span: loaded_macro.import_site,
+ });
+ self.macro_names.insert(name);
+ }
+ if def.export {
+ def.id = self.next_node_id();
+ self.exported_macros.push(def);
+ }
+ }
+ LoadedMacroKind::CustomDerive(name, ext) => {
+ self.insert_custom_derive(&name, ext, item.span);
+ }
+ }
+ }
+ self.crate_loader.process_item(item, &self.definitions);
+
+ // n.b. we don't need to look at the path option here, because cstore already did
if let Some(crate_id) = self.session.cstore.extern_mod_stmt_cnum(item.id) {
let def_id = DefId {
krate: crate_id,
}
}
+ ItemKind::Mod(..) if item.ident == keywords::Invalid.ident() => {} // Crate root
+
ItemKind::Mod(..) => {
let def = Def::Mod(self.definitions.local_def_id(item.id));
let module = self.arenas.alloc_module(ModuleS {
attr::contains_name(&item.attrs, "no_implicit_prelude")
},
normal_ancestor_id: Some(item.id),
+ macros_escape: self.contains_macro_use(&item.attrs),
..ModuleS::new(Some(parent), ModuleKind::Def(def, name))
});
self.define(parent, name, TypeNS, (module, sp, vis));
self.current_module = module;
}
- ItemKind::ForeignMod(..) => {}
+ ItemKind::ForeignMod(..) => self.crate_loader.process_item(item, &self.definitions),
// These items live in the value namespace.
ItemKind::Static(_, m, _) => {
// If this is a tuple or unit struct, define a name
// in the value namespace as well.
if !struct_def.is_struct() {
- let def = Def::Struct(self.definitions.local_def_id(struct_def.id()));
- self.define(parent, name, ValueNS, (def, sp, vis));
+ let ctor_def = Def::StructCtor(self.definitions.local_def_id(struct_def.id()),
+ CtorKind::from_ast(struct_def));
+ self.define(parent, name, ValueNS, (ctor_def, sp, vis));
}
- // Record the def ID and fields of this struct.
- let field_names = struct_def.fields().iter().enumerate().map(|(index, field)| {
+ // Record field names for error reporting.
+ let field_names = struct_def.fields().iter().filter_map(|field| {
self.resolve_visibility(&field.vis);
field.ident.map(|ident| ident.name)
- .unwrap_or_else(|| token::intern(&index.to_string()))
}).collect();
let item_def_id = self.definitions.local_def_id(item.id);
- self.structs.insert(item_def_id, field_names);
+ self.insert_field_names(item_def_id, field_names);
}
ItemKind::Union(ref vdata, _) => {
let def = Def::Union(self.definitions.local_def_id(item.id));
self.define(parent, name, TypeNS, (def, sp, vis));
- // Record the def ID and fields of this union.
- let field_names = vdata.fields().iter().enumerate().map(|(index, field)| {
+ // Record field names for error reporting.
+ let field_names = vdata.fields().iter().filter_map(|field| {
self.resolve_visibility(&field.vis);
field.ident.map(|ident| ident.name)
- .unwrap_or_else(|| token::intern(&index.to_string()))
}).collect();
let item_def_id = self.definitions.local_def_id(item.id);
- self.structs.insert(item_def_id, field_names);
+ self.insert_field_names(item_def_id, field_names);
}
ItemKind::DefaultImpl(..) | ItemKind::Impl(..) => {}
parent: Module<'b>,
vis: ty::Visibility) {
let name = variant.node.name.name;
- if variant.node.data.is_struct() {
- // Not adding fields for variants as they are not accessed with a self receiver
- let variant_def_id = self.definitions.local_def_id(variant.node.data.id());
- self.structs.insert(variant_def_id, Vec::new());
- }
+ let def_id = self.definitions.local_def_id(variant.node.data.id());
- // Variants are always treated as importable to allow them to be glob used.
- // All variants are defined in both type and value namespaces as future-proofing.
- let def = Def::Variant(self.definitions.local_def_id(variant.node.data.id()));
- self.define(parent, name, ValueNS, (def, variant.span, vis));
+ // Define a name in the type namespace.
+ let def = Def::Variant(def_id);
self.define(parent, name, TypeNS, (def, variant.span, vis));
+
+ // Define a constructor name in the value namespace.
+ // Braced variants, unlike structs, generate unusable names in
+ // value namespace, they are reserved for possible future use.
+ let ctor_kind = CtorKind::from_ast(&variant.node.data);
+ let ctor_def = Def::VariantCtor(def_id, ctor_kind);
+ self.define(parent, name, ValueNS, (ctor_def, variant.span, vis));
}
/// Constructs the reduced graph for one foreign item.
/// Builds the reduced graph for a single item in an external crate.
fn build_reduced_graph_for_external_crate_def(&mut self, parent: Module<'b>,
child: Export) {
- let def_id = child.def_id;
let name = child.name;
-
- let def = if let Some(def) = self.session.cstore.describe_def(def_id) {
- def
- } else {
- return;
- };
-
+ let def = child.def;
+ let def_id = def.def_id();
let vis = if parent.is_trait() {
ty::Visibility::Public
} else {
};
match def {
- Def::Mod(_) | Def::Enum(..) => {
- debug!("(building reduced graph for external crate) building module {} {:?}",
- name, vis);
+ Def::Mod(..) | Def::Enum(..) => {
let module = self.new_module(parent, ModuleKind::Def(def, name), false);
- let _ = self.try_define(parent, name, TypeNS, (module, DUMMY_SP, vis));
+ self.define(parent, name, TypeNS, (module, DUMMY_SP, vis));
}
- Def::Variant(variant_id) => {
- debug!("(building reduced graph for external crate) building variant {}", name);
- // Variants are always treated as importable to allow them to be glob used.
- // All variants are defined in both type and value namespaces as future-proofing.
- let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis));
- let _ = self.try_define(parent, name, ValueNS, (def, DUMMY_SP, vis));
- if self.session.cstore.variant_kind(variant_id) == Some(ty::VariantKind::Struct) {
- // Not adding fields for variants as they are not accessed with a self receiver
- self.structs.insert(variant_id, Vec::new());
- }
+ Def::Variant(..) => {
+ self.define(parent, name, TypeNS, (def, DUMMY_SP, vis));
+ }
+ Def::VariantCtor(..) => {
+ self.define(parent, name, ValueNS, (def, DUMMY_SP, vis));
}
Def::Fn(..) |
Def::Static(..) |
Def::Const(..) |
Def::AssociatedConst(..) |
Def::Method(..) => {
- debug!("(building reduced graph for external crate) building value (fn/static) {}",
- name);
- let _ = self.try_define(parent, name, ValueNS, (def, DUMMY_SP, vis));
+ self.define(parent, name, ValueNS, (def, DUMMY_SP, vis));
}
- Def::Trait(_) => {
- debug!("(building reduced graph for external crate) building type {}", name);
-
- // If this is a trait, add all the trait item names to the trait
- // info.
+ Def::Trait(..) => {
+ let module = self.new_module(parent, ModuleKind::Def(def, name), false);
+ self.define(parent, name, TypeNS, (module, DUMMY_SP, vis));
+ // If this is a trait, add all the trait item names to the trait info.
let trait_item_def_ids = self.session.cstore.impl_or_trait_items(def_id);
- for &trait_item_def in &trait_item_def_ids {
- let trait_item_name =
- self.session.cstore.def_key(trait_item_def)
- .disambiguated_data.data.get_opt_name()
- .expect("opt_item_name returned None for trait");
-
- debug!("(building reduced graph for external crate) ... adding trait item \
- '{}'",
- trait_item_name);
-
+ for trait_item_def_id in trait_item_def_ids {
+ let trait_item_name = self.session.cstore.def_key(trait_item_def_id)
+ .disambiguated_data.data.get_opt_name()
+ .expect("opt_item_name returned None for trait");
self.trait_item_map.insert((trait_item_name, def_id), false);
}
-
- let module = self.new_module(parent, ModuleKind::Def(def, name), false);
- let _ = self.try_define(parent, name, TypeNS, (module, DUMMY_SP, vis));
}
Def::TyAlias(..) | Def::AssociatedTy(..) => {
- debug!("(building reduced graph for external crate) building type {}", name);
- let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis));
+ self.define(parent, name, TypeNS, (def, DUMMY_SP, vis));
}
- Def::Struct(_)
- if self.session.cstore.def_key(def_id).disambiguated_data.data !=
- DefPathData::StructCtor
- => {
- debug!("(building reduced graph for external crate) building type and value for {}",
- name);
- let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis));
- if let Some(ctor_def_id) = self.session.cstore.struct_ctor_def_id(def_id) {
- let def = Def::Struct(ctor_def_id);
- let _ = self.try_define(parent, name, ValueNS, (def, DUMMY_SP, vis));
- }
+ Def::Struct(..) => {
+ self.define(parent, name, TypeNS, (def, DUMMY_SP, vis));
- // Record the def ID and fields of this struct.
- let fields = self.session.cstore.struct_field_names(def_id);
- self.structs.insert(def_id, fields);
+ // Record field names for error reporting.
+ let field_names = self.session.cstore.struct_field_names(def_id);
+ self.insert_field_names(def_id, field_names);
+ }
+ Def::StructCtor(..) => {
+ self.define(parent, name, ValueNS, (def, DUMMY_SP, vis));
}
- Def::Union(_) => {
- let _ = self.try_define(parent, name, TypeNS, (def, DUMMY_SP, vis));
+ Def::Union(..) => {
+ self.define(parent, name, TypeNS, (def, DUMMY_SP, vis));
- // Record the def ID and fields of this union.
- let fields = self.session.cstore.struct_field_names(def_id);
- self.structs.insert(def_id, fields);
+ // Record field names for error reporting.
+ let field_names = self.session.cstore.struct_field_names(def_id);
+ self.insert_field_names(def_id, field_names);
}
- Def::Struct(..) => {}
Def::Local(..) |
Def::PrimTy(..) |
Def::TyParam(..) |
Def::Label(..) |
Def::SelfTy(..) |
Def::Err => {
- bug!("didn't expect `{:?}`", def);
+ bug!("unexpected definition: {:?}", def);
}
}
}
}
module.populated.set(true)
}
+
+ // does this attribute list contain "macro_use"?
+ fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool {
+ for attr in attrs {
+ if attr.check_name("macro_escape") {
+ let msg = "macro_escape is a deprecated synonym for macro_use";
+ let mut err = self.session.struct_span_warn(attr.span, msg);
+ if let ast::AttrStyle::Inner = attr.node.style {
+ err.help("consider an outer attribute, #[macro_use] mod ...").emit();
+ } else {
+ err.emit();
+ }
+ } else if !attr.check_name("macro_use") {
+ continue;
+ }
+
+ if !attr.is_word() {
+ self.session.span_err(attr.span, "arguments to macro_use are not allowed here");
+ }
+ return true;
+ }
+
+ false
+ }
+
+ fn insert_custom_derive(&mut self, name: &str, ext: Rc<MultiItemModifier>, sp: Span) {
+ if !self.session.features.borrow().rustc_macro {
+ let sess = &self.session.parse_sess;
+ let msg = "loading custom derive macro crates is experimentally supported";
+ emit_feature_err(sess, "rustc_macro", sp, feature_gate::GateIssue::Language, msg);
+ }
+ if self.derive_modes.insert(token::intern(name), ext).is_some() {
+ self.session.span_err(sp, &format!("cannot shadow existing derive mode `{}`", name));
+ }
+ }
}
-struct BuildReducedGraphVisitor<'a, 'b: 'a> {
- resolver: &'a mut Resolver<'b>,
+pub struct BuildReducedGraphVisitor<'a, 'b: 'a> {
+ pub resolver: &'a mut Resolver<'b>,
+ pub expansion: Mark,
+}
+
+impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
+ fn visit_invoc(&mut self, id: ast::NodeId) {
+ self.resolver.expansion_data.get_mut(&Mark::from_placeholder_id(id)).unwrap().module =
+ self.resolver.current_module;
+ }
+}
+
+macro_rules! method {
+ ($visit:ident: $ty:ty, $invoc:path, $walk:ident) => {
+ fn $visit(&mut self, node: &$ty) {
+ match node.node {
+ $invoc(..) => self.visit_invoc(node.id),
+ _ => visit::$walk(self, node),
+ }
+ }
+ }
}
impl<'a, 'b> Visitor for BuildReducedGraphVisitor<'a, 'b> {
+ method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item);
+ method!(visit_stmt: ast::Stmt, ast::StmtKind::Mac, walk_stmt);
+ method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr);
+ method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat);
+ method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty);
+
fn visit_item(&mut self, item: &Item) {
+ match item.node {
+ ItemKind::Mac(..) if item.id == ast::DUMMY_NODE_ID => return, // Scope placeholder
+ ItemKind::Mac(..) => return self.visit_invoc(item.id),
+ _ => {}
+ }
+
let parent = self.resolver.current_module;
- self.resolver.build_reduced_graph_for_item(item);
+ self.resolver.build_reduced_graph_for_item(item, self.expansion);
visit::walk_item(self, item);
self.resolver.current_module = parent;
}
fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) {
self.resolver.build_reduced_graph_for_foreign_item(foreign_item);
+ visit::walk_foreign_item(self, foreign_item);
}
fn visit_block(&mut self, block: &Block) {
let parent = self.resolver.current_module;
let def_id = parent.def_id().unwrap();
+ if let TraitItemKind::Macro(_) = item.node {
+ return self.visit_invoc(item.id);
+ }
+
// Add the item to the trait info.
let item_def_id = self.resolver.definitions.local_def_id(item.id);
let mut is_static_method = false;
(Def::Method(item_def_id), ValueNS)
}
TraitItemKind::Type(..) => (Def::AssociatedTy(item_def_id), TypeNS),
- TraitItemKind::Macro(_) => panic!("unexpanded macro in resolve!"),
+ TraitItemKind::Macro(_) => bug!(), // handled above
};
self.resolver.trait_item_map.insert((item.ident.name, def_id), is_static_method);
use self::UseLexicalScopeFlag::*;
use self::ModulePrefixResult::*;
-use rustc::hir::map::Definitions;
+use rustc::hir::map::{Definitions, DefCollector};
use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr};
use rustc::middle::cstore::CrateLoader;
use rustc::session::Session;
use errors::DiagnosticBuilder;
use std::cell::{Cell, RefCell};
-use std::rc::Rc;
use std::fmt;
use std::mem::replace;
+use std::rc::Rc;
use resolve_imports::{ImportDirective, NameResolution};
let mut err = struct_span_err!(resolver.session,
span,
E0425,
- "unresolved name `{}`{}",
- path,
- msg);
+ "unresolved name `{}`",
+ path);
+ if msg != "" {
+ err.span_label(span, &msg);
+ } else {
+ err.span_label(span, &format!("unresolved name"));
+ }
+
match context {
UnresolvedNameContext::Other => {
if msg.is_empty() && is_static_method && is_field {
E0531,
"unresolved {} `{}`",
expected_what,
- path.segments.last().unwrap().identifier)
+ path)
}
ResolutionError::PatPathUnexpected(expected_what, found_what, path) => {
struct_span_err!(resolver.session,
"expected {}, found {} `{}`",
expected_what,
found_what,
- path.segments.last().unwrap().identifier)
+ path)
}
}
}
// access the children must be preceded with a
// `populate_module_if_necessary` call.
populated: Cell<bool>,
+
+ macros: RefCell<FnvHashMap<Name, macros::NameBinding>>,
+ macros_escape: bool,
}
pub type Module<'a> = &'a ModuleS<'a>;
globs: RefCell::new((Vec::new())),
traits: RefCell::new(None),
populated: Cell::new(true),
+ macros: RefCell::new(FnvHashMap()),
+ macros_escape: false,
}
}
fn is_variant(&self) -> bool {
match self.kind {
- NameBindingKind::Def(Def::Variant(..)) => true,
+ NameBindingKind::Def(Def::Variant(..)) |
+ NameBindingKind::Def(Def::VariantCtor(..)) => true,
_ => false,
}
}
trait_item_map: FnvHashMap<(Name, DefId), bool /* is static method? */>,
- structs: FnvHashMap<DefId, Vec<Name>>,
+ // Names of fields of an item `DefId` accessible with dot syntax.
+ // Used for hints during error reporting.
+ field_names: FnvHashMap<DefId, Vec<Name>>,
// All imports known to succeed or fail.
determined_imports: Vec<&'a ImportDirective<'a>>,
privacy_errors: Vec<PrivacyError<'a>>,
ambiguity_errors: Vec<AmbiguityError<'a>>,
+ macro_shadowing_errors: FnvHashSet<Span>,
arenas: &'a ResolverArenas<'a>,
dummy_binding: &'a NameBinding<'a>,
macro_names: FnvHashSet<Name>,
// Maps the `Mark` of an expansion to its containing module or block.
- expansion_data: FnvHashMap<u32, macros::ExpansionData>,
+ expansion_data: FnvHashMap<Mark, macros::ExpansionData<'a>>,
}
pub struct ResolverArenas<'a> {
let mut module_map = NodeMap();
module_map.insert(CRATE_NODE_ID, graph_root);
+ let mut definitions = Definitions::new();
+ DefCollector::new(&mut definitions).collect_root();
+
let mut expansion_data = FnvHashMap();
- expansion_data.insert(0, macros::ExpansionData::default()); // Crate root expansion
+ expansion_data.insert(Mark::root(), macros::ExpansionData::root(graph_root));
Resolver {
session: session,
- definitions: Definitions::new(),
+ definitions: definitions,
macros_at_scope: FnvHashMap(),
// The outermost module has def ID 0; this is not reflected in the
prelude: None,
trait_item_map: FnvHashMap(),
- structs: FnvHashMap(),
+ field_names: FnvHashMap(),
determined_imports: Vec::new(),
indeterminate_imports: Vec::new(),
privacy_errors: Vec::new(),
ambiguity_errors: Vec::new(),
+ macro_shadowing_errors: FnvHashSet(),
arenas: arenas,
dummy_binding: arenas.alloc_name_binding(NameBinding {
/// Entry point to crate resolution.
pub fn resolve_crate(&mut self, krate: &Crate) {
+ // Collect `DefId`s for exported macro defs.
+ for def in &krate.exported_macros {
+ DefCollector::new(&mut self.definitions).with_parent(CRATE_DEF_INDEX, |collector| {
+ collector.visit_macro_def(def)
+ })
+ }
+
self.current_module = self.graph_root;
visit::walk_crate(self, krate);
let always_binding = !pat_src.is_refutable() || opt_pat.is_some() ||
bmode != BindingMode::ByValue(Mutability::Immutable);
match def {
- Def::Struct(..) | Def::Variant(..) |
- Def::Const(..) | Def::AssociatedConst(..) if !always_binding => {
- // A constant, unit variant, etc pattern.
+ Def::StructCtor(_, CtorKind::Const) |
+ Def::VariantCtor(_, CtorKind::Const) |
+ Def::Const(..) if !always_binding => {
+ // A unit struct/variant or constant pattern.
let name = ident.node.name;
self.record_use(name, ValueNS, binding.unwrap(), ident.span);
Some(PathResolution::new(def))
}
- Def::Struct(..) | Def::Variant(..) |
- Def::Const(..) | Def::AssociatedConst(..) | Def::Static(..) => {
+ Def::StructCtor(..) | Def::VariantCtor(..) |
+ Def::Const(..) | Def::Static(..) => {
// A fresh binding that shadows something unacceptable.
resolve_error(
self,
}
def => {
span_bug!(ident.span, "unexpected definition for an \
- identifier in pattern {:?}", def);
+ identifier in pattern: {:?}", def);
}
}
}).unwrap_or_else(|| {
self.record_def(pat.id, resolution);
}
- PatKind::TupleStruct(ref path, ..) => {
+ PatKind::TupleStruct(ref path, ref pats, ddpos) => {
self.resolve_pattern_path(pat.id, None, path, ValueNS, |def| {
match def {
- Def::Struct(..) | Def::Variant(..) => true,
+ Def::StructCtor(_, CtorKind::Fn) |
+ Def::VariantCtor(_, CtorKind::Fn) => true,
+ // `UnitVariant(..)` is accepted for backward compatibility.
+ Def::StructCtor(_, CtorKind::Const) |
+ Def::VariantCtor(_, CtorKind::Const)
+ if pats.is_empty() && ddpos.is_some() => true,
_ => false,
}
- }, "variant or struct");
+ }, "tuple struct/variant");
}
PatKind::Path(ref qself, ref path) => {
self.resolve_pattern_path(pat.id, qself.as_ref(), path, ValueNS, |def| {
match def {
- Def::Struct(..) | Def::Variant(..) |
+ Def::StructCtor(_, CtorKind::Const) |
+ Def::VariantCtor(_, CtorKind::Const) |
Def::Const(..) | Def::AssociatedConst(..) => true,
_ => false,
}
- }, "variant, struct or constant");
+ }, "unit struct/variant or constant");
}
PatKind::Struct(ref path, ..) => {
// Look for a field with the same name in the current self_type.
if let Some(resolution) = self.def_map.get(&node_id) {
match resolution.base_def {
- Def::Enum(did) | Def::TyAlias(did) | Def::Union(did) |
- Def::Struct(did) | Def::Variant(did) if resolution.depth == 0 => {
- if let Some(fields) = self.structs.get(&did) {
- if fields.iter().any(|&field_name| name == field_name) {
+ Def::Struct(did) | Def::Union(did) if resolution.depth == 0 => {
+ if let Some(field_names) = self.field_names.get(&did) {
+ if field_names.iter().any(|&field_name| name == field_name) {
return Field;
}
}
if let Some(path_res) = self.resolve_possibly_assoc_item(expr.id,
maybe_qself.as_ref(), path, ValueNS) {
// Check if struct variant
- let is_struct_variant = if let Def::Variant(variant_id) = path_res.base_def {
- self.structs.contains_key(&variant_id)
- } else {
- false
+ let is_struct_variant = match path_res.base_def {
+ Def::VariantCtor(_, CtorKind::Fictive) => true,
+ _ => false,
};
if is_struct_variant {
- let _ = self.structs.contains_key(&path_res.base_def.def_id());
let path_name = path_names_to_string(path, 0);
let mut err = resolve_struct_error(self,
}
} else {
// Be helpful if the name refers to a struct
- // (The pattern matching def_tys where the id is in self.structs
- // matches on regular structs while excluding tuple- and enum-like
- // structs, which wouldn't result in this error.)
let path_name = path_names_to_string(path, 0);
let type_res = self.with_no_errors(|this| {
this.resolve_path(expr.id, path, 0, TypeNS)
let mut context = UnresolvedNameContext::Other;
let mut def = Def::Err;
if !msg.is_empty() {
- msg = format!(". Did you mean {}?", msg);
+ msg = format!("did you mean {}?", msg);
} else {
// we display a help message if this is a module
let name_path = path.segments.iter()
} else {
// danger, shouldn't be ident?
names.push(token::intern("<opaque>"));
- collect_mod(names, module);
+ collect_mod(names, module.parent.unwrap());
}
}
collect_mod(&mut names, module);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use Resolver;
-use rustc::middle::cstore::LoadedMacro;
-use rustc::util::nodemap::FnvHashMap;
-use std::cell::RefCell;
-use std::mem;
+use {Module, Resolver};
+use build_reduced_graph::BuildReducedGraphVisitor;
+use rustc::hir::def_id::{CRATE_DEF_INDEX, DefIndex};
+use rustc::hir::map::{self, DefCollector};
use std::rc::Rc;
-use syntax::ast::{self, Name};
+use syntax::ast;
use syntax::errors::DiagnosticBuilder;
use syntax::ext::base::{self, MultiModifier, MultiDecorator, MultiItemModifier};
-use syntax::ext::base::{NormalTT, Resolver as SyntaxResolver, SyntaxExtension};
+use syntax::ext::base::{NormalTT, SyntaxExtension};
use syntax::ext::expand::{Expansion, Invocation, InvocationKind};
-use syntax::ext::hygiene::Mark;
+use syntax::ext::hygiene::{Mark, SyntaxContext};
use syntax::ext::tt::macro_rules;
-use syntax::feature_gate::{self, emit_feature_err};
-use syntax::parse::token::{self, intern};
+use syntax::parse::token::intern;
use syntax::util::lev_distance::find_best_match_for_name;
-use syntax::visit::{self, Visitor};
-use syntax_pos::Span;
+use syntax_pos::{Span, DUMMY_SP};
+
+// FIXME(jseyfried) Merge with `::NameBinding`.
+pub struct NameBinding {
+ pub ext: Rc<SyntaxExtension>,
+ pub expansion: Mark,
+ pub shadowing: bool,
+ pub span: Span,
+}
-#[derive(Clone, Default)]
-pub struct ExpansionData {
- module: Rc<ModuleData>,
+#[derive(Clone)]
+pub struct ExpansionData<'a> {
+ backtrace: SyntaxContext,
+ pub module: Module<'a>,
+ def_index: DefIndex,
+ // True if this expansion is in a `const_integer` position, for example `[u32; m!()]`.
+ // c.f. `DefCollector::visit_ast_const_integer`.
+ const_integer: bool,
}
-// FIXME(jseyfried): merge with `::ModuleS`.
-#[derive(Default)]
-struct ModuleData {
- parent: Option<Rc<ModuleData>>,
- macros: RefCell<FnvHashMap<Name, Rc<SyntaxExtension>>>,
- macros_escape: bool,
+impl<'a> ExpansionData<'a> {
+ pub fn root(graph_root: Module<'a>) -> Self {
+ ExpansionData {
+ backtrace: SyntaxContext::empty(),
+ module: graph_root,
+ def_index: CRATE_DEF_INDEX,
+ const_integer: false,
+ }
+ }
}
impl<'a> base::Resolver for Resolver<'a> {
self.session.next_node_id()
}
- fn visit_expansion(&mut self, mark: Mark, expansion: &Expansion) {
- expansion.visit_with(&mut ExpansionVisitor {
- current_module: self.expansion_data[&mark.as_u32()].module.clone(),
- resolver: self,
+ fn get_module_scope(&mut self, id: ast::NodeId) -> Mark {
+ let mark = Mark::fresh();
+ let module = self.module_map[&id];
+ self.expansion_data.insert(mark, ExpansionData {
+ backtrace: SyntaxContext::empty(),
+ module: module,
+ def_index: module.def_id().unwrap().index,
+ const_integer: false,
});
+ mark
+ }
+
+ fn visit_expansion(&mut self, mark: Mark, expansion: &Expansion) {
+ self.collect_def_ids(mark, expansion);
+ self.current_module = self.expansion_data[&mark].module;
+ expansion.visit_with(&mut BuildReducedGraphVisitor { resolver: self, expansion: mark });
}
fn add_macro(&mut self, scope: Mark, mut def: ast::MacroDef) {
self.session.span_err(def.span, "user-defined macros may not be named `macro_rules`");
}
if def.use_locally {
- let ext = macro_rules::compile(&self.session.parse_sess, &def);
- self.add_ext(scope, def.ident, Rc::new(ext));
+ let ExpansionData { mut module, backtrace, .. } = self.expansion_data[&scope];
+ while module.macros_escape {
+ module = module.parent.unwrap();
+ }
+ let binding = NameBinding {
+ ext: Rc::new(macro_rules::compile(&self.session.parse_sess, &def)),
+ expansion: backtrace.data().prev_ctxt.data().outer_mark,
+ shadowing: self.resolve_macro_name(scope, def.ident.name, false).is_some(),
+ span: def.span,
+ };
+ module.macros.borrow_mut().insert(def.ident.name, binding);
+ self.macro_names.insert(def.ident.name);
}
if def.export {
def.id = self.next_node_id();
}
}
- fn add_ext(&mut self, scope: Mark, ident: ast::Ident, ext: Rc<SyntaxExtension>) {
+ fn add_ext(&mut self, ident: ast::Ident, ext: Rc<SyntaxExtension>) {
if let NormalTT(..) = *ext {
self.macro_names.insert(ident.name);
}
-
- let mut module = self.expansion_data[&scope.as_u32()].module.clone();
- while module.macros_escape {
- module = module.parent.clone().unwrap();
- }
- module.macros.borrow_mut().insert(ident.name, ext);
+ self.graph_root.macros.borrow_mut().insert(ident.name, NameBinding {
+ ext: ext,
+ expansion: Mark::root(),
+ shadowing: false,
+ span: DUMMY_SP,
+ });
}
fn add_expansions_at_stmt(&mut self, id: ast::NodeId, macros: Vec<Mark>) {
fn find_attr_invoc(&mut self, attrs: &mut Vec<ast::Attribute>) -> Option<ast::Attribute> {
for i in 0..attrs.len() {
let name = intern(&attrs[i].name());
- match self.expansion_data[&0].module.macros.borrow().get(&name) {
- Some(ext) => match **ext {
+ match self.expansion_data[&Mark::root()].module.macros.borrow().get(&name) {
+ Some(binding) => match *binding.ext {
MultiModifier(..) | MultiDecorator(..) | SyntaxExtension::AttrProcMacro(..) => {
return Some(attrs.remove(i))
}
InvocationKind::Attr { ref attr, .. } => (intern(&*attr.name()), attr.span),
};
- let mut module = self.expansion_data[&scope.as_u32()].module.clone();
- loop {
- if let Some(ext) = module.macros.borrow().get(&name) {
- return Some(ext.clone());
- }
- match module.parent.clone() {
- Some(parent) => module = parent,
- None => break,
- }
- }
-
- let mut err =
- self.session.struct_span_err(span, &format!("macro undefined: '{}!'", name));
- self.suggest_macro_name(&name.as_str(), &mut err);
- err.emit();
- None
+ self.resolve_macro_name(scope, name, true).or_else(|| {
+ let mut err =
+ self.session.struct_span_err(span, &format!("macro undefined: '{}!'", name));
+ self.suggest_macro_name(&name.as_str(), &mut err);
+ err.emit();
+ None
+ })
}
fn resolve_derive_mode(&mut self, ident: ast::Ident) -> Option<Rc<MultiItemModifier>> {
}
impl<'a> Resolver<'a> {
- fn suggest_macro_name(&mut self, name: &str, err: &mut DiagnosticBuilder<'a>) {
- if let Some(suggestion) = find_best_match_for_name(self.macro_names.iter(), name, None) {
- if suggestion != name {
- err.help(&format!("did you mean `{}!`?", suggestion));
- } else {
- err.help(&format!("have you added the `#[macro_use]` on the module/import?"));
- }
- }
- }
-
- fn insert_custom_derive(&mut self, name: &str, ext: Rc<MultiItemModifier>, sp: Span) {
- if !self.session.features.borrow().rustc_macro {
- let diagnostic = &self.session.parse_sess.span_diagnostic;
- let msg = "loading custom derive macro crates is experimentally supported";
- emit_feature_err(diagnostic, "rustc_macro", sp, feature_gate::GateIssue::Language, msg);
- }
- if self.derive_modes.insert(token::intern(name), ext).is_some() {
- self.session.span_err(sp, &format!("cannot shadow existing derive mode `{}`", name));
- }
- }
-}
-
-struct ExpansionVisitor<'b, 'a: 'b> {
- resolver: &'b mut Resolver<'a>,
- current_module: Rc<ModuleData>,
-}
-
-impl<'a, 'b> ExpansionVisitor<'a, 'b> {
- fn visit_invoc(&mut self, id: ast::NodeId) {
- self.resolver.expansion_data.insert(id.as_u32(), ExpansionData {
- module: self.current_module.clone(),
- });
- }
+ pub fn resolve_macro_name(&mut self, scope: Mark, name: ast::Name, record_used: bool)
+ -> Option<Rc<SyntaxExtension>> {
+ let ExpansionData { mut module, backtrace, .. } = self.expansion_data[&scope];
+ loop {
+ if let Some(binding) = module.macros.borrow().get(&name) {
+ let mut backtrace = backtrace.data();
+ while binding.expansion != backtrace.outer_mark {
+ if backtrace.outer_mark != Mark::root() {
+ backtrace = backtrace.prev_ctxt.data();
+ continue
+ }
- // does this attribute list contain "macro_use"?
- fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool {
- for attr in attrs {
- if attr.check_name("macro_escape") {
- let msg = "macro_escape is a deprecated synonym for macro_use";
- let mut err = self.resolver.session.struct_span_warn(attr.span, msg);
- if let ast::AttrStyle::Inner = attr.node.style {
- err.help("consider an outer attribute, #[macro_use] mod ...").emit();
- } else {
- err.emit();
+ if record_used && binding.shadowing &&
+ self.macro_shadowing_errors.insert(binding.span) {
+ let msg = format!("`{}` is already in scope", name);
+ self.session.struct_span_err(binding.span, &msg)
+ .note("macro-expanded `macro_rules!`s and `#[macro_use]`s \
+ may not shadow existing macros (see RFC 1560)")
+ .emit();
+ }
+ break
}
- } else if !attr.check_name("macro_use") {
- continue;
+ return Some(binding.ext.clone());
}
-
- if !attr.is_word() {
- self.resolver.session.span_err(attr.span,
- "arguments to macro_use are not allowed here");
+ match module.parent {
+ Some(parent) => module = parent,
+ None => break,
}
- return true;
}
-
- false
+ None
}
-}
-macro_rules! method {
- ($visit:ident: $ty:ty, $invoc:path, $walk:ident) => {
- fn $visit(&mut self, node: &$ty) {
- match node.node {
- $invoc(..) => self.visit_invoc(node.id),
- _ => visit::$walk(self, node),
+ fn suggest_macro_name(&mut self, name: &str, err: &mut DiagnosticBuilder<'a>) {
+ if let Some(suggestion) = find_best_match_for_name(self.macro_names.iter(), name, None) {
+ if suggestion != name {
+ err.help(&format!("did you mean `{}!`?", suggestion));
+ } else {
+ err.help(&format!("have you added the `#[macro_use]` on the module/import?"));
}
}
}
-}
-impl<'a, 'b> Visitor for ExpansionVisitor<'a, 'b> {
- method!(visit_trait_item: ast::TraitItem, ast::TraitItemKind::Macro, walk_trait_item);
- method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item);
- method!(visit_stmt: ast::Stmt, ast::StmtKind::Mac, walk_stmt);
- method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr);
- method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat);
- method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty);
+ fn collect_def_ids(&mut self, mark: Mark, expansion: &Expansion) {
+ let expansion_data = &mut self.expansion_data;
+ let ExpansionData { backtrace, def_index, const_integer, module } = expansion_data[&mark];
+ let visit_macro_invoc = &mut |invoc: map::MacroInvocationData| {
+ expansion_data.entry(invoc.mark).or_insert(ExpansionData {
+ backtrace: backtrace.apply_mark(invoc.mark),
+ def_index: invoc.def_index,
+ const_integer: invoc.const_integer,
+ module: module,
+ });
+ };
- fn visit_item(&mut self, item: &ast::Item) {
- match item.node {
- ast::ItemKind::Mac(..) if item.id == ast::DUMMY_NODE_ID => {} // Scope placeholder
- ast::ItemKind::Mac(..) => self.visit_invoc(item.id),
- ast::ItemKind::Mod(..) => {
- let module_data = ModuleData {
- parent: Some(self.current_module.clone()),
- macros: RefCell::new(FnvHashMap()),
- macros_escape: self.contains_macro_use(&item.attrs),
- };
- let orig_module = mem::replace(&mut self.current_module, Rc::new(module_data));
- visit::walk_item(self, item);
- self.current_module = orig_module;
- }
- ast::ItemKind::ExternCrate(..) => {
- // We need to error on `#[macro_use] extern crate` when it isn't at the
- // crate root, because `$crate` won't work properly.
- // FIXME(jseyfried): This will be nicer once `ModuleData` is merged with `ModuleS`.
- let is_crate_root = self.current_module.parent.as_ref().unwrap().parent.is_none();
- for def in self.resolver.crate_loader.load_macros(item, is_crate_root) {
- match def {
- LoadedMacro::Def(def) => self.resolver.add_macro(Mark::root(), def),
- LoadedMacro::CustomDerive(name, ext) => {
- self.resolver.insert_custom_derive(&name, ext, item.span);
- }
- }
+ let mut def_collector = DefCollector::new(&mut self.definitions);
+ def_collector.visit_macro_invoc = Some(visit_macro_invoc);
+ def_collector.with_parent(def_index, |def_collector| {
+ if const_integer {
+ if let Expansion::Expr(ref expr) = *expansion {
+ def_collector.visit_ast_const_integer(expr);
}
- visit::walk_item(self, item);
}
- _ => visit::walk_item(self, item),
- }
- }
-
- fn visit_block(&mut self, block: &ast::Block) {
- let module_data = ModuleData {
- parent: Some(self.current_module.clone()),
- macros: RefCell::new(FnvHashMap()),
- macros_escape: false,
- };
- let orig_module = mem::replace(&mut self.current_module, Rc::new(module_data));
- visit::walk_block(self, block);
- self.current_module = orig_module;
+ expansion.visit_with(def_collector)
+ });
}
}
};
match (value_result, type_result) {
- // With `#![feature(item_like_imports)]`, all namespaces
- // must be re-exported with extra visibility for an error to occur.
- (Ok(value_binding), Ok(type_binding)) if self.new_import_semantics => {
+ // All namespaces must be re-exported with extra visibility for an error to occur.
+ (Ok(value_binding), Ok(type_binding)) => {
let vis = directive.vis.get();
if !value_binding.pseudo_vis().is_at_least(vis, self) &&
!type_binding.pseudo_vis().is_at_least(vis, self) {
(binding.is_import() || binding.is_extern_crate()) {
let def = binding.def();
if def != Def::Err {
- reexports.push(Export { name: name, def_id: def.def_id() });
+ reexports.push(Export { name: name, def: def });
}
}
use rustc::ty::{self, TyCtxt, ImplOrTraitItem, ImplOrTraitItemContainer};
use std::collections::HashSet;
+use std::collections::hash_map::DefaultHasher;
use std::hash::*;
use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID};
}
}
- // looks up anything, not just a type
- fn lookup_type_ref(&self, ref_id: NodeId) -> Option<DefId> {
+ fn lookup_def_id(&self, ref_id: NodeId) -> Option<DefId> {
self.tcx.expect_def_or_none(ref_id).and_then(|def| {
match def {
- Def::PrimTy(..) => None,
- Def::SelfTy(..) => None,
+ Def::PrimTy(..) | Def::SelfTy(..) => None,
def => Some(def.def_id()),
}
})
}.lower(self.tcx));
}
Def::Struct(..) |
+ Def::Variant(..) |
Def::Union(..) |
Def::Enum(..) |
Def::TyAlias(..) |
- Def::AssociatedTy(..) |
Def::Trait(_) => {
self.dumper.type_ref(TypeRefData {
span: sub_span.expect("No span found for type ref"),
}.lower(self.tcx));
}
Def::Static(..) |
- Def::Const(_) |
- Def::AssociatedConst(..) |
- Def::Local(..) |
- Def::Variant(..) |
- Def::Upvar(..) => {
+ Def::Const(..) |
+ Def::StructCtor(..) |
+ Def::VariantCtor(..) => {
self.dumper.variable_ref(VariableRefData {
span: sub_span.expect("No span found for var ref"),
ref_id: def_id,
scope: scope
}.lower(self.tcx));
}
+ Def::Local(..) |
+ Def::Upvar(..) |
Def::SelfTy(..) |
Def::Label(_) |
Def::TyParam(..) |
Def::Method(..) |
+ Def::AssociatedTy(..) |
+ Def::AssociatedConst(..) |
Def::PrimTy(_) |
Def::Err => {
span_bug!(span,
None => {
if let Some(NodeItem(item)) = self.tcx.map.get_if_local(id) {
if let hir::ItemImpl(_, _, _, _, ref ty, _) = item.node {
- trait_id = self.lookup_type_ref(ty.id);
+ trait_id = self.lookup_def_id(ty.id);
}
}
}
};
let trait_ref = &trait_ref.trait_ref;
- if let Some(id) = self.lookup_type_ref(trait_ref.ref_id) {
+ if let Some(id) = self.lookup_def_id(trait_ref.ref_id) {
let sub_span = self.span.sub_span_for_type_name(trait_ref.path.span);
if !self.span.filter_generated(sub_span, trait_ref.path.span) {
self.dumper.type_ref(TypeRefData {
}
}
}
- Def::Local(..) |
- Def::Static(..) |
+ Def::Fn(..) |
Def::Const(..) |
+ Def::Static(..) |
+ Def::StructCtor(..) |
+ Def::VariantCtor(..) |
Def::AssociatedConst(..) |
+ Def::Local(..) |
+ Def::Upvar(..) |
Def::Struct(..) |
+ Def::Union(..) |
Def::Variant(..) |
- Def::Fn(..) => self.write_sub_paths_truncated(path, false),
+ Def::TyAlias(..) |
+ Def::AssociatedTy(..) => self.write_sub_paths_truncated(path, false),
_ => {}
}
}
None => return,
Some(data) => data,
};
- let mut hasher = SipHasher::new();
+ let mut hasher = DefaultHasher::new();
data.callee_span.hash(&mut hasher);
let hash = hasher.finish();
let qualname = format!("{}::{}", data.name, hash);
match use_item.node {
ast::ViewPathSimple(ident, ref path) => {
let sub_span = self.span.span_for_last_ident(path.span);
- let mod_id = match self.lookup_type_ref(item.id) {
+ let mod_id = match self.lookup_def_id(item.id) {
Some(def_id) => {
let scope = self.cur_scope;
self.process_def_kind(item.id, path.span, sub_span, def_id, scope);
for plid in list {
let scope = self.cur_scope;
let id = plid.node.id;
- if let Some(def_id) = self.lookup_type_ref(id) {
+ if let Some(def_id) = self.lookup_def_id(id) {
let span = plid.span;
self.process_def_kind(id, span, Some(span), def_id, scope);
}
self.process_macro_use(t.span, t.id);
match t.node {
ast::TyKind::Path(_, ref path) => {
- if let Some(id) = self.lookup_type_ref(t.id) {
+ if let Some(id) = self.lookup_def_id(t.id) {
let sub_span = self.span.sub_span_for_type_name(t.span);
if !self.span.filter_generated(sub_span, t.span) {
self.dumper.type_ref(TypeRefData {
}.lower(self.tcx));
}
}
- Def::Variant(..) | Def::Enum(..) |
- Def::TyAlias(..) | Def::Struct(..) => {
+ Def::StructCtor(..) | Def::VariantCtor(..) |
+ Def::Const(..) | Def::AssociatedConst(..) |
+ Def::Struct(..) | Def::Variant(..) |
+ Def::TyAlias(..) | Def::AssociatedTy(..) => {
paths_to_process.push((id, p.clone(), Some(ref_kind)))
}
- // FIXME(nrc) what are these doing here?
- Def::Static(..) |
- Def::Const(..) |
- Def::AssociatedConst(..) => {}
def => error!("unexpected definition kind when processing collected paths: {:?}",
def),
}
Def::Static(..) |
Def::Const(..) |
Def::AssociatedConst(..) |
- Def::Variant(..) => {
+ Def::StructCtor(..) |
+ Def::VariantCtor(..) => {
Some(Data::VariableRefData(VariableRefData {
name: self.span_utils.snippet(sub_span.unwrap()),
span: sub_span.unwrap(),
}))
}
Def::Struct(def_id) |
+ Def::Variant(def_id, ..) |
Def::Union(def_id) |
Def::Enum(def_id) |
Def::TyAlias(def_id) |
+ Def::AssociatedTy(def_id) |
Def::Trait(def_id) |
Def::TyParam(def_id) => {
Some(Data::TypeRefData(TypeRefData {
qualname: String::new() // FIXME: generate the real qualname
}))
}
- _ => None,
+ Def::PrimTy(..) |
+ Def::SelfTy(..) |
+ Def::Label(..) |
+ Def::Err => None,
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use llvm::{self, ValueRef};
+use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector};
use base;
use build::AllocaFcx;
use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
"powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
"s390x" => cabi_s390x::compute_abi_info(ccx, self),
"asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
+ "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
}
}
}
}
+
+pub fn align_up_to(off: usize, a: usize) -> usize {
+ return (off + a - 1) / a * a;
+}
+
+fn align(off: usize, ty: Type, pointer: usize) -> usize {
+ let a = ty_align(ty, pointer);
+ return align_up_to(off, a);
+}
+
+pub fn ty_align(ty: Type, pointer: usize) -> usize {
+ match ty.kind() {
+ Integer => ((ty.int_width() as usize) + 7) / 8,
+ Pointer => pointer,
+ Float => 4,
+ Double => 8,
+ Struct => {
+ if ty.is_packed() {
+ 1
+ } else {
+ let str_tys = ty.field_types();
+ str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t, pointer)))
+ }
+ }
+ Array => {
+ let elt = ty.element_type();
+ ty_align(elt, pointer)
+ }
+ Vector => {
+ let len = ty.vector_length();
+ let elt = ty.element_type();
+ ty_align(elt, pointer) * len
+ }
+ _ => bug!("ty_align: unhandled type")
+ }
+}
+
+pub fn ty_size(ty: Type, pointer: usize) -> usize {
+ match ty.kind() {
+ Integer => ((ty.int_width() as usize) + 7) / 8,
+ Pointer => pointer,
+ Float => 4,
+ Double => 8,
+ Struct => {
+ if ty.is_packed() {
+ let str_tys = ty.field_types();
+ str_tys.iter().fold(0, |s, t| s + ty_size(*t, pointer))
+ } else {
+ let str_tys = ty.field_types();
+ let size = str_tys.iter().fold(0, |s, t| {
+ align(s, *t, pointer) + ty_size(*t, pointer)
+ });
+ align(size, ty, pointer)
+ }
+ }
+ Array => {
+ let len = ty.array_length();
+ let elt = ty.element_type();
+ let eltsz = ty_size(elt, pointer);
+ len * eltsz
+ }
+ Vector => {
+ let len = ty.vector_length();
+ let elt = ty.element_type();
+ let eltsz = ty_size(elt, pointer);
+ len * eltsz
+ },
+ _ => bug!("ty_size: unhandled type")
+ }
+}
let meta = val.meta;
- let offset = st.offset_of_field(ix).bytes();
+ let offset = st.offsets[ix].bytes();
let unaligned_offset = C_uint(bcx.ccx(), offset);
// Get the alignment of the field
let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true);
let mut vals_with_discr = vec![lldiscr];
vals_with_discr.extend_from_slice(vals);
- let mut contents = build_const_struct(ccx, &variant.offset_after_field[..],
- &vals_with_discr[..], variant.packed);
- let needed_padding = l.size(dl).bytes() - variant.min_size().bytes();
+ let mut contents = build_const_struct(ccx, &variant,
+ &vals_with_discr[..]);
+ let needed_padding = l.size(dl).bytes() - variant.min_size.bytes();
if needed_padding > 0 {
contents.push(padding(ccx, needed_padding));
}
layout::Univariant { ref variant, .. } => {
assert_eq!(discr, Disr(0));
let contents = build_const_struct(ccx,
- &variant.offset_after_field[..], vals, variant.packed);
+ &variant, vals);
C_struct(ccx, &contents[..], variant.packed)
}
layout::Vector { .. } => {
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
if discr.0 == nndiscr {
- C_struct(ccx, &build_const_struct(ccx,
- &nonnull.offset_after_field[..],
- vals, nonnull.packed),
+ C_struct(ccx, &build_const_struct(ccx, &nonnull, vals),
false)
} else {
let fields = compute_fields(ccx, t, nndiscr as usize, false);
// field; see #8506.
C_null(type_of::sizing_type_of(ccx, ty))
}).collect::<Vec<ValueRef>>();
- C_struct(ccx, &build_const_struct(ccx,
- &nonnull.offset_after_field[..],
- &vals[..],
- false),
+ C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]),
false)
}
}
/// a two-element struct will locate it at offset 4, and accesses to it
/// will read the wrong memory.
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- offset_after_field: &[layout::Size],
- vals: &[ValueRef],
- packed: bool)
+ st: &layout::Struct,
+ vals: &[ValueRef])
-> Vec<ValueRef> {
- assert_eq!(vals.len(), offset_after_field.len());
+ assert_eq!(vals.len(), st.offsets.len());
if vals.len() == 0 {
return Vec::new();
// offset of current value
let mut offset = 0;
let mut cfields = Vec::new();
- let target_offsets = offset_after_field.iter().map(|i| i.bytes());
- for (&val, target_offset) in vals.iter().zip(target_offsets) {
- assert!(!is_undef(val));
- cfields.push(val);
- offset += machine::llsize_of_alloc(ccx, val_ty(val));
- if !packed {
- let val_align = machine::llalign_of_min(ccx, val_ty(val));
- offset = roundup(offset, val_align);
- }
- if offset != target_offset {
+ let offsets = st.offsets.iter().map(|i| i.bytes());
+ for (&val, target_offset) in vals.iter().zip(offsets) {
+ if offset < target_offset {
cfields.push(padding(ccx, target_offset - offset));
offset = target_offset;
}
+ assert!(!is_undef(val));
+ cfields.push(val);
+ offset += machine::llsize_of_alloc(ccx, val_ty(val));
}
- let size = offset_after_field.last().unwrap();
- if offset < size.bytes() {
- cfields.push(padding(ccx, size.bytes() - offset));
+ if offset < st.min_size.bytes() {
+ cfields.push(padding(ccx, st.min_size.bytes() - offset));
}
cfields
let empty_vec = Vec::new();
let empty_str = String::new();
let args = sess.opts.cg.link_args.as_ref().unwrap_or(&empty_vec);
- let mut args = args.iter().chain(used_link_args.iter());
+ let more_args = &sess.opts.cg.link_arg;
+ let mut args = args.iter().chain(more_args.iter()).chain(used_link_args.iter());
let relocation_model = sess.opts.cg.relocation_model.as_ref()
.unwrap_or(&empty_str);
if (t.options.relocation_model == "pic" || *relocation_model == "pic")
if let Some(ref args) = sess.opts.cg.link_args {
cmd.args(args);
}
+ cmd.args(&sess.opts.cg.link_arg);
cmd.args(&used_link_args);
}
let mut hash_state = scx.symbol_hasher().borrow_mut();
record_time(&tcx.sess.perf_stats.symbol_hash_time, || {
hash_state.reset();
- let mut hasher = Sha256Hasher(&mut hash_state);
+ let hasher = Sha256Hasher(&mut hash_state);
+ let mut hasher = ty::util::TypeIdHasher::new(tcx, hasher);
// the main symbol name is not necessarily unique; hash in the
// compiler's internal def-path, guaranteeing each symbol has a
// truly unique path
- def_path.deterministic_hash_to(tcx, &mut hasher);
+ hasher.def_path(def_path);
// Include the main item-type. Note that, in this case, the
// assertions about `needs_subst` may not hold, but this item-type
// ought to be the same for every reference anyway.
- let mut hasher = ty::util::TypeIdHasher::new(tcx, hasher);
assert!(!item_type.has_erasable_regions());
hasher.visit_ty(item_type);
StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}
+pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
+ b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
+}
+
+pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
+ b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
+}
+
fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
match bcx.tcx().lang_items.require(it) {
Ok(id) => id,
}
}
-pub fn compare_fat_ptrs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- lhs_addr: ValueRef,
- lhs_extra: ValueRef,
- rhs_addr: ValueRef,
- rhs_extra: ValueRef,
- _t: Ty<'tcx>,
- op: hir::BinOp_,
- debug_loc: DebugLoc)
- -> ValueRef {
- match op {
- hir::BiEq => {
- let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
- let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
- And(bcx, addr_eq, extra_eq, debug_loc)
- }
- hir::BiNe => {
- let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
- let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
- Or(bcx, addr_eq, extra_eq, debug_loc)
- }
- hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
- // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1)
- let (op, strict_op) = match op {
- hir::BiLt => (llvm::IntULT, llvm::IntULT),
- hir::BiLe => (llvm::IntULE, llvm::IntULT),
- hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
- hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
- _ => bug!(),
- };
-
- let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
- let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
- let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);
-
- let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
- Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
- }
- _ => {
- bug!("unexpected fat ptr binop");
- }
- }
-}
-
-pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- lhs: ValueRef,
- rhs: ValueRef,
- t: Ty<'tcx>,
- op: hir::BinOp_,
- debug_loc: DebugLoc)
- -> ValueRef {
- match t.sty {
- ty::TyTuple(ref tys) if tys.is_empty() => {
- // We don't need to do actual comparisons for nil.
- // () == () holds but () < () does not.
- match op {
- hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true),
- hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false),
- // refinements would be nice
- _ => bug!("compare_scalar_types: must be a comparison operator"),
- }
- }
- ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
- ICmp(bcx,
- bin_op_to_icmp_predicate(op, false),
- lhs,
- rhs,
- debug_loc)
- }
- ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
- ICmp(bcx,
- bin_op_to_icmp_predicate(op, false),
- lhs,
- rhs,
- debug_loc)
- }
- ty::TyRawPtr(_) => {
- let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR]));
- let lhs_extra = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_EXTRA]));
-
- let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR]));
- let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA]));
- compare_fat_ptrs(bcx,
- lhs_addr,
- lhs_extra,
- rhs_addr,
- rhs_extra,
- t,
- op,
- debug_loc)
- }
- ty::TyInt(_) => {
- ICmp(bcx,
- bin_op_to_icmp_predicate(op, true),
- lhs,
- rhs,
- debug_loc)
- }
- ty::TyFloat(_) => {
- FCmp(bcx,
- bin_op_to_fcmp_predicate(op),
- lhs,
- rhs,
- debug_loc)
- }
- // Should never get here, because t is scalar.
- _ => bug!("non-scalar type passed to compare_scalar_types"),
- }
-}
-
pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs: ValueRef,
rhs: ValueRef,
}
}
+pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
+ let assume_intrinsic = b.ccx.get_intrinsic("llvm.assume");
+ b.call(assume_intrinsic, &[val], None);
+}
+
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
if common::type_is_fat_ptr(cx.tcx(), t) {
- Store(cx,
- ExtractValue(cx, v, abi::FAT_PTR_ADDR),
- get_dataptr(cx, dst));
- Store(cx,
- ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
- get_meta(cx, dst));
+ let lladdr = ExtractValue(cx, v, abi::FAT_PTR_ADDR);
+ let llextra = ExtractValue(cx, v, abi::FAT_PTR_EXTRA);
+ store_fat_ptr(cx, lladdr, llextra, dst, t);
} else {
Store(cx, from_immediate(cx, v), dst);
}
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
src: ValueRef,
- _ty: Ty<'tcx>)
- -> (ValueRef, ValueRef) {
- // FIXME: emit metadata
- (Load(cx, get_dataptr(cx, src)),
- Load(cx, get_meta(cx, src)))
+ ty: Ty<'tcx>)
+ -> (ValueRef, ValueRef)
+{
+ if cx.unreachable.get() {
+ // FIXME: remove me
+ return (Load(cx, get_dataptr(cx, src)),
+ Load(cx, get_meta(cx, src)));
+ }
+
+ load_fat_ptr_builder(&B(cx), src, ty)
+}
+
+pub fn load_fat_ptr_builder<'a, 'tcx>(
+ b: &Builder<'a, 'tcx>,
+ src: ValueRef,
+ t: Ty<'tcx>)
+ -> (ValueRef, ValueRef)
+{
+
+ let ptr = get_dataptr_builder(b, src);
+ let ptr = if t.is_region_ptr() || t.is_unique() {
+ b.load_nonnull(ptr)
+ } else {
+ b.load(ptr)
+ };
+
+ // FIXME: emit metadata on `meta`.
+ let meta = b.load(get_meta_builder(b, src));
+
+ (ptr, meta)
}
pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
#![allow(non_upper_case_globals)]
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{FnType, ArgType};
+use abi::{self, FnType, ArgType};
use context::CrateContext;
use type_::Type;
-use std::cmp;
-
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
- let a = ty_align(ty);
- return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- ty_align(elt)
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- ty_align(elt) * len
- }
- _ => bug!("ty_align: unhandled type")
- }
-}
-
fn ty_size(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- let str_tys = ty.field_types();
- str_tys.iter().fold(0, |s, t| s + ty_size(*t))
- } else {
- let str_tys = ty.field_types();
- let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
- align(size, ty)
- }
- }
- Array => {
- let len = ty.array_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- _ => bug!("ty_size: unhandled type")
- }
+ abi::ty_size(ty, 8)
}
fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
#![allow(non_upper_case_globals)]
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{FnType, ArgType};
+use abi::{self, align_up_to, FnType, ArgType};
use context::CrateContext;
use type_::Type;
type TyAlignFn = fn(ty: Type) -> usize;
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize {
let a = align_fn(ty);
return align_up_to(off, a);
}
fn general_ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 4,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, general_ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- general_ty_align(elt)
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- general_ty_align(elt) * len
- }
- _ => bug!("ty_align: unhandled type")
- }
+ abi::ty_align(ty, 4)
}
// For more information see:
use libc::c_uint;
use std::cmp;
use llvm;
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{ArgType, FnType};
+use llvm::{Integer, Pointer, Float, Double, Vector};
+use abi::{self, align_up_to, ArgType, FnType};
use context::CrateContext;
use type_::Type;
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
- let a = ty_align(ty);
- return align_up_to(off, a);
-}
-
fn ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 4,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- ty_align(elt)
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- ty_align(elt) * len
- }
- _ => bug!("ty_align: unhandled type")
- }
+ abi::ty_align(ty, 4)
}
fn ty_size(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 4,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- let str_tys = ty.field_types();
- str_tys.iter().fold(0, |s, t| s + ty_size(*t))
- } else {
- let str_tys = ty.field_types();
- let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
- align(size, ty)
- }
- }
- Array => {
- let len = ty.array_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- _ => bug!("ty_size: unhandled type")
- }
+ abi::ty_size(ty, 4)
}
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
use libc::c_uint;
use std::cmp;
use llvm;
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{ArgType, FnType};
+use llvm::{Integer, Pointer, Float, Double, Vector};
+use abi::{self, align_up_to, ArgType, FnType};
use context::CrateContext;
use type_::Type;
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
- let a = ty_align(ty);
- return align_up_to(off, a);
-}
-
fn ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- ty_align(elt)
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- ty_align(elt) * len
- }
- _ => bug!("ty_align: unhandled type")
- }
+ abi::ty_align(ty, 8)
}
fn ty_size(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- let str_tys = ty.field_types();
- str_tys.iter().fold(0, |s, t| s + ty_size(*t))
- } else {
- let str_tys = ty.field_types();
- let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
- align(size, ty)
- }
- }
- Array => {
- let len = ty.array_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- _ => bug!("ty_size: unhandled type")
- }
+ abi::ty_size(ty, 8)
}
fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
use libc::c_uint;
use llvm;
-use llvm::{Integer, Pointer, Float, Double, Struct, Array};
-use abi::{FnType, ArgType};
+use llvm::{Integer, Pointer, Float, Double, Vector};
+use abi::{self, align_up_to, FnType, ArgType};
use context::CrateContext;
use type_::Type;
use std::cmp;
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
- let a = ty_align(ty);
- return align_up_to(off, a);
-}
-
fn ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 4,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- ty_align(elt)
- }
- _ => bug!("ty_size: unhandled type")
+ if ty.kind() == Vector {
+ bug!("ty_size: unhandled type")
+ } else {
+ abi::ty_align(ty, 4)
}
}
fn ty_size(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 4,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- let str_tys = ty.field_types();
- str_tys.iter().fold(0, |s, t| s + ty_size(*t))
- } else {
- let str_tys = ty.field_types();
- let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
- align(size, ty)
- }
- }
- Array => {
- let len = ty.array_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- _ => bug!("ty_size: unhandled type")
+ if ty.kind() == Vector {
+ bug!("ty_size: unhandled type")
+ } else {
+ abi::ty_size(ty, 4)
}
}
// Alignment of 128 bit types is not currently handled, this will
// need to be fixed when PowerPC vector support is added.
-use llvm::{Integer, Pointer, Float, Double, Struct, Array};
-use abi::{FnType, ArgType};
+use llvm::{Integer, Pointer, Float, Double, Struct, Vector, Array};
+use abi::{self, FnType, ArgType};
use context::CrateContext;
use type_::Type;
-use std::cmp;
-
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type) -> usize {
- let a = ty_align(ty);
- return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- ty_align(elt)
- }
- _ => bug!("ty_align: unhandled type")
- }
-}
-
fn ty_size(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- let str_tys = ty.field_types();
- str_tys.iter().fold(0, |s, t| s + ty_size(*t))
- } else {
- let str_tys = ty.field_types();
- let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
- align(size, ty)
- }
- }
- Array => {
- let len = ty.array_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- _ => bug!("ty_size: unhandled type")
+ if ty.kind() == Vector {
+ bug!("ty_size: unhandled type")
+ } else {
+ abi::ty_size(ty, 8)
}
}
// for a pre-z13 machine or using -mno-vx.
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{FnType, ArgType};
+use abi::{align_up_to, FnType, ArgType};
use context::CrateContext;
use type_::Type;
use std::cmp;
-fn align_up_to(off: usize, a: usize) -> usize {
- return (off + a - 1) / a * a;
-}
-
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return align_up_to(off, a);
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute, Vector};
-use abi::{ArgType, FnType};
+use abi::{self, ArgType, FnType};
use context::CrateContext;
use type_::Type;
-use std::cmp;
-
#[derive(Clone, Copy, PartialEq)]
enum RegClass {
NoClass,
}
fn ty_align(ty: Type) -> usize {
- match ty.kind() {
- Integer => ((ty.int_width() as usize) + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- if ty.is_packed() {
- 1
- } else {
- let str_tys = ty.field_types();
- str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
- }
- }
- Array => {
- let elt = ty.element_type();
- ty_align(elt)
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- ty_align(elt) * len
- }
- _ => bug!("ty_align: unhandled type")
- }
+ abi::ty_align(ty, 8)
}
fn ty_size(ty: Type) -> usize {
- match ty.kind() {
- Integer => (ty.int_width() as usize + 7) / 8,
- Pointer => 8,
- Float => 4,
- Double => 8,
- Struct => {
- let str_tys = ty.field_types();
- if ty.is_packed() {
- str_tys.iter().fold(0, |s, t| s + ty_size(*t))
- } else {
- let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
- align(size, ty)
- }
- }
- Array => {
- let len = ty.array_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
- Vector => {
- let len = ty.vector_length();
- let elt = ty.element_type();
- let eltsz = ty_size(elt);
- len * eltsz
- }
-
- _ => bug!("ty_size: unhandled type")
- }
+ abi::ty_size(ty, 8)
}
fn all_mem(cls: &mut [RegClass]) {
llreffn: ValueRef)
-> ValueRef
{
+ if let Some(&llfn) = ccx.instances().borrow().get(&method_instance) {
+ return llfn;
+ }
+
debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={:?})",
closure_def_id, substs, Value(llreffn));
// Create the by-value helper.
let function_name = method_instance.symbol_name(ccx.shared());
- let lloncefn = declare::declare_fn(ccx, &function_name, llonce_fn_ty);
+ let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty);
attributes::set_frame_pointer_elimination(ccx, lloncefn);
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
fcx.finish(bcx, DebugLoc::None);
+ ccx.instances().borrow_mut().insert(method_instance, lloncefn);
+
lloncefn
}
let concrete_substs = monomorphize::apply_param_substs(scx,
param_substs,
&fn_substs);
- assert!(concrete_substs.is_normalized_for_trans());
+ assert!(concrete_substs.is_normalized_for_trans(),
+ "concrete_substs not normalized for trans: {:?}",
+ concrete_substs);
TransItem::Fn(Instance::new(def_id, concrete_substs))
}
Layout::FatPointer { .. } => true,
Layout::Univariant { ref variant, .. } => {
// There must be only 2 fields.
- if variant.offset_after_field.len() != 2 {
+ if variant.offsets.len() != 2 {
return false;
}
// Find all the scopes with variables defined in them.
let mut has_variables = BitVector::new(mir.visibility_scopes.len());
- for var in &mir.var_decls {
- has_variables.insert(var.source_info.scope.index());
+ for var in mir.vars_iter() {
+ let decl = &mir.local_decls[var];
+ has_variables.insert(decl.source_info.unwrap().scope.index());
}
// Instantiate all scopes.
use llvm::{self, ValueRef};
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DILexicalBlock};
+use rustc::hir::def::CtorKind;
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::hir;
impl<'tcx> StructMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> {
- if self.variant.kind == ty::VariantKind::Unit {
- return Vec::new();
- }
-
let field_size = if self.is_simd {
let fty = monomorphize::field_ty(cx.tcx(),
self.substs,
};
self.variant.fields.iter().enumerate().map(|(i, f)| {
- let name = if self.variant.kind == ty::VariantKind::Tuple {
+ let name = if self.variant.ctor_kind == CtorKind::Fn {
format!("__{}", i)
} else {
f.name.to_string()
// For the metadata of the wrapper struct, we need to create a
// MemberDescription of the struct's single field.
let sole_struct_member_description = MemberDescription {
- name: match non_null_variant.kind {
- ty::VariantKind::Tuple => "__0".to_string(),
- ty::VariantKind::Struct => {
+ name: match non_null_variant.ctor_kind {
+ CtorKind::Fn => "__0".to_string(),
+ CtorKind::Fictive => {
non_null_variant.fields[0].name.to_string()
}
- ty::VariantKind::Unit => bug!()
+ CtorKind::Const => bug!()
},
llvm_type: non_null_llvm_type,
type_metadata: non_null_type_metadata,
containing_scope);
// Get the argument names from the enum variant info
- let mut arg_names: Vec<_> = match variant.kind {
- ty::VariantKind::Unit => vec![],
- ty::VariantKind::Tuple => {
+ let mut arg_names: Vec<_> = match variant.ctor_kind {
+ CtorKind::Const => vec![],
+ CtorKind::Fn => {
variant.fields
.iter()
.enumerate()
.map(|(i, _)| format!("__{}", i))
.collect()
}
- ty::VariantKind::Struct => {
+ CtorKind::Fictive => {
variant.fields
.iter()
.map(|f| f.name.to_string())
let layout = ccx.layout_of(t);
debug!("DST {} layout: {:?}", t, layout);
- // Returns size in bytes of all fields except the last one
- // (we will be recursing on the last one).
- fn local_prefix_bytes(variant: &ty::layout::Struct) -> u64 {
- let fields = variant.offset_after_field.len();
- if fields > 1 {
- variant.offset_after_field[fields - 2].bytes()
- } else {
- 0
- }
- }
-
let (sized_size, sized_align) = match *layout {
ty::layout::Layout::Univariant { ref variant, .. } => {
- (local_prefix_bytes(variant), variant.align.abi())
+ (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi())
}
_ => {
bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}",
use rustc::mir::traversal;
use common::{self, Block, BlockAndBuilder};
use glue;
-use std::iter;
use super::rvalue;
pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>,
analyzer.visit_mir(mir);
- let local_types = mir.arg_decls.iter().map(|a| a.ty)
- .chain(mir.var_decls.iter().map(|v| v.ty))
- .chain(mir.temp_decls.iter().map(|t| t.ty))
- .chain(iter::once(mir.return_ty));
- for (index, ty) in local_types.enumerate() {
+ for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
let ty = bcx.monomorphize(&ty);
debug!("local {} has type {:?}", index, ty);
if ty.is_scalar() ||
fn new(mir: &'mir mir::Mir<'tcx>,
bcx: &'mir BlockAndBuilder<'bcx, 'tcx>)
-> LocalAnalyzer<'mir, 'bcx, 'tcx> {
- let local_count = mir.count_locals();
LocalAnalyzer {
mir: mir,
bcx: bcx,
- lvalue_locals: BitVector::new(local_count),
- seen_assigned: BitVector::new(local_count)
+ lvalue_locals: BitVector::new(mir.local_decls.len()),
+ seen_assigned: BitVector::new(mir.local_decls.len())
}
}
location: Location) {
debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue);
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
self.mark_assigned(index);
if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) {
self.mark_as_lvalue(index);
// Allow uses of projections of immediate pair fields.
if let mir::Lvalue::Projection(ref proj) = *lvalue {
- if self.mir.local_index(&proj.base).is_some() {
+ if let mir::Lvalue::Local(_) = proj.base {
let ty = proj.base.ty(self.mir, self.bcx.tcx());
let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx()));
}
}
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
match context {
LvalueContext::Call => {
self.mark_assigned(index);
use super::{MirContext, LocalRef};
use super::analyze::CleanupKind;
use super::constant::Const;
-use super::lvalue::{LvalueRef, load_fat_ptr};
+use super::lvalue::{LvalueRef};
use super::operand::OperandRef;
use super::operand::OperandValue::*;
}
let llval = if let Some(cast_ty) = ret.cast {
- let index = mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
- let op = match self.locals[index] {
+ let op = match self.locals[mir::RETURN_POINTER] {
LocalRef::Operand(Some(op)) => op,
LocalRef::Operand(None) => bug!("use of return before def"),
LocalRef::Lvalue(tr_lvalue) => {
}
load
} else {
- let op = self.trans_consume(&bcx, &mir::Lvalue::ReturnPointer);
+ let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
op.pack_if_pair(&bcx).immediate()
};
bcx.ret(llval);
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
- let (lldata, llextra) = load_fat_ptr(bcx, ptr);
+ let (lldata, llextra) = base::load_fat_ptr_builder(bcx, ptr, ty);
Pair(lldata, llextra)
} else {
// trans_argument will load this if it needs to
if fn_ret_ty.is_ignore() {
return ReturnDest::Nothing;
}
- let dest = if let Some(index) = self.mir.local_index(dest) {
+ let dest = if let mir::Lvalue::Local(index) = *dest {
let ret_ty = self.monomorphized_lvalue_ty(dest);
match self.locals[index] {
LocalRef::Lvalue(dest) => dest,
fn new(ccx: &'a CrateContext<'a, 'tcx>,
mir: &'a mir::Mir<'tcx>,
substs: &'tcx Substs<'tcx>,
- args: IndexVec<mir::Arg, Const<'tcx>>)
+ args: IndexVec<mir::Local, Const<'tcx>>)
-> MirConstContext<'a, 'tcx> {
let mut context = MirConstContext {
ccx: ccx,
mir: mir,
substs: substs,
- locals: (0..mir.count_locals()).map(|_| None).collect(),
+ locals: (0..mir.local_decls.len()).map(|_| None).collect(),
};
for (i, arg) in args.into_iter().enumerate() {
- let index = mir.local_index(&mir::Lvalue::Arg(mir::Arg::new(i))).unwrap();
+ // Locals after local 0 are the function arguments
+ let index = mir::Local::new(i + 1);
context.locals[index] = Some(arg);
}
context
fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
mut instance: Instance<'tcx>,
- args: IndexVec<mir::Arg, Const<'tcx>>)
+ args: IndexVec<mir::Local, Const<'tcx>>)
-> Result<Const<'tcx>, ConstEvalErr> {
// Try to resolve associated constants.
if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) {
mir::TerminatorKind::Goto { target } => target,
mir::TerminatorKind::Return => {
failure?;
- let index = self.mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
- return Ok(self.locals[index].unwrap_or_else(|| {
+ return Ok(self.locals[mir::RETURN_POINTER].unwrap_or_else(|| {
span_bug!(span, "no returned value in constant");
}));
}
}
fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) {
- if let Some(index) = self.mir.local_index(dest) {
+ if let mir::Lvalue::Local(index) = *dest {
self.locals[index] = Some(value);
} else {
span_bug!(span, "assignment to {:?} in constant", dest);
-> Result<ConstLvalue<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
return Ok(self.locals[index].unwrap_or_else(|| {
span_bug!(span, "{:?} not initialized", lvalue)
}).as_lvalue());
}
let lvalue = match *lvalue {
- mir::Lvalue::Var(_) |
- mir::Lvalue::Temp(_) |
- mir::Lvalue::Arg(_) |
- mir::Lvalue::ReturnPointer => bug!(), // handled above
+ mir::Lvalue::Local(_) => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
ConstLvalue {
base: Base::Static(consts::get_static(self.ccx, def_id)),
}
match *kind {
- mir::AggregateKind::Vec => {
+ mir::AggregateKind::Array => {
self.const_array(dest_ty, &fields)
}
mir::AggregateKind::Adt(..) |
use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
-use abi;
use adt;
use base;
-use builder::Builder;
use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef};
use consts;
use machine;
}
}
-pub fn get_meta(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
- b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
-}
-
-pub fn get_dataptr(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
- b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
-}
-
-pub fn load_fat_ptr(b: &Builder, fat_ptr: ValueRef) -> (ValueRef, ValueRef) {
- (b.load(get_dataptr(b, fat_ptr)), b.load(get_meta(b, fat_ptr)))
-}
-
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_lvalue(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
let ccx = bcx.ccx();
let tcx = bcx.tcx();
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
LocalRef::Lvalue(lvalue) => {
return lvalue;
}
let result = match *lvalue {
- mir::Lvalue::Var(_) |
- mir::Lvalue::Temp(_) |
- mir::Lvalue::Arg(_) |
- mir::Lvalue::ReturnPointer => bug!(), // handled above
+ mir::Lvalue::Local(_) => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
let const_ty = self.monomorphized_lvalue_ty(lvalue);
LvalueRef::new_sized(consts::get_static(ccx, def_id),
lvalue: &mir::Lvalue<'tcx>, f: F) -> U
where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
{
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
LocalRef::Lvalue(lvalue) => f(self, lvalue),
LocalRef::Operand(None) => {
pub use self::constant::trans_static_initializer;
-use self::lvalue::{LvalueRef, get_dataptr, get_meta};
+use self::lvalue::{LvalueRef};
use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
// Allocate variable and temp allocas
mircx.locals = {
let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals);
- let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| {
+
+ let mut allocate_local = |local| {
+ let decl = &mir.local_decls[local];
let ty = bcx.monomorphize(&decl.ty);
- let debug_scope = mircx.scopes[decl.source_info.scope];
- let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
- let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap();
- if !lvalue_locals.contains(local.index()) && !dbg {
- return LocalRef::new_operand(bcx.ccx(), ty);
- }
+ if let Some(name) = decl.name {
+ // User variable
+ let source_info = decl.source_info.unwrap();
+ let debug_scope = mircx.scopes[source_info.scope];
+ let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
- let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str());
- if dbg {
- let dbg_loc = mircx.debug_loc(decl.source_info);
- if let DebugLoc::ScopeAt(scope, span) = dbg_loc {
- bcx.with_block(|bcx| {
- declare_local(bcx, decl.name, ty, scope,
- VariableAccess::DirectVariable { alloca: lvalue.llval },
- VariableKind::LocalVariable, span);
- });
- } else {
- panic!("Unexpected");
+ if !lvalue_locals.contains(local.index()) && !dbg {
+ debug!("alloc: {:?} ({}) -> operand", local, name);
+ return LocalRef::new_operand(bcx.ccx(), ty);
}
- }
- LocalRef::Lvalue(lvalue)
- });
-
- let locals = mir.temp_decls.iter().enumerate().map(|(i, decl)| {
- (mir::Lvalue::Temp(mir::Temp::new(i)), decl.ty)
- }).chain(iter::once((mir::Lvalue::ReturnPointer, mir.return_ty)));
-
- args.into_iter().chain(vars).chain(locals.map(|(lvalue, ty)| {
- let ty = bcx.monomorphize(&ty);
- let local = mir.local_index(&lvalue).unwrap();
- if lvalue == mir::Lvalue::ReturnPointer && fcx.fn_ty.ret.is_indirect() {
- let llretptr = llvm::get_param(fcx.llfn, 0);
- LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
- } else if lvalue_locals.contains(local.index()) {
- LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", lvalue)))
+
+ debug!("alloc: {:?} ({}) -> lvalue", local, name);
+ let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
+ if dbg {
+ let dbg_loc = mircx.debug_loc(source_info);
+ if let DebugLoc::ScopeAt(scope, span) = dbg_loc {
+ bcx.with_block(|bcx| {
+ declare_local(bcx, name, ty, scope,
+ VariableAccess::DirectVariable { alloca: lvalue.llval },
+ VariableKind::LocalVariable, span);
+ });
+ } else {
+ panic!("Unexpected");
+ }
+ }
+ LocalRef::Lvalue(lvalue)
} else {
- // If this is an immediate local, we do not create an
- // alloca in advance. Instead we wait until we see the
- // definition and update the operand there.
- LocalRef::new_operand(bcx.ccx(), ty)
+ // Temporary or return pointer
+ if local == mir::RETURN_POINTER && fcx.fn_ty.ret.is_indirect() {
+ debug!("alloc: {:?} (return pointer) -> lvalue", local);
+ let llretptr = llvm::get_param(fcx.llfn, 0);
+ LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
+ } else if lvalue_locals.contains(local.index()) {
+ debug!("alloc: {:?} -> lvalue", local);
+ LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local)))
+ } else {
+ // If this is an immediate local, we do not create an
+ // alloca in advance. Instead we wait until we see the
+ // definition and update the operand there.
+ debug!("alloc: {:?} -> operand", local);
+ LocalRef::new_operand(bcx.ccx(), ty)
+ }
}
- })).collect()
+ };
+
+ let retptr = allocate_local(mir::RETURN_POINTER);
+ iter::once(retptr)
+ .chain(args.into_iter())
+ .chain(mir.vars_and_temps_iter().map(allocate_local))
+ .collect()
};
// Branch to the START block
None
};
- mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| {
+ mir.args_iter().enumerate().map(|(arg_index, local)| {
+ let arg_decl = &mir.local_decls[local];
let arg_ty = bcx.monomorphize(&arg_decl.ty);
- let local = mir.local_index(&mir::Lvalue::Arg(mir::Arg::new(arg_index))).unwrap();
- if arg_decl.spread {
+
+ if Some(local) == mir.spread_arg {
// This argument (e.g. the last argument in the "rust-call" ABI)
// is a tuple that was spread at the ABI level and now we have
// to reconstruct it into a tuple local variable, from multiple
// they are the two sub-fields of a single aggregate field.
let meta = &fcx.fn_ty.args[idx];
idx += 1;
- arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, dst));
- meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, dst));
+ arg.store_fn_arg(bcx, &mut llarg_idx,
+ base::get_dataptr_builder(bcx, dst));
+ meta.store_fn_arg(bcx, &mut llarg_idx,
+ base::get_meta_builder(bcx, dst));
} else {
arg.store_fn_arg(bcx, &mut llarg_idx, dst);
}
// so make an alloca to store them in.
let meta = &fcx.fn_ty.args[idx];
idx += 1;
- arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, lltemp));
- meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, lltemp));
+ arg.store_fn_arg(bcx, &mut llarg_idx,
+ base::get_dataptr_builder(bcx, lltemp));
+ meta.store_fn_arg(bcx, &mut llarg_idx,
+ base::get_meta_builder(bcx, lltemp));
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
bcx.with_block(|bcx| arg_scope.map(|scope| {
// Is this a regular argument?
if arg_index > 0 || mir.upvar_decls.is_empty() {
- declare_local(bcx, arg_decl.debug_name, arg_ty, scope,
- VariableAccess::DirectVariable { alloca: llval },
+ declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty,
+ scope, VariableAccess::DirectVariable { alloca: llval },
VariableKind::ArgumentVariable(arg_index + 1),
bcx.fcx().span.unwrap_or(DUMMY_SP));
return;
{
debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
- let val = if common::type_is_imm_pair(bcx.ccx(), ty) {
+ let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
+ let (lldata, llextra) = base::load_fat_ptr_builder(bcx, llval, ty);
+ OperandValue::Pair(lldata, llextra)
+ } else if common::type_is_imm_pair(bcx.ccx(), ty) {
+ let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx(), ty).unwrap();
let a_ptr = bcx.struct_gep(llval, 0);
let b_ptr = bcx.struct_gep(llval, 1);
- // This is None only for fat pointers, which don't
- // need any special load-time behavior anyway.
- let pair_fields = common::type_pair_fields(bcx.ccx(), ty);
- let (a, b) = if let Some([a_ty, b_ty]) = pair_fields {
- (base::load_ty_builder(bcx, a_ptr, a_ty),
- base::load_ty_builder(bcx, b_ptr, b_ty))
- } else {
- (bcx.load(a_ptr), bcx.load(b_ptr))
- };
- OperandValue::Pair(a, b)
+ OperandValue::Pair(
+ base::load_ty_builder(bcx, a_ptr, a_ty),
+ base::load_ty_builder(bcx, b_ptr, b_ty)
+ )
} else if common::type_is_immediate(bcx.ccx(), ty) {
OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty))
} else {
// watch out for locals that do not have an
// alloca; they are handled somewhat differently
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
LocalRef::Operand(Some(o)) => {
return o;
// Moves out of pair fields are trivial.
if let &mir::Lvalue::Projection(ref proj) = lvalue {
- if let Some(index) = self.mir.local_index(&proj.base) {
+ if let mir::Lvalue::Local(index) = proj.base {
if let LocalRef::Operand(Some(o)) = self.locals[index] {
match (o.val, &proj.elem) {
(OperandValue::Pair(a, b),
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty};
use rustc::ty::cast::{CastTy, IntTy};
+use rustc::ty::layout::Layout;
use rustc::mir::repr as mir;
use asm;
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
+use common::{C_integral};
use debuginfo::DebugLoc;
use adt;
use machine;
+use type_::Type;
use type_of;
use tvec;
use value::Value;
use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
-use super::lvalue::{LvalueRef, get_dataptr};
+use super::lvalue::{LvalueRef};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_rvalue(&mut self,
let tr_elem = self.trans_operand(&bcx, elem);
let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx(), size);
- let base = get_dataptr(&bcx, dest.llval);
+ let base = base::get_dataptr_builder(&bcx, dest.llval);
let bcx = bcx.map_block(|block| {
tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| {
self.store_operand_direct(block, llslot, tr_elem);
}
OperandValue::Pair(..) => bug!("Unexpected Pair operand")
};
- (discr, adt::is_discr_signed(&l))
+ let (signed, min, max) = match l {
+ &Layout::CEnum { signed, min, max, .. } => {
+ (signed, min, max)
+ }
+ _ => bug!("CEnum {:?} is not an enum", operand)
+ };
+
+ if max > min {
+ // We want `table[e as usize]` to not
+ // have bound checks, and this is the most
+ // convenient place to put the `assume`.
+
+ base::call_assume(&bcx, bcx.icmp(
+ llvm::IntULE,
+ discr,
+ C_integral(common::val_ty(discr), max, false)
+ ))
+ }
+
+ (discr, signed)
} else {
(operand.immediate(), operand.ty.is_signed())
};
match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
- bcx.with_block(|bcx| {
- base::compare_fat_ptrs(bcx,
- lhs_addr, lhs_extra,
- rhs_addr, rhs_extra,
- lhs.ty, op.to_hir_binop(),
- debug_loc)
- })
+ self.trans_fat_ptr_binop(&bcx, op,
+ lhs_addr, lhs_extra,
+ rhs_addr, rhs_extra,
+ lhs.ty)
}
_ => bug!()
}
input_ty: Ty<'tcx>) -> ValueRef {
let is_float = input_ty.is_fp();
let is_signed = input_ty.is_signed();
+ let is_nil = input_ty.is_nil();
+ let is_bool = input_ty.is_bool();
match op {
mir::BinOp::Add => if is_float {
bcx.fadd(lhs, rhs)
DebugLoc::None)
})
}
- mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt |
- mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => {
- bcx.with_block(|bcx| {
- base::compare_scalar_types(bcx, lhs, rhs, input_ty,
- op.to_hir_binop(), DebugLoc::None)
+ mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
+ mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
+ C_bool(bcx.ccx(), match op {
+ mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
+ mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
+ _ => unreachable!()
})
+ } else if is_float {
+ bcx.fcmp(
+ base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
+ lhs, rhs
+ )
+ } else {
+ let (lhs, rhs) = if is_bool {
+ // FIXME(#36856) -- extend the bools into `i8` because
+ // LLVM's i1 comparisons are broken.
+ (bcx.zext(lhs, Type::i8(bcx.ccx())),
+ bcx.zext(rhs, Type::i8(bcx.ccx())))
+ } else {
+ (lhs, rhs)
+ };
+
+ bcx.icmp(
+ base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
+ lhs, rhs
+ )
+ }
+ }
+ }
+
+ pub fn trans_fat_ptr_binop(&mut self,
+ bcx: &BlockAndBuilder<'bcx, 'tcx>,
+ op: mir::BinOp,
+ lhs_addr: ValueRef,
+ lhs_extra: ValueRef,
+ rhs_addr: ValueRef,
+ rhs_extra: ValueRef,
+ _input_ty: Ty<'tcx>)
+ -> ValueRef {
+ match op {
+ mir::BinOp::Eq => {
+ bcx.and(
+ bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+ bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
+ )
+ }
+ mir::BinOp::Ne => {
+ bcx.or(
+ bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
+ bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
+ )
+ }
+ mir::BinOp::Le | mir::BinOp::Lt |
+ mir::BinOp::Ge | mir::BinOp::Gt => {
+ // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
+ let (op, strict_op) = match op {
+ mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
+ mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
+ mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
+ mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
+ _ => bug!(),
+ };
+
+ bcx.or(
+ bcx.icmp(strict_op, lhs_addr, rhs_addr),
+ bcx.and(
+ bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+ bcx.icmp(op, lhs_extra, rhs_extra)
+ )
+ )
+ }
+ _ => {
+ bug!("unexpected fat ptr binop");
}
}
}
debug_loc.apply(bcx.fcx());
match statement.kind {
mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
match self.locals[index] {
LocalRef::Lvalue(tr_dest) => {
self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc)
lvalue: &mir::Lvalue<'tcx>,
intrinsic: base::Lifetime)
-> BlockAndBuilder<'bcx, 'tcx> {
- if let Some(index) = self.mir.local_index(lvalue) {
+ if let mir::Lvalue::Local(index) = *lvalue {
if let LocalRef::Lvalue(tr_lval) = self.locals[index] {
intrinsic.call(&bcx, tr_lval.llval);
}
use rustc::ty::TyCtxt;
use rustc::ty::item_path::characteristic_def_id_of_type;
use std::cmp::Ordering;
-use std::hash::{Hash, Hasher, SipHasher};
+use std::hash::{Hash, Hasher};
use std::sync::Arc;
+use std::collections::hash_map::DefaultHasher;
use symbol_map::SymbolMap;
use syntax::ast::NodeId;
use syntax::parse::token::{self, InternedString};
}
pub fn compute_symbol_name_hash(&self, tcx: TyCtxt, symbol_map: &SymbolMap) -> u64 {
- let mut state = SipHasher::new();
+ let mut state = DefaultHasher::new();
let all_items = self.items_in_deterministic_order(tcx, symbol_map);
for (item, _) in all_items {
let symbol_name = symbol_map.get(item).unwrap();
let mut initial_partitioning = place_root_translation_items(scx,
trans_items);
- debug_dump(tcx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter());
+ debug_dump(scx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter());
// If the partitioning should produce a fixed count of codegen units, merge
// until that count is reached.
if let PartitioningStrategy::FixedUnitCount(count) = strategy {
merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name[..]);
- debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
+ debug_dump(scx, "POST MERGING:", initial_partitioning.codegen_units.iter());
}
// In the next step, we use the inlining map to determine which addtional
let post_inlining = place_inlined_translation_items(initial_partitioning,
inlining_map);
- debug_dump(tcx, "POST INLINING:", post_inlining.0.iter());
+ debug_dump(scx, "POST INLINING:", post_inlining.0.iter());
// Finally, sort by codegen unit name, so that we get deterministic results
let mut result = post_inlining.0;
index)[..])
}
-fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+fn debug_dump<'a, 'b, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
label: &str,
cgus: I)
where I: Iterator<Item=&'b CodegenUnit<'tcx>>,
if cfg!(debug_assertions) {
debug!("{}", label);
for cgu in cgus {
+ let symbol_map = SymbolMap::build(scx, cgu.items
+ .iter()
+ .map(|(&trans_item, _)| trans_item));
debug!("CodegenUnit {}:", cgu.name);
for (trans_item, linkage) in &cgu.items {
- debug!(" - {} [{:?}]", trans_item.to_string(tcx), linkage);
+ let symbol_name = symbol_map.get_or_compute(scx, *trans_item);
+ let symbol_hash_start = symbol_name.rfind('h');
+ let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..])
+ .unwrap_or("<no hash>");
+
+ debug!(" - {} [{:?}] [{}]",
+ trans_item.to_string(scx.tcx()),
+ linkage,
+ symbol_hash);
}
debug!("");
// For now, require that parenthetical notation be used
// only with `Fn()` etc.
if !self.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar {
- emit_feature_err(&self.tcx().sess.parse_sess.span_diagnostic,
+ emit_feature_err(&self.tcx().sess.parse_sess,
"unboxed_closures", span, GateIssue::Language,
"\
the precise format of `Fn`-family traits' \
// For now, require that parenthetical notation be used
// only with `Fn()` etc.
if !self.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar {
- emit_feature_err(&self.tcx().sess.parse_sess.span_diagnostic,
+ emit_feature_err(&self.tcx().sess.parse_sess,
"unboxed_closures", span, GateIssue::Language,
"\
parenthetical notation is only stable when used with `Fn`-family traits");
-> Result<ty::PolyTraitRef<'tcx>, ErrorReported>
{
if bounds.is_empty() {
- span_err!(self.tcx().sess, span, E0220,
+ struct_span_err!(self.tcx().sess, span, E0220,
"associated type `{}` not found for `{}`",
assoc_name,
- ty_param_name);
+ ty_param_name)
+ .span_label(span, &format!("associated type `{}` not found", assoc_name))
+ .emit();
return Err(ErrorReported);
}
}
let result_ty = match ast_ty.node {
- hir::TyVec(ref ty) => {
+ hir::TySlice(ref ty) => {
tcx.mk_slice(self.ast_ty_to_ty(rscope, &ty))
}
hir::TyObjectSum(ref ty, ref bounds) => {
ty
}
- hir::TyFixedLengthVec(ref ty, ref e) => {
+ hir::TyArray(ref ty, ref e) => {
if let Ok(length) = eval_length(tcx.global_tcx(), &e, "array length") {
tcx.mk_array(self.ast_ty_to_ty(rscope, &ty), length)
} else {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use hir::def::Def;
+use rustc::hir::{self, PatKind};
+use rustc::hir::def::{Def, CtorKind};
+use rustc::hir::pat_util::EnumerateAndAdjustIterator;
use rustc::infer::{self, InferOk, TypeOrigin};
-use hir::pat_util::EnumerateAndAdjustIterator;
-use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference, VariantKind};
+use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference};
use check::{FnCtxt, Expectation};
-use lint;
use util::nodemap::FnvHashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use syntax::ptr::P;
use syntax_pos::Span;
-use rustc::hir::{self, PatKind};
-use rustc::hir::print as pprust;
-
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
pub fn check_pat(&self, pat: &'gcx hir::Pat, expected: Ty<'tcx>) {
let tcx = self.tcx;
tcx.types.err
}
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
let expected_ty = self.structurally_resolved_type(pat.span, expected);
let (inner_ty, slice_ty) = match expected_ty.sty {
ty::TyArray(inner_ty, size) => {
expected: Ty<'tcx>) -> Ty<'tcx>
{
let tcx = self.tcx;
- let report_unexpected_def = || {
+ let report_unexpected_def = |def: Def| {
span_err!(tcx.sess, pat.span, E0533,
- "`{}` does not name a unit variant, unit struct or a constant",
- pprust::path_to_string(path));
+ "expected unit struct/variant or constant, found {} `{}`",
+ def.kind_name(), path);
};
// Resolve the path and check the definition for errors.
return tcx.types.err;
}
Def::Method(..) => {
- report_unexpected_def();
+ report_unexpected_def(def);
return tcx.types.err;
}
- Def::Variant(..) | Def::Struct(..) => {
- let variant = tcx.expect_variant_def(def);
- if variant.kind != VariantKind::Unit {
- report_unexpected_def();
- return tcx.types.err;
- }
- }
+ Def::VariantCtor(_, CtorKind::Const) |
+ Def::StructCtor(_, CtorKind::Const) |
Def::Const(..) | Def::AssociatedConst(..) => {} // OK
- _ => bug!("unexpected pattern definition {:?}", def)
+ _ => bug!("unexpected pattern definition: {:?}", def)
}
// Type check the path.
self.check_pat(&pat, tcx.types.err);
}
};
- let report_unexpected_def = |is_lint| {
- let msg = format!("`{}` does not name a tuple variant or a tuple struct",
- pprust::path_to_string(path));
- if is_lint {
- tcx.sess.add_lint(lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT,
- pat.id, pat.span, msg);
- } else {
- struct_span_err!(tcx.sess, pat.span, E0164, "{}", msg)
- .span_label(pat.span, &format!("not a tuple variant or struct")).emit();
- on_error();
- }
+ let report_unexpected_def = |def: Def| {
+ let msg = format!("expected tuple struct/variant, found {} `{}`",
+ def.kind_name(), path);
+ struct_span_err!(tcx.sess, pat.span, E0164, "{}", msg)
+ .span_label(pat.span, &format!("not a tuple variant or struct")).emit();
+ on_error();
};
// Resolve the path and check the definition for errors.
on_error();
return tcx.types.err;
}
- Def::Const(..) | Def::AssociatedConst(..) | Def::Method(..) => {
- report_unexpected_def(false);
+ Def::AssociatedConst(..) | Def::Method(..) => {
+ report_unexpected_def(def);
return tcx.types.err;
}
- Def::Variant(..) | Def::Struct(..) => {
+ Def::VariantCtor(_, CtorKind::Fn) |
+ Def::StructCtor(_, CtorKind::Fn) => {
tcx.expect_variant_def(def)
}
- _ => bug!("unexpected pattern definition {:?}", def)
+ _ => bug!("unexpected pattern definition: {:?}", def)
};
- if variant.kind == VariantKind::Unit && subpats.is_empty() && ddpos.is_some() {
- // Matching unit structs with tuple variant patterns (`UnitVariant(..)`)
- // is allowed for backward compatibility.
- report_unexpected_def(true);
- } else if variant.kind != VariantKind::Tuple {
- report_unexpected_def(false);
- return tcx.types.err;
- }
// Type check the path.
let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id);
- let pat_ty = if pat_ty.is_fn() {
- // Replace constructor type with constructed type for tuple struct patterns.
- tcx.no_late_bound_regions(&pat_ty.fn_ret()).unwrap()
- } else {
- // Leave the type as is for unit structs (backward compatibility).
- pat_ty
- };
+ // Replace constructor type with constructed type for tuple struct patterns.
+ let pat_ty = tcx.no_late_bound_regions(&pat_ty.fn_ret()).expect("expected fn type");
self.demand_eqtype(pat.span, expected, pat_ty);
// Type check subpatterns.
self.check_pat(&subpat, field_ty);
}
} else {
- let subpats_ending = if subpats.len() == 1 {
- ""
- } else {
- "s"
- };
- let fields_ending = if variant.fields.len() == 1 {
- ""
- } else {
- "s"
- };
+ let subpats_ending = if subpats.len() == 1 { "" } else { "s" };
+ let fields_ending = if variant.fields.len() == 1 { "" } else { "s" };
struct_span_err!(tcx.sess, pat.span, E0023,
"this pattern has {} field{}, but the corresponding {} has {} field{}",
subpats.len(), subpats_ending, def.kind_name(),
for &Spanned { node: ref field, span } in fields {
let field_ty = match used_fields.entry(field.name) {
Occupied(occupied) => {
- let mut err = struct_span_err!(tcx.sess, span, E0025,
- "field `{}` bound multiple times \
- in the pattern",
- field.name);
- span_note!(&mut err, *occupied.get(),
- "field `{}` previously bound here",
- field.name);
- err.emit();
+ struct_span_err!(tcx.sess, span, E0025,
+ "field `{}` bound multiple times \
+ in the pattern",
+ field.name)
+ .span_label(span,
+ &format!("multiple uses of `{}` in pattern", field.name))
+ .span_label(*occupied.get(), &format!("first use of `{}`", field.name))
+ .emit();
tcx.types.err
}
Vacant(vacant) => {
expected: Expectation<'tcx>) -> Ty<'tcx>
{
let original_callee_ty = self.check_expr(callee_expr);
+ let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty);
- let mut autoderef = self.autoderef(callee_expr.span, original_callee_ty);
+ let mut autoderef = self.autoderef(callee_expr.span, expr_ty);
let result = autoderef.by_ref().flat_map(|(adj_ty, idx)| {
self.try_overloaded_call_step(call_expr, callee_expr, adj_ty, idx)
}).next();
}
if impl_m.fty.sig.0.inputs.len() != trait_m.fty.sig.0.inputs.len() {
- span_err!(tcx.sess, impl_m_span, E0050,
+ let trait_number_args = trait_m.fty.sig.0.inputs.len();
+ let impl_number_args = impl_m.fty.sig.0.inputs.len();
+ let trait_m_node_id = tcx.map.as_local_node_id(trait_m.def_id);
+ let trait_span = if let Some(trait_id) = trait_m_node_id {
+ match tcx.map.expect_trait_item(trait_id).node {
+ TraitItem_::MethodTraitItem(ref trait_m_sig, _) => {
+ if let Some(arg) = trait_m_sig.decl.inputs.get(
+ if trait_number_args > 0 {
+ trait_number_args - 1
+ } else {
+ 0
+ }) {
+ Some(arg.pat.span)
+ } else {
+ trait_item_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m)
+ }
+ } else {
+ trait_item_span
+ };
+ let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap();
+ let impl_span = match tcx.map.expect_impl_item(impl_m_node_id).node {
+ ImplItemKind::Method(ref impl_m_sig, _) => {
+ if let Some(arg) = impl_m_sig.decl.inputs.get(
+ if impl_number_args > 0 {
+ impl_number_args - 1
+ } else {
+ 0
+ }) {
+ arg.pat.span
+ } else {
+ impl_m_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m)
+ };
+ let mut err = struct_span_err!(tcx.sess, impl_span, E0050,
"method `{}` has {} parameter{} \
but the declaration in trait `{}` has {}",
trait_m.name,
- impl_m.fty.sig.0.inputs.len(),
- if impl_m.fty.sig.0.inputs.len() == 1 {""} else {"s"},
+ impl_number_args,
+ if impl_number_args == 1 {""} else {"s"},
tcx.item_path_str(trait_m.def_id),
- trait_m.fty.sig.0.inputs.len());
+ trait_number_args);
+ if let Some(trait_span) = trait_span {
+ err.span_label(trait_span,
+ &format!("trait requires {}",
+ &if trait_number_args != 1 {
+ format!("{} parameters", trait_number_args)
+ } else {
+ format!("{} parameter", trait_number_args)
+ }));
+ }
+ err.span_label(impl_span,
+ &format!("expected {}, found {}",
+ &if trait_number_args != 1 {
+ format!("{} parameters", trait_number_args)
+ } else {
+ format!("{} parameter", trait_number_args)
+ },
+ impl_number_args));
+ err.emit();
return;
}
span_err!(tcx.sess, it.span, E0444,
"platform-specific intrinsic has invalid number of \
arguments: found {}, expected {}",
- intr.inputs.len(), sig.inputs.len());
+ sig.inputs.len(), intr.inputs.len());
return
}
let input_pairs = intr.inputs.iter().zip(&sig.inputs);
use std::ops::Deref;
-struct ConfirmContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a>{
+struct ConfirmContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
span: Span,
self_expr: &'gcx hir::Expr,
unadjusted_self_ty: Ty<'tcx>,
pick: probe::Pick<'tcx>,
supplied_method_types: Vec<Ty<'tcx>>)
- -> ty::MethodCallee<'tcx>
- {
+ -> ty::MethodCallee<'tcx> {
debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, supplied_method_types={:?})",
unadjusted_self_ty,
pick,
span: Span,
self_expr: &'gcx hir::Expr,
call_expr: &'gcx hir::Expr)
- -> ConfirmContext<'a, 'gcx, 'tcx>
- {
- ConfirmContext { fcx: fcx, span: span, self_expr: self_expr, call_expr: call_expr }
+ -> ConfirmContext<'a, 'gcx, 'tcx> {
+ ConfirmContext {
+ fcx: fcx,
+ span: span,
+ self_expr: self_expr,
+ call_expr: call_expr,
+ }
}
fn confirm(&mut self,
unadjusted_self_ty: Ty<'tcx>,
pick: probe::Pick<'tcx>,
supplied_method_types: Vec<Ty<'tcx>>)
- -> ty::MethodCallee<'tcx>
- {
+ -> ty::MethodCallee<'tcx> {
// Adjust the self expression the user provided and obtain the adjusted type.
let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick);
// Create substitutions for the method's type parameters.
let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick);
- let all_substs =
- self.instantiate_method_substs(
- &pick,
- supplied_method_types,
- rcvr_substs);
+ let all_substs = self.instantiate_method_substs(&pick, supplied_method_types, rcvr_substs);
debug!("all_substs={:?}", all_substs);
// Create the final signature for the method, replacing late-bound regions.
- let InstantiatedMethodSig {
- method_sig, method_predicates
- } = self.instantiate_method_sig(&pick, all_substs);
+ let InstantiatedMethodSig { method_sig, method_predicates } =
+ self.instantiate_method_sig(&pick, all_substs);
let method_self_ty = method_sig.inputs[0];
// Unify the (adjusted) self type with what the method expects.
// Create the method type
let def_id = pick.item.def_id();
let method_ty = pick.item.as_opt_method().unwrap();
- let fty = self.tcx.mk_fn_def(def_id, all_substs,
+ let fty = self.tcx.mk_fn_def(def_id,
+ all_substs,
self.tcx.mk_bare_fn(ty::BareFnTy {
- sig: ty::Binder(method_sig),
- unsafety: method_ty.fty.unsafety,
- abi: method_ty.fty.abi.clone(),
- }));
+ sig: ty::Binder(method_sig),
+ unsafety: method_ty.fty.unsafety,
+ abi: method_ty.fty.abi.clone(),
+ }));
// Add any trait/regions obligations specified on the method's type parameters.
self.add_obligations(fty, all_substs, &method_predicates);
let callee = ty::MethodCallee {
def_id: def_id,
ty: fty,
- substs: all_substs
+ substs: all_substs,
};
if let Some(hir::MutMutable) = pick.autoref {
fn adjust_self_ty(&mut self,
unadjusted_self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>)
- -> Ty<'tcx>
- {
+ -> Ty<'tcx> {
let (autoref, unsize) = if let Some(mutbl) = pick.autoref {
let region = self.next_region_var(infer::Autoref(self.span));
let autoref = AutoPtr(region, mutbl);
- (Some(autoref), pick.unsize.map(|target| {
- target.adjust_for_autoref(self.tcx, Some(autoref))
- }))
+ (Some(autoref),
+ pick.unsize.map(|target| target.adjust_for_autoref(self.tcx, Some(autoref))))
} else {
// No unsizing should be performed without autoref (at
// least during method dispach). This is because we
autoderef.finalize(LvaluePreference::NoPreference, Some(self.self_expr));
// Write out the final adjustment.
- self.write_adjustment(self.self_expr.id, AdjustDerefRef(AutoDerefRef {
- autoderefs: pick.autoderefs,
- autoref: autoref,
- unsize: unsize
- }));
+ self.write_adjustment(self.self_expr.id,
+ AdjustDerefRef(AutoDerefRef {
+ autoderefs: pick.autoderefs,
+ autoref: autoref,
+ unsize: unsize,
+ }));
if let Some(target) = unsize {
target
fn fresh_receiver_substs(&mut self,
self_ty: Ty<'tcx>,
pick: &probe::Pick<'tcx>)
- -> &'tcx Substs<'tcx>
- {
+ -> &'tcx Substs<'tcx> {
match pick.kind {
probe::InherentImplPick => {
let impl_def_id = pick.item.container().id();
assert!(self.tcx.impl_trait_ref(impl_def_id).is_none(),
- "impl {:?} is not an inherent impl", impl_def_id);
+ "impl {:?} is not an inherent impl",
+ impl_def_id);
self.impl_self_ty(self.span, impl_def_id).substs
}
// argument type), but those cases have already
// been ruled out when we deemed the trait to be
// "object safe".
- let original_poly_trait_ref =
- principal.with_self_ty(this.tcx, object_ty);
- let upcast_poly_trait_ref =
- this.upcast(original_poly_trait_ref, trait_def_id);
+ let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty);
+ let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id);
let upcast_trait_ref =
this.replace_late_bound_regions_with_fresh_var(&upcast_poly_trait_ref);
debug!("original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}",
// the impl ([$A,$B,$C]) not the receiver type ([$C]).
let impl_polytype = self.impl_self_ty(self.span, impl_def_id);
let impl_trait_ref =
- self.instantiate_type_scheme(
- self.span,
- impl_polytype.substs,
- &self.tcx.impl_trait_ref(impl_def_id).unwrap());
+ self.instantiate_type_scheme(self.span,
+ impl_polytype.substs,
+ &self.tcx.impl_trait_ref(impl_def_id).unwrap());
impl_trait_ref.substs
}
}
}
- fn extract_existential_trait_ref<R, F>(&mut self,
- self_ty: Ty<'tcx>,
- mut closure: F) -> R
+ fn extract_existential_trait_ref<R, F>(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R
where F: FnMut(&mut ConfirmContext<'a, 'gcx, 'tcx>,
Ty<'tcx>,
- ty::PolyExistentialTraitRef<'tcx>) -> R,
+ ty::PolyExistentialTraitRef<'tcx>)
+ -> R
{
// If we specified that this is an object method, then the
// self-type ought to be something that can be dereferenced to
// etc).
// FIXME: this feels, like, super dubious
- self.fcx.autoderef(self.span, self_ty)
+ self.fcx
+ .autoderef(self.span, self_ty)
.filter_map(|(ty, _)| {
match ty.sty {
ty::TyTrait(ref data) => Some(closure(self, ty, data.principal)),
})
.next()
.unwrap_or_else(|| {
- span_bug!(
- self.span,
- "self-type `{}` for ObjectPick never dereferenced to an object",
- self_ty)
+ span_bug!(self.span,
+ "self-type `{}` for ObjectPick never dereferenced to an object",
+ self_ty)
})
}
pick: &probe::Pick<'tcx>,
mut supplied_method_types: Vec<Ty<'tcx>>,
substs: &Substs<'tcx>)
- -> &'tcx Substs<'tcx>
- {
+ -> &'tcx Substs<'tcx> {
// Determine the values for the generic parameters of the method.
// If they were not explicitly supplied, just construct fresh
// variables.
if num_supplied_types > 0 && num_supplied_types != num_method_types {
if num_method_types == 0 {
- span_err!(self.tcx.sess, self.span, E0035,
- "does not take type parameters");
+ struct_span_err!(self.tcx.sess,
+ self.span,
+ E0035,
+ "does not take type parameters")
+ .span_label(self.span, &"called with unneeded type parameters")
+ .emit();
} else {
- span_err!(self.tcx.sess, self.span, E0036,
- "incorrect number of type parameters given for this method: \
- expected {}, found {}",
- num_method_types, num_supplied_types);
+ struct_span_err!(self.tcx.sess,
+ self.span,
+ E0036,
+ "incorrect number of type parameters given for this method: \
+ expected {}, found {}",
+ num_method_types,
+ num_supplied_types)
+ .span_label(self.span,
+ &format!("Passed {} type argument{}, expected {}",
+ num_supplied_types,
+ if num_supplied_types != 1 { "s" } else { "" },
+ num_method_types))
+ .emit();
}
supplied_method_types = vec![self.tcx.types.err; num_method_types];
}
//
// FIXME -- permit users to manually specify lifetimes
let supplied_start = substs.params().len() + method.generics.regions.len();
- Substs::for_item(self.tcx, method.def_id, |def, _| {
+ Substs::for_item(self.tcx,
+ method.def_id,
+ |def, _| {
let i = def.index as usize;
if i < substs.params().len() {
substs.region_at(i)
} else {
self.region_var_for_def(self.span, def)
}
- }, |def, cur_substs| {
+ },
+ |def, cur_substs| {
let i = def.index as usize;
if i < substs.params().len() {
substs.type_at(i)
})
}
- fn unify_receivers(&mut self,
- self_ty: Ty<'tcx>,
- method_self_ty: Ty<'tcx>)
- {
- match self.sub_types(false, TypeOrigin::Misc(self.span),
- self_ty, method_self_ty) {
+ fn unify_receivers(&mut self, self_ty: Ty<'tcx>, method_self_ty: Ty<'tcx>) {
+ match self.sub_types(false, TypeOrigin::Misc(self.span), self_ty, method_self_ty) {
Ok(InferOk { obligations, .. }) => {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty());
}
Err(_) => {
- span_bug!(
- self.span,
- "{} was a subtype of {} but now is not?",
- self_ty, method_self_ty);
+ span_bug!(self.span,
+ "{} was a subtype of {} but now is not?",
+ self_ty,
+ method_self_ty);
}
}
}
fn instantiate_method_sig(&mut self,
pick: &probe::Pick<'tcx>,
all_substs: &'tcx Substs<'tcx>)
- -> InstantiatedMethodSig<'tcx>
- {
+ -> InstantiatedMethodSig<'tcx> {
debug!("instantiate_method_sig(pick={:?}, all_substs={:?})",
pick,
all_substs);
// Instantiate the bounds on the method with the
// type/early-bound-regions substitutions performed. There can
// be no late-bound regions appearing here.
- let method_predicates = pick.item.as_opt_method().unwrap()
- .predicates.instantiate(self.tcx, all_substs);
- let method_predicates = self.normalize_associated_types_in(self.span,
- &method_predicates);
+ let method_predicates = pick.item
+ .as_opt_method()
+ .unwrap()
+ .predicates
+ .instantiate(self.tcx, all_substs);
+ let method_predicates = self.normalize_associated_types_in(self.span, &method_predicates);
- debug!("method_predicates after subst = {:?}",
- method_predicates);
+ debug!("method_predicates after subst = {:?}", method_predicates);
// Instantiate late-bound regions and substitute the trait
// parameters into the method type to get the actual method type.
// NB: Instantiate late-bound regions first so that
// `instantiate_type_scheme` can normalize associated types that
// may reference those regions.
- let method_sig = self.replace_late_bound_regions_with_fresh_var(
- &pick.item.as_opt_method().unwrap().fty.sig);
+ let method_sig = self.replace_late_bound_regions_with_fresh_var(&pick.item
+ .as_opt_method()
+ .unwrap()
+ .fty
+ .sig);
debug!("late-bound lifetimes from method instantiated, method_sig={:?}",
method_sig);
let method_sig = self.instantiate_type_scheme(self.span, all_substs, &method_sig);
- debug!("type scheme substituted, method_sig={:?}",
- method_sig);
+ debug!("type scheme substituted, method_sig={:?}", method_sig);
InstantiatedMethodSig {
method_sig: method_sig,
all_substs,
method_predicates);
- self.add_obligations_for_parameters(
- traits::ObligationCause::misc(self.span, self.body_id),
- method_predicates);
+ self.add_obligations_for_parameters(traits::ObligationCause::misc(self.span, self.body_id),
+ method_predicates);
// this is a projection from a trait reference, so we have to
// make sure that the trait reference inputs are well-formed.
for (i, &expr) in exprs.iter().rev().enumerate() {
// Count autoderefs.
let autoderef_count = match self.tables
- .borrow()
- .adjustments
- .get(&expr.id) {
+ .borrow()
+ .adjustments
+ .get(&expr.id) {
Some(&AdjustDerefRef(ref adj)) => adj.autoderefs,
Some(_) | None => 0,
};
debug!("convert_lvalue_derefs_to_mutable: i={} expr={:?} \
autoderef_count={}",
- i, expr, autoderef_count);
+ i,
+ expr,
+ autoderef_count);
if autoderef_count > 0 {
let mut autoderef = self.autoderef(expr.span, self.node_ty(expr.id));
autoderef.nth(autoderef_count).unwrap_or_else(|| {
- span_bug!(expr.span, "expr was deref-able {} times but now isn't?",
+ span_bug!(expr.span,
+ "expr was deref-able {} times but now isn't?",
autoderef_count);
});
autoderef.finalize(PreferMutLvalue, Some(expr));
// (ab)use the normal type checking paths.
let adj = self.tables.borrow().adjustments.get(&base_expr.id).cloned();
let (autoderefs, unsize) = match adj {
- Some(AdjustDerefRef(adr)) => match adr.autoref {
- None => {
- assert!(adr.unsize.is_none());
- (adr.autoderefs, None)
- }
- Some(AutoPtr(..)) => {
- (adr.autoderefs, adr.unsize.map(|target| {
- target.builtin_deref(false, NoPreference)
- .expect("fixup: AutoPtr is not &T").ty
- }))
+ Some(AdjustDerefRef(adr)) => {
+ match adr.autoref {
+ None => {
+ assert!(adr.unsize.is_none());
+ (adr.autoderefs, None)
+ }
+ Some(AutoPtr(..)) => {
+ (adr.autoderefs,
+ adr.unsize.map(|target| {
+ target.builtin_deref(false, NoPreference)
+ .expect("fixup: AutoPtr is not &T")
+ .ty
+ }))
+ }
+ Some(_) => {
+ span_bug!(base_expr.span,
+ "unexpected adjustment autoref {:?}",
+ adr);
+ }
}
- Some(_) => {
- span_bug!(
- base_expr.span,
- "unexpected adjustment autoref {:?}",
- adr);
- }
- },
+ }
None => (0, None),
Some(_) => {
- span_bug!(
- base_expr.span,
- "unexpected adjustment type");
+ span_bug!(base_expr.span, "unexpected adjustment type");
}
};
(target, true)
} else {
(self.adjust_expr_ty(base_expr,
- Some(&AdjustDerefRef(AutoDerefRef {
- autoderefs: autoderefs,
- autoref: None,
- unsize: None
- }))), false)
+ Some(&AdjustDerefRef(AutoDerefRef {
+ autoderefs: autoderefs,
+ autoref: None,
+ unsize: None,
+ }))),
+ false)
};
let index_expr_ty = self.node_ty(index_expr.id);
- let result = self.try_index_step(
- ty::MethodCall::expr(expr.id),
- expr,
- &base_expr,
- adjusted_base_ty,
- autoderefs,
- unsize,
- PreferMutLvalue,
- index_expr_ty);
+ let result = self.try_index_step(ty::MethodCall::expr(expr.id),
+ expr,
+ &base_expr,
+ adjusted_base_ty,
+ autoderefs,
+ unsize,
+ PreferMutLvalue,
+ index_expr_ty);
if let Some((input_ty, return_ty)) = result {
self.demand_suptype(index_expr.span, input_ty, index_expr_ty);
let method_call = ty::MethodCall::expr(expr.id);
if self.tables.borrow().method_map.contains_key(&method_call) {
let method = self.try_overloaded_deref(expr.span,
- Some(&base_expr),
- self.node_ty(base_expr.id),
- PreferMutLvalue);
+ Some(&base_expr),
+ self.node_ty(base_expr.id),
+ PreferMutLvalue);
let method = method.expect("re-trying deref failed");
self.tables.borrow_mut().method_map.insert(method_call, method);
}
fn upcast(&mut self,
source_trait_ref: ty::PolyTraitRef<'tcx>,
target_trait_def_id: DefId)
- -> ty::PolyTraitRef<'tcx>
- {
- let upcast_trait_refs = self.tcx.upcast_choices(source_trait_ref.clone(),
- target_trait_def_id);
+ -> ty::PolyTraitRef<'tcx> {
+ let upcast_trait_refs = self.tcx
+ .upcast_choices(source_trait_ref.clone(), target_trait_def_id);
// must be exactly one trait ref or we'd get an ambig error etc
if upcast_trait_refs.len() != 1 {
- span_bug!(
- self.span,
- "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
- source_trait_ref,
- target_trait_def_id,
- upcast_trait_refs);
+ span_bug!(self.span,
+ "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
+ source_trait_ref,
+ target_trait_def_id,
+ upcast_trait_refs);
}
upcast_trait_refs.into_iter().next().unwrap()
}
fn replace_late_bound_regions_with_fresh_var<T>(&self, value: &ty::Binder<T>) -> T
- where T : TypeFoldable<'tcx>
+ where T: TypeFoldable<'tcx>
{
- self.fcx.replace_late_bound_regions_with_fresh_var(
- self.span, infer::FnCall, value).0
+ self.fcx
+ .replace_late_bound_regions_with_fresh_var(self.span, infer::FnCall, value)
+ .0
}
}
Ambiguity(Vec<CandidateSource>),
// Using a `Fn`/`FnMut`/etc method on a raw closure type before we have inferred its kind.
- ClosureAmbiguity(/* DefId of fn trait */ DefId),
+ ClosureAmbiguity(// DefId of fn trait
+ DefId),
// Found an applicable method, but it is not visible.
PrivateMatch(Def),
pub static_candidates: Vec<CandidateSource>,
pub unsatisfied_predicates: Vec<TraitRef<'tcx>>,
pub out_of_scope_traits: Vec<DefId>,
- pub mode: probe::Mode
+ pub mode: probe::Mode,
}
impl<'tcx> NoMatchData<'tcx> {
pub fn new(static_candidates: Vec<CandidateSource>,
unsatisfied_predicates: Vec<TraitRef<'tcx>>,
out_of_scope_traits: Vec<DefId>,
- mode: probe::Mode) -> Self {
+ mode: probe::Mode)
+ -> Self {
NoMatchData {
static_candidates: static_candidates,
unsatisfied_predicates: unsatisfied_predicates,
out_of_scope_traits: out_of_scope_traits,
- mode: mode
+ mode: mode,
}
}
}
#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum CandidateSource {
ImplSource(DefId),
- TraitSource(/* trait id */ DefId),
+ TraitSource(// trait id
+ DefId),
}
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
self_ty: ty::Ty<'tcx>,
call_expr_id: ast::NodeId,
allow_private: bool)
- -> bool
- {
+ -> bool {
let mode = probe::Mode::MethodCall;
match self.probe_method(span, mode, method_name, self_ty, call_expr_id) {
Ok(..) => true,
supplied_method_types: Vec<ty::Ty<'tcx>>,
call_expr: &'gcx hir::Expr,
self_expr: &'gcx hir::Expr)
- -> Result<ty::MethodCallee<'tcx>, MethodError<'tcx>>
- {
+ -> Result<ty::MethodCallee<'tcx>, MethodError<'tcx>> {
debug!("lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
method_name,
self_ty,
self.tcx.used_trait_imports.borrow_mut().insert(import_id);
}
- Ok(self.confirm_method(span, self_expr, call_expr, self_ty, pick, supplied_method_types))
+ Ok(self.confirm_method(span,
+ self_expr,
+ call_expr,
+ self_ty,
+ pick,
+ supplied_method_types))
}
pub fn lookup_method_in_trait(&self,
trait_def_id: DefId,
self_ty: ty::Ty<'tcx>,
opt_input_types: Option<Vec<ty::Ty<'tcx>>>)
- -> Option<ty::MethodCallee<'tcx>>
- {
- self.lookup_method_in_trait_adjusted(span, self_expr, m_name, trait_def_id,
- 0, false, self_ty, opt_input_types)
+ -> Option<ty::MethodCallee<'tcx>> {
+ self.lookup_method_in_trait_adjusted(span,
+ self_expr,
+ m_name,
+ trait_def_id,
+ 0,
+ false,
+ self_ty,
+ opt_input_types)
}
/// `lookup_in_trait_adjusted` is used for overloaded operators.
unsize: bool,
self_ty: ty::Ty<'tcx>,
opt_input_types: Option<Vec<ty::Ty<'tcx>>>)
- -> Option<ty::MethodCallee<'tcx>>
- {
+ -> Option<ty::MethodCallee<'tcx>> {
debug!("lookup_in_trait_adjusted(self_ty={:?}, self_expr={:?}, \
m_name={}, trait_def_id={:?})",
self_ty,
assert!(trait_def.generics.regions.is_empty());
// Construct a trait-reference `self_ty : Trait<input_tys>`
- let substs = Substs::for_item(self.tcx, trait_def_id, |def, _| {
- self.region_var_for_def(span, def)
- }, |def, substs| {
+ let substs = Substs::for_item(self.tcx,
+ trait_def_id,
+ |def, _| self.region_var_for_def(span, def),
+ |def, substs| {
if def.index == 0 {
self_ty
} else if let Some(ref input_types) = opt_input_types {
// Construct an obligation
let poly_trait_ref = trait_ref.to_poly_trait_ref();
- let obligation = traits::Obligation::misc(span,
- self.body_id,
- poly_trait_ref.to_predicate());
+ let obligation =
+ traits::Obligation::misc(span, self.body_id, poly_trait_ref.to_predicate());
// Now we want to know if this can be matched
let mut selcx = traits::SelectionContext::new(self);
assert_eq!(method_ty.generics.regions.len(), 0);
debug!("lookup_in_trait_adjusted: method_item={:?} method_ty={:?}",
- method_item, method_ty);
+ method_item,
+ method_ty);
// Instantiate late-bound regions and substitute the trait
// parameters into the method type to get the actual method type.
// NB: Instantiate late-bound regions first so that
// `instantiate_type_scheme` can normalize associated types that
// may reference those regions.
- let fn_sig = self.replace_late_bound_regions_with_fresh_var(span,
- infer::FnCall,
- &method_ty.fty.sig).0;
+ let fn_sig =
+ self.replace_late_bound_regions_with_fresh_var(span, infer::FnCall, &method_ty.fty.sig)
+ .0;
let fn_sig = self.instantiate_type_scheme(span, trait_ref.substs, &fn_sig);
let transformed_self_ty = fn_sig.inputs[0];
let def_id = method_item.def_id();
- let fty = tcx.mk_fn_def(def_id, trait_ref.substs,
+ let fty = tcx.mk_fn_def(def_id,
+ trait_ref.substs,
tcx.mk_bare_fn(ty::BareFnTy {
- sig: ty::Binder(fn_sig),
- unsafety: method_ty.fty.unsafety,
- abi: method_ty.fty.abi.clone(),
- }));
+ sig: ty::Binder(fn_sig),
+ unsafety: method_ty.fty.unsafety,
+ abi: method_ty.fty.abi.clone(),
+ }));
debug!("lookup_in_trait_adjusted: matched method fty={:?} obligation={:?}",
fty,
// any late-bound regions appearing in its bounds.
let method_bounds = self.instantiate_bounds(span, trait_ref.substs, &method_ty.predicates);
assert!(!method_bounds.has_escaping_regions());
- self.add_obligations_for_parameters(
- traits::ObligationCause::misc(span, self.body_id),
- &method_bounds);
+ self.add_obligations_for_parameters(traits::ObligationCause::misc(span, self.body_id),
+ &method_bounds);
// Also register an obligation for the method type being well-formed.
self.register_wf_obligation(fty, span, traits::MiscObligation);
// Insert any adjustments needed (always an autoref of some mutability).
match self_expr {
- None => { }
+ None => {}
Some(self_expr) => {
debug!("lookup_in_trait_adjusted: inserting adjustment if needed \
(self-id={}, autoderefs={}, unsize={}, explicit_self={:?})",
- self_expr.id, autoderefs, unsize,
+ self_expr.id,
+ autoderefs,
+ unsize,
method_ty.explicit_self);
match method_ty.explicit_self {
match transformed_self_ty.sty {
ty::TyRef(region, ty::TypeAndMut { mutbl, ty: _ }) => {
self.write_adjustment(self_expr.id,
- AdjustDerefRef(AutoDerefRef {
- autoderefs: autoderefs,
- autoref: Some(AutoPtr(region, mutbl)),
- unsize: if unsize {
- Some(transformed_self_ty)
- } else {
- None
- }
- }));
+ AdjustDerefRef(AutoDerefRef {
+ autoderefs: autoderefs,
+ autoref: Some(AutoPtr(region, mutbl)),
+ unsize: if unsize {
+ Some(transformed_self_ty)
+ } else {
+ None
+ },
+ }));
}
_ => {
- span_bug!(
- span,
- "trait method is &self but first arg is: {}",
- transformed_self_ty);
+ span_bug!(span,
+ "trait method is &self but first arg is: {}",
+ transformed_self_ty);
}
}
}
_ => {
- span_bug!(
- span,
- "unexpected explicit self type in operator method: {:?}",
- method_ty.explicit_self);
+ span_bug!(span,
+ "unexpected explicit self type in operator method: {:?}",
+ method_ty.explicit_self);
}
}
}
let callee = ty::MethodCallee {
def_id: def_id,
ty: fty,
- substs: trait_ref.substs
+ substs: trait_ref.substs,
};
debug!("callee = {:?}", callee);
method_name: ast::Name,
self_ty: ty::Ty<'tcx>,
expr_id: ast::NodeId)
- -> Result<Def, MethodError<'tcx>>
- {
+ -> Result<Def, MethodError<'tcx>> {
let mode = probe::Mode::Path;
let pick = self.probe_method(span, mode, method_name, self_ty, expr_id)?;
pub fn impl_or_trait_item(&self,
def_id: DefId,
item_name: ast::Name)
- -> Option<ty::ImplOrTraitItem<'tcx>>
- {
- self.tcx.impl_or_trait_items(def_id)
+ -> Option<ty::ImplOrTraitItem<'tcx>> {
+ self.tcx
+ .impl_or_trait_items(def_id)
.iter()
.map(|&did| self.tcx.impl_or_trait_item(did))
.find(|m| m.name() == item_name)
use super::{CandidateSource, ImplSource, TraitSource};
use super::suggest;
-use check::{FnCtxt};
+use check::FnCtxt;
use hir::def_id::DefId;
use hir::def::Def;
use rustc::ty::subst::{Subst, Substs};
use self::CandidateKind::*;
pub use self::PickKind::*;
-struct ProbeContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+struct ProbeContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
span: Span,
mode: Mode,
/// Collects near misses when trait bounds for type parameters are unsatisfied and is only used
/// for error reporting
- unsatisfied_predicates: Vec<TraitRef<'tcx>>
+ unsatisfied_predicates: Vec<TraitRef<'tcx>>,
}
impl<'a, 'gcx, 'tcx> Deref for ProbeContext<'a, 'gcx, 'tcx> {
struct CandidateStep<'tcx> {
self_ty: Ty<'tcx>,
autoderefs: usize,
- unsize: bool
+ unsize: bool,
}
#[derive(Debug)]
#[derive(Debug)]
enum CandidateKind<'tcx> {
InherentImplCandidate(&'tcx Substs<'tcx>,
- /* Normalize obligations */ Vec<traits::PredicateObligation<'tcx>>),
- ExtensionImplCandidate(/* Impl */ DefId, &'tcx Substs<'tcx>,
- /* Normalize obligations */ Vec<traits::PredicateObligation<'tcx>>),
+ // Normalize obligations
+ Vec<traits::PredicateObligation<'tcx>>),
+ ExtensionImplCandidate(// Impl
+ DefId,
+ &'tcx Substs<'tcx>,
+ // Normalize obligations
+ Vec<traits::PredicateObligation<'tcx>>),
ObjectCandidate,
TraitCandidate,
- WhereClauseCandidate(/* Trait */ ty::PolyTraitRef<'tcx>),
+ WhereClauseCandidate(// Trait
+ ty::PolyTraitRef<'tcx>),
}
#[derive(Debug)]
#[derive(Clone,Debug)]
pub enum PickKind<'tcx> {
InherentImplPick,
- ExtensionImplPick(/* Impl */ DefId),
+ ExtensionImplPick(// Impl
+ DefId),
ObjectPick,
TraitPick,
- WhereClausePick(/* Trait */ ty::PolyTraitRef<'tcx>),
+ WhereClausePick(// Trait
+ ty::PolyTraitRef<'tcx>),
}
pub type PickResult<'tcx> = Result<Pick<'tcx>, MethodError<'tcx>>;
// An expression of the form `Type::item` or `<T>::item`.
// No autoderefs are performed, lookup is done based on the type each
// implementation is for, and static methods are included.
- Path
+ Path,
}
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
item_name: ast::Name,
self_ty: Ty<'tcx>,
scope_expr_id: ast::NodeId)
- -> PickResult<'tcx>
- {
+ -> PickResult<'tcx> {
debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})",
self_ty,
item_name,
let steps = if mode == Mode::MethodCall {
match self.create_steps(span, self_ty) {
Some(steps) => steps,
- None =>return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), Vec::new(),
- Vec::new(), mode))),
+ None => {
+ return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(),
+ Vec::new(),
+ Vec::new(),
+ mode)))
+ }
}
} else {
vec![CandidateStep {
- self_ty: self_ty,
- autoderefs: 0,
- unsize: false
- }]
+ self_ty: self_ty,
+ autoderefs: 0,
+ unsize: false,
+ }]
};
// Create a list of simplified self types, if we can.
let mut simplified_steps = Vec::new();
for step in &steps {
match ty::fast_reject::simplify_type(self.tcx, step.self_ty, true) {
- None => { break; }
- Some(simplified_type) => { simplified_steps.push(simplified_type); }
+ None => {
+ break;
+ }
+ Some(simplified_type) => {
+ simplified_steps.push(simplified_type);
+ }
}
}
- let opt_simplified_steps =
- if simplified_steps.len() < steps.len() {
- None // failed to convert at least one of the steps
- } else {
- Some(simplified_steps)
- };
+ let opt_simplified_steps = if simplified_steps.len() < steps.len() {
+ None // failed to convert at least one of the steps
+ } else {
+ Some(simplified_steps)
+ };
debug!("ProbeContext: steps for self_ty={:?} are {:?}",
self_ty,
// this creates one big transaction so that all type variables etc
// that we create during the probe process are removed later
self.probe(|_| {
- let mut probe_cx = ProbeContext::new(self,
- span,
- mode,
- item_name,
- steps,
- opt_simplified_steps);
+ let mut probe_cx =
+ ProbeContext::new(self, span, mode, item_name, steps, opt_simplified_steps);
probe_cx.assemble_inherent_candidates();
probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?;
probe_cx.pick()
})
}
- fn create_steps(&self,
- span: Span,
- self_ty: Ty<'tcx>)
- -> Option<Vec<CandidateStep<'tcx>>>
- {
+ fn create_steps(&self, span: Span, self_ty: Ty<'tcx>) -> Option<Vec<CandidateStep<'tcx>>> {
// FIXME: we don't need to create the entire steps in one pass
let mut autoderef = self.autoderef(span, self_ty);
- let mut steps: Vec<_> = autoderef.by_ref().map(|(ty, d)| CandidateStep {
- self_ty: ty,
- autoderefs: d,
- unsize: false
- }).collect();
+ let mut steps: Vec<_> = autoderef.by_ref()
+ .map(|(ty, d)| {
+ CandidateStep {
+ self_ty: ty,
+ autoderefs: d,
+ unsize: false,
+ }
+ })
+ .collect();
let final_ty = autoderef.unambiguous_final_ty();
match final_ty.sty {
steps.push(CandidateStep {
self_ty: self.tcx.mk_slice(elem_ty),
autoderefs: dereferences,
- unsize: true
+ unsize: true,
});
}
ty::TyError => return None,
item_name: ast::Name,
steps: Vec<CandidateStep<'tcx>>,
opt_simplified_steps: Option<Vec<ty::fast_reject::SimplifiedType>>)
- -> ProbeContext<'a, 'gcx, 'tcx>
- {
+ -> ProbeContext<'a, 'gcx, 'tcx> {
ProbeContext {
fcx: fcx,
span: span,
}
fn assemble_probe(&mut self, self_ty: Ty<'tcx>) {
- debug!("assemble_probe: self_ty={:?}",
- self_ty);
+ debug!("assemble_probe: self_ty={:?}", self_ty);
match self_ty.sty {
ty::TyTrait(box ref data) => {
let lang_def_id = self.tcx.lang_items.f64_impl();
self.assemble_inherent_impl_for_primitive(lang_def_id);
}
- _ => {
- }
+ _ => {}
}
}
let item = match self.impl_or_trait_item(impl_def_id) {
Some(m) => m,
- None => { return; } // No method with correct name on this impl
+ None => {
+ return;
+ } // No method with correct name on this impl
};
if !self.has_applicable_self(&item) {
if !item.vis().is_accessible_from(self.body_id, &self.tcx.map) {
self.private_candidate = Some(item.def());
- return
+ return;
}
let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| {
let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref);
- let xform_self_ty = this.xform_self_ty(&item,
- new_trait_ref.self_ty(),
- new_trait_ref.substs);
+ let xform_self_ty =
+ this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs);
this.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
param_ty: ty::ParamTy) {
// FIXME -- Do we want to commit to this behavior for param bounds?
- let bounds: Vec<_> =
- self.parameter_environment.caller_bounds
+ let bounds: Vec<_> = self.parameter_environment
+ .caller_bounds
.iter()
.filter_map(|predicate| {
match *predicate {
ty::TyParam(ref p) if *p == param_ty => {
Some(trait_predicate.to_poly_trait_ref())
}
- _ => None
+ _ => None,
}
}
ty::Predicate::Equate(..) |
ty::Predicate::WellFormed(..) |
ty::Predicate::ObjectSafe(..) |
ty::Predicate::ClosureKind(..) |
- ty::Predicate::TypeOutlives(..) => {
- None
- }
+ ty::Predicate::TypeOutlives(..) => None,
}
})
.collect();
self.elaborate_bounds(&bounds, |this, poly_trait_ref, item| {
- let trait_ref =
- this.erase_late_bound_regions(&poly_trait_ref);
+ let trait_ref = this.erase_late_bound_regions(&poly_trait_ref);
- let xform_self_ty =
- this.xform_self_ty(&item,
- trait_ref.self_ty(),
- trait_ref.substs);
+ let xform_self_ty = this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs);
if let Some(ref m) = item.as_opt_method() {
debug!("found match: trait_ref={:?} substs={:?} m={:?}",
// Do a search through a list of bounds, using a callback to actually
// create the candidates.
- fn elaborate_bounds<F>(
- &mut self,
- bounds: &[ty::PolyTraitRef<'tcx>],
- mut mk_cand: F,
- ) where
- F: for<'b> FnMut(
- &mut ProbeContext<'b, 'gcx, 'tcx>,
- ty::PolyTraitRef<'tcx>,
- ty::ImplOrTraitItem<'tcx>,
- ),
+ fn elaborate_bounds<F>(&mut self, bounds: &[ty::PolyTraitRef<'tcx>], mut mk_cand: F)
+ where F: for<'b> FnMut(&mut ProbeContext<'b, 'gcx, 'tcx>,
+ ty::PolyTraitRef<'tcx>,
+ ty::ImplOrTraitItem<'tcx>)
{
debug!("elaborate_bounds(bounds={:?})", bounds);
for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
let item = match self.impl_or_trait_item(bound_trait_ref.def_id()) {
Some(v) => v,
- None => { continue; }
+ None => {
+ continue;
+ }
};
if !self.has_applicable_self(&item) {
fn assemble_extension_candidates_for_traits_in_scope(&mut self,
expr_id: ast::NodeId)
- -> Result<(), MethodError<'tcx>>
- {
+ -> Result<(), MethodError<'tcx>> {
let mut duplicates = FnvHashSet();
let opt_applicable_traits = self.tcx.trait_map.get(&expr_id);
if let Some(applicable_traits) = opt_applicable_traits {
fn assemble_extension_candidates_for_trait(&mut self,
trait_def_id: DefId)
- -> Result<(), MethodError<'tcx>>
- {
+ -> Result<(), MethodError<'tcx>> {
debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})",
trait_def_id);
// Check whether `trait_def_id` defines a method with suitable name:
- let trait_items =
- self.tcx.trait_items(trait_def_id);
- let maybe_item =
- trait_items.iter()
- .find(|item| item.name() == self.item_name);
+ let trait_items = self.tcx.trait_items(trait_def_id);
+ let maybe_item = trait_items.iter()
+ .find(|item| item.name() == self.item_name);
let item = match maybe_item {
Some(i) => i,
- None => { return Ok(()); }
+ None => {
+ return Ok(());
+ }
};
// Check whether `trait_def_id` defines a method with suitable name:
fn assemble_extension_candidates_for_trait_impls(&mut self,
trait_def_id: DefId,
- item: ty::ImplOrTraitItem<'tcx>)
- {
+ item: ty::ImplOrTraitItem<'tcx>) {
let trait_def = self.tcx.lookup_trait_def(trait_def_id);
// FIXME(arielb1): can we use for_each_relevant_impl here?
debug!("impl_substs={:?}", impl_substs);
- let impl_trait_ref =
- self.tcx.impl_trait_ref(impl_def_id)
+ let impl_trait_ref = self.tcx.impl_trait_ref(impl_def_id)
.unwrap() // we know this is a trait impl
.subst(self.tcx, impl_substs);
// Determine the receiver type that the method itself expects.
let xform_self_ty =
- self.xform_self_ty(&item,
- impl_trait_ref.self_ty(),
- impl_trait_ref.substs);
+ self.xform_self_ty(&item, impl_trait_ref.self_ty(), impl_trait_ref.substs);
// Normalize the receiver. We can't use normalize_associated_types_in
// as it will pollute the fcx's fulfillment context after this probe
fn impl_can_possibly_match(&self, impl_def_id: DefId) -> bool {
let simplified_steps = match self.opt_simplified_steps {
Some(ref simplified_steps) => simplified_steps,
- None => { return true; }
+ None => {
+ return true;
+ }
};
let impl_type = self.tcx.lookup_item_type(impl_def_id);
let impl_simplified_type =
match ty::fast_reject::simplify_type(self.tcx, impl_type.ty, false) {
Some(simplified_type) => simplified_type,
- None => { return true; }
+ None => {
+ return true;
+ }
};
simplified_steps.contains(&impl_simplified_type)
fn assemble_closure_candidates(&mut self,
trait_def_id: DefId,
item: ty::ImplOrTraitItem<'tcx>)
- -> Result<(), MethodError<'tcx>>
- {
+ -> Result<(), MethodError<'tcx>> {
// Check if this is one of the Fn,FnMut,FnOnce traits.
let tcx = self.tcx;
let kind = if Some(trait_def_id) == tcx.lang_items.fn_trait() {
// for the purposes of our method lookup, we only take
// receiver type into account, so we can just substitute
// fresh types here to use during substitution and subtyping.
- let substs = Substs::for_item(self.tcx, trait_def_id, |def, _| {
- self.region_var_for_def(self.span, def)
- }, |def, substs| {
+ let substs = Substs::for_item(self.tcx,
+ trait_def_id,
+ |def, _| self.region_var_for_def(self.span, def),
+ |def, substs| {
if def.index == 0 {
step.self_ty
} else {
}
});
- let xform_self_ty = self.xform_self_ty(&item,
- step.self_ty,
- substs);
+ let xform_self_ty = self.xform_self_ty(&item, step.self_ty, substs);
self.inherent_candidates.push(Candidate {
xform_self_ty: xform_self_ty,
item: item.clone(),
fn assemble_projection_candidates(&mut self,
trait_def_id: DefId,
- item: ty::ImplOrTraitItem<'tcx>)
- {
+ item: ty::ImplOrTraitItem<'tcx>) {
debug!("assemble_projection_candidates(\
trait_def_id={:?}, \
item={:?})",
item);
for step in self.steps.iter() {
- debug!("assemble_projection_candidates: step={:?}",
- step);
+ debug!("assemble_projection_candidates: step={:?}", step);
let (def_id, substs) = match step.self_ty.sty {
- ty::TyProjection(ref data) => {
- (data.trait_ref.def_id, data.trait_ref.substs)
- }
+ ty::TyProjection(ref data) => (data.trait_ref.def_id, data.trait_ref.substs),
ty::TyAnon(def_id, substs) => (def_id, substs),
_ => continue,
};
debug!("assemble_projection_candidates: def_id={:?} substs={:?}",
- def_id, substs);
+ def_id,
+ substs);
let trait_predicates = self.tcx.lookup_predicates(def_id);
let bounds = trait_predicates.instantiate(self.tcx, substs);
let predicates = bounds.predicates;
debug!("assemble_projection_candidates: predicates={:?}",
predicates);
- for poly_bound in
- traits::elaborate_predicates(self.tcx, predicates)
+ for poly_bound in traits::elaborate_predicates(self.tcx, predicates)
.filter_map(|p| p.to_opt_poly_trait_ref())
- .filter(|b| b.def_id() == trait_def_id)
- {
+ .filter(|b| b.def_id() == trait_def_id) {
let bound = self.erase_late_bound_regions(&poly_bound);
debug!("assemble_projection_candidates: def_id={:?} substs={:?} bound={:?}",
- def_id, substs, bound);
+ def_id,
+ substs,
+ bound);
if self.can_equate(&step.self_ty, &bound.self_ty()).is_ok() {
- let xform_self_ty = self.xform_self_ty(&item,
- bound.self_ty(),
- bound.substs);
+ let xform_self_ty = self.xform_self_ty(&item, bound.self_ty(), bound.substs);
debug!("assemble_projection_candidates: bound={:?} xform_self_ty={:?}",
bound,
fn assemble_where_clause_candidates(&mut self,
trait_def_id: DefId,
- item: ty::ImplOrTraitItem<'tcx>)
- {
+ item: ty::ImplOrTraitItem<'tcx>) {
debug!("assemble_where_clause_candidates(trait_def_id={:?})",
trait_def_id);
let caller_predicates = self.parameter_environment.caller_bounds.clone();
for poly_bound in traits::elaborate_predicates(self.tcx, caller_predicates)
- .filter_map(|p| p.to_opt_poly_trait_ref())
- .filter(|b| b.def_id() == trait_def_id)
- {
+ .filter_map(|p| p.to_opt_poly_trait_ref())
+ .filter(|b| b.def_id() == trait_def_id) {
let bound = self.erase_late_bound_regions(&poly_bound);
- let xform_self_ty = self.xform_self_ty(&item,
- bound.self_ty(),
- bound.substs);
+ let xform_self_ty = self.xform_self_ty(&item, bound.self_ty(), bound.substs);
debug!("assemble_where_clause_candidates: bound={:?} xform_self_ty={:?}",
bound,
let out_of_scope_traits = match self.pick_core() {
Some(Ok(p)) => vec![p.item.container().id()],
- Some(Err(MethodError::Ambiguity(v))) => v.into_iter().map(|source| {
- match source {
- TraitSource(id) => id,
- ImplSource(impl_id) => {
- match tcx.trait_id_of_impl(impl_id) {
- Some(id) => id,
- None =>
- span_bug!(span,
- "found inherent method when looking at traits")
+ Some(Err(MethodError::Ambiguity(v))) => {
+ v.into_iter()
+ .map(|source| {
+ match source {
+ TraitSource(id) => id,
+ ImplSource(impl_id) => {
+ match tcx.trait_id_of_impl(impl_id) {
+ Some(id) => id,
+ None => {
+ span_bug!(span,
+ "found inherent method when looking at traits")
+ }
+ }
+ }
}
- }
- }
- }).collect(),
+ })
+ .collect()
+ }
Some(Err(MethodError::NoMatch(NoMatchData { out_of_scope_traits: others, .. }))) => {
assert!(others.is_empty());
vec![]
return Err(MethodError::PrivateMatch(def));
}
- Err(MethodError::NoMatch(NoMatchData::new(static_candidates, unsatisfied_predicates,
- out_of_scope_traits, self.mode)))
+ Err(MethodError::NoMatch(NoMatchData::new(static_candidates,
+ unsatisfied_predicates,
+ out_of_scope_traits,
+ self.mode)))
}
fn pick_core(&mut self) -> Option<PickResult<'tcx>> {
self.pick_autorefd_method(step)
}
- fn pick_by_value_method(&mut self,
- step: &CandidateStep<'tcx>)
- -> Option<PickResult<'tcx>>
- {
- /*!
- * For each type `T` in the step list, this attempts to find a
- * method where the (transformed) self type is exactly `T`. We
- * do however do one transformation on the adjustment: if we
- * are passing a region pointer in, we will potentially
- * *reborrow* it to a shorter lifetime. This allows us to
- * transparently pass `&mut` pointers, in particular, without
- * consuming them for their entire lifetime.
- */
+ fn pick_by_value_method(&mut self, step: &CandidateStep<'tcx>) -> Option<PickResult<'tcx>> {
+ //! For each type `T` in the step list, this attempts to find a
+ //! method where the (transformed) self type is exactly `T`. We
+ //! do however do one transformation on the adjustment: if we
+ //! are passing a region pointer in, we will potentially
+ //! *reborrow* it to a shorter lifetime. This allows us to
+ //! transparently pass `&mut` pointers, in particular, without
+ //! consuming them for their entire lifetime.
if step.unsize {
return None;
}
- self.pick_method(step.self_ty).map(|r| r.map(|mut pick| {
- pick.autoderefs = step.autoderefs;
+ self.pick_method(step.self_ty).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
- // Insert a `&*` or `&mut *` if this is a reference type:
- if let ty::TyRef(_, mt) = step.self_ty.sty {
- pick.autoderefs += 1;
- pick.autoref = Some(mt.mutbl);
- }
+ // Insert a `&*` or `&mut *` if this is a reference type:
+ if let ty::TyRef(_, mt) = step.self_ty.sty {
+ pick.autoderefs += 1;
+ pick.autoref = Some(mt.mutbl);
+ }
- pick
- }))
+ pick
+ })
+ })
}
- fn pick_autorefd_method(&mut self,
- step: &CandidateStep<'tcx>)
- -> Option<PickResult<'tcx>>
- {
+ fn pick_autorefd_method(&mut self, step: &CandidateStep<'tcx>) -> Option<PickResult<'tcx>> {
let tcx = self.tcx;
// In general, during probing we erase regions. See
let region = tcx.mk_region(ty::ReErased);
// Search through mutabilities in order to find one where pick works:
- [hir::MutImmutable, hir::MutMutable].iter().filter_map(|&m| {
- let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut {
- ty: step.self_ty,
- mutbl: m
- });
- self.pick_method(autoref_ty).map(|r| r.map(|mut pick| {
- pick.autoderefs = step.autoderefs;
- pick.autoref = Some(m);
- pick.unsize = if step.unsize {
- Some(step.self_ty)
- } else {
- None
- };
- pick
- }))
- }).nth(0)
+ [hir::MutImmutable, hir::MutMutable]
+ .iter()
+ .filter_map(|&m| {
+ let autoref_ty = tcx.mk_ref(region,
+ ty::TypeAndMut {
+ ty: step.self_ty,
+ mutbl: m,
+ });
+ self.pick_method(autoref_ty).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+ pick.autoref = Some(m);
+ pick.unsize = if step.unsize {
+ Some(step.self_ty)
+ } else {
+ None
+ };
+ pick
+ })
+ })
+ })
+ .nth(0)
}
fn pick_method(&mut self, self_ty: Ty<'tcx>) -> Option<PickResult<'tcx>> {
}
debug!("searching extension candidates");
- let res = self.consider_candidates(self_ty, &self.extension_candidates,
+ let res = self.consider_candidates(self_ty,
+ &self.extension_candidates,
&mut possibly_unsatisfied_predicates);
if let None = res {
self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates);
probes: &[Candidate<'tcx>],
possibly_unsatisfied_predicates: &mut Vec<TraitRef<'tcx>>)
-> Option<PickResult<'tcx>> {
- let mut applicable_candidates: Vec<_> =
- probes.iter()
- .filter(|&probe| self.consider_probe(self_ty,
- probe,possibly_unsatisfied_predicates))
- .collect();
+ let mut applicable_candidates: Vec<_> = probes.iter()
+ .filter(|&probe| self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates))
+ .collect();
debug!("applicable_candidates: {:?}", applicable_candidates);
if applicable_candidates.len() > 1 {
match self.collapse_candidates_to_trait_pick(&applicable_candidates[..]) {
- Some(pick) => { return Some(Ok(pick)); }
- None => { }
+ Some(pick) => {
+ return Some(Ok(pick));
+ }
+ None => {}
}
}
return Some(Err(MethodError::Ambiguity(sources)));
}
- applicable_candidates.pop().map(|probe| {
- Ok(probe.to_unadjusted_pick())
- })
+ applicable_candidates.pop().map(|probe| Ok(probe.to_unadjusted_pick()))
}
- fn consider_probe(&self, self_ty: Ty<'tcx>, probe: &Candidate<'tcx>,
- possibly_unsatisfied_predicates: &mut Vec<TraitRef<'tcx>>) -> bool {
- debug!("consider_probe: self_ty={:?} probe={:?}",
- self_ty,
- probe);
+ fn consider_probe(&self,
+ self_ty: Ty<'tcx>,
+ probe: &Candidate<'tcx>,
+ possibly_unsatisfied_predicates: &mut Vec<TraitRef<'tcx>>)
+ -> bool {
+ debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe);
self.probe(|_| {
// First check that the self type can be related.
- match self.sub_types(false, TypeOrigin::Misc(DUMMY_SP),
- self_ty, probe.xform_self_ty) {
+ match self.sub_types(false,
+ TypeOrigin::Misc(DUMMY_SP),
+ self_ty,
+ probe.xform_self_ty) {
Ok(InferOk { obligations, .. }) => {
// FIXME(#32730) propagate obligations
assert!(obligations.is_empty())
// Check whether the impl imposes obligations we have to worry about.
let impl_bounds = self.tcx.lookup_predicates(impl_def_id);
let impl_bounds = impl_bounds.instantiate(self.tcx, substs);
- let traits::Normalized { value: impl_bounds,
- obligations: norm_obligations } =
+ let traits::Normalized { value: impl_bounds, obligations: norm_obligations } =
traits::normalize(selcx, cause.clone(), &impl_bounds);
// Convert the bounds into obligations.
- let obligations =
- traits::predicates_for_generics(cause.clone(),
- &impl_bounds);
+ let obligations = traits::predicates_for_generics(cause.clone(), &impl_bounds);
debug!("impl_obligations={:?}", obligations);
// Evaluate those obligations to see if they might possibly hold.
///
/// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we
/// use, so it's ok to just commit to "using the method from the trait Foo".
- fn collapse_candidates_to_trait_pick(&self,
- probes: &[&Candidate<'tcx>])
- -> Option<Pick<'tcx>> {
+ fn collapse_candidates_to_trait_pick(&self, probes: &[&Candidate<'tcx>]) -> Option<Pick<'tcx>> {
// Do all probes correspond to the same trait?
let container = probes[0].item.container();
match container {
ty::TraitContainer(_) => {}
- ty::ImplContainer(_) => return None
+ ty::ImplContainer(_) => return None,
}
if probes[1..].iter().any(|p| p.item.container() != container) {
return None;
import_id: probes[0].import_id,
autoderefs: 0,
autoref: None,
- unsize: None
+ unsize: None,
})
}
fn has_applicable_self(&self, item: &ty::ImplOrTraitItem) -> bool {
// "fast track" -- check for usage of sugar
match *item {
- ty::ImplOrTraitItem::MethodTraitItem(ref method) =>
+ ty::ImplOrTraitItem::MethodTraitItem(ref method) => {
match method.explicit_self {
ty::ExplicitSelfCategory::Static => self.mode == Mode::Path,
ty::ExplicitSelfCategory::ByValue |
ty::ExplicitSelfCategory::ByReference(..) |
ty::ExplicitSelfCategory::ByBox => true,
- },
+ }
+ }
ty::ImplOrTraitItem::ConstTraitItem(..) => self.mode == Mode::Path,
_ => false,
}
item: &ty::ImplOrTraitItem<'tcx>,
impl_ty: Ty<'tcx>,
substs: &Substs<'tcx>)
- -> Ty<'tcx>
- {
+ -> Ty<'tcx> {
match item.as_opt_method() {
- Some(ref method) => self.xform_method_self_ty(method, impl_ty,
- substs),
+ Some(ref method) => self.xform_method_self_ty(method, impl_ty, substs),
None => impl_ty,
}
}
method: &Rc<ty::Method<'tcx>>,
impl_ty: Ty<'tcx>,
substs: &Substs<'tcx>)
- -> Ty<'tcx>
- {
+ -> Ty<'tcx> {
debug!("xform_self_ty(impl_ty={:?}, self_ty={:?}, substs={:?})",
impl_ty,
method.fty.sig.0.inputs.get(0),
// are given do not include type/lifetime parameters for the
// method yet. So create fresh variables here for those too,
// if there are any.
- assert_eq!(substs.types().count(), method.generics.parent_types as usize);
- assert_eq!(substs.regions().count(), method.generics.parent_regions as usize);
+ assert_eq!(substs.types().count(),
+ method.generics.parent_types as usize);
+ assert_eq!(substs.regions().count(),
+ method.generics.parent_regions as usize);
if self.mode == Mode::Path {
return impl_ty;
if method.generics.types.is_empty() && method.generics.regions.is_empty() {
xform_self_ty.subst(self.tcx, substs)
} else {
- let substs = Substs::for_item(self.tcx, method.def_id, |def, _| {
+ let substs = Substs::for_item(self.tcx,
+ method.def_id,
+ |def, _| {
let i = def.index as usize;
if i < substs.params().len() {
substs.region_at(i)
// `impl_self_ty()` for an explanation.
self.tcx.mk_region(ty::ReErased)
}
- }, |def, cur_substs| {
+ },
+ |def, cur_substs| {
let i = def.index as usize;
if i < substs.params().len() {
substs.type_at(i)
}
/// Get the type of an impl and generate substitutions with placeholders.
- fn impl_ty_and_substs(&self,
- impl_def_id: DefId)
- -> (Ty<'tcx>, &'tcx Substs<'tcx>)
- {
+ fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, &'tcx Substs<'tcx>) {
let impl_ty = self.tcx.lookup_item_type(impl_def_id).ty;
- let substs = Substs::for_item(self.tcx, impl_def_id,
+ let substs = Substs::for_item(self.tcx,
+ impl_def_id,
|_, _| self.tcx.mk_region(ty::ReErased),
|_, _| self.next_ty_var());
/// and/or tracking the substitution and
/// so forth.
fn erase_late_bound_regions<T>(&self, value: &ty::Binder<T>) -> T
- where T : TypeFoldable<'tcx>
+ where T: TypeFoldable<'tcx>
{
self.tcx.erase_late_bound_regions(value)
}
/// Find item with name `item_name` defined in impl/trait `def_id`
/// and return it, or `None`, if no such item was defined there.
- fn impl_or_trait_item(&self, def_id: DefId)
- -> Option<ty::ImplOrTraitItem<'tcx>>
- {
+ fn impl_or_trait_item(&self, def_id: DefId) -> Option<ty::ImplOrTraitItem<'tcx>> {
self.fcx.impl_or_trait_item(def_id, self.item_name)
}
}
item: self.item.clone(),
kind: match self.kind {
InherentImplCandidate(..) => InherentImplPick,
- ExtensionImplCandidate(def_id, ..) => {
- ExtensionImplPick(def_id)
- }
+ ExtensionImplCandidate(def_id, ..) => ExtensionImplPick(def_id),
ObjectCandidate => ObjectPick,
TraitCandidate => TraitPick,
WhereClauseCandidate(ref trait_ref) => {
import_id: self.import_id,
autoderefs: 0,
autoref: None,
- unsize: None
+ unsize: None,
}
}
fn to_source(&self) -> CandidateSource {
match self.kind {
- InherentImplCandidate(..) => {
- ImplSource(self.item.container().id())
- }
+ InherentImplCandidate(..) => ImplSource(self.item.container().id()),
ExtensionImplCandidate(def_id, ..) => ImplSource(def_id),
ObjectCandidate |
TraitCandidate |
use CrateCtxt;
-use check::{FnCtxt};
+use check::FnCtxt;
use rustc::hir::map as hir_map;
use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable};
use hir::def::Def;
use middle::lang_items::FnOnceTraitLangItem;
use rustc::ty::subst::Substs;
use rustc::traits::{Obligation, SelectionContext};
-use util::nodemap::{FnvHashSet};
+use util::nodemap::FnvHashSet;
use syntax::ast;
use errors::DiagnosticBuilder;
match ty.sty {
// Not all of these (e.g. unsafe fns) implement FnOnce
// so we look for these beforehand
- ty::TyClosure(..) | ty::TyFnDef(..) | ty::TyFnPtr(_) => true,
+ ty::TyClosure(..) |
+ ty::TyFnDef(..) |
+ ty::TyFnPtr(_) => true,
// If it's not a simple function, look for things which implement FnOnce
_ => {
let fn_once = match tcx.lang_items.require(FnOnceTraitLangItem) {
Ok(fn_once) => fn_once,
- Err(..) => return false
+ Err(..) => return false,
};
- self.autoderef(span, ty).any(|(ty, _)| self.probe(|_| {
- let fn_once_substs =
- Substs::new_trait(tcx, ty, &[self.next_ty_var()]);
- let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs);
- let poly_trait_ref = trait_ref.to_poly_trait_ref();
- let obligation = Obligation::misc(span,
- self.body_id,
- poly_trait_ref
- .to_predicate());
- SelectionContext::new(self).evaluate_obligation(&obligation)
- }))
+ self.autoderef(span, ty).any(|(ty, _)| {
+ self.probe(|_| {
+ let fn_once_substs = Substs::new_trait(tcx, ty, &[self.next_ty_var()]);
+ let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs);
+ let poly_trait_ref = trait_ref.to_poly_trait_ref();
+ let obligation =
+ Obligation::misc(span, self.body_id, poly_trait_ref.to_predicate());
+ SelectionContext::new(self).evaluate_obligation(&obligation)
+ })
+ })
}
}
}
rcvr_ty: Ty<'tcx>,
item_name: ast::Name,
rcvr_expr: Option<&hir::Expr>,
- error: MethodError<'tcx>)
- {
+ error: MethodError<'tcx>) {
// avoid suggestions when we don't know what's going on.
if rcvr_ty.references_error() {
- return
+ return;
}
- let report_candidates = |err: &mut DiagnosticBuilder,
- mut sources: Vec<CandidateSource>| {
+ let report_candidates = |err: &mut DiagnosticBuilder, mut sources: Vec<CandidateSource>| {
sources.sort();
sources.dedup();
// the impl, if local to crate (item may be defaulted), else nothing.
let item = self.impl_or_trait_item(impl_did, item_name)
.or_else(|| {
- self.impl_or_trait_item(
- self.tcx.impl_trait_ref(impl_did).unwrap().def_id,
-
- item_name
- )
- }).unwrap();
- let note_span = self.tcx.map.span_if_local(item.def_id()).or_else(|| {
- self.tcx.map.span_if_local(impl_did)
- });
+ self.impl_or_trait_item(self.tcx
+ .impl_trait_ref(impl_did)
+ .unwrap()
+ .def_id,
+
+ item_name)
+ })
+ .unwrap();
+ let note_span = self.tcx
+ .map
+ .span_if_local(item.def_id())
+ .or_else(|| self.tcx.map.span_if_local(impl_did));
let impl_ty = self.impl_self_ty(span, impl_did).ty;
CandidateSource::TraitSource(trait_did) => {
let item = self.impl_or_trait_item(trait_did, item_name).unwrap();
let item_span = self.tcx.map.def_id_span(item.def_id(), span);
- span_note!(err, item_span,
+ span_note!(err,
+ item_span,
"candidate #{} is defined in the trait `{}`",
idx + 1,
self.tcx.item_path_str(trait_did));
MethodError::NoMatch(NoMatchData { static_candidates: static_sources,
unsatisfied_predicates,
out_of_scope_traits,
- mode, .. }) => {
+ mode,
+ .. }) => {
let tcx = self.tcx;
- let mut err = self.type_error_struct(
- span,
- |actual| {
- format!("no {} named `{}` found for type `{}` \
- in the current scope",
- if mode == Mode::MethodCall { "method" }
- else { "associated item" },
- item_name,
- actual)
- },
- rcvr_ty);
+ let mut err = self.type_error_struct(span,
+ |actual| {
+ format!("no {} named `{}` found for type `{}` in the current scope",
+ if mode == Mode::MethodCall {
+ "method"
+ } else {
+ "associated item"
+ },
+ item_name,
+ actual)
+ },
+ rcvr_ty);
// If the method name is the name of a field with a function or closure type,
// give a helping note that it has to be called as (x.f)(...).
for (ty, _) in self.autoderef(span, rcvr_ty) {
match ty.sty {
ty::TyAdt(def, substs) if !def.is_enum() => {
- if let Some(field) = def.struct_variant().
- find_field_named(item_name) {
+ if let Some(field) = def.struct_variant()
+ .find_field_named(item_name) {
let snippet = tcx.sess.codemap().span_to_snippet(expr.span);
let expr_string = match snippet {
Ok(expr_string) => expr_string,
- _ => "s".into() // Default to a generic placeholder for the
- // expression when we can't generate a
- // string snippet
+ _ => "s".into(), // Default to a generic placeholder for the
+ // expression when we can't generate a
+ // string snippet
};
let field_ty = field.ty(tcx, substs);
if self.is_fn_ty(&field_ty, span) {
- err.span_note(span, &format!(
- "use `({0}.{1})(...)` if you meant to call the \
- function stored in the `{1}` field",
- expr_string, item_name));
+ err.span_note(span,
+ &format!("use `({0}.{1})(...)` if you \
+ meant to call the function \
+ stored in the `{1}` field",
+ expr_string,
+ item_name));
} else {
- err.span_note(span, &format!(
- "did you mean to write `{0}.{1}`?",
- expr_string, item_name));
+ err.span_note(span,
+ &format!("did you mean to write `{0}.{1}`?",
+ expr_string,
+ item_name));
}
break;
}
}
if let Some(expr) = rcvr_expr {
- if let Ok (expr_string) = tcx.sess.codemap().span_to_snippet(expr.span) {
+ if let Ok(expr_string) = tcx.sess.codemap().span_to_snippet(expr.span) {
report_function!(expr.span, expr_string);
- }
- else if let Expr_::ExprPath(_, path) = expr.node.clone() {
+ } else if let Expr_::ExprPath(_, path) = expr.node.clone() {
if let Some(segment) = path.segments.last() {
report_function!(expr.span, segment.name);
}
}
if !static_sources.is_empty() {
- err.note(
- "found the following associated functions; to be used as \
- methods, functions must have a `self` parameter");
+ err.note("found the following associated functions; to be used as methods, \
+ functions must have a `self` parameter");
report_candidates(&mut err, static_sources);
}
if !unsatisfied_predicates.is_empty() {
let bound_list = unsatisfied_predicates.iter()
- .map(|p| format!("`{} : {}`",
- p.self_ty(),
- p))
+ .map(|p| format!("`{} : {}`", p.self_ty(), p))
.collect::<Vec<_>>()
.join(", ");
- err.note(
- &format!("the method `{}` exists but the \
- following trait bounds were not satisfied: {}",
- item_name,
- bound_list));
+ err.note(&format!("the method `{}` exists but the following trait bounds \
+ were not satisfied: {}",
+ item_name,
+ bound_list));
}
- self.suggest_traits_to_import(&mut err, span, rcvr_ty, item_name,
- rcvr_expr, out_of_scope_traits);
+ self.suggest_traits_to_import(&mut err,
+ span,
+ rcvr_ty,
+ item_name,
+ rcvr_expr,
+ out_of_scope_traits);
err.emit();
}
MethodError::Ambiguity(sources) => {
- let mut err = struct_span_err!(self.sess(), span, E0034,
+ let mut err = struct_span_err!(self.sess(),
+ span,
+ E0034,
"multiple applicable items in scope");
err.span_label(span, &format!("multiple `{}` found", item_name));
let msg = format!("the `{}` method from the `{}` trait cannot be explicitly \
invoked on this closure as we have not yet inferred what \
kind of closure it is",
- item_name,
- self.tcx.item_path_str(trait_def_id));
+ item_name,
+ self.tcx.item_path_str(trait_def_id));
let msg = if let Some(callee) = rcvr_expr {
format!("{}; use overloaded call notation instead (e.g., `{}()`)",
- msg, pprust::expr_to_string(callee))
+ msg,
+ pprust::expr_to_string(callee))
} else {
msg
};
rcvr_ty: Ty<'tcx>,
item_name: ast::Name,
rcvr_expr: Option<&hir::Expr>,
- valid_out_of_scope_traits: Vec<DefId>)
- {
+ valid_out_of_scope_traits: Vec<DefId>) {
if !valid_out_of_scope_traits.is_empty() {
let mut candidates = valid_out_of_scope_traits;
candidates.sort();
candidates.dedup();
- let msg = format!(
- "items from traits can only be used if the trait is in scope; \
- the following {traits_are} implemented but not in scope, \
- perhaps add a `use` for {one_of_them}:",
- traits_are = if candidates.len() == 1 {"trait is"} else {"traits are"},
- one_of_them = if candidates.len() == 1 {"it"} else {"one of them"});
+ let msg = format!("items from traits can only be used if the trait is in scope; the \
+ following {traits_are} implemented but not in scope, perhaps add \
+ a `use` for {one_of_them}:",
+ traits_are = if candidates.len() == 1 {
+ "trait is"
+ } else {
+ "traits are"
+ },
+ one_of_them = if candidates.len() == 1 {
+ "it"
+ } else {
+ "one of them"
+ });
err.help(&msg[..]);
if candidates.len() > limit {
err.note(&format!("and {} others", candidates.len() - limit));
}
- return
+ return;
}
let type_is_local = self.type_derefs_to_local(span, rcvr_ty, rcvr_expr);
// this isn't perfect (that is, there are cases when
// implementing a trait would be legal but is rejected
// here).
- (type_is_local || info.def_id.is_local())
- && self.impl_or_trait_item(info.def_id, item_name).is_some()
+ (type_is_local || info.def_id.is_local()) &&
+ self.impl_or_trait_item(info.def_id, item_name).is_some()
})
.collect::<Vec<_>>();
// FIXME #21673 this help message could be tuned to the case
// of a type parameter: suggest adding a trait bound rather
// than implementing.
- let msg = format!(
- "items from traits can only be used if the trait is implemented and in scope; \
- the following {traits_define} an item `{name}`, \
- perhaps you need to implement {one_of_them}:",
- traits_define = if candidates.len() == 1 {"trait defines"} else {"traits define"},
- one_of_them = if candidates.len() == 1 {"it"} else {"one of them"},
- name = item_name);
+ let msg = format!("items from traits can only be used if the trait is implemented \
+ and in scope; the following {traits_define} an item `{name}`, \
+ perhaps you need to implement {one_of_them}:",
+ traits_define = if candidates.len() == 1 {
+ "trait defines"
+ } else {
+ "traits define"
+ },
+ one_of_them = if candidates.len() == 1 {
+ "it"
+ } else {
+ "one of them"
+ },
+ name = item_name);
err.help(&msg[..]);
fn type_derefs_to_local(&self,
span: Span,
rcvr_ty: Ty<'tcx>,
- rcvr_expr: Option<&hir::Expr>) -> bool {
+ rcvr_expr: Option<&hir::Expr>)
+ -> bool {
fn is_local(ty: Ty) -> bool {
match ty.sty {
ty::TyAdt(def, _) => def.did.is_local(),
// non-local (there are "edge" cases, e.g. (LocalType,), but
// the noise from these sort of types is usually just really
// annoying, rather than any sort of help).
- _ => false
+ _ => false,
}
}
impl TraitInfo {
fn new(def_id: DefId) -> TraitInfo {
- TraitInfo {
- def_id: def_id,
- }
+ TraitInfo { def_id: def_id }
}
}
impl PartialEq for TraitInfo {
}
impl Eq for TraitInfo {}
impl PartialOrd for TraitInfo {
- fn partial_cmp(&self, other: &TraitInfo) -> Option<Ordering> { Some(self.cmp(other)) }
+ fn partial_cmp(&self, other: &TraitInfo) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
}
impl Ord for TraitInfo {
fn cmp(&self, other: &TraitInfo) -> Ordering {
// Crate-local:
//
// meh.
- struct Visitor<'a, 'tcx:'a> {
+ struct Visitor<'a, 'tcx: 'a> {
map: &'a hir_map::Map<'tcx>,
traits: &'a mut AllTraitsVec,
}
}
ccx.tcx.map.krate().visit_all_items(&mut Visitor {
map: &ccx.tcx.map,
- traits: &mut traits
+ traits: &mut traits,
});
// Cross-crate:
fn handle_external_def(ccx: &CrateCtxt,
traits: &mut AllTraitsVec,
external_mods: &mut FnvHashSet<DefId>,
- def_id: DefId) {
- match ccx.tcx.sess.cstore.describe_def(def_id) {
- Some(Def::Trait(_)) => {
+ def: Def) {
+ let def_id = def.def_id();
+ match def {
+ Def::Trait(..) => {
traits.push(TraitInfo::new(def_id));
}
- Some(Def::Mod(_)) => {
+ Def::Mod(..) => {
if !external_mods.insert(def_id) {
return;
}
for child in ccx.tcx.sess.cstore.item_children(def_id) {
- handle_external_def(ccx, traits, external_mods, child.def_id)
+ handle_external_def(ccx, traits, external_mods, child.def)
}
}
_ => {}
}
}
for cnum in ccx.tcx.sess.cstore.crates() {
- handle_external_def(ccx, &mut traits, &mut external_mods, DefId {
+ let def_id = DefId {
krate: cnum,
- index: CRATE_DEF_INDEX
- });
+ index: CRATE_DEF_INDEX,
+ };
+ handle_external_def(ccx, &mut traits, &mut external_mods, Def::Mod(def_id));
}
*ccx.all_traits.borrow_mut() = Some(traits);
assert!(borrow.is_some());
AllTraits {
borrow: borrow,
- idx: 0
+ idx: 0,
}
}
pub struct AllTraits<'a> {
borrow: cell::Ref<'a, Option<AllTraitsVec>>,
- idx: usize
+ idx: usize,
}
impl<'a> Iterator for AllTraits<'a> {
use astconv::{AstConv, ast_region_to_region, PathParamMode};
use dep_graph::DepNode;
use fmt_macros::{Parser, Piece, Position};
-use hir::def::{Def, PathResolution};
+use hir::def::{Def, CtorKind, PathResolution};
use hir::def_id::{DefId, LOCAL_CRATE};
use hir::pat_util;
use rustc::infer::{self, InferCtxt, InferOk, TypeOrigin, TypeTrace, type_variable};
fn visit_ty(&mut self, t: &'tcx hir::Ty) {
match t.node {
- hir::TyFixedLengthVec(_, ref expr) => {
+ hir::TyArray(_, ref expr) => {
check_const_with_type(self.ccx, &expr, self.ccx.tcx.types.usize, expr.id);
}
_ => {}
// need to record the type for that node
fn visit_ty(&mut self, t: &'gcx hir::Ty) {
match t.node {
- hir::TyFixedLengthVec(ref ty, ref count_expr) => {
+ hir::TyArray(ref ty, ref count_expr) => {
self.visit_ty(&ty);
self.fcx.check_expr_with_hint(&count_expr, self.fcx.tcx.types.usize);
}
match self.locals.borrow().get(&nid) {
Some(&t) => t,
None => {
- span_err!(self.tcx.sess, span, E0513,
- "no type for local variable {}",
- nid);
+ struct_span_err!(self.tcx.sess, span, E0513,
+ "no type for local variable {}",
+ self.tcx.map.node_to_string(nid))
+ .span_label(span, &"no type for variable")
+ .emit();
self.tcx.types.err
}
}
.emit();
self.tcx().types.err
} else {
- let mut err = self.type_error_struct(expr.span, |actual| {
- format!("attempted access of field `{}` on type `{}`, \
- but no field with that name was found",
+ let mut err = self.type_error_struct(field.span, |actual| {
+ format!("no field `{}` on type `{}`",
field.node, actual)
}, expr_t);
match expr_t.sty {
ty::TyAdt(def, _) if !def.is_enum() => {
if let Some(suggested_field_name) =
Self::suggest_field_name(def.struct_variant(), field, vec![]) {
- err.span_help(field.span,
- &format!("did you mean `{}`?", suggested_field_name));
- };
+ err.span_label(field.span,
+ &format!("did you mean `{}`?", suggested_field_name));
+ } else {
+ err.span_label(field.span,
+ &format!("unknown field"));
+ };
}
ty::TyRawPtr(..) => {
err.note(&format!("`{0}` is a native pointer; perhaps you need to deref with \
while let Some((base_t, autoderefs)) = autoderef.next() {
let field = match base_t.sty {
ty::TyAdt(base_def, substs) if base_def.is_struct() => {
- tuple_like = base_def.struct_variant().kind == ty::VariantKind::Tuple;
+ tuple_like = base_def.struct_variant().ctor_kind == CtorKind::Fn;
if !tuple_like { continue }
debug!("tuple struct named {:?}", base_t);
Def::Struct(type_did) | Def::Union(type_did) => {
Some((type_did, self.tcx.expect_variant_def(def)))
}
- Def::TyAlias(did) => {
+ Def::TyAlias(did) | Def::AssociatedTy(did) => {
match self.tcx.opt_lookup_item_type(did).map(|scheme| &scheme.ty.sty) {
Some(&ty::TyAdt(adt, _)) if !adt.is_enum() => {
Some((did, adt.struct_variant()))
};
if let Some((def_id, variant)) = variant {
- if variant.kind == ty::VariantKind::Tuple &&
+ if variant.ctor_kind == CtorKind::Fn &&
!self.tcx.sess.features.borrow().relaxed_adts {
- emit_feature_err(&self.tcx.sess.parse_sess.span_diagnostic,
+ emit_feature_err(&self.tcx.sess.parse_sess,
"relaxed_adts", span, GateIssue::Language,
"tuple structs and variants in struct patterns are unstable");
}
self.check_method_call(expr, name, &args[..], &tps[..], expected, lvalue_pref)
}
hir::ExprCast(ref e, ref t) => {
- if let hir::TyFixedLengthVec(_, ref count_expr) = t.node {
+ if let hir::TyArray(_, ref count_expr) = t.node {
self.check_expr_with_hint(&count_expr, tcx.types.usize);
}
self.check_expr_eq_type(&e, typ);
typ
}
- hir::ExprVec(ref args) => {
+ hir::ExprArray(ref args) => {
let uty = expected.to_option(self).and_then(|uty| {
match uty.sty {
ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty),
//
// There are basically four cases to consider:
//
- // 1. Reference to a *type*, such as a struct or enum:
- //
- // mod a { struct Foo<T> { ... } }
- //
- // Because we don't allow types to be declared within one
- // another, a path that leads to a type will always look like
- // `a::b::Foo<T>` where `a` and `b` are modules. This implies
- // that only the final segment can have type parameters, and
- // they are located in the TypeSpace.
+ // 1. Reference to a constructor of enum variant or struct:
//
- // *Note:* Generally speaking, references to types don't
- // actually pass through this function, but rather the
- // `ast_ty_to_ty` function in `astconv`. However, in the case
- // of struct patterns (and maybe literals) we do invoke
- // `instantiate_value_path` to get the general type of an instance of
- // a struct. (In these cases, there are actually no type
- // parameters permitted at present, but perhaps we will allow
- // them in the future.)
- //
- // 1b. Reference to an enum variant or tuple-like struct:
- //
- // struct foo<T>(...)
- // enum E<T> { foo(...) }
+ // struct Foo<T>(...)
+ // enum E<T> { Foo(...) }
//
// In these cases, the parameters are declared in the type
// space.
//
- // 2. Reference to a *fn item*:
+ // 2. Reference to a fn item or a free constant:
//
// fn foo<T>() { }
//
// type parameters. However, in this case, those parameters are
// declared on a value, and hence are in the `FnSpace`.
//
- // 3. Reference to a *method*:
+ // 3. Reference to a method or an associated constant:
//
// impl<A> SomeStruct<A> {
// fn foo<B>(...)
// `SomeStruct::<A>`, contains parameters in TypeSpace, and the
// final segment, `foo::<B>` contains parameters in fn space.
//
- // 4. Reference to an *associated const*:
+ // 4. Reference to a local variable
//
- // impl<A> AnotherStruct<A> {
- // const FOO: B = BAR;
- // }
- //
- // The path in this case will look like
- // `a::b::AnotherStruct::<A>::FOO`, so the penultimate segment
- // only will have parameters in TypeSpace.
+ // Local variables can't have any type parameters.
//
// The first step then is to categorize the segments appropriately.
let mut type_segment = None;
let mut fn_segment = None;
match def {
- // Case 1 and 1b. Reference to a *type* or *enum variant*.
- Def::Struct(def_id) |
- Def::Union(def_id) |
- Def::Variant(def_id) |
- Def::Enum(def_id) |
- Def::TyAlias(def_id) |
- Def::AssociatedTy(def_id) |
- Def::Trait(def_id) => {
+ // Case 1. Reference to a struct/variant constructor.
+ Def::StructCtor(def_id, ..) |
+ Def::VariantCtor(def_id, ..) => {
// Everything but the final segment should have no
// parameters at all.
let mut generics = self.tcx.lookup_generics(def_id);
fn_segment = Some((segments.last().unwrap(), generics));
}
- // Other cases. Various nonsense that really shouldn't show up
- // here. If they do, an error will have been reported
- // elsewhere. (I hope)
- Def::Mod(..) |
- Def::PrimTy(..) |
- Def::SelfTy(..) |
- Def::TyParam(..) |
- Def::Local(..) |
- Def::Label(..) |
- Def::Upvar(..) |
- Def::Err => {}
+ // Case 4. Local variable, no generics.
+ Def::Local(..) | Def::Upvar(..) => {}
+
+ _ => bug!("unexpected definition: {:?}", def),
}
// In `<T as Trait<A, B>>::method`, `A` and `B` are mandatory, but
fn visit_ty(&mut self, t: &hir::Ty) {
match t.node {
- hir::TyFixedLengthVec(ref ty, ref count_expr) => {
+ hir::TyArray(ref ty, ref count_expr) => {
self.visit_ty(&ty);
write_ty_to_tcx(self.fcx.ccx, count_expr.id, self.tcx().types.usize);
}
use rustc::ty::subst::Subst;
use rustc::ty::{self, TyCtxt, TypeFoldable};
use rustc::traits::{self, Reveal};
-use rustc::ty::{ParameterEnvironment};
+use rustc::ty::ParameterEnvironment;
use rustc::ty::{Ty, TyBool, TyChar, TyError};
use rustc::ty::{TyParam, TyRawPtr};
use rustc::ty::{TyRef, TyAdt, TyTrait, TyNever, TyTuple};
mod overlap;
mod unsafety;
-struct CoherenceChecker<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
+struct CoherenceChecker<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
crate_context: &'a CrateCtxt<'a, 'gcx>,
inference_context: InferCtxt<'a, 'gcx, 'tcx>,
}
-struct CoherenceCheckVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
- cc: &'a CoherenceChecker<'a, 'gcx, 'tcx>
+struct CoherenceCheckVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx, 'v> intravisit::Visitor<'v> for CoherenceCheckVisitor<'a, 'gcx, 'tcx> {
}
impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> {
-
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(&self, span: Span, ty: Ty<'tcx>) -> Option<DefId> {
match ty.sty {
- TyAdt(def, _) => {
- Some(def.did)
- }
+ TyAdt(def, _) => Some(def.did),
- TyTrait(ref t) => {
- Some(t.principal.def_id())
- }
+ TyTrait(ref t) => Some(t.principal.def_id()),
- TyBox(_) => {
- self.inference_context.tcx.lang_items.owned_box()
- }
+ TyBox(_) => self.inference_context.tcx.lang_items.owned_box(),
- TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) |
- TyStr | TyArray(..) | TySlice(..) | TyFnDef(..) | TyFnPtr(_) |
- TyTuple(..) | TyParam(..) | TyError | TyNever |
- TyRawPtr(_) | TyRef(..) | TyProjection(..) => {
- None
- }
+ TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | TyStr | TyArray(..) |
+ TySlice(..) | TyFnDef(..) | TyFnPtr(_) | TyTuple(..) | TyParam(..) | TyError |
+ TyNever | TyRawPtr(_) | TyRef(..) | TyProjection(..) => None,
TyInfer(..) | TyClosure(..) | TyAnon(..) => {
// `ty` comes from a user declaration so we should only expect types
// that the user can type
- span_bug!(
- span,
- "coherence encountered unexpected type searching for base type: {}",
- ty);
+ span_bug!(span,
+ "coherence encountered unexpected type searching for base type: {}",
+ ty);
}
}
}
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// builds up the trait inheritance table.
- self.crate_context.tcx.visit_all_items_in_krate(
- DepNode::CoherenceCheckImpl,
- &mut CoherenceCheckVisitor { cc: self });
+ self.crate_context.tcx.visit_all_items_in_krate(DepNode::CoherenceCheckImpl,
+ &mut CoherenceCheckVisitor { cc: self });
// Populate the table of destructors. It might seem a bit strange to
// do this here, but it's actually the most convenient place, since
fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'gcx>, impl_def_id: DefId) {
debug!("add_trait_impl: impl_trait_ref={:?} impl_def_id={:?}",
- impl_trait_ref, impl_def_id);
+ impl_trait_ref,
+ impl_def_id);
let trait_def = self.crate_context.tcx.lookup_trait_def(impl_trait_ref.def_id);
trait_def.record_local_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref);
}
fn create_impl_from_item(&self, item: &Item) -> Vec<DefId> {
match item.node {
ItemImpl(.., ref impl_items) => {
- impl_items.iter().map(|impl_item| {
- self.crate_context.tcx.map.local_def_id(impl_item.id)
- }).collect()
+ impl_items.iter()
+ .map(|impl_item| self.crate_context.tcx.map.local_def_id(impl_item.id))
+ .collect()
}
_ => {
span_bug!(item.span, "can't convert a non-impl to an impl");
}
}
- //
// Destructors
//
fn populate_destructors(&self) {
let tcx = self.crate_context.tcx;
let drop_trait = match tcx.lang_items.drop_trait() {
- Some(id) => id, None => { return }
+ Some(id) => id,
+ None => return,
};
tcx.populate_implementations_for_trait_if_necessary(drop_trait);
let drop_trait = tcx.lookup_trait_def(drop_trait);
match tcx.map.find(impl_node_id) {
Some(hir_map::NodeItem(item)) => {
let span = match item.node {
- ItemImpl(.., ref ty, _) => {
- ty.span
- },
- _ => item.span
+ ItemImpl(.., ref ty, _) => ty.span,
+ _ => item.span,
};
- struct_span_err!(tcx.sess, span, E0120,
- "the Drop trait may only be implemented on structures")
+ struct_span_err!(tcx.sess,
+ span,
+ E0120,
+ "the Drop trait may only be implemented on \
+ structures")
.span_label(span,
&format!("implementing Drop requires a struct"))
.emit();
let copy_trait = tcx.lookup_trait_def(copy_trait);
copy_trait.for_each_impl(tcx, |impl_did| {
- debug!("check_implementations_of_copy: impl_did={:?}",
- impl_did);
+ debug!("check_implementations_of_copy: impl_did={:?}", impl_did);
let impl_node_id = if let Some(n) = tcx.map.as_local_node_id(impl_did) {
n
} else {
debug!("check_implementations_of_copy(): impl not in this \
crate");
- return
+ return;
};
let self_type = tcx.lookup_item_type(impl_did);
match param_env.can_type_implement_copy(tcx, self_type, span) {
Ok(()) => {}
Err(CopyImplementationError::InfrigingField(name)) => {
- struct_span_err!(tcx.sess, span, E0204,
- "the trait `Copy` may not be implemented for \
- this type")
- .span_label(span, &format!(
- "field `{}` does not implement `Copy`", name)
- )
- .emit()
-
+ struct_span_err!(tcx.sess,
+ span,
+ E0204,
+ "the trait `Copy` may not be implemented for this type")
+ .span_label(span, &format!("field `{}` does not implement `Copy`", name))
+ .emit()
}
Err(CopyImplementationError::InfrigingVariant(name)) => {
let item = tcx.map.expect_item(impl_node_id);
span
};
- struct_span_err!(tcx.sess, span, E0205,
+ struct_span_err!(tcx.sess,
+ span,
+ E0205,
"the trait `Copy` may not be implemented for this type")
- .span_label(span, &format!("variant `{}` does not implement `Copy`",
- name))
+ .span_label(span,
+ &format!("variant `{}` does not implement `Copy`", name))
.emit()
}
Err(CopyImplementationError::NotAnAdt) => {
span
};
- struct_span_err!(tcx.sess, span, E0206,
+ struct_span_err!(tcx.sess,
+ span,
+ E0206,
"the trait `Copy` may not be implemented for this type")
.span_label(span, &format!("type is not a structure or enumeration"))
.emit();
}
Err(CopyImplementationError::HasDestructor) => {
- struct_span_err!(tcx.sess, span, E0184,
- "the trait `Copy` may not be implemented for this type; \
- the type has a destructor")
+ struct_span_err!(tcx.sess,
+ span,
+ E0184,
+ "the trait `Copy` may not be implemented for this type; the \
+ type has a destructor")
.span_label(span, &format!("Copy not allowed on types with destructors"))
.emit();
}
let trait_ref = self.crate_context.tcx.impl_trait_ref(impl_did).unwrap();
let target = trait_ref.substs.type_at(1);
debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (bound)",
- source, target);
+ source,
+ target);
let span = tcx.map.span(impl_node_id);
let param_env = ParameterEnvironment::for_item(tcx, impl_node_id);
assert!(!source.has_escaping_regions());
debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (free)",
- source, target);
+ source,
+ target);
tcx.infer_ctxt(None, Some(param_env), Reveal::ExactMatch).enter(|infcx| {
let origin = TypeOrigin::Misc(span);
- let check_mutbl = |mt_a: ty::TypeAndMut<'gcx>, mt_b: ty::TypeAndMut<'gcx>,
+ let check_mutbl = |mt_a: ty::TypeAndMut<'gcx>,
+ mt_b: ty::TypeAndMut<'gcx>,
mk_ptr: &Fn(Ty<'gcx>) -> Ty<'gcx>| {
if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) {
- infcx.report_mismatched_types(origin, mk_ptr(mt_b.ty),
- target, ty::error::TypeError::Mutability);
+ infcx.report_mismatched_types(origin,
+ mk_ptr(mt_b.ty),
+ target,
+ ty::error::TypeError::Mutability);
}
(mt_a.ty, mt_b.ty, unsize_trait, None)
};
}
(&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b))
- if def_a.is_struct() && def_b.is_struct() => {
+ if def_a.is_struct() && def_b.is_struct() => {
if def_a != def_b {
let source_path = tcx.item_path_str(def_a.did);
let target_path = tcx.item_path_str(def_b.did);
- span_err!(tcx.sess, span, E0377,
+ span_err!(tcx.sess,
+ span,
+ E0377,
"the trait `CoerceUnsized` may only be implemented \
for a coercion between structures with the same \
definition; expected {}, found {}",
- source_path, target_path);
+ source_path,
+ target_path);
return;
}
let fields = &def_a.struct_variant().fields;
- let diff_fields = fields.iter().enumerate().filter_map(|(i, f)| {
- let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
-
- if f.unsubst_ty().is_phantom_data() {
- // Ignore PhantomData fields
- None
- } else if infcx.sub_types(false, origin, b, a).is_ok() {
- // Ignore fields that aren't significantly changed
- None
- } else {
- // Collect up all fields that were significantly changed
- // i.e. those that contain T in coerce_unsized T -> U
- Some((i, a, b))
- }
- }).collect::<Vec<_>>();
+ let diff_fields = fields.iter()
+ .enumerate()
+ .filter_map(|(i, f)| {
+ let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
+
+ if f.unsubst_ty().is_phantom_data() {
+ // Ignore PhantomData fields
+ None
+ } else if infcx.sub_types(false, origin, b, a).is_ok() {
+ // Ignore fields that aren't significantly changed
+ None
+ } else {
+ // Collect up all fields that were significantly changed
+ // i.e. those that contain T in coerce_unsized T -> U
+ Some((i, a, b))
+ }
+ })
+ .collect::<Vec<_>>();
if diff_fields.is_empty() {
- span_err!(tcx.sess, span, E0374,
+ span_err!(tcx.sess,
+ span,
+ E0374,
"the trait `CoerceUnsized` may only be implemented \
for a coercion between structures with one field \
being coerced, none found");
tcx.map.span(impl_node_id)
};
- let mut err = struct_span_err!(tcx.sess, span, E0375,
- "implementing the trait `CoerceUnsized` \
- requires multiple coercions");
+ let mut err = struct_span_err!(tcx.sess,
+ span,
+ E0375,
+ "implementing the trait \
+ `CoerceUnsized` requires multiple \
+ coercions");
err.note("`CoerceUnsized` may only be implemented for \
a coercion between structures with one field being coerced");
err.note(&format!("currently, {} fields need coercions: {}",
- diff_fields.len(),
- diff_fields.iter().map(|&(i, a, b)| {
- format!("{} ({} to {})", fields[i].name, a, b)
- }).collect::<Vec<_>>().join(", ") ));
+ diff_fields.len(),
+ diff_fields.iter()
+ .map(|&(i, a, b)| {
+ format!("{} ({} to {})", fields[i].name, a, b)
+ })
+ .collect::<Vec<_>>()
+ .join(", ")));
err.span_label(span, &format!("requires multiple coercions"));
err.emit();
return;
}
_ => {
- span_err!(tcx.sess, span, E0376,
+ span_err!(tcx.sess,
+ span,
+ E0376,
"the trait `CoerceUnsized` may only be implemented \
for a coercion between structures");
return;
// Register an obligation for `A: Trait<B>`.
let cause = traits::ObligationCause::misc(span, impl_node_id);
- let predicate = tcx.predicate_for_trait_def(cause, trait_def_id, 0,
- source, &[target]);
+ let predicate =
+ tcx.predicate_for_trait_def(cause, trait_def_id, 0, source, &[target]);
fulfill_cx.register_predicate_obligation(&infcx, predicate);
// Check that all transitive obligations are satisfied.
// Finally, resolve all regions.
let mut free_regions = FreeRegionMap::new();
- free_regions.relate_free_regions_from_predicates(
- &infcx.parameter_environment.caller_bounds);
+ free_regions.relate_free_regions_from_predicates(&infcx.parameter_environment
+ .caller_bounds);
infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id);
if let Some(kind) = kind {
fn enforce_trait_manually_implementable(tcx: TyCtxt, sp: Span, trait_def_id: DefId) {
if tcx.sess.features.borrow().unboxed_closures {
// the feature gate allows all of them
- return
+ return;
}
let did = Some(trait_def_id);
let li = &tcx.lang_items;
} else if did == li.fn_once_trait() {
"FnOnce"
} else {
- return // everything OK
+ return; // everything OK
};
let mut err = struct_span_err!(tcx.sess,
sp,
E0183,
"manual implementations of `{}` are experimental",
trait_name);
- help!(&mut err, "add `#![feature(unboxed_closures)]` to the crate attributes to enable");
+ help!(&mut err,
+ "add `#![feature(unboxed_closures)]` to the crate attributes to enable");
err.emit();
}
let _task = ccx.tcx.dep_graph.in_task(DepNode::Coherence);
ccx.tcx.infer_ctxt(None, None, Reveal::ExactMatch).enter(|infcx| {
CoherenceChecker {
- crate_context: ccx,
- inference_context: infcx,
- }.check();
+ crate_context: ccx,
+ inference_context: infcx,
+ }
+ .check();
});
unsafety::check(ccx.tcx);
orphan::check(ccx.tcx);
tcx.visit_all_items_in_krate(DepNode::CoherenceOrphanCheck, &mut orphan);
}
-struct OrphanChecker<'cx, 'tcx:'cx> {
- tcx: TyCtxt<'cx, 'tcx, 'tcx>
+struct OrphanChecker<'cx, 'tcx: 'cx> {
+ tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> {
fn check_def_id(&self, item: &hir::Item, def_id: DefId) {
if def_id.krate != LOCAL_CRATE {
- struct_span_err!(self.tcx.sess, item.span, E0116,
- "cannot define inherent `impl` for a type outside of the \
- crate where the type is defined")
- .span_label(item.span, &format!("impl for type defined outside of crate."))
+ struct_span_err!(self.tcx.sess,
+ item.span,
+ E0116,
+ "cannot define inherent `impl` for a type outside of the crate \
+ where the type is defined")
+ .span_label(item.span,
+ &format!("impl for type defined outside of crate."))
.note("define and implement a trait or new type instead")
.emit();
}
ty: &str,
span: Span) {
match lang_def_id {
- Some(lang_def_id) if lang_def_id == impl_def_id => { /* OK */ },
+ Some(lang_def_id) if lang_def_id == impl_def_id => {
+ // OK
+ }
_ => {
- struct_span_err!(self.tcx.sess, span, E0390,
- "only a single inherent implementation marked with `#[lang = \"{}\"]` \
- is allowed for the `{}` primitive", lang, ty)
+ struct_span_err!(self.tcx.sess,
+ span,
+ E0390,
+ "only a single inherent implementation marked with `#[lang = \
+ \"{}\"]` is allowed for the `{}` primitive",
+ lang,
+ ty)
.span_help(span, "consider using a trait to implement these methods")
.emit();
}
return;
}
_ => {
- struct_span_err!(self.tcx.sess, ty.span, E0118,
+ struct_span_err!(self.tcx.sess,
+ ty.span,
+ E0118,
"no base type found for inherent implementation")
- .span_label(ty.span, &format!("impl requires a base type"))
- .note(&format!("either implement a trait on it or create a newtype \
- to wrap it instead"))
- .emit();
+ .span_label(ty.span, &format!("impl requires a base type"))
+ .note(&format!("either implement a trait on it or create a newtype \
+ to wrap it instead"))
+ .emit();
return;
}
}
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
let trait_def_id = trait_ref.def_id;
match traits::orphan_check(self.tcx, def_id) {
- Ok(()) => { }
+ Ok(()) => {}
Err(traits::OrphanCheckErr::NoLocalInputType) => {
- struct_span_err!(
- self.tcx.sess, item.span, E0117,
- "only traits defined in the current crate can be \
- implemented for arbitrary types")
- .span_label(item.span, &format!("impl doesn't use types inside crate"))
- .note(&format!("the impl does not reference any \
- types defined in this crate"))
- .emit();
+ struct_span_err!(self.tcx.sess,
+ item.span,
+ E0117,
+ "only traits defined in the current crate can be \
+ implemented for arbitrary types")
+ .span_label(item.span, &format!("impl doesn't use types inside crate"))
+ .note(&format!("the impl does not reference any types defined in \
+ this crate"))
+ .emit();
return;
}
Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => {
- span_err!(self.tcx.sess, item.span, E0210,
+ span_err!(self.tcx.sess,
+ item.span,
+ E0210,
"type parameter `{}` must be used as the type parameter for \
some local type (e.g. `MyStruct<T>`); only traits defined in \
the current crate can be implemented for a type parameter",
trait_ref,
trait_def_id,
self.tcx.trait_has_default_impl(trait_def_id));
- if
- self.tcx.trait_has_default_impl(trait_def_id) &&
- trait_def_id.krate != LOCAL_CRATE
- {
+ if self.tcx.trait_has_default_impl(trait_def_id) &&
+ trait_def_id.krate != LOCAL_CRATE {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match self_ty.sty {
ty::TyAdt(self_def, _) => Some(self_def.did),
if self_def_id.is_local() {
None
} else {
- Some(format!(
- "cross-crate traits with a default impl, like `{}`, \
- can only be implemented for a struct/enum type \
- defined in the current crate",
- self.tcx.item_path_str(trait_def_id)))
+ Some(format!("cross-crate traits with a default impl, like `{}`, \
+ can only be implemented for a struct/enum type \
+ defined in the current crate",
+ self.tcx.item_path_str(trait_def_id)))
}
}
_ => {
- Some(format!(
- "cross-crate traits with a default impl, like `{}`, \
- can only be implemented for a struct/enum type, \
- not `{}`",
- self.tcx.item_path_str(trait_def_id),
- self_ty))
+ Some(format!("cross-crate traits with a default impl, like `{}`, can \
+ only be implemented for a struct/enum type, not `{}`",
+ self.tcx.item_path_str(trait_def_id),
+ self_ty))
}
};
// Disallow *all* explicit impls of `Sized` and `Unsize` for now.
if Some(trait_def_id) == self.tcx.lang_items.sized_trait() {
- struct_span_err!(self.tcx.sess, item.span, E0322,
- "explicit impls for the `Sized` trait are not permitted")
+ struct_span_err!(self.tcx.sess,
+ item.span,
+ E0322,
+ "explicit impls for the `Sized` trait are not permitted")
.span_label(item.span, &format!("impl of 'Sized' not allowed"))
.emit();
return;
}
if Some(trait_def_id) == self.tcx.lang_items.unsize_trait() {
- span_err!(self.tcx.sess, item.span, E0328,
+ span_err!(self.tcx.sess,
+ item.span,
+ E0328,
"explicit impls for the `Unsize` trait are not permitted");
return;
}
self.tcx.map.node_to_string(item.id));
let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap();
if trait_ref.def_id.krate != LOCAL_CRATE {
- struct_span_err!(self.tcx.sess, item_trait_ref.path.span, E0318,
- "cannot create default implementations for traits outside the \
- crate they're defined in; define a new trait instead")
+ struct_span_err!(self.tcx.sess,
+ item_trait_ref.path.span,
+ E0318,
+ "cannot create default implementations for traits outside \
+ the crate they're defined in; define a new trait instead")
.span_label(item_trait_ref.path.span,
&format!("`{}` trait not defined in this crate",
item_trait_ref.path))
}
}
-impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OrphanChecker<'cx, 'tcx> {
+impl<'cx, 'tcx, 'v> intravisit::Visitor<'v> for OrphanChecker<'cx, 'tcx> {
fn visit_item(&mut self, item: &hir::Item) {
self.check_item(item);
}
use lint;
pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
- let mut overlap = OverlapChecker { tcx: tcx,
- default_impls: DefIdMap() };
+ let mut overlap = OverlapChecker {
+ tcx: tcx,
+ default_impls: DefIdMap(),
+ };
// this secondary walk specifically checks for some other cases,
// like defaulted traits, for which additional overlap rules exist
tcx.visit_all_items_in_krate(DepNode::CoherenceOverlapCheckSpecial, &mut overlap);
}
-struct OverlapChecker<'cx, 'tcx:'cx> {
+struct OverlapChecker<'cx, 'tcx: 'cx> {
tcx: TyCtxt<'cx, 'tcx, 'tcx>,
// maps from a trait def-id to an impl id
impl<'cx, 'tcx> OverlapChecker<'cx, 'tcx> {
fn check_for_common_items_in_impls(&self, impl1: DefId, impl2: DefId) {
#[derive(Copy, Clone, PartialEq)]
- enum Namespace { Type, Value }
+ enum Namespace {
+ Type,
+ Value,
+ }
fn name_and_namespace<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
- -> (ast::Name, Namespace)
- {
+ -> (ast::Name, Namespace) {
let item = tcx.impl_or_trait_item(def_id);
- (item.name(), match item {
- ty::TypeTraitItem(..) => Namespace::Type,
- ty::ConstTraitItem(..) => Namespace::Value,
- ty::MethodTraitItem(..) => Namespace::Value,
- })
+ (item.name(),
+ match item {
+ ty::TypeTraitItem(..) => Namespace::Type,
+ ty::ConstTraitItem(..) => Namespace::Value,
+ ty::MethodTraitItem(..) => Namespace::Value,
+ })
}
let impl_items = self.tcx.impl_or_trait_item_def_ids.borrow();
let inherent_impls = self.tcx.inherent_impls.borrow();
let impls = match inherent_impls.get(&ty_def_id) {
Some(impls) => impls,
- None => return
+ None => return,
};
for (i, &impl1_def_id) in impls.iter().enumerate() {
- for &impl2_def_id in &impls[(i+1)..] {
+ for &impl2_def_id in &impls[(i + 1)..] {
self.tcx.infer_ctxt(None, None, Reveal::ExactMatch).enter(|infcx| {
if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() {
self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id)
}
}
-impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> {
+impl<'cx, 'tcx, 'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> {
fn visit_item(&mut self, item: &'v hir::Item) {
match item.node {
- hir::ItemEnum(..) | hir::ItemStruct(..) | hir::ItemUnion(..) => {
+ hir::ItemEnum(..) |
+ hir::ItemStruct(..) |
+ hir::ItemUnion(..) => {
let type_def_id = self.tcx.map.local_def_id(item.id);
self.check_for_overlapping_inherent_impls(type_def_id);
}
let prev_default_impl = self.default_impls.insert(trait_ref.def_id, item.id);
if let Some(prev_id) = prev_default_impl {
- let mut err = struct_span_err!(
- self.tcx.sess,
- self.tcx.span_of_impl(impl_def_id).unwrap(), E0521,
- "redundant default implementations of trait `{}`:",
- trait_ref);
- err.span_note(self.tcx.span_of_impl(self.tcx.map.local_def_id(prev_id))
+ let mut err = struct_span_err!(self.tcx.sess,
+ self.tcx.span_of_impl(impl_def_id).unwrap(),
+ E0521,
+ "redundant default implementations of trait \
+ `{}`:",
+ trait_ref);
+ err.span_note(self.tcx
+ .span_of_impl(self.tcx.map.local_def_id(prev_id))
.unwrap(),
"redundant implementation is here:");
err.emit();
let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap();
let trait_def_id = trait_ref.def_id;
- let _task = self.tcx.dep_graph.in_task(
- DepNode::CoherenceOverlapCheck(trait_def_id));
+ let _task =
+ self.tcx.dep_graph.in_task(DepNode::CoherenceOverlapCheck(trait_def_id));
let def = self.tcx.lookup_trait_def(trait_def_id);
// insertion failed due to overlap
if let Err(overlap) = insert_result {
- let mut err = struct_span_err!(
- self.tcx.sess, self.tcx.span_of_impl(impl_def_id).unwrap(), E0119,
- "conflicting implementations of trait `{}`{}:",
- overlap.trait_desc,
- overlap.self_desc.clone().map_or(String::new(),
- |ty| format!(" for type `{}`", ty)));
+ let mut err = struct_span_err!(self.tcx.sess,
+ self.tcx.span_of_impl(impl_def_id).unwrap(),
+ E0119,
+ "conflicting implementations of trait `{}`{}:",
+ overlap.trait_desc,
+ overlap.self_desc.clone().map_or(String::new(),
+ |ty| {
+ format!(" for type `{}`", ty)
+ }));
match self.tcx.span_of_impl(overlap.with_impl) {
Ok(span) => {
- err.span_label(span,
- &format!("first implementation here"));
+ err.span_label(span, &format!("first implementation here"));
err.span_label(self.tcx.span_of_impl(impl_def_id).unwrap(),
&format!("conflicting implementation{}",
overlap.self_desc
|ty| format!(" for `{}`", ty))));
}
Err(cname) => {
- err.note(&format!("conflicting implementation in crate `{}`",
- cname));
+ err.note(&format!("conflicting implementation in crate `{}`", cname));
}
}
let mut supertrait_def_ids =
traits::supertrait_def_ids(self.tcx, data.principal.def_id());
if supertrait_def_ids.any(|d| d == trait_def_id) {
- span_err!(self.tcx.sess, item.span, E0371,
+ span_err!(self.tcx.sess,
+ item.span,
+ E0371,
"the object type `{}` automatically \
implements the trait `{}`",
trait_ref.self_ty(),
tcx.map.krate().visit_all_items(&mut orphan);
}
-struct UnsafetyChecker<'cx, 'tcx:'cx> {
- tcx: TyCtxt<'cx, 'tcx, 'tcx>
+struct UnsafetyChecker<'cx, 'tcx: 'cx> {
+ tcx: TyCtxt<'cx, 'tcx, 'tcx>,
}
impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> {
- fn check_unsafety_coherence(&mut self, item: &'v hir::Item,
+ fn check_unsafety_coherence(&mut self,
+ item: &'v hir::Item,
unsafety: hir::Unsafety,
polarity: hir::ImplPolarity) {
match self.tcx.impl_trait_ref(self.tcx.map.local_def_id(item.id)) {
None => {
// Inherent impl.
match unsafety {
- hir::Unsafety::Normal => { /* OK */ }
+ hir::Unsafety::Normal => {
+ // OK
+ }
hir::Unsafety::Unsafe => {
- span_err!(self.tcx.sess, item.span, E0197,
+ span_err!(self.tcx.sess,
+ item.span,
+ E0197,
"inherent impls cannot be declared as unsafe");
}
}
Some(trait_ref) => {
let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id);
match (trait_def.unsafety, unsafety, polarity) {
- (hir::Unsafety::Unsafe,
- hir::Unsafety::Unsafe, hir::ImplPolarity::Negative) => {
- span_err!(self.tcx.sess, item.span, E0198,
+ (hir::Unsafety::Unsafe, hir::Unsafety::Unsafe, hir::ImplPolarity::Negative) => {
+ span_err!(self.tcx.sess,
+ item.span,
+ E0198,
"negative implementations are not unsafe");
}
(hir::Unsafety::Normal, hir::Unsafety::Unsafe, _) => {
- span_err!(self.tcx.sess, item.span, E0199,
+ span_err!(self.tcx.sess,
+ item.span,
+ E0199,
"implementing the trait `{}` is not unsafe",
trait_ref);
}
- (hir::Unsafety::Unsafe,
- hir::Unsafety::Normal, hir::ImplPolarity::Positive) => {
- span_err!(self.tcx.sess, item.span, E0200,
+ (hir::Unsafety::Unsafe, hir::Unsafety::Normal, hir::ImplPolarity::Positive) => {
+ span_err!(self.tcx.sess,
+ item.span,
+ E0200,
"the trait `{}` requires an `unsafe impl` declaration",
trait_ref);
}
- (hir::Unsafety::Unsafe,
- hir::Unsafety::Normal, hir::ImplPolarity::Negative) |
- (hir::Unsafety::Unsafe,
- hir::Unsafety::Unsafe, hir::ImplPolarity::Positive) |
+ (hir::Unsafety::Unsafe, hir::Unsafety::Normal, hir::ImplPolarity::Negative) |
+ (hir::Unsafety::Unsafe, hir::Unsafety::Unsafe, hir::ImplPolarity::Positive) |
(hir::Unsafety::Normal, hir::Unsafety::Normal, _) => {
- /* OK */
+ // OK
}
}
}
}
}
-impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for UnsafetyChecker<'cx, 'tcx> {
+impl<'cx, 'tcx, 'v> intravisit::Visitor<'v> for UnsafetyChecker<'cx, 'tcx> {
fn visit_item(&mut self, item: &'v hir::Item) {
match item.node {
hir::ItemDefaultImpl(unsafety, _) => {
hir::ItemImpl(unsafety, polarity, ..) => {
self.check_unsafety_coherence(item, unsafety, polarity);
}
- _ => { }
+ _ => {}
}
}
}
use rustc::ty::subst::Substs;
use rustc::ty::{ToPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer};
use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt, TypeScheme};
-use rustc::ty::{VariantKind};
use rustc::ty::util::IntTypeExt;
use rscope::*;
use rustc::dep_graph::DepNode;
use syntax_pos::Span;
use rustc::hir::{self, intravisit, map as hir_map, print as pprust};
-use rustc::hir::def::Def;
+use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
///////////////////////////////////////////////////////////////////////////
let tcx = ccx.tcx;
let def_id = tcx.map.local_def_id(ctor_id);
generics_of_def_id(ccx, def_id);
- let ctor_ty = match variant.kind {
- VariantKind::Unit | VariantKind::Struct => scheme.ty,
- VariantKind::Tuple => {
+ let ctor_ty = match variant.ctor_kind {
+ CtorKind::Fictive | CtorKind::Const => scheme.ty,
+ CtorKind::Fn => {
let inputs: Vec<_> =
variant.fields
.iter()
name: name,
disr_val: disr_val,
fields: fields,
- kind: VariantKind::from_variant_data(def),
+ ctor_kind: CtorKind::from_hir(def),
}
}
} else if let Some(disr) = repr_type.disr_incr(tcx, prev_disr) {
Some(disr)
} else {
- span_err!(tcx.sess, v.span, E0370,
- "enum discriminant overflowed on value after {}; \
- set explicitly via {} = {} if that is desired outcome",
- prev_disr.unwrap(), v.node.name, wrapped_disr);
+ struct_span_err!(tcx.sess, v.span, E0370,
+ "enum discriminant overflowed")
+ .span_label(v.span, &format!("overflowed on value after {}", prev_disr.unwrap()))
+ .note(&format!("explicitly set `{} = {}` if that is desired outcome",
+ v.node.name, wrapped_disr))
+ .emit();
None
}.unwrap_or(wrapped_disr);
prev_disr = Some(disr);
```
"##,
+E0513: r##"
+The type of the variable couldn't be determined.
+
+Erroneous code example:
+
+```compile_fail,E0513
+use std::mem;
+
+unsafe {
+ let size = mem::size_of::<u32>();
+ mem::transmute_copy::<u32, [u8; size]>(&8_8);
+ // error: no type for local variable
+}
+```
+
+To fix this error, please use a constant size instead of `size`. To make
+this error more obvious, you could run:
+
+```compile_fail,E0080
+use std::mem;
+
+unsafe {
+ mem::transmute_copy::<u32, [u8; mem::size_of::<u32>()]>(&8_8);
+ // error: constant evaluation error
+}
+```
+
+So now, you can fix your code by setting the size directly:
+
+```
+use std::mem;
+
+unsafe {
+ mem::transmute_copy::<u32, [u8; 4]>(&8_8);
+ // `u32` is 4 bytes so we replace the `mem::size_of` call with its size
+}
+```
+"##,
+
E0516: r##"
The `typeof` keyword is currently reserved but unimplemented.
Erroneous code example:
E0399, // trait items need to be implemented because the associated
// type `{}` was overridden
E0436, // functional record update requires a struct
- E0513, // no type for local variable ..
E0521, // redundant default implementations of trait
E0533, // `{}` does not name a unit variant, unit struct or a constant
E0562, // `impl Trait` not allowed outside of function
+## Variance of type and lifetime parameters
+
This file infers the variance of type and lifetime parameters. The
algorithm is taken from Section 4 of the paper "Taming the Wildcards:
Combining Definition- and Use-Site Variance" published in PLDI'11 and
variance of C must be at most covariant *and* contravariant. All of these
results are based on a variance lattice defined as follows:
- * Top (bivariant)
- - +
- o Bottom (invariant)
+ * Top (bivariant)
+ - +
+ o Bottom (invariant)
-Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the
+Based on this lattice, the solution `V(A)=+`, `V(B)=-`, `V(C)=o` is the
optimal solution. Note that there is always a naive solution which
just declares all variables to be invariant.
V(X) <= Term
Term := + | - | * | o | V(X) | Term x Term
-Here the notation V(X) indicates the variance of a type/region
+Here the notation `V(X)` indicates the variance of a type/region
parameter `X` with respect to its defining class. `Term x Term`
represents the "variance transform" as defined in the paper:
- If the variance of a type variable `X` in type expression `E` is `V2`
+> If the variance of a type variable `X` in type expression `E` is `V2`
and the definition-site variance of the [corresponding] type parameter
of a class `C` is `V1`, then the variance of `X` in the type expression
`C<E>` is `V3 = V1.xform(V2)`.
inputs. To see why this makes sense, consider what subtyping for a
trait reference means:
- <T as Trait> <: <U as Trait>
+ <T as Trait> <: <U as Trait>
means that if I know that `T as Trait`, I also know that `U as
Trait`. Moreover, if you think of it as dictionary passing style,
Now if I have `<&'static () as Identity>::Out`, this can be
validly derived as `&'a ()` for any `'a`:
- <&'a () as Identity> <: <&'static () as Identity>
- if &'static () < : &'a () -- Identity is contravariant in Self
- if 'static : 'a -- Subtyping rules for relations
+ <&'a () as Identity> <: <&'static () as Identity>
+ if &'static () <: &'a () -- Identity is contravariant in Self
+ if 'static : 'a -- Subtyping rules for relations
This change, on the other hand, means that `<&'static () as Identity>::Out` is
always `&'static ()` (which might then be upcast to `&'a ()`,
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::char::{MAX, from_digit, from_u32, from_u32_unchecked};
#[stable(feature = "rust1", since = "1.0.0")]
-pub use core::char::{EncodeUtf16, EncodeUtf8, EscapeDebug, EscapeDefault, EscapeUnicode};
+pub use core::char::{EscapeDebug, EscapeDefault, EscapeUnicode};
// unstable reexports
#[unstable(feature = "try_from", issue = "33417")]
C::len_utf16(self)
}
- /// Returns an iterator over the bytes of this character as UTF-8.
+ /// Encodes this character as UTF-8 into the provided byte buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
///
- /// The returned iterator also has an `as_slice()` method to view the
- /// encoded bytes as a byte slice.
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length four is large enough to encode any `char`.
///
/// # Examples
///
+ /// In both of these examples, 'ß' takes two bytes to encode.
+ ///
/// ```
/// #![feature(unicode)]
///
- /// let iterator = 'ß'.encode_utf8();
- /// assert_eq!(iterator.as_slice(), [0xc3, 0x9f]);
+ /// let mut b = [0; 2];
///
- /// for (i, byte) in iterator.enumerate() {
- /// println!("byte {}: {:x}", i, byte);
- /// }
+ /// let result = 'ß'.encode_utf8(&mut b);
+ ///
+ /// assert_eq!(result, "ß");
+ ///
+ /// assert_eq!(result.len(), 2);
+ /// ```
+ ///
+ /// A buffer that's too small:
+ ///
+ /// ```
+ /// #![feature(unicode)]
+ /// use std::thread;
+ ///
+ /// let result = thread::spawn(|| {
+ /// let mut b = [0; 1];
+ ///
+ /// // this panics
+ /// 'ß'.encode_utf8(&mut b);
+ /// }).join();
+ ///
+ /// assert!(result.is_err());
/// ```
- #[unstable(feature = "unicode", issue = "27784")]
+ #[unstable(feature = "unicode",
+ reason = "pending decision about Iterator/Writer/Reader",
+ issue = "27784")]
#[inline]
- pub fn encode_utf8(self) -> EncodeUtf8 {
- C::encode_utf8(self)
+ pub fn encode_utf8(self, dst: &mut [u8]) -> &mut str {
+ C::encode_utf8(self, dst)
}
- /// Returns an iterator over the `u16` entries of this character as UTF-16.
+ /// Encodes this character as UTF-16 into the provided `u16` buffer,
+ /// and then returns the subslice of the buffer that contains the encoded character.
///
- /// The returned iterator also has an `as_slice()` method to view the
- /// encoded form as a slice.
+ /// # Panics
+ ///
+ /// Panics if the buffer is not large enough.
+ /// A buffer of length 2 is large enough to encode any `char`.
///
/// # Examples
///
+ /// In both of these examples, '𝕊' takes two `u16`s to encode.
+ ///
/// ```
/// #![feature(unicode)]
///
- /// let iterator = '𝕊'.encode_utf16();
- /// assert_eq!(iterator.as_slice(), [0xd835, 0xdd4a]);
+ /// let mut b = [0; 2];
///
- /// for (i, val) in iterator.enumerate() {
- /// println!("entry {}: {:x}", i, val);
- /// }
+ /// let result = '𝕊'.encode_utf16(&mut b);
+ ///
+ /// assert_eq!(result.len(), 2);
/// ```
- #[unstable(feature = "unicode", issue = "27784")]
+ ///
+ /// A buffer that's too small:
+ ///
+ /// ```
+ /// #![feature(unicode)]
+ /// use std::thread;
+ ///
+ /// let result = thread::spawn(|| {
+ /// let mut b = [0; 1];
+ ///
+ /// // this panics
+ /// '𝕊'.encode_utf16(&mut b);
+ /// }).join();
+ ///
+ /// assert!(result.is_err());
+ /// ```
+ #[unstable(feature = "unicode",
+ reason = "pending decision about Iterator/Writer/Reader",
+ issue = "27784")]
#[inline]
- pub fn encode_utf16(self) -> EncodeUtf16 {
- C::encode_utf16(self)
+ pub fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] {
+ C::encode_utf16(self, dst)
}
/// Returns true if this `char` is an alphabetic code point, and false if not.
return Some(tmp);
}
+ let mut buf = [0; 2];
self.chars.next().map(|ch| {
- let n = CharExt::encode_utf16(ch);
- let n = n.as_slice();
- if n.len() == 2 {
- self.extra = n[1];
+ let n = CharExt::encode_utf16(ch, &mut buf).len();
+ if n == 2 {
+ self.extra = buf[1];
}
- n[0]
+ buf[0]
})
}
impl<I> FusedIterator for Utf16Encoder<I>
where I: FusedIterator<Item = char> {}
+#[stable(feature = "split_whitespace", since = "1.1.0")]
impl<'a> Iterator for SplitWhitespace<'a> {
type Item = &'a str;
self.inner.next()
}
}
+
+#[stable(feature = "split_whitespace", since = "1.1.0")]
impl<'a> DoubleEndedIterator for SplitWhitespace<'a> {
fn next_back(&mut self) -> Option<&'a str> {
self.inner.next_back()
use syntax::ast;
use rustc::hir;
-use rustc::hir::def::Def;
+use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
-use rustc::hir::map::DefPathData;
use rustc::hir::print as pprust;
-use rustc::ty::{self, TyCtxt, VariantKind};
+use rustc::ty::{self, TyCtxt};
use rustc::util::nodemap::FnvHashSet;
use rustc_const_eval::lookup_const_by_id;
let did = def.def_id();
let inner = match def {
Def::Trait(did) => {
- record_extern_fqn(cx, did, clean::TypeTrait);
+ record_extern_fqn(cx, did, clean::TypeKind::Trait);
ret.extend(build_impls(cx, tcx, did));
clean::TraitItem(build_external_trait(cx, tcx, did))
}
Def::Fn(did) => {
- record_extern_fqn(cx, did, clean::TypeFunction);
+ record_extern_fqn(cx, did, clean::TypeKind::Function);
clean::FunctionItem(build_external_function(cx, tcx, did))
}
- Def::Struct(did)
- // If this is a struct constructor, we skip it
- if tcx.def_key(did).disambiguated_data.data != DefPathData::StructCtor => {
- record_extern_fqn(cx, did, clean::TypeStruct);
+ Def::Struct(did) => {
+ record_extern_fqn(cx, did, clean::TypeKind::Struct);
ret.extend(build_impls(cx, tcx, did));
clean::StructItem(build_struct(cx, tcx, did))
}
Def::Union(did) => {
- record_extern_fqn(cx, did, clean::TypeUnion);
+ record_extern_fqn(cx, did, clean::TypeKind::Union);
ret.extend(build_impls(cx, tcx, did));
clean::UnionItem(build_union(cx, tcx, did))
}
Def::TyAlias(did) => {
- record_extern_fqn(cx, did, clean::TypeTypedef);
+ record_extern_fqn(cx, did, clean::TypeKind::Typedef);
ret.extend(build_impls(cx, tcx, did));
clean::TypedefItem(build_type_alias(cx, tcx, did), false)
}
Def::Enum(did) => {
- record_extern_fqn(cx, did, clean::TypeEnum);
+ record_extern_fqn(cx, did, clean::TypeKind::Enum);
ret.extend(build_impls(cx, tcx, did));
clean::EnumItem(build_enum(cx, tcx, did))
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
- Def::Variant(..) => return Some(Vec::new()),
+ // Similarly, consider that struct type is reexported next to its constructor.
+ Def::Variant(..) |
+ Def::VariantCtor(..) |
+ Def::StructCtor(..) => return Some(Vec::new()),
Def::Mod(did) => {
- record_extern_fqn(cx, did, clean::TypeModule);
+ record_extern_fqn(cx, did, clean::TypeKind::Module);
clean::ModuleItem(build_module(cx, tcx, did))
}
Def::Static(did, mtbl) => {
- record_extern_fqn(cx, did, clean::TypeStatic);
+ record_extern_fqn(cx, did, clean::TypeKind::Static);
clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
- Def::Const(did) | Def::AssociatedConst(did) => {
- record_extern_fqn(cx, did, clean::TypeConst);
+ Def::Const(did) => {
+ record_extern_fqn(cx, did, clean::TypeKind::Const);
clean::ConstantItem(build_const(cx, tcx, did))
}
_ => return None,
let variant = tcx.lookup_adt_def(did).struct_variant();
clean::Struct {
- struct_type: match variant.kind {
- VariantKind::Struct => doctree::Plain,
- VariantKind::Tuple => doctree::Tuple,
- VariantKind::Unit => doctree::Unit,
+ struct_type: match variant.ctor_kind {
+ CtorKind::Fictive => doctree::Plain,
+ CtorKind::Fn => doctree::Tuple,
+ CtorKind::Const => doctree::Unit,
},
generics: (t.generics, &predicates).clean(cx),
fields: variant.fields.clean(cx),
tcx.lang_items.char_impl(),
tcx.lang_items.str_impl(),
tcx.lang_items.slice_impl(),
- tcx.lang_items.slice_impl(),
- tcx.lang_items.const_ptr_impl()
+ tcx.lang_items.const_ptr_impl(),
+ tcx.lang_items.mut_ptr_impl(),
];
for def_id in primitive_impls.iter().filter_map(|&def_id| def_id) {
// visit each node at most once.
let mut visited = FnvHashSet();
for item in tcx.sess.cstore.item_children(did) {
- if tcx.sess.cstore.visibility(item.def_id) == ty::Visibility::Public {
- if !visited.insert(item.def_id) { continue }
- if let Some(def) = tcx.sess.cstore.describe_def(item.def_id) {
- if let Some(i) = try_inline_def(cx, tcx, def) {
- items.extend(i)
- }
+ let def_id = item.def.def_id();
+ if tcx.sess.cstore.visibility(def_id) == ty::Visibility::Public {
+ if !visited.insert(def_id) { continue }
+ if let Some(i) = try_inline_def(cx, tcx, item.def) {
+ items.extend(i)
}
}
}
_ => true,
}
});
- return g;
+ g
}
/// Supertrait bounds for a trait are also listed in the generics coming from
//! that clean them.
pub use self::Type::*;
-pub use self::TypeKind::*;
-pub use self::VariantKind::*;
pub use self::Mutability::*;
-pub use self::Import::*;
pub use self::ItemEnum::*;
pub use self::Attribute::*;
pub use self::TyParamBound::*;
use rustc_trans::back::link;
use rustc::middle::privacy::AccessLevels;
use rustc::middle::resolve_lifetime::DefRegion::*;
-use rustc::hir::def::Def;
+use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::{self, DefId, DefIndex, CRATE_DEF_INDEX};
use rustc::hir::print as pprust;
use rustc::ty::subst::Substs;
let root = DefId { krate: self.0, index: CRATE_DEF_INDEX };
cx.tcx_opt().map(|tcx| {
for item in tcx.sess.cstore.item_children(root) {
- let attrs = inline::load_attrs(cx, tcx, item.def_id);
+ let attrs = inline::load_attrs(cx, tcx, item.def.def_id());
PrimitiveType::find(&attrs).map(|prim| primitives.push(prim));
}
});
}
}
pub fn is_mod(&self) -> bool {
- ItemType::from(self) == ItemType::Module
+ self.type_() == ItemType::Module
}
pub fn is_trait(&self) -> bool {
- ItemType::from(self) == ItemType::Trait
+ self.type_() == ItemType::Trait
}
pub fn is_struct(&self) -> bool {
- ItemType::from(self) == ItemType::Struct
+ self.type_() == ItemType::Struct
}
pub fn is_enum(&self) -> bool {
- ItemType::from(self) == ItemType::Module
+ self.type_() == ItemType::Module
}
pub fn is_fn(&self) -> bool {
- ItemType::from(self) == ItemType::Function
+ self.type_() == ItemType::Function
}
pub fn is_associated_type(&self) -> bool {
- ItemType::from(self) == ItemType::AssociatedType
+ self.type_() == ItemType::AssociatedType
}
pub fn is_associated_const(&self) -> bool {
- ItemType::from(self) == ItemType::AssociatedConst
+ self.type_() == ItemType::AssociatedConst
}
pub fn is_method(&self) -> bool {
- ItemType::from(self) == ItemType::Method
+ self.type_() == ItemType::Method
}
pub fn is_ty_method(&self) -> bool {
- ItemType::from(self) == ItemType::TyMethod
+ self.type_() == ItemType::TyMethod
}
pub fn is_primitive(&self) -> bool {
- ItemType::from(self) == ItemType::Primitive
+ self.type_() == ItemType::Primitive
}
pub fn is_stripped(&self) -> bool {
match self.inner { StrippedItem(..) => true, _ => false }
match self.inner {
StructItem(ref _struct) => Some(_struct.fields_stripped),
UnionItem(ref union) => Some(union.fields_stripped),
- VariantItem(Variant { kind: StructVariant(ref vstruct)} ) => {
+ VariantItem(Variant { kind: VariantKind::Struct(ref vstruct)} ) => {
Some(vstruct.fields_stripped)
},
_ => None,
pub fn stable_since(&self) -> Option<&str> {
self.stability.as_ref().map(|s| &s.since[..])
}
+
+ /// Returns a documentation-level item type from the item.
+ pub fn type_(&self) -> ItemType {
+ ItemType::from(self)
+ }
}
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
(tcx.lang_items.sync_trait().unwrap(),
external_path(cx, "Sync", None, false, vec![], empty)),
};
- inline::record_extern_fqn(cx, did, TypeTrait);
+ inline::record_extern_fqn(cx, did, TypeKind::Trait);
TraitBound(PolyTrait {
trait_: ResolvedPath {
path: path,
Some(tcx) => tcx,
None => return RegionBound(Lifetime::statik())
};
- inline::record_extern_fqn(cx, self.def_id, TypeTrait);
+ inline::record_extern_fqn(cx, self.def_id, TypeKind::Trait);
let path = external_path(cx, &tcx.item_name(self.def_id).as_str(),
Some(self.def_id), true, vec![], self.substs);
pub fn get_ref<'a>(&'a self) -> &'a str {
let Lifetime(ref s) = *self;
let s: &'a str = s;
- return s;
+ s
}
pub fn statik() -> Lifetime {
impl FnDecl {
pub fn has_self(&self) -> bool {
- return self.inputs.values.len() > 0 && self.inputs.values[0].name == "self";
+ self.inputs.values.len() > 0 && self.inputs.values[0].name == "self"
}
pub fn self_type(&self) -> Option<SelfTy> {
#[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)]
pub enum TypeKind {
- TypeEnum,
- TypeFunction,
- TypeModule,
- TypeConst,
- TypeStatic,
- TypeStruct,
- TypeUnion,
- TypeTrait,
- TypeVariant,
- TypeTypedef,
+ Enum,
+ Function,
+ Module,
+ Const,
+ Static,
+ Struct,
+ Union,
+ Trait,
+ Variant,
+ Typedef,
}
pub trait GetDefId {
None
}
- pub fn to_string(&self) -> &'static str {
+ pub fn as_str(&self) -> &'static str {
match *self {
PrimitiveType::Isize => "isize",
PrimitiveType::I8 => "i8",
}
pub fn to_url_str(&self) -> &'static str {
- self.to_string()
+ self.as_str()
}
/// Creates a rustdoc-specific node id for primitive types.
TyRptr(ref l, ref m) =>
BorrowedRef {lifetime: l.clean(cx), mutability: m.mutbl.clean(cx),
type_: box m.ty.clean(cx)},
- TyVec(ref ty) => Vector(box ty.clean(cx)),
- TyFixedLengthVec(ref ty, ref e) => {
+ TySlice(ref ty) => Vector(box ty.clean(cx)),
+ TyArray(ref ty, ref e) => {
let n = if let Some(tcx) = cx.tcx_opt() {
use rustc_const_math::{ConstInt, ConstUsize};
use rustc_const_eval::eval_const_expr;
ty::TyAdt(def, substs) => {
let did = def.did;
let kind = match def.adt_kind() {
- AdtKind::Struct => TypeStruct,
- AdtKind::Union => TypeUnion,
- AdtKind::Enum => TypeEnum,
+ AdtKind::Struct => TypeKind::Struct,
+ AdtKind::Union => TypeKind::Union,
+ AdtKind::Enum => TypeKind::Enum,
};
inline::record_extern_fqn(cx, did, kind);
let path = external_path(cx, &cx.tcx().item_name(did).as_str(),
}
ty::TyTrait(ref obj) => {
let did = obj.principal.def_id();
- inline::record_extern_fqn(cx, did, TypeTrait);
+ inline::record_extern_fqn(cx, did, TypeKind::Trait);
let mut typarams = vec![];
obj.region_bound.clean(cx).map(|b| typarams.push(RegionBound(b)));
deprecation: self.depr.clean(cx),
def_id: cx.map.local_def_id(self.def.id()),
inner: VariantItem(Variant {
- kind: struct_def_to_variant_kind(&self.def, cx),
+ kind: self.def.clean(cx),
}),
}
}
impl<'tcx> Clean<Item> for ty::VariantDefData<'tcx, 'static> {
fn clean(&self, cx: &DocContext) -> Item {
- let kind = match self.kind {
- ty::VariantKind::Unit => CLikeVariant,
- ty::VariantKind::Tuple => {
- TupleVariant(
+ let kind = match self.ctor_kind {
+ CtorKind::Const => VariantKind::CLike,
+ CtorKind::Fn => {
+ VariantKind::Tuple(
self.fields.iter().map(|f| f.unsubst_ty().clean(cx)).collect()
)
}
- ty::VariantKind::Struct => {
- StructVariant(VariantStruct {
+ CtorKind::Fictive => {
+ VariantKind::Struct(VariantStruct {
struct_type: doctree::Plain,
fields_stripped: false,
fields: self.fields.iter().map(|field| {
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
pub enum VariantKind {
- CLikeVariant,
- TupleVariant(Vec<Type>),
- StructVariant(VariantStruct),
+ CLike,
+ Tuple(Vec<Type>),
+ Struct(VariantStruct),
}
-fn struct_def_to_variant_kind(struct_def: &hir::VariantData, cx: &DocContext) -> VariantKind {
- if struct_def.is_struct() {
- StructVariant(struct_def.clean(cx))
- } else if struct_def.is_unit() {
- CLikeVariant
- } else {
- TupleVariant(struct_def.fields().iter().map(|x| x.ty.clean(cx)).collect())
+impl Clean<VariantKind> for hir::VariantData {
+ fn clean(&self, cx: &DocContext) -> VariantKind {
+ if self.is_struct() {
+ VariantKind::Struct(self.clean(cx))
+ } else if self.is_unit() {
+ VariantKind::CLike
+ } else {
+ VariantKind::Tuple(self.fields().iter().map(|x| x.ty.clean(cx)).collect())
+ }
}
}
});
let (mut ret, inner) = match self.node {
hir::ViewPathGlob(ref p) => {
- (vec![], GlobImport(resolve_use_source(cx, p.clean(cx), self.id)))
+ (vec![], Import::Glob(resolve_use_source(cx, p.clean(cx), self.id)))
}
hir::ViewPathList(ref p, ref list) => {
// Attempt to inline all reexported items, but be sure
if remaining.is_empty() {
return ret;
}
- (ret, ImportList(resolve_use_source(cx, p.clean(cx), self.id),
- remaining))
+ (ret, Import::List(resolve_use_source(cx, p.clean(cx), self.id), remaining))
}
hir::ViewPathSimple(name, ref p) => {
if !denied {
return items;
}
}
- (vec![], SimpleImport(name.clean(cx),
- resolve_use_source(cx, p.clean(cx), self.id)))
+ (vec![], Import::Simple(name.clean(cx),
+ resolve_use_source(cx, p.clean(cx), self.id)))
}
};
ret.push(Item {
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
pub enum Import {
// use source as str;
- SimpleImport(String, ImportSource),
+ Simple(String, ImportSource),
// use source::*;
- GlobImport(ImportSource),
+ Glob(ImportSource),
// use source::{a, b, c};
- ImportList(ImportSource, Vec<ViewListIdent>),
+ List(ImportSource, Vec<ViewListIdent>),
}
#[derive(Clone, RustcEncodable, RustcDecodable, Debug)]
},
PatKind::Range(..) => panic!("tried to get argument name from PatKind::Range, \
which is not allowed in function arguments"),
- PatKind::Vec(ref begin, ref mid, ref end) => {
+ PatKind::Slice(ref begin, ref mid, ref end) => {
let begin = begin.iter().map(|p| name_from_pat(&**p));
let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter();
let end = end.iter().map(|p| name_from_pat(&**p));
let tcx = cx.tcx();
let (did, kind) = match def {
- Def::Fn(i) => (i, TypeFunction),
- Def::TyAlias(i) => (i, TypeTypedef),
- Def::Enum(i) => (i, TypeEnum),
- Def::Trait(i) => (i, TypeTrait),
- Def::Struct(i) => (i, TypeStruct),
- Def::Union(i) => (i, TypeUnion),
- Def::Mod(i) => (i, TypeModule),
- Def::Static(i, _) => (i, TypeStatic),
- Def::Variant(i) => (tcx.parent_def_id(i).unwrap(), TypeEnum),
- Def::SelfTy(Some(def_id), _) => (def_id, TypeTrait),
+ Def::Fn(i) => (i, TypeKind::Function),
+ Def::TyAlias(i) => (i, TypeKind::Typedef),
+ Def::Enum(i) => (i, TypeKind::Enum),
+ Def::Trait(i) => (i, TypeKind::Trait),
+ Def::Struct(i) => (i, TypeKind::Struct),
+ Def::Union(i) => (i, TypeKind::Union),
+ Def::Mod(i) => (i, TypeKind::Module),
+ Def::Static(i, _) => (i, TypeKind::Static),
+ Def::Variant(i) => (tcx.parent_def_id(i).unwrap(), TypeKind::Enum),
+ Def::SelfTy(Some(def_id), _) => (def_id, TypeKind::Trait),
Def::SelfTy(_, Some(impl_def_id)) => {
return impl_def_id
}
};
if did.is_local() { return did }
inline::record_extern_fqn(cx, did, kind);
- if let TypeTrait = kind {
+ if let TypeKind::Trait = kind {
let t = inline::build_external_trait(cx, tcx, did);
cx.external_traits.borrow_mut().insert(did, t);
}
Some(did) => did,
None => return fallback(box t.clean(cx)),
};
- inline::record_extern_fqn(cx, did, TypeStruct);
+ inline::record_extern_fqn(cx, did, TypeKind::Struct);
ResolvedPath {
typarams: None,
did: did,
for param in &mut params {
param.bounds = ty_bounds(mem::replace(&mut param.bounds, Vec::new()));
}
- return params;
+ params
}
fn ty_bounds(bounds: Vec<clean::TyParamBound>) -> Vec<clean::TyParamBound> {
use std::cell::{RefCell, Cell};
use std::mem;
use std::rc::Rc;
+use std::path::PathBuf;
use visit_ast::RustdocVisitor;
use clean;
cfgs: Vec<String>,
externs: config::Externs,
input: Input,
- triple: Option<String>) -> (clean::Crate, RenderInfo)
+ triple: Option<String>,
+ maybe_sysroot: Option<PathBuf>) -> (clean::Crate, RenderInfo)
{
// Parse, resolve, and typecheck the given crate.
let warning_lint = lint::builtin::WARNINGS.name_lower();
let sessopts = config::Options {
- maybe_sysroot: None,
+ maybe_sysroot: maybe_sysroot,
search_paths: search_paths,
crate_types: vec!(config::CrateTypeRlib),
lint_opts: vec!((warning_lint, lint::Allow)),
target_triple: triple.unwrap_or(config::host_triple().to_string()),
// Ensure that rustdoc works even if rustc is feature-staged
unstable_features: UnstableFeatures::Allow,
+ actually_rustdoc: true,
..config::basic_options().clone()
};
VariantItem(i) => {
let i2 = i.clone(); // this clone is small
match i.kind {
- StructVariant(mut j) => {
+ VariantKind::Struct(mut j) => {
let num_fields = j.fields.len();
j.fields = j.fields.into_iter().filter_map(|x| self.fold_item(x)).collect();
j.fields_stripped |= num_fields != j.fields.len() ||
j.fields.iter().any(|f| f.is_stripped());
- VariantItem(Variant {kind: StructVariant(j), ..i2})
+ VariantItem(Variant {kind: VariantKind::Struct(j), ..i2})
},
_ => VariantItem(i2)
}
tybounds(f, typarams)
}
clean::Infer => write!(f, "_"),
- clean::Primitive(prim) => primitive_link(f, prim, prim.to_string()),
+ clean::Primitive(prim) => primitive_link(f, prim, prim.as_str()),
clean::BareFunction(ref decl) => {
write!(f, "{}{}fn{}{}",
UnsafetySpace(decl.unsafety),
impl fmt::Display for clean::Import {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
- clean::SimpleImport(ref name, ref src) => {
+ clean::Import::Simple(ref name, ref src) => {
if *name == src.path.last_name() {
write!(f, "use {};", *src)
} else {
write!(f, "use {} as {};", *src, *name)
}
}
- clean::GlobImport(ref src) => {
+ clean::Import::Glob(ref src) => {
write!(f, "use {}::*;", *src)
}
- clean::ImportList(ref src, ref names) => {
+ clean::Import::List(ref src, ref names) => {
write!(f, "use {}::{{", *src)?;
for (i, n) in names.iter().enumerate() {
if i > 0 {
impl From<clean::TypeKind> for ItemType {
fn from(kind: clean::TypeKind) -> ItemType {
match kind {
- clean::TypeStruct => ItemType::Struct,
- clean::TypeUnion => ItemType::Union,
- clean::TypeEnum => ItemType::Enum,
- clean::TypeFunction => ItemType::Function,
- clean::TypeTrait => ItemType::Trait,
- clean::TypeModule => ItemType::Module,
- clean::TypeStatic => ItemType::Static,
- clean::TypeConst => ItemType::Constant,
- clean::TypeVariant => ItemType::Variant,
- clean::TypeTypedef => ItemType::Typedef,
+ clean::TypeKind::Struct => ItemType::Struct,
+ clean::TypeKind::Union => ItemType::Union,
+ clean::TypeKind::Enum => ItemType::Enum,
+ clean::TypeKind::Function => ItemType::Function,
+ clean::TypeKind::Trait => ItemType::Trait,
+ clean::TypeKind::Module => ItemType::Module,
+ clean::TypeKind::Static => ItemType::Static,
+ clean::TypeKind::Const => ItemType::Constant,
+ clean::TypeKind::Variant => ItemType::Variant,
+ clean::TypeKind::Typedef => ItemType::Typedef,
}
}
}
#![allow(non_camel_case_types)]
use libc;
-use rustc::session::config::get_unstable_features_setting;
use std::ascii::AsciiExt;
use std::cell::RefCell;
use std::default::Default;
let mut data = LangString::all_false();
let mut allow_compile_fail = false;
let mut allow_error_code_check = false;
- match get_unstable_features_setting() {
- UnstableFeatures::Allow | UnstableFeatures::Cheat => {
- allow_compile_fail = true;
- allow_error_code_check = true;
- }
- _ => {},
- };
+ if UnstableFeatures::from_environment().is_nightly_build() {
+ allow_compile_fail = true;
+ allow_error_code_check = true;
+ }
let tokens = string.split(|c: char|
!(c == '_' || c == '-' || c.is_alphanumeric())
use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE};
use rustc::middle::privacy::AccessLevels;
use rustc::middle::stability;
-use rustc::session::config::get_unstable_features_setting;
use rustc::hir;
use rustc::util::nodemap::{FnvHashMap, FnvHashSet};
use rustc_data_structures::flock;
/// Current hierarchy of components leading down to what's currently being
/// rendered
pub current: Vec<String>,
- /// String representation of how to get back to the root path of the 'doc/'
- /// folder in terms of a relative URL.
- pub root_path: String,
/// The current destination folder of where HTML artifacts should be placed.
/// This changes as the context descends into the module hierarchy.
pub dst: PathBuf,
krate = render_sources(&dst, &mut scx, krate)?;
let cx = Context {
current: Vec::new(),
- root_path: String::new(),
dst: dst,
render_redirect_pages: false,
shared: Arc::new(scx),
for &(did, ref item) in orphan_impl_items {
if let Some(&(ref fqp, _)) = paths.get(&did) {
search_index.push(IndexItem {
- ty: item_type(item),
+ ty: item.type_(),
name: item.name.clone().unwrap(),
path: fqp[..fqp.len() - 1].join("::"),
desc: Escape(&shorter(item.doc_value())).to_string(),
ret.push(line.to_string());
}
}
- return Ok(ret);
+ Ok(ret)
}
// Update the search index
}
}
-/// Returns a documentation-level item type from the item.
-fn item_type(item: &clean::Item) -> ItemType {
- ItemType::from(item)
-}
-
/// Takes a path to a source file and cleans the path to it. This canonicalizes
/// things like ".." to components which preserve the "top down" hierarchy of a
/// static HTML tree. Each component in the cleaned path will be passed as an
// inserted later on when serializing the search-index.
if item.def_id.index != CRATE_DEF_INDEX {
self.search_index.push(IndexItem {
- ty: item_type(&item),
+ ty: item.type_(),
name: s.to_string(),
path: path.join("::").to_string(),
desc: Escape(&shorter(item.doc_value())).to_string(),
self.access_levels.is_public(item.def_id)
{
self.paths.insert(item.def_id,
- (self.stack.clone(), item_type(&item)));
+ (self.stack.clone(), item.type_()));
}
}
// link variants to their parent enum because pages aren't emitted
clean::PrimitiveItem(..) if item.visibility.is_some() => {
self.paths.insert(item.def_id, (self.stack.clone(),
- item_type(&item)));
+ item.type_()));
}
_ => {}
self.seen_mod = orig_seen_mod;
self.stripped_mod = orig_stripped_mod;
self.parent_is_trait_impl = orig_parent_is_trait_impl;
- return ret;
+ ret
}
}
}
impl Context {
+ /// String representation of how to get back to the root path of the 'doc/'
+ /// folder in terms of a relative URL.
+ fn root_path(&self) -> String {
+ repeat("../").take(self.current.len()).collect::<String>()
+ }
+
/// Recurse in the directory structure and change the "root path" to make
/// sure it always points to the top (relatively)
fn recurse<T, F>(&mut self, s: String, f: F) -> T where
}
let prev = self.dst.clone();
self.dst.push(&s);
- self.root_path.push_str("../");
self.current.push(s);
info!("Recursing into {}", self.dst.display());
// Go back to where we were at
self.dst = prev;
- let len = self.root_path.len();
- self.root_path.truncate(len - 3);
self.current.pop().unwrap();
- return ret;
+ ret
}
/// Main method for rendering a crate.
title.push_str(it.name.as_ref().unwrap());
}
title.push_str(" - Rust");
- let tyname = item_type(it).css_class();
+ let tyname = it.type_().css_class();
let desc = if it.is_crate() {
format!("API documentation for the Rust `{}` crate.",
self.shared.layout.krate)
let keywords = make_item_keywords(it);
let page = layout::Page {
css_class: tyname,
- root_path: &self.root_path,
+ root_path: &self.root_path(),
title: &title,
description: &desc,
keywords: &keywords,
&Item{ cx: self, item: it },
self.shared.css_file_extension.is_some())?;
} else {
- let mut url = repeat("../").take(self.current.len())
- .collect::<String>();
+ let mut url = self.root_path();
if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) {
for name in &names[..names.len() - 1] {
url.push_str(name);
// buf will be empty if the item is stripped and there is no redirect for it
if !buf.is_empty() {
let name = item.name.as_ref().unwrap();
- let item_type = item_type(&item);
+ let item_type = item.type_();
let file_name = &item_path(item_type, name);
let joint_dst = self.dst.join(file_name);
try_err!(fs::create_dir_all(&self.dst), &self.dst);
for item in &m.items {
if maybe_ignore_item(item) { continue }
- let short = item_type(item).css_class();
+ let short = item.type_().css_class();
let myname = match item.name {
None => continue,
Some(ref s) => s.to_string(),
for (_, items) in &mut map {
items.sort();
}
- return map;
+ map
}
}
}).map(|l| &l.1);
let root = match root {
Some(&Remote(ref s)) => s.to_string(),
- Some(&Local) => self.cx.root_path.clone(),
+ Some(&Local) => self.cx.root_path(),
None | Some(&Unknown) => return None,
};
Some(format!("{root}/{krate}/macro.{name}.html?gotomacrosrc=1",
let path = PathBuf::from(&self.item.source.filename);
self.cx.shared.local_sources.get(&path).map(|path| {
format!("{root}src/{krate}/{path}#{href}",
- root = self.cx.root_path,
+ root = self.cx.root_path(),
krate = self.cx.shared.layout.krate,
path = path,
href = href)
};
let mut path = match cache.extern_locations.get(&self.item.def_id.krate) {
Some(&(_, Remote(ref s))) => s.to_string(),
- Some(&(_, Local)) => self.cx.root_path.clone(),
+ Some(&(_, Local)) => self.cx.root_path(),
Some(&(_, Unknown)) => return None,
None => return None,
};
}
Some(format!("{path}{file}?gotosrc={goto}",
path = path,
- file = item_path(item_type(self.item), external_path.last().unwrap()),
+ file = item_path(self.item.type_(), external_path.last().unwrap()),
goto = self.item.def_id.index.as_usize()))
}
}
}
}
write!(fmt, "<a class='{}' href=''>{}</a>",
- item_type(self.item), self.item.name.as_ref().unwrap())?;
+ self.item.type_(), self.item.name.as_ref().unwrap())?;
write!(fmt, "</span>")?; // in-band
write!(fmt, "<span class='out-of-band'>")?;
let mut s = cx.current.join("::");
s.push_str("::");
s.push_str(item.name.as_ref().unwrap());
- return s
+ s
}
fn shorter<'a>(s: Option<&'a str>) -> String {
}
fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering {
- let ty1 = item_type(i1);
- let ty2 = item_type(i2);
+ let ty1 = i1.type_();
+ let ty2 = i2.type_();
if ty1 != ty2 {
return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2))
}
continue;
}
- let myty = Some(item_type(myitem));
+ let myty = Some(myitem.type_());
if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) {
// Put `extern crate` and `use` re-exports in the same section.
curty = myty;
name = *myitem.name.as_ref().unwrap(),
stab_docs = stab_docs,
docs = shorter(Some(&Markdown(doc_value).to_string())),
- class = item_type(myitem),
+ class = myitem.type_(),
stab = myitem.stability_class(),
- href = item_path(item_type(myitem), myitem.name.as_ref().unwrap()),
+ href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
title = full_path(cx, myitem))?;
}
}
fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
f: &clean::Function) -> fmt::Result {
// FIXME(#24111): remove when `const_fn` is stabilized
- let vis_constness = match get_unstable_features_setting() {
+ let vis_constness = match UnstableFeatures::from_environment() {
UnstableFeatures::Allow => f.constness,
_ => hir::Constness::NotConst
};
fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item)
-> fmt::Result {
let name = m.name.as_ref().unwrap();
- let item_type = item_type(m);
+ let item_type = m.type_();
let id = derive_id(format!("{}.{}", item_type, name));
let ns_id = derive_id(format!("{}.{}", name, item_type.name_space()));
write!(w, "<h3 id='{id}' class='method stab {stab}'>\
let (ref path, _) = cache.external_paths[&it.def_id];
path[..path.len() - 1].join("/")
},
- ty = item_type(it).css_class(),
+ ty = it.type_().css_class(),
name = *it.name.as_ref().unwrap())?;
Ok(())
}
use html::item_type::ItemType::*;
let name = it.name.as_ref().unwrap();
- let ty = match item_type(it) {
+ let ty = match it.type_() {
Typedef | AssociatedType => AssociatedType,
s@_ => s,
};
link: AssocItemLink)
-> fmt::Result {
let name = meth.name.as_ref().unwrap();
- let anchor = format!("#{}.{}", item_type(meth), name);
+ let anchor = format!("#{}.{}", meth.type_(), name);
let href = match link {
AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
AssocItemLink::Anchor(None) => anchor,
}
};
// FIXME(#24111): remove when `const_fn` is stabilized
- let vis_constness = match get_unstable_features_setting() {
+ let vis_constness = match UnstableFeatures::from_environment() {
UnstableFeatures::Allow => constness,
_ => hir::Constness::NotConst
};
match v.inner {
clean::VariantItem(ref var) => {
match var.kind {
- clean::CLikeVariant => write!(w, "{}", name)?,
- clean::TupleVariant(ref tys) => {
+ clean::VariantKind::CLike => write!(w, "{}", name)?,
+ clean::VariantKind::Tuple(ref tys) => {
write!(w, "{}(", name)?;
for (i, ty) in tys.iter().enumerate() {
if i > 0 {
}
write!(w, ")")?;
}
- clean::StructVariant(ref s) => {
+ clean::VariantKind::Struct(ref s) => {
render_struct(w,
v,
None,
ns_id = ns_id,
name = variant.name.as_ref().unwrap())?;
if let clean::VariantItem(ref var) = variant.inner {
- if let clean::TupleVariant(ref tys) = var.kind {
+ if let clean::VariantKind::Tuple(ref tys) = var.kind {
write!(w, "(")?;
for (i, ty) in tys.iter().enumerate() {
if i > 0 {
write!(w, "</code></span></span>")?;
document(w, cx, variant)?;
- use clean::{Variant, StructVariant};
- if let clean::VariantItem( Variant { kind: StructVariant(ref s) } ) = variant.inner {
+ use clean::{Variant, VariantKind};
+ if let clean::VariantItem(Variant {
+ kind: VariantKind::Struct(ref s)
+ }) = variant.inner {
write!(w, "<h3 class='fields'>Fields</h3>\n
<table>")?;
for field in &s.fields {
link: AssocItemLink, render_mode: RenderMode,
is_default_item: bool, outer_version: Option<&str>,
trait_: Option<&clean::Trait>) -> fmt::Result {
- let item_type = item_type(item);
+ let item_type = item.type_();
let name = item.name.as_ref().unwrap();
let render_method_item: bool = match render_mode {
write!(fmt, "::<wbr>")?;
}
write!(fmt, "<a href='{}index.html'>{}</a>",
- &cx.root_path[..(cx.current.len() - i - 1) * 3],
+ &cx.root_path()[..(cx.current.len() - i - 1) * 3],
*name)?;
}
write!(fmt, "</p>")?;
relpath: '{path}'\
}};</script>",
name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""),
- ty = item_type(it).css_class(),
+ ty = it.type_().css_class(),
path = relpath)?;
if parentlen == 0 {
// there is no sidebar-items.js beyond the crate root path
font-size: 90%;
}
+/* Shift "where ..." clauses in trait listings down a line */
+pre.trait .where::before {
+ content: '\a ';
+}
+
nav {
border-bottom: 1px solid;
padding-bottom: 10px;
use clean::Attributes;
-type Pass = (&'static str, // name
- fn(clean::Crate) -> plugins::PluginResult, // fn
- &'static str); // description
-
-const PASSES: &'static [Pass] = &[
- ("strip-hidden", passes::strip_hidden,
- "strips all doc(hidden) items from the output"),
- ("unindent-comments", passes::unindent_comments,
- "removes excess indentation on comments in order for markdown to like it"),
- ("collapse-docs", passes::collapse_docs,
- "concatenates all document attributes into one document attribute"),
- ("strip-private", passes::strip_private,
- "strips all private items from a crate which cannot be seen externally, \
- implies strip-priv-imports"),
- ("strip-priv-imports", passes::strip_priv_imports,
- "strips all private import statements (`use`, `extern crate`) from a crate"),
-];
-
-const DEFAULT_PASSES: &'static [&'static str] = &[
- "strip-hidden",
- "strip-private",
- "collapse-docs",
- "unindent-comments",
-];
-
struct Output {
krate: clean::Crate,
renderinfo: html::render::RenderInfo,
}
pub fn main() {
- const STACK_SIZE: usize = 32000000; // 32MB
+ const STACK_SIZE: usize = 32_000_000; // 32MB
let res = std::thread::Builder::new().stack_size(STACK_SIZE).spawn(move || {
let s = env::args().collect::<Vec<_>>();
main_args(&s)
own theme", "PATH")),
unstable(optmulti("Z", "",
"internal and debugging options (only on nightly build)", "FLAG")),
+ stable(optopt("", "sysroot", "Override the system root", "PATH")),
)
}
if matches.opt_strs("passes") == ["list"] {
println!("Available passes for running rustdoc:");
- for &(name, _, description) in PASSES {
+ for &(name, _, description) in passes::PASSES {
println!("{:>20} - {}", name, description);
}
println!("\nDefault passes for rustdoc:");
- for &name in DEFAULT_PASSES {
+ for &name in passes::DEFAULT_PASSES {
println!("{:>20}", name);
}
return 0;
if matches.free.is_empty() {
println!("expected an input file to act on");
return 1;
- } if matches.free.len() > 1 {
+ }
+ if matches.free.len() > 1 {
println!("only one input file may be specified");
return 1;
}
passes.into_iter().collect(),
css_file_extension,
renderinfo)
- .expect("failed to generate documentation")
+ .expect("failed to generate documentation");
+ 0
}
Some(s) => {
println!("unknown output format: {}", s);
- return 1;
+ 1
}
}
-
- return 0;
}
/// Looks inside the command line arguments to extract the relevant input format
}
let cfgs = matches.opt_strs("cfg");
let triple = matches.opt_str("target");
+ let maybe_sysroot = matches.opt_str("sysroot").map(PathBuf::from);
let cr = PathBuf::from(cratefile);
info!("starting to run rustc");
use rustc::session::config::Input;
tx.send(core::run_core(paths, cfgs, externs, Input::File(cr),
- triple)).unwrap();
+ triple, maybe_sysroot)).unwrap();
});
let (mut krate, renderinfo) = rx.recv().unwrap();
info!("finished with rustc");
}
if default_passes {
- for name in DEFAULT_PASSES.iter().rev() {
+ for name in passes::DEFAULT_PASSES.iter().rev() {
passes.insert(0, name.to_string());
}
}
.unwrap_or("/tmp/rustdoc/plugins".to_string());
let mut pm = plugins::PluginManager::new(PathBuf::from(path));
for pass in &passes {
- let plugin = match PASSES.iter()
- .position(|&(p, ..)| {
- p == *pass
- }) {
- Some(i) => PASSES[i].1,
+ let plugin = match passes::PASSES.iter()
+ .position(|&(p, ..)| {
+ p == *pass
+ }) {
+ Some(i) => passes::PASSES[i].1,
None => {
error!("unknown pass {}, skipping", *pass);
continue
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use rustc::hir::def_id::DefId;
-use rustc::middle::privacy::AccessLevels;
-use rustc::util::nodemap::DefIdSet;
-use std::cmp;
-use std::mem;
-use std::string::String;
-use std::usize;
-
-use clean::{self, Attributes, GetDefId};
-use clean::Item;
-use plugins;
-use fold;
-use fold::DocFolder;
-use fold::FoldItem::Strip;
-
-/// Strip items marked `#[doc(hidden)]`
-pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
- let mut retained = DefIdSet();
-
- // strip all #[doc(hidden)] items
- let krate = {
- struct Stripper<'a> {
- retained: &'a mut DefIdSet,
- update_retained: bool,
- }
- impl<'a> fold::DocFolder for Stripper<'a> {
- fn fold_item(&mut self, i: Item) -> Option<Item> {
- if i.attrs.list("doc").has_word("hidden") {
- debug!("found one in strip_hidden; removing");
- // use a dedicated hidden item for given item type if any
- match i.inner {
- clean::StructFieldItem(..) | clean::ModuleItem(..) => {
- // We need to recurse into stripped modules to
- // strip things like impl methods but when doing so
- // we must not add any items to the `retained` set.
- let old = mem::replace(&mut self.update_retained, false);
- let ret = Strip(self.fold_item_recur(i).unwrap()).fold();
- self.update_retained = old;
- return ret;
- }
- _ => return None,
- }
- } else {
- if self.update_retained {
- self.retained.insert(i.def_id);
- }
- }
- self.fold_item_recur(i)
- }
- }
- let mut stripper = Stripper{ retained: &mut retained, update_retained: true };
- stripper.fold_crate(krate)
- };
-
- // strip all impls referencing stripped items
- let mut stripper = ImplStripper { retained: &retained };
- stripper.fold_crate(krate)
-}
-
-/// Strip private items from the point of view of a crate or externally from a
-/// crate, specified by the `xcrate` flag.
-pub fn strip_private(mut krate: clean::Crate) -> plugins::PluginResult {
- // This stripper collects all *retained* nodes.
- let mut retained = DefIdSet();
- let access_levels = krate.access_levels.clone();
-
- // strip all private items
- {
- let mut stripper = Stripper {
- retained: &mut retained,
- access_levels: &access_levels,
- update_retained: true,
- };
- krate = ImportStripper.fold_crate(stripper.fold_crate(krate));
- }
-
- // strip all impls referencing private items
- let mut stripper = ImplStripper { retained: &retained };
- stripper.fold_crate(krate)
-}
-
-struct Stripper<'a> {
- retained: &'a mut DefIdSet,
- access_levels: &'a AccessLevels<DefId>,
- update_retained: bool,
-}
-
-impl<'a> fold::DocFolder for Stripper<'a> {
- fn fold_item(&mut self, i: Item) -> Option<Item> {
- match i.inner {
- clean::StrippedItem(..) => {
- // We need to recurse into stripped modules to strip things
- // like impl methods but when doing so we must not add any
- // items to the `retained` set.
- let old = mem::replace(&mut self.update_retained, false);
- let ret = self.fold_item_recur(i);
- self.update_retained = old;
- return ret;
- }
- // These items can all get re-exported
- clean::TypedefItem(..) | clean::StaticItem(..) |
- clean::StructItem(..) | clean::EnumItem(..) |
- clean::TraitItem(..) | clean::FunctionItem(..) |
- clean::VariantItem(..) | clean::MethodItem(..) |
- clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) |
- clean::ConstantItem(..) | clean::UnionItem(..) => {
- if i.def_id.is_local() {
- if !self.access_levels.is_exported(i.def_id) {
- return None;
- }
- }
- }
-
- clean::StructFieldItem(..) => {
- if i.visibility != Some(clean::Public) {
- return Strip(i).fold();
- }
- }
-
- clean::ModuleItem(..) => {
- if i.def_id.is_local() && i.visibility != Some(clean::Public) {
- let old = mem::replace(&mut self.update_retained, false);
- let ret = Strip(self.fold_item_recur(i).unwrap()).fold();
- self.update_retained = old;
- return ret;
- }
- }
-
- // handled in the `strip-priv-imports` pass
- clean::ExternCrateItem(..) | clean::ImportItem(..) => {}
-
- clean::DefaultImplItem(..) | clean::ImplItem(..) => {}
-
- // tymethods/macros have no control over privacy
- clean::MacroItem(..) | clean::TyMethodItem(..) => {}
-
- // Primitives are never stripped
- clean::PrimitiveItem(..) => {}
-
- // Associated consts and types are never stripped
- clean::AssociatedConstItem(..) |
- clean::AssociatedTypeItem(..) => {}
- }
-
- let fastreturn = match i.inner {
- // nothing left to do for traits (don't want to filter their
- // methods out, visibility controlled by the trait)
- clean::TraitItem(..) => true,
-
- // implementations of traits are always public.
- clean::ImplItem(ref imp) if imp.trait_.is_some() => true,
- // Struct variant fields have inherited visibility
- clean::VariantItem(clean::Variant {
- kind: clean::StructVariant(..)
- }) => true,
- _ => false,
- };
-
- let i = if fastreturn {
- if self.update_retained {
- self.retained.insert(i.def_id);
- }
- return Some(i);
- } else {
- self.fold_item_recur(i)
- };
-
- i.and_then(|i| {
- match i.inner {
- // emptied modules have no need to exist
- clean::ModuleItem(ref m)
- if m.items.is_empty() &&
- i.doc_value().is_none() => None,
- _ => {
- if self.update_retained {
- self.retained.insert(i.def_id);
- }
- Some(i)
- }
- }
- })
- }
-}
-
-// This stripper discards all impls which reference stripped items
-struct ImplStripper<'a> {
- retained: &'a DefIdSet
-}
-
-impl<'a> fold::DocFolder for ImplStripper<'a> {
- fn fold_item(&mut self, i: Item) -> Option<Item> {
- if let clean::ImplItem(ref imp) = i.inner {
- // emptied none trait impls can be stripped
- if imp.trait_.is_none() && imp.items.is_empty() {
- return None;
- }
- if let Some(did) = imp.for_.def_id() {
- if did.is_local() && !imp.for_.is_generic() &&
- !self.retained.contains(&did)
- {
- return None;
- }
- }
- if let Some(did) = imp.trait_.def_id() {
- if did.is_local() && !self.retained.contains(&did) {
- return None;
- }
- }
- }
- self.fold_item_recur(i)
- }
-}
-
-// This stripper discards all private import statements (`use`, `extern crate`)
-struct ImportStripper;
-impl fold::DocFolder for ImportStripper {
- fn fold_item(&mut self, i: Item) -> Option<Item> {
- match i.inner {
- clean::ExternCrateItem(..) |
- clean::ImportItem(..) if i.visibility != Some(clean::Public) => None,
- _ => self.fold_item_recur(i)
- }
- }
-}
-
-pub fn strip_priv_imports(krate: clean::Crate) -> plugins::PluginResult {
- ImportStripper.fold_crate(krate)
-}
-
-pub fn unindent_comments(krate: clean::Crate) -> plugins::PluginResult {
- struct CommentCleaner;
- impl fold::DocFolder for CommentCleaner {
- fn fold_item(&mut self, mut i: Item) -> Option<Item> {
- let mut avec: Vec<clean::Attribute> = Vec::new();
- for attr in &i.attrs {
- match attr {
- &clean::NameValue(ref x, ref s)
- if "doc" == *x => {
- avec.push(clean::NameValue("doc".to_string(),
- unindent(s)))
- }
- x => avec.push(x.clone())
- }
- }
- i.attrs = avec;
- self.fold_item_recur(i)
- }
- }
- let mut cleaner = CommentCleaner;
- let krate = cleaner.fold_crate(krate);
- krate
-}
-
-pub fn collapse_docs(krate: clean::Crate) -> plugins::PluginResult {
- struct Collapser;
- impl fold::DocFolder for Collapser {
- fn fold_item(&mut self, mut i: Item) -> Option<Item> {
- let mut docstr = String::new();
- for attr in &i.attrs {
- if let clean::NameValue(ref x, ref s) = *attr {
- if "doc" == *x {
- docstr.push_str(s);
- docstr.push('\n');
- }
- }
- }
- let mut a: Vec<clean::Attribute> = i.attrs.iter().filter(|&a| match a {
- &clean::NameValue(ref x, _) if "doc" == *x => false,
- _ => true
- }).cloned().collect();
- if !docstr.is_empty() {
- a.push(clean::NameValue("doc".to_string(), docstr));
- }
- i.attrs = a;
- self.fold_item_recur(i)
- }
- }
- let mut collapser = Collapser;
- let krate = collapser.fold_crate(krate);
- krate
-}
-
-pub fn unindent(s: &str) -> String {
- let lines = s.lines().collect::<Vec<&str> >();
- let mut saw_first_line = false;
- let mut saw_second_line = false;
- let min_indent = lines.iter().fold(usize::MAX, |min_indent, line| {
-
- // After we see the first non-whitespace line, look at
- // the line we have. If it is not whitespace, and therefore
- // part of the first paragraph, then ignore the indentation
- // level of the first line
- let ignore_previous_indents =
- saw_first_line &&
- !saw_second_line &&
- !line.chars().all(|c| c.is_whitespace());
-
- let min_indent = if ignore_previous_indents {
- usize::MAX
- } else {
- min_indent
- };
-
- if saw_first_line {
- saw_second_line = true;
- }
-
- if line.chars().all(|c| c.is_whitespace()) {
- min_indent
- } else {
- saw_first_line = true;
- let mut whitespace = 0;
- line.chars().all(|char| {
- // Compare against either space or tab, ignoring whether they
- // are mixed or not
- if char == ' ' || char == '\t' {
- whitespace += 1;
- true
- } else {
- false
- }
- });
- cmp::min(min_indent, whitespace)
- }
- });
-
- if !lines.is_empty() {
- let mut unindented = vec![ lines[0].trim().to_string() ];
- unindented.extend_from_slice(&lines[1..].iter().map(|&line| {
- if line.chars().all(|c| c.is_whitespace()) {
- line.to_string()
- } else {
- assert!(line.len() >= min_indent);
- line[min_indent..].to_string()
- }
- }).collect::<Vec<_>>());
- unindented.join("\n")
- } else {
- s.to_string()
- }
-}
-
-#[cfg(test)]
-mod unindent_tests {
- use super::unindent;
-
- #[test]
- fn should_unindent() {
- let s = " line1\n line2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\nline2");
- }
-
- #[test]
- fn should_unindent_multiple_paragraphs() {
- let s = " line1\n\n line2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\n\nline2");
- }
-
- #[test]
- fn should_leave_multiple_indent_levels() {
- // Line 2 is indented another level beyond the
- // base indentation and should be preserved
- let s = " line1\n\n line2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\n\n line2");
- }
-
- #[test]
- fn should_ignore_first_line_indent() {
- // The first line of the first paragraph may not be indented as
- // far due to the way the doc string was written:
- //
- // #[doc = "Start way over here
- // and continue here"]
- let s = "line1\n line2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\nline2");
- }
-
- #[test]
- fn should_not_ignore_first_line_indent_in_a_single_line_para() {
- let s = "line1\n\n line2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\n\n line2");
- }
-
- #[test]
- fn should_unindent_tabs() {
- let s = "\tline1\n\tline2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\nline2");
- }
-
- #[test]
- fn should_trim_mixed_indentation() {
- let s = "\t line1\n\t line2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\nline2");
-
- let s = " \tline1\n \tline2".to_string();
- let r = unindent(&s);
- assert_eq!(r, "line1\nline2");
- }
-}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::string::String;
+
+use clean::{self, Item};
+use plugins;
+use fold;
+use fold::DocFolder;
+
+pub fn collapse_docs(krate: clean::Crate) -> plugins::PluginResult {
+ let mut collapser = Collapser;
+ let krate = collapser.fold_crate(krate);
+ krate
+}
+
+struct Collapser;
+
+impl fold::DocFolder for Collapser {
+ fn fold_item(&mut self, mut i: Item) -> Option<Item> {
+ let mut docstr = String::new();
+ for attr in &i.attrs {
+ if let clean::NameValue(ref x, ref s) = *attr {
+ if "doc" == *x {
+ docstr.push_str(s);
+ docstr.push('\n');
+ }
+ }
+ }
+ let mut a: Vec<clean::Attribute> = i.attrs.iter().filter(|&a| match a {
+ &clean::NameValue(ref x, _) if "doc" == *x => false,
+ _ => true
+ }).cloned().collect();
+ if !docstr.is_empty() {
+ a.push(clean::NameValue("doc".to_string(), docstr));
+ }
+ i.attrs = a;
+ self.fold_item_recur(i)
+ }
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::hir::def_id::DefId;
+use rustc::middle::privacy::AccessLevels;
+use rustc::util::nodemap::DefIdSet;
+use std::mem;
+
+use clean::{self, GetDefId, Item};
+use fold;
+use fold::FoldItem::Strip;
+use plugins;
+
+mod collapse_docs;
+pub use self::collapse_docs::collapse_docs;
+
+mod strip_hidden;
+pub use self::strip_hidden::strip_hidden;
+
+mod strip_private;
+pub use self::strip_private::strip_private;
+
+mod strip_priv_imports;
+pub use self::strip_priv_imports::strip_priv_imports;
+
+mod unindent_comments;
+pub use self::unindent_comments::unindent_comments;
+
+type Pass = (&'static str, // name
+ fn(clean::Crate) -> plugins::PluginResult, // fn
+ &'static str); // description
+
+pub const PASSES: &'static [Pass] = &[
+ ("strip-hidden", strip_hidden,
+ "strips all doc(hidden) items from the output"),
+ ("unindent-comments", unindent_comments,
+ "removes excess indentation on comments in order for markdown to like it"),
+ ("collapse-docs", collapse_docs,
+ "concatenates all document attributes into one document attribute"),
+ ("strip-private", strip_private,
+ "strips all private items from a crate which cannot be seen externally, \
+ implies strip-priv-imports"),
+ ("strip-priv-imports", strip_priv_imports,
+ "strips all private import statements (`use`, `extern crate`) from a crate"),
+];
+
+pub const DEFAULT_PASSES: &'static [&'static str] = &[
+ "strip-hidden",
+ "strip-private",
+ "collapse-docs",
+ "unindent-comments",
+];
+
+
+struct Stripper<'a> {
+ retained: &'a mut DefIdSet,
+ access_levels: &'a AccessLevels<DefId>,
+ update_retained: bool,
+}
+
+impl<'a> fold::DocFolder for Stripper<'a> {
+ fn fold_item(&mut self, i: Item) -> Option<Item> {
+ match i.inner {
+ clean::StrippedItem(..) => {
+ // We need to recurse into stripped modules to strip things
+ // like impl methods but when doing so we must not add any
+ // items to the `retained` set.
+ let old = mem::replace(&mut self.update_retained, false);
+ let ret = self.fold_item_recur(i);
+ self.update_retained = old;
+ return ret;
+ }
+ // These items can all get re-exported
+ clean::TypedefItem(..) | clean::StaticItem(..) |
+ clean::StructItem(..) | clean::EnumItem(..) |
+ clean::TraitItem(..) | clean::FunctionItem(..) |
+ clean::VariantItem(..) | clean::MethodItem(..) |
+ clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) |
+ clean::ConstantItem(..) | clean::UnionItem(..) => {
+ if i.def_id.is_local() {
+ if !self.access_levels.is_exported(i.def_id) {
+ return None;
+ }
+ }
+ }
+
+ clean::StructFieldItem(..) => {
+ if i.visibility != Some(clean::Public) {
+ return Strip(i).fold();
+ }
+ }
+
+ clean::ModuleItem(..) => {
+ if i.def_id.is_local() && i.visibility != Some(clean::Public) {
+ let old = mem::replace(&mut self.update_retained, false);
+ let ret = Strip(self.fold_item_recur(i).unwrap()).fold();
+ self.update_retained = old;
+ return ret;
+ }
+ }
+
+ // handled in the `strip-priv-imports` pass
+ clean::ExternCrateItem(..) | clean::ImportItem(..) => {}
+
+ clean::DefaultImplItem(..) | clean::ImplItem(..) => {}
+
+ // tymethods/macros have no control over privacy
+ clean::MacroItem(..) | clean::TyMethodItem(..) => {}
+
+ // Primitives are never stripped
+ clean::PrimitiveItem(..) => {}
+
+ // Associated consts and types are never stripped
+ clean::AssociatedConstItem(..) |
+ clean::AssociatedTypeItem(..) => {}
+ }
+
+ let fastreturn = match i.inner {
+ // nothing left to do for traits (don't want to filter their
+ // methods out, visibility controlled by the trait)
+ clean::TraitItem(..) => true,
+
+ // implementations of traits are always public.
+ clean::ImplItem(ref imp) if imp.trait_.is_some() => true,
+ // Struct variant fields have inherited visibility
+ clean::VariantItem(clean::Variant {
+ kind: clean::VariantKind::Struct(..)
+ }) => true,
+ _ => false,
+ };
+
+ let i = if fastreturn {
+ if self.update_retained {
+ self.retained.insert(i.def_id);
+ }
+ return Some(i);
+ } else {
+ self.fold_item_recur(i)
+ };
+
+ i.and_then(|i| {
+ match i.inner {
+ // emptied modules have no need to exist
+ clean::ModuleItem(ref m)
+ if m.items.is_empty() &&
+ i.doc_value().is_none() => None,
+ _ => {
+ if self.update_retained {
+ self.retained.insert(i.def_id);
+ }
+ Some(i)
+ }
+ }
+ })
+ }
+}
+
+// This stripper discards all impls which reference stripped items
+struct ImplStripper<'a> {
+ retained: &'a DefIdSet
+}
+
+impl<'a> fold::DocFolder for ImplStripper<'a> {
+ fn fold_item(&mut self, i: Item) -> Option<Item> {
+ if let clean::ImplItem(ref imp) = i.inner {
+ // empty non-trait impls can be stripped
+ if imp.trait_.is_none() && imp.items.is_empty() {
+ return None;
+ }
+ if let Some(did) = imp.for_.def_id() {
+ if did.is_local() && !imp.for_.is_generic() &&
+ !self.retained.contains(&did)
+ {
+ return None;
+ }
+ }
+ if let Some(did) = imp.trait_.def_id() {
+ if did.is_local() && !self.retained.contains(&did) {
+ return None;
+ }
+ }
+ }
+ self.fold_item_recur(i)
+ }
+}
+
+// This stripper discards all private import statements (`use`, `extern crate`)
+struct ImportStripper;
+impl fold::DocFolder for ImportStripper {
+ fn fold_item(&mut self, i: Item) -> Option<Item> {
+ match i.inner {
+ clean::ExternCrateItem(..) |
+ clean::ImportItem(..) if i.visibility != Some(clean::Public) => None,
+ _ => self.fold_item_recur(i)
+ }
+ }
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::util::nodemap::DefIdSet;
+use std::mem;
+
+use clean::{self, Attributes};
+use clean::Item;
+use plugins;
+use fold;
+use fold::DocFolder;
+use fold::FoldItem::Strip;
+use passes::ImplStripper;
+
+/// Strip items marked `#[doc(hidden)]`
+pub fn strip_hidden(krate: clean::Crate) -> plugins::PluginResult {
+ let mut retained = DefIdSet();
+
+ // strip all #[doc(hidden)] items
+ let krate = {
+ let mut stripper = Stripper{ retained: &mut retained, update_retained: true };
+ stripper.fold_crate(krate)
+ };
+
+ // strip all impls referencing stripped items
+ let mut stripper = ImplStripper { retained: &retained };
+ stripper.fold_crate(krate)
+}
+
+struct Stripper<'a> {
+ retained: &'a mut DefIdSet,
+ update_retained: bool,
+}
+
+impl<'a> fold::DocFolder for Stripper<'a> {
+ fn fold_item(&mut self, i: Item) -> Option<Item> {
+ if i.attrs.list("doc").has_word("hidden") {
+ debug!("found one in strip_hidden; removing");
+ // use a dedicated hidden item for the given item type, if any
+ match i.inner {
+ clean::StructFieldItem(..) | clean::ModuleItem(..) => {
+ // We need to recurse into stripped modules to
+ // strip things like impl methods but when doing so
+ // we must not add any items to the `retained` set.
+ let old = mem::replace(&mut self.update_retained, false);
+ let ret = Strip(self.fold_item_recur(i).unwrap()).fold();
+ self.update_retained = old;
+ return ret;
+ }
+ _ => return None,
+ }
+ } else {
+ if self.update_retained {
+ self.retained.insert(i.def_id);
+ }
+ }
+ self.fold_item_recur(i)
+ }
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use clean;
+use fold::DocFolder;
+use plugins;
+use passes::ImportStripper;
+
+pub fn strip_priv_imports(krate: clean::Crate) -> plugins::PluginResult {
+ ImportStripper.fold_crate(krate)
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::util::nodemap::DefIdSet;
+
+use clean;
+use plugins;
+use fold::DocFolder;
+use passes::{ImplStripper, ImportStripper, Stripper};
+
+/// Strip private items from the point of view of a crate or externally from a
+/// crate, specified by the `xcrate` flag.
+pub fn strip_private(mut krate: clean::Crate) -> plugins::PluginResult {
+ // This stripper collects all *retained* nodes.
+ let mut retained = DefIdSet();
+ let access_levels = krate.access_levels.clone();
+
+ // strip all private items
+ {
+ let mut stripper = Stripper {
+ retained: &mut retained,
+ access_levels: &access_levels,
+ update_retained: true,
+ };
+ krate = ImportStripper.fold_crate(stripper.fold_crate(krate));
+ }
+
+ // strip all impls referencing private items
+ let mut stripper = ImplStripper { retained: &retained };
+ stripper.fold_crate(krate)
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cmp;
+use std::string::String;
+use std::usize;
+
+use clean::{self, Item};
+use plugins;
+use fold::{self, DocFolder};
+
+pub fn unindent_comments(krate: clean::Crate) -> plugins::PluginResult {
+ let mut cleaner = CommentCleaner;
+ let krate = cleaner.fold_crate(krate);
+ krate
+}
+
+struct CommentCleaner;
+
+impl fold::DocFolder for CommentCleaner {
+ fn fold_item(&mut self, mut i: Item) -> Option<Item> {
+ let mut avec: Vec<clean::Attribute> = Vec::new();
+ for attr in &i.attrs {
+ match attr {
+ &clean::NameValue(ref x, ref s)
+ if "doc" == *x => {
+ avec.push(clean::NameValue("doc".to_string(),
+ unindent(s)))
+ }
+ x => avec.push(x.clone())
+ }
+ }
+ i.attrs = avec;
+ self.fold_item_recur(i)
+ }
+}
+
+fn unindent(s: &str) -> String {
+ let lines = s.lines().collect::<Vec<&str> >();
+ let mut saw_first_line = false;
+ let mut saw_second_line = false;
+ let min_indent = lines.iter().fold(usize::MAX, |min_indent, line| {
+
+ // After we see the first non-whitespace line, look at
+ // the line we have. If it is not whitespace, and therefore
+ // part of the first paragraph, then ignore the indentation
+ // level of the first line
+ let ignore_previous_indents =
+ saw_first_line &&
+ !saw_second_line &&
+ !line.chars().all(|c| c.is_whitespace());
+
+ let min_indent = if ignore_previous_indents {
+ usize::MAX
+ } else {
+ min_indent
+ };
+
+ if saw_first_line {
+ saw_second_line = true;
+ }
+
+ if line.chars().all(|c| c.is_whitespace()) {
+ min_indent
+ } else {
+ saw_first_line = true;
+ let mut whitespace = 0;
+ line.chars().all(|char| {
+ // Compare against either space or tab, ignoring whether they
+ // are mixed or not
+ if char == ' ' || char == '\t' {
+ whitespace += 1;
+ true
+ } else {
+ false
+ }
+ });
+ cmp::min(min_indent, whitespace)
+ }
+ });
+
+ if !lines.is_empty() {
+ let mut unindented = vec![ lines[0].trim().to_string() ];
+ unindented.extend_from_slice(&lines[1..].iter().map(|&line| {
+ if line.chars().all(|c| c.is_whitespace()) {
+ line.to_string()
+ } else {
+ assert!(line.len() >= min_indent);
+ line[min_indent..].to_string()
+ }
+ }).collect::<Vec<_>>());
+ unindented.join("\n")
+ } else {
+ s.to_string()
+ }
+}
+
+#[cfg(test)]
+mod unindent_tests {
+ use super::unindent;
+
+ #[test]
+ fn should_unindent() {
+ let s = " line1\n line2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\nline2");
+ }
+
+ #[test]
+ fn should_unindent_multiple_paragraphs() {
+ let s = " line1\n\n line2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\n\nline2");
+ }
+
+ #[test]
+ fn should_leave_multiple_indent_levels() {
+ // Line 2 is indented another level beyond the
+ // base indentation and should be preserved
+ let s = " line1\n\n line2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\n\n line2");
+ }
+
+ #[test]
+ fn should_ignore_first_line_indent() {
+ // The first line of the first paragraph may not be indented as
+ // far due to the way the doc string was written:
+ //
+ // #[doc = "Start way over here
+ // and continue here"]
+ let s = "line1\n line2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\nline2");
+ }
+
+ #[test]
+ fn should_not_ignore_first_line_indent_in_a_single_line_para() {
+ let s = "line1\n\n line2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\n\n line2");
+ }
+
+ #[test]
+ fn should_unindent_tabs() {
+ let s = "\tline1\n\tline2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\nline2");
+ }
+
+ #[test]
+ fn should_trim_mixed_indentation() {
+ let s = "\t line1\n\t line2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\nline2");
+
+ let s = " \tline1\n \tline2".to_string();
+ let r = unindent(&s);
+ assert_eq!(r, "line1\nline2");
+ }
+}
use rustc::dep_graph::DepGraph;
use rustc::hir::map as hir_map;
use rustc::session::{self, config};
-use rustc::session::config::{get_unstable_features_setting, OutputType,
- OutputTypes, Externs};
+use rustc::session::config::{OutputType, OutputTypes, Externs};
use rustc::session::search_paths::{SearchPaths, PathKind};
use rustc_back::dynamic_lib::DynamicLibrary;
use rustc_back::tempdir::TempDir;
use rustc_metadata::cstore::CStore;
use rustc_resolve::MakeGlobMap;
use syntax::codemap::CodeMap;
+use syntax::feature_gate::UnstableFeatures;
use errors;
use errors::emitter::ColorConfig;
search_paths: libs.clone(),
crate_types: vec!(config::CrateTypeDylib),
externs: externs.clone(),
- unstable_features: get_unstable_features_setting(),
+ unstable_features: UnstableFeatures::from_environment(),
..config::basic_options().clone()
};
}
}
- return opts;
+ opts
}
fn runtest(test: &str, cratename: &str, cfgs: Vec<String>, libs: SearchPaths,
.. config::basic_codegen_options()
},
test: as_test_harness,
- unstable_features: get_unstable_features_setting(),
+ unstable_features: UnstableFeatures::from_environment(),
..config::basic_options().clone()
};
let codemap = Rc::new(CodeMap::new());
let emitter = errors::emitter::EmitterWriter::new(box Sink(data.clone()),
Some(codemap.clone()));
- let old = io::set_panic(box Sink(data.clone()));
+ let old = io::set_panic(Some(box Sink(data.clone())));
let _bomb = Bomb(data.clone(), old.unwrap_or(box io::stdout()));
// Compile the code
Ok(r) => {
match r {
Err(count) => {
- if count > 0 && compile_fail == false {
+ if count > 0 && !compile_fail {
sess.fatal("aborting due to previous error(s)")
- } else if count == 0 && compile_fail == true {
+ } else if count == 0 && compile_fail {
panic!("test compiled while it wasn't supposed to")
}
if count > 0 && error_codes.len() > 0 {
}
}
Err(_) => {
- if compile_fail == false {
+ if !compile_fail {
panic!("couldn't compile the test");
}
if error_codes.len() > 0 {
info!("final test program: {}", prog);
- return prog
+ prog
}
fn partition_source(s: &str) -> (String, String) {
}
}
- return (before, after);
+ (before, after)
}
pub struct Collector {
_ => false,
};
self.view_item_stack.remove(&def_node_id);
- return ret;
+ ret
}
pub fn visit_item(&mut self, item: &hir::Item,
pub fn visit_mod(&mut self, def_id: DefId) {
for item in self.cstore.item_children(def_id) {
- self.visit_item(item.def_id);
+ self.visit_item(item.def);
}
}
- fn visit_item(&mut self, def_id: DefId) {
+ fn visit_item(&mut self, def: Def) {
+ let def_id = def.def_id();
let vis = self.cstore.visibility(def_id);
let inherited_item_level = if vis == Visibility::Public {
self.prev_level
let item_level = self.update(def_id, inherited_item_level);
- if let Some(Def::Mod(_)) = self.cstore.describe_def(def_id) {
+ if let Def::Mod(..) = def {
let orig_level = self.prev_level;
self.prev_level = item_level;
}
fn escape_char(writer: &mut fmt::Write, v: char) -> EncodeResult {
- escape_str(writer, unsafe {
- str::from_utf8_unchecked(v.encode_utf8().as_slice())
- })
+ escape_str(writer, v.encode_utf8(&mut [0; 4]))
}
fn spaces(wr: &mut fmt::Write, mut n: usize) -> EncodeResult {
println!("cargo:rustc-cfg=cargobuild");
println!("cargo:rerun-if-changed=build.rs");
- let target = env::var("TARGET").unwrap();
- let host = env::var("HOST").unwrap();
+ let target = env::var("TARGET").expect("TARGET was not set");
+ let host = env::var("HOST").expect("HOST was not set");
if cfg!(feature = "backtrace") && !target.contains("apple") && !target.contains("msvc") &&
!target.contains("emscripten") {
build_libbacktrace(&host, &target);
run(Command::new("make")
.current_dir(&build_dir)
.arg(format!("INCDIR={}", src_dir.display()))
- .arg("-j").arg(env::var("NUM_JOBS").unwrap()));
+ .arg("-j").arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set")));
}
use borrow::Borrow;
use cmp::max;
use fmt::{self, Debug};
+#[allow(deprecated)]
use hash::{Hash, Hasher, BuildHasher, SipHasher13};
use iter::{FromIterator, FusedIterator};
use mem::{self, replace};
use super::table::{self, Bucket, EmptyBucket, FullBucket, FullBucketMut, RawTable, SafeHash};
use super::table::BucketState::{Empty, Full};
-const INITIAL_LOG2_CAP: usize = 5;
-const INITIAL_CAPACITY: usize = 1 << INITIAL_LOG2_CAP; // 2^5
+const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two
-/// The default behavior of HashMap implements a load factor of 90.9%.
-/// This behavior is characterized by the following condition:
-///
-/// - if size > 0.909 * capacity: grow the map
+/// The default behavior of HashMap implements a maximum load factor of 90.9%.
#[derive(Clone)]
struct DefaultResizePolicy;
DefaultResizePolicy
}
+ /// A hash map's "capacity" is the number of elements it can hold without
+ /// being resized. Its "raw capacity" is the number of slots required to
+ /// provide that capacity, accounting for maximum loading. The raw capacity
+ /// is always zero or a power of two.
#[inline]
- fn min_capacity(&self, usable_size: usize) -> usize {
- // Here, we are rephrasing the logic by specifying the lower limit
- // on capacity:
- //
- // - if `cap < size * 1.1`: grow the map
- usable_size * 11 / 10
+ fn raw_capacity(&self, len: usize) -> usize {
+ if len == 0 {
+ 0
+ } else {
+ // 1. Account for loading: `raw_capacity >= len * 1.1`.
+ // 2. Ensure it is a power of two.
+ // 3. Ensure it is at least the minimum size.
+ let mut raw_cap = len * 11 / 10;
+ assert!(raw_cap >= len, "raw_cap overflow");
+ raw_cap = raw_cap.checked_next_power_of_two().expect("raw_capacity overflow");
+ raw_cap = max(MIN_NONZERO_RAW_CAPACITY, raw_cap);
+ raw_cap
+ }
}
- /// An inverse of `min_capacity`, approximately.
+ /// The capacity of the given raw capacity.
#[inline]
- fn usable_capacity(&self, cap: usize) -> usize {
- // As the number of entries approaches usable capacity,
- // min_capacity(size) must be smaller than the internal capacity,
- // so that the map is not resized:
- // `min_capacity(usable_capacity(x)) <= x`.
- // The left-hand side can only be smaller due to flooring by integer
- // division.
- //
+ fn capacity(&self, raw_cap: usize) -> usize {
// This doesn't have to be checked for overflow since allocation size
// in bytes will overflow earlier than multiplication by 10.
//
// As per https://github.com/rust-lang/rust/pull/30991 this is updated
- // to be: (cap * den + den - 1) / num
- (cap * 10 + 10 - 1) / 11
- }
-}
-
-#[test]
-fn test_resize_policy() {
- let rp = DefaultResizePolicy;
- for n in 0..1000 {
- assert!(rp.min_capacity(rp.usable_capacity(n)) <= n);
- assert!(rp.usable_capacity(rp.min_capacity(n)) <= n);
+ // to be: (raw_cap * den + den - 1) / num
+ (raw_cap * 10 + 10 - 1) / 11
}
}
//
// FIXME(Gankro, pczarn): review the proof and put it all in a separate README.md
-/// A hash map implementation which uses linear probing with Robin
-/// Hood bucket stealing.
+/// A hash map implementation which uses linear probing with Robin Hood bucket
+/// stealing.
///
-/// By default, HashMap uses a somewhat slow hashing algorithm which can provide resistance
-/// to DoS attacks. Rust makes a best attempt at acquiring random numbers without IO
-/// blocking from your system. Because of this HashMap is not guaranteed to provide
-/// DoS resistance since the numbers generated might not be truly random. If you do
-/// require this behavior you can create your own hashing function using
-/// [BuildHasherDefault](../hash/struct.BuildHasherDefault.html).
+/// By default, `HashMap` uses a hashing algorithm selected to provide
+/// resistance against HashDoS attacks. The algorithm is randomly seeded, and a
+/// reasonable best-effort is made to generate this seed from a high quality,
+/// secure source of randomness provided by the host without blocking the
+/// program. Because of this, the randomness of the seed is dependent on the
+/// quality of the system's random number generator at the time it is created.
+/// In particular, seeds generated when the system's entropy pool is abnormally
+/// low such as during system boot may be of a lower quality.
+///
+/// The default hashing algorithm is currently SipHash 1-3, though this is
+/// subject to change at any point in the future. While its performance is very
+/// competitive for medium sized keys, other hashing algorithms will outperform
+/// it for small keys such as integers as well as large keys such as long
+/// strings, though those algorithms will typically *not* protect against
+/// attacks such as HashDoS.
+///
+/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
+/// `HashMap::default`, `HashMap::with_hasher`, and
+/// `HashMap::with_capacity_and_hasher` methods. Many alternative algorithms
+/// are available on crates.io, such as the `fnv` crate.
///
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
/// println!("{:?} has {} hp", viking, health);
/// }
/// ```
+///
+/// A `HashMap` with a fixed list of elements can be initialized from an array:
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// fn main() {
+/// let timber_resources: HashMap<&str, i32> =
+/// [("Norway", 100),
+/// ("Denmark", 50),
+/// ("Iceland", 10)]
+/// .iter().cloned().collect();
+/// // use the values stored in map
+/// }
+/// ```
+
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct HashMap<K, V, S = RandomState> {
// The caller should ensure that invariants by Robin Hood Hashing hold.
fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
- let cap = self.table.capacity();
+ let raw_cap = self.raw_capacity();
let mut buckets = Bucket::new(&mut self.table, hash);
let ib = buckets.index();
- while buckets.index() != ib + cap {
+ while buckets.index() != ib + raw_cap {
// We don't need to compare hashes for value swap.
// Not even DIBs for Robin Hood.
buckets = match buckets.peek() {
Default::default()
}
- /// Creates an empty `HashMap` with the given initial capacity.
+ /// Creates an empty `HashMap` with the specified capacity.
+ ///
+ /// The hash map will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the hash map will not allocate.
///
/// # Examples
///
}
}
- /// Creates an empty `HashMap` with space for at least `capacity`
- /// elements, using `hasher` to hash the keys.
+ /// Creates an empty `HashMap` with the specified capacity, using `hasher`
+ /// to hash the keys.
///
+ /// The hash map will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the hash map will not allocate.
/// Warning: `hasher` is normally randomly generated, and
/// is designed to allow HashMaps to be resistant to attacks that
/// cause many collisions and very poor performance. Setting it
#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> HashMap<K, V, S> {
let resize_policy = DefaultResizePolicy::new();
- let min_cap = max(INITIAL_CAPACITY, resize_policy.min_capacity(capacity));
- let internal_cap = min_cap.checked_next_power_of_two().expect("capacity overflow");
- assert!(internal_cap >= capacity, "capacity overflow");
+ let raw_cap = resize_policy.raw_capacity(capacity);
HashMap {
hash_builder: hash_builder,
resize_policy: resize_policy,
- table: RawTable::new(internal_cap),
+ table: RawTable::new(raw_cap),
}
}
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
- self.resize_policy.usable_capacity(self.table.capacity())
+ self.resize_policy.capacity(self.raw_capacity())
+ }
+
+ /// Returns the hash map's raw capacity.
+ #[inline]
+ fn raw_capacity(&self) -> usize {
+ self.table.capacity()
}
/// Reserves capacity for at least `additional` more elements to be inserted
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
- let new_size = self.len().checked_add(additional).expect("capacity overflow");
- let min_cap = self.resize_policy.min_capacity(new_size);
-
- // An invalid value shouldn't make us run out of space. This includes
- // an overflow check.
- assert!(new_size <= min_cap);
-
- if self.table.capacity() < min_cap {
- let new_capacity = max(min_cap.next_power_of_two(), INITIAL_CAPACITY);
- self.resize(new_capacity);
+ let remaining = self.capacity() - self.len(); // this can't overflow
+ if remaining < additional {
+ let min_cap = self.len().checked_add(additional).expect("reserve overflow");
+ let raw_cap = self.resize_policy.raw_capacity(min_cap);
+ self.resize(raw_cap);
}
}
- /// Resizes the internal vectors to a new capacity. It's your responsibility to:
- /// 1) Make sure the new capacity is enough for all the elements, accounting
+ /// Resizes the internal vectors to a new capacity. It's your
+ /// responsibility to:
+ /// 1) Ensure `new_raw_cap` is enough for all the elements, accounting
/// for the load factor.
- /// 2) Ensure `new_capacity` is a power of two or zero.
- fn resize(&mut self, new_capacity: usize) {
- assert!(self.table.size() <= new_capacity);
- assert!(new_capacity.is_power_of_two() || new_capacity == 0);
+ /// 2) Ensure `new_raw_cap` is a power of two or zero.
+ fn resize(&mut self, new_raw_cap: usize) {
+ assert!(self.table.size() <= new_raw_cap);
+ assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0);
- let mut old_table = replace(&mut self.table, RawTable::new(new_capacity));
+ let mut old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
let old_size = old_table.size();
if old_table.capacity() == 0 || old_table.size() == 0 {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn shrink_to_fit(&mut self) {
- let min_capacity = self.resize_policy.min_capacity(self.len());
- let min_capacity = max(min_capacity.next_power_of_two(), INITIAL_CAPACITY);
-
- // An invalid value shouldn't make us run out of space.
- debug_assert!(self.len() <= min_capacity);
-
- if self.table.capacity() != min_capacity {
- let old_table = replace(&mut self.table, RawTable::new(min_capacity));
+ let new_raw_cap = self.resize_policy.raw_capacity(self.len());
+ if self.raw_capacity() != new_raw_cap {
+ let old_table = replace(&mut self.table, RawTable::new(new_raw_cap));
let old_size = old_table.size();
// Shrink the table. Naive algorithm for resizing:
#[unstable(feature = "fused", issue = "35602")]
impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, K, V> Iterator for Drain<'a, K, V> {
type Item = (K, V);
self.inner.size_hint()
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "drain", since = "1.6.0")]
impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
#[inline]
fn len(&self) -> usize {
impl BuildHasher for RandomState {
type Hasher = DefaultHasher;
#[inline]
+ #[allow(deprecated)]
fn build_hasher(&self) -> DefaultHasher {
DefaultHasher(SipHasher13::new_with_keys(self.k0, self.k1))
}
///
/// [`RandomState`]: struct.RandomState.html
/// [`Hasher`]: ../../hash/trait.Hasher.html
-#[unstable(feature = "hashmap_default_hasher", issue = "0")]
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+#[allow(deprecated)]
+#[derive(Debug)]
pub struct DefaultHasher(SipHasher13);
-#[unstable(feature = "hashmap_default_hasher", issue = "0")]
+impl DefaultHasher {
+ /// Creates a new `DefaultHasher`.
+ ///
+ /// This hasher is not guaranteed to be the same as all other
+ /// `DefaultHasher` instances, but is the same as all other `DefaultHasher`
+ /// instances created through `new` or `default`.
+ #[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+ #[allow(deprecated)]
+ pub fn new() -> DefaultHasher {
+ DefaultHasher(SipHasher13::new_with_keys(0, 0))
+ }
+}
+
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
+impl Default for DefaultHasher {
+ fn default() -> DefaultHasher {
+ DefaultHasher::new()
+ }
+}
+
+#[stable(feature = "hashmap_default_hasher", since = "1.13.0")]
impl Hasher for DefaultHasher {
#[inline]
fn write(&mut self, msg: &[u8]) {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "hashmap_build_hasher", since = "1.7.0")]
impl Default for RandomState {
/// Constructs a new `RandomState`.
#[inline]
use rand::{thread_rng, Rng};
#[test]
- fn test_create_capacities() {
+ fn test_zero_capacities() {
type HM = HashMap<i32, i32>;
let m = HM::new();
let m = HM::with_hasher(RandomState::new());
assert_eq!(m.capacity(), 0);
+
+ let m = HM::with_capacity(0);
+ assert_eq!(m.capacity(), 0);
+
+ let m = HM::with_capacity_and_hasher(0, RandomState::new());
+ assert_eq!(m.capacity(), 0);
+
+ let mut m = HM::new();
+ m.insert(1, 1);
+ m.insert(2, 2);
+ m.remove(&1);
+ m.remove(&2);
+ m.shrink_to_fit();
+ assert_eq!(m.capacity(), 0);
+
+ let mut m = HM::new();
+ m.reserve(0);
+ assert_eq!(m.capacity(), 0);
}
#[test]
assert!(m.is_empty());
let mut i = 0;
- let old_cap = m.table.capacity();
- while old_cap == m.table.capacity() {
+ let old_raw_cap = m.raw_capacity();
+ while old_raw_cap == m.raw_capacity() {
m.insert(i, i);
i += 1;
}
let mut m = HashMap::new();
assert_eq!(m.len(), 0);
- assert_eq!(m.table.capacity(), 0);
+ assert_eq!(m.raw_capacity(), 0);
assert!(m.is_empty());
m.insert(0, 0);
m.remove(&0);
assert!(m.is_empty());
- let initial_cap = m.table.capacity();
- m.reserve(initial_cap);
- let cap = m.table.capacity();
+ let initial_raw_cap = m.raw_capacity();
+ m.reserve(initial_raw_cap);
+ let raw_cap = m.raw_capacity();
- assert_eq!(cap, initial_cap * 2);
+ assert_eq!(raw_cap, initial_raw_cap * 2);
let mut i = 0;
- for _ in 0..cap * 3 / 4 {
+ for _ in 0..raw_cap * 3 / 4 {
m.insert(i, i);
i += 1;
}
// three quarters full
assert_eq!(m.len(), i);
- assert_eq!(m.table.capacity(), cap);
+ assert_eq!(m.raw_capacity(), raw_cap);
- for _ in 0..cap / 4 {
+ for _ in 0..raw_cap / 4 {
m.insert(i, i);
i += 1;
}
// half full
- let new_cap = m.table.capacity();
- assert_eq!(new_cap, cap * 2);
+ let new_raw_cap = m.raw_capacity();
+ assert_eq!(new_raw_cap, raw_cap * 2);
- for _ in 0..cap / 2 - 1 {
+ for _ in 0..raw_cap / 2 - 1 {
i -= 1;
m.remove(&i);
- assert_eq!(m.table.capacity(), new_cap);
+ assert_eq!(m.raw_capacity(), new_raw_cap);
}
// A little more than one quarter full.
m.shrink_to_fit();
- assert_eq!(m.table.capacity(), cap);
+ assert_eq!(m.raw_capacity(), raw_cap);
// again, a little more than half full
- for _ in 0..cap / 2 - 1 {
+ for _ in 0..raw_cap / 2 - 1 {
i -= 1;
m.remove(&i);
}
assert_eq!(m.len(), i);
assert!(!m.is_empty());
- assert_eq!(m.table.capacity(), initial_cap);
+ assert_eq!(m.raw_capacity(), initial_raw_cap);
}
#[test]
/// println!("{:?}", x);
/// }
/// ```
+///
+/// A `HashSet` with a fixed list of elements can be initialized from an array:
+///
+/// ```
+/// use std::collections::HashSet;
+///
+/// fn main() {
+/// let viking_names: HashSet<&str> =
+/// [ "Einar", "Olaf", "Harald" ].iter().cloned().collect();
+/// // use the values stored in the set
+/// }
+/// ```
+
+
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct HashSet<T, S = RandomState> {
HashSet { map: HashMap::new() }
}
- /// Creates an empty HashSet with space for at least `n` elements in
- /// the hash table.
+ /// Creates an empty `HashSet` with the specified capacity.
+ ///
+ /// The hash set will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the hash set will not allocate.
///
/// # Examples
///
HashSet { map: HashMap::with_hasher(hasher) }
}
- /// Creates an empty HashSet with space for at least `capacity`
- /// elements in the hash table, using `hasher` to hash the keys.
+ /// Creates an empty HashSet with the specified capacity, using
+ /// `hasher` to hash the keys.
+ ///
+ /// The hash set will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the hash set will not allocate.
///
/// Warning: `hasher` is normally randomly generated, and
/// is designed to allow `HashSet`s to be resistant to attacks that
use super::super::map::RandomState;
#[test]
- fn test_create_capacities() {
+ fn test_zero_capacities() {
type HS = HashSet<i32>;
let s = HS::new();
let s = HS::with_hasher(RandomState::new());
assert_eq!(s.capacity(), 0);
+
+ let s = HS::with_capacity(0);
+ assert_eq!(s.capacity(), 0);
+
+ let s = HS::with_capacity_and_hasher(0, RandomState::new());
+ assert_eq!(s.capacity(), 0);
+
+ let mut s = HS::new();
+ s.insert(1);
+ s.insert(2);
+ s.remove(&1);
+ s.remove(&2);
+ s.shrink_to_fit();
+ assert_eq!(s.capacity(), 0);
+
+ let mut s = HS::new();
+ s.reserve(0);
+ assert_eq!(s.capacity(), 0);
}
#[test]
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked out `Vec<Option<u64, K, V>>`.
-#[cfg_attr(stage0, unsafe_no_drop_flag)]
pub struct RawTable<K, V> {
capacity: usize,
size: usize,
use fmt;
use io;
use path::{Path, PathBuf};
+use sys;
use sys::os as os_imp;
/// Returns the current working directory as a `PathBuf`.
///
/// This structure is created through the `std::env::args_os` method.
#[stable(feature = "env", since = "1.0.0")]
-pub struct ArgsOs { inner: os_imp::Args }
+pub struct ArgsOs { inner: sys::args::Args }
/// Returns the arguments which this program was started with (normally passed
/// via the command line).
/// ```
#[stable(feature = "env", since = "1.0.0")]
pub fn args_os() -> ArgsOs {
- ArgsOs { inner: os_imp::args() }
+ ArgsOs { inner: sys::args::args() }
}
#[stable(feature = "env", since = "1.0.0")]
/// Constants associated with the current target
#[stable(feature = "env", since = "1.0.0")]
pub mod consts {
+ use sys::env::os;
+
/// A string describing the architecture of the CPU that is currently
/// in use.
///
/// - unix
/// - windows
#[stable(feature = "env", since = "1.0.0")]
- pub const FAMILY: &'static str = super::os::FAMILY;
+ pub const FAMILY: &'static str = os::FAMILY;
/// A string describing the specific operating system in use.
/// Example value is `linux`.
/// - android
/// - windows
#[stable(feature = "env", since = "1.0.0")]
- pub const OS: &'static str = super::os::OS;
+ pub const OS: &'static str = os::OS;
/// Specifies the filename prefix used for shared libraries on this
/// platform. Example value is `lib`.
/// - lib
/// - `""` (an empty string)
#[stable(feature = "env", since = "1.0.0")]
- pub const DLL_PREFIX: &'static str = super::os::DLL_PREFIX;
+ pub const DLL_PREFIX: &'static str = os::DLL_PREFIX;
/// Specifies the filename suffix used for shared libraries on this
/// platform. Example value is `.so`.
/// - .dylib
/// - .dll
#[stable(feature = "env", since = "1.0.0")]
- pub const DLL_SUFFIX: &'static str = super::os::DLL_SUFFIX;
+ pub const DLL_SUFFIX: &'static str = os::DLL_SUFFIX;
/// Specifies the file extension used for shared libraries on this
/// platform that goes after the dot. Example value is `so`.
/// - dylib
/// - dll
#[stable(feature = "env", since = "1.0.0")]
- pub const DLL_EXTENSION: &'static str = super::os::DLL_EXTENSION;
+ pub const DLL_EXTENSION: &'static str = os::DLL_EXTENSION;
/// Specifies the filename suffix used for executable binaries on this
/// platform. Example value is `.exe`.
/// - .pexe
/// - `""` (an empty string)
#[stable(feature = "env", since = "1.0.0")]
- pub const EXE_SUFFIX: &'static str = super::os::EXE_SUFFIX;
+ pub const EXE_SUFFIX: &'static str = os::EXE_SUFFIX;
/// Specifies the file extension, if any, used for executable binaries
/// on this platform. Example value is `exe`.
/// - exe
/// - `""` (an empty string)
#[stable(feature = "env", since = "1.0.0")]
- pub const EXE_EXTENSION: &'static str = super::os::EXE_EXTENSION;
-
-}
-
-#[cfg(target_os = "linux")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "linux";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "macos")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "macos";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".dylib";
- pub const DLL_EXTENSION: &'static str = "dylib";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "ios")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "ios";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".dylib";
- pub const DLL_EXTENSION: &'static str = "dylib";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "freebsd")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "freebsd";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "dragonfly")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "dragonfly";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "bitrig")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "bitrig";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "netbsd")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "netbsd";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "openbsd")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "openbsd";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "android")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "android";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "solaris")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "solaris";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = "";
- pub const EXE_EXTENSION: &'static str = "";
-}
-
-#[cfg(target_os = "windows")]
-mod os {
- pub const FAMILY: &'static str = "windows";
- pub const OS: &'static str = "windows";
- pub const DLL_PREFIX: &'static str = "";
- pub const DLL_SUFFIX: &'static str = ".dll";
- pub const DLL_EXTENSION: &'static str = "dll";
- pub const EXE_SUFFIX: &'static str = ".exe";
- pub const EXE_EXTENSION: &'static str = "exe";
-}
-
-#[cfg(all(target_os = "nacl", not(target_arch = "le32")))]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "nacl";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = ".nexe";
- pub const EXE_EXTENSION: &'static str = "nexe";
-}
-#[cfg(all(target_os = "nacl", target_arch = "le32"))]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "pnacl";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".pso";
- pub const DLL_EXTENSION: &'static str = "pso";
- pub const EXE_SUFFIX: &'static str = ".pexe";
- pub const EXE_EXTENSION: &'static str = "pexe";
-}
-
-#[cfg(target_os = "emscripten")]
-mod os {
- pub const FAMILY: &'static str = "unix";
- pub const OS: &'static str = "emscripten";
- pub const DLL_PREFIX: &'static str = "lib";
- pub const DLL_SUFFIX: &'static str = ".so";
- pub const DLL_EXTENSION: &'static str = "so";
- pub const EXE_SUFFIX: &'static str = ".js";
- pub const EXE_EXTENSION: &'static str = "js";
+ pub const EXE_EXTENSION: &'static str = os::EXE_EXTENSION;
}
#[cfg(target_arch = "x86")]
pub const ARCH: &'static str = "asmjs";
}
+#[cfg(target_arch = "wasm32")]
+mod arch {
+ pub const ARCH: &'static str = "wasm32";
+}
+
#[cfg(test)]
mod tests {
use super::*;
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn test_var_big() {
let mut s = "".to_string();
let mut i = 0;
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn test_self_exe_path() {
let path = current_exe();
assert!(path.is_ok());
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn test_env_set_get_huge() {
let n = make_rand_name();
let s = repeat("x").take(10000).collect::<String>();
//! # The `Error` trait
//!
//! `Error` is a trait representing the basic expectations for error values,
-//! i.e. values of type `E` in `Result<T, E>`. At a minimum, errors must provide
+//! i.e. values of type `E` in [`Result<T, E>`]. At a minimum, errors must provide
//! a description, but they may optionally provide additional detail (via
-//! `Display`) and cause chain information:
+//! [`Display`]) and cause chain information:
//!
//! ```
//! use std::fmt::Display;
//! }
//! ```
//!
-//! The `cause` method is generally used when errors cross "abstraction
+//! The [`cause`] method is generally used when errors cross "abstraction
//! boundaries", i.e. when one module must report an error that is "caused"
//! by an error from a lower-level module. This setup makes it possible for the
//! high-level module to provide its own errors that do not commit to any
//! particular implementation, but also reveal some of its implementation for
-//! debugging via `cause` chains.
+//! debugging via [`cause`] chains.
+//!
+//! [`Result<T, E>`]: ../result/enum.Result.html
+//! [`Display`]: ../fmt/trait.Display.html
+//! [`cause`]: trait.Error.html#method.cause
#![stable(feature = "rust1", since = "1.0.0")]
}
}
-#[unstable(feature = "try_borrow", issue = "35070")]
-impl<'a, T: ?Sized + Reflect> Error for cell::BorrowError<'a, T> {
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Error for cell::BorrowError {
fn description(&self) -> &str {
"already mutably borrowed"
}
}
-#[unstable(feature = "try_borrow", issue = "35070")]
-impl<'a, T: ?Sized + Reflect> Error for cell::BorrowMutError<'a, T> {
+#[stable(feature = "try_borrow", since = "1.13.0")]
+impl Error for cell::BorrowMutError {
fn description(&self) -> &str {
"already borrowed"
}
}
// Turns this `CString` into an empty string to prevent
-// memory unsafe code from working by accident.
+// memory unsafe code from working by accident. Inline
+// to prevent LLVM from optimizing it away in debug builds.
#[stable(feature = "cstring_drop", since = "1.13.0")]
impl Drop for CString {
+ #[inline]
fn drop(&mut self) {
unsafe { *self.inner.get_unchecked_mut(0) = 0; }
}
use super::*;
use os::raw::c_char;
use borrow::Cow::{Borrowed, Owned};
- use hash::{SipHasher, Hash, Hasher};
+ use hash::{Hash, Hasher};
+ use collections::hash_map::DefaultHasher;
#[test]
fn c_to_rust() {
let ptr = data.as_ptr() as *const c_char;
let cstr: &'static CStr = unsafe { CStr::from_ptr(ptr) };
- let mut s = SipHasher::new_with_keys(0, 0);
+ let mut s = DefaultHasher::new();
cstr.hash(&mut s);
let cstr_hash = s.finish();
- let mut s = SipHasher::new_with_keys(0, 0);
+ let mut s = DefaultHasher::new();
CString::new(&data[..data.len() - 1]).unwrap().hash(&mut s);
let cstring_hash = s.finish();
OsString { inner: Buf::from_string(String::new()) }
}
- #[cfg(unix)]
- fn _from_bytes(vec: Vec<u8>) -> Option<OsString> {
- use os::unix::ffi::OsStringExt;
- Some(OsString::from_vec(vec))
- }
-
- #[cfg(windows)]
- fn _from_bytes(vec: Vec<u8>) -> Option<OsString> {
- String::from_utf8(vec).ok().map(OsString::from)
- }
-
/// Converts to an `OsStr` slice.
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_os_str(&self) -> &OsStr {
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use io::prelude::*;
if let SeekFrom::Current(n) = pos {
let remainder = (self.cap - self.pos) as i64;
// it should be safe to assume that remainder fits within an i64 as the alternative
- // means we managed to allocate 8 ebibytes and that's absurd.
+ // means we managed to allocate 8 exbibytes and that's absurd.
// But it's not out of the realm of possibility for some weird underlying reader to
// support seeking by i64::min_value() so we need to handle underflow when subtracting
// remainder.
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn panic_in_write_doesnt_flush_in_drop() {
static WRITES: AtomicUsize = AtomicUsize::new(0);
use io::prelude::*;
+use core::convert::TryInto;
use cmp;
use io::{self, SeekFrom, Error, ErrorKind};
#[stable(feature = "rust1", since = "1.0.0")]
impl Write for Cursor<Vec<u8>> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let pos: usize = self.position().try_into().map_err(|_| {
+ Error::new(ErrorKind::InvalidInput,
+ "cursor position exceeds maximum possible vector length")
+ })?;
// Make sure the internal buffer is as least as big as where we
// currently are
- let pos = self.position();
- let amt = pos.saturating_sub(self.inner.len() as u64);
- // use `resize` so that the zero filling is as efficient as possible
let len = self.inner.len();
- self.inner.resize(len + amt as usize, 0);
-
+ if len < pos {
+ // use `resize` so that the zero filling is as efficient as possible
+ self.inner.resize(pos, 0);
+ }
// Figure out what bytes will be used to overwrite what's currently
// there (left), and what will be appended on the end (right)
{
- let pos = pos as usize;
let space = self.inner.len() - pos;
let (left, right) = buf.split_at(cmp::min(space, buf.len()));
self.inner[pos..pos + left.len()].copy_from_slice(left);
}
// Bump us forward
- self.set_position(pos + buf.len() as u64);
+ self.set_position((pos + buf.len()) as u64);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> { Ok(()) }
let mut r = Cursor::new(Vec::new());
assert!(r.seek(SeekFrom::End(-2)).is_err());
}
+
+ #[test]
+ #[cfg(target_pointer_width = "32")]
+ fn vec_seek_and_write_past_usize_max() {
+ let mut c = Cursor::new(Vec::new());
+ c.set_position(<usize>::max_value() as u64 + 1);
+ assert!(c.write_all(&[1, 2, 3]).is_err());
+ }
}
#[stable(feature = "rust1", since = "1.0.0")]
fn consume(&mut self, amt: usize);
- /// Read all bytes into `buf` until the delimiter `byte` is reached.
+ /// Read all bytes into `buf` until the delimiter `byte` or EOF is reached.
///
/// This function will read bytes from the underlying stream until the
/// delimiter or EOF is found. Once found, all bytes up to, and including,
/// the delimiter (if found) will be appended to `buf`.
///
- /// If this reader is currently at EOF then this function will not modify
- /// `buf` and will return `Ok(n)` where `n` is the number of bytes which
- /// were read.
+ /// If successful, this function will return the total number of bytes read.
///
/// # Errors
///
/// up to, and including, the delimiter (if found) will be appended to
/// `buf`.
///
- /// If this reader is currently at EOF then this function will not modify
- /// `buf` and will return `Ok(n)` where `n` is the number of bytes which
- /// were read.
+ /// If successful, this function will return the total number of bytes read.
///
/// # Errors
///
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
if !self.done_first {
match self.first.read(buf)? {
- 0 => { self.done_first = true; }
+ 0 if buf.len() != 0 => { self.done_first = true; }
n => return Ok(n),
}
}
/// # Ok(())
/// # }
/// ```
- #[unstable(feature = "io_take_into_inner", issue = "0")]
+ #[unstable(feature = "io_take_into_inner", issue = "23755")]
pub fn into_inner(self) -> T {
self.inner
}
use super::repeat;
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn read_until() {
let mut buf = Cursor::new(&b"12"[..]);
let mut v = Vec::new();
cmp_bufread(chain1, chain2, &testdata[..]);
}
+ #[test]
+ fn chain_zero_length_read_is_not_eof() {
+ let a = b"A";
+ let b = b"B";
+ let mut s = String::new();
+ let mut chain = (&a[..]).chain(&b[..]);
+ chain.read(&mut []).unwrap();
+ chain.read_to_string(&mut s).unwrap();
+ assert_eq!("AB", s);
+ }
+
#[bench]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn bench_read_to_end(b: &mut test::Bencher) {
b.iter(|| {
let mut lr = repeat(1).take(10000000);
}
fn handle_ebadf<T>(r: io::Result<T>, default: T) -> io::Result<T> {
- #[cfg(windows)]
- const ERR: i32 = ::sys::c::ERROR_INVALID_HANDLE as i32;
- #[cfg(not(windows))]
- const ERR: i32 = ::libc::EBADF as i32;
+ use sys::stdio::EBADF_ERR;
match r {
- Err(ref e) if e.raw_os_error() == Some(ERR) => Ok(default),
+ Err(ref e) if e.raw_os_error() == Some(EBADF_ERR) => Ok(default),
r => r
}
}
with a more general mechanism",
issue = "0")]
#[doc(hidden)]
-pub fn set_panic(sink: Box<Write + Send>) -> Option<Box<Write + Send>> {
+pub fn set_panic(sink: Option<Box<Write + Send>>) -> Option<Box<Write + Send>> {
use panicking::LOCAL_STDERR;
use mem;
LOCAL_STDERR.with(move |slot| {
- mem::replace(&mut *slot.borrow_mut(), Some(sink))
+ mem::replace(&mut *slot.borrow_mut(), sink)
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
with a more general mechanism",
issue = "0")]
#[doc(hidden)]
-pub fn set_print(sink: Box<Write + Send>) -> Option<Box<Write + Send>> {
+pub fn set_print(sink: Option<Box<Write + Send>>) -> Option<Box<Write + Send>> {
use mem;
LOCAL_STDOUT.with(move |slot| {
- mem::replace(&mut *slot.borrow_mut(), Some(sink))
+ mem::replace(&mut *slot.borrow_mut(), sink)
}).and_then(|mut s| {
let _ = s.flush();
Some(s)
use super::*;
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn panic_doesnt_poison() {
thread::spawn(|| {
let _a = stdin();
#![feature(str_utf16)]
#![feature(test, rustc_private)]
#![feature(thread_local)]
-#![feature(try_borrow)]
#![feature(try_from)]
#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unique)]
-#![cfg_attr(stage0, feature(unsafe_no_drop_flag))]
#![feature(unwind_attributes)]
#![feature(vec_push_all)]
#![feature(zero_one)]
#[macro_export]
macro_rules! cfg { ($($cfg:tt)*) => ({ /* compiler built-in */ }) }
- /// Parse the current given file as an expression.
- ///
- /// This is generally a bad idea, because it's going to behave unhygienically.
+ /// Parse the file provided in the argument as an expression or an
+ /// item according to the context. This file is located relative
+ /// to the current file (similarly to how modules are found).
+ ///
+ /// Using this macro is often a bad idea, because if the file is
+ /// parsed as an expression, it is going to be placed in the
+ /// surrounding code unhygienically. This could result in variables
+ /// or functions being different from what the file expected if
+ /// there are variables or functions that have the same name in
+ /// the current file.
///
/// # Examples
///
// Original implementation taken from rust-memchr
// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
-
-
/// A safe interface to `memchr`.
///
/// Returns the index corresponding to the first occurrence of `needle` in
/// let haystack = b"the quick brown fox";
/// assert_eq!(memchr(b'k', haystack), Some(8));
/// ```
+#[inline]
pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
- // libc memchr
- #[cfg(not(target_os = "windows"))]
- fn memchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
- use libc;
-
- let p = unsafe {
- libc::memchr(
- haystack.as_ptr() as *const libc::c_void,
- needle as libc::c_int,
- haystack.len() as libc::size_t)
- };
- if p.is_null() {
- None
- } else {
- Some(p as usize - (haystack.as_ptr() as usize))
- }
- }
-
- // use fallback on windows, since it's faster
- #[cfg(target_os = "windows")]
- fn memchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memchr(needle, haystack)
- }
-
- memchr_specific(needle, haystack)
+ ::sys::memchr::memchr(needle, haystack)
}
/// A safe interface to `memrchr`.
/// let haystack = b"the quick brown fox";
/// assert_eq!(memrchr(b'o', haystack), Some(17));
/// ```
+#[inline]
pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
-
- #[cfg(target_os = "linux")]
- fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
- use libc;
-
- // GNU's memrchr() will - unlike memchr() - error if haystack is empty.
- if haystack.is_empty() {return None}
- let p = unsafe {
- libc::memrchr(
- haystack.as_ptr() as *const libc::c_void,
- needle as libc::c_int,
- haystack.len() as libc::size_t)
- };
- if p.is_null() {
- None
- } else {
- Some(p as usize - (haystack.as_ptr() as usize))
- }
- }
-
- #[cfg(not(target_os = "linux"))]
- fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
- fallback::memrchr(needle, haystack)
- }
-
- memrchr_specific(needle, haystack)
-}
-
-#[allow(dead_code)]
-mod fallback {
- use cmp;
- use mem;
-
- const LO_U64: u64 = 0x0101010101010101;
- const HI_U64: u64 = 0x8080808080808080;
-
- // use truncation
- const LO_USIZE: usize = LO_U64 as usize;
- const HI_USIZE: usize = HI_U64 as usize;
-
- /// Return `true` if `x` contains any zero byte.
- ///
- /// From *Matters Computational*, J. Arndt
- ///
- /// "The idea is to subtract one from each of the bytes and then look for
- /// bytes where the borrow propagated all the way to the most significant
- /// bit."
- #[inline]
- fn contains_zero_byte(x: usize) -> bool {
- x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
- }
-
- #[cfg(target_pointer_width = "32")]
- #[inline]
- fn repeat_byte(b: u8) -> usize {
- let mut rep = (b as usize) << 8 | b as usize;
- rep = rep << 16 | rep;
- rep
- }
-
- #[cfg(target_pointer_width = "64")]
- #[inline]
- fn repeat_byte(b: u8) -> usize {
- let mut rep = (b as usize) << 8 | b as usize;
- rep = rep << 16 | rep;
- rep = rep << 32 | rep;
- rep
- }
-
- /// Return the first index matching the byte `a` in `text`.
- pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
- // Scan for a single byte value by reading two `usize` words at a time.
- //
- // Split `text` in three parts
- // - unaligned initial part, before the first word aligned address in text
- // - body, scan by 2 words at a time
- // - the last remaining part, < 2 word size
- let len = text.len();
- let ptr = text.as_ptr();
- let usize_bytes = mem::size_of::<usize>();
-
- // search up to an aligned boundary
- let align = (ptr as usize) & (usize_bytes- 1);
- let mut offset;
- if align > 0 {
- offset = cmp::min(usize_bytes - align, len);
- if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
- return Some(index);
- }
- } else {
- offset = 0;
- }
-
- // search the body of the text
- let repeated_x = repeat_byte(x);
-
- if len >= 2 * usize_bytes {
- while offset <= len - 2 * usize_bytes {
- unsafe {
- let u = *(ptr.offset(offset as isize) as *const usize);
- let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);
-
- // break if there is a matching byte
- let zu = contains_zero_byte(u ^ repeated_x);
- let zv = contains_zero_byte(v ^ repeated_x);
- if zu || zv {
- break;
- }
- }
- offset += usize_bytes * 2;
- }
- }
-
- // find the byte after the point the body loop stopped
- text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
- }
-
- /// Return the last index matching the byte `a` in `text`.
- pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
- // Scan for a single byte value by reading two `usize` words at a time.
- //
- // Split `text` in three parts
- // - unaligned tail, after the last word aligned address in text
- // - body, scan by 2 words at a time
- // - the first remaining bytes, < 2 word size
- let len = text.len();
- let ptr = text.as_ptr();
- let usize_bytes = mem::size_of::<usize>();
-
- // search to an aligned boundary
- let end_align = (ptr as usize + len) & (usize_bytes - 1);
- let mut offset;
- if end_align > 0 {
- offset = if end_align >= len { 0 } else { len - end_align };
- if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
- return Some(offset + index);
- }
- } else {
- offset = len;
- }
-
- // search the body of the text
- let repeated_x = repeat_byte(x);
-
- while offset >= 2 * usize_bytes {
- unsafe {
- let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize);
- let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize);
-
- // break if there is a matching byte
- let zu = contains_zero_byte(u ^ repeated_x);
- let zv = contains_zero_byte(v ^ repeated_x);
- if zu || zv {
- break;
- }
- }
- offset -= 2 * usize_bytes;
- }
-
- // find the byte before the point the body loop stopped
- text[..offset].iter().rposition(|elt| *elt == x)
- }
-
- // test fallback implementations on all platforms
- #[test]
- fn matches_one() {
- assert_eq!(Some(0), memchr(b'a', b"a"));
- }
-
- #[test]
- fn matches_begin() {
- assert_eq!(Some(0), memchr(b'a', b"aaaa"));
- }
-
- #[test]
- fn matches_end() {
- assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
- }
-
- #[test]
- fn matches_nul() {
- assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
- }
-
- #[test]
- fn matches_past_nul() {
- assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
- }
-
- #[test]
- fn no_match_empty() {
- assert_eq!(None, memchr(b'a', b""));
- }
-
- #[test]
- fn no_match() {
- assert_eq!(None, memchr(b'a', b"xyz"));
- }
-
- #[test]
- fn matches_one_reversed() {
- assert_eq!(Some(0), memrchr(b'a', b"a"));
- }
-
- #[test]
- fn matches_begin_reversed() {
- assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
- }
-
- #[test]
- fn matches_end_reversed() {
- assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
- }
-
- #[test]
- fn matches_nul_reversed() {
- assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
- }
-
- #[test]
- fn matches_past_nul_reversed() {
- assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
- }
-
- #[test]
- fn no_match_empty_reversed() {
- assert_eq!(None, memrchr(b'a', b""));
- }
-
- #[test]
- fn no_match_reversed() {
- assert_eq!(None, memrchr(b'a', b"xyz"));
- }
-
- #[test]
- fn each_alignment_reversed() {
- let mut data = [1u8; 64];
- let needle = 2;
- let pos = 40;
- data[pos] = needle;
- for start in 0..16 {
- assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
- }
- }
+ ::sys::memchr::memrchr(needle, haystack)
}
#[cfg(test)]
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use net::*;
use net::test::{tsa, sa6, sa4};
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-#[allow(deprecated)]
+#[stable(feature = "ip_addr", since = "1.7.0")]
impl fmt::Display for IpAddr {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
}
// Tests for this module
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use net::*;
use net::Ipv6MulticastScope::*;
mod tcp;
mod udp;
mod parser;
-#[cfg(test)] mod test;
+#[cfg(test)]
+mod test;
/// Possible values which can be passed to the [`shutdown`] method of
/// [`TcpStream`].
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "ip_addr", since = "1.7.0")]
impl FromStr for IpAddr {
type Err = AddrParseError;
fn from_str(s: &str) -> Result<IpAddr, AddrParseError> {
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use io::ErrorKind;
use io::prelude::*;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#[allow(dead_code)] // not used on emscripten
+
use env;
use net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr, ToSocketAddrs};
use sync::atomic::{AtomicUsize, Ordering};
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use io::ErrorKind;
use net::*;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use libc;
+
+use fs::Metadata;
+use sys_common::AsInner;
+
+#[allow(deprecated)]
+use os::haiku::raw;
+
+/// OS-specific extension methods for `fs::Metadata`
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned `stat` are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[rustc_deprecated(since = "1.8.0",
+ reason = "deprecated in favor of the accessor \
+ methods of this trait")]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_crtime(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_crtime_nsec(&self) -> i64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe {
+ &*(self.as_inner().as_inner() as *const libc::stat
+ as *const raw::stat)
+ }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_crtime(&self) -> i64 {
+ self.as_inner().as_inner().st_crtime as i64
+ }
+ fn st_crtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_crtime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Haiku-specific definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+pub mod raw;
+pub mod fs;
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Haiku-specific raw type definitions
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+
+#![allow(deprecated)]
+
+use os::raw::{c_long};
+use os::unix::raw::{uid_t, gid_t};
+
+// Use the direct definition of usize, instead of uintptr_t like in libc
+#[stable(feature = "pthread_t", since = "1.8.0")] pub type pthread_t = usize;
+
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type dev_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type mode_t = u32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = i32;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = i64;
+#[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i32;
+
+#[repr(C)]
+#[derive(Clone)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: ino_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: mode_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: nlink_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: uid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: gid_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: off_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: dev_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: blksize_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_crtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_crtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_type: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: blkcnt_t,
+}
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
- target_arch = "asmjs"))]
+ target_arch = "asmjs",
+ target_arch = "wasm32"))]
mod arch {
use os::raw::{c_long, c_short, c_uint};
#[cfg(target_os = "bitrig")] pub mod bitrig;
#[cfg(target_os = "dragonfly")] pub mod dragonfly;
#[cfg(target_os = "freebsd")] pub mod freebsd;
+#[cfg(target_os = "haiku")] pub mod haiku;
#[cfg(target_os = "ios")] pub mod ios;
#[cfg(target_os = "linux")] pub mod linux;
#[cfg(target_os = "macos")] pub mod macos;
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for *const T {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for *mut T {}
-#[stable(feature = "catch_unwind", since = "1.9.0")]
+#[unstable(feature = "unique", issue = "27730")]
impl<T: UnwindSafe> UnwindSafe for Unique<T> {}
-#[stable(feature = "catch_unwind", since = "1.9.0")]
+#[unstable(feature = "shared", issue = "27730")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Shared<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: ?Sized> UnwindSafe for Mutex<T> {}
use ffi::{OsStr, OsString};
-use self::platform::{is_sep_byte, is_verbatim_sep, MAIN_SEP_STR, parse_prefix};
+use sys::path::{is_sep_byte, is_verbatim_sep, MAIN_SEP_STR, parse_prefix};
////////////////////////////////////////////////////////////////////////////////
// GENERAL NOTES
// OsStr APIs for parsing, but it will take a while for those to become
// available.
-////////////////////////////////////////////////////////////////////////////////
-// Platform-specific definitions
-////////////////////////////////////////////////////////////////////////////////
-
-// The following modules give the most basic tools for parsing paths on various
-// platforms. The bulk of the code is devoted to parsing prefixes on Windows.
-
-#[cfg(unix)]
-mod platform {
- use super::Prefix;
- use ffi::OsStr;
-
- #[inline]
- pub fn is_sep_byte(b: u8) -> bool {
- b == b'/'
- }
-
- #[inline]
- pub fn is_verbatim_sep(b: u8) -> bool {
- b == b'/'
- }
-
- pub fn parse_prefix(_: &OsStr) -> Option<Prefix> {
- None
- }
-
- pub const MAIN_SEP_STR: &'static str = "/";
- pub const MAIN_SEP: char = '/';
-}
-
-#[cfg(windows)]
-mod platform {
- use ascii::*;
-
- use super::{os_str_as_u8_slice, u8_slice_as_os_str, Prefix};
- use ffi::OsStr;
-
- #[inline]
- pub fn is_sep_byte(b: u8) -> bool {
- b == b'/' || b == b'\\'
- }
-
- #[inline]
- pub fn is_verbatim_sep(b: u8) -> bool {
- b == b'\\'
- }
-
- pub fn parse_prefix<'a>(path: &'a OsStr) -> Option<Prefix> {
- use super::Prefix::*;
- unsafe {
- // The unsafety here stems from converting between &OsStr and &[u8]
- // and back. This is safe to do because (1) we only look at ASCII
- // contents of the encoding and (2) new &OsStr values are produced
- // only from ASCII-bounded slices of existing &OsStr values.
- let mut path = os_str_as_u8_slice(path);
-
- if path.starts_with(br"\\") {
- // \\
- path = &path[2..];
- if path.starts_with(br"?\") {
- // \\?\
- path = &path[2..];
- if path.starts_with(br"UNC\") {
- // \\?\UNC\server\share
- path = &path[4..];
- let (server, share) = match parse_two_comps(path, is_verbatim_sep) {
- Some((server, share)) =>
- (u8_slice_as_os_str(server), u8_slice_as_os_str(share)),
- None => (u8_slice_as_os_str(path), u8_slice_as_os_str(&[])),
- };
- return Some(VerbatimUNC(server, share));
- } else {
- // \\?\path
- let idx = path.iter().position(|&b| b == b'\\');
- if idx == Some(2) && path[1] == b':' {
- let c = path[0];
- if c.is_ascii() && (c as char).is_alphabetic() {
- // \\?\C:\ path
- return Some(VerbatimDisk(c.to_ascii_uppercase()));
- }
- }
- let slice = &path[..idx.unwrap_or(path.len())];
- return Some(Verbatim(u8_slice_as_os_str(slice)));
- }
- } else if path.starts_with(b".\\") {
- // \\.\path
- path = &path[2..];
- let pos = path.iter().position(|&b| b == b'\\');
- let slice = &path[..pos.unwrap_or(path.len())];
- return Some(DeviceNS(u8_slice_as_os_str(slice)));
- }
- match parse_two_comps(path, is_sep_byte) {
- Some((server, share)) if !server.is_empty() && !share.is_empty() => {
- // \\server\share
- return Some(UNC(u8_slice_as_os_str(server), u8_slice_as_os_str(share)));
- }
- _ => (),
- }
- } else if path.get(1) == Some(& b':') {
- // C:
- let c = path[0];
- if c.is_ascii() && (c as char).is_alphabetic() {
- return Some(Disk(c.to_ascii_uppercase()));
- }
- }
- return None;
- }
-
- fn parse_two_comps(mut path: &[u8], f: fn(u8) -> bool) -> Option<(&[u8], &[u8])> {
- let first = match path.iter().position(|x| f(*x)) {
- None => return None,
- Some(x) => &path[..x],
- };
- path = &path[(first.len() + 1)..];
- let idx = path.iter().position(|x| f(*x));
- let second = &path[..idx.unwrap_or(path.len())];
- Some((first, second))
- }
- }
-
- pub const MAIN_SEP_STR: &'static str = "\\";
- pub const MAIN_SEP: char = '\\';
-}
-
////////////////////////////////////////////////////////////////////////////////
// Windows Prefixes
////////////////////////////////////////////////////////////////////////////////
/// The primary separator for the current platform
#[stable(feature = "rust1", since = "1.0.0")]
-pub const MAIN_SEPARATOR: char = platform::MAIN_SEP;
+pub const MAIN_SEPARATOR: char = ::sys::path::MAIN_SEP;
////////////////////////////////////////////////////////////////////////////////
// Misc helpers
#[test]
pub fn test_compare() {
- use hash::{Hash, Hasher, SipHasher};
+ use hash::{Hash, Hasher};
+ use collections::hash_map::DefaultHasher;
fn hash<T: Hash>(t: T) -> u64 {
- let mut s = SipHasher::new_with_keys(0, 0);
+ let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Working with processes.
+//! A module for working with processes.
+//!
+//! # Examples
+//!
+//! Basic usage where we try to execute the `cat` shell command:
+//!
+//! ```should_panic
+//! use std::process::Command;
+//!
+//! let mut child = Command::new("/bin/cat")
+//! .arg("file.txt")
+//! .spawn()
+//! .expect("failed to execute child");
+//!
+//! let ecode = child.wait()
+//! .expect("failed to wait on child");
+//!
+//! assert!(ecode.success());
+//! ```
#![stable(feature = "process", since = "1.0.0")]
::sys::os::exit(code)
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use io::prelude::*;
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn test_os_rng_tasks() {
let mut txs = vec!();
thread_info::set(main_guard, thread);
// Store our args if necessary in a squirreled away location
- sys_common::args::init(argc, argv);
+ sys::args::init(argc, argv);
// Let's run some code!
let res = panic::catch_unwind(mem::transmute::<_, fn()>(main));
#[cfg(target_os = "ios")]
#[link(name = "System")]
extern {}
+
+#[cfg(target_os = "haiku")]
+#[link(name = "network")]
+extern {}
use thread;
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn test_barrier() {
const N: usize = 10;
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn notify_one() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn notify_all() {
const N: usize = 10;
}
#[test]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn wait_timeout_ms() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
#[test]
#[should_panic]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn two_mutexes() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use env;
use super::*;
// wait for the child thread to exit before we exit
rx2.recv().unwrap();
}
+
+ #[test]
+ fn issue_32114() {
+ let (tx, _) = channel();
+ let _ = tx.send(123);
+ assert_eq!(tx.send(123), Err(SendError(123)));
+ }
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod sync_tests {
use env;
use thread;
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::mpsc::channel;
use super::{Queue, Data, Empty, Inconsistent};
// Couldn't send the data, the port hung up first. Return the data
// back up the stack.
DISCONNECTED => {
+ self.state.swap(DISCONNECTED, Ordering::SeqCst);
+ self.upgrade = NothingSent;
Err(self.data.take().unwrap())
}
}
}
-#[stable(feature = "mpsc_debug", since = "1.7.0")]
impl fmt::Debug for Select {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Select {{ .. }}")
}
}
-#[stable(feature = "mpsc_debug", since = "1.7.0")]
impl<'rx, T:Send+'rx> fmt::Debug for Handle<'rx, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Handle {{ .. }}")
}
}
-#[cfg(test)]
#[allow(unused_imports)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use thread;
use sync::mpsc::*;
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::Arc;
use super::Queue;
&guard.__lock.poison
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::mpsc::channel;
use sync::{Arc, Mutex, Condvar};
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use panic;
use sync::mpsc::channel;
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
#![allow(deprecated)] // rand
+++ /dev/null
-// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Global storage for command line arguments
-//!
-//! The current incarnation of the Rust runtime expects for
-//! the processes `argc` and `argv` arguments to be stored
-//! in a globally-accessible location for use by the `os` module.
-//!
-//! Only valid to call on Linux. Mac and Windows use syscalls to
-//! discover the command line arguments.
-//!
-//! FIXME #7756: Would be nice for this to not exist.
-
-#![allow(dead_code)] // different code on OSX/linux/etc
-
-/// One-time global initialization.
-pub unsafe fn init(argc: isize, argv: *const *const u8) { imp::init(argc, argv) }
-
-/// One-time global cleanup.
-pub unsafe fn cleanup() { imp::cleanup() }
-
-/// Make a clone of the global arguments.
-pub fn clone() -> Option<Vec<Vec<u8>>> { imp::clone() }
-
-#[cfg(any(target_os = "linux",
- target_os = "android",
- target_os = "freebsd",
- target_os = "dragonfly",
- target_os = "bitrig",
- target_os = "netbsd",
- target_os = "openbsd",
- target_os = "solaris",
- target_os = "emscripten"))]
-mod imp {
- use libc::c_char;
- use mem;
- use ffi::CStr;
-
- use sys_common::mutex::Mutex;
-
- static mut GLOBAL_ARGS_PTR: usize = 0;
- static LOCK: Mutex = Mutex::new();
-
- pub unsafe fn init(argc: isize, argv: *const *const u8) {
- let args = (0..argc).map(|i| {
- CStr::from_ptr(*argv.offset(i) as *const c_char).to_bytes().to_vec()
- }).collect();
-
- LOCK.lock();
- let ptr = get_global_ptr();
- assert!((*ptr).is_none());
- (*ptr) = Some(box args);
- LOCK.unlock();
- }
-
- pub unsafe fn cleanup() {
- LOCK.lock();
- *get_global_ptr() = None;
- LOCK.unlock();
- }
-
- pub fn clone() -> Option<Vec<Vec<u8>>> {
- unsafe {
- LOCK.lock();
- let ptr = get_global_ptr();
- let ret = (*ptr).as_ref().map(|s| (**s).clone());
- LOCK.unlock();
- return ret
- }
- }
-
- fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
- unsafe { mem::transmute(&GLOBAL_ARGS_PTR) }
- }
-
-}
-
-#[cfg(any(target_os = "macos",
- target_os = "ios",
- target_os = "windows"))]
-mod imp {
- pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
- }
-
- pub fn cleanup() {
- }
-
- pub fn clone() -> Option<Vec<Vec<u8>>> {
- panic!()
- }
-}
}
#[cfg(test)]
+#[allow(dead_code)] // not used on emscripten
pub mod test {
use path::{Path, PathBuf};
use env;
}
#[bench]
+ #[cfg_attr(target_os = "emscripten", ignore)]
fn bench_uninitialized(b: &mut ::test::Bencher) {
b.iter(|| {
let mut lr = repeat(1).take(10000000);
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// Original implementation taken from rust-memchr
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+#[allow(dead_code)]
+pub mod fallback {
+ use cmp;
+ use mem;
+
+ const LO_U64: u64 = 0x0101010101010101;
+ const HI_U64: u64 = 0x8080808080808080;
+
+ // use truncation
+ const LO_USIZE: usize = LO_U64 as usize;
+ const HI_USIZE: usize = HI_U64 as usize;
+
+ /// Return `true` if `x` contains any zero byte.
+ ///
+ /// From *Matters Computational*, J. Arndt
+ ///
+ /// "The idea is to subtract one from each of the bytes and then look for
+ /// bytes where the borrow propagated all the way to the most significant
+ /// bit."
+ #[inline]
+ fn contains_zero_byte(x: usize) -> bool {
+ x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
+ }
+
+ #[cfg(target_pointer_width = "32")]
+ #[inline]
+ fn repeat_byte(b: u8) -> usize {
+ let mut rep = (b as usize) << 8 | b as usize;
+ rep = rep << 16 | rep;
+ rep
+ }
+
+ #[cfg(target_pointer_width = "64")]
+ #[inline]
+ fn repeat_byte(b: u8) -> usize {
+ let mut rep = (b as usize) << 8 | b as usize;
+ rep = rep << 16 | rep;
+ rep = rep << 32 | rep;
+ rep
+ }
+
+ /// Return the first index matching the byte `a` in `text`.
+ pub fn memchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Scan for a single byte value by reading two `usize` words at a time.
+ //
+ // Split `text` in three parts
+ // - unaligned initial part, before the first word aligned address in text
+ // - body, scan by 2 words at a time
+ // - the last remaining part, < 2 word size
+ let len = text.len();
+ let ptr = text.as_ptr();
+ let usize_bytes = mem::size_of::<usize>();
+
+ // search up to an aligned boundary
+ let align = (ptr as usize) & (usize_bytes- 1);
+ let mut offset;
+ if align > 0 {
+ offset = cmp::min(usize_bytes - align, len);
+ if let Some(index) = text[..offset].iter().position(|elt| *elt == x) {
+ return Some(index);
+ }
+ } else {
+ offset = 0;
+ }
+
+ // search the body of the text
+ let repeated_x = repeat_byte(x);
+
+ if len >= 2 * usize_bytes {
+ while offset <= len - 2 * usize_bytes {
+ unsafe {
+ let u = *(ptr.offset(offset as isize) as *const usize);
+ let v = *(ptr.offset((offset + usize_bytes) as isize) as *const usize);
+
+ // break if there is a matching byte
+ let zu = contains_zero_byte(u ^ repeated_x);
+ let zv = contains_zero_byte(v ^ repeated_x);
+ if zu || zv {
+ break;
+ }
+ }
+ offset += usize_bytes * 2;
+ }
+ }
+
+ // find the byte after the point the body loop stopped
+ text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i)
+ }
+
+ /// Return the last index matching the byte `a` in `text`.
+ pub fn memrchr(x: u8, text: &[u8]) -> Option<usize> {
+ // Scan for a single byte value by reading two `usize` words at a time.
+ //
+ // Split `text` in three parts
+ // - unaligned tail, after the last word aligned address in text
+ // - body, scan by 2 words at a time
+ // - the first remaining bytes, < 2 word size
+ let len = text.len();
+ let ptr = text.as_ptr();
+ let usize_bytes = mem::size_of::<usize>();
+
+ // search to an aligned boundary
+ let end_align = (ptr as usize + len) & (usize_bytes - 1);
+ let mut offset;
+ if end_align > 0 {
+ offset = if end_align >= len { 0 } else { len - end_align };
+ if let Some(index) = text[offset..].iter().rposition(|elt| *elt == x) {
+ return Some(offset + index);
+ }
+ } else {
+ offset = len;
+ }
+
+ // search the body of the text
+ let repeated_x = repeat_byte(x);
+
+ while offset >= 2 * usize_bytes {
+ unsafe {
+ let u = *(ptr.offset(offset as isize - 2 * usize_bytes as isize) as *const usize);
+ let v = *(ptr.offset(offset as isize - usize_bytes as isize) as *const usize);
+
+ // break if there is a matching byte
+ let zu = contains_zero_byte(u ^ repeated_x);
+ let zv = contains_zero_byte(v ^ repeated_x);
+ if zu || zv {
+ break;
+ }
+ }
+ offset -= 2 * usize_bytes;
+ }
+
+ // find the byte before the point the body loop stopped
+ text[..offset].iter().rposition(|elt| *elt == x)
+ }
+
+ // test fallback implementations on all platforms
+ #[test]
+ fn matches_one() {
+ assert_eq!(Some(0), memchr(b'a', b"a"));
+ }
+
+ #[test]
+ fn matches_begin() {
+ assert_eq!(Some(0), memchr(b'a', b"aaaa"));
+ }
+
+ #[test]
+ fn matches_end() {
+ assert_eq!(Some(4), memchr(b'z', b"aaaaz"));
+ }
+
+ #[test]
+ fn matches_nul() {
+ assert_eq!(Some(4), memchr(b'\x00', b"aaaa\x00"));
+ }
+
+ #[test]
+ fn matches_past_nul() {
+ assert_eq!(Some(5), memchr(b'z', b"aaaa\x00z"));
+ }
+
+ #[test]
+ fn no_match_empty() {
+ assert_eq!(None, memchr(b'a', b""));
+ }
+
+ #[test]
+ fn no_match() {
+ assert_eq!(None, memchr(b'a', b"xyz"));
+ }
+
+ #[test]
+ fn matches_one_reversed() {
+ assert_eq!(Some(0), memrchr(b'a', b"a"));
+ }
+
+ #[test]
+ fn matches_begin_reversed() {
+ assert_eq!(Some(3), memrchr(b'a', b"aaaa"));
+ }
+
+ #[test]
+ fn matches_end_reversed() {
+ assert_eq!(Some(0), memrchr(b'z', b"zaaaa"));
+ }
+
+ #[test]
+ fn matches_nul_reversed() {
+ assert_eq!(Some(4), memrchr(b'\x00', b"aaaa\x00"));
+ }
+
+ #[test]
+ fn matches_past_nul_reversed() {
+ assert_eq!(Some(0), memrchr(b'z', b"z\x00aaaa"));
+ }
+
+ #[test]
+ fn no_match_empty_reversed() {
+ assert_eq!(None, memrchr(b'a', b""));
+ }
+
+ #[test]
+ fn no_match_reversed() {
+ assert_eq!(None, memrchr(b'a', b"xyz"));
+ }
+
+ #[test]
+ fn each_alignment_reversed() {
+ let mut data = [1u8; 64];
+ let needle = 2;
+ let pos = 40;
+ data[pos] = needle;
+ for start in 0..16 {
+ assert_eq!(Some(pos - start), memrchr(needle, &data[start..]));
+ }
+ }
+}
})
}
-pub mod args;
pub mod at_exit_imp;
#[cfg(any(not(cargobuild), feature = "backtrace"))]
pub mod backtrace;
pub mod condvar;
pub mod io;
+pub mod memchr;
pub mod mutex;
pub mod net;
pub mod poison;
pub fn cleanup() {
static CLEANUP: Once = Once::new();
CLEANUP.call_once(|| unsafe {
- args::cleanup();
+ sys::args::cleanup();
sys::stack_overflow::cleanup();
at_exit_imp::cleanup();
});
#[cfg(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
- target_os = "solaris"))]
+ target_os = "solaris", target_os = "haiku"))]
use sys::net::netc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP;
#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
- target_os = "solaris")))]
+ target_os = "solaris", target_os = "haiku")))]
use sys::net::netc::IPV6_ADD_MEMBERSHIP;
#[cfg(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
- target_os = "solaris"))]
+ target_os = "solaris", target_os = "haiku"))]
use sys::net::netc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP;
#[cfg(not(any(target_os = "dragonfly", target_os = "freebsd",
target_os = "ios", target_os = "macos",
target_os = "openbsd", target_os = "netbsd",
- target_os = "solaris")))]
+ target_os = "solaris", target_os = "haiku")))]
use sys::net::netc::IPV6_DROP_MEMBERSHIP;
+#[cfg(target_os = "linux")]
+use libc::MSG_NOSIGNAL;
+#[cfg(not(target_os = "linux"))]
+const MSG_NOSIGNAL: c_int = 0x0; // unused dummy value
+
////////////////////////////////////////////////////////////////////////////////
// sockaddr and misc bindings
////////////////////////////////////////////////////////////////////////////////
c::send(*self.inner.as_inner(),
buf.as_ptr() as *const c_void,
len,
- 0)
+ MSG_NOSIGNAL)
})?;
Ok(ret as usize)
}
let ret = cvt(unsafe {
c::sendto(*self.inner.as_inner(),
buf.as_ptr() as *const c_void, len,
- 0, dstp, dstlen)
+ MSG_NOSIGNAL, dstp, dstlen)
})?;
Ok(ret as usize)
}
c::send(*self.inner.as_inner(),
buf.as_ptr() as *const c_void,
len,
- 0)
+ MSG_NOSIGNAL)
})?;
Ok(ret as usize)
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use cell::RefCell;
/// Copied from String::push
/// This does **not** include the WTF-8 concatenation check.
fn push_code_point_unchecked(&mut self, code_point: CodePoint) {
- let bytes = unsafe {
- char::from_u32_unchecked(code_point.value).encode_utf8()
+ let c = unsafe {
+ char::from_u32_unchecked(code_point.value)
};
- self.bytes.extend_from_slice(bytes.as_slice());
+ let mut bytes = [0; 4];
+ let bytes = c.encode_utf8(&mut bytes).as_bytes();
+ self.bytes.extend_from_slice(bytes)
}
#[inline]
return Some(tmp);
}
+ let mut buf = [0; 2];
self.code_points.next().map(|code_point| {
- let n = unsafe {
- char::from_u32_unchecked(code_point.value).encode_utf16()
+ let c = unsafe {
+ char::from_u32_unchecked(code_point.value)
};
- let n = n.as_slice();
- if n.len() == 2 {
- self.extra = n[1];
+ let n = c.encode_utf16(&mut buf).len();
+ if n == 2 {
+ self.extra = buf[1];
}
- n[0]
+ buf[0]
})
}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Global initialization and retreival of command line arguments.
+//!
+//! On some platforms these are stored during runtime startup,
+//! and on some they are retrieved from the system on demand.
+
+#![allow(dead_code)] // runtime init functions not used during testing
+
+use ffi::OsString;
+use marker::PhantomData;
+use vec;
+
+/// One-time global initialization.
+pub unsafe fn init(argc: isize, argv: *const *const u8) { imp::init(argc, argv) }
+
+/// One-time global cleanup.
+pub unsafe fn cleanup() { imp::cleanup() }
+
+/// Returns the command line arguments
+pub fn args() -> Args {
+ imp::args()
+}
+
+pub struct Args {
+ iter: vec::IntoIter<OsString>,
+ _dont_send_or_sync_me: PhantomData<*mut ()>,
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> { self.iter.next() }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize { self.iter.len() }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> { self.iter.next_back() }
+}
+
+#[cfg(any(target_os = "linux",
+ target_os = "android",
+ target_os = "freebsd",
+ target_os = "dragonfly",
+ target_os = "bitrig",
+ target_os = "netbsd",
+ target_os = "openbsd",
+ target_os = "solaris",
+ target_os = "emscripten",
+ target_os = "haiku"))]
+mod imp {
+ use os::unix::prelude::*;
+ use mem;
+ use ffi::{CStr, OsString};
+ use marker::PhantomData;
+ use libc;
+ use super::Args;
+
+ use sys_common::mutex::Mutex;
+
+ static mut GLOBAL_ARGS_PTR: usize = 0;
+ static LOCK: Mutex = Mutex::new();
+
+ pub unsafe fn init(argc: isize, argv: *const *const u8) {
+ let args = (0..argc).map(|i| {
+ CStr::from_ptr(*argv.offset(i) as *const libc::c_char).to_bytes().to_vec()
+ }).collect();
+
+ LOCK.lock();
+ let ptr = get_global_ptr();
+ assert!((*ptr).is_none());
+ (*ptr) = Some(box args);
+ LOCK.unlock();
+ }
+
+ pub unsafe fn cleanup() {
+ LOCK.lock();
+ *get_global_ptr() = None;
+ LOCK.unlock();
+ }
+
+ pub fn args() -> Args {
+ let bytes = clone().unwrap_or(Vec::new());
+ let v: Vec<OsString> = bytes.into_iter().map(|v| {
+ OsStringExt::from_vec(v)
+ }).collect();
+ Args { iter: v.into_iter(), _dont_send_or_sync_me: PhantomData }
+ }
+
+ fn clone() -> Option<Vec<Vec<u8>>> {
+ unsafe {
+ LOCK.lock();
+ let ptr = get_global_ptr();
+ let ret = (*ptr).as_ref().map(|s| (**s).clone());
+ LOCK.unlock();
+ return ret
+ }
+ }
+
+ fn get_global_ptr() -> *mut Option<Box<Vec<Vec<u8>>>> {
+ unsafe { mem::transmute(&GLOBAL_ARGS_PTR) }
+ }
+
+}
+
+#[cfg(any(target_os = "macos",
+ target_os = "ios"))]
+mod imp {
+ use ffi::CStr;
+ use marker::PhantomData;
+ use libc;
+ use super::Args;
+
+ pub unsafe fn init(_argc: isize, _argv: *const *const u8) {
+ }
+
+ pub fn cleanup() {
+ }
+
+ #[cfg(target_os = "macos")]
+ pub fn args() -> Args {
+ use os::unix::prelude::*;
+ extern {
+ // These functions are in crt_externs.h.
+ fn _NSGetArgc() -> *mut libc::c_int;
+ fn _NSGetArgv() -> *mut *mut *mut libc::c_char;
+ }
+
+ let vec = unsafe {
+ let (argc, argv) = (*_NSGetArgc() as isize,
+ *_NSGetArgv() as *const *const libc::c_char);
+ (0.. argc as isize).map(|i| {
+ let bytes = CStr::from_ptr(*argv.offset(i)).to_bytes().to_vec();
+ OsStringExt::from_vec(bytes)
+ }).collect::<Vec<_>>()
+ };
+ Args {
+ iter: vec.into_iter(),
+ _dont_send_or_sync_me: PhantomData,
+ }
+ }
+
+ // As _NSGetArgc and _NSGetArgv aren't mentioned in iOS docs
+ // and use underscores in their names - they're most probably
+ // are considered private and therefore should be avoided
+ // Here is another way to get arguments using Objective C
+ // runtime
+ //
+ // In general it looks like:
+ // res = Vec::new()
+ // let args = [[NSProcessInfo processInfo] arguments]
+ // for i in (0..[args count])
+ // res.push([args objectAtIndex:i])
+ // res
+ #[cfg(target_os = "ios")]
+ pub fn args() -> Args {
+ use ffi::OsString;
+ use mem;
+ use str;
+
+ extern {
+ fn sel_registerName(name: *const libc::c_uchar) -> Sel;
+ fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId;
+ fn objc_getClass(class_name: *const libc::c_uchar) -> NsId;
+ }
+
+ #[link(name = "Foundation", kind = "framework")]
+ #[link(name = "objc")]
+ #[cfg(not(cargobuild))]
+ extern {}
+
+ type Sel = *const libc::c_void;
+ type NsId = *const libc::c_void;
+
+ let mut res = Vec::new();
+
+ unsafe {
+ let process_info_sel = sel_registerName("processInfo\0".as_ptr());
+ let arguments_sel = sel_registerName("arguments\0".as_ptr());
+ let utf8_sel = sel_registerName("UTF8String\0".as_ptr());
+ let count_sel = sel_registerName("count\0".as_ptr());
+ let object_at_sel = sel_registerName("objectAtIndex:\0".as_ptr());
+
+ let klass = objc_getClass("NSProcessInfo\0".as_ptr());
+ let info = objc_msgSend(klass, process_info_sel);
+ let args = objc_msgSend(info, arguments_sel);
+
+ let cnt: usize = mem::transmute(objc_msgSend(args, count_sel));
+ for i in 0..cnt {
+ let tmp = objc_msgSend(args, object_at_sel, i);
+ let utf_c_str: *const libc::c_char =
+ mem::transmute(objc_msgSend(tmp, utf8_sel));
+ let bytes = CStr::from_ptr(utf_c_str).to_bytes();
+ res.push(OsString::from(str::from_utf8(bytes).unwrap()))
+ }
+ }
+
+ Args { iter: res.into_iter(), _dont_send_or_sync_me: PhantomData }
+ }
+}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[cfg(target_os = "linux")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "linux";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "macos")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "macos";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".dylib";
+ pub const DLL_EXTENSION: &'static str = "dylib";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "ios")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "ios";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".dylib";
+ pub const DLL_EXTENSION: &'static str = "dylib";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "freebsd")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "freebsd";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "dragonfly")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "dragonfly";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "bitrig")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "bitrig";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "netbsd")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "netbsd";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "openbsd")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "openbsd";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "android")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "android";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(target_os = "solaris")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "solaris";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(all(target_os = "nacl", not(target_arch = "le32")))]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "nacl";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = ".nexe";
+ pub const EXE_EXTENSION: &'static str = "nexe";
+}
+#[cfg(all(target_os = "nacl", target_arch = "le32"))]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "pnacl";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".pso";
+ pub const DLL_EXTENSION: &'static str = "pso";
+ pub const EXE_SUFFIX: &'static str = ".pexe";
+ pub const EXE_EXTENSION: &'static str = "pexe";
+}
+
+#[cfg(target_os = "haiku")]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "haiku";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = "";
+ pub const EXE_EXTENSION: &'static str = "";
+}
+
+#[cfg(all(target_os = "emscripten", target_arch = "asmjs"))]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "emscripten";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = ".js";
+ pub const EXE_EXTENSION: &'static str = "js";
+}
+
+#[cfg(all(target_os = "emscripten", target_arch = "wasm32"))]
+pub mod os {
+ pub const FAMILY: &'static str = "unix";
+ pub const OS: &'static str = "emscripten";
+ pub const DLL_PREFIX: &'static str = "lib";
+ pub const DLL_SUFFIX: &'static str = ".so";
+ pub const DLL_EXTENSION: &'static str = "so";
+ pub const EXE_SUFFIX: &'static str = ".js";
+ pub const EXE_EXTENSION: &'static str = "js";
+}
use sys::net::Socket;
use sys_common::{AsInner, FromInner, IntoInner};
+#[cfg(target_os = "linux")]
+use libc::MSG_NOSIGNAL;
+#[cfg(not(target_os = "linux"))]
+const MSG_NOSIGNAL: libc::c_int = 0x0; // unused dummy value
+
fn sun_path_offset() -> usize {
unsafe {
// Work with an actual instance of the type since using a null pointer is UB
let count = cvt(libc::sendto(*d.0.as_inner(),
buf.as_ptr() as *const _,
buf.len(),
- 0,
+ MSG_NOSIGNAL,
&addr as *const _ as *const _,
len))?;
Ok(count as usize)
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod test {
use thread;
use io;
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawFd for process::ChildStdin {
fn into_raw_fd(self) -> RawFd {
self.into_inner().into_fd().into_raw()
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawFd for process::ChildStdout {
fn into_raw_fd(self) -> RawFd {
self.into_inner().into_fd().into_raw()
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawFd for process::ChildStderr {
fn into_raw_fd(self) -> RawFd {
self.into_inner().into_fd().into_raw()
Ok(ret as usize)
}
- #[cfg(not(any(target_env = "newlib", target_os = "solaris", target_os = "emscripten")))]
+ #[cfg(not(any(target_env = "newlib",
+ target_os = "solaris",
+ target_os = "emscripten",
+ target_os = "haiku")))]
pub fn set_cloexec(&self) -> io::Result<()> {
unsafe {
cvt(libc::ioctl(self.fd, libc::FIOCLEX))?;
Ok(())
}
}
- #[cfg(any(target_env = "newlib", target_os = "solaris", target_os = "emscripten"))]
+ #[cfg(any(target_env = "newlib",
+ target_os = "solaris",
+ target_os = "emscripten",
+ target_os = "haiku"))]
pub fn set_cloexec(&self) -> io::Result<()> {
unsafe {
let previous = cvt(libc::fcntl(self.fd, libc::F_GETFD))?;
// resolve so we at least compile this.
//
// [1]: http://comments.gmane.org/gmane.linux.lib.musl.general/2963
- #[cfg(target_os = "android")]
+ #[cfg(any(target_os = "android", target_os = "haiku"))]
use libc::F_DUPFD as F_DUPFD_CLOEXEC;
- #[cfg(not(target_os = "android"))]
+ #[cfg(not(any(target_os = "android", target_os="haiku")))]
use libc::F_DUPFD_CLOEXEC;
let make_filedesc = |fd| {
stat(&self.path()).map(|m| m.file_type())
}
- #[cfg(not(target_os = "solaris"))]
+ #[cfg(target_os = "haiku")]
+ pub fn file_type(&self) -> io::Result<FileType> {
+ lstat(&self.path()).map(|m| m.file_type())
+ }
+
+ #[cfg(not(any(target_os = "solaris", target_os = "haiku")))]
pub fn file_type(&self) -> io::Result<FileType> {
match self.entry.d_type {
libc::DT_CHR => Ok(FileType { mode: libc::S_IFCHR }),
target_os = "linux",
target_os = "emscripten",
target_os = "android",
- target_os = "solaris"))]
+ target_os = "solaris",
+ target_os = "haiku"))]
pub fn ino(&self) -> u64 {
self.entry.d_ino as u64
}
}
#[cfg(any(target_os = "android",
target_os = "linux",
- target_os = "emscripten"))]
+ target_os = "emscripten",
+ target_os = "haiku"))]
fn name_bytes(&self) -> &[u8] {
unsafe {
CStr::from_ptr(self.entry.d_name.as_ptr()).to_bytes()
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// Original implementation taken from rust-memchr
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+pub fn memchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+ use libc;
+
+ let p = unsafe {
+ libc::memchr(
+ haystack.as_ptr() as *const libc::c_void,
+ needle as libc::c_int,
+ haystack.len() as libc::size_t)
+ };
+ if p.is_null() {
+ None
+ } else {
+ Some(p as usize - (haystack.as_ptr() as usize))
+ }
+}
+
+pub fn memrchr(needle: u8, haystack: &[u8]) -> Option<usize> {
+
+ #[cfg(target_os = "linux")]
+ fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
+ use libc;
+
+ // GNU's memrchr() will - unlike memchr() - error if haystack is empty.
+ if haystack.is_empty() {return None}
+ let p = unsafe {
+ libc::memrchr(
+ haystack.as_ptr() as *const libc::c_void,
+ needle as libc::c_int,
+ haystack.len() as libc::size_t)
+ };
+ if p.is_null() {
+ None
+ } else {
+ Some(p as usize - (haystack.as_ptr() as usize))
+ }
+ }
+
+ #[cfg(not(target_os = "linux"))]
+ fn memrchr_specific(needle: u8, haystack: &[u8]) -> Option<usize> {
+ ::sys_common::memchr::fallback::memrchr(needle, haystack)
+ }
+
+ memrchr_specific(needle, haystack)
+}
#[cfg(target_os = "bitrig")] pub use os::bitrig as platform;
#[cfg(target_os = "dragonfly")] pub use os::dragonfly as platform;
#[cfg(target_os = "freebsd")] pub use os::freebsd as platform;
+#[cfg(target_os = "haiku")] pub use os::haiku as platform;
#[cfg(target_os = "ios")] pub use os::ios as platform;
#[cfg(target_os = "linux")] pub use os::linux as platform;
#[cfg(target_os = "macos")] pub use os::macos as platform;
#[macro_use]
pub mod weak;
+pub mod args;
pub mod android;
#[cfg(any(not(cargobuild), feature = "backtrace"))]
pub mod backtrace;
pub mod condvar;
+pub mod env;
pub mod ext;
pub mod fd;
pub mod fs;
+pub mod memchr;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
+pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
use ffi::CStr;
use io;
-use libc::{self, c_int, size_t, sockaddr, socklen_t};
+use libc::{self, c_int, size_t, sockaddr, socklen_t, EAI_SYSTEM};
use net::{SocketAddr, Shutdown};
use str;
use sys::fd::FileDesc;
#[cfg(not(target_os = "linux"))]
const SOCK_CLOEXEC: c_int = 0;
+// Another conditional contant for name resolution: Macos et iOS use
+// SO_NOSIGPIPE as a setsockopt flag to disable SIGPIPE emission on socket.
+// Other platforms do otherwise.
+#[cfg(target_vendor = "apple")]
+use libc::SO_NOSIGPIPE;
+#[cfg(not(target_vendor = "apple"))]
+const SO_NOSIGPIPE: c_int = 0;
+
pub struct Socket(FileDesc);
pub fn init() {}
pub fn cvt_gai(err: c_int) -> io::Result<()> {
- if err == 0 { return Ok(()) }
+ if err == 0 {
+ return Ok(())
+ }
+ if err == EAI_SYSTEM {
+ return Err(io::Error::last_os_error())
+ }
let detail = unsafe {
str::from_utf8(CStr::from_ptr(libc::gai_strerror(err)).to_bytes()).unwrap()
let fd = cvt(libc::socket(fam, ty, 0))?;
let fd = FileDesc::new(fd);
fd.set_cloexec()?;
- Ok(Socket(fd))
+ let socket = Socket(fd);
+ if cfg!(target_vendor = "apple") {
+ setsockopt(&socket, libc::SOL_SOCKET, SO_NOSIGPIPE, 1)?;
+ }
+ Ok(socket)
}
}
target_os = "ios",
target_os = "freebsd"),
link_name = "__error")]
+ #[cfg_attr(target_os = "haiku", link_name = "_errnop")]
fn errno_location() -> *mut c_int;
}
}
}
-pub struct Args {
- iter: vec::IntoIter<OsString>,
- _dont_send_or_sync_me: PhantomData<*mut ()>,
-}
-
-impl Iterator for Args {
- type Item = OsString;
- fn next(&mut self) -> Option<OsString> { self.iter.next() }
- fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() }
-}
-
-impl ExactSizeIterator for Args {
- fn len(&self) -> usize { self.iter.len() }
-}
-
-impl DoubleEndedIterator for Args {
- fn next_back(&mut self) -> Option<OsString> { self.iter.next_back() }
-}
-
-/// Returns the command line arguments
-///
-/// Returns a list of the command line arguments.
-#[cfg(target_os = "macos")]
-pub fn args() -> Args {
- extern {
- // These functions are in crt_externs.h.
- fn _NSGetArgc() -> *mut c_int;
- fn _NSGetArgv() -> *mut *mut *mut c_char;
- }
-
- let vec = unsafe {
- let (argc, argv) = (*_NSGetArgc() as isize,
- *_NSGetArgv() as *const *const c_char);
- (0.. argc as isize).map(|i| {
- let bytes = CStr::from_ptr(*argv.offset(i)).to_bytes().to_vec();
- OsStringExt::from_vec(bytes)
- }).collect::<Vec<_>>()
- };
- Args {
- iter: vec.into_iter(),
- _dont_send_or_sync_me: PhantomData,
- }
-}
-
-// As _NSGetArgc and _NSGetArgv aren't mentioned in iOS docs
-// and use underscores in their names - they're most probably
-// are considered private and therefore should be avoided
-// Here is another way to get arguments using Objective C
-// runtime
-//
-// In general it looks like:
-// res = Vec::new()
-// let args = [[NSProcessInfo processInfo] arguments]
-// for i in (0..[args count])
-// res.push([args objectAtIndex:i])
-// res
-#[cfg(target_os = "ios")]
-pub fn args() -> Args {
- use mem;
-
- extern {
- fn sel_registerName(name: *const libc::c_uchar) -> Sel;
- fn objc_msgSend(obj: NsId, sel: Sel, ...) -> NsId;
- fn objc_getClass(class_name: *const libc::c_uchar) -> NsId;
+#[cfg(target_os = "haiku")]
+pub fn current_exe() -> io::Result<PathBuf> {
+ // Use Haiku's image info functions
+ #[repr(C)]
+ struct image_info {
+ id: i32,
+ type_: i32,
+ sequence: i32,
+ init_order: i32,
+ init_routine: *mut libc::c_void, // function pointer
+ term_routine: *mut libc::c_void, // function pointer
+ device: libc::dev_t,
+ node: libc::ino_t,
+ name: [libc::c_char; 1024], // MAXPATHLEN
+ text: *mut libc::c_void,
+ data: *mut libc::c_void,
+ text_size: i32,
+ data_size: i32,
+ api_version: i32,
+ abi: i32,
}
- #[link(name = "Foundation", kind = "framework")]
- #[link(name = "objc")]
- #[cfg(not(cargobuild))]
- extern {}
-
- type Sel = *const libc::c_void;
- type NsId = *const libc::c_void;
-
- let mut res = Vec::new();
-
unsafe {
- let process_info_sel = sel_registerName("processInfo\0".as_ptr());
- let arguments_sel = sel_registerName("arguments\0".as_ptr());
- let utf8_sel = sel_registerName("UTF8String\0".as_ptr());
- let count_sel = sel_registerName("count\0".as_ptr());
- let object_at_sel = sel_registerName("objectAtIndex:\0".as_ptr());
-
- let klass = objc_getClass("NSProcessInfo\0".as_ptr());
- let info = objc_msgSend(klass, process_info_sel);
- let args = objc_msgSend(info, arguments_sel);
-
- let cnt: usize = mem::transmute(objc_msgSend(args, count_sel));
- for i in 0..cnt {
- let tmp = objc_msgSend(args, object_at_sel, i);
- let utf_c_str: *const libc::c_char =
- mem::transmute(objc_msgSend(tmp, utf8_sel));
- let bytes = CStr::from_ptr(utf_c_str).to_bytes();
- res.push(OsString::from(str::from_utf8(bytes).unwrap()))
+ extern {
+ fn _get_next_image_info(team_id: i32, cookie: *mut i32,
+ info: *mut image_info, size: i32) -> i32;
}
- }
- Args { iter: res.into_iter(), _dont_send_or_sync_me: PhantomData }
-}
-
-#[cfg(any(target_os = "linux",
- target_os = "android",
- target_os = "freebsd",
- target_os = "dragonfly",
- target_os = "bitrig",
- target_os = "netbsd",
- target_os = "openbsd",
- target_os = "solaris",
- target_os = "nacl",
- target_os = "emscripten"))]
-pub fn args() -> Args {
- use sys_common;
- let bytes = sys_common::args::clone().unwrap_or(Vec::new());
- let v: Vec<OsString> = bytes.into_iter().map(|v| {
- OsStringExt::from_vec(v)
- }).collect();
- Args { iter: v.into_iter(), _dont_send_or_sync_me: PhantomData }
+ let mut info: image_info = mem::zeroed();
+ let mut cookie: i32 = 0;
+ // the executable can be found at team id 0
+ let result = _get_next_image_info(0, &mut cookie, &mut info,
+ mem::size_of::<image_info>() as i32);
+ if result != 0 {
+ use io::ErrorKind;
+ Err(io::Error::new(ErrorKind::Other, "Error getting executable path"))
+ } else {
+ let name = CStr::from_ptr(info.name.as_ptr()).to_bytes();
+ Ok(PathBuf::from(OsStr::from_bytes(name)))
+ }
+ }
}
pub struct Env {
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use path::Prefix;
+use ffi::OsStr;
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'/'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'/'
+}
+
+pub fn parse_prefix(_: &OsStr) -> Option<Prefix> {
+ None
+}
+
+pub const MAIN_SEP_STR: &'static str = "/";
+pub const MAIN_SEP: char = '/';
}
// NaCl has no signal support.
- if cfg!(not(target_os = "nacl")) {
+ if cfg!(not(any(target_os = "nacl", target_os = "emscripten"))) {
// Reset signal handling so the child process starts in a
// standardized state. libstd ignores SIGPIPE, and signal-handling
// libraries often set a mask. Child processes inherit ignored
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use super::*;
}
fn flush(&mut self) -> io::Result<()> { Ok(()) }
}
+
+pub const EBADF_ERR: i32 = ::libc::EBADF as i32;
unsafe impl Send for Thread {}
unsafe impl Sync for Thread {}
+// The pthread_attr_setstacksize symbol doesn't exist in the emscripten libc,
+// so we have to not link to it to satisfy emcc's ERROR_ON_UNDEFINED_SYMBOLS.
+#[cfg(not(target_os = "emscripten"))]
+unsafe fn pthread_attr_setstacksize(attr: *mut libc::pthread_attr_t,
+ stack_size: libc::size_t) -> libc::c_int {
+ libc::pthread_attr_setstacksize(attr, stack_size)
+}
+
+#[cfg(target_os = "emscripten")]
+unsafe fn pthread_attr_setstacksize(_attr: *mut libc::pthread_attr_t,
+ _stack_size: libc::size_t) -> libc::c_int {
+ panic!()
+}
+
impl Thread {
pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
-> io::Result<Thread> {
assert_eq!(libc::pthread_attr_init(&mut attr), 0);
let stack_size = cmp::max(stack, min_stack_size(&attr));
- match libc::pthread_attr_setstacksize(&mut attr,
- stack_size as libc::size_t) {
+ match pthread_attr_setstacksize(&mut attr,
+ stack_size as libc::size_t) {
0 => {}
n => {
assert_eq!(n, libc::EINVAL);
name.as_ptr() as *mut libc::c_void);
}
}
- #[cfg(any(target_env = "newlib", target_os = "solaris", target_os = "emscripten"))]
+ #[cfg(any(target_env = "newlib",
+ target_os = "solaris",
+ target_os = "haiku",
+ target_os = "emscripten"))]
pub fn set_name(_name: &CStr) {
- // Newlib, Illumos and Emscripten have no way to set a thread name.
+ // Newlib, Illumos, Haiku, and Emscripten have no way to set a thread name.
}
pub fn sleep(dur: Duration) {
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)] // runtime init functions not used during testing
+
+use os::windows::prelude::*;
+use sys::c;
+use slice;
+use ops::Range;
+use ffi::OsString;
+use libc::{c_int, c_void};
+
+pub unsafe fn init(_argc: isize, _argv: *const *const u8) { }
+
+pub unsafe fn cleanup() { }
+
+pub fn args() -> Args {
+ unsafe {
+ let mut nArgs: c_int = 0;
+ let lpCmdLine = c::GetCommandLineW();
+ let szArgList = c::CommandLineToArgvW(lpCmdLine, &mut nArgs);
+
+ // szArcList can be NULL if CommandLinToArgvW failed,
+ // but in that case nArgs is 0 so we won't actually
+ // try to read a null pointer
+ Args { cur: szArgList, range: 0..(nArgs as isize) }
+ }
+}
+
+pub struct Args {
+ range: Range<isize>,
+ cur: *mut *mut u16,
+}
+
+unsafe fn os_string_from_ptr(ptr: *mut u16) -> OsString {
+ let mut len = 0;
+ while *ptr.offset(len) != 0 { len += 1; }
+
+ // Push it onto the list.
+ let ptr = ptr as *const u16;
+ let buf = slice::from_raw_parts(ptr, len as usize);
+ OsStringExt::from_wide(buf)
+}
+
+impl Iterator for Args {
+ type Item = OsString;
+ fn next(&mut self) -> Option<OsString> {
+ self.range.next().map(|i| unsafe { os_string_from_ptr(*self.cur.offset(i)) } )
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
+}
+
+impl DoubleEndedIterator for Args {
+ fn next_back(&mut self) -> Option<OsString> {
+ self.range.next_back().map(|i| unsafe { os_string_from_ptr(*self.cur.offset(i)) } )
+ }
+}
+
+impl ExactSizeIterator for Args {
+ fn len(&self) -> usize { self.range.len() }
+}
+
+impl Drop for Args {
+ fn drop(&mut self) {
+ // self.cur can be null if CommandLineToArgvW previously failed,
+ // but LocalFree ignores NULL pointers
+ unsafe { c::LocalFree(self.cur as *mut c_void); }
+ }
+}
--- /dev/null
+// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub mod os {
+ pub const FAMILY: &'static str = "windows";
+ pub const OS: &'static str = "windows";
+ pub const DLL_PREFIX: &'static str = "";
+ pub const DLL_SUFFIX: &'static str = ".dll";
+ pub const DLL_EXTENSION: &'static str = "dll";
+ pub const EXE_SUFFIX: &'static str = ".exe";
+ pub const EXE_EXTENSION: &'static str = "exe";
+}
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawHandle for process::Child {
fn into_raw_handle(self) -> RawHandle {
self.into_inner().into_handle().into_raw() as *mut _
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawHandle for process::ChildStdin {
fn into_raw_handle(self) -> RawHandle {
self.into_inner().into_handle().into_raw() as *mut _
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawHandle for process::ChildStdout {
fn into_raw_handle(self) -> RawHandle {
self.into_inner().into_handle().into_raw() as *mut _
}
}
-#[stable(feature = "process_extensions", since = "1.2.0")]
+#[stable(feature = "into_raw_os", since = "1.4.0")]
impl IntoRawHandle for process::ChildStderr {
fn into_raw_handle(self) -> RawHandle {
self.into_inner().into_handle().into_raw() as *mut _
fn from_raw(raw: u32) -> Self;
}
-#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "exit_status_from", since = "1.12.0")]
impl ExitStatusExt for process::ExitStatus {
fn from_raw(raw: u32) -> Self {
process::ExitStatus::from_inner(From::from(raw))
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// Original implementation taken from rust-memchr
+// Copyright 2015 Andrew Gallant, bluss and Nicolas Koch
+
+// Fallback memchr is fastest on windows
+pub use sys_common::memchr::fallback::{memchr, memrchr};
#[macro_use] pub mod compat;
+pub mod args;
pub mod backtrace;
pub mod c;
pub mod condvar;
pub mod dynamic_lib;
+pub mod env;
pub mod ext;
pub mod fs;
pub mod handle;
+pub mod memchr;
pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
+pub mod path;
pub mod pipe;
pub mod process;
pub mod rand;
use ffi::{OsString, OsStr};
use fmt;
use io;
-use libc::{c_int, c_void};
-use ops::Range;
use os::windows::ffi::EncodeWide;
use path::{self, PathBuf};
use ptr;
}).map(|_| ())
}
-pub struct Args {
- range: Range<isize>,
- cur: *mut *mut u16,
-}
-
-unsafe fn os_string_from_ptr(ptr: *mut u16) -> OsString {
- let mut len = 0;
- while *ptr.offset(len) != 0 { len += 1; }
-
- // Push it onto the list.
- let ptr = ptr as *const u16;
- let buf = slice::from_raw_parts(ptr, len as usize);
- OsStringExt::from_wide(buf)
-}
-
-impl Iterator for Args {
- type Item = OsString;
- fn next(&mut self) -> Option<OsString> {
- self.range.next().map(|i| unsafe { os_string_from_ptr(*self.cur.offset(i)) } )
- }
- fn size_hint(&self) -> (usize, Option<usize>) { self.range.size_hint() }
-}
-
-impl DoubleEndedIterator for Args {
- fn next_back(&mut self) -> Option<OsString> {
- self.range.next_back().map(|i| unsafe { os_string_from_ptr(*self.cur.offset(i)) } )
- }
-}
-
-impl ExactSizeIterator for Args {
- fn len(&self) -> usize { self.range.len() }
-}
-
-impl Drop for Args {
- fn drop(&mut self) {
- // self.cur can be null if CommandLineToArgvW previously failed,
- // but LocalFree ignores NULL pointers
- unsafe { c::LocalFree(self.cur as *mut c_void); }
- }
-}
-
-pub fn args() -> Args {
- unsafe {
- let mut nArgs: c_int = 0;
- let lpCmdLine = c::GetCommandLineW();
- let szArgList = c::CommandLineToArgvW(lpCmdLine, &mut nArgs);
-
- // szArcList can be NULL if CommandLinToArgvW failed,
- // but in that case nArgs is 0 so we won't actually
- // try to read a null pointer
- Args { cur: szArgList, range: 0..(nArgs as isize) }
- }
-}
-
pub fn temp_dir() -> PathBuf {
super::fill_utf16_buf(|buf, sz| unsafe {
c::GetTempPathW(sz, buf)
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ascii::*;
+
+use path::Prefix;
+use ffi::OsStr;
+use mem;
+
+fn os_str_as_u8_slice(s: &OsStr) -> &[u8] {
+ unsafe { mem::transmute(s) }
+}
+unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
+ mem::transmute(s)
+}
+
+#[inline]
+pub fn is_sep_byte(b: u8) -> bool {
+ b == b'/' || b == b'\\'
+}
+
+#[inline]
+pub fn is_verbatim_sep(b: u8) -> bool {
+ b == b'\\'
+}
+
+pub fn parse_prefix<'a>(path: &'a OsStr) -> Option<Prefix> {
+ use path::Prefix::*;
+ unsafe {
+ // The unsafety here stems from converting between &OsStr and &[u8]
+ // and back. This is safe to do because (1) we only look at ASCII
+ // contents of the encoding and (2) new &OsStr values are produced
+ // only from ASCII-bounded slices of existing &OsStr values.
+ let mut path = os_str_as_u8_slice(path);
+
+ if path.starts_with(br"\\") {
+ // \\
+ path = &path[2..];
+ if path.starts_with(br"?\") {
+ // \\?\
+ path = &path[2..];
+ if path.starts_with(br"UNC\") {
+ // \\?\UNC\server\share
+ path = &path[4..];
+ let (server, share) = match parse_two_comps(path, is_verbatim_sep) {
+ Some((server, share)) =>
+ (u8_slice_as_os_str(server), u8_slice_as_os_str(share)),
+ None => (u8_slice_as_os_str(path), u8_slice_as_os_str(&[])),
+ };
+ return Some(VerbatimUNC(server, share));
+ } else {
+ // \\?\path
+ let idx = path.iter().position(|&b| b == b'\\');
+ if idx == Some(2) && path[1] == b':' {
+ let c = path[0];
+ if c.is_ascii() && (c as char).is_alphabetic() {
+ // \\?\C:\ path
+ return Some(VerbatimDisk(c.to_ascii_uppercase()));
+ }
+ }
+ let slice = &path[..idx.unwrap_or(path.len())];
+ return Some(Verbatim(u8_slice_as_os_str(slice)));
+ }
+ } else if path.starts_with(b".\\") {
+ // \\.\path
+ path = &path[2..];
+ let pos = path.iter().position(|&b| b == b'\\');
+ let slice = &path[..pos.unwrap_or(path.len())];
+ return Some(DeviceNS(u8_slice_as_os_str(slice)));
+ }
+ match parse_two_comps(path, is_sep_byte) {
+ Some((server, share)) if !server.is_empty() && !share.is_empty() => {
+ // \\server\share
+ return Some(UNC(u8_slice_as_os_str(server), u8_slice_as_os_str(share)));
+ }
+ _ => (),
+ }
+ } else if path.get(1) == Some(& b':') {
+ // C:
+ let c = path[0];
+ if c.is_ascii() && (c as char).is_alphabetic() {
+ return Some(Disk(c.to_ascii_uppercase()));
+ }
+ }
+ return None;
+ }
+
+ fn parse_two_comps(mut path: &[u8], f: fn(u8) -> bool) -> Option<(&[u8], &[u8])> {
+ let first = match path.iter().position(|x| f(*x)) {
+ None => return None,
+ Some(x) => &path[..x],
+ };
+ path = &path[(first.len() + 1)..];
+ let idx = path.iter().position(|x| f(*x));
+ let second = &path[..idx.unwrap_or(path.len())];
+ Some((first, second))
+ }
+}
+
+pub const MAIN_SEP_STR: &'static str = "\\";
+pub const MAIN_SEP: char = '\\';
fn invalid_encoding() -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, "text was not valid unicode")
}
+
+pub const EBADF_ERR: i32 = ::sys::c::ERROR_INVALID_HANDLE as i32;
}
}
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use sync::mpsc::{channel, Sender};
use cell::{Cell, UnsafeCell};
//!
//! [`Cell`]: ../cell/struct.Cell.html
//! [`RefCell`]: ../cell/struct.RefCell.html
-//! [`thread_local!`]: ../macro.thread_local!.html
+//! [`thread_local!`]: ../macro.thread_local.html
//! [`with`]: struct.LocalKey.html#method.with
#![stable(feature = "rust1", since = "1.0.0")]
// Tests
////////////////////////////////////////////////////////////////////////////////
-#[cfg(test)]
+#[cfg(all(test, not(target_os = "emscripten")))]
mod tests {
use any::Any;
use sync::mpsc::{channel, Sender};
pub struct SystemTime(time::SystemTime);
/// An error returned from the `duration_since` method on `SystemTime`,
-/// used to learn about why how far in the opposite direction a timestamp lies.
+/// used to learn how far in the opposite direction a system time lies.
#[derive(Clone, Debug)]
#[stable(feature = "time2", since = "1.8.0")]
pub struct SystemTimeError(Duration);
Netbsd,
Openbsd,
NaCl,
+ Haiku,
Solaris,
}
Os::Netbsd => "netbsd".fmt(f),
Os::Openbsd => "openbsd".fmt(f),
Os::NaCl => "nacl".fmt(f),
+ Os::Haiku => "haiku".fmt(f),
Os::Solaris => "solaris".fmt(f),
}
}
/// A lifetime definition, e.g. `'a: 'b+'c+'d`
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct LifetimeDef {
+ pub attrs: ThinVec<Attribute>,
pub lifetime: Lifetime,
pub bounds: Vec<Lifetime>
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct TyParam {
+ pub attrs: ThinVec<Attribute>,
pub ident: Ident,
pub id: NodeId,
pub bounds: TyParamBounds,
PatKind::Box(ref s) | PatKind::Ref(ref s, _) => {
s.walk(it)
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
before.iter().all(|p| p.walk(it)) &&
slice.iter().all(|p| p.walk(it)) &&
after.iter().all(|p| p.walk(it))
/// A range pattern, e.g. `1...2`
Range(P<Expr>, P<Expr>),
/// `[a, b, ..i, y, z]` is represented as:
- /// `PatKind::Vec(box [a, b], Some(i), box [y, z])`
- Vec(Vec<P<Pat>>, Option<P<Pat>>, Vec<P<Pat>>),
+ /// `PatKind::Slice(box [a, b], Some(i), box [y, z])`
+ Slice(Vec<P<Pat>>, Option<P<Pat>>, Vec<P<Pat>>),
/// A macro pattern; pre-expansion
Mac(Mac),
}
/// The different kinds of types recognized by the compiler
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub enum TyKind {
- /// A variable-length array (`[T]`)
- Vec(P<Ty>),
+ /// A variable-length slice (`[T]`)
+ Slice(P<Ty>),
/// A fixed length array (`[T; n]`)
- FixedLengthVec(P<Ty>, P<Expr>),
+ Array(P<Ty>, P<Expr>),
/// A raw pointer (`*const T` or `*mut T`)
Ptr(MutTy),
/// A reference (`&'a T` or `&'a mut T`)
// flag the offending attributes
for attr in attrs.iter() {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
- emit_feature_err(&self.sess.span_diagnostic,
+ emit_feature_err(&self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
attr.check_name("cfg")
}
-fn is_test_or_bench(attr: &ast::Attribute) -> bool {
+pub fn is_test_or_bench(attr: &ast::Attribute) -> bool {
attr.check_name("test") || attr.check_name("bench")
}
let ty = ecx.ty(
span,
- ast::TyKind::FixedLengthVec(
+ ast::TyKind::Array(
ecx.ty(
span,
ast::TyKind::Tup(vec![ty_str.clone(), ty_str])
use ext::expand::{self, Invocation, Expansion};
use ext::hygiene::Mark;
use fold::{self, Folder};
-use parse;
-use parse::parser::{self, Parser};
+use parse::{self, parser};
use parse::token;
use parse::token::{InternedString, str_to_ident};
use ptr::P;
}
}
-pub struct TokResult<'a> {
- pub parser: Parser<'a>,
- pub span: Span,
-}
-
-impl<'a> TokResult<'a> {
- // There is quite a lot of overlap here with ParserAnyMacro in ext/tt/macro_rules.rs
- // We could probably share more code.
- // FIXME(#36641) Unify TokResult and ParserAnyMacro.
- fn ensure_complete_parse(&mut self, allow_semi: bool) {
- let macro_span = &self.span;
- self.parser.ensure_complete_parse(allow_semi, |parser| {
- let token_str = parser.this_token_to_string();
- let msg = format!("macro expansion ignores token `{}` and any following", token_str);
- let span = parser.span;
- parser.diagnostic()
- .struct_span_err(span, &msg)
- .span_note(*macro_span, "caused by the macro expansion here")
- .emit();
- });
- }
-}
-
-impl<'a> MacResult for TokResult<'a> {
- fn make_items(mut self: Box<Self>) -> Option<SmallVector<P<ast::Item>>> {
- if self.parser.sess.span_diagnostic.has_errors() {
- return Some(SmallVector::zero());
- }
-
- let mut items = SmallVector::zero();
- loop {
- match self.parser.parse_item() {
- Ok(Some(item)) => items.push(item),
- Ok(None) => {
- self.ensure_complete_parse(false);
- return Some(items);
- }
- Err(mut e) => {
- e.emit();
- return Some(SmallVector::zero());
- }
- }
- }
- }
-
- fn make_impl_items(mut self: Box<Self>) -> Option<SmallVector<ast::ImplItem>> {
- let mut items = SmallVector::zero();
- loop {
- if self.parser.token == token::Eof {
- break;
- }
- match self.parser.parse_impl_item() {
- Ok(item) => items.push(item),
- Err(mut e) => {
- e.emit();
- return Some(SmallVector::zero());
- }
- }
- }
- self.ensure_complete_parse(false);
- Some(items)
- }
-
- fn make_trait_items(mut self: Box<Self>) -> Option<SmallVector<ast::TraitItem>> {
- let mut items = SmallVector::zero();
- loop {
- if self.parser.token == token::Eof {
- break;
- }
- match self.parser.parse_trait_item() {
- Ok(item) => items.push(item),
- Err(mut e) => {
- e.emit();
- return Some(SmallVector::zero());
- }
- }
- }
- self.ensure_complete_parse(false);
- Some(items)
- }
-
- fn make_expr(mut self: Box<Self>) -> Option<P<ast::Expr>> {
- match self.parser.parse_expr() {
- Ok(e) => {
- self.ensure_complete_parse(true);
- Some(e)
- }
- Err(mut e) => {
- e.emit();
- Some(DummyResult::raw_expr(self.span))
- }
- }
- }
-
- fn make_pat(mut self: Box<Self>) -> Option<P<ast::Pat>> {
- match self.parser.parse_pat() {
- Ok(e) => {
- self.ensure_complete_parse(false);
- Some(e)
- }
- Err(mut e) => {
- e.emit();
- Some(P(DummyResult::raw_pat(self.span)))
- }
- }
- }
-
- fn make_stmts(mut self: Box<Self>) -> Option<SmallVector<ast::Stmt>> {
- let mut stmts = SmallVector::zero();
- loop {
- if self.parser.token == token::Eof {
- break;
- }
- match self.parser.parse_full_stmt(false) {
- Ok(Some(stmt)) => stmts.push(stmt),
- Ok(None) => { /* continue */ }
- Err(mut e) => {
- e.emit();
- return Some(SmallVector::zero());
- }
- }
- }
- self.ensure_complete_parse(false);
- Some(stmts)
- }
-
- fn make_ty(mut self: Box<Self>) -> Option<P<ast::Ty>> {
- match self.parser.parse_ty() {
- Ok(e) => {
- self.ensure_complete_parse(false);
- Some(e)
- }
- Err(mut e) => {
- e.emit();
- Some(DummyResult::raw_ty(self.span))
- }
- }
- }
-}
-
/// Represents a thing that maps token trees to Macro Results
pub trait TTMacroExpander {
fn expand<'cx>(&self,
pub trait Resolver {
fn next_node_id(&mut self) -> ast::NodeId;
+ fn get_module_scope(&mut self, id: ast::NodeId) -> Mark;
fn visit_expansion(&mut self, mark: Mark, expansion: &Expansion);
fn add_macro(&mut self, scope: Mark, def: ast::MacroDef);
- fn add_ext(&mut self, scope: Mark, ident: ast::Ident, ext: Rc<SyntaxExtension>);
+ fn add_ext(&mut self, ident: ast::Ident, ext: Rc<SyntaxExtension>);
fn add_expansions_at_stmt(&mut self, id: ast::NodeId, macros: Vec<Mark>);
fn find_attr_invoc(&mut self, attrs: &mut Vec<Attribute>) -> Option<Attribute>;
impl Resolver for DummyResolver {
fn next_node_id(&mut self) -> ast::NodeId { ast::DUMMY_NODE_ID }
+ fn get_module_scope(&mut self, _id: ast::NodeId) -> Mark { Mark::root() }
fn visit_expansion(&mut self, _invoc: Mark, _expansion: &Expansion) {}
fn add_macro(&mut self, _scope: Mark, _def: ast::MacroDef) {}
- fn add_ext(&mut self, _scope: Mark, _ident: ast::Ident, _ext: Rc<SyntaxExtension>) {}
+ fn add_ext(&mut self, _ident: ast::Ident, _ext: Rc<SyntaxExtension>) {}
fn add_expansions_at_stmt(&mut self, _id: ast::NodeId, _macros: Vec<Mark>) {}
fn find_attr_invoc(&mut self, _attrs: &mut Vec<Attribute>) -> Option<Attribute> { None }
pub depth: usize,
pub backtrace: ExpnId,
pub module: Rc<ModuleData>,
- pub in_block: bool,
+
+ // True if non-inline modules without a `#[path]` are forbidden at the root of this expansion.
+ pub no_noninline_mod: bool,
}
/// One of these is made during expansion and incrementally updated as we go;
pub ecfg: expand::ExpansionConfig<'a>,
pub crate_root: Option<&'static str>,
pub resolver: &'a mut Resolver,
+ pub resolve_err_count: usize,
pub current_expansion: ExpansionData,
}
ecfg: ecfg,
crate_root: None,
resolver: resolver,
+ resolve_err_count: 0,
current_expansion: ExpansionData {
mark: Mark::root(),
depth: 0,
backtrace: NO_EXPANSION,
module: Rc::new(ModuleData { mod_path: Vec::new(), directory: PathBuf::new() }),
- in_block: false,
+ no_noninline_mod: false,
},
}
}
for (name, extension) in user_exts {
let ident = ast::Ident::with_empty_ctxt(name);
- self.resolver.add_ext(Mark::root(), ident, Rc::new(extension));
+ self.resolver.add_ext(ident, Rc::new(extension));
}
let mut module = ModuleData {
fn typaram(&self,
span: Span,
id: ast::Ident,
+ attrs: Vec<ast::Attribute>,
bounds: ast::TyParamBounds,
default: Option<P<ast::Ty>>) -> ast::TyParam;
fn lifetime_def(&self,
span: Span,
name: ast::Name,
+ attrs: Vec<ast::Attribute>,
bounds: Vec<ast::Lifetime>)
-> ast::LifetimeDef;
ident: ast::Ident,
typ: P<ast::Ty>,
ex: P<ast::Expr>)
- -> P<ast::Stmt>;
+ -> ast::Stmt;
fn stmt_let_type_only(&self, span: Span, ty: P<ast::Ty>) -> ast::Stmt;
fn stmt_item(&self, sp: Span, item: P<ast::Item>) -> ast::Stmt;
fn typaram(&self,
span: Span,
id: ast::Ident,
+ attrs: Vec<ast::Attribute>,
bounds: ast::TyParamBounds,
default: Option<P<ast::Ty>>) -> ast::TyParam {
ast::TyParam {
ident: id,
id: ast::DUMMY_NODE_ID,
+ attrs: attrs.into(),
bounds: bounds,
default: default,
span: span
fn lifetime_def(&self,
span: Span,
name: ast::Name,
+ attrs: Vec<ast::Attribute>,
bounds: Vec<ast::Lifetime>)
-> ast::LifetimeDef {
ast::LifetimeDef {
+ attrs: attrs.into(),
lifetime: self.lifetime(span, name),
bounds: bounds
}
ident: ast::Ident,
typ: P<ast::Ty>,
ex: P<ast::Expr>)
- -> P<ast::Stmt> {
+ -> ast::Stmt {
let pat = if mutbl {
let binding_mode = ast::BindingMode::ByValue(ast::Mutability::Mutable);
self.pat_ident_binding_mode(sp, ident, binding_mode)
span: sp,
attrs: ast::ThinVec::new(),
});
- P(ast::Stmt {
+ ast::Stmt {
id: ast::DUMMY_NODE_ID,
node: ast::StmtKind::Local(local),
span: sp,
- })
+ }
}
// Generate `let _: Type;`, usually used for type assertions.
// except according to those terms.
use ast::{Block, Crate, Ident, Mac_, PatKind};
-use ast::{MacStmtStyle, StmtKind, ItemKind};
+use ast::{Name, MacStmtStyle, StmtKind, ItemKind};
use ast;
use ext::hygiene::Mark;
use ext::placeholders::{placeholder, PlaceholderExpander};
use attr::{self, HasAttrs};
use codemap::{ExpnInfo, NameAndSpan, MacroBang, MacroAttribute};
use syntax_pos::{self, Span, ExpnId};
-use config::StripUnconfigured;
+use config::{is_test_or_bench, StripUnconfigured};
use ext::base::*;
use feature_gate::{self, Features};
use fold;
use fold::*;
-use parse::{ParseSess, lexer};
+use parse::{ParseSess, PResult, lexer};
use parse::parser::Parser;
-use parse::token::{intern, keywords};
+use parse::token::{self, intern, keywords};
use print::pprust;
use ptr::P;
use tokenstream::{TokenTree, TokenStream};
($($kind:ident: $ty:ty [$($vec:ident, $ty_elt:ty)*], $kind_name:expr, .$make:ident,
$(.$fold:ident)* $(lift .$fold_elt:ident)*,
$(.$visit:ident)* $(lift .$visit_elt:ident)*;)*) => {
- #[derive(Copy, Clone)]
+ #[derive(Copy, Clone, PartialEq, Eq)]
pub enum ExpansionKind { OptExpr, $( $kind, )* }
pub enum Expansion { OptExpr(Option<P<ast::Expr>>), $( $kind($ty), )* }
impl ExpansionKind {
- fn name(self) -> &'static str {
+ pub fn name(self) -> &'static str {
match self {
ExpansionKind::OptExpr => "expression",
$( ExpansionKind::$kind => $kind_name, )*
self.expand(Expansion::$kind(SmallVector::one(node))).$make()
})*)*
}
+
+ impl<'a> MacResult for ::ext::tt::macro_rules::ParserAnyMacro<'a> {
+ $(fn $make(self: Box<::ext::tt::macro_rules::ParserAnyMacro<'a>>) -> Option<$ty> {
+ Some(self.make(ExpansionKind::$kind).$make())
+ })*
+ }
}
}
_ => unreachable!(),
};
- if self.cx.parse_sess.span_diagnostic.err_count() > err_count {
+ if self.cx.parse_sess.span_diagnostic.err_count() - self.cx.resolve_err_count > err_count {
self.cx.parse_sess.span_diagnostic.abort_if_errors();
}
self.cx.cfg = crate_config;
if self.monotonic {
+ let err_count = self.cx.parse_sess.span_diagnostic.err_count();
let mark = self.cx.current_expansion.mark;
self.cx.resolver.visit_expansion(mark, &result.0);
+ self.cx.resolve_err_count += self.cx.parse_sess.span_diagnostic.err_count() - err_count;
}
result
};
attr::mark_used(&attr);
+ let name = intern(&attr.name());
self.cx.bt_push(ExpnInfo {
call_site: attr.span,
callee: NameAndSpan {
- format: MacroAttribute(intern(&attr.name())),
+ format: MacroAttribute(name),
span: Some(attr.span),
allow_internal_unstable: false,
}
let item_toks = TokenStream::from_tts(tts_for_item(&item, &self.cx.parse_sess));
let tok_result = mac.expand(self.cx, attr.span, attr_toks, item_toks);
- let parser = self.cx.new_parser_from_tts(&tok_result.to_tts());
- let result = Box::new(TokResult { parser: parser, span: attr.span });
-
- kind.make_from(result).unwrap_or_else(|| {
- let msg = format!("macro could not be expanded into {} position", kind.name());
- self.cx.span_err(attr.span, &msg);
- kind.dummy(attr.span)
- })
+ self.parse_expansion(tok_result, kind, name, attr.span)
}
_ => unreachable!(),
}
// Detect use of feature-gated or invalid attributes on macro invoations
// since they will not be detected after macro expansion.
for attr in attrs.iter() {
- feature_gate::check_attribute(&attr, &self.cx.parse_sess.span_diagnostic,
+ feature_gate::check_attribute(&attr, &self.cx.parse_sess,
&self.cx.parse_sess.codemap(),
&self.cx.ecfg.features.unwrap());
}
},
});
-
- let tok_result = expandfun.expand(self.cx,
- span,
- TokenStream::from_tts(marked_tts));
- let parser = self.cx.new_parser_from_tts(&tok_result.to_tts());
- let result = Box::new(TokResult { parser: parser, span: span });
- // FIXME better span info.
- kind.make_from(result).map(|i| i.fold_with(&mut ChangeSpan { span: span }))
+ let toks = TokenStream::from_tts(marked_tts);
+ let tok_result = expandfun.expand(self.cx, span, toks);
+ Some(self.parse_expansion(tok_result, kind, extname, span))
}
};
expn_id: Some(self.cx.backtrace()),
})
}
+
+ // Parses the token stream produced by a macro expansion back into an AST
+ // fragment of the requested `kind`. `name` and `span` identify the macro
+ // invocation for error reporting.
+ fn parse_expansion(&mut self, toks: TokenStream, kind: ExpansionKind, name: Name, span: Span)
+ -> Expansion {
+ let mut parser = self.cx.new_parser_from_tts(&toks.to_tts());
+ let expansion = match parser.parse_expansion(kind, false) {
+ Ok(expansion) => expansion,
+ Err(mut err) => {
+ // Emit the parse error and hand back a dummy expansion so that
+ // compilation can continue and surface further errors.
+ err.emit();
+ return kind.dummy(span);
+ }
+ };
+ // Complain about any tokens left over after parsing the expansion.
+ parser.ensure_complete_parse(name, kind.name(), span);
+ // FIXME better span info
+ expansion.fold_with(&mut ChangeSpan { span: span })
+ }
+}
+
+impl<'a> Parser<'a> {
+ // Parses this parser's remaining tokens as an AST fragment of the given
+ // `kind` (items, trait/impl items, statements, an expression, a type, or a
+ // pattern). `macro_legacy_warnings` is forwarded to statement parsing.
+ pub fn parse_expansion(&mut self, kind: ExpansionKind, macro_legacy_warnings: bool)
+ -> PResult<'a, Expansion> {
+ Ok(match kind {
+ ExpansionKind::Items => {
+ let mut items = SmallVector::zero();
+ // `parse_item` returns None when no further item can be parsed.
+ while let Some(item) = self.parse_item()? {
+ items.push(item);
+ }
+ Expansion::Items(items)
+ }
+ ExpansionKind::TraitItems => {
+ let mut items = SmallVector::zero();
+ while self.token != token::Eof {
+ items.push(self.parse_trait_item()?);
+ }
+ Expansion::TraitItems(items)
+ }
+ ExpansionKind::ImplItems => {
+ let mut items = SmallVector::zero();
+ while self.token != token::Eof {
+ items.push(self.parse_impl_item()?);
+ }
+ Expansion::ImplItems(items)
+ }
+ ExpansionKind::Stmts => {
+ let mut stmts = SmallVector::zero();
+ while self.token != token::Eof {
+ // `parse_full_stmt` may yield no statement (e.g. a stray
+ // semicolon); only push when one was produced.
+ if let Some(stmt) = self.parse_full_stmt(macro_legacy_warnings)? {
+ stmts.push(stmt);
+ }
+ }
+ Expansion::Stmts(stmts)
+ }
+ ExpansionKind::Expr => Expansion::Expr(self.parse_expr()?),
+ ExpansionKind::OptExpr => Expansion::OptExpr(Some(self.parse_expr()?)),
+ ExpansionKind::Ty => Expansion::Ty(self.parse_ty()?),
+ ExpansionKind::Pat => Expansion::Pat(self.parse_pat()?),
+ })
+ }
+
+ // Errors if any tokens remain unconsumed after a macro expansion was
+ // parsed, pointing at both the leftover token and the invocation `span`,
+ // so nothing is silently dropped.
+ pub fn ensure_complete_parse(&mut self, macro_name: ast::Name, kind_name: &str, span: Span) {
+ if self.token != token::Eof {
+ let msg = format!("macro expansion ignores token `{}` and any following",
+ self.this_token_to_string());
+ let mut err = self.diagnostic().struct_span_err(self.span, &msg);
+ let msg = format!("caused by the macro expansion here; the usage \
+ of `{}!` is likely invalid in {} context",
+ macro_name, kind_name);
+ err.span_note(span, &msg).emit();
+ }
+ }
}
struct InvocationCollector<'a, 'b: 'a> {
}
fn fold_block(&mut self, block: P<Block>) -> P<Block> {
- let orig_in_block = mem::replace(&mut self.cx.current_expansion.in_block, true);
+ let no_noninline_mod = mem::replace(&mut self.cx.current_expansion.no_noninline_mod, true);
let result = noop_fold_block(block, self);
- self.cx.current_expansion.in_block = orig_in_block;
+ self.cx.current_expansion.no_noninline_mod = no_noninline_mod;
result
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
let item = configure!(self, item);
- let (item, attr) = self.classify_item(item);
+ let (mut item, attr) = self.classify_item(item);
if let Some(attr) = attr {
let item = Annotatable::Item(fully_configure!(self, item, noop_fold_item));
return self.collect_attr(attr, item, ExpansionKind::Items).make_items();
return noop_fold_item(item, self);
}
+ let orig_no_noninline_mod = self.cx.current_expansion.no_noninline_mod;
let mut module = (*self.cx.current_expansion.module).clone();
module.mod_path.push(item.ident);
let inline_module = item.span.contains(inner) || inner == syntax_pos::DUMMY_SP;
if inline_module {
- module.directory.push(&*{
- ::attr::first_attr_value_str_by_name(&item.attrs, "path")
- .unwrap_or(item.ident.name.as_str())
- });
+ if let Some(path) = attr::first_attr_value_str_by_name(&item.attrs, "path") {
+ self.cx.current_expansion.no_noninline_mod = false;
+ module.directory.push(&*path);
+ } else {
+ module.directory.push(&*item.ident.name.as_str());
+ }
} else {
+ self.cx.current_expansion.no_noninline_mod = false;
module.directory =
PathBuf::from(self.cx.parse_sess.codemap().span_to_filename(inner));
module.directory.pop();
mem::replace(&mut self.cx.current_expansion.module, Rc::new(module));
let result = noop_fold_item(item, self);
self.cx.current_expansion.module = orig_module;
+ self.cx.current_expansion.no_noninline_mod = orig_no_noninline_mod;
return result;
}
+ // Ensure that test functions are accessible from the test harness.
+ ast::ItemKind::Fn(..) if self.cx.ecfg.should_test => {
+ if item.attrs.iter().any(|attr| is_test_or_bench(attr)) {
+ item = item.map(|mut item| { item.vis = ast::Visibility::Public; item });
+ }
+ noop_fold_item(item, self)
+ }
_ => noop_fold_item(item, self),
}
}
//! and definition contexts*. J. Funct. Program. 22, 2 (March 2012), 181-216.
//! DOI=10.1017/S0956796812000093 http://dx.doi.org/10.1017/S0956796812000093
+use ast::NodeId;
use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt;
Mark(0)
}
+ // Builds a `Mark` from a placeholder's `NodeId` by reusing its u32 value;
+ // the inverse direction is `as_u32` below.
+ pub fn from_placeholder_id(id: NodeId) -> Self {
+ Mark(id.as_u32())
+ }
+
pub fn as_u32(&self) -> u32 {
self.0
}
use syntax_pos::{Span, DUMMY_SP};
use ext::base::{DummyResult, ExtCtxt, MacEager, MacResult, SyntaxExtension};
use ext::base::{IdentMacroExpander, NormalTT, TTMacroExpander};
+use ext::expand::{Expansion, ExpansionKind};
use ext::placeholders;
use ext::tt::macro_parser::{Success, Error, Failure};
use ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use parse::token::{self, gensym_ident, NtTT, Token};
use parse::token::Token::*;
use print;
-use ptr::P;
use tokenstream::{self, TokenTree};
-use util::small_vector::SmallVector;
-
-use std::cell::RefCell;
use std::collections::{HashMap};
use std::collections::hash_map::{Entry};
use std::rc::Rc;
-struct ParserAnyMacro<'a> {
- parser: RefCell<Parser<'a>>,
+pub struct ParserAnyMacro<'a> {
+ parser: Parser<'a>,
/// Span of the expansion site of the macro this parser is for
site_span: Span,
}
impl<'a> ParserAnyMacro<'a> {
- /// Make sure we don't have any tokens left to parse, so we don't
- /// silently drop anything. `allow_semi` is so that "optional"
- /// semicolons at the end of normal expressions aren't complained
- /// about e.g. the semicolon in `macro_rules! kapow { () => {
- /// panic!(); } }` doesn't get picked up by .parse_expr(), but it's
- /// allowed to be there.
- fn ensure_complete_parse(&self, allow_semi: bool, context: &str) {
- let mut parser = self.parser.borrow_mut();
- parser.ensure_complete_parse(allow_semi, |parser| {
- let token_str = parser.this_token_to_string();
- let msg = format!("macro expansion ignores token `{}` and any \
- following",
- token_str);
- let span = parser.span;
- let mut err = parser.diagnostic().struct_span_err(span, &msg);
- let msg = format!("caused by the macro expansion here; the usage \
- of `{}!` is likely invalid in {} context",
- self.macro_ident, context);
- err.span_note(self.site_span, &msg)
- .emit();
- });
- }
-}
-
-impl<'a> MacResult for ParserAnyMacro<'a> {
- fn make_expr(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Expr>> {
- let ret = panictry!(self.parser.borrow_mut().parse_expr());
- self.ensure_complete_parse(true, "expression");
- Some(ret)
- }
- fn make_pat(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Pat>> {
- let ret = panictry!(self.parser.borrow_mut().parse_pat());
- self.ensure_complete_parse(false, "pattern");
- Some(ret)
- }
- fn make_items(self: Box<ParserAnyMacro<'a>>) -> Option<SmallVector<P<ast::Item>>> {
- let mut ret = SmallVector::zero();
- while let Some(item) = panictry!(self.parser.borrow_mut().parse_item()) {
- ret.push(item);
- }
- self.ensure_complete_parse(false, "item");
- Some(ret)
- }
-
- fn make_impl_items(self: Box<ParserAnyMacro<'a>>)
- -> Option<SmallVector<ast::ImplItem>> {
- let mut ret = SmallVector::zero();
- loop {
- let mut parser = self.parser.borrow_mut();
- match parser.token {
- token::Eof => break,
- _ => ret.push(panictry!(parser.parse_impl_item()))
- }
- }
- self.ensure_complete_parse(false, "item");
- Some(ret)
- }
-
- fn make_trait_items(self: Box<ParserAnyMacro<'a>>)
- -> Option<SmallVector<ast::TraitItem>> {
- let mut ret = SmallVector::zero();
- loop {
- let mut parser = self.parser.borrow_mut();
- match parser.token {
- token::Eof => break,
- _ => ret.push(panictry!(parser.parse_trait_item()))
- }
- }
- self.ensure_complete_parse(false, "item");
- Some(ret)
- }
-
-
- fn make_stmts(self: Box<ParserAnyMacro<'a>>)
- -> Option<SmallVector<ast::Stmt>> {
- let mut ret = SmallVector::zero();
- loop {
- let mut parser = self.parser.borrow_mut();
- match parser.token {
- token::Eof => break,
- _ => match parser.parse_full_stmt(true) {
- Ok(maybe_stmt) => match maybe_stmt {
- Some(stmt) => ret.push(stmt),
- None => (),
- },
- Err(mut e) => {
- e.emit();
- break;
- }
- }
- }
+ pub fn make(mut self: Box<ParserAnyMacro<'a>>, kind: ExpansionKind) -> Expansion {
+ let ParserAnyMacro { site_span, macro_ident, ref mut parser } = *self;
+ let expansion = panictry!(parser.parse_expansion(kind, true));
+
+ // We allow semicolons at the end of expressions -- e.g. the semicolon in
+ // `macro_rules! m { () => { panic!(); } }` isn't parsed by `.parse_expr()`,
+ // but `m!()` is allowed in expression positions (cf. issue #34706).
+ if kind == ExpansionKind::Expr && parser.token == token::Semi {
+ parser.bump();
}
- self.ensure_complete_parse(false, "statement");
- Some(ret)
- }
- fn make_ty(self: Box<ParserAnyMacro<'a>>) -> Option<P<ast::Ty>> {
- let ret = panictry!(self.parser.borrow_mut().parse_ty());
- self.ensure_complete_parse(false, "type");
- Some(ret)
+ // Make sure we don't have any tokens left to parse so we don't silently drop anything.
+ parser.ensure_complete_parse(macro_ident.name, kind.name(), site_span);
+ expansion
}
}
rhs);
let mut p = Parser::new(cx.parse_sess(), cx.cfg(), Box::new(trncbr));
p.directory = cx.current_expansion.module.directory.clone();
- p.restrictions = match cx.current_expansion.in_block {
+ p.restrictions = match cx.current_expansion.no_noninline_mod {
true => Restrictions::NO_NONINLINE_MOD,
false => Restrictions::empty(),
};
// Let the context choose how to interpret the result.
// Weird, but useful for X-macros.
return Box::new(ParserAnyMacro {
- parser: RefCell::new(p),
+ parser: p,
// Pass along the original expansion site and the name of the macro
// so we can print a useful error message if the parse of the expanded
(**tt).clone()
}
_ => sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
- }).collect()
+ }).collect::<Vec<TokenTree>>()
}
_ => sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
};
valid &= check_rhs(sess, rhs);
}
+ // don't abort iteration early, so that errors for multiple lhses can be reported
+ for lhs in &lhses {
+ valid &= check_lhs_no_empty_seq(sess, &[lhs.clone()])
+ }
+
let exp: Box<_> = Box::new(MacroRulesMacroExpander {
name: def.ident,
imported_from: def.imported_from,
// after parsing/expansion. we can report every error in every macro this way.
}
+/// Check that the lhs contains no repetition which could match an empty token
+/// tree, because then the matcher would hang indefinitely.
+fn check_lhs_no_empty_seq(sess: &ParseSess, tts: &[TokenTree]) -> bool {
+ for tt in tts {
+ match *tt {
+ TokenTree::Token(_, _) => (),
+ // Recurse into delimited groups; bail out on the first failure.
+ TokenTree::Delimited(_, ref del) => if !check_lhs_no_empty_seq(sess, &del.tts) {
+ return false;
+ },
+ TokenTree::Sequence(span, ref seq) => {
+ // A separator-less repetition whose contents are all `$(...)*`
+ // sub-sequences can match zero tokens, so the matcher could
+ // loop forever; report it as an error.
+ if seq.separator.is_none() {
+ if seq.tts.iter().all(|seq_tt| {
+ match *seq_tt {
+ TokenTree::Sequence(_, ref sub_seq) =>
+ sub_seq.op == tokenstream::KleeneOp::ZeroOrMore,
+ _ => false,
+ }
+ }) {
+ sess.span_diagnostic.span_err(span, "repetition matches empty token tree");
+ return false;
+ }
+ }
+ // Also check the sequence's own contents recursively.
+ if !check_lhs_no_empty_seq(sess, &seq.tts) {
+ return false;
+ }
+ }
+ }
+ }
+
+ true
+}
+
fn check_rhs(sess: &ParseSess, rhs: &TokenTree) -> bool {
match *rhs {
TokenTree::Delimited(..) => return true,
use parse::token::InternedString;
use std::ascii::AsciiExt;
+use std::env;
macro_rules! setter {
($field: ident) => {{
// Used to identify the `compiler_builtins` crate
// rustc internal
(active, compiler_builtins, "1.13.0", None),
+
+ // Allows attributes on lifetime/type formal parameters in generics (RFC 1327)
+ (active, generic_param_attrs, "1.11.0", Some(34761)),
);
declare_features! (
pub fn check_and_emit(&self, sess: &ParseSess, features: &Features) {
let (cfg, feature, has_feature) = GATED_CFGS[self.index];
if !has_feature(features) && !sess.codemap().span_allows_unstable(self.span) {
- let diagnostic = &sess.span_diagnostic;
let explain = format!("`cfg({})` is experimental and subject to change", cfg);
- emit_feature_err(diagnostic, feature, self.span, GateIssue::Language, &explain);
+ emit_feature_err(sess, feature, self.span, GateIssue::Language, &explain);
}
}
}
struct Context<'a> {
features: &'a Features,
- span_handler: &'a Handler,
+ parse_sess: &'a ParseSess,
cm: &'a CodeMap,
plugin_attributes: &'a [(String, AttributeType)],
}
let has_feature: bool = has_feature(&$cx.features);
debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
if !has_feature && !cx.cm.span_allows_unstable(span) {
- emit_feature_err(cx.span_handler, name, span, GateIssue::Language, explain);
+ emit_feature_err(cx.parse_sess, name, span, GateIssue::Language, explain);
}
}}
}
}
}
-pub fn check_attribute(attr: &ast::Attribute, handler: &Handler,
+pub fn check_attribute(attr: &ast::Attribute, parse_sess: &ParseSess,
cm: &CodeMap, features: &Features) {
let cx = Context {
- features: features, span_handler: handler,
+ features: features, parse_sess: parse_sess,
cm: cm, plugin_attributes: &[]
};
cx.check_attribute(attr, true);
Library(Option<u32>)
}
-pub fn emit_feature_err(diag: &Handler, feature: &str, span: Span, issue: GateIssue,
+pub fn emit_feature_err(sess: &ParseSess, feature: &str, span: Span, issue: GateIssue,
explain: &str) {
+ let diag = &sess.span_diagnostic;
+
let issue = match issue {
GateIssue::Language => find_lang_feature_issue(feature),
GateIssue::Library(lib) => lib,
};
// #23973: do not suggest `#![feature(...)]` if we are in beta/stable
- if option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some() {
- err.emit();
- return;
+ if sess.unstable_features.is_nightly_build() {
+ err.help(&format!("add #![feature({})] to the \
+ crate attributes to enable",
+ feature));
}
- err.help(&format!("add #![feature({})] to the \
- crate attributes to enable",
- feature));
+
err.emit();
}
if attr::contains_name(&i.attrs[..], "simd") {
gate_feature_post!(&self, simd, i.span,
"SIMD types are experimental and possibly buggy");
- self.context.span_handler.span_warn(i.span,
- "the `#[simd]` attribute is deprecated, \
- use `#[repr(simd)]` instead");
+ self.context.parse_sess.span_diagnostic.span_warn(i.span,
+ "the `#[simd]` attribute \
+ is deprecated, use \
+ `#[repr(simd)]` instead");
}
for attr in &i.attrs {
if attr.name() == "repr" {
fn visit_pat(&mut self, pattern: &ast::Pat) {
match pattern.node {
- PatKind::Vec(_, Some(_), ref last) if !last.is_empty() => {
+ PatKind::Slice(_, Some(_), ref last) if !last.is_empty() => {
gate_feature_post!(&self, advanced_slice_patterns,
pattern.span,
"multiple-element slice matches anywhere \
but at the end of a slice (e.g. \
`[0, ..xs, 0]`) are experimental")
}
- PatKind::Vec(..) => {
+ PatKind::Slice(..) => {
gate_feature_post!(&self, slice_patterns,
pattern.span,
"slice pattern syntax is experimental");
visit::walk_vis(self, vis)
}
+
+ fn visit_generics(&mut self, g: &ast::Generics) {
+ for t in &g.ty_params {
+ if !t.attrs.is_empty() {
+ gate_feature_post!(&self, generic_param_attrs, t.attrs[0].span,
+ "attributes on type parameter bindings are experimental");
+ }
+ }
+ visit::walk_generics(self, g)
+ }
+
+ fn visit_lifetime_def(&mut self, lifetime_def: &ast::LifetimeDef) {
+ if !lifetime_def.attrs.is_empty() {
+ gate_feature_post!(&self, generic_param_attrs, lifetime_def.attrs[0].span,
+ "attributes on lifetime bindings are experimental");
+ }
+ visit::walk_lifetime_def(self, lifetime_def)
+ }
}
pub fn get_features(span_handler: &Handler, krate_attrs: &[ast::Attribute]) -> Features {
maybe_stage_features(&sess.span_diagnostic, krate, unstable);
let ctx = Context {
features: features,
- span_handler: &sess.span_diagnostic,
+ parse_sess: sess,
cm: sess.codemap(),
plugin_attributes: plugin_attributes,
};
Cheat
}
+impl UnstableFeatures {
+ /// Determines the unstable-features policy from build-time and runtime
+ /// environment: `Cheat` when the bootstrap keys match, otherwise
+ /// `Disallow` on feature-staged (beta/stable) builds and `Allow` elsewhere.
+ pub fn from_environment() -> UnstableFeatures {
+ // Whether this is a feature-staged build, i.e. on the beta or stable channel
+ let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
+ // The secret key needed to get through the rustc build itself by
+ // subverting the unstable features lints
+ let bootstrap_secret_key = option_env!("CFG_BOOTSTRAP_KEY");
+ // The matching key to the above, only known by the build system
+ let bootstrap_provided_key = env::var("RUSTC_BOOTSTRAP_KEY").ok();
+ match (disable_unstable_features, bootstrap_secret_key, bootstrap_provided_key) {
+ (_, Some(ref s), Some(ref p)) if s == p => UnstableFeatures::Cheat,
+ (true, _, _) => UnstableFeatures::Disallow,
+ (false, _, _) => UnstableFeatures::Allow
+ }
+ }
+
+ /// True for builds where unstable features may be used (`Allow` or `Cheat`).
+ pub fn is_nightly_build(&self) -> bool {
+ match *self {
+ UnstableFeatures::Allow | UnstableFeatures::Cheat => true,
+ _ => false,
+ }
+ }
+}
+
fn maybe_stage_features(span_handler: &Handler, krate: &ast::Crate,
unstable: UnstableFeatures) {
let allow_features = match unstable {
id: fld.new_id(id),
node: match node {
TyKind::Infer | TyKind::ImplicitSelf => node,
- TyKind::Vec(ty) => TyKind::Vec(fld.fold_ty(ty)),
+ TyKind::Slice(ty) => TyKind::Slice(fld.fold_ty(ty)),
TyKind::Ptr(mt) => TyKind::Ptr(fld.fold_mt(mt)),
TyKind::Rptr(region, mt) => {
TyKind::Rptr(fld.fold_opt_lifetime(region), fld.fold_mt(mt))
TyKind::ObjectSum(fld.fold_ty(ty),
fld.fold_bounds(bounds))
}
- TyKind::FixedLengthVec(ty, e) => {
- TyKind::FixedLengthVec(fld.fold_ty(ty), fld.fold_expr(e))
+ TyKind::Array(ty, e) => {
+ TyKind::Array(fld.fold_ty(ty), fld.fold_expr(e))
}
TyKind::Typeof(expr) => {
TyKind::Typeof(fld.fold_expr(expr))
}
pub fn noop_fold_ty_param<T: Folder>(tp: TyParam, fld: &mut T) -> TyParam {
- let TyParam {id, ident, bounds, default, span} = tp;
+ let TyParam {attrs, id, ident, bounds, default, span} = tp;
+ let attrs: Vec<_> = attrs.into();
TyParam {
+ attrs: attrs.into_iter()
+ .flat_map(|x| fld.fold_attribute(x).into_iter())
+ .collect::<Vec<_>>()
+ .into(),
id: fld.new_id(id),
ident: ident,
bounds: fld.fold_bounds(bounds),
pub fn noop_fold_lifetime_def<T: Folder>(l: LifetimeDef, fld: &mut T)
-> LifetimeDef {
+ let attrs: Vec<_> = l.attrs.into();
LifetimeDef {
+ attrs: attrs.into_iter()
+ .flat_map(|x| fld.fold_attribute(x).into_iter())
+ .collect::<Vec<_>>()
+ .into(),
lifetime: fld.fold_lifetime(l.lifetime),
bounds: fld.fold_lifetimes(l.bounds),
}
PatKind::Range(e1, e2) => {
PatKind::Range(folder.fold_expr(e1), folder.fold_expr(e2))
},
- PatKind::Vec(before, slice, after) => {
- PatKind::Vec(before.move_map(|x| folder.fold_pat(x)),
+ PatKind::Slice(before, slice, after) => {
+ PatKind::Slice(before.move_map(|x| folder.fold_pat(x)),
slice.map(|x| folder.fold_pat(x)),
after.move_map(|x| folder.fold_pat(x)))
}
}
impl JsonEmitter {
+ // Constructs a JsonEmitter that writes diagnostics to standard error,
+ // using the given error-code `registry` (if any) and code map.
+ pub fn stderr(registry: Option<Registry>,
+ code_map: Rc<CodeMap>) -> JsonEmitter {
+ JsonEmitter {
+ dst: Box::new(io::stderr()),
+ registry: registry,
+ cm: code_map,
+ }
+ }
+
pub fn basic() -> JsonEmitter {
JsonEmitter::stderr(None, Rc::new(CodeMap::new()))
}
- pub fn stderr(registry: Option<Registry>,
- code_map: Rc<CodeMap>) -> JsonEmitter {
+ pub fn new(dst: Box<Write + Send>,
+ registry: Option<Registry>,
+ code_map: Rc<CodeMap>) -> JsonEmitter {
JsonEmitter {
- dst: Box::new(io::stderr()),
+ dst: dst,
registry: registry,
cm: code_map,
}
self.last_pos = self.pos;
let current_byte_offset = self.byte_offset(self.pos).to_usize();
if current_byte_offset < self.source_text.len() {
- assert!(self.curr.is_some());
let last_char = self.curr.unwrap();
let ch = char_at(&self.source_text, current_byte_offset);
- let next = current_byte_offset + ch.len_utf8();
- let byte_offset_diff = next - current_byte_offset;
+ let byte_offset_diff = ch.len_utf8();
self.pos = self.pos + Pos::from_usize(byte_offset_diff);
self.curr = Some(ch);
self.col = self.col + CharPos(1);
// line comments starting with "///" or "//!" are doc-comments
let doc_comment = self.curr_is('/') || self.curr_is('!');
- let start_bpos = if doc_comment {
- self.pos - BytePos(3)
- } else {
- self.last_pos - BytePos(2)
- };
+ let start_bpos = self.last_pos - BytePos(2);
while !self.is_eof() {
match self.curr.unwrap() {
use codemap::CodeMap;
use syntax_pos::{self, Span, FileMap};
use errors::{Handler, ColorConfig, DiagnosticBuilder};
+use feature_gate::UnstableFeatures;
use parse::parser::Parser;
use parse::token::InternedString;
use ptr::P;
/// Info about a parsing session.
pub struct ParseSess {
pub span_diagnostic: Handler, // better be the same as the one in the reader!
+ pub unstable_features: UnstableFeatures,
/// Used to determine and report recursive mod inclusions
included_mod_stack: RefCell<Vec<PathBuf>>,
code_map: Rc<CodeMap>,
pub fn with_span_handler(handler: Handler, code_map: Rc<CodeMap>) -> ParseSess {
ParseSess {
span_diagnostic: handler,
+ unstable_features: UnstableFeatures::from_environment(),
included_mod_stack: RefCell::new(vec![]),
code_map: code_map
}
}
}
- fn parse_ident_into_path(&mut self) -> PResult<'a, ast::Path> {
- let ident = self.parse_ident()?;
- Ok(ast::Path::from_ident(self.last_span, ident))
- }
-
/// Check if the next token is `tok`, and return `true` if so.
///
/// This method will automatically add `tok` to `expected_tokens` if `tok` is not
let lo = self.span.lo;
let (name, node) = if self.eat_keyword(keywords::Type) {
- let TyParam {ident, bounds, default, ..} = self.parse_ty_param()?;
+ let TyParam {ident, bounds, default, ..} = self.parse_ty_param(vec![])?;
self.expect(&token::Semi)?;
(ident, TraitItemKind::Type(bounds, default))
} else if self.is_const_item() {
None
};
(ident, TraitItemKind::Const(ty, default))
- } else if !self.token.is_any_keyword()
- && self.look_ahead(1, |t| *t == token::Not)
- && (self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
- || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
- // trait item macro.
- // code copied from parse_macro_use_or_failure... abstraction!
- let lo = self.span.lo;
- let pth = self.parse_ident_into_path()?;
- self.expect(&token::Not)?;
+ } else if self.token.is_path_start() {
+ // trait item macro.
+ // code copied from parse_macro_use_or_failure... abstraction!
+ let lo = self.span.lo;
+ let pth = self.parse_path(PathStyle::Mod)?;
+ self.expect(&token::Not)?;
- // eat a matched-delimiter token tree:
- let delim = self.expect_open_delim()?;
- let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
- SeqSep::none(),
- |pp| pp.parse_token_tree())?;
- let m_ = Mac_ { path: pth, tts: tts };
- let m: ast::Mac = codemap::Spanned { node: m_,
- span: mk_sp(lo,
- self.last_span.hi) };
- if delim != token::Brace {
- self.expect(&token::Semi)?
- }
- (keywords::Invalid.ident(), ast::TraitItemKind::Macro(m))
- } else {
- let (constness, unsafety, abi) = match self.parse_fn_front_matter() {
- Ok(cua) => cua,
- Err(e) => {
- loop {
- match self.token {
- token::Eof => break,
- token::CloseDelim(token::Brace) |
- token::Semi => {
- self.bump();
- break;
- }
- token::OpenDelim(token::Brace) => {
- self.parse_token_tree()?;
- break;
- }
- _ => self.bump()
+ // eat a matched-delimiter token tree:
+ let delim = self.expect_open_delim()?;
+ let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
+ SeqSep::none(),
+ |pp| pp.parse_token_tree())?;
+ if delim != token::Brace {
+ self.expect(&token::Semi)?
+ }
+
+ let mac = spanned(lo, self.last_span.hi, Mac_ { path: pth, tts: tts });
+ (keywords::Invalid.ident(), ast::TraitItemKind::Macro(mac))
+ } else {
+ let (constness, unsafety, abi) = match self.parse_fn_front_matter() {
+ Ok(cua) => cua,
+ Err(e) => {
+ loop {
+ match self.token {
+ token::Eof => break,
+ token::CloseDelim(token::Brace) |
+ token::Semi => {
+ self.bump();
+ break;
+ }
+ token::OpenDelim(token::Brace) => {
+ self.parse_token_tree()?;
+ break;
}
+ _ => self.bump(),
}
-
- return Err(e);
}
- };
- let ident = self.parse_ident()?;
- let mut generics = self.parse_generics()?;
+ return Err(e);
+ }
+ };
- let d = self.parse_fn_decl_with_self(|p: &mut Parser<'a>|{
- // This is somewhat dubious; We don't want to allow
- // argument names to be left off if there is a
- // definition...
- p.parse_arg_general(false)
- })?;
+ let ident = self.parse_ident()?;
+ let mut generics = self.parse_generics()?;
- generics.where_clause = self.parse_where_clause()?;
- let sig = ast::MethodSig {
- unsafety: unsafety,
- constness: constness,
- decl: d,
- generics: generics,
- abi: abi,
- };
+ let d = self.parse_fn_decl_with_self(|p: &mut Parser<'a>|{
+ // This is somewhat dubious; We don't want to allow
+ // argument names to be left off if there is a
+ // definition...
+ p.parse_arg_general(false)
+ })?;
- let body = match self.token {
- token::Semi => {
- self.bump();
- debug!("parse_trait_methods(): parsing required method");
- None
- }
- token::OpenDelim(token::Brace) => {
- debug!("parse_trait_methods(): parsing provided method");
- let (inner_attrs, body) =
- self.parse_inner_attrs_and_block()?;
- attrs.extend(inner_attrs.iter().cloned());
- Some(body)
- }
+ generics.where_clause = self.parse_where_clause()?;
+ let sig = ast::MethodSig {
+ unsafety: unsafety,
+ constness: constness,
+ decl: d,
+ generics: generics,
+ abi: abi,
+ };
- _ => {
- let token_str = self.this_token_to_string();
- return Err(self.fatal(&format!("expected `;` or `{{`, found `{}`",
- token_str)[..]))
- }
- };
- (ident, ast::TraitItemKind::Method(sig, body))
+ let body = match self.token {
+ token::Semi => {
+ self.bump();
+ debug!("parse_trait_methods(): parsing required method");
+ None
+ }
+ token::OpenDelim(token::Brace) => {
+ debug!("parse_trait_methods(): parsing provided method");
+ let (inner_attrs, body) = self.parse_inner_attrs_and_block()?;
+ attrs.extend(inner_attrs.iter().cloned());
+ Some(body)
+ }
+ _ => {
+ let token_str = self.this_token_to_string();
+ return Err(self.fatal(&format!("expected `;` or `{{`, found `{}`", token_str)));
+ }
};
+ (ident, ast::TraitItemKind::Method(sig, body))
+ };
+
Ok(TraitItem {
id: ast::DUMMY_NODE_ID,
ident: name,
// Parse the `; e` in `[ i32; e ]`
// where `e` is a const expression
let t = match self.maybe_parse_fixed_length_of_vec()? {
- None => TyKind::Vec(t),
- Some(suffix) => TyKind::FixedLengthVec(t, suffix)
+ None => TyKind::Slice(t),
+ Some(suffix) => TyKind::Array(t, suffix)
};
self.expect(&token::CloseDelim(token::Bracket))?;
t
TyKind::Path(Some(qself), path)
} else if self.token.is_path_start() {
let path = self.parse_path(PathStyle::Type)?;
- if self.check(&token::Not) {
+ if self.eat(&token::Not) {
// MACRO INVOCATION
- self.bump();
let delim = self.expect_open_delim()?;
let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
SeqSep::none(),
/// Parses `lifetime_defs = [ lifetime_defs { ',' lifetime_defs } ]` where `lifetime_def =
/// lifetime [':' lifetimes]`
- pub fn parse_lifetime_defs(&mut self) -> PResult<'a, Vec<ast::LifetimeDef>> {
-
+ ///
+ /// If `followed_by_ty_params` is None, then we are in a context
+ /// where only lifetime parameters are allowed, and thus we should
+ /// error if we encounter attributes after the bound lifetimes.
+ ///
+ /// If `followed_by_ty_params` is Some(r), then there may be type
+ /// parameter bindings after the lifetimes, so we should pass
+ /// along the parsed attributes to be attached to the first such
+ /// type parameter.
+ pub fn parse_lifetime_defs(&mut self,
+ followed_by_ty_params: Option<&mut Vec<ast::Attribute>>)
+ -> PResult<'a, Vec<ast::LifetimeDef>>
+ {
let mut res = Vec::new();
loop {
+ let attrs = self.parse_outer_attributes()?;
match self.token {
token::Lifetime(_) => {
let lifetime = self.parse_lifetime()?;
} else {
Vec::new()
};
- res.push(ast::LifetimeDef { lifetime: lifetime,
+ res.push(ast::LifetimeDef { attrs: attrs.into(),
+ lifetime: lifetime,
bounds: bounds });
}
_ => {
+ if let Some(recv) = followed_by_ty_params {
+ assert!(recv.is_empty());
+ *recv = attrs;
+ } else {
+ let msg = "trailing attribute after lifetime parameters";
+ return Err(self.fatal(msg));
+ }
+ debug!("parse_lifetime_defs ret {:?}", res);
return Ok(res);
}
}
let pth = self.parse_path(PathStyle::Expr)?;
// `!`, as an operator, is prefix, so we know this isn't that
- if self.check(&token::Not) {
+ if self.eat(&token::Not) {
// MACRO INVOCATION expression
- self.bump();
-
let delim = self.expect_open_delim()?;
- let tts = self.parse_seq_to_end(
- &token::CloseDelim(delim),
- SeqSep::none(),
- |p| p.parse_token_tree())?;
+ let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
+ SeqSep::none(),
+ |p| p.parse_token_tree())?;
let hi = self.last_span.hi;
-
- return Ok(self.mk_mac_expr(lo,
- hi,
- Mac_ { path: pth, tts: tts },
- attrs));
+ return Ok(self.mk_mac_expr(lo, hi, Mac_ { path: pth, tts: tts }, attrs));
}
if self.check(&token::OpenDelim(token::Brace)) {
// This is a struct literal, unless we're prohibited
Restrictions::RESTRICTION_NO_STRUCT_LITERAL
);
if !prohibited {
- // It's a struct literal.
- self.bump();
- let mut fields = Vec::new();
- let mut base = None;
-
- attrs.extend(self.parse_inner_attributes()?);
-
- while self.token != token::CloseDelim(token::Brace) {
- if self.eat(&token::DotDot) {
- match self.parse_expr() {
- Ok(e) => {
- base = Some(e);
- }
- Err(mut e) => {
- e.emit();
- self.recover_stmt();
- }
- }
- break;
- }
-
- match self.parse_field() {
- Ok(f) => fields.push(f),
- Err(mut e) => {
- e.emit();
- self.recover_stmt();
- break;
- }
- }
-
- match self.expect_one_of(&[token::Comma],
- &[token::CloseDelim(token::Brace)]) {
- Ok(()) => {}
- Err(mut e) => {
- e.emit();
- self.recover_stmt();
- break;
- }
- }
- }
-
- hi = self.span.hi;
- self.expect(&token::CloseDelim(token::Brace))?;
- ex = ExprKind::Struct(pth, fields, base);
- return Ok(self.mk_expr(lo, hi, ex, attrs));
+ return self.parse_struct_expr(lo, pth, attrs);
}
}
return Ok(self.mk_expr(lo, hi, ex, attrs));
}
+ fn parse_struct_expr(&mut self, lo: BytePos, pth: ast::Path, mut attrs: ThinVec<Attribute>)
+ -> PResult<'a, P<Expr>> {
+ self.bump();
+ let mut fields = Vec::new();
+ let mut base = None;
+
+ attrs.extend(self.parse_inner_attributes()?);
+
+ while self.token != token::CloseDelim(token::Brace) {
+ if self.eat(&token::DotDot) {
+ match self.parse_expr() {
+ Ok(e) => {
+ base = Some(e);
+ }
+ Err(mut e) => {
+ e.emit();
+ self.recover_stmt();
+ }
+ }
+ break;
+ }
+
+ match self.parse_field() {
+ Ok(f) => fields.push(f),
+ Err(mut e) => {
+ e.emit();
+ self.recover_stmt();
+ break;
+ }
+ }
+
+ match self.expect_one_of(&[token::Comma],
+ &[token::CloseDelim(token::Brace)]) {
+ Ok(()) => {}
+ Err(mut e) => {
+ e.emit();
+ self.recover_stmt();
+ break;
+ }
+ }
+ }
+
+ let hi = self.span.hi;
+ self.expect(&token::CloseDelim(token::Brace))?;
+ return Ok(self.mk_expr(lo, hi, ExprKind::Struct(pth, fields, base), attrs));
+ }
+
fn parse_or_use_outer_attributes(&mut self,
already_parsed_attrs: Option<ThinVec<Attribute>>)
-> PResult<'a, ThinVec<Attribute>> {
let lo = self.span.lo;
let pat;
match self.token {
- token::Underscore => {
- // Parse _
- self.bump();
- pat = PatKind::Wild;
- }
- token::BinOp(token::And) | token::AndAnd => {
- // Parse &pat / &mut pat
- self.expect_and()?;
- let mutbl = self.parse_mutability()?;
- if let token::Lifetime(ident) = self.token {
- return Err(self.fatal(&format!("unexpected lifetime `{}` in pattern", ident)));
+ token::Underscore => {
+ // Parse _
+ self.bump();
+ pat = PatKind::Wild;
+ }
+ token::BinOp(token::And) | token::AndAnd => {
+ // Parse &pat / &mut pat
+ self.expect_and()?;
+ let mutbl = self.parse_mutability()?;
+ if let token::Lifetime(ident) = self.token {
+ return Err(self.fatal(&format!("unexpected lifetime `{}` in pattern", ident)));
+ }
+ let subpat = self.parse_pat()?;
+ pat = PatKind::Ref(subpat, mutbl);
+ }
+ token::OpenDelim(token::Paren) => {
+ // Parse (pat,pat,pat,...) as tuple pattern
+ self.bump();
+ let (fields, ddpos) = self.parse_pat_tuple_elements(true)?;
+ self.expect(&token::CloseDelim(token::Paren))?;
+ pat = PatKind::Tuple(fields, ddpos);
+ }
+ token::OpenDelim(token::Bracket) => {
+ // Parse [pat,pat,...] as slice pattern
+ self.bump();
+ let (before, slice, after) = self.parse_pat_vec_elements()?;
+ self.expect(&token::CloseDelim(token::Bracket))?;
+ pat = PatKind::Slice(before, slice, after);
}
-
- let subpat = self.parse_pat()?;
- pat = PatKind::Ref(subpat, mutbl);
- }
- token::OpenDelim(token::Paren) => {
- // Parse (pat,pat,pat,...) as tuple pattern
- self.bump();
- let (fields, ddpos) = self.parse_pat_tuple_elements(true)?;
- self.expect(&token::CloseDelim(token::Paren))?;
- pat = PatKind::Tuple(fields, ddpos);
- }
- token::OpenDelim(token::Bracket) => {
- // Parse [pat,pat,...] as slice pattern
- self.bump();
- let (before, slice, after) = self.parse_pat_vec_elements()?;
- self.expect(&token::CloseDelim(token::Bracket))?;
- pat = PatKind::Vec(before, slice, after);
- }
- _ => {
// At this point, token != _, &, &&, (, [
- if self.eat_keyword(keywords::Mut) {
+ _ => if self.eat_keyword(keywords::Mut) {
// Parse mut ident @ pat
pat = self.parse_pat_ident(BindingMode::ByValue(Mutability::Mutable))?;
} else if self.eat_keyword(keywords::Ref) {
// Parse box pat
let subpat = self.parse_pat()?;
pat = PatKind::Box(subpat);
+ } else if self.token.is_ident() && self.token.is_path_start() &&
+ self.look_ahead(1, |t| match *t {
+ token::OpenDelim(token::Paren) | token::OpenDelim(token::Brace) |
+ token::DotDotDot | token::ModSep | token::Not => false,
+ _ => true,
+ }) {
+ // Parse ident @ pat
+ // This can give false positives and parse nullary enums,
+ // they are dealt with later in resolve
+ let binding_mode = BindingMode::ByValue(Mutability::Immutable);
+ pat = self.parse_pat_ident(binding_mode)?;
} else if self.token.is_path_start() {
// Parse pattern starting with a path
- if self.token.is_ident() && self.look_ahead(1, |t| *t != token::DotDotDot &&
- *t != token::OpenDelim(token::Brace) &&
- *t != token::OpenDelim(token::Paren) &&
- *t != token::ModSep) {
- // Plain idents have some extra abilities here compared to general paths
- if self.look_ahead(1, |t| *t == token::Not) {
+ let (qself, path) = if self.eat_lt() {
+ // Parse a qualified path
+ let (qself, path) = self.parse_qualified_path(PathStyle::Expr)?;
+ (Some(qself), path)
+ } else {
+ // Parse an unqualified path
+ (None, self.parse_path(PathStyle::Expr)?)
+ };
+ match self.token {
+ token::Not if qself.is_none() => {
// Parse macro invocation
- let path = self.parse_ident_into_path()?;
self.bump();
let delim = self.expect_open_delim()?;
- let tts = self.parse_seq_to_end(
- &token::CloseDelim(delim),
- SeqSep::none(), |p| p.parse_token_tree())?;
- let mac = Mac_ { path: path, tts: tts };
- pat = PatKind::Mac(codemap::Spanned {node: mac,
- span: mk_sp(lo, self.last_span.hi)});
- } else {
- // Parse ident @ pat
- // This can give false positives and parse nullary enums,
- // they are dealt with later in resolve
- let binding_mode = BindingMode::ByValue(Mutability::Immutable);
- pat = self.parse_pat_ident(binding_mode)?;
+ let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
+ SeqSep::none(),
+ |p| p.parse_token_tree())?;
+ let mac = spanned(lo, self.last_span.hi, Mac_ { path: path, tts: tts });
+ pat = PatKind::Mac(mac);
}
- } else {
- let (qself, path) = if self.eat_lt() {
- // Parse a qualified path
- let (qself, path) =
- self.parse_qualified_path(PathStyle::Expr)?;
- (Some(qself), path)
- } else {
- // Parse an unqualified path
- (None, self.parse_path(PathStyle::Expr)?)
- };
- match self.token {
- token::DotDotDot => {
+ token::DotDotDot => {
// Parse range
let hi = self.last_span.hi;
let begin =
self.bump();
let end = self.parse_pat_range_end()?;
pat = PatKind::Range(begin, end);
- }
- token::OpenDelim(token::Brace) => {
- if qself.is_some() {
+ }
+ token::OpenDelim(token::Brace) => {
+ if qself.is_some() {
return Err(self.fatal("unexpected `{` after qualified path"));
}
// Parse struct pattern
});
self.bump();
pat = PatKind::Struct(path, fields, etc);
- }
- token::OpenDelim(token::Paren) => {
+ }
+ token::OpenDelim(token::Paren) => {
if qself.is_some() {
return Err(self.fatal("unexpected `(` after qualified path"));
}
let (fields, ddpos) = self.parse_pat_tuple_elements(false)?;
self.expect(&token::CloseDelim(token::Paren))?;
pat = PatKind::TupleStruct(path, fields, ddpos)
- }
- _ => {
- pat = PatKind::Path(qself, path);
- }
}
+ _ => pat = PatKind::Path(qself, path),
}
} else {
// Try to parse everything else as literal with optional minus
}
}
}
- }
}
let hi = self.last_span.hi;
node: StmtKind::Local(self.parse_local(attrs.into())?),
span: mk_sp(lo, self.last_span.hi),
}
- } else if self.token.is_ident()
- && !self.token.is_any_keyword()
- && self.look_ahead(1, |t| *t == token::Not) {
- // it's a macro invocation:
+ } else if self.token.is_path_start() && self.token != token::Lt && {
+ !self.check_keyword(keywords::Union) ||
+ self.look_ahead(1, |t| *t == token::Not || *t == token::ModSep)
+ } {
+ let pth = self.parse_path(PathStyle::Expr)?;
- // Potential trouble: if we allow macros with paths instead of
- // idents, we'd need to look ahead past the whole path here...
- let pth = self.parse_ident_into_path()?;
- self.bump();
+ if !self.eat(&token::Not) {
+ let expr = if self.check(&token::OpenDelim(token::Brace)) {
+ self.parse_struct_expr(lo, pth, ThinVec::new())?
+ } else {
+ let hi = self.last_span.hi;
+ self.mk_expr(lo, hi, ExprKind::Path(None, pth), ThinVec::new())
+ };
+
+ let expr = self.with_res(Restrictions::RESTRICTION_STMT_EXPR, |this| {
+ let expr = this.parse_dot_or_call_expr_with(expr, lo, attrs.into())?;
+ this.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(expr))
+ })?;
+
+ return Ok(Some(Stmt {
+ id: ast::DUMMY_NODE_ID,
+ node: StmtKind::Expr(expr),
+ span: mk_sp(lo, self.last_span.hi),
+ }));
+ }
+ // it's a macro invocation
let id = match self.token {
token::OpenDelim(_) => keywords::Invalid.ident(), // no special identifier
_ => self.parse_ident()?,
}
/// Matches typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?
- fn parse_ty_param(&mut self) -> PResult<'a, TyParam> {
+ fn parse_ty_param(&mut self, preceding_attrs: Vec<ast::Attribute>) -> PResult<'a, TyParam> {
let span = self.span;
let ident = self.parse_ident()?;
};
Ok(TyParam {
+ attrs: preceding_attrs.into(),
ident: ident,
id: ast::DUMMY_NODE_ID,
bounds: bounds,
let span_lo = self.span.lo;
if self.eat(&token::Lt) {
- let lifetime_defs = self.parse_lifetime_defs()?;
+ // Upon encountering attribute in generics list, we do not
+ // know if it is attached to lifetime or to type param.
+ //
+ // Solution: 1. eagerly parse attributes in tandem with
+ // lifetime defs, 2. store last set of parsed (and unused)
+ // attributes in `attrs`, and 3. pass in those attributes
+ // when parsing formal type param after lifetime defs.
+ let mut attrs = vec![];
+ let lifetime_defs = self.parse_lifetime_defs(Some(&mut attrs))?;
let mut seen_default = false;
+ let mut post_lifetime_attrs = Some(attrs);
let ty_params = self.parse_seq_to_gt(Some(token::Comma), |p| {
p.forbid_lifetime()?;
- let ty_param = p.parse_ty_param()?;
+ // Move out of `post_lifetime_attrs` if present. Otherwise
+ // (not the first type param): parse attributes anew.
+ let attrs = match post_lifetime_attrs.as_mut() {
+ None => p.parse_outer_attributes()?,
+ Some(attrs) => mem::replace(attrs, vec![]),
+ };
+ post_lifetime_attrs = None;
+ let ty_param = p.parse_ty_param(attrs)?;
if ty_param.default.is_some() {
seen_default = true;
} else if seen_default {
}
Ok(ty_param)
})?;
+ if let Some(attrs) = post_lifetime_attrs {
+ if !attrs.is_empty() {
+ self.span_err(attrs[0].span,
+ "trailing attribute after lifetime parameters");
+ }
+ }
Ok(ast::Generics {
lifetimes: lifetime_defs,
ty_params: ty_params,
let bound_lifetimes = if self.eat_keyword(keywords::For) {
// Higher ranked constraint.
self.expect(&token::Lt)?;
- let lifetime_defs = self.parse_lifetime_defs()?;
+ let lifetime_defs = self.parse_lifetime_defs(None)?;
self.expect_gt()?;
lifetime_defs
} else {
fn parse_impl_method(&mut self, vis: &Visibility)
-> PResult<'a, (Ident, Vec<ast::Attribute>, ast::ImplItemKind)> {
// code copied from parse_macro_use_or_failure... abstraction!
- if !self.token.is_any_keyword()
- && self.look_ahead(1, |t| *t == token::Not)
- && (self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
- || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
+ if self.token.is_path_start() {
// method macro.
let last_span = self.last_span;
self.complain_if_pub_macro(&vis, last_span);
let lo = self.span.lo;
- let pth = self.parse_ident_into_path()?;
+ let pth = self.parse_path(PathStyle::Mod)?;
self.expect(&token::Not)?;
// eat a matched-delimiter token tree:
let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
SeqSep::none(),
|p| p.parse_token_tree())?;
- let m_ = Mac_ { path: pth, tts: tts };
- let m: ast::Mac = codemap::Spanned { node: m_,
- span: mk_sp(lo,
- self.last_span.hi) };
if delim != token::Brace {
self.expect(&token::Semi)?
}
- Ok((keywords::Invalid.ident(), vec![], ast::ImplItemKind::Macro(m)))
+
+ let mac = spanned(lo, self.last_span.hi, Mac_ { path: pth, tts: tts });
+ Ok((keywords::Invalid.ident(), vec![], ast::ImplItemKind::Macro(mac)))
} else {
let (constness, unsafety, abi) = self.parse_fn_front_matter()?;
let ident = self.parse_ident()?;
fn parse_late_bound_lifetime_defs(&mut self) -> PResult<'a, Vec<ast::LifetimeDef>> {
if self.eat_keyword(keywords::For) {
self.expect(&token::Lt)?;
- let lifetime_defs = self.parse_lifetime_defs()?;
+ let lifetime_defs = self.parse_lifetime_defs(None)?;
self.expect_gt()?;
Ok(lifetime_defs)
} else {
}
} else {
let directory = self.directory.clone();
- self.push_directory(id, &outer_attrs);
+ let restrictions = self.push_directory(id, &outer_attrs);
self.expect(&token::OpenDelim(token::Brace))?;
let mod_inner_lo = self.span.lo;
let attrs = self.parse_inner_attributes()?;
- let m = self.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)?;
+ let m = self.with_res(restrictions, |this| {
+ this.parse_mod_items(&token::CloseDelim(token::Brace), mod_inner_lo)
+ })?;
self.directory = directory;
Ok((id, ItemKind::Mod(m), Some(attrs)))
}
}
- fn push_directory(&mut self, id: Ident, attrs: &[Attribute]) {
- let default_path = self.id_to_interned_str(id);
- let file_path = match ::attr::first_attr_value_str_by_name(attrs, "path") {
- Some(d) => d,
- None => default_path,
- };
- self.directory.push(&*file_path)
+ fn push_directory(&mut self, id: Ident, attrs: &[Attribute]) -> Restrictions {
+ if let Some(path) = ::attr::first_attr_value_str_by_name(attrs, "path") {
+ self.directory.push(&*path);
+ self.restrictions - Restrictions::NO_NONINLINE_MOD
+ } else {
+ let default_path = self.id_to_interned_str(id);
+ self.directory.push(&*default_path);
+ self.restrictions
+ }
}
pub fn submod_path_from_attr(attrs: &[ast::Attribute], dir_path: &Path) -> Option<PathBuf> {
lo: BytePos,
visibility: Visibility
) -> PResult<'a, Option<P<Item>>> {
- if macros_allowed && !self.token.is_any_keyword()
- && self.look_ahead(1, |t| *t == token::Not)
- && (self.look_ahead(2, |t| t.is_ident())
- || self.look_ahead(2, |t| *t == token::OpenDelim(token::Paren))
- || self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))) {
+ if macros_allowed && self.token.is_path_start() {
// MACRO INVOCATION ITEM
let last_span = self.last_span;
let mac_lo = self.span.lo;
// item macro.
- let pth = self.parse_ident_into_path()?;
+ let pth = self.parse_path(PathStyle::Mod)?;
self.expect(&token::Not)?;
// a 'special' identifier (like what `macro_rules!` uses)
let tts = self.parse_seq_to_end(&token::CloseDelim(delim),
SeqSep::none(),
|p| p.parse_token_tree())?;
- // single-variant-enum... :
- let m = Mac_ { path: pth, tts: tts };
- let m: ast::Mac = codemap::Spanned { node: m,
- span: mk_sp(mac_lo,
- self.last_span.hi) };
-
if delim != token::Brace {
if !self.eat(&token::Semi) {
let last_span = self.last_span;
}
}
- let item_ = ItemKind::Mac(m);
- let last_span = self.last_span;
- let item = self.mk_item(lo,
- last_span.hi,
- id,
- item_,
- visibility,
- attrs);
+ let hi = self.last_span.hi;
+ let mac = spanned(mac_lo, hi, Mac_ { path: pth, tts: tts });
+ let item = self.mk_item(lo, hi, id, ItemKind::Mac(mac), visibility, attrs);
return Ok(Some(item));
}
_ => Err(self.fatal("expected string literal"))
}
}
-
- pub fn ensure_complete_parse<F>(&mut self, allow_semi: bool, on_err: F)
- where F: FnOnce(&Parser)
- {
- if allow_semi && self.token == token::Semi {
- self.bump();
- }
- if self.token != token::Eof {
- on_err(self);
- }
- }
}
try!(self.maybe_print_comment(ty.span.lo));
try!(self.ibox(0));
match ty.node {
- ast::TyKind::Vec(ref ty) => {
+ ast::TyKind::Slice(ref ty) => {
try!(word(&mut self.s, "["));
try!(self.print_type(&ty));
try!(word(&mut self.s, "]"));
ast::TyKind::ImplTrait(ref bounds) => {
try!(self.print_bounds("impl ", &bounds[..]));
}
- ast::TyKind::FixedLengthVec(ref ty, ref v) => {
+ ast::TyKind::Array(ref ty, ref v) => {
try!(word(&mut self.s, "["));
try!(self.print_type(&ty));
try!(word(&mut self.s, "; "));
try!(word(&mut self.s, "..."));
try!(self.print_expr(&end));
}
- PatKind::Vec(ref before, ref slice, ref after) => {
+ PatKind::Slice(ref before, ref slice, ref after) => {
try!(word(&mut self.s, "["));
try!(self.commasep(Inconsistent,
&before[..],
use std::slice;
use std::mem;
use std::vec;
-use attr;
+use attr::{self, HasAttrs};
use syntax_pos::{self, DUMMY_SP, NO_EXPANSION, Span, FileMap, BytePos};
use std::rc::Rc;
}
debug!("current path: {}", path_name_i(&self.cx.path));
- let i = if is_test_fn(&self.cx, &i) || is_bench_fn(&self.cx, &i) {
+ if is_test_fn(&self.cx, &i) || is_bench_fn(&self.cx, &i) {
match i.node {
ast::ItemKind::Fn(_, ast::Unsafety::Unsafe, _, _, _, _) => {
let diag = self.cx.span_diagnostic;
};
self.cx.testfns.push(test);
self.tests.push(i.ident);
- // debug!("have {} test/bench functions",
- // cx.testfns.len());
-
- // Make all tests public so we can call them from outside
- // the module (note that the tests are re-exported and must
- // be made public themselves to avoid privacy errors).
- i.map(|mut i| {
- i.vis = ast::Visibility::Public;
- i
- })
}
}
- } else {
- i
- };
+ }
+ let mut item = i.unwrap();
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
- let res = match i.node {
- ast::ItemKind::Mod(..) => fold::noop_fold_item(i, self),
- _ => SmallVector::one(i),
- };
+ if let ast::ItemKind::Mod(module) = item.node {
+ let tests = mem::replace(&mut self.tests, Vec::new());
+ let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
+ let mut mod_folded = fold::noop_fold_mod(module, self);
+ let tests = mem::replace(&mut self.tests, tests);
+ let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
+
+ if !tests.is_empty() || !tested_submods.is_empty() {
+ let (it, sym) = mk_reexport_mod(&mut self.cx, item.id, tests, tested_submods);
+ mod_folded.items.push(it);
+
+ if !self.cx.path.is_empty() {
+ self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
+ } else {
+ debug!("pushing nothing, sym: {:?}", sym);
+ self.cx.toplevel_reexport = Some(sym);
+ }
+ }
+ item.node = ast::ItemKind::Mod(mod_folded);
+ }
if ident.name != keywords::Invalid.name() {
self.cx.path.pop();
}
- res
- }
-
- fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
- let tests = mem::replace(&mut self.tests, Vec::new());
- let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
- let mut mod_folded = fold::noop_fold_mod(m, self);
- let tests = mem::replace(&mut self.tests, tests);
- let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
-
- if !tests.is_empty() || !tested_submods.is_empty() {
- let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
- mod_folded.items.push(it);
-
- if !self.cx.path.is_empty() {
- self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
- } else {
- debug!("pushing nothing, sym: {:?}", sym);
- self.cx.toplevel_reexport = Some(sym);
- }
- }
-
- mod_folded
+ SmallVector::one(P(item))
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { mac }
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { mac }
}
-fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
+fn mk_reexport_mod(cx: &mut TestCtxt, parent: ast::NodeId, tests: Vec<ast::Ident>,
tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
let super_ = token::str_to_ident("super");
+ // Generate imports with `#[allow(private_in_public)]` to work around issue #36768.
+ let allow_private_in_public = cx.ext_cx.attribute(DUMMY_SP, cx.ext_cx.meta_list(
+ DUMMY_SP,
+ InternedString::new("allow"),
+ vec![cx.ext_cx.meta_list_item_word(DUMMY_SP, InternedString::new("private_in_public"))],
+ ));
let items = tests.into_iter().map(|r| {
cx.ext_cx.item_use_simple(DUMMY_SP, ast::Visibility::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
+ .map_attrs(|_| vec![allow_private_in_public.clone()])
}).chain(tested_submods.into_iter().map(|(r, sym)| {
let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.item_use_simple_(DUMMY_SP, ast::Visibility::Public, r, path)
+ .map_attrs(|_| vec![allow_private_in_public.clone()])
})).collect();
let reexport_mod = ast::Mod {
};
let sym = token::gensym_ident("__test_reexports");
+ let parent = if parent == ast::DUMMY_NODE_ID { ast::CRATE_NODE_ID } else { parent };
+ cx.ext_cx.current_expansion.mark = cx.ext_cx.resolver.get_module_scope(parent);
let it = cx.ext_cx.monotonic_expander().fold_item(P(ast::Item {
ident: sym.clone(),
attrs: Vec::new(),
let static_lt = ecx.lifetime(sp, keywords::StaticLifetime.name());
// &'static [self::test::TestDescAndFn]
let static_type = ecx.ty_rptr(sp,
- ecx.ty(sp, ast::TyKind::Vec(struct_type)),
+ ecx.ty(sp, ast::TyKind::Slice(struct_type)),
Some(static_lt),
ast::Mutability::Immutable);
// static TESTS: $static_type = &[...];
pub fn walk_lifetime_def<V: Visitor>(visitor: &mut V, lifetime_def: &LifetimeDef) {
visitor.visit_lifetime(&lifetime_def.lifetime);
walk_list!(visitor, visit_lifetime, &lifetime_def.bounds);
+ walk_list!(visitor, visit_attribute, &*lifetime_def.attrs);
}
pub fn walk_poly_trait_ref<V>(visitor: &mut V, trait_ref: &PolyTraitRef, _: &TraitBoundModifier)
pub fn walk_ty<V: Visitor>(visitor: &mut V, typ: &Ty) {
match typ.node {
- TyKind::Vec(ref ty) | TyKind::Paren(ref ty) => {
+ TyKind::Slice(ref ty) | TyKind::Paren(ref ty) => {
visitor.visit_ty(ty)
}
TyKind::Ptr(ref mutable_type) => {
visitor.visit_ty(ty);
walk_list!(visitor, visit_ty_param_bound, bounds);
}
- TyKind::FixedLengthVec(ref ty, ref expression) => {
+ TyKind::Array(ref ty, ref expression) => {
visitor.visit_ty(ty);
visitor.visit_expr(expression)
}
visitor.visit_expr(upper_bound)
}
PatKind::Wild => (),
- PatKind::Vec(ref prepatterns, ref slice_pattern, ref postpatterns) => {
+ PatKind::Slice(ref prepatterns, ref slice_pattern, ref postpatterns) => {
walk_list!(visitor, visit_pat, prepatterns);
walk_list!(visitor, visit_pat, slice_pattern);
walk_list!(visitor, visit_pat, postpatterns);
visitor.visit_ident(param.span, param.ident);
walk_list!(visitor, visit_ty_param_bound, ¶m.bounds);
walk_list!(visitor, visit_ty, ¶m.default);
+ walk_list!(visitor, visit_attribute, &*param.attrs);
}
walk_list!(visitor, visit_lifetime_def, &generics.lifetimes);
for predicate in &generics.where_clause.predicates {
tts: &[tokenstream::TokenTree])
-> Box<base::MacResult + 'cx> {
if !cx.ecfg.enable_asm() {
- feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
+ feature_gate::emit_feature_err(&cx.parse_sess,
"asm",
sp,
feature_gate::GateIssue::Language,
tts: &[TokenTree])
-> Box<base::MacResult + 'cx> {
if !cx.ecfg.enable_concat_idents() {
- feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
+ feature_gate::emit_feature_err(&cx.parse_sess,
"concat_idents",
sp,
feature_gate::GateIssue::Language,
bounds.push((*declared_bound).clone());
}
- cx.typaram(self.span, ty_param.ident, P::from_vec(bounds), None)
+ cx.typaram(self.span, ty_param.ident, vec![], P::from_vec(bounds), None)
}));
// and similarly for where clauses
fn mk_ty_param(cx: &ExtCtxt,
span: Span,
name: &str,
+ attrs: &[ast::Attribute],
bounds: &[Path],
self_ident: Ident,
self_generics: &Generics)
cx.typarambound(path)
})
.collect();
- cx.typaram(span, cx.ident_of(name), bounds, None)
+ cx.typaram(span, cx.ident_of(name), attrs.to_owned(), bounds, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>, span: Span)
let bounds = bounds.iter()
.map(|b| cx.lifetime(span, cx.ident_of(*b).name))
.collect();
- cx.lifetime_def(span, cx.ident_of(*lt).name, bounds)
+ cx.lifetime_def(span, cx.ident_of(*lt).name, vec![], bounds)
})
.collect();
let ty_params = self.bounds
.map(|t| {
match *t {
(ref name, ref bounds) => {
- mk_ty_param(cx, span, *name, bounds, self_ty, self_generics)
+ mk_ty_param(cx, span, *name, &[], bounds, self_ty, self_generics)
}
}
})
cx.span_err(mitem.span, "unexpected value in `derive`");
}
- let traits = mitem.meta_item_list().unwrap_or(&[]);
+ let mut traits = mitem.meta_item_list().unwrap_or(&[]).to_owned();
if traits.is_empty() {
cx.span_warn(mitem.span, "empty trait list in `derive`");
}
+ // First, weed out malformed #[derive]
+ traits.retain(|titem| {
+ if titem.word().is_none() {
+ cx.span_err(titem.span, "malformed `derive` entry");
+ false
+ } else {
+ true
+ }
+ });
+
+ // Next, check for old-style #[derive(Foo)]
+ //
+ // These all get expanded to `#[derive_Foo]` and will get expanded first. If
+ // we actually add any attributes here then we return to get those expanded
+ // and then eventually we'll come back to finish off the other derive modes.
+ let mut new_attributes = Vec::new();
+ traits.retain(|titem| {
+ let tword = titem.word().unwrap();
+ let tname = tword.name();
+
+ let derive_mode = ast::Ident::with_empty_ctxt(intern(&tname));
+ let derive_mode = cx.resolver.resolve_derive_mode(derive_mode);
+ if is_builtin_trait(&tname) || derive_mode.is_some() {
+ return true
+ }
+
+ if !cx.ecfg.enable_custom_derive() {
+ feature_gate::emit_feature_err(&cx.parse_sess,
+ "custom_derive",
+ titem.span,
+ feature_gate::GateIssue::Language,
+ feature_gate::EXPLAIN_CUSTOM_DERIVE);
+ } else {
+ let name = intern_and_get_ident(&format!("derive_{}", tname));
+ let mitem = cx.meta_word(titem.span, name);
+ new_attributes.push(cx.attribute(mitem.span, mitem));
+ }
+ false
+ });
+ if new_attributes.len() > 0 {
+ item = item.map(|mut i| {
+ let list = cx.meta_list(mitem.span,
+ intern_and_get_ident("derive"),
+ traits);
+ i.attrs.extend(new_attributes);
+ i.attrs.push(cx.attribute(mitem.span, list));
+ i
+ });
+ return vec![Annotatable::Item(item)]
+ }
+
+ // Now check for macros-1.1 style custom #[derive].
+ //
+ // Expand each of them in order given, but *before* we expand any built-in
+ // derive modes. The logic here is to:
+ //
+ // 1. Collect the remaining `#[derive]` annotations into a list. If
+ // there are any left, attach a `#[derive]` attribute to the item
+ // that we're currently expanding with the remaining derive modes.
+ // 2. Manufacture a `#[derive(Foo)]` attribute to pass to the expander.
+ // 3. Expand the current item we're expanding, getting back a list of
+ // items that replace it.
+ // 4. Extend the returned list with the current list of items we've
+ // collected so far.
+ // 5. Return everything!
+ //
+ // If custom derive extensions end up threading through the `#[derive]`
+ // attribute, we'll get called again later on to continue expanding
+ // those modes.
+ let macros_11_derive = traits.iter()
+ .cloned()
+ .enumerate()
+ .filter(|&(_, ref name)| !is_builtin_trait(&name.name().unwrap()))
+ .next();
+ if let Some((i, titem)) = macros_11_derive {
+ let tname = ast::Ident::with_empty_ctxt(intern(&titem.name().unwrap()));
+ let ext = cx.resolver.resolve_derive_mode(tname).unwrap();
+ traits.remove(i);
+ if traits.len() > 0 {
+ item = item.map(|mut i| {
+ let list = cx.meta_list(mitem.span,
+ intern_and_get_ident("derive"),
+ traits);
+ i.attrs.push(cx.attribute(mitem.span, list));
+ i
+ });
+ }
+ let titem = cx.meta_list_item_word(titem.span, titem.name().unwrap());
+ let mitem = cx.meta_list(titem.span,
+ intern_and_get_ident("derive"),
+ vec![titem]);
+ let item = Annotatable::Item(item);
+ return ext.expand(cx, mitem.span, &mitem, item)
+ }
+
+ // Ok, at this point we know that there are no old-style `#[derive_Foo]` nor
+ // any macros-1.1 style `#[derive(Foo)]`. Expand all built-in traits here.
+
// RFC #1445. `#[derive(PartialEq, Eq)]` adds a (trusted)
// `#[structural_match]` attribute.
if traits.iter().filter_map(|t| t.name()).any(|t| t == "PartialEq") &&
});
}
- let mut other_items = Vec::new();
-
- let mut iter = traits.iter();
- while let Some(titem) = iter.next() {
-
- let tword = match titem.word() {
- Some(name) => name,
- None => {
- cx.span_err(titem.span, "malformed `derive` entry");
- continue
- }
+ let mut items = Vec::new();
+ for titem in traits.iter() {
+ let tname = titem.word().unwrap().name();
+ let name = intern_and_get_ident(&format!("derive({})", tname));
+ let mitem = cx.meta_word(titem.span, name);
+
+ let span = Span {
+ expn_id: cx.codemap().record_expansion(codemap::ExpnInfo {
+ call_site: titem.span,
+ callee: codemap::NameAndSpan {
+ format: codemap::MacroAttribute(intern(&format!("derive({})", tname))),
+ span: Some(titem.span),
+ allow_internal_unstable: true,
+ },
+ }),
+ ..titem.span
};
- let tname = tword.name();
- // If this is a built-in derive mode, then we expand it immediately
- // here.
- if is_builtin_trait(&tname) {
- let name = intern_and_get_ident(&format!("derive({})", tname));
- let mitem = cx.meta_word(titem.span, name);
-
- let span = Span {
- expn_id: cx.codemap().record_expansion(codemap::ExpnInfo {
- call_site: titem.span,
- callee: codemap::NameAndSpan {
- format: codemap::MacroAttribute(intern(&format!("derive({})", tname))),
- span: Some(titem.span),
- allow_internal_unstable: true,
- },
- }),
- ..titem.span
- };
-
- let my_item = Annotatable::Item(item);
- expand_builtin(&tname, cx, span, &mitem, &my_item, &mut |a| {
- other_items.push(a);
- });
- item = my_item.expect_item();
-
- // Otherwise if this is a `rustc_macro`-style derive mode, we process it
- // here. The logic here is to:
- //
- // 1. Collect the remaining `#[derive]` annotations into a list. If
- // there are any left, attach a `#[derive]` attribute to the item
- // that we're currently expanding with the remaining derive modes.
- // 2. Manufacture a `#[derive(Foo)]` attribute to pass to the expander.
- // 3. Expand the current item we're expanding, getting back a list of
- // items that replace it.
- // 4. Extend the returned list with the current list of items we've
- // collected so far.
- // 5. Return everything!
- //
- // If custom derive extensions end up threading through the `#[derive]`
- // attribute, we'll get called again later on to continue expanding
- // those modes.
- } else if let Some(ext) =
- cx.resolver.resolve_derive_mode(ast::Ident::with_empty_ctxt(intern(&tname))) {
- let remaining_derives = iter.cloned().collect::<Vec<_>>();
- if remaining_derives.len() > 0 {
- let list = cx.meta_list(titem.span,
- intern_and_get_ident("derive"),
- remaining_derives);
- let attr = cx.attribute(titem.span, list);
- item = item.map(|mut i| {
- i.attrs.push(attr);
- i
- });
- }
- let titem = cx.meta_list_item_word(titem.span, tname.clone());
- let mitem = cx.meta_list(titem.span,
- intern_and_get_ident("derive"),
- vec![titem]);
- let item = Annotatable::Item(item);
- let mut items = ext.expand(cx, mitem.span, &mitem, item);
- items.extend(other_items);
- return items
-
- // If we've gotten this far then it means that we're in the territory of
- // the old custom derive mechanism. If the feature isn't enabled, we
- // issue an error, otherwise manufacture the `derive_Foo` attribute.
- } else if !cx.ecfg.enable_custom_derive() {
- feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
- "custom_derive",
- titem.span,
- feature_gate::GateIssue::Language,
- feature_gate::EXPLAIN_CUSTOM_DERIVE);
- } else {
- let name = intern_and_get_ident(&format!("derive_{}", tname));
- let mitem = cx.meta_word(titem.span, name);
- item = item.map(|mut i| {
- i.attrs.push(cx.attribute(mitem.span, mitem));
- i
- });
- }
+ let my_item = Annotatable::Item(item);
+ expand_builtin(&tname, cx, span, &mitem, &my_item, &mut |a| {
+ items.push(a);
+ });
+ item = my_item.expect_item();
}
- other_items.insert(0, Annotatable::Item(item));
- return other_items
+ items.insert(0, Annotatable::Item(item));
+ return items
}
macro_rules! derive_traits {
-> P<ast::Expr> {
let sp = piece_ty.span;
let ty = ecx.ty_rptr(sp,
- ecx.ty(sp, ast::TyKind::Vec(piece_ty)),
+ ecx.ty(sp, ast::TyKind::Slice(piece_ty)),
Some(ecx.lifetime(sp, keywords::StaticLifetime.name())),
ast::Mutability::Immutable);
let slice = ecx.expr_vec_slice(sp, pieces);
use std::rc::Rc;
use syntax::ast;
use syntax::ext::base::{MacroExpanderFn, NormalTT, IdentTT, MultiModifier};
-use syntax::ext::hygiene::Mark;
use syntax::ext::tt::macro_rules::MacroRulesExpander;
use syntax::parse::token::intern;
pub fn register_builtins(resolver: &mut syntax::ext::base::Resolver, enable_quotes: bool) {
let mut register = |name, ext| {
- resolver.add_ext(Mark::root(), ast::Ident::with_empty_ctxt(intern(name)), Rc::new(ext));
+ resolver.add_ext(ast::Ident::with_empty_ctxt(intern(name)), Rc::new(ext));
};
register("macro_rules", IdentTT(Box::new(MacroRulesExpander), None, false));
tts: &[tokenstream::TokenTree])
-> Box<base::MacResult + 'cx> {
if !cx.ecfg.enable_log_syntax() {
- feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
+ feature_gate::emit_feature_err(&cx.parse_sess,
"log_syntax",
sp,
feature_gate::GateIssue::Language,
tt: &[TokenTree])
-> Box<base::MacResult + 'static> {
if !cx.ecfg.enable_trace_macros() {
- feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
+ feature_gate::emit_feature_err(&cx.parse_sess,
"trace_macros",
sp,
feature_gate::GateIssue::Language,
// According to /etc/terminfo/README, after looking at
// ~/.terminfo, ncurses will search /etc/terminfo, then
// /lib/terminfo, and eventually /usr/share/terminfo.
+ // On Haiku the database can be found at /boot/system/data/terminfo
Err(..) => {
dirs_to_search.push(PathBuf::from("/etc/terminfo"));
dirs_to_search.push(PathBuf::from("/lib/terminfo"));
dirs_to_search.push(PathBuf::from("/usr/share/terminfo"));
+ dirs_to_search.push(PathBuf::from("/boot/system/data/terminfo"));
}
}
}
pub color: ColorConfig,
pub quiet: bool,
pub test_threads: Option<usize>,
+ pub skip: Vec<String>,
}
impl TestOpts {
color: AutoColor,
quiet: false,
test_threads: None,
+ skip: vec![],
}
}
}
task, allow printing directly"),
getopts::optopt("", "test-threads", "Number of threads used for running tests \
in parallel", "n_threads"),
+ getopts::optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
+ be used multiple times)","FILTER"),
getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
getopts::optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
color: color,
quiet: quiet,
test_threads: test_threads,
+ skip: matches.opt_strs("skip"),
};
Some(Ok(test_opts))
}
cpus as usize
}
+
+ #[cfg(target_os = "haiku")]
+ fn num_cpus() -> usize {
+ // FIXME: implement
+ 1
+ }
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
}
};
+ // Skip tests that match any of the skip filters
+ filtered = filtered.into_iter()
+ .filter(|t| !opts.skip.iter().any(|sf| t.desc.name.as_slice().contains(&sf[..])))
+ .collect();
+
// Maybe pull out the ignored test and unignore them
filtered = if !opts.run_ignored {
filtered
}
}
- thread::spawn(move || {
- let data = Arc::new(Mutex::new(Vec::new()));
- let data2 = data.clone();
- let cfg = thread::Builder::new().name(match desc.name {
- DynTestName(ref name) => name.clone(),
- StaticTestName(name) => name.to_owned(),
+ // If the platform is single-threaded we're just going to run
+ // the test synchronously, regardless of the concurrency
+ // level.
+ let supports_threads = !cfg!(target_os = "emscripten");
+
+ // Buffer for capturing standard I/O
+ let data = Arc::new(Mutex::new(Vec::new()));
+ let data2 = data.clone();
+
+ if supports_threads {
+ thread::spawn(move || {
+ let cfg = thread::Builder::new().name(match desc.name {
+ DynTestName(ref name) => name.clone(),
+ StaticTestName(name) => name.to_owned(),
+ });
+
+ let result_guard = cfg.spawn(move || {
+ if !nocapture {
+ io::set_print(Some(box Sink(data2.clone())));
+ io::set_panic(Some(box Sink(data2)));
+ }
+ testfn()
+ })
+ .unwrap();
+ let test_result = calc_result(&desc, result_guard.join());
+ let stdout = data.lock().unwrap().to_vec();
+ monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
});
+ } else {
+ let oldio = if !nocapture {
+ Some((
+ io::set_print(Some(box Sink(data2.clone()))),
+ io::set_panic(Some(box Sink(data2)))
+ ))
+ } else {
+ None
+ };
+
+ use std::panic::{catch_unwind, AssertUnwindSafe};
+
+ let result = catch_unwind(AssertUnwindSafe(|| {
+ testfn()
+ }));
- let result_guard = cfg.spawn(move || {
- if !nocapture {
- io::set_print(box Sink(data2.clone()));
- io::set_panic(box Sink(data2));
- }
- testfn()
- })
- .unwrap();
- let test_result = calc_result(&desc, result_guard.join());
+ if let Some((printio, panicio)) = oldio {
+ io::set_print(printio);
+ io::set_panic(panicio);
+ };
+
+ let test_result = calc_result(&desc, result);
let stdout = data.lock().unwrap().to_vec();
monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
- });
+ }
}
match testfn {
///
/// This function is a no-op, and does not even read from `dummy`.
#[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
- target_arch = "asmjs")))]
+ target_arch = "asmjs", target_arch = "wasm32")))]
pub fn black_box<T>(dummy: T) -> T {
// we need to "use" the argument in some way LLVM can't
// introspect.
dummy
}
#[cfg(any(all(target_os = "nacl", target_arch = "le32"),
- target_arch = "asmjs"))]
+ target_arch = "asmjs", target_arch = "wasm32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
dummy
fn main() {
println!("cargo:rustc-cfg=cargobuild");
- let target = env::var("TARGET").unwrap();
+ let target = env::var("TARGET").expect("TARGET was not set");
if target.contains("linux") {
if target.contains("musl") && !target.contains("mips") {
#[cfg(target_arch = "s390x")]
pub const unwinder_private_data_size: usize = 2;
-#[cfg(target_arch = "asmjs")]
+#[cfg(target_os = "emscripten")]
pub const unwinder_private_data_size: usize = 20;
#[repr(C)]
#[cfg_attr(any(all(target_os = "linux", not(target_env = "musl")),
target_os = "freebsd",
target_os = "solaris",
+ target_os = "haiku",
all(target_os = "linux",
target_env = "musl",
not(target_arch = "x86"),
-Subproject commit 7801978ec1f3637fcda1b564048ebc732bf586af
+Subproject commit 3e03f7374169cd41547d75e62ac2ab8a103a913c
-Subproject commit 755bc3db4ff795865ea31b5b4f38ac920d8acacb
+Subproject commit 4f994850808a572e2cc8d43f968893c8e942e9bf
Archive::Child child;
RustArchiveMember(): filename(NULL), name(NULL),
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
child(NULL, NULL, NULL)
#else
child(NULL, NULL)
struct RustArchiveIterator {
Archive::child_iterator cur;
Archive::child_iterator end;
-#if LLVM_VERSION_MINOR >= 9
+#if LLVM_VERSION_GE(3, 9)
Error err;
#endif
};
return nullptr;
}
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
ErrorOr<std::unique_ptr<Archive>> archive_or =
#else
Expected<std::unique_ptr<Archive>> archive_or =
Archive::create(buf_or.get()->getMemBufferRef());
if (!archive_or) {
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
LLVMRustSetLastError(archive_or.getError().message().c_str());
#else
LLVMRustSetLastError(toString(archive_or.takeError()).c_str());
LLVMRustArchiveIteratorNew(LLVMRustArchiveRef ra) {
Archive *ar = ra->getBinary();
RustArchiveIterator *rai = new RustArchiveIterator();
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
rai->cur = ar->child_begin();
#else
rai->cur = ar->child_begin(rai->err);
extern "C" LLVMRustArchiveChildConstRef
LLVMRustArchiveIteratorNext(LLVMRustArchiveIteratorRef rai) {
-#if LLVM_VERSION_MINOR >= 9
+#if LLVM_VERSION_GE(3, 9)
if (rai->err) {
LLVMRustSetLastError(toString(std::move(rai->err)).c_str());
return NULL;
#endif
if (rai->cur == rai->end)
return NULL;
-#if LLVM_VERSION_MINOR == 8
+#if LLVM_VERSION_EQ(3, 8)
const ErrorOr<Archive::Child>* cur = rai->cur.operator->();
if (!*cur) {
LLVMRustSetLastError(cur->getError().message().c_str());
bool WriteSymbtab,
LLVMRustArchiveKind rust_kind) {
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
std::vector<NewArchiveIterator> Members;
#else
std::vector<NewArchiveMember> Members;
auto Member = NewMembers[i];
assert(Member->name);
if (Member->filename) {
-#if LLVM_VERSION_MINOR >= 9
+#if LLVM_VERSION_GE(3, 9)
Expected<NewArchiveMember> MOrErr = NewArchiveMember::getFile(Member->filename, true);
if (!MOrErr) {
LLVMRustSetLastError(toString(MOrErr.takeError()).c_str());
return LLVMRustResult::Failure;
}
Members.push_back(std::move(*MOrErr));
-#elif LLVM_VERSION_MINOR == 8
+#elif LLVM_VERSION_EQ(3, 8)
Members.push_back(NewArchiveIterator(Member->filename));
#else
Members.push_back(NewArchiveIterator(Member->filename, Member->name));
#endif
} else {
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
Members.push_back(NewArchiveIterator(Member->child, Member->name));
#else
Expected<NewArchiveMember> MOrErr = NewArchiveMember::getOldMember(Member->child, true);
#endif
}
}
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
auto pair = writeArchive(Dst, Members, WriteSymbtab, Kind, true, false);
#else
auto pair = writeArchive(Dst, Members, WriteSymbtab, Kind, true);
initializeVectorization(Registry);
initializeIPO(Registry);
initializeAnalysis(Registry);
-#if LLVM_VERSION_MINOR == 7
+#if LLVM_VERSION_EQ(3, 7)
initializeIPA(Registry);
#endif
initializeTransformUtils(Registry);
bool FunctionSections,
bool DataSections) {
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
Reloc::Model RM;
#else
Optional<Reloc::Model> RM;
RM = Reloc::DynamicNoPIC;
break;
default:
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
RM = Reloc::Default;
#endif
break;
}
TargetOptions Options;
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
Options.PositionIndependentExecutable = PositionIndependentExecutable;
#endif
LLVMRustRunRestrictionPass(LLVMModuleRef M, char **symbols, size_t len) {
llvm::legacy::PassManager passes;
-#if LLVM_VERSION_MINOR <= 8
+#if LLVM_VERSION_LE(3, 8)
ArrayRef<const char*> ref(symbols, len);
passes.add(llvm::createInternalizePass(ref));
#else
extern "C" void
LLVMRustSetModulePIELevel(LLVMModuleRef M) {
-#if LLVM_VERSION_MINOR >= 9
+#if LLVM_VERSION_GE(3, 9)
unwrap(M)->setPIELevel(PIELevel::Level::Large);
#endif
}
LLVMRustMetadataRef File,
LLVMRustMetadataRef ParameterTypes) {
return wrap(Builder->createSubroutineType(
-#if LLVM_VERSION_MINOR == 7
+#if LLVM_VERSION_EQ(3, 7)
unwrapDI<DIFile>(File),
#endif
DITypeRefArray(unwrap<MDTuple>(ParameterTypes))));
LLVMValueRef Fn,
LLVMRustMetadataRef TParam,
LLVMRustMetadataRef Decl) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
DITemplateParameterArray TParams =
DITemplateParameterArray(unwrap<MDTuple>(TParam));
DISubprogram *Sub = Builder->createFunction(
int64_t* AddrOps,
unsigned AddrOpsCount,
unsigned ArgNo) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
if (Tag == 0x100) { // DW_TAG_auto_variable
return wrap(Builder->createAutoVariable(
unwrapDI<DIDescriptor>(Scope), Name,
raw_string_ostream Stream(Err);
DiagnosticPrinterRawOStream DP(Stream);
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
if (Linker::linkModules(*Dst, std::move(Src.get()))) {
#else
if (Linker::LinkModules(Dst, Src->get(), [&](const DiagnosticInfo &DI) { DI.print(DP); })) {
return LLVMRustDiagnosticKind::OptimizationRemarkMissed;
case DK_OptimizationRemarkAnalysis:
return LLVMRustDiagnosticKind::OptimizationRemarkAnalysis;
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
case DK_OptimizationRemarkAnalysisFPCommute:
return LLVMRustDiagnosticKind::OptimizationRemarkAnalysisFPCommute;
case DK_OptimizationRemarkAnalysisAliasing:
return LLVMRustDiagnosticKind::OptimizationRemarkAnalysisAliasing;
#endif
default:
-#if LLVM_VERSION_MINOR >= 9
+#if LLVM_VERSION_GE(3, 9)
return (kind >= DK_FirstRemark && kind <= DK_LastRemark) ?
LLVMRustDiagnosticKind::OptimizationRemarkOther :
LLVMRustDiagnosticKind::Other;
return LLVMVectorTypeKind;
case Type::X86_MMXTyID:
return LLVMX86_MMXTypeKind;
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
case Type::TokenTyID:
return LLVMTokenTypeKind;
#endif
unsigned ArgCnt,
LLVMValueRef *LLArgs,
const char *Name) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
Value **Args = unwrap(LLArgs);
if (ParentPad == NULL) {
Type *Ty = Type::getTokenTy(unwrap(Builder)->getContext());
LLVMRustBuildCleanupRet(LLVMBuilderRef Builder,
LLVMValueRef CleanupPad,
LLVMBasicBlockRef UnwindBB) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
CleanupPadInst *Inst = cast<CleanupPadInst>(unwrap(CleanupPad));
return wrap(unwrap(Builder)->CreateCleanupRet(Inst, unwrap(UnwindBB)));
#else
unsigned ArgCnt,
LLVMValueRef *LLArgs,
const char *Name) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
Value **Args = unwrap(LLArgs);
return wrap(unwrap(Builder)->CreateCatchPad(unwrap(ParentPad),
ArrayRef<Value*>(Args, ArgCnt),
LLVMRustBuildCatchRet(LLVMBuilderRef Builder,
LLVMValueRef Pad,
LLVMBasicBlockRef BB) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
return wrap(unwrap(Builder)->CreateCatchRet(cast<CatchPadInst>(unwrap(Pad)),
unwrap(BB)));
#else
LLVMBasicBlockRef BB,
unsigned NumHandlers,
const char *Name) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
if (ParentPad == NULL) {
Type *Ty = Type::getTokenTy(unwrap(Builder)->getContext());
ParentPad = wrap(Constant::getNullValue(Ty));
extern "C" void
LLVMRustAddHandler(LLVMValueRef CatchSwitchRef,
LLVMBasicBlockRef Handler) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
Value *CatchSwitch = unwrap(CatchSwitchRef);
cast<CatchSwitchInst>(CatchSwitch)->addHandler(unwrap(Handler));
#endif
extern "C" void
LLVMRustSetPersonalityFn(LLVMBuilderRef B,
LLVMValueRef Personality) {
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
unwrap(B)->GetInsertBlock()
->getParent()
->setPersonalityFn(cast<Function>(unwrap(Personality)));
#endif
}
-#if LLVM_VERSION_MINOR >= 8
+#if LLVM_VERSION_GE(3, 8)
extern "C" OperandBundleDef*
LLVMRustBuildOperandBundleDef(const char *Name,
LLVMValueRef *Inputs,
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2016-09-17
+2016-09-25
#include "llvm-c/ExecutionEngine.h"
#include "llvm-c/Object.h"
-#if LLVM_VERSION_MINOR >= 7
+#define LLVM_VERSION_GE(major, minor) \
+ (LLVM_VERSION_MAJOR > (major) || LLVM_VERSION_MAJOR == (major) && LLVM_VERSION_MINOR >= (minor))
+
+#define LLVM_VERSION_EQ(major, minor) \
+ (LLVM_VERSION_MAJOR == (major) && LLVM_VERSION_MINOR == (minor))
+
+#define LLVM_VERSION_LE(major, minor) \
+ (LLVM_VERSION_MAJOR < (major) || LLVM_VERSION_MAJOR == (major) && LLVM_VERSION_MINOR <= (minor))
+
+#if LLVM_VERSION_GE(3, 7)
#include "llvm/IR/LegacyPassManager.h"
#else
#include "llvm/PassManager.h"
# tarball for a stable release you'll likely see `1.x.0-$date` where `1.x.0` was
# released on `$date`
-rustc: beta-2016-08-17
-rustc_key: 195e6261
-cargo: nightly-2016-08-21
+rustc: beta-2016-09-28
+rustc_key: 62b3e239
+cargo: nightly-2016-09-26
}
-//~ TRANS_ITEM fn overloaded_operators::{{impl}}[2]::eq[0]
-//~ TRANS_ITEM fn overloaded_operators::{{impl}}[2]::ne[0]
+//~ TRANS_ITEM fn overloaded_operators::{{impl}}[4]::eq[0]
+//~ TRANS_ITEM fn overloaded_operators::{{impl}}[4]::ne[0]
#[derive(PartialEq)]
pub struct Equatable(u32);
impl Add<u32> for Equatable {
type Output = u32;
- //~ TRANS_ITEM fn overloaded_operators::{{impl}}[3]::add[0]
+ //~ TRANS_ITEM fn overloaded_operators::{{impl}}[2]::add[0]
fn add(self, rhs: u32) -> u32 {
self.0 + rhs
}
impl Deref for Equatable {
type Target = u32;
- //~ TRANS_ITEM fn overloaded_operators::{{impl}}[4]::deref[0]
+ //~ TRANS_ITEM fn overloaded_operators::{{impl}}[3]::deref[0]
fn deref(&self) -> &Self::Target {
&self.0
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -O
+
+#![crate_type = "lib"]
+
+pub enum Foo {
+ A, B
+}
+
+// CHECK-LABEL: @lookup
+#[no_mangle]
+pub fn lookup(buf: &[u8; 2], f: Foo) -> u8 {
+ // CHECK-NOT: panic_bounds_check
+ buf[f as usize]
+}
// CHECK: [[S_b:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"** %b to i8*
// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_b]])
-// CHECK: [[S_tmp2:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"* %tmp2 to i8*
-// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_tmp2]])
+// CHECK: [[S__5:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"* %_5 to i8*
+// CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S__5]])
-// CHECK: [[E_tmp2:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"* %tmp2 to i8*
-// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E_tmp2]])
+// CHECK: [[E__5:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"* %_5 to i8*
+// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E__5]])
// CHECK: [[E_b:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"** %b to i8*
// CHECK: call void @llvm.lifetime.end(i{{[0-9 ]+}}, i8* [[E_b]])
#![no_std]
-extern crate core;
extern crate rand;
extern crate serialize as rustc_serialize;
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// aux-build:append-impl.rs
-
-#![feature(rustc_macro)]
-#![allow(warnings)]
-
-#[macro_use]
-extern crate append_impl;
-
-trait Append {
- fn foo(&self);
-}
-
-#[derive(PartialEq,
- Append,
- Eq)]
-struct A {
-//~^ ERROR: the semantics of constant patterns is not yet settled
- inner: u32,
-}
-
-fn main() {
- A { inner: 3 }.foo();
-}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// force-host
-// no-prefer-dynamic
-
-#![feature(rustc_macro)]
-#![feature(rustc_macro_lib)]
-#![crate_type = "rustc-macro"]
-
-extern crate rustc_macro;
-
-use rustc_macro::TokenStream;
-
-#[rustc_macro_derive(Append)]
-pub fn derive_a(input: TokenStream) -> TokenStream {
- let mut input = input.to_string();
- input.push_str("
- impl Append for A {
- fn foo(&self) {}
- }
- ");
- input.parse().unwrap()
-}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- let x = Some(1);
-
- match x { } //~ ERROR E0002
-}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x = Some(1);
+
+ match x { } //~ ERROR E0004
+}
fn main() {
let x = Foo { a:1, b:2 };
- let Foo { a: x, a: y, b: 0 } = x; //~ ERROR E0025
+ let Foo { a: x, a: y, b: 0 } = x;
+ //~^ ERROR field `a` bound multiple times in the pattern
+ //~| NOTE multiple uses of `a` in pattern
+ //~| NOTE first use of `a`
}
fn main() {
let trait_obj: &SomeTrait = SomeTrait;
//~^ ERROR E0425
+ //~| NOTE unresolved name
//~| ERROR E0038
//~| method `foo` has no receiver
//~| NOTE the trait `SomeTrait` cannot be made into an object
fn main() {
let x = Test;
x.method::<i32>(); //~ ERROR E0035
+ //~| NOTE called with unneeded type parameters
}
let x = Test;
let v = &[0];
x.method::<i32, i32>(v); //~ ERROR E0036
+ //~| NOTE Passed 2 type arguments, expected 1
}
// except according to those terms.
trait Foo {
- fn foo(&self, x: u8) -> bool;
+ fn foo(&self, x: u8) -> bool; //~ NOTE trait requires 2 parameters
+ fn bar(&self, x: u8, y: u8, z: u8); //~ NOTE trait requires 4 parameters
+ fn less(&self); //~ NOTE trait requires 1 parameter
}
struct Bar;
impl Foo for Bar {
fn foo(&self) -> bool { true } //~ ERROR E0050
+ //~| NOTE expected 2 parameters, found 1
+ fn bar(&self) { } //~ ERROR E0050
+ //~| NOTE expected 4 parameters, found 1
+ fn less(&self, x: u8, y: u8, z: u8) { } //~ ERROR E0050
+ //~| NOTE expected 1 parameter, found 4
}
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-enum Foo { B { i: u32 } }
+#![feature(associated_consts)]
+
+enum Foo {}
+
+impl Foo {
+ const B: u8 = 0;
+}
fn bar(foo: Foo) -> u32 {
match foo {
}
type Foo = Trait<F=i32>; //~ ERROR E0220
- //~^ ERROR E0191
-
+ //~| NOTE associated type `F` not found
+ //~| ERROR E0191
+ //~| NOTE missing associated type `Bar` value
fn main() {
}
struct Bar(u32);
pub fn bar() -> Bar { //~ ERROR E0446
+ //~| NOTE can't leak private type
Bar(0)
}
}
}
pub impl Bar {} //~ ERROR E0449
+ //~| NOTE `pub` not needed here
+ //~| NOTE place qualifiers on individual impl items instead
pub impl Foo for Bar { //~ ERROR E0449
+ //~| NOTE `pub` not needed here
pub fn foo() {} //~ ERROR E0449
+ //~| NOTE `pub` not needed here
}
fn main() {
fn main() {
unsafe { takes_u8(::std::mem::transmute(0u16)); } //~ ERROR E0512
+ //~| transmuting between 16 bits and 8 bits
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::mem;
+
+fn main() {
+ unsafe {
+ let size = mem::size_of::<u32>();
+ mem::transmute_copy::<u32, [u8; size]>(&8_8); //~ ERROR E0513
+ //~| NOTE no type for variable
+ }
+}
// system allocator. Do this by linking in jemalloc and making sure that we get
// an error.
+// ignore-emscripten FIXME: What "other allocator" should we use for emcc?
+
#![feature(alloc_jemalloc)]
extern crate allocator_dylib;
// Ensure that rust dynamic libraries use jemalloc as their allocator, verifying
// by linking in the system allocator here and ensuring that we get a complaint.
+// ignore-emscripten FIXME: What "other allocator" is correct for emscripten?
+
#![feature(alloc_system)]
extern crate allocator_dylib2;
// Check that bogus field access is non-fatal
fn main() {
let x = 0;
- let _ = x.foo; //~ ERROR attempted access of field
- let _ = x.bar; //~ ERROR attempted access of field
+ let _ = x.foo; //~ no field `foo` on type `{integer}`
+ let _ = x.bar; //~ no field `bar` on type `{integer}`
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test ensures that attributes on formals in generic parameter
+// lists are included when we are checking for unstable attributes.
+//
+// Note that feature(generic_param_attrs) *is* enabled here. We are
+// checking feature-gating of the attributes themselves, not the
+// capability to parse such attributes in that context.
+
+#![feature(generic_param_attrs)]
+#![allow(dead_code)]
+
+struct StLt<#[lt_struct] 'a>(&'a u32);
+//~^ ERROR The attribute `lt_struct` is currently unknown to the compiler
+struct StTy<#[ty_struct] I>(I);
+//~^ ERROR The attribute `ty_struct` is currently unknown to the compiler
+
+enum EnLt<#[lt_enum] 'b> { A(&'b u32), B }
+//~^ ERROR The attribute `lt_enum` is currently unknown to the compiler
+enum EnTy<#[ty_enum] J> { A(J), B }
+//~^ ERROR The attribute `ty_enum` is currently unknown to the compiler
+
+trait TrLt<#[lt_trait] 'c> { fn foo(&self, _: &'c [u32]) -> &'c u32; }
+//~^ ERROR The attribute `lt_trait` is currently unknown to the compiler
+trait TrTy<#[ty_trait] K> { fn foo(&self, _: K); }
+//~^ ERROR The attribute `ty_trait` is currently unknown to the compiler
+
+type TyLt<#[lt_type] 'd> = &'d u32;
+//~^ ERROR The attribute `lt_type` is currently unknown to the compiler
+type TyTy<#[ty_type] L> = (L, );
+//~^ ERROR The attribute `ty_type` is currently unknown to the compiler
+
+impl<#[lt_inherent] 'e> StLt<'e> { }
+//~^ ERROR The attribute `lt_inherent` is currently unknown to the compiler
+impl<#[ty_inherent] M> StTy<M> { }
+//~^ ERROR The attribute `ty_inherent` is currently unknown to the compiler
+
+impl<#[lt_impl_for] 'f> TrLt<'f> for StLt<'f> {
+ //~^ ERROR The attribute `lt_impl_for` is currently unknown to the compiler
+ fn foo(&self, _: &'f [u32]) -> &'f u32 { loop { } }
+}
+impl<#[ty_impl_for] N> TrTy<N> for StTy<N> {
+ //~^ ERROR The attribute `ty_impl_for` is currently unknown to the compiler
+ fn foo(&self, _: N) { }
+}
+
+fn f_lt<#[lt_fn] 'g>(_: &'g [u32]) -> &'g u32 { loop { } }
+//~^ ERROR The attribute `lt_fn` is currently unknown to the compiler
+fn f_ty<#[ty_fn] O>(_: O) { }
+//~^ ERROR The attribute `ty_fn` is currently unknown to the compiler
+
+impl<I> StTy<I> {
+ fn m_lt<#[lt_meth] 'h>(_: &'h [u32]) -> &'h u32 { loop { } }
+ //~^ ERROR The attribute `lt_meth` is currently unknown to the compiler
+ fn m_ty<#[ty_meth] P>(_: P) { }
+ //~^ ERROR The attribute `ty_meth` is currently unknown to the compiler
+}
+
+fn hof_lt<Q>(_: Q)
+ where Q: for <#[lt_hof] 'i> Fn(&'i [u32]) -> &'i u32
+ //~^ ERROR The attribute `lt_hof` is currently unknown to the compiler
+{
+}
+
+fn main() {
+
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test ensures that attributes on formals in generic parameter
+// lists are rejected if feature(generic_param_attrs) is not enabled.
+//
+// (We are prefixing all tested features with `rustc_`, to ensure that
+// the attributes themselves won't be rejected by the compiler when
+// using `rustc_attrs` feature. There is a separate compile-fail/ test
+// ensuring that the attribute feature-gating works in this context.)
+
+#![feature(rustc_attrs)]
+#![allow(dead_code)]
+
+struct StLt<#[rustc_lt_struct] 'a>(&'a u32);
+//~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+struct StTy<#[rustc_ty_struct] I>(I);
+//~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+
+enum EnLt<#[rustc_lt_enum] 'b> { A(&'b u32), B }
+//~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+enum EnTy<#[rustc_ty_enum] J> { A(J), B }
+//~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+
+trait TrLt<#[rustc_lt_trait] 'c> { fn foo(&self, _: &'c [u32]) -> &'c u32; }
+//~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+trait TrTy<#[rustc_ty_trait] K> { fn foo(&self, _: K); }
+//~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+
+type TyLt<#[rustc_lt_type] 'd> = &'d u32;
+//~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+type TyTy<#[rustc_ty_type] L> = (L, );
+//~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+
+impl<#[rustc_lt_inherent] 'e> StLt<'e> { }
+//~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+impl<#[rustc_ty_inherent] M> StTy<M> { }
+//~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+
+impl<#[rustc_lt_impl_for] 'f> TrLt<'f> for StLt<'f> {
+ //~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+ fn foo(&self, _: &'f [u32]) -> &'f u32 { loop { } }
+}
+impl<#[rustc_ty_impl_for] N> TrTy<N> for StTy<N> {
+ //~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+ fn foo(&self, _: N) { }
+}
+
+fn f_lt<#[rustc_lt_fn] 'g>(_: &'g [u32]) -> &'g u32 { loop { } }
+//~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+fn f_ty<#[rustc_ty_fn] O>(_: O) { }
+//~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+
+impl<I> StTy<I> {
+ fn m_lt<#[rustc_lt_meth] 'h>(_: &'h [u32]) -> &'h u32 { loop { } }
+ //~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+ fn m_ty<#[rustc_ty_meth] P>(_: P) { }
+ //~^ ERROR attributes on type parameter bindings are experimental (see issue #34761)
+}
+
+fn hof_lt<Q>(_: Q)
+ where Q: for <#[rustc_lt_hof] 'i> Fn(&'i [u32]) -> &'i u32
+ //~^ ERROR attributes on lifetime bindings are experimental (see issue #34761)
+{
+}
+
+fn main() {
+
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test checks variations on `<#[attr] 'a, #[oops]>`, where
+// `#[oops]` is left dangling (that is, it is unattached, with no
+// formal binding following it).
+
+#![feature(generic_param_attrs, rustc_attrs)]
+#![allow(dead_code)]
+
+struct RefIntPair<'a, 'b>(&'a u32, &'b u32);
+
+impl<#[rustc_1] 'a, 'b, #[oops]> RefIntPair<'a, 'b> {
+ //~^ ERROR trailing attribute after lifetime parameters
+}
+
+fn main() {
+
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test checks variations on `<#[attr] 'a, #[oops]>`, where
+// `#[oops]` is left dangling (that is, it is unattached, with no
+// formal binding following it).
+
+#![feature(generic_param_attrs, rustc_attrs)]
+#![allow(dead_code)]
+
+struct RefAny<'a, T>(&'a T);
+
+impl<#[rustc_1] 'a, #[rustc_2] T, #[oops]> RefAny<'a, T> {
+ //~^ ERROR expected identifier, found `>`
+}
+
+fn main() {
+
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test checks variations on `<#[attr] 'a, #[oops]>`, where
+// `#[oops]` is left dangling (that is, it is unattached, with no
+// formal binding following it).
+
+struct RefIntPair<'a, 'b>(&'a u32, &'b u32);
+
+fn hof_lt<Q>(_: Q)
+ where Q: for <#[rustc_1] 'a, 'b, #[oops]> Fn(RefIntPair<'a,'b>) -> &'b u32
+ //~^ ERROR trailing attribute after lifetime parameters
+{
+
+}
+
+fn main() {
+
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(item_like_imports, relaxed_adts)]
+
+pub mod c {
+ pub struct S {}
+ pub struct TS();
+ pub struct US;
+ pub enum E {
+ V {},
+ TV(),
+ UV,
+ }
+
+ pub struct Item;
+}
+
+pub mod xm1 {
+ pub use ::c::*;
+ pub type S = ::c::Item;
+}
+pub mod xm2 {
+ pub use ::c::*;
+ pub const S: ::c::Item = ::c::Item;
+}
+
+pub mod xm3 {
+ pub use ::c::*;
+ pub type TS = ::c::Item;
+}
+pub mod xm4 {
+ pub use ::c::*;
+ pub const TS: ::c::Item = ::c::Item;
+}
+
+pub mod xm5 {
+ pub use ::c::*;
+ pub type US = ::c::Item;
+}
+pub mod xm6 {
+ pub use ::c::*;
+ pub const US: ::c::Item = ::c::Item;
+}
+
+pub mod xm7 {
+ pub use ::c::E::*;
+ pub type V = ::c::Item;
+}
+pub mod xm8 {
+ pub use ::c::E::*;
+ pub const V: ::c::Item = ::c::Item;
+}
+
+pub mod xm9 {
+ pub use ::c::E::*;
+ pub type TV = ::c::Item;
+}
+pub mod xmA {
+ pub use ::c::E::*;
+ pub const TV: ::c::Item = ::c::Item;
+}
+
+pub mod xmB {
+ pub use ::c::E::*;
+ pub type UV = ::c::Item;
+}
+pub mod xmC {
+ pub use ::c::E::*;
+ pub const UV: ::c::Item = ::c::Item;
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// FIXME: Remove when `item_like_imports` is stabilized.
+
+#![feature(relaxed_adts)]
+
+pub mod c {
+ pub struct S {}
+ pub struct TS();
+ pub struct US;
+ pub enum E {
+ V {},
+ TV(),
+ UV,
+ }
+
+ pub struct Item;
+}
+
+pub mod proxy {
+ pub use c::*;
+ pub use c::E::*;
+}
+
+pub mod xm1 {
+ pub use ::proxy::*;
+ pub type S = ::c::Item;
+}
+pub mod xm2 {
+ pub use ::proxy::*;
+ pub const S: ::c::Item = ::c::Item;
+}
+
+pub mod xm3 {
+ pub use ::proxy::*;
+ pub type TS = ::c::Item;
+}
+pub mod xm4 {
+ pub use ::proxy::*;
+ pub const TS: ::c::Item = ::c::Item;
+}
+
+pub mod xm5 {
+ pub use ::proxy::*;
+ pub type US = ::c::Item;
+}
+pub mod xm6 {
+ pub use ::proxy::*;
+ pub const US: ::c::Item = ::c::Item;
+}
+
+pub mod xm7 {
+ pub use ::proxy::*;
+ pub type V = ::c::Item;
+}
+pub mod xm8 {
+ pub use ::proxy::*;
+ pub const V: ::c::Item = ::c::Item;
+}
+
+pub mod xm9 {
+ pub use ::proxy::*;
+ pub type TV = ::c::Item;
+}
+pub mod xmA {
+ pub use ::proxy::*;
+ pub const TV: ::c::Item = ::c::Item;
+}
+
+pub mod xmB {
+ pub use ::proxy::*;
+ pub type UV = ::c::Item;
+}
+pub mod xmC {
+ pub use ::proxy::*;
+ pub const UV: ::c::Item = ::c::Item;
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: unresolved name `m1::arguments`. Did you mean `arguments`?
+// error-pattern: unresolved name `m1::arguments`
mod m1 {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: unresolved name `m1::arguments`. Did you mean `arguments`?
+// error-pattern: unresolved name `m1::arguments`
mod m1 {
pub mod arguments {}
fn main() {
let bar = 5;
- //~^ ERROR let bindings cannot shadow structs
+ //~^ ERROR let bindings cannot shadow unit structs
use foo::bar;
}
//~| NOTE required for the cast to the object type `Foo`
// check no error cascade
- let _ = main.f as *const u32; //~ ERROR attempted access of field
+ let _ = main.f as *const u32; //~ no field `f` on type `fn() {main}`
let cf: *const Foo = &0;
let _ = cf as *const [u16];
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// Test that changing what a `type` points to does not go unnoticed.
+
+// compile-flags: -Z query-dep-graph
+
+#![feature(rustc_attrs)]
+#![allow(dead_code)]
+#![allow(unused_variables)]
+
+fn main() { }
+
+
+#[rustc_if_this_changed]
+type TypeAlias = u32;
+
+#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
+struct Struct {
+ x: TypeAlias,
+ y: u32
+}
+
+#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
+enum Enum {
+ Variant1(TypeAlias),
+ Variant2(i32)
+}
+
+#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
+trait Trait {
+ fn method(&self, _: TypeAlias);
+}
+
+struct SomeType;
+
+#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
+impl SomeType {
+ fn method(&self, _: TypeAlias) {}
+}
+
+#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
+type TypeAlias2 = TypeAlias;
+
+#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
+fn function(_: TypeAlias) {
+
+}
pub fn tcx(&self) -> &'a &'tcx () { self.1 }
fn lol(&mut self, b: &Foo)
{
- b.c; //~ ERROR no field with that name was found
+ b.c; //~ ERROR no field `c` on type `&Foo`
self.tcx();
}
}
enum A {
Ok = i8::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed on value after 127i8; set explicitly via OhNo = -128i8 if that is desired outcome
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 127i8
+ //~| NOTE explicitly set `OhNo = -128i8` if that is desired outcome
}
}
enum A {
Ok = u8::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed on value after 255u8; set explicitly via OhNo = 0u8 if that is desired outcome
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 255u8
+ //~| NOTE explicitly set `OhNo = 0u8` if that is desired outcome
}
}
enum A {
Ok = i16::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 32767i16
+ //~| NOTE explicitly set `OhNo = -32768i16` if that is desired outcome
}
}
enum A {
Ok = u16::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 65535u16
+ //~| NOTE explicitly set `OhNo = 0u16` if that is desired outcome
}
}
enum A {
Ok = i32::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 2147483647i32
+ //~| NOTE explicitly set `OhNo = -2147483648i32` if that is desired outcome
}
}
enum A {
Ok = u32::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 4294967295u32
+ //~| NOTE explicitly set `OhNo = 0u32` if that is desired outcome
}
}
enum A {
Ok = i64::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 9223372036854775807i64
+ //~| NOTE explicitly set `OhNo = -9223372036854775808i64` if that is desired outcome
}
}
enum A {
Ok = u64::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 18446744073709551615u64
+ //~| NOTE explicitly set `OhNo = 0u64` if that is desired outcome
}
}
enum A {
Ok = i8::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed on value after 127i8; set explicitly via OhNo = -128i8 if that is desired outcome
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 127i8
+ //~| NOTE explicitly set `OhNo = -128i8` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = u8::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed on value after 255u8; set explicitly via OhNo = 0u8 if that is desired outcome
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 255u8
+ //~| NOTE explicitly set `OhNo = 0u8` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = i16::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 32767i16
+ //~| NOTE explicitly set `OhNo = -32768i16` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = u16::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 65535u16
+ //~| NOTE explicitly set `OhNo = 0u16` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = i32::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 2147483647i32
+ //~| NOTE explicitly set `OhNo = -2147483648i32` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = u32::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 4294967295u32
+ //~| NOTE explicitly set `OhNo = 0u32` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = i64::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 9223372036854775807i64
+ //~| NOTE explicitly set `OhNo = -9223372036854775808i64` if that is desired outcome
}
let x = A::Ok;
enum A {
Ok = u64::MAX - 1,
Ok2,
- OhNo, //~ ERROR enum discriminant overflowed
+ OhNo, //~ ERROR enum discriminant overflowed [E0370]
+ //~| NOTE overflowed on value after 18446744073709551615u64
+ //~| NOTE explicitly set `OhNo = 0u64` if that is desired outcome
}
let x = A::Ok;
}
match e3 {
E::Empty3 => ()
- //~^ ERROR `E::Empty3` does not name a unit variant, unit struct or a constant
+ //~^ ERROR expected unit struct/variant or constant, found struct variant `E::Empty3`
}
match xe1 {
XEmpty1 => () // Not an error, `XEmpty1` is interpreted as a new binding
}
match xe3 {
XE::XEmpty3 => ()
- //~^ ERROR `XE::XEmpty3` does not name a unit variant, unit struct or a constant
+ //~^ ERROR expected unit struct/variant or constant, found struct variant `XE::XEmpty3`
}
}
let xe1 = XEmpty1 {};
match e1 {
- Empty1() => () //~ ERROR unresolved variant or struct `Empty1`
+ Empty1() => () //~ ERROR unresolved tuple struct/variant `Empty1`
}
match xe1 {
- XEmpty1() => () //~ ERROR unresolved variant or struct `XEmpty1`
+ XEmpty1() => () //~ ERROR unresolved tuple struct/variant `XEmpty1`
}
match e1 {
- Empty1(..) => () //~ ERROR unresolved variant or struct `Empty1`
+ Empty1(..) => () //~ ERROR unresolved tuple struct/variant `Empty1`
}
match xe1 {
- XEmpty1(..) => () //~ ERROR unresolved variant or struct `XEmpty1`
+ XEmpty1(..) => () //~ ERROR unresolved tuple struct/variant `XEmpty1`
}
}
let xe3 = XE::XEmpty3 {};
match e3 {
- E::Empty3() => () //~ ERROR `E::Empty3` does not name a tuple variant or a tuple struct
+ E::Empty3() => ()
+ //~^ ERROR expected tuple struct/variant, found struct variant `E::Empty3`
}
match xe3 {
- XE::XEmpty3() => () //~ ERROR `XE::XEmpty3` does not name a tuple variant or a tuple struct
+ XE::XEmpty3() => ()
+ //~^ ERROR expected tuple struct/variant, found struct variant `XE::XEmpty3`
}
match e3 {
- E::Empty3(..) => () //~ ERROR `E::Empty3` does not name a tuple variant or a tuple struct
+ E::Empty3(..) => ()
+ //~^ ERROR expected tuple struct/variant, found struct variant `E::Empty3`
}
match xe3 {
- XE::XEmpty3(..) => () //~ ERROR `XE::XEmpty3` does not name a tuple variant or a tuple
+ XE::XEmpty3(..) => ()
+ //~^ ERROR expected tuple struct/variant, found struct variant `XE::XEmpty3`
}
}
let xe5 = XE::XEmpty5();
match e2 {
- Empty2 => () //~ ERROR `Empty2` does not name a unit variant, unit struct or a constant
+ Empty2 => () //~ ERROR match bindings cannot shadow tuple structs
}
match xe6 {
- XEmpty6 => () //~ ERROR `XEmpty6` does not name a unit variant, unit struct or a constant
+ XEmpty6 => () //~ ERROR match bindings cannot shadow tuple structs
}
match e4 {
- E::Empty4 => () //~ ERROR `E::Empty4` does not name a unit variant, unit struct or a
+ E::Empty4 => ()
+ //~^ ERROR expected unit struct/variant or constant, found tuple variant `E::Empty4`
}
match xe5 {
- XE::XEmpty5 => (), //~ ERROR `XE::XEmpty5` does not name a unit variant, unit struct or a
+ XE::XEmpty5 => (),
+ //~^ ERROR expected unit struct/variant or constant, found tuple variant `XE::XEmpty5`
_ => {},
}
}
let xe4 = XE::XEmpty4;
match e2 {
- Empty2(..) => () //~ ERROR `Empty2` does not name a tuple variant or a tuple struct
+ Empty2(..) => () //~ ERROR expected tuple struct/variant, found unit struct `Empty2`
//~^ WARNING hard error
}
match xe2 {
- XEmpty2(..) => () //~ ERROR `XEmpty2` does not name a tuple variant or a tuple struct
+ XEmpty2(..) => () //~ ERROR expected tuple struct/variant, found unit struct `XEmpty2`
//~^ WARNING hard error
}
match e4 {
- E::Empty4(..) => () //~ ERROR `E::Empty4` does not name a tuple variant or a tuple struct
+ E::Empty4(..) => () //~ ERROR expected tuple struct/variant, found unit variant `E::Empty4`
//~^ WARNING hard error
}
match xe4 {
- XE::XEmpty4(..) => (), //~ ERROR `XE::XEmpty4` does not name a tuple variant or a tuple
- //~^ WARNING hard error
+ XE::XEmpty4(..) => (),
+ //~^ ERROR expected tuple struct/variant, found unit variant `XE::XEmpty4`
+ //~| WARNING hard error
_ => {},
}
}
Empty4
}
-// remove attribute after warning cycle and promoting warnings to errors
fn main() {
let e2 = Empty2;
let e4 = E::Empty4;
let xe4 = XE::XEmpty4;
match e2 {
- Empty2() => () //~ ERROR `Empty2` does not name a tuple variant or a tuple struct
+ Empty2() => ()
+ //~^ ERROR expected tuple struct/variant, found unit struct `Empty2`
}
match xe2 {
- XEmpty2() => () //~ ERROR `XEmpty2` does not name a tuple variant or a tuple struct
+ XEmpty2() => ()
+ //~^ ERROR expected tuple struct/variant, found unit struct `XEmpty2`
}
match e4 {
- E::Empty4() => () //~ ERROR `E::Empty4` does not name a tuple variant or a tuple struct
+ E::Empty4() => ()
+ //~^ ERROR expected tuple struct/variant, found unit variant `E::Empty4`
}
match xe4 {
- XE::XEmpty4() => (), //~ ERROR `XE::XEmpty4` does not name a tuple variant or a tuple
+ XE::XEmpty4() => (),
+ //~^ ERROR expected tuple struct/variant, found unit variant `XE::XEmpty4`
_ => {},
}
}
struct hello(isize);
fn main() {
- let hello = 0; //~ERROR let bindings cannot shadow structs
+ let hello = 0; //~ERROR let bindings cannot shadow tuple structs
}
// except according to those terms.
fn main() {
- let a(1) = 13; //~ ERROR unresolved variant or struct `a`
+ let a(1) = 13; //~ ERROR unresolved tuple struct/variant `a`
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-extern crate bäz; //~ ERROR non-ascii idents
+extern crate core as bäz; //~ ERROR non-ascii idents
use föö::bar; //~ ERROR non-ascii idents
fn main() {
match Foo(true) {
- foo(x) //~ ERROR expected variant or struct, found function `foo`
+ foo(x) //~ ERROR expected tuple struct/variant, found function `foo`
=> ()
}
}
#[cfg(not(works))]
unsafe fn access(n:*mut A) -> (i32, f64) {
- let x : i32 = n.x; //~ ERROR attempted access of field `x`
+ let x : i32 = n.x; //~ no field `x` on type `*mut A`
//~| NOTE `n` is a native pointer; perhaps you need to deref with `(*n).x`
- let y : f64 = n.y; //~ ERROR attempted access of field `y`
+ let y : f64 = n.y; //~ no field `y` on type `*mut A`
//~| NOTE `n` is a native pointer; perhaps you need to deref with `(*n).y`
(x, y)
}
fn main() {
match () {
- foo::bar => {} //~ ERROR expected variant, struct or constant, found function `bar`
+ foo::bar => {} //~ ERROR expected unit struct/variant or constant, found function `foo::bar`
}
}
impl Foo for *const BarTy {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
a;
//~^ ERROR: unresolved name `a`
+ //~| NOTE unresolved name
}
}
impl<'a> Foo for &'a BarTy {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
x;
- //~^ ERROR: unresolved name `x`. Did you mean `self.x`?
+ //~^ ERROR: unresolved name `x`
+ //~| NOTE did you mean `self.x`?
y;
- //~^ ERROR: unresolved name `y`. Did you mean `self.y`?
+ //~^ ERROR: unresolved name `y`
+ //~| NOTE did you mean `self.y`?
a;
//~^ ERROR: unresolved name `a`
+ //~| NOTE unresolved name
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
b;
//~^ ERROR: unresolved name `b`
+ //~| NOTE unresolved name
}
}
impl<'a> Foo for &'a mut BarTy {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
x;
- //~^ ERROR: unresolved name `x`. Did you mean `self.x`?
+ //~^ ERROR: unresolved name `x`
+ //~| NOTE did you mean `self.x`?
y;
- //~^ ERROR: unresolved name `y`. Did you mean `self.y`?
+ //~^ ERROR: unresolved name `y`
+ //~| NOTE did you mean `self.y`?
a;
//~^ ERROR: unresolved name `a`
+ //~| NOTE unresolved name
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
b;
//~^ ERROR: unresolved name `b`
+ //~| NOTE unresolved name
}
}
impl Foo for Box<BarTy> {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
}
}
impl Foo for *const isize {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
}
}
impl<'a> Foo for &'a isize {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
}
}
impl<'a> Foo for &'a mut isize {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
}
}
impl Foo for Box<isize> {
fn bar(&self) {
baz();
- //~^ ERROR: unresolved name `baz`. Did you mean to call `self.baz`?
+ //~^ ERROR: unresolved name `baz`
+ //~| NOTE did you mean to call `self.baz`?
bah;
- //~^ ERROR: unresolved name `bah`. Did you mean to call `Foo::bah`?
+ //~^ ERROR: unresolved name `bah`
+ //~| NOTE did you mean to call `Foo::bah`?
}
}
fn main() {
let foo = "str";
- println!("{}", foo.desc); //~ ERROR attempted access of field `desc` on type `&str`,
- // but no field with that name was found
+ println!("{}", foo.desc); //~ no field `desc` on type `&str`
}
fn main() {
let Foo {
- a: _, //~ NOTE field `a` previously bound here
- a: _ //~ ERROR field `a` bound multiple times in the pattern
+ a: _, //~ NOTE first use of `a`
+ a: _
+ //~^ ERROR field `a` bound multiple times in the pattern
+ //~| NOTE multiple uses of `a` in pattern
} = Foo { a: 29 };
let Foo {
- a, //~ NOTE field `a` previously bound here
- a: _ //~ ERROR field `a` bound multiple times in the pattern
+ a, //~ NOTE first use of `a`
+ a: _
+ //~^ ERROR field `a` bound multiple times in the pattern
+ //~| NOTE multiple uses of `a` in pattern
} = Foo { a: 29 };
let Foo {
- a, //~ NOTE field `a` previously bound here
- //~^ NOTE field `a` previously bound here
- a: _, //~ ERROR field `a` bound multiple times in the pattern
- a: x //~ ERROR field `a` bound multiple times in the pattern
+ a,
+ //~^ NOTE first use of `a`
+ //~| NOTE first use of `a`
+ a: _,
+ //~^ ERROR field `a` bound multiple times in the pattern
+ //~| NOTE multiple uses of `a` in pattern
+ a: x
+ //~^ ERROR field `a` bound multiple times in the pattern
+ //~| NOTE multiple uses of `a` in pattern
} = Foo { a: 29 };
}
fn main() {
match 1 {
self::X => { },
- //~^ ERROR expected variant, struct or constant, found static `X`
+ //~^ ERROR expected unit struct/variant or constant, found static `self::X`
_ => { },
}
}
let f = FooB { x: 3, y: 4 };
match f {
FooB(a, b) => println!("{} {}", a, b),
-//~^ ERROR `FooB` does not name a tuple variant or a tuple struct
+ //~^ ERROR expected tuple struct/variant, found struct variant `FooB`
}
}
fn main() {
let a: [isize; STRUCT.nonexistent_field];
- //~^ ERROR attempted access of field `nonexistent_field`
+ //~^ no field `nonexistent_field` on type `MyStruct`
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: expected item, found `parse_error`
+// error-pattern: expected one of `!` or `::`, found `<eof>`
include!("auxiliary/issue-21146-inc.rs");
fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Bound {}
+pub struct Foo<T: Bound>(T);
+
+pub trait Trait1 {}
+impl<T: Bound> Trait1 for Foo<T> {}
+
+pub trait Trait2 {}
+impl<T> Trait2 for Foo<T> {} //~ ERROR the trait bound `T: Bound` is not satisfied
+
+fn main() {}
fn main() {
Foo::Bar.a;
- //~^ ERROR: attempted access of field `a` on type `Foo`, but no field with that name was found
+ //~^ no field `a` on type `Foo`
}
// If this provides a suggestion, it's a bug as MaybeDog doesn't impl Groom
shave();
//~^ ERROR: unresolved name `shave`
+ //~| NOTE unresolved name
}
}
fn shave(other: usize) {
whiskers -= other;
//~^ ERROR: unresolved name `whiskers`
+ //~| NOTE unresolved name
//~| HELP this is an associated function
shave(4);
- //~^ ERROR: unresolved name `shave`. Did you mean to call `Groom::shave`?
+ //~^ ERROR: unresolved name `shave`
+ //~| NOTE did you mean to call `Groom::shave`?
purr();
//~^ ERROR: unresolved name `purr`
+ //~| NOTE unresolved name
}
}
fn purr_louder() {
static_method();
//~^ ERROR: unresolved name `static_method`
+ //~| NOTE unresolved name
purr();
//~^ ERROR: unresolved name `purr`
+ //~| NOTE unresolved name
purr();
//~^ ERROR: unresolved name `purr`
+ //~| NOTE unresolved name
purr();
//~^ ERROR: unresolved name `purr`
+ //~| NOTE unresolved name
}
}
fn purr(&self) {
grow_older();
//~^ ERROR: unresolved name `grow_older`
+ //~| NOTE unresolved name
shave();
//~^ ERROR: unresolved name `shave`
+ //~| NOTE unresolved name
}
fn burn_whiskers(&mut self) {
whiskers = 0;
- //~^ ERROR: unresolved name `whiskers`. Did you mean `self.whiskers`?
+ //~^ ERROR: unresolved name `whiskers`
+ //~| NOTE did you mean `self.whiskers`?
}
pub fn grow_older(other:usize) {
whiskers = 4;
//~^ ERROR: unresolved name `whiskers`
+ //~| NOTE unresolved name
//~| HELP this is an associated function
purr_louder();
//~^ ERROR: unresolved name `purr_louder`
+ //~| NOTE unresolved name
}
}
fn main() {
self += 1;
//~^ ERROR: unresolved name `self`
+ //~| NOTE unresolved name
//~| HELP: module `self`
// it's a bug if this suggests a missing `self` as we're not in a method
}
// except according to those terms.
fn main() {
- 1.create_a_type_error[ //~ ERROR attempted access of field
+ 1.create_a_type_error[ //~ no field `create_a_type_error` on type `{integer}`
()+() //~ ERROR binary operation `+` cannot be applied
// ^ ensure that we typeck the inner expression ^
];
}
fn test(a: Foo) {
- println!("{}", a.b); //~ ERROR attempted access of field
+ println!("{}", a.b); //~ no field `b` on type `Foo`
}
fn main() {
let x = Attribute::Code {
attr_name_idx: 42,
};
- let z = (&x).attr_name_idx; //~ ERROR attempted access of field
- let y = x.attr_name_idx; //~ ERROR attempted access of field
+ let z = (&x).attr_name_idx; //~ no field `attr_name_idx` on type `&Attribute`
+ let y = x.attr_name_idx; //~ no field `attr_name_idx` on type `Attribute`
}
fn main() {
match Some(1) {
- None @ _ => {} //~ ERROR match bindings cannot shadow variants
+ None @ _ => {} //~ ERROR match bindings cannot shadow unit variants
};
const C: u8 = 1;
match 1 {
let u = A { x: 1 }; //~ ERROR `A` does not name a struct or a struct variant
let v = u32 { x: 1 }; //~ ERROR `u32` does not name a struct or a struct variant
match () {
- A { x: 1 } => {} //~ ERROR expected variant, struct or type alias, found module `A`
- u32 { x: 1 } => {} //~ ERROR expected variant, struct or type alias, found builtin type `u32
+ A { x: 1 } => {}
+ //~^ ERROR expected variant, struct or type alias, found module `A`
+ u32 { x: 1 } => {}
+ //~^ ERROR expected variant, struct or type alias, found builtin type `u32`
}
}
}
fn main() {
- if let C1(..) = 0 {} //~ ERROR expected variant or struct, found constant `C1`
- if let S::C2(..) = 0 {} //~ ERROR `S::C2` does not name a tuple variant or a tuple struct
+ if let C1(..) = 0 {} //~ ERROR expected tuple struct/variant, found constant `C1`
+ if let S::C2(..) = 0 {}
+ //~^ ERROR expected tuple struct/variant, found associated constant `S::C2`
}
macro_rules! log {
( $ctx:expr, $( $args:expr),* ) => {
if $ctx.trace {
- //~^ ERROR attempted access of field `trace` on type `&T`, but no field with that name
+ //~^ no field `trace` on type `&T`
println!( $( $args, )* );
}
}
fn main() {
match Foo::Baz {
Foo::Bar => {}
- //~^ ERROR `Foo::Bar` does not name a unit variant, unit struct or a constant
+ //~^ ERROR expected unit struct/variant or constant, found tuple variant `Foo::Bar`
_ => {}
}
match S {
S(()) => {}
- //~^ ERROR `S` does not name a tuple variant or a tuple struct
+ //~^ ERROR expected tuple struct/variant, found unit struct `S`
}
}
const C: S = S(10);
fn main() {
- let C(a) = S(11); //~ ERROR expected variant or struct, found constant `C`
- let C(..) = S(11); //~ ERROR expected variant or struct, found constant `C`
+ let C(a) = S(11); //~ ERROR expected tuple struct/variant, found constant `C`
+ let C(..) = S(11); //~ ERROR expected tuple struct/variant, found constant `C`
}
let _ = bar!();
}}
-macro_rules! bar { // test issue #31856
+macro_rules! m { // test issue #31856
($n:ident) => (
let a = 1;
let $n = a;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ extern crate rand;
+ use rand::Rng; //~ ERROR unresolved import
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! foo {
+ ( $()* ) => {};
+ //~^ ERROR repetition matches empty token tree
+ ( $()+ ) => {};
+ //~^ ERROR repetition matches empty token tree
+
+ ( $(),* ) => {}; // PASS
+ ( $(),+ ) => {}; // PASS
+
+ ( [$()*] ) => {};
+ //~^ ERROR repetition matches empty token tree
+ ( [$()+] ) => {};
+ //~^ ERROR repetition matches empty token tree
+
+ ( [$(),*] ) => {}; // PASS
+ ( [$(),+] ) => {}; // PASS
+
+ ( $($()* $(),* $(a)* $(a),* )* ) => {};
+ //~^ ERROR repetition matches empty token tree
+ ( $($()* $(),* $(a)* $(a),* )+ ) => {};
+ //~^ ERROR repetition matches empty token tree
+
+ ( $(a $(),* $(a)* $(a),* )* ) => {}; // PASS
+ ( $($(a)+ $(),* $(a)* $(a),* )+ ) => {}; // PASS
+
+ ( $(a $()+)* ) => {};
+ //~^ ERROR repetition matches empty token tree
+ ( $(a $()*)+ ) => {};
+ //~^ ERROR repetition matches empty token tree
+}
+
+
+// --- Original Issue --- //
+
+macro_rules! make_vec {
+ (a $e1:expr $($(, a $e2:expr)*)*) => ([$e1 $($(, $e2)*)*]);
+ //~^ ERROR repetition matches empty token tree
+}
+
+fn main() {
+ let _ = make_vec!(a 1, a 2, a 3);
+}
+
+
+// --- Minified Issue --- //
+
+macro_rules! m {
+ ( $()* ) => {}
+ //~^ ERROR repetition matches empty token tree
+}
+
+m!();
fn main() {
let z = match 3 {
- x(1) => x(1) //~ ERROR unresolved variant or struct `x`
+ x(1) => x(1) //~ ERROR unresolved tuple struct/variant `x`
//~^ ERROR unresolved name `x`
};
assert!(z == 3);
//~| ERROR macro expansion ignores token `typeof`
//~| ERROR macro expansion ignores token `;`
//~| ERROR macro expansion ignores token `;`
- //~| ERROR macro expansion ignores token `i`
}
-m!(); //~ NOTE the usage of `m!` is likely invalid in item context
-
fn main() {
let a: m!(); //~ NOTE the usage of `m!` is likely invalid in type context
let i = m!(); //~ NOTE the usage of `m!` is likely invalid in expression context
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:two_macros.rs
+
+macro_rules! foo { () => {} }
+macro_rules! macro_one { () => {} }
+
+macro_rules! m1 { () => {
+ macro_rules! foo { () => {} } //~ ERROR `foo` is already in scope
+ //~^ NOTE macro-expanded `macro_rules!`s and `#[macro_use]`s may not shadow existing macros
+
+ #[macro_use] //~ ERROR `macro_one` is already in scope
+ //~^ NOTE macro-expanded `macro_rules!`s and `#[macro_use]`s may not shadow existing macros
+ extern crate two_macros;
+}}
+m1!(); //~ NOTE in this expansion
+ //~| NOTE in this expansion
+ //~| NOTE in this expansion
+ //~| NOTE in this expansion
+
+fn f() { macro_one!(); }
+foo!();
+
+macro_rules! m2 { () => {
+ macro_rules! foo { () => {} }
+ #[macro_use] extern crate two_macros as __;
+
+ fn g() { macro_one!(); }
+ foo!();
+}}
+m2!();
+//^ Since `foo` and `macro_one` are not used outside this expansion, they are not shadowing errors.
+
+fn main() {}
color::rgb(_, _, _) => { }
color::cmyk(_, _, _, _) => { }
color::no_color(_) => { }
- //~^ ERROR `color::no_color` does not name a tuple variant or a tuple struct
+ //~^ ERROR expected tuple struct/variant, found unit variant `color::no_color`
}
}
}
fn foo(c: color) {
match c {
color::rgb(_, _) => { }
- //~^ ERROR this pattern has 2 fields, but the corresponding variant has 3 fields
+ //~^ ERROR this pattern has 2 fields, but the corresponding tuple variant has 3 fields
color::cmyk(_, _, _, _) => { }
color::no_color => { }
}
fn main() {
match 0u32 {
- Foo::bar => {} //~ ERROR `Foo::bar` does not name a unit variant, unit struct or a constant
+ Foo::bar => {} //~ ERROR expected unit struct/variant or constant, found method `Foo::bar`
}
match 0u32 {
- <Foo>::bar => {} //~ ERROR `bar` does not name a unit variant, unit struct or a constant
+ <Foo>::bar => {} //~ ERROR expected unit struct/variant or constant, found method `bar`
}
match 0u32 {
<Foo>::trait_bar => {}
- //~^ ERROR `trait_bar` does not name a unit variant, unit struct or a constant
+ //~^ ERROR expected unit struct/variant or constant, found method `trait_bar`
}
}
fn main() {
match 0u32 {
<Foo as MyTrait>::trait_bar => {}
- //~^ ERROR expected variant, struct or constant, found method `trait_bar`
+ //~^ ERROR expected unit struct/variant or constant, found method `MyTrait::trait_bar`
}
}
use std::option::*;
fn main() {
- let None: isize = 42; //~ ERROR let bindings cannot shadow variants
+ let None: isize = 42; //~ ERROR let bindings cannot shadow unit variants
log(debug, None);
//~^ ERROR unresolved name `debug`
//~| ERROR unresolved name `log`
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:namespace-mix-new.rs
+
+#![feature(item_like_imports, relaxed_adts)]
+
+extern crate namespace_mix_new;
+use namespace_mix_new::*;
+
+mod c {
+ pub struct S {}
+ pub struct TS();
+ pub struct US;
+ pub enum E {
+ V {},
+ TV(),
+ UV,
+ }
+
+ pub struct Item;
+}
+
+// Use something emitting the type argument name, e.g. unsatisfied bound.
+trait Impossible {}
+fn check<T: Impossible>(_: T) {}
+
+mod m1 {
+ pub use ::c::*;
+ pub type S = ::c::Item;
+}
+mod m2 {
+ pub use ::c::*;
+ pub const S: ::c::Item = ::c::Item;
+}
+
+fn f12() {
+ check(m1::S{}); //~ ERROR c::Item
+ check(m1::S); //~ ERROR unresolved name
+ check(m2::S{}); //~ ERROR c::S
+ check(m2::S); //~ ERROR c::Item
+}
+fn xf12() {
+ check(xm1::S{}); //~ ERROR c::Item
+ check(xm1::S); //~ ERROR unresolved name
+ check(xm2::S{}); //~ ERROR c::S
+ check(xm2::S); //~ ERROR c::Item
+}
+
+mod m3 {
+ pub use ::c::*;
+ pub type TS = ::c::Item;
+}
+mod m4 {
+ pub use ::c::*;
+ pub const TS: ::c::Item = ::c::Item;
+}
+
+fn f34() {
+ check(m3::TS{}); //~ ERROR c::Item
+ check(m3::TS); //~ ERROR c::TS
+ check(m4::TS{}); //~ ERROR c::TS
+ check(m4::TS); //~ ERROR c::Item
+}
+fn xf34() {
+ check(xm3::TS{}); //~ ERROR c::Item
+ check(xm3::TS); //~ ERROR c::TS
+ check(xm4::TS{}); //~ ERROR c::TS
+ check(xm4::TS); //~ ERROR c::Item
+}
+
+mod m5 {
+ pub use ::c::*;
+ pub type US = ::c::Item;
+}
+mod m6 {
+ pub use ::c::*;
+ pub const US: ::c::Item = ::c::Item;
+}
+
+fn f56() {
+ check(m5::US{}); //~ ERROR c::Item
+ check(m5::US); //~ ERROR c::US
+ check(m6::US{}); //~ ERROR c::US
+ check(m6::US); //~ ERROR c::Item
+}
+fn xf56() {
+ check(xm5::US{}); //~ ERROR c::Item
+ check(xm5::US); //~ ERROR c::US
+ check(xm6::US{}); //~ ERROR c::US
+ check(xm6::US); //~ ERROR c::Item
+}
+
+mod m7 {
+ pub use ::c::E::*;
+ pub type V = ::c::Item;
+}
+mod m8 {
+ pub use ::c::E::*;
+ pub const V: ::c::Item = ::c::Item;
+}
+
+fn f78() {
+ check(m7::V{}); //~ ERROR c::Item
+ check(m7::V); //~ ERROR name of a struct or struct variant
+ check(m8::V{}); //~ ERROR c::E
+ check(m8::V); //~ ERROR c::Item
+}
+fn xf78() {
+ check(xm7::V{}); //~ ERROR c::Item
+ check(xm7::V); //~ ERROR name of a struct or struct variant
+ check(xm8::V{}); //~ ERROR c::E
+ check(xm8::V); //~ ERROR c::Item
+}
+
+mod m9 {
+ pub use ::c::E::*;
+ pub type TV = ::c::Item;
+}
+mod mA {
+ pub use ::c::E::*;
+ pub const TV: ::c::Item = ::c::Item;
+}
+
+fn f9A() {
+ check(m9::TV{}); //~ ERROR c::Item
+ check(m9::TV); //~ ERROR c::E
+ check(mA::TV{}); //~ ERROR c::E
+ check(mA::TV); //~ ERROR c::Item
+}
+fn xf9A() {
+ check(xm9::TV{}); //~ ERROR c::Item
+ check(xm9::TV); //~ ERROR c::E
+ check(xmA::TV{}); //~ ERROR c::E
+ check(xmA::TV); //~ ERROR c::Item
+}
+
+mod mB {
+ pub use ::c::E::*;
+ pub type UV = ::c::Item;
+}
+mod mC {
+ pub use ::c::E::*;
+ pub const UV: ::c::Item = ::c::Item;
+}
+
+fn fBC() {
+ check(mB::UV{}); //~ ERROR c::Item
+ check(mB::UV); //~ ERROR c::E
+ check(mC::UV{}); //~ ERROR c::E
+ check(mC::UV); //~ ERROR c::Item
+}
+fn xfBC() {
+ check(xmB::UV{}); //~ ERROR c::Item
+ check(xmB::UV); //~ ERROR c::E
+ check(xmC::UV{}); //~ ERROR c::E
+ check(xmC::UV); //~ ERROR c::Item
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// FIXME: Remove when `item_like_imports` is stabilized.
+
+// aux-build:namespace-mix-old.rs
+
+#![feature(relaxed_adts)]
+
+extern crate namespace_mix_old;
+use namespace_mix_old::{xm1, xm2, xm3, xm4, xm5, xm6, xm7, xm8, xm9, xmA, xmB, xmC};
+
+mod c {
+ pub struct S {}
+ pub struct TS();
+ pub struct US;
+ pub enum E {
+ V {},
+ TV(),
+ UV,
+ }
+
+ pub struct Item;
+}
+
+mod proxy {
+ pub use c::*;
+ pub use c::E::*;
+}
+
+// Use something emitting the type argument name, e.g. unsatisfied bound.
+trait Impossible {}
+fn check<T: Impossible>(_: T) {}
+
+mod m1 {
+ pub use ::proxy::*;
+ pub type S = ::c::Item;
+}
+mod m2 {
+ pub use ::proxy::*;
+ pub const S: ::c::Item = ::c::Item;
+}
+
+fn f12() {
+ check(m1::S{}); //~ ERROR c::Item
+ check(m1::S); //~ ERROR unresolved name
+ check(m2::S{}); //~ ERROR c::S
+ check(m2::S); //~ ERROR c::Item
+}
+fn xf12() {
+ check(xm1::S{}); //~ ERROR c::Item
+ check(xm1::S); //~ ERROR unresolved name
+ check(xm2::S{}); //~ ERROR c::S
+ check(xm2::S); //~ ERROR c::Item
+}
+
+mod m3 {
+ pub use ::proxy::*;
+ pub type TS = ::c::Item;
+}
+mod m4 {
+ pub use ::proxy::*;
+ pub const TS: ::c::Item = ::c::Item;
+}
+
+fn f34() {
+ check(m3::TS{}); //~ ERROR c::Item
+ check(m3::TS); //~ ERROR c::TS
+ check(m4::TS{}); //~ ERROR c::TS
+ check(m4::TS); //~ ERROR c::Item
+}
+fn xf34() {
+ check(xm3::TS{}); //~ ERROR c::Item
+ check(xm3::TS); //~ ERROR c::TS
+ check(xm4::TS{}); //~ ERROR c::TS
+ check(xm4::TS); //~ ERROR c::Item
+}
+
+mod m5 {
+ pub use ::proxy::*;
+ pub type US = ::c::Item;
+}
+mod m6 {
+ pub use ::proxy::*;
+ pub const US: ::c::Item = ::c::Item;
+}
+
+fn f56() {
+ check(m5::US{}); //~ ERROR c::Item
+ check(m5::US); //~ ERROR c::US
+ check(m6::US{}); //~ ERROR c::US
+ check(m6::US); //~ ERROR c::Item
+}
+fn xf56() {
+ check(xm5::US{}); //~ ERROR c::Item
+ check(xm5::US); //~ ERROR c::US
+ check(xm6::US{}); //~ ERROR c::US
+ check(xm6::US); //~ ERROR c::Item
+}
+
+mod m7 {
+ pub use ::proxy::*;
+ pub type V = ::c::Item;
+}
+mod m8 {
+ pub use ::proxy::*;
+ pub const V: ::c::Item = ::c::Item;
+}
+
+fn f78() {
+ check(m7::V{}); //~ ERROR c::Item
+ check(m7::V); //~ ERROR name of a struct or struct variant
+ check(m8::V{}); //~ ERROR c::E
+ check(m8::V); //~ ERROR c::Item
+}
+fn xf78() {
+ check(xm7::V{}); //~ ERROR c::Item
+ check(xm7::V); //~ ERROR name of a struct or struct variant
+ check(xm8::V{}); //~ ERROR c::E
+ check(xm8::V); //~ ERROR c::Item
+}
+
+mod m9 {
+ pub use ::proxy::*;
+ pub type TV = ::c::Item;
+}
+mod mA {
+ pub use ::proxy::*;
+ pub const TV: ::c::Item = ::c::Item;
+}
+
+fn f9A() {
+ check(m9::TV{}); //~ ERROR c::Item
+ check(m9::TV); //~ ERROR c::E
+ check(mA::TV{}); //~ ERROR c::E
+ check(mA::TV); //~ ERROR c::Item
+}
+fn xf9A() {
+ check(xm9::TV{}); //~ ERROR c::Item
+ check(xm9::TV); //~ ERROR c::E
+ check(xmA::TV{}); //~ ERROR c::E
+ check(xmA::TV); //~ ERROR c::Item
+}
+
+mod mB {
+ pub use ::proxy::*;
+ pub type UV = ::c::Item;
+}
+mod mC {
+ pub use ::proxy::*;
+ pub const UV: ::c::Item = ::c::Item;
+}
+
+fn fBC() {
+ check(mB::UV{}); //~ ERROR c::Item
+ check(mB::UV); //~ ERROR c::E
+ check(mC::UV{}); //~ ERROR c::E
+ check(mC::UV); //~ ERROR c::Item
+}
+fn xfBC() {
+ check(xmB::UV{}); //~ ERROR c::Item
+ check(xmB::UV); //~ ERROR c::E
+ check(xmC::UV{}); //~ ERROR c::E
+ check(xmC::UV); //~ ERROR c::Item
+}
+
+fn main() {}
// Related issues: #20401, #20506, #20614, #20752, #20829, #20846, #20885, #20886
fn main() {
- "".homura[""]; //~ ERROR no field with that name was found
+ "".homura[""]; //~ no field `homura` on type `&'static str`
}
struct foo(usize);
fn main() {
- let (foo, _) = (2, 3); //~ ERROR let bindings cannot shadow structs
+ let (foo, _) = (2, 3); //~ ERROR let bindings cannot shadow tuple structs
}
}
match S(1, 2, 3) {
S(1, 2, 3, 4) => {}
- //~^ ERROR this pattern has 4 fields, but the corresponding struct has 3 fields
+ //~^ ERROR this pattern has 4 fields, but the corresponding tuple struct has 3 fields
S(1, 2, .., 3, 4) => {}
- //~^ ERROR this pattern has 4 fields, but the corresponding struct has 3 fields
+ //~^ ERROR this pattern has 4 fields, but the corresponding tuple struct has 3 fields
_ => {}
}
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+::foo::bar!(); //~ ERROR expected macro name without module separators
+foo::bar!(); //~ ERROR expected macro name without module separators
+
+trait T {
+ foo::bar!(); //~ ERROR expected macro name without module separators
+ ::foo::bar!(); //~ ERROR expected macro name without module separators
+}
+
+struct S {
+ x: foo::bar!(), //~ ERROR expected macro name without module separators
+ y: ::foo::bar!(), //~ ERROR expected macro name without module separators
+}
+
+impl S {
+ foo::bar!(); //~ ERROR expected macro name without module separators
+ ::foo::bar!(); //~ ERROR expected macro name without module separators
+}
+
+fn main() {
+ foo::bar!(); //~ ERROR expected macro name without module separators
+ ::foo::bar!(); //~ ERROR expected macro name without module separators
+
+ let _ = foo::bar!(); //~ ERROR expected macro name without module separators
+ let _ = ::foo::bar!(); //~ ERROR expected macro name without module separators
+
+ let foo::bar!() = 0; //~ ERROR expected macro name without module separators
+ let ::foo::bar!() = 0; //~ ERROR expected macro name without module separators
+}
fn main() {
match A::B(1, 2) {
A::B(_, _, _) => (), //~ ERROR this pattern has 3 fields, but
- A::D(_) => (), //~ ERROR `A::D` does not name a tuple variant or a tuple struct
+ A::D(_) => (), //~ ERROR expected tuple struct/variant, found unit variant `A::D`
_ => ()
}
match 'c' {
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(platform_intrinsics)]
+extern "platform-intrinsic" {
+ fn x86_mm_movemask_ps() -> i32; //~ERROR found 0, expected 1
+}
+
+fn main() { }
fn main() {
match 10 {
<S as Tr>::A::f::<u8> => {}
- //~^ ERROR `Tr::A::f<u8>` does not name a unit variant, unit struct or a constant
+ //~^ ERROR expected unit struct/variant or constant, found method `Tr::A::f<u8>`
0 ... <S as Tr>::A::f::<u8> => {} //~ ERROR only char and numeric types are allowed in range
}
}
// except according to those terms.
fn main() {
- assert(true); //~ERROR unresolved name `assert`. Did you mean the macro `assert!`?
+ assert(true);
+ //~^ ERROR unresolved name `assert`
+ //~| NOTE did you mean the macro `assert!`?
}
ref mut Self => (),
//~^ ERROR expected identifier, found keyword `Self`
Self!() => (),
- //~^ ERROR expected identifier, found keyword `Self`
- //~^^ ERROR macro undefined: 'Self!'
+ //~^ ERROR macro undefined: 'Self!'
Foo { x: Self } => (),
//~^ ERROR expected identifier, found keyword `Self`
Foo { Self } => (),
}
}
-use std::option::Option as Self;
-//~^ ERROR expected identifier, found keyword `Self`
+mod m1 {
+ extern crate core as Self;
+ //~^ ERROR expected identifier, found keyword `Self`
+}
-extern crate Self;
-//~^ ERROR expected identifier, found keyword `Self`
+mod m2 {
+ use std::option::Option as Self;
+ //~^ ERROR expected identifier, found keyword `Self`
+}
-trait Self {}
-//~^ ERROR expected identifier, found keyword `Self`
+mod m3 {
+ trait Self {}
+ //~^ ERROR expected identifier, found keyword `Self`
+}
foo: 0,
bar: 0.5,
};
- let x = foo.baa;//~ ERROR attempted access of field `baa` on type `BuildData`
- //~^ HELP did you mean `bar`?
+ let x = foo.baa;//~ no field `baa` on type `BuildData`
+ //~^ did you mean `bar`?
println!("{}", x);
}
impl a {
fn foo(&self) {
- let a { x, y } = self.d; //~ ERROR attempted access of field `d`
+ let a { x, y } = self.d; //~ ERROR no field `d` on type `&a`
//~^ ERROR struct `a` does not have a field named `x`
//~^^ ERROR struct `a` does not have a field named `y`
//~^^^ ERROR pattern does not mention field `b`
fn main() {
if foo { //~ NOTE: unclosed delimiter
//~^ ERROR: unresolved name `foo`
+ //~| NOTE unresolved name
) //~ ERROR: incorrect close delimiter: `)`
}
callback: F)
-> io::Result<bool> {
if !is_directory(path.as_ref()) { //~ ERROR: unresolved name `is_directory`
+ //~| NOTE unresolved name
callback(path.as_ref(); //~ NOTE: unclosed delimiter
//~^ ERROR: expected one of
fs::create_dir_all(path.as_ref()).map(|()| true) //~ ERROR: mismatched types
//~^^^ ERROR: unresolved name `bar`
//~^^^^ ERROR: unresolved name `foo`
//~^^^^^ ERROR: expected one of `)`, `,`, `.`, `<`, `?`
+ //~| NOTE unresolved name
+ //~| NOTE unresolved name
} //~ ERROR: incorrect close delimiter: `}`
//~^ ERROR: incorrect close delimiter: `}`
//~^^ ERROR: expected expression, found `)`
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-fn main() {
- let foo = 1;
-
- // `foo` shouldn't be suggested, it is too dissimilar from `bar`.
- println!("Hello {}", bar);
- //~^ ERROR: unresolved name `bar`
-
- // But this is close enough.
- println!("Hello {}", fob);
- //~^ ERROR: unresolved name `fob`. Did you mean `foo`?
-}
fn f<F:Trait(isize) -> isize>(x: F) {}
//~^ ERROR E0244
//~| NOTE expected no type arguments, found 1
-//~| ERROR associated type `Output` not found
+//~| ERROR E0220
+//~| NOTE associated type `Output` not found
fn main() {}
let u = U { principle: 0 };
//~^ ERROR union `U` has no field named `principle`
//~| NOTE field does not exist - did you mean `principal`?
- let w = u.principial; //~ ERROR attempted access of field `principial` on type `U`
- //~^ HELP did you mean `principal`?
+ let w = u.principial; //~ ERROR no field `principial` on type `U`
+ //~^ did you mean `principal`?
let y = u.calculate; //~ ERROR attempted to take value of method `calculate` on type `U`
//~^ HELP maybe a `()` to call it is missing?
// are prohibited by various checks, such as that the enum is
// instantiable and so forth).
- return p.f; //~ ERROR attempted access of field `f` on type `*const Rec`
+ return p.f; //~ ERROR no field `f` on type `*const Rec`
}
fn main() {
foo::<static_priv_by_default::m>();
//~^ ERROR: enum `m` is private
foo::<static_priv_by_default::n>();
- //~^ ERROR: type `n` is private
+ //~^ ERROR: type alias `n` is private
// public items in a private mod should be inaccessible
static_priv_by_default::foo::a;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test case makes sure that the compiler does not try to re-use anything
+// from the incremental compilation cache if the cache was produced by a
+// different compiler version. This is tested by artificially forcing the
+// emission of a different compiler version in the header of rpass1 artifacts,
+// and then making sure that the only object file of the test program gets
+// re-translated although the program stays unchanged.
+
+// The `l33t haxx0r` Rust compiler is known to produce incr. comp. artifacts
+// that are outrageously incompatible with just about anything, even itself:
+//[rpass1] rustc-env:RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER="l33t haxx0r rustc 2.1 LTS"
+
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+#![rustc_partition_translated(module="cache_file_headers", cfg="rpass2")]
+
+fn main() {
+ // empty
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="rlib"]
+
+pub static A : u32 = 32;
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type="rlib"]
+
+pub static B: u32 = 32;
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:a.rs
+// aux-build:b.rs
+// revisions:rpass1 rpass2
+
+#![feature(rustc_attrs)]
+
+
+#[cfg(rpass1)]
+extern crate a;
+#[cfg(rpass1)]
+extern crate b;
+
+#[cfg(rpass2)]
+extern crate b;
+#[cfg(rpass2)]
+extern crate a;
+
+use a::A;
+use b::B;
+
+//? #[rustc_clean(label="TypeckItemBody", cfg="rpass2")]
+pub fn main() {
+ A + B;
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// This test case tests the incremental compilation hash (ICH) implementation
+// for enum definitions.
+
+// The general pattern followed here is: Change one thing between rev1 and rev2
+// and make sure that the hash has changed, then change nothing between rev2 and
+// rev3 and make sure that the hash has not changed.
+
+// We also test the ICH for enum definitions exported in metadata. Same as
+// above, we want to make sure that the change between rev1 and rev2 also
+// results in a change of the ICH for the enum's metadata, and that it stays
+// the same between rev2 and rev3.
+
+// must-compile-successfully
+// revisions: cfail1 cfail2 cfail3
+// compile-flags: -Z query-dep-graph
+
+#![allow(warnings)]
+#![feature(rustc_attrs)]
+#![crate_type="rlib"]
+
+
+
+// Change enum visibility -----------------------------------------------------
+#[cfg(cfail1)]
+enum EnumVisibility { A }
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_clean(cfg="cfail3")]
+pub enum EnumVisibility { A }
+
+
+
+// Change name of a c-style variant -------------------------------------------
+#[cfg(cfail1)]
+enum EnumChangeNameCStyleVariant {
+ Variant1,
+ Variant2,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeNameCStyleVariant {
+ Variant1,
+ Variant2Changed,
+}
+
+
+
+// Change name of a tuple-style variant ---------------------------------------
+#[cfg(cfail1)]
+enum EnumChangeNameTupleStyleVariant {
+ Variant1,
+ Variant2(u32, f32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeNameTupleStyleVariant {
+ Variant1,
+ Variant2Changed(u32, f32),
+}
+
+
+
+// Change name of a struct-style variant --------------------------------------
+#[cfg(cfail1)]
+enum EnumChangeNameStructStyleVariant {
+ Variant1,
+ Variant2 { a: u32, b: f32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeNameStructStyleVariant {
+ Variant1,
+ Variant2Changed { a: u32, b: f32 },
+}
+
+
+
+// Change the value of a c-style variant --------------------------------------
+#[cfg(cfail1)]
+enum EnumChangeValueCStyleVariant0 {
+ Variant1,
+ Variant2 = 11,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeValueCStyleVariant0 {
+ Variant1,
+ Variant2 = 22,
+}
+
+#[cfg(cfail1)]
+enum EnumChangeValueCStyleVariant1 {
+ Variant1,
+ Variant2,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeValueCStyleVariant1 {
+ Variant1,
+ Variant2 = 11,
+}
+
+
+
+// Add a c-style variant ------------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddCStyleVariant {
+ Variant1,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumAddCStyleVariant {
+ Variant1,
+ Variant2,
+}
+
+
+
+// Remove a c-style variant ---------------------------------------------------
+#[cfg(cfail1)]
+enum EnumRemoveCStyleVariant {
+ Variant1,
+ Variant2,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumRemoveCStyleVariant {
+ Variant1,
+}
+
+
+
+// Add a tuple-style variant --------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddTupleStyleVariant {
+ Variant1,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumAddTupleStyleVariant {
+ Variant1,
+ Variant2(u32, f32),
+}
+
+
+
+// Remove a tuple-style variant -----------------------------------------------
+#[cfg(cfail1)]
+enum EnumRemoveTupleStyleVariant {
+ Variant1,
+ Variant2(u32, f32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumRemoveTupleStyleVariant {
+ Variant1,
+}
+
+
+
+// Add a struct-style variant -------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddStructStyleVariant {
+ Variant1,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumAddStructStyleVariant {
+ Variant1,
+ Variant2 { a: u32, b: f32 },
+}
+
+
+
+// Remove a struct-style variant ----------------------------------------------
+#[cfg(cfail1)]
+enum EnumRemoveStructStyleVariant {
+ Variant1,
+ Variant2 { a: u32, b: f32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumRemoveStructStyleVariant {
+ Variant1,
+}
+
+
+
+// Change the type of a field in a tuple-style variant ------------------------
+#[cfg(cfail1)]
+enum EnumChangeFieldTypeTupleStyleVariant {
+ Variant1(u32, u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeFieldTypeTupleStyleVariant {
+ Variant1(u32, u64),
+}
+
+
+
+// Change the type of a field in a struct-style variant -----------------------
+#[cfg(cfail1)]
+enum EnumChangeFieldTypeStructStyleVariant {
+ Variant1,
+ Variant2 { a: u32, b: u32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeFieldTypeStructStyleVariant {
+ Variant1,
+ Variant2 { a: u32, b: u64 },
+}
+
+
+
+// Change the name of a field in a struct-style variant -----------------------
+#[cfg(cfail1)]
+enum EnumChangeFieldNameStructStyleVariant {
+ Variant1 { a: u32, b: u32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeFieldNameStructStyleVariant {
+ Variant1 { a: u32, c: u32 },
+}
+
+
+
+// Change order of fields in a tuple-style variant ----------------------------
+#[cfg(cfail1)]
+enum EnumChangeOrderTupleStyleVariant {
+ Variant1(u32, u64),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeOrderTupleStyleVariant {
+ Variant1(u64, u32),
+}
+
+
+
+// Change order of fields in a struct-style variant ---------------------------
+#[cfg(cfail1)]
+enum EnumChangeFieldOrderStructStyleVariant {
+ Variant1 { a: u32, b: f32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumChangeFieldOrderStructStyleVariant {
+ Variant1 { b: f32, a: u32 },
+}
+
+
+
+// Add a field to a tuple-style variant ---------------------------------------
+#[cfg(cfail1)]
+enum EnumAddFieldTupleStyleVariant {
+ Variant1(u32, u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumAddFieldTupleStyleVariant {
+ Variant1(u32, u32, u32),
+}
+
+
+
+// Add a field to a struct-style variant --------------------------------------
+#[cfg(cfail1)]
+enum EnumAddFieldStructStyleVariant {
+ Variant1 { a: u32, b: u32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumAddFieldStructStyleVariant {
+ Variant1 { a: u32, b: u32, c: u32 },
+}
+
+
+
+// Add #[must_use] to the enum ------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddMustUse {
+ Variant1,
+ Variant2,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[must_use]
+enum EnumAddMustUse {
+ Variant1,
+ Variant2,
+}
+
+
+
+// Add #[repr(C)] to the enum -------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddReprC {
+ Variant1,
+ Variant2,
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddReprC {
+ Variant1,
+ Variant2,
+}
+
+
+
+// Change the name of a type parameter ----------------------------------------
+#[cfg(cfail1)]
+enum EnumChangeNameOfTypeParameter<S> {
+ Variant1(S),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumChangeNameOfTypeParameter<T> {
+ Variant1(T),
+}
+
+
+
+// Add a type parameter ------------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddTypeParameter<S> {
+ Variant1(S),
+ Variant2(S),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddTypeParameter<S, T> {
+ Variant1(S),
+ Variant2(T),
+}
+
+
+
+// Change the name of a lifetime parameter ------------------------------------
+#[cfg(cfail1)]
+enum EnumChangeNameOfLifetimeParameter<'a> {
+ Variant1(&'a u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumChangeNameOfLifetimeParameter<'b> {
+ Variant1(&'b u32),
+}
+
+
+
+// Add a lifetime parameter ---------------------------------------------------
+#[cfg(cfail1)]
+enum EnumAddLifetimeParameter<'a> {
+ Variant1(&'a u32),
+ Variant2(&'a u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddLifetimeParameter<'a, 'b> {
+ Variant1(&'a u32),
+ Variant2(&'b u32),
+}
+
+
+
+// Add a lifetime bound to a lifetime parameter -------------------------------
+#[cfg(cfail1)]
+enum EnumAddLifetimeParameterBound<'a, 'b> {
+ Variant1(&'a u32),
+ Variant2(&'b u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddLifetimeParameterBound<'a, 'b: 'a> {
+ Variant1(&'a u32),
+ Variant2(&'b u32),
+}
+
+// Add a lifetime bound to a type parameter -----------------------------------
+#[cfg(cfail1)]
+enum EnumAddLifetimeBoundToParameter<'a, T> {
+ Variant1(T),
+ Variant2(&'a u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddLifetimeBoundToParameter<'a, T: 'a> {
+ Variant1(T),
+ Variant2(&'a u32),
+}
+
+
+
+// Add a trait bound to a type parameter --------------------------------------
+#[cfg(cfail1)]
+enum EnumAddTraitBound<S> {
+ Variant1(S),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddTraitBound<T: Sync> {
+ Variant1(T),
+}
+
+
+
+// Add a lifetime bound to a lifetime parameter in where clause ---------------
+#[cfg(cfail1)]
+enum EnumAddLifetimeParameterBoundWhere<'a, 'b> {
+ Variant1(&'a u32),
+ Variant2(&'b u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddLifetimeParameterBoundWhere<'a, 'b> where 'b: 'a {
+ Variant1(&'a u32),
+ Variant2(&'b u32),
+}
+
+
+
+// Add a lifetime bound to a type parameter in where clause -------------------
+#[cfg(cfail1)]
+enum EnumAddLifetimeBoundToParameterWhere<'a, T> {
+ Variant1(T),
+ Variant2(&'a u32),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddLifetimeBoundToParameterWhere<'a, T> where T: 'a {
+ Variant1(T),
+ Variant2(&'a u32),
+}
+
+
+
+// Add a trait bound to a type parameter in where clause ----------------------
+#[cfg(cfail1)]
+enum EnumAddTraitBoundWhere<S> {
+ Variant1(S),
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[repr(C)]
+enum EnumAddTraitBoundWhere<T> where T: Sync {
+ Variant1(T),
+}
+
+
+
+// In an enum with two variants, swap usage of type parameters ----------------
+#[cfg(cfail1)]
+enum EnumSwapUsageTypeParameters<A, B> {
+ Variant1 { a: A },
+ Variant2 { a: B },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumSwapUsageTypeParameters<A, B> {
+ Variant1 { a: B },
+ Variant2 { a: A },
+}
+
+
+
+// In an enum with two variants, swap usage of lifetime parameters ------------
+#[cfg(cfail1)]
+enum EnumSwapUsageLifetimeParameters<'a, 'b> {
+ Variant1 { a: &'a u32 },
+ Variant2 { b: &'b u32 },
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+enum EnumSwapUsageLifetimeParameters<'a, 'b> {
+ Variant1 { a: &'b u32 },
+ Variant2 { b: &'a u32 },
+}
+
+
+
+struct ReferencedType1;
+struct ReferencedType2;
+
+
+
+// Change field type in tuple-style variant indirectly by modifying a use statement
+mod change_field_type_indirectly_tuple_style {
+ #[cfg(cfail1)]
+ use super::ReferencedType1 as FieldType;
+ #[cfg(not(cfail1))]
+ use super::ReferencedType2 as FieldType;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ enum TupleStyle {
+ Variant1(FieldType)
+ }
+}
+
+
+
+// Change field type in record-style variant indirectly by modifying a use statement
+mod change_field_type_indirectly_struct_style {
+ #[cfg(cfail1)]
+ use super::ReferencedType1 as FieldType;
+ #[cfg(not(cfail1))]
+ use super::ReferencedType2 as FieldType;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ enum StructStyle {
+ Variant1 { a: FieldType }
+ }
+}
+
+
+
+trait ReferencedTrait1 {}
+trait ReferencedTrait2 {}
+
+
+
+// Change trait bound of type parameter indirectly by modifying a use statement
+mod change_trait_bound_indirectly {
+ #[cfg(cfail1)]
+ use super::ReferencedTrait1 as Trait;
+ #[cfg(not(cfail1))]
+ use super::ReferencedTrait2 as Trait;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ enum Enum<T: Trait> {
+ Variant1(T)
+ }
+}
+
+
+
+// Change trait bound of type parameter in where clause indirectly by modifying a use statement
+mod change_trait_bound_indirectly_where {
+ #[cfg(cfail1)]
+ use super::ReferencedTrait1 as Trait;
+ #[cfg(not(cfail1))]
+ use super::ReferencedTrait2 as Trait;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ enum Enum<T> where T: Trait {
+ Variant1(T)
+ }
+}
+
+
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+// This test case tests the incremental compilation hash (ICH) implementation
+// for function interfaces.
+
+// The general pattern followed here is: Change one thing between rev1 and rev2
+// and make sure that the hash has changed, then change nothing between rev2 and
+// rev3 and make sure that the hash has not changed.
+
+// must-compile-successfully
+// revisions: cfail1 cfail2 cfail3
+// compile-flags: -Z query-dep-graph
+
+
+#![allow(warnings)]
+#![feature(conservative_impl_trait)]
+#![feature(intrinsics)]
+#![feature(linkage)]
+#![feature(rustc_attrs)]
+#![crate_type="rlib"]
+
+
+// Add Parameter ---------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn add_parameter() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn add_parameter(p: i32) {}
+
+
+// Add Return Type -------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn add_return_type() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn add_return_type() -> () {}
+
+
+// Change Parameter Type -------------------------------------------------------
+
+#[cfg(cfail1)]
+fn type_of_parameter(p: i32) {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn type_of_parameter(p: i64) {}
+
+
+// Change Parameter Type Reference ---------------------------------------------
+
+#[cfg(cfail1)]
+fn type_of_parameter_ref(p: &i32) {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn type_of_parameter_ref(p: &mut i32) {}
+
+
+// Change Parameter Order ------------------------------------------------------
+
+#[cfg(cfail1)]
+fn order_of_parameters(p1: i32, p2: i64) {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn order_of_parameters(p2: i64, p1: i32) {}
+
+
+// Unsafe ----------------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn make_unsafe() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+unsafe fn make_unsafe() {}
+
+
+// Extern ----------------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn make_extern() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+extern fn make_extern() {}
+
+
+// Extern C Extern Rust-Intrinsic ----------------------------------------------
+
+#[cfg(cfail1)]
+extern "C" fn make_intrinsic() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+extern "rust-intrinsic" fn make_intrinsic() {}
+
+
+// Type Parameter --------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn type_parameter() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn type_parameter<T>() {}
+
+
+// Lifetime Parameter ----------------------------------------------------------
+
+#[cfg(cfail1)]
+fn lifetime_parameter() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn lifetime_parameter<'a>() {}
+
+
+// Trait Bound -----------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn trait_bound<T>() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn trait_bound<T: Eq>() {}
+
+
+// Builtin Bound ---------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn builtin_bound<T>() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn builtin_bound<T: Send>() {}
+
+
+// Lifetime Bound --------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn lifetime_bound<'a, T>() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn lifetime_bound<'a, T: 'a>() {}
+
+
+// Second Trait Bound ----------------------------------------------------------
+
+#[cfg(cfail1)]
+fn second_trait_bound<T: Eq>() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn second_trait_bound<T: Eq + Clone>() {}
+
+
+// Second Builtin Bound --------------------------------------------------------
+
+#[cfg(cfail1)]
+fn second_builtin_bound<T: Send>() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn second_builtin_bound<T: Send + Sized>() {}
+
+
+// Second Lifetime Bound -------------------------------------------------------
+
+#[cfg(cfail1)]
+fn second_lifetime_bound<'a, 'b, T: 'a>() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn second_lifetime_bound<'a, 'b, T: 'a + 'b>() {}
+
+
+// Inline ----------------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn inline() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[inline]
+fn inline() {}
+
+
+// Inline Never ----------------------------------------------------------------
+
+#[cfg(cfail1)]
+#[inline(always)]
+fn inline_never() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[inline(never)]
+fn inline_never() {}
+
+
+// No Mangle -------------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn no_mangle() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[no_mangle]
+fn no_mangle() {}
+
+
+// Linkage ---------------------------------------------------------------------
+
+#[cfg(cfail1)]
+fn linkage() {}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+#[linkage="weak_odr"]
+fn linkage() {}
+
+
+// Return Impl Trait -----------------------------------------------------------
+
+#[cfg(cfail1)]
+fn return_impl_trait() -> i32 {
+ 0
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn return_impl_trait() -> impl Clone {
+ 0
+}
+
+
+// Change Return Impl Trait ----------------------------------------------------
+
+#[cfg(cfail1)]
+fn change_return_impl_trait() -> impl Clone {
+ 0
+}
+
+#[cfg(not(cfail1))]
+#[rustc_dirty(label="Hir", cfg="cfail2")]
+#[rustc_clean(label="Hir", cfg="cfail3")]
+#[rustc_metadata_dirty(cfg="cfail2")]
+#[rustc_metadata_clean(cfg="cfail3")]
+fn change_return_impl_trait() -> impl Copy {
+ 0
+}
+
+
+// Change Return Type Indirectly -----------------------------------------------
+
+struct ReferencedType1;
+struct ReferencedType2;
+
+mod change_return_type_indirectly {
+ #[cfg(cfail1)]
+ use super::ReferencedType1 as ReturnType;
+ #[cfg(not(cfail1))]
+ use super::ReferencedType2 as ReturnType;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn indirect_return_type() -> ReturnType {
+ ReturnType {}
+ }
+}
+
+
+// Change Parameter Type Indirectly --------------------------------------------
+
+mod change_parameter_type_indirectly {
+ #[cfg(cfail1)]
+ use super::ReferencedType1 as ParameterType;
+ #[cfg(not(cfail1))]
+ use super::ReferencedType2 as ParameterType;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn indirect_parameter_type(p: ParameterType) {}
+}
+
+
+// Change Trait Bound Indirectly -----------------------------------------------
+
+trait ReferencedTrait1 {}
+trait ReferencedTrait2 {}
+
+mod change_trait_bound_indirectly {
+ #[cfg(cfail1)]
+ use super::ReferencedTrait1 as Trait;
+ #[cfg(not(cfail1))]
+ use super::ReferencedTrait2 as Trait;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn indirect_trait_bound<T: Trait>(p: T) {}
+}
+
+
+// Change Trait Bound Indirectly In Where Clause -------------------------------
+
+mod change_trait_bound_indirectly_in_where_clause {
+ #[cfg(cfail1)]
+ use super::ReferencedTrait1 as Trait;
+ #[cfg(not(cfail1))]
+ use super::ReferencedTrait2 as Trait;
+
+ #[rustc_dirty(label="Hir", cfg="cfail2")]
+ #[rustc_clean(label="Hir", cfg="cfail3")]
+ #[rustc_metadata_dirty(cfg="cfail2")]
+ #[rustc_metadata_clean(cfg="cfail3")]
+ fn indirect_trait_bound_where<T>(p: T) where T: Trait {}
+}
let x: X = X { x: 22 };
//[cfail2]~^ ERROR struct `X` has no field named `x`
x.x as u32
- //[cfail2]~^ ERROR attempted access of field `x`
+ //[cfail2]~^ ERROR no field `x` on type `X`
}
#[rustc_dirty(label="TypeckItemBody", cfg="cfail2")]
pub fn use_EmbedX(embed: EmbedX) -> u32 {
embed.x.x as u32
- //[cfail2]~^ ERROR attempted access of field `x`
+ //[cfail2]~^ ERROR no field `x` on type `X`
}
#[rustc_clean(label="TypeckItemBody", cfg="cfail2")]
// END RUST SOURCE
// START rustc.node13.Deaggregator.before.mir
// bb0: {
-// var0 = arg0; // scope 0 at main.rs:8:8: 8:9
-// tmp0 = var0; // scope 1 at main.rs:9:14: 9:15
-// return = Baz { x: tmp0, y: const F32(0), z: const false }; // scope ...
-// goto -> bb1; // scope 1 at main.rs:8:1: 10:2
+// _2 = _1;
+// _3 = _2;
+// _0 = Baz { x: _3, y: const F32(0), z: const false };
+// return;
// }
// END rustc.node13.Deaggregator.before.mir
// START rustc.node13.Deaggregator.after.mir
// bb0: {
-// var0 = arg0; // scope 0 at main.rs:8:8: 8:9
-// tmp0 = var0; // scope 1 at main.rs:9:14: 9:15
-// (return.0: usize) = tmp0; // scope 1 at main.rs:9:5: 9:34
-// (return.1: f32) = const F32(0); // scope 1 at main.rs:9:5: 9:34
-// (return.2: bool) = const false; // scope 1 at main.rs:9:5: 9:34
-// goto -> bb1; // scope 1 at main.rs:8:1: 10:2
+// _2 = _1;
+// _3 = _2;
+// (_0.0: usize) = _3;
+// (_0.1: f32) = const F32(0);
+// (_0.2: bool) = const false;
+// return;
// }
-// END rustc.node13.Deaggregator.after.mir
\ No newline at end of file
+// END rustc.node13.Deaggregator.after.mir
// END RUST SOURCE
// START rustc.node10.Deaggregator.before.mir
// bb0: {
-// var0 = arg0; // scope 0 at main.rs:7:8: 7:9
-// tmp0 = var0; // scope 1 at main.rs:8:19: 8:20
-// return = Baz::Foo { x: tmp0 }; // scope 1 at main.rs:8:5: 8:21
-// goto -> bb1; // scope 1 at main.rs:7:1: 9:2
+// _2 = _1;
+// _3 = _2;
+// _0 = Baz::Foo { x: _3 };
+// return;
// }
// END rustc.node10.Deaggregator.before.mir
// START rustc.node10.Deaggregator.after.mir
// bb0: {
-// var0 = arg0; // scope 0 at main.rs:7:8: 7:9
-// tmp0 = var0; // scope 1 at main.rs:8:19: 8:20
-// ((return as Foo).0: usize) = tmp0; // scope 1 at main.rs:8:5: 8:21
-// discriminant(return) = 1; // scope 1 at main.rs:8:5: 8:21
-// goto -> bb1; // scope 1 at main.rs:7:1: 9:2
+// _2 = _1;
+// _3 = _2;
+// ((_0 as Foo).0: usize) = _3;
+// discriminant(_0) = 1;
+// return;
// }
-// END rustc.node10.Deaggregator.after.mir
\ No newline at end of file
+// END rustc.node10.Deaggregator.after.mir
// END RUST SOURCE
// START rustc.node4.SimplifyBranches.initial-before.mir
// bb0: {
-// if(const false) -> [true: bb1, false: bb2]; // scope 0 at simplify_if.rs:12:5: 14:6
+// if(const false) -> [true: bb1, false: bb2];
// }
// END rustc.node4.SimplifyBranches.initial-before.mir
// START rustc.node4.SimplifyBranches.initial-after.mir
// bb0: {
-// goto -> bb2; // scope 0 at simplify_if.rs:12:5: 14:6
+// goto -> bb2;
// }
-// END rustc.node4.SimplifyBranches.initial-after.mir
\ No newline at end of file
+// END rustc.node4.SimplifyBranches.initial-after.mir
// END RUST SOURCE
// START rustc.node4.TypeckMir.before.mir
// bb0: {
-// StorageLive(var0); // scope 0 at storage_ranges.rs:14:9: 14:10
-// var0 = const 0i32; // scope 0 at storage_ranges.rs:14:13: 14:14
-// StorageLive(var1); // scope 1 at storage_ranges.rs:16:13: 16:14
-// StorageLive(tmp1); // scope 1 at storage_ranges.rs:16:18: 16:25
-// StorageLive(tmp2); // scope 1 at storage_ranges.rs:16:23: 16:24
-// tmp2 = var0; // scope 1 at storage_ranges.rs:16:23: 16:24
-// tmp1 = std::option::Option<i32>::Some(tmp2,); // scope 1 at storage_ranges.rs:16:18: 16:25
-// var1 = &tmp1; // scope 1 at storage_ranges.rs:16:17: 16:25
-// StorageDead(tmp2); // scope 1 at storage_ranges.rs:16:23: 16:24
-// tmp0 = (); // scope 2 at storage_ranges.rs:15:5: 17:6
-// StorageDead(tmp1); // scope 1 at storage_ranges.rs:16:18: 16:25
-// StorageDead(var1); // scope 1 at storage_ranges.rs:16:13: 16:14
-// StorageLive(var2); // scope 1 at storage_ranges.rs:18:9: 18:10
-// var2 = const 1i32; // scope 1 at storage_ranges.rs:18:13: 18:14
-// return = (); // scope 3 at storage_ranges.rs:13:11: 19:2
-// StorageDead(var2); // scope 1 at storage_ranges.rs:18:9: 18:10
-// StorageDead(var0); // scope 0 at storage_ranges.rs:14:9: 14:10
-// goto -> bb1; // scope 0 at storage_ranges.rs:13:1: 19:2
-// }
-//
-// bb1: {
-// return; // scope 0 at storage_ranges.rs:13:1: 19:2
+// StorageLive(_1);
+// _1 = const 0i32;
+// StorageLive(_3);
+// StorageLive(_4);
+// StorageLive(_5);
+// _5 = _1;
+// _4 = std::option::Option<i32>::Some(_5,);
+// _3 = &_4;
+// StorageDead(_5);
+// _2 = ();
+// StorageDead(_4);
+// StorageDead(_3);
+// StorageLive(_6);
+// _6 = const 1i32;
+// _0 = ();
+// StorageDead(_6);
+// StorageDead(_1);
+// return;
// }
// END rustc.node4.TypeckMir.before.mir
// compile-flags: -Z parse-only
extern {
- f(); //~ ERROR expected one of `fn`, `pub`, `static`, or `}`, found `f`
+ f(); //~ ERROR expected one of `!` or `::`, found `(`
}
fn main() {
// compile-flags: -Z parse-only
trait MyTrait<T>: Iterator {
- Item = T; //~ ERROR expected one of `const`, `extern`, `fn`, `type`, or `unsafe`, found `Item`
+ Item = T; //~ ERROR expected one of `!` or `::`, found `=`
+ //~| ERROR expected item, found `=`
}
// except according to those terms.
// error-pattern:thread '<unnamed>' panicked at 'test'
+// ignore-emscripten Needs threads
use std::thread;
// except according to those terms.
// error-pattern:thread 'owned name' panicked at 'test'
+// ignore-emscripten Needs threads.
use std::thread::Builder;
// except according to those terms.
// error-pattern:Ensure that the child thread runs by panicking
+// ignore-emscripten Needs threads.
use std::thread;
// error-pattern:thread 'test_foo' panicked at
// compile-flags: --test
// ignore-pretty: does not work well with `--test`
+// ignore-emscripten
#[test]
fn test_foo() {
// error-pattern:thread 'test_foo' panicked at
// compile-flags: --test
// ignore-pretty: does not work well with `--test`
+// ignore-emscripten
#[test]
#[should_panic(expected = "foobar")]
// compile-flags: --test
// exec-env:RUST_TEST_THREADS=foo
// ignore-pretty: does not work well with `--test`
+// ignore-emscripten
#[test]
fn do_nothing() {}
--- /dev/null
+-include ../tools.mk
+RUSTC_FLAGS = -C link-arg="-lfoo" -C link-arg="-lbar" -Z print-link-args
+
+all:
+ $(RUSTC) $(RUSTC_FLAGS) empty.rs | grep lfoo | grep lbar
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() { }
format!("_ _ --sysroot {} --crate-type dylib", path.to_str().unwrap())
.split(' ').map(|s| s.to_string()).collect();
- let (result, _) = rustc_driver::run_compiler_with_file_loader(
- &args, &mut JitCalls, box JitLoader);
+ let (result, _) = rustc_driver::run_compiler(
+ &args, &mut JitCalls, Some(box JitLoader), None);
if let Err(n) = result {
panic!("Error {}", n);
}
--- /dev/null
+-include ../tools.mk
+
+all:
+ $(RUSTC) foo.rs
+ $(RUSTC) bar.rs --emit dep-info
+ grep "rustc-macro source" $(TMPDIR)/bar.d && exit 1 || exit 0
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(rustc_macro)]
+
+#[macro_use]
+extern crate foo;
+
+#[derive(A)]
+struct A;
+
+fn main() {
+ let _b = B;
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "rustc-macro"]
+#![feature(rustc_macro)]
+#![feature(rustc_macro_lib)]
+
+extern crate rustc_macro;
+
+use rustc_macro::TokenStream;
+
+#[rustc_macro_derive(A)]
+pub fn derive(input: TokenStream) -> TokenStream {
+ let input = input.to_string();
+ assert!(input.contains("struct A;"));
+ "struct B;".parse().unwrap()
+}
-include ../tools.mk
-# This test case makes sure that monomorphizations of the same function with the
-# same set of generic arguments will have the same symbol names when
-# instantiated in different crates.
+# The following command will:
+# 1. dump the symbols of a library using `nm`
+# 2. extract only those lines that we are interested in via `grep`
+# 3. from those lines, extract just the symbol name via `sed`
+# (symbol names always start with "_ZN" and end with "E")
+# 4. sort those symbol names for deterministic comparison
+# 5. write the result into a file
dump-symbols = nm "$(TMPDIR)/lib$(1).rlib" \
- | grep "some_test_function" \
- | sed "s/^[0-9a-f]\{8,16\}/00000000/" \
+ | grep -E "some_test_function|Bar|bar" \
+ | sed "s/.*\(_ZN.*E\).*/\1/" \
| sort \
> "$(TMPDIR)/$(1).nm"
#![crate_type="rlib"]
+pub trait Foo {
+ fn foo<T>();
+}
+
+pub struct Bar;
+
+impl Foo for Bar {
+ fn foo<T>() {}
+}
+
+pub fn bar() {
+ Bar::foo::<Bar>();
+}
+
pub fn some_test_function<T>(t: T) -> T {
t
}
let x = 2u64;
stable_symbol_names1::some_test_function(&x);
}
+
+pub fn trait_impl_test_function() {
+ use stable_symbol_names1::*;
+ Bar::foo::<Bar>();
+ bar();
+}
--- /dev/null
+-include ../tools.mk
+
+# The target used below doesn't support atomic operations. Verify that's the case
+all:
+ $(RUSTC) --print cfg --target thumbv6m-none-eabi | grep -qv target_has_atomic
let mut tc = TestCalls { count: 1 };
// we should never get use this filename, but lets make sure they are valid args.
let args = vec!["compiler-calls".to_string(), "foo.rs".to_string()];
- rustc_driver::run_compiler(&args, &mut tc);
+ rustc_driver::run_compiler(&args, &mut tc, None, None);
assert_eq!(tc.count, 30);
}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:append-impl.rs
+
+#![feature(rustc_macro)]
+#![allow(warnings)]
+
+#[macro_use]
+extern crate append_impl;
+
+trait Append {
+ fn foo(&self);
+}
+
+#[derive(PartialEq,
+ Append,
+ Eq)]
+struct A {
+ inner: u32,
+}
+
+fn main() {
+ A { inner: 3 }.foo();
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// force-host
+// no-prefer-dynamic
+
+#![feature(rustc_macro)]
+#![feature(rustc_macro_lib)]
+#![crate_type = "rustc-macro"]
+
+extern crate rustc_macro;
+
+use rustc_macro::TokenStream;
+
+#[rustc_macro_derive(Append)]
+pub fn derive_a(input: TokenStream) -> TokenStream {
+ let mut input = input.to_string();
+ input.push_str("
+ impl Append for A {
+ fn foo(&self) {}
+ }
+ ");
+ input.parse().unwrap()
+}
pub fn derive(input: TokenStream) -> TokenStream {
let input = input.to_string();
assert!(input.contains("struct A;"));
- assert!(input.contains("#[derive(Eq, Copy, Clone)]"));
- "#[derive(Eq, Copy, Clone)] struct A;".parse().unwrap()
+ assert!(input.contains("#[derive(Debug, PartialEq, Eq, Copy, Clone)]"));
+ "#[derive(Debug, PartialEq, Eq, Copy, Clone)] struct A;".parse().unwrap()
}
// no-prefer-dynamic
// aux-build:allocator-dummy.rs
+// ignore-emscripten
#![feature(test)]
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test ensures we can attach attributes to the formals in all
+// places where generic parameter lists occur, assuming appropriate
+// feature gates are enabled.
+//
+// (We are prefixing all tested features with `rustc_`, to ensure that
+// the attributes themselves won't be rejected by the compiler when
+// using `rustc_attrs` feature. There is a separate compile-fail/ test
+// ensuring that the attribute feature-gating works in this context.)
+
+#![feature(generic_param_attrs, rustc_attrs)]
+#![allow(dead_code)]
+
+struct StLt<#[rustc_lt_struct] 'a>(&'a u32);
+struct StTy<#[rustc_ty_struct] I>(I);
+
+enum EnLt<#[rustc_lt_enum] 'b> { A(&'b u32), B }
+enum EnTy<#[rustc_ty_enum] J> { A(J), B }
+
+trait TrLt<#[rustc_lt_trait] 'c> { fn foo(&self, _: &'c [u32]) -> &'c u32; }
+trait TrTy<#[rustc_ty_trait] K> { fn foo(&self, _: K); }
+
+type TyLt<#[rustc_lt_type] 'd> = &'d u32;
+type TyTy<#[rustc_ty_type] L> = (L, );
+
+impl<#[rustc_lt_inherent] 'e> StLt<'e> { }
+impl<#[rustc_ty_inherent] M> StTy<M> { }
+
+impl<#[rustc_lt_impl_for] 'f> TrLt<'f> for StLt<'f> {
+ fn foo(&self, _: &'f [u32]) -> &'f u32 { loop { } }
+}
+impl<#[rustc_ty_impl_for] N> TrTy<N> for StTy<N> {
+ fn foo(&self, _: N) { }
+}
+
+fn f_lt<#[rustc_lt_fn] 'g>(_: &'g [u32]) -> &'g u32 { loop { } }
+fn f_ty<#[rustc_ty_fn] O>(_: O) { }
+
+impl<I> StTy<I> {
+ fn m_lt<#[rustc_lt_meth] 'h>(_: &'h [u32]) -> &'h u32 { loop { } }
+ fn m_ty<#[rustc_ty_meth] P>(_: P) { }
+}
+
+fn hof_lt<Q>(_: Q)
+ where Q: for <#[rustc_lt_hof] 'i> Fn(&'i [u32]) -> &'i u32
+{
+}
+
+fn main() {
+
+}
+++ /dev/null
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// ignore-emscripten
-
-// Test that `CString::new("hello").unwrap().as_ptr()` pattern
-// leads to failure.
-
-use std::env;
-use std::ffi::{CString, CStr};
-use std::os::raw::c_char;
-use std::process::{Command, Stdio};
-
-fn main() {
- let args: Vec<String> = env::args().collect();
- if args.len() > 1 && args[1] == "child" {
- // Repeat several times to be more confident that
- // it is `Drop` for `CString` that does the cleanup,
- // and not just some lucky UB.
- let xs = vec![CString::new("Hello").unwrap(); 10];
- let ys = xs.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
- drop(xs);
- assert!(ys.into_iter().any(is_hello));
- return;
- }
-
- let output = Command::new(&args[0]).arg("child").output().unwrap();
- assert!(!output.status.success());
-}
-
-fn is_hello(s: *const c_char) -> bool {
- // `s` is a dangling pointer and reading it is technically
- // undefined behavior. But we want to prevent the most diabolical
- // kind of UB (apart from nasal demons): reading a value that was
- // previously written.
- //
- // Segfaulting or reading an empty string is Ok,
- // reading "Hello" is bad.
- let s = unsafe { CStr::from_ptr(s) };
- let hello = CString::new("Hello").unwrap();
- s == hello.as_ref()
-}
StructVariant { x: isize, y : usize }
}
+#[derive(Debug)]
+struct Pointers(*const Send, *mut Sync);
+
macro_rules! t {
($x:expr, $expected:expr) => {
assert_eq!(format!("{:?}", $x), $expected.to_string())
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(discriminant_value)]
+
+use std::mem;
+
+enum ADT {
+ First(u32, u32),
+ Second(u64)
+}
+
+pub fn main() {
+ assert!(mem::discriminant(&ADT::First(0,0)) == mem::discriminant(&ADT::First(1,1)));
+ assert!(mem::discriminant(&ADT::Second(5)) == mem::discriminant(&ADT::Second(6)));
+ assert!(mem::discriminant(&ADT::First(2,2)) != mem::discriminant(&ADT::Second(2)));
+
+ let _ = mem::discriminant(&10);
+ let _ = mem::discriminant(&"test");
+}
+
// pretty-expanded FIXME #23616
// ignore-msvc
+// ignore-emscripten
struct TwoU8s {
one: u8,
// ignore-windows
// ignore-android
// ignore-emscripten
+// ignore-haiku
#![feature(libc)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-emscripten missing rust_begin_unwind
+
#![feature(lang_items, start, collections)]
#![no_std]
fn g() {
let x = 0;
- macro_rules! m { ($x:ident) => {
- macro_rules! m2 { () => { ($x, x) } }
+ macro_rules! m { ($m1:ident, $m2:ident, $x:ident) => {
+ macro_rules! $m1 { () => { ($x, x) } }
let x = 1;
- macro_rules! m3 { () => { ($x, x) } }
+ macro_rules! $m2 { () => { ($x, x) } }
} }
let x = 2;
- m!(x);
+ m!(m2, m3, x);
let x = 3;
assert_eq!(m2!(), (2, 0));
assert_eq!(m3!(), (2, 1));
let x = 4;
- m!(x);
- assert_eq!(m2!(), (4, 0));
- assert_eq!(m3!(), (4, 1));
+ m!(m4, m5, x);
+ assert_eq!(m4!(), (4, 0));
+ assert_eq!(m5!(), (4, 1));
}
mod foo {
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+pub trait Indexable<T>: std::ops::Index<usize, Output = T> {
+ fn index2(&self, i: usize) -> &T {
+ &self[i]
+ }
+}
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #36381. The trans collector was asserting that
+// there are no projection types, but the `<&str as
+// StreamOnce>::Position` projection contained a late-bound region,
+// and we don't currently normalize in that case until the function is
+// actually invoked.
+
+pub trait StreamOnce {
+ type Position;
+}
+
+impl<'a> StreamOnce for &'a str {
+ type Position = usize;
+}
+
+pub fn parser<F>(_: F) {
+}
+
+fn follow(_: &str) -> <&str as StreamOnce>::Position {
+ panic!()
+}
+
+fn main() {
+ parser(follow);
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:--test
+#![deny(private_in_public)]
+
+#[test] fn foo() {}
+mod foo {}
+
+#[test] fn core() {}
+extern crate core;
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Ensure that types that rely on obligations are autoderefed
+// correctly
+
+fn main() {
+ let x : Vec<Box<Fn()>> = vec![Box::new(|| ())];
+ x[0]()
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+macro_rules! m { () => { 1 } }
+macro_rules! n { () => { 1 + m!() } }
+
+fn main() {
+ let _: [u32; n!()] = [0, 0];
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Regression test for #36856.
+
+// compile-flags:-g
+
+fn g() -> bool {
+ false
+}
+
+pub fn main() {
+ let a = !g();
+ if a != !g() {
+ panic!();
+ }
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// check that casts are not being treated as lexprs.
+
+fn main() {
+ let mut a = 0i32;
+ let b = &(a as i32);
+ a = 1;
+ assert_ne!(&a as *const i32, b as *const i32);
+ assert_eq!(*b, 0);
+
+ assert_eq!(issue_36936(), 1);
+}
+
+
+struct A(u32);
+
+impl Drop for A {
+ fn drop(&mut self) {
+ self.0 = 0;
+ }
+}
+
+fn issue_36936() -> u32 {
+ let a = &(A(1) as A);
+ a.0
+}
pub fn main() {
assert_eq!(mod_dir_simple::syrup::foo(), 10);
+
+ #[path = "auxiliary"]
+ mod foo {
+ mod two_macros;
+ }
+
+ #[path = "auxiliary"]
+ mod bar {
+ macro_rules! m { () => { mod two_macros; } }
+ m!();
+ }
}
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-emscripten Not sure what's happening here.
use std::mem;
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-emscripten
use std::mem;
// compile-flags:-C panic=abort
// aux-build:exit-success-if-unwind.rs
// no-prefer-dynamic
+// ignore-emscripten Function not implemented
extern crate exit_success_if_unwind;
// compile-flags:-C panic=abort
// no-prefer-dynamic
+// ignore-emscripten Function not implemented.
use std::process::Command;
use std::env;
// compile-flags:-C lto -C panic=abort
// no-prefer-dynamic
+// ignore-emscripten Function not implemented.
use std::process::Command;
use std::env;
// compile-flags:-C lto -C panic=unwind
// no-prefer-dynamic
+// ignore-emscripten Function not implemented.
use std::process::Command;
use std::env;
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-emscripten Function not implemented.
use std::env;
use std::io;
// test the size hints and emptying
let mut long = 0...255u8;
- let mut short = 42...42;
+ let mut short = 42...42u8;
assert_eq!(long.size_hint(), (256, Some(256)));
assert_eq!(short.size_hint(), (1, Some(1)));
long.next();
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-emscripten linking with emcc failed
#![feature(repr_simd, platform_intrinsics, concat_idents, test)]
#![allow(non_camel_case_types)]
let data = Arc::new(Mutex::new(Vec::new()));
let sink = Sink(data.clone());
let res = thread::Builder::new().spawn(move|| -> () {
- io::set_panic(Box::new(sink));
+ io::set_panic(Some(Box::new(sink)));
panic!("Hello, world!")
}).unwrap().join();
assert!(res.is_err());
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// no-prefer-dynamic
+
+#![feature(rustc_macro)]
+#![feature(rustc_macro_lib)]
+#![crate_type = "rustc-macro"]
+
+extern crate rustc_macro;
+
+use rustc_macro::TokenStream;
+
+#[rustc_macro_derive(Foo)]
+pub fn foo(input: TokenStream) -> TokenStream {
+ input
+}
--> $DIR/tab.rs:14:2
|
14 | \tbar;
- | \t^^^
+ | \t^^^ unresolved name
error: aborting due to previous error
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo {
+ bar: u8
+}
+
+fn main() {
+ let f = Foo { bar: 22 };
+ f.baz;
+}
--- /dev/null
+error: no field `baz` on type `Foo`
+ --> $DIR/issue-36798.rs:17:7
+ |
+17 | f.baz;
+ | ^^^ did you mean `bar`?
+
+error: aborting due to previous error
+
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo {
+ bar: u8
+}
+
+fn main() {
+ let f = Foo { bar: 22 };
+ f.zz;
+}
--- /dev/null
+error: no field `zz` on type `Foo`
+ --> $DIR/issue-36798_unknown_field.rs:17:7
+ |
+17 | f.zz;
+ | ^^ unknown field
+
+error: aborting due to previous error
+
50 | fake_method_stmt!();
| -------------------- in this macro invocation
-error: attempted access of field `fake` on type `{integer}`, but no field with that name was found
- --> $DIR/macro-backtrace-invalid-internals.rs:21:11
+error: no field `fake` on type `{integer}`
+ --> $DIR/macro-backtrace-invalid-internals.rs:21:13
|
21 | 1.fake
- | ^^^^^^
+ | ^^^^
...
51 | fake_field_stmt!();
| ------------------- in this macro invocation
54 | let _ = fake_method_expr!();
| ------------------- in this macro invocation
-error: attempted access of field `fake` on type `{integer}`, but no field with that name was found
- --> $DIR/macro-backtrace-invalid-internals.rs:39:11
+error: no field `fake` on type `{integer}`
+ --> $DIR/macro-backtrace-invalid-internals.rs:39:13
|
39 | 1.fake
- | ^^^^^^
+ | ^^^^
...
55 | let _ = fake_field_expr!();
| ------------------ in this macro invocation
--> $DIR/macro-backtrace-nested.rs:15:12
|
15 | () => (fake)
- | ^^^^
+ | ^^^^ unresolved name
...
27 | 1 + call_nested_expr!();
| ------------------- in this macro invocation
--> $DIR/macro-backtrace-nested.rs:15:12
|
15 | () => (fake)
- | ^^^^
+ | ^^^^ unresolved name
...
28 | call_nested_expr_sum!();
| ------------------------ in this macro invocation
--> $DIR/type-binding.rs:16:20
|
16 | fn homura<T: Deref<Trget = i32>>(_: T) {}
- | ^^^^^^^^^^^
+ | ^^^^^^^^^^^ associated type `Trget` not found
error: aborting due to previous error
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let foo = 1;
+
+ // `foo` shouldn't be suggested, it is too dissimilar from `bar`.
+ println!("Hello {}", bar);
+
+ // But this is close enough.
+ println!("Hello {}", fob);
+}
--- /dev/null
+error[E0425]: unresolved name `bar`
+ --> $DIR/typo-suggestion.rs:15:26
+ |
+15 | println!("Hello {}", bar);
+ | ^^^ unresolved name
+
+error[E0425]: unresolved name `fob`
+ --> $DIR/typo-suggestion.rs:18:26
+ |
+18 | println!("Hello {}", fob);
+ | ^^^ did you mean `foo`?
+
+error: aborting due to 2 previous errors
+
const TEST_REPOS: &'static [Test] = &[Test {
name: "cargo",
repo: "https://github.com/rust-lang/cargo",
- sha: "2d85908217f99a30aa5f68e05a8980704bb71fad",
+ sha: "d8936af1390ab0844e5e68b459214f2529c9f647",
lock: None,
},
Test {
pub cflags: String,
pub llvm_components: String,
pub llvm_cxxflags: String,
+ pub nodejs: Option<String>,
}
reqopt("", "cflags", "flags for the C compiler", "FLAGS"),
reqopt("", "llvm-components", "list of LLVM components built in", "LIST"),
reqopt("", "llvm-cxxflags", "C++ flags for LLVM", "FLAGS"),
+ optopt("", "nodejs", "the name of nodejs", "PATH"),
optflag("h", "help", "show this message"));
let (argv0, args_) = args.split_first().unwrap();
cflags: matches.opt_str("cflags").unwrap(),
llvm_components: matches.opt_str("llvm-components").unwrap(),
llvm_cxxflags: matches.opt_str("llvm-cxxflags").unwrap(),
+ nodejs: matches.opt_str("nodejs"),
}
}
},
color: test::AutoColor,
test_threads: None,
+ skip: vec![],
}
}
}
};
+ // Debugging emscripten code doesn't make sense today
+ let mut ignore = early_props.ignore;
+ if (config.mode == DebugInfoGdb || config.mode == DebugInfoLldb) &&
+ config.target.contains("emscripten") {
+ ignore = true;
+ }
+
test::TestDescAndFn {
desc: test::TestDesc {
name: make_test_name(config, testpaths),
- ignore: early_props.ignore,
+ ignore: ignore,
should_panic: should_panic,
},
testfn: make_test_closure(config, testpaths),
"arm-linux-androideabi" | "armv7-linux-androideabi" | "aarch64-linux-android" => {
self._arm_exec_compiled_test(env)
}
-
_=> {
let aux_dir = self.aux_output_dir_name();
self.compose_and_run(self.make_run_args(),
fn make_exe_name(&self) -> PathBuf {
let mut f = self.output_base_name();
// FIXME: This is using the host architecture exe suffix, not target!
- if self.config.target == "asmjs-unknown-emscripten" {
+ if self.config.target.contains("emscripten") {
let mut fname = f.file_name().unwrap().to_os_string();
fname.push(".js");
f.set_file_name(&fname);
let mut args = self.split_maybe_args(&self.config.runtool);
// If this is emscripten, then run tests under nodejs
- if self.config.target == "asmjs-unknown-emscripten" {
- args.push("nodejs".to_owned());
+ if self.config.target.contains("emscripten") {
+ let nodejs = self.config.nodejs.clone().unwrap_or("nodejs".to_string());
+ args.push(nodejs);
}
let exe_file = self.make_exe_name();
("darwin", "macos"),
("dragonfly", "dragonfly"),
("freebsd", "freebsd"),
+ ("haiku", "haiku"),
("ios", "ios"),
("linux", "linux"),
("mingw32", "windows"),
("sparc", "sparc"),
("x86_64", "x86_64"),
("xcore", "xcore"),
- ("asmjs", "asmjs")];
+ ("asmjs", "asmjs"),
+ ("wasm32", "wasm32")];
pub fn get_os(triple: &str) -> &'static str {
for &(triple_os, os) in OS_TABLE {
mod features;
mod cargo;
mod cargo_lock;
+mod pal;
fn main() {
let path = env::args_os().skip(1).next().expect("need an argument");
cargo::check(&path, &mut bad);
features::check(&path, &mut bad);
cargo_lock::check(&path, &mut bad);
+ pal::check(&path, &mut bad);
if bad {
panic!("some tidy checks failed");
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Tidy check to enforce rules about platform-specific code in std
+//!
+//! This is intended to maintain existing standards of code
+//! organization in hopes that the standard library will continue to
+//! be refactored to isolate platform-specific bits, making porting
+//! easier; where "standard library" roughly means "all the
+//! dependencies of the std and test crates".
+//!
+//! This generally means placing restrictions on where `cfg(unix)`,
+//! `cfg(windows)`, `cfg(target_os)` and `cfg(target_env)` may appear,
+//! the basic objective being to isolate platform-specific code to the
+//! platform-specific `std::sys` modules, and to the allocation,
+//! unwinding, and libc crates.
+//!
+//! Following are the basic rules, though there are currently
+//! exceptions:
+//!
+//! - core may not have platform-specific code
+//! - liballoc_system may have platform-specific code
+//! - liballoc_jemalloc may have platform-specific code
+//! - libpanic_abort may have platform-specific code
+//! - libpanic_unwind may have platform-specific code
+//! - libunwind may have platform-specific code
+//! - other crates in the std facade may not
+//! - std may have platform-specific code in the following places
+//! - sys/unix/
+//! - sys/windows/
+//! - os/
+//!
+//! `std/sys_common` should _not_ contain platform-specific code.
+//! Finally, because std contains tests with platform-specific
+//! `ignore` attributes, once the parser encounters `mod tests`,
+//! platform-specific cfgs are allowed. Not sure yet how to deal with
+//! this in the long term.
+
+use std::fs::File;
+use std::io::Read;
+use std::path::Path;
+use std::iter::Iterator;
+
+// Paths that may contain platform-specific code. Any file whose path
+// contains one of these substrings is skipped by the check entirely,
+// so keep entries as specific as possible.
+const EXCEPTION_PATHS: &'static [&'static str] = &[
+ // std crates
+ "src/liballoc_jemalloc",
+ "src/liballoc_system",
+ "src/liblibc",
+ "src/libpanic_abort",
+ "src/libpanic_unwind",
+ "src/libunwind",
+ "src/libstd/sys/unix", // This is where platform-specific code for std should live
+ "src/libstd/sys/windows", // Ditto
+ "src/libstd/os", // Platform-specific public interfaces
+ "src/rtstartup", // Not sure what to do about this. magic stuff for mingw
+
+ // temporary exceptions
+ "src/libstd/lib.rs", // This could probably be done within the sys directory
+ "src/libstd/rtdeps.rs", // Until rustbuild replaces make
+ "src/libstd/path.rs",
+ "src/libstd/io/stdio.rs",
+ "src/libstd/num/f32.rs",
+ "src/libstd/num/f64.rs",
+ "src/libstd/thread/local.rs",
+ "src/libstd/sys/common/mod.rs",
+ "src/libstd/sys/common/net.rs",
+ "src/libstd/sys/common/util.rs",
+ "src/libterm", // Not sure how to make this crate portable, but test needs it
+ "src/libtest", // Probably should defer to unstable std::sys APIs
+
+ // std testing crates, ok for now at least
+ "src/libcoretest",
+
+ // non-std crates
+ "src/test",
+ "src/tools",
+ "src/librustc",
+ "src/librustdoc",
+ "src/libsyntax",
+ "src/bootstrap",
+];
+
+/// Tidy entry point: walks every `.rs` file under `path` (skipping
+/// `EXCEPTION_PATHS`) and reports platform-specific `cfg`s found outside
+/// the allowed locations. Sets `*bad = true` on any violation so the
+/// caller can fail the overall tidy run.
+pub fn check(path: &Path, bad: &mut bool) {
+ // Buffer reused across files by check_cfgs to avoid reallocating per file.
+ let ref mut contents = String::new();
+ // Sanity check that the complex parsing here works
+ let ref mut saw_target_arch = false;
+ let ref mut saw_cfg_bang = false;
+ super::walk(path, &mut super::filter_dirs, &mut |file| {
+ // Normalize Windows path separators so the substring checks below work.
+ let filestr = file.to_string_lossy().replace("\\", "/");
+ if !filestr.ends_with(".rs") { return }
+
+ let is_exception_path = EXCEPTION_PATHS.iter().any(|s| filestr.contains(&**s));
+ if is_exception_path { return }
+
+ check_cfgs(contents, &file, bad, saw_target_arch, saw_cfg_bang);
+ });
+
+ // If these fire, the cfg-parsing heuristics in check_cfgs never matched
+ // even the common cases, i.e. the parser itself is probably broken.
+ assert!(*saw_target_arch);
+ assert!(*saw_cfg_bang);
+}
+
+/// Reads `file` into the shared `contents` buffer and prints an error (and
+/// sets `*bad`) for every platform-specific `cfg` that appears before any
+/// `mod tests`. `saw_target_arch`/`saw_cfg_bang` are sanity flags proving
+/// the heuristic parser actually recognized common patterns somewhere.
+fn check_cfgs(contents: &mut String, file: &Path,
+ bad: &mut bool, saw_target_arch: &mut bool, saw_cfg_bang: &mut bool) {
+ contents.truncate(0);
+ t!(t!(File::open(file), file).read_to_string(contents));
+
+ // For now it's ok to have platform-specific code after 'mod tests'.
+ let mod_tests_idx = find_test_mod(contents);
+ let contents = &contents[..mod_tests_idx];
+ // Pull out all "cfg(...)" and "cfg!(...)" strings
+ let cfgs = parse_cfgs(contents);
+
+ // Newline positions, computed lazily only if an error is actually reported.
+ let mut line_numbers: Option<Vec<usize>> = None;
+ let mut err = |idx: usize, cfg: &str| {
+ if line_numbers.is_none() {
+ line_numbers = Some(contents.match_indices('\n').map(|(i, _)| i).collect());
+ }
+ let line_numbers = line_numbers.as_ref().expect("");
+ // binary_search's Err(i) is the number of newlines before `idx`,
+ // which is exactly the 0-based line, hence `i + 1` below. Ok(_) is
+ // impossible because `idx` points at the 'c' of "cfg", never at '\n'.
+ let line = match line_numbers.binary_search(&idx) {
+ Ok(_) => unreachable!(),
+ Err(i) => i + 1
+ };
+ println!("{}:{}: platform-specific cfg: {}", file.display(), line, cfg);
+ *bad = true;
+ };
+
+ for (idx, cfg) in cfgs.into_iter() {
+ // Sanity check that the parsing here works
+ if !*saw_target_arch && cfg.contains("target_arch") { *saw_target_arch = true }
+ if !*saw_cfg_bang && cfg.contains("cfg!") { *saw_cfg_bang = true }
+
+ let contains_platform_specific_cfg =
+ cfg.contains("target_os")
+ || cfg.contains("target_env")
+ || cfg.contains("target_vendor")
+ || cfg.contains("unix")
+ || cfg.contains("windows");
+
+ if !contains_platform_specific_cfg { continue }
+
+ // A "cfg" on the same line after a `///` is doc-comment prose, not a
+ // real attribute; detect that by comparing the positions of the last
+ // newline and the last "///" before the match.
+ let preceeded_by_doc_comment = {
+ let pre_contents = &contents[..idx];
+ let pre_newline = pre_contents.rfind('\n');
+ let pre_doc_comment = pre_contents.rfind("///");
+ match (pre_newline, pre_doc_comment) {
+ (Some(n), Some(c)) => n < c,
+ (None, Some(_)) => true,
+ (_, None) => false,
+ }
+ };
+
+ if preceeded_by_doc_comment { continue }
+
+ err(idx, cfg);
+ }
+}
+
+/// Returns the byte index at which platform-specific-cfg checking should
+/// stop: the start of `mod tests` (or of the `cfg` line immediately above
+/// it, when the test module is cfg-ed out), or `contents.len()` when the
+/// file has no test module.
+fn find_test_mod(contents: &str) -> usize {
+ if let Some(mod_tests_idx) = contents.find("mod tests") {
+ // Also capture a previous line indicating that "mod tests" is cfg-ed out.
+ let prev_newline_idx = contents[..mod_tests_idx].rfind('\n').unwrap_or(mod_tests_idx);
+ let prev_newline_idx = contents[..prev_newline_idx].rfind('\n');
+ if let Some(nl) = prev_newline_idx {
+ let prev_line = &contents[nl + 1 .. mod_tests_idx];
+ // The cfg pattern used to disable test modules on emscripten.
+ let emcc_cfg = "cfg(all(test, not(target_os";
+ if prev_line.contains(emcc_cfg) {
+ // Stop before the cfg line too, so it isn't flagged itself.
+ nl
+ } else {
+ mod_tests_idx
+ }
+ } else {
+ mod_tests_idx
+ }
+ } else {
+ // No test module: the whole file is subject to the check.
+ contents.len()
+ }
+}
+
/// Pulls out every "cfg(...)" and "cfg!(...)" occurrence in `contents`,
/// returning, for each, the byte index of the "cfg" token and the full
/// balanced `cfg(...)` string starting at that index.
///
/// This is a heuristic, not a real parser: it can also match "cfg" inside
/// string literals or block comments, which is acceptable for a tidy lint.
fn parse_cfgs<'a>(contents: &'a str) -> Vec<(usize, &'a str)> {
    let candidate_cfgs = contents.match_indices("cfg");
    let candidate_cfg_idxs = candidate_cfgs.map(|(i, _)| i);
    // This is pulling out the indexes of all "cfg" strings
    // that appear to be tokens succeeded by a paren.
    let cfgs = candidate_cfg_idxs.filter(|i| {
        // The match must not be the tail of a longer identifier (e.g.
        // "foocfg"), so the preceding byte must be non-alphanumeric.
        // BUG FIX: the original computed `i.saturating_sub(*i)` — always 0 —
        // so it inspected the first byte of the file instead of the byte
        // immediately before the match. A match at index 0 trivially has no
        // identifier before it.
        let succeeds_non_ident = match i.checked_sub(1) {
            None => true,
            Some(pre_idx) => !contents.as_bytes().get(pre_idx)
                .cloned()
                .map(char::from)
                .map(char::is_alphanumeric)
                .unwrap_or(false),
        };
        // "cfg" must be followed by an opening paren, with only whitespace
        // and/or a `!` in between, as in `cfg(...)` or `cfg! (...)`.
        let contents_after = &contents[*i..];
        let first_paren = contents_after.find('(');
        let paren_idx = first_paren.map(|ip| i + ip);
        let precedes_whitespace_and_paren = paren_idx.map(|ip| {
            let maybe_space = &contents[*i + "cfg".len() .. ip];
            maybe_space.chars().all(|c| char::is_whitespace(c) || c == '!')
        }).unwrap_or(false);

        succeeds_non_ident && precedes_whitespace_and_paren
    });

    // For each accepted match, scan forward and track paren depth so the
    // returned slice covers the entire balanced `cfg(...)` expression.
    cfgs.map(|i| {
        let mut depth = 0;
        let contents_from = &contents[i..];
        for (j, byte) in contents_from.bytes().enumerate() {
            match byte {
                b'(' => {
                    depth += 1;
                }
                b')' => {
                    depth -= 1;
                    if depth == 0 {
                        return (i, &contents_from[.. j + 1]);
                    }
                }
                _ => { }
            }
        }

        // The filter above guaranteed a '(' exists; unbalanced parens would
        // mean the scanned source file could not have compiled at all.
        unreachable!()
    }).collect()
}