valopt infodir "${CFG_PREFIX}/share/info" "install additional info"
valopt mandir "${CFG_PREFIX}/share/man" "install man pages in PATH"
+valopt release-channel "source" "the name of the release channel to build"
+
# On windows we just store the libraries in the bin directory because
# there's no rpath. This is where the build system itself puts libraries;
# --libdir is used to configure the installation directory.
step_msg "validating $CFG_SELF args"
validate_opt
+# Validate the release channel
+case "$CFG_RELEASE_CHANNEL" in
+ (source | nightly | beta | stable)
+ ;;
+ (*)
+ err "release channel must be 'source', 'nightly', 'beta' or 'stable'"
+ ;;
+esac
+
+# Continue supporting the old --enable-nightly flag to transition the bots
+# XXX Remove me
+if [ $CFG_ENABLE_NIGHTLY -eq 1 ]
+then
+ CFG_RELEASE_CHANNEL=nightly
+ putvar CFG_RELEASE_CHANNEL
+fi
+
step_msg "looking for build programs"
probe_need CFG_PERL perl
# check that gcc, cc and g++ all point to the same compiler.
# note that for xcode 5, g++ points to clang, not clang++
if !((chk_cc gcc clang && chk_cc g++ clang) ||
- (chk_cc gcc gcc &&( chk_cc g++ g++ || chk g++ gcc))) then
+ (chk_cc gcc gcc &&( chk_cc g++ g++ || chk g++ gcc))); then
err "the gcc and g++ in your path point to different compilers.
Check which versions are in your path with gcc --version and g++ --version.
To resolve this problem, either fix your PATH or run configure with --enable-clang"
-.TH RUSTC "1" "March 2014" "rustc 0.12.0-pre" "User Commands"
+.TH RUSTC "1" "March 2014" "rustc 0.12.0" "User Commands"
.SH NAME
rustc \- The Rust compiler
.SH SYNOPSIS
-.TH RUSTDOC "1" "March 2014" "rustdoc 0.12.0-pre" "User Commands"
+.TH RUSTDOC "1" "March 2014" "rustdoc 0.12.0" "User Commands"
.SH NAME
rustdoc \- generate documentation from Rust source code
.SH SYNOPSIS
TOOLS := compiletest rustdoc rustc
DEPS_core :=
-DEPS_rlibc :=
+DEPS_rlibc := core
DEPS_unicode := core
DEPS_alloc := core libc native:jemalloc
DEPS_debug := std
# The version number
CFG_RELEASE_NUM=0.12.0
-CFG_RELEASE_LABEL=-pre
CFG_FILENAME_EXTRA=4e7c5e5c
-ifndef CFG_ENABLE_NIGHTLY
-# This is the normal version string
-CFG_RELEASE=$(CFG_RELEASE_NUM)$(CFG_RELEASE_LABEL)
-CFG_PACKAGE_VERS=$(CFG_RELEASE)
-else
-# Modify the version label for nightly builds
-CFG_RELEASE=$(CFG_RELEASE_NUM)$(CFG_RELEASE_LABEL)-nightly
-# When building nightly distributables just reuse the same "rust-nightly" name
-# so when we upload we'll always override the previous nighly. This doesn't actually
-# impact the version reported by rustc - it's just for file naming.
+ifeq ($(CFG_RELEASE_CHANNEL),stable)
+# This is the normal semver version string, e.g. "0.12.0", "0.12.0-nightly"
+CFG_RELEASE=$(CFG_RELEASE_NUM)
+# This is the string used in dist artifact file names, e.g. "0.12.0", "nightly"
+CFG_PACKAGE_VERS=$(CFG_RELEASE_NUM)
+endif
+ifeq ($(CFG_RELEASE_CHANNEL),beta)
+CFG_RELEASE=$(CFG_RELEASE_NUM)-beta
+# When building beta/nightly distributables just reuse the same "beta"
+# name so when we upload we'll always override the previous
+# nightly. This doesn't actually impact the version reported by rustc -
+# it's just for file naming.
+CFG_PACKAGE_VERS=beta
+endif
+ifeq ($(CFG_RELEASE_CHANNEL),nightly)
+CFG_RELEASE=$(CFG_RELEASE_NUM)-nightly
CFG_PACKAGE_VERS=nightly
endif
+ifeq ($(CFG_RELEASE_CHANNEL),source)
+CFG_RELEASE=$(CFG_RELEASE_NUM)-pre
+CFG_PACKAGE_VERS=$(CFG_RELEASE_NUM)-pre
+endif
+
# The name of the package to use for creating tarballs, installers etc.
CFG_PACKAGE_NAME=rust-$(CFG_PACKAGE_VERS)
CFG_RUN_TARG_mips-unknown-linux-gnu=
RUSTC_FLAGS_mips-unknown-linux-gnu := -C target-cpu=mips32r2 -C target-feature="+mips32r2,+o32" -C soft-float
-# i686-pc-mingw32 configuration
-CC_i686-pc-mingw32=$(CC)
-CXX_i686-pc-mingw32=$(CXX)
-CPP_i686-pc-mingw32=$(CPP)
-AR_i686-pc-mingw32=$(AR)
-CFG_LIB_NAME_i686-pc-mingw32=$(1).dll
-CFG_STATIC_LIB_NAME_i686-pc-mingw32=$(1).lib
-CFG_LIB_GLOB_i686-pc-mingw32=$(1)-*.dll
-CFG_LIB_DSYM_GLOB_i686-pc-mingw32=$(1)-*.dylib.dSYM
-CFG_CFLAGS_mips-i686-pc-mingw32 := -m32 -march=i686 -D_WIN32_WINNT=0x0600 $(CFLAGS)
-CFG_GCCISH_CFLAGS_i686-pc-mingw32 := -Wall -Werror -g -m32 -march=i686 -D_WIN32_WINNT=0x0600 -I$(CFG_SRC_DIR)src/etc/mingw-fix-include $(CFLAGS)
-CFG_GCCISH_CXXFLAGS_i686-pc-mingw32 := -fno-rtti $(CXXFLAGS)
-CFG_GCCISH_LINK_FLAGS_i686-pc-mingw32 := -shared -fPIC -g -m32
-CFG_GCCISH_DEF_FLAG_i686-pc-mingw32 :=
-CFG_GCCISH_PRE_LIB_FLAGS_i686-pc-mingw32 :=
-CFG_GCCISH_POST_LIB_FLAGS_i686-pc-mingw32 :=
-CFG_DEF_SUFFIX_i686-pc-mingw32 := .mingw32.def
-CFG_LLC_FLAGS_i686-pc-mingw32 :=
-CFG_INSTALL_NAME_i686-pc-mingw32 =
-CFG_LIBUV_LINK_FLAGS_i686-pc-mingw32 := -lws2_32 -lpsapi -liphlpapi
-CFG_LLVM_BUILD_ENV_i686-pc-mingw32 := CPATH=$(CFG_SRC_DIR)src/etc/mingw-fix-include
-CFG_EXE_SUFFIX_i686-pc-mingw32 := .exe
-CFG_WINDOWSY_i686-pc-mingw32 := 1
-CFG_UNIXY_i686-pc-mingw32 :=
-CFG_PATH_MUNGE_i686-pc-mingw32 :=
-CFG_LDPATH_i686-pc-mingw32 :=$(CFG_LDPATH_i686-pc-mingw32):$(PATH)
-CFG_RUN_i686-pc-mingw32=PATH="$(CFG_LDPATH_i686-pc-mingw32):$(1)" $(2)
-CFG_RUN_TARG_i686-pc-mingw32=$(call CFG_RUN_i686-pc-mingw32,$(HLIB$(1)_H_$(CFG_BUILD)),$(2))
-RUSTC_FLAGS_i686-pc-mingw32=-C link-args="-Wl,--large-address-aware"
-
# i586-mingw32msvc configuration
CC_i586-mingw32msvc=$(CFG_MINGW32_CROSS_PATH)/bin/i586-mingw32msvc-gcc
CXX_i586-mingw32msvc=$(CFG_MINGW32_CROSS_PATH)/bin/i586-mingw32msvc-g++
CFG_STATIC_LIB_NAME_i686-w64-mingw32=$(1).lib
CFG_LIB_GLOB_i686-w64-mingw32=$(1)-*.dll
CFG_LIB_DSYM_GLOB_i686-w64-mingw32=$(1)-*.dylib.dSYM
-CFG_CFLAGS_i586-w64-mingw32 := -march=i586 -m32 -D_WIN32_WINNT=0x0600 $(CFLAGS)
+CFG_CFLAGS_i686-w64-mingw32 := -march=i686 -m32 -D_WIN32_WINNT=0x0600 $(CFLAGS)
CFG_GCCISH_CFLAGS_i686-w64-mingw32 := -Wall -Werror -g -m32 -D_WIN32_WINNT=0x0600 $(CFLAGS)
CFG_GCCISH_CXXFLAGS_i686-w64-mingw32 := -fno-rtti $(CXXFLAGS)
CFG_GCCISH_LINK_FLAGS_i686-w64-mingw32 := -shared -g -m32
CFG_LDPATH_i686-w64-mingw32 :=$(CFG_LDPATH_i686-w64-mingw32):$(PATH)
CFG_RUN_i686-w64-mingw32=PATH="$(CFG_LDPATH_i686-w64-mingw32):$(1)" $(2)
CFG_RUN_TARG_i686-w64-mingw32=$(call CFG_RUN_i686-w64-mingw32,$(HLIB$(1)_H_$(CFG_BUILD)),$(2))
+# Stop rustc from OOMing when building itself (I think)
+RUSTC_FLAGS_i686-w64-mingw32=-C link-args="-Wl,--large-address-aware"
RUSTC_CROSS_FLAGS_i686-w64-mingw32 :=
# x86_64-w64-mingw32 configuration
$(S)src/jemalloc/*/*/*/*)
endif
+# See #17183 for details, this file is touched during the build process so we
+# don't want to consider it as a dependency.
+JEMALLOC_DEPS := $(filter-out $(S)src/jemalloc/VERSION,$(JEMALLOC_DEPS))
+
JEMALLOC_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc)
ifeq ($$(CFG_WINDOWSY_$(1)),1)
JEMALLOC_REAL_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),jemalloc_s)
check-docs: cleantestlibs cleantmptestlogs check-stage2-docs
$(Q)$(CFG_PYTHON) $(S)src/etc/check-summary.py tmp/*.log
-# NOTE: Remove after reprogramming windows bots
-check-fast: check-lite
-
# Some less critical tests that are not prone to breakage.
# Not run as part of the normal test suite, but tested by bors on checkin.
check-secondary: check-lexer check-pretty
// parallel (especially when we have lots and lots of child processes).
// For context, see #8904
io::test::raise_fd_limit();
- let res = test::run_tests_console(&opts, tests.move_iter().collect());
+ let res = test::run_tests_console(&opts, tests.into_iter().collect());
match res {
Ok(true) => {}
Ok(false) => fail!("Some tests failed"),
},
_ => None
}
-}
\ No newline at end of file
+}
let mut cmd = Command::new(prog);
cmd.args(args);
add_target_env(&mut cmd, lib_path, aux_path);
- for (key, val) in env.move_iter() {
+ for (key, val) in env.into_iter() {
cmd.env(key, val);
}
let mut cmd = Command::new(prog);
cmd.args(args);
add_target_env(&mut cmd, lib_path, aux_path);
- for (key, val) in env.move_iter() {
+ for (key, val) in env.into_iter() {
cmd.env(key, val);
}
"--debuginfo".to_string()
];
let new_options =
- split_maybe_args(options).move_iter()
+ split_maybe_args(options).into_iter()
.filter(|x| !options_to_remove.contains(x))
.collect::<Vec<String>>()
.connect(" ");
// run test via adb_run_wrapper
runargs.push("shell".to_string());
- for (key, val) in env.move_iter() {
+ for (key, val) in env.into_iter() {
runargs.push(format!("{}={}", key, val));
}
runargs.push(format!("{}/adb_run_wrapper.sh", config.adb_test_dir));
% The Rust Containers and Iterators Guide
-# Containers
+This guide has been removed, with no direct replacement.
-The container traits are defined in the `std::container` module.
-
-## Unique vectors
-
-Vectors have `O(1)` indexing, push (to the end) and pop (from the end). Vectors
-are the most common container in Rust, and are flexible enough to fit many use
-cases.
-
-Vectors can also be sorted and used as efficient lookup tables with the
-`bsearch()` method, if all the elements are inserted at one time and
-deletions are unnecessary.
-
-## Maps and sets
-
-Maps are collections of unique keys with corresponding values, and sets are
-just unique keys without a corresponding value. The `Map` and `Set` traits in
-`std::container` define the basic interface.
-
-The standard library provides three owned map/set types:
-
-* `collections::HashMap` and `collections::HashSet`, requiring the keys to
- implement `Eq` and `Hash`
-* `collections::TrieMap` and `collections::TrieSet`, requiring the keys to be `uint`
-* `collections::TreeMap` and `collections::TreeSet`, requiring the keys
- to implement `Ord`
-
-These maps do not use managed pointers so they can be sent between tasks as
-long as the key and value types are sendable. Neither the key or value type has
-to be copyable.
-
-The `TrieMap` and `TreeMap` maps are ordered, while `HashMap` uses an arbitrary
-order.
-
-Each `HashMap` instance has a random 128-bit key to use with a keyed hash,
-making the order of a set of keys in a given hash table randomized. Rust
-provides a [SipHash](https://131002.net/siphash/) implementation for any type
-implementing the `Hash` trait.
-
-## Double-ended queues
-
-The `collections::ringbuf` module implements a double-ended queue with `O(1)`
-amortized inserts and removals from both ends of the container. It also has
-`O(1)` indexing like a vector. The contained elements are not required to be
-copyable, and the queue will be sendable if the contained type is sendable.
-Its interface `Deque` is defined in `collections`.
-
-The `extra::dlist` module implements a double-ended linked list, also
-implementing the `Deque` trait, with `O(1)` removals and inserts at either end,
-and `O(1)` concatenation.
-
-## Priority queues
-
-The `collections::priority_queue` module implements a queue ordered by a key. The
-contained elements are not required to be copyable, and the queue will be
-sendable if the contained type is sendable.
-
-Insertions have `O(log n)` time complexity and checking or popping the largest
-element is `O(1)`. Converting a vector to a priority queue can be done
-in-place, and has `O(n)` complexity. A priority queue can also be converted to
-a sorted vector in-place, allowing it to be used for an `O(n log n)` in-place
-heapsort.
-
-# Iterators
-
-## Iteration protocol
-
-The iteration protocol is defined by the `Iterator` trait in the
-`std::iter` module. The minimal implementation of the trait is a `next`
-method, yielding the next element from an iterator object:
-
-~~~
-/// An infinite stream of zeroes
-struct ZeroStream;
-
-impl Iterator<int> for ZeroStream {
- fn next(&mut self) -> Option<int> {
- Some(0)
- }
-}
-~~~
-
-Reaching the end of the iterator is signalled by returning `None` instead of
-`Some(item)`:
-
-~~~
-# fn main() {}
-/// A stream of N zeroes
-struct ZeroStream {
- remaining: uint
-}
-
-impl ZeroStream {
- fn new(n: uint) -> ZeroStream {
- ZeroStream { remaining: n }
- }
-}
-
-impl Iterator<int> for ZeroStream {
- fn next(&mut self) -> Option<int> {
- if self.remaining == 0 {
- None
- } else {
- self.remaining -= 1;
- Some(0)
- }
- }
-}
-~~~
-
-In general, you cannot rely on the behavior of the `next()` method after it has
-returned `None`. Some iterators may return `None` forever. Others may behave
-differently.
-
-## Container iterators
-
-Containers implement iteration over the contained elements by returning an
-iterator object. For example, for vector slices several iterators are available:
-
-* `iter()` for immutable references to the elements
-* `mut_iter()` for mutable references to the elements
-* `move_iter()` to move the elements out by-value
-
-A typical mutable container will implement at least `iter()`, `mut_iter()` and
-`move_iter()`. If it maintains an order, the returned iterators will be
-`DoubleEndedIterator`s, which are described below.
-
-### Freezing
-
-Unlike most other languages with external iterators, Rust has no *iterator
-invalidation*. As long as an iterator is still in scope, the compiler will prevent
-modification of the container through another handle.
-
-~~~
-let mut xs = [1i, 2, 3];
-{
- let _it = xs.iter();
-
- // the vector is frozen for this scope, the compiler will statically
- // prevent modification
-}
-// the vector becomes unfrozen again at the end of the scope
-~~~
-
-These semantics are due to most container iterators being implemented with `&`
-and `&mut`.
-
-## Iterator adaptors
-
-The `Iterator` trait provides many common algorithms as default methods. For
-example, the `fold` method will accumulate the items yielded by an `Iterator`
-into a single value:
-
-~~~
-let xs = [1i, 9, 2, 3, 14, 12];
-let result = xs.iter().fold(0, |accumulator, item| accumulator - *item);
-assert_eq!(result, -41);
-~~~
-
-Most adaptors return an adaptor object implementing the `Iterator` trait itself:
-
-~~~
-let xs = [1i, 9, 2, 3, 14, 12];
-let ys = [5i, 2, 1, 8];
-let sum = xs.iter().chain(ys.iter()).fold(0, |a, b| a + *b);
-assert_eq!(sum, 57);
-~~~
-
-Some iterator adaptors may return `None` before exhausting the underlying
-iterator. Additionally, if these iterator adaptors are called again after
-returning `None`, they may call their underlying iterator again even if the
-adaptor will continue to return `None` forever. This may not be desired if the
-underlying iterator has side-effects.
-
-In order to provide a guarantee about behavior once `None` has been returned, an
-iterator adaptor named `fuse()` is provided. This returns an iterator that will
-never call its underlying iterator again once `None` has been returned:
-
-~~~
-let xs = [1i,2,3,4,5];
-let mut calls = 0i;
-
-{
- let it = xs.iter().scan((), |_, x| {
- calls += 1;
- if *x < 3 { Some(x) } else { None }});
-
- // the iterator will only yield 1 and 2 before returning None
- // If we were to call it 5 times, calls would end up as 5, despite
- // only 2 values being yielded (and therefore 3 unique calls being
- // made). The fuse() adaptor can fix this.
-
- let mut it = it.fuse();
- it.next();
- it.next();
- it.next();
- it.next();
- it.next();
-}
-
-assert_eq!(calls, 3);
-~~~
-
-## For loops
-
-The function `range` (or `range_inclusive`) allows to simply iterate through a given range:
-
-~~~
-for i in range(0i, 5) {
- print!("{} ", i) // prints "0 1 2 3 4"
-}
-
-for i in std::iter::range_inclusive(0i, 5) { // needs explicit import
- print!("{} ", i) // prints "0 1 2 3 4 5"
-}
-~~~
-
-The `for` keyword can be used as sugar for iterating through any iterator:
-
-~~~
-let xs = [2u, 3, 5, 7, 11, 13, 17];
-
-// print out all the elements in the vector
-for x in xs.iter() {
- println!("{}", *x)
-}
-
-// print out all but the first 3 elements in the vector
-for x in xs.iter().skip(3) {
- println!("{}", *x)
-}
-~~~
-
-For loops are *often* used with a temporary iterator object, as above. They can
-also advance the state of an iterator in a mutable location:
-
-~~~
-let xs = [1i, 2, 3, 4, 5];
-let ys = ["foo", "bar", "baz", "foobar"];
-
-// create an iterator yielding tuples of elements from both vectors
-let mut it = xs.iter().zip(ys.iter());
-
-// print out the pairs of elements up to (&3, &"baz")
-for (x, y) in it {
- println!("{} {}", *x, *y);
-
- if *x == 3 {
- break;
- }
-}
-
-// yield and print the last pair from the iterator
-println!("last: {}", it.next());
-
-// the iterator is now fully consumed
-assert!(it.next().is_none());
-~~~
-
-## Conversion
-
-Iterators offer generic conversion to containers with the `collect` adaptor:
-
-~~~
-let xs = [0i, 1, 1, 2, 3, 5, 8];
-let ys = xs.iter().rev().skip(1).map(|&x| x * 2).collect::<Vec<int>>();
-assert_eq!(ys, vec![10, 6, 4, 2, 2, 0]);
-~~~
-
-The method requires a type hint for the container type, if the surrounding code
-does not provide sufficient information.
-
-Containers can provide conversion from iterators through `collect` by
-implementing the `FromIterator` trait. For example, the implementation for
-vectors is as follows:
-
-~~~ {.ignore}
-impl<T> FromIterator<T> for Vec<T> {
- fn from_iter<I:Iterator<A>>(mut iterator: I) -> Vec<T> {
- let (lower, _) = iterator.size_hint();
- let mut vector = Vec::with_capacity(lower);
- for element in iterator {
- vector.push(element);
- }
- vector
- }
-}
-~~~
-
-### Size hints
-
-The `Iterator` trait provides a `size_hint` default method, returning a lower
-bound and optionally on upper bound on the length of the iterator:
-
-~~~ {.ignore}
-fn size_hint(&self) -> (uint, Option<uint>) { (0, None) }
-~~~
-
-The vector implementation of `FromIterator` from above uses the lower bound
-to pre-allocate enough space to hold the minimum number of elements the
-iterator will yield.
-
-The default implementation is always correct, but it should be overridden if
-the iterator can provide better information.
-
-The `ZeroStream` from earlier can provide an exact lower and upper bound:
-
-~~~
-# fn main() {}
-/// A stream of N zeroes
-struct ZeroStream {
- remaining: uint
-}
-
-impl ZeroStream {
- fn new(n: uint) -> ZeroStream {
- ZeroStream { remaining: n }
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- (self.remaining, Some(self.remaining))
- }
-}
-
-impl Iterator<int> for ZeroStream {
- fn next(&mut self) -> Option<int> {
- if self.remaining == 0 {
- None
- } else {
- self.remaining -= 1;
- Some(0)
- }
- }
-}
-~~~
-
-## Double-ended iterators
-
-The `DoubleEndedIterator` trait represents an iterator able to yield elements
-from either end of a range. It inherits from the `Iterator` trait and extends
-it with the `next_back` function.
-
-A `DoubleEndedIterator` can have its direction changed with the `rev` adaptor,
-returning another `DoubleEndedIterator` with `next` and `next_back` exchanged.
-
-~~~
-let xs = [1i, 2, 3, 4, 5, 6];
-let mut it = xs.iter();
-println!("{}", it.next()); // prints `Some(1)`
-println!("{}", it.next()); // prints `Some(2)`
-println!("{}", it.next_back()); // prints `Some(6)`
-
-// prints `5`, `4` and `3`
-for &x in it.rev() {
- println!("{}", x)
-}
-~~~
-
-The `chain`, `map`, `filter`, `filter_map` and `inspect` adaptors are
-`DoubleEndedIterator` implementations if the underlying iterators are.
-
-~~~
-let xs = [1i, 2, 3, 4];
-let ys = [5i, 6, 7, 8];
-let mut it = xs.iter().chain(ys.iter()).map(|&x| x * 2);
-
-println!("{}", it.next()); // prints `Some(2)`
-
-// prints `16`, `14`, `12`, `10`, `8`, `6`, `4`
-for x in it.rev() {
- println!("{}", x);
-}
-~~~
-
-The `reverse_` method is also available for any double-ended iterator yielding
-mutable references. It can be used to reverse a container in-place. Note that
-the trailing underscore is a workaround for issue #5898 and will be removed.
-
-~~~
-let mut ys = [1i, 2, 3, 4, 5];
-ys.mut_iter().reverse_();
-assert!(ys == [5i, 4, 3, 2, 1]);
-~~~
-
-## Random-access iterators
-
-The `RandomAccessIterator` trait represents an iterator offering random access
-to the whole range. The `indexable` method retrieves the number of elements
-accessible with the `idx` method.
-
-The `chain` adaptor is an implementation of `RandomAccessIterator` if the
-underlying iterators are.
-
-~~~
-let xs = [1i, 2, 3, 4, 5];
-let ys = [7i, 9, 11];
-let mut it = xs.iter().chain(ys.iter());
-println!("{}", it.idx(0)); // prints `Some(1)`
-println!("{}", it.idx(5)); // prints `Some(7)`
-println!("{}", it.idx(7)); // prints `Some(11)`
-println!("{}", it.idx(8)); // prints `None`
-
-// yield two elements from the beginning, and one from the end
-it.next();
-it.next();
-it.next_back();
-
-println!("{}", it.idx(0)); // prints `Some(3)`
-println!("{}", it.idx(4)); // prints `Some(9)`
-println!("{}", it.idx(6)); // prints `None`
-~~~
+You may enjoy reading the [iterator](std/iter/index.html) and
+[collections](std/collections/index.html) documentation.
}
fn main() {
- let mut futures = Vec::from_fn(1000, |ind| Future::spawn( proc() { partial_sum(ind) }));
+ let mut futures = Vec::from_fn(200, |ind| Future::spawn( proc() { partial_sum(ind) }));
let mut final_res = 0f64;
- for ft in futures.mut_iter() {
+ for ft in futures.iter_mut() {
final_res += ft.get();
}
println!("π^2/6 is not far from : {}", final_res);
0
}
-// These functions are invoked by the compiler, but not
+// These functions and traits are used by the compiler, but not
// for a bare-bones hello world. These are normally
// provided by libstd.
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
+#[lang = "sized"] trait Sized { }
# // fn main() {} tricked you, rustdoc!
```
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
+#[lang = "sized"] trait Sized { }
# // fn main() {} tricked you, rustdoc!
```
The compiler currently makes a few assumptions about symbols which are available
in the executable to call. Normally these functions are provided by the standard
-library, but without it you must define your own.
+library, but without it you must define your own.
The first of these two functions, `stack_exhausted`, is invoked whenever stack
overflow is detected. This function has a number of restrictions about how it
information), but crates which do not trigger failure can be assured that this
function is never called.
+The final item in the example is a trait called `Sized`. This is a trait
+that represents data of a known static size: it is integral to the
+Rust type system, and so the compiler expects the standard library to
+provide it. Since you are not using the standard library, you have to
+provide it yourself.
+
## Using libcore
> **Note**: the core library's structure is unstable, and it is recommended to
#[lang = "stack_exhausted"] extern fn stack_exhausted() {}
#[lang = "eh_personality"] extern fn eh_personality() {}
+#[lang = "sized"] trait Sized {}
```
Note the use of `abort`: the `exchange_malloc` lang item is assumed to
(If you're concerned about `curl | sudo sh`, please keep reading. Disclaimer
below.)
-If you're on Windows, please [download this .exe and run
-it](https://static.rust-lang.org/dist/rust-nightly-install.exe).
+If you're on Windows, please download either the [32-bit
+installer](https://static.rust-lang.org/dist/rust-nightly-i686-w64-mingw32.exe)
+or the [64-bit
+installer](https://static.rust-lang.org/dist/rust-nightly-x86_64-w64-mingw32.exe)
+and run it.
If you decide you don't want Rust anymore, we'll be a bit sad, but that's okay.
Not every programming language is great for everyone. Just pass an argument to
```{bash}
$ rustc main.rs
-$ ./hello_world # or hello_world.exe on Windows
+$ ./main # or main.exe on Windows
Hello, world!
```
}
```
-These two lines define a **function** in Rust. The `main` function is special:
+These lines define a **function** in Rust. The `main` function is special:
it's the beginning of every Rust program. The first line says "I'm declaring a
function named `main`, which takes no arguments and returns nothing." If there
were arguments, they would go inside the parentheses (`(` and `)`), and because
This line does all of the work in our little program. There are a number of
details that are important here. The first is that it's indented with four
spaces, not tabs. Please configure your editor of choice to insert four spaces
-with the tab key. We provide some sample configurations for various editors
-[here](https://github.com/rust-lang/rust/tree/master/src/etc).
+with the tab key. We provide some [sample configurations for various
+editors](https://github.com/rust-lang/rust/tree/master/src/etc).
The second point is the `println!()` part. This is calling a Rust **macro**,
which is how metaprogramming is done in Rust. If it were a function instead, it
```
There are now two files: our source code, with the `.rs` extension, and the
-executable (`hello_world.exe` on Windows, `hello_world` everywhere else)
+executable (`main.exe` on Windows, `main` everywhere else)
```{bash}
-$ ./hello_world # or hello_world.exe on Windows
+$ ./main # or main.exe on Windows
```
This prints out our `Hello, world!` text to our terminal.
Rust is a statically typed language, which means that we specify our types up
front. So why does our first example compile? Well, Rust has this thing called
-"[Hindley-Milner type
-inference](http://en.wikipedia.org/wiki/Hindley%E2%80%93Milner_type_system)",
-named after some really smart type theorists. If you clicked that link, don't
-be scared: what this means for you is that Rust will attempt to infer the types
-in your program, and it's pretty good at it. If it can infer the type, Rust
+"type inference." If it can figure out what the type of something is, Rust
doesn't require you to actually type it out.
-We can add the type if we want to. Types come after a colon (`:`):
+We can add the type if we want to, though. Types come after a colon (`:`):
```{rust}
let x: int = 5;
The `for` loop is used to loop a particular number of times. Rust's `for` loops
work a bit differently than in other systems languages, however. Rust's `for`
-loop doesn't look like this C `for` loop:
+loop doesn't look like this "C style" `for` loop:
-```{ignore,c}
+```{c}
for (x = 0; x < 10; x++) {
printf( "%d\n", x );
}
```
-It looks like this:
+Instead, it looks like this:
```{rust}
for x in range(0i, 10i) {
the iterator, and we loop another time. When there are no more values, the
`for` loop is over.
-In our example, the `range` function is a function, provided by Rust, that
-takes a start and an end position, and gives an iterator over those values. The
-upper bound is exclusive, though, so our loop will print `0` through `9`, not
-`10`.
+In our example, `range` is a function that takes a start and an end position,
+and gives an iterator over those values. The upper bound is exclusive, though,
+so our loop will print `0` through `9`, not `10`.
Rust does not have the "C style" `for` loop on purpose. Manually controlling
each element of the loop is complicated and error prone, even for experienced C
-developers.
+developers.
We'll talk more about `for` when we cover **iterator**s, later in the Guide.
and then prints it back out:
```{rust,ignore}
-use std::io;
-
fn main() {
println!("Type something!");
know that we're safe.
There's another detail here that's not 100% clear because of how `println!`
-works. `num` is actually of type `&int`, that is, it's a reference to an `int`,
+works. `num` is actually of type `&int`. That is, it's a reference to an `int`,
not an `int` itself. `println!` handles the dereferencing for us, so we don't
see it. This code works fine too:
* [Strings](guide-strings.html)
* [Pointers](guide-pointers.html)
* [References and Lifetimes](guide-lifetimes.html)
-* [Containers and Iterators](guide-container.html)
* [Tasks and Communication](guide-tasks.html)
* [Foreign Function Interface](guide-ffi.html)
* [Writing Unsafe and Low-Level Code](guide-unsafe.html)
# The standard library
-You can find function-level documentation for the entire standard library
-[here](std/index.html). There's a list of crates on the left with more specific
-sections, or you can use the search bar at the top to search for something if
-you know its name.
+We have [API documentation for the entire standard
+library](std/index.html). There's a list of crates on the left with more
+specific sections, or you can use the search bar at the top to search for
+something if you know its name.
# External documentation
* `tuple_indexing` - Allows use of tuple indexing (expressions like `expr.0`)
+* `associated_types` - Allows type aliases in traits. Experimental.
+
If a feature is promoted to a language feature, then all existing programs will
start to receive compilation warnings about #[feature] directives which enabled
the new feature (because the directive is no longer necessary). However, if
they exist to support interoperability with foreign code,
and writing performance-critical or low-level functions.
-The standard library contains addtional 'smart pointer' types beyond references
+The standard library contains additional 'smart pointer' types beyond references
and raw pointers.
### Function types
<!ENTITY rustIdent "[a-zA-Z_][a-zA-Z_0-9]*">
<!ENTITY rustIntSuf "([iu](8|16|32|64)?)?">
]>
-<language name="Rust" version="0.12.0-pre" kateversion="2.4" section="Sources" extensions="*.rs" mimetype="text/x-rust" priority="15">
+<language name="Rust" version="0.12.0" kateversion="2.4" section="Sources" extensions="*.rs" mimetype="text/x-rust" priority="15">
<highlighting>
<list name="fn">
<item> fn </item>
"test/bench/shootout-binarytrees.rs", # BSD
"test/bench/shootout-chameneos-redux.rs", # BSD
"test/bench/shootout-fannkuch-redux.rs", # BSD
+ "test/bench/shootout-fasta.rs", # BSD
"test/bench/shootout-k-nucleotide.rs", # BSD
"test/bench/shootout-mandelbrot.rs", # BSD
"test/bench/shootout-meteor.rs", # BSD
"test/bench/shootout-nbody.rs", # BSD
- "test/bench/shootout-pidigits.rs", # BSD
"test/bench/shootout-regex-dna.rs", # BSD
"test/bench/shootout-reverse-complement.rs", # BSD
+ "test/bench/shootout-spectralnorm.rs", # BSD
"test/bench/shootout-threadring.rs", # BSD
]
typeset -A opt_args
+_rustc_debuginfo_levels=(
+ "0[no debug info]"
+ "1[line-tables only (for stacktraces and breakpoints)]"
+ "2[full debug info with variable and type information (same as -g)]"
+)
+
+_rustc_crate_types=(
+ 'bin'
+ 'lib'
+ 'rlib'
+ 'dylib'
+ 'staticlib'
+)
+
+_rustc_emit_types=(
+ 'asm'
+ 'bc'
+ 'ir'
+ 'obj'
+ 'link'
+)
+_rustc_pretty_types=(
+ 'normal[un-annotated source]'
+ 'expanded[crates expanded]'
+ 'typed[crates expanded, with type annotations]'
+ 'identified[fully parenthesized, AST nodes and blocks with IDs]'
+ 'flowgraph=[graphviz formatted flowgraph for node]:NODEID:'
+)
+_rustc_color_types=(
+ 'auto[colorize, if output goes to a tty (default)]'
+ 'always[always colorize output]'
+ 'never[never colorize output]'
+)
+
+_rustc_opts_vals=(
+ --crate-name='[Specify the name of the crate being built]'
+ --crate-type='[Comma separated list of types of crates for the compiler to emit]:TYPES:_values -s "," "Crate types" "$_rustc_crate_types[@]"'
+ --emit='[Comma separated list of types of output for the compiler to emit]:TYPES:_values -s "," "Emit Targets" "$_rustc_emit_types[@]"'
+ --debuginfo='[Emit DWARF debug info to the objects created]:LEVEL:_values "Debug Levels" "$_rustc_debuginfo_levels[@]"'
+ --dep-info='[Output dependency info to <filename> after compiling]::FILE:_files -/'
+ --sysroot='[Override the system root]:PATH:_files -/'
+ --cfg='[Configure the compilation environment]:SPEC:'
+ --out-dir='[Write output to compiler-chosen filename in <dir>. Ignored if -o is specified. (default the current directory)]:DIR:_files -/'
+ -o'[Write output to <filename>. Ignored if more than one --emit is specified.]:FILENAME:_files'
+ --opt-level='[Optimize with possible levels 0-3]:LEVEL:(0 1 2 3)'
+ --pretty='[Pretty-print the input instead of compiling]::TYPE:_values "TYPES" "$_rustc_pretty_types[@]"'
+ -L'[Add a directory to the library search path]:DIR:_files -/'
+ --target='[Target triple cpu-manufacturer-kernel\[-os\] to compile]:TRIPLE:'
+ --color='[Configure coloring of output]:CONF:_values "COLORS" "$_rustc_color_types[@]"'
+ {-v,--version}'[Print version info and exit]::VERBOSE:(verbose)'
+ --explain='[Provide a detailed explanation of an error message]:OPT:'
+ --extern'[Specify where an external rust library is located]:ARG:'
+)
+
_rustc_opts_switches=(
- --ar'[Program to use for managing archives instead of the default.]'
- -c'[Compile and assemble, but do not link]'
- --cfg'[Configure the compilation environment]'
- --crate-id'[Output the crate id and exit]'
- --crate-file-name'[deprecated in favor of --print-file-name]'
- --crate-name'[Specify the name of the crate being built]'
- --crate-type'[Specify the type of crate to crate]'
- --debuginfo'[Emit DWARF debug info to the objects created: 0 = no debug info, 1 = line-tables only (for stacktraces and breakpoints), 2 = full debug info with variable and type information (same as -g)]'
- --dep-info'[Output dependency info to <filename> after compiling]'
-g'[Equivalent to --debuginfo=2]'
{-h,--help}'[Display this message]'
- -L'[Add a directory to the library search path]'
- --linker'[Program to use for linking instead of the default.]'
- --link-args'[FLAGS is a space-separated list of flags passed to the linker]'
- --llvm-args'[A list of arguments to pass to llvm, comma separated]'
- --ls'[List the symbols defined by a library crate]'
--no-analysis'[Parse and expand the output, but run no analysis or produce output]'
- --no-rpath'[Disables setting the rpath in libs/exes]'
--no-trans'[Run all passes except translation; no output]'
-O'[Equivalent to --opt-level=2]'
- -o'[Write output to <filename>]'
- --opt-level'[Optimize with possible levels 0-3]'
- --out-dir'[Write output to compiler-chosen filename in <dir>]'
--parse-only'[Parse only; do not compile, assemble, or link]'
- --passes'[Comma or space separated list of pass names to use]'
- --pretty'[Pretty-print the input instead of compiling]'
--print-crate-name'[Output the crate name and exit]'
--print-file-name'[Output the file(s) that would be written if compilation continued and exit]'
- --save-temps'[Write intermediate files (.bc, .opt.bc, .o) in addition to normal output]'
- --sysroot'[Override the system root]'
--test'[Build a test harness]'
- --target'[Target triple cpu-manufacturer-kernel\[-os\] to compile]'
- --target-cpu'[Select target processor (llc -mcpu=help for details)]'
- --target-feature'[Target specific attributes (llc -mattr=help for details)]'
- --relocation-model'[Relocation model (llc --help for details)]'
- {-v,--version}'[Print version info and exit]'
)
+_rustc_opts_codegen=(
+ 'ar=[Path to the archive utility to use when assembling archives.]:BIN:_path_files'
+ 'linker=[Path to the linker utility to use when linking libraries, executables, and objects.]:BIN:_path_files'
+ 'link-args=[A space-separated list of extra arguments to pass to the linker when the linker is invoked.]:ARGS:'
+ 'target-cpu=[Selects a target processor. If the value is "help", then a list of available CPUs is printed.]:CPU:'
+ 'target-feature=[A space-separated list of features to enable or disable for the target. A preceding "+" enables a feature while a preceding "-" disables it. Available features can be discovered through target-cpu=help.]:FEATURE:'
+ 'passes=[A space-separated list of extra LLVM passes to run. A value of "list" will cause rustc to print all known passes and exit. The passes specified are appended at the end of the normal pass manager.]:LIST:'
+ 'llvm-args=[A space-separated list of arguments to pass through to LLVM.]:ARGS:'
+ 'save-temps[If specified, the compiler will save more files (.bc, .o, .no-opt.bc) generated throughout compilation in the output directory.]'
+ 'rpath[If specified, then the rpath value for dynamic libraries will be set in either dynamic library or executable outputs.]'
+ 'no-prepopulate-passes[Suppresses pre-population of the LLVM pass manager that is run over the module.]'
+ 'no-vectorize-loops[Suppresses running the loop vectorization LLVM pass, regardless of optimization level.]'
+ 'no-vectorize-slp[Suppresses running the LLVM SLP vectorization pass, regardless of optimization level.]'
+ 'soft-float[Generates software floating point library calls instead of hardware instructions.]'
+ 'prefer-dynamic[Prefers dynamic linking to static linking.]'
+ "no-integrated-as[Force usage of an external assembler rather than LLVM's integrated one.]"
+ 'no-redzone[disable the use of the redzone]'
+ 'relocation-model=[The relocation model to use. (default: pic)]:MODEL:(pic static dynamic-no-pic)'
+ 'code-model=[choose the code model to use (llc -code-model for details)]:MODEL:'
+ 'metadata=[metadata to mangle symbol names with]:VAL:'
+ 'extra-filenames=[extra data to put in each output filename]:VAL:'
+ 'codegen-units=[divide crate into N units to optimize in parallel]:N:'
+ 'help[Show all codegen options]'
+)
+
_rustc_opts_lint=(
- 'attribute-usage[detects bad use of attributes]'
- 'ctypes[proper use of libc types in foreign modules]'
- 'dead-assignment[detect assignments that will never be read]'
- 'dead-code[detect piece of code that will never be used]'
- 'default-type-param-usage[prevents explicitly setting a type parameter with a default]'
- 'deprecated[detects use of #\[deprecated\] items]'
+ 'help[Show a list of all lints]'
'experimental[detects use of #\[experimental\] items]'
- 'heap-memory[use of any (~ type or @ type) heap memory]'
+ 'heap-memory[use of any (Box type or @ type) heap memory]'
'managed-heap-memory[use of managed (@ type) heap memory]'
'missing-doc[detects missing documentation for public members]'
- 'non-camel-case-types[types, variants and traits should have camel case names]'
- 'non-uppercase-pattern-statics[static constants in match patterns should be all caps]'
'non-uppercase-statics[static constants should have uppercase identifiers]'
'owned-heap-memory[use of owned (~ type) heap memory]'
+ 'unnecessary-qualification[detects unnecessarily qualified names]'
+ 'unsafe-block[usage of an `unsafe` block]'
+ 'unstable[detects use of #\[unstable\] items (incl. items with no stability attribute)]'
+ 'unused-result[unused result of an expression in a statement]'
+ 'variant-size-difference[detects enums with widely varying variant sizes]'
+ 'ctypes[proper use of libc types in foreign modules]'
+ 'dead-assignment[detect assignments that will never be read]'
+ 'dead-code[detect piece of code that will never be used]'
+ 'deprecated[detects use of #\[deprecated\] items]'
+ 'non-camel-case-types[types, variants and traits should have camel case names]'
+ 'non-snake-case[methods, functions, lifetime parameters and modules should have snake case names]'
'path-statement[path statements with no effect]'
+ 'raw-pointer-deriving[uses of #\[deriving\] with raw pointers are rarely correct]'
'type-limits[comparisons made useless by limits of the types involved]'
'type-overflow[literal out of range for its type]'
- 'unknown-crate-type[unknown crate type found in #\[crate_type\] directive]'
- 'unknown-features[unknown features found in crate-level #\[feature\] directives]'
'unnecessary-allocation[detects unnecessary allocations that can be eliminated]'
'unnecessary-parens[`if`, `match`, `while` and `return` do not need parentheses]'
- 'unnecessary-qualification[detects unnecessarily qualified names]'
- 'unnecessary-typecast[detects unnecessary type casts, that can be removed]'
'unreachable-code[detects unreachable code]'
'unrecognized-lint[unrecognized lint attribute]'
- 'unsafe-block[usage of an `unsafe` block]'
- 'unstable[detects use of #\[unstable\] items (incl. items with no stability attribute)]'
+ 'unsigned-negate[using an unary minus operator on unsigned type]'
+ 'unused-attribute[detects attributes that were not used by the compiler]'
'unused-imports[imports that are never used]'
'unused-must-use[unused result of a type flagged as #\[must_use\]]'
"unused-mut[detect mut variables which don't need to be mutable]"
- 'unused-result[unused result of an expression in a statement]'
'unused-unsafe[unnecessary use of an `unsafe` block]'
'unused-variable[detect variables which are not used in any way]'
+ 'visible-private-types[detect use of private types in exported type signatures]'
'warnings[mass-change the level for lints which produce warnings]'
'while-true[suggest using `loop { }` instead of `while true { }`]'
+ 'unknown-crate-type[unknown crate type found in #\[crate_type\] directive]'
+ 'unknown-features[unknown features found in crate-level #\[feature\] directives]'
+ 'bad-style[group of non_camel_case_types, non_snake_case, non_uppercase_statics]'
+ 'unused[group of unused_imports, unused_variable, dead_assignment, dead_code, unused_mut, unreachable_code]'
)
_rustc_opts_debug=(
+ 'verbose[in general, enable more debug printouts]'
+ 'time-passes[measure time of each rustc pass]'
+ 'count-llvm-insns[count where LLVM instrs originate]'
+ 'time-llvm-passes[measure time of each LLVM pass]'
+ 'trans-stats[gather trans statistics]'
'asm-comments[generate comments into the assembly (may change behavior)]'
+ 'no-verify[skip LLVM verification]'
'borrowck-stats[gather borrowck statistics]'
- 'count-llvm-insns[count where LLVM instrs originate]'
- 'count-type-sizes[count the sizes of aggregate types]'
- 'debug-info[Produce debug info (experimental)]'
+ 'no-landing-pads[omit landing pads for unwinding]'
'debug-llvm[enable debug output from LLVM]'
- 'extra-debug-info[Extra debugging info (experimental)]'
- 'gc[Garbage collect shared data (experimental)]'
- 'gen-crate-map[Force generation of a toplevel crate map]'
- 'lto[Perform LLVM link-time optimizations]'
+ 'show-span[show spans for compiler debugging]'
+ 'count-type-sizes[count the sizes of aggregate types]'
'meta-stats[gather metadata statistics]'
- "no-integrated-as[Use external assembler rather than LLVM's integrated one]"
- 'no-landing-pads[omit landing pads for unwinding]'
'no-opt[do not optimize, even if -O is passed]'
- "no-prepopulate-passes[Don't pre-populate the pass managers with a list of passes, only use the passes from --passes]"
- "no-vectorize-loops[Don't run the loop vectorization optimization passes]"
- "no-vectorize-slp[Don't run LLVM's SLP vectorization passes]"
- 'no-verify[skip LLVM verification]'
- 'prefer-dynamic[Prefer dynamic linking to static linking]'
'print-link-args[Print the arguments passed to the linker]'
+ 'gc[Garbage collect shared data (experimental)]'
'print-llvm-passes[Prints the llvm optimization passes being run]'
- 'soft-float[Generate software floating point library calls]'
- 'time-llvm-passes[measure time of each LLVM pass]'
- 'time-passes[measure time of each rustc pass]'
- 'trans-stats[gather trans statistics]'
- 'verbose[in general, enable more debug printouts]'
+ 'lto[Perform LLVM link-time optimizations]'
+ 'ast-json[Print the AST as JSON and halt]'
+ 'ast-json-noexpand[Print the pre-expansion AST as JSON and halt]'
+ 'ls[List the symbols defined by a library crate]'
+ 'save-analysis[Write syntax and type analysis information in addition to normal output]'
+ 'flowgraph-print-loans[Include loan analysis data in --pretty flowgraph output]'
+ 'flowgraph-print-moves[Include move analysis data in --pretty flowgraph output]'
+ 'flowgraph-print-assigns[Include assignment analysis data in --pretty flowgraph output]'
+ 'flowgraph-print-all[Include all dataflow analysis data in --pretty flowgraph output]'
)
_rustc_opts_fun_lint(){
}
_rustc_opts_fun_debug(){
- _describe 'options' _rustc_opts_debug
+ _values 'options' "$_rustc_opts_debug[@]"
+}
+
+_rustc_opts_fun_codegen(){
+ _values 'options' "$_rustc_opts_codegen[@]"
}
_arguments -s : \
- '(-W --warn)'{-W,--warn}'[Set lint warnings]:lint options:_rustc_opts_fun_lint' \
- '(-A --allow)'{-A,--allow}'[Set lint allowed]:lint options:_rustc_opts_fun_lint' \
- '(-D --deny)'{-D,--deny}'[Set lint denied]:lint options:_rustc_opts_fun_lint' \
- '(-F --forbid)'{-F,--forbid}'[Set lint forbidden]:lint options:_rustc_opts_fun_lint' \
+ '(-W --warn)'{-W,--warn=}'[Set lint warnings]:lint options:_rustc_opts_fun_lint' \
+ '(-A --allow)'{-A,--allow=}'[Set lint allowed]:lint options:_rustc_opts_fun_lint' \
+ '(-D --deny)'{-D,--deny=}'[Set lint denied]:lint options:_rustc_opts_fun_lint' \
+ '(-F --forbid)'{-F,--forbid=}'[Set lint forbidden]:lint options:_rustc_opts_fun_lint' \
'*-Z[Set internal debugging options]:debug options:_rustc_opts_fun_debug' \
+ '*-C[Set internal Codegen options]:codegen options:_rustc_opts_fun_codegen' \
"$_rustc_opts_switches[@]" \
- '*::files:_files -g "*.rs"'
+ "$_rustc_opts_vals[@]" \
+ '::files:_files -g "*.rs"'
let sp = syntax::codemap::Span {
lo: syntax::codemap::BytePos(from_str::<u32>(start).unwrap() - offset),
hi: syntax::codemap::BytePos(from_str::<u32>(end).unwrap() + 1),
- expn_info: None
+ expn_id: syntax::codemap::NO_EXPANSION
};
TokenAndSpan {
-Subproject commit aae04170ccbfeea620502106b581c3c216cd132a
+Subproject commit 2dba541881fb8e35246d653bbe2e7c7088777a4a
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// FIXME: #13994: port to the sized deallocation API when available
// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
-// and `nonnull`
-
-#[cfg(not(test))] use core::raw;
-#[cfg(stage0, not(test))] use util;
/// Returns a pointer to `size` bytes of memory.
///
imp::stats_print();
}
-// The compiler never calls `exchange_free` on Box<ZeroSizeType>, so zero-size
-// allocations can point to this `static`. It would be incorrect to use a null
-// pointer, due to enums assuming types like unique pointers are never null.
-pub static mut EMPTY: uint = 12345;
+/// An arbitrary non-null address to represent zero-size allocations.
+///
+/// This preserves the non-null invariant for types like `Box<T>`. The address may overlap with
+/// non-zero-size memory allocations.
+pub static EMPTY: *mut () = 0x1 as *mut ();
/// The allocator for unique pointers.
#[cfg(not(test))]
#[inline]
unsafe fn exchange_malloc(size: uint, align: uint) -> *mut u8 {
if size == 0 {
- &EMPTY as *const uint as *mut u8
+ EMPTY as *mut u8
} else {
allocate(size, align)
}
deallocate(ptr, size, align);
}
-// FIXME: #7496
-#[cfg(stage0, not(test))]
-#[lang="closure_exchange_malloc"]
-#[inline]
-#[allow(deprecated)]
-unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
- align: uint) -> *mut u8 {
- let total_size = util::get_box_size(size, align);
- let p = allocate(total_size, 8);
-
- let alloc = p as *mut raw::Box<()>;
- (*alloc).drop_glue = drop_glue;
-
- alloc as *mut u8
-}
-
-// FIXME: #7496
-#[cfg(not(stage0), not(test))]
-#[lang="closure_exchange_malloc"]
-#[inline]
-#[allow(deprecated)]
-unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
- align: uint) -> *mut u8 {
- let p = allocate(size, align);
-
- let alloc = p as *mut raw::Box<()>;
- (*alloc).drop_glue = drop_glue;
-
- alloc as *mut u8
-}
-
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
#[cfg(jemalloc)]
mod imp {
use core::option::{None, Option};
- use core::ptr::{RawPtr, mut_null, null};
+ use core::ptr::{RawPtr, null_mut, null};
use core::num::Int;
use libc::{c_char, c_int, c_void, size_t};
use super::MIN_ALIGN;
flags: c_int) -> *mut c_void;
fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t,
flags: c_int) -> size_t;
- #[cfg(stage0)]
- fn je_dallocx(ptr: *mut c_void, flags: c_int);
- #[cfg(not(stage0))]
fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
fn je_nallocx(size: size_t, flags: c_int) -> size_t;
fn je_malloc_stats_print(write_cb: Option<extern "C" fn(cbopaque: *mut c_void,
}
#[inline]
- #[cfg(stage0)]
- pub unsafe fn deallocate(ptr: *mut u8, _size: uint, align: uint) {
- let flags = align_to_flags(align);
- je_dallocx(ptr as *mut c_void, flags)
- }
-
- #[inline]
- #[cfg(not(stage0))]
pub unsafe fn deallocate(ptr: *mut u8, size: uint, align: uint) {
let flags = align_to_flags(align);
je_sdallocx(ptr as *mut c_void, size as size_t, flags)
pub fn stats_print() {
unsafe {
- je_malloc_stats_print(None, mut_null(), null())
+ je_malloc_stats_print(None, null_mut(), null())
}
}
}
#[cfg(not(jemalloc), unix)]
mod imp {
use core::cmp;
- use core::mem;
use core::ptr;
use libc;
use libc_heap;
//! The global (exchange) heap.
use libc::{c_void, size_t, free, malloc, realloc};
-use core::ptr::{RawPtr, mut_null};
+use core::ptr::{RawPtr, null_mut};
/// A wrapper around libc::malloc, aborting on out-of-memory.
#[inline]
// `malloc(0)` may allocate, but it may also return a null pointer
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
if size == 0 {
- mut_null()
+ null_mut()
} else {
let p = malloc(size as size_t);
if p.is_null() {
// http://pubs.opengroup.org/onlinepubs/9699919799/functions/realloc.html
if size == 0 {
free(ptr as *mut c_void);
- mut_null()
+ null_mut()
} else {
let p = realloc(ptr as *mut c_void, size as size_t);
if p.is_null() {
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
unsafe {
- let chunk = TypedArenaChunk::<T>::new(ptr::mut_null(), capacity);
+ let chunk = TypedArenaChunk::<T>::new(ptr::null_mut(), capacity);
TypedArena {
ptr: Cell::new((*chunk).start() as *const T),
end: Cell::new((*chunk).end() as *const T),
// `op` is a bitwise operation, since any bits that should've
// been masked were fine to change anyway. `b` is masked to
// make sure its unmasked bits do not cause damage.
- for (a, (_, b)) in self.storage.mut_iter()
+ for (a, (_, b)) in self.storage.iter_mut()
.zip(other.mask_words(0)) {
let w = op(*a, b);
if *a != w {
/// ```
#[inline]
pub fn set_all(&mut self) {
- for w in self.storage.mut_iter() { *w = !0u; }
+ for w in self.storage.iter_mut() { *w = !0u; }
}
/// Flips all bits.
/// ```
#[inline]
pub fn negate(&mut self) {
- for w in self.storage.mut_iter() { *w = !*w; }
+ for w in self.storage.iter_mut() { *w = !*w; }
}
/// Calculates the union of two bitvectors. This acts like the bitwise `or`
impl Mutable for Bitv {
#[inline]
fn clear(&mut self) {
- for w in self.storage.mut_iter() { *w = 0u; }
+ for w in self.storage.iter_mut() { *w = 0u; }
}
}
fn clone_from(&mut self, source: &Bitv) {
self.nbits = source.nbits;
self.storage.reserve(source.storage.len());
- for (i, w) in self.storage.mut_iter().enumerate() { *w = source.storage[i]; }
+ for (i, w) in self.storage.iter_mut().enumerate() { *w = source.storage[i]; }
}
}
impl<T> Rawlink<T> {
/// Like Option::None for Rawlink
fn none() -> Rawlink<T> {
- Rawlink{p: ptr::mut_null()}
+ Rawlink{p: ptr::null_mut()}
}
/// Like Option::Some for Rawlink
/// ```
pub fn insert_when(&mut self, elt: T, f: |&T, &T| -> bool) {
{
- let mut it = self.mut_iter();
+ let mut it = self.iter_mut();
loop {
match it.peek_next() {
None => break,
/// This operation should compute in O(max(N, M)) time.
pub fn merge(&mut self, mut other: DList<T>, f: |&T, &T| -> bool) {
{
- let mut it = self.mut_iter();
+ let mut it = self.iter_mut();
loop {
let take_a = match (it.peek_next(), other.front()) {
(_ , None) => return,
Items{nelem: self.len(), head: &self.list_head, tail: self.list_tail}
}
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T> {
+ self.iter_mut()
+ }
+
/// Provides a forward iterator with mutable references.
#[inline]
- pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T> {
+ pub fn iter_mut<'a>(&'a mut self) -> MutItems<'a, T> {
let head_raw = match self.list_head {
Some(ref mut h) => Rawlink::some(&mut **h),
None => Rawlink::none(),
}
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> MoveItems<T> {
+ self.into_iter()
+ }
/// Consumes the list into an iterator yielding elements by value.
#[inline]
- pub fn move_iter(self) -> MoveItems<T> {
+ pub fn into_iter(self) -> MoveItems<T> {
MoveItems{list: self}
}
}
check_links(&m);
let sum = v.append(u.as_slice());
assert_eq!(sum.len(), m.len());
- for elt in sum.move_iter() {
+ for elt in sum.into_iter() {
assert_eq!(m.pop_front(), Some(elt))
}
}
check_links(&m);
let sum = u.append(v.as_slice());
assert_eq!(sum.len(), m.len());
- for elt in sum.move_iter() {
+ for elt in sum.into_iter() {
assert_eq!(m.pop_front(), Some(elt))
}
}
m.rotate_backward(); check_links(&m);
m.push_front(9); check_links(&m);
m.rotate_forward(); check_links(&m);
- assert_eq!(vec![3i,9,5,1,2], m.move_iter().collect());
+ assert_eq!(vec![3i,9,5,1,2], m.into_iter().collect());
}
#[test]
fn test_mut_iter() {
let mut m = generate_test();
let mut len = m.len();
- for (i, elt) in m.mut_iter().enumerate() {
+ for (i, elt) in m.iter_mut().enumerate() {
assert_eq!(i as int, *elt);
len -= 1;
}
assert_eq!(len, 0);
let mut n = DList::new();
- assert!(n.mut_iter().next().is_none());
+ assert!(n.iter_mut().next().is_none());
n.push_front(4i);
n.push(5);
- let mut it = n.mut_iter();
+ let mut it = n.iter_mut();
assert_eq!(it.size_hint(), (2, Some(2)));
assert!(it.next().is_some());
assert!(it.next().is_some());
#[test]
fn test_iterator_mut_double_end() {
let mut n = DList::new();
- assert!(n.mut_iter().next_back().is_none());
+ assert!(n.iter_mut().next_back().is_none());
n.push_front(4i);
n.push_front(5);
n.push_front(6);
- let mut it = n.mut_iter();
+ let mut it = n.iter_mut();
assert_eq!(it.size_hint(), (3, Some(3)));
assert_eq!(*it.next().unwrap(), 6);
assert_eq!(it.size_hint(), (2, Some(2)));
let mut m = list_from(&[0i,2,4,6,8]);
let len = m.len();
{
- let mut it = m.mut_iter();
+ let mut it = m.iter_mut();
it.insert_next(-2);
loop {
match it.next() {
}
check_links(&m);
assert_eq!(m.len(), 3 + len * 2);
- assert_eq!(m.move_iter().collect::<Vec<int>>(), vec![-2,0,1,2,3,4,5,6,7,8,9,0,1]);
+ assert_eq!(m.into_iter().collect::<Vec<int>>(), vec![-2,0,1,2,3,4,5,6,7,8,9,0,1]);
}
#[test]
m.merge(n, |a, b| a <= b);
assert_eq!(m.len(), len);
check_links(&m);
- let res = m.move_iter().collect::<Vec<int>>();
+ let res = m.into_iter().collect::<Vec<int>>();
assert_eq!(res, vec![-1, 0, 0, 0, 1, 3, 5, 6, 7, 2, 7, 7, 9]);
}
m.push(4);
m.insert_ordered(3);
check_links(&m);
- assert_eq!(vec![2,3,4], m.move_iter().collect::<Vec<int>>());
+ assert_eq!(vec![2,3,4], m.into_iter().collect::<Vec<int>>());
}
#[test]
fn test_mut_rev_iter() {
let mut m = generate_test();
- for (i, elt) in m.mut_iter().rev().enumerate() {
+ for (i, elt) in m.iter_mut().rev().enumerate() {
assert_eq!((6-i) as int, *elt);
}
let mut n = DList::new();
- assert!(n.mut_iter().rev().next().is_none());
+ assert!(n.iter_mut().rev().next().is_none());
n.push_front(4i);
- let mut it = n.mut_iter().rev();
+ let mut it = n.iter_mut().rev();
assert!(it.next().is_some());
assert!(it.next().is_none());
}
check_links(&m);
let mut i = 0u;
- for (a, &b) in m.move_iter().zip(v.iter()) {
+ for (a, &b) in m.into_iter().zip(v.iter()) {
i += 1;
assert_eq!(a, b);
}
let v = &[0i, ..128];
let mut m: DList<int> = v.iter().map(|&x|x).collect();
b.iter(|| {
- assert!(m.mut_iter().count() == 128);
+ assert!(m.iter_mut().count() == 128);
})
}
#[bench]
let v = &[0i, ..128];
let mut m: DList<int> = v.iter().map(|&x|x).collect();
b.iter(|| {
- assert!(m.mut_iter().rev().count() == 128);
+ assert!(m.iter_mut().rev().count() == 128);
})
}
}
//! representation to hold C-like enum variants.
use core::prelude::*;
+use core::fmt;
-#[deriving(Clone, PartialEq, Eq, Hash, Show)]
+#[deriving(Clone, PartialEq, Eq, Hash)]
/// A specialized `Set` implementation to use enum types.
pub struct EnumSet<E> {
// We must maintain the invariant that no bits are set
bits: uint
}
+impl<E:CLike+fmt::Show> fmt::Show for EnumSet<E> {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(fmt, "{{"));
+ let mut first = true;
+ for e in self.iter() {
+ if !first {
+ try!(write!(fmt, ", "));
+ }
+ try!(write!(fmt, "{}", e));
+ first = false;
+ }
+ write!(fmt, "}}")
+ }
+}
+
/// An interface for casting C-like enum to uint and back.
pub trait CLike {
/// Converts a C-like enum to a `uint`.
assert!(e.is_empty());
}
+ #[test]
+ fn test_show() {
+ let mut e = EnumSet::empty();
+ assert_eq!("{}", e.to_string().as_slice());
+ e.add(A);
+ assert_eq!("{A}", e.to_string().as_slice());
+ e.add(C);
+ assert_eq!("{A, C}", e.to_string().as_slice());
+ }
+
///////////////////////////////////////////////////////////////////////////
// intersect
//! A priority queue implemented with a binary heap.
//!
+//! Insertions have `O(log n)` time complexity. Checking the largest element is `O(1)`, while
+//! popping it is `O(log n)`. Converting a vector to a priority queue can be done in-place in
+//! `O(n)` time. A priority queue can also be converted to a sorted vector in-place, allowing it to
+//! be used for an `O(n log n)` in-place heapsort.
+//!
//! # Example
//!
//! This is a larger example which implements [Dijkstra's algorithm][dijkstra]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! A double-ended queue implemented as a circular buffer.
-//!
-//! `RingBuf` implements the trait `Deque`. It should be imported with
-//! `use collections::Deque`.
+//! `RingBuf` is a double-ended queue with `O(1)` amortized inserts and removals from both
+//! ends of the container. It also has `O(1)` indexing like a vector. The contained elements are
+//! not required to be copyable, and the queue will be sendable if the contained type is sendable.
+//! Its interface `Deque` is defined in `collections`.
use core::prelude::*;
impl<T> Mutable for RingBuf<T> {
/// Clears the `RingBuf`, removing all values.
fn clear(&mut self) {
- for x in self.elts.mut_iter() { *x = None }
+ for x in self.elts.iter_mut() { *x = None }
self.nelts = 0;
self.lo = 0;
}
Items{index: 0, rindex: self.nelts, lo: self.lo, elts: self.elts.as_slice()}
}
+ /// Deprecated: use `iter_mut`
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T> {
+ self.iter_mut()
+ }
+
/// Returns a front-to-back iterator which returns mutable references.
///
/// # Example
/// buf.push(5i);
/// buf.push(3);
/// buf.push(4);
- /// for num in buf.mut_iter() {
+ /// for num in buf.iter_mut() {
/// *num = *num - 2;
/// }
/// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
- /// assert_eq!(buf.mut_iter().collect::<Vec<&mut int>>().as_slice(), b);
+ /// assert_eq!(buf.iter_mut().collect::<Vec<&mut int>>().as_slice(), b);
/// ```
- pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T> {
+ pub fn iter_mut<'a>(&'a mut self) -> MutItems<'a, T> {
let start_index = raw_index(self.lo, self.elts.len(), 0);
let end_index = raw_index(self.lo, self.elts.len(), self.nelts);
// start_index to self.elts.len()
// and then
// 0 to end_index
- let (temp, remaining1) = self.elts.mut_split_at(start_index);
- let (remaining2, _) = temp.mut_split_at(end_index);
+ let (temp, remaining1) = self.elts.split_at_mut(start_index);
+ let (remaining2, _) = temp.split_at_mut(end_index);
MutItems { remaining1: remaining1,
remaining2: remaining2,
nelts: self.nelts }
} else {
// Items to iterate goes from start_index to end_index:
- let (empty, elts) = self.elts.mut_split_at(0);
- let remaining1 = elts.mut_slice(start_index, end_index);
+ let (empty, elts) = self.elts.split_at_mut(0);
+ let remaining1 = elts.slice_mut(start_index, end_index);
MutItems { remaining1: remaining1,
remaining2: empty,
nelts: self.nelts }
#[test]
fn test_mut_rev_iter_wrap() {
let mut d = RingBuf::with_capacity(3);
- assert!(d.mut_iter().rev().next().is_none());
+ assert!(d.iter_mut().rev().next().is_none());
d.push(1i);
d.push(2);
assert_eq!(d.pop_front(), Some(1));
d.push(4);
- assert_eq!(d.mut_iter().rev().map(|x| *x).collect::<Vec<int>>(),
+ assert_eq!(d.iter_mut().rev().map(|x| *x).collect::<Vec<int>>(),
vec!(4, 3, 2));
}
#[test]
fn test_mut_iter() {
let mut d = RingBuf::new();
- assert!(d.mut_iter().next().is_none());
+ assert!(d.iter_mut().next().is_none());
for i in range(0u, 3) {
d.push_front(i);
}
- for (i, elt) in d.mut_iter().enumerate() {
+ for (i, elt) in d.iter_mut().enumerate() {
assert_eq!(*elt, 2 - i);
*elt = i;
}
{
- let mut it = d.mut_iter();
+ let mut it = d.iter_mut();
assert_eq!(*it.next().unwrap(), 0);
assert_eq!(*it.next().unwrap(), 1);
assert_eq!(*it.next().unwrap(), 2);
#[test]
fn test_mut_rev_iter() {
let mut d = RingBuf::new();
- assert!(d.mut_iter().rev().next().is_none());
+ assert!(d.iter_mut().rev().next().is_none());
for i in range(0u, 3) {
d.push_front(i);
}
- for (i, elt) in d.mut_iter().rev().enumerate() {
+ for (i, elt) in d.iter_mut().rev().enumerate() {
assert_eq!(*elt, i);
*elt = i;
}
{
- let mut it = d.mut_iter().rev();
+ let mut it = d.iter_mut().rev();
assert_eq!(*it.next().unwrap(), 0);
assert_eq!(*it.next().unwrap(), 1);
assert_eq!(*it.next().unwrap(), 2);
//! }
//! ```
//!
-//! * `.mut_iter()` returns an iterator that allows modifying each value.
+//! * `.iter_mut()` returns an iterator that allows modifying each value.
//! * Further iterators exist that split, chunk or permute the slice.
#![doc(primitive = "slice")]
self.sdir.as_mut_slice().swap(i, j);
// Swap the direction of each larger SizeDirection
- for x in self.sdir.mut_iter() {
+ for x in self.sdir.iter_mut() {
if x.size > sd.size {
x.dir = match x.dir { Pos => Neg, Neg => Pos };
}
#[inline]
fn move_from(self, mut src: Vec<T>, start: uint, end: uint) -> uint {
- for (a, b) in self.mut_iter().zip(src.mut_slice(start, end).mut_iter()) {
+ for (a, b) in self.iter_mut().zip(src.slice_mut(start, end).iter_mut()) {
mem::swap(a, b);
}
cmp::min(self.len(), end-start)
self.swap(j, i-1);
// Step 4: Reverse the (previously) weakly decreasing part
- self.mut_slice_from(i).reverse();
+ self.slice_from_mut(i).reverse();
true
}
}
// Step 2: Reverse the weakly increasing part
- self.mut_slice_from(i).reverse();
+ self.slice_from_mut(i).reverse();
// Step 3: Find the rightmost element equal to or bigger than the pivot (i-1)
let mut j = self.len() - 1;
fn test_iter_size_hints() {
let mut xs = [1i, 2, 5, 10, 11];
assert_eq!(xs.iter().size_hint(), (5, Some(5)));
- assert_eq!(xs.mut_iter().size_hint(), (5, Some(5)));
+ assert_eq!(xs.iter_mut().size_hint(), (5, Some(5)));
}
#[test]
#[test]
fn test_mut_iterator() {
let mut xs = [1i, 2, 3, 4, 5];
- for x in xs.mut_iter() {
+ for x in xs.iter_mut() {
*x += 1;
}
assert!(xs == [2, 3, 4, 5, 6])
#[test]
fn test_mut_rev_iterator() {
let mut xs = [1u, 2, 3, 4, 5];
- for (i,x) in xs.mut_iter().rev().enumerate() {
+ for (i,x) in xs.iter_mut().rev().enumerate() {
*x += i;
}
assert!(xs == [5, 5, 5, 5, 5])
#[test]
fn test_move_iterator() {
let xs = vec![1u,2,3,4,5];
- assert_eq!(xs.move_iter().fold(0, |a: uint, b: uint| 10*a + b), 12345);
+ assert_eq!(xs.into_iter().fold(0, |a: uint, b: uint| 10*a + b), 12345);
}
#[test]
fn test_move_rev_iterator() {
let xs = vec![1u,2,3,4,5];
- assert_eq!(xs.move_iter().rev().fold(0, |a: uint, b: uint| 10*a + b), 54321);
+ assert_eq!(xs.into_iter().rev().fold(0, |a: uint, b: uint| 10*a + b), 54321);
}
#[test]
assert!(a == [7i,2,3,4]);
let mut a = [1i,2,3,4,5];
let b = vec![5i,6,7,8,9,0];
- assert_eq!(a.mut_slice(2,4).move_from(b,1,6), 2);
+ assert_eq!(a.slice_mut(2,4).move_from(b,1,6), 2);
assert!(a == [1i,2,6,7,5]);
}
#[test]
fn test_reverse_part() {
let mut values = [1i,2,3,4,5];
- values.mut_slice(1, 4).reverse();
+ values.slice_mut(1, 4).reverse();
assert!(values == [1,4,3,2,5]);
}
fn test_bytes_set_memory() {
use slice::bytes::MutableByteVector;
let mut values = [1u8,2,3,4,5];
- values.mut_slice(0,5).set_memory(0xAB);
+ values.slice_mut(0,5).set_memory(0xAB);
assert!(values == [0xAB, 0xAB, 0xAB, 0xAB, 0xAB]);
- values.mut_slice(2,4).set_memory(0xFF);
+ values.slice_mut(2,4).set_memory(0xFF);
assert!(values == [0xAB, 0xAB, 0xFF, 0xFF, 0xAB]);
}
fn test_mut_split_at() {
let mut values = [1u8,2,3,4,5];
{
- let (left, right) = values.mut_split_at(2);
+ let (left, right) = values.split_at_mut(2);
assert!(left.slice(0, left.len()) == [1, 2]);
- for p in left.mut_iter() {
+ for p in left.iter_mut() {
*p += 1;
}
assert!(right.slice(0, right.len()) == [3, 4, 5]);
- for p in right.mut_iter() {
+ for p in right.iter_mut() {
*p += 2;
}
}
}
assert_eq!(cnt, 5);
- for f in v.mut_iter() {
+ for f in v.iter_mut() {
assert!(*f == Foo);
cnt += 1;
}
assert_eq!(cnt, 8);
- for f in v.move_iter() {
+ for f in v.into_iter() {
assert!(f == Foo);
cnt += 1;
}
#[test]
fn test_mut_splitator() {
let mut xs = [0i,1,0,2,3,0,0,4,5,0];
- assert_eq!(xs.mut_split(|x| *x == 0).count(), 6);
- for slice in xs.mut_split(|x| *x == 0) {
+ assert_eq!(xs.split_mut(|x| *x == 0).count(), 6);
+ for slice in xs.split_mut(|x| *x == 0) {
slice.reverse();
}
assert!(xs == [0,1,0,3,2,0,0,5,4,0]);
let mut xs = [0i,1,0,2,3,0,0,4,5,0,6,7];
- for slice in xs.mut_split(|x| *x == 0).take(5) {
+ for slice in xs.split_mut(|x| *x == 0).take(5) {
slice.reverse();
}
assert!(xs == [0,1,0,3,2,0,0,5,4,0,6,7]);
#[test]
fn test_mut_splitator_rev() {
let mut xs = [1i,2,0,3,4,0,0,5,6,0];
- for slice in xs.mut_split(|x| *x == 0).rev().take(4) {
+ for slice in xs.split_mut(|x| *x == 0).rev().take(4) {
slice.reverse();
}
assert!(xs == [1,2,0,4,3,0,0,6,5,0]);
#[test]
fn test_mut_chunks() {
let mut v = [0u8, 1, 2, 3, 4, 5, 6];
- for (i, chunk) in v.mut_chunks(3).enumerate() {
- for x in chunk.mut_iter() {
+ for (i, chunk) in v.chunks_mut(3).enumerate() {
+ for x in chunk.iter_mut() {
*x = i as u8;
}
}
#[test]
fn test_mut_chunks_rev() {
let mut v = [0u8, 1, 2, 3, 4, 5, 6];
- for (i, chunk) in v.mut_chunks(3).rev().enumerate() {
- for x in chunk.mut_iter() {
+ for (i, chunk) in v.chunks_mut(3).rev().enumerate() {
+ for x in chunk.iter_mut() {
*x = i as u8;
}
}
#[should_fail]
fn test_mut_chunks_0() {
let mut v = [1i, 2, 3, 4];
- let _it = v.mut_chunks(0);
+ let _it = v.chunks_mut(0);
}
#[test]
#[test]
fn test_mut_last() {
let mut x = [1i, 2, 3, 4, 5];
- let h = x.mut_last();
+ let h = x.last_mut();
assert_eq!(*h.unwrap(), 5);
let y: &mut [int] = [];
- assert!(y.mut_last().is_none());
+ assert!(y.last_mut().is_none());
}
}
b.iter(|| {
let mut i = 0i;
- for x in v.mut_iter() {
+ for x in v.iter_mut() {
*x = i;
i += 1;
}
unsafe {
v.set_len(1024);
}
- for x in v.mut_iter() {
+ for x in v.iter_mut() {
*x = 0i;
}
v
#[inline]
fn clone_from(&mut self, source: &SmallIntMap<V>) {
self.v.reserve(source.v.len());
- for (i, w) in self.v.mut_iter().enumerate() {
+ for (i, w) in self.v.iter_mut().enumerate() {
*w = source.v[i].clone();
}
}
}
}
+ /// Deprecated: use `iter_mut`
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'r>(&'r mut self) -> MutEntries<'r, V> {
+ self.iter_mut()
+ }
+
/// Returns an iterator visiting all key-value pairs in ascending order by the keys,
/// with mutable references to the values.
/// The iterator's element type is `(uint, &'r mut V)`.
/// map.insert(2, "b");
/// map.insert(3, "c");
///
- /// for (key, value) in map.mut_iter() {
+ /// for (key, value) in map.iter_mut() {
/// *value = "x";
/// }
///
/// assert_eq!(value, &"x");
/// }
/// ```
- pub fn mut_iter<'r>(&'r mut self) -> MutEntries<'r, V> {
+ pub fn iter_mut<'r>(&'r mut self) -> MutEntries<'r, V> {
MutEntries {
front: 0,
back: self.v.len(),
- iter: self.v.mut_iter()
+ iter: self.v.iter_mut()
}
}
+ /// Deprecated: use `into_iter` instead.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(&mut self)
+ -> FilterMap<(uint, Option<V>), (uint, V),
+ Enumerate<vec::MoveItems<Option<V>>>> {
+ self.into_iter()
+ }
+
/// Returns an iterator visiting all key-value pairs in ascending order by
/// the keys, emptying (but not consuming) the original `SmallIntMap`.
    /// The iterator's element type is `(uint, V)`.
/// map.insert(2, "b");
///
/// // Not possible with .iter()
- /// let vec: Vec<(uint, &str)> = map.move_iter().collect();
+ /// let vec: Vec<(uint, &str)> = map.into_iter().collect();
///
/// assert_eq!(vec, vec![(1, "a"), (2, "b"), (3, "c")]);
/// ```
- pub fn move_iter(&mut self)
+ pub fn into_iter(&mut self)
-> FilterMap<(uint, Option<V>), (uint, V),
Enumerate<vec::MoveItems<Option<V>>>>
{
let values = replace(&mut self.v, vec!());
- values.move_iter().enumerate().filter_map(|(i, v)| {
+ values.into_iter().enumerate().filter_map(|(i, v)| {
v.map(|v| (i, v))
})
}
assert_eq!(m.iter().size_hint(), (0, Some(11)));
assert_eq!(m.iter().rev().size_hint(), (0, Some(11)));
- assert_eq!(m.mut_iter().size_hint(), (0, Some(11)));
- assert_eq!(m.mut_iter().rev().size_hint(), (0, Some(11)));
+ assert_eq!(m.iter_mut().size_hint(), (0, Some(11)));
+ assert_eq!(m.iter_mut().rev().size_hint(), (0, Some(11)));
}
#[test]
assert!(m.insert(6, 10));
assert!(m.insert(10, 11));
- for (k, v) in m.mut_iter() {
+ for (k, v) in m.iter_mut() {
*v += k as int;
}
assert!(m.insert(6, 10));
assert!(m.insert(10, 11));
- for (k, v) in m.mut_iter().rev() {
+ for (k, v) in m.iter_mut().rev() {
*v += k as int;
}
let mut m = SmallIntMap::new();
m.insert(1, box 2i);
let mut called = false;
- for (k, v) in m.move_iter() {
+ for (k, v) in m.into_iter() {
assert!(!called);
called = true;
assert_eq!(k, 1);
use {Mutable, MutableSeq};
use str;
- use str::{Str, StrSlice, Owned, Slice};
+ use str::{Str, StrSlice, Owned};
use super::String;
use vec::Vec;
#[test]
fn test_from_utf8_lossy() {
let xs = b"hello";
- assert_eq!(String::from_utf8_lossy(xs), Slice("hello"));
+ assert_eq!(String::from_utf8_lossy(xs), str::Slice("hello"));
let xs = "ศไทย中华Việt Nam".as_bytes();
- assert_eq!(String::from_utf8_lossy(xs), Slice("ศไทย中华Việt Nam"));
+ assert_eq!(String::from_utf8_lossy(xs), str::Slice("ศไทย中华Việt Nam"));
let xs = b"Hello\xC2 There\xFF Goodbye";
assert_eq!(String::from_utf8_lossy(xs),
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! An ordered map and set implemented as self-balancing binary search
-//! trees. The only requirement for the types is that the key implements
-//! `Ord`.
+//! Maps are collections of unique keys with corresponding values, and sets are
+//! just unique keys without a corresponding value. The `Map` and `Set` traits in
+//! `std::container` define the basic interface.
+//!
+//! This module defines the `TreeMap` and `TreeSet` types. Their keys must implement `Ord`.
+//!
+//! `TreeMap`s are ordered.
//!
//! ## Example
//!
}
impl<K: Ord, V> MutableMap<K, V> for TreeMap<K, V> {
- // See comments on def_tree_find_mut_with
+ // See comments on tree_find_with_mut
#[inline]
fn find_mut<'a>(&'a mut self, key: &K) -> Option<&'a mut V> {
- tree_find_mut_with(&mut self.root, |x| key.cmp(x))
+ tree_find_with_mut(&mut self.root, |x| key.cmp(x))
}
fn swap(&mut self, key: K, value: V) -> Option<V> {
RevEntries{iter: self.iter()}
}
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
+ self.iter_mut()
+ }
+
/// Gets a lazy forward iterator over the key-value pairs in the
/// map, with the values being mutable.
///
/// map.insert("b", 2i);
///
/// // Add 10 until we find "b"
- /// for (key, value) in map.mut_iter() {
+ /// for (key, value) in map.iter_mut() {
/// *value += 10;
/// if key == &"b" { break }
/// }
/// assert_eq!(map.find(&"b"), Some(&12));
/// assert_eq!(map.find(&"c"), Some(&3));
/// ```
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
+ pub fn iter_mut<'a>(&'a mut self) -> MutEntries<'a, K, V> {
MutEntries {
stack: vec!(),
- node: mut_deref(&mut self.root),
+ node: deref_mut(&mut self.root),
remaining_min: self.length,
remaining_max: self.length
}
}
+ /// Deprecated: use `rev_iter_mut`.
+ #[deprecated = "use rev_iter_mut"]
+ pub fn mut_rev_iter<'a>(&'a mut self) -> RevMutEntries<'a, K, V> {
+ self.rev_iter_mut()
+ }
+
/// Gets a lazy reverse iterator over the key-value pairs in the
/// map, with the values being mutable.
///
/// map.insert("b", 2i);
///
/// // Add 10 until we find "b"
- /// for (key, value) in map.mut_rev_iter() {
+ /// for (key, value) in map.rev_iter_mut() {
/// *value += 10;
/// if key == &"b" { break }
/// }
/// assert_eq!(map.find(&"b"), Some(&12));
/// assert_eq!(map.find(&"c"), Some(&13));
/// ```
- pub fn mut_rev_iter<'a>(&'a mut self) -> RevMutEntries<'a, K, V> {
- RevMutEntries{iter: self.mut_iter()}
+ pub fn rev_iter_mut<'a>(&'a mut self) -> RevMutEntries<'a, K, V> {
+ RevMutEntries{iter: self.iter_mut()}
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ self.into_iter()
+ }
/// Gets a lazy iterator that consumes the treemap.
///
/// map.insert("b", 2i);
///
/// // Not possible with a regular `.iter()`
- /// let vec: Vec<(&str, int)> = map.move_iter().collect();
+ /// let vec: Vec<(&str, int)> = map.into_iter().collect();
/// assert_eq!(vec, vec![("a", 1), ("b", 2), ("c", 3)]);
/// ```
- pub fn move_iter(self) -> MoveEntries<K, V> {
+ pub fn into_iter(self) -> MoveEntries<K, V> {
let TreeMap { root: root, length: length } = self;
let stk = match root {
None => vec!(),
tree_find_with(&self.root, f)
}
+ /// Deprecated: use `find_with_mut`.
+ #[deprecated = "use find_with_mut"]
+ pub fn find_mut_with<'a>(&'a mut self, f:|&K| -> Ordering) -> Option<&'a mut V> {
+ self.find_with_mut(f)
+ }
+
/// Returns the value for which `f(key)` returns `Equal`. `f` is invoked
/// with current key and guides tree navigation. That means `f` should
/// be aware of natural ordering of the tree.
/// t.insert("User-Agent", "Curl-Rust/0.1");
///
/// let new_ua = "Safari/156.0";
- /// match t.find_mut_with(|k| "User-Agent".cmp(k)) {
+ /// match t.find_with_mut(|k| "User-Agent".cmp(k)) {
/// Some(x) => *x = new_ua,
/// None => fail!(),
/// }
/// assert_eq!(t.find(&"User-Agent"), Some(&new_ua));
/// ```
#[inline]
- pub fn find_mut_with<'a>(&'a mut self, f:|&K| -> Ordering) -> Option<&'a mut V> {
- tree_find_mut_with(&mut self.root, f)
+ pub fn find_with_mut<'a>(&'a mut self, f:|&K| -> Ordering) -> Option<&'a mut V> {
+ tree_find_with_mut(&mut self.root, f)
}
}
/// Gets a lazy iterator that should be initialized using
/// `traverse_left`/`traverse_right`/`traverse_complete`.
- fn mut_iter_for_traversal<'a>(&'a mut self) -> MutEntries<'a, K, V> {
+ fn iter_mut_for_traversal<'a>(&'a mut self) -> MutEntries<'a, K, V> {
MutEntries {
stack: vec!(),
- node: mut_deref(&mut self.root),
+ node: deref_mut(&mut self.root),
remaining_min: 0,
remaining_max: self.length
}
}
+ /// Deprecated: use `lower_bound_mut`.
+ #[deprecated = "use lower_bound_mut"]
+ pub fn mut_lower_bound<'a>(&'a mut self, k: &K) -> MutEntries<'a, K, V> {
+ self.lower_bound_mut(k)
+ }
+
/// Returns a lazy value iterator to the first key-value pair (with
/// the value being mutable) whose key is not less than `k`.
///
/// map.insert(6, "c");
/// map.insert(8, "d");
///
- /// assert_eq!(map.mut_lower_bound(&4).next(), Some((&4, &mut "b")));
- /// assert_eq!(map.mut_lower_bound(&5).next(), Some((&6, &mut "c")));
- /// assert_eq!(map.mut_lower_bound(&10).next(), None);
+ /// assert_eq!(map.lower_bound_mut(&4).next(), Some((&4, &mut "b")));
+ /// assert_eq!(map.lower_bound_mut(&5).next(), Some((&6, &mut "c")));
+ /// assert_eq!(map.lower_bound_mut(&10).next(), None);
///
- /// for (key, value) in map.mut_lower_bound(&4) {
+ /// for (key, value) in map.lower_bound_mut(&4) {
/// *value = "changed";
/// }
///
/// assert_eq!(map.find(&6), Some(&"changed"));
/// assert_eq!(map.find(&8), Some(&"changed"));
/// ```
- pub fn mut_lower_bound<'a>(&'a mut self, k: &K) -> MutEntries<'a, K, V> {
- bound_setup!(self.mut_iter_for_traversal(), k, true)
+ pub fn lower_bound_mut<'a>(&'a mut self, k: &K) -> MutEntries<'a, K, V> {
+ bound_setup!(self.iter_mut_for_traversal(), k, true)
+ }
+
+ /// Deprecated: use `upper_bound_mut`.
+ #[deprecated = "use upper_bound_mut"]
+ pub fn mut_upper_bound<'a>(&'a mut self, k: &K) -> MutEntries<'a, K, V> {
+ self.upper_bound_mut(k)
}
/// Returns a lazy iterator to the first key-value pair (with the
/// map.insert(6, "c");
/// map.insert(8, "d");
///
- /// assert_eq!(map.mut_upper_bound(&4).next(), Some((&6, &mut "c")));
- /// assert_eq!(map.mut_upper_bound(&5).next(), Some((&6, &mut "c")));
- /// assert_eq!(map.mut_upper_bound(&10).next(), None);
+ /// assert_eq!(map.upper_bound_mut(&4).next(), Some((&6, &mut "c")));
+ /// assert_eq!(map.upper_bound_mut(&5).next(), Some((&6, &mut "c")));
+ /// assert_eq!(map.upper_bound_mut(&10).next(), None);
///
- /// for (key, value) in map.mut_upper_bound(&4) {
+ /// for (key, value) in map.upper_bound_mut(&4) {
/// *value = "changed";
/// }
///
/// assert_eq!(map.find(&6), Some(&"changed"));
/// assert_eq!(map.find(&8), Some(&"changed"));
/// ```
- pub fn mut_upper_bound<'a>(&'a mut self, k: &K) -> MutEntries<'a, K, V> {
- bound_setup!(self.mut_iter_for_traversal(), k, false)
+ pub fn upper_bound_mut<'a>(&'a mut self, k: &K) -> MutEntries<'a, K, V> {
+ bound_setup!(self.iter_mut_for_traversal(), k, false)
}
}
define_iterator! {
MutEntries,
RevMutEntries,
- deref = mut_deref,
+ deref = deref_mut,
addr_mut = mut
}
}
}
-fn mut_deref<K, V>(x: &mut Option<Box<TreeNode<K, V>>>)
+fn deref_mut<K, V>(x: &mut Option<Box<TreeNode<K, V>>>)
-> *mut TreeNode<K, V> {
match *x {
Some(ref mut n) => {
let n: &mut TreeNode<K, V> = &mut **n;
n as *mut TreeNode<K, V>
}
- None => ptr::mut_null()
+ None => ptr::null_mut()
}
}
RevSetItems{iter: self.map.rev_iter()}
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> MoveSetItems<T> {
+ self.into_iter()
+ }
+
/// Creates a consuming iterator, that is, one that moves each value out of the
/// set in ascending order. The set cannot be used after calling this.
///
/// let set: TreeSet<int> = [1i, 4, 3, 5, 2].iter().map(|&x| x).collect();
///
/// // Not possible with a regular `.iter()`
- /// let v: Vec<int> = set.move_iter().collect();
+ /// let v: Vec<int> = set.into_iter().collect();
/// assert_eq!(v, vec![1, 2, 3, 4, 5]);
/// ```
#[inline]
- pub fn move_iter(self) -> MoveSetItems<T> {
- self.map.move_iter().map(|(value, _)| value)
+ pub fn into_iter(self) -> MoveSetItems<T> {
+ self.map.into_iter().map(|(value, _)| value)
}
/// Gets a lazy iterator pointing to the first value not less than `v` (greater or equal).
}
// See comments above tree_find_with
-fn tree_find_mut_with<'r, K, V>(node: &'r mut Option<Box<TreeNode<K, V>>>,
+fn tree_find_with_mut<'r, K, V>(node: &'r mut Option<Box<TreeNode<K, V>>>,
f: |&K| -> Ordering) -> Option<&'r mut V> {
let mut current = node;
fn heir_swap<K: Ord, V>(node: &mut Box<TreeNode<K, V>>,
child: &mut Option<Box<TreeNode<K, V>>>) {
// *could* be done without recursion, but it won't borrow check
- for x in child.mut_iter() {
+ for x in child.iter_mut() {
if x.right.is_some() {
heir_swap(node, &mut x.right);
} else {
if right_level > save.level {
let save_level = save.level;
- for x in save.right.mut_iter() { x.level = save_level }
+ for x in save.right.iter_mut() { x.level = save_level }
}
skew(save);
- for right in save.right.mut_iter() {
+ for right in save.right.iter_mut() {
skew(right);
- for x in right.right.mut_iter() { skew(x) }
+ for x in right.right.iter_mut() { skew(x) }
}
split(save);
- for x in save.right.mut_iter() { split(x) }
+ for x in save.right.iter_mut() { split(x) }
}
return ret;
assert!(m.insert("t2", 8));
assert!(m.insert("t5", 14));
let new = 100;
- match m.find_mut_with(|k| "t5".cmp(k)) {
+ match m.find_with_mut(|k| "t5".cmp(k)) {
None => fail!(), Some(x) => *x = new
}
assert_eq!(m.find_with(|k| "t5".cmp(k)), Some(&new));
assert!(m.insert(i, 100 * i));
}
- for (i, (&k, v)) in m.mut_iter().enumerate() {
+ for (i, (&k, v)) in m.iter_mut().enumerate() {
*v += k * 10 + i; // 000 + 00 + 0, 100 + 10 + 1, ...
}
assert!(m.insert(i, 100 * i));
}
- for (i, (&k, v)) in m.mut_rev_iter().enumerate() {
+ for (i, (&k, v)) in m.rev_iter_mut().enumerate() {
*v += k * 10 + (9 - i); // 900 + 90 + (9 - 0), 800 + 80 + (9 - 1), ...
}
}
for i in range(1i, 199) {
- let mut lb_it = m_lower.mut_lower_bound(&i);
+ let mut lb_it = m_lower.lower_bound_mut(&i);
let (&k, v) = lb_it.next().unwrap();
let lb = i + i % 2;
assert_eq!(lb, k);
*v -= k;
}
for i in range(0i, 198) {
- let mut ub_it = m_upper.mut_upper_bound(&i);
+ let mut ub_it = m_upper.upper_bound_mut(&i);
let (&k, v) = ub_it.next().unwrap();
let ub = i + 2 - i % 2;
assert_eq!(ub, k);
*v -= k;
}
- assert!(m_lower.mut_lower_bound(&199).next().is_none());
+ assert!(m_lower.lower_bound_mut(&199).next().is_none());
- assert!(m_upper.mut_upper_bound(&198).next().is_none());
+ assert!(m_upper.upper_bound_mut(&198).next().is_none());
assert!(m_lower.iter().all(|(_, &x)| x == 0));
assert!(m_upper.iter().all(|(_, &x)| x == 0));
#[test]
fn test_keys() {
let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<TreeMap<int, char>>();
+ let map = vec.into_iter().collect::<TreeMap<int, char>>();
let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
#[test]
fn test_values() {
let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<TreeMap<int, char>>();
+ let map = vec.into_iter().collect::<TreeMap<int, char>>();
let values = map.values().map(|&v| v).collect::<Vec<char>>();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
let s: TreeSet<int> = range(0i, 5).collect();
let mut n = 0;
- for x in s.move_iter() {
+ for x in s.into_iter() {
assert_eq!(x, n);
n += 1;
}
#[test]
fn test_move_iter_size_hint() {
- let s: TreeSet<int> = vec!(0i, 1).move_iter().collect();
+ let s: TreeSet<int> = vec!(0i, 1).into_iter().collect();
- let mut it = s.move_iter();
+ let mut it = s.into_iter();
assert_eq!(it.size_hint(), (2, Some(2)));
assert!(it.next() != None);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Ordered containers with unsigned integer keys,
-//! implemented as radix tries (`TrieSet` and `TrieMap` types).
+//! Maps are collections of unique keys with corresponding values, and sets are
+//! just unique keys without a corresponding value. The `Map` and `Set` traits in
+//! `std::container` define the basic interface.
+//!
+//! This module defines `TrieMap` and `TrieSet`, which require `uint` keys.
+//!
+//! `TrieMap` is ordered.
use core::prelude::*;
iter
}
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, T> {
+ self.iter_mut()
+ }
+
/// Gets an iterator over the key-value pairs in the map, with the
/// ability to mutate the values.
///
/// use std::collections::TrieMap;
/// let mut map: TrieMap<int> = [(1, 2), (2, 4), (3, 6)].iter().map(|&x| x).collect();
///
- /// for (key, value) in map.mut_iter() {
+ /// for (key, value) in map.iter_mut() {
/// *value = -(key as int);
/// }
///
/// assert_eq!(map.find(&2), Some(&-2));
/// assert_eq!(map.find(&3), Some(&-3));
/// ```
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, T> {
+ pub fn iter_mut<'a>(&'a mut self) -> MutEntries<'a, T> {
let mut iter = unsafe {MutEntries::new()};
- iter.stack[0] = self.root.children.mut_iter();
+ iter.stack[0] = self.root.children.iter_mut();
iter.length = 1;
iter.remaining_min = self.length;
iter.remaining_max = self.length;
}
// If `upper` is true then returns upper_bound else returns lower_bound.
#[inline]
- fn mut_bound<'a>(&'a mut self, key: uint, upper: bool) -> MutEntries<'a, T> {
+ fn bound_mut<'a>(&'a mut self, key: uint, upper: bool) -> MutEntries<'a, T> {
bound!(MutEntries, self = self,
key = key, is_upper = upper,
slice_from = mut_slice_from, iter = mut_iter,
mutability = mut)
}
+ /// Deprecated: use `lower_bound_mut`.
+ #[deprecated = "use lower_bound_mut"]
+ pub fn mut_lower_bound<'a>(&'a mut self, key: uint) -> MutEntries<'a, T> {
+ self.lower_bound_mut(key)
+ }
+
/// Gets an iterator pointing to the first key-value pair whose key is not less than `key`.
/// If all keys in the map are less than `key` an empty iterator is returned.
///
/// use std::collections::TrieMap;
/// let mut map: TrieMap<&str> = [(2, "a"), (4, "b"), (6, "c")].iter().map(|&x| x).collect();
///
- /// assert_eq!(map.mut_lower_bound(4).next(), Some((4, &mut "b")));
- /// assert_eq!(map.mut_lower_bound(5).next(), Some((6, &mut "c")));
- /// assert_eq!(map.mut_lower_bound(10).next(), None);
+ /// assert_eq!(map.lower_bound_mut(4).next(), Some((4, &mut "b")));
+ /// assert_eq!(map.lower_bound_mut(5).next(), Some((6, &mut "c")));
+ /// assert_eq!(map.lower_bound_mut(10).next(), None);
///
- /// for (key, value) in map.mut_lower_bound(4) {
+ /// for (key, value) in map.lower_bound_mut(4) {
/// *value = "changed";
/// }
///
/// assert_eq!(map.find(&4), Some(&"changed"));
/// assert_eq!(map.find(&6), Some(&"changed"));
/// ```
- pub fn mut_lower_bound<'a>(&'a mut self, key: uint) -> MutEntries<'a, T> {
- self.mut_bound(key, false)
+ pub fn lower_bound_mut<'a>(&'a mut self, key: uint) -> MutEntries<'a, T> {
+ self.bound_mut(key, false)
+ }
+
+ /// Deprecated: use `upper_bound_mut`.
+ #[deprecated = "use upper_bound_mut"]
+ pub fn mut_upper_bound<'a>(&'a mut self, key: uint) -> MutEntries<'a, T> {
+ self.upper_bound_mut(key)
}
/// Gets an iterator pointing to the first key-value pair whose key is greater than `key`.
/// use std::collections::TrieMap;
/// let mut map: TrieMap<&str> = [(2, "a"), (4, "b"), (6, "c")].iter().map(|&x| x).collect();
///
- /// assert_eq!(map.mut_upper_bound(4).next(), Some((6, &mut "c")));
- /// assert_eq!(map.mut_upper_bound(5).next(), Some((6, &mut "c")));
- /// assert_eq!(map.mut_upper_bound(10).next(), None);
+ /// assert_eq!(map.upper_bound_mut(4).next(), Some((6, &mut "c")));
+ /// assert_eq!(map.upper_bound_mut(5).next(), Some((6, &mut "c")));
+ /// assert_eq!(map.upper_bound_mut(10).next(), None);
///
- /// for (key, value) in map.mut_upper_bound(4) {
+ /// for (key, value) in map.upper_bound_mut(4) {
/// *value = "changed";
/// }
///
/// assert_eq!(map.find(&4), Some(&"b"));
/// assert_eq!(map.find(&6), Some(&"changed"));
/// ```
- pub fn mut_upper_bound<'a>(&'a mut self, key: uint) -> MutEntries<'a, T> {
- self.mut_bound(key, true)
+ pub fn upper_bound_mut<'a>(&'a mut self, key: uint) -> MutEntries<'a, T> {
+ self.bound_mut(key, true)
}
}
#[test]
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
- let map = vec.move_iter().collect::<TrieMap<char>>();
+ let map = vec.into_iter().collect::<TrieMap<char>>();
let keys = map.keys().collect::<Vec<uint>>();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
#[test]
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
- let map = vec.move_iter().collect::<TrieMap<char>>();
+ let map = vec.into_iter().collect::<TrieMap<char>>();
let values = map.values().map(|&v| v).collect::<Vec<char>>();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
#[test]
fn test_mut_iter() {
let mut empty_map : TrieMap<uint> = TrieMap::new();
- assert!(empty_map.mut_iter().next().is_none());
+ assert!(empty_map.iter_mut().next().is_none());
let first = uint::MAX - 10000;
let last = uint::MAX;
}
let mut i = 0;
- for (k, v) in map.mut_iter() {
+ for (k, v) in map.iter_mut() {
assert_eq!(k, first + i);
*v -= k / 2;
i += 1;
}
for i in range(0u, 199) {
- let mut lb_it = m_lower.mut_lower_bound(i);
+ let mut lb_it = m_lower.lower_bound_mut(i);
let (k, v) = lb_it.next().unwrap();
let lb = i + i % 2;
assert_eq!(lb, k);
}
for i in range(0u, 198) {
- let mut ub_it = m_upper.mut_upper_bound(i);
+ let mut ub_it = m_upper.upper_bound_mut(i);
let (k, v) = ub_it.next().unwrap();
let ub = i + 2 - i % 2;
assert_eq!(ub, k);
*v -= k;
}
- assert!(m_lower.mut_lower_bound(199).next().is_none());
- assert!(m_upper.mut_upper_bound(198).next().is_none());
+ assert!(m_lower.lower_bound_mut(199).next().is_none());
+ assert!(m_upper.upper_bound_mut(198).next().is_none());
assert!(m_lower.iter().all(|(_, &x)| x == 0));
assert!(m_upper.iter().all(|(_, &x)| x == 0));
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! An owned, growable vector.
+//! A growable list type, written `Vec<T>` but pronounced 'vector'.
+//!
+//! Vectors have `O(1)` indexing, push (to the end) and pop (from the end).
use core::prelude::*;
-use alloc::heap::{allocate, reallocate, deallocate};
+use alloc::heap::{EMPTY, allocate, reallocate, deallocate};
use core::cmp::max;
use core::default::Default;
use core::fmt;
use core::mem;
use core::num;
+use core::ops;
use core::ptr;
use core::raw::Slice as RawSlice;
use core::uint;
use slice::{MutableOrdSlice, MutableSliceAllocating, CloneableVector};
use slice::{Items, MutItems};
-
-#[doc(hidden)]
-pub static PTR_MARKER: u8 = 0;
-
/// An owned, growable vector.
///
/// # Examples
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. The reason for this is because the pointer of a slice
// being NULL would break the null pointer optimization for enums.
- Vec { len: 0, cap: 0, ptr: &PTR_MARKER as *const _ as *mut T }
+ Vec { len: 0, cap: 0, ptr: EMPTY as *mut T }
}
/// Constructs a new, empty `Vec` with the specified capacity.
#[inline]
pub fn with_capacity(capacity: uint) -> Vec<T> {
if mem::size_of::<T>() == 0 {
- Vec { len: 0, cap: uint::MAX, ptr: &PTR_MARKER as *const _ as *mut T }
+ Vec { len: 0, cap: uint::MAX, ptr: EMPTY as *mut T }
} else if capacity == 0 {
Vec::new()
} else {
let mut xs = Vec::with_capacity(length);
while xs.len < length {
let len = xs.len;
- ptr::write(xs.as_mut_slice().unsafe_mut_ref(len), op(len));
+ ptr::write(xs.as_mut_slice().unsafe_mut(len), op(len));
xs.len += 1;
}
xs
let mut lefts = Vec::new();
let mut rights = Vec::new();
- for elt in self.move_iter() {
+ for elt in self.into_iter() {
if f(&elt) {
lefts.push(elt);
} else {
let mut xs = Vec::with_capacity(length);
while xs.len < length {
let len = xs.len;
- ptr::write(xs.as_mut_slice().unsafe_mut_ref(len),
+ ptr::write(xs.as_mut_slice().unsafe_mut(len),
value.clone());
xs.len += 1;
}
// during the loop can prevent this optimisation.
unsafe {
ptr::write(
- self.as_mut_slice().unsafe_mut_ref(len),
+ self.as_mut_slice().unsafe_mut(len),
other.unsafe_get(i).clone());
self.set_len(len + 1);
}
}
// reuse the contained values' allocations/resources.
- for (place, thing) in self.mut_iter().zip(other.iter()) {
+ for (place, thing) in self.iter_mut().zip(other.iter()) {
place.clone_from(thing)
}
}
}*/
+impl<T> ops::Slice<uint, [T]> for Vec<T> {
+ #[inline]
+ fn as_slice_<'a>(&'a self) -> &'a [T] {
+ self.as_slice()
+ }
+
+ #[inline]
+ fn slice_from_<'a>(&'a self, start: &uint) -> &'a [T] {
+ self.as_slice().slice_from_(start)
+ }
+
+ #[inline]
+ fn slice_to_<'a>(&'a self, end: &uint) -> &'a [T] {
+ self.as_slice().slice_to_(end)
+ }
+ #[inline]
+ fn slice_<'a>(&'a self, start: &uint, end: &uint) -> &'a [T] {
+ self.as_slice().slice_(start, end)
+ }
+}
+
+impl<T> ops::SliceMut<uint, [T]> for Vec<T> {
+ #[inline]
+ fn as_mut_slice_<'a>(&'a mut self) -> &'a mut [T] {
+ self.as_mut_slice()
+ }
+
+ #[inline]
+ fn slice_from_mut_<'a>(&'a mut self, start: &uint) -> &'a mut [T] {
+ self.as_mut_slice().slice_from_mut_(start)
+ }
+
+ #[inline]
+ fn slice_to_mut_<'a>(&'a mut self, end: &uint) -> &'a mut [T] {
+ self.as_mut_slice().slice_to_mut_(end)
+ }
+ #[inline]
+ fn slice_mut_<'a>(&'a mut self, start: &uint, end: &uint) -> &'a mut [T] {
+ self.as_mut_slice().slice_mut_(start, end)
+ }
+}
impl<T> FromIterator<T> for Vec<T> {
#[inline]
fn from_iter<I:Iterator<T>>(mut iterator: I) -> Vec<T> {
}
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> MoveItems<T> {
+ self.into_iter()
+ }
+
/// Creates a consuming iterator, that is, one that moves each
/// value out of the vector (from start to end). The vector cannot
/// be used after calling this.
///
/// ```
/// let v = vec!["a".to_string(), "b".to_string()];
- /// for s in v.move_iter() {
+ /// for s in v.into_iter() {
/// // s has type String, not &String
/// println!("{}", s);
/// }
/// ```
#[inline]
- pub fn move_iter(self) -> MoveItems<T> {
+ pub fn into_iter(self) -> MoveItems<T> {
unsafe {
let iter = mem::transmute(self.as_slice().iter());
let ptr = self.ptr;
self.as_slice().iter()
}
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a,T> {
+ self.iter_mut()
+ }
/// Returns an iterator over mutable references to the elements of the
/// vector in order.
///
/// ```
/// let mut vec = vec![1i, 2, 3];
- /// for num in vec.mut_iter() {
+ /// for num in vec.iter_mut() {
/// *num = 0;
/// }
/// ```
#[inline]
- pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a,T> {
- self.as_mut_slice().mut_iter()
+ pub fn iter_mut<'a>(&'a mut self) -> MutItems<'a,T> {
+ self.as_mut_slice().iter_mut()
}
/// Sorts the vector, in place, using `compare` to compare elements.
self.as_slice().last()
}
+ /// Deprecated: use `last_mut`.
+ #[deprecated = "use last_mut"]
+ pub fn mut_last<'a>(&'a mut self) -> Option<&'a mut T> {
+ self.last_mut()
+ }
+
/// Returns a mutable reference to the last element of a vector, or `None`
/// if it is empty.
///
///
/// ```
/// let mut vec = vec![1i, 2, 3];
- /// *vec.mut_last().unwrap() = 4;
+ /// *vec.last_mut().unwrap() = 4;
/// assert_eq!(vec, vec![1i, 2, 4]);
/// ```
#[inline]
- pub fn mut_last<'a>(&'a mut self) -> Option<&'a mut T> {
- self.as_mut_slice().mut_last()
+ pub fn last_mut<'a>(&'a mut self) -> Option<&'a mut T> {
+ self.as_mut_slice().last_mut()
}
/// Removes an element from anywhere in the vector and return it, replacing
/// ```
#[inline]
pub fn push_all_move(&mut self, other: Vec<T>) {
- self.extend(other.move_iter());
+ self.extend(other.into_iter());
+ }
+
+ /// Deprecated: use `slice_mut`.
+ #[deprecated = "use slice_mut"]
+ pub fn mut_slice<'a>(&'a mut self, start: uint, end: uint)
+ -> &'a mut [T] {
+ self.slice_mut(start, end)
}
/// Returns a mutable slice of `self` between `start` and `end`.
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
- /// assert!(vec.mut_slice(0, 2) == [1, 2]);
+ /// assert!(vec.slice_mut(0, 2) == [1, 2]);
/// ```
#[inline]
- pub fn mut_slice<'a>(&'a mut self, start: uint, end: uint)
+ pub fn slice_mut<'a>(&'a mut self, start: uint, end: uint)
-> &'a mut [T] {
- self.as_mut_slice().mut_slice(start, end)
+ self.as_mut_slice().slice_mut(start, end)
+ }
+
+    /// Deprecated: use `slice_from_mut`.
+ #[deprecated = "use slice_from_mut"]
+ pub fn mut_slice_from<'a>(&'a mut self, start: uint) -> &'a mut [T] {
+ self.slice_from_mut(start)
}
/// Returns a mutable slice of `self` from `start` to the end of the `Vec`.
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
- /// assert!(vec.mut_slice_from(2) == [3, 4]);
+ /// assert!(vec.slice_from_mut(2) == [3, 4]);
/// ```
#[inline]
- pub fn mut_slice_from<'a>(&'a mut self, start: uint) -> &'a mut [T] {
- self.as_mut_slice().mut_slice_from(start)
+ pub fn slice_from_mut<'a>(&'a mut self, start: uint) -> &'a mut [T] {
+ self.as_mut_slice().slice_from_mut(start)
+ }
+
+ /// Deprecated: use `slice_to_mut`.
+ #[deprecated = "use slice_to_mut"]
+ pub fn mut_slice_to<'a>(&'a mut self, end: uint) -> &'a mut [T] {
+ self.slice_to_mut(end)
}
/// Returns a mutable slice of `self` from the start of the `Vec` to `end`.
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
- /// assert!(vec.mut_slice_to(2) == [1, 2]);
+ /// assert!(vec.slice_to_mut(2) == [1, 2]);
/// ```
#[inline]
- pub fn mut_slice_to<'a>(&'a mut self, end: uint) -> &'a mut [T] {
- self.as_mut_slice().mut_slice_to(end)
+ pub fn slice_to_mut<'a>(&'a mut self, end: uint) -> &'a mut [T] {
+ self.as_mut_slice().slice_to_mut(end)
+ }
+
+ /// Deprecated: use `split_at_mut`.
+ #[deprecated = "use split_at_mut"]
+ pub fn mut_split_at<'a>(&'a mut self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
+ self.split_at_mut(mid)
}
/// Returns a pair of mutable slices that divides the `Vec` at an index.
///
/// // scoped to restrict the lifetime of the borrows
/// {
- /// let (left, right) = vec.mut_split_at(0);
+ /// let (left, right) = vec.split_at_mut(0);
/// assert!(left == &mut []);
/// assert!(right == &mut [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
- /// let (left, right) = vec.mut_split_at(2);
+ /// let (left, right) = vec.split_at_mut(2);
/// assert!(left == &mut [1, 2]);
/// assert!(right == &mut [3, 4, 5, 6]);
/// }
///
/// {
- /// let (left, right) = vec.mut_split_at(6);
+ /// let (left, right) = vec.split_at_mut(6);
/// assert!(left == &mut [1, 2, 3, 4, 5, 6]);
/// assert!(right == &mut []);
/// }
/// ```
#[inline]
- pub fn mut_split_at<'a>(&'a mut self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
- self.as_mut_slice().mut_split_at(mid)
+ pub fn split_at_mut<'a>(&'a mut self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
+ self.as_mut_slice().split_at_mut(mid)
}
/// Reverses the order of elements in a vector, in place.
}
}
+impl<T> ExactSize<T> for MoveItems<T> {}
+
#[unsafe_destructor]
impl<T> Drop for MoveItems<T> {
fn drop(&mut self) {
}
}
+/// An owned, partially type-converted vector.
+///
+/// This struct takes two type parameters `T` and `U` which must be of the
+/// same, non-zero size having the same minimal alignment.
+///
+/// No allocations are performed by usage, only a deallocation happens in the
+/// destructor which should only run when unwinding.
+///
+/// It can be used to convert a vector of `T`s into a vector of `U`s, by
+/// converting the individual elements one-by-one.
+///
+/// You may call the `push` method as often as you get a `Some(t)` from `pop`.
+/// After pushing the same number of `U`s as you got `T`s, you can use
+/// `into_vec` to obtain the fully converted vector.
+///
+/// # Example
+///
+/// ```ignore
+/// let mut pv = PartialVec::from_vec(vec![0u32, 1]);
+/// assert_eq!(pv.pop(), Some(0));
+/// assert_eq!(pv.pop(), Some(1));
+/// assert_eq!(pv.pop(), None);
+/// pv.push(2u32);
+/// pv.push(3);
+/// assert_eq!(pv.into_vec().as_slice(), &[2, 3]);
+/// ```
+//
+// Upheld invariants:
+//
+// (a) `vec` isn't modified except when the `PartialVec` goes out of scope, the
+// only thing it is used for is keeping the memory which the `PartialVec`
+// uses for the inplace conversion.
+//
+// (b) `start_u` points to the start of the vector.
+//
+// (c) `end_t` points to one element beyond the vector.
+//
+// (d) `start_u` <= `end_u` <= `start_t` <= `end_t`.
+//
+// (e) From `start_u` (incl.) to `end_u` (excl.) there are sequential instances
+// of type `U`.
+//
+// (f) From `start_t` (incl.) to `end_t` (excl.) there are sequential instances
+// of type `T`.
+//
+// (g) The size of `T` and `U` is equal and non-zero.
+//
+// (h) The `min_align_of` of `T` and `U` is equal.
+
+struct PartialVec<T,U> {
+ vec: Vec<T>,
+
+ start_u: *mut U,
+ end_u: *mut U,
+ start_t: *mut T,
+ end_t: *mut T,
+}
+
+impl<T,U> PartialVec<T,U> {
+ /// Creates a `PartialVec` from a `Vec`.
+ ///
+ /// # Failure
+ ///
+ /// Fails if `T` and `U` have differing sizes, are zero-sized or have
+ /// differing minimal alignments.
+ fn from_vec(mut vec: Vec<T>) -> PartialVec<T,U> {
+ // FIXME: Assert statically that the types `T` and `U` have the same
+ // size.
+ //
+ // These asserts make sure (g) and (h) are satisfied.
+ assert!(mem::size_of::<T>() != 0);
+ assert!(mem::size_of::<U>() != 0);
+ assert!(mem::size_of::<T>() == mem::size_of::<U>());
+ assert!(mem::min_align_of::<T>() == mem::min_align_of::<U>());
+
+ let start = vec.as_mut_ptr();
+
+ // This `as int` cast is safe, because the size of the elements of the
+ // vector is not 0, and:
+ //
+ // 1) If the size of the elements in the vector is 1, the `int` may
+ // overflow, but it has the correct bit pattern so that the
+ // `.offset()` function will work.
+ //
+ // Example:
+ // Address space 0x0-0xF.
+ // `u8` array at: 0x1.
+ // Size of `u8` array: 0x8.
+ // Calculated `offset`: -0x8.
+ // After `array.offset(offset)`: 0x9.
+ // (0x1 + 0x8 = 0x1 - 0x8)
+ //
+ // 2) If the size of the elements in the vector is >1, the `uint` ->
+ // `int` conversion can't overflow.
+ let offset = vec.len() as int;
+
+ let start_u = start as *mut U;
+ let end_u = start as *mut U;
+ let start_t = start;
+
+ // This points inside the vector, as the vector has length `offset`.
+ let end_t = unsafe { start_t.offset(offset) };
+
+ // (b) is satisfied, `start_u` points to the start of `vec`.
+ //
+ // (c) is also satisfied, `end_t` points to the end of `vec`.
+ //
+    // `start_u == end_u == start_t <= end_t`, so also `start_u <= end_u <=
+    // start_t <= end_t`, thus (d).
+ //
+ // As `start_u == end_u`, it is represented correctly that there are no
+ // instances of `U` in `vec`, thus (e) is satisfied.
+ //
+ // At start, there are only elements of type `T` in `vec`, so (f) is
+ // satisfied, as `start_t` points to the start of `vec` and `end_t` to
+ // the end of it.
+
+ PartialVec {
+ // (a) is satisfied, `vec` isn't modified in the function.
+ vec: vec,
+ start_u: start_u,
+ end_u: end_u,
+ start_t: start_t,
+ end_t: end_t,
+ }
+ }
+
+ /// Pops a `T` from the `PartialVec`.
+ ///
+ /// Removes the next `T` from the vector and returns it as `Some(T)`, or
+ /// `None` if there are none left.
+ fn pop(&mut self) -> Option<T> {
+ // The `if` ensures that there are more `T`s in `vec`.
+ if self.start_t < self.end_t {
+ let result;
+ unsafe {
+ // (f) is satisfied before, so in this if branch there actually
+ // is a `T` at `start_t`. After shifting the pointer by one,
+ // (f) is again satisfied.
+ result = ptr::read(self.start_t as *const T);
+ self.start_t = self.start_t.offset(1);
+ }
+ Some(result)
+ } else {
+ None
+ }
+ }
+
+ /// Pushes a new `U` to the `PartialVec`.
+ ///
+ /// # Failure
+ ///
+ /// Fails if not enough `T`s were popped to have enough space for the new
+ /// `U`.
+ fn push(&mut self, value: U) {
+ // The assert assures that still `end_u <= start_t` (d) after
+ // the function.
+        assert!(self.end_u as *const () < self.start_t as *const (),
+            "writing more elements to PartialVec than reading from it");
+ unsafe {
+ // (e) is satisfied before, and after writing one `U`
+ // to `end_u` and shifting it by one, it's again
+ // satisfied.
+ ptr::write(self.end_u, value);
+ self.end_u = self.end_u.offset(1);
+ }
+ }
+
+ /// Unwraps the new `Vec` of `U`s after having pushed enough `U`s and
+ /// popped all `T`s.
+ ///
+ /// # Failure
+ ///
+ /// Fails if not all `T`s were popped, also fails if not the same amount of
+    /// `U`s was pushed before calling `into_vec`.
+ fn into_vec(mut self) -> Vec<U> {
+ // If `self.end_u == self.end_t`, we know from (e) that there are no
+ // more `T`s in `vec`, we also know that the whole length of `vec` is
+ // now used by `U`s, thus we can just interpret `vec` as a vector of
+ // `U` safely.
+
+ assert!(self.end_u as *const () == self.end_t as *const (),
+ "trying to unwrap a PartialVec before completing the writes to it");
+
+ // Extract `vec` and prevent the destructor of `PartialVec` from
+ // running. Note that none of the function calls can fail, thus no
+ // resources can be leaked (as the `vec` member of `PartialVec` is the
+ // only one which holds allocations -- and it is returned from this
+ // function.
+ unsafe {
+ let vec_len = self.vec.len();
+ let vec_cap = self.vec.capacity();
+ let vec_ptr = self.vec.as_mut_ptr() as *mut U;
+ mem::forget(self);
+ Vec::from_raw_parts(vec_len, vec_cap, vec_ptr)
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<T,U> Drop for PartialVec<T,U> {
+ fn drop(&mut self) {
+ unsafe {
+ // As per (a) `vec` hasn't been modified until now. As it has a
+ // length currently, this would run destructors of `T`s which might
+ // not be there. So at first, set `vec`s length to `0`. This must
+ // be done at first to remain memory-safe as the destructors of `U`
+ // or `T` might cause unwinding where `vec`s destructor would be
+ // executed.
+ self.vec.set_len(0);
+
+ // As per (e) and (f) we have instances of `U`s and `T`s in `vec`.
+ // Destruct them.
+ while self.start_u < self.end_u {
+ let _ = ptr::read(self.start_u as *const U); // Run a `U` destructor.
+ self.start_u = self.start_u.offset(1);
+ }
+ while self.start_t < self.end_t {
+ let _ = ptr::read(self.start_t as *const T); // Run a `T` destructor.
+ self.start_t = self.start_t.offset(1);
+ }
+ // After this destructor ran, the destructor of `vec` will run,
+ // deallocating the underlying memory.
+ }
+ }
+}
+
+impl<T> Vec<T> {
+ /// Converts a `Vec<T>` to a `Vec<U>` where `T` and `U` have the same
+ /// non-zero size and the same minimal alignment.
+ ///
+ /// # Failure
+ ///
+ /// Fails if `T` and `U` have differing sizes, are zero-sized or have
+ /// differing minimal alignments.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let v = vec![0u, 1, 2];
+ /// let w = v.map_in_place(|i| i + 3);
+ /// assert_eq!(w.as_slice(), [3, 4, 5].as_slice());
+ ///
+ /// #[deriving(PartialEq, Show)]
+ /// struct Newtype(u8);
+ /// let bytes = vec![0x11, 0x22];
+ /// let newtyped_bytes = bytes.map_in_place(|x| Newtype(x));
+ /// assert_eq!(newtyped_bytes.as_slice(), [Newtype(0x11), Newtype(0x22)].as_slice());
+ /// ```
+ pub fn map_in_place<U>(self, f: |T| -> U) -> Vec<U> {
+ let mut pv = PartialVec::from_vec(self);
+ loop {
+ let maybe_t = pv.pop();
+ match maybe_t {
+ Some(t) => pv.push(f(t)),
+ None => return pv.into_vec(),
+ };
+ }
+ }
+}
+
+
#[cfg(test)]
mod tests {
extern crate test;
fn test_mut_slice_from() {
let mut values = Vec::from_slice([1u8,2,3,4,5]);
{
- let slice = values.mut_slice_from(2);
+ let slice = values.slice_from_mut(2);
assert!(slice == [3, 4, 5]);
- for p in slice.mut_iter() {
+ for p in slice.iter_mut() {
*p += 2;
}
}
fn test_mut_slice_to() {
let mut values = Vec::from_slice([1u8,2,3,4,5]);
{
- let slice = values.mut_slice_to(2);
+ let slice = values.slice_to_mut(2);
assert!(slice == [1, 2]);
- for p in slice.mut_iter() {
+ for p in slice.iter_mut() {
*p += 1;
}
}
fn test_mut_split_at() {
let mut values = Vec::from_slice([1u8,2,3,4,5]);
{
- let (left, right) = values.mut_split_at(2);
+ let (left, right) = values.split_at_mut(2);
assert!(left.slice(0, left.len()) == [1, 2]);
- for p in left.mut_iter() {
+ for p in left.iter_mut() {
*p += 1;
}
assert!(right.slice(0, right.len()) == [3, 4, 5]);
- for p in right.mut_iter() {
+ for p in right.iter_mut() {
*p += 2;
}
}
for &() in v.iter() {}
- assert_eq!(v.mut_iter().count(), 2);
+ assert_eq!(v.iter_mut().count(), 2);
v.push(());
- assert_eq!(v.mut_iter().count(), 3);
+ assert_eq!(v.iter_mut().count(), 3);
v.push(());
- assert_eq!(v.mut_iter().count(), 4);
+ assert_eq!(v.iter_mut().count(), 4);
- for &() in v.mut_iter() {}
+ for &() in v.iter_mut() {}
unsafe { v.set_len(0); }
- assert_eq!(v.mut_iter().count(), 0);
+ assert_eq!(v.iter_mut().count(), 0);
}
#[test]
let _ = vec[3];
}
+ // NOTE uncomment after snapshot
+ /*
+ #[test]
+ #[should_fail]
+ fn test_slice_out_of_bounds_1() {
+ let x: Vec<int> = vec![1, 2, 3, 4, 5];
+ x[-1..];
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_slice_out_of_bounds_2() {
+ let x: Vec<int> = vec![1, 2, 3, 4, 5];
+ x[..6];
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_slice_out_of_bounds_3() {
+ let x: Vec<int> = vec![1, 2, 3, 4, 5];
+ x[-1..4];
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_slice_out_of_bounds_4() {
+ let x: Vec<int> = vec![1, 2, 3, 4, 5];
+ x[1..6];
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_slice_out_of_bounds_5() {
+ let x: Vec<int> = vec![1, 2, 3, 4, 5];
+ x[3..2];
+ }
+ */
+
#[test]
fn test_swap_remove_empty() {
let mut vec: Vec<uint> = vec!();
vec.push(1);
vec.push(2);
let ptr = vec.as_ptr();
- vec = vec.move_iter().unwrap();
+ vec = vec.into_iter().unwrap();
assert_eq!(vec.as_ptr(), ptr);
assert_eq!(vec.capacity(), 7);
assert_eq!(vec.len(), 0);
}
+ #[test]
+ #[should_fail]
+    fn test_map_in_place_incompatible_types_fail() {
+ let v = vec![0u, 1, 2];
+ v.map_in_place(|_| ());
+ }
+
+ #[test]
+ fn test_map_in_place() {
+ let v = vec![0u, 1, 2];
+ assert_eq!(v.map_in_place(|i: uint| i as int - 1).as_slice(), [-1i, 0, 1].as_slice());
+ }
+
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| {
b.bytes = src_len as u64;
b.iter(|| {
- let dst: Vec<uint> = FromIterator::from_iter(src.clone().move_iter());
+ let dst: Vec<uint> = FromIterator::from_iter(src.clone().into_iter());
assert_eq!(dst.len(), src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
b.iter(|| {
let mut dst = dst.clone();
- dst.extend(src.clone().move_iter());
+ dst.extend(src.clone().into_iter());
assert_eq!(dst.len(), dst_len + src_len);
assert!(dst.iter().enumerate().all(|(i, x)| i == *x));
});
_ => ()
}
- buf.mut_slice_to(end).reverse();
+ buf.slice_to_mut(end).reverse();
// Remember start of the fractional digits.
// Points one beyond end of buf if none get generated,
impl<'a> fmt::FormatWriter for Filler<'a> {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
- slice::bytes::copy_memory(self.buf.mut_slice_from(*self.end),
+ slice::bytes::copy_memory(self.buf.slice_from_mut(*self.end),
bytes);
*self.end += bytes.len();
Ok(())
// Writes the sign if it exists, and then the prefix if it was requested
let write_prefix = |f: &mut Formatter| {
- for c in sign.move_iter() {
+ for c in sign.into_iter() {
let mut b = [0, ..4];
let n = c.encode_utf8(b).unwrap_or(0);
try!(f.buf.write(b.slice_to(n)));
if is_positive {
// Accumulate each digit of the number from the least significant
// to the most significant figure.
- for byte in buf.mut_iter().rev() {
+ for byte in buf.iter_mut().rev() {
let n = x % base; // Get the current place value.
x = x / base; // Deaccumulate the number.
*byte = self.digit(cast(n).unwrap()); // Store the digit in the buffer.
}
} else {
// Do the same as above, but accounting for two's complement.
- for byte in buf.mut_iter().rev() {
+ for byte in buf.iter_mut().rev() {
let n = -(x % base); // Get the current place value.
x = x / base; // Deaccumulate the number.
*byte = self.digit(cast(n).unwrap()); // Store the digit in the buffer.
//! These definitions are similar to their `ct` equivalents, but differ in that
//! these can be statically allocated and are slightly optimized for the runtime
-#[cfg(stage0)]
-#[doc(hidden)]
-pub enum Piece<'a> {
- String(&'a str),
- Argument(Argument<'a>),
-}
-
#[doc(hidden)]
pub struct Argument<'a> {
pub position: Position,
This `for` loop syntax can be applied to any iterator over any type.
-## Iteration protocol and more
-
-More detailed information about iterators can be found in the [container
-guide](http://doc.rust-lang.org/guide-container.html) with
-the rest of the rust manuals.
-
*/
use clone::Clone;
/// sum
/// }
/// let x = vec![1i,2,3,7,8,9];
- /// assert_eq!(process(x.move_iter()), 1006);
+ /// assert_eq!(process(x.into_iter()), 1006);
/// ```
#[inline]
fn fuse(self) -> Fuse<Self> {
#[inline]
fn next(&mut self) -> Option<B> {
loop {
- for inner in self.frontiter.mut_iter() {
+ for inner in self.frontiter.iter_mut() {
for x in *inner {
return Some(x)
}
#[inline]
fn next_back(&mut self) -> Option<B> {
loop {
- for inner in self.backiter.mut_iter() {
+ for inner in self.backiter.iter_mut() {
match inner.next_back() {
None => (),
y => return y
* ```
*/
#[lang="index"]
-pub trait Index<Index,Result> {
+pub trait Index<Index, Result> {
/// The method for the indexing (`Foo[Bar]`) operation
fn index<'a>(&'a self, index: &Index) -> &'a Result;
}
* # Example
*
* A trivial implementation of `IndexMut`. When `Foo[Foo]` happens, it ends up
- * calling `index`, and therefore, `main` prints `Indexing!`.
+ * calling `index_mut`, and therefore, `main` prints `Indexing!`.
*
* ```
* struct Foo;
* ```
*/
#[lang="index_mut"]
-pub trait IndexMut<Index,Result> {
+pub trait IndexMut<Index, Result> {
/// The method for the indexing (`Foo[Bar]`) operation
fn index_mut<'a>(&'a mut self, index: &Index) -> &'a mut Result;
}
+/**
+ *
+ * The `Slice` trait is used to specify the functionality of slicing operations
+ * like `arr[from..to]` when used in an immutable context.
+ *
+ * # Example
+ *
+ * A trivial implementation of `Slice`. When `Foo[..Foo]` happens, it ends up
+ * calling `slice_to`, and therefore, `main` prints `Slicing!`.
+ *
+ * ```
+ * struct Foo;
+ *
+ * impl ::core::ops::Slice<Foo, Foo> for Foo {
+ * fn as_slice_<'a>(&'a self) -> &'a Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * fn slice_from_<'a>(&'a self, from: &Foo) -> &'a Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * fn slice_to_<'a>(&'a self, to: &Foo) -> &'a Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * fn slice_<'a>(&'a self, from: &Foo, to: &Foo) -> &'a Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * }
+ *
+ * fn main() {
+ * Foo[..Foo];
+ * }
+ * ```
+ */
+// FIXME(#17273) remove the postscript _s
+#[lang="slice"]
+pub trait Slice<Idx, Sized? Result> for Sized? {
+ /// The method for the slicing operation foo[]
+ fn as_slice_<'a>(&'a self) -> &'a Result;
+ /// The method for the slicing operation foo[from..]
+ fn slice_from_<'a>(&'a self, from: &Idx) -> &'a Result;
+ /// The method for the slicing operation foo[..to]
+ fn slice_to_<'a>(&'a self, to: &Idx) -> &'a Result;
+ /// The method for the slicing operation foo[from..to]
+ fn slice_<'a>(&'a self, from: &Idx, to: &Idx) -> &'a Result;
+}
+
+/**
+ *
+ * The `SliceMut` trait is used to specify the functionality of slicing
+ * operations like `arr[from..to]`, when used in a mutable context.
+ *
+ * # Example
+ *
+ * A trivial implementation of `SliceMut`. When `Foo[Foo..]` happens, it ends up
+ * calling `slice_from_mut`, and therefore, `main` prints `Slicing!`.
+ *
+ * ```
+ * struct Foo;
+ *
+ * impl ::core::ops::SliceMut<Foo, Foo> for Foo {
+ * fn as_mut_slice_<'a>(&'a mut self) -> &'a mut Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * fn slice_from_mut_<'a>(&'a mut self, from: &Foo) -> &'a mut Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * fn slice_to_mut_<'a>(&'a mut self, to: &Foo) -> &'a mut Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * fn slice_mut_<'a>(&'a mut self, from: &Foo, to: &Foo) -> &'a mut Foo {
+ * println!("Slicing!");
+ * self
+ * }
+ * }
+ *
+ * fn main() {
+ * Foo[mut Foo..];
+ * }
+ * ```
+ */
+// FIXME(#17273) remove the postscript _s
+#[lang="slice_mut"]
+pub trait SliceMut<Idx, Sized? Result> for Sized? {
+ /// The method for the slicing operation foo[]
+ fn as_mut_slice_<'a>(&'a mut self) -> &'a mut Result;
+ /// The method for the slicing operation foo[from..]
+ fn slice_from_mut_<'a>(&'a mut self, from: &Idx) -> &'a mut Result;
+ /// The method for the slicing operation foo[..to]
+ fn slice_to_mut_<'a>(&'a mut self, to: &Idx) -> &'a mut Result;
+ /// The method for the slicing operation foo[from..to]
+ fn slice_mut_<'a>(&'a mut self, from: &Idx, to: &Idx) -> &'a mut Result;
+}
/**
*
* The `Deref` trait is used to specify the functionality of dereferencing
Item{opt: self.as_ref()}
}
+ /// Deprecated: use `iter_mut`
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'r>(&'r mut self) -> Item<&'r mut T> {
+ self.iter_mut()
+ }
+
/// Returns a mutable iterator over the possibly contained value.
#[inline]
#[unstable = "waiting for iterator conventions"]
- pub fn mut_iter<'r>(&'r mut self) -> Item<&'r mut T> {
+ pub fn iter_mut<'r>(&'r mut self) -> Item<&'r mut T> {
Item{opt: self.as_mut()}
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> Item<T> {
+ self.into_iter()
+ }
+
/// Returns a consuming iterator over the possibly contained value.
#[inline]
#[unstable = "waiting for iterator conventions"]
- pub fn move_iter(self) -> Item<T> {
+ pub fn into_iter(self) -> Item<T> {
Item{opt: self}
}
#[unstable = "may need a different name after pending changes to pointer types"]
pub fn null<T>() -> *const T { 0 as *const T }
+/// Deprecated: use `null_mut`.
+#[deprecated = "use null_mut"]
+pub fn mut_null<T>() -> *mut T { null_mut() }
+
/// Create an unsafe mutable null pointer.
///
/// # Example
/// ```
/// use std::ptr;
///
-/// let p: *mut int = ptr::mut_null();
+/// let p: *mut int = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[unstable = "may need a different name after pending changes to pointer types"]
-pub fn mut_null<T>() -> *mut T { 0 as *mut T }
+pub fn null_mut<T>() -> *mut T { 0 as *mut T }
/// Zeroes out `count * size_of::<T>` bytes of memory at `dst`
#[inline]
impl<T> RawPtr<T> for *mut T {
#[inline]
- fn null() -> *mut T { mut_null() }
+ fn null() -> *mut T { null_mut() }
#[inline]
fn is_null(&self) -> bool { *self == RawPtr::null() }
Item{opt: self.as_ref().ok()}
}
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter<'r>(&'r mut self) -> Item<&'r mut T> {
+ self.iter_mut()
+ }
+
/// Returns a mutable iterator over the possibly contained value.
#[inline]
#[unstable = "waiting for iterator conventions"]
- pub fn mut_iter<'r>(&'r mut self) -> Item<&'r mut T> {
+ pub fn iter_mut<'r>(&'r mut self) -> Item<&'r mut T> {
Item{opt: self.as_mut().ok()}
}
+    /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> Item<T> {
+ self.into_iter()
+ }
+
/// Returns a consuming iterator over the possibly contained value.
#[inline]
#[unstable = "waiting for iterator conventions"]
- pub fn move_iter(self) -> Item<T> {
+ pub fn into_iter(self) -> Item<T> {
Item{opt: self.ok()}
}
use default::Default;
use iter::*;
use num::{CheckedAdd, Saturating, div_rem};
+use ops;
use option::{None, Option, Some};
use ptr;
use ptr::RawPtr;
}
}
+impl<T> ops::Slice<uint, [T]> for [T] {
+ #[inline]
+ fn as_slice_<'a>(&'a self) -> &'a [T] {
+ self
+ }
+
+ #[inline]
+ fn slice_from_<'a>(&'a self, start: &uint) -> &'a [T] {
+ self.slice_(start, &self.len())
+ }
+
+ #[inline]
+ fn slice_to_<'a>(&'a self, end: &uint) -> &'a [T] {
+ self.slice_(&0, end)
+ }
+ #[inline]
+ fn slice_<'a>(&'a self, start: &uint, end: &uint) -> &'a [T] {
+ assert!(*start <= *end);
+ assert!(*end <= self.len());
+ unsafe {
+ transmute(RawSlice {
+ data: self.as_ptr().offset(*start as int),
+ len: (*end - *start)
+ })
+ }
+ }
+}
+
+impl<T> ops::SliceMut<uint, [T]> for [T] {
+ #[inline]
+ fn as_mut_slice_<'a>(&'a mut self) -> &'a mut [T] {
+ self
+ }
+
+ #[inline]
+ fn slice_from_mut_<'a>(&'a mut self, start: &uint) -> &'a mut [T] {
+ let len = &self.len();
+ self.slice_mut_(start, len)
+ }
+
+ #[inline]
+ fn slice_to_mut_<'a>(&'a mut self, end: &uint) -> &'a mut [T] {
+ self.slice_mut_(&0, end)
+ }
+ #[inline]
+ fn slice_mut_<'a>(&'a mut self, start: &uint, end: &uint) -> &'a mut [T] {
+ assert!(*start <= *end);
+ assert!(*end <= self.len());
+ unsafe {
+ transmute(RawSlice {
+ data: self.as_ptr().offset(*start as int),
+ len: (*end - *start)
+ })
+ }
+ }
+}
+
/// Extension methods for vectors such that their elements are
/// mutable.
#[experimental = "may merge with other traits; may lose region param; needs review"]
/// Primarily intended for getting a &mut [T] from a [T, ..N].
fn as_mut_slice(self) -> &'a mut [T];
+ /// Deprecated: use `slice_mut`.
+ #[deprecated = "use slice_mut"]
+ fn mut_slice(self, start: uint, end: uint) -> &'a mut [T] {
+ self.slice_mut(start, end)
+ }
+
/// Returns a mutable subslice spanning the interval [`start`, `end`).
///
/// Fails when the end of the new slice lies beyond the end of the
/// original slice (i.e. when `end > self.len()`) or when `start > end`.
///
/// Slicing with `start` equal to `end` yields an empty slice.
- fn mut_slice(self, start: uint, end: uint) -> &'a mut [T];
+ fn slice_mut(self, start: uint, end: uint) -> &'a mut [T];
+
+ /// Deprecated: use `slice_from_mut`.
+ #[deprecated = "use slice_from_mut"]
+ fn mut_slice_from(self, start: uint) -> &'a mut [T] {
+ self.slice_from_mut(start)
+ }
/// Returns a mutable subslice from `start` to the end of the slice.
///
/// Fails when `start` is strictly greater than the length of the original slice.
///
/// Slicing from `self.len()` yields an empty slice.
- fn mut_slice_from(self, start: uint) -> &'a mut [T];
+ fn slice_from_mut(self, start: uint) -> &'a mut [T];
+
+ /// Deprecated: use `slice_to_mut`.
+ #[deprecated = "use slice_to_mut"]
+ fn mut_slice_to(self, end: uint) -> &'a mut [T] {
+ self.slice_to_mut(end)
+ }
/// Returns a mutable subslice from the start of the slice to `end`.
///
/// Fails when `end` is strictly greater than the length of the original slice.
///
/// Slicing to `0` yields an empty slice.
- fn mut_slice_to(self, end: uint) -> &'a mut [T];
+ fn slice_to_mut(self, end: uint) -> &'a mut [T];
+
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ fn mut_iter(self) -> MutItems<'a, T> {
+ self.iter_mut()
+ }
/// Returns an iterator that allows modifying each value
- fn mut_iter(self) -> MutItems<'a, T>;
+ fn iter_mut(self) -> MutItems<'a, T>;
+
+ /// Deprecated: use `last_mut`.
+ #[deprecated = "use last_mut"]
+ fn mut_last(self) -> Option<&'a mut T> {
+ self.last_mut()
+ }
/// Returns a mutable pointer to the last item in the vector.
- fn mut_last(self) -> Option<&'a mut T>;
+ fn last_mut(self) -> Option<&'a mut T>;
+
+ /// Deprecated: use `split_mut`.
+ #[deprecated = "use split_mut"]
+ fn mut_split(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T> {
+ self.split_mut(pred)
+ }
/// Returns an iterator over the mutable subslices of the vector
/// which are separated by elements that match `pred`. The
/// matched element is not contained in the subslices.
- fn mut_split(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T>;
+ fn split_mut(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T>;
+
+ /// Deprecated: use `chunks_mut`.
+ #[deprecated = "use chunks_mut"]
+ fn mut_chunks(self, chunk_size: uint) -> MutChunks<'a, T> {
+ self.chunks_mut(chunk_size)
+ }
/**
* Returns an iterator over `chunk_size` elements of the vector at a time.
*
* Fails if `chunk_size` is 0.
*/
- fn mut_chunks(self, chunk_size: uint) -> MutChunks<'a, T>;
+ fn chunks_mut(self, chunk_size: uint) -> MutChunks<'a, T>;
/**
* Returns a mutable reference to the first element in this slice
* ```ignore
* if self.len() == 0 { return None; }
* let head = &mut self[0];
- * *self = self.mut_slice_from(1);
+ * *self = self.slice_from_mut(1);
* Some(head)
* ```
*
* ```ignore
* if self.len() == 0 { return None; }
* let tail = &mut self[self.len() - 1];
- * *self = self.mut_slice_to(self.len() - 1);
+ * *self = self.slice_to_mut(self.len() - 1);
* Some(tail)
* ```
*
/// ```
fn swap(self, a: uint, b: uint);
+ /// Deprecated: use `split_at_mut`.
+ #[deprecated = "use split_at_mut"]
+ fn mut_split_at(self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
+ self.split_at_mut(mid)
+ }
/// Divides one `&mut` into two at an index.
///
///
/// // scoped to restrict the lifetime of the borrows
/// {
- /// let (left, right) = v.mut_split_at(0);
+ /// let (left, right) = v.split_at_mut(0);
/// assert!(left == &mut []);
/// assert!(right == &mut [1i, 2, 3, 4, 5, 6]);
/// }
///
/// {
- /// let (left, right) = v.mut_split_at(2);
+ /// let (left, right) = v.split_at_mut(2);
/// assert!(left == &mut [1i, 2]);
/// assert!(right == &mut [3i, 4, 5, 6]);
/// }
///
/// {
- /// let (left, right) = v.mut_split_at(6);
+ /// let (left, right) = v.split_at_mut(6);
/// assert!(left == &mut [1i, 2, 3, 4, 5, 6]);
/// assert!(right == &mut []);
/// }
/// ```
- fn mut_split_at(self, mid: uint) -> (&'a mut [T], &'a mut [T]);
+ fn split_at_mut(self, mid: uint) -> (&'a mut [T], &'a mut [T]);
/// Reverse the order of elements in a vector, in place.
///
/// ```
fn reverse(self);
+ /// Deprecated: use `unsafe_mut`.
+ #[deprecated = "use unsafe_mut"]
+ unsafe fn unsafe_mut_ref(self, index: uint) -> &'a mut T {
+ self.unsafe_mut(index)
+ }
+
/// Returns an unsafe mutable pointer to the element in index
- unsafe fn unsafe_mut_ref(self, index: uint) -> &'a mut T;
+ unsafe fn unsafe_mut(self, index: uint) -> &'a mut T;
/// Return an unsafe mutable pointer to the vector's buffer.
///
#[inline]
fn as_mut_slice(self) -> &'a mut [T] { self }
- fn mut_slice(self, start: uint, end: uint) -> &'a mut [T] {
+ fn slice_mut(self, start: uint, end: uint) -> &'a mut [T] {
assert!(start <= end);
assert!(end <= self.len());
unsafe {
}
#[inline]
- fn mut_slice_from(self, start: uint) -> &'a mut [T] {
+ fn slice_from_mut(self, start: uint) -> &'a mut [T] {
let len = self.len();
- self.mut_slice(start, len)
+ self.slice_mut(start, len)
}
#[inline]
- fn mut_slice_to(self, end: uint) -> &'a mut [T] {
- self.mut_slice(0, end)
+ fn slice_to_mut(self, end: uint) -> &'a mut [T] {
+ self.slice_mut(0, end)
}
#[inline]
- fn mut_split_at(self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
+ fn split_at_mut(self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
unsafe {
let len = self.len();
let self2: &'a mut [T] = mem::transmute_copy(&self);
- (self.mut_slice(0, mid), self2.mut_slice(mid, len))
+ (self.slice_mut(0, mid), self2.slice_mut(mid, len))
}
}
#[inline]
- fn mut_iter(self) -> MutItems<'a, T> {
+ fn iter_mut(self) -> MutItems<'a, T> {
unsafe {
let p = self.as_mut_ptr();
if mem::size_of::<T>() == 0 {
}
#[inline]
- fn mut_last(self) -> Option<&'a mut T> {
+ fn last_mut(self) -> Option<&'a mut T> {
let len = self.len();
if len == 0 { return None; }
Some(&mut self[len - 1])
}
#[inline]
- fn mut_split(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T> {
+ fn split_mut(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T> {
MutSplits { v: self, pred: pred, finished: false }
}
#[inline]
- fn mut_chunks(self, chunk_size: uint) -> MutChunks<'a, T> {
+ fn chunks_mut(self, chunk_size: uint) -> MutChunks<'a, T> {
assert!(chunk_size > 0);
MutChunks { v: self, chunk_size: chunk_size }
}
while i < ln / 2 {
// Unsafe swap to avoid the bounds check in safe swap.
unsafe {
- let pa: *mut T = self.unsafe_mut_ref(i);
- let pb: *mut T = self.unsafe_mut_ref(ln - i - 1);
+ let pa: *mut T = self.unsafe_mut(i);
+ let pb: *mut T = self.unsafe_mut(ln - i - 1);
ptr::swap(pa, pb);
}
i += 1;
}
#[inline]
- unsafe fn unsafe_mut_ref(self, index: uint) -> &'a mut T {
+ unsafe fn unsafe_mut(self, index: uint) -> &'a mut T {
transmute((self.repr().data as *mut T).offset(index as int))
}
#[inline]
unsafe fn unsafe_set(self, index: uint, val: T) {
- *self.unsafe_mut_ref(index) = val;
+ *self.unsafe_mut(index) = val;
}
#[inline]
impl<'a, T:Clone> MutableCloneableSlice<T> for &'a mut [T] {
#[inline]
fn clone_from_slice(self, src: &[T]) -> uint {
- for (a, b) in self.mut_iter().zip(src.iter()) {
+ for (a, b) in self.iter_mut().zip(src.iter()) {
a.clone_from(b);
}
cmp::min(self.len(), src.len())
self.finished = true;
let tmp = mem::replace(&mut self.v, &mut []);
let len = tmp.len();
- let (head, tail) = tmp.mut_split_at(len);
+ let (head, tail) = tmp.split_at_mut(len);
self.v = tail;
Some(head)
}
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
- let (head, tail) = tmp.mut_split_at(idx);
- self.v = tail.mut_slice_from(1);
+ let (head, tail) = tmp.split_at_mut(idx);
+ self.v = tail.slice_from_mut(1);
Some(head)
}
}
}
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
- let (head, tail) = tmp.mut_split_at(idx);
+ let (head, tail) = tmp.split_at_mut(idx);
self.v = head;
- Some(tail.mut_slice_from(1))
+ Some(tail.slice_from_mut(1))
}
}
}
} else {
let sz = cmp::min(self.v.len(), self.chunk_size);
let tmp = mem::replace(&mut self.v, &mut []);
- let (head, tail) = tmp.mut_split_at(sz);
+ let (head, tail) = tmp.split_at_mut(sz);
self.v = tail;
Some(head)
}
let sz = if remainder != 0 { remainder } else { self.chunk_size };
let tmp = mem::replace(&mut self.v, &mut []);
let tmp_len = tmp.len();
- let (head, tail) = tmp.mut_split_at(tmp_len - sz);
+ let (head, tail) = tmp.split_at_mut(tmp_len - sz);
self.v = head;
Some(tail)
}
memory: uint
}
-// This is the Two-Way search algorithm, which was introduced in the paper:
-// Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+/*
+ This is the Two-Way search algorithm, which was introduced in the paper:
+ Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+
+ Here's some background information.
+
+ A *word* is a string of symbols. The *length* of a word should be a familiar
+ notion, and here we denote it for any word x by |x|.
+ (We also allow for the possibility of the *empty word*, a word of length zero).
+
+ If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
+ *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
+ For example, both 1 and 2 are periods for the string "aa". As another example,
+ the only period of the string "abcd" is 4.
+
+ We denote by period(x) the *smallest* period of x (provided that x is non-empty).
+ This is always well-defined since every non-empty word x has at least one period,
+ |x|. We sometimes call this *the period* of x.
+
+ If u, v and x are words such that x = uv, where uv is the concatenation of u and
+ v, then we say that (u, v) is a *factorization* of x.
+
+ Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
+ that both of the following hold
+
+ - either w is a suffix of u or u is a suffix of w
+ - either w is a prefix of v or v is a prefix of w
+
+ then w is said to be a *repetition* for the factorization (u, v).
+
+ Just to unpack this, there are four possibilities here. Let w = "abc". Then we
+ might have:
+
+ - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
+ - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
+ - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
+ - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
+
+ Note that the word vu is a repetition for any factorization (u,v) of x = uv,
+ so every factorization has at least one repetition.
+
+ If x is a string and (u, v) is a factorization for x, then a *local period* for
+ (u, v) is an integer r such that there is some word w such that |w| = r and w is
+ a repetition for (u, v).
+
+ We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
+ call this *the local period* of (u, v). Provided that x = uv is non-empty, this
+ is well-defined (because every factorization has at least one repetition, as
+ noted above).
+
+ It can be proven that the following is an equivalent definition of a local period
+ for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
+ all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
+ defined. (i.e. i >= 0 and i + r < |x|).
+
+ Using the above reformulation, it is easy to prove that
+
+ 1 <= local_period(u, v) <= period(uv)
+
+ A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
+ *critical factorization*.
+
+ The algorithm hinges on the following theorem, which is stated without proof:
+
+ **Critical Factorization Theorem** Any word x has at least one critical
+ factorization (u, v) such that |u| < period(x).
+
+ The purpose of maximal_suffix is to find such a critical factorization.
+
+*/
impl TwoWaySearcher {
fn new(needle: &[u8]) -> TwoWaySearcher {
let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
period = period2;
}
+ // This isn't in the original algorithm, as far as I'm aware.
let byteset = needle.iter()
.fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
- // The logic here (calculating crit_pos and period, the final if statement to see which
- // period to use for the TwoWaySearcher) is essentially an implementation of the
- // "small-period" function from the paper (p. 670)
+ // A particularly readable explanation of what's going on here can be found
+ // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
+ // see the code for "Algorithm CP" on p. 323.
//
- // In the paper they check whether `needle.slice_to(crit_pos)` is a suffix of
- // `needle.slice(crit_pos, crit_pos + period)`, which is precisely what this does
+ // What's going on is we have some critical factorization (u, v) of the
+ // needle, and we want to determine whether u is a suffix of
+ // v.slice_to(period). If it is, we use "Algorithm CP1". Otherwise we use
+ // "Algorithm CP2", which is optimized for when the period of the needle
+ // is large.
if needle.slice_to(crit_pos) == needle.slice(period, period + crit_pos) {
TwoWaySearcher {
crit_pos: crit_pos,
}
}
+ // One of the main ideas of Two-Way is that we factorize the needle into
+ // two halves, (u, v), and begin trying to find v in the haystack by scanning
+ // left to right. If v matches, we try to match u by scanning right to left.
+ // How far we can jump when we encounter a mismatch is all based on the fact
+ // that (u, v) is a critical factorization for the needle.
#[inline]
fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
'search: loop {
((haystack[self.position + needle.len() - 1] & 0x3f)
as uint)) & 1 == 0 {
self.position += needle.len();
+ if !long_period {
+ self.memory = 0;
+ }
continue 'search;
}
}
}
- // returns (i, p) where i is the "critical position", the starting index of
- // of maximal suffix, and p is the period of the suffix
- // see p. 668 of the paper
+ // Computes a critical factorization (u, v) of `arr`.
+ // Specifically, returns (i, p), where i is the starting index of v in some
+ // critical factorization (u, v) and p = period(v)
#[inline]
fn maximal_suffix(arr: &[u8], reversed: bool) -> (uint, uint) {
let mut left = -1; // Corresponds to i in the paper
#[test]
fn test_reverse() {
let mut ys = [1i, 2, 3, 4, 5];
- ys.mut_iter().reverse_();
+ ys.iter_mut().reverse_();
assert!(ys == [5, 4, 3, 2, 1]);
}
let mut x = Some(val);
{
- let mut it = x.mut_iter();
+ let mut it = x.iter_mut();
assert_eq!(it.size_hint(), (1, Some(1)));
// test that it does not take more elements than it needs
let mut functions = [|| Some(()), || None, || fail!()];
- let v: Option<Vec<()>> = collect(functions.mut_iter().map(|f| (*f)()));
+ let v: Option<Vec<()>> = collect(functions.iter_mut().map(|f| (*f)()));
assert!(v == None);
}
// test that it does not take more elements than it needs
let mut functions = [|| Ok(()), || Err(1i), || fail!()];
- let v: Result<Vec<()>, int> = collect(functions.mut_iter().map(|f| (*f)()));
+ let v: Result<Vec<()>, int> = collect(functions.iter_mut().map(|f| (*f)()));
assert!(v == Err(1));
}
// test that it does not take more elements than it needs
let mut functions = [|| Ok(()), || Err(1i), || fail!()];
- assert_eq!(fold_(functions.mut_iter()
+ assert_eq!(fold_(functions.iter_mut()
.map(|f| (*f)())),
Err(1));
}
check_contains_all_substrings("012345678901234567890123456789bcdabcdabcd");
}
+#[test]
+fn strslice_issue_16878() {
+ assert!(!"1234567ah012345678901ah".contains("hah"));
+ assert!(!"00abc01234567890123456789abc".contains("bcabc"));
+}
+
#[test]
fn test_strslice_contains() {
use std::char;
use std::str;
+use std::string;
/// A piece is a portion of the format string which represents the next part
/// to emit. These are emitted as a stream by the `Parser` class.
String(&'a str),
/// This describes that formatting should process the next argument (as
/// specified inside) for emission.
- Argument(Argument<'a>),
+ NextArgument(Argument<'a>),
}
/// Representation of an argument specification.
input: &'a str,
cur: str::CharOffsets<'a>,
/// Error messages accumulated during parsing
- pub errors: Vec<String>,
+ pub errors: Vec<string::String>,
}
impl<'a> Iterator<Piece<'a>> for Parser<'a> {
if self.consume('{') {
Some(String(self.string(pos + 1)))
} else {
- let ret = Some(Argument(self.argument()));
+ let ret = Some(NextArgument(self.argument()));
self.must_consume('}');
ret
}
#[test]
fn format_nothing() {
- same("{}", [Argument(Argument {
+ same("{}", [NextArgument(Argument {
position: ArgumentNext,
format: fmtdflt(),
})]);
}
#[test]
fn format_position() {
- same("{3}", [Argument(Argument {
+ same("{3}", [NextArgument(Argument {
position: ArgumentIs(3),
format: fmtdflt(),
})]);
}
#[test]
fn format_position_nothing_else() {
- same("{3:}", [Argument(Argument {
+ same("{3:}", [NextArgument(Argument {
position: ArgumentIs(3),
format: fmtdflt(),
})]);
}
#[test]
fn format_type() {
- same("{3:a}", [Argument(Argument {
+ same("{3:a}", [NextArgument(Argument {
position: ArgumentIs(3),
format: FormatSpec {
fill: None,
}
#[test]
fn format_align_fill() {
- same("{3:>}", [Argument(Argument {
+ same("{3:>}", [NextArgument(Argument {
position: ArgumentIs(3),
format: FormatSpec {
fill: None,
ty: "",
},
})]);
- same("{3:0<}", [Argument(Argument {
+ same("{3:0<}", [NextArgument(Argument {
position: ArgumentIs(3),
format: FormatSpec {
fill: Some('0'),
ty: "",
},
})]);
- same("{3:*<abcd}", [Argument(Argument {
+ same("{3:*<abcd}", [NextArgument(Argument {
position: ArgumentIs(3),
format: FormatSpec {
fill: Some('*'),
}
#[test]
fn format_counts() {
- same("{:10s}", [Argument(Argument {
+ same("{:10s}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
ty: "s",
},
})]);
- same("{:10$.10s}", [Argument(Argument {
+ same("{:10$.10s}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
ty: "s",
},
})]);
- same("{:.*s}", [Argument(Argument {
+ same("{:.*s}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
ty: "s",
},
})]);
- same("{:.10$s}", [Argument(Argument {
+ same("{:.10$s}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
ty: "s",
},
})]);
- same("{:a$.b$s}", [Argument(Argument {
+ same("{:a$.b$s}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
}
#[test]
fn format_flags() {
- same("{:-}", [Argument(Argument {
+ same("{:-}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
ty: "",
},
})]);
- same("{:+#}", [Argument(Argument {
+ same("{:+#}", [NextArgument(Argument {
position: ArgumentNext,
format: FormatSpec {
fill: None,
}
#[test]
fn format_mixture() {
- same("abcd {3:a} efg", [String("abcd "), Argument(Argument {
+ same("abcd {3:a} efg", [String("abcd "), NextArgument(Argument {
position: ArgumentIs(3),
format: FormatSpec {
fill: None,
match fs::readdir(path) {
Ok(mut children) => {
children.sort_by(|p1, p2| p2.filename().cmp(&p1.filename()));
- Some(children.move_iter().collect())
+ Some(children.into_iter().collect())
}
Err(..) => None
}
None => {
match list_dir_sorted(path) {
Some(entries) => {
- todo.extend(entries.move_iter().map(|x|(x, idx)));
+ todo.extend(entries.into_iter().map(|x|(x, idx)));
// Matching the special directory entries . and .. that refer to
// the current and parent directory respectively requires that
fn to_opt_strs(self) -> Vec<Option<&'static str>> {
match self {
UnlabelledNodes(len)
- => Vec::from_elem(len, None).move_iter().collect(),
+ => Vec::from_elem(len, None).into_iter().collect(),
AllNodesLabelled(lbls)
- => lbls.move_iter().map(
+ => lbls.into_iter().map(
|l|Some(l)).collect(),
SomeNodesLabelled(lbls)
- => lbls.move_iter().collect(),
+ => lbls.into_iter().collect(),
}
}
}
/// Process everything in the work queue (continually)
fn work(&mut self) {
while self.work.len() > 0 {
- for work in mem::replace(&mut self.work, vec![]).move_iter() {
+ for work in mem::replace(&mut self.work, vec![]).into_iter() {
work();
}
}
let messages = unsafe {
mem::replace(&mut *self.messages.lock(), Vec::new())
};
- for message in messages.move_iter() {
+ for message in messages.into_iter() {
self.message(message);
}
}
fn message(&mut self, message: Message) {
match message {
RunRemote(i) => {
- match self.remotes.mut_iter().find(|& &(id, _)| id == i) {
+ match self.remotes.iter_mut().find(|& &(id, _)| id == i) {
Some(&(_, ref mut f)) => f.call(),
None => unreachable!()
}
// Now that we've got all our work queues, create one scheduler per
// queue, spawn the scheduler into a thread, and be sure to keep a
// handle to the scheduler and the thread to keep them alive.
- for worker in workers.move_iter() {
+ for worker in workers.into_iter() {
rtdebug!("inserting a regular scheduler");
let mut sched = box Scheduler::new(pool.id,
// Tell all existing schedulers about this new scheduler so they can all
// steal work from it
- for handle in self.handles.mut_iter() {
+ for handle in self.handles.iter_mut() {
handle.send(NewNeighbor(stealer.clone()));
}
}
// Now that everyone's gone, tell everything to shut down.
- for mut handle in replace(&mut self.handles, vec![]).move_iter() {
+ for mut handle in replace(&mut self.handles, vec![]).into_iter() {
handle.send(Shutdown);
}
- for thread in replace(&mut self.threads, vec![]).move_iter() {
+ for thread in replace(&mut self.threads, vec![]).into_iter() {
thread.join();
}
}
// Test the literal string from args against the current filter, if there
// is one.
match unsafe { FILTER.as_ref() } {
- Some(filter) if filter.is_match(args.to_string().as_slice()) => return,
+ Some(filter) if !filter.is_match(args.to_string().as_slice()) => return,
_ => {}
}
/// Initialize logging for the current process.
///
-/// This is not threadsafe at all, so initialization os performed through a
+/// This is not threadsafe at all, so initialization is performed through a
/// `Once` primitive (and this function is called from that primitive).
fn init() {
let (mut directives, filter) = match os::getenv("RUST_LOG") {
let root = unsafe { CString::new(root.as_ptr(), false) };
let root = Path::new(root);
- dirs.move_iter().filter(|path| {
+ dirs.into_iter().filter(|path| {
path.as_vec() != b"." && path.as_vec() != b".."
}).map(|path| root.join(path).to_c_str()).collect()
}
let ret = unsafe {
libc::ReadFile(self.handle(), buf.as_ptr() as libc::LPVOID,
buf.len() as libc::DWORD, &mut read,
- ptr::mut_null())
+ ptr::null_mut())
};
if ret != 0 {
Ok(read as uint)
let ret = unsafe {
libc::WriteFile(self.handle(), cur as libc::LPVOID,
remaining as libc::DWORD, &mut amt,
- ptr::mut_null())
+ ptr::null_mut())
};
if ret != 0 {
remaining -= amt as uint;
libc::CreateFileW(path.as_ptr(),
dwDesiredAccess,
dwShareMode,
- ptr::mut_null(),
+ ptr::null_mut(),
dwCreationDisposition,
dwFlagsAndAttributes,
- ptr::mut_null())
+ ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
Err(super::last_error())
let p = try!(to_utf16(p));
super::mkerr_winbool(unsafe {
// FIXME: turn mode into something useful? #2623
- libc::CreateDirectoryW(p.as_ptr(), ptr::mut_null())
+ libc::CreateDirectoryW(p.as_ptr(), ptr::null_mut())
})
}
let root = unsafe { CString::new(root.as_ptr(), false) };
let root = Path::new(root);
- dirs.move_iter().filter(|path| {
+ dirs.into_iter().filter(|path| {
path.as_vec() != b"." && path.as_vec() != b".."
}).map(|path| root.join(path).to_c_str()).collect()
}
libc::CreateFileW(p.as_ptr(),
libc::GENERIC_READ,
libc::FILE_SHARE_READ,
- ptr::mut_null(),
+ ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_ATTRIBUTE_NORMAL,
- ptr::mut_null())
+ ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
return Err(super::last_error())
let src = try!(to_utf16(src));
let dst = try!(to_utf16(dst));
super::mkerr_winbool(unsafe {
- libc::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::mut_null())
+ libc::CreateHardLinkW(dst.as_ptr(), src.as_ptr(), ptr::null_mut())
})
}
pub fn new() -> (HANDLE, HANDLE) {
unsafe {
- let handle = CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
+ let handle = CreateEventA(ptr::null_mut(), libc::FALSE, libc::FALSE,
ptr::null());
(handle, handle)
}
Vec<Option<Box<rtio::RtioPipe + Send>>>)> {
process::Process::spawn(cfg).map(|(p, io)| {
(box p as Box<rtio::RtioProcess + Send>,
- io.move_iter().map(|p| p.map(|p| {
+ io.into_iter().map(|p| p.map(|p| {
box p as Box<rtio::RtioPipe + Send>
})).collect())
})
}
enum InAddr {
- InAddr(libc::in_addr),
+ In4Addr(libc::in_addr),
In6Addr(libc::in6_addr),
}
(b as u32 << 16) |
(c as u32 << 8) |
(d as u32 << 0);
- InAddr(libc::in_addr {
+ In4Addr(libc::in_addr {
s_addr: Int::from_be(ip)
})
}
-> libc::socklen_t {
unsafe {
let len = match ip_to_inaddr(addr.ip) {
- InAddr(inaddr) => {
+ In4Addr(inaddr) => {
let storage = storage as *mut _ as *mut libc::sockaddr_in;
(*storage).sin_family = libc::AF_INET as libc::sa_family_t;
(*storage).sin_port = htons(addr.port);
while !self.inner.closed.load(atomic::SeqCst) {
match retry(|| unsafe {
- libc::accept(self.fd(), ptr::mut_null(), ptr::mut_null())
+ libc::accept(self.fd(), ptr::null_mut(), ptr::null_mut())
}) {
-1 if util::wouldblock() => {}
-1 => return Err(os::last_error()),
if wsaevents.lNetworkEvents & c::FD_ACCEPT == 0 { continue }
match unsafe {
- libc::accept(self.fd(), ptr::mut_null(), ptr::mut_null())
+ libc::accept(self.fd(), ptr::null_mut(), ptr::null_mut())
} {
-1 if util::wouldblock() => {}
-1 => return Err(os::last_error()),
pub fn set_membership(&mut self, addr: rtio::IpAddr,
opt: libc::c_int) -> IoResult<()> {
match ip_to_inaddr(addr) {
- InAddr(addr) => {
+ In4Addr(addr) => {
let mreq = libc::ip_mreq {
imr_multiaddr: addr,
// interface == INADDR_ANY
})
}
s.sun_family = libc::AF_UNIX as libc::sa_family_t;
- for (slot, value) in s.sun_path.mut_iter().zip(addr.iter()) {
+ for (slot, value) in s.sun_path.iter_mut().zip(addr.iter()) {
*slot = value;
}
impl Event {
fn new(manual_reset: bool, initial_state: bool) -> IoResult<Event> {
let event = unsafe {
- libc::CreateEventW(ptr::mut_null(),
+ libc::CreateEventW(ptr::null_mut(),
manual_reset as libc::BOOL,
initial_state as libc::BOOL,
ptr::null())
65536,
65536,
0,
- ptr::mut_null()
+ ptr::null_mut()
)
}
libc::CreateFileW(p,
libc::GENERIC_READ | libc::GENERIC_WRITE,
0,
- ptr::mut_null(),
+ ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_FLAG_OVERLAPPED,
- ptr::mut_null())
+ ptr::null_mut())
};
if result != libc::INVALID_HANDLE_VALUE {
return Some(result)
libc::CreateFileW(p,
libc::GENERIC_READ | libc::FILE_WRITE_ATTRIBUTES,
0,
- ptr::mut_null(),
+ ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_FLAG_OVERLAPPED,
- ptr::mut_null())
+ ptr::null_mut())
};
if result != libc::INVALID_HANDLE_VALUE {
return Some(result)
libc::CreateFileW(p,
libc::GENERIC_WRITE | libc::FILE_READ_ATTRIBUTES,
0,
- ptr::mut_null(),
+ ptr::null_mut(),
libc::OPEN_EXISTING,
libc::FILE_FLAG_OVERLAPPED,
- ptr::mut_null())
+ ptr::null_mut())
};
if result != libc::INVALID_HANDLE_VALUE {
return Some(result)
let ret = unsafe {
libc::SetNamedPipeHandleState(inner.handle,
&mut mode,
- ptr::mut_null(),
- ptr::mut_null())
+ ptr::null_mut(),
+ ptr::null_mut())
};
return if ret == 0 {
Err(super::last_error())
}
fn cancel_io(&self) -> IoResult<()> {
- match unsafe { c::CancelIoEx(self.handle(), ptr::mut_null()) } {
+ match unsafe { c::CancelIoEx(self.handle(), ptr::null_mut()) } {
0 if os::errno() == libc::ERROR_NOT_FOUND as uint => {
Ok(())
}
if b"PATH" != key.as_bytes_no_nul() { continue }
// Split the value and test each path to see if the program exists.
- for path in os::split_paths(v.as_bytes_no_nul()).move_iter() {
+ for path in os::split_paths(v.as_bytes_no_nul()).into_iter() {
let path = path.join(cfg.program.as_bytes_no_nul())
.with_extension(os::consts::EXE_EXTENSION);
if path.exists() {
let size = mem::size_of::<libc::SECURITY_ATTRIBUTES>();
let mut sa = libc::SECURITY_ATTRIBUTES {
nLength: size as libc::DWORD,
- lpSecurityDescriptor: ptr::mut_null(),
+ lpSecurityDescriptor: ptr::null_mut(),
bInheritHandle: 1,
};
let filename: Vec<u16> = "NUL".utf16_units().collect();
&mut sa,
libc::OPEN_EXISTING,
0,
- ptr::mut_null());
+ ptr::null_mut());
if *slot == INVALID_HANDLE_VALUE {
return Err(super::last_error())
}
cmd_str = cmd_str.append_one(0);
let created = CreateProcessW(ptr::null(),
cmd_str.as_mut_ptr(),
- ptr::mut_null(),
- ptr::mut_null(),
+ ptr::null_mut(),
+ ptr::null_mut(),
TRUE,
flags, envp, dirp,
&mut si, &mut pi);
fn zeroed_startupinfo() -> libc::types::os::arch::extra::STARTUPINFO {
libc::types::os::arch::extra::STARTUPINFO {
cb: 0,
- lpReserved: ptr::mut_null(),
- lpDesktop: ptr::mut_null(),
- lpTitle: ptr::mut_null(),
+ lpReserved: ptr::null_mut(),
+ lpDesktop: ptr::null_mut(),
+ lpTitle: ptr::null_mut(),
dwX: 0,
dwY: 0,
dwXSize: 0,
dwFlags: 0,
wShowWindow: 0,
cbReserved2: 0,
- lpReserved2: ptr::mut_null(),
+ lpReserved2: ptr::null_mut(),
hStdInput: libc::INVALID_HANDLE_VALUE,
hStdOutput: libc::INVALID_HANDLE_VALUE,
hStdError: libc::INVALID_HANDLE_VALUE,
#[cfg(windows)]
fn zeroed_process_information() -> libc::types::os::arch::extra::PROCESS_INFORMATION {
libc::types::os::arch::extra::PROCESS_INFORMATION {
- hProcess: ptr::mut_null(),
- hThread: ptr::mut_null(),
+ hProcess: ptr::null_mut(),
+ hThread: ptr::null_mut(),
dwProcessId: 0,
dwThreadId: 0
}
Err(..) => {
Ok(SpawnProcessResult {
pid: pid,
- handle: ptr::mut_null()
+ handle: ptr::null_mut()
})
}
Ok(..) => fail!("short read on the cloexec pipe"),
cb(blk.as_mut_ptr() as *mut c_void)
}
- _ => cb(ptr::mut_null())
+ _ => cb(ptr::null_mut())
}
}
tv = util::ms_to_timeval(ms);
(&mut tv as *mut _, idx)
}
- None => (ptr::mut_null(), -1),
+ None => (ptr::null_mut(), -1),
};
// Wait for something to happen
c::fd_set(&mut set, input);
c::fd_set(&mut set, read_fd);
- match unsafe { c::select(max, &mut set, ptr::mut_null(),
- ptr::mut_null(), p) } {
+ match unsafe { c::select(max, &mut set, ptr::null_mut(),
+ ptr::null_mut(), p) } {
// interrupted, retry
-1 if os::errno() == libc::EINTR as int => continue,
// Once this helper thread is done, we re-register the old sigchld
// handler and close our intermediate file descriptors.
unsafe {
- assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::mut_null()), 0);
+ assert_eq!(c::sigaction(c::SIGCHLD, &old, ptr::null_mut()), 0);
let _ = libc::close(read_fd);
let _ = libc::close(WRITE_FD);
WRITE_FD = -1;
pub fn now() -> u64 {
unsafe {
let mut now: libc::timeval = mem::zeroed();
- assert_eq!(c::gettimeofday(&mut now, ptr::mut_null()), 0);
+ assert_eq!(c::gettimeofday(&mut now, ptr::null_mut()), 0);
return (now.tv_sec as u64) * 1000 + (now.tv_usec as u64) / 1000;
}
}
'outer: loop {
let timeout = if active.len() == 0 {
// Empty array? no timeout (wait forever for the next request)
- ptr::mut_null()
+ ptr::null_mut()
} else {
let now = now();
// If this request has already expired, then signal it and go
c::fd_set(&mut set, input);
match unsafe {
- c::select(input + 1, &mut set, ptr::mut_null(),
- ptr::mut_null(), timeout)
+ c::select(input + 1, &mut set, ptr::null_mut(),
+ ptr::null_mut(), timeout)
} {
// timed out
0 => signal(&mut active, &mut dead),
unsafe { HELPER.boot(|| {}, helper) }
let obj = unsafe {
- imp::CreateWaitableTimerA(ptr::mut_null(), 0, ptr::null())
+ imp::CreateWaitableTimerA(ptr::null_mut(), 0, ptr::null())
};
if obj.is_null() {
Err(super::last_error())
// 100ns intervals, so we multiply by 10^4.
let due = -(msecs as i64 * 10000) as libc::LARGE_INTEGER;
assert_eq!(unsafe {
- imp::SetWaitableTimer(self.obj, &due, 0, ptr::mut_null(),
- ptr::mut_null(), 0)
+ imp::SetWaitableTimer(self.obj, &due, 0, ptr::null_mut(),
+ ptr::null_mut(), 0)
}, 1);
let _ = unsafe { imp::WaitForSingleObject(self.obj, libc::INFINITE) };
// see above for the calculation
let due = -(msecs as i64 * 10000) as libc::LARGE_INTEGER;
assert_eq!(unsafe {
- imp::SetWaitableTimer(self.obj, &due, 0, ptr::mut_null(),
- ptr::mut_null(), 0)
+ imp::SetWaitableTimer(self.obj, &due, 0, ptr::null_mut(),
+ ptr::null_mut(), 0)
}, 1);
unsafe { HELPER.send(NewTimer(self.obj, cb, true)) }
let due = -(msecs as i64 * 10000) as libc::LARGE_INTEGER;
assert_eq!(unsafe {
imp::SetWaitableTimer(self.obj, &due, msecs as libc::LONG,
- ptr::mut_null(), ptr::mut_null(), 0)
+ ptr::null_mut(), ptr::null_mut(), 0)
}, 1);
unsafe { HELPER.send(NewTimer(self.obj, cb, false)) }
utf16.as_mut_ptr() as LPVOID,
utf16.len() as u32,
&mut num as LPDWORD,
- ptr::mut_null()) } {
+ ptr::null_mut()) } {
0 => return Err(super::last_error()),
_ => (),
};
utf16.as_ptr() as LPCVOID,
utf16.len() as u32,
&mut num as LPDWORD,
- ptr::mut_null()) } {
+ ptr::null_mut()) } {
0 => Err(super::last_error()),
_ => Ok(()),
}
// undefined what the value of the 'tv' is after select
// returns EINTR).
let mut tv = ms_to_timeval(timeout - (::io::timer::now() - start));
- c::select(fd + 1, ptr::mut_null(), set as *mut _,
- ptr::mut_null(), &mut tv)
+ c::select(fd + 1, ptr::null_mut(), set as *mut _,
+ ptr::null_mut(), &mut tv)
})
}
#[cfg(windows)]
fn await(_fd: net::sock_t, set: &mut c::fd_set,
timeout: u64) -> libc::c_int {
let mut tv = ms_to_timeval(timeout);
- unsafe { c::select(1, ptr::mut_null(), set, ptr::mut_null(), &mut tv) }
+ unsafe { c::select(1, ptr::null_mut(), set, ptr::null_mut(), &mut tv) }
}
}
}
let (read, write) = match status {
- Readable => (&mut set as *mut _, ptr::mut_null()),
- Writable => (ptr::mut_null(), &mut set as *mut _),
+ Readable => (&mut set as *mut _, ptr::null_mut()),
+ Writable => (ptr::null_mut(), &mut set as *mut _),
};
let mut tv: libc::timeval = unsafe { mem::zeroed() };
match retry(|| {
let now = ::io::timer::now();
let tvp = match deadline {
- None => ptr::mut_null(),
+ None => ptr::null_mut(),
Some(deadline) => {
// If we're past the deadline, then pass a 0 timeout to
// select() so we can poll the status
}
};
let r = unsafe {
- c::select(max as libc::c_int, read, write, ptr::mut_null(), tvp)
+ c::select(max as libc::c_int, read, write, ptr::null_mut(), tvp)
};
r
}) {
//! ## Example
//!
//! ```rust
+//! # #![allow(deprecated)]
//! use num::bigint::BigUint;
//! use std::num::{Zero, One};
//! use std::mem::replace;
//! It's easy to generate large random numbers:
//!
//! ```rust
+//! # #![allow(deprecated)]
//! use num::bigint::{ToBigInt, RandBigInt};
//! use std::rand;
//!
fn to_biguint(&self) -> Option<BigUint> {
if self.sign == Plus {
Some(self.data.clone())
- } else if self.sign == Zero {
+ } else if self.sign == NoSign {
Some(Zero::zero())
} else {
None
/// A Sign is a `BigInt`'s composing element.
#[deriving(PartialEq, PartialOrd, Eq, Ord, Clone, Show)]
-pub enum Sign { Minus, Zero, Plus }
+pub enum Sign { Minus, NoSign, Plus }
impl Neg<Sign> for Sign {
/// Negate Sign value.
fn neg(&self) -> Sign {
match *self {
Minus => Plus,
- Zero => Zero,
+ NoSign => NoSign,
Plus => Minus
}
}
if scmp != Equal { return scmp; }
match self.sign {
- Zero => Equal,
+ NoSign => Equal,
Plus => self.data.cmp(&other.data),
Minus => other.data.cmp(&self.data),
}
impl Zero for BigInt {
#[inline]
fn zero() -> BigInt {
- BigInt::from_biguint(Zero, Zero::zero())
+ BigInt::from_biguint(NoSign, Zero::zero())
}
#[inline]
- fn is_zero(&self) -> bool { self.sign == Zero }
+ fn is_zero(&self) -> bool { self.sign == NoSign }
}
impl One for BigInt {
#[inline]
fn abs(&self) -> BigInt {
match self.sign {
- Plus | Zero => self.clone(),
+ Plus | NoSign => self.clone(),
Minus => BigInt::from_biguint(Plus, self.data.clone())
}
}
match self.sign {
Plus => BigInt::from_biguint(Plus, One::one()),
Minus => BigInt::from_biguint(Minus, One::one()),
- Zero => Zero::zero(),
+ NoSign => Zero::zero(),
}
}
#[inline]
fn add(&self, other: &BigInt) -> BigInt {
match (self.sign, other.sign) {
- (Zero, _) => other.clone(),
- (_, Zero) => self.clone(),
+ (NoSign, _) => other.clone(),
+ (_, NoSign) => self.clone(),
(Plus, Plus) => BigInt::from_biguint(Plus, self.data + other.data),
(Plus, Minus) => self - (-*other),
(Minus, Plus) => other - (-*self),
#[inline]
fn sub(&self, other: &BigInt) -> BigInt {
match (self.sign, other.sign) {
- (Zero, _) => -other,
- (_, Zero) => self.clone(),
+ (NoSign, _) => -other,
+ (_, NoSign) => self.clone(),
(Plus, Plus) => match self.data.cmp(&other.data) {
Less => BigInt::from_biguint(Minus, other.data - self.data),
Greater => BigInt::from_biguint(Plus, self.data - other.data),
#[inline]
fn mul(&self, other: &BigInt) -> BigInt {
match (self.sign, other.sign) {
- (Zero, _) | (_, Zero) => Zero::zero(),
+ (NoSign, _) | (_, NoSign) => Zero::zero(),
(Plus, Plus) | (Minus, Minus) => {
BigInt::from_biguint(Plus, self.data * other.data)
},
let d = BigInt::from_biguint(Plus, d_ui);
let r = BigInt::from_biguint(Plus, r_ui);
match (self.sign, other.sign) {
- (_, Zero) => fail!(),
- (Plus, Plus) | (Zero, Plus) => ( d, r),
- (Plus, Minus) | (Zero, Minus) => (-d, r),
+ (_, NoSign) => fail!(),
+ (Plus, Plus) | (NoSign, Plus) => ( d, r),
+ (Plus, Minus) | (NoSign, Minus) => (-d, r),
(Minus, Plus) => (-d, -r),
(Minus, Minus) => ( d, -r)
}
let d = BigInt::from_biguint(Plus, d_ui);
let m = BigInt::from_biguint(Plus, m_ui);
match (self.sign, other.sign) {
- (_, Zero) => fail!(),
- (Plus, Plus) | (Zero, Plus) => (d, m),
- (Plus, Minus) | (Zero, Minus) => if m.is_zero() {
+ (_, NoSign) => fail!(),
+ (Plus, Plus) | (NoSign, Plus) => (d, m),
+ (Plus, Minus) | (NoSign, Minus) => if m.is_zero() {
(-d, Zero::zero())
} else {
(-d - One::one(), m + *other)
fn to_i64(&self) -> Option<i64> {
match self.sign {
Plus => self.data.to_i64(),
- Zero => Some(0),
+ NoSign => Some(0),
Minus => {
self.data.to_u64().and_then(|n| {
let m: u64 = 1 << 63;
fn to_u64(&self) -> Option<u64> {
match self.sign {
Plus => self.data.to_u64(),
- Zero => Some(0),
+ NoSign => Some(0),
Minus => None
}
}
fn to_str_radix(&self, radix: uint) -> String {
match self.sign {
Plus => self.data.to_str_radix(radix),
- Zero => "0".to_string(),
+ NoSign => "0".to_string(),
Minus => format!("-{}", self.data.to_str_radix(radix)),
}
}
if self.gen() {
return self.gen_bigint(bit_size);
} else {
- Zero
+ NoSign
}
} else if self.gen() {
Plus
/// The digits are in base 2^32.
#[inline]
pub fn from_biguint(sign: Sign, data: BigUint) -> BigInt {
- if sign == Zero || data.is_zero() {
- return BigInt { sign: Zero, data: Zero::zero() };
+ if sign == NoSign || data.is_zero() {
+ return BigInt { sign: NoSign, data: Zero::zero() };
}
BigInt { sign: sign, data: data }
}
pub fn to_biguint(&self) -> Option<BigUint> {
match self.sign {
Plus => Some(self.data.clone()),
- Zero => Some(Zero::zero()),
+ NoSign => Some(Zero::zero()),
Minus => None
}
}
mod bigint_tests {
use Integer;
use super::{BigDigit, BigUint, ToBigUint};
- use super::{Sign, Minus, Zero, Plus, BigInt, RandBigInt, ToBigInt};
+ use super::{Sign, Minus, NoSign, Plus, BigInt, RandBigInt, ToBigInt};
use std::cmp::{Less, Equal, Greater};
use std::i64;
assert_eq!(inp, ans);
}
check(Plus, 1, Plus, 1);
- check(Plus, 0, Zero, 0);
+ check(Plus, 0, NoSign, 0);
check(Minus, 1, Minus, 1);
- check(Zero, 1, Zero, 0);
+ check(NoSign, 1, NoSign, 0);
}
#[test]
#[test]
fn test_hash() {
- let a = BigInt::new(Zero, vec!());
- let b = BigInt::new(Zero, vec!(0));
+ let a = BigInt::new(NoSign, vec!());
+ let b = BigInt::new(NoSign, vec!(0));
let c = BigInt::new(Plus, vec!(1));
let d = BigInt::new(Plus, vec!(1,0,0,0,0,0));
let e = BigInt::new(Plus, vec!(0,0,0,0,0,1));
//! approximate a square root to arbitrary precision:
//!
//! ```
+//! # #![allow(deprecated)]
//! extern crate num;
//!
//! use num::bigint::BigInt;
#![feature(default_type_params)]
#![crate_name = "num"]
-#![experimental]
+#![deprecated = "This is now a cargo package located at: \
+ https://github.com/rust-lang/num"]
+#![allow(deprecated)]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
// we convert the list from individual weights to cumulative
// weights so we can binary search. This *could* drop elements
// with weight == 0 as an optimisation.
- for item in items.mut_iter() {
+ for item in items.iter_mut() {
running_total = match running_total.checked_add(&item.weight) {
Some(n) => n,
None => fail!("WeightedChoice::new called with a total weight \
// - 1], 0, 0, ...], to fill rng.rsl.
let seed_iter = seed.iter().map(|&x| x).chain(Repeat::new(0u32));
- for (rsl_elem, seed_elem) in self.rsl.mut_iter().zip(seed_iter) {
+ for (rsl_elem, seed_elem) in self.rsl.iter_mut().zip(seed_iter) {
*rsl_elem = seed_elem;
}
self.cnt = 0;
// - 1], 0, 0, ...], to fill rng.rsl.
let seed_iter = seed.iter().map(|&x| x).chain(Repeat::new(0u64));
- for (rsl_elem, seed_elem) in self.rsl.mut_iter().zip(seed_iter) {
+ for (rsl_elem, seed_elem) in self.rsl.iter_mut().zip(seed_iter) {
*rsl_elem = seed_elem;
}
self.cnt = 0;
// optimisations are on.
let mut count = 0i;
let mut num = 0;
- for byte in dest.mut_iter() {
+ for byte in dest.iter_mut() {
if count == 0 {
// we could micro-optimise here by generating a u32 if
// we only need a few more bytes to fill the vector
// Do the necessary writes
if left.len() > 0 {
- slice::bytes::copy_memory(self.buf.mut_slice_from(self.pos), left);
+ slice::bytes::copy_memory(self.buf.slice_from_mut(self.pos), left);
}
if right.len() > 0 {
self.buf.push_all(right);
use parse;
use parse::{
Flags, FLAG_EMPTY,
- Nothing, Literal, Dot, Class, Begin, End, WordBoundary, Capture, Cat, Alt,
+ Nothing, Literal, Dot, AstClass, Begin, End, WordBoundary, Capture, Cat, Alt,
Rep,
ZeroOne, ZeroMore, OneMore,
};
Nothing => {},
Literal(c, flags) => self.push(OneChar(c, flags)),
Dot(nl) => self.push(Any(nl)),
- Class(ranges, flags) =>
+ AstClass(ranges, flags) =>
self.push(CharClass(ranges, flags)),
Begin(flags) => self.push(EmptyBegin(flags)),
End(flags) => self.push(EmptyEnd(flags)),
self.push(Save(2 * cap + 1));
}
Cat(xs) => {
- for x in xs.move_iter() {
+ for x in xs.into_iter() {
self.compile(x)
}
}
FLAG_EMPTY, FLAG_NOCASE, FLAG_MULTI, FLAG_DOTNL,
FLAG_SWAP_GREED, FLAG_NEGATED,
};
- pub use re::{Dynamic, Native};
+ pub use re::{Dynamic, ExDynamic, Native, ExNative};
pub use vm::{
MatchKind, Exists, Location, Submatches,
StepState, StepMatchEarlyReturn, StepMatch, StepContinue,
Nothing,
Literal(char, Flags),
Dot(Flags),
- Class(Vec<(char, char)>, Flags),
+ AstClass(Vec<(char, char)>, Flags),
Begin(Flags),
End(Flags),
WordBoundary(Flags),
/// state.
#[deriving(Show)]
enum BuildAst {
- Ast(Ast),
+ Expr(Ast),
Paren(Flags, uint, String), // '('
Bar, // '|'
}
fn unwrap(self) -> Result<Ast, Error> {
match self {
- Ast(x) => Ok(x),
+ Expr(x) => Ok(x),
_ => fail!("Tried to unwrap non-AST item: {}", self),
}
}
}
fn push(&mut self, ast: Ast) {
- self.stack.push(Ast(ast))
+ self.stack.push(Expr(ast))
}
fn push_repeater(&mut self, c: char) -> Result<(), Error> {
match c {
'[' =>
match self.try_parse_ascii() {
- Some(Class(asciis, flags)) => {
- alts.push(Class(asciis, flags ^ negated));
+ Some(AstClass(asciis, flags)) => {
+ alts.push(AstClass(asciis, flags ^ negated));
continue
}
Some(ast) =>
},
'\\' => {
match try!(self.parse_escape()) {
- Class(asciis, flags) => {
- alts.push(Class(asciis, flags ^ negated));
+ AstClass(asciis, flags) => {
+ alts.push(AstClass(asciis, flags ^ negated));
continue
}
Literal(c2, _) => c = c2, // process below
']' => {
if ranges.len() > 0 {
let flags = negated | (self.flags & FLAG_NOCASE);
- let mut ast = Class(combine_ranges(ranges), flags);
- for alt in alts.move_iter() {
+ let mut ast = AstClass(combine_ranges(ranges), flags);
+ for alt in alts.into_iter() {
ast = Alt(box alt, box ast)
}
self.push(ast);
} else if alts.len() > 0 {
let mut ast = alts.pop().unwrap();
- for alt in alts.move_iter() {
+ for alt in alts.into_iter() {
ast = Alt(box alt, box ast)
}
self.push(ast);
Some(ranges) => {
self.chari = closer;
let flags = negated | (self.flags & FLAG_NOCASE);
- Some(Class(combine_ranges(ranges), flags))
+ Some(AstClass(combine_ranges(ranges), flags))
}
}
}
let ranges = perl_unicode_class(c);
let mut flags = self.flags & FLAG_NOCASE;
if c.is_uppercase() { flags |= FLAG_NEGATED }
- Ok(Class(ranges, flags))
+ Ok(AstClass(ranges, flags))
}
_ => {
self.err(format!("Invalid escape sequence '\\\\{}'",
name).as_slice())
}
Some(ranges) => {
- Ok(Class(ranges, negated | (self.flags & FLAG_NOCASE)))
+ Ok(AstClass(ranges, negated | (self.flags & FLAG_NOCASE)))
}
}
}
while i > from {
i = i - 1;
match self.stack.pop().unwrap() {
- Ast(x) => combined = mk(x, combined),
+ Expr(x) => combined = mk(x, combined),
_ => {},
}
}
// This is currently O(n^2), but I think with sufficient cleverness,
// it can be reduced to O(n) **if necessary**.
let mut ordered: Vec<(char, char)> = Vec::with_capacity(unordered.len());
- for (us, ue) in unordered.move_iter() {
+ for (us, ue) in unordered.into_iter() {
let (mut us, mut ue) = (us, ue);
assert!(us <= ue);
let mut which: Option<uint> = None;
// See the comments for the `program` module in `lib.rs` for a more
// detailed explanation for what `regex!` requires.
#[doc(hidden)]
- Dynamic(Dynamic),
+ Dynamic(ExDynamic),
#[doc(hidden)]
- Native(Native),
+ Native(ExNative),
}
#[deriving(Clone)]
#[doc(hidden)]
-pub struct Dynamic {
+pub struct ExDynamic {
original: String,
names: Vec<Option<String>>,
#[doc(hidden)]
}
#[doc(hidden)]
-pub struct Native {
+pub struct ExNative {
#[doc(hidden)]
pub original: &'static str,
#[doc(hidden)]
pub prog: fn(MatchKind, &str, uint, uint) -> Vec<Option<uint>>
}
-impl Clone for Native {
- fn clone(&self) -> Native { *self }
+impl Clone for ExNative {
+ fn clone(&self) -> ExNative { *self }
}
impl fmt::Show for Regex {
pub fn new(re: &str) -> Result<Regex, parse::Error> {
let ast = try!(parse::parse(re));
let (prog, names) = Program::new(ast);
- Ok(Dynamic(Dynamic {
+ Ok(Dynamic(ExDynamic {
original: re.to_string(),
names: names,
prog: prog,
/// Returns the original string of this regex.
pub fn as_str<'a>(&'a self) -> &'a str {
match *self {
- Dynamic(Dynamic { ref original, .. }) => original.as_slice(),
- Native(Native { ref original, .. }) => original.as_slice(),
+ Dynamic(ExDynamic { ref original, .. }) => original.as_slice(),
+ Native(ExNative { ref original, .. }) => original.as_slice(),
}
}
fn exec_slice(re: &Regex, which: MatchKind,
input: &str, s: uint, e: uint) -> CaptureLocs {
match *re {
- Dynamic(Dynamic { ref prog, .. }) => vm::run(which, prog, input, s, e),
- Native(Native { prog, .. }) => prog(which, input, s, e),
+ Dynamic(ExDynamic { ref prog, .. }) => vm::run(which, prog, input, s, e),
+ Native(ExNative { prog, .. }) => prog(which, input, s, e),
}
}
let mut rng = task_rng();
let mut bytes = rng.gen_ascii_chars().map(|n| n as u8).take(n)
.collect::<Vec<u8>>();
- for (i, b) in bytes.mut_iter().enumerate() {
+ for (i, b) in bytes.iter_mut().enumerate() {
if i % 20 == 0 {
*b = b'\n'
}
return StepMatch
}
Submatches => {
- for (slot, val) in groups.mut_iter().zip(caps.iter()) {
+ for (slot, val) in groups.iter_mut().zip(caps.iter()) {
*slot = *val;
}
return StepMatch
*t.groups.get_mut(1) = groups[1];
}
(false, Submatches) => {
- for (slot, val) in t.groups.mut_iter().zip(groups.iter()) {
+ for (slot, val) in t.groups.iter_mut().zip(groups.iter()) {
*slot = *val;
}
}
use regex::native::{
OneChar, CharClass, Any, Save, Jump, Split,
Match, EmptyBegin, EmptyEnd, EmptyWordBoundary,
- Program, Dynamic, Native,
+ Program, Dynamic, ExDynamic, Native,
FLAG_NOCASE, FLAG_MULTI, FLAG_DOTNL, FLAG_NEGATED,
};
}
};
let prog = match re {
- Dynamic(Dynamic { ref prog, .. }) => prog.clone(),
+ Dynamic(ExDynamic { ref prog, .. }) => prog.clone(),
Native(_) => unreachable!(),
};
t.groups[1] = groups[1];
}
Submatches => {
- for (slot, val) in t.groups.mut_iter().zip(groups.iter()) {
+ for (slot, val) in t.groups.iter_mut().zip(groups.iter()) {
*slot = *val;
}
}
}
}
-::regex::native::Native(::regex::native::Native {
+::regex::native::Native(::regex::native::ExNative {
original: $regex,
names: CAP_NAMES,
prog: exec,
return StepMatch
}
Submatches => {
- for (slot, val) in groups.mut_iter().zip(caps.iter()) {
+ for (slot, val) in groups.iter_mut().zip(caps.iter()) {
*slot = *val;
}
return StepMatch
// LLVM to optimize these function calls to themselves!
#![no_builtins]
+#[phase(plugin, link)] extern crate core;
+
#[cfg(test)] extern crate native;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate debug;
#[cfg(test)] #[phase(plugin, link)] extern crate std;
-#[cfg(test)] #[phase(plugin, link)] extern crate core;
// Require the offset intrinsics for LLVM to properly optimize the
// implementations below. If pointer arithmetic is done through integers the
}
pub fn mangle_internal_name_by_path_and_seq(path: PathElems, flav: &str) -> String {
- mangle(path.chain(Some(gensym_name(flav)).move_iter()), None)
+ mangle(path.chain(Some(gensym_name(flav)).into_iter()), None)
}
pub fn get_cc_prog(sess: &Session) -> String {
ab.add_rlib(&p, name.as_slice(), sess.lto()).unwrap();
let native_libs = csearch::get_native_libraries(&sess.cstore, cnum);
- all_native_libs.extend(native_libs.move_iter());
+ all_native_libs.extend(native_libs.into_iter());
}
ab.update_symbols();
}
// Rust does its own LTO
- cmd.arg("-fno-lto").arg("-fno-use-linker-plugin");
+ cmd.arg("-fno-lto");
+
+ // clang fails hard if -fno-use-linker-plugin is passed
+ if sess.targ_cfg.os == abi::OsWindows {
+ cmd.arg("-fno-use-linker-plugin");
+ }
// If we're building a dylib, we don't use --gc-sections because LLVM has
// already done the best it can do, and we also don't want to eliminate the
// we're just getting an ordering of crate numbers, we're not worried about
// the paths.
let crates = sess.cstore.get_used_crates(cstore::RequireStatic);
- for (cnum, _) in crates.move_iter() {
+ for (cnum, _) in crates.into_iter() {
let libs = csearch::get_native_libraries(&sess.cstore, cnum);
for &(kind, ref lib) in libs.iter() {
match kind {
// load the bitcode from the archive. Then merge it into the current LLVM
// module that we've got.
let crates = sess.cstore.get_used_crates(cstore::RequireStatic);
- for (cnum, path) in crates.move_iter() {
+ for (cnum, path) in crates.into_iter() {
let name = sess.cstore.get_crate_data(cnum).name.clone();
let path = match path {
Some(p) => p,
use back::lto;
use back::link::{get_cc_prog, remove};
use driver::driver::{CrateTranslation, ModuleTranslation, OutputFilenames};
-use driver::config::NoDebugInfo;
+use driver::config::{NoDebugInfo, Passes, SomePasses, AllPasses};
use driver::session::Session;
use driver::config;
use llvm;
-use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
+use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef, ContextRef};
use util::common::time;
use syntax::abi;
use syntax::codemap;
use std::iter::Unfold;
use std::ptr;
use std::str;
+use std::mem;
use std::sync::{Arc, Mutex};
use std::task::TaskBuilder;
-use libc::{c_uint, c_int};
+use libc::{c_uint, c_int, c_void};
#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
lto_ctxt: Option<(&'a Session, &'a [String])>,
// Handler to use for diagnostics produced during codegen.
handler: &'a Handler,
+ // LLVM optimizations for which we want to print remarks.
+ remark: Passes,
}
impl<'a> CodegenContext<'a> {
- fn new(handler: &'a Handler) -> CodegenContext<'a> {
- CodegenContext {
- lto_ctxt: None,
- handler: handler,
- }
- }
-
fn new_with_session(sess: &'a Session, reachable: &'a [String]) -> CodegenContext<'a> {
CodegenContext {
lto_ctxt: Some((sess, reachable)),
handler: sess.diagnostic().handler(),
+ remark: sess.opts.cg.remark.clone(),
+ }
+ }
+}
+
+struct DiagHandlerFreeVars<'a> {
+ llcx: ContextRef,
+ cgcx: &'a CodegenContext<'a>,
+}
+
+unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_void) {
+ let DiagHandlerFreeVars { llcx, cgcx }
+ = *mem::transmute::<_, *const DiagHandlerFreeVars>(user);
+
+ match llvm::diagnostic::Diagnostic::unpack(info) {
+ llvm::diagnostic::Optimization(opt) => {
+ let pass_name = CString::new(opt.pass_name, false);
+ let pass_name = pass_name.as_str().expect("got a non-UTF8 pass name from LLVM");
+ let enabled = match cgcx.remark {
+ AllPasses => true,
+ SomePasses(ref v) => v.iter().any(|s| s.as_slice() == pass_name),
+ };
+
+ if enabled {
+ let loc = llvm::debug_loc_to_string(llcx, opt.debug_loc);
+ cgcx.handler.note(format!("optimization {:s} for {:s} at {:s}: {:s}",
+ opt.kind.describe(),
+ pass_name,
+ if loc.is_empty() { "[unknown]" } else { loc.as_slice() },
+ llvm::twine_to_string(opt.message)).as_slice());
+ }
}
+
+ _ => (),
}
}
let ModuleTranslation { llmod, llcx } = mtrans;
let tm = config.tm;
+ // llcx doesn't outlive this function, so we can put this on the stack.
+ let fv = DiagHandlerFreeVars {
+ llcx: llcx,
+ cgcx: cgcx,
+ };
+ if !cgcx.remark.is_empty() {
+ llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler,
+ &fv as *const DiagHandlerFreeVars
+ as *mut c_void);
+ }
+
if config.emit_no_opt_bc {
let ext = format!("{}.no-opt.bc", name_extra);
output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
if config.emit_asm {
let path = output_names.with_extension(format!("{}.s", name_extra).as_slice());
with_codegen(tm, llmod, config.no_builtins, |cpm| {
- write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::AssemblyFile);
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::AssemblyFileType);
});
}
if config.emit_obj {
let path = output_names.with_extension(format!("{}.o", name_extra).as_slice());
with_codegen(tm, llmod, config.no_builtins, |cpm| {
- write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::ObjectFile);
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::ObjectFileType);
});
}
});
for i in range(0, num_workers) {
let work_items_arc = work_items_arc.clone();
let diag_emitter = diag_emitter.clone();
+ let remark = sess.opts.cg.remark.clone();
let future = TaskBuilder::new().named(format!("codegen-{}", i)).try_future(proc() {
let diag_handler = mk_handler(box diag_emitter);
// Must construct cgcx inside the proc because it has non-Send
// fields.
- let cgcx = CodegenContext::new(&diag_handler);
+ let cgcx = CodegenContext {
+ lto_ctxt: None,
+ handler: &diag_handler,
+ remark: remark,
+ };
loop {
// Avoid holding the lock for the entire duration of the match.
}
let mut failed = false;
- for future in futures.move_iter() {
+ for future in futures.into_iter() {
match future.unwrap() {
Ok(()) => {},
Err(_) => {
E0092,
E0093,
E0094,
- E0095,
- E0096,
- E0097,
- E0098,
- E0099,
E0100,
E0101,
E0102,
E0139,
E0140,
E0141,
- E0142,
E0143,
E0144,
E0145,
E0157,
E0158,
E0159,
- E0160,
E0161
)
--pretty flowgraph output", FLOWGRAPH_PRINT_ALL))
}
+#[deriving(Clone)]
+pub enum Passes {
+ SomePasses(Vec<String>),
+ AllPasses,
+}
+
+impl Passes {
+ pub fn is_empty(&self) -> bool {
+ match *self {
+ SomePasses(ref v) => v.is_empty(),
+ AllPasses => false,
+ }
+ }
+}
+
/// Declare a macro that will define all CodegenOptions fields and parsers all
/// at once. The goal of this macro is to define an interface that can be
/// programmatically used by the option parser in order to initialize the struct
&[ $( (stringify!($opt), cgsetters::$opt, $desc) ),* ];
mod cgsetters {
- use super::CodegenOptions;
+ use super::{CodegenOptions, Passes, SomePasses, AllPasses};
$(
pub fn $opt(cg: &mut CodegenOptions, v: Option<&str>) -> bool {
None => false
}
}
+
+ fn parse_passes(slot: &mut Passes, v: Option<&str>) -> bool {
+ match v {
+ Some("all") => {
+ *slot = AllPasses;
+ true
+ }
+ v => {
+ let mut passes = vec!();
+ if parse_list(&mut passes, v) {
+ *slot = SomePasses(passes);
+ true
+ } else {
+ false
+ }
+ }
+ }
+ }
}
) )
"extra data to put in each output filename"),
codegen_units: uint = (1, parse_uint,
"divide crate into N units to optimize in parallel"),
+ remark: Passes = (SomePasses(Vec::new()), parse_passes,
+ "print remarks for these optimization passes (space separated, or \"all\")"),
)
pub fn build_codegen_options(matches: &getopts::Matches) -> CodegenOptions
{
let mut cg = basic_codegen_options();
- for option in matches.opt_strs("C").move_iter() {
+ for option in matches.opt_strs("C").into_iter() {
let mut iter = option.as_slice().splitn(1, '=');
let key = iter.next().unwrap();
let value = iter.next();
if sess.opts.test {
append_configuration(&mut user_cfg, InternedString::new("test"))
}
- user_cfg.move_iter().collect::<Vec<_>>().append(default_cfg.as_slice())
+ user_cfg.into_iter().collect::<Vec<_>>().append(default_cfg.as_slice())
}
pub fn get_os(triple: &str) -> Option<abi::Os> {
// Convert strings provided as --cfg [cfgspec] into a crate_cfg
fn parse_cfgspecs(cfgspecs: Vec<String> ) -> ast::CrateConfig {
- cfgspecs.move_iter().map(|s| {
+ cfgspecs.into_iter().map(|s| {
parse::parse_meta_from_source_str("cfgspec".to_string(),
s.to_string(),
Vec::new(),
let mut describe_lints = false;
for &level in [lint::Allow, lint::Warn, lint::Deny, lint::Forbid].iter() {
- for lint_name in matches.opt_strs(level.as_str()).move_iter() {
+ for lint_name in matches.opt_strs(level.as_str()).into_iter() {
if lint_name.as_slice() == "help" {
describe_lints = true;
} else {
None |
Some("2") => FullDebugInfo,
Some(arg) => {
- early_error(format!("optimization level needs to be between \
- 0-3 (instead was `{}`)",
+ early_error(format!("debug info level needs to be between \
+ 0-2 (instead was `{}`)",
arg).as_slice());
}
}
}
let cg = build_codegen_options(matches);
+ if !cg.remark.is_empty() && debuginfo == NoDebugInfo {
+ early_warn("-C remark will not show source locations without --debuginfo");
+ }
+
let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
Some("auto") => Auto,
Some("always") => Always,
use back::write;
use driver::session::Session;
use driver::config;
-use front;
use lint;
use llvm::{ContextRef, ModuleRef};
use metadata::common::LinkMeta;
use metadata::creader;
-use middle::{trans, freevars, stability, kind, ty, typeck, reachable};
+use middle::{trans, stability, kind, ty, typeck, reachable};
use middle::dependency_format;
use middle;
use plugin::load::Plugins;
}
if sess.show_span() {
- front::show_span::run(sess, &krate);
+ syntax::show_span::run(sess.diagnostic(), &krate);
}
krate
*sess.crate_metadata.borrow_mut() =
collect_crate_metadata(sess, krate.attrs.as_slice());
- time(time_passes, "gated feature checking", (), |_|
- front::feature_gate::check_crate(sess, &krate));
+ time(time_passes, "gated feature checking", (), |_| {
+ let (features, unknown_features) =
+ syntax::feature_gate::check_crate(&sess.parse_sess.span_diagnostic, &krate);
+
+ for uf in unknown_features.iter() {
+ sess.add_lint(lint::builtin::UNKNOWN_FEATURES,
+ ast::CRATE_NODE_ID,
+ *uf,
+ "unknown feature".to_string());
+ }
+
+ sess.abort_if_errors();
+ *sess.features.borrow_mut() = features;
+ });
+
+ let any_exe = sess.crate_types.borrow().iter().any(|ty| {
+ *ty == config::CrateTypeExecutable
+ });
krate = time(time_passes, "crate injection", krate, |krate|
- front::std_inject::maybe_inject_crates_ref(sess, krate));
+ syntax::std_inject::maybe_inject_crates_ref(krate,
+ sess.opts.alt_std_name.clone(),
+ any_exe));
// strip before expansion to allow macros to depend on
// configuration variables e.g. in
// baz! should not use this definition unless foo is enabled.
krate = time(time_passes, "configuration 1", krate, |krate|
- front::config::strip_unconfigured_items(krate));
+ syntax::config::strip_unconfigured_items(krate));
let mut addl_plugins = Some(addl_plugins);
let Plugins { macros, registrars }
let mut registry = Registry::new(&krate);
time(time_passes, "plugin registration", (), |_| {
- if sess.features.rustc_diagnostic_macros.get() {
+ if sess.features.borrow().rustc_diagnostic_macros {
registry.register_macro("__diagnostic_used",
diagnostics::plugin::expand_diagnostic_used);
registry.register_macro("__register_diagnostic",
{
let mut ls = sess.lint_store.borrow_mut();
- for pass in lint_passes.move_iter() {
+ for pass in lint_passes.into_iter() {
ls.register_pass(Some(sess), true, pass);
}
- for (name, to) in lint_groups.move_iter() {
+ for (name, to) in lint_groups.into_iter() {
ls.register_group(Some(sess), true, name, to);
}
}
os::setenv("PATH", os::join_paths(new_path.as_slice()).unwrap());
}
let cfg = syntax::ext::expand::ExpansionConfig {
- deriving_hash_type_parameter: sess.features.default_type_params.get(),
+ deriving_hash_type_parameter: sess.features.borrow().default_type_params,
crate_name: crate_name.to_string(),
};
let ret = syntax::ext::expand::expand_crate(&sess.parse_sess,
// strip again, in case expansion added anything with a #[cfg].
krate = time(time_passes, "configuration 2", krate, |krate|
- front::config::strip_unconfigured_items(krate));
+ syntax::config::strip_unconfigured_items(krate));
krate = time(time_passes, "maybe building test harness", krate, |krate|
- front::test::modify_for_testing(sess, krate));
+ syntax::test::modify_for_testing(&sess.parse_sess,
+ &sess.opts.cfg,
+ krate,
+ sess.diagnostic()));
krate = time(time_passes, "prelude injection", krate, |krate|
- front::std_inject::maybe_inject_prelude(sess, krate));
+ syntax::std_inject::maybe_inject_prelude(krate));
time(time_passes, "checking that all macro invocations are gone", &krate, |krate|
syntax::ext::expand::check_for_macros(&sess.parse_sess, krate));
middle::lang_items::collect_language_items(krate, &sess));
let middle::resolve::CrateMap {
- def_map: def_map,
- exp_map2: exp_map2,
- trait_map: trait_map,
- external_exports: external_exports,
- last_private_map: last_private_map
+ def_map,
+ freevars,
+ capture_mode_map,
+ exp_map2,
+ trait_map,
+ external_exports,
+ last_private_map
} =
time(time_passes, "resolution", (), |_|
middle::resolve::resolve_crate(&sess, &lang_items, krate));
plugin::build::find_plugin_registrar(
sess.diagnostic(), krate)));
- let (freevars, capture_modes) =
- time(time_passes, "freevar finding", (), |_|
- freevars::annotate_freevars(&def_map, krate));
-
let region_map = time(time_passes, "region resolution", (), |_|
middle::region::resolve_crate(&sess, krate));
let stability_index = time(time_passes, "stability index", (), |_|
stability::Index::build(krate));
+ time(time_passes, "static item recursion checking", (), |_|
+ middle::check_static_recursion::check_crate(&sess, krate, &def_map, &ast_map));
+
let ty_cx = ty::mk_ctxt(sess,
type_arena,
def_map,
named_region_map,
ast_map,
freevars,
- capture_modes,
+ capture_mode_map,
region_map,
lang_items,
stability_index);
// will be found in crate attributes.
let mut base = session.opts.crate_types.clone();
if base.len() == 0 {
- base.extend(attr_types.move_iter());
+ base.extend(attr_types.into_iter());
if base.len() == 0 {
base.push(link::default_output_for_target(session));
}
base.dedup();
}
- base.move_iter().filter(|crate_type| {
+ base.into_iter().filter(|crate_type| {
let res = !link::invalid_output_for_target(session, *crate_type);
if !res {
");
fn sort_lints(lints: Vec<(&'static Lint, bool)>) -> Vec<&'static Lint> {
- let mut lints: Vec<_> = lints.move_iter().map(|(x, _)| x).collect();
+ let mut lints: Vec<_> = lints.into_iter().map(|(x, _)| x).collect();
lints.sort_by(|x: &&Lint, y: &&Lint| {
match x.default_level.cmp(&y.default_level) {
// The sort doesn't case-fold but it's doubtful we care.
fn sort_lint_groups(lints: Vec<(&'static str, Vec<lint::LintId>, bool)>)
-> Vec<(&'static str, Vec<lint::LintId>)> {
- let mut lints: Vec<_> = lints.move_iter().map(|(x, y, _)| (x, y)).collect();
+ let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect();
lints.sort_by(|&(x, _): &(&'static str, Vec<lint::LintId>),
&(y, _): &(&'static str, Vec<lint::LintId>)| {
x.cmp(&y)
println!(" {} {:7.7s} {}", padded("----"), "-------", "-------");
let print_lints = |lints: Vec<&Lint>| {
- for lint in lints.move_iter() {
+ for lint in lints.into_iter() {
let name = lint.name_lower().replace("_", "-");
println!(" {} {:7.7s} {}",
padded(name.as_slice()), lint.default_level.as_str(), lint.desc);
println!(" {} {}", padded("----"), "---------");
let print_lint_groups = |lints: Vec<(&'static str, Vec<lint::LintId>)>| {
- for (name, to) in lints.move_iter() {
+ for (name, to) in lints.into_iter() {
let name = name.chars().map(|x| x.to_lowercase())
.collect::<String>().replace("_", "-");
- let desc = to.move_iter().map(|x| x.as_str()).collect::<Vec<String>>().connect(", ");
+ let desc = to.into_iter().map(|x| x.as_str()).collect::<Vec<String>>().connect(", ");
println!(" {} {}",
padded(name.as_slice()), desc);
}
&sess.parse_sess)
}
};
- result.move_iter().collect()
+ result.into_iter().collect()
}
pub fn early_error(msg: &str) -> ! {
-> NodesMatchingUII<'a, 'ast> {
match *self {
ItemViaNode(node_id) =>
- NodesMatchingDirect(Some(node_id).move_iter()),
+ NodesMatchingDirect(Some(node_id).into_iter()),
ItemViaPath(ref parts) =>
NodesMatchingSuffix(map.nodes_matching_suffix(parts.as_slice())),
}
use driver::config;
use driver::driver;
-use front;
use metadata::cstore::CStore;
use metadata::filesearch;
use lint;
use syntax::codemap::Span;
use syntax::diagnostic;
use syntax::diagnostics;
+use syntax::feature_gate;
use syntax::parse;
use syntax::parse::token;
use syntax::parse::ParseSess;
pub working_dir: Path,
pub lint_store: RefCell<lint::LintStore>,
pub lints: RefCell<NodeMap<Vec<(lint::LintId, codemap::Span, String)>>>,
- pub node_id: Cell<ast::NodeId>,
pub crate_types: RefCell<Vec<config::CrateType>>,
pub crate_metadata: RefCell<Vec<String>>,
- pub features: front::feature_gate::Features,
+ pub features: RefCell<feature_gate::Features>,
/// The maximum recursion limit for potentially infinitely recursive
/// operations such as auto-dereference and monomorphization.
lints.insert(id, vec!((lint_id, sp, msg)));
}
pub fn next_node_id(&self) -> ast::NodeId {
- self.reserve_node_ids(1)
+ self.parse_sess.next_node_id()
}
pub fn reserve_node_ids(&self, count: ast::NodeId) -> ast::NodeId {
- let v = self.node_id.get();
-
- match v.checked_add(&count) {
- Some(next) => { self.node_id.set(next); }
- None => self.bug("Input too large, ran out of node ids!")
- }
-
- v
+ self.parse_sess.reserve_node_ids(count)
}
pub fn diagnostic<'a>(&'a self) -> &'a diagnostic::SpanHandler {
&self.parse_sess.span_diagnostic
working_dir: os::getcwd(),
lint_store: RefCell::new(lint::LintStore::new()),
lints: RefCell::new(NodeMap::new()),
- node_id: Cell::new(1),
crate_types: RefCell::new(Vec::new()),
crate_metadata: RefCell::new(Vec::new()),
- features: front::feature_gate::Features::new(),
+ features: RefCell::new(feature_gate::Features::new()),
recursion_limit: Cell::new(64),
};
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use syntax::fold::Folder;
-use syntax::{ast, fold, attr};
-use syntax::codemap::Spanned;
-use syntax::ptr::P;
-
-/// A folder that strips out items that do not belong in the current
-/// configuration.
-struct Context<'a> {
- in_cfg: |attrs: &[ast::Attribute]|: 'a -> bool,
-}
-
-// Support conditional compilation by transforming the AST, stripping out
-// any items that do not belong in the current configuration
-pub fn strip_unconfigured_items(krate: ast::Crate) -> ast::Crate {
- let config = krate.config.clone();
- strip_items(krate, |attrs| in_cfg(config.as_slice(), attrs))
-}
-
-impl<'a> fold::Folder for Context<'a> {
- fn fold_mod(&mut self, module: ast::Mod) -> ast::Mod {
- fold_mod(self, module)
- }
- fn fold_block(&mut self, block: P<ast::Block>) -> P<ast::Block> {
- fold_block(self, block)
- }
- fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
- fold_foreign_mod(self, foreign_mod)
- }
- fn fold_item_underscore(&mut self, item: ast::Item_) -> ast::Item_ {
- fold_item_underscore(self, item)
- }
- fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
- fold_expr(self, expr)
- }
- fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
- fold::noop_fold_mac(mac, self)
- }
-}
-
-pub fn strip_items(krate: ast::Crate,
- in_cfg: |attrs: &[ast::Attribute]| -> bool)
- -> ast::Crate {
- let mut ctxt = Context {
- in_cfg: in_cfg,
- };
- ctxt.fold_crate(krate)
-}
-
-fn filter_view_item(cx: &mut Context, view_item: ast::ViewItem) -> Option<ast::ViewItem> {
- if view_item_in_cfg(cx, &view_item) {
- Some(view_item)
- } else {
- None
- }
-}
-
-fn fold_mod(cx: &mut Context, ast::Mod {inner, view_items, items}: ast::Mod) -> ast::Mod {
- ast::Mod {
- inner: inner,
- view_items: view_items.move_iter().filter_map(|a| {
- filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
- }).collect(),
- items: items.move_iter().filter_map(|a| {
- if item_in_cfg(cx, &*a) {
- Some(cx.fold_item(a))
- } else {
- None
- }
- }).flat_map(|x| x.move_iter()).collect()
- }
-}
-
-fn filter_foreign_item(cx: &mut Context, item: P<ast::ForeignItem>)
- -> Option<P<ast::ForeignItem>> {
- if foreign_item_in_cfg(cx, &*item) {
- Some(item)
- } else {
- None
- }
-}
-
-fn fold_foreign_mod(cx: &mut Context, ast::ForeignMod {abi, view_items, items}: ast::ForeignMod)
- -> ast::ForeignMod {
- ast::ForeignMod {
- abi: abi,
- view_items: view_items.move_iter().filter_map(|a| {
- filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
- }).collect(),
- items: items.move_iter()
- .filter_map(|a| filter_foreign_item(cx, a))
- .collect()
- }
-}
-
-fn fold_item_underscore(cx: &mut Context, item: ast::Item_) -> ast::Item_ {
- let item = match item {
- ast::ItemImpl(a, b, c, impl_items) => {
- let impl_items = impl_items.move_iter()
- .filter(|ii| impl_item_in_cfg(cx, ii))
- .collect();
- ast::ItemImpl(a, b, c, impl_items)
- }
- ast::ItemTrait(a, b, c, methods) => {
- let methods = methods.move_iter()
- .filter(|m| trait_method_in_cfg(cx, m))
- .collect();
- ast::ItemTrait(a, b, c, methods)
- }
- ast::ItemStruct(def, generics) => {
- ast::ItemStruct(fold_struct(cx, def), generics)
- }
- ast::ItemEnum(def, generics) => {
- let mut variants = def.variants.move_iter().filter_map(|v| {
- if !(cx.in_cfg)(v.node.attrs.as_slice()) {
- None
- } else {
- Some(v.map(|Spanned {node: ast::Variant_ {id, name, attrs, kind,
- disr_expr, vis}, span}| {
- Spanned {
- node: ast::Variant_ {
- id: id,
- name: name,
- attrs: attrs,
- kind: match kind {
- ast::TupleVariantKind(..) => kind,
- ast::StructVariantKind(def) => {
- ast::StructVariantKind(fold_struct(cx, def))
- }
- },
- disr_expr: disr_expr,
- vis: vis
- },
- span: span
- }
- }))
- }
- });
- ast::ItemEnum(ast::EnumDef {
- variants: variants.collect(),
- }, generics)
- }
- item => item,
- };
-
- fold::noop_fold_item_underscore(item, cx)
-}
-
-fn fold_struct(cx: &mut Context, def: P<ast::StructDef>) -> P<ast::StructDef> {
- def.map(|ast::StructDef {fields, ctor_id, super_struct, is_virtual}| {
- ast::StructDef {
- fields: fields.move_iter().filter(|m| {
- (cx.in_cfg)(m.node.attrs.as_slice())
- }).collect(),
- ctor_id: ctor_id,
- super_struct: super_struct,
- is_virtual: is_virtual,
- }
- })
-}
-
-fn retain_stmt(cx: &mut Context, stmt: &ast::Stmt) -> bool {
- match stmt.node {
- ast::StmtDecl(ref decl, _) => {
- match decl.node {
- ast::DeclItem(ref item) => {
- item_in_cfg(cx, &**item)
- }
- _ => true
- }
- }
- _ => true
- }
-}
-
-fn fold_block(cx: &mut Context, b: P<ast::Block>) -> P<ast::Block> {
- b.map(|ast::Block {id, view_items, stmts, expr, rules, span}| {
- let resulting_stmts: Vec<P<ast::Stmt>> =
- stmts.move_iter().filter(|a| retain_stmt(cx, &**a)).collect();
- let resulting_stmts = resulting_stmts.move_iter()
- .flat_map(|stmt| cx.fold_stmt(stmt).move_iter())
- .collect();
- let filtered_view_items = view_items.move_iter().filter_map(|a| {
- filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
- }).collect();
- ast::Block {
- id: id,
- view_items: filtered_view_items,
- stmts: resulting_stmts,
- expr: expr.map(|x| cx.fold_expr(x)),
- rules: rules,
- span: span,
- }
- })
-}
-
-fn fold_expr(cx: &mut Context, expr: P<ast::Expr>) -> P<ast::Expr> {
- expr.map(|ast::Expr {id, span, node}| {
- fold::noop_fold_expr(ast::Expr {
- id: id,
- node: match node {
- ast::ExprMatch(m, arms) => {
- ast::ExprMatch(m, arms.move_iter()
- .filter(|a| (cx.in_cfg)(a.attrs.as_slice()))
- .collect())
- }
- _ => node
- },
- span: span
- }, cx)
- })
-}
-
-fn item_in_cfg(cx: &mut Context, item: &ast::Item) -> bool {
- return (cx.in_cfg)(item.attrs.as_slice());
-}
-
-fn foreign_item_in_cfg(cx: &mut Context, item: &ast::ForeignItem) -> bool {
- return (cx.in_cfg)(item.attrs.as_slice());
-}
-
-fn view_item_in_cfg(cx: &mut Context, item: &ast::ViewItem) -> bool {
- return (cx.in_cfg)(item.attrs.as_slice());
-}
-
-fn trait_method_in_cfg(cx: &mut Context, meth: &ast::TraitItem) -> bool {
- match *meth {
- ast::RequiredMethod(ref meth) => (cx.in_cfg)(meth.attrs.as_slice()),
- ast::ProvidedMethod(ref meth) => (cx.in_cfg)(meth.attrs.as_slice())
- }
-}
-
-fn impl_item_in_cfg(cx: &mut Context, impl_item: &ast::ImplItem) -> bool {
- match *impl_item {
- ast::MethodImplItem(ref meth) => (cx.in_cfg)(meth.attrs.as_slice()),
- }
-}
-
-// Determine if an item should be translated in the current crate
-// configuration based on the item's attributes
-fn in_cfg(cfg: &[P<ast::MetaItem>], attrs: &[ast::Attribute]) -> bool {
- attr::test_cfg(cfg, attrs.iter())
-}
-
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Feature gating
-//!
-//! This modules implements the gating necessary for preventing certain compiler
-//! features from being used by default. This module will crawl a pre-expanded
-//! AST to ensure that there are no features which are used that are not
-//! enabled.
-//!
-//! Features are enabled in programs via the crate-level attributes of
-//! `#![feature(...)]` with a comma-separated list of features.
-
-use lint;
-
-use syntax::abi::RustIntrinsic;
-use syntax::ast::NodeId;
-use syntax::ast;
-use syntax::attr;
-use syntax::attr::AttrMetaMethods;
-use syntax::codemap::Span;
-use syntax::visit;
-use syntax::visit::Visitor;
-use syntax::parse::token;
-
-use driver::session::Session;
-
-use std::cell::Cell;
-use std::slice;
-
-/// This is a list of all known features since the beginning of time. This list
-/// can never shrink, it may only be expanded (in order to prevent old programs
-/// from failing to compile). The status of each feature may change, however.
-static KNOWN_FEATURES: &'static [(&'static str, Status)] = &[
- ("globs", Active),
- ("macro_rules", Active),
- ("struct_variant", Active),
- ("once_fns", Active),
- ("asm", Active),
- ("managed_boxes", Active),
- ("non_ascii_idents", Active),
- ("thread_local", Active),
- ("link_args", Active),
- ("phase", Active),
- ("plugin_registrar", Active),
- ("log_syntax", Active),
- ("trace_macros", Active),
- ("concat_idents", Active),
- ("unsafe_destructor", Active),
- ("intrinsics", Active),
- ("lang_items", Active),
-
- ("simd", Active),
- ("default_type_params", Active),
- ("quote", Active),
- ("linkage", Active),
- ("struct_inherit", Active),
- ("overloaded_calls", Active),
- ("unboxed_closure_sugar", Active),
-
- ("quad_precision_float", Removed),
-
- ("rustc_diagnostic_macros", Active),
- ("unboxed_closures", Active),
- ("import_shadowing", Active),
- ("advanced_slice_patterns", Active),
- ("tuple_indexing", Active),
-
- // if you change this list without updating src/doc/rust.md, cmr will be sad
-
- // A temporary feature gate used to enable parser extensions needed
- // to bootstrap fix for #5723.
- ("issue_5723_bootstrap", Accepted),
-
- // These are used to test this portion of the compiler, they don't actually
- // mean anything
- ("test_accepted_feature", Accepted),
- ("test_removed_feature", Removed),
-];
-
-enum Status {
- /// Represents an active feature that is currently being implemented or
- /// currently being considered for addition/removal.
- Active,
-
- /// Represents a feature which has since been removed (it was once Active)
- Removed,
-
- /// This language feature has since been Accepted (it was once Active)
- Accepted,
-}
-
-/// A set of features to be used by later passes.
-pub struct Features {
- pub default_type_params: Cell<bool>,
- pub overloaded_calls: Cell<bool>,
- pub rustc_diagnostic_macros: Cell<bool>,
- pub import_shadowing: Cell<bool>,
-}
-
-impl Features {
- pub fn new() -> Features {
- Features {
- default_type_params: Cell::new(false),
- overloaded_calls: Cell::new(false),
- rustc_diagnostic_macros: Cell::new(false),
- import_shadowing: Cell::new(false),
- }
- }
-}
-
-struct Context<'a> {
- features: Vec<&'static str>,
- sess: &'a Session,
-}
-
-impl<'a> Context<'a> {
- fn gate_feature(&self, feature: &str, span: Span, explain: &str) {
- if !self.has_feature(feature) {
- self.sess.span_err(span, explain);
- self.sess.span_note(span, format!("add #![feature({})] to the \
- crate attributes to enable",
- feature).as_slice());
- }
- }
-
- fn gate_box(&self, span: Span) {
- self.gate_feature("managed_boxes", span,
- "The managed box syntax is being replaced by the \
- `std::gc::Gc` and `std::rc::Rc` types. Equivalent \
- functionality to managed trait objects will be \
- implemented but is currently missing.");
- }
-
- fn has_feature(&self, feature: &str) -> bool {
- self.features.iter().any(|n| n.as_slice() == feature)
- }
-}
-
-impl<'a, 'v> Visitor<'v> for Context<'a> {
- fn visit_ident(&mut self, sp: Span, id: ast::Ident) {
- if !token::get_ident(id).get().is_ascii() {
- self.gate_feature("non_ascii_idents", sp,
- "non-ascii idents are not fully supported.");
- }
- }
-
- fn visit_view_item(&mut self, i: &ast::ViewItem) {
- match i.node {
- ast::ViewItemUse(ref path) => {
- match path.node {
- ast::ViewPathGlob(..) => {
- self.gate_feature("globs", path.span,
- "glob import statements are \
- experimental and possibly buggy");
- }
- _ => {}
- }
- }
- ast::ViewItemExternCrate(..) => {
- for attr in i.attrs.iter() {
- if attr.name().get() == "phase"{
- self.gate_feature("phase", attr.span,
- "compile time crate loading is \
- experimental and possibly buggy");
- }
- }
- }
- }
- visit::walk_view_item(self, i)
- }
-
- fn visit_item(&mut self, i: &ast::Item) {
- for attr in i.attrs.iter() {
- if attr.name().equiv(&("thread_local")) {
- self.gate_feature("thread_local", i.span,
- "`#[thread_local]` is an experimental feature, and does not \
- currently handle destructors. There is no corresponding \
- `#[task_local]` mapping to the task model");
- }
- }
- match i.node {
- ast::ItemEnum(ref def, _) => {
- for variant in def.variants.iter() {
- match variant.node.kind {
- ast::StructVariantKind(..) => {
- self.gate_feature("struct_variant", variant.span,
- "enum struct variants are \
- experimental and possibly buggy");
- }
- _ => {}
- }
- }
- }
-
- ast::ItemForeignMod(ref foreign_module) => {
- if attr::contains_name(i.attrs.as_slice(), "link_args") {
- self.gate_feature("link_args", i.span,
- "the `link_args` attribute is not portable \
- across platforms, it is recommended to \
- use `#[link(name = \"foo\")]` instead")
- }
- if foreign_module.abi == RustIntrinsic {
- self.gate_feature("intrinsics",
- i.span,
- "intrinsics are subject to change")
- }
- }
-
- ast::ItemFn(..) => {
- if attr::contains_name(i.attrs.as_slice(), "plugin_registrar") {
- self.gate_feature("plugin_registrar", i.span,
- "compiler plugins are experimental and possibly buggy");
- }
- }
-
- ast::ItemStruct(ref struct_definition, _) => {
- if attr::contains_name(i.attrs.as_slice(), "simd") {
- self.gate_feature("simd", i.span,
- "SIMD types are experimental and possibly buggy");
- }
- match struct_definition.super_struct {
- Some(ref path) => self.gate_feature("struct_inherit", path.span,
- "struct inheritance is experimental \
- and possibly buggy"),
- None => {}
- }
- if struct_definition.is_virtual {
- self.gate_feature("struct_inherit", i.span,
- "struct inheritance (`virtual` keyword) is \
- experimental and possibly buggy");
- }
- }
-
- ast::ItemImpl(..) => {
- if attr::contains_name(i.attrs.as_slice(),
- "unsafe_destructor") {
- self.gate_feature("unsafe_destructor",
- i.span,
- "`#[unsafe_destructor]` allows too \
- many unsafe patterns and may be \
- removed in the future");
- }
- }
-
- _ => {}
- }
-
- visit::walk_item(self, i);
- }
-
- fn visit_mac(&mut self, macro: &ast::Mac) {
- let ast::MacInvocTT(ref path, _, _) = macro.node;
- let id = path.segments.last().unwrap().identifier;
- let quotes = ["quote_tokens", "quote_expr", "quote_ty",
- "quote_item", "quote_pat", "quote_stmt"];
- let msg = " is not stable enough for use and are subject to change";
-
-
- if id == token::str_to_ident("macro_rules") {
- self.gate_feature("macro_rules", path.span, "macro definitions are \
- not stable enough for use and are subject to change");
- }
-
- else if id == token::str_to_ident("asm") {
- self.gate_feature("asm", path.span, "inline assembly is not \
- stable enough for use and is subject to change");
- }
-
- else if id == token::str_to_ident("log_syntax") {
- self.gate_feature("log_syntax", path.span, "`log_syntax!` is not \
- stable enough for use and is subject to change");
- }
-
- else if id == token::str_to_ident("trace_macros") {
- self.gate_feature("trace_macros", path.span, "`trace_macros` is not \
- stable enough for use and is subject to change");
- }
-
- else if id == token::str_to_ident("concat_idents") {
- self.gate_feature("concat_idents", path.span, "`concat_idents` is not \
- stable enough for use and is subject to change");
- }
-
- else {
-            for &quote in quotes.iter() {
- if id == token::str_to_ident(quote) {
- self.gate_feature("quote",
- path.span,
- format!("{}{}", quote, msg).as_slice());
- }
- }
- }
- }
-
- fn visit_foreign_item(&mut self, i: &ast::ForeignItem) {
- if attr::contains_name(i.attrs.as_slice(), "linkage") {
- self.gate_feature("linkage", i.span,
- "the `linkage` attribute is experimental \
- and not portable across platforms")
- }
- visit::walk_foreign_item(self, i)
- }
-
- fn visit_ty(&mut self, t: &ast::Ty) {
- match t.node {
- ast::TyClosure(ref closure) if closure.onceness == ast::Once => {
- self.gate_feature("once_fns", t.span,
- "once functions are \
- experimental and likely to be removed");
-
- },
- ast::TyBox(_) => { self.gate_box(t.span); }
- ast::TyUnboxedFn(..) => {
- self.gate_feature("unboxed_closure_sugar",
- t.span,
- "unboxed closure trait sugar is experimental");
- }
- _ => {}
- }
-
- visit::walk_ty(self, t);
- }
-
- fn visit_expr(&mut self, e: &ast::Expr) {
- match e.node {
- ast::ExprUnary(ast::UnBox, _) => {
- self.gate_box(e.span);
- }
- ast::ExprUnboxedFn(..) => {
- self.gate_feature("unboxed_closures",
- e.span,
- "unboxed closures are a work-in-progress \
- feature with known bugs");
- }
- ast::ExprTupField(..) => {
- self.gate_feature("tuple_indexing",
- e.span,
- "tuple indexing is experimental");
- }
- _ => {}
- }
- visit::walk_expr(self, e);
- }
-
- fn visit_generics(&mut self, generics: &ast::Generics) {
- for type_parameter in generics.ty_params.iter() {
- match type_parameter.default {
- Some(ref ty) => {
- self.gate_feature("default_type_params", ty.span,
- "default type parameters are \
- experimental and possibly buggy");
- }
- None => {}
- }
- }
- visit::walk_generics(self, generics);
- }
-
- fn visit_attribute(&mut self, attr: &ast::Attribute) {
- if attr::contains_name(slice::ref_slice(attr), "lang") {
- self.gate_feature("lang_items",
- attr.span,
- "language items are subject to change");
- }
- }
-
- fn visit_pat(&mut self, pattern: &ast::Pat) {
- match pattern.node {
- ast::PatVec(_, Some(_), ref last) if !last.is_empty() => {
- self.gate_feature("advanced_slice_patterns",
- pattern.span,
- "multiple-element slice matches anywhere \
- but at the end of a slice (e.g. \
- `[0, ..xs, 0]` are experimental")
- }
- _ => {}
- }
- visit::walk_pat(self, pattern)
- }
-
- fn visit_fn(&mut self,
- fn_kind: visit::FnKind<'v>,
- fn_decl: &'v ast::FnDecl,
- block: &'v ast::Block,
- span: Span,
- _: NodeId) {
- match fn_kind {
- visit::FkItemFn(_, _, _, abi) if abi == RustIntrinsic => {
- self.gate_feature("intrinsics",
- span,
- "intrinsics are subject to change")
- }
- _ => {}
- }
- visit::walk_fn(self, fn_kind, fn_decl, block, span);
- }
-}
-
-pub fn check_crate(sess: &Session, krate: &ast::Crate) {
- let mut cx = Context {
- features: Vec::new(),
- sess: sess,
- };
-
- for attr in krate.attrs.iter() {
- if !attr.check_name("feature") {
- continue
- }
-
- match attr.meta_item_list() {
- None => {
- sess.span_err(attr.span, "malformed feature attribute, \
- expected #![feature(...)]");
- }
- Some(list) => {
- for mi in list.iter() {
- let name = match mi.node {
- ast::MetaWord(ref word) => (*word).clone(),
- _ => {
- sess.span_err(mi.span,
- "malformed feature, expected just \
- one word");
- continue
- }
- };
- match KNOWN_FEATURES.iter()
- .find(|& &(n, _)| name.equiv(&n)) {
- Some(&(name, Active)) => { cx.features.push(name); }
- Some(&(_, Removed)) => {
- sess.span_err(mi.span, "feature has been removed");
- }
- Some(&(_, Accepted)) => {
- sess.span_warn(mi.span, "feature has been added to Rust, \
- directive not necessary");
- }
- None => {
- sess.add_lint(lint::builtin::UNKNOWN_FEATURES,
- ast::CRATE_NODE_ID,
- mi.span,
- "unknown feature".to_string());
- }
- }
- }
- }
- }
- }
-
- visit::walk_crate(&mut cx, krate);
-
- sess.abort_if_errors();
-
- sess.features.default_type_params.set(cx.has_feature("default_type_params"));
- sess.features.overloaded_calls.set(cx.has_feature("overloaded_calls"));
- sess.features.rustc_diagnostic_macros.set(cx.has_feature("rustc_diagnostic_macros"));
- sess.features.import_shadowing.set(cx.has_feature("import_shadowing"));
-}
-
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Span debugger
-//!
-//! This module shows spans for all expressions in the crate
-//! to help with compiler debugging.
-
-use syntax::ast;
-use syntax::visit;
-use syntax::visit::Visitor;
-
-use driver::session::Session;
-
-struct ShowSpanVisitor<'a> {
- sess: &'a Session
-}
-
-impl<'a, 'v> Visitor<'v> for ShowSpanVisitor<'a> {
- fn visit_expr(&mut self, e: &ast::Expr) {
- self.sess.span_note(e.span, "expression");
- visit::walk_expr(self, e);
- }
-
- fn visit_mac(&mut self, macro: &ast::Mac) {
- visit::walk_mac(self, macro);
- }
-}
-
-pub fn run(sess: &Session, krate: &ast::Crate) {
- let mut v = ShowSpanVisitor { sess: sess };
- visit::walk_crate(&mut v, krate);
-}
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use driver::config;
-use driver::session::Session;
-
-use syntax::ast;
-use syntax::attr;
-use syntax::codemap::DUMMY_SP;
-use syntax::codemap;
-use syntax::fold::Folder;
-use syntax::fold;
-use syntax::owned_slice::OwnedSlice;
-use syntax::parse::token::InternedString;
-use syntax::parse::token::special_idents;
-use syntax::parse::token;
-use syntax::ptr::P;
-use syntax::util::small_vector::SmallVector;
-
-use std::mem;
-
-pub fn maybe_inject_crates_ref(sess: &Session, krate: ast::Crate)
- -> ast::Crate {
- if use_std(&krate) {
- inject_crates_ref(sess, krate)
- } else {
- krate
- }
-}
-
-pub fn maybe_inject_prelude(sess: &Session, krate: ast::Crate) -> ast::Crate {
- if use_std(&krate) {
- inject_prelude(sess, krate)
- } else {
- krate
- }
-}
-
-fn use_std(krate: &ast::Crate) -> bool {
- !attr::contains_name(krate.attrs.as_slice(), "no_std")
-}
-
-fn use_start(krate: &ast::Crate) -> bool {
- !attr::contains_name(krate.attrs.as_slice(), "no_start")
-}
-
-fn no_prelude(attrs: &[ast::Attribute]) -> bool {
- attr::contains_name(attrs, "no_implicit_prelude")
-}
-
-struct StandardLibraryInjector<'a> {
- sess: &'a Session,
-}
-
-impl<'a> fold::Folder for StandardLibraryInjector<'a> {
- fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
-
- // The name to use in `extern crate "name" as std;`
- let actual_crate_name = match self.sess.opts.alt_std_name {
- Some(ref s) => token::intern_and_get_ident(s.as_slice()),
- None => token::intern_and_get_ident("std"),
- };
-
- let mut vis = vec!(ast::ViewItem {
- node: ast::ViewItemExternCrate(token::str_to_ident("std"),
- Some((actual_crate_name, ast::CookedStr)),
- ast::DUMMY_NODE_ID),
- attrs: vec!(
- attr::mk_attr_outer(attr::mk_attr_id(), attr::mk_list_item(
- InternedString::new("phase"),
- vec!(
- attr::mk_word_item(InternedString::new("plugin")),
- attr::mk_word_item(InternedString::new("link")
- ))))),
- vis: ast::Inherited,
- span: DUMMY_SP
- });
-
- let any_exe = self.sess.crate_types.borrow().iter().any(|ty| {
- *ty == config::CrateTypeExecutable
- });
- if use_start(&krate) && any_exe {
- let visible_rt_name = "rt";
- let actual_rt_name = "native";
- // Gensym the ident so it can't be named
- let visible_rt_name = token::gensym_ident(visible_rt_name);
- let actual_rt_name = token::intern_and_get_ident(actual_rt_name);
-
- vis.push(ast::ViewItem {
- node: ast::ViewItemExternCrate(visible_rt_name,
- Some((actual_rt_name, ast::CookedStr)),
- ast::DUMMY_NODE_ID),
- attrs: Vec::new(),
- vis: ast::Inherited,
- span: DUMMY_SP
- });
- }
-
- // `extern crate` must be precede `use` items
- mem::swap(&mut vis, &mut krate.module.view_items);
- krate.module.view_items.push_all_move(vis);
-
- // don't add #![no_std] here, that will block the prelude injection later.
- // Add it during the prelude injection instead.
-
- // Add #![feature(phase)] here, because we use #[phase] on extern crate std.
- let feat_phase_attr = attr::mk_attr_inner(attr::mk_attr_id(),
- attr::mk_list_item(
- InternedString::new("feature"),
- vec![attr::mk_word_item(InternedString::new("phase"))],
- ));
- // std_inject runs after feature checking so manually mark this attr
- attr::mark_used(&feat_phase_attr);
- krate.attrs.push(feat_phase_attr);
-
- krate
- }
-}
-
-fn inject_crates_ref(sess: &Session, krate: ast::Crate) -> ast::Crate {
- let mut fold = StandardLibraryInjector {
- sess: sess,
- };
- fold.fold_crate(krate)
-}
-
-struct PreludeInjector<'a>;
-
-
-impl<'a> fold::Folder for PreludeInjector<'a> {
- fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
- // Add #![no_std] here, so we don't re-inject when compiling pretty-printed source.
- // This must happen here and not in StandardLibraryInjector because this
- // fold happens second.
-
- let no_std_attr = attr::mk_attr_inner(attr::mk_attr_id(),
- attr::mk_word_item(InternedString::new("no_std")));
- // std_inject runs after feature checking so manually mark this attr
- attr::mark_used(&no_std_attr);
- krate.attrs.push(no_std_attr);
-
- if !no_prelude(krate.attrs.as_slice()) {
- // only add `use std::prelude::*;` if there wasn't a
- // `#![no_implicit_prelude]` at the crate level.
- // fold_mod() will insert glob path.
- let globs_attr = attr::mk_attr_inner(attr::mk_attr_id(),
- attr::mk_list_item(
- InternedString::new("feature"),
- vec!(
- attr::mk_word_item(InternedString::new("globs")),
- )));
- // std_inject runs after feature checking so manually mark this attr
- attr::mark_used(&globs_attr);
- krate.attrs.push(globs_attr);
-
- krate.module = self.fold_mod(krate.module);
- }
- krate
- }
-
- fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
- if !no_prelude(item.attrs.as_slice()) {
- // only recur if there wasn't `#![no_implicit_prelude]`
- // on this item, i.e. this means that the prelude is not
- // implicitly imported though the whole subtree
- fold::noop_fold_item(item, self)
- } else {
- SmallVector::one(item)
- }
- }
-
- fn fold_mod(&mut self, ast::Mod {inner, view_items, items}: ast::Mod) -> ast::Mod {
- let prelude_path = ast::Path {
- span: DUMMY_SP,
- global: false,
- segments: vec!(
- ast::PathSegment {
- identifier: token::str_to_ident("std"),
- lifetimes: Vec::new(),
- types: OwnedSlice::empty(),
- },
- ast::PathSegment {
- identifier: token::str_to_ident("prelude"),
- lifetimes: Vec::new(),
- types: OwnedSlice::empty(),
- }),
- };
-
- let (crates, uses) = view_items.partitioned(|x| {
- match x.node {
- ast::ViewItemExternCrate(..) => true,
- _ => false,
- }
- });
-
- // add prelude after any `extern crate` but before any `use`
- let mut view_items = crates;
- let vp = P(codemap::dummy_spanned(ast::ViewPathGlob(prelude_path, ast::DUMMY_NODE_ID)));
- view_items.push(ast::ViewItem {
- node: ast::ViewItemUse(vp),
- attrs: vec![ast::Attribute {
- span: DUMMY_SP,
- node: ast::Attribute_ {
- id: attr::mk_attr_id(),
- style: ast::AttrOuter,
- value: P(ast::MetaItem {
- span: DUMMY_SP,
- node: ast::MetaWord(token::get_name(
- special_idents::prelude_import.name)),
- }),
- is_sugared_doc: false,
- },
- }],
- vis: ast::Inherited,
- span: DUMMY_SP,
- });
- view_items.push_all_move(uses);
-
- fold::noop_fold_mod(ast::Mod {
- inner: inner,
- view_items: view_items,
- items: items
- }, self)
- }
-}
-
-fn inject_prelude(_: &Session, krate: ast::Crate) -> ast::Crate {
- let mut fold = PreludeInjector;
- fold.fold_crate(krate)
-}
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Code that generates a test runner to run all the tests in a crate
-
-#![allow(dead_code)]
-#![allow(unused_imports)]
-
-use driver::session::Session;
-use front::config;
-
-use std::slice;
-use std::mem;
-use std::vec;
-use syntax::{ast, ast_util};
-use syntax::ast_util::*;
-use syntax::attr::AttrMetaMethods;
-use syntax::attr;
-use syntax::codemap::{DUMMY_SP, Span, ExpnInfo, NameAndSpan, MacroAttribute};
-use syntax::codemap;
-use syntax::ext::base::ExtCtxt;
-use syntax::ext::build::AstBuilder;
-use syntax::ext::expand::ExpansionConfig;
-use syntax::fold::{Folder, MoveMap};
-use syntax::fold;
-use syntax::owned_slice::OwnedSlice;
-use syntax::parse::token::InternedString;
-use syntax::parse::token;
-use syntax::print::pprust;
-use syntax::ptr::P;
-use syntax::util::small_vector::SmallVector;
-
-struct Test {
- span: Span,
- path: Vec<ast::Ident> ,
- bench: bool,
- ignore: bool,
- should_fail: bool
-}
-
-struct TestCtxt<'a> {
- sess: &'a Session,
- path: Vec<ast::Ident>,
- ext_cx: ExtCtxt<'a>,
- testfns: Vec<Test>,
- reexport_test_harness_main: Option<InternedString>,
- is_test_crate: bool,
- config: ast::CrateConfig,
-
- // top-level re-export submodule, filled out after folding is finished
- toplevel_reexport: Option<ast::Ident>,
-}
-
-// Traverse the crate, collecting all the test functions, eliding any
-// existing main functions, and synthesizing a main test harness
-pub fn modify_for_testing(sess: &Session,
- krate: ast::Crate) -> ast::Crate {
- // We generate the test harness when building in the 'test'
- // configuration, either with the '--test' or '--cfg test'
- // command line options.
- let should_test = attr::contains_name(krate.config.as_slice(), "test");
-
- // Check for #[reexport_test_harness_main = "some_name"] which
- // creates a `use some_name = __test::main;`. This needs to be
- // unconditional, so that the attribute is still marked as used in
- // non-test builds.
- let reexport_test_harness_main =
- attr::first_attr_value_str_by_name(krate.attrs.as_slice(),
- "reexport_test_harness_main");
-
- if should_test {
- generate_test_harness(sess, reexport_test_harness_main, krate)
- } else {
- strip_test_functions(krate)
- }
-}
-
-struct TestHarnessGenerator<'a> {
- cx: TestCtxt<'a>,
- tests: Vec<ast::Ident>,
-
- // submodule name, gensym'd identifier for re-exports
- tested_submods: Vec<(ast::Ident, ast::Ident)>,
-}
-
-impl<'a> fold::Folder for TestHarnessGenerator<'a> {
- fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
- let mut folded = fold::noop_fold_crate(c, self);
-
- // Add a special __test module to the crate that will contain code
- // generated for the test harness
- let (mod_, reexport) = mk_test_module(&mut self.cx);
- folded.module.items.push(mod_);
- match reexport {
- Some(re) => folded.module.view_items.push(re),
- None => {}
- }
- folded
- }
-
- fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
- self.cx.path.push(i.ident);
- debug!("current path: {}",
- ast_util::path_name_i(self.cx.path.as_slice()));
-
- if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
- match i.node {
- ast::ItemFn(_, ast::UnsafeFn, _, _, _) => {
- let sess = self.cx.sess;
- sess.span_fatal(i.span,
- "unsafe functions cannot be used for \
- tests");
- }
- _ => {
- debug!("this is a test function");
- let test = Test {
- span: i.span,
- path: self.cx.path.clone(),
- bench: is_bench_fn(&self.cx, &*i),
- ignore: is_ignored(&self.cx, &*i),
- should_fail: should_fail(&*i)
- };
- self.cx.testfns.push(test);
- self.tests.push(i.ident);
- // debug!("have {} test/bench functions",
- // cx.testfns.len());
- }
- }
- }
-
- // We don't want to recurse into anything other than mods, since
- // mods or tests inside of functions will break things
- let res = match i.node {
- ast::ItemMod(..) => fold::noop_fold_item(i, self),
- _ => SmallVector::one(i),
- };
- self.cx.path.pop();
- res
- }
-
- fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
- let tests = mem::replace(&mut self.tests, Vec::new());
- let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
- let mut mod_folded = fold::noop_fold_mod(m, self);
- let tests = mem::replace(&mut self.tests, tests);
- let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
-
- // Remove any #[main] from the AST so it doesn't clash with
- // the one we're going to add. Only if compiling an executable.
-
- mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
- item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
- ast::Item {
- id: id,
- ident: ident,
- attrs: attrs.move_iter().filter_map(|attr| {
- if !attr.check_name("main") {
- Some(attr)
- } else {
- None
- }
- }).collect(),
- node: node,
- vis: vis,
- span: span
- }
- })
- });
-
- if !tests.is_empty() || !tested_submods.is_empty() {
- let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
- mod_folded.items.push(it);
-
- if !self.cx.path.is_empty() {
- self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
- } else {
- debug!("pushing nothing, sym: {}", sym);
- self.cx.toplevel_reexport = Some(sym);
- }
- }
-
- mod_folded
- }
-}
-
-fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
- tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
- let mut view_items = Vec::new();
- let super_ = token::str_to_ident("super");
-
- view_items.extend(tests.move_iter().map(|r| {
- cx.ext_cx.view_use_simple(DUMMY_SP, ast::Public,
- cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
- }));
- view_items.extend(tested_submods.move_iter().map(|(r, sym)| {
- let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
- cx.ext_cx.view_use_simple_(DUMMY_SP, ast::Public, r, path)
- }));
-
- let reexport_mod = ast::Mod {
- inner: DUMMY_SP,
- view_items: view_items,
- items: Vec::new(),
- };
-
- let sym = token::gensym_ident("__test_reexports");
- let it = P(ast::Item {
- ident: sym.clone(),
- attrs: Vec::new(),
- id: ast::DUMMY_NODE_ID,
- node: ast::ItemMod(reexport_mod),
- vis: ast::Public,
- span: DUMMY_SP,
- });
-
- (it, sym)
-}
-
-fn generate_test_harness(sess: &Session,
- reexport_test_harness_main: Option<InternedString>,
- krate: ast::Crate) -> ast::Crate {
- let mut cx: TestCtxt = TestCtxt {
- sess: sess,
- ext_cx: ExtCtxt::new(&sess.parse_sess, sess.opts.cfg.clone(),
- ExpansionConfig {
- deriving_hash_type_parameter: false,
- crate_name: "test".to_string(),
- }),
- path: Vec::new(),
- testfns: Vec::new(),
- reexport_test_harness_main: reexport_test_harness_main,
- is_test_crate: is_test_crate(&krate),
- config: krate.config.clone(),
- toplevel_reexport: None,
- };
-
- cx.ext_cx.bt_push(ExpnInfo {
- call_site: DUMMY_SP,
- callee: NameAndSpan {
- name: "test".to_string(),
- format: MacroAttribute,
- span: None
- }
- });
-
- let mut fold = TestHarnessGenerator {
- cx: cx,
- tests: Vec::new(),
- tested_submods: Vec::new(),
- };
- let res = fold.fold_crate(krate);
- fold.cx.ext_cx.bt_pop();
- return res;
-}
-
-fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
- // When not compiling with --test we should not compile the
- // #[test] functions
- config::strip_items(krate, |attrs| {
- !attr::contains_name(attrs.as_slice(), "test") &&
- !attr::contains_name(attrs.as_slice(), "bench")
- })
-}
-
-fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
- let has_test_attr = attr::contains_name(i.attrs.as_slice(), "test");
-
- fn has_test_signature(i: &ast::Item) -> bool {
- match &i.node {
- &ast::ItemFn(ref decl, _, _, ref generics, _) => {
- let no_output = match decl.output.node {
- ast::TyNil => true,
- _ => false
- };
- decl.inputs.is_empty()
- && no_output
- && !generics.is_parameterized()
- }
- _ => false
- }
- }
-
- if has_test_attr && !has_test_signature(i) {
- let sess = cx.sess;
- sess.span_err(
- i.span,
- "functions used as tests must have signature fn() -> ()."
- );
- }
-
- return has_test_attr && has_test_signature(i);
-}
-
-fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
- let has_bench_attr = attr::contains_name(i.attrs.as_slice(), "bench");
-
- fn has_test_signature(i: &ast::Item) -> bool {
- match i.node {
- ast::ItemFn(ref decl, _, _, ref generics, _) => {
- let input_cnt = decl.inputs.len();
- let no_output = match decl.output.node {
- ast::TyNil => true,
- _ => false
- };
- let tparm_cnt = generics.ty_params.len();
- // NB: inadequate check, but we're running
- // well before resolve, can't get too deep.
- input_cnt == 1u
- && no_output && tparm_cnt == 0u
- }
- _ => false
- }
- }
-
- if has_bench_attr && !has_test_signature(i) {
- let sess = cx.sess;
- sess.span_err(i.span, "functions used as benches must have signature \
- `fn(&mut Bencher) -> ()`");
- }
-
- return has_bench_attr && has_test_signature(i);
-}
-
-fn is_ignored(cx: &TestCtxt, i: &ast::Item) -> bool {
- i.attrs.iter().any(|attr| {
- // check ignore(cfg(foo, bar))
- attr.check_name("ignore") && match attr.meta_item_list() {
- Some(ref cfgs) => {
- attr::test_cfg(cx.config.as_slice(), cfgs.iter())
- }
- None => true
- }
- })
-}
-
-fn should_fail(i: &ast::Item) -> bool {
- attr::contains_name(i.attrs.as_slice(), "should_fail")
-}
-
-/*
-
-We're going to be building a module that looks more or less like:
-
-mod __test {
- extern crate test (name = "test", vers = "...");
- fn main() {
- test::test_main_static(::os::args().as_slice(), tests)
- }
-
- static tests : &'static [test::TestDescAndFn] = &[
- ... the list of tests in the crate ...
- ];
-}
-
-*/
-
-fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
- let id_test = token::str_to_ident("test");
- let (vi, vis) = if cx.is_test_crate {
- (ast::ViewItemUse(
- P(nospan(ast::ViewPathSimple(id_test,
- path_node(vec!(id_test)),
- ast::DUMMY_NODE_ID)))),
- ast::Public)
- } else {
- (ast::ViewItemExternCrate(id_test, None, ast::DUMMY_NODE_ID),
- ast::Inherited)
- };
- ast::ViewItem {
- node: vi,
- attrs: Vec::new(),
- vis: vis,
- span: DUMMY_SP
- }
-}
-
-fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<ast::ViewItem>) {
- // Link to test crate
- let view_items = vec!(mk_std(cx));
-
- // A constant vector of test descriptors.
- let tests = mk_tests(cx);
-
- // The synthesized main function which will call the console test runner
- // with our list of tests
- let mainfn = (quote_item!(&mut cx.ext_cx,
- pub fn main() {
- #![main]
- use std::slice::Slice;
- test::test_main_static(::std::os::args().as_slice(), TESTS);
- }
- )).unwrap();
-
- let testmod = ast::Mod {
- inner: DUMMY_SP,
- view_items: view_items,
- items: vec!(mainfn, tests),
- };
- let item_ = ast::ItemMod(testmod);
-
- let mod_ident = token::gensym_ident("__test");
- let item = ast::Item {
- ident: mod_ident,
- attrs: Vec::new(),
- id: ast::DUMMY_NODE_ID,
- node: item_,
- vis: ast::Public,
- span: DUMMY_SP,
- };
- let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
- // building `use <ident> = __test::main`
- let reexport_ident = token::str_to_ident(s.get());
-
- let use_path =
- nospan(ast::ViewPathSimple(reexport_ident,
- path_node(vec![mod_ident, token::str_to_ident("main")]),
- ast::DUMMY_NODE_ID));
-
- ast::ViewItem {
- node: ast::ViewItemUse(P(use_path)),
- attrs: vec![],
- vis: ast::Inherited,
- span: DUMMY_SP
- }
- });
-
- debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&item));
-
- (P(item), reexport)
-}
-
-fn nospan<T>(t: T) -> codemap::Spanned<T> {
- codemap::Spanned { node: t, span: DUMMY_SP }
-}
-
-fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
- ast::Path {
- span: DUMMY_SP,
- global: false,
- segments: ids.move_iter().map(|identifier| ast::PathSegment {
- identifier: identifier,
- lifetimes: Vec::new(),
- types: OwnedSlice::empty(),
- }).collect()
- }
-}
-
-fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
- // The vector of test_descs for this crate
- let test_descs = mk_test_descs(cx);
-
- // FIXME #15962: should be using quote_item, but that stringifies
- // __test_reexports, causing it to be reinterned, losing the
- // gensym information.
- let sp = DUMMY_SP;
- let ecx = &cx.ext_cx;
- let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
- ecx.ident_of("test"),
- ecx.ident_of("TestDescAndFn")]),
- None);
- let static_lt = ecx.lifetime(sp, token::special_idents::static_lifetime.name);
- // &'static [self::test::TestDescAndFn]
- let static_type = ecx.ty_rptr(sp,
- ecx.ty(sp, ast::TyVec(struct_type)),
- Some(static_lt),
- ast::MutImmutable);
- // static TESTS: $static_type = &[...];
- ecx.item_static(sp,
- ecx.ident_of("TESTS"),
- static_type,
- ast::MutImmutable,
- test_descs)
-}
-
-fn is_test_crate(krate: &ast::Crate) -> bool {
- match attr::find_crate_name(krate.attrs.as_slice()) {
- Some(ref s) if "test" == s.get().as_slice() => true,
- _ => false
- }
-}
-
-fn mk_test_descs(cx: &TestCtxt) -> P<ast::Expr> {
- debug!("building test vector from {} tests", cx.testfns.len());
-
- P(ast::Expr {
- id: ast::DUMMY_NODE_ID,
- node: ast::ExprAddrOf(ast::MutImmutable,
- P(ast::Expr {
- id: ast::DUMMY_NODE_ID,
- node: ast::ExprVec(cx.testfns.iter().map(|test| {
- mk_test_desc_and_fn_rec(cx, test)
- }).collect()),
- span: DUMMY_SP,
- })),
- span: DUMMY_SP,
- })
-}
-
-fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> P<ast::Expr> {
- // FIXME #15962: should be using quote_expr, but that stringifies
- // __test_reexports, causing it to be reinterned, losing the
- // gensym information.
-
- let span = test.span;
- let path = test.path.clone();
- let ecx = &cx.ext_cx;
- let self_id = ecx.ident_of("self");
- let test_id = ecx.ident_of("test");
-
- // creates self::test::$name
- let test_path = |name| {
- ecx.path(span, vec![self_id, test_id, ecx.ident_of(name)])
- };
- // creates $name: $expr
- let field = |name, expr| ecx.field_imm(span, ecx.ident_of(name), expr);
-
- debug!("encoding {}", ast_util::path_name_i(path.as_slice()));
-
- // path to the #[test] function: "foo::bar::baz"
- let path_string = ast_util::path_name_i(path.as_slice());
- let name_expr = ecx.expr_str(span, token::intern_and_get_ident(path_string.as_slice()));
-
- // self::test::StaticTestName($name_expr)
- let name_expr = ecx.expr_call(span,
- ecx.expr_path(test_path("StaticTestName")),
- vec![name_expr]);
-
- let ignore_expr = ecx.expr_bool(span, test.ignore);
- let fail_expr = ecx.expr_bool(span, test.should_fail);
-
- // self::test::TestDesc { ... }
- let desc_expr = ecx.expr_struct(
- span,
- test_path("TestDesc"),
- vec![field("name", name_expr),
- field("ignore", ignore_expr),
- field("should_fail", fail_expr)]);
-
-
- let mut visible_path = match cx.toplevel_reexport {
- Some(id) => vec![id],
- None => {
- cx.sess.bug(
- "expected to find top-level re-export name, but found None"
- );
- }
- };
- visible_path.extend(path.move_iter());
-
- let fn_expr = ecx.expr_path(ecx.path_global(span, visible_path));
-
- let variant_name = if test.bench { "StaticBenchFn" } else { "StaticTestFn" };
- // self::test::$variant_name($fn_expr)
- let testfn_expr = ecx.expr_call(span, ecx.expr_path(test_path(variant_name)), vec![fn_expr]);
-
- // self::test::TestDescAndFn { ... }
- ecx.expr_struct(span,
- test_path("TestDescAndFn"),
- vec![field("desc", desc_expr),
- field("testfn", testfn_expr)])
-}
html_root_url = "http://doc.rust-lang.org/master/")]
#![allow(deprecated)]
-#![feature(macro_rules, globs, struct_variant, managed_boxes, quote)]
+#![feature(macro_rules, globs, struct_variant, quote)]
#![feature(default_type_params, phase, unsafe_destructor)]
#![allow(unknown_features)] // NOTE: Remove after next snapshot
pub mod borrowck;
pub mod cfg;
pub mod check_const;
+ pub mod check_static_recursion;
pub mod check_loop;
pub mod check_match;
pub mod check_rvalues;
pub mod effect;
pub mod entry;
pub mod expr_use_visitor;
- pub mod freevars;
pub mod graph;
pub mod intrinsicck;
pub mod kind;
pub mod save;
pub mod stability;
pub mod subst;
+ pub mod traits;
pub mod trans;
pub mod ty;
pub mod ty_fold;
pub mod weak_lang_items;
}
-pub mod front {
- pub mod config;
- pub mod test;
- pub mod std_inject;
- pub mod feature_gate;
- pub mod show_span;
-}
-
pub mod metadata;
pub mod driver;
use syntax::ast_map;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
-use syntax::codemap::Span;
+use syntax::codemap::{Span, NO_EXPANSION};
use syntax::parse::token;
use syntax::{ast, ast_util, visit};
use syntax::ptr::P;
ast::StmtSemi(ref expr, _) => &**expr,
_ => return
};
- let t = ty::expr_ty(cx.tcx, expr);
- match ty::get(t).sty {
- ty::ty_nil | ty::ty_bot | ty::ty_bool => return,
- _ => {}
- }
+
match expr.node {
ast::ExprRet(..) => return,
_ => {}
let t = ty::expr_ty(cx.tcx, expr);
let mut warned = false;
match ty::get(t).sty {
+ ty::ty_nil | ty::ty_bot | ty::ty_bool => return,
ty::ty_struct(did, _) |
ty::ty_enum(did, _) => {
if ast_util::is_local(did) {
}
}
}
+ ty::TypeTraitItem(typedef) => {
+ match typedef.container {
+ ty::TraitContainer(..) => TraitDefaultImpl,
+ ty::ImplContainer(cid) => {
+ match ty::impl_trait_ref(cx.tcx, cid) {
+ Some(..) => TraitImpl,
+ None => PlainImpl
+ }
+ }
+ }
+ }
}
}
}
match &p.node {
&ast::PatIdent(_, ref path1, _) => {
match cx.tcx.def_map.borrow().find(&p.id) {
- Some(&def::DefLocal(_, _)) | Some(&def::DefBinding(_, _)) |
- Some(&def::DefArg(_, _)) => {
+ Some(&def::DefLocal(_)) => {
self.check_snake_case(cx, "variable", path1.node, p.span);
}
_ => {}
}
}
+declare_lint!(UNNECESSARY_IMPORT_BRACES, Allow,
+ "unnecessary braces around an imported item")
+
+pub struct UnnecessaryImportBraces;
+
+impl LintPass for UnnecessaryImportBraces {
+ fn get_lints(&self) -> LintArray {
+ lint_array!(UNNECESSARY_IMPORT_BRACES)
+ }
+
+ fn check_view_item(&mut self, cx: &Context, view_item: &ast::ViewItem) {
+ match view_item.node {
+ ast::ViewItemUse(ref view_path) => {
+ match view_path.node {
+ ast::ViewPathList(_, ref items, _) => {
+ if items.len() == 1 {
+ match items[0].node {
+ ast::PathListIdent {ref name, ..} => {
+ let m = format!("braces around {} is unnecessary",
+ token::get_ident(*name).get());
+ cx.span_lint(UNNECESSARY_IMPORT_BRACES, view_item.span,
+ m.as_slice());
+ },
+ _ => ()
+ }
+ }
+ }
+ _ => ()
+ }
+ },
+ _ => ()
+ }
+ }
+}
+
declare_lint!(UNUSED_UNSAFE, Warn,
"unnecessary use of an `unsafe` block")
match cx.tcx.adjustments.borrow().find(&e.id) {
Some(adjustment) => {
match *adjustment {
- ty::AutoDerefRef(ty::AutoDerefRef { ref autoref, .. }) => {
+ ty::AdjustDerefRef(ty::AutoDerefRef { ref autoref, .. }) => {
match (allocation, autoref) {
(VectorAllocation, &Some(ty::AutoPtr(_, _, None))) => {
cx.span_lint(UNNECESSARY_ALLOCATION, e.span,
fn check_expr(&mut self, cx: &Context, e: &ast::Expr) {
// if the expression was produced by a macro expansion,
- if e.span.expn_info.is_some() { return }
+ if e.span.expn_id != NO_EXPANSION { return }
let id = match e.node {
ast::ExprPath(..) | ast::ExprStruct(..) => {
typeck::MethodStaticUnboxedClosure(def_id) => {
def_id
}
- typeck::MethodParam(typeck::MethodParam {
- trait_id: trait_id,
+ typeck::MethodTypeParam(typeck::MethodParam {
+ trait_ref: ref trait_ref,
method_num: index,
..
- })
- | typeck::MethodObject(typeck::MethodObject {
- trait_id: trait_id,
+ }) |
+ typeck::MethodTraitObject(typeck::MethodObject {
+ trait_ref: ref trait_ref,
method_num: index,
..
}) => {
- match ty::trait_item(cx.tcx,
- trait_id,
- index) {
- ty::MethodTraitItem(method) => {
- method.def_id
- }
- }
+ ty::trait_item(cx.tcx,
+ trait_ref.def_id,
+ index).def_id()
}
}
}
//! for all lint attributes.
use middle::privacy::ExportedItems;
+use middle::subst;
use middle::ty;
use middle::typeck::astconv::AstConv;
use middle::typeck::infer;
NonSnakeCase,
NonUppercaseStatics,
UnnecessaryParens,
+ UnnecessaryImportBraces,
UnusedUnsafe,
UnsafeBlock,
UnusedMut,
// Move the vector of passes out of `$cx` so that we can
// iterate over it mutably while passing `$cx` to the methods.
let mut passes = $cx.lints.passes.take().unwrap();
- for obj in passes.mut_iter() {
+ for obj in passes.iter_mut() {
obj.$f($cx, $($args),*);
}
$cx.lints.passes = Some(passes);
_ => sess.bug("impossible level in raw_emit_lint"),
}
- for span in note.move_iter() {
+ for span in note.into_iter() {
sess.span_note(span, "lint level defined here");
}
}
// specified closure
let mut pushed = 0u;
- for result in gather_attrs(attrs).move_iter() {
+ for result in gather_attrs(attrs).into_iter() {
let v = match result {
Err(span) => {
self.tcx.sess.span_err(span, "malformed lint attribute");
}
};
- for (lint_id, level, span) in v.move_iter() {
+ for (lint_id, level, span) in v.into_iter() {
let now = self.lints.get_level_source(lint_id).val0();
if now == Forbid && level != Forbid {
let lint_name = lint_id.as_str();
fn ty_infer(&self, _span: Span) -> ty::t {
infer::new_infer_ctxt(self.tcx).next_ty_var()
}
+
+ fn associated_types_of_trait_are_valid(&self, _: ty::t, _: ast::DefId)
+ -> bool {
+ // FIXME(pcwalton): This is wrong.
+ true
+ }
+
+ fn associated_type_binding(&self,
+ _: Span,
+ _: Option<ty::t>,
+ trait_id: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t {
+ // FIXME(pcwalton): This is wrong.
+ let trait_def = self.get_trait_def(trait_id);
+ let index = ty::associated_type_parameter_index(self.tcx,
+ &*trait_def,
+ associated_type_id);
+ ty::mk_param(self.tcx, subst::TypeSpace, index, associated_type_id)
+ }
}
impl<'a, 'tcx, 'v> Visitor<'v> for Context<'a, 'tcx> {
match self.tcx.sess.lints.borrow_mut().pop(&id) {
None => {}
Some(lints) => {
- for (lint_id, span, msg) in lints.move_iter() {
+ for (lint_id, span, msg) in lints.into_iter() {
self.span_lint(lint_id.lint, span, msg.as_slice())
}
}
tag_table_unboxed_closures = 0x54,
tag_table_upvar_borrow_map = 0x55,
tag_table_capture_modes = 0x56,
+ tag_table_object_cast_map = 0x57,
}
static first_astencode_tag: uint = tag_ast as uint;
-static last_astencode_tag: uint = tag_table_capture_modes as uint;
+static last_astencode_tag: uint = tag_table_object_cast_map as uint;
impl astencode_tag {
pub fn from_uint(value : uint) -> Option<astencode_tag> {
let is_a_tag = first_astencode_tag <= value && value <= last_astencode_tag;
map.find_or_insert_with(data.name(), |_| Vec::new()).push(cnum);
});
- for (name, dupes) in map.move_iter() {
+ for (name, dupes) in map.into_iter() {
if dupes.len() == 1 { continue }
diag.handler().warn(
format!("using multiple versions of crate `{}`", name).as_slice());
- for dupe in dupes.move_iter() {
+ for dupe in dupes.into_iter() {
let data = cstore.get_crate_data(dupe);
diag.span_note(data.span, "used here");
loader::note_crate_name(diag, data.name().as_slice());
let cdata = cstore.get_crate_data(def.krate);
decoder::get_stability(&*cdata, def.node)
}
+
+pub fn is_associated_type(cstore: &cstore::CStore, def: ast::DefId) -> bool {
+ let cdata = cstore.get_crate_data(def.krate);
+ decoder::is_associated_type(&*cdata, def.node)
+}
+
parse_bare_fn_ty_data, parse_trait_ref_data};
use middle::def;
use middle::lang_items;
-use middle::resolve::TraitItemKind;
+use middle::resolve::{TraitItemKind, TypeTraitItemKind};
use middle::subst;
use middle::ty::{ImplContainer, TraitContainer};
use middle::ty;
}
fn item_sort(item: rbml::Doc) -> char {
+ // NB(pcwalton): The default of 'r' here is relied upon in
+ // `is_associated_type` below.
let mut ret = 'r';
reader::tagged_docs(item, tag_item_trait_item_sort, |doc| {
ret = doc.as_str_slice().as_bytes()[0] as char;
};
DlDef(def::DefStaticMethod(did, provenance, fn_style))
}
- Type | ForeignType => DlDef(def::DefTy(did)),
+ Type | ForeignType => DlDef(def::DefTy(did, false)),
Mod => DlDef(def::DefMod(did)),
ForeignMod => DlDef(def::DefForeignMod(did)),
StructVariant => {
DlDef(def::DefVariant(enum_did, did, false))
}
Trait => DlDef(def::DefTrait(did)),
- Enum => DlDef(def::DefTy(did)),
+ Enum => DlDef(def::DefTy(did, true)),
Impl => DlImpl(did),
PublicField | InheritedField => DlField,
}
let def_id = item_def_id(doc, cdata);
match item_sort(doc) {
'r' | 'p' => impl_items.push(ty::MethodTraitItemId(def_id)),
+ 't' => impl_items.push(ty::TypeTraitItemId(def_id)),
_ => fail!("unknown impl item sort"),
}
true
let explicit_self = get_explicit_self(doc);
(name, TraitItemKind::from_explicit_self_category(explicit_self))
}
+ 't' => (name, TypeTraitItemKind),
c => {
fail!("get_trait_item_name_and_kind(): unknown trait item kind \
in metadata: `{}`", c)
};
let name = item_name(&*intr, method_doc);
+ let vis = item_visibility(method_doc);
match item_sort(method_doc) {
'r' | 'p' => {
let generics = doc_generics(method_doc, tcx, cdata,
tag_method_ty_generics);
let fty = doc_method_fty(method_doc, tcx, cdata);
- let vis = item_visibility(method_doc);
let explicit_self = get_explicit_self(method_doc);
let provided_source = get_provided_source(method_doc, cdata);
container,
provided_source)))
}
+ 't' => {
+ ty::TypeTraitItem(Rc::new(ty::AssociatedType {
+ ident: name,
+ vis: vis,
+ def_id: def_id,
+ container: container,
+ }))
+ }
_ => fail!("unknown impl/trait item sort"),
}
}
let def_id = item_def_id(mth, cdata);
match item_sort(mth) {
'r' | 'p' => result.push(ty::MethodTraitItemId(def_id)),
+ 't' => result.push(ty::TypeTraitItemId(def_id)),
_ => fail!("unknown trait item sort"),
}
true
ty::MethodTraitItem(ref method) => {
result.push((*method).clone())
}
+ ty::TypeTraitItem(_) => {}
}
}
true
let nd = reader::get_doc(meta_item_doc, tag_meta_item_name);
let n = token::intern_and_get_ident(nd.as_str_slice());
let subitems = get_meta_items(meta_item_doc);
- items.push(attr::mk_list_item(n, subitems.move_iter().collect()));
+ items.push(attr::mk_list_item(n, subitems.into_iter().collect()));
true
});
return items;
// Currently it's only possible to have a single meta item on
// an attribute
assert_eq!(meta_items.len(), 1u);
- let meta_item = meta_items.move_iter().nth(0).unwrap();
+ let meta_item = meta_items.into_iter().nth(0).unwrap();
attrs.push(
codemap::Spanned {
node: ast::Attribute_ {
ty::Generics { types: types, regions: regions }
}
+
+pub fn is_associated_type(cdata: Cmd, id: ast::NodeId) -> bool {
+ let items = reader::get_doc(rbml::Doc::new(cdata.data()), tag_items);
+ match maybe_find_item(id, items) {
+ None => false,
+ Some(item) => item_sort(item) == 't',
+ }
+}
+
use metadata::decoder;
use metadata::tyencode;
use middle::ty::{lookup_item_type};
-use middle::astencode;
use middle::ty;
-use middle::typeck;
use middle::stability;
use middle;
use util::nodemap::{NodeMap, NodeSet};
rbml_w.end_tag();
}
-fn encode_impl_vtables(rbml_w: &mut Encoder,
- ecx: &EncodeContext,
- vtables: &typeck::vtable_res) {
- rbml_w.start_tag(tag_item_impl_vtables);
- astencode::encode_vtable_res(ecx, rbml_w, vtables);
- rbml_w.end_tag();
-}
-
// Item info table encoding
fn encode_family(rbml_w: &mut Encoder, c: char) {
rbml_w.start_tag(tag_items_data_item_family);
tyencode::enc_ty(rbml_w.writer, ty_str_ctxt, typ);
}
+pub fn write_trait_ref(ecx: &EncodeContext,
+ rbml_w: &mut Encoder,
+ trait_ref: &ty::TraitRef) {
+ let ty_str_ctxt = &tyencode::ctxt {
+ diag: ecx.diag,
+ ds: def_to_string,
+ tcx: ecx.tcx,
+ abbrevs: &ecx.type_abbrevs
+ };
+ tyencode::enc_trait_ref(rbml_w.writer, ty_str_ctxt, trait_ref);
+}
+
pub fn write_region(ecx: &EncodeContext,
rbml_w: &mut Encoder,
r: ty::Region) {
let impl_items = ecx.tcx.impl_items.borrow();
match ecx.tcx.inherent_impls.borrow().find(&exp.def_id) {
Some(implementations) => {
- for base_impl_did in implementations.borrow().iter() {
+ for base_impl_did in implementations.iter() {
for &method_did in impl_items.get(base_impl_did).iter() {
let impl_item = ty::impl_or_trait_item(
ecx.tcx,
m.ident);
}
}
+ ty::TypeTraitItem(_) => {}
}
}
}
encode_bounds_and_type(rbml_w, ecx, &pty);
let elem = ast_map::PathName(m.ident.name);
- encode_path(rbml_w, impl_path.chain(Some(elem).move_iter()));
+ encode_path(rbml_w, impl_path.chain(Some(elem).into_iter()));
match ast_item_opt {
Some(&ast::MethodImplItem(ref ast_method)) => {
encode_attributes(rbml_w, ast_method.attrs.as_slice());
}
encode_method_argument_names(rbml_w, ast_method.pe_fn_decl());
}
+ Some(_) | None => {}
+ }
+
+ rbml_w.end_tag();
+}
+
+fn encode_info_for_associated_type(ecx: &EncodeContext,
+ rbml_w: &mut Encoder,
+ associated_type: &ty::AssociatedType,
+ impl_path: PathElems,
+ parent_id: NodeId,
+ typedef_opt: Option<P<ast::Typedef>>) {
+ debug!("encode_info_for_associated_type({},{})",
+ associated_type.def_id,
+ token::get_ident(associated_type.ident));
+
+ rbml_w.start_tag(tag_items_data_item);
+
+ encode_def_id(rbml_w, associated_type.def_id);
+ encode_name(rbml_w, associated_type.ident.name);
+ encode_visibility(rbml_w, associated_type.vis);
+ encode_family(rbml_w, 'y');
+ encode_parent_item(rbml_w, local_def(parent_id));
+ encode_item_sort(rbml_w, 'r');
+
+ let stab = stability::lookup(ecx.tcx, associated_type.def_id);
+ encode_stability(rbml_w, stab);
+
+ let elem = ast_map::PathName(associated_type.ident.name);
+ encode_path(rbml_w, impl_path.chain(Some(elem).move_iter()));
+
+ match typedef_opt {
None => {}
+ Some(typedef) => {
+ encode_attributes(rbml_w, typedef.attrs.as_slice());
+ encode_type(ecx, rbml_w, ty::node_id_to_type(ecx.tcx,
+ typedef.id));
+ }
}
rbml_w.end_tag();
match ecx.tcx.inherent_impls.borrow().find(&def_id) {
None => {}
Some(implementations) => {
- for &impl_def_id in implementations.borrow().iter() {
+ for &impl_def_id in implementations.iter() {
rbml_w.start_tag(tag_items_data_item_inherent_impl);
encode_def_id(rbml_w, impl_def_id);
rbml_w.end_tag();
encode_def_id(rbml_w, item_def_id);
encode_item_sort(rbml_w, 'r');
}
+ ty::TypeTraitItemId(item_def_id) => {
+ encode_def_id(rbml_w, item_def_id);
+ encode_item_sort(rbml_w, 't');
+ }
}
rbml_w.end_tag();
}
let trait_ref = ty::node_id_to_trait_ref(
tcx, ast_trait_ref.ref_id);
encode_trait_ref(rbml_w, ecx, &*trait_ref, tag_item_trait_ref);
- let impl_vtables = ty::lookup_impl_vtables(tcx, def_id);
- encode_impl_vtables(rbml_w, ecx, &impl_vtables);
}
encode_path(rbml_w, path.clone());
encode_stability(rbml_w, stab);
pos: rbml_w.writer.tell().unwrap(),
});
- let ty::MethodTraitItem(method_type) =
+ let trait_item_type =
ty::impl_or_trait_item(tcx, trait_item_def_id.def_id());
- encode_info_for_method(ecx, rbml_w, &*method_type, path.clone(),
- false, item.id, ast_item)
+ match (trait_item_type, ast_item) {
+ (ty::MethodTraitItem(ref method_type),
+ Some(&ast::MethodImplItem(_))) => {
+ encode_info_for_method(ecx,
+ rbml_w,
+ &**method_type,
+ path.clone(),
+ false,
+ item.id,
+ ast_item)
+ }
+ (ty::MethodTraitItem(ref method_type), _) => {
+ encode_info_for_method(ecx,
+ rbml_w,
+ &**method_type,
+ path.clone(),
+ false,
+ item.id,
+ None)
+ }
+ (ty::TypeTraitItem(ref associated_type),
+ Some(&ast::TypeImplItem(ref typedef))) => {
+ encode_info_for_associated_type(ecx,
+ rbml_w,
+ &**associated_type,
+ path.clone(),
+ item.id,
+ Some((*typedef).clone()))
+ }
+ (ty::TypeTraitItem(ref associated_type), _) => {
+ encode_info_for_associated_type(ecx,
+ rbml_w,
+ &**associated_type,
+ path.clone(),
+ item.id,
+ None)
+ }
+ }
}
}
ItemTrait(_, _, _, ref ms) => {
encode_def_id(rbml_w, method_def_id);
encode_item_sort(rbml_w, 'r');
}
+ ty::TypeTraitItemId(type_def_id) => {
+ encode_def_id(rbml_w, type_def_id);
+ encode_item_sort(rbml_w, 't');
+ }
}
rbml_w.end_tag();
rbml_w.start_tag(tag_items_data_item);
+ encode_parent_item(rbml_w, def_id);
+
+ let stab = stability::lookup(tcx, item_def_id.def_id());
+ encode_stability(rbml_w, stab);
+
let trait_item_type =
ty::impl_or_trait_item(tcx, item_def_id.def_id());
+ let is_nonstatic_method;
match trait_item_type {
- ty::MethodTraitItem(method_ty) => {
+ ty::MethodTraitItem(method_ty) => {
let method_def_id = item_def_id.def_id();
encode_method_ty_fields(ecx, rbml_w, &*method_ty);
- encode_parent_item(rbml_w, def_id);
-
- let stab = stability::lookup(tcx, method_def_id);
- encode_stability(rbml_w, stab);
let elem = ast_map::PathName(method_ty.ident.name);
encode_path(rbml_w,
- path.clone().chain(Some(elem).move_iter()));
+ path.clone().chain(Some(elem).into_iter()));
match method_ty.explicit_self {
ty::StaticExplicitSelfCategory => {
}
}
- let trait_item = ms.get(i);
- match *trait_item {
- RequiredMethod(ref tm) => {
- encode_attributes(rbml_w, tm.attrs.as_slice());
- encode_item_sort(rbml_w, 'r');
- encode_parent_sort(rbml_w, 't');
- encode_method_argument_names(rbml_w, &*tm.decl);
- }
+ is_nonstatic_method = method_ty.explicit_self !=
+ ty::StaticExplicitSelfCategory;
+ }
+ ty::TypeTraitItem(associated_type) => {
+ let elem = ast_map::PathName(associated_type.ident.name);
+ encode_path(rbml_w,
+ path.clone().chain(Some(elem).move_iter()));
- ProvidedMethod(ref m) => {
- encode_attributes(rbml_w, m.attrs.as_slice());
- // If this is a static method, we've already
- // encoded this.
- if method_ty.explicit_self !=
- ty::StaticExplicitSelfCategory {
- // FIXME: I feel like there is something funny
- // going on.
- let pty = ty::lookup_item_type(tcx, method_def_id);
- encode_bounds_and_type(rbml_w, ecx, &pty);
- }
- encode_item_sort(rbml_w, 'p');
- encode_parent_sort(rbml_w, 't');
- encode_inlined_item(ecx, rbml_w,
- IITraitItemRef(def_id, trait_item));
- encode_method_argument_names(rbml_w, &*m.pe_fn_decl());
- }
+ encode_family(rbml_w, 'y');
+
+ is_nonstatic_method = false;
+ }
+ }
+
+ encode_parent_sort(rbml_w, 't');
+
+ let trait_item = ms.get(i);
+ match ms.get(i) {
+ &RequiredMethod(ref tm) => {
+ encode_attributes(rbml_w, tm.attrs.as_slice());
+ encode_item_sort(rbml_w, 'r');
+ encode_method_argument_names(rbml_w, &*tm.decl);
+ }
+
+ &ProvidedMethod(ref m) => {
+ encode_attributes(rbml_w, m.attrs.as_slice());
+ // If this is a static method, we've already
+ // encoded this.
+ if is_nonstatic_method {
+ // FIXME: I feel like there is something funny
+ // going on.
+ let pty = ty::lookup_item_type(tcx,
+ item_def_id.def_id());
+ encode_bounds_and_type(rbml_w, ecx, &pty);
}
+ encode_item_sort(rbml_w, 'p');
+ encode_inlined_item(ecx,
+ rbml_w,
+ IITraitItemRef(def_id, trait_item));
+ encode_method_argument_names(rbml_w,
+ &*m.pe_fn_decl());
+ }
+
+ &TypeTraitItem(ref associated_type) => {
+ encode_attributes(rbml_w,
+ associated_type.attrs.as_slice());
+ encode_item_sort(rbml_w, 't');
}
}
fn encode_index<T: Hash>(rbml_w: &mut Encoder, index: Vec<entry<T>>,
write_fn: |&mut SeekableMemWriter, &T|) {
let mut buckets: Vec<Vec<entry<T>>> = Vec::from_fn(256, |_| Vec::new());
- for elt in index.move_iter() {
+ for elt in index.into_iter() {
let h = hash::hash(&elt.val) as uint;
buckets.get_mut(h % 256).push(elt);
}
pub fn encode_metadata(parms: EncodeParams, krate: &Crate) -> Vec<u8> {
let mut wr = SeekableMemWriter::new();
encode_metadata_inner(&mut wr, parms, krate);
- wr.unwrap().move_iter().collect()
+ wr.unwrap().into_iter().collect()
}
fn encode_metadata_inner(wr: &mut SeekableMemWriter, parms: EncodeParams, krate: &Crate) {
// libraries corresponds to the crate id and hash criteria that this
// search is being performed for.
let mut libraries = Vec::new();
- for (_hash, (rlibs, dylibs)) in candidates.move_iter() {
+ for (_hash, (rlibs, dylibs)) in candidates.into_iter() {
let mut metadata = None;
let rlib = self.extract_one(rlibs, "rlib", &mut metadata);
let dylib = self.extract_one(dylibs, "dylib", &mut metadata);
// libraries or not.
match libraries.len() {
0 => None,
- 1 => Some(libraries.move_iter().next().unwrap()),
+ 1 => Some(libraries.into_iter().next().unwrap()),
_ => {
self.sess.span_err(self.span,
format!("multiple matching crates for `{}`",
if m.len() == 0 {
return None
} else if m.len() == 1 {
- return Some(m.move_iter().next().unwrap())
+ return Some(m.into_iter().next().unwrap())
}
}
- for lib in m.move_iter() {
+ for lib in m.into_iter() {
info!("{} reading metadata from: {}", flavor, lib.display());
let metadata = match get_metadata_section(self.os, &lib) {
Ok(blob) => {
assert_eq!(next(st), '|');
let index = parse_uint(st);
assert_eq!(next(st), '|');
+ let associated_with = parse_opt(st, |st| {
+ parse_def(st, NominalType, |x,y| conv(x,y))
+ });
+ assert_eq!(next(st), '|');
let bounds = parse_bounds(st, |x,y| conv(x,y));
let default = parse_opt(st, |st| parse_ty(st, |x,y| conv(x,y)));
def_id: def_id,
space: space,
index: index,
+ associated_with: associated_with,
bounds: bounds,
default: default
}
let builtin_bounds = parse_builtin_bounds(st, |x,y| conv(x,y));
let mut param_bounds = ty::ParamBounds {
- opt_region_bound: None,
+ region_bounds: Vec::new(),
builtin_bounds: builtin_bounds,
trait_bounds: Vec::new()
};
loop {
match next(st) {
'R' => {
- param_bounds.opt_region_bound = Some(parse_region(st, |x, y| conv (x, y)));
+ param_bounds.region_bounds.push(parse_region(st, |x, y| conv (x, y)));
}
'I' => {
param_bounds.trait_bounds.push(Rc::new(parse_trait_ref(st, |x,y| conv(x,y))));
pub fn enc_bounds(w: &mut SeekableMemWriter, cx: &ctxt, bs: &ty::ParamBounds) {
enc_builtin_bounds(w, cx, &bs.builtin_bounds);
- for &r in bs.opt_region_bound.iter() {
+ for &r in bs.region_bounds.iter() {
mywrite!(w, "R");
enc_region(w, cx, r);
}
mywrite!(w, "{}:{}|{}|{}|",
token::get_ident(v.ident), (cx.ds)(v.def_id),
v.space.to_uint(), v.index);
+ enc_opt(w, v.associated_with, |w, did| mywrite!(w, "{}", (cx.ds)(did)));
+ mywrite!(w, "|");
enc_bounds(w, cx, &v.bounds);
enc_opt(w, v.default, |w, t| enc_ty(w, cx, t));
}
use metadata::decoder;
use middle::def;
use metadata::encoder as e;
-use middle::freevars::{CaptureMode, freevar_entry};
-use middle::freevars;
use middle::region;
use metadata::tydecode;
use metadata::tydecode::{DefIdSource, NominalType, TypeWithId, TypeParameter};
use metadata::tydecode::{RegionParameter};
use metadata::tyencode;
+use middle::mem_categorization::Typer;
use middle::subst;
use middle::subst::VecPerParamSpace;
use middle::typeck::{MethodCall, MethodCallee, MethodOrigin};
use libc;
use std::io::Seek;
use std::mem;
-use std::gc::GC;
+use std::rc::Rc;
use rbml::io::SeekableMemWriter;
use rbml::{reader, writer};
e::IIForeignRef(i) => i.id,
e::IITraitItemRef(_, &ast::ProvidedMethod(ref m)) => m.id,
e::IITraitItemRef(_, &ast::RequiredMethod(ref m)) => m.id,
- e::IIImplItemRef(_, &ast::MethodImplItem(ref m)) => m.id
+ e::IITraitItemRef(_, &ast::TypeTraitItem(ref ti)) => ti.id,
+ e::IIImplItemRef(_, &ast::MethodImplItem(ref m)) => m.id,
+ e::IIImplItemRef(_, &ast::TypeImplItem(ref ti)) => ti.id,
};
debug!("> Encoding inlined item: {} ({})",
ecx.tcx.map.path_to_string(id),
ast::IITraitItem(_, ref ti) => {
match *ti {
ast::ProvidedMethod(ref m) => m.pe_ident(),
- ast::RequiredMethod(ref ty_m) => ty_m.ident
+ ast::RequiredMethod(ref ty_m) => ty_m.ident,
+ ast::TypeTraitItem(ref ti) => ti.ident,
}
},
ast::IIImplItem(_, ref m) => {
match *m {
- ast::MethodImplItem(ref m) => m.pe_ident()
+ ast::MethodImplItem(ref m) => m.pe_ident(),
+ ast::TypeImplItem(ref ti) => ti.ident,
}
}
};
impl Folder for NestedItemsDropper {
fn fold_block(&mut self, blk: P<ast::Block>) -> P<ast::Block> {
blk.and_then(|ast::Block {id, stmts, expr, rules, span, ..}| {
- let stmts_sans_items = stmts.move_iter().filter_map(|stmt| {
+ let stmts_sans_items = stmts.into_iter().filter_map(|stmt| {
let use_stmt = match stmt.node {
ast::StmtExpr(_, _) | ast::StmtSemi(_, _) => true,
ast::StmtDecl(ref decl, _) => {
ast::RequiredMethod(
fold::noop_fold_type_method(ty_m.clone(), &mut fld))
}
+ ast::TypeTraitItem(ref associated_type) => {
+ ast::TypeTraitItem(
+ P(fold::noop_fold_associated_type(
+ (**associated_type).clone(),
+ &mut fld)))
+ }
})
}
e::IIImplItemRef(d, m) => {
.expect_one("noop_fold_method must produce \
exactly one method"))
}
+ ast::TypeImplItem(ref td) => {
+ ast::TypeImplItem(
+ P(fold::noop_fold_typedef((**td).clone(), &mut fld)))
+ }
})
}
e::IIForeignRef(i) => {
def::DefMod(did) => { def::DefMod(did.tr(dcx)) }
def::DefForeignMod(did) => { def::DefForeignMod(did.tr(dcx)) }
def::DefStatic(did, m) => { def::DefStatic(did.tr(dcx), m) }
- def::DefArg(nid, b) => { def::DefArg(dcx.tr_id(nid), b) }
- def::DefLocal(nid, b) => { def::DefLocal(dcx.tr_id(nid), b) }
+ def::DefLocal(nid) => { def::DefLocal(dcx.tr_id(nid)) }
def::DefVariant(e_did, v_did, is_s) => {
def::DefVariant(e_did.tr(dcx), v_did.tr(dcx), is_s)
},
def::DefTrait(did) => def::DefTrait(did.tr(dcx)),
- def::DefTy(did) => def::DefTy(did.tr(dcx)),
+ def::DefTy(did, is_enum) => def::DefTy(did.tr(dcx), is_enum),
+ def::DefAssociatedTy(did) => def::DefAssociatedTy(did.tr(dcx)),
def::DefPrimTy(p) => def::DefPrimTy(p),
def::DefTyParam(s, did, v) => def::DefTyParam(s, did.tr(dcx), v),
- def::DefBinding(nid, bm) => def::DefBinding(dcx.tr_id(nid), bm),
def::DefUse(did) => def::DefUse(did.tr(dcx)),
- def::DefUpvar(nid1, def, nid2, nid3) => {
+ def::DefUpvar(nid1, nid2, nid3) => {
def::DefUpvar(dcx.tr_id(nid1),
- box(GC) (*def).tr(dcx),
dcx.tr_id(nid2),
dcx.tr_id(nid3))
}
// ______________________________________________________________________
// Encoding and decoding of freevar information
-fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &freevar_entry) {
+fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &ty::Freevar) {
(*fv).encode(rbml_w).unwrap();
}
-fn encode_capture_mode(rbml_w: &mut Encoder, cm: CaptureMode) {
+fn encode_capture_mode(rbml_w: &mut Encoder, cm: ast::CaptureClause) {
cm.encode(rbml_w).unwrap();
}
trait rbml_decoder_helper {
fn read_freevar_entry(&mut self, dcx: &DecodeContext)
- -> freevar_entry;
- fn read_capture_mode(&mut self) -> CaptureMode;
+ -> ty::Freevar;
+ fn read_capture_mode(&mut self) -> ast::CaptureClause;
}
impl<'a> rbml_decoder_helper for reader::Decoder<'a> {
fn read_freevar_entry(&mut self, dcx: &DecodeContext)
- -> freevar_entry {
- let fv: freevar_entry = Decodable::decode(self).unwrap();
+ -> ty::Freevar {
+ let fv: ty::Freevar = Decodable::decode(self).unwrap();
fv.tr(dcx)
}
- fn read_capture_mode(&mut self) -> CaptureMode {
- let cm: CaptureMode = Decodable::decode(self).unwrap();
+ fn read_capture_mode(&mut self) -> ast::CaptureClause {
+ let cm: ast::CaptureClause = Decodable::decode(self).unwrap();
cm
}
}
-impl tr for freevar_entry {
- fn tr(&self, dcx: &DecodeContext) -> freevar_entry {
- freevar_entry {
+impl tr for ty::Freevar {
+ fn tr(&self, dcx: &DecodeContext) -> ty::Freevar {
+ ty::Freevar {
def: self.def.tr(dcx),
span: self.span.tr(dcx),
}
adjustment.encode(rbml_w)
});
rbml_w.emit_struct_field("origin", 1u, |rbml_w| {
- method.origin.encode(rbml_w)
+ Ok(rbml_w.emit_method_origin(ecx, &method.origin))
});
rbml_w.emit_struct_field("ty", 2u, |rbml_w| {
Ok(rbml_w.emit_ty(ecx, method.ty))
}).unwrap();
Ok((adjustment, MethodCallee {
origin: this.read_struct_field("origin", 1, |this| {
- let method_origin: MethodOrigin =
- Decodable::decode(this).unwrap();
- Ok(method_origin.tr(dcx))
+ Ok(this.read_method_origin(dcx))
}).unwrap(),
ty: this.read_struct_field("ty", 2, |this| {
Ok(this.read_ty(dcx))
typeck::MethodStaticUnboxedClosure(did) => {
typeck::MethodStaticUnboxedClosure(did.tr(dcx))
}
- typeck::MethodParam(ref mp) => {
- typeck::MethodParam(
+ typeck::MethodTypeParam(ref mp) => {
+ typeck::MethodTypeParam(
typeck::MethodParam {
- trait_id: mp.trait_id.tr(dcx),
- .. *mp
+ // def-id is already translated when we read it out
+ trait_ref: mp.trait_ref.clone(),
+ method_num: mp.method_num,
}
)
}
- typeck::MethodObject(ref mo) => {
- typeck::MethodObject(
+ typeck::MethodTraitObject(ref mo) => {
+ typeck::MethodTraitObject(
typeck::MethodObject {
- trait_id: mo.trait_id.tr(dcx),
+ trait_ref: mo.trait_ref.clone(),
.. *mo
}
)
// ______________________________________________________________________
// Encoding and decoding vtable_res
-fn encode_vtable_res_with_key(ecx: &e::EncodeContext,
- rbml_w: &mut Encoder,
- adjustment: typeck::ExprAdjustment,
- dr: &typeck::vtable_res) {
- use serialize::Encoder;
-
- rbml_w.emit_struct("VtableWithKey", 2, |rbml_w| {
- rbml_w.emit_struct_field("adjustment", 0u, |rbml_w| {
- adjustment.encode(rbml_w)
- });
- rbml_w.emit_struct_field("vtable_res", 1u, |rbml_w| {
- Ok(encode_vtable_res(ecx, rbml_w, dr))
- })
- }).unwrap()
-}
-
pub fn encode_vtable_res(ecx: &e::EncodeContext,
rbml_w: &mut Encoder,
dr: &typeck::vtable_res) {
tcx: &ty::ctxt, cdata: &cstore::crate_metadata)
-> typeck::vtable_param_res {
self.read_to_vec(|this| Ok(this.read_vtable_origin(tcx, cdata)))
- .unwrap().move_iter().collect()
+ .unwrap().into_iter().collect()
}
fn read_vtable_origin(&mut self,
fn emit_closure_type(&mut self,
ecx: &e::EncodeContext,
closure_type: &ty::ClosureTy);
+ fn emit_method_origin(&mut self,
+ ecx: &e::EncodeContext,
+ method_origin: &typeck::MethodOrigin);
fn emit_ty(&mut self, ecx: &e::EncodeContext, ty: ty::t);
fn emit_tys(&mut self, ecx: &e::EncodeContext, tys: &[ty::t]);
fn emit_type_param_def(&mut self,
ecx: &e::EncodeContext,
type_param_def: &ty::TypeParameterDef);
+ fn emit_trait_ref(&mut self, ecx: &e::EncodeContext, ty: &ty::TraitRef);
fn emit_polytype(&mut self,
ecx: &e::EncodeContext,
pty: ty::Polytype);
});
}
+ fn emit_method_origin(&mut self,
+ ecx: &e::EncodeContext,
+ method_origin: &typeck::MethodOrigin)
+ {
+ use serialize::Encoder;
+
+ self.emit_enum("MethodOrigin", |this| {
+ match *method_origin {
+ typeck::MethodStatic(def_id) => {
+ this.emit_enum_variant("MethodStatic", 0, 1, |this| {
+ Ok(this.emit_def_id(def_id))
+ })
+ }
+
+ typeck::MethodStaticUnboxedClosure(def_id) => {
+ this.emit_enum_variant("MethodStaticUnboxedClosure", 1, 1, |this| {
+ Ok(this.emit_def_id(def_id))
+ })
+ }
+
+ typeck::MethodTypeParam(ref p) => {
+ this.emit_enum_variant("MethodTypeParam", 2, 1, |this| {
+ this.emit_struct("MethodParam", 2, |this| {
+ try!(this.emit_struct_field("trait_ref", 0, |this| {
+ Ok(this.emit_trait_ref(ecx, &*p.trait_ref))
+ }));
+ try!(this.emit_struct_field("method_num", 0, |this| {
+ this.emit_uint(p.method_num)
+ }));
+ Ok(())
+ })
+ })
+ }
+
+ typeck::MethodTraitObject(ref o) => {
+ this.emit_enum_variant("MethodTraitObject", 3, 1, |this| {
+ this.emit_struct("MethodObject", 2, |this| {
+ try!(this.emit_struct_field("trait_ref", 0, |this| {
+ Ok(this.emit_trait_ref(ecx, &*o.trait_ref))
+ }));
+ try!(this.emit_struct_field("object_trait_id", 0, |this| {
+ Ok(this.emit_def_id(o.object_trait_id))
+ }));
+ try!(this.emit_struct_field("method_num", 0, |this| {
+ this.emit_uint(o.method_num)
+ }));
+ try!(this.emit_struct_field("real_index", 0, |this| {
+ this.emit_uint(o.real_index)
+ }));
+ Ok(())
+ })
+ })
+ }
+ }
+ });
+ }
+
fn emit_ty(&mut self, ecx: &e::EncodeContext, ty: ty::t) {
self.emit_opaque(|this| Ok(e::write_type(ecx, this, ty)));
}
self.emit_from_vec(tys, |this, ty| Ok(this.emit_ty(ecx, *ty)));
}
+ fn emit_trait_ref(&mut self,
+ ecx: &e::EncodeContext,
+ trait_ref: &ty::TraitRef) {
+ self.emit_opaque(|this| Ok(e::write_trait_ref(ecx, this, trait_ref)));
+ }
+
fn emit_type_param_def(&mut self,
ecx: &e::EncodeContext,
type_param_def: &ty::TypeParameterDef) {
self.emit_enum("AutoAdjustment", |this| {
match *adj {
- ty::AutoAddEnv(store) => {
+ ty::AdjustAddEnv(store) => {
this.emit_enum_variant("AutoAddEnv", 0, 1, |this| {
this.emit_enum_variant_arg(0, |this| store.encode(this))
})
}
- ty::AutoDerefRef(ref auto_deref_ref) => {
+ ty::AdjustDerefRef(ref auto_deref_ref) => {
this.emit_enum_variant("AutoDerefRef", 1, 1, |this| {
this.emit_enum_variant_arg(0,
|this| Ok(this.emit_auto_deref_ref(ecx, auto_deref_ref)))
this.emit_enum_variant_arg(1, |this| idx.encode(this))
})
}
- ty::UnsizeVtable(ref b, def_id, ref substs) => {
- this.emit_enum_variant("UnsizeVtable", 2, 3, |this| {
+ ty::UnsizeVtable(ty::TyTrait { def_id: def_id,
+ bounds: ref b,
+ substs: ref substs },
+ self_ty) => {
+ this.emit_enum_variant("UnsizeVtable", 2, 4, |this| {
this.emit_enum_variant_arg(
0, |this| Ok(this.emit_existential_bounds(ecx, b)));
this.emit_enum_variant_arg(1, |this| def_id.encode(this));
- this.emit_enum_variant_arg(2, |this| Ok(this.emit_substs(ecx, substs)))
+ this.emit_enum_variant_arg(2, |this| Ok(this.emit_ty(ecx, self_ty)));
+ this.emit_enum_variant_arg(3, |this| Ok(this.emit_substs(ecx, substs)))
})
}
}
});
for freevar in fv.iter() {
- match freevars::get_capture_mode(tcx, id) {
- freevars::CaptureByRef => {
+ match tcx.capture_mode(id) {
+ ast::CaptureByRef => {
rbml_w.tag(c::tag_table_upvar_borrow_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
})
}
- for &dr in tcx.vtable_map.borrow().find(&method_call).iter() {
- rbml_w.tag(c::tag_table_vtable_map, |rbml_w| {
+ for &trait_ref in tcx.object_cast_map.borrow().find(&id).iter() {
+ rbml_w.tag(c::tag_table_object_cast_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
- encode_vtable_res_with_key(ecx, rbml_w, method_call.adjustment, dr);
+ rbml_w.emit_trait_ref(ecx, &**trait_ref);
})
})
}
})
})
}
-
- for &dr in tcx.vtable_map.borrow().find(&method_call).iter() {
- rbml_w.tag(c::tag_table_vtable_map, |rbml_w| {
- rbml_w.id(id);
- rbml_w.tag(c::tag_table_val, |rbml_w| {
- encode_vtable_res_with_key(ecx, rbml_w, method_call.adjustment, dr);
- })
- })
- }
}
- ty::AutoDerefRef(ref adj) => {
+ ty::AdjustDerefRef(ref adj) => {
assert!(!ty::adjust_is_object(adjustment));
for autoderef in range(0, adj.autoderefs) {
let method_call = MethodCall::autoderef(id, autoderef);
})
})
}
-
- for &dr in tcx.vtable_map.borrow().find(&method_call).iter() {
- rbml_w.tag(c::tag_table_vtable_map, |rbml_w| {
- rbml_w.id(id);
- rbml_w.tag(c::tag_table_val, |rbml_w| {
- encode_vtable_res_with_key(ecx, rbml_w,
- method_call.adjustment, dr);
- })
- })
- }
}
}
_ => {
}
trait rbml_decoder_decoder_helpers {
+ fn read_method_origin(&mut self, dcx: &DecodeContext) -> typeck::MethodOrigin;
fn read_ty(&mut self, dcx: &DecodeContext) -> ty::t;
fn read_tys(&mut self, dcx: &DecodeContext) -> Vec<ty::t>;
+ fn read_trait_ref(&mut self, dcx: &DecodeContext) -> Rc<ty::TraitRef>;
fn read_type_param_def(&mut self, dcx: &DecodeContext)
-> ty::TypeParameterDef;
fn read_polytype(&mut self, dcx: &DecodeContext)
cdata: &cstore::crate_metadata) -> Vec<ty::t> {
self.read_to_vec(|this| Ok(this.read_ty_nodcx(tcx, cdata)) )
.unwrap()
- .move_iter()
+ .into_iter()
.collect()
}
}).unwrap()
}
+ fn read_method_origin(&mut self, dcx: &DecodeContext)
+ -> typeck::MethodOrigin
+ {
+ self.read_enum("MethodOrigin", |this| {
+ let variants = ["MethodStatic", "MethodStaticUnboxedClosure",
+ "MethodTypeParam", "MethodTraitObject"];
+ this.read_enum_variant(variants, |this, i| {
+ Ok(match i {
+ 0 => {
+ let def_id = this.read_def_id(dcx);
+ typeck::MethodStatic(def_id)
+ }
+
+ 1 => {
+ let def_id = this.read_def_id(dcx);
+ typeck::MethodStaticUnboxedClosure(def_id)
+ }
+
+ 2 => {
+ this.read_struct("MethodTypeParam", 2, |this| {
+ Ok(typeck::MethodTypeParam(
+ typeck::MethodParam {
+ trait_ref: {
+ this.read_struct_field("trait_ref", 0, |this| {
+ Ok(this.read_trait_ref(dcx))
+ }).unwrap()
+ },
+ method_num: {
+ this.read_struct_field("method_num", 1, |this| {
+ this.read_uint()
+ }).unwrap()
+ }
+ }))
+ }).unwrap()
+ }
+
+ 3 => {
+ this.read_struct("MethodTraitObject", 2, |this| {
+ Ok(typeck::MethodTraitObject(
+ typeck::MethodObject {
+ trait_ref: {
+ this.read_struct_field("trait_ref", 0, |this| {
+ Ok(this.read_trait_ref(dcx))
+ }).unwrap()
+ },
+ object_trait_id: {
+ this.read_struct_field("object_trait_id", 1, |this| {
+ Ok(this.read_def_id(dcx))
+ }).unwrap()
+ },
+ method_num: {
+ this.read_struct_field("method_num", 2, |this| {
+ this.read_uint()
+ }).unwrap()
+ },
+ real_index: {
+ this.read_struct_field("real_index", 3, |this| {
+ this.read_uint()
+ }).unwrap()
+ },
+ }))
+ }).unwrap()
+ }
+
+ _ => fail!("..")
+ })
+ })
+ }).unwrap()
+ }
+
+
fn read_ty(&mut self, dcx: &DecodeContext) -> ty::t {
// Note: regions types embed local node ids. In principle, we
// should translate these node ids into the new decode
}
fn read_tys(&mut self, dcx: &DecodeContext) -> Vec<ty::t> {
- self.read_to_vec(|this| Ok(this.read_ty(dcx))).unwrap().move_iter().collect()
+ self.read_to_vec(|this| Ok(this.read_ty(dcx))).unwrap().into_iter().collect()
+ }
+
+ fn read_trait_ref(&mut self, dcx: &DecodeContext) -> Rc<ty::TraitRef> {
+ Rc::new(self.read_opaque(|this, doc| {
+ let ty = tydecode::parse_trait_ref_data(
+ doc.data,
+ dcx.cdata.cnum,
+ doc.start,
+ dcx.tcx,
+ |s, a| this.convert_def_id(dcx, s, a));
+ Ok(ty)
+ }).unwrap())
}
fn read_type_param_def(&mut self, dcx: &DecodeContext)
let store: ty::TraitStore =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
- ty::AutoAddEnv(store.tr(dcx))
+ ty::AdjustAddEnv(store.tr(dcx))
}
1 => {
let auto_deref_ref: ty::AutoDerefRef =
this.read_enum_variant_arg(0,
|this| Ok(this.read_auto_deref_ref(dcx))).unwrap();
- ty::AutoDerefRef(auto_deref_ref)
+ ty::AdjustDerefRef(auto_deref_ref)
}
_ => fail!("bad enum variant for ty::AutoAdjustment")
})
0, |this| Ok(this.read_existential_bounds(dcx))).unwrap();
let def_id: ast::DefId =
this.read_enum_variant_arg(1, |this| Decodable::decode(this)).unwrap();
- let substs = this.read_enum_variant_arg(2,
+ let self_ty =
+ this.read_enum_variant_arg(2, |this| Ok(this.read_ty(dcx))).unwrap();
+ let substs = this.read_enum_variant_arg(3,
|this| Ok(this.read_substs(dcx))).unwrap();
-
- ty::UnsizeVtable(b, def_id.tr(dcx), substs)
+ let ty_trait = ty::TyTrait { def_id: def_id.tr(dcx),
+ bounds: b,
+ substs: substs };
+ ty::UnsizeVtable(ty_trait, self_ty)
}
_ => fail!("bad enum variant for ty::UnsizeKind")
})
c::tag_table_freevars => {
let fv_info = val_dsr.read_to_vec(|val_dsr| {
Ok(val_dsr.read_freevar_entry(dcx))
- }).unwrap().move_iter().collect();
+ }).unwrap().into_iter().collect();
dcx.tcx.freevars.borrow_mut().insert(id, fv_info);
}
c::tag_table_upvar_borrow_map => {
};
dcx.tcx.method_map.borrow_mut().insert(method_call, method);
}
- c::tag_table_vtable_map => {
- let (adjustment, vtable_res) =
- val_dsr.read_vtable_res_with_key(dcx.tcx,
- dcx.cdata);
- let vtable_key = MethodCall {
- expr_id: id,
- adjustment: adjustment
- };
- dcx.tcx.vtable_map.borrow_mut().insert(vtable_key, vtable_res);
+ c::tag_table_object_cast_map => {
+ let trait_ref = val_dsr.read_trait_ref(dcx);
+ dcx.tcx.object_cast_map.borrow_mut()
+ .insert(id, trait_ref);
}
c::tag_table_adjustments => {
let adj: ty::AutoAdjustment = val_dsr.read_auto_adjustment(dcx);
codemap::Span {
lo: codemap::BytePos(0),
hi: codemap::BytePos(0),
- expn_info: None
+ expn_id: codemap::NO_EXPANSION
}
}
fn ident_of(&self, st: &str) -> ast::Ident {
true
}
- pub fn is_local_variable_or_arg(&self, cmt: mc::cmt) -> bool {
+ fn is_local_variable_or_arg(&self, cmt: mc::cmt) -> bool {
match cmt.cat {
- mc::cat_local(_) | mc::cat_arg(_) => true,
+ mc::cat_local(_) => true,
_ => false
}
}
debug!("mark_variable_as_used_mut(cmt={})", cmt.repr(this.tcx()));
match cmt.cat.clone() {
mc::cat_copied_upvar(mc::CopiedUpvar { upvar_id: id, .. }) |
- mc::cat_local(id) | mc::cat_arg(id) => {
+ mc::cat_local(id) => {
this.tcx().used_mut_nodes.borrow_mut().insert(id);
return;
}
}
mc::cat_rvalue(..) |
- mc::cat_local(..) |
- mc::cat_arg(..) => {
+ mc::cat_local(..) => {
None
}
mc::cat_rvalue(..) |
mc::cat_copied_upvar(..) | // L-Local
mc::cat_local(..) | // L-Local
- mc::cat_arg(..) | // L-Local
mc::cat_upvar(..) |
mc::cat_deref(_, _, mc::BorrowedPtr(..)) | // L-Deref-Borrowed
mc::cat_deref(_, _, mc::Implicit(..)) |
mc::cat_static_item => {
ty::ReStatic
}
- mc::cat_local(local_id) |
- mc::cat_arg(local_id) => {
+ mc::cat_local(local_id) => {
ty::ReScope(self.bccx.tcx.region_maps.var_scope(local_id))
}
mc::cat_deref(_, _, mc::UnsafePtr(..)) => {
} else {
Vec::new()
};
- for ge in grouped_errors.mut_iter() {
+ for ge in grouped_errors.iter_mut() {
if move_from_id == ge.move_from.id && error.move_to.is_some() {
debug!("appending move_to to list");
ge.move_to_places.push_all_move(move_to);
Safe
}
- mc::cat_local(local_id) |
- mc::cat_arg(local_id) => {
+ mc::cat_local(local_id) => {
// R-Variable, locally declared
let lp = Rc::new(LpVar(local_id));
SafeIf(lp.clone(), vec![lp])
None
}
- mc::cat_local(id) |
- mc::cat_arg(id) => {
+ mc::cat_local(id) => {
Some(Rc::new(LpVar(id)))
}
adj: &ty::AutoAdjustment)
-> mc::cmt {
let r = match *adj {
- ty::AutoDerefRef(
+ ty::AdjustDerefRef(
ty::AutoDerefRef {
autoderefs: autoderefs, ..}) => {
self.mc().cat_expr_autoderefd(expr, autoderefs)
}
- ty::AutoAddEnv(..) => {
+ ty::AdjustAddEnv(..) => {
// no autoderefs
self.mc().cat_expr_unadjusted(expr)
}
ast::ExprIndex(ref l, ref r) |
ast::ExprBinary(_, ref l, ref r) if self.is_method_call(expr) => {
- self.call(expr, pred, &**l, Some(&**r).move_iter())
+ self.call(expr, pred, &**l, Some(&**r).into_iter())
+ }
+
+ ast::ExprSlice(ref base, ref start, ref end, _) => {
+ self.call(expr,
+ pred,
+ &**base,
+ start.iter().chain(end.iter()).map(|x| &**x))
}
ast::ExprUnary(_, ref e) if self.is_method_call(expr) => {
ast::ExprParen(ref e) |
ast::ExprField(ref e, _, _) |
ast::ExprTupField(ref e, _, _) => {
- self.straightline(expr, pred, Some(&**e).move_iter())
+ self.straightline(expr, pred, Some(&**e).into_iter())
}
ast::ExprInlineAsm(ref inline_asm) => {
// except according to those terms.
-use driver::session::Session;
use middle::def::*;
-use middle::resolve;
use middle::ty;
use middle::typeck;
use util::ppaux;
use syntax::ast::*;
-use syntax::{ast_util, ast_map};
+use syntax::ast_util;
use syntax::visit::Visitor;
use syntax::visit;
match it.node {
ItemStatic(_, _, ref ex) => {
v.inside_const(|v| v.visit_expr(&**ex));
- check_item_recursion(&v.tcx.sess, &v.tcx.map, &v.tcx.def_map, it);
}
ItemEnum(ref enum_definition, _) => {
for var in (*enum_definition).variants.iter() {
}
visit::walk_expr(v, e);
}
-
-struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
- root_it: &'a Item,
- sess: &'a Session,
- ast_map: &'a ast_map::Map<'ast>,
- def_map: &'a resolve::DefMap,
- idstack: Vec<NodeId>
-}
-
-// Make sure a const item doesn't recursively refer to itself
-// FIXME: Should use the dependency graph when it's available (#1356)
-pub fn check_item_recursion<'a>(sess: &'a Session,
- ast_map: &'a ast_map::Map,
- def_map: &'a resolve::DefMap,
- it: &'a Item) {
-
- let mut visitor = CheckItemRecursionVisitor {
- root_it: it,
- sess: sess,
- ast_map: ast_map,
- def_map: def_map,
- idstack: Vec::new()
- };
- visitor.visit_item(it);
-}
-
-impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
- fn visit_item(&mut self, it: &Item) {
- if self.idstack.iter().any(|x| x == &(it.id)) {
- self.sess.span_fatal(self.root_it.span, "recursive constant");
- }
- self.idstack.push(it.id);
- visit::walk_item(self, it);
- self.idstack.pop();
- }
-
- fn visit_expr(&mut self, e: &Expr) {
- match e.node {
- ExprPath(..) => {
- match self.def_map.borrow().find(&e.id) {
- Some(&DefStatic(def_id, _)) if
- ast_util::is_local(def_id) => {
- self.visit_item(&*self.ast_map.expect_item(def_id.node));
- }
- _ => ()
- }
- },
- _ => ()
- }
- visit::walk_expr(self, e);
- }
-}
let total_width = column_widths.iter().map(|n| *n).sum() + column_count * 3 + 1;
let br = String::from_char(total_width, '+');
try!(write!(f, "{}\n", br));
- for row in pretty_printed_matrix.move_iter() {
+ for row in pretty_printed_matrix.into_iter() {
try!(write!(f, "+"));
- for (column, pat_str) in row.move_iter().enumerate() {
+ for (column, pat_str) in row.into_iter().enumerate() {
try!(write!(f, " "));
f.width = Some(*column_widths.get(column));
try!(f.pad(pat_str.as_slice()));
fn construct_witness(cx: &MatchCheckCtxt, ctor: &Constructor,
pats: Vec<&Pat>, left_ty: ty::t) -> P<Pat> {
let pats_len = pats.len();
- let mut pats = pats.move_iter().map(|p| P((*p).clone()));
+ let mut pats = pats.into_iter().map(|p| P((*p).clone()));
let pat = match ty::get(left_ty).sty {
ty::ty_tup(_) => PatTup(pats.collect()),
};
if is_structure {
let fields = ty::lookup_struct_fields(cx.tcx, vid);
- let field_pats: Vec<FieldPat> = fields.move_iter()
+ let field_pats: Vec<FieldPat> = fields.into_iter()
.zip(pats)
.filter(|&(_, ref pat)| pat.node != PatWild(PatWildSingle))
.map(|(field, pat)| FieldPat {
fn missing_constructor(cx: &MatchCheckCtxt, &Matrix(ref rows): &Matrix,
left_ty: ty::t, max_slice_length: uint) -> Option<Constructor> {
let used_constructors: Vec<Constructor> = rows.iter()
- .flat_map(|row| pat_constructors(cx, *row.get(0), left_ty, max_slice_length).move_iter())
+ .flat_map(|row| pat_constructors(cx, *row.get(0), left_ty, max_slice_length).into_iter())
.collect();
all_constructors(cx, left_ty, max_slice_length)
- .move_iter()
+ .into_iter()
.find(|c| !used_constructors.contains(c))
}
if constructors.is_empty() {
match missing_constructor(cx, matrix, left_ty, max_slice_length) {
None => {
- all_constructors(cx, left_ty, max_slice_length).move_iter().map(|c| {
+ all_constructors(cx, left_ty, max_slice_length).into_iter().map(|c| {
match is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness) {
UsefulWithWitness(pats) => UsefulWithWitness({
let arity = constructor_arity(cx, &c, left_ty);
});
vec![construct_witness(cx, &c, subpats, left_ty)]
};
- result.extend(pats.move_iter().skip(arity));
+ result.extend(pats.into_iter().skip(arity));
result
}),
result => result
let wild_pats = Vec::from_elem(arity, &DUMMY_WILD_PAT);
let enum_pat = construct_witness(cx, &constructor, wild_pats, left_ty);
let mut new_pats = vec![enum_pat];
- new_pats.extend(pats.move_iter());
+ new_pats.extend(pats.into_iter());
UsefulWithWitness(new_pats)
},
result => result
}
}
} else {
- constructors.move_iter().map(|c|
+ constructors.into_iter().map(|c|
is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness)
).find(|result| result != &NotUseful).unwrap_or(NotUseful)
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This compiler pass detects static items that refer to themselves
+// recursively.
+
+use driver::session::Session;
+use middle::resolve;
+use middle::def::DefStatic;
+
+use syntax::ast::{Crate, Expr, ExprPath, Item, ItemStatic, NodeId};
+use syntax::{ast_util, ast_map};
+use syntax::visit::Visitor;
+use syntax::visit;
+
+struct CheckCrateVisitor<'a, 'ast: 'a> {
+ sess: &'a Session,
+ def_map: &'a resolve::DefMap,
+ ast_map: &'a ast_map::Map<'ast>
+}
+
+impl<'v, 'a, 'ast> Visitor<'v> for CheckCrateVisitor<'a, 'ast> {
+ fn visit_item(&mut self, i: &Item) {
+ check_item(self, i);
+ }
+}
+
+pub fn check_crate<'ast>(sess: &Session,
+ krate: &Crate,
+ def_map: &resolve::DefMap,
+ ast_map: &ast_map::Map<'ast>) {
+ let mut visitor = CheckCrateVisitor {
+ sess: sess,
+ def_map: def_map,
+ ast_map: ast_map
+ };
+ visit::walk_crate(&mut visitor, krate);
+ sess.abort_if_errors();
+}
+
+fn check_item(v: &mut CheckCrateVisitor, it: &Item) {
+ match it.node {
+ ItemStatic(_, _, ref ex) => {
+ check_item_recursion(v.sess, v.ast_map, v.def_map, it);
+ visit::walk_expr(v, &**ex)
+ },
+ _ => visit::walk_item(v, it)
+ }
+}
+
+struct CheckItemRecursionVisitor<'a, 'ast: 'a> {
+ root_it: &'a Item,
+ sess: &'a Session,
+ ast_map: &'a ast_map::Map<'ast>,
+ def_map: &'a resolve::DefMap,
+ idstack: Vec<NodeId>
+}
+
+// Make sure a const item doesn't recursively refer to itself
+// FIXME: Should use the dependency graph when it's available (#1356)
+pub fn check_item_recursion<'a>(sess: &'a Session,
+ ast_map: &'a ast_map::Map,
+ def_map: &'a resolve::DefMap,
+ it: &'a Item) {
+
+ let mut visitor = CheckItemRecursionVisitor {
+ root_it: it,
+ sess: sess,
+ ast_map: ast_map,
+ def_map: def_map,
+ idstack: Vec::new()
+ };
+ visitor.visit_item(it);
+}
+
+impl<'a, 'ast, 'v> Visitor<'v> for CheckItemRecursionVisitor<'a, 'ast> {
+ fn visit_item(&mut self, it: &Item) {
+ if self.idstack.iter().any(|x| x == &(it.id)) {
+ self.sess.span_err(self.root_it.span, "recursive constant");
+ return;
+ }
+ self.idstack.push(it.id);
+ visit::walk_item(self, it);
+ self.idstack.pop();
+ }
+
+ fn visit_expr(&mut self, e: &Expr) {
+ match e.node {
+ ExprPath(..) => {
+ match self.def_map.borrow().find(&e.id) {
+ Some(&DefStatic(def_id, _)) if
+ ast_util::is_local(def_id) => {
+ self.visit_item(&*self.ast_map.expect_item(def_id.node));
+ }
+ _ => ()
+ }
+ },
+ _ => ()
+ }
+ visit::walk_expr(self, e);
+ }
+}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
- let gens = self.gens.mut_slice(start, end);
+ let gens = self.gens.slice_mut(start, end);
set_bit(gens, bit);
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
- let kills = self.kills.mut_slice(start, end);
+ let kills = self.kills.slice_mut(start, end);
set_bit(kills, bit);
}
}
if changed {
- let bits = self.kills.mut_slice(start, end);
+ let bits = self.kills.slice_mut(start, end);
debug!("{:s} add_kills_from_flow_exits flow_exit={} bits={} [before]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
bits.copy_from(orig_kills.as_slice());
fn reset(&mut self, bits: &mut [uint]) {
let e = if self.dfcx.oper.initial_value() {uint::MAX} else {0};
- for b in bits.mut_iter() {
+ for b in bits.iter_mut() {
*b = e;
}
}
let (start, end) = self.dfcx.compute_id_range(cfgidx);
let changed = {
// (scoping mutable borrow of self.dfcx.on_entry)
- let on_entry = self.dfcx.on_entry.mut_slice(start, end);
+ let on_entry = self.dfcx.on_entry.slice_mut(start, end);
bitwise(on_entry, pred_bits, &self.dfcx.oper)
};
if changed {
op: &Op) -> bool {
assert_eq!(out_vec.len(), in_vec.len());
let mut changed = false;
- for (out_elt, in_elt) in out_vec.mut_iter().zip(in_vec.iter()) {
+ for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec.iter()) {
let old_val = *out_elt;
let new_val = op.join(old_val, *in_elt);
*out_elt = new_val;
}
}
typeck::MethodStaticUnboxedClosure(_) => {}
- typeck::MethodParam(typeck::MethodParam {
- trait_id: trait_id,
+ typeck::MethodTypeParam(typeck::MethodParam {
+ trait_ref: ref trait_ref,
method_num: index,
..
- })
- | typeck::MethodObject(typeck::MethodObject {
- trait_id: trait_id,
+ }) |
+ typeck::MethodTraitObject(typeck::MethodObject {
+ trait_ref: ref trait_ref,
method_num: index,
..
}) => {
let trait_item = ty::trait_item(self.tcx,
- trait_id,
+ trait_ref.def_id,
index);
match trait_item {
ty::MethodTraitItem(method) => {
self.check_def_id(method.def_id);
}
+ ty::TypeTraitItem(typedef) => {
+ self.check_def_id(typedef.def_id);
+ }
}
}
}
ast::MethodImplItem(ref method) => {
visit::walk_block(self, method.pe_body());
}
+ ast::TypeImplItem(_) => {}
}
}
ast_map::NodeForeignItem(foreign_item) => {
}
let dead_code = lint::builtin::DEAD_CODE.name_lower();
- for attr in lint::gather_attrs(attrs).move_iter() {
+ for attr in lint::gather_attrs(attrs).into_iter() {
match attr {
Ok((ref name, lint::Allow, _))
if name.get() == dead_code.as_slice() => return true,
ast::MethodImplItem(ref method) => {
self.worklist.push(method.id);
}
+ ast::TypeImplItem(_) => {}
}
}
}
match self.tcx.inherent_impls.borrow().find(&local_def(id)) {
None => (),
Some(impl_list) => {
- for impl_did in impl_list.borrow().iter() {
+ for impl_did in impl_list.iter() {
for item_did in impl_items.get(impl_did).iter() {
if self.live_symbols.contains(&item_did.def_id()
.node) {
ast::ProvidedMethod(ref method) => {
visit::walk_block(self, &*method.pe_body())
}
- ast::RequiredMethod(_) => ()
+ ast::RequiredMethod(_) => {}
+ ast::TypeTraitItem(_) => {}
}
}
}
use syntax::ast;
use syntax::ast_util::local_def;
-use std::gc::Gc;
-
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum Def {
DefFn(ast::DefId, ast::FnStyle),
DefMod(ast::DefId),
DefForeignMod(ast::DefId),
DefStatic(ast::DefId, bool /* is_mutbl */),
- DefArg(ast::NodeId, ast::BindingMode),
- DefLocal(ast::NodeId, ast::BindingMode),
+ DefLocal(ast::NodeId),
DefVariant(ast::DefId /* enum */, ast::DefId /* variant */, bool /* is_structure */),
- DefTy(ast::DefId),
+ DefTy(ast::DefId, bool /* is_enum */),
+ DefAssociatedTy(ast::DefId),
DefTrait(ast::DefId),
DefPrimTy(ast::PrimTy),
DefTyParam(ParamSpace, ast::DefId, uint),
- DefBinding(ast::NodeId, ast::BindingMode),
DefUse(ast::DefId),
- DefUpvar(ast::NodeId, // id of closed over var
- Gc<Def>, // closed over def
+ DefUpvar(ast::NodeId, // id of closed over local
ast::NodeId, // expr node that creates the closure
- ast::NodeId), // id for the block/body of the closure expr
+ ast::NodeId), // block node for the closest enclosing proc
+ // or unboxed closure, DUMMY_NODE_ID otherwise
/// Note that if it's a tuple struct's definition, the node id of the ast::DefId
/// may either refer to the item definition's id or the StructDef.ctor_id.
match *self {
DefFn(id, _) | DefStaticMethod(id, _, _) | DefMod(id) |
DefForeignMod(id) | DefStatic(id, _) |
- DefVariant(_, id, _) | DefTy(id) | DefTyParam(_, id, _) |
- DefUse(id) | DefStruct(id) | DefTrait(id) | DefMethod(id, _) => {
+ DefVariant(_, id, _) | DefTy(id, _) | DefAssociatedTy(id) |
+ DefTyParam(_, id, _) | DefUse(id) | DefStruct(id) | DefTrait(id) |
+ DefMethod(id, _) => {
id
}
- DefArg(id, _) |
- DefLocal(id, _) |
+ DefLocal(id) |
DefSelfTy(id) |
- DefUpvar(id, _, _, _) |
- DefBinding(id, _) |
+ DefUpvar(id, _, _) |
DefRegion(id) |
DefTyParamBinder(id) |
DefLabel(id) => {
}
}
}
+
fn attempt_static(sess: &session::Session) -> Option<DependencyList> {
let crates = sess.cstore.get_used_crates(cstore::RequireStatic);
if crates.iter().all(|&(_, ref p)| p.is_some()) {
- Some(crates.move_iter().map(|_| Some(cstore::RequireStatic)).collect())
+ Some(crates.into_iter().map(|_| Some(cstore::RequireStatic)).collect())
} else {
None
}
use middle::mem_categorization as mc;
use middle::def;
-use middle::freevars;
+use middle::mem_categorization::Typer;
use middle::pat_util;
use middle::ty;
-use middle::typeck::{MethodCall, MethodObject, MethodOrigin, MethodParam};
+use middle::typeck::{MethodCall, MethodObject, MethodTraitObject};
+use middle::typeck::{MethodOrigin, MethodParam, MethodTypeParam};
use middle::typeck::{MethodStatic, MethodStaticUnboxedClosure};
use middle::typeck;
use util::ppaux::Repr;
ty::MethodTraitItem(ref method_descriptor) => {
(*method_descriptor).clone()
}
+ ty::TypeTraitItem(_) => {
+ tcx.sess.bug("overloaded call method wasn't in method map")
+ }
};
let impl_id = match method_descriptor.container {
ty::TraitContainer(_) => {
MethodStaticUnboxedClosure(def_id) => {
OverloadedCallType::from_unboxed_closure(tcx, def_id)
}
- MethodParam(ref method_param) => {
- OverloadedCallType::from_trait_id(tcx, method_param.trait_id)
- }
- MethodObject(ref method_object) => {
- OverloadedCallType::from_trait_id(tcx, method_object.trait_id)
+ MethodTypeParam(MethodParam { trait_ref: ref trait_ref, .. }) |
+ MethodTraitObject(MethodObject { trait_ref: ref trait_ref, .. }) => {
+ OverloadedCallType::from_trait_id(tcx, trait_ref.def_id)
}
}
}
ast::ExprPath(..) => { }
ast::ExprUnary(ast::UnDeref, ref base) => { // *base
- if !self.walk_overloaded_operator(expr, &**base, None) {
+ if !self.walk_overloaded_operator(expr, &**base, Vec::new()) {
self.select_from_expr(&**base);
}
}
}
ast::ExprIndex(ref lhs, ref rhs) => { // lhs[rhs]
- if !self.walk_overloaded_operator(expr, &**lhs, Some(&**rhs)) {
+ if !self.walk_overloaded_operator(expr, &**lhs, vec![&**rhs]) {
self.select_from_expr(&**lhs);
self.consume_expr(&**rhs);
}
}
+ ast::ExprSlice(ref base, ref start, ref end, _) => { // base[start..end]
+ let args = match (start, end) {
+ (&Some(ref e1), &Some(ref e2)) => vec![&**e1, &**e2],
+ (&Some(ref e), &None) => vec![&**e],
+ (&None, &Some(ref e)) => vec![&**e],
+ (&None, &None) => Vec::new()
+ };
+ let overloaded = self.walk_overloaded_operator(expr, &**base, args);
+ assert!(overloaded);
+ }
+
ast::ExprCall(ref callee, ref args) => { // callee(args)
self.walk_callee(expr, &**callee);
self.consume_exprs(args);
}
ast::ExprUnary(_, ref lhs) => {
- if !self.walk_overloaded_operator(expr, &**lhs, None) {
+ if !self.walk_overloaded_operator(expr, &**lhs, Vec::new()) {
self.consume_expr(&**lhs);
}
}
ast::ExprBinary(_, ref lhs, ref rhs) => {
- if !self.walk_overloaded_operator(expr, &**lhs, Some(&**rhs)) {
+ if !self.walk_overloaded_operator(expr, &**lhs, vec![&**rhs]) {
self.consume_expr(&**lhs);
self.consume_expr(&**rhs);
}
None => { }
Some(adjustment) => {
match *adjustment {
- ty::AutoAddEnv(..) => {
+ ty::AdjustAddEnv(..) => {
// Creating a closure consumes the input and stores it
// into the resulting rvalue.
debug!("walk_adjustment(AutoAddEnv)");
return_if_err!(self.mc.cat_expr_unadjusted(expr));
self.delegate_consume(expr.id, expr.span, cmt_unadjusted);
}
- ty::AutoDerefRef(ty::AutoDerefRef {
+ ty::AdjustDerefRef(ty::AutoDerefRef {
autoref: ref opt_autoref,
autoderefs: n
}) => {
fn walk_overloaded_operator(&mut self,
expr: &ast::Expr,
receiver: &ast::Expr,
- rhs: Option<&ast::Expr>)
+ rhs: Vec<&ast::Expr>)
-> bool
{
if !self.typer.is_method_call(expr.id) {
debug!("walk_captures({})", closure_expr.repr(self.tcx()));
let tcx = self.typer.tcx();
- freevars::with_freevars(tcx, closure_expr.id, |freevars| {
- match freevars::get_capture_mode(self.tcx(), closure_expr.id) {
- freevars::CaptureByRef => {
+ ty::with_freevars(tcx, closure_expr.id, |freevars| {
+ match self.tcx().capture_mode(closure_expr.id) {
+ ast::CaptureByRef => {
self.walk_by_ref_captures(closure_expr, freevars);
}
- freevars::CaptureByValue => {
+ ast::CaptureByValue => {
self.walk_by_value_captures(closure_expr, freevars);
}
}
fn walk_by_ref_captures(&mut self,
closure_expr: &ast::Expr,
- freevars: &[freevars::freevar_entry]) {
+ freevars: &[ty::Freevar]) {
for freevar in freevars.iter() {
let id_var = freevar.def.def_id().node;
let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
fn walk_by_value_captures(&mut self,
closure_expr: &ast::Expr,
- freevars: &[freevars::freevar_entry]) {
+ freevars: &[ty::Freevar]) {
for freevar in freevars.iter() {
let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id,
closure_expr.span,
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// A pass that annotates for each loops and functions with the free
-// variables that they contain.
-
-#![allow(non_camel_case_types)]
-
-use middle::def;
-use middle::mem_categorization::Typer;
-use middle::resolve;
-use middle::ty;
-use util::nodemap::{NodeMap, NodeSet};
-
-use syntax::ast;
-use syntax::codemap::Span;
-use syntax::visit::Visitor;
-use syntax::visit;
-
-#[deriving(Clone, Decodable, Encodable, Show)]
-pub enum CaptureMode {
- /// Copy/move the value from this llvm ValueRef into the environment.
- CaptureByValue,
-
- /// Access by reference (used for stack closures).
- CaptureByRef
-}
-
-// A vector of defs representing the free variables referred to in a function.
-// (The def_upvar will already have been stripped).
-#[deriving(Encodable, Decodable)]
-pub struct freevar_entry {
- pub def: def::Def, //< The variable being accessed free.
- pub span: Span //< First span where it is accessed (there can be multiple)
-}
-
-pub type freevar_map = NodeMap<Vec<freevar_entry>>;
-
-pub type CaptureModeMap = NodeMap<CaptureMode>;
-
-struct CollectFreevarsVisitor<'a> {
- seen: NodeSet,
- refs: Vec<freevar_entry>,
- def_map: &'a resolve::DefMap,
- capture_mode_map: &'a mut CaptureModeMap,
- depth: uint
-}
-
-impl<'a, 'v> Visitor<'v> for CollectFreevarsVisitor<'a> {
- fn visit_item(&mut self, _: &ast::Item) {
- // ignore_item
- }
-
- fn visit_expr(&mut self, expr: &ast::Expr) {
- match expr.node {
- ast::ExprProc(..) => {
- self.capture_mode_map.insert(expr.id, CaptureByValue);
- self.depth += 1;
- visit::walk_expr(self, expr);
- self.depth -= 1;
- }
- ast::ExprFnBlock(_, _, _) => {
- // NOTE(stage0): After snapshot, change to:
- //
- //let capture_mode = match capture_clause {
- // ast::CaptureByValue => CaptureByValue,
- // ast::CaptureByRef => CaptureByRef,
- //};
- let capture_mode = CaptureByRef;
- self.capture_mode_map.insert(expr.id, capture_mode);
- self.depth += 1;
- visit::walk_expr(self, expr);
- self.depth -= 1;
- }
- ast::ExprUnboxedFn(capture_clause, _, _, _) => {
- let capture_mode = match capture_clause {
- ast::CaptureByValue => CaptureByValue,
- ast::CaptureByRef => CaptureByRef,
- };
- self.capture_mode_map.insert(expr.id, capture_mode);
- self.depth += 1;
- visit::walk_expr(self, expr);
- self.depth -= 1;
- }
- ast::ExprPath(..) => {
- let mut def = *self.def_map.borrow().find(&expr.id)
- .expect("path not found");
- let mut i = 0;
- while i < self.depth {
- match def {
- def::DefUpvar(_, inner, _, _) => { def = *inner; }
- _ => break
- }
- i += 1;
- }
- if i == self.depth { // Made it to end of loop
- let dnum = def.def_id().node;
- if !self.seen.contains(&dnum) {
- self.refs.push(freevar_entry {
- def: def,
- span: expr.span,
- });
- self.seen.insert(dnum);
- }
- }
- }
- _ => visit::walk_expr(self, expr)
- }
- }
-}
-
-// Searches through part of the AST for all references to locals or
-// upvars in this frame and returns the list of definition IDs thus found.
-// Since we want to be able to collect upvars in some arbitrary piece
-// of the AST, we take a walker function that we invoke with a visitor
-// in order to start the search.
-fn collect_freevars(def_map: &resolve::DefMap,
- blk: &ast::Block,
- capture_mode_map: &mut CaptureModeMap)
- -> Vec<freevar_entry> {
- let mut v = CollectFreevarsVisitor {
- seen: NodeSet::new(),
- refs: Vec::new(),
- def_map: def_map,
- capture_mode_map: &mut *capture_mode_map,
- depth: 1
- };
-
- v.visit_block(blk);
-
- v.refs
-}
-
-struct AnnotateFreevarsVisitor<'a> {
- def_map: &'a resolve::DefMap,
- freevars: freevar_map,
- capture_mode_map: CaptureModeMap,
-}
-
-impl<'a, 'v> Visitor<'v> for AnnotateFreevarsVisitor<'a> {
- fn visit_fn(&mut self, fk: visit::FnKind<'v>, fd: &'v ast::FnDecl,
- blk: &'v ast::Block, s: Span, nid: ast::NodeId) {
- let vars = collect_freevars(self.def_map,
- blk,
- &mut self.capture_mode_map);
- self.freevars.insert(nid, vars);
- visit::walk_fn(self, fk, fd, blk, s);
- }
-}
-
-// Build a map from every function and for-each body to a set of the
-// freevars contained in it. The implementation is not particularly
-// efficient as it fully recomputes the free variables at every
-// node of interest rather than building up the free variables in
-// one pass. This could be improved upon if it turns out to matter.
-pub fn annotate_freevars(def_map: &resolve::DefMap, krate: &ast::Crate)
- -> (freevar_map, CaptureModeMap) {
- let mut visitor = AnnotateFreevarsVisitor {
- def_map: def_map,
- freevars: NodeMap::new(),
- capture_mode_map: NodeMap::new(),
- };
- visit::walk_crate(&mut visitor, krate);
-
- let AnnotateFreevarsVisitor {
- freevars,
- capture_mode_map,
- ..
- } = visitor;
- (freevars, capture_mode_map)
-}
-
-pub fn with_freevars<T>(tcx: &ty::ctxt, fid: ast::NodeId, f: |&[freevar_entry]| -> T) -> T {
- match tcx.freevars.borrow().find(&fid) {
- None => fail!("with_freevars: {} has no freevars", fid),
- Some(d) => f(d.as_slice())
- }
-}
-
-pub fn get_capture_mode<'tcx, T:Typer<'tcx>>(tcx: &T, closure_expr_id: ast::NodeId)
- -> CaptureMode {
- tcx.capture_mode(closure_expr_id)
-}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use middle::freevars::freevar_entry;
-use middle::freevars;
+use middle::mem_categorization::Typer;
use middle::subst;
-use middle::ty::ParameterEnvironment;
use middle::ty;
use middle::ty_fold::TypeFoldable;
use middle::ty_fold;
-use middle::typeck::check::vtable;
-use middle::typeck::{MethodCall, NoAdjustment};
-use middle::typeck;
-use util::ppaux::{Repr, ty_to_string};
+use util::ppaux::{ty_to_string};
use util::ppaux::UserString;
-use std::collections::HashSet;
use syntax::ast::*;
-use syntax::ast_util;
use syntax::attr;
use syntax::codemap::Span;
use syntax::print::pprust::{expr_to_string, ident_to_string};
use syntax::visit::Visitor;
use syntax::visit;
-// Kind analysis pass.
-//
-// There are several kinds defined by various operations. The most restrictive
-// kind is noncopyable. The noncopyable kind can be extended with any number
-// of the following attributes.
-//
-// Send: Things that can be sent on channels or included in spawned closures. It
-// includes scalar types as well as classes and unique types containing only
-// sendable types.
-// 'static: Things that do not contain references.
-//
-// This pass ensures that type parameters are only instantiated with types
-// whose kinds are equal or less general than the way the type parameter was
-// annotated (with the `Send` bound).
-//
-// It also verifies that noncopyable kinds are not copied. Sendability is not
-// applied, since none of our language primitives send. Instead, the sending
-// primitives in the stdlib are explicitly annotated to only take sendable
-// types.
+// Kind analysis pass. This pass does some ad-hoc checks that are more
+// convenient to do after type checking is complete and all checks are
+// known. These are generally related to the builtin bounds `Copy` and
+// `Sized`. Note that many of the builtin bound properties that used
+// to be checked here are actually checked by trait checking these
+// days.
-pub struct Context<'a, 'tcx: 'a> {
+pub struct Context<'a,'tcx:'a> {
tcx: &'a ty::ctxt<'tcx>,
- struct_and_enum_bounds_checked: HashSet<ty::t>,
- parameter_environments: Vec<ParameterEnvironment>,
}
impl<'a, 'tcx, 'v> Visitor<'v> for Context<'a, 'tcx> {
fn visit_pat(&mut self, p: &Pat) {
check_pat(self, p);
}
-
- fn visit_local(&mut self, l: &Local) {
- check_local(self, l);
- }
}
pub fn check_crate(tcx: &ty::ctxt) {
let mut ctx = Context {
tcx: tcx,
- struct_and_enum_bounds_checked: HashSet::new(),
- parameter_environments: Vec::new(),
};
visit::walk_crate(&mut ctx, tcx.map.krate());
tcx.sess.abort_if_errors();
.find(&trait_ref.ref_id)
.expect("trait ref not in def map!");
let trait_def_id = ast_trait_def.def_id();
- let trait_def = cx.tcx.trait_defs.borrow()
- .find_copy(&trait_def_id)
- .expect("trait def not in trait-defs map!");
-
- // If this trait has builtin-kind supertraits, meet them.
- let self_ty: ty::t = ty::node_id_to_type(cx.tcx, it.id);
- debug!("checking impl with self type {}", ty::get(self_ty).sty);
- check_builtin_bounds(
- cx, self_ty, trait_def.bounds.builtin_bounds,
- |missing| {
- span_err!(cx.tcx.sess, self_type.span, E0142,
- "the type `{}', which does not fulfill `{}`, \
- cannot implement this trait",
- ty_to_string(cx.tcx, self_ty), missing.user_string(cx.tcx));
- span_note!(cx.tcx.sess, self_type.span,
- "types implementing this trait must fulfill `{}`",
- trait_def.bounds.user_string(cx.tcx));
- });
// If this is a destructor, check kinds.
- if cx.tcx.lang_items.drop_trait() == Some(trait_def_id) {
+ if cx.tcx.lang_items.drop_trait() == Some(trait_def_id) &&
+ !attr::contains_name(it.attrs.as_slice(), "unsafe_destructor")
+ {
match self_type.node {
TyPath(_, ref bounds, path_node_id) => {
assert!(bounds.is_none());
}
fn check_item(cx: &mut Context, item: &Item) {
- if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
- match item.node {
- ItemImpl(_, ref trait_ref, ref self_type, _) => {
- let parameter_environment =
- ParameterEnvironment::for_item(cx.tcx, item.id);
- cx.parameter_environments.push(parameter_environment);
-
- // Check bounds on the `self` type.
- check_bounds_on_structs_or_enums_in_type_if_possible(
- cx,
- item.span,
- ty::node_id_to_type(cx.tcx, item.id));
-
- match trait_ref {
- &Some(ref trait_ref) => {
- check_impl_of_trait(cx, item, trait_ref, &**self_type);
-
- // Check bounds on the trait ref.
- match ty::impl_trait_ref(cx.tcx,
- ast_util::local_def(item.id)) {
- None => {}
- Some(trait_ref) => {
- check_bounds_on_structs_or_enums_in_trait_ref(
- cx,
- item.span,
- &*trait_ref);
-
- let trait_def = ty::lookup_trait_def(cx.tcx, trait_ref.def_id);
- for (ty, type_param_def) in trait_ref.substs.types
- .iter()
- .zip(trait_def.generics
- .types
- .iter()) {
- check_typaram_bounds(cx, item.span, *ty, type_param_def);
- }
- }
- }
- }
- &None => {}
- }
-
- drop(cx.parameter_environments.pop());
- }
- ItemEnum(..) => {
- let parameter_environment =
- ParameterEnvironment::for_item(cx.tcx, item.id);
- cx.parameter_environments.push(parameter_environment);
-
- let def_id = ast_util::local_def(item.id);
- for variant in ty::enum_variants(cx.tcx, def_id).iter() {
- for arg in variant.args.iter() {
- check_bounds_on_structs_or_enums_in_type_if_possible(
- cx,
- item.span,
- *arg)
- }
- }
-
- drop(cx.parameter_environments.pop());
- }
- ItemStruct(..) => {
- let parameter_environment =
- ParameterEnvironment::for_item(cx.tcx, item.id);
- cx.parameter_environments.push(parameter_environment);
-
- let def_id = ast_util::local_def(item.id);
- for field in ty::lookup_struct_fields(cx.tcx, def_id).iter() {
- check_bounds_on_structs_or_enums_in_type_if_possible(
- cx,
- item.span,
- ty::node_id_to_type(cx.tcx, field.id.node))
- }
-
- drop(cx.parameter_environments.pop());
-
- }
- ItemStatic(..) => {
- let parameter_environment =
- ParameterEnvironment::for_item(cx.tcx, item.id);
- cx.parameter_environments.push(parameter_environment);
-
- check_bounds_on_structs_or_enums_in_type_if_possible(
- cx,
- item.span,
- ty::node_id_to_type(cx.tcx, item.id));
-
- drop(cx.parameter_environments.pop());
- }
- _ => {}
+ match item.node {
+ ItemImpl(_, Some(ref trait_ref), ref self_type, _) => {
+ check_impl_of_trait(cx, item, trait_ref, &**self_type);
}
+ _ => {}
}
visit::walk_item(cx, item)
}
-fn check_local(cx: &mut Context, local: &Local) {
- check_bounds_on_structs_or_enums_in_type_if_possible(
- cx,
- local.span,
- ty::node_id_to_type(cx.tcx, local.id));
-
- visit::walk_local(cx, local)
-}
-
// Yields the appropriate function to check the kind of closed over
// variables. `id` is the NodeId for some expression that creates the
// closure.
fn with_appropriate_checker(cx: &Context,
id: NodeId,
- b: |checker: |&Context, &freevar_entry||) {
- fn check_for_uniq(cx: &Context, fv: &freevar_entry, bounds: ty::BuiltinBounds) {
+ fn_span: Span,
+ b: |checker: |&Context, &ty::Freevar||) {
+ fn check_for_uniq(cx: &Context,
+ fn_span: Span,
+ fv: &ty::Freevar,
+ bounds: ty::BuiltinBounds) {
// all captured data must be owned, regardless of whether it is
// moved in or copied in.
let id = fv.def.def_id().node;
let var_t = ty::node_id_to_type(cx.tcx, id);
- check_freevar_bounds(cx, fv.span, var_t, bounds, None);
+ check_freevar_bounds(cx, fn_span, fv.span, var_t, bounds, None);
}
- fn check_for_block(cx: &Context, fv: &freevar_entry,
- bounds: ty::BuiltinBounds, region: ty::Region) {
+ fn check_for_block(cx: &Context,
+ fn_span: Span,
+ fn_id: NodeId,
+ fv: &ty::Freevar,
+ bounds: ty::BuiltinBounds) {
let id = fv.def.def_id().node;
let var_t = ty::node_id_to_type(cx.tcx, id);
- // FIXME(#3569): Figure out whether the implicit borrow is actually
- // mutable. Currently we assume all upvars are referenced mutably.
- let implicit_borrowed_type = ty::mk_mut_rptr(cx.tcx, region, var_t);
- check_freevar_bounds(cx, fv.span, implicit_borrowed_type,
+ let upvar_id = ty::UpvarId { var_id: id, closure_expr_id: fn_id };
+ let upvar_borrow = cx.tcx.upvar_borrow(upvar_id);
+ let implicit_borrowed_type =
+ ty::mk_rptr(cx.tcx,
+ upvar_borrow.region,
+ ty::mt { mutbl: upvar_borrow.kind.to_mutbl_lossy(),
+ ty: var_t });
+ check_freevar_bounds(cx, fn_span, fv.span, implicit_borrowed_type,
bounds, Some(var_t));
}
- fn check_for_bare(cx: &Context, fv: &freevar_entry) {
+ fn check_for_bare(cx: &Context, fv: &ty::Freevar) {
span_err!(cx.tcx.sess, fv.span, E0143,
"can't capture dynamic environment in a fn item; \
use the || {} closure form instead", "{ ... }");
bounds: bounds,
..
}) => {
- b(|cx, fv| check_for_uniq(cx, fv, bounds.builtin_bounds))
+ b(|cx, fv| check_for_uniq(cx, fn_span, fv,
+ bounds.builtin_bounds))
}
ty::ty_closure(box ty::ClosureTy {
- store: ty::RegionTraitStore(region, _), bounds, ..
- }) => b(|cx, fv| check_for_block(cx, fv, bounds.builtin_bounds, region)),
+ store: ty::RegionTraitStore(..), bounds, ..
+ }) => {
+ b(|cx, fv| check_for_block(cx, fn_span, id, fv,
+ bounds.builtin_bounds))
+ }
ty::ty_bare_fn(_) => {
b(check_for_bare)
sp: Span,
fn_id: NodeId) {
- // Check kinds on free variables:
- with_appropriate_checker(cx, fn_id, |chk| {
- freevars::with_freevars(cx.tcx, fn_id, |freevars| {
+ // Check kinds on free variables:
+ with_appropriate_checker(cx, fn_id, sp, |chk| {
+ ty::with_freevars(cx.tcx, fn_id, |freevars| {
for fv in freevars.iter() {
chk(cx, fv);
}
match fk {
visit::FkFnBlock(..) => {
- let ty = ty::node_id_to_type(cx.tcx, fn_id);
- check_bounds_on_structs_or_enums_in_type_if_possible(cx, sp, ty);
-
visit::walk_fn(cx, fk, decl, body, sp)
}
visit::FkItemFn(..) | visit::FkMethod(..) => {
- let parameter_environment = ParameterEnvironment::for_item(cx.tcx,
- fn_id);
- cx.parameter_environments.push(parameter_environment);
-
- let ty = ty::node_id_to_type(cx.tcx, fn_id);
- check_bounds_on_structs_or_enums_in_type_if_possible(cx, sp, ty);
-
visit::walk_fn(cx, fk, decl, body, sp);
- drop(cx.parameter_environments.pop());
}
}
}
pub fn check_expr(cx: &mut Context, e: &Expr) {
debug!("kind::check_expr({})", expr_to_string(e));
- // Handle any kind bounds on type parameters
- check_bounds_on_type_parameters(cx, e);
-
- // Check bounds on structures or enumerations in the type of the
- // expression.
- let expression_type = ty::expr_ty(cx.tcx, e);
- check_bounds_on_structs_or_enums_in_type_if_possible(cx,
- e.span,
- expression_type);
-
match e.node {
- ExprCast(ref source, _) => {
- let source_ty = ty::expr_ty(cx.tcx, &**source);
- let target_ty = ty::expr_ty(cx.tcx, e);
- let method_call = MethodCall {
- expr_id: e.id,
- adjustment: NoAdjustment,
- };
- check_trait_cast(cx,
- source_ty,
- target_ty,
- source.span,
- method_call);
- }
ExprRepeat(ref element, ref count_expr) => {
let count = ty::eval_repeat_count(cx.tcx, &**count_expr);
if count > 1 {
_ => {}
}
- // Search for auto-adjustments to find trait coercions.
- match cx.tcx.adjustments.borrow().find(&e.id) {
- Some(adjustment) => {
- match adjustment {
- adj if ty::adjust_is_object(adj) => {
- let source_ty = ty::expr_ty(cx.tcx, e);
- let target_ty = ty::expr_ty_adjusted(cx.tcx, e);
- let method_call = MethodCall {
- expr_id: e.id,
- adjustment: typeck::AutoObject,
- };
- check_trait_cast(cx,
- source_ty,
- target_ty,
- e.span,
- method_call);
- }
- _ => {}
- }
- }
- None => {}
- }
-
visit::walk_expr(cx, e);
}
-fn check_bounds_on_type_parameters(cx: &mut Context, e: &Expr) {
- let method_map = cx.tcx.method_map.borrow();
- let method_call = typeck::MethodCall::expr(e.id);
- let method = method_map.find(&method_call);
-
- // Find the values that were provided (if any)
- let item_substs = cx.tcx.item_substs.borrow();
- let (types, is_object_call) = match method {
- Some(method) => {
- let is_object_call = match method.origin {
- typeck::MethodObject(..) => true,
- typeck::MethodStatic(..) |
- typeck::MethodStaticUnboxedClosure(..) |
- typeck::MethodParam(..) => false
- };
- (&method.substs.types, is_object_call)
- }
- None => {
- match item_substs.find(&e.id) {
- None => { return; }
- Some(s) => { (&s.substs.types, false) }
- }
- }
- };
-
- // Find the relevant type parameter definitions
- let def_map = cx.tcx.def_map.borrow();
- let type_param_defs = match e.node {
- ExprPath(_) => {
- let did = def_map.get_copy(&e.id).def_id();
- ty::lookup_item_type(cx.tcx, did).generics.types.clone()
- }
- _ => {
- // Type substitutions should only occur on paths and
- // method calls, so this needs to be a method call.
-
- // Even though the callee_id may have been the id with
- // node_type_substs, e.id is correct here.
- match method {
- Some(method) => {
- ty::method_call_type_param_defs(cx.tcx, method.origin)
- }
- None => {
- cx.tcx.sess.span_bug(e.span,
- "non path/method call expr has type substs??");
- }
- }
- }
- };
-
- // Check that the value provided for each definition meets the
- // kind requirements
- for type_param_def in type_param_defs.iter() {
- let ty = *types.get(type_param_def.space, type_param_def.index);
-
- // If this is a call to an object method (`foo.bar()` where
- // `foo` has a type like `Trait`), then the self type is
- // unknown (after all, this is a virtual call). In that case,
- // we will have put a ty_err in the substitutions, and we can
- // just skip over validating the bounds (because the bounds
- // would have been enforced when the object instance was
- // created).
- if is_object_call && type_param_def.space == subst::SelfSpace {
- assert_eq!(type_param_def.index, 0);
- assert!(ty::type_is_error(ty));
- continue;
- }
-
- debug!("type_param_def space={} index={} ty={}",
- type_param_def.space, type_param_def.index, ty.repr(cx.tcx));
- check_typaram_bounds(cx, e.span, ty, type_param_def)
- }
-
- // Check the vtable.
- let vtable_map = cx.tcx.vtable_map.borrow();
- let vtable_res = match vtable_map.find(&method_call) {
- None => return,
- Some(vtable_res) => vtable_res,
- };
- check_type_parameter_bounds_in_vtable_result(cx, e.span, vtable_res);
-}
-
-fn check_type_parameter_bounds_in_vtable_result(
- cx: &mut Context,
- span: Span,
- vtable_res: &typeck::vtable_res) {
- for origins in vtable_res.iter() {
- for origin in origins.iter() {
- let (type_param_defs, substs) = match *origin {
- typeck::vtable_static(def_id, ref tys, _) => {
- let type_param_defs =
- ty::lookup_item_type(cx.tcx, def_id).generics
- .types
- .clone();
- (type_param_defs, (*tys).clone())
- }
- _ => {
- // Nothing to do here.
- continue
- }
- };
- for type_param_def in type_param_defs.iter() {
- let typ = substs.types.get(type_param_def.space,
- type_param_def.index);
- check_typaram_bounds(cx, span, *typ, type_param_def)
- }
- }
- }
-}
-
-fn check_trait_cast(cx: &mut Context,
- source_ty: ty::t,
- target_ty: ty::t,
- span: Span,
- method_call: MethodCall) {
- match ty::get(target_ty).sty {
- ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ ty, .. }) => {
- match ty::get(ty).sty {
- ty::ty_trait(box ty::TyTrait { bounds, .. }) => {
- match cx.tcx.vtable_map.borrow().find(&method_call) {
- None => {
- cx.tcx.sess.span_bug(span,
- "trait cast not in vtable \
- map?!")
- }
- Some(vtable_res) => {
- check_type_parameter_bounds_in_vtable_result(
- cx,
- span,
- vtable_res)
- }
- };
- check_trait_cast_bounds(cx, span, source_ty,
- bounds.builtin_bounds);
- }
- _ => {}
- }
- }
- _ => {}
- }
-}
-
fn check_ty(cx: &mut Context, aty: &Ty) {
match aty.node {
TyPath(_, _, id) => {
});
}
-fn check_bounds_on_structs_or_enums_in_type_if_possible(cx: &mut Context,
- span: Span,
- ty: ty::t) {
- // If we aren't in a function, structure, or enumeration context, we don't
- // have enough information to ensure that bounds on structures or
- // enumerations are satisfied. So we don't perform the check.
- if cx.parameter_environments.len() == 0 {
- return
- }
-
- // If we've already checked for this type, don't do it again. This
- // massively speeds up kind checking.
- if cx.struct_and_enum_bounds_checked.contains(&ty) {
- return
- }
- cx.struct_and_enum_bounds_checked.insert(ty);
-
- ty::walk_ty(ty, |ty| {
- match ty::get(ty).sty {
- ty::ty_struct(type_id, ref substs) |
- ty::ty_enum(type_id, ref substs) => {
- let polytype = ty::lookup_item_type(cx.tcx, type_id);
-
- // Check builtin bounds.
- for (ty, type_param_def) in substs.types
- .iter()
- .zip(polytype.generics
- .types
- .iter()) {
- check_typaram_bounds(cx, span, *ty, type_param_def);
- }
-
- // Check trait bounds.
- let parameter_environment =
- cx.parameter_environments.get(cx.parameter_environments
- .len() - 1);
- debug!(
- "check_bounds_on_structs_or_enums_in_type_if_possible(): \
- checking {}",
- ty.repr(cx.tcx));
- vtable::check_param_bounds(cx.tcx,
- span,
- parameter_environment,
- &polytype.generics.types,
- substs,
- |missing| {
- cx.tcx
- .sess
- .span_err(span,
- format!("instantiating a type parameter with \
- an incompatible type `{}`, which \
- does not fulfill `{}`",
- ty_to_string(cx.tcx, ty),
- missing.user_string(
- cx.tcx)).as_slice());
- })
- }
- _ => {}
- }
- });
-}
-
-fn check_bounds_on_structs_or_enums_in_trait_ref(cx: &mut Context,
- span: Span,
- trait_ref: &ty::TraitRef) {
- for ty in trait_ref.substs.types.iter() {
- check_bounds_on_structs_or_enums_in_type_if_possible(cx, span, *ty)
- }
-}
-
-pub fn check_freevar_bounds(cx: &Context, sp: Span, ty: ty::t,
+pub fn check_freevar_bounds(cx: &Context, fn_span: Span, sp: Span, ty: ty::t,
bounds: ty::BuiltinBounds, referenced_ty: Option<ty::t>)
{
check_builtin_bounds(cx, ty, bounds, |missing| {
ty_to_string(cx.tcx, ty), missing.user_string(cx.tcx));
}
}
- span_note!(cx.tcx.sess, sp,
+ span_note!(cx.tcx.sess, fn_span,
"this closure's environment must satisfy `{}`",
bounds.user_string(cx.tcx));
});
}
}
+ pub fn from_builtin_kind(&self, bound: ty::BuiltinBound)
+ -> Result<ast::DefId, String>
+ {
+ match bound {
+ ty::BoundSend => self.require(SendTraitLangItem),
+ ty::BoundSized => self.require(SizedTraitLangItem),
+ ty::BoundCopy => self.require(CopyTraitLangItem),
+ ty::BoundSync => self.require(SyncTraitLangItem),
+ }
+ }
+
pub fn to_builtin_kind(&self, id: ast::DefId) -> Option<ty::BuiltinBound> {
if Some(id) == self.send_trait() {
Some(ty::BoundSend)
ShrTraitLangItem, "shr", shr_trait;
IndexTraitLangItem, "index", index_trait;
IndexMutTraitLangItem, "index_mut", index_mut_trait;
+ SliceTraitLangItem, "slice", slice_trait;
+ SliceMutTraitLangItem, "slice_mut", slice_mut_trait;
UnsafeTypeLangItem, "unsafe", unsafe_type;
BeginUnwindLangItem, "begin_unwind", begin_unwind;
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
- ClosureExchangeMallocFnLangItem, "closure_exchange_malloc", closure_exchange_malloc_fn;
ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn;
MallocFnLangItem, "malloc", malloc_fn;
FreeFnLangItem, "free", free_fn;
*/
use middle::def::*;
-use middle::freevars;
use middle::mem_categorization::Typer;
use middle::pat_util;
use middle::ty;
use std::rc::Rc;
use std::str;
use std::uint;
+use syntax::ast;
use syntax::ast::*;
use syntax::codemap::{BytePos, original_sp, Span};
use syntax::parse::token::special_idents;
LoopLoop,
/// A `while` loop, with the given expression as condition.
WhileLoop(&'a Expr),
- /// A `for` loop.
- ForLoop,
+ /// A `for` loop, with the given pattern to bind.
+ ForLoop(&'a Pat),
}
#[deriving(PartialEq)]
b: &'v Block, s: Span, n: NodeId) {
visit_fn(self, fk, fd, b, s, n);
}
- fn visit_local(&mut self, l: &Local) { visit_local(self, l); }
+ fn visit_local(&mut self, l: &ast::Local) { visit_local(self, l); }
fn visit_expr(&mut self, ex: &Expr) { visit_expr(self, ex); }
fn visit_arm(&mut self, a: &Arm) { visit_arm(self, a); }
}
fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, n: NodeId) {
check_fn(self, fk, fd, b, s, n);
}
- fn visit_local(&mut self, l: &Local) {
+ fn visit_local(&mut self, l: &ast::Local) {
check_local(self, l);
}
fn visit_expr(&mut self, ex: &Expr) {
lsets.warn_about_unused_args(decl, entry_ln);
}
-fn visit_local(ir: &mut IrMaps, local: &Local) {
+fn visit_local(ir: &mut IrMaps, local: &ast::Local) {
pat_util::pat_bindings(&ir.tcx.def_map, &*local.pat, |_, p_id, sp, path1| {
debug!("adding local variable {}", p_id);
let name = path1.node;
visit::walk_arm(ir, arm);
}
-fn moved_variable_node_id_from_def(def: Def) -> Option<NodeId> {
- match def {
- DefBinding(nid, _) |
- DefArg(nid, _) |
- DefLocal(nid, _) => Some(nid),
-
- _ => None
- }
-}
-
fn visit_expr(ir: &mut IrMaps, expr: &Expr) {
match expr.node {
// live nodes required for uses or definitions of variables:
ExprPath(_) => {
let def = ir.tcx.def_map.borrow().get_copy(&expr.id);
debug!("expr {}: path that leads to {:?}", expr.id, def);
- if moved_variable_node_id_from_def(def).is_some() {
- ir.add_live_node_for_node(expr.id, ExprNode(expr.span));
+ match def {
+ DefLocal(..) => ir.add_live_node_for_node(expr.id, ExprNode(expr.span)),
+ _ => {}
}
visit::walk_expr(ir, expr);
}
// in better error messages than just pointing at the closure
// construction site.
let mut call_caps = Vec::new();
- freevars::with_freevars(ir.tcx, expr.id, |freevars| {
+ ty::with_freevars(ir.tcx, expr.id, |freevars| {
for fv in freevars.iter() {
- match moved_variable_node_id_from_def(fv.def) {
- Some(rv) => {
+ match fv.def {
+ DefLocal(rv) => {
let fv_ln = ir.add_live_node(FreeVarNode(fv.span));
call_caps.push(CaptureInfo {ln: fv_ln,
var_nid: rv});
}
- None => {}
+ _ => {}
}
}
});
// otherwise, live nodes are not required:
ExprIndex(..) | ExprField(..) | ExprTupField(..) | ExprVec(..) |
- ExprCall(..) | ExprMethodCall(..) | ExprTup(..) |
+ ExprCall(..) | ExprMethodCall(..) | ExprTup(..) | ExprSlice(..) |
ExprBinary(..) | ExprAddrOf(..) |
ExprCast(..) | ExprUnary(..) | ExprBreak(_) |
ExprAgain(_) | ExprLit(_) | ExprRet(..) | ExprBlock(..) |
}
}
- fn propagate_through_local(&mut self, local: &Local, succ: LiveNode)
+ fn propagate_through_local(&mut self, local: &ast::Local, succ: LiveNode)
-> LiveNode {
// Note: we mark the variable as defined regardless of whether
// there is an initializer. Initially I had thought to only mark
self.propagate_through_loop(expr, WhileLoop(&**cond), &**blk, succ)
}
- ExprForLoop(_, ref head, ref blk, _) => {
- let ln = self.propagate_through_loop(expr, ForLoop, &**blk, succ);
+ ExprForLoop(ref pat, ref head, ref blk, _) => {
+ let ln = self.propagate_through_loop(expr, ForLoop(&**pat), &**blk, succ);
self.propagate_through_expr(&**head, ln)
}
self.propagate_through_expr(&**l, r_succ)
}
+ ExprSlice(ref e1, ref e2, ref e3, _) => {
+ let succ = e3.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
+ let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
+ self.propagate_through_expr(&**e1, succ)
+ }
+
ExprAddrOf(_, ref e) |
ExprCast(ref e, _) |
ExprUnary(_, ref e) |
fn access_path(&mut self, expr: &Expr, succ: LiveNode, acc: uint)
-> LiveNode {
- let def = self.ir.tcx.def_map.borrow().get_copy(&expr.id);
- match moved_variable_node_id_from_def(def) {
- Some(nid) => {
+ match self.ir.tcx.def_map.borrow().get_copy(&expr.id) {
+ DefLocal(nid) => {
let ln = self.live_node(expr.id, expr.span);
if acc != 0u {
self.init_from_succ(ln, succ);
}
ln
}
- None => succ
+ _ => succ
}
}
expr.id, block_to_string(body));
let cond_ln = match kind {
- LoopLoop | ForLoop => ln,
+ LoopLoop => ln,
+ ForLoop(ref pat) => self.define_bindings_in_pat(*pat, ln),
WhileLoop(ref cond) => self.propagate_through_expr(&**cond, ln),
};
let body_ln = self.with_loop_nodes(expr.id, succ, ln, |this| {
first_merge = false;
let new_cond_ln = match kind {
- LoopLoop | ForLoop => ln,
+ LoopLoop => ln,
+ ForLoop(ref pat) => {
+ self.define_bindings_in_pat(*pat, ln)
+ }
WhileLoop(ref cond) => {
self.propagate_through_expr(&**cond, ln)
}
// _______________________________________________________________________
// Checking for error conditions
-fn check_local(this: &mut Liveness, local: &Local) {
+fn check_local(this: &mut Liveness, local: &ast::Local) {
match local.init {
Some(_) => {
this.warn_about_unused_or_dead_vars_in_pat(&*local.pat);
visit::walk_expr(this, expr);
}
+ ExprForLoop(ref pat, _, _, _) => {
+ this.pat_bindings(&**pat, |this, ln, var, sp, id| {
+ this.warn_about_unused(sp, id, ln, var);
+ });
+ }
+
// no correctness conditions related to liveness
ExprCall(..) | ExprMethodCall(..) | ExprIf(..) | ExprMatch(..) |
ExprWhile(..) | ExprLoop(..) | ExprIndex(..) | ExprField(..) |
ExprTupField(..) | ExprVec(..) | ExprTup(..) | ExprBinary(..) |
ExprCast(..) | ExprUnary(..) | ExprRet(..) | ExprBreak(..) |
- ExprAgain(..) | ExprLit(_) | ExprBlock(..) |
+ ExprAgain(..) | ExprLit(_) | ExprBlock(..) | ExprSlice(..) |
ExprMac(..) | ExprAddrOf(..) | ExprStruct(..) | ExprRepeat(..) |
ExprParen(..) | ExprFnBlock(..) | ExprProc(..) | ExprUnboxedFn(..) |
- ExprPath(..) | ExprBox(..) | ExprForLoop(..) => {
+ ExprPath(..) | ExprBox(..) => {
visit::walk_expr(this, expr);
}
}
sp, "not all control paths return a value");
if ends_with_stmt {
let last_stmt = body.stmts.last().unwrap();
- let original_span = original_sp(last_stmt.span, sp);
+ let original_span = original_sp(self.ir.tcx.sess.codemap(),
+ last_stmt.span, sp);
let span_semicolon = Span {
lo: original_span.hi - BytePos(1),
hi: original_span.hi,
- expn_info: original_span.expn_info
+ expn_id: original_span.expn_id
};
self.ir.tcx.sess.span_note(
span_semicolon, "consider removing this semicolon:");
match expr.node {
ExprPath(_) => {
match self.ir.tcx.def_map.borrow().get_copy(&expr.id) {
- DefLocal(nid, _) => {
+ DefLocal(nid) => {
// Assignment to an immutable variable or argument: only legal
// if there is no later assignment. If this local is actually
// mutable, then check for a reassignment to flag the mutability
let var = self.variable(nid, expr.span);
self.warn_about_dead_assign(expr.span, expr.id, ln, var);
}
- def => {
- match moved_variable_node_id_from_def(def) {
- Some(nid) => {
- let ln = self.live_node(expr.id, expr.span);
- let var = self.variable(nid, expr.span);
- self.warn_about_dead_assign(expr.span, expr.id, ln, var);
- }
- None => {}
- }
- }
+ _ => {}
}
}
#![allow(non_camel_case_types)]
use middle::def;
-use middle::freevars;
use middle::ty;
use middle::typeck;
use util::nodemap::{DefIdMap, NodeMap};
use syntax::ast::{MutImmutable, MutMutable};
use syntax::ast;
+use syntax::ast_map;
use syntax::codemap::Span;
use syntax::print::pprust;
use syntax::parse::token;
cat_copied_upvar(CopiedUpvar), // upvar copied into proc env
cat_upvar(ty::UpvarId, ty::UpvarBorrow), // by ref upvar from stack closure
cat_local(ast::NodeId), // local variable
- cat_arg(ast::NodeId), // formal argument
cat_deref(cmt, uint, PointerKind), // deref of a ptr
cat_interior(cmt, InteriorKind), // something interior: field, tuple, etc
cat_downcast(cmt), // selects a particular enum variant (*1)
fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option<ast::NodeId>;
fn upvar_borrow(&self, upvar_id: ty::UpvarId) -> ty::UpvarBorrow;
fn capture_mode(&self, closure_expr_id: ast::NodeId)
- -> freevars::CaptureMode;
+ -> ast::CaptureClause;
fn unboxed_closures<'a>(&'a self)
-> &'a RefCell<DefIdMap<ty::UnboxedClosure>>;
}
}
}
- fn from_def(def: &def::Def) -> MutabilityCategory {
- match *def {
- def::DefFn(..) | def::DefStaticMethod(..) | def::DefSelfTy(..) |
- def::DefMod(..) | def::DefForeignMod(..) | def::DefVariant(..) |
- def::DefTy(..) | def::DefTrait(..) | def::DefPrimTy(..) |
- def::DefTyParam(..) | def::DefUse(..) | def::DefStruct(..) |
- def::DefTyParamBinder(..) | def::DefRegion(..) | def::DefLabel(..) |
- def::DefMethod(..) => fail!("no MutabilityCategory for def: {}", *def),
-
- def::DefStatic(_, false) => McImmutable,
- def::DefStatic(_, true) => McDeclared,
-
- def::DefArg(_, binding_mode) |
- def::DefBinding(_, binding_mode) |
- def::DefLocal(_, binding_mode) => match binding_mode {
- ast::BindByValue(ast::MutMutable) => McDeclared,
- _ => McImmutable
+ fn from_local(tcx: &ty::ctxt, id: ast::NodeId) -> MutabilityCategory {
+ match tcx.map.get(id) {
+ ast_map::NodeLocal(p) | ast_map::NodeArg(p) => match p.node {
+ ast::PatIdent(bind_mode, _, _) => {
+ if bind_mode == ast::BindByValue(ast::MutMutable) {
+ McDeclared
+ } else {
+ McImmutable
+ }
+ }
+ _ => tcx.sess.span_bug(p.span, "expected identifier pattern")
},
-
- def::DefUpvar(_, def, _, _) => MutabilityCategory::from_def(&*def)
+ _ => tcx.sess.span_bug(tcx.map.span(id), "expected identifier pattern")
}
}
Some(adjustment) => {
match *adjustment {
- ty::AutoAddEnv(..) => {
+ ty::AdjustAddEnv(..) => {
// Convert a bare fn to a closure by adding NULL env.
// Result is an rvalue.
let expr_ty = if_ok!(self.expr_ty_adjusted(expr));
Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty))
}
- ty::AutoDerefRef(
+ ty::AdjustDerefRef(
ty::AutoDerefRef {
autoref: Some(_), ..}) => {
// Equivalent to &*expr or something similar.
Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty))
}
- ty::AutoDerefRef(
+ ty::AdjustDerefRef(
ty::AutoDerefRef {
autoref: None, autoderefs: autoderefs}) => {
// Equivalent to *expr or something similar.
ast::ExprAssign(..) | ast::ExprAssignOp(..) |
ast::ExprFnBlock(..) | ast::ExprProc(..) |
ast::ExprUnboxedFn(..) | ast::ExprRet(..) |
- ast::ExprUnary(..) |
+ ast::ExprUnary(..) | ast::ExprSlice(..) |
ast::ExprMethodCall(..) | ast::ExprCast(..) |
ast::ExprVec(..) | ast::ExprTup(..) | ast::ExprIf(..) |
ast::ExprBinary(..) | ast::ExprWhile(..) |
Ok(self.cat_rvalue_node(id, span, expr_ty))
}
def::DefMod(_) | def::DefForeignMod(_) | def::DefUse(_) |
- def::DefTrait(_) | def::DefTy(_) | def::DefPrimTy(_) |
+ def::DefTrait(_) | def::DefTy(..) | def::DefPrimTy(_) |
def::DefTyParam(..) | def::DefTyParamBinder(..) | def::DefRegion(_) |
- def::DefLabel(_) | def::DefSelfTy(..) | def::DefMethod(..) => {
+ def::DefLabel(_) | def::DefSelfTy(..) | def::DefMethod(..) |
+ def::DefAssociatedTy(..) => {
Ok(Rc::new(cmt_ {
id:id,
span:span,
}))
}
- def::DefStatic(_, _) => {
+ def::DefStatic(_, mutbl) => {
Ok(Rc::new(cmt_ {
id:id,
span:span,
cat:cat_static_item,
- mutbl: MutabilityCategory::from_def(&def),
+ mutbl: if mutbl { McDeclared } else { McImmutable},
ty:expr_ty
}))
}
- def::DefArg(vid, _) => {
- // Idea: make this could be rewritten to model by-ref
- // stuff as `&const` and `&mut`?
-
- Ok(Rc::new(cmt_ {
- id: id,
- span: span,
- cat: cat_arg(vid),
- mutbl: MutabilityCategory::from_def(&def),
- ty:expr_ty
- }))
- }
-
- def::DefUpvar(var_id, _, fn_node_id, _) => {
+ def::DefUpvar(var_id, fn_node_id, _) => {
let ty = if_ok!(self.node_ty(fn_node_id));
match ty::get(ty).sty {
ty::ty_closure(ref closure_ty) => {
onceness: closure_ty.onceness,
capturing_proc: fn_node_id,
}),
- mutbl: MutabilityCategory::from_def(&def),
+ mutbl: MutabilityCategory::from_local(self.tcx(), var_id),
ty:expr_ty
}))
}
onceness: onceness,
capturing_proc: fn_node_id,
}),
- mutbl: MutabilityCategory::from_def(&def),
+ mutbl: MutabilityCategory::from_local(self.tcx(), var_id),
ty: expr_ty
}))
}
}
}
- def::DefLocal(vid, _) |
- def::DefBinding(vid, _) => {
- // by-value/by-ref bindings are local variables
+ def::DefLocal(vid) => {
Ok(Rc::new(cmt_ {
id: id,
span: span,
cat: cat_local(vid),
- mutbl: MutabilityCategory::from_def(&def),
+ mutbl: MutabilityCategory::from_local(self.tcx(), vid),
ty: expr_ty
}))
}
cat_rvalue(..) => {
"non-lvalue".to_string()
}
- cat_local(_) => {
- "local variable".to_string()
- }
- cat_arg(..) => {
- "argument".to_string()
+ cat_local(vid) => {
+ match self.tcx().map.find(vid) {
+ Some(ast_map::NodeArg(_)) => {
+ "argument".to_string()
+ }
+ _ => "local variable".to_string()
+ }
}
cat_deref(ref base, _, pk) => {
match base.cat {
cat_static_item |
cat_copied_upvar(..) |
cat_local(..) |
- cat_arg(..) |
cat_deref(_, _, UnsafePtr(..)) |
cat_deref(_, _, GcPtr(..)) |
cat_deref(_, _, BorrowedPtr(..)) |
cat_rvalue(..) |
cat_local(..) |
cat_upvar(..) |
- cat_arg(_) |
cat_deref(_, _, UnsafePtr(..)) => { // yes, it's aliasable, but...
None
}
cat_rvalue(..) |
cat_copied_upvar(..) |
cat_local(..) |
- cat_upvar(..) |
- cat_arg(..) => {
+ cat_upvar(..) => {
format!("{:?}", *self)
}
cat_deref(ref cmt, derefs, ptr) => {
identifier: Ident::new(elem.name()),
lifetimes: vec!(),
types: OwnedSlice::empty()
- }).move_iter().collect(),
+ }).into_iter().collect(),
span: DUMMY_SP,
})
}
use lint;
use middle::resolve;
use middle::ty;
-use middle::typeck::{MethodCall, MethodMap, MethodOrigin, MethodParam};
-use middle::typeck::{MethodStatic, MethodStaticUnboxedClosure, MethodObject};
+use middle::typeck::{MethodCall, MethodMap, MethodOrigin, MethodParam, MethodTypeParam};
+use middle::typeck::{MethodStatic, MethodStaticUnboxedClosure, MethodObject, MethodTraitObject};
use util::nodemap::{NodeMap, NodeSet};
use syntax::ast;
ast::ItemTrait(_, _, _, ref methods) if item.vis != ast::Public => {
for m in methods.iter() {
match *m {
- ast::ProvidedMethod(ref m) => self.parents.insert(m.id, item.id),
- ast::RequiredMethod(ref m) => self.parents.insert(m.id, item.id),
+ ast::ProvidedMethod(ref m) => {
+ self.parents.insert(m.id, item.id);
+ }
+ ast::RequiredMethod(ref m) => {
+ self.parents.insert(m.id, item.id);
+ }
+ ast::TypeTraitItem(_) => {}
};
}
}
self.exported_items.insert(method.id);
}
}
+ ast::TypeImplItem(_) => {}
}
}
}
debug!("required {}", m.id);
self.exported_items.insert(m.id);
}
+ ast::TypeTraitItem(ref t) => {
+ debug!("typedef {}", t.id);
+ self.exported_items.insert(t.id);
+ }
}
}
}
}
}
}
+ Some(&ty::TypeTraitItem(ref typedef)) => {
+ match typedef.container {
+ ty::TraitContainer(id) => {
+ debug!("privacy - recursing on trait {:?}", id);
+ self.def_privacy(id)
+ }
+ ty::ImplContainer(id) => {
+ match ty::impl_trait_ref(self.tcx, id) {
+ Some(t) => {
+ debug!("privacy - impl of trait {:?}", id);
+ self.def_privacy(t.def_id)
+ }
+ None => {
+ debug!("privacy - found a typedef {:?}",
+ typedef.vis);
+ if typedef.vis == ast::Public {
+ Allowable
+ } else {
+ ExternallyDenied
+ }
+ }
+ }
+ }
+ }
+ }
None => {
debug!("privacy - nope, not even a method");
ExternallyDenied
_ => m.pe_vis()
}
}
+ ast::TypeImplItem(_) => return Allowable,
}
}
Some(ast_map::NodeTraitItem(_)) => {
ty::MethodTraitItem(method_type) => {
method_type.provided_source.unwrap_or(method_id)
}
+ ty::TypeTraitItem(_) => method_id,
};
let string = token::get_ident(name);
def::DefFn(..) => ck("function"),
def::DefStatic(..) => ck("static"),
def::DefVariant(..) => ck("variant"),
- def::DefTy(..) => ck("type"),
+ def::DefTy(_, false) => ck("type"),
+ def::DefTy(_, true) => ck("enum"),
def::DefTrait(..) => ck("trait"),
def::DefStruct(..) => ck("struct"),
def::DefMethod(_, Some(..)) => ck("trait method"),
}
// Checks that a method is in scope.
- fn check_method(&mut self, span: Span, origin: MethodOrigin,
+ fn check_method(&mut self, span: Span, origin: &MethodOrigin,
ident: ast::Ident) {
- match origin {
+ match *origin {
MethodStatic(method_id) => {
self.check_static_method(span, method_id, ident)
}
MethodStaticUnboxedClosure(_) => {}
// Trait methods are always all public. The only controlling factor
// is whether the trait itself is accessible or not.
- MethodParam(MethodParam { trait_id: trait_id, .. }) |
- MethodObject(MethodObject { trait_id: trait_id, .. }) => {
- self.report_error(self.ensure_public(span, trait_id, None,
- "source trait"));
+ MethodTypeParam(MethodParam { trait_ref: ref trait_ref, .. }) |
+ MethodTraitObject(MethodObject { trait_ref: ref trait_ref, .. }) => {
+ self.report_error(self.ensure_public(span, trait_ref.def_id,
+ None, "source trait"));
}
}
}
}
Some(method) => {
debug!("(privacy checking) checking impl method");
- self.check_method(expr.span, method.origin, ident.node);
+ self.check_method(expr.span, &method.origin, ident.node);
}
}
}
ast::MethodImplItem(ref m) => {
check_inherited(m.span, m.pe_vis(), "");
}
+ ast::TypeImplItem(_) => {}
}
}
}
check_inherited(m.span, m.vis,
"unnecessary visibility");
}
+ ast::TypeTraitItem(_) => {}
}
}
}
ast::MethodImplItem(ref m) => {
check_inherited(tcx, m.span, m.pe_vis());
}
+ ast::TypeImplItem(_) => {}
}
}
}
ast::RequiredMethod(..) => {}
ast::ProvidedMethod(ref m) => check_inherited(tcx, m.span,
m.pe_vis()),
+ ast::TypeTraitItem(_) => {}
}
}
}
ast::MethodImplItem(ref m) => {
self.exported_items.contains(&m.id)
}
+ ast::TypeImplItem(_) => false,
}
});
ast::MethodImplItem(ref method) => {
visit::walk_method_helper(self, &**method)
}
+ ast::TypeImplItem(_) => {}
}
}
}
visit::walk_method_helper(self, &**method);
}
}
+ ast::TypeImplItem(_) => {}
}
}
if found_pub_static {
match *trait_method {
ast::RequiredMethod(_) => false,
ast::ProvidedMethod(_) => true,
+ ast::TypeTraitItem(_) => false,
}
}
Some(ast_map::NodeImplItem(impl_item)) => {
}
}
}
+ ast::TypeImplItem(_) => false,
}
}
Some(_) => false,
// Keep going, nothing to get exported
}
ast::ProvidedMethod(ref method) => {
- visit::walk_block(self, &*method.pe_body())
+ visit::walk_block(self, &*method.pe_body());
}
+ ast::TypeTraitItem(_) => {}
}
}
ast_map::NodeImplItem(impl_item) => {
visit::walk_block(self, method.pe_body())
}
}
+ ast::TypeImplItem(_) => {}
}
}
// Nothing to recurse on for these
use middle::pat_util::pat_bindings;
use middle::subst::{ParamSpace, FnSpace, TypeSpace};
use middle::ty::{ExplicitSelfCategory, StaticExplicitSelfCategory};
-use util::nodemap::{NodeMap, DefIdSet, FnvHashMap};
+use middle::ty::{CaptureModeMap, Freevar, FreevarMap};
+use util::nodemap::{NodeMap, NodeSet, DefIdSet, FnvHashMap};
use syntax::ast::{Arm, BindByRef, BindByValue, BindingMode, Block, Crate, CrateNum};
use syntax::ast::{DeclItem, DefId, Expr, ExprAgain, ExprBreak, ExprField};
use syntax::ast::{ForeignItem, ForeignItemFn, ForeignItemStatic, Generics};
use syntax::ast::{Ident, ImplItem, Item, ItemEnum, ItemFn, ItemForeignMod};
use syntax::ast::{ItemImpl, ItemMac, ItemMod, ItemStatic, ItemStruct};
-use syntax::ast::{ItemTrait, ItemTy, LOCAL_CRATE, Local, Method};
+use syntax::ast::{ItemTrait, ItemTy, LOCAL_CRATE, Local};
use syntax::ast::{MethodImplItem, Mod, Name, NamedField, NodeId};
use syntax::ast::{Pat, PatEnum, PatIdent, PatLit};
use syntax::ast::{PatRange, PatStruct, Path, PathListIdent, PathListMod};
use syntax::ast::{StructVariantKind, TraitRef, TraitTyParamBound};
use syntax::ast::{TupleVariantKind, Ty, TyBool, TyChar, TyClosure, TyF32};
use syntax::ast::{TyF64, TyFloat, TyI, TyI8, TyI16, TyI32, TyI64, TyInt};
-use syntax::ast::{TyParam, TyParamBound, TyPath, TyPtr, TyProc, TyRptr};
-use syntax::ast::{TyStr, TyU, TyU8, TyU16, TyU32, TyU64, TyUint};
-use syntax::ast::{UnboxedFnTyParamBound, UnnamedField, UnsafeFn, Variant};
-use syntax::ast::{ViewItem, ViewItemExternCrate, ViewItemUse, ViewPathGlob};
-use syntax::ast::{ViewPathList, ViewPathSimple, Visibility};
+use syntax::ast::{TyParam, TyParamBound, TyPath, TyPtr, TyProc, TyQPath};
+use syntax::ast::{TyRptr, TyStr, TyU, TyU8, TyU16, TyU32, TyU64, TyUint};
+use syntax::ast::{TypeImplItem, UnboxedFnTyParamBound, UnnamedField};
+use syntax::ast::{UnsafeFn, Variant, ViewItem, ViewItemExternCrate};
+use syntax::ast::{ViewItemUse, ViewPathGlob, ViewPathList, ViewPathSimple};
+use syntax::ast::{Visibility};
use syntax::ast;
use syntax::ast_util::{PostExpansionMethod, local_def, walk_pat};
+use syntax::ast_util;
use syntax::attr::AttrMetaMethods;
use syntax::ext::mtwt;
use syntax::parse::token::special_names;
use std::collections::{HashMap, HashSet};
use std::cell::{Cell, RefCell};
-use std::gc::GC;
use std::mem::replace;
use std::rc::{Rc, Weak};
use std::uint;
RibKind)
}
-// The rib kind controls the translation of argument or local definitions
-// (`def_arg` or `def_local`) to upvars (`def_upvar`).
+// The rib kind controls the translation of local
+// definitions (`DefLocal`) to upvars (`DefUpvar`).
enum RibKind {
// No translation needs to be applied.
NormalRibKind,
- // We passed through a function scope at the given node ID. Translate
- // upvars as appropriate.
- FunctionRibKind(NodeId /* func id */, NodeId /* body id */),
+ // We passed through a closure scope at the given node ID.
+ // Translate upvars as appropriate.
+ ClosureRibKind(NodeId /* func id */, NodeId /* body id if proc or unboxed */),
// We passed through an impl or trait and are now in one of its
// methods. Allow references to ty params that impl or trait
pub enum TraitItemKind {
NonstaticMethodTraitItemKind,
StaticMethodTraitItemKind,
+ TypeTraitItemKind,
}
impl TraitItemKind {
primitive_type_table: PrimitiveTypeTable,
def_map: DefMap,
+ freevars: RefCell<FreevarMap>,
+ freevars_seen: RefCell<NodeMap<NodeSet>>,
+ capture_mode_map: RefCell<CaptureModeMap>,
export_map2: ExportMap2,
trait_map: TraitMap,
external_exports: ExternalExports,
primitive_type_table: PrimitiveTypeTable::new(),
def_map: RefCell::new(NodeMap::new()),
+ freevars: RefCell::new(NodeMap::new()),
+ freevars_seen: RefCell::new(NodeMap::new()),
+ capture_mode_map: RefCell::new(NodeMap::new()),
export_map2: RefCell::new(NodeMap::new()),
trait_map: NodeMap::new(),
used_imports: HashSet::new(),
sp);
name_bindings.define_type
- (DefTy(local_def(item.id)), sp, is_public);
+ (DefTy(local_def(item.id), false), sp, is_public);
parent
}
sp);
name_bindings.define_type
- (DefTy(local_def(item.id)), sp, is_public);
+ (DefTy(local_def(item.id), true), sp, is_public);
for variant in (*enum_definition).variants.iter() {
self.build_reduced_graph_for_variant(
let name_bindings = self.add_child(ident, parent.clone(), forbid, sp);
// Define a name in the type namespace.
- name_bindings.define_type(DefTy(local_def(item.id)), sp, is_public);
+ name_bindings.define_type(DefTy(local_def(item.id), false), sp, is_public);
// If this is a newtype or unit-like struct, define a name
// in the value namespace as well
method.span,
is_public);
}
+ TypeImplItem(ref typedef) => {
+ // Add the typedef to the module.
+ let ident = typedef.ident;
+ let typedef_name_bindings =
+ self.add_child(
+ ident,
+ new_parent.clone(),
+ ForbidDuplicateTypesAndModules,
+ typedef.span);
+ let def = DefAssociatedTy(local_def(
+ typedef.id));
+ let is_public = typedef.vis ==
+ ast::Public;
+ typedef_name_bindings.define_type(
+ def,
+ typedef.span,
+ is_public);
+ }
}
}
}
// Add the names of all the methods to the trait info.
for method in methods.iter() {
- let (m_id, m_ident, m_fn_style, m_self, m_span) = match *method {
- ast::RequiredMethod(ref m) => {
- (m.id, m.ident, m.fn_style, &m.explicit_self, m.span)
- }
- ast::ProvidedMethod(ref m) => {
- (m.id, m.pe_ident(), m.pe_fn_style(), m.pe_explicit_self(), m.span)
- }
- };
+ let (ident, kind) = match *method {
+ ast::RequiredMethod(_) |
+ ast::ProvidedMethod(_) => {
+ let ty_m =
+ ast_util::trait_item_to_ty_method(method);
+
+ let ident = ty_m.ident;
+
+ // Add it as a name in the trait module.
+ let (def, static_flag) = match ty_m.explicit_self
+ .node {
+ SelfStatic => {
+ // Static methods become
+ // `def_static_method`s.
+ (DefStaticMethod(
+ local_def(ty_m.id),
+ FromTrait(local_def(item.id)),
+ ty_m.fn_style),
+ StaticMethodTraitItemKind)
+ }
+ _ => {
+ // Non-static methods become
+ // `def_method`s.
+ (DefMethod(local_def(ty_m.id),
+ Some(local_def(item.id))),
+ NonstaticMethodTraitItemKind)
+ }
+ };
- // Add it as a name in the trait module.
- let (def, static_flag) = match m_self.node {
- SelfStatic => {
- // Static methods become `def_static_method`s.
- (DefStaticMethod(local_def(m_id),
- FromTrait(local_def(item.id)),
- m_fn_style),
- StaticMethodTraitItemKind)
+ let method_name_bindings =
+ self.add_child(ident,
+ module_parent.clone(),
+ ForbidDuplicateTypesAndValues,
+ ty_m.span);
+ method_name_bindings.define_value(def,
+ ty_m.span,
+ true);
+
+ (ident, static_flag)
}
- _ => {
- // Non-static methods become `def_method`s.
- (DefMethod(local_def(m_id),
- Some(local_def(item.id))),
- NonstaticMethodTraitItemKind)
+ ast::TypeTraitItem(ref associated_type) => {
+ let def = DefAssociatedTy(local_def(
+ associated_type.id));
+
+ let name_bindings =
+ self.add_child(associated_type.ident,
+ module_parent.clone(),
+ ForbidDuplicateTypesAndValues,
+ associated_type.span);
+ name_bindings.define_type(def,
+ associated_type.span,
+ true);
+
+ (associated_type.ident, TypeTraitItemKind)
}
};
- let method_name_bindings =
- self.add_child(m_ident,
- module_parent.clone(),
- ForbidDuplicateValues,
- m_span);
- method_name_bindings.define_value(def, m_span, true);
-
self.trait_item_map
.borrow_mut()
- .insert((m_ident.name, def_id), static_flag);
+ .insert((ident.name, def_id), kind);
}
name_bindings.define_type(DefTrait(def_id), sp, is_public);
}
// Constructs the reduced graph for one variant. Variants exist in the
- // type and/or value namespaces.
+ // type and value namespaces.
fn build_reduced_graph_for_variant(&mut self,
variant: &Variant,
item_id: DefId,
parent: ReducedGraphParent,
is_public: bool) {
let ident = variant.node.name;
-
- match variant.node.kind {
- TupleVariantKind(_) => {
- let child = self.add_child(ident, parent, ForbidDuplicateValues, variant.span);
- child.define_value(DefVariant(item_id,
- local_def(variant.node.id), false),
- variant.span, is_public);
- }
+ let is_exported = match variant.node.kind {
+ TupleVariantKind(_) => false,
StructVariantKind(_) => {
- let child = self.add_child(ident, parent,
- ForbidDuplicateTypesAndValues,
- variant.span);
- child.define_type(DefVariant(item_id,
- local_def(variant.node.id), true),
- variant.span, is_public);
-
// Not adding fields for variants as they are not accessed with a self receiver
self.structs.insert(local_def(variant.node.id), Vec::new());
+ true
}
- }
+ };
+
+ let child = self.add_child(ident, parent,
+ ForbidDuplicateTypesAndValues,
+ variant.span);
+ child.define_value(DefVariant(item_id,
+ local_def(variant.node.id), is_exported),
+ variant.span, is_public);
+ child.define_type(DefVariant(item_id,
+ local_def(variant.node.id), is_exported),
+ variant.span, is_public);
}
/// Constructs the reduced graph for one 'view item'. View items consist
match def {
DefMod(def_id) | DefForeignMod(def_id) | DefStruct(def_id) |
- DefTy(def_id) => {
+ DefTy(def_id, _) => {
let type_def = child_name_bindings.type_def.borrow().clone();
match type_def {
Some(TypeNsDef { module_def: Some(module_def), .. }) => {
is_public,
DUMMY_SP)
}
- DefTy(_) => {
+ DefTy(..) | DefAssociatedTy(..) => {
debug!("(building reduced graph for external \
crate) building type {}", final_ident);
ignoring {:?}", def);
// Ignored; handled elsewhere.
}
- DefArg(..) | DefLocal(..) | DefPrimTy(..) |
- DefTyParam(..) | DefBinding(..) |
+ DefLocal(..) | DefPrimTy(..) | DefTyParam(..) |
DefUse(..) | DefUpvar(..) | DefRegion(..) |
DefTyParamBinder(..) | DefLabel(..) | DefSelfTy(..) => {
fail!("didn't expect `{:?}`", def);
import_span: Span,
name: Name,
namespace: Namespace) {
- if self.session.features.import_shadowing.get() {
+ if self.session.features.borrow().import_shadowing {
return
}
&mut ImportResolution,
import_span: Span,
name: Name) {
- if self.session.features.import_shadowing.get() {
+ if self.session.features.borrow().import_shadowing {
return
}
module: &Module,
name: Name,
span: Span) {
- if self.session.features.import_shadowing.get() {
+ if self.session.features.borrow().import_shadowing {
return
}
module: &Module,
name: Name,
span: Span) {
- if self.session.features.import_shadowing.get() {
+ if self.session.features.borrow().import_shadowing {
return
}
self.current_module = orig_module;
}
- /// Wraps the given definition in the appropriate number of `def_upvar`
+ /// Wraps the given definition in the appropriate number of `DefUpvar`
/// wrappers.
fn upvarify(&self,
ribs: &[Rib],
- rib_index: uint,
def_like: DefLike,
span: Span)
-> Option<DefLike> {
- let mut def;
- let is_ty_param;
-
match def_like {
- DlDef(d @ DefLocal(..)) | DlDef(d @ DefUpvar(..)) |
- DlDef(d @ DefArg(..)) | DlDef(d @ DefBinding(..)) => {
- def = d;
- is_ty_param = false;
- }
- DlDef(d @ DefTyParam(..)) |
- DlDef(d @ DefSelfTy(..)) => {
- def = d;
- is_ty_param = true;
- }
- _ => {
- return Some(def_like);
- }
- }
+ DlDef(d @ DefUpvar(..)) => {
+ self.session.span_bug(span,
+ format!("unexpected {} in bindings", d).as_slice())
+ }
+ DlDef(d @ DefLocal(_)) => {
+ let node_id = d.def_id().node;
+ let mut def = d;
+ let mut last_proc_body_id = ast::DUMMY_NODE_ID;
+ for rib in ribs.iter() {
+ match rib.kind {
+ NormalRibKind => {
+ // Nothing to do. Continue.
+ }
+ ClosureRibKind(function_id, maybe_proc_body) => {
+ let prev_def = def;
+ if maybe_proc_body != ast::DUMMY_NODE_ID {
+ last_proc_body_id = maybe_proc_body;
+ }
+ def = DefUpvar(node_id, function_id, last_proc_body_id);
- let mut rib_index = rib_index + 1;
- while rib_index < ribs.len() {
- match ribs[rib_index].kind {
- NormalRibKind => {
- // Nothing to do. Continue.
- }
- FunctionRibKind(function_id, body_id) => {
- if !is_ty_param {
- def = DefUpvar(def.def_id().node,
- box(GC) def,
- function_id,
- body_id);
- }
- }
- MethodRibKind(item_id, _) => {
- // If the def is a ty param, and came from the parent
- // item, it's ok
- match def {
- DefTyParam(_, did, _) if {
- self.def_map.borrow().find(&did.node).map(|x| *x)
- == Some(DefTyParamBinder(item_id))
- } => {
- // ok
- }
+ let mut seen = self.freevars_seen.borrow_mut();
+ let seen = seen.find_or_insert(function_id, NodeSet::new());
+ if seen.contains(&node_id) {
+ continue;
+ }
+ self.freevars.borrow_mut().find_or_insert(function_id, vec![])
+ .push(Freevar { def: prev_def, span: span });
+ seen.insert(node_id);
+ }
+ MethodRibKind(item_id, _) => {
+ // If the def is a ty param, and came from the parent
+ // item, it's ok
+ match def {
+ DefTyParam(_, did, _) if {
+ self.def_map.borrow().find_copy(&did.node)
+ == Some(DefTyParamBinder(item_id))
+ } => {} // ok
+ DefSelfTy(did) if did == item_id => {} // ok
+ _ => {
+ // This was an attempt to access an upvar inside a
+ // named function item. This is not allowed, so we
+ // report an error.
- DefSelfTy(did) if {
- did == item_id
- } => {
- // ok
- }
+ self.resolve_error(
+ span,
+ "can't capture dynamic environment in a fn item; \
+ use the || { ... } closure form instead");
- _ => {
- if !is_ty_param {
- // This was an attempt to access an upvar inside a
- // named function item. This is not allowed, so we
- // report an error.
+ return None;
+ }
+ }
+ }
+ ItemRibKind => {
+ // This was an attempt to access an upvar inside a
+ // named function item. This is not allowed, so we
+ // report an error.
- self.resolve_error(
- span,
- "can't capture dynamic environment in a fn item; \
- use the || { ... } closure form instead");
- } else {
- // This was an attempt to use a type parameter outside
- // its scope.
+ self.resolve_error(
+ span,
+ "can't capture dynamic environment in a fn item; \
+ use the || { ... } closure form instead");
- self.resolve_error(span,
- "can't use type parameters from \
- outer function; try using a local \
- type parameter instead");
- }
+ return None;
+ }
+ ConstantItemRibKind => {
+ // Still doesn't deal with upvars
+ self.resolve_error(span,
+ "attempt to use a non-constant \
+ value in a constant");
- return None;
+ }
}
- }
}
- ItemRibKind => {
- if !is_ty_param {
- // This was an attempt to access an upvar inside a
- // named function item. This is not allowed, so we
- // report an error.
+ Some(DlDef(def))
+ }
+ DlDef(def @ DefTyParam(..)) |
+ DlDef(def @ DefSelfTy(..)) => {
+ for rib in ribs.iter() {
+ match rib.kind {
+ NormalRibKind | ClosureRibKind(..) => {
+ // Nothing to do. Continue.
+ }
+ MethodRibKind(item_id, _) => {
+ // If the def is a ty param, and came from the parent
+ // item, it's ok
+ match def {
+ DefTyParam(_, did, _) if {
+ self.def_map.borrow().find_copy(&did.node)
+ == Some(DefTyParamBinder(item_id))
+ } => {} // ok
+ DefSelfTy(did) if did == item_id => {} // ok
+
+ _ => {
+ // This was an attempt to use a type parameter outside
+ // its scope.
+
+ self.resolve_error(span,
+ "can't use type parameters from \
+ outer function; try using a local \
+ type parameter instead");
+
+ return None;
+ }
+ }
+ }
+ ItemRibKind => {
+ // This was an attempt to use a type parameter outside
+ // its scope.
- self.resolve_error(
- span,
- "can't capture dynamic environment in a fn item; \
- use the || { ... } closure form instead");
- } else {
- // This was an attempt to use a type parameter outside
- // its scope.
+ self.resolve_error(span,
+ "can't use type parameters from \
+ outer function; try using a local \
+ type parameter instead");
- self.resolve_error(span,
- "can't use type parameters from \
- outer function; try using a local \
- type parameter instead");
- }
+ return None;
+ }
+ ConstantItemRibKind => {
+ // see #9186
+ self.resolve_error(span,
+ "cannot use an outer type \
+ parameter in this context");
- return None;
- }
- ConstantItemRibKind => {
- if is_ty_param {
- // see #9186
- self.resolve_error(span,
- "cannot use an outer type \
- parameter in this context");
- } else {
- // Still doesn't deal with upvars
- self.resolve_error(span,
- "attempt to use a non-constant \
- value in a constant");
+ }
}
-
}
+ Some(DlDef(def))
}
-
- rib_index += 1;
+ _ => Some(def_like)
}
-
- return Some(DlDef(def));
}
fn search_ribs(&self,
name: Name,
span: Span)
-> Option<DefLike> {
- // FIXME #4950: This should not use a while loop.
// FIXME #4950: Try caching?
- let mut i = ribs.len();
- while i != 0 {
- i -= 1;
- let binding_opt = ribs[i].bindings.borrow().find_copy(&name);
- match binding_opt {
+ for (i, rib) in ribs.iter().enumerate().rev() {
+ match rib.bindings.borrow().find_copy(&name) {
Some(def_like) => {
- return self.upvarify(ribs, i, def_like, span);
+ return self.upvarify(ribs.slice_from(i + 1), def_like, span);
}
None => {
// Continue.
}
}
- return None;
+ None
}
fn resolve_crate(&mut self, krate: &ast::Crate) {
ProvidedMethod(m.id)),
&**m)
}
+ ast::TypeTraitItem(_) => {
+ visit::walk_trait_item(this, method);
+ }
}
}
});
self.resolve_trait_reference(id, tref, reference_type)
}
UnboxedFnTyParamBound(ref unboxed_function) => {
+ match self.resolve_path(unboxed_function.ref_id,
+ &unboxed_function.path,
+ TypeNS,
+ true) {
+ None => {
+ let path_str = self.path_idents_to_string(
+ &unboxed_function.path);
+ self.resolve_error(unboxed_function.path.span,
+ format!("unresolved trait `{}`",
+ path_str).as_slice())
+ }
+ Some(def) => {
+ match def {
+ (DefTrait(_), _) => {
+ self.record_def(unboxed_function.ref_id, def);
+ }
+ _ => {
+ let msg =
+ format!("`{}` is not a trait",
+ self.path_idents_to_string(
+ &unboxed_function.path));
+ self.resolve_error(unboxed_function.path.span,
+ msg.as_slice());
+ }
+ }
+ }
+ }
+
for argument in unboxed_function.decl.inputs.iter() {
self.resolve_type(&*argument.ty);
}
// If it's a typedef, give a note
match def {
- DefTy(_) => {
+ DefTy(..) => {
self.session.span_note(
trait_reference.path.span,
format!("`type` aliases cannot \
Some(ref t) => match t.node {
TyPath(ref path, None, path_id) => {
match this.resolve_path(id, path, TypeNS, true) {
- Some((DefTy(def_id), lp)) if this.structs.contains_key(&def_id) => {
+ Some((DefTy(def_id, _), lp)) if this.structs.contains_key(&def_id) => {
let def = DefStruct(def_id);
debug!("(resolving struct) resolved `{}` to type {:?}",
token::get_ident(path.segments
// to be NormalRibKind?
fn resolve_method(&mut self,
rib_kind: RibKind,
- method: &Method) {
+ method: &ast::Method) {
let method_generics = method.pe_generics();
let type_parameters = HasTypeParameters(method_generics,
FnSpace,
ProvidedMethod(method.id)),
&**method);
}
+ TypeImplItem(ref typedef) => {
+                        // If this is a trait impl, ensure the
+                        // associated type exists in the trait
+ this.check_trait_item(typedef.ident,
+ typedef.span);
+
+ this.resolve_type(&*typedef.typ);
+ }
}
}
});
});
}
+ TyQPath(ref qpath) => {
+ self.resolve_type(&*qpath.for_type);
+
+ let current_module = self.current_module.clone();
+ let module_path_idents: Vec<_> =
+ qpath.trait_name
+ .segments
+ .iter()
+ .map(|ps| ps.identifier)
+ .collect();
+ match self.resolve_module_path(
+ current_module,
+ module_path_idents.as_slice(),
+ UseLexicalScope,
+ qpath.trait_name.span,
+ PathSearch) {
+ Success((ref module, _)) if module.kind.get() ==
+ TraitModuleKind => {
+ match self.resolve_definition_of_name_in_module(
+ (*module).clone(),
+ qpath.item_name.name,
+ TypeNS) {
+ ChildNameDefinition(def, lp) |
+ ImportNameDefinition(def, lp) => {
+ match def {
+ DefAssociatedTy(trait_type_id) => {
+ let def = DefAssociatedTy(
+ trait_type_id);
+ self.record_def(ty.id, (def, lp));
+ }
+ _ => {
+ self.resolve_error(
+ ty.span,
+ "not an associated type");
+ }
+ }
+ }
+ NoNameDefinition => {
+ self.resolve_error(ty.span,
+ "unresolved associated \
+ type");
+ }
+ }
+ }
+ Success(..) => self.resolve_error(ty.span, "not a trait"),
+ Indeterminate => {
+ self.session.span_bug(ty.span,
+ "indeterminate result when \
+ resolving associated type")
+ }
+ Failed(error) => {
+ let (span, help) = match error {
+ Some((span, msg)) => (span, format!("; {}", msg)),
+ None => (ty.span, String::new()),
+ };
+ self.resolve_error(span,
+ format!("unresolved trait: {}",
+ help).as_slice())
+ }
+ }
+ }
+
TyClosure(ref c) | TyProc(ref c) => {
- self.resolve_type_parameter_bounds(ty.id, &c.bounds,
- TraitBoundingTypeParameter);
+ self.resolve_type_parameter_bounds(
+ ty.id,
+ &c.bounds,
+ TraitBoundingTypeParameter);
visit::walk_ty(self, ty);
}
debug!("(resolving pattern) binding `{}`",
token::get_name(renamed));
- let def = match mode {
- RefutableMode => {
- // For pattern arms, we must use
- // `def_binding` definitions.
-
- DefBinding(pattern.id, binding_mode)
- }
- LocalIrrefutableMode => {
- // But for locals, we use `def_local`.
- DefLocal(pattern.id, binding_mode)
- }
- ArgumentIrrefutableMode => {
- // And for function arguments, `def_arg`.
- DefArg(pattern.id, binding_mode)
- }
- };
+ let def = DefLocal(pattern.id);
// Record the definition so that later passes
// will be able to distinguish variants from
Some(def_id) => {
match self.trait_item_map.borrow().find(&(ident.name, def_id)) {
Some(&StaticMethodTraitItemKind) => (),
+ Some(&TypeTraitItemKind) => (),
None => (),
- _ => {
+ Some(&NonstaticMethodTraitItemKind) => {
debug!("containing module was a trait or impl \
and name was a method -> not resolved");
return None;
if allowed == Everything {
// Look for a field with the same name in the current self_type.
match self.def_map.borrow().find(&node_id) {
- Some(&DefTy(did))
+ Some(&DefTy(did, _))
| Some(&DefStruct(did))
| Some(&DefVariant(_, did, _)) => match self.structs.find(&did) {
None => {}
// structs, which wouldn't result in this error.)
match self.with_no_errors(|this|
this.resolve_path(expr.id, path, TypeNS, false)) {
- Some((DefTy(struct_id), _))
+ Some((DefTy(struct_id, _), _))
if self.structs.contains_key(&struct_id) => {
self.resolve_error(expr.span,
format!("`{}` is a structure name, but \
visit::walk_expr(self, expr);
}
- ExprFnBlock(_, ref fn_decl, ref block) |
- ExprProc(ref fn_decl, ref block) |
- ExprUnboxedFn(_, _, ref fn_decl, ref block) => {
- self.resolve_function(FunctionRibKind(expr.id, block.id),
+ ExprFnBlock(_, ref fn_decl, ref block) => {
+ // NOTE(stage0): After snapshot, change to:
+ //
+ //self.capture_mode_map.borrow_mut().insert(expr.id, capture_clause);
+ self.capture_mode_map.borrow_mut().insert(expr.id, ast::CaptureByRef);
+ self.resolve_function(ClosureRibKind(expr.id, ast::DUMMY_NODE_ID),
+ Some(&**fn_decl), NoTypeParameters,
+ &**block);
+ }
+ ExprProc(ref fn_decl, ref block) => {
+ self.capture_mode_map.borrow_mut().insert(expr.id, ast::CaptureByValue);
+ self.resolve_function(ClosureRibKind(expr.id, block.id),
+ Some(&**fn_decl), NoTypeParameters,
+ &**block);
+ }
+ ExprUnboxedFn(capture_clause, _, ref fn_decl, ref block) => {
+ self.capture_mode_map.borrow_mut().insert(expr.id, capture_clause);
+ self.resolve_function(ClosureRibKind(expr.id, block.id),
Some(&**fn_decl), NoTypeParameters,
&**block);
}
if idents.len() == 0 {
return "???".to_string();
}
- self.idents_to_string(idents.move_iter().rev()
+ self.idents_to_string(idents.into_iter().rev()
.collect::<Vec<ast::Ident>>()
.as_slice())
}
pub struct CrateMap {
pub def_map: DefMap,
+ pub freevars: RefCell<FreevarMap>,
+ pub capture_mode_map: RefCell<CaptureModeMap>,
pub exp_map2: ExportMap2,
pub trait_map: TraitMap,
pub external_exports: ExternalExports,
-> CrateMap {
let mut resolver = Resolver::new(session, krate.span);
resolver.resolve(krate);
- let Resolver { def_map, export_map2, trait_map, last_private,
- external_exports, .. } = resolver;
CrateMap {
- def_map: def_map,
- exp_map2: export_map2,
- trait_map: trait_map,
- external_exports: external_exports,
- last_private_map: last_private,
+ def_map: resolver.def_map,
+ freevars: resolver.freevars,
+ capture_mode_map: resolver.capture_mode_map,
+ exp_map2: resolver.export_map2,
+ trait_map: resolver.trait_map,
+ external_exports: resolver.external_exports,
+ last_private_map: resolver.last_private,
}
}
// If the expression is a macro expansion or other generated code, run screaming and don't index.
fn generated_code(span: Span) -> bool {
- span.expn_info.is_some() || span == DUMMY_SP
+ span.expn_id != NO_EXPANSION || span == DUMMY_SP
}
struct DxrVisitor<'l, 'tcx: 'l> {
def::DefMod(_) |
def::DefForeignMod(_) => Some(recorder::ModRef),
def::DefStruct(_) => Some(recorder::StructRef),
- def::DefTy(_) |
+ def::DefTy(..) |
+ def::DefAssociatedTy(..) |
def::DefTrait(_) => Some(recorder::TypeRef),
def::DefStatic(_, _) |
- def::DefBinding(_, _) |
- def::DefArg(_, _) |
- def::DefLocal(_, _) |
+ def::DefLocal(_) |
def::DefVariant(_, _, _) |
- def::DefUpvar(_, _, _, _) => Some(recorder::VarRef),
+ def::DefUpvar(..) => Some(recorder::VarRef),
def::DefFn(_, _) => Some(recorder::FnRef),
ty::MethodTraitItemId(def_id) => {
method.id != 0 && def_id.node == 0
}
+ ty::TypeTraitItemId(_) => false,
}
});
let decl_id = match decl_id {
None => None,
- Some(ty::MethodTraitItemId(def_id)) => Some(def_id),
+ Some(id) => Some(id.def_id()),
};
let sub_span = self.span.sub_span_after_keyword(method.span, keywords::Fn);
ast::MethodImplItem(ref method) => {
visit::walk_method_helper(self, &**method)
}
+ ast::TypeImplItem(ref typedef) => {
+ visit::walk_ty(self, &*typedef.typ)
+ }
}
}
}
let def = def_map.get(&ex.id);
let sub_span = self.span.span_for_last_ident(ex.span);
match *def {
- def::DefLocal(id, _) |
- def::DefArg(id, _) |
- def::DefUpvar(id, _, _, _) |
- def::DefBinding(id, _) => self.fmt.ref_str(recorder::VarRef,
- ex.span,
- sub_span,
- ast_util::local_def(id),
- self.cur_scope),
- def::DefStatic(def_id,_) |
- def::DefVariant(_, def_id, _) => self.fmt.ref_str(recorder::VarRef,
- ex.span,
- sub_span,
- def_id,
- self.cur_scope),
+ def::DefUpvar(..) |
+ def::DefLocal(..) |
+ def::DefStatic(..) |
+ def::DefVariant(..) => self.fmt.ref_str(recorder::VarRef,
+ ex.span,
+ sub_span,
+ def.def_id(),
+ self.cur_scope),
def::DefStruct(def_id) => self.fmt.ref_str(recorder::StructRef,
ex.span,
sub_span,
def_id)
.iter()
.find(|mr| {
- match **mr {
- ty::MethodTraitItem(ref mr) => {
- mr.ident.name == ti.ident()
- .name
- }
- }
+ mr.ident().name == ti.ident().name
})
.unwrap()
.def_id())
Some(impl_items.get(&def_id)
.iter()
.find(|mr| {
- match **mr {
- ty::MethodTraitItemId(mr) => {
- ty::impl_or_trait_item(
- &self.analysis
- .ty_cx,
- mr).ident()
- .name ==
- ti.ident().name
- }
- }
- }).unwrap()
- .def_id())
+ ty::impl_or_trait_item(
+ &self.analysis.ty_cx,
+ mr.def_id()).ident().name ==
+ ti.ident().name
+ })
+ .unwrap()
+ .def_id())
}
}
} else {
def::DefStaticMethod(_, _, _) => {
self.write_sub_path_trait_truncated(path);
},
- def::DefLocal(_, _) |
- def::DefArg(_, _) |
+ def::DefLocal(_) |
def::DefStatic(_,_) |
def::DefStruct(_) |
def::DefFn(_, _) => self.write_sub_paths_truncated(path),
match ty::trait_item_of_item(&self.analysis.ty_cx,
def_id) {
None => None,
- Some(ty::MethodTraitItemId(decl_id)) => Some(decl_id),
+ Some(decl_id) => Some(decl_id.def_id()),
};
// This incantation is required if the method referenced is a
ty::MethodTraitItem(method) => {
method.provided_source.unwrap_or(def_id)
}
+ ty::TypeTraitItem(_) => def_id,
};
(Some(def_id), decl_id)
}
- typeck::MethodParam(mp) => {
+ typeck::MethodTypeParam(ref mp) => {
// method invoked on a type parameter
let trait_item = ty::trait_item(&self.analysis.ty_cx,
- mp.trait_id,
+ mp.trait_ref.def_id,
mp.method_num);
- match trait_item {
- ty::MethodTraitItem(method) => {
- (None, Some(method.def_id))
- }
- }
- },
- typeck::MethodObject(mo) => {
+ (None, Some(trait_item.def_id()))
+ }
+ typeck::MethodTraitObject(ref mo) => {
// method invoked on a trait instance
let trait_item = ty::trait_item(&self.analysis.ty_cx,
- mo.trait_id,
+ mo.trait_ref.def_id,
mo.method_num);
- match trait_item {
- ty::MethodTraitItem(method) => {
- (None, Some(method.def_id))
- }
- }
- },
+ (None, Some(trait_item.def_id()))
+ }
};
let sub_span = self.span.sub_span_for_meth_name(ex.span);
self.fmt.meth_call_str(ex.span,
qualname,
method_type.id);
}
- ast::ProvidedMethod(ref method) => self.process_method(&**method)
+ ast::ProvidedMethod(ref method) => self.process_method(&**method),
+ ast::TypeTraitItem(_) => {}
}
}
}
let def = def_map.get(&id);
match *def {
- def::DefBinding(id, _) => self.fmt.variable_str(p.span,
- sub_span,
- id,
- path_to_string(p).as_slice(),
- value.as_slice(),
- ""),
+ def::DefLocal(id) => self.fmt.variable_str(p.span,
+ sub_span,
+ id,
+ path_to_string(p).as_slice(),
+ value.as_slice(),
+ ""),
def::DefVariant(_,id,_) => self.fmt.ref_str(ref_kind,
p.span,
sub_span,
self.cur_scope),
// FIXME(nrc) what is this doing here?
def::DefStatic(_, _) => {}
- _ => error!("unexpected defintion kind when processing collected paths: {:?}", *def)
+ _ => error!("unexpected definition kind when processing collected paths: {:?}",
+ *def)
}
}
self.collected_paths.clear();
Some(Span {
lo: base + self.sess.codemap().lookup_byte_offset(sub.lo).pos,
hi: base + self.sess.codemap().lookup_byte_offset(sub.hi).pos,
- expn_info: None,
+ expn_id: NO_EXPANSION,
})
}
}
use syntax::{attr, visit};
use syntax::ast;
use syntax::ast::{Attribute, Block, Crate, DefId, FnDecl, NodeId, Variant};
-use syntax::ast::{Item, RequiredMethod, ProvidedMethod, TraitItem, TypeMethod, Method};
-use syntax::ast::{Generics, StructDef, StructField, Ident};
+use syntax::ast::{Item, RequiredMethod, ProvidedMethod, TraitItem};
+use syntax::ast::{TypeMethod, Method, Generics, StructDef, StructField};
+use syntax::ast::{Ident, TypeTraitItem};
use syntax::ast_util::is_local;
use syntax::attr::Stability;
use syntax::visit::{FnKind, FkMethod, Visitor};
RequiredMethod(TypeMethod {id, ref attrs, ..}) => (id, attrs),
// work around lack of pattern matching for @ types
- ProvidedMethod(ref method) => match **method {
- Method {id, ref attrs, ..} => (id, attrs)
+ ProvidedMethod(ref method) => {
+ match **method {
+ Method {attrs: ref attrs, id: id, ..} => (id, attrs),
+ }
}
+
+ TypeTraitItem(ref typedef) => (typedef.id, &typedef.attrs),
};
self.annotate(id, attrs, |v| visit::walk_trait_item(v, t));
}
fn as_slice<'a>(&'a self) -> &'a [T];
fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T];
fn iter<'a>(&'a self) -> Items<'a, T>;
- fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T>;
+ fn iter_mut<'a>(&'a mut self) -> MutItems<'a, T>;
fn get<'a>(&'a self, index: uint) -> Option<&'a T>;
fn get_mut<'a>(&'a mut self, index: uint) -> Option<&'a mut T>;
}
slice.iter()
}
- fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T> {
- self.as_mut_slice().mut_iter()
+ fn iter_mut<'a>(&'a mut self) -> MutItems<'a, T> {
+ self.as_mut_slice().iter_mut()
}
fn get<'a>(&'a self, index: uint) -> Option<&'a T> {
s
}
+ pub fn erase_regions(self) -> Substs {
+ let Substs { types: types, regions: _ } = self;
+ Substs { types: types, regions: ErasedRegions }
+ }
+
pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace<ty::Region> {
/*!
* Since ErasedRegions are only to be used in trans, most of
}
}
+ fn new_internal(content: Vec<T>, type_limit: uint, self_limit: uint)
+ -> VecPerParamSpace<T>
+ {
+ VecPerParamSpace {
+ type_limit: type_limit,
+ self_limit: self_limit,
+ content: content,
+ }
+ }
+
pub fn sort(t: Vec<T>, space: |&T| -> ParamSpace) -> VecPerParamSpace<T> {
let mut result = VecPerParamSpace::empty();
- for t in t.move_iter() {
+ for t in t.into_iter() {
result.push(space(&t), t);
}
result
pub fn replace(&mut self, space: ParamSpace, elems: Vec<T>) {
// FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n).
self.truncate(space, 0);
- for t in elems.move_iter() {
+ for t in elems.into_iter() {
self.push(space, t);
}
}
pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
let (start, limit) = self.limits(space);
- self.content.mut_slice(start, limit)
+ self.content.slice_mut(start, limit)
}
pub fn opt_get<'a>(&'a self,
}
pub fn map<U>(&self, pred: |&T| -> U) -> VecPerParamSpace<U> {
- // FIXME (#15418): this could avoid allocating the intermediate
- // Vec's, but note that the values of type_limit and self_limit
- // also need to be kept in sync during construction.
- VecPerParamSpace::new(
- self.get_slice(TypeSpace).iter().map(|p| pred(p)).collect(),
- self.get_slice(SelfSpace).iter().map(|p| pred(p)).collect(),
- self.get_slice(FnSpace).iter().map(|p| pred(p)).collect())
+ let result = self.iter().map(pred).collect();
+ VecPerParamSpace::new_internal(result,
+ self.type_limit,
+ self.self_limit)
+ }
+
+ pub fn map_move<U>(self, pred: |T| -> U) -> VecPerParamSpace<U> {
+ let (t, s, f) = self.split();
+ VecPerParamSpace::new(t.into_iter().map(|p| pred(p)).collect(),
+ s.into_iter().map(|p| pred(p)).collect(),
+ f.into_iter().map(|p| pred(p)).collect())
}
pub fn map_rev<U>(&self, pred: |&T| -> U) -> VecPerParamSpace<U> {
let t1 = match ty::get(t).sty {
ty::ty_param(p) => {
- check(self, p, t, self.substs.types.opt_get(p.space, p.idx))
+ check(self,
+ p,
+ t,
+ self.substs.types.opt_get(p.space, p.idx),
+ p.space,
+ p.idx)
}
_ => {
ty_fold::super_fold_ty(self, t)
fn check(this: &SubstFolder,
p: ty::ParamTy,
source_ty: ty::t,
- opt_ty: Option<&ty::t>)
+ opt_ty: Option<&ty::t>,
+ space: ParamSpace,
+ index: uint)
-> ty::t {
match opt_ty {
Some(t) => *t,
let span = this.span.unwrap_or(DUMMY_SP);
this.tcx().sess.span_bug(
span,
- format!("Type parameter `{}` ({}) out of range \
+ format!("Type parameter `{}` ({}/{}/{}) out of range \
when substituting (root type={})",
p.repr(this.tcx()),
source_ty.repr(this.tcx()),
+ space,
+ index,
this.root_ty.repr(this.tcx())).as_slice());
}
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*! See `doc.rs` for high-level documentation */
+
+use super::DUMMY_CAUSE;
+use super::{EvaluatedToMatch, EvaluatedToAmbiguity, EvaluatedToUnmatch};
+use super::{evaluate_impl};
+use super::util;
+
+use middle::subst;
+use middle::subst::Subst;
+use middle::ty;
+use middle::typeck::infer::InferCtxt;
+use syntax::ast;
+use syntax::codemap::DUMMY_SP;
+use util::nodemap::DefIdMap;
+use util::ppaux::Repr;
+
+/// Determines whether `impl2` could be used to satisfy the trait
+/// reference provided by `impl1` for the same self type. Used during
+/// coherence checking to detect potentially overlapping impls.
+pub fn impl_can_satisfy(infcx: &InferCtxt,
+                        impl1_def_id: ast::DefId,
+                        impl2_def_id: ast::DefId)
+                        -> bool
+{
+    // `impl1` provides an implementation of `Foo<X,Y> for Z`.
+    // Replace its type parameters with fresh inference variables so
+    // that the comparison is done up to unification.
+    let impl1_substs =
+        util::fresh_substs_for_impl(infcx, DUMMY_SP, impl1_def_id);
+    let impl1_self_ty =
+        ty::impl_trait_ref(infcx.tcx, impl1_def_id).unwrap()
+        .self_ty()
+        .subst(infcx.tcx, &impl1_substs);
+
+    // Determine whether `impl2` can provide an implementation for those
+    // same types. An ambiguous result is treated conservatively as a
+    // possible match.
+    let param_env = ty::empty_parameter_environment();
+    let unboxed_closures = DefIdMap::new();
+    match evaluate_impl(infcx, &param_env, &unboxed_closures, DUMMY_CAUSE,
+                        impl2_def_id, impl1_self_ty) {
+        EvaluatedToMatch | EvaluatedToAmbiguity => true,
+        EvaluatedToUnmatch => false,
+    }
+}
+
+/// Returns true if the impl either implements a trait defined in the
+/// local crate or has a self type local to the crate ("orphan" check).
+pub fn impl_is_local(tcx: &ty::ctxt,
+                     impl_def_id: ast::DefId)
+                     -> bool
+{
+    debug!("impl_is_local({})", impl_def_id.repr(tcx));
+
+    // We only expect this routine to be invoked on implementations
+    // of a trait, not inherent implementations.
+    let trait_ref = ty::impl_trait_ref(tcx, impl_def_id).unwrap();
+    debug!("trait_ref={}", trait_ref.repr(tcx));
+
+    // If the trait is local to the crate, ok.
+    if trait_ref.def_id.krate == ast::LOCAL_CRATE {
+        debug!("trait {} is local to current crate",
+               trait_ref.def_id.repr(tcx));
+        return true;
+    }
+
+    // Otherwise, self type must be local to the crate.
+    let self_ty = ty::lookup_item_type(tcx, impl_def_id).ty;
+    return ty_is_local(tcx, self_ty);
+}
+
+/// Determines whether the type `ty` is considered local to the current
+/// crate for the purposes of coherence checking.
+pub fn ty_is_local(tcx: &ty::ctxt,
+                   ty: ty::t)
+                   -> bool
+{
+    debug!("ty_is_local({})", ty.repr(tcx));
+
+    match ty::get(ty).sty {
+        // Primitive types are never local.
+        ty::ty_nil |
+        ty::ty_bot |
+        ty::ty_bool |
+        ty::ty_char |
+        ty::ty_int(..) |
+        ty::ty_uint(..) |
+        ty::ty_float(..) |
+        ty::ty_str(..) => {
+            false
+        }
+
+        ty::ty_unboxed_closure(..) => {
+            // This routine is invoked on types specified by users as
+            // part of an impl and hence an unboxed closure type
+            // cannot appear.
+            tcx.sess.bug("ty_is_local applied to unboxed closure type")
+        }
+
+        ty::ty_bare_fn(..) |
+        ty::ty_closure(..) => {
+            false
+        }
+
+        // `Box<T>` is local if the `Box` lang item is defined in this
+        // crate, or if `T` itself is local.
+        ty::ty_uniq(t) => {
+            let krate = tcx.lang_items.owned_box().map(|d| d.krate);
+            krate == Some(ast::LOCAL_CRATE) || ty_is_local(tcx, t)
+        }
+
+        // Likewise for `Gc<T>` via the `gc` lang item.
+        ty::ty_box(t) => {
+            let krate = tcx.lang_items.gc().map(|d| d.krate);
+            krate == Some(ast::LOCAL_CRATE) || ty_is_local(tcx, t)
+        }
+
+        // Vectors, raw pointers, and references are local iff their
+        // element/referent type is local.
+        ty::ty_vec(t, _) |
+        ty::ty_ptr(ty::mt { ty: t, .. }) |
+        ty::ty_rptr(_, ty::mt { ty: t, .. }) => {
+            ty_is_local(tcx, t)
+        }
+
+        // A tuple is local if any of its components is local.
+        ty::ty_tup(ref ts) => {
+            ts.iter().any(|&t| ty_is_local(tcx, t))
+        }
+
+        // A nominal type is local if it is defined here, or if any of
+        // its type arguments (in a variance-relevant position) is.
+        ty::ty_enum(def_id, ref substs) |
+        ty::ty_struct(def_id, ref substs) => {
+            def_id.krate == ast::LOCAL_CRATE || {
+                let variances = ty::item_variances(tcx, def_id);
+                subst::ParamSpace::all().iter().any(|&space| {
+                    substs.types.get_slice(space).iter().enumerate().any(
+                        |(i, &t)| {
+                            match *variances.types.get(space, i) {
+                                ty::Bivariant => {
+                                    // If Foo<T> is bivariant with respect to
+                                    // T, then it doesn't matter whether T is
+                                    // local or not, because `Foo<U>` for any
+                                    // U will be a subtype of T.
+                                    false
+                                }
+                                ty::Contravariant |
+                                ty::Covariant |
+                                ty::Invariant => {
+                                    ty_is_local(tcx, t)
+                                }
+                            }
+                        })
+                })
+            }
+        }
+
+        // A trait object is local iff the trait itself is.
+        ty::ty_trait(ref tt) => {
+            tt.def_id.krate == ast::LOCAL_CRATE
+        }
+
+        // Type parameters may be bound to types that are not local to
+        // the crate.
+        ty::ty_param(..) => {
+            false
+        }
+
+        ty::ty_infer(..) |
+        ty::ty_open(..) |
+        ty::ty_err => {
+            tcx.sess.bug(
+                format!("ty_is_local invoked on unexpected type: {}",
+                        ty.repr(tcx)).as_slice())
+        }
+    }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+
+# TRAIT RESOLUTION
+
+This document describes the general process and points out some non-obvious
+things.
+
+## Major concepts
+
+Trait resolution is the process of pairing up an impl with each
+reference to a trait. So, for example, if there is a generic function like:
+
+ fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> { ... }
+
+and then a call to that function:
+
+ let v: Vec<int> = clone_slice([1, 2, 3].as_slice())
+
+it is the job of trait resolution to figure out (in this case)
+whether there exists an impl of `int : Clone`
+
+Note that in some cases, like generic functions, we may not be able to
+find a specific impl, but we can figure out that the caller must
+provide an impl. To see what I mean, consider the body of `clone_slice`:
+
+ fn clone_slice<T:Clone>(x: &[T]) -> Vec<T> {
+ let mut v = Vec::new();
+ for e in x.iter() {
+ v.push((*e).clone()); // (*)
+ }
+ }
+
+The line marked `(*)` is only legal if `T` (the type of `*e`)
+implements the `Clone` trait. Naturally, since we don't know what `T`
+is, we can't find the specific impl; but based on the bound `T:Clone`,
+we can say that there exists an impl which the caller must provide.
+
+We use the term *obligation* to refer to a trait reference in need of
+an impl.
+
+## Overview
+
+Trait resolution consists of three major parts:
+
+- SELECTION: Deciding how to resolve a specific obligation. For
+ example, selection might decide that a specific obligation can be
+ resolved by employing an impl which matches the self type, or by
+  using a parameter bound. In the case of an impl, selecting one
+ obligation can create *nested obligations* because of where clauses
+ on the impl itself.
+
+- FULFILLMENT: The fulfillment code is what tracks that obligations
+ are completely fulfilled. Basically it is a worklist of obligations
+ to be selected: once selection is successful, the obligation is
+ removed from the worklist and any nested obligations are enqueued.
+
+- COHERENCE: The coherence checks are intended to ensure that there
+ are never overlapping impls, where two impls could be used with
+ equal precedence.
+
+## Selection
+
+Selection is the process of deciding whether an obligation can be
+resolved and, if so, how it is to be resolved (via impl, where clause, etc).
+The main interface is the `select()` function, which takes an obligation
+and returns a `SelectionResult`. There are three possible outcomes:
+
+- `Ok(Some(selection))` -- yes, the obligation can be resolved, and
+ `selection` indicates how. If the impl was resolved via an impl,
+ then `selection` may also indicate nested obligations that are required
+ by the impl.
+
+- `Ok(None)` -- we are not yet sure whether the obligation can be
+ resolved or not. This happens most commonly when the obligation
+ contains unbound type variables.
+
+- `Err(err)` -- the obligation definitely cannot be resolved due to a
+ type error, or because there are no impls that could possibly apply,
+ etc.
+
+The basic algorithm for selection is broken into two big phases:
+candidate assembly and confirmation.
+
+### Candidate assembly
+
+Searches for impls/where-clauses/etc that might
+possibly be used to satisfy the obligation. Each of those is called
+a candidate. To avoid ambiguity, we want to find exactly one
+candidate that is definitively applicable. In some cases, we may not
+know whether an impl/where-clause applies or not -- this occurs when
+the obligation contains unbound inference variables.
+
+One important point is that candidate assembly considers *only the
+input types* of the obligation when deciding whether an impl applies
+or not. Consider the following example:
+
+ trait Convert<T> { // T is output, Self is input
+ fn convert(&self) -> T;
+ }
+
+ impl Convert<uint> for int { ... }
+
+Now assume we have an obligation `int : Convert<char>`. During
+candidate assembly, the impl above would be considered a definitively
+applicable candidate, because it has the same self type (`int`). The
+fact that the output type parameter `T` is `uint` on the impl and
+`char` in the obligation is not considered.
+
+#### Skolemization
+
+We (at least currently) wish to guarantee "crate concatenability" --
+which basically means that you could take two crates, concatenate
+them textually, and the combined crate would continue to compile. The
+only real way that this relates to trait matching is with
+inference. We have to be careful not to influence unbound type
+variables during the selection process, basically.
+
+Here is an example:
+
+ trait Foo { fn method() { ... }}
+ impl Foo for int { ... }
+
+ fn something() {
+ let mut x = None; // `x` has type `Option<?>`
+ loop {
+ match x {
+ Some(ref y) => { // `y` has type ?
+ y.method(); // (*)
+ ...
+ }}}
+ }
+
+The question is, can we resolve the call to `y.method()`? We don't yet
+know what type `y` has. However, there is only one impl in scope, and
+it is for `int`, so perhaps we could deduce that `y` *must* have type
+`int` (and hence the type of `x` is `Option<int>`)? This is actually
+sound reasoning: `int` is the only type in scope that could possibly
+make this program type check. However, this deduction is a bit
+"unstable", though, because if we concatenated with another crate that
+defined a newtype and implemented `Foo` for this newtype, then the
+inference would fail, because there would be two potential impls, not
+one.
+
+It is unclear how important this property is. It might be nice to drop it.
+But for the time being we maintain it.
+
+The way we do this is by *skolemizing* the obligation self type during
+the selection process -- skolemizing means, basically, replacing all
+unbound type variables with a new "skolemized" type. Each skolemized
+type is basically considered "as if" it were some fresh type that is
+distinct from all other types. The skolemization process also replaces
+lifetimes with `'static`, see the section on lifetimes below for an
+explanation.
+
+In the example above, this means that when matching `y.method()` we
+would convert the type of `y` from a type variable `?` to a skolemized
+type `X`. Then, since `X` cannot unify with `int`, the match would
+fail. Special code exists to check that the match failed because a
+skolemized type could not be unified with another kind of type -- this is
+not considered a definitive failure, but rather an ambiguous result,
+since if the type variable were later to be unified with int, then this
+obligation could be resolved then.
+
+*Note:* Currently, method matching does not use the trait resolution
+code, so if you in fact type in the example above, it may
+compile. Hopefully this will be fixed in later patches.
+
+#### Matching
+
+The subroutines that decide whether a particular impl/where-clause/etc
+applies to a particular obligation. At the moment, this amounts to
+unifying the self types, but in the future we may also recursively
+consider some of the nested obligations, in the case of an impl.
+
+#### Lifetimes and selection
+
+Because of how lifetime inference works, it is not possible to
+give back immediate feedback as to whether a unification or subtype
+relationship between lifetimes holds or not. Therefore, lifetime
+matching is *not* considered during selection. This is achieved by
+having the skolemization process just replace *ALL* lifetimes with
+`'static`. Later, during confirmation, the non-skolemized self-type
+will be unified with the type from the impl (or whatever). This may
+yield lifetime constraints that will later be found to be in error (in
+contrast, the non-lifetime-constraints have already been checked
+during selection and can never cause an error, though naturally they
+may lead to other errors downstream).
+
+#### Where clauses
+
+Besides an impl, the other major way to resolve an obligation is via a
+where clause. The selection process is always given a *parameter
+environment* which contains a list of where clauses, which are
+basically obligations that we can assume are satisfiable. We will iterate
+over that list and check whether our current obligation can be found
+in that list, and if so it is considered satisfied. More precisely, we
+want to check whether there is a where-clause obligation that is for
+the same trait (or some subtrait) and for which the self types match,
+using the definition of *matching* given above.
+
+Consider this simple example:
+
+ trait A1 { ... }
+ trait A2 : A1 { ... }
+
+ trait B { ... }
+
+ fn foo<X:A2+B> { ... }
+
+Clearly we can use methods offered by `A1`, `A2`, or `B` within the
+body of `foo`. In each case, that will incur an obligation like `X :
+A1` or `X : A2`. The parameter environment will contain two
+where-clauses, `X : A2` and `X : B`. For each obligation, then, we
+search this list of where-clauses. To resolve an obligation `X:A1`,
+we would note that `X:A2` implies that `X:A1`.
+
+### Confirmation
+
+Confirmation unifies the output type parameters of the trait with the
+values found in the obligation, possibly yielding a type error. If we
+return to our example of the `Convert` trait from the previous
+section, confirmation is where an error would be reported, because the
+impl specified that `T` would be `uint`, but the obligation reported
+`char`. Hence the result of selection would be an error.
+
+### Selection during translation
+
+During type checking, we do not store the results of trait selection.
+We simply wish to verify that trait selection will succeed. Then
+later, at trans time, when we have all concrete types available, we
+can repeat the trait selection. In this case, we do not consider any
+where-clauses to be in scope. We know that therefore each resolution
+will resolve to a particular impl.
+
+One interesting twist has to do with nested obligations. In general, in trans,
+we only need to do a "shallow" selection for an obligation. That is, we wish to
+identify which impl applies, but we do not (yet) need to decide how to select
+any nested obligations. Nonetheless, we *do* currently do a complete resolution,
+and that is because it can sometimes inform the results of type inference. That is,
+we do not have the full substitutions in terms of the type variables of the impl available
+to us, so we must run trait selection to figure everything out.
+
+Here is an example:
+
+ trait Foo { ... }
+ impl<U,T:Bar<U>> Foo for Vec<T> { ... }
+
+ impl Bar<uint> for int { ... }
+
+After one shallow round of selection for an obligation like `Vec<int>
+: Foo`, we would know which impl we want, and we would know that
+`T=int`, but we do not know the type of `U`. We must select the
+nested obligation `int : Bar<U>` to find out that `U=uint`.
+
+It would be good to only do *just as much* nested resolution as
+necessary. Currently, though, we just do a full resolution.
+
+*/
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::ty;
+use middle::typeck::infer::{InferCtxt, skolemize};
+use util::nodemap::DefIdMap;
+use util::ppaux::Repr;
+
+use super::CodeAmbiguity;
+use super::Obligation;
+use super::FulfillmentError;
+use super::CodeSelectionError;
+use super::select::SelectionContext;
+use super::Unimplemented;
+
+/**
+ * The fulfillment context is used to drive trait resolution. It
+ * consists of a list of obligations that must be (eventually)
+ * satisfied. The job is to track which are satisfied, which yielded
+ * errors, and which are still pending. At any point, users can call
+ * `select_where_possible`, and the fulfillment context will try to do
+ * selection, retaining only those obligations that remain
+ * ambiguous. This may be helpful in pushing type inference
+ * along. Once all type inference constraints have been generated, the
+ * method `select_all_or_error` can be used to report any remaining
+ * ambiguous cases as errors.
+ */
+pub struct FulfillmentContext {
+ // A list of all obligations that have been registered with this
+ // fulfillment context.
+ trait_obligations: Vec<Obligation>,
+
+ // For semi-hacky reasons (see FIXME below) we keep the builtin
+ // trait obligations segregated.
+ builtin_obligations: Vec<Obligation>,
+}
+
+impl FulfillmentContext {
+ pub fn new() -> FulfillmentContext {
+ FulfillmentContext {
+ trait_obligations: Vec::new(),
+ builtin_obligations: Vec::new()
+ }
+ }
+
+ pub fn register_obligation(&mut self,
+ tcx: &ty::ctxt,
+ obligation: Obligation)
+ {
+ debug!("register_obligation({})", obligation.repr(tcx));
+ match tcx.lang_items.to_builtin_kind(obligation.trait_ref.def_id) {
+ Some(_) => {
+ self.builtin_obligations.push(obligation);
+ }
+ None => {
+ self.trait_obligations.push(obligation);
+ }
+ }
+ }
+
+ pub fn select_all_or_error(&mut self,
+ infcx: &InferCtxt,
+ param_env: &ty::ParameterEnvironment,
+ unboxed_closures: &DefIdMap<ty::UnboxedClosure>)
+ -> Result<(),Vec<FulfillmentError>>
+ {
+ try!(self.select_where_possible(infcx, param_env,
+ unboxed_closures));
+
+ // Anything left is ambiguous.
+ let errors: Vec<FulfillmentError> =
+ self.trait_obligations
+ .iter()
+ .map(|o| FulfillmentError::new((*o).clone(), CodeAmbiguity))
+ .collect();
+
+ if errors.is_empty() {
+ Ok(())
+ } else {
+ Err(errors)
+ }
+ }
+
+ pub fn select_where_possible(&mut self,
+ infcx: &InferCtxt,
+ param_env: &ty::ParameterEnvironment,
+ unboxed_closures: &DefIdMap<ty::UnboxedClosure>)
+ -> Result<(),Vec<FulfillmentError>>
+ {
+ let tcx = infcx.tcx;
+ let selcx = SelectionContext::new(infcx, param_env,
+ unboxed_closures);
+
+ debug!("select_where_possible({} obligations) start",
+ self.trait_obligations.len());
+
+ let mut errors = Vec::new();
+
+ loop {
+ let count = self.trait_obligations.len();
+
+ debug!("select_where_possible({} obligations) iteration",
+ count);
+
+ let mut selections = Vec::new();
+
+ // First pass: walk each obligation, retaining
+ // only those that we cannot yet process.
+ self.trait_obligations.retain(|obligation| {
+ match selcx.select(obligation) {
+ Ok(None) => {
+ true
+ }
+ Ok(Some(s)) => {
+ selections.push(s);
+ false
+ }
+ Err(selection_err) => {
+ debug!("obligation: {} error: {}",
+ obligation.repr(tcx),
+ selection_err.repr(tcx));
+
+ errors.push(FulfillmentError::new(
+ (*obligation).clone(),
+ CodeSelectionError(selection_err)));
+ false
+ }
+ }
+ });
+
+ if self.trait_obligations.len() == count {
+ // Nothing changed.
+ break;
+ }
+
+ // Now go through all the successful ones,
+ // registering any nested obligations for the future.
+ for selection in selections.into_iter() {
+ selection.map_move_nested(
+ |o| self.register_obligation(tcx, o));
+ }
+ }
+
+ debug!("select_where_possible({} obligations, {} errors) done",
+ self.trait_obligations.len(),
+ errors.len());
+
+ if errors.len() == 0 {
+ Ok(())
+ } else {
+ Err(errors)
+ }
+ }
+
+ pub fn check_builtin_bound_obligations(
+ &self,
+ infcx: &InferCtxt)
+ -> Result<(),Vec<FulfillmentError>>
+ {
+ let tcx = infcx.tcx;
+ let mut errors = Vec::new();
+ debug!("check_builtin_bound_obligations");
+ for obligation in self.builtin_obligations.iter() {
+ debug!("obligation={}", obligation.repr(tcx));
+
+ let def_id = obligation.trait_ref.def_id;
+ let bound = match tcx.lang_items.to_builtin_kind(def_id) {
+ Some(bound) => { bound }
+ None => { continue; }
+ };
+
+ let unskol_self_ty = obligation.self_ty();
+
+ // Skolemize the self-type so that it no longer contains
+ // inference variables. Note that this also replaces
+ // regions with 'static. You might think that this is not
+ // ok, because checking whether something is `Send`
+ // implies checking whether it is 'static: that's true,
+ // but in fact the region bound is fed into region
+ // inference separately and enforced there (and that has
+ // even already been done before this code executes,
+ // generally speaking).
+ let self_ty = skolemize(infcx, unskol_self_ty);
+
+ debug!("bound={} self_ty={}", bound, self_ty.repr(tcx));
+ if ty::type_is_error(self_ty) {
+ // Indicates an error that was/will-be
+ // reported elsewhere.
+ continue;
+ }
+
+ // Determine if builtin bound is met.
+ let tc = ty::type_contents(tcx, self_ty);
+ debug!("tc={}", tc);
+ let met = match bound {
+ ty::BoundSend => tc.is_sendable(tcx),
+ ty::BoundSized => tc.is_sized(tcx),
+ ty::BoundCopy => tc.is_copy(tcx),
+ ty::BoundSync => tc.is_sync(tcx),
+ };
+
+ if met {
+ continue;
+ }
+
+        // FIXME -- This is kind of a hack: it frequently happens
+ // that some earlier error prevents types from being fully
+ // inferred, and then we get a bunch of uninteresting
+ // errors saying something like "<generic #0> doesn't
+ // implement Sized". It may even be true that we could
+ // just skip over all checks where the self-ty is an
+ // inference variable, but I was afraid that there might
+ // be an inference variable created, registered as an
+ // obligation, and then never forced by writeback, and
+ // hence by skipping here we'd be ignoring the fact that
+ // we don't KNOW the type works out. Though even that
+ // would probably be harmless, given that we're only
+ // talking about builtin traits, which are known to be
+ // inhabited. But in any case I just threw in this check
+        // for has_errors() to be sure that compilation is already
+        // failing anyway. In that case, why inundate the user.
+ if ty::type_needs_infer(self_ty) &&
+ tcx.sess.has_errors()
+ {
+ debug!("skipping printout because self_ty={}",
+ self_ty.repr(tcx));
+ continue;
+ }
+
+ errors.push(
+ FulfillmentError::new(
+ (*obligation).clone(),
+ CodeSelectionError(Unimplemented)));
+ }
+
+ if errors.is_empty() {
+ Ok(())
+ } else {
+ Err(errors)
+ }
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * Trait Resolution. See doc.rs.
+ */
+
+use middle::subst;
+use middle::ty;
+use middle::typeck::infer::InferCtxt;
+use std::rc::Rc;
+use syntax::ast;
+use syntax::codemap::{Span, DUMMY_SP};
+use util::nodemap::DefIdMap;
+
+pub use self::fulfill::FulfillmentContext;
+pub use self::select::SelectionContext;
+pub use self::util::supertraits;
+pub use self::util::transitive_bounds;
+pub use self::util::Supertraits;
+pub use self::util::search_trait_and_supertraits_from_bound;
+
+mod coherence;
+mod fulfill;
+mod select;
+mod util;
+
+/**
+ * An `Obligation` represents some trait reference (e.g. `int:Eq`) for
+ * which the vtable must be found. The process of finding a vtable is
+ * called "resolving" the `Obligation`. This process consists of
+ * either identifying an `impl` (e.g., `impl Eq for int`) that
+ * provides the required vtable, or else finding a bound that is in
+ * scope. The eventual result is usually a `Selection` (defined below).
+ */
+#[deriving(Clone)]
+pub struct Obligation {
+ pub cause: ObligationCause,
+ pub recursion_depth: uint,
+ pub trait_ref: Rc<ty::TraitRef>,
+}
+
+/**
+ * Why did we incur this obligation? Used for error reporting.
+ */
+#[deriving(Clone)]
+pub struct ObligationCause {
+ pub span: Span,
+ pub code: ObligationCauseCode
+}
+
+#[deriving(Clone)]
+pub enum ObligationCauseCode {
+ /// Not well classified or should be obvious from span.
+ MiscObligation,
+
+ /// In an impl of trait X for type Y, type Y must
+ /// also implement all supertraits of X.
+ ItemObligation(ast::DefId),
+
+ /// Obligation incurred due to an object cast.
+ ObjectCastObligation(/* Object type */ ty::t),
+
+ /// Various cases where expressions must be sized/copy/etc:
+ AssignmentLhsSized, // L = X implies that L is Sized
+ StructInitializerSized, // S { ... } must be Sized
+ VariableType(ast::NodeId), // Type of each variable must be Sized
+ RepeatVec, // [T,..n] --> T must be Copy
+}
+
+pub static DUMMY_CAUSE: ObligationCause =
+ ObligationCause { span: DUMMY_SP,
+ code: MiscObligation };
+
+pub type Obligations = subst::VecPerParamSpace<Obligation>;
+
+pub type Selection = Vtable<Obligation>;
+
+#[deriving(Clone,Show)]
+pub enum SelectionError {
+ Unimplemented,
+ Overflow,
+ OutputTypeParameterMismatch(Rc<ty::TraitRef>, ty::type_err)
+}
+
+pub struct FulfillmentError {
+ pub obligation: Obligation,
+ pub code: FulfillmentErrorCode
+}
+
+#[deriving(Clone)]
+pub enum FulfillmentErrorCode {
+ CodeSelectionError(SelectionError),
+ CodeAmbiguity,
+}
+
+/**
+ * When performing resolution, it is typically the case that there
+ * can be one of three outcomes:
+ *
+ * - `Ok(Some(r))`: success occurred with result `r`
+ * - `Ok(None)`: could not definitely determine anything, usually due
+ * to inconclusive type inference.
+ * - `Err(e)`: error `e` occurred
+ */
+pub type SelectionResult<T> = Result<Option<T>, SelectionError>;
+
+#[deriving(PartialEq,Eq,Show)]
+pub enum EvaluationResult {
+ EvaluatedToMatch,
+ EvaluatedToAmbiguity,
+ EvaluatedToUnmatch
+}
+
+/**
+ * Given the successful resolution of an obligation, the `Vtable`
+ * indicates where the vtable comes from. Note that while we call this
+ * a "vtable", it does not necessarily indicate dynamic dispatch at
+ * runtime. `Vtable` instances just tell the compiler where to find
+ * methods, but in generic code those methods are typically statically
+ * dispatched -- only when an object is constructed is a `Vtable`
+ * instance reified into an actual vtable.
+ *
+ * For example, the vtable may be tied to a specific impl (case A),
+ * or it may be relative to some bound that is in scope (case B).
+ *
+ *
+ * ```
+ * impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
+ * impl<T:Clone> Clone<T> for Box<T> { ... } // Impl_2
+ * impl Clone for int { ... } // Impl_3
+ *
+ * fn foo<T:Clone>(concrete: Option<Box<int>>,
+ * param: T,
+ * mixed: Option<T>) {
+ *
+ * // Case A: Vtable points at a specific impl. Only possible when
+ * // type is concretely known. If the impl itself has bounded
+ * // type parameters, Vtable will carry resolutions for those as well:
+ * concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])])
+ *
+ * // Case B: Vtable must be provided by caller. This applies when
+ * // type is a type parameter.
+ * param.clone(); // VtableParam(Oblig_1)
+ *
+ * // Case C: A mix of cases A and B.
+ * mixed.clone(); // Vtable(Impl_1, [VtableParam(Oblig_1)])
+ * }
+ * ```
+ *
+ * ### The type parameter `N`
+ *
+ * See explanation on `VtableImplData`.
+ */
+#[deriving(Show,Clone)]
+pub enum Vtable<N> {
+ /// Vtable identifying a particular impl.
+ VtableImpl(VtableImplData<N>),
+
+ /// Vtable automatically generated for an unboxed closure. The def
+ /// ID is the ID of the closure expression. This is a `VtableImpl`
+ /// in spirit, but the impl is generated by the compiler and does
+ /// not appear in the source.
+ VtableUnboxedClosure(ast::DefId),
+
+ /// Successful resolution to an obligation provided by the caller
+ /// for some type parameter.
+ VtableParam(VtableParamData),
+
+ /// Successful resolution for a builtin trait.
+ VtableBuiltin,
+}
+
+/**
+ * Identifies a particular impl in the source, along with a set of
+ * substitutions from the impl's type/lifetime parameters. The
+ * `nested` vector corresponds to the nested obligations attached to
+ * the impl's type parameters.
+ *
+ * The type parameter `N` indicates the type used for "nested
+ * obligations" that are required by the impl. During type check, this
+ * is `Obligation`, as one might expect. During trans, however, this
+ * is `()`, because trans only requires a shallow resolution of an
+ * impl, and nested obligations are satisfied later.
+ */
+#[deriving(Clone)]
+pub struct VtableImplData<N> {
+ pub impl_def_id: ast::DefId,
+ pub substs: subst::Substs,
+ pub nested: subst::VecPerParamSpace<N>
+}
+
+/**
+ * A vtable provided as a parameter by the caller. For example, in a
+ * function like `fn foo<T:Eq>(...)`, if the `eq()` method is invoked
+ * on an instance of `T`, the vtable would be of type `VtableParam`.
+ */
+#[deriving(Clone)]
+pub struct VtableParamData {
+    // In the above example, this would be `Eq`
+ pub bound: Rc<ty::TraitRef>,
+}
+
+pub fn try_select_obligation(infcx: &InferCtxt,
+ param_env: &ty::ParameterEnvironment,
+ unboxed_closures: &DefIdMap<ty::UnboxedClosure>,
+ obligation: &Obligation)
+ -> SelectionResult<Selection>
+{
+ /*!
+ * Attempts to select the impl/bound/etc for the obligation
+     * given. Returns `Ok(None)` if we are unable to resolve, either
+ * because of ambiguity or due to insufficient inference. Note
+ * that selection is a shallow process and hence the result may
+ * contain nested obligations that must be resolved. The caller is
+     * responsible for ensuring that those get resolved (for
+     * example, by registering them with a `FulfillmentContext`).
+ */
+
+ let selcx = select::SelectionContext::new(infcx, param_env, unboxed_closures);
+ selcx.select(obligation)
+}
+
+pub fn evaluate_obligation(infcx: &InferCtxt,
+ param_env: &ty::ParameterEnvironment,
+ obligation: &Obligation,
+ unboxed_closures: &DefIdMap<ty::UnboxedClosure>)
+ -> EvaluationResult
+{
+ /*!
+     * Attempts to evaluate the obligation given. Returns
+     * `EvaluatedToAmbiguity` if we are unable to resolve, either
+     * because of ambiguity or due to insufficient inference.
+ */
+
+ let selcx = select::SelectionContext::new(infcx, param_env,
+ unboxed_closures);
+ selcx.evaluate_obligation(obligation)
+}
+
+pub fn evaluate_impl(infcx: &InferCtxt,
+ param_env: &ty::ParameterEnvironment,
+ unboxed_closures: &DefIdMap<ty::UnboxedClosure>,
+ cause: ObligationCause,
+ impl_def_id: ast::DefId,
+ self_ty: ty::t)
+ -> EvaluationResult
+{
+ /*!
+ * Tests whether the impl `impl_def_id` can be applied to the self
+ * type `self_ty`. This is similar to "selection", but simpler:
+ *
+ * - It does not take a full trait-ref as input, so it skips over
+ * the "confirmation" step which would reconcile output type
+ * parameters.
+ * - It returns an `EvaluationResult`, which is a tri-value return
+ * (yes/no/unknown).
+ */
+
+ let selcx = select::SelectionContext::new(infcx, param_env, unboxed_closures);
+ selcx.evaluate_impl(impl_def_id, cause, self_ty)
+}
+
+pub fn select_inherent_impl(infcx: &InferCtxt,
+ param_env: &ty::ParameterEnvironment,
+ unboxed_closures: &DefIdMap<ty::UnboxedClosure>,
+ cause: ObligationCause,
+ impl_def_id: ast::DefId,
+ self_ty: ty::t)
+ -> SelectionResult<VtableImplData<Obligation>>
+{
+ /*!
+ * Matches the self type of the inherent impl `impl_def_id`
+ * against `self_ty` and returns the resulting resolution. This
+ * routine may modify the surrounding type context (for example,
+ * it may unify variables).
+ */
+
+ // This routine is only suitable for inherent impls. This is
+ // because it does not attempt to unify the output type parameters
+ // from the trait ref against the values from the obligation.
+ // (These things do not apply to inherent impls, for which there
+ // is no trait ref nor obligation.)
+ //
+ // Matching against non-inherent impls should be done with
+    // `try_select_obligation()`.
+ assert!(ty::impl_trait_ref(infcx.tcx, impl_def_id).is_none());
+
+ let selcx = select::SelectionContext::new(infcx, param_env,
+ unboxed_closures);
+ selcx.select_inherent_impl(impl_def_id, cause, self_ty)
+}
+
+pub fn is_orphan_impl(tcx: &ty::ctxt,
+ impl_def_id: ast::DefId)
+ -> bool
+{
+ /*!
+ * True if neither the trait nor self type is local. Note that
+ * `impl_def_id` must refer to an impl of a trait, not an inherent
+ * impl.
+ */
+
+ !coherence::impl_is_local(tcx, impl_def_id)
+}
+
+pub fn overlapping_impls(infcx: &InferCtxt,
+ impl1_def_id: ast::DefId,
+ impl2_def_id: ast::DefId)
+ -> bool
+{
+ /*!
+ * True if there exist types that satisfy both of the two given impls.
+ */
+
+ coherence::impl_can_satisfy(infcx, impl1_def_id, impl2_def_id) &&
+ coherence::impl_can_satisfy(infcx, impl2_def_id, impl1_def_id)
+}
+
+pub fn obligations_for_generics(tcx: &ty::ctxt,
+ cause: ObligationCause,
+ generics: &ty::Generics,
+ substs: &subst::Substs)
+ -> subst::VecPerParamSpace<Obligation>
+{
+ /*!
+ * Given generics for an impl like:
+ *
+ * impl<A:Foo, B:Bar+Qux> ...
+ *
+ * and a substs vector like `<A=A0, B=B0>`, yields a result like
+ *
+ * [[Foo for A0, Bar for B0, Qux for B0], [], []]
+ */
+
+ util::obligations_for_generics(tcx, cause, 0, generics, substs)
+}
+
+pub fn obligation_for_builtin_bound(tcx: &ty::ctxt,
+ cause: ObligationCause,
+ source_ty: ty::t,
+ builtin_bound: ty::BuiltinBound)
+ -> Obligation
+{
+ util::obligation_for_builtin_bound(tcx, cause, builtin_bound, 0, source_ty)
+}
+
+impl Obligation {
+ pub fn new(cause: ObligationCause, trait_ref: Rc<ty::TraitRef>) -> Obligation {
+ Obligation { cause: cause,
+ recursion_depth: 0,
+ trait_ref: trait_ref }
+ }
+
+ pub fn misc(span: Span, trait_ref: Rc<ty::TraitRef>) -> Obligation {
+ Obligation::new(ObligationCause::misc(span), trait_ref)
+ }
+
+ pub fn self_ty(&self) -> ty::t {
+ self.trait_ref.self_ty()
+ }
+}
+
+impl ObligationCause {
+ pub fn new(span: Span, code: ObligationCauseCode) -> ObligationCause {
+ ObligationCause { span: span, code: code }
+ }
+
+ pub fn misc(span: Span) -> ObligationCause {
+ ObligationCause { span: span, code: MiscObligation }
+ }
+}
+
+impl<N> Vtable<N> {
+ pub fn map_nested<M>(&self, op: |&N| -> M) -> Vtable<M> {
+ match *self {
+ VtableImpl(ref i) => VtableImpl(i.map_nested(op)),
+ VtableUnboxedClosure(d) => VtableUnboxedClosure(d),
+ VtableParam(ref p) => VtableParam((*p).clone()),
+ VtableBuiltin => VtableBuiltin,
+ }
+ }
+
+ pub fn map_move_nested<M>(self, op: |N| -> M) -> Vtable<M> {
+ match self {
+ VtableImpl(i) => VtableImpl(i.map_move_nested(op)),
+ VtableUnboxedClosure(d) => VtableUnboxedClosure(d),
+ VtableParam(p) => VtableParam(p),
+ VtableBuiltin => VtableBuiltin,
+ }
+ }
+}
+
+impl<N> VtableImplData<N> {
+ pub fn map_nested<M>(&self,
+ op: |&N| -> M)
+ -> VtableImplData<M>
+ {
+ VtableImplData {
+ impl_def_id: self.impl_def_id,
+ substs: self.substs.clone(),
+ nested: self.nested.map(op)
+ }
+ }
+
+ pub fn map_move_nested<M>(self, op: |N| -> M) -> VtableImplData<M> {
+ let VtableImplData { impl_def_id, substs, nested } = self;
+ VtableImplData {
+ impl_def_id: impl_def_id,
+ substs: substs,
+ nested: nested.map_move(op)
+ }
+ }
+}
+
+impl EvaluationResult {
+ pub fn potentially_applicable(&self) -> bool {
+ match *self {
+ EvaluatedToMatch | EvaluatedToAmbiguity => true,
+ EvaluatedToUnmatch => false
+ }
+ }
+}
+
+impl FulfillmentError {
+ fn new(obligation: Obligation, code: FulfillmentErrorCode)
+ -> FulfillmentError
+ {
+ FulfillmentError { obligation: obligation, code: code }
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*! See `doc.rs` for high-level documentation */
+
+use super::{Obligation, ObligationCause};
+use super::{EvaluationResult, EvaluatedToMatch,
+ EvaluatedToAmbiguity, EvaluatedToUnmatch};
+use super::{SelectionError, Unimplemented, Overflow,
+ OutputTypeParameterMismatch};
+use super::{Selection};
+use super::{SelectionResult};
+use super::{VtableBuiltin, VtableImpl, VtableParam, VtableUnboxedClosure};
+use super::{VtableImplData, VtableParamData};
+use super::{util};
+
+use middle::subst::{Subst, Substs, VecPerParamSpace};
+use middle::ty;
+use middle::typeck::check::regionmanip;
+use middle::typeck::infer;
+use middle::typeck::infer::InferCtxt;
+use std::rc::Rc;
+use syntax::ast;
+use util::nodemap::DefIdMap;
+use util::ppaux::Repr;
+
+pub struct SelectionContext<'cx, 'tcx:'cx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ param_env: &'cx ty::ParameterEnvironment,
+ unboxed_closures: &'cx DefIdMap<ty::UnboxedClosure>,
+}
+
+// pub struct SelectionCache {
+// hashmap: RefCell<HashMap<CacheKey, Candidate>>,
+// }
+
+// #[deriving(Hash,Eq,PartialEq)]
+// struct CacheKey {
+// trait_def_id: ast::DefId,
+// skol_obligation_self_ty: ty::t,
+// }
+
+enum MatchResult<T> {
+ Matched(T),
+ AmbiguousMatch,
+ NoMatch
+}
+
+/**
+ * The selection process begins by considering all impls, where
+ * clauses, and so forth that might resolve an obligation. Sometimes
+ * we'll be able to say definitively that (e.g.) an impl does not
+ * apply to the obligation: perhaps it is defined for `uint` but the
+ * obligation is for `int`. In that case, we drop the impl out of the
+ * list. But the other cases are considered *candidates*.
+ *
+ * Candidates can either be definitive or ambiguous. An ambiguous
+ * candidate is one that might match or might not, depending on how
+ * type variables wind up being resolved. This only occurs during inference.
+ *
+ * For selection to succeed, there must be exactly one non-ambiguous
+ * candidate. Usually, it is not possible to have more than one
+ * definitive candidate, due to the coherence rules. However, there is
+ * one case where it could occur: if there is a blanket impl for a
+ * trait (that is, an impl applied to all T), and a type parameter
+ * with a where clause. In that case, we can have a candidate from the
+ * where clause and a second candidate from the impl. This is not a
+ * problem because coherence guarantees us that the impl which would
+ * be used to satisfy the where clause is the same one that we see
+ * now. To resolve this issue, therefore, we ignore impls if we find a
+ * matching where clause. Part of the reason for this is that where
+ * clauses can give additional information (like, the types of output
+ * parameters) that would have to be inferred from the impl.
+ */
+#[deriving(Clone)]
+enum Candidate {
+ MatchedBuiltinCandidate,
+ AmbiguousBuiltinCandidate,
+ MatchedParamCandidate(VtableParamData),
+ AmbiguousParamCandidate,
+ Impl(ImplCandidate),
+ MatchedUnboxedClosureCandidate(/* closure */ ast::DefId)
+}
+
+#[deriving(Clone)]
+enum ImplCandidate {
+ MatchedImplCandidate(ast::DefId),
+ AmbiguousImplCandidate(ast::DefId),
+}
+
+impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+ pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>,
+ param_env: &'cx ty::ParameterEnvironment,
+ unboxed_closures: &'cx DefIdMap<ty::UnboxedClosure>)
+ -> SelectionContext<'cx, 'tcx> {
+ SelectionContext { infcx: infcx, param_env: param_env,
+ unboxed_closures: unboxed_closures }
+ }
+
+ pub fn tcx(&self) -> &'cx ty::ctxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Selection
+ //
+ // The selection phase tries to identify *how* an obligation will
+ // be resolved. For example, it will identify which impl or
+ // parameter bound is to be used. The process can be inconclusive
+ // if the self type in the obligation is not fully inferred. Selection
+ // can result in an error in one of two ways:
+ //
+ // 1. If no applicable impl or parameter bound can be found.
+ // 2. If the output type parameters in the obligation do not match
+ // those specified by the impl/bound. For example, if the obligation
+ // is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
+    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.
+
+ pub fn select(&self, obligation: &Obligation) -> SelectionResult<Selection> {
+ /*!
+ * Evaluates whether the obligation can be satisfied. Returns
+ * an indication of whether the obligation can be satisfied
+ * and, if so, by what means. Never affects surrounding typing
+ * environment.
+ */
+
+ debug!("select({})", obligation.repr(self.tcx()));
+
+ match try!(self.candidate_from_obligation(obligation)) {
+ None => Ok(None),
+ Some(candidate) => self.confirm_candidate(obligation, candidate),
+ }
+ }
+
+ pub fn select_inherent_impl(&self,
+ impl_def_id: ast::DefId,
+ obligation_cause: ObligationCause,
+ obligation_self_ty: ty::t)
+ -> SelectionResult<VtableImplData<Obligation>>
+ {
+ debug!("select_inherent_impl(impl_def_id={}, obligation_self_ty={})",
+ impl_def_id.repr(self.tcx()),
+ obligation_self_ty.repr(self.tcx()));
+
+ match self.candidate_from_impl(impl_def_id,
+ obligation_cause,
+ obligation_self_ty) {
+ Some(MatchedImplCandidate(impl_def_id)) => {
+ let vtable_impl =
+ try!(self.confirm_inherent_impl_candidate(
+ impl_def_id,
+ obligation_cause,
+ obligation_self_ty,
+ 0));
+ Ok(Some(vtable_impl))
+ }
+ Some(AmbiguousImplCandidate(_)) => {
+ Ok(None)
+ }
+ None => {
+ Err(Unimplemented)
+ }
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // EVALUATION
+ //
+ // Tests whether an obligation can be selected or whether an impl can be
+ // applied to particular types. It skips the "confirmation" step and
+ // hence completely ignores output type parameters.
+
+ pub fn evaluate_obligation(&self,
+ obligation: &Obligation)
+ -> EvaluationResult
+ {
+ /*!
+ * Evaluates whether the obligation `obligation` can be
+ * satisfied (by any means).
+ */
+
+ debug!("evaluate_obligation({})",
+ obligation.repr(self.tcx()));
+
+ match self.candidate_from_obligation(obligation) {
+ Ok(Some(c)) => c.to_evaluation_result(),
+ Ok(None) => EvaluatedToAmbiguity,
+ Err(_) => EvaluatedToUnmatch,
+ }
+ }
+
+ pub fn evaluate_impl(&self,
+ impl_def_id: ast::DefId,
+ obligation_cause: ObligationCause,
+ obligation_self_ty: ty::t)
+ -> EvaluationResult
+ {
+ /*!
+ * Evaluates whether the impl with id `impl_def_id` could be
+ * applied to the self type `obligation_self_ty`. This can be
+ * used either for trait or inherent impls.
+ */
+
+ debug!("evaluate_impl(impl_def_id={}, obligation_self_ty={})",
+ impl_def_id.repr(self.tcx()),
+ obligation_self_ty.repr(self.tcx()));
+
+ match self.candidate_from_impl(impl_def_id,
+ obligation_cause,
+ obligation_self_ty) {
+ Some(c) => c.to_evaluation_result(),
+ None => EvaluatedToUnmatch,
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // CANDIDATE ASSEMBLY
+ //
+ // The selection process begins by examining all in-scope impls,
+ // caller obligations, and so forth and assembling a list of
+ // candidates. See `doc.rs` and the `Candidate` type for more details.
+
+ fn candidate_from_obligation(&self, obligation: &Obligation)
+ -> SelectionResult<Candidate>
+ {
+ debug!("candidate_from_obligation({}, self_ty={})",
+ obligation.repr(self.tcx()),
+ self.infcx.ty_to_string(obligation.self_ty()));
+
+ let skol_obligation_self_ty =
+ infer::skolemize(self.infcx, obligation.self_ty());
+
+ // First, check the cache.
+ match self.check_candidate_cache(obligation, skol_obligation_self_ty) {
+ Some(c) => {
+ return Ok(Some(c));
+ }
+ None => { }
+ }
+
+ let mut candidates =
+ try!(self.assemble_candidates(obligation,
+ skol_obligation_self_ty));
+
+ debug!("candidate_from_obligation: {} candidates for {}",
+ candidates.len(), obligation.repr(self.tcx()));
+
+ // Examine candidates to determine outcome. Ideally we will
+ // have exactly one candidate that is definitively applicable.
+
+ if candidates.len() == 0 {
+ // Annoying edge case: if there are no impls, then there
+ // is no way that this trait reference is implemented,
+ // *unless* it contains unbound variables. In that case,
+ // it is possible that one of those unbound variables will
+ // be bound to a new type from some other crate which will
+ // also contain impls.
+ let trait_ref = &*obligation.trait_ref;
+ return if !self.trait_ref_unconstrained(trait_ref) {
+ debug!("candidate_from_obligation({}) -> 0 matches, unimpl",
+ obligation.repr(self.tcx()));
+ Err(Unimplemented)
+ } else {
+ debug!("candidate_from_obligation({}) -> 0 matches, ambig",
+ obligation.repr(self.tcx()));
+ Ok(None)
+ };
+ }
+
+ if candidates.len() > 1 {
+ // Ambiguity. Possibly we should report back more
+ // information on the potential candidates so we can give
+ // a better error message.
+ debug!("candidate_from_obligation({}) -> multiple matches, ambig",
+ obligation.repr(self.tcx()));
+
+ return Ok(None);
+ }
+
+ let candidate = candidates.pop().unwrap();
+ self.insert_candidate_cache(obligation, skol_obligation_self_ty,
+ candidate.clone());
+ Ok(Some(candidate))
+ }
+
+ fn check_candidate_cache(&self,
+ _obligation: &Obligation,
+ _skol_obligation_self_ty: ty::t)
+ -> Option<Candidate>
+ {
+ // let cache_key = CacheKey::new(obligation.trait_ref.def_id,
+ // skol_obligation_self_ty);
+ // let hashmap = self.tcx().selection_cache.hashmap.borrow();
+ // hashmap.find(&cache_key).map(|c| (*c).clone())
+ None
+ }
+
+ fn insert_candidate_cache(&self,
+ _obligation: &Obligation,
+ _skol_obligation_self_ty: ty::t,
+ _candidate: Candidate)
+ {
+ // FIXME -- Enable caching. I think the right place to put the cache
+ // is in the ParameterEnvironment, not the tcx, because otherwise
+ // when there are distinct where clauses in scope the cache can get
+ // confused.
+ //
+ //let cache_key = CacheKey::new(obligation.trait_ref.def_id,
+ // skol_obligation_self_ty);
+ //let mut hashmap = self.tcx().selection_cache.hashmap.borrow_mut();
+ //hashmap.insert(cache_key, candidate);
+ }
+
+ fn assemble_candidates(&self,
+ obligation: &Obligation,
+ skol_obligation_self_ty: ty::t)
+ -> Result<Vec<Candidate>, SelectionError>
+ {
+ // Check for overflow.
+
+ let recursion_limit = self.infcx.tcx.sess.recursion_limit.get();
+ if obligation.recursion_depth >= recursion_limit {
+ debug!("{} --> overflow", obligation.repr(self.tcx()));
+ return Err(Overflow);
+ }
+
+ let mut candidates = Vec::new();
+
+ match self.tcx().lang_items.to_builtin_kind(obligation.trait_ref.def_id) {
+ Some(_) => {
+ // FIXME -- The treatment of builtin bounds is a bit
+ // hacky right now. Eventually, the idea is to move
+ // the logic for selection out of type_contents and
+ // into this module (And make it based on the generic
+ // mechanisms of OIBTT2). However, I want to land
+ // some code today, so we're going to cut a few
+ // corners. What we do now is that the trait selection
+ // code always considers builtin obligations to
+ // match. The fulfillment code (which also has the job
+ // of tracking all the traits that must hold) will
+ // then just accumulate the various
+ // builtin-bound-related obligations that must be met.
+ // Later, at the end of typeck, after writeback etc,
+ // we will rewalk this list and extract all the
+ // builtin-bound-related obligations and test them
+ // again using type contents. Part of the motivation
+ // for this is that the type contents code requires
+ // that writeback has been completed in some cases.
+
+ candidates.push(AmbiguousBuiltinCandidate);
+ }
+
+ None => {
+ // Other bounds. Consider both in-scope bounds from fn decl
+ // and applicable impls.
+
+ try!(self.assemble_candidates_from_caller_bounds(
+ obligation,
+ skol_obligation_self_ty,
+ &mut candidates));
+
+ try!(self.assemble_unboxed_candidates(
+ obligation,
+ skol_obligation_self_ty,
+ &mut candidates));
+
+ // If there is a fn bound that applies, forego the
+ // impl search. It can only generate conflicts.
+
+ if candidates.len() == 0 {
+ try!(self.assemble_candidates_from_impls(
+ obligation,
+ skol_obligation_self_ty,
+ &mut candidates));
+ }
+ }
+ }
+
+ Ok(candidates)
+ }
+
+ fn assemble_candidates_from_caller_bounds(&self,
+ obligation: &Obligation,
+ skol_obligation_self_ty: ty::t,
+ candidates: &mut Vec<Candidate>)
+ -> Result<(),SelectionError>
+ {
+ /*!
+ * Given an obligation like `<SomeTrait for T>`, search the obligations
+ * that the caller supplied to find out whether it is listed among
+ * them.
+ *
+ * Never affects inference environment.
+     */
+
+ debug!("assemble_candidates_from_caller_bounds({})",
+ obligation.repr(self.tcx()));
+
+ for caller_obligation in self.param_env.caller_obligations.iter() {
+ debug!("caller_obligation={}",
+ caller_obligation.repr(self.tcx()));
+
+ // Skip over obligations that don't apply to
+ // `self_ty`.
+ let caller_bound = &caller_obligation.trait_ref;
+ let caller_self_ty = caller_bound.substs.self_ty().unwrap();
+ match self.match_self_types(obligation.cause,
+ caller_self_ty,
+ skol_obligation_self_ty) {
+ AmbiguousMatch => {
+ debug!("-> AmbiguousParamCandidate");
+ candidates.push(AmbiguousParamCandidate);
+ return Ok(());
+ }
+ NoMatch => {
+ continue;
+ }
+ Matched(()) => { }
+ }
+
+ // Search through the trait (and its supertraits) to
+ // see if it matches the def-id we are looking for.
+ let caller_bound = (*caller_bound).clone();
+ match util::search_trait_and_supertraits_from_bound(
+ self.infcx.tcx, caller_bound,
+ |d| d == obligation.trait_ref.def_id)
+ {
+ Some(vtable_param) => {
+ // If so, we're done!
+ debug!("-> MatchedParamCandidate({})", vtable_param);
+ candidates.push(MatchedParamCandidate(vtable_param));
+ return Ok(());
+ }
+
+ None => {
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn assemble_unboxed_candidates(&self,
+ obligation: &Obligation,
+ skol_obligation_self_ty: ty::t,
+ candidates: &mut Vec<Candidate>)
+ -> Result<(),SelectionError>
+ {
+ /*!
+ * Check for the artificial impl that the compiler will create
+ * for an obligation like `X : FnMut<..>` where `X` is an
+ * unboxed closure type.
+ */
+
+ let closure_def_id = match ty::get(skol_obligation_self_ty).sty {
+ ty::ty_unboxed_closure(id, _) => id,
+ _ => { return Ok(()); }
+ };
+
+ let tcx = self.tcx();
+ let fn_traits = [
+ (ty::FnUnboxedClosureKind, tcx.lang_items.fn_trait()),
+ (ty::FnMutUnboxedClosureKind, tcx.lang_items.fn_mut_trait()),
+ (ty::FnOnceUnboxedClosureKind, tcx.lang_items.fn_once_trait()),
+ ];
+ for tuple in fn_traits.iter() {
+ let kind = match tuple {
+ &(kind, Some(ref fn_trait))
+ if *fn_trait == obligation.trait_ref.def_id =>
+ {
+ kind
+ }
+ _ => continue,
+ };
+
+ // Check to see whether the argument and return types match.
+ let closure_kind = match self.unboxed_closures.find(&closure_def_id) {
+ Some(closure) => closure.kind,
+ None => {
+ self.tcx().sess.span_bug(
+ obligation.cause.span,
+ format!("No entry for unboxed closure: {}",
+ closure_def_id.repr(self.tcx())).as_slice());
+ }
+ };
+
+ if closure_kind != kind {
+ continue;
+ }
+
+ candidates.push(MatchedUnboxedClosureCandidate(closure_def_id));
+ }
+
+ Ok(())
+ }
+
+ fn assemble_candidates_from_impls(&self,
+ obligation: &Obligation,
+ skol_obligation_self_ty: ty::t,
+ candidates: &mut Vec<Candidate>)
+ -> Result<(), SelectionError>
+ {
+ /*!
+ * Search for impls that might apply to `obligation`.
+ */
+
+ let all_impls = self.all_impls(obligation.trait_ref.def_id);
+ for &impl_def_id in all_impls.iter() {
+ self.infcx.probe(|| {
+ match self.candidate_from_impl(impl_def_id,
+ obligation.cause,
+ skol_obligation_self_ty) {
+ Some(c) => {
+ candidates.push(Impl(c));
+ }
+
+ None => { }
+ }
+ });
+ }
+ Ok(())
+ }
+
+ fn candidate_from_impl(&self,
+ impl_def_id: ast::DefId,
+ obligation_cause: ObligationCause,
+ skol_obligation_self_ty: ty::t)
+ -> Option<ImplCandidate>
+ {
+ match self.match_impl_self_types(impl_def_id,
+ obligation_cause,
+ skol_obligation_self_ty) {
+ Matched(_) => {
+ Some(MatchedImplCandidate(impl_def_id))
+ }
+
+ AmbiguousMatch => {
+ Some(AmbiguousImplCandidate(impl_def_id))
+ }
+
+ NoMatch => {
+ None
+ }
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // CONFIRMATION
+ //
+ // Confirmation unifies the output type parameters of the trait
+ // with the values found in the obligation, possibly yielding a
+ // type error. See `doc.rs` for more details.
+
+ fn confirm_candidate(&self,
+ obligation: &Obligation,
+ candidate: Candidate)
+ -> SelectionResult<Selection>
+ {
+ debug!("confirm_candidate({}, {})",
+ obligation.repr(self.tcx()),
+ candidate.repr(self.tcx()));
+
+ match candidate {
+ AmbiguousBuiltinCandidate |
+ AmbiguousParamCandidate |
+ Impl(AmbiguousImplCandidate(_)) => {
+ Ok(None)
+ }
+
+ MatchedBuiltinCandidate => {
+ Ok(Some(VtableBuiltin))
+ }
+
+ MatchedParamCandidate(param) => {
+ Ok(Some(VtableParam(
+ try!(self.confirm_param_candidate(obligation, param)))))
+ }
+
+ Impl(MatchedImplCandidate(impl_def_id)) => {
+ let vtable_impl = try!(self.confirm_impl_candidate(obligation,
+ impl_def_id));
+ Ok(Some(VtableImpl(vtable_impl)))
+ }
+
+ MatchedUnboxedClosureCandidate(closure_def_id) => {
+ try!(self.confirm_unboxed_closure_candidate(obligation, closure_def_id));
+ Ok(Some(VtableUnboxedClosure(closure_def_id)))
+ }
+ }
+ }
+
+ fn confirm_param_candidate(&self,
+ obligation: &Obligation,
+ param: VtableParamData)
+ -> Result<VtableParamData,SelectionError>
+ {
+ debug!("confirm_param_candidate({},{})",
+ obligation.repr(self.tcx()),
+ param.repr(self.tcx()));
+
+ let () = try!(self.confirm(obligation.cause,
+ obligation.trait_ref.clone(),
+ param.bound.clone()));
+ Ok(param)
+ }
+
+ fn confirm_impl_candidate(&self,
+ obligation: &Obligation,
+ impl_def_id: ast::DefId)
+ -> Result<VtableImplData<Obligation>,SelectionError>
+ {
+ debug!("confirm_impl_candidate({},{})",
+ obligation.repr(self.tcx()),
+ impl_def_id.repr(self.tcx()));
+
+        // For a non-inherent impl, we begin the same way as an
+ // inherent impl, by matching the self-type and assembling
+ // list of nested obligations.
+ let vtable_impl =
+ try!(self.confirm_inherent_impl_candidate(
+ impl_def_id,
+ obligation.cause,
+ obligation.trait_ref.self_ty(),
+ obligation.recursion_depth));
+
+ // But then we must also match the output types.
+ let () = try!(self.confirm_impl_vtable(impl_def_id,
+ obligation.cause,
+ obligation.trait_ref.clone(),
+ &vtable_impl.substs));
+ Ok(vtable_impl)
+ }
+
+ fn confirm_inherent_impl_candidate(&self,
+ impl_def_id: ast::DefId,
+ obligation_cause: ObligationCause,
+ obligation_self_ty: ty::t,
+ obligation_recursion_depth: uint)
+ -> Result<VtableImplData<Obligation>,
+ SelectionError>
+ {
+ let substs = match self.match_impl_self_types(impl_def_id,
+ obligation_cause,
+ obligation_self_ty) {
+ Matched(substs) => substs,
+ AmbiguousMatch | NoMatch => {
+ self.tcx().sess.bug(
+ format!("Impl {} was matchable against {} but now is not",
+ impl_def_id.repr(self.tcx()),
+ obligation_self_ty.repr(self.tcx()))
+ .as_slice());
+ }
+ };
+
+ let impl_obligations =
+ self.impl_obligations(obligation_cause,
+ obligation_recursion_depth,
+ impl_def_id,
+ &substs);
+ let vtable_impl = VtableImplData { impl_def_id: impl_def_id,
+ substs: substs,
+ nested: impl_obligations };
+
+ Ok(vtable_impl)
+ }
+
+ fn confirm_unboxed_closure_candidate(&self,
+ obligation: &Obligation,
+ closure_def_id: ast::DefId)
+ -> Result<(),SelectionError>
+ {
+ debug!("confirm_unboxed_closure_candidate({},{})",
+ obligation.repr(self.tcx()),
+ closure_def_id.repr(self.tcx()));
+
+ let closure_type = match self.unboxed_closures.find(&closure_def_id) {
+ Some(closure) => closure.closure_type.clone(),
+ None => {
+ self.tcx().sess.span_bug(
+ obligation.cause.span,
+ format!("No entry for unboxed closure: {}",
+ closure_def_id.repr(self.tcx())).as_slice());
+ }
+ };
+
+ // FIXME(pcwalton): This is a bogus thing to do, but
+ // it'll do for now until we get the new trait-bound
+ // region skolemization working.
+ let (_, new_signature) =
+ regionmanip::replace_late_bound_regions_in_fn_sig(
+ self.tcx(),
+ &closure_type.sig,
+ |br| self.infcx.next_region_var(
+ infer::LateBoundRegion(obligation.cause.span, br)));
+
+ let arguments_tuple = *new_signature.inputs.get(0);
+ let trait_ref = Rc::new(ty::TraitRef {
+ def_id: obligation.trait_ref.def_id,
+ substs: Substs::new_trait(
+ vec![arguments_tuple, new_signature.output],
+ vec![],
+ obligation.self_ty())
+ });
+
+ self.confirm(obligation.cause,
+ obligation.trait_ref.clone(),
+ trait_ref)
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Matching
+ //
+ // Matching is a common path used for both evaluation and
+ // confirmation. It basically unifies types that appear in impls
+ // and traits. This does affect the surrounding environment;
+ // therefore, when used during evaluation, match routines must be
+ // run inside of a `probe()` so that their side-effects are
+ // contained.
+
+ fn match_impl_self_types(&self,
+ impl_def_id: ast::DefId,
+ obligation_cause: ObligationCause,
+ obligation_self_ty: ty::t)
+ -> MatchResult<Substs>
+ {
+ /*!
+ * Determines whether the self type declared against
+ * `impl_def_id` matches `obligation_self_ty`. If successful,
+ * returns the substitutions used to make them match. See
+ * `match_impl()`. For example, if `impl_def_id` is declared
+ * as:
+ *
+ * impl<T:Copy> Foo for ~T { ... }
+ *
+         * and `obligation_self_ty` is `int`, we'd get back an `Err(_)`
+ * result. But if `obligation_self_ty` were `~int`, we'd get
+ * back `Ok(T=int)`.
+ */
+
+ // Create fresh type variables for each type parameter declared
+ // on the impl etc.
+ let impl_substs = util::fresh_substs_for_impl(self.infcx,
+ obligation_cause.span,
+ impl_def_id);
+
+ // Find the self type for the impl.
+ let impl_self_ty = ty::lookup_item_type(self.tcx(), impl_def_id).ty;
+ let impl_self_ty = impl_self_ty.subst(self.tcx(), &impl_substs);
+
+ debug!("match_impl_self_types(obligation_self_ty={}, impl_self_ty={})",
+ obligation_self_ty.repr(self.tcx()),
+ impl_self_ty.repr(self.tcx()));
+
+ match self.match_self_types(obligation_cause,
+ impl_self_ty,
+ obligation_self_ty) {
+ Matched(()) => {
+ debug!("Matched impl_substs={}", impl_substs.repr(self.tcx()));
+ Matched(impl_substs)
+ }
+ AmbiguousMatch => {
+ debug!("AmbiguousMatch");
+ AmbiguousMatch
+ }
+ NoMatch => {
+ debug!("NoMatch");
+ NoMatch
+ }
+ }
+ }
+
+ fn match_self_types(&self,
+ cause: ObligationCause,
+
+ // The self type provided by the impl/caller-obligation:
+ provided_self_ty: ty::t,
+
+ // The self type the obligation is for:
+ required_self_ty: ty::t)
+ -> MatchResult<()>
+ {
+ // FIXME(#5781) -- equating the types is stronger than
+ // necessary. Should consider variance of trait w/r/t Self.
+
+ let origin = infer::RelateSelfType(cause.span);
+ match self.infcx.eq_types(false,
+ origin,
+ provided_self_ty,
+ required_self_ty) {
+ Ok(()) => Matched(()),
+ Err(ty::terr_sorts(ty::expected_found{expected: t1, found: t2})) => {
+ // This error occurs when there is an unresolved type
+ // variable in the `required_self_ty` that was forced
+ // to unify with a non-type-variable. That basically
+ // means we don't know enough to say with certainty
+ // whether there is a match or not -- it depends on
+ // how that type variable is ultimately resolved.
+ if ty::type_is_skolemized(t1) || ty::type_is_skolemized(t2) {
+ AmbiguousMatch
+ } else {
+ NoMatch
+ }
+ }
+ Err(_) => NoMatch,
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Confirmation
+ //
+ // The final step of selection: once we know how an obligation is
+    // resolved, we confirm that selection in order to have
+ // side-effects on the typing environment. This step also unifies
+ // the output type parameters from the obligation with those found
+ // on the impl/bound, which may yield type errors.
+
+ fn confirm_impl_vtable(&self,
+ impl_def_id: ast::DefId,
+ obligation_cause: ObligationCause,
+ obligation_trait_ref: Rc<ty::TraitRef>,
+ substs: &Substs)
+ -> Result<(), SelectionError>
+ {
+ /*!
+ * Relates the output type parameters from an impl to the
+ * trait. This may lead to type errors. The confirmation step
+ * is separated from the main match procedure because these
+ * type errors do not cause us to select another impl.
+ *
+ * As an example, consider matching the obligation
+ * `Iterator<char> for Elems<int>` using the following impl:
+ *
+ * impl<T> Iterator<T> for Elems<T> { ... }
+ *
+ * The match phase will succeed with substitution `T=int`.
+ * The confirm step will then try to unify `int` and `char`
+ * and yield an error.
+ */
+
+ let impl_trait_ref = ty::impl_trait_ref(self.tcx(),
+ impl_def_id).unwrap();
+ let impl_trait_ref = impl_trait_ref.subst(self.tcx(),
+ substs);
+ self.confirm(obligation_cause, obligation_trait_ref, impl_trait_ref)
+ }
+
+ fn confirm(&self,
+ obligation_cause: ObligationCause,
+ obligation_trait_ref: Rc<ty::TraitRef>,
+ expected_trait_ref: Rc<ty::TraitRef>)
+ -> Result<(), SelectionError>
+ {
+ /*!
+ * After we have determined which impl applies, and with what
+ * substitutions, there is one last step. We have to go back
+ * and relate the "output" type parameters from the obligation
+ * to the types that are specified in the impl.
+ *
+ * For example, imagine we have:
+ *
+ * impl<T> Iterator<T> for Vec<T> { ... }
+ *
+ * and our obligation is `Iterator<Foo> for Vec<int>` (note
+ * the mismatch in the obligation types). Up until this step,
+ * no error would be reported: the self type is `Vec<int>`,
+ * and that matches `Vec<T>` with the substitution `T=int`.
+ * At this stage, we could then go and check that the type
+ * parameters to the `Iterator` trait match.
+ * (In terms of the parameters, the `expected_trait_ref`
+ * here would be `Iterator<int> for Vec<int>`, and the
+         * `obligation_trait_ref` would be `Iterator<Foo> for Vec<int>`.)
+ *
+ * Note that this checking occurs *after* the impl has
+ * selected, because these output type parameters should not
+ * affect the selection of the impl. Therefore, if there is a
+ * mismatch, we report an error to the user.
+ */
+
+ let origin = infer::RelateOutputImplTypes(obligation_cause.span);
+
+ let obligation_trait_ref = obligation_trait_ref.clone();
+ match self.infcx.sub_trait_refs(false,
+ origin,
+ expected_trait_ref.clone(),
+ obligation_trait_ref) {
+ Ok(()) => Ok(()),
+ Err(e) => Err(OutputTypeParameterMismatch(expected_trait_ref, e))
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Miscellany
+
+ fn all_impls(&self, trait_def_id: ast::DefId) -> Vec<ast::DefId> {
+ /*!
+         * Returns the set of all impls for a given trait.
+ */
+
+ ty::populate_implementations_for_trait_if_necessary(self.tcx(),
+ trait_def_id);
+ match self.tcx().trait_impls.borrow().find(&trait_def_id) {
+ None => Vec::new(),
+ Some(impls) => impls.borrow().clone()
+ }
+ }
+
+ fn impl_obligations(&self,
+ cause: ObligationCause,
+ recursion_depth: uint,
+ impl_def_id: ast::DefId,
+ impl_substs: &Substs)
+ -> VecPerParamSpace<Obligation>
+ {
+ let impl_generics = ty::lookup_item_type(self.tcx(),
+ impl_def_id).generics;
+ util::obligations_for_generics(self.tcx(), cause, recursion_depth,
+ &impl_generics, impl_substs)
+ }
+
+ fn trait_ref_unconstrained(&self,
+ trait_ref: &ty::TraitRef)
+ -> bool
+ {
+ /*!
+ * True if the self type of the trait-ref contains
+ * unconstrained type variables.
+ */
+
+ let mut found_skol = false;
+
+ // Skolemization replaces all unconstrained type vars with
+ // a SkolemizedTy instance. Then we search to see if we
+ // found any.
+ let skol_ty = infer::skolemize(self.infcx, trait_ref.self_ty());
+ ty::walk_ty(skol_ty, |t| {
+ match ty::get(t).sty {
+ ty::ty_infer(ty::SkolemizedTy(_)) => { found_skol = true; }
+ _ => { }
+ }
+ });
+
+ found_skol
+ }
+}
+
+impl Candidate {
+ fn to_evaluation_result(&self) -> EvaluationResult {
+ match *self {
+ Impl(ref i) => i.to_evaluation_result(),
+
+ MatchedUnboxedClosureCandidate(..) |
+ MatchedBuiltinCandidate |
+ MatchedParamCandidate(..) => {
+ EvaluatedToMatch
+ }
+
+ AmbiguousBuiltinCandidate |
+ AmbiguousParamCandidate => {
+ EvaluatedToAmbiguity
+ }
+ }
+ }
+}
+
+impl ImplCandidate {
+ fn to_evaluation_result(&self) -> EvaluationResult {
+ match *self {
+ MatchedImplCandidate(..) => EvaluatedToMatch,
+ AmbiguousImplCandidate(..) => EvaluatedToAmbiguity
+ }
+ }
+}
+
+impl Repr for Candidate {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ match *self {
+ MatchedBuiltinCandidate => format!("MatchedBuiltinCandidate"),
+ AmbiguousBuiltinCandidate => format!("AmbiguousBuiltinCandidate"),
+ MatchedUnboxedClosureCandidate(c) => format!("MatchedUnboxedClosureCandidate({})", c),
+ MatchedParamCandidate(ref r) => format!("MatchedParamCandidate({})",
+ r.repr(tcx)),
+ AmbiguousParamCandidate => format!("AmbiguousParamCandidate"),
+ Impl(ref i) => i.repr(tcx)
+ }
+ }
+}
+
+impl Repr for ImplCandidate {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ match *self {
+ MatchedImplCandidate(ref d) => format!("MatchedImplCandidate({})",
+ d.repr(tcx)),
+ AmbiguousImplCandidate(ref d) => format!("AmbiguousImplCandidate({})",
+ d.repr(tcx)),
+ }
+ }
+}
+
+
+// impl SelectionCache {
+// pub fn new() -> SelectionCache {
+// SelectionCache {
+// hashmap: RefCell::new(HashMap::new())
+// }
+// }
+// }
+
+// impl CacheKey {
+// pub fn new(trait_def_id: ast::DefId,
+// skol_obligation_self_ty: ty::t)
+// -> CacheKey
+// {
+// CacheKey {
+// trait_def_id: trait_def_id,
+// skol_obligation_self_ty: skol_obligation_self_ty
+// }
+// }
+// }
--- /dev/null
+
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::subst;
+use middle::subst::{ParamSpace, Subst, Substs, VecPerParamSpace};
+use middle::typeck::infer::InferCtxt;
+use middle::ty;
+use std::fmt;
+use std::rc::Rc;
+use syntax::ast;
+use syntax::codemap::Span;
+use util::ppaux::Repr;
+
+use super::{Obligation, ObligationCause, VtableImpl, VtableParam, VtableParamData, VtableImplData};
+
+///////////////////////////////////////////////////////////////////////////
+// Supertrait iterator
+
+pub struct Supertraits<'cx, 'tcx:'cx> {
+ tcx: &'cx ty::ctxt<'tcx>,
+ stack: Vec<SupertraitEntry>,
+}
+
+struct SupertraitEntry {
+ position: uint,
+ supertraits: Vec<Rc<ty::TraitRef>>,
+}
+
+pub fn supertraits<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
+ trait_ref: Rc<ty::TraitRef>)
+ -> Supertraits<'cx, 'tcx>
+{
+ /*!
+ * Returns an iterator over the trait reference `T` and all of its
+ * supertrait references. May contain duplicates. In general
+ * the ordering is not defined.
+ *
+ * Example:
+ *
+ * ```
+ * trait Foo { ... }
+ * trait Bar : Foo { ... }
+ * trait Baz : Bar+Foo { ... }
+ * ```
+ *
+ * `supertraits(Baz)` yields `[Baz, Bar, Foo, Foo]` in some order.
+ */
+
+ transitive_bounds(tcx, [trait_ref])
+}
+
+pub fn transitive_bounds<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>,
+ bounds: &[Rc<ty::TraitRef>])
+ -> Supertraits<'cx, 'tcx>
+{
+ let bounds = Vec::from_fn(bounds.len(), |i| bounds[i].clone());
+ let entry = SupertraitEntry { position: 0, supertraits: bounds };
+ Supertraits { tcx: tcx, stack: vec![entry] }
+}
+
+impl<'cx, 'tcx> Supertraits<'cx, 'tcx> {
+ fn push(&mut self, trait_ref: &ty::TraitRef) {
+ let bounds = ty::bounds_for_trait_ref(self.tcx, trait_ref);
+ let entry = SupertraitEntry { position: 0,
+ supertraits: bounds.trait_bounds };
+ self.stack.push(entry);
+ }
+
+ pub fn indices(&self) -> Vec<uint> {
+ /*!
+ * Returns the path taken through the trait supertraits to
+ * reach the current point.
+ */
+
+ self.stack.iter().map(|e| e.position).collect()
+ }
+}
+
+impl<'cx, 'tcx> Iterator<Rc<ty::TraitRef>> for Supertraits<'cx, 'tcx> {
+ fn next(&mut self) -> Option<Rc<ty::TraitRef>> {
+ loop {
+ // Extract next item from top-most stack frame, if any.
+ let next_trait = match self.stack.mut_last() {
+ None => {
+ // No more stack frames. Done.
+ return None;
+ }
+ Some(entry) => {
+ let p = entry.position;
+ if p < entry.supertraits.len() {
+ // Still more supertraits left in the top stack frame.
+ entry.position += 1;
+
+ let next_trait =
+ (*entry.supertraits.get(p)).clone();
+ Some(next_trait)
+ } else {
+ None
+ }
+ }
+ };
+
+ match next_trait {
+ Some(next_trait) => {
+ self.push(&*next_trait);
+ return Some(next_trait);
+ }
+
+ None => {
+ // Top stack frame is exhausted, pop it.
+ self.stack.pop();
+ }
+ }
+ }
+ }
+}
+
+// determine the `self` type, using fresh variables for all variables
+// declared on the impl declaration e.g., `impl<A,B> for ~[(A,B)]`
+// would return ($0, $1) where $0 and $1 are freshly instantiated type
+// variables.
+pub fn fresh_substs_for_impl(infcx: &InferCtxt,
+ span: Span,
+ impl_def_id: ast::DefId)
+ -> Substs
+{
+ let tcx = infcx.tcx;
+ let impl_generics = ty::lookup_item_type(tcx, impl_def_id).generics;
+ infcx.fresh_substs_for_generics(span, &impl_generics)
+}
+
+impl<N> fmt::Show for VtableImplData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "VtableImpl({})", self.impl_def_id)
+ }
+}
+
+impl fmt::Show for VtableParamData {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "VtableParam(...)")
+ }
+}
+
+pub fn obligations_for_generics(tcx: &ty::ctxt,
+ cause: ObligationCause,
+ recursion_depth: uint,
+ generics: &ty::Generics,
+ substs: &Substs)
+ -> VecPerParamSpace<Obligation>
+{
+ /*! See `super::obligations_for_generics` */
+
+ debug!("obligations_for_generics(generics={}, substs={})",
+ generics.repr(tcx), substs.repr(tcx));
+
+ let mut obligations = VecPerParamSpace::empty();
+
+ for def in generics.types.iter() {
+ push_obligations_for_param_bounds(tcx,
+ cause,
+ recursion_depth,
+ def.space,
+ def.index,
+ &def.bounds,
+ substs,
+ &mut obligations);
+ }
+
+ debug!("obligations() ==> {}", obligations.repr(tcx));
+
+ return obligations;
+}
+
+fn push_obligations_for_param_bounds(
+ tcx: &ty::ctxt,
+ cause: ObligationCause,
+ recursion_depth: uint,
+ space: subst::ParamSpace,
+ index: uint,
+ param_bounds: &ty::ParamBounds,
+ param_substs: &Substs,
+ obligations: &mut VecPerParamSpace<Obligation>)
+{
+ let param_ty = *param_substs.types.get(space, index);
+
+ for builtin_bound in param_bounds.builtin_bounds.iter() {
+ obligations.push(
+ space,
+ obligation_for_builtin_bound(tcx,
+ cause,
+ builtin_bound,
+ recursion_depth,
+ param_ty));
+ }
+
+ for bound_trait_ref in param_bounds.trait_bounds.iter() {
+ let bound_trait_ref = bound_trait_ref.subst(tcx, param_substs);
+ obligations.push(
+ space,
+ Obligation { cause: cause,
+ recursion_depth: recursion_depth,
+ trait_ref: bound_trait_ref });
+ }
+}
+
+pub fn obligation_for_builtin_bound(
+ tcx: &ty::ctxt,
+ cause: ObligationCause,
+ builtin_bound: ty::BuiltinBound,
+ recursion_depth: uint,
+ param_ty: ty::t)
+ -> Obligation
+{
+ match tcx.lang_items.from_builtin_kind(builtin_bound) {
+ Ok(def_id) => {
+ Obligation {
+ cause: cause,
+ recursion_depth: recursion_depth,
+ trait_ref: Rc::new(ty::TraitRef {
+ def_id: def_id,
+ substs: Substs::empty().with_self_ty(param_ty),
+ }),
+ }
+ }
+ Err(e) => {
+ tcx.sess.span_bug(cause.span, e.as_slice());
+ }
+ }
+}
+
+pub fn search_trait_and_supertraits_from_bound(tcx: &ty::ctxt,
+ caller_bound: Rc<ty::TraitRef>,
+ test: |ast::DefId| -> bool)
+ -> Option<VtableParamData>
+{
+ /*!
+ * Starting from a caller obligation `caller_bound` (which has
+ * coordinates `space`/`i` in the list of caller obligations),
+ * search through the trait and supertraits to find one where
+ * `test(d)` is true, where `d` is the def-id of the
+ * trait/supertrait. If any is found, return `Some(p)` where `p`
+ * is the path to that trait/supertrait. Else `None`.
+ */
+
+ for bound in transitive_bounds(tcx, &[caller_bound]) {
+ if test(bound.def_id) {
+ let vtable_param = VtableParamData { bound: bound };
+ return Some(vtable_param);
+ }
+ }
+
+ return None;
+}
+
+impl Repr for super::Obligation {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ format!("Obligation(trait_ref={},depth={})",
+ self.trait_ref.repr(tcx),
+ self.recursion_depth)
+ }
+}
+
+impl<N:Repr> Repr for super::Vtable<N> {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ match *self {
+ super::VtableImpl(ref v) =>
+ v.repr(tcx),
+
+ super::VtableUnboxedClosure(ref d) =>
+ format!("VtableUnboxedClosure({})",
+ d.repr(tcx)),
+
+ super::VtableParam(ref v) =>
+ format!("VtableParam({})", v.repr(tcx)),
+
+ super::VtableBuiltin =>
+ format!("Builtin"),
+ }
+ }
+}
+
+impl<N:Repr> Repr for super::VtableImplData<N> {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ format!("VtableImpl(impl_def_id={}, substs={}, nested={})",
+ self.impl_def_id.repr(tcx),
+ self.substs.repr(tcx),
+ self.nested.repr(tcx))
+ }
+}
+
+impl Repr for super::VtableParamData {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ format!("VtableParam(bound={})",
+ self.bound.repr(tcx))
+ }
+}
+
+impl Repr for super::SelectionError {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ match *self {
+ super::Unimplemented =>
+ format!("Unimplemented"),
+
+ super::Overflow =>
+ format!("Overflow"),
+
+ super::OutputTypeParameterMismatch(ref t, ref e) =>
+ format!("OutputTypeParameterMismatch({}, {})",
+ t.repr(tcx),
+ e.repr(tcx)),
+ }
+ }
+}
+
+impl Repr for super::FulfillmentError {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ format!("FulfillmentError({},{})",
+ self.obligation.repr(tcx),
+ self.code.repr(tcx))
+ }
+}
+
+impl Repr for super::FulfillmentErrorCode {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ match *self {
+ super::CodeSelectionError(ref o) => o.repr(tcx),
+ super::CodeAmbiguity => format!("Ambiguity")
+ }
+ }
+}
+
+impl fmt::Show for super::FulfillmentErrorCode {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ super::CodeSelectionError(ref e) => write!(f, "{}", e),
+ super::CodeAmbiguity => write!(f, "Ambiguity")
+ }
+ }
+}
+
+impl Repr for ty::type_err {
+ fn repr(&self, tcx: &ty::ctxt) -> String {
+ ty::type_err_to_str(tcx, self)
+ }
+}
+
let tcx = bcx.tcx();
let mut found: Vec<Opt> = vec![];
- for (i, br) in m.iter().enumerate() {
+ for br in m.iter() {
let cur = *br.pats.get(col);
let opt = match cur.node {
ast::PatLit(ref l) => ConstantValue(ConstantExpr(&**l)),
fn is_discr_reassigned(bcx: Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
match discr.node {
ast::ExprPath(..) => match bcx.def(discr.id) {
- def::DefArg(vid, _) | def::DefBinding(vid, _) |
- def::DefLocal(vid, _) | def::DefUpvar(vid, _, _, _) => {
+ def::DefLocal(vid) | def::DefUpvar(vid, _, _) => {
let mut rc = ReassignmentChecker {
node: vid,
reassigned: false
fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) {
match cmt.cat {
mc::cat_copied_upvar(mc::CopiedUpvar { upvar_id: vid, .. }) |
- mc::cat_arg(vid) | mc::cat_local(vid) => self.reassigned = self.node == vid,
+ mc::cat_local(vid) => self.reassigned = self.node == vid,
_ => {}
}
}
return bcx;
}
-enum IrrefutablePatternBindingMode {
- // Stores the association between node ID and LLVM value in `lllocals`.
- BindLocal,
- // Stores the association between node ID and LLVM value in `llargs`.
- BindArgument
-}
-
pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
local: &ast::Local)
-> Block<'blk, 'tcx> {
pat_bindings(&tcx.def_map, pat, |_, p_id, _, path1| {
let scope = cleanup::var_scope(tcx, p_id);
bcx = mk_binding_alloca(
- bcx, p_id, &path1.node, BindLocal, scope, (),
+ bcx, p_id, &path1.node, scope, (),
|(), bcx, llval, ty| { zero_mem(bcx, llval, ty); bcx });
});
bcx
Some(ident) => {
let var_scope = cleanup::var_scope(tcx, local.id);
return mk_binding_alloca(
- bcx, pat.id, ident, BindLocal, var_scope, (),
+ bcx, pat.id, ident, var_scope, (),
|(), bcx, v, _| expr::trans_into(bcx, &**init_expr,
expr::SaveIn(v)));
}
add_comment(bcx, "creating zeroable ref llval");
}
let var_scope = cleanup::var_scope(tcx, local.id);
- bind_irrefutable_pat(bcx, pat, init_datum.val, BindLocal, var_scope)
+ bind_irrefutable_pat(bcx, pat, init_datum.val, var_scope)
}
}
None => {
-> Block<'blk, 'tcx> {
/*!
* Generates code for argument patterns like `fn foo(<pat>: T)`.
- * Creates entries in the `llargs` map for each of the bindings
+ * Creates entries in the `lllocals` map for each of the bindings
* in `pat`.
*
* # Arguments
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
let arg_val = arg.add_clean(bcx.fcx, arg_scope);
- bcx.fcx.llargs.borrow_mut()
+ bcx.fcx.lllocals.borrow_mut()
.insert(pat.id, Datum::new(arg_val, arg_ty, Lvalue));
bcx
} else {
mk_binding_alloca(
- bcx, pat.id, ident, BindArgument, arg_scope, arg,
+ bcx, pat.id, ident, arg_scope, arg,
|arg, bcx, llval, _| arg.store_to(bcx, llval))
}
}
// pattern.
let arg = unpack_datum!(
bcx, arg.to_lvalue_datum_in_scope(bcx, "__arg", arg_scope));
- bind_irrefutable_pat(bcx, pat, arg.val,
- BindArgument, arg_scope)
+ bind_irrefutable_pat(bcx, pat, arg.val, arg_scope)
}
}
}
}
// General path. Copy out the values that are used in the pattern.
- bind_irrefutable_pat(bcx, pat, llvalue, BindLocal, body_scope)
+ bind_irrefutable_pat(bcx, pat, llvalue, body_scope)
}
fn mk_binding_alloca<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
p_id: ast::NodeId,
ident: &ast::Ident,
- binding_mode: IrrefutablePatternBindingMode,
cleanup_scope: cleanup::ScopeId,
arg: A,
populate: |A, Block<'blk, 'tcx>, ValueRef, ty::t|
// Now that memory is initialized and has cleanup scheduled,
// create the datum and insert into the local variable map.
let datum = Datum::new(llval, var_ty, Lvalue);
- let mut llmap = match binding_mode {
- BindLocal => bcx.fcx.lllocals.borrow_mut(),
- BindArgument => bcx.fcx.llargs.borrow_mut()
- };
- llmap.insert(p_id, datum);
+ bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
bcx
}
fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pat: &ast::Pat,
val: ValueRef,
- binding_mode: IrrefutablePatternBindingMode,
cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> {
/*!
* - bcx: starting basic block context
* - pat: the irrefutable pattern being matched.
* - val: the value being matched -- must be an lvalue (by ref, with cleanup)
- * - binding_mode: is this for an argument or a local variable?
*/
- debug!("bind_irrefutable_pat(bcx={}, pat={}, binding_mode={:?})",
+ debug!("bind_irrefutable_pat(bcx={}, pat={})",
bcx.to_str(),
- pat.repr(bcx.tcx()),
- binding_mode);
+ pat.repr(bcx.tcx()));
if bcx.sess().asm_comments() {
add_comment(bcx, format!("bind_irrefutable_pat(pat={})",
// binding will live and place it into the appropriate
// map.
bcx = mk_binding_alloca(
- bcx, pat.id, &path1.node, binding_mode, cleanup_scope, (),
+ bcx, pat.id, &path1.node, cleanup_scope, (),
|(), bcx, llval, ty| {
match pat_binding_mode {
ast::BindByValue(_) => {
}
for inner_pat in inner.iter() {
- bcx = bind_irrefutable_pat(bcx, &**inner_pat, val,
- binding_mode, cleanup_scope);
+ bcx = bind_irrefutable_pat(bcx, &**inner_pat, val, cleanup_scope);
}
}
ast::PatEnum(_, ref sub_pats) => {
for sub_pat in sub_pats.iter() {
for (i, &argval) in args.vals.iter().enumerate() {
bcx = bind_irrefutable_pat(bcx, &**sub_pat.get(i),
- argval, binding_mode,
- cleanup_scope);
+ argval, cleanup_scope);
}
}
}
let fldptr = adt::trans_field_ptr(bcx, &*repr,
val, 0, i);
bcx = bind_irrefutable_pat(bcx, &**elem,
- fldptr, binding_mode,
- cleanup_scope);
+ fldptr, cleanup_scope);
}
}
}
let ix = ty::field_idx_strict(tcx, f.ident.name, field_tys);
let fldptr = adt::trans_field_ptr(bcx, &*pat_repr, val,
discr, ix);
- bcx = bind_irrefutable_pat(bcx, &*f.pat, fldptr,
- binding_mode, cleanup_scope);
+ bcx = bind_irrefutable_pat(bcx, &*f.pat, fldptr, cleanup_scope);
}
})
}
let repr = adt::represent_node(bcx, pat.id);
for (i, elem) in elems.iter().enumerate() {
let fldptr = adt::trans_field_ptr(bcx, &*repr, val, 0, i);
- bcx = bind_irrefutable_pat(bcx, &**elem, fldptr,
- binding_mode, cleanup_scope);
+ bcx = bind_irrefutable_pat(bcx, &**elem, fldptr, cleanup_scope);
}
}
ast::PatBox(ref inner) => {
let llbox = Load(bcx, val);
- bcx = bind_irrefutable_pat(bcx, &**inner, llbox, binding_mode, cleanup_scope);
+ bcx = bind_irrefutable_pat(bcx, &**inner, llbox, cleanup_scope);
}
ast::PatRegion(ref inner) => {
let loaded_val = Load(bcx, val);
- bcx = bind_irrefutable_pat(bcx, &**inner, loaded_val, binding_mode, cleanup_scope);
+ bcx = bind_irrefutable_pat(bcx, &**inner, loaded_val, cleanup_scope);
}
ast::PatVec(ref before, ref slice, ref after) => {
let pat_ty = node_id_type(bcx, pat.id);
.iter()
.chain(slice.iter())
.chain(after.iter())
- .zip(extracted.vals.move_iter())
+ .zip(extracted.vals.into_iter())
.fold(bcx, |bcx, (inner, elem)|
- bind_irrefutable_pat(bcx, &**inner, elem, binding_mode, cleanup_scope)
+ bind_irrefutable_pat(bcx, &**inner, elem, cleanup_scope)
);
}
ast::PatMac(..) => {
let mut constraints =
String::from_str(constraints.iter()
.map(|s| s.get().to_string())
- .chain(ext_constraints.move_iter())
+ .chain(ext_constraints.into_iter())
.collect::<Vec<String>>()
.connect(",")
.as_slice());
use middle::trans::type_of::*;
use middle::trans::value::Value;
use middle::ty;
-use middle::typeck;
use util::common::indenter;
use util::ppaux::{Repr, ty_to_string};
use util::sha2::Sha256;
match ty::get(output).sty {
// functions returning bottom may unwind, but can never return normally
ty::ty_bot => {
- unsafe {
- llvm::LLVMAddFunctionAttribute(llfn,
- llvm::FunctionIndex as c_uint,
- llvm::NoReturnAttribute as uint64_t)
- }
+ llvm::SetFunctionAttribute(llfn, llvm::NoReturnAttribute)
}
_ => {}
}
if ccx.tcx().sess.opts.cg.no_redzone {
- unsafe {
- llvm::LLVMAddFunctionAttribute(llfn,
- llvm::FunctionIndex as c_uint,
- llvm::NoRedZoneAttribute as uint64_t)
- }
+ llvm::SetFunctionAttribute(llfn, llvm::NoRedZoneAttribute)
}
llvm::SetFunctionCallConv(llfn, cc);
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
-pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- t: ty::t, alloc_fn: LangItem)
- -> Result<'blk, 'tcx> {
+pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: ty::t) -> Result<'blk, 'tcx> {
let _icx = push_ctxt("malloc_raw_dyn_proc");
let ccx = bcx.ccx();
- let langcall = require_alloc_fn(bcx, t, alloc_fn);
-
// Grab the TypeRef type of ptr_ty.
let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
let ptr_llty = type_of(ccx, ptr_ty);
let size = llsize_of(bcx.ccx(), llty);
let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
- // Allocate space:
- let drop_glue = glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t));
- let r = callee::trans_lang_call(
- bcx,
- langcall,
- [
- PointerCast(bcx, drop_glue, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to()),
- size,
- llalign
- ],
- None);
- Result::new(r.bcx, PointerCast(r.bcx, r.val, ptr_llty))
+ // Allocate space and store the destructor pointer:
+ let Result {bcx: bcx, val: llbox} = malloc_raw_dyn(bcx, ptr_llty, t, size, llalign);
+ let dtor_ptr = GEPi(bcx, llbox, [0u, abi::box_field_drop_glue]);
+ let drop_glue_field_ty = type_of(ccx, ty::mk_nil_ptr(bcx.tcx()));
+ let drop_glue = PointerCast(bcx, glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t)),
+ drop_glue_field_ty);
+ Store(bcx, drop_glue, dtor_ptr);
+
+ Result::new(bcx, llbox)
}
// Since we're in trans we don't care for any region parameters
let ref substs = subst::Substs::erased(substs.types.clone());
- let vtables = typeck::check::vtable::trans_resolve_method(ccx.tcx(), did.node, substs);
- let (val, _) = monomorphize::monomorphic_fn(ccx, did, substs, vtables, None);
+ let (val, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
tcx.sess.bug("unexpected variant: required trait method \
in has_nested_returns")
}
+ ast::TypeTraitItem(_) => {
+ tcx.sess.bug("unexpected variant: type trait item in \
+ has_nested_returns")
+ }
}
}
Some(ast_map::NodeImplItem(ii)) => {
ast::MethMac(_) => tcx.sess.bug("unexpanded macro")
}
}
+ ast::TypeImplItem(_) => {
+ tcx.sess.bug("unexpected variant: type impl item in \
+ has_nested_returns")
+ }
}
}
Some(ast_map::NodeExpr(e)) => {
needs_ret_allocas: nested_returns,
personality: Cell::new(None),
caller_expects_out_pointer: uses_outptr,
- llargs: RefCell::new(NodeMap::new()),
lllocals: RefCell::new(NodeMap::new()),
llupvars: RefCell::new(NodeMap::new()),
id: id,
let arg_scope_id = cleanup::CustomScope(arg_scope);
- for (i, arg_datum) in arg_datums.move_iter().enumerate() {
+ for (i, arg_datum) in arg_datums.into_iter().enumerate() {
// For certain mode/type combinations, the raw llarg values are passed
// by value. However, within the fn body itself, we want to always
// have all locals and arguments be by-ref so that we can cancel the
assert_eq!(arg_datums.len(), 1);
- let arg_datum = arg_datums.move_iter().next().unwrap();
+ let arg_datum = arg_datums.into_iter().next().unwrap();
// Untuple the rest of the arguments.
let tuple_datum =
if !type_is_zero_size(fcx.ccx, result_ty) {
let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
let repr = adt::represent_type(ccx, result_ty);
- for (i, arg_datum) in arg_datums.move_iter().enumerate() {
+ for (i, arg_datum) in arg_datums.into_iter().enumerate() {
let lldestptr = adt::trans_field_ptr(bcx,
&*repr,
dest,
ast_map::NodeTraitItem(trait_method) => {
debug!("get_item_val(): processing a NodeTraitItem");
match *trait_method {
- ast::RequiredMethod(_) => {
- ccx.sess().bug("unexpected variant: required trait method in \
- get_item_val()");
+ ast::RequiredMethod(_) | ast::TypeTraitItem(_) => {
+ ccx.sess().bug("unexpected variant: required trait \
+ method in get_item_val()");
}
ast::ProvidedMethod(ref m) => {
register_method(ccx, id, &**m)
ast_map::NodeImplItem(ii) => {
match *ii {
ast::MethodImplItem(ref m) => register_method(ccx, id, &**m),
+ ast::TypeImplItem(ref typedef) => {
+ ccx.sess().span_bug(typedef.span,
+ "unexpected variant: required impl \
+ method in get_item_val()")
+ }
}
}
// the final product, so LTO needs to preserve them.
shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
- reachable.extend(syms.move_iter().map(|did| {
+ reachable.extend(syms.into_iter().map(|did| {
csearch::get_symbol(&shared_ccx.sess().cstore, did)
}));
});
// we care about.
if ixs.len() < 16 {
let mut small_vec = [ C_i32(self.ccx, 0), ..16 ];
- for (small_vec_e, &ix) in small_vec.mut_iter().zip(ixs.iter()) {
+ for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs.iter()) {
*small_vec_e = C_i32(self.ccx, ix as i32);
}
self.inbounds_gep(base, small_vec.slice(0, ixs.len()))
}
fn all_mem(cls: &mut [RegClass]) {
- for elt in cls.mut_iter() {
+ for elt in cls.iter_mut() {
*elt = Memory;
}
}
use metadata::csearch;
use middle::def;
use middle::subst;
-use middle::subst::{Subst, VecPerParamSpace};
+use middle::subst::{Subst};
use middle::trans::adt;
use middle::trans::base;
use middle::trans::base::*;
use middle::trans::type_::Type;
use middle::trans::type_of;
use middle::ty;
-use middle::typeck;
use middle::typeck::coherence::make_substs_for_receiver_types;
use middle::typeck::MethodCall;
use util::ppaux::Repr;
}
}
def::DefStatic(..) |
- def::DefArg(..) |
def::DefLocal(..) |
- def::DefBinding(..) |
def::DefUpvar(..) => {
datum_callee(bcx, ref_expr)
}
def::DefMod(..) | def::DefForeignMod(..) | def::DefTrait(..) |
- def::DefTy(..) | def::DefPrimTy(..) |
+ def::DefTy(..) | def::DefPrimTy(..) | def::DefAssociatedTy(..) |
def::DefUse(..) | def::DefTyParamBinder(..) |
def::DefRegion(..) | def::DefLabel(..) | def::DefTyParam(..) |
def::DefSelfTy(..) | def::DefMethod(..) => {
let _icx = push_ctxt("trans_fn_ref");
let substs = node_id_substs(bcx, node);
- let vtable_key = match node {
- ExprId(id) => MethodCall::expr(id),
- MethodCall(method_call) => method_call
- };
- let vtables = node_vtables(bcx, vtable_key);
- debug!("trans_fn_ref(def_id={}, node={:?}, substs={}, vtables={})",
+ debug!("trans_fn_ref(def_id={}, node={:?}, substs={})",
def_id.repr(bcx.tcx()),
node,
- substs.repr(bcx.tcx()),
- vtables.repr(bcx.tcx()));
- trans_fn_ref_with_vtables(bcx, def_id, node, substs, vtables)
+ substs.repr(bcx.tcx()));
+ trans_fn_ref_with_substs(bcx, def_id, node, substs)
}
-fn trans_fn_ref_with_vtables_to_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- def_id: ast::DefId,
- ref_id: ast::NodeId,
- substs: subst::Substs,
- vtables: typeck::vtable_res)
- -> Callee<'blk, 'tcx> {
+fn trans_fn_ref_with_substs_to_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ def_id: ast::DefId,
+ ref_id: ast::NodeId,
+ substs: subst::Substs)
+ -> Callee<'blk, 'tcx> {
Callee {
bcx: bcx,
- data: Fn(trans_fn_ref_with_vtables(bcx,
- def_id,
- ExprId(ref_id),
- substs,
- vtables)),
+ data: Fn(trans_fn_ref_with_substs(bcx,
+ def_id,
+ ExprId(ref_id),
+ substs)),
}
}
-fn resolve_default_method_vtables(bcx: Block,
- impl_id: ast::DefId,
- substs: &subst::Substs,
- impl_vtables: typeck::vtable_res)
- -> typeck::vtable_res
-{
- // Get the vtables that the impl implements the trait at
- let impl_res = ty::lookup_impl_vtables(bcx.tcx(), impl_id);
-
- // Build up a param_substs that we are going to resolve the
- // trait_vtables under.
- let param_substs = param_substs {
- substs: (*substs).clone(),
- vtables: impl_vtables.clone()
- };
-
- let mut param_vtables = resolve_vtables_under_param_substs(
- bcx.tcx(), ¶m_substs, &impl_res);
-
- // Now we pull any vtables for parameters on the actual method.
- param_vtables.push_all(subst::FnSpace,
- impl_vtables.get_slice(subst::FnSpace));
-
- param_vtables
-}
-
/// Translates the adapter that deconstructs a `Box<Trait>` object into
/// `Trait` so that a by-value self method can be called.
pub fn trans_unboxing_shim(bcx: Block,
llfn
}
-pub fn trans_fn_ref_with_vtables(
+pub fn trans_fn_ref_with_substs(
bcx: Block, //
def_id: ast::DefId, // def id of fn
node: ExprOrMethodCall, // node id of use of fn; may be zero if N/A
- substs: subst::Substs, // values for fn's ty params
- vtables: typeck::vtable_res) // vtables for the call
+ substs: subst::Substs) // values for fn's ty params
-> ValueRef
{
/*!
* This parameter may be zero; but, if so, the resulting value may not
* have the right type, so it must be cast before being used.
* - `substs`: values for each of the fn/method's parameters
- * - `vtables`: values for each bound on each of the type parameters
*/
- let _icx = push_ctxt("trans_fn_ref_with_vtables");
+ let _icx = push_ctxt("trans_fn_ref_with_substs");
let ccx = bcx.ccx();
let tcx = bcx.tcx();
- debug!("trans_fn_ref_with_vtables(bcx={}, def_id={}, node={:?}, \
- substs={}, vtables={})",
+ debug!("trans_fn_ref_with_substs(bcx={}, def_id={}, node={:?}, \
+ substs={})",
bcx.to_str(),
def_id.repr(tcx),
node,
- substs.repr(tcx),
- vtables.repr(tcx));
+ substs.repr(tcx));
assert!(substs.types.all(|t| !ty::type_needs_infer(*t)));
// We need to do a bunch of special handling for default methods.
// We need to modify the def_id and our substs in order to monomorphize
// the function.
- let (is_default, def_id, substs, vtables) =
- match ty::provided_source(tcx, def_id) {
- None => (false, def_id, substs, vtables),
+ let (is_default, def_id, substs) = match ty::provided_source(tcx, def_id) {
+ None => (false, def_id, substs),
Some(source_id) => {
// There are two relevant substitutions when compiling
// default methods. First, there is the substitution for
debug!("trans_fn_with_vtables - default method: \
substs = {}, trait_subst = {}, \
- first_subst = {}, new_subst = {}, \
- vtables = {}",
+ first_subst = {}, new_subst = {}",
substs.repr(tcx), trait_ref.substs.repr(tcx),
- first_subst.repr(tcx), new_substs.repr(tcx),
- vtables.repr(tcx));
-
- let param_vtables =
- resolve_default_method_vtables(bcx,
- impl_id,
- &substs,
- vtables);
+ first_subst.repr(tcx), new_substs.repr(tcx));
- debug!("trans_fn_with_vtables - default method: \
- param_vtables = {}",
- param_vtables.repr(tcx));
-
- (true, source_id, new_substs, param_vtables)
+ (true, source_id, new_substs)
+ }
+ ty::TypeTraitItem(_) => {
+ bcx.tcx().sess.bug("trans_fn_ref_with_substs() tried \
+ to translate an associated type?!")
}
}
}
};
let (val, must_cast) =
- monomorphize::monomorphic_fn(ccx, def_id, &substs,
- vtables, opt_ref_id);
+ monomorphize::monomorphic_fn(ccx, def_id, &substs, opt_ref_id);
let mut val = val;
if must_cast && node != ExprId(0) {
// Monotype of the REFERENCE to the function (type params
None,
fty,
|bcx, _| {
- trans_fn_ref_with_vtables_to_callee(bcx,
- did,
- 0,
- subst::Substs::empty(),
- VecPerParamSpace::empty())
+ trans_fn_ref_with_substs_to_callee(bcx,
+ did,
+ 0,
+ subst::Substs::empty())
},
ArgVals(args),
dest)
// value.
ArgVals(&'a [ValueRef]),
- // For overloaded operators: `(lhs, Option(rhs, rhs_id))`. `lhs`
+ // For overloaded operators: `(lhs, Vec<(rhs, rhs_id)>)`. `lhs`
// is the left-hand-side and `rhs/rhs_id` is the datum/expr-id of
- // the right-hand-side (if any).
- ArgOverloadedOp(Datum<Expr>, Option<(Datum<Expr>, ast::NodeId)>),
+ // the right-hand-side arguments (if any).
+ ArgOverloadedOp(Datum<Expr>, Vec<(Datum<Expr>, ast::NodeId)>),
// Supply value of arguments as a list of expressions that must be
// translated, for overloaded call operators.
DontAutorefArg)
}));
- match rhs {
- Some((rhs, rhs_id)) => {
- assert_eq!(arg_tys.len(), 2);
-
- llargs.push(unpack_result!(bcx, {
- trans_arg_datum(bcx, *arg_tys.get(1), rhs,
- arg_cleanup_scope,
- DoAutorefArg(rhs_id))
- }));
- }
- None => assert_eq!(arg_tys.len(), 1)
+ assert_eq!(arg_tys.len(), 1 + rhs.len());
+ for (rhs, rhs_id) in rhs.move_iter() {
+ llargs.push(unpack_result!(bcx, {
+ trans_arg_datum(bcx, *arg_tys.get(1), rhs,
+ arg_cleanup_scope,
+ DoAutorefArg(rhs_id))
+ }));
}
}
ArgVals(vs) => {
debug!("schedule_clean_in_ast_scope(cleanup_scope={:?})",
cleanup_scope);
- for scope in self.scopes.borrow_mut().mut_iter().rev() {
+ for scope in self.scopes.borrow_mut().iter_mut().rev() {
if scope.kind.is_ast_with_id(cleanup_scope) {
scope.cleanups.push(cleanup);
scope.clear_cached_exits();
// Check if a landing pad block exists; if not, create one.
{
let mut scopes = self.scopes.borrow_mut();
- let last_scope = scopes.mut_last().unwrap();
+ let last_scope = scopes.last_mut().unwrap();
match last_scope.cached_landing_pad {
Some(llbb) => { return llbb; }
None => {
use driver::config::FullDebugInfo;
use llvm::ValueRef;
use middle::def;
-use middle::freevars;
-use middle::lang_items::ClosureExchangeMallocFnLangItem;
+use middle::mem_categorization::Typer;
use middle::trans::adt;
use middle::trans::base::*;
use middle::trans::build::*;
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pub struct EnvValue {
- action: freevars::CaptureMode,
+ action: ast::CaptureClause,
datum: Datum<Lvalue>
}
// converted to ptrs.
let bound_tys = bound_values.iter().map(|bv| {
match bv.action {
- freevars::CaptureByValue => bv.datum.ty,
- freevars::CaptureByRef => ty::mk_mut_ptr(tcx, bv.datum.ty)
+ ast::CaptureByValue => bv.datum.ty,
+ ast::CaptureByRef => ty::mk_mut_ptr(tcx, bv.datum.ty)
}
}).collect();
let cdata_ty = ty::mk_tup(tcx, bound_tys);
let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
match store {
ty::UniqTraitStore => {
- malloc_raw_dyn_proc(bcx, cbox_ty, ClosureExchangeMallocFnLangItem)
+ malloc_raw_dyn_proc(bcx, cbox_ty)
}
ty::RegionTraitStore(..) => {
let llbox = alloc_ty(bcx, cbox_ty, "__closure");
// Copy expr values into boxed bindings.
let mut bcx = bcx;
- for (i, bv) in bound_values.move_iter().enumerate() {
+ for (i, bv) in bound_values.into_iter().enumerate() {
debug!("Copy {} into closure", bv.to_string(ccx));
if ccx.sess().asm_comments() {
let bound_data = GEPi(bcx, llbox, [0u, abi::box_field_body, i]);
match bv.action {
- freevars::CaptureByValue => {
+ ast::CaptureByValue => {
bcx = bv.datum.store_to(bcx, bound_data);
}
- freevars::CaptureByRef => {
+ ast::CaptureByRef => {
Store(bcx, bv.datum.to_llref(), bound_data);
}
}
// Given a context and a list of upvars, build a closure. This just
// collects the upvars and packages them up for store_environment.
fn build_closure<'blk, 'tcx>(bcx0: Block<'blk, 'tcx>,
- freevar_mode: freevars::CaptureMode,
- freevars: &Vec<freevars::freevar_entry>,
+ freevar_mode: ast::CaptureClause,
+ freevars: &Vec<ty::Freevar>,
store: ty::TraitStore)
-> ClosureResult<'blk, 'tcx> {
let _icx = push_ctxt("closure::build_closure");
// with the upvars and type descriptors.
fn load_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
cdata_ty: ty::t,
- freevars: &Vec<freevars::freevar_entry>,
+ freevars: &Vec<ty::Freevar>,
store: ty::TraitStore)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::load_environment");
fn load_unboxed_closure_environment<'blk, 'tcx>(
bcx: Block<'blk, 'tcx>,
arg_scope_id: ScopeId,
- freevars: &Vec<freevars::freevar_entry>,
+ freevars: &Vec<ty::Freevar>,
closure_id: ast::DefId)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::load_environment");
// set an inline hint for all closures
set_inline_hint(llfn);
- let freevar_mode = freevars::get_capture_mode(tcx, id);
- let freevars: Vec<freevars::freevar_entry> =
- freevars::with_freevars(tcx,
- id,
- |fv| fv.iter().map(|&fv| fv).collect());
+ let freevar_mode = tcx.capture_mode(id);
+ let freevars: Vec<ty::Freevar> =
+ ty::with_freevars(tcx, id, |fv| fv.iter().map(|&fv| fv).collect());
let ClosureResult {
llbox,
.clone();
let function_type = ty::mk_closure(bcx.tcx(), function_type);
- let freevars: Vec<freevars::freevar_entry> =
- freevars::with_freevars(bcx.tcx(),
- id,
- |fv| fv.iter().map(|&fv| fv).collect());
+ let freevars: Vec<ty::Freevar> =
+ ty::with_freevars(bcx.tcx(), id, |fv| fv.iter().map(|&fv| fv).collect());
let freevars_ptr = &freevars;
trans_closure(bcx.ccx(),
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef};
use llvm::{True, False, Bool};
use middle::def;
-use middle::freevars;
use middle::lang_items::LangItem;
use middle::mem_categorization as mc;
use middle::subst;
use middle::trans::debuginfo;
use middle::trans::type_::Type;
use middle::trans::type_of;
+use middle::traits;
use middle::ty;
+use middle::ty_fold;
use middle::typeck;
+use middle::typeck::infer;
use util::ppaux::Repr;
use util::nodemap::{DefIdMap, NodeMap};
use libc::{c_uint, c_longlong, c_ulonglong, c_char};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
+use std::rc::Rc;
use std::vec::Vec;
use syntax::ast::Ident;
use syntax::ast;
// will only be set in the case of default methods.
pub struct param_substs {
pub substs: subst::Substs,
- pub vtables: typeck::vtable_res,
}
impl param_substs {
pub fn empty() -> param_substs {
param_substs {
substs: subst::Substs::trans_empty(),
- vtables: subst::VecPerParamSpace::empty(),
}
}
}
}
-fn param_substs_to_string(this: ¶m_substs, tcx: &ty::ctxt) -> String {
- format!("param_substs(substs={},vtables={})",
- this.substs.repr(tcx),
- this.vtables.repr(tcx))
-}
-
impl Repr for param_substs {
fn repr(&self, tcx: &ty::ctxt) -> String {
- param_substs_to_string(self, tcx)
+ self.substs.repr(tcx)
}
}
// points to, but if this value is false, that slot will be a local alloca.
pub caller_expects_out_pointer: bool,
- // Maps arguments to allocas created for them in llallocas.
- pub llargs: RefCell<NodeMap<LvalueDatum>>,
-
- // Maps the def_ids for local variables to the allocas created for
+ // Maps the DefId's for local variables to the allocas created for
// them in llallocas.
pub lllocals: RefCell<NodeMap<LvalueDatum>>,
}
fn capture_mode(&self, closure_expr_id: ast::NodeId)
- -> freevars::CaptureMode {
+ -> ast::CaptureClause {
self.tcx().capture_modes.borrow().get_copy(&closure_expr_id)
}
}
monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex))
}
+pub fn fulfill_obligation(ccx: &CrateContext,
+ span: Span,
+ trait_ref: Rc<ty::TraitRef>)
+ -> traits::Vtable<()>
+{
+ /*!
+ * Attempts to resolve an obligation. The result is a shallow
+ * vtable resolution -- meaning that we do not (necessarily) resolve
+ * all nested obligations on the impl. Note that type check should
+ * guarantee to us that all nested obligations *could be* resolved
+ * if we wanted to.
+ */
+
+ let tcx = ccx.tcx();
+
+ // Remove any references to regions; this helps improve caching.
+ let trait_ref = ty_fold::erase_regions(tcx, trait_ref);
+
+ // First check the cache.
+ match ccx.trait_cache().borrow().find(&trait_ref) {
+ Some(vtable) => {
+ info!("Cache hit: {}", trait_ref.repr(ccx.tcx()));
+ return (*vtable).clone();
+ }
+ None => { }
+ }
+
+ ty::populate_implementations_for_trait_if_necessary(tcx, trait_ref.def_id);
+ let infcx = infer::new_infer_ctxt(tcx);
+
+ // Parameter environment is used to give details about type parameters,
+ // but since we are in trans, everything is fully monomorphized.
+ let param_env = ty::empty_parameter_environment();
+ let unboxed_closures = tcx.unboxed_closures.borrow();
+
+ // Do the initial selection for the obligation. This yields the
+ // shallow result we are looking for -- that is, what specific impl.
+ let selcx = traits::SelectionContext::new(&infcx, ¶m_env,
+ &*unboxed_closures);
+ let obligation = traits::Obligation::misc(span, trait_ref.clone());
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(selection)) => selection,
+ Ok(None) => {
+ tcx.sess.span_bug(
+ span,
+ format!("Encountered ambiguity selecting `{}` during trans",
+ trait_ref.repr(tcx)).as_slice())
+ }
+ Err(e) => {
+ tcx.sess.span_bug(
+ span,
+ format!("Encountered error `{}` selecting `{}` during trans",
+ e.repr(tcx),
+ trait_ref.repr(tcx)).as_slice())
+ }
+ };
+
+ // Currently, we use a fulfillment context to completely resolve
+ // all nested obligations. This is because they can inform the
+ // inference of the impl's type parameters. However, in principle,
+ // we only need to do this until the impl's type parameters are
+ // fully bound. It could be a slight optimization to stop
+ // iterating early.
+ let mut fulfill_cx = traits::FulfillmentContext::new();
+ let vtable = selection.map_move_nested(|obligation| {
+ fulfill_cx.register_obligation(tcx, obligation);
+ });
+ match fulfill_cx.select_all_or_error(&infcx, ¶m_env, &*unboxed_closures) {
+ Ok(()) => { }
+ Err(e) => {
+ tcx.sess.span_bug(
+ span,
+ format!("Encountered errors `{}` fulfilling `{}` during trans",
+ e.repr(tcx),
+ trait_ref.repr(tcx)).as_slice());
+ }
+ }
+
+ // Use skolemize to simultaneously replace all type variables with
+ // their bindings and replace all regions with 'static. This is
+ // sort of overkill because we do not expect there to be any
+ // unbound type variables, hence no skolemized types should ever
+ // be inserted.
+ let vtable = infer::skolemize(&infcx, vtable);
+
+ info!("Cache miss: {}", trait_ref.repr(ccx.tcx()));
+ ccx.trait_cache().borrow_mut().insert(trait_ref,
+ vtable.clone());
+
+ vtable
+}
+
// Key used to lookup values supplied for type parameters in an expr.
#[deriving(PartialEq)]
pub enum ExprOrMethodCall {
pub fn node_id_substs(bcx: Block,
node: ExprOrMethodCall)
- -> subst::Substs {
+ -> subst::Substs
+{
let tcx = bcx.tcx();
let substs = match node {
substs.repr(bcx.tcx())).as_slice());
}
+ let substs = substs.erase_regions();
substs.substp(tcx, bcx.fcx.param_substs)
}
-pub fn node_vtables(bcx: Block, id: typeck::MethodCall)
- -> typeck::vtable_res {
- bcx.tcx().vtable_map.borrow().find(&id).map(|vts| {
- resolve_vtables_in_fn_ctxt(bcx.fcx, vts)
- }).unwrap_or_else(|| subst::VecPerParamSpace::empty())
-}
-
-// Apply the typaram substitutions in the FunctionContext to some
-// vtables. This should eliminate any vtable_params.
-pub fn resolve_vtables_in_fn_ctxt(fcx: &FunctionContext,
- vts: &typeck::vtable_res)
- -> typeck::vtable_res {
- resolve_vtables_under_param_substs(fcx.ccx.tcx(),
- fcx.param_substs,
- vts)
-}
-
-pub fn resolve_vtables_under_param_substs(tcx: &ty::ctxt,
- param_substs: ¶m_substs,
- vts: &typeck::vtable_res)
- -> typeck::vtable_res
-{
- vts.map(|ds| {
- resolve_param_vtables_under_param_substs(tcx,
- param_substs,
- ds)
- })
-}
-
-pub fn resolve_param_vtables_under_param_substs(tcx: &ty::ctxt,
- param_substs: ¶m_substs,
- ds: &typeck::vtable_param_res)
- -> typeck::vtable_param_res
-{
- ds.iter().map(|d| {
- resolve_vtable_under_param_substs(tcx,
- param_substs,
- d)
- }).collect()
-}
-
-
-
-pub fn resolve_vtable_under_param_substs(tcx: &ty::ctxt,
- param_substs: ¶m_substs,
- vt: &typeck::vtable_origin)
- -> typeck::vtable_origin
-{
- match *vt {
- typeck::vtable_static(trait_id, ref vtable_substs, ref sub) => {
- let vtable_substs = vtable_substs.substp(tcx, param_substs);
- typeck::vtable_static(
- trait_id,
- vtable_substs,
- resolve_vtables_under_param_substs(tcx, param_substs, sub))
- }
- typeck::vtable_param(n_param, n_bound) => {
- find_vtable(tcx, param_substs, n_param, n_bound)
- }
- typeck::vtable_unboxed_closure(def_id) => {
- typeck::vtable_unboxed_closure(def_id)
- }
- typeck::vtable_error => typeck::vtable_error
- }
-}
-
-pub fn find_vtable(tcx: &ty::ctxt,
- ps: ¶m_substs,
- n_param: typeck::param_index,
- n_bound: uint)
- -> typeck::vtable_origin {
- debug!("find_vtable(n_param={:?}, n_bound={}, ps={})",
- n_param, n_bound, ps.repr(tcx));
-
- let param_bounds = ps.vtables.get(n_param.space, n_param.index);
- param_bounds.get(n_bound).clone()
-}
-
pub fn langcall(bcx: Block,
span: Option<Span>,
msg: &str,
None => { }
Some(adj) => {
match adj {
- ty::AutoAddEnv(ty::RegionTraitStore(ty::ReStatic, _)) => {
+ ty::AdjustAddEnv(ty::RegionTraitStore(ty::ReStatic, _)) => {
let def = ty::resolve_expr(cx.tcx(), e);
let wrapper = closure::get_wrapper_for_bare_fn(cx,
ety_adjusted,
is_local);
llconst = C_struct(cx, [wrapper, C_null(Type::i8p(cx))], false)
}
- ty::AutoAddEnv(store) => {
+ ty::AdjustAddEnv(store) => {
cx.sess()
.span_bug(e.span,
format!("unexpected static function: {:?}",
store).as_slice())
}
- ty::AutoDerefRef(ref adj) => {
+ ty::AdjustDerefRef(ref adj) => {
let mut ty = ety;
// Save the last autoderef in case we can avoid it.
if adj.autoderefs > 0 {
use llvm::mk_target_data;
use metadata::common::LinkMeta;
use middle::resolve;
+use middle::traits;
use middle::trans::adt;
use middle::trans::base;
use middle::trans::builder::Builder;
monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
monomorphizing: RefCell<DefIdMap<uint>>,
/// Cache generated vtables
- vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
+ vtables: RefCell<HashMap<(ty::t,Rc<ty::TraitRef>), ValueRef>>,
/// Cache of constant strings,
const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
/// This is used to perform some basic load-balancing to keep all LLVM
/// contexts around the same size.
n_llvm_insns: Cell<uint>,
+
+ trait_cache: RefCell<HashMap<Rc<ty::TraitRef>,
+ traits::Vtable<()>>>,
}
pub struct CrateContext<'a, 'tcx: 'a> {
adt_reprs: RefCell::new(HashMap::new()),
type_hashcodes: RefCell::new(HashMap::new()),
all_llvm_symbols: RefCell::new(HashSet::new()),
- int_type: Type::from_ref(ptr::mut_null()),
- opaque_vec_type: Type::from_ref(ptr::mut_null()),
+ int_type: Type::from_ref(ptr::null_mut()),
+ opaque_vec_type: Type::from_ref(ptr::null_mut()),
builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
unboxed_closure_vals: RefCell::new(DefIdMap::new()),
dbg_cx: dbg_cx,
eh_personality: RefCell::new(None),
intrinsics: RefCell::new(HashMap::new()),
n_llvm_insns: Cell::new(0u),
+ trait_cache: RefCell::new(HashMap::new()),
};
local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
&self.local.monomorphizing
}
- pub fn vtables<'a>(&'a self) -> &'a RefCell<HashMap<(ty::t, MonoId), ValueRef>> {
+ pub fn vtables<'a>(&'a self) -> &'a RefCell<HashMap<(ty::t,Rc<ty::TraitRef>), ValueRef>> {
&self.local.vtables
}
pub fn count_llvm_insn(&self) {
self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
}
+
+ pub fn trait_cache(&self) -> &RefCell<HashMap<Rc<ty::TraitRef>, traits::Vtable<()>>> {
+ &self.local.trait_cache
+ }
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {
// Codegen the body.
body_bcx_out = trans_block(body_bcx_out, body, expr::Ignore);
- body_bcx_out.fcx.pop_custom_cleanup_scope(binding_cleanup_scope);
+ body_bcx_out =
+ body_bcx_out.fcx
+ .pop_and_trans_custom_cleanup_scope(body_bcx_out,
+ binding_cleanup_scope);
body_bcx_out =
body_bcx_out.fcx
.pop_and_trans_custom_cleanup_scope(body_bcx_out,
}
enum FunctionDebugContextRepr {
- FunctionDebugContext(Box<FunctionDebugContextData>),
+ DebugInfo(Box<FunctionDebugContextData>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
span: Span)
-> &'a FunctionDebugContextData {
match self.repr {
- FunctionDebugContext(box ref data) => data,
+ DebugInfo(box ref data) => data,
DebugInfoDisabled => {
cx.sess().span_bug(span,
FunctionDebugContext::debuginfo_disabled_message());
cx.sess().targ_cfg.os == abi::OsiOS {
"Dwarf Version".with_c_str(
|s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 2));
- } else if cx.sess().targ_cfg.os == abi::OsLinux {
- // FIXME(#13611) this is a kludge fix because the Linux bots have
- // gdb 7.4 which doesn't understand dwarf4, we should
- // do something more graceful here.
- "Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 3));
}
// Prevent bitcode readers from deleting the debug info.
type_metadata,
is_local_to_unit,
global,
- ptr::mut_null());
+ ptr::null_mut());
}
})
});
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path1| {
- let llarg = match bcx.fcx.llargs.borrow().find_copy(&node_id) {
+ let llarg = match bcx.fcx.lllocals.borrow().find_copy(&node_id) {
Some(v) => v,
None => {
bcx.sess().span_bug(span,
- format!("no entry in llargs table for {:?}",
+ format!("no entry in lllocals table for {:?}",
node_id).as_slice());
}
};
- if unsafe { llvm::LLVMIsAAllocaInst(llarg.val) } == ptr::mut_null() {
+ if unsafe { llvm::LLVMIsAAllocaInst(llarg.val) } == ptr::null_mut() {
cx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \
Referenced variable location is not an alloca!");
}
set_debug_location(fcx.ccx, UnknownLocation);
return;
}
- FunctionDebugContext(box ref function_debug_context) => {
+ DebugInfo(box ref function_debug_context) => {
let cx = fcx.ccx;
debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span));
/// first real statement/expression of the function is translated.
pub fn start_emitting_source_locations(fcx: &FunctionContext) {
match fcx.debug_context.repr {
- FunctionDebugContext(box ref data) => {
+ DebugInfo(box ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
method.span,
true)
}
+ ast::TypeImplItem(ref typedef) => {
+ cx.sess().span_bug(typedef.span,
+ "create_function_debug_context() \
+ called on associated type?!")
+ }
}
}
ast_map::NodeExpr(ref expr) => {
cx.sess().opts.optimize != config::No,
llfn,
template_parameters,
- ptr::mut_null())
+ ptr::null_mut())
}
})
});
fn_metadata,
&mut *fn_debug_context.scope_map.borrow_mut());
- return FunctionDebugContext { repr: FunctionDebugContext(fn_debug_context) };
+ return FunctionDebugContext { repr: DebugInfo(fn_debug_context) };
fn get_function_signature(cx: &CrateContext,
fn_ast_id: ast::NodeId,
// Return type -- llvm::DIBuilder wants this at index 0
match fn_decl.output.node {
ast::TyNil => {
- signature.push(ptr::mut_null());
+ signature.push(ptr::null_mut());
}
_ => {
assert_type_for_node_id(cx, fn_ast_id, error_span);
file_metadata,
name,
actual_self_type_metadata,
- ptr::mut_null(),
+ ptr::null_mut(),
0,
0)
}
file_metadata,
name,
actual_type_metadata,
- ptr::mut_null(),
+ ptr::null_mut(),
0,
0)
}
bytes_to_bits(enum_type_size),
bytes_to_bits(enum_type_align),
0, // Flags
- ptr::mut_null(),
+ ptr::null_mut(),
0, // RuntimeLang
unique_type_id_str)
}
bytes_to_bits(struct_size),
bytes_to_bits(struct_align),
0,
- ptr::mut_null(),
+ ptr::null_mut(),
empty_array,
0,
- ptr::mut_null(),
+ ptr::null_mut(),
unique_type_id)
})
})
// return type
signature_metadata.push(match ty::get(signature.output).sty {
- ty::ty_nil => ptr::mut_null(),
+ ty::ty_nil => ptr::null_mut(),
_ => type_metadata(cx, signature.output, span)
});
let col = UNKNOWN_COLUMN_NUMBER;
debug!("setting debug location to {} {}", line, col);
let elements = [C_i32(cx, line as i32), C_i32(cx, col as i32),
- scope, ptr::mut_null()];
+ scope, ptr::null_mut()];
unsafe {
metadata_node = llvm::LLVMMDNodeInContext(debug_context(cx).llcontext,
elements.as_ptr(),
}
UnknownLocation => {
debug!("clearing debug location ");
- metadata_node = ptr::mut_null();
+ metadata_node = ptr::null_mut();
}
};
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
match fcx.debug_context.repr {
- FunctionDebugContext(_) => false,
+ DebugInfo(_) => false,
_ => true
}
}
walk_expr(cx, &**rhs, scope_stack, scope_map);
}
+ ast::ExprSlice(ref base, ref start, ref end, _) => {
+ walk_expr(cx, &**base, scope_stack, scope_map);
+ start.as_ref().map(|x| walk_expr(cx, &**x, scope_stack, scope_map));
+ end.as_ref().map(|x| walk_expr(cx, &**x, scope_stack, scope_map));
+ }
+
ast::ExprVec(ref init_expressions) |
ast::ExprTup(ref init_expressions) => {
for ie in init_expressions.iter() {
} else {
None
};
- let mut path = krate.move_iter().chain(path).peekable();
+ let mut path = krate.into_iter().chain(path).peekable();
let mut current_key = Vec::new();
let mut parent_node: Option<Rc<NamespaceTreeNode>> = None;
// create and insert
let parent_scope = match parent_node {
Some(ref node) => node.scope,
- None => ptr::mut_null()
+ None => ptr::null_mut()
};
let namespace_name = token::get_name(name);
let scope = namespace_name.get().with_c_str(|namespace_name| {
parent_scope,
namespace_name,
// cannot reconstruct file ...
- ptr::mut_null(),
+ ptr::null_mut(),
// ... or line information, but that's not so important.
0)
}
use middle::lang_items::MallocFnLangItem;
use middle::mem_categorization::Typer;
use middle::subst;
+use middle::subst::Subst;
use middle::trans::_match;
use middle::trans::adt;
use middle::trans::asm;
use middle::trans::tvec;
use middle::trans::type_of;
use middle::ty::{struct_fields, tup_fields};
-use middle::ty::{AutoDerefRef, AutoAddEnv, AutoUnsafe};
+use middle::ty::{AdjustDerefRef, AdjustAddEnv, AutoUnsafe};
use middle::ty::{AutoPtr};
use middle::ty;
use middle::typeck;
use middle::typeck::MethodCall;
use util::common::indenter;
use util::ppaux::Repr;
-use util::nodemap::NodeMap;
use middle::trans::machine::{llsize_of, llsize_of_alloc};
use middle::trans::type_::Type;
use syntax::codemap;
use syntax::print::pprust::{expr_to_string};
use syntax::ptr::P;
+use std::rc::Rc;
// Destinations
debug!("unadjusted datum for expr {}: {}",
expr.id, datum.to_string(bcx.ccx()));
match adjustment {
- AutoAddEnv(..) => {
+ AdjustAddEnv(..) => {
datum = unpack_datum!(bcx, add_env(bcx, expr, datum));
}
- AutoDerefRef(ref adj) => {
+ AdjustDerefRef(ref adj) => {
let (autoderefs, use_autoref) = match adj.autoref {
// Extracting a value from a box counts as a deref, but if we are
// just converting Box<[T, ..n]> to Box<[T]> we aren't really doing
_ => bcx.sess().bug(format!("UnsizeStruct with bad sty: {}",
bcx.ty_to_string(unsized_ty)).as_slice())
},
- &ty::UnsizeVtable(..) =>
+ &ty::UnsizeVtable(ty::TyTrait { def_id: def_id, substs: ref substs, .. }, _) => {
+ let substs = substs.with_self_ty(unsized_ty);
+ let trait_ref =
+ Rc::new(ty::TraitRef { def_id: def_id,
+ substs: substs });
+ let trait_ref =
+ trait_ref.subst(bcx.tcx(), &bcx.fcx.param_substs.substs);
+ let box_ty = mk_ty(unsized_ty);
PointerCast(bcx,
- meth::vtable_ptr(bcx, id, mk_ty(unsized_ty)),
+ meth::get_vtable(bcx, box_ty, trait_ref),
Type::vtable_ptr(bcx.ccx()))
+ }
}
}
ast::ExprIndex(ref base, ref idx) => {
trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
}
+ ast::ExprSlice(ref base, ref start, ref end, _) => {
+ let _icx = push_ctxt("trans_slice");
+ let ccx = bcx.ccx();
+
+ let method_call = MethodCall::expr(expr.id);
+ let method_ty = ccx.tcx()
+ .method_map
+ .borrow()
+ .find(&method_call)
+ .map(|method| method.ty);
+ let base_datum = unpack_datum!(bcx, trans(bcx, &**base));
+
+ let mut args = vec![];
+ start.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
+ end.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
+
+ let result_ty = ty::ty_fn_ret(monomorphize_type(bcx, method_ty.unwrap()));
+ let scratch = rvalue_scratch_datum(bcx, result_ty, "trans_slice");
+
+ unpack_result!(bcx,
+ trans_overloaded_op(bcx,
+ expr,
+ method_call,
+ base_datum,
+ args,
+ Some(SaveIn(scratch.val))));
+ DatumBlock::new(bcx, scratch.to_expr_datum())
+ }
ast::ExprBox(_, ref contents) => {
// Special case for `Box<T>` and `Gc<T>`
let box_ty = expr_ty(bcx, expr);
index_expr,
method_call,
base_datum,
- Some((ix_datum, idx.id)),
+ vec![(ix_datum, idx.id)],
None));
let ref_ty = ty::ty_fn_ret(monomorphize_type(bcx, method_ty));
let elt_ty = match ty::deref(ref_ty, true) {
let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
- Some((rhs_datum, rhs.id)), Some(dest)).bcx
+ vec![(rhs_datum, rhs.id)], Some(dest)).bcx
}
ast::ExprUnary(_, ref subexpr) => {
// if not overloaded, would be RvalueDatumExpr
let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
- arg, None, Some(dest)).bcx
+ arg, Vec::new(), Some(dest)).bcx
}
ast::ExprIndex(ref base, ref idx) => {
// if not overloaded, would be RvalueDatumExpr
let base = unpack_datum!(bcx, trans(bcx, &**base));
let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
- Some((idx_datum, idx.id)), Some(dest)).bcx
+ vec![(idx_datum, idx.id)], Some(dest)).bcx
}
ast::ExprCast(ref val, _) => {
// DPS output mode means this is a trait cast:
if ty::type_is_trait(node_id_type(bcx, expr.id)) {
+ let trait_ref =
+ bcx.tcx().object_cast_map.borrow()
+ .find(&expr.id)
+ .map(|t| (*t).clone())
+ .unwrap();
+ let trait_ref =
+ trait_ref.subst(bcx.tcx(), &bcx.fcx.param_substs.substs);
let datum = unpack_datum!(bcx, trans(bcx, &**val));
- meth::trans_trait_cast(bcx, datum, expr.id, dest)
+ meth::trans_trait_cast(bcx, datum, expr.id,
+ trait_ref, dest)
} else {
bcx.tcx().sess.span_bug(expr.span,
"expr_cast of non-trait");
let _icx = push_ctxt("trans_local_var");
- return match def {
- def::DefUpvar(nid, _, _, _) => {
+ match def {
+ def::DefUpvar(nid, _, _) => {
// Can't move upvars, so this is never a ZeroMemLastUse.
let local_ty = node_id_type(bcx, nid);
match bcx.fcx.llupvars.borrow().find(&nid) {
}
}
}
- def::DefArg(nid, _) => {
- take_local(bcx, &*bcx.fcx.llargs.borrow(), nid)
- }
- def::DefLocal(nid, _) | def::DefBinding(nid, _) => {
- take_local(bcx, &*bcx.fcx.lllocals.borrow(), nid)
+ def::DefLocal(nid) => {
+ let datum = match bcx.fcx.lllocals.borrow().find(&nid) {
+ Some(&v) => v,
+ None => {
+ bcx.sess().bug(format!(
+ "trans_local_var: no datum for local/arg {:?} found",
+ nid).as_slice());
+ }
+ };
+ debug!("take_local(nid={:?}, v={}, ty={})",
+ nid, bcx.val_to_string(datum.val), bcx.ty_to_string(datum.ty));
+ datum
}
_ => {
bcx.sess().unimpl(format!(
"unsupported def type in trans_local_var: {:?}",
def).as_slice());
}
- };
-
- fn take_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- table: &NodeMap<Datum<Lvalue>>,
- nid: ast::NodeId)
- -> Datum<Lvalue> {
- let datum = match table.find(&nid) {
- Some(&v) => v,
- None => {
- bcx.sess().bug(format!(
- "trans_local_var: no datum for local/arg {:?} found",
- nid).as_slice());
- }
- };
- debug!("take_local(nid={:?}, v={}, ty={})",
- nid, bcx.val_to_string(datum.val), bcx.ty_to_string(datum.ty));
- datum
}
}
assert_eq!(discr, 0);
match ty::expr_kind(bcx.tcx(), &*base.expr) {
- ty::LvalueExpr => {
+ ty::RvalueDpsExpr | ty::RvalueDatumExpr if !ty::type_needs_drop(bcx.tcx(), ty) => {
+ bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
+ },
+ ty::RvalueStmtExpr => bcx.tcx().sess.bug("unexpected expr kind for struct base expr"),
+ _ => {
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
for &(i, t) in base.fields.iter() {
let datum = base_datum.get_element(
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
bcx = datum.store_to(bcx, dest);
}
- },
- ty::RvalueDpsExpr | ty::RvalueDatumExpr => {
- bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
- },
- ty::RvalueStmtExpr => bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
+ }
}
}
expr: &ast::Expr,
method_call: MethodCall,
lhs: Datum<Expr>,
- rhs: Option<(Datum<Expr>, ast::NodeId)>,
+ rhs: Vec<(Datum<Expr>, ast::NodeId)>,
dest: Option<Dest>)
-> Result<'blk, 'tcx> {
let method_ty = bcx.tcx().method_map.borrow().get(&method_call).ty;
let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
- datum, None, Some(SaveIn(scratch.val))));
+ datum, Vec::new(), Some(SaveIn(scratch.val))));
scratch.to_expr_datum()
}
None => {
let ps = ccx.tcx().map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
- link::mangle(path.chain(abi.move_iter()), hash)
+ link::mangle(path.chain(abi.into_iter()), hash)
});
// Compute the type that the function would have if it were just a
match tys.fn_ty.ret_ty.attr {
Some(attr) => unsafe {
- llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr as u64);
+ llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64);
},
None => {}
}
match arg_ty.attr {
Some(attr) => unsafe {
- llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr as u64);
+ llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64);
},
None => ()
}
let env_ptr_ty = Type::at_box(bcx.ccx(), Type::i8(bcx.ccx())).ptr_to();
let env = PointerCast(bcx, env, env_ptr_ty);
with_cond(bcx, IsNotNull(bcx, env), |bcx| {
- let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]);
+ let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_drop_glue]);
let dtor = Load(bcx, dtor_ptr);
Call(bcx, dtor, [PointerCast(bcx, box_cell_v, Type::i8p(bcx.ccx()))], None);
bcx
// don't.
local_def(mth.id)
}
+ ast::TypeTraitItem(_) => {
+ ccx.sess().bug("found TypeTraitItem IITraitItem")
+ }
}
}
csearch::found(&ast::IIImplItem(impl_did, ref impl_item)) => {
}
local_def(mth.id)
}
+ ast::TypeImplItem(_) => {
+ ccx.sess().bug("found TypeImplItem IIImplItem")
+ }
}
}
};
use llvm;
use llvm::ValueRef;
use metadata::csearch;
+use middle::subst::{Subst,Substs};
use middle::subst::VecPerParamSpace;
use middle::subst;
+use middle::traits;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::callee::*;
use middle::trans::expr;
use middle::trans::glue;
use middle::trans::machine;
-use middle::trans::monomorphize;
use middle::trans::type_::Type;
use middle::trans::type_of::*;
use middle::ty;
use middle::typeck;
use middle::typeck::MethodCall;
-use util::common::indenter;
use util::ppaux::Repr;
use std::c_str::ToCStr;
+use std::rc::Rc;
use syntax::abi::{Rust, RustCall};
use syntax::parse::token;
use syntax::{ast, ast_map, attr, visit};
use syntax::ast_util::PostExpansionMethod;
+use syntax::codemap::DUMMY_SP;
// drop_glue pointer, size, align.
static VTABLE_OFFSET: uint = 3;
ast::MethodImplItem(ref method) => {
visit::walk_method_helper(&mut v, &**method);
}
+ ast::TypeImplItem(_) => {}
}
}
return;
};
visit::walk_method_helper(&mut v, &**method);
}
+ ast::TypeImplItem(_) => {}
}
}
}
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_method_callee");
- let (origin, method_ty) = match bcx.tcx().method_map
- .borrow().find(&method_call) {
- Some(method) => {
- debug!("trans_method_callee({:?}, method={})",
- method_call, method.repr(bcx.tcx()));
- (method.origin, method.ty)
- }
- None => {
- bcx.sess().span_bug(bcx.tcx().map.span(method_call.expr_id),
- "method call expr wasn't in method map")
- }
- };
+ let (origin, method_ty) =
+ bcx.tcx().method_map
+ .borrow()
+ .find(&method_call)
+ .map(|method| (method.origin.clone(), method.ty))
+ .unwrap();
match origin {
typeck::MethodStatic(did) |
MethodCall(method_call))),
}
}
- typeck::MethodParam(typeck::MethodParam {
- trait_id: trait_id,
- method_num: off,
- param_num: p,
- bound_num: b
- }) => {
- ty::populate_implementations_for_trait_if_necessary(
- bcx.tcx(),
- trait_id);
- let vtbl = find_vtable(bcx.tcx(), bcx.fcx.param_substs, p, b);
- trans_monomorphized_callee(bcx, method_call,
- trait_id, off, vtbl)
+ typeck::MethodTypeParam(typeck::MethodParam {
+ trait_ref: ref trait_ref,
+ method_num: method_num
+ }) => {
+ let trait_ref =
+ Rc::new(trait_ref.subst(bcx.tcx(),
+ &bcx.fcx.param_substs.substs));
+ let span = bcx.tcx().map.span(method_call.expr_id);
+ let origin = fulfill_obligation(bcx.ccx(),
+ span,
+ (*trait_ref).clone());
+ debug!("origin = {}", origin.repr(bcx.tcx()));
+ trans_monomorphized_callee(bcx, method_call, trait_ref.def_id,
+ method_num, origin)
}
- typeck::MethodObject(ref mt) => {
+ typeck::MethodTraitObject(ref mt) => {
let self_expr = match self_expr {
Some(self_expr) => self_expr,
None => {
method_id: ast::DefId,
trait_id: ast::DefId,
expr_id: ast::NodeId)
- -> ValueRef {
+ -> ValueRef
+{
let _icx = push_ctxt("meth::trans_static_method_callee");
let ccx = bcx.ccx();
method_id,
ty::item_path_str(bcx.tcx(), trait_id),
expr_id);
- let _indenter = indenter();
-
- ty::populate_implementations_for_trait_if_necessary(bcx.tcx(), trait_id);
let mname = if method_id.krate == ast::LOCAL_CRATE {
match bcx.tcx().map.get(method_id.node) {
ast_map::NodeTraitItem(method) => {
let ident = match *method {
ast::RequiredMethod(ref m) => m.ident,
- ast::ProvidedMethod(ref m) => m.pe_ident()
+ ast::ProvidedMethod(ref m) => m.pe_ident(),
+ ast::TypeTraitItem(_) => {
+ bcx.tcx().sess.bug("trans_static_method_callee() on \
+ an associated type?!")
+ }
};
ident.name
}
} else {
csearch::get_item_path(bcx.tcx(), method_id).last().unwrap().name()
};
- debug!("trans_static_method_callee: method_id={:?}, expr_id={:?}, \
+ debug!("trans_static_method_callee: method_id={}, expr_id={}, \
name={}", method_id, expr_id, token::get_name(mname));
- let vtable_key = MethodCall::expr(expr_id);
- let vtbls = resolve_vtables_in_fn_ctxt(
- bcx.fcx,
- ccx.tcx().vtable_map.borrow().get(&vtable_key));
-
- match *vtbls.get_self().unwrap().get(0) {
- typeck::vtable_static(impl_did, ref rcvr_substs, ref rcvr_origins) => {
- assert!(rcvr_substs.types.all(|t| !ty::type_needs_infer(*t)));
+ // Find the substitutions for the fn itself. This includes
+ // type parameters that belong to the trait but also some that
+ // belong to the method:
+ let rcvr_substs = node_id_substs(bcx, ExprId(expr_id));
+ let (rcvr_type, rcvr_self, rcvr_method) = rcvr_substs.types.split();
+
+ // Lookup the precise impl being called. To do that, we need to
+ // create a trait reference identifying the self type and other
+ // input type parameters. To create that trait reference, we have
+ // to pick apart the type parameters to identify just those that
+ // pertain to the trait. This is easiest to explain by example:
+ //
+ // trait Convert {
+ // fn from<U:Foo>(n: U) -> Option<Self>;
+ // }
+ // ...
+ // let f = <Vec<int> as Convert>::from::<String>(...)
+ //
+ // Here, in this call, which I've written with explicit UFCS
+ // notation, the set of type parameters will be:
+ //
+ // rcvr_type: [] <-- nothing declared on the trait itself
+ // rcvr_self: [Vec<int>] <-- the self type
+ // rcvr_method: [String] <-- method type parameter
+ //
+ // So we create a trait reference using the first two,
+ // basically corresponding to `<Vec<int> as Convert>`.
+ // The remaining type parameters (`rcvr_method`) will be used below.
+ let trait_substs =
+ Substs::erased(VecPerParamSpace::new(rcvr_type,
+ rcvr_self,
+ Vec::new()));
+ debug!("trait_substs={}", trait_substs.repr(bcx.tcx()));
+ let trait_ref = Rc::new(ty::TraitRef { def_id: trait_id,
+ substs: trait_substs });
+ let vtbl = fulfill_obligation(bcx.ccx(),
+ DUMMY_SP,
+ trait_ref);
+
+ // Now that we know which impl is being used, we can dispatch to
+ // the actual function:
+ match vtbl {
+ traits::VtableImpl(traits::VtableImplData {
+ impl_def_id: impl_did,
+ substs: impl_substs,
+ nested: _ }) =>
+ {
+ assert!(impl_substs.types.all(|t| !ty::type_needs_infer(*t)));
+
+ // Create the substitutions that are in scope. This combines
+ // the type parameters from the impl with those declared earlier.
+ // To see what I mean, consider a possible impl:
+ //
+ // impl<T> Convert for Vec<T> {
+ // fn from<U:Foo>(n: U) { ... }
+ // }
+ //
+ // Recall that we matched `<Vec<int> as Convert>`. Trait
+ // resolution will have given us a substitution
+ // containing `impl_substs=[[T=int],[],[]]` (the type
+ // parameters defined on the impl). We combine
+ // that with the `rcvr_method` from before, which tells us
+ // the type parameters from the *method*, to yield
+ // `callee_substs=[[T=int],[],[U=String]]`.
+ let (impl_type, impl_self, _) = impl_substs.types.split();
+ let callee_substs =
+ Substs::erased(VecPerParamSpace::new(impl_type,
+ impl_self,
+ rcvr_method));
let mth_id = method_with_name(ccx, impl_did, mname);
- let (callee_substs, callee_origins) =
- combine_impl_and_methods_tps(
- bcx, ExprId(expr_id),
- (*rcvr_substs).clone(), (*rcvr_origins).clone());
-
- let llfn = trans_fn_ref_with_vtables(bcx, mth_id, ExprId(expr_id),
- callee_substs,
- callee_origins);
+ let llfn = trans_fn_ref_with_substs(bcx, mth_id, ExprId(expr_id),
+ callee_substs);
let callee_ty = node_id_type(bcx, expr_id);
let llty = type_of_fn_from_ty(ccx, callee_ty).ptr_to();
PointerCast(bcx, llfn, llty)
}
- typeck::vtable_unboxed_closure(_) => {
- bcx.tcx().sess.bug("can't call a closure vtable in a static way");
- }
_ => {
- fail!("vtable_param left in monomorphized \
- function's vtable substs");
+ bcx.tcx().sess.bug(
+ format!("static call to invalid vtable: {}",
+ vtbl.repr(bcx.tcx())).as_slice());
}
}
}
.expect("could not find impl while translating");
let meth_did = impl_items.iter()
.find(|&did| {
- match *did {
- ty::MethodTraitItemId(did) => {
- ty::impl_or_trait_item(ccx.tcx(),
- did).ident()
- .name ==
- name
- }
- }
+ ty::impl_or_trait_item(ccx.tcx(),
+ did.def_id()).ident()
+ .name ==
+ name
}).expect("could not find method while \
translating");
method_call: MethodCall,
trait_id: ast::DefId,
n_method: uint,
- vtbl: typeck::vtable_origin)
+ vtable: traits::Vtable<()>)
-> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_monomorphized_callee");
- match vtbl {
- typeck::vtable_static(impl_did, rcvr_substs, rcvr_origins) => {
- let ccx = bcx.ccx();
- let mname = match ty::trait_item(ccx.tcx(), trait_id, n_method) {
- ty::MethodTraitItem(method) => method.ident,
- };
- let mth_id = method_with_name(bcx.ccx(), impl_did, mname.name);
-
- // create a concatenated set of substitutions which includes
- // those from the impl and those from the method:
- let (callee_substs, callee_origins) =
- combine_impl_and_methods_tps(
- bcx, MethodCall(method_call), rcvr_substs, rcvr_origins);
-
- // translate the function
- let llfn = trans_fn_ref_with_vtables(bcx,
- mth_id,
- MethodCall(method_call),
- callee_substs,
- callee_origins);
-
- Callee { bcx: bcx, data: Fn(llfn) }
- }
- typeck::vtable_unboxed_closure(closure_def_id) => {
+ match vtable {
+ traits::VtableImpl(vtable_impl) => {
+ let ccx = bcx.ccx();
+ let impl_did = vtable_impl.impl_def_id;
+ let mname = match ty::trait_item(ccx.tcx(), trait_id, n_method) {
+ ty::MethodTraitItem(method) => method.ident,
+ ty::TypeTraitItem(_) => {
+ bcx.tcx().sess.bug("can't monomorphize an associated \
+ type")
+ }
+ };
+ let mth_id = method_with_name(bcx.ccx(), impl_did, mname.name);
+
+ // create a concatenated set of substitutions which includes
+ // those from the impl and those from the method:
+ let callee_substs =
+ combine_impl_and_methods_tps(
+ bcx, MethodCall(method_call), vtable_impl.substs);
+
+ // translate the function
+ let llfn = trans_fn_ref_with_substs(bcx,
+ mth_id,
+ MethodCall(method_call),
+ callee_substs);
+
+ Callee { bcx: bcx, data: Fn(llfn) }
+ }
+ traits::VtableUnboxedClosure(closure_def_id) => {
// The static region and type parameters are lies, but we're in
// trans so it doesn't matter.
//
bcx,
closure_def_id);
- let llfn = trans_fn_ref_with_vtables(bcx,
- closure_def_id,
- MethodCall(method_call),
- callee_substs,
- VecPerParamSpace::empty());
-
- Callee {
- bcx: bcx,
- data: Fn(llfn),
- }
- }
- typeck::vtable_param(..) => {
- bcx.tcx().sess.bug(
- "vtable_param left in monomorphized function's vtable substs");
- }
- typeck::vtable_error => {
- bcx.tcx().sess.bug(
- "vtable_error left in monomorphized function's vtable substs");
- }
+ let llfn = trans_fn_ref_with_substs(bcx,
+ closure_def_id,
+ MethodCall(method_call),
+ callee_substs);
+
+ Callee {
+ bcx: bcx,
+ data: Fn(llfn),
+ }
+ }
+ _ => {
+ bcx.tcx().sess.bug(
+ "vtable_param left in monomorphized function's vtable substs");
+ }
}
}
fn combine_impl_and_methods_tps(bcx: Block,
node: ExprOrMethodCall,
- rcvr_substs: subst::Substs,
- rcvr_origins: typeck::vtable_res)
- -> (subst::Substs, typeck::vtable_res)
+ rcvr_substs: subst::Substs)
+ -> subst::Substs
{
/*!
* Creates a concatenated set of substitutions which includes
let ccx = bcx.ccx();
- let vtable_key = match node {
- ExprId(id) => MethodCall::expr(id),
- MethodCall(method_call) => method_call
- };
let node_substs = node_id_substs(bcx, node);
- let node_vtables = node_vtables(bcx, vtable_key);
- debug!("rcvr_substs={:?}", rcvr_substs.repr(ccx.tcx()));
- debug!("node_substs={:?}", node_substs.repr(ccx.tcx()));
+ debug!("rcvr_substs={}", rcvr_substs.repr(ccx.tcx()));
+ debug!("node_substs={}", node_substs.repr(ccx.tcx()));
// Break apart the type parameters from the node and type
// parameters from the receiver.
let (_, _, node_method) = node_substs.types.split();
let (rcvr_type, rcvr_self, rcvr_method) = rcvr_substs.types.clone().split();
assert!(rcvr_method.is_empty());
- let ty_substs = subst::Substs {
+ subst::Substs {
regions: subst::ErasedRegions,
types: subst::VecPerParamSpace::new(rcvr_type, rcvr_self, node_method)
- };
-
- // Now do the same work for the vtables.
- let (rcvr_type, rcvr_self, rcvr_method) = rcvr_origins.split();
- let (_, _, node_method) = node_vtables.split();
- assert!(rcvr_method.is_empty());
- let vtables = subst::VecPerParamSpace::new(rcvr_type, rcvr_self, node_method);
-
- (ty_substs, vtables)
+ }
}
fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
/// Creates a returns a dynamic vtable for the given type and vtable origin.
/// This is used only for objects.
-fn get_vtable(bcx: Block,
- self_ty: ty::t,
- origins: typeck::vtable_param_res)
- -> ValueRef
+///
+/// The `trait_ref` encodes the erased self type. Hence if we are
+/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
+/// `trait_ref` would map `T:Trait`, but `box_ty` would be
+/// `Foo<T>`. This `box_ty` is primarily used to encode the destructor.
+/// This will hopefully change now that DST is underway.
+pub fn get_vtable(bcx: Block,
+ box_ty: ty::t,
+ trait_ref: Rc<ty::TraitRef>)
+ -> ValueRef
{
- debug!("get_vtable(self_ty={}, origins={})",
- self_ty.repr(bcx.tcx()),
- origins.repr(bcx.tcx()));
+ debug!("get_vtable(box_ty={}, trait_ref={})",
+ box_ty.repr(bcx.tcx()),
+ trait_ref.repr(bcx.tcx()));
+ let tcx = bcx.tcx();
let ccx = bcx.ccx();
let _icx = push_ctxt("meth::get_vtable");
// Check the cache.
- let hash_id = (self_ty, monomorphize::make_vtable_id(ccx, origins.get(0)));
- match ccx.vtables().borrow().find(&hash_id) {
+ let cache_key = (box_ty, trait_ref.clone());
+ match ccx.vtables().borrow().find(&cache_key) {
Some(&val) => { return val }
None => { }
}
- // Not in the cache. Actually build it.
- let methods = origins.move_iter().flat_map(|origin| {
- match origin {
- typeck::vtable_static(id, substs, sub_vtables) => {
- emit_vtable_methods(bcx, id, substs, sub_vtables).move_iter()
+ // Not in the cache. Build it.
+ let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| {
+ let vtable = fulfill_obligation(bcx.ccx(),
+ DUMMY_SP,
+ trait_ref.clone());
+ match vtable {
+ traits::VtableImpl(
+ traits::VtableImplData {
+ impl_def_id: id,
+ substs: substs,
+ nested: _ }) => {
+ emit_vtable_methods(bcx, id, substs).into_iter()
}
- typeck::vtable_unboxed_closure(closure_def_id) => {
+ traits::VtableUnboxedClosure(closure_def_id) => {
let callee_substs =
get_callee_substitutions_for_unboxed_closure(
bcx,
closure_def_id);
- let mut llfn = trans_fn_ref_with_vtables(
+ let mut llfn = trans_fn_ref_with_substs(
bcx,
closure_def_id,
ExprId(0),
- callee_substs.clone(),
- VecPerParamSpace::empty());
+ callee_substs.clone());
{
let unboxed_closures = bcx.tcx()
}
}
- (vec!(llfn)).move_iter()
+ (vec!(llfn)).into_iter()
+ }
+ traits::VtableBuiltin |
+ traits::VtableParam(..) => {
+ bcx.sess().bug(
+ format!("resolved vtable for {} to bad vtable {} in trans",
+ trait_ref.repr(bcx.tcx()),
+ vtable.repr(bcx.tcx())).as_slice());
}
- _ => ccx.sess().bug("get_vtable: expected a static origin"),
}
});
- let size_ty = sizing_type_of(ccx, self_ty);
+ let size_ty = sizing_type_of(ccx, trait_ref.self_ty());
let size = machine::llsize_of_alloc(ccx, size_ty);
let ll_size = C_uint(ccx, size as uint);
- let align = align_of(ccx, self_ty);
+ let align = align_of(ccx, trait_ref.self_ty());
let ll_align = C_uint(ccx, align as uint);
// Generate a destructor for the vtable.
- let drop_glue = glue::get_drop_glue(ccx, self_ty);
+ let drop_glue = glue::get_drop_glue(ccx, box_ty);
let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
- ccx.vtables().borrow_mut().insert(hash_id, vtable);
+ ccx.vtables().borrow_mut().insert(cache_key, vtable);
vtable
}
let _icx = push_ctxt("meth::make_vtable");
let head = vec![drop_glue, size, align];
- let components: Vec<_> = head.move_iter().chain(ptrs).collect();
+ let components: Vec<_> = head.into_iter().chain(ptrs).collect();
unsafe {
let tbl = C_struct(ccx, components.as_slice(), false);
fn emit_vtable_methods(bcx: Block,
impl_id: ast::DefId,
- substs: subst::Substs,
- vtables: typeck::vtable_res)
+ substs: subst::Substs)
-> Vec<ValueRef> {
let ccx = bcx.ccx();
let tcx = ccx.tcx();
ty::populate_implementations_for_trait_if_necessary(bcx.tcx(), trt_id);
let trait_item_def_ids = ty::trait_item_def_ids(tcx, trt_id);
- trait_item_def_ids.iter().map(|method_def_id| {
+ trait_item_def_ids.iter().flat_map(|method_def_id| {
let method_def_id = method_def_id.def_id();
let ident = ty::impl_or_trait_item(tcx, method_def_id).ident();
// The substitutions we have are on the impl, so we grab
debug!("(making impl vtable) method has self or type \
params: {}",
token::get_ident(ident));
- C_null(Type::nil(ccx).ptr_to())
+ Some(C_null(Type::nil(ccx).ptr_to())).move_iter()
} else {
- let mut fn_ref = trans_fn_ref_with_vtables(
+ let mut fn_ref = trans_fn_ref_with_substs(
bcx,
m_id,
ExprId(0),
- substs.clone(),
- vtables.clone());
+ substs.clone());
if m.explicit_self == ty::ByValueExplicitSelfCategory {
fn_ref = trans_unboxing_shim(bcx,
fn_ref,
m_id,
substs.clone());
}
- fn_ref
+ Some(fn_ref).move_iter()
}
}
+ ty::TypeTraitItem(_) => {
+ None.move_iter()
+ }
}
}).collect()
}
-pub fn vtable_ptr(bcx: Block,
- id: ast::NodeId,
- self_ty: ty::t) -> ValueRef {
- let ccx = bcx.ccx();
- let origins = {
- let vtable_map = ccx.tcx().vtable_map.borrow();
- // This trait cast might be because of implicit coercion
- let adjs = ccx.tcx().adjustments.borrow();
- let adjust = adjs.find(&id);
- let method_call = if adjust.is_some() && ty::adjust_is_object(adjust.unwrap()) {
- MethodCall::autoobject(id)
- } else {
- MethodCall::expr(id)
- };
- let vres = vtable_map.get(&method_call).get_self().unwrap();
- resolve_param_vtables_under_param_substs(ccx.tcx(), bcx.fcx.param_substs, vres)
- };
- get_vtable(bcx, self_ty, origins)
-}
-
pub fn trans_trait_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
datum: Datum<Expr>,
id: ast::NodeId,
+ trait_ref: Rc<ty::TraitRef>,
dest: expr::Dest)
-> Block<'blk, 'tcx> {
/*!
*/
let mut bcx = bcx;
- let _icx = push_ctxt("meth::trans_cast");
+ let _icx = push_ctxt("meth::trans_trait_cast");
let lldest = match dest {
Ignore => {
- return datum.clean(bcx, "trait_cast", id);
+ return datum.clean(bcx, "trait_trait_cast", id);
}
SaveIn(dest) => dest
};
- let v_ty = datum.ty;
- let llbox_ty = type_of(bcx.ccx(), v_ty);
+ debug!("trans_trait_cast: trait_ref={}",
+ trait_ref.repr(bcx.tcx()));
+
+ let datum_ty = datum.ty;
+ let llbox_ty = type_of(bcx.ccx(), datum_ty);
// Store the pointer into the first half of pair.
let llboxdest = GEPi(bcx, lldest, [0u, abi::trt_field_box]);
bcx = datum.store_to(bcx, llboxdest);
// Store the vtable into the second half of pair.
- let vtable = vtable_ptr(bcx, id, v_ty);
+ let vtable = get_vtable(bcx, datum_ty, trait_ref);
let llvtabledest = GEPi(bcx, lldest, [0u, abi::trt_field_vtable]);
let llvtabledest = PointerCast(bcx, llvtabledest, val_ty(vtable).ptr_to());
Store(bcx, vtable, llvtabledest);
pub fn monomorphic_fn(ccx: &CrateContext,
fn_id: ast::DefId,
real_substs: &subst::Substs,
- vtables: typeck::vtable_res,
ref_id: Option<ast::NodeId>)
-> (ValueRef, bool) {
debug!("monomorphic_fn(\
fn_id={}, \
real_substs={}, \
- vtables={}, \
ref_id={:?})",
fn_id.repr(ccx.tcx()),
real_substs.repr(ccx.tcx()),
- vtables.repr(ccx.tcx()),
ref_id);
assert!(real_substs.types.all(|t| {
let psubsts = param_substs {
substs: (*real_substs).clone(),
- vtables: vtables,
};
debug!("monomorphic_fn(\
}
d
}
+ ast::TypeImplItem(_) => {
+ ccx.sess().bug("can't monomorphize an associated type")
+ }
}
}
ast_map::NodeTraitItem(method) => {
(lldecl, true)
}
-// Used to identify cached monomorphized functions and vtables
+// Used to identify cached monomorphized functions
#[deriving(PartialEq, Eq, Hash)]
pub struct MonoParamId {
pub subst: ty::t,
format!("couldn't find visit method for {}", ty_name).as_slice());
let method = match self.visitor_items[mth_idx] {
ty::MethodTraitItem(ref method) => (*method).clone(),
+ ty::TypeTraitItem(_) => return,
};
let mth_ty = ty::mk_bare_fn(tcx, method.fty.clone());
debug!("Emit call visit method: visit_{}: {}", ty_name, ty_to_string(tcx, mth_ty));
use std::c_str::ToCStr;
use std::mem;
-use std::string;
use std::cell::RefCell;
use std::collections::HashMap;
-use libc::{c_uint, c_void, free};
+use libc::c_uint;
#[deriving(Clone, PartialEq, Show)]
pub struct Type {
}
pub fn type_to_string(&self, ty: Type) -> String {
- unsafe {
- let s = llvm::LLVMTypeToString(ty.to_ref());
- let ret = string::raw::from_buf(s as *const u8);
- free(s as *mut c_void);
- ret
- }
+ llvm::build_string(|s| unsafe {
+ llvm::LLVMWriteTypeToString(ty.to_ref(), s);
+ }).expect("non-UTF8 type description from LLVM")
}
pub fn types_to_str(&self, tys: &[Type]) -> String {
}
pub fn val_to_string(&self, val: ValueRef) -> String {
- unsafe {
- let s = llvm::LLVMValueToString(val);
- let ret = string::raw::from_buf(s as *const u8);
- free(s as *mut c_void);
- ret
- }
+ llvm::build_string(|s| unsafe {
+ llvm::LLVMWriteValueToString(val, s);
+ }).expect("non-UTF8 value description from LLVM")
}
}
use middle::const_eval;
use middle::def;
use middle::dependency_format;
-use middle::freevars::CaptureModeMap;
-use middle::freevars;
use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem};
use middle::lang_items::{FnOnceTraitLangItem, OpaqueStructLangItem};
use middle::lang_items::{TyDescStructLangItem, TyVisitorTraitLangItem};
use middle::stability;
use middle::subst::{Subst, Substs, VecPerParamSpace};
use middle::subst;
+use middle::traits;
use middle::ty;
use middle::typeck;
use middle::ty_fold;
#[deriving(Clone)]
pub enum ImplOrTraitItem {
MethodTraitItem(Rc<Method>),
+ TypeTraitItem(Rc<AssociatedType>),
}
impl ImplOrTraitItem {
fn id(&self) -> ImplOrTraitItemId {
match *self {
MethodTraitItem(ref method) => MethodTraitItemId(method.def_id),
+ TypeTraitItem(ref associated_type) => {
+ TypeTraitItemId(associated_type.def_id)
+ }
}
}
pub fn def_id(&self) -> ast::DefId {
match *self {
MethodTraitItem(ref method) => method.def_id,
+ TypeTraitItem(ref associated_type) => associated_type.def_id,
}
}
pub fn ident(&self) -> ast::Ident {
match *self {
MethodTraitItem(ref method) => method.ident,
+ TypeTraitItem(ref associated_type) => associated_type.ident,
}
}
pub fn container(&self) -> ImplOrTraitItemContainer {
match *self {
MethodTraitItem(ref method) => method.container,
+ TypeTraitItem(ref associated_type) => associated_type.container,
}
}
}
#[deriving(Clone)]
pub enum ImplOrTraitItemId {
MethodTraitItemId(ast::DefId),
+ TypeTraitItemId(ast::DefId),
}
impl ImplOrTraitItemId {
pub fn def_id(&self) -> ast::DefId {
match *self {
MethodTraitItemId(def_id) => def_id,
+ TypeTraitItemId(def_id) => def_id,
}
}
}
}
}
+#[deriving(Clone)]
+pub struct AssociatedType {
+ pub ident: ast::Ident,
+ pub vis: ast::Visibility,
+ pub def_id: ast::DefId,
+ pub container: ImplOrTraitItemContainer,
+}
+
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct mt {
pub ty: t,
#[deriving(Clone)]
pub enum AutoAdjustment {
- AutoAddEnv(ty::TraitStore),
- AutoDerefRef(AutoDerefRef)
+ AdjustAddEnv(ty::TraitStore),
+ AdjustDerefRef(AutoDerefRef)
}
#[deriving(Clone, PartialEq)]
// An unsize coercion applied to the tail field of a struct.
// The uint is the index of the type parameter which is unsized.
UnsizeStruct(Box<UnsizeKind>, uint),
- UnsizeVtable(ty::ExistentialBounds,
- ast::DefId, /* Trait ID */
- subst::Substs /* Trait substitutions */)
+ UnsizeVtable(TyTrait, /* the self type of the trait */ ty::t)
}
#[deriving(Clone)]
// returns the region of the borrowed reference.
pub fn adjusted_object_region(adj: &AutoAdjustment) -> Option<Region> {
match adj {
- &AutoDerefRef(AutoDerefRef{autoref: Some(ref autoref), ..}) => {
+ &AdjustDerefRef(AutoDerefRef{autoref: Some(ref autoref), ..}) => {
let (b, _, r) = autoref_object_region(autoref);
if b {
r
// Returns true if there is a trait cast at the bottom of the adjustment.
pub fn adjust_is_object(adj: &AutoAdjustment) -> bool {
match adj {
- &AutoDerefRef(AutoDerefRef{autoref: Some(ref autoref), ..}) => {
+ &AdjustDerefRef(AutoDerefRef{autoref: Some(ref autoref), ..}) => {
let (b, _, _) = autoref_object_region(autoref);
b
}
fn type_of_autoref(cx: &ctxt, autoref: &AutoRef) -> Option<t> {
match autoref {
&AutoUnsize(ref k) => match k {
- &UnsizeVtable(bounds, def_id, ref substs) => {
+ &UnsizeVtable(TyTrait { def_id, substs: ref substs, bounds }, _) => {
Some(mk_trait(cx, def_id, substs.clone(), bounds))
}
_ => None
},
&AutoUnsizeUniq(ref k) => match k {
- &UnsizeVtable(bounds, def_id, ref substs) => {
+ &UnsizeVtable(TyTrait { def_id, substs: ref substs, bounds }, _) => {
Some(mk_uniq(cx, mk_trait(cx, def_id, substs.clone(), bounds)))
}
_ => None
}
match adj {
- &AutoDerefRef(AutoDerefRef{autoref: Some(ref autoref), ..}) => {
+ &AdjustDerefRef(AutoDerefRef{autoref: Some(ref autoref), ..}) => {
type_of_autoref(cx, autoref)
}
_ => None
pub trait_refs: RefCell<NodeMap<Rc<TraitRef>>>,
pub trait_defs: RefCell<DefIdMap<Rc<TraitDef>>>,
+ /// Maps from node-id of a trait object cast (like `foo as
+ /// Box<Trait>`) to the trait reference.
+ pub object_cast_map: typeck::ObjectCastMap,
+
pub map: ast_map::Map<'tcx>,
pub intrinsic_defs: RefCell<DefIdMap<t>>,
- pub freevars: RefCell<freevars::freevar_map>,
+ pub freevars: RefCell<FreevarMap>,
pub tcache: type_cache,
pub rcache: creader_cache,
pub short_names_cache: RefCell<HashMap<t, String>>,
/// Maps a DefId of a type to a list of its inherent impls.
/// Contains implementations of methods that are inherent to a type.
/// Methods in these implementations don't need to be exported.
- pub inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<ast::DefId>>>>>,
+ pub inherent_impls: RefCell<DefIdMap<Rc<Vec<ast::DefId>>>>,
/// Maps a DefId of an impl to a list of its items.
/// Note that this contains all of the impls that we know about,
/// about.
pub used_mut_nodes: RefCell<NodeSet>,
- /// vtable resolution information for impl declarations
- pub impl_vtables: typeck::impl_vtable_map,
-
/// The set of external nominal types whose implementations have been read.
/// This is used for lazy resolution of methods.
pub populated_external_types: RefCell<DefIdSet>,
pub extern_const_variants: RefCell<DefIdMap<ast::NodeId>>,
pub method_map: typeck::MethodMap,
- pub vtable_map: typeck::vtable_map,
pub dependency_formats: RefCell<dependency_format::Dependencies>,
/// Maps closures to their capture clauses.
pub capture_modes: RefCell<CaptureModeMap>,
+
+ /// Maps def IDs to true if and only if they're associated types.
+ pub associated_types: RefCell<DefIdMap<bool>>,
+
+ /// Maps def IDs of traits to information about their associated types.
+ pub trait_associated_types:
+ RefCell<DefIdMap<Rc<Vec<AssociatedTypeInfo>>>>,
}
pub enum tbox_flag {
/// as well as the existential type parameter in an object type.
#[deriving(PartialEq, Eq, Hash, Clone, Show)]
pub struct ParamBounds {
- pub opt_region_bound: Option<ty::Region>,
+ pub region_bounds: Vec<ty::Region>,
pub builtin_bounds: BuiltinBounds,
pub trait_bounds: Vec<Rc<TraitRef>>
}
/// Bounds suitable for an existentially quantified type parameter
/// such as those that appear in object types or closure types. The
/// major difference between this case and `ParamBounds` is that
-/// general purpose trait bounds are omitted.
+/// general purpose trait bounds are omitted and there must be
+/// *exactly one* region.
#[deriving(PartialEq, Eq, Hash, Clone, Show)]
pub struct ExistentialBounds {
pub region_bound: ty::Region,
pub enum InferTy {
TyVar(TyVid),
IntVar(IntVid),
- FloatVar(FloatVid)
+ FloatVar(FloatVid),
+ SkolemizedTy(uint),
+
+ // FIXME -- once integral fallback is impl'd, we should remove
+ // this type. It's only needed to prevent spurious errors for
+ // integers whose type winds up never being constrained.
+ SkolemizedIntTy(uint),
}
#[deriving(Clone, Encodable, Decodable, Eq, Hash, Show)]
TyVar(ref v) => v.fmt(f),
IntVar(ref v) => v.fmt(f),
FloatVar(ref v) => v.fmt(f),
+ SkolemizedTy(v) => write!(f, "SkolemizedTy({})", v),
+ SkolemizedIntTy(v) => write!(f, "SkolemizedIntTy({})", v),
}
}
}
pub def_id: ast::DefId,
pub space: subst::ParamSpace,
pub index: uint,
+ pub associated_with: Option<ast::DefId>,
pub bounds: ParamBounds,
pub default: Option<ty::t>,
}
}
}
+impl TraitRef {
+ pub fn self_ty(&self) -> ty::t {
+ self.substs.self_ty().unwrap()
+ }
+}
+
/// When type checking, we use the `ParameterEnvironment` to track
/// details about the type/lifetime parameters that are in scope.
/// It primarily stores the bounds information.
/// the "outer" view of a type or method to the "inner" view.
/// In general, this means converting from bound parameters to
/// free parameters. Since we currently represent bound/free type
- /// parameters in the same way, this only has an affect on regions.
+ /// parameters in the same way, this only has an effect on regions.
pub free_substs: Substs,
/// Bounds on the various type parameters
/// may specify stronger requirements). This field indicates the
/// region of the callee.
pub implicit_region_bound: ty::Region,
+
+ /// Obligations that the caller must satisfy. This is basically
+ /// the set of bounds on the in-scope type parameters, translated
+ /// into Obligations.
+ ///
+ /// Note: This effectively *duplicates* the `bounds` array for
+ /// now.
+ pub caller_obligations: VecPerParamSpace<traits::Obligation>,
}
impl ParameterEnvironment {
let method_generics = &method_ty.generics;
construct_parameter_environment(
cx,
+ method.span,
method_generics,
method.pe_body().id)
}
+ TypeTraitItem(_) => {
+ cx.sess
+ .bug("ParameterEnvironment::from_item(): \
+ can't create a parameter environment \
+ for type trait items")
+ }
}
}
+ ast::TypeImplItem(_) => {
+ cx.sess.bug("ParameterEnvironment::from_item(): \
+ can't create a parameter environment \
+ for type impl items")
+ }
}
}
Some(ast_map::NodeTraitItem(trait_method)) => {
let method_generics = &method_ty.generics;
construct_parameter_environment(
cx,
+ method.span,
method_generics,
method.pe_body().id)
}
+ TypeTraitItem(_) => {
+ cx.sess
+ .bug("ParameterEnvironment::from_item(): \
+ can't create a parameter environment \
+ for type trait items")
+ }
}
}
+ ast::TypeTraitItem(_) => {
+ cx.sess.bug("ParameterEnvironment::from_item(): \
+ can't create a parameter environment \
+ for type trait items")
+ }
}
}
Some(ast_map::NodeItem(item)) => {
let fn_pty = ty::lookup_item_type(cx, fn_def_id);
construct_parameter_environment(cx,
+ item.span,
&fn_pty.generics,
body.id)
}
ast::ItemStatic(..) => {
let def_id = ast_util::local_def(id);
let pty = ty::lookup_item_type(cx, def_id);
- construct_parameter_environment(cx, &pty.generics, id)
+ construct_parameter_environment(cx, item.span,
+ &pty.generics, id)
}
_ => {
cx.sess.span_bug(item.span,
/// As `Polytype` but for a trait ref.
pub struct TraitDef {
+ /// Generic type definitions. Note that `Self` is listed in here
+ /// as having a single bound, the trait itself (e.g., in the trait
+ /// `Eq`, there is a single bound `Self : Eq`). This is so that
+ /// default methods get to assume that the `Self` parameter
+ /// implements the trait.
pub generics: Generics,
+
+ /// The "supertrait" bounds.
pub bounds: ParamBounds,
pub trait_ref: Rc<ty::TraitRef>,
}
pub type node_type_table = RefCell<HashMap<uint,t>>;
/// Records information about each unboxed closure.
+#[deriving(Clone)]
pub struct UnboxedClosure {
/// The type of the unboxed closure.
pub closure_type: ClosureTy,
pub kind: UnboxedClosureKind,
}
-#[deriving(PartialEq, Eq)]
+#[deriving(Clone, PartialEq, Eq)]
pub enum UnboxedClosureKind {
FnUnboxedClosureKind,
FnMutUnboxedClosureKind,
dm: resolve::DefMap,
named_region_map: resolve_lifetime::NamedRegionMap,
map: ast_map::Map<'tcx>,
- freevars: freevars::freevar_map,
- capture_modes: freevars::CaptureModeMap,
+ freevars: RefCell<FreevarMap>,
+ capture_modes: RefCell<CaptureModeMap>,
region_maps: middle::region::RegionMaps,
lang_items: middle::lang_items::LanguageItems,
stability: stability::Index) -> ctxt<'tcx> {
item_substs: RefCell::new(NodeMap::new()),
trait_refs: RefCell::new(NodeMap::new()),
trait_defs: RefCell::new(DefIdMap::new()),
+ object_cast_map: RefCell::new(NodeMap::new()),
map: map,
intrinsic_defs: RefCell::new(DefIdMap::new()),
- freevars: RefCell::new(freevars),
+ freevars: freevars,
tcache: RefCell::new(DefIdMap::new()),
rcache: RefCell::new(HashMap::new()),
short_names_cache: RefCell::new(HashMap::new()),
impl_items: RefCell::new(DefIdMap::new()),
used_unsafe: RefCell::new(NodeSet::new()),
used_mut_nodes: RefCell::new(NodeSet::new()),
- impl_vtables: RefCell::new(DefIdMap::new()),
populated_external_types: RefCell::new(DefIdSet::new()),
populated_external_traits: RefCell::new(DefIdSet::new()),
upvar_borrow_map: RefCell::new(HashMap::new()),
extern_const_statics: RefCell::new(DefIdMap::new()),
extern_const_variants: RefCell::new(DefIdMap::new()),
method_map: RefCell::new(FnvHashMap::new()),
- vtable_map: RefCell::new(FnvHashMap::new()),
dependency_formats: RefCell::new(HashMap::new()),
unboxed_closures: RefCell::new(DefIdMap::new()),
node_lint_levels: RefCell::new(HashMap::new()),
transmute_restrictions: RefCell::new(Vec::new()),
stability: RefCell::new(stability),
- capture_modes: RefCell::new(capture_modes),
+ capture_modes: capture_modes,
+ associated_types: RefCell::new(DefIdMap::new()),
+ trait_associated_types: RefCell::new(DefIdMap::new()),
}
}
&ty_enum(_, ref substs) | &ty_struct(_, ref substs) => {
flags |= sflags(substs);
}
- &ty_trait(box ty::TyTrait { ref substs, ref bounds, .. }) => {
+ &ty_trait(box TyTrait { ref substs, ref bounds, .. }) => {
flags |= sflags(substs);
flags |= flags_for_bounds(bounds);
}
pub fn to_ty(self, tcx: &ty::ctxt) -> ty::t {
ty::mk_param(tcx, self.space, self.idx, self.def_id)
}
+
+ pub fn is_self(&self) -> bool {
+ self.space == subst::SelfSpace && self.idx == 0
+ }
}
impl ItemSubsts {
}
// Scalar and unique types are sendable, and durable
+ ty_infer(ty::SkolemizedIntTy(_)) |
ty_nil | ty_bot | ty_bool | ty_int(_) | ty_uint(_) | ty_float(_) |
ty_bare_fn(_) | ty::ty_char => {
TC::None
}
}
- ty_trait(box ty::TyTrait { bounds, .. }) => {
+ ty_trait(box TyTrait { bounds, .. }) => {
object_contents(cx, bounds) | TC::ReachesFfiUnsafe | TC::Nonsized
}
}
pub fn type_is_trait(ty: t) -> bool {
+ type_trait_info(ty).is_some()
+}
+
+pub fn type_trait_info(ty: t) -> Option<&'static TyTrait> {
match get(ty).sty {
ty_uniq(ty) | ty_rptr(_, mt { ty, ..}) | ty_ptr(mt { ty, ..}) => match get(ty).sty {
- ty_trait(..) => true,
- _ => false
+ ty_trait(ref t) => Some(&**t),
+ _ => None
},
- ty_trait(..) => true,
- _ => false
+ ty_trait(ref t) => Some(&**t),
+ _ => None
}
}
}
}
+pub fn type_is_skolemized(ty: t) -> bool {
+ match get(ty).sty {
+ ty_infer(SkolemizedTy(_)) => true,
+ ty_infer(SkolemizedIntTy(_)) => true,
+ _ => false
+ }
+}
+
pub fn type_is_uint(ty: t) -> bool {
match get(ty).sty {
ty_infer(IntVar(_)) | ty_uint(ast::TyU) => true,
return match adjustment {
Some(adjustment) => {
match *adjustment {
- AutoAddEnv(store) => {
+ AdjustAddEnv(store) => {
match ty::get(unadjusted_ty).sty {
ty::ty_bare_fn(ref b) => {
let bounds = ty::ExistentialBounds {
}
}
- AutoDerefRef(ref adj) => {
+ AdjustDerefRef(ref adj) => {
let mut adjusted_ty = unadjusted_ty;
if !ty::type_is_error(adjusted_ty) {
format!("UnsizeStruct with bad sty: {}",
ty_to_string(cx, ty)).as_slice())
},
- &UnsizeVtable(bounds, def_id, ref substs) => {
+ &UnsizeVtable(TyTrait { def_id, substs: ref substs, bounds }, _) => {
mk_trait(cx, def_id, substs.clone(), bounds)
}
}
}
pub fn method_call_type_param_defs<'tcx, T>(typer: &T,
- origin: typeck::MethodOrigin)
+ origin: &typeck::MethodOrigin)
-> VecPerParamSpace<TypeParameterDef>
where T: mc::Typer<'tcx> {
- match origin {
+ match *origin {
typeck::MethodStatic(did) => {
ty::lookup_item_type(typer.tcx(), did).generics.types.clone()
}
.trait_did(typer.tcx());
lookup_trait_def(typer.tcx(), def_id).generics.types.clone()
}
- typeck::MethodParam(typeck::MethodParam{
- trait_id: trt_id,
+ typeck::MethodTypeParam(typeck::MethodParam{
+ trait_ref: ref trait_ref,
method_num: n_mth,
..
}) |
- typeck::MethodObject(typeck::MethodObject{
- trait_id: trt_id,
+ typeck::MethodTraitObject(typeck::MethodObject{
+ trait_ref: ref trait_ref,
method_num: n_mth,
..
}) => {
- match ty::trait_item(typer.tcx(), trt_id, n_mth) {
+ match ty::trait_item(typer.tcx(), trait_ref.def_id, n_mth) {
ty::MethodTraitItem(method) => method.generics.types.clone(),
+ ty::TypeTraitItem(_) => {
+ typer.tcx().sess.bug("method_call_type_param_defs() \
+ called on associated type")
+ }
}
}
}
// the index method invoked for `a[i]` always yields an `&T`
ast::ExprIndex(..) => LvalueExpr,
+ // the slice method invoked for `a[..]` always yields an `&T`
+ ast::ExprSlice(..) => LvalueExpr,
+
// `for` loops are statements
ast::ExprForLoop(..) => RvalueStmtExpr,
// DefArg's, particularly those of immediate type, ought to
// considered rvalues.
def::DefStatic(..) |
- def::DefBinding(..) |
def::DefUpvar(..) |
- def::DefArg(..) |
def::DefLocal(..) => LvalueExpr,
def => {
ast::ExprUnary(ast::UnDeref, _) |
ast::ExprField(..) |
ast::ExprTupField(..) |
- ast::ExprIndex(..) => {
+ ast::ExprIndex(..) |
+ ast::ExprSlice(..) => {
LvalueExpr
}
ty_enum(id, _) => format!("enum {}", item_path_str(cx, id)),
ty_box(_) => "Gc-ptr".to_string(),
ty_uniq(_) => "box".to_string(),
- ty_vec(_, _) => "vector".to_string(),
+ ty_vec(_, Some(_)) => "array".to_string(),
+ ty_vec(_, None) => "unsized array".to_string(),
ty_ptr(_) => "*-ptr".to_string(),
ty_rptr(_, _) => "&-ptr".to_string(),
ty_bare_fn(_) => "extern fn".to_string(),
ty_infer(TyVar(_)) => "inferred type".to_string(),
ty_infer(IntVar(_)) => "integral variable".to_string(),
ty_infer(FloatVar(_)) => "floating-point variable".to_string(),
+ ty_infer(SkolemizedTy(_)) => "skolemized type".to_string(),
+ ty_infer(SkolemizedIntTy(_)) => "skolemized integral type".to_string(),
ty_param(ref p) => {
if p.space == subst::SelfSpace {
"Self".to_string()
Some(ast_map::NodeItem(item)) => {
match item.node {
ItemTrait(_, _, _, ref ms) => {
- ms.iter().filter_map(|m| match *m {
- ast::RequiredMethod(_) => None,
- ast::ProvidedMethod(ref m) => {
- match impl_or_trait_item(cx,
- ast_util::local_def(m.id)) {
- MethodTraitItem(m) => Some(m),
+ let (_, p) =
+ ast_util::split_trait_methods(ms.as_slice());
+ p.iter()
+ .map(|m| {
+ match impl_or_trait_item(
+ cx,
+ ast_util::local_def(m.id)) {
+ MethodTraitItem(m) => m,
+ TypeTraitItem(_) => {
+ cx.sess.bug("provided_trait_methods(): \
+ split_trait_methods() put \
+ associated types in the \
+ provided method bucket?!")
}
}
}).collect()
})
}
+/// Returns true if the given ID refers to an associated type and false if it
+/// refers to anything else.
+pub fn is_associated_type(cx: &ctxt, id: ast::DefId) -> bool {
+ let result = match cx.associated_types.borrow_mut().find(&id) {
+ Some(result) => return *result,
+ None if id.krate == ast::LOCAL_CRATE => {
+ match cx.impl_or_trait_items.borrow().find(&id) {
+ Some(ref item) => {
+ match **item {
+ TypeTraitItem(_) => true,
+ MethodTraitItem(_) => false,
+ }
+ }
+ None => false,
+ }
+ }
+ None => {
+ csearch::is_associated_type(&cx.sess.cstore, id)
+ }
+ };
+
+ cx.associated_types.borrow_mut().insert(id, result);
+ result
+}
+
+/// Returns the parameter index that the given associated type corresponds to.
+pub fn associated_type_parameter_index(cx: &ctxt,
+ trait_def: &TraitDef,
+ associated_type_id: ast::DefId)
+ -> uint {
+ for type_parameter_def in trait_def.generics.types.iter() {
+ if type_parameter_def.def_id == associated_type_id {
+ return type_parameter_def.index
+ }
+ }
+ cx.sess.bug("couldn't find associated type parameter index")
+}
+
+#[deriving(PartialEq, Eq)]
+pub struct AssociatedTypeInfo {
+ pub def_id: ast::DefId,
+ pub index: uint,
+ pub ident: ast::Ident,
+}
+
+impl PartialOrd for AssociatedTypeInfo {
+ fn partial_cmp(&self, other: &AssociatedTypeInfo) -> Option<Ordering> {
+ Some(self.index.cmp(&other.index))
+ }
+}
+
+impl Ord for AssociatedTypeInfo {
+ fn cmp(&self, other: &AssociatedTypeInfo) -> Ordering {
+ self.index.cmp(&other.index)
+ }
+}
+
+/// Returns the associated types belonging to the given trait, in parameter
+/// order.
+pub fn associated_types_for_trait(cx: &ctxt, trait_id: ast::DefId)
+ -> Rc<Vec<AssociatedTypeInfo>> {
+ cx.trait_associated_types
+ .borrow()
+ .find(&trait_id)
+ .expect("associated_types_for_trait(): trait not found, try calling \
+ ensure_associated_types()")
+ .clone()
+}
+
pub fn trait_item_def_ids(cx: &ctxt, id: ast::DefId)
-> Rc<Vec<ImplOrTraitItemId>> {
lookup_locally_or_in_crate_store("trait_item_def_ids",
|| csearch::get_type(cx, did))
}
-pub fn lookup_impl_vtables(cx: &ctxt,
- did: ast::DefId)
- -> typeck::vtable_res {
- lookup_locally_or_in_crate_store(
- "impl_vtables", did, &mut *cx.impl_vtables.borrow_mut(),
- || csearch::get_impl_vtables(cx, did) )
-}
-
/// Given the did of a trait, returns its canonical trait ref.
pub fn lookup_trait_def(cx: &ctxt, did: ast::DefId) -> Rc<ty::TraitDef> {
let mut trait_defs = cx.trait_defs.borrow_mut();
let mut acc = Vec::new();
ty::each_attr(tcx, did, |meta| {
- acc.extend(attr::find_repr_attrs(tcx.sess.diagnostic(), meta).move_iter());
+ acc.extend(attr::find_repr_attrs(tcx.sess.diagnostic(), meta).into_iter());
true
});
-> Vec<UnboxedClosureUpvar> {
if closure_id.krate == ast::LOCAL_CRATE {
match tcx.freevars.borrow().find(&closure_id.node) {
- None => tcx.sess.bug("no freevars for unboxed closure?!"),
+ None => vec![],
Some(ref freevars) => {
freevars.iter().map(|freevar| {
let freevar_def_id = freevar.def.def_id();
struct TypeNormalizer<'a, 'tcx: 'a>(&'a ctxt<'tcx>);
impl<'a, 'tcx> TypeFolder<'tcx> for TypeNormalizer<'a, 'tcx> {
- fn tcx<'a>(&'a self) -> &'a ctxt<'tcx> { let TypeNormalizer(c) = *self; c }
+ fn tcx(&self) -> &ctxt<'tcx> { let TypeNormalizer(c) = *self; c }
fn fold_ty(&mut self, t: ty::t) -> ty::t {
match self.tcx().normalized_cache.borrow().find_copy(&t) {
pub fn each_bound_trait_and_supertraits(tcx: &ctxt,
bounds: &[Rc<TraitRef>],
f: |Rc<TraitRef>| -> bool)
- -> bool {
- for bound_trait_ref in bounds.iter() {
- let mut supertrait_set = HashMap::new();
- let mut trait_refs = Vec::new();
- let mut i = 0;
-
- // Seed the worklist with the trait from the bound
- supertrait_set.insert(bound_trait_ref.def_id, ());
- trait_refs.push(bound_trait_ref.clone());
-
- // Add the given trait ty to the hash map
- while i < trait_refs.len() {
- debug!("each_bound_trait_and_supertraits(i={:?}, trait_ref={})",
- i, trait_refs.get(i).repr(tcx));
-
- if !f(trait_refs.get(i).clone()) {
- return false;
- }
-
- // Add supertraits to supertrait_set
- let trait_ref = trait_refs.get(i).clone();
- let trait_def = lookup_trait_def(tcx, trait_ref.def_id);
- for supertrait_ref in trait_def.bounds.trait_bounds.iter() {
- let supertrait_ref = supertrait_ref.subst(tcx, &trait_ref.substs);
- debug!("each_bound_trait_and_supertraits(supertrait_ref={})",
- supertrait_ref.repr(tcx));
-
- let d_id = supertrait_ref.def_id;
- if !supertrait_set.contains_key(&d_id) {
- // FIXME(#5527) Could have same trait multiple times
- supertrait_set.insert(d_id, ());
- trait_refs.push(supertrait_ref.clone());
- }
- }
-
- i += 1;
+ -> bool
+{
+ for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
+ if !f(bound_trait_ref) {
+ return false;
}
}
return true;
trait_bounds,
|trait_ref| {
let bounds = ty::bounds_for_trait_ref(tcx, &*trait_ref);
- push_region_bounds(bounds.opt_region_bound.as_slice(),
+ push_region_bounds(bounds.region_bounds.as_slice(),
bounds.builtin_bounds,
&mut all_bounds);
debug!("from {}: bounds={} all_bounds={}",
return
}
+ let mut inherent_impls = Vec::new();
csearch::each_implementation_for_type(&tcx.sess.cstore, type_id,
|impl_def_id| {
let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
.insert(method_def_id, source);
}
}
+ TypeTraitItem(_) => {}
}
}
// If this is an inherent implementation, record it.
if associated_traits.is_none() {
- match tcx.inherent_impls.borrow().find(&type_id) {
- Some(implementation_list) => {
- implementation_list.borrow_mut().push(impl_def_id);
- return;
- }
- None => {}
- }
- tcx.inherent_impls.borrow_mut().insert(type_id,
- Rc::new(RefCell::new(vec!(impl_def_id))));
+ inherent_impls.push(impl_def_id);
}
});
+ tcx.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls));
tcx.populated_external_types.borrow_mut().insert(type_id);
}
.insert(method_def_id, source);
}
}
+ TypeTraitItem(_) => {}
}
}
Some(m) => m.clone(),
None => return None,
};
- let name = match impl_item {
- MethodTraitItem(method) => method.ident.name,
- };
+ let name = impl_item.ident().name;
match trait_of_item(tcx, def_id) {
Some(trait_did) => {
let trait_items = ty::trait_items(tcx, trait_did);
}
}
}
- ty_trait(box ty::TyTrait { def_id: d, bounds, .. }) => {
+ ty_trait(box TyTrait { def_id: d, bounds, .. }) => {
byte!(17);
did(&mut state, d);
hash!(bounds);
}
}
+pub fn empty_parameter_environment() -> ParameterEnvironment {
+ /*!
+ * Construct a parameter environment suitable for static contexts
+ * or other contexts where there are no free type/lifetime
+ * parameters in scope.
+ */
+
+ ty::ParameterEnvironment { free_substs: Substs::empty(),
+ bounds: VecPerParamSpace::empty(),
+ caller_obligations: VecPerParamSpace::empty(),
+ implicit_region_bound: ty::ReEmpty }
+}
+
pub fn construct_parameter_environment(
tcx: &ctxt,
+ span: Span,
generics: &ty::Generics,
free_id: ast::NodeId)
-> ParameterEnvironment
free_substs.repr(tcx),
bounds.repr(tcx));
+ let obligations = traits::obligations_for_generics(tcx, traits::ObligationCause::misc(span),
+ generics, &free_substs);
+
return ty::ParameterEnvironment {
free_substs: free_substs,
bounds: bounds,
implicit_region_bound: ty::ReScope(free_id),
+ caller_obligations: obligations,
};
fn push_region_params(regions: &mut VecPerParamSpace<ty::Region>,
space: subst::ParamSpace,
defs: &[TypeParameterDef]) {
for (i, def) in defs.iter().enumerate() {
+ debug!("construct_parameter_environment(): push_types_from_defs: \
+ space={} def={} index={}",
+ space,
+ def.repr(tcx),
+ i);
let ty = ty::mk_param(tcx, space, i, def.def_id);
types.push(space, ty);
}
}
}
+ pub fn to_mutbl_lossy(self) -> ast::Mutability {
+ /*!
+ * Returns a mutability `m` such that an `&m T` pointer could
+ * be used to obtain this borrow kind. Because borrow kinds
+ * are richer than mutabilities, we sometimes have to pick a
+ * mutability that is stronger than necessary so that it at
+ * least *would permit* the borrow in question.
+ */
+
+ match self {
+ MutBorrow => ast::MutMutable,
+ ImmBorrow => ast::MutImmutable,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of an `&uniq`
+ // and hence is a safe "over approximation".
+ UniqueImmBorrow => ast::MutMutable,
+ }
+ }
+
pub fn to_user_str(&self) -> &'static str {
match *self {
MutBorrow => "mutable",
}
fn capture_mode(&self, closure_expr_id: ast::NodeId)
- -> freevars::CaptureMode {
+ -> ast::CaptureClause {
self.capture_modes.borrow().get_copy(&closure_expr_id)
}
}
})
}
+
+/// A free variable referred to in a function.
+#[deriving(Encodable, Decodable)]
+pub struct Freevar {
+ /// The variable being accessed free.
+ pub def: def::Def,
+
+ // First span where it is accessed (there can be multiple).
+ pub span: Span
+}
+
+pub type FreevarMap = NodeMap<Vec<Freevar>>;
+
+pub type CaptureModeMap = NodeMap<ast::CaptureClause>;
+
+pub fn with_freevars<T>(tcx: &ty::ctxt, fid: ast::NodeId, f: |&[Freevar]| -> T) -> T {
+ match tcx.freevars.borrow().find(&fid) {
+ None => f(&[]),
+ Some(d) => f(d.as_slice())
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Generalized type folding mechanism.
+/*!
+ * Generalized type folding mechanism. The setup is a bit convoluted
+ * but allows for convenient usage. Let T be an instance of some
+ * "foldable type" (one which implements `TypeFoldable`) and F be an
+ * instance of a "folder" (a type which implements `TypeFolder`). Then
+ * the setup is intended to be:
+ *
+ * T.fold_with(F) --calls--> F.fold_T(T) --calls--> super_fold_T(F, T)
+ *
+ * This way, when you define a new folder F, you can override
+ * `fold_T()` to customize the behavior, and invoke `super_fold_T()`
+ * to get the original behavior. Meanwhile, to actually fold
+ * something, you can just write `T.fold_with(F)`, which is
+ * convenient. (Note that `fold_with` will also transparently handle
+ * things like a `Vec<T>` where T is foldable and so on.)
+ *
+ * In this ideal setup, the only function that actually *does*
+ * anything is `super_fold_T`, which traverses the type `T`. Moreover,
+ * `super_fold_T` should only ever call `T.fold_with()`.
+ *
+ * In some cases, we follow a degenerate pattern where we do not have
+ * a `fold_T` nor `super_fold_T` method. Instead, `T.fold_with`
+ * traverses the structure directly. This is suboptimal because the
+ * behavior cannot be overridden, but it's much less work to implement.
+ * If you ever *do* need an override that doesn't exist, it's not hard
+ * to convert the degenerate pattern into the proper thing.
+ */
use middle::subst;
use middle::subst::VecPerParamSpace;
use middle::ty;
+use middle::traits;
use middle::typeck;
use std::rc::Rc;
use syntax::ast;
fn fold_item_substs(&mut self, i: ty::ItemSubsts) -> ty::ItemSubsts {
super_fold_item_substs(self, i)
}
+
+ fn fold_obligation(&mut self, o: &traits::Obligation) -> traits::Obligation {
+ super_fold_obligation(self, o)
+ }
}
///////////////////////////////////////////////////////////////////////////
// can easily refactor the folding into the TypeFolder trait as
// needed.
+impl TypeFoldable for () {
+ fn fold_with<'tcx, F:TypeFolder<'tcx>>(&self, _: &mut F) -> () {
+ ()
+ }
+}
+
impl<T:TypeFoldable> TypeFoldable for Option<T> {
fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Option<T> {
self.as_ref().map(|t| t.fold_with(folder))
impl TypeFoldable for ty::ParamBounds {
fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ParamBounds {
ty::ParamBounds {
- opt_region_bound: self.opt_region_bound.fold_with(folder),
+ region_bounds: self.region_bounds.fold_with(folder),
builtin_bounds: self.builtin_bounds.fold_with(folder),
trait_bounds: self.trait_bounds.fold_with(folder),
}
def_id: self.def_id,
space: self.space,
index: self.index,
+ associated_with: self.associated_with,
bounds: self.bounds.fold_with(folder),
default: self.default.fold_with(folder),
}
match *self {
ty::UnsizeLength(len) => ty::UnsizeLength(len),
ty::UnsizeStruct(box ref k, n) => ty::UnsizeStruct(box k.fold_with(folder), n),
- ty::UnsizeVtable(bounds, def_id, ref substs) => {
- ty::UnsizeVtable(bounds.fold_with(folder), def_id, substs.fold_with(folder))
+ ty::UnsizeVtable(ty::TyTrait{bounds, def_id, substs: ref substs}, self_ty) => {
+ ty::UnsizeVtable(
+ ty::TyTrait {
+ bounds: bounds.fold_with(folder),
+ def_id: def_id,
+ substs: substs.fold_with(folder)
+ },
+ self_ty.fold_with(folder))
}
}
}
}
+impl TypeFoldable for traits::Obligation {
+ fn fold_with<'tcx, F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::Obligation {
+ folder.fold_obligation(self)
+ }
+}
+
+impl<N:TypeFoldable> TypeFoldable for traits::VtableImplData<N> {
+ fn fold_with<'tcx, F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::VtableImplData<N> {
+ traits::VtableImplData {
+ impl_def_id: self.impl_def_id,
+ substs: self.substs.fold_with(folder),
+ nested: self.nested.fold_with(folder),
+ }
+ }
+}
+
+impl<N:TypeFoldable> TypeFoldable for traits::Vtable<N> {
+ fn fold_with<'tcx, F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::Vtable<N> {
+ match *self {
+ traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)),
+ traits::VtableUnboxedClosure(d) => traits::VtableUnboxedClosure(d),
+ traits::VtableParam(ref p) => traits::VtableParam(p.fold_with(folder)),
+ traits::VtableBuiltin => traits::VtableBuiltin,
+ }
+ }
+}
+
+impl TypeFoldable for traits::VtableParamData {
+ fn fold_with<'tcx, F:TypeFolder<'tcx>>(&self, folder: &mut F) -> traits::VtableParamData {
+ traits::VtableParamData {
+ bound: self.bound.fold_with(folder),
+ }
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// "super" routines: these are the default implementations for TypeFolder.
//
}
}
+pub fn super_fold_obligation<'tcx, T:TypeFolder<'tcx>>(this: &mut T,
+ obligation: &traits::Obligation)
+ -> traits::Obligation
+{
+ traits::Obligation {
+ cause: obligation.cause,
+ recursion_depth: obligation.recursion_depth,
+ trait_ref: obligation.trait_ref.fold_with(this),
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// Some sample folders
}
}
}
+
+///////////////////////////////////////////////////////////////////////////
+// Region eraser
+//
+// Replaces all free regions with 'static. Useful in trans.
+
+pub struct RegionEraser<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
+}
+
+pub fn erase_regions<T:TypeFoldable>(tcx: &ty::ctxt, t: T) -> T {
+ let mut eraser = RegionEraser { tcx: tcx };
+ t.fold_with(&mut eraser)
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionEraser<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
+
+ fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ match r {
+ ty::ReLateBound(..) | ty::ReEarlyBound(..) => r,
+ _ => ty::ReStatic
+ }
+ }
+}
use syntax::abi;
use syntax::{ast, ast_util};
use syntax::codemap::Span;
+use syntax::parse::token;
pub trait AstConv<'tcx> {
fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype;
fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef>;
- // what type should we use when a type is omitted?
+ /// What type should we use when a type is omitted?
fn ty_infer(&self, span: Span) -> ty::t;
+
+ /// Returns true if associated types from the given trait and type are
+ /// allowed to be used here and false otherwise.
+ fn associated_types_of_trait_are_valid(&self,
+ ty: ty::t,
+ trait_id: ast::DefId)
+ -> bool;
+
+ /// Returns the binding of the given associated type for some type.
+ fn associated_type_binding(&self,
+ span: Span,
+ ty: Option<ty::t>,
+ trait_id: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t;
}
pub fn ast_region_to_region(tcx: &ty::ctxt, lifetime: &ast::Lifetime)
r
}
-fn ast_path_substs<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
- this: &AC,
- rscope: &RS,
- decl_generics: &ty::Generics,
- self_ty: Option<ty::t>,
- path: &ast::Path) -> Substs
-{
+fn ast_path_substs<'tcx,AC,RS>(
+ this: &AC,
+ rscope: &RS,
+ decl_def_id: ast::DefId,
+ decl_generics: &ty::Generics,
+ self_ty: Option<ty::t>,
+ associated_ty: Option<ty::t>,
+ path: &ast::Path)
+ -> Substs
+ where AC: AstConv<'tcx>, RS: RegionScope {
/*!
* Given a path `path` that refers to an item `I` with the
* declared generics `decl_generics`, returns an appropriate
}
match anon_regions {
- Ok(v) => v.move_iter().collect(),
+ Ok(v) => v.into_iter().collect(),
Err(()) => Vec::from_fn(expected_num_region_params,
|_| ty::ReStatic) // hokey
}
// Convert the type parameters supplied by the user.
let ty_param_defs = decl_generics.types.get_slice(TypeSpace);
let supplied_ty_param_count = path.segments.iter().flat_map(|s| s.types.iter()).count();
- let formal_ty_param_count = ty_param_defs.len();
- let required_ty_param_count = ty_param_defs.iter()
- .take_while(|x| x.default.is_none())
- .count();
+ let formal_ty_param_count =
+ ty_param_defs.iter()
+ .take_while(|x| !ty::is_associated_type(tcx, x.def_id))
+ .count();
+ let required_ty_param_count =
+ ty_param_defs.iter()
+ .take_while(|x| {
+ x.default.is_none() &&
+ !ty::is_associated_type(tcx, x.def_id)
+ })
+ .count();
if supplied_ty_param_count < required_ty_param_count {
let expected = if required_ty_param_count < formal_ty_param_count {
"expected at least"
}
if supplied_ty_param_count > required_ty_param_count
- && !this.tcx().sess.features.default_type_params.get() {
+ && !this.tcx().sess.features.borrow().default_type_params {
span_err!(this.tcx().sess, path.span, E0108,
"default type parameters are experimental and possibly buggy");
span_note!(this.tcx().sess, path.span,
"add #![feature(default_type_params)] to the crate attributes to enable");
}
- let tps = path.segments.iter().flat_map(|s| s.types.iter())
- .map(|a_t| ast_ty_to_ty(this, rscope, &**a_t))
- .collect();
+ let tps = path.segments
+ .iter()
+ .flat_map(|s| s.types.iter())
+ .map(|a_t| ast_ty_to_ty(this, rscope, &**a_t))
+ .collect();
let mut substs = Substs::new_type(tps, regions);
}
for param in ty_param_defs.slice_from(supplied_ty_param_count).iter() {
- let default = param.default.unwrap();
- let default = default.subst_spanned(tcx, &substs, Some(path.span));
- substs.types.push(TypeSpace, default);
+ match param.default {
+ Some(default) => {
+ // This is a default type parameter.
+ let default = default.subst_spanned(tcx,
+ &substs,
+ Some(path.span));
+ substs.types.push(TypeSpace, default);
+ }
+ None => {
+ // This is an associated type.
+ substs.types.push(
+ TypeSpace,
+ this.associated_type_binding(path.span,
+ associated_ty,
+ decl_def_id,
+ param.def_id))
+ }
+ }
}
substs
}
-pub fn ast_path_to_trait_ref<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
- this: &AC,
- rscope: &RS,
- trait_def_id: ast::DefId,
- self_ty: Option<ty::t>,
- path: &ast::Path) -> Rc<ty::TraitRef> {
+pub fn ast_path_to_trait_ref<'tcx,AC,RS>(this: &AC,
+ rscope: &RS,
+ trait_def_id: ast::DefId,
+ self_ty: Option<ty::t>,
+ associated_type: Option<ty::t>,
+ path: &ast::Path)
+ -> Rc<ty::TraitRef>
+ where AC: AstConv<'tcx>,
+ RS: RegionScope {
let trait_def = this.get_trait_def(trait_def_id);
Rc::new(ty::TraitRef {
def_id: trait_def_id,
- substs: ast_path_substs(this, rscope, &trait_def.generics, self_ty, path)
+ substs: ast_path_substs(this,
+ rscope,
+ trait_def_id,
+ &trait_def.generics,
+ self_ty,
+ associated_type,
+ path)
})
}
rscope: &RS,
did: ast::DefId,
path: &ast::Path)
- -> TypeAndSubsts
-{
+ -> TypeAndSubsts {
let tcx = this.tcx();
let ty::Polytype {
generics: generics,
ty: decl_ty
} = this.get_item_ty(did);
- let substs = ast_path_substs(this, rscope, &generics, None, path);
+ let substs = ast_path_substs(this,
+ rscope,
+ did,
+ &generics,
+ None,
+ None,
+ path);
let ty = decl_ty.subst(tcx, &substs);
TypeAndSubsts { substs: substs, ty: ty }
}
Substs::new(VecPerParamSpace::params_from_type(type_params),
VecPerParamSpace::params_from_type(region_params))
} else {
- ast_path_substs(this, rscope, &generics, None, path)
+ ast_path_substs(this, rscope, did, &generics, None, None, path)
};
let ty = decl_ty.subst(tcx, &substs);
// FIXME(#12938): This is a hack until we have full support for
// DST.
match a_def {
- def::DefTy(did) | def::DefStruct(did)
+ def::DefTy(did, _) | def::DefStruct(did)
if Some(did) == this.tcx().lang_items.owned_box() => {
if path.segments
.iter()
"not enough type parameters supplied to `Box<T>`");
Some(ty::mk_err())
}
- def::DefTy(did) | def::DefStruct(did)
+ def::DefTy(did, _) | def::DefStruct(did)
if Some(did) == this.tcx().lang_items.gc() => {
if path.segments
.iter()
RS:RegionScope>(
this: &AC,
rscope: &RS,
- unboxed_function: &ast::UnboxedFnTy,
+ kind: ast::UnboxedClosureKind,
+ decl: &ast::FnDecl,
self_ty: Option<ty::t>)
-> ty::TraitRef {
- let lang_item = match unboxed_function.kind {
+ let lang_item = match kind {
ast::FnUnboxedClosureKind => FnTraitLangItem,
ast::FnMutUnboxedClosureKind => FnMutTraitLangItem,
ast::FnOnceUnboxedClosureKind => FnOnceTraitLangItem,
};
let trait_did = this.tcx().lang_items.require(lang_item).unwrap();
- let input_types =
- unboxed_function.decl
- .inputs
- .iter()
- .map(|input| {
+ let input_types = decl.inputs
+ .iter()
+ .map(|input| {
ast_ty_to_ty(this, rscope, &*input.ty)
- }).collect::<Vec<_>>();
+ }).collect::<Vec<_>>();
let input_tuple = if input_types.len() == 0 {
ty::mk_nil()
} else {
ty::mk_tup(this.tcx(), input_types)
};
- let output_type = ast_ty_to_ty(this,
- rscope,
- &*unboxed_function.decl.output);
+ let output_type = ast_ty_to_ty(this, rscope, &*decl.output);
let mut substs = Substs::new_type(vec!(input_tuple, output_type),
- Vec::new());
+ Vec::new());
match self_ty {
Some(s) => substs.types.push(SelfSpace, s),
substs
} = trait_ref_for_unboxed_function(this,
rscope,
- &**unboxed_function,
+ unboxed_function.kind,
+ &*unboxed_function.decl,
None);
let r = ptr_ty.default_region();
let tr = ty::mk_trait(this.tcx(),
}
}
Some(&def::DefTrait(trait_def_id)) => {
- let result = ast_path_to_trait_ref(
- this, rscope, trait_def_id, None, path);
+ let result = ast_path_to_trait_ref(this,
+ rscope,
+ trait_def_id,
+ None,
+ None,
+ path);
let bounds = match *opt_bounds {
None => {
conv_existential_bounds(this,
constr(ast_ty_to_ty(this, rscope, a_seq_ty))
}
+fn associated_ty_to_ty<'tcx,AC,RS>(this: &AC,
+ rscope: &RS,
+ trait_path: &ast::Path,
+ for_ast_type: &ast::Ty,
+ trait_type_id: ast::DefId,
+ span: Span)
+ -> ty::t
+ where AC: AstConv<'tcx>, RS: RegionScope {
+ // Find the trait that this associated type belongs to.
+ let trait_did = match ty::impl_or_trait_item(this.tcx(),
+ trait_type_id).container() {
+ ty::ImplContainer(_) => {
+ this.tcx().sess.span_bug(span,
+ "associated_ty_to_ty(): impl associated \
+ types shouldn't go through this \
+ function")
+ }
+ ty::TraitContainer(trait_id) => trait_id,
+ };
+
+ let for_type = ast_ty_to_ty(this, rscope, for_ast_type);
+ if !this.associated_types_of_trait_are_valid(for_type, trait_did) {
+ this.tcx().sess.span_err(span,
+ "this associated type is not \
+ allowed in this context");
+ return ty::mk_err()
+ }
+
+ let trait_ref = ast_path_to_trait_ref(this,
+ rscope,
+ trait_did,
+ None,
+ Some(for_type),
+ trait_path);
+ let trait_def = this.get_trait_def(trait_did);
+ for type_parameter in trait_def.generics.types.iter() {
+ if type_parameter.def_id == trait_type_id {
+ return *trait_ref.substs.types.get(type_parameter.space,
+ type_parameter.index)
+ }
+ }
+ this.tcx().sess.span_bug(span,
+ "this associated type didn't get added \
+ as a parameter for some reason")
+}
+
// Parses the programmer's textual representation of a type into our
// internal notion of a type.
pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
}
match a_def {
def::DefTrait(trait_def_id) => {
- let result = ast_path_to_trait_ref(
- this, rscope, trait_def_id, None, path);
+ let result = ast_path_to_trait_ref(this,
+ rscope,
+ trait_def_id,
+ None,
+ None,
+ path);
let empty_bounds: &[ast::TyParamBound] = &[];
let ast_bounds = match *bounds {
Some(ref b) => b.as_slice(),
result.substs.clone(),
bounds)
}
- def::DefTy(did) | def::DefStruct(did) => {
+ def::DefTy(did, _) | def::DefStruct(did) => {
ast_path_to_ty(this, rscope, did, path).ty
}
def::DefTyParam(space, id, n) => {
def::DefPrimTy(_) => {
fail!("DefPrimTy arm missed in previous ast_ty_to_prim_ty call");
}
+ def::DefAssociatedTy(trait_type_id) => {
+ let path_str = tcx.map.path_to_string(
+ tcx.map.get_parent(trait_type_id.node));
+ tcx.sess.span_err(ast_ty.span,
+ format!("ambiguous associated \
+ type; specify the type \
+ using the syntax `<Type \
+ as {}>::{}`",
+ path_str,
+ token::get_ident(
+ path.segments
+ .last()
+ .unwrap()
+ .identifier)
+ .get()).as_slice());
+ ty::mk_err()
+ }
_ => {
tcx.sess.span_fatal(ast_ty.span,
format!("found value name used \
}
}
}
+ ast::TyQPath(ref qpath) => {
+ match tcx.def_map.borrow().find(&ast_ty.id) {
+ None => {
+ tcx.sess.span_bug(ast_ty.span,
+ "unbound qualified path")
+ }
+ Some(&def::DefAssociatedTy(trait_type_id)) => {
+ associated_ty_to_ty(this,
+ rscope,
+ &qpath.trait_name,
+ &*qpath.for_type,
+ trait_type_id,
+ ast_ty.span)
+ }
+ Some(_) => {
+ tcx.sess.span_err(ast_ty.span,
+ "this qualified path does not name \
+ an associated type");
+ ty::mk_err()
+ }
+ }
+ }
ast::TyFixedLengthVec(ref ty, ref e) => {
match const_eval::eval_const_expr_partial(tcx, &**e) {
Ok(ref r) => {
};
let input_tys = input_tys.iter().map(|a| ty_of_arg(this, &rb, a, None));
let self_and_input_tys: Vec<_> =
- self_ty.move_iter().chain(input_tys).collect();
+ self_ty.into_iter().chain(input_tys).collect();
// Second, if there was exactly one lifetime (either a substitution or a
// reference) in the arguments, then any anonymous regions in the output
pub struct PartitionedBounds<'a> {
pub builtin_bounds: ty::BuiltinBounds,
pub trait_bounds: Vec<&'a ast::TraitRef>,
- pub unboxed_fn_ty_bounds: Vec<&'a ast::UnboxedFnTy>,
+ pub unboxed_fn_ty_bounds: Vec<&'a ast::UnboxedFnBound>,
pub region_bounds: Vec<&'a ast::Lifetime>,
}
region_bounds.push(l);
}
ast::UnboxedFnTyParamBound(ref unboxed_function) => {
- unboxed_fn_ty_bounds.push(unboxed_function);
+ unboxed_fn_ty_bounds.push(&**unboxed_function);
}
}
}
kind_name = "[error]";
arg_types = subpats.clone()
.unwrap_or_default()
- .move_iter()
+ .into_iter()
.map(|_| ty::mk_err())
.collect();
}
None);
match tcx.def_map.borrow().find(&pat.id) {
Some(def) => {
- let item_type = ty::lookup_item_type(tcx, def.def_id());
- let substitutions = fcx.infcx().fresh_substs_for_type(
- pat.span, &item_type.generics);
+ let struct_ty = fcx.instantiate_item_type(pat.span, def.def_id());
check_struct_pat(pcx, pat.span, fields.as_slice(),
- etc, def.def_id(), &substitutions);
+ etc, def.def_id(), &struct_ty.substs);
}
None => {
tcx.sess.span_bug(pat.span,
use middle::subst;
use middle::subst::Subst;
+use middle::traits;
use middle::ty::*;
use middle::ty;
use middle::typeck::astconv::AstConv;
use middle::typeck::check;
use middle::typeck::infer;
use middle::typeck::MethodCallee;
-use middle::typeck::{MethodOrigin, MethodParam};
-use middle::typeck::{MethodStatic, MethodStaticUnboxedClosure, MethodObject};
-use middle::typeck::{param_index};
+use middle::typeck::{MethodOrigin, MethodParam, MethodTypeParam};
+use middle::typeck::{MethodStatic, MethodStaticUnboxedClosure, MethodObject, MethodTraitObject};
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::TypeAndSubsts;
use util::common::indenter;
for trait_item in trait_items.iter() {
match *trait_item {
ty::MethodTraitItem(_) => method_count += 1,
+ ty::TypeTraitItem(_) => {}
}
}
true
// find all the impls of that trait. Each of those are
// candidates.
let opt_applicable_traits = self.fcx.ccx.trait_map.find(&expr_id);
- for applicable_traits in opt_applicable_traits.move_iter() {
+ for applicable_traits in opt_applicable_traits.into_iter() {
for trait_did in applicable_traits.iter() {
debug!("push_extension_candidates() found trait: {}",
if trait_did.krate == ast::LOCAL_CRATE {
.clone();
let method = match trait_item {
ty::MethodTraitItem(method) => method,
+ ty::TypeTraitItem(_) => {
+ self.tcx().sess.bug(
+ "push_unboxed_closure_call_candidates_if_applicable(): \
+ unexpected associated type in function trait")
+ }
};
// Make sure it has the right name!
return
}
- let vcx = self.fcx.vtable_context();
-
// Get the tupled type of the arguments.
let arguments_type = *closure_function_type.sig.inputs.get(0);
let return_type = closure_function_type.sig.output;
let closure_region =
- vcx.infcx.next_region_var(infer::MiscVariable(self.span));
+ self.fcx.infcx().next_region_var(infer::MiscVariable(self.span));
let unboxed_closure_type = ty::mk_unboxed_closure(self.tcx(),
closure_did,
closure_region);
rcvr_substs: subst::Substs::new_trait(
vec![arguments_type, return_type],
vec![],
- *vcx.infcx.next_ty_vars(1).get(0)),
+ *self.fcx.infcx().next_ty_vars(1).get(0)),
method_ty: method,
origin: MethodStaticUnboxedClosure(closure_did),
});
self.push_inherent_candidates_from_bounds_inner(
&[trait_ref.clone()],
- |_this, new_trait_ref, m, method_num, _bound_num| {
+ |_this, new_trait_ref, m, method_num| {
let vtable_index =
get_method_index(tcx, &*new_trait_ref,
trait_ref.clone(), method_num);
rcvr_match_condition: RcvrMatchesIfObject(did),
rcvr_substs: new_trait_ref.substs.clone(),
method_ty: Rc::new(m),
- origin: MethodObject(MethodObject {
- trait_id: new_trait_ref.def_id,
+ origin: MethodTraitObject(MethodObject {
+ trait_ref: new_trait_ref,
object_trait_id: did,
method_num: method_num,
real_index: vtable_index
rcvr_ty,
param_ty.space,
param_ty.idx,
- restrict_to,
- param_index { space: param_ty.space, index: param_ty.idx });
+ restrict_to);
}
self_ty: ty::t,
space: subst::ParamSpace,
index: uint,
- restrict_to: Option<DefId>,
- param: param_index) {
+ restrict_to: Option<DefId>) {
let bounds =
self.fcx.inh.param_env.bounds.get(space, index).trait_bounds
.as_slice();
self.push_inherent_candidates_from_bounds_inner(bounds,
- |this, trait_ref, m, method_num, bound_num| {
+ |this, trait_ref, m, method_num| {
match restrict_to {
Some(trait_did) => {
if trait_did != trait_ref.def_id {
rcvr_match_condition: condition,
rcvr_substs: trait_ref.substs.clone(),
method_ty: m,
- origin: MethodParam(MethodParam {
- trait_id: trait_ref.def_id,
+ origin: MethodTypeParam(MethodParam {
+ trait_ref: trait_ref,
method_num: method_num,
- param_num: param,
- bound_num: bound_num,
})
})
})
mk_cand: |this: &mut LookupContext,
tr: Rc<TraitRef>,
m: Rc<ty::Method>,
- method_num: uint,
- bound_num: uint|
- -> Option<Candidate>) {
+ method_num: uint|
+ -> Option<Candidate>)
+ {
let tcx = self.tcx();
- let mut next_bound_idx = 0; // count only trait bounds
-
- ty::each_bound_trait_and_supertraits(tcx, bounds, |bound_trait_ref| {
- let this_bound_idx = next_bound_idx;
- next_bound_idx += 1;
+ let mut cache = HashSet::new();
+ for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
+ // Already visited this trait, skip it.
+ if !cache.insert(bound_trait_ref.def_id) {
+ continue;
+ }
let trait_items = ty::trait_items(tcx, bound_trait_ref.def_id);
match trait_items.iter().position(|ti| {
m.explicit_self != ty::StaticExplicitSelfCategory &&
m.ident.name == self.m_name
}
+ ty::TypeTraitItem(_) => false,
}
}) {
Some(pos) => {
let method = match *trait_items.get(pos) {
ty::MethodTraitItem(ref method) => (*method).clone(),
+ ty::TypeTraitItem(_) => {
+ tcx.sess.bug("typechecking associated type as \
+ though it were a method")
+ }
};
match mk_cand(self,
bound_trait_ref,
method,
- pos,
- this_bound_idx) {
+ pos) {
Some(cand) => {
debug!("pushing inherent candidate for param: {}",
cand.repr(self.tcx()));
// check next trait or bound
}
}
- true
- });
+ }
}
let impl_items = self.tcx().impl_items.borrow();
for impl_infos in self.tcx().inherent_impls.borrow().find(&did).iter() {
- for impl_did in impl_infos.borrow().iter() {
+ for impl_did in impl_infos.iter() {
let items = impl_items.get(impl_did);
self.push_candidates_from_impl(*impl_did,
items.as_slice(),
m.ident().name == self.m_name
}) {
Some(ty::MethodTraitItem(method)) => method,
- None => { return; } // No method with the right name.
+ Some(ty::TypeTraitItem(_)) | None => {
+ // No method with the right name.
+ return
+ }
};
// determine the `self` of the impl with fresh
// variables for each parameter:
let span = self.self_expr.map_or(self.span, |e| e.span);
- let vcx = self.fcx.vtable_context();
let TypeAndSubsts {
substs: impl_substs,
ty: impl_ty
- } = impl_self_ty(&vcx, span, impl_did);
+ } = impl_self_ty(self.fcx, span, impl_did);
let condition = match method.explicit_self {
ByReferenceExplicitSelfCategory(_, mt) if mt == MutMutable =>
}
let (self_ty, auto_deref_ref) = self.consider_reborrow(self_ty, autoderefs);
- let adjustment = Some((self.self_expr.unwrap().id, ty::AutoDerefRef(auto_deref_ref)));
+ let adjustment = Some((self.self_expr.unwrap().id, ty::AdjustDerefRef(auto_deref_ref)));
match self.search_for_method(self_ty) {
None => None,
adjustment {:?} for {}", adjustment, self.ty_to_string(self_ty));
match adjustment {
Some((self_expr_id, adj)) => {
- self.fcx.write_adjustment(self_expr_id, adj);
+ self.fcx.write_adjustment(self_expr_id, self.span, adj);
}
None => {}
}
ty_err => None,
- ty_infer(TyVar(_)) => {
+ ty_infer(TyVar(_)) |
+ ty_infer(SkolemizedTy(_)) |
+ ty_infer(SkolemizedIntTy(_)) => {
self.bug(format!("unexpected type: {}",
self.ty_to_string(self_ty)).as_slice());
}
Some(self_expr_id) => {
self.fcx.write_adjustment(
self_expr_id,
- ty::AutoDerefRef(ty::AutoDerefRef {
+ self.span,
+ ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: autoderefs,
autoref: Some(kind(region, *mutbl))
}));
// return something so we don't get errors for every mutability
return Some(MethodCallee {
- origin: relevant_candidates.get(0).origin,
+ origin: relevant_candidates.get(0).origin.clone(),
ty: ty::mk_err(),
substs: subst::Substs::empty()
});
candidate_a.repr(self.tcx()),
candidate_b.repr(self.tcx()));
match (&candidate_a.origin, &candidate_b.origin) {
- (&MethodParam(ref p1), &MethodParam(ref p2)) => {
- let same_trait = p1.trait_id == p2.trait_id;
- let same_method = p1.method_num == p2.method_num;
- let same_param = p1.param_num == p2.param_num;
- // The bound number may be different because
- // multiple bounds may lead to the same trait
- // impl
+ (&MethodTypeParam(ref p1), &MethodTypeParam(ref p2)) => {
+ let same_trait =
+ p1.trait_ref.def_id == p2.trait_ref.def_id;
+ let same_method =
+ p1.method_num == p2.method_num;
+ // it's ok to compare self-ty with `==` here because
+ // they are always a TyParam
+ let same_param =
+ p1.trait_ref.self_ty() == p2.trait_ref.self_ty();
same_trait && same_method && same_param
}
_ => false
let fn_sig = &bare_fn_ty.sig;
let inputs = match candidate.origin {
- MethodObject(..) => {
+ MethodTraitObject(..) => {
// For annoying reasons, we've already handled the
// substitution of self for object calls.
let args = fn_sig.inputs.slice_from(1).iter().map(|t| {
t.subst(tcx, &all_substs)
});
- Some(*fn_sig.inputs.get(0)).move_iter().chain(args).collect()
+ Some(*fn_sig.inputs.get(0)).into_iter().chain(args).collect()
}
_ => fn_sig.inputs.subst(tcx, &all_substs)
};
}
}
- self.fcx.add_region_obligations_for_parameters(
- self.span,
+ self.fcx.add_obligations_for_parameters(
+ traits::ObligationCause::misc(self.span),
&all_substs,
&candidate.method_ty.generics);
MethodCallee {
- origin: candidate.origin,
+ origin: candidate.origin.clone(),
ty: fty,
substs: all_substs
}
match candidate.origin {
MethodStatic(..) |
- MethodParam(..) |
+ MethodTypeParam(..) |
MethodStaticUnboxedClosure(..) => {
return; // not a call to a trait instance
}
- MethodObject(..) => {}
+ MethodTraitObject(..) => {}
}
match candidate.method_ty.explicit_self {
MethodStaticUnboxedClosure(_) => bad = false,
// FIXME: does this properly enforce this on everything now
// that self has been merged in? -sully
- MethodParam(MethodParam { trait_id: trait_id, .. }) |
- MethodObject(MethodObject { trait_id: trait_id, .. }) => {
+ MethodTypeParam(MethodParam { trait_ref: ref trait_ref, .. }) |
+ MethodTraitObject(MethodObject { trait_ref: ref trait_ref, .. }) => {
bad = self.tcx().destructor_for_type.borrow()
- .contains_key(&trait_id);
+ .contains_key(&trait_ref.def_id);
}
}
// If we're reporting statics, we want to report the trait
// definition if possible, rather than an impl
match ty::trait_item_of_item(self.tcx(), impl_did) {
- None => {
+ None | Some(TypeTraitItemId(_)) => {
debug!("(report candidate) No trait method \
found");
impl_did
MethodStaticUnboxedClosure(did) => {
self.report_static_candidate(idx, did)
}
- MethodParam(ref mp) => {
- self.report_param_candidate(idx, (*mp).trait_id)
+ MethodTypeParam(ref mp) => {
+ self.report_param_candidate(idx, mp.trait_ref.def_id)
}
- MethodObject(ref mo) => {
- self.report_trait_candidate(idx, mo.trait_id)
+ MethodTraitObject(ref mo) => {
+ self.report_trait_candidate(idx, mo.trait_ref.def_id)
}
}
}
use middle::const_eval;
use middle::def;
-use middle::freevars;
use middle::lang_items::IteratorItem;
use middle::mem_categorization::McResult;
use middle::mem_categorization;
use middle::pat_util;
use middle::subst;
use middle::subst::{Subst, Substs, VecPerParamSpace, ParamSpace};
+use middle::traits;
use middle::ty::{FnSig, VariantInfo};
use middle::ty::{Polytype};
use middle::ty::{Disr, ParamTy, ParameterEnvironment};
use middle::typeck::check::method::{DontAutoderefReceiver};
use middle::typeck::check::method::{IgnoreStaticMethods, ReportStaticMethods};
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
-use middle::typeck::check::vtable::VtableContext;
use middle::typeck::CrateCtxt;
use middle::typeck::infer::{resolve_type, force_tvar};
use middle::typeck::infer;
use middle::typeck::rscope::RegionScope;
use middle::typeck::{lookup_def_ccx};
use middle::typeck::no_params;
-use middle::typeck::{require_same_types, vtable_map};
-use middle::typeck::{MethodCall, MethodMap};
+use middle::typeck::{require_same_types};
+use middle::typeck::{MethodCall, MethodCallee, MethodMap, ObjectCastMap};
use middle::typeck::{TypeAndSubsts};
use middle::typeck;
use middle::lang_items::TypeIdLangItem;
use std::collections::HashMap;
use std::mem::replace;
use std::rc::Rc;
-use std::slice;
use syntax::abi;
-use syntax::ast::{ProvidedMethod, RequiredMethod};
+use syntax::ast::{ProvidedMethod, RequiredMethod, TypeTraitItem};
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util::{local_def, PostExpansionMethod};
use syntax;
pub mod _match;
-pub mod vtable;
+pub mod vtable2; // New trait code
pub mod writeback;
pub mod regionmanip;
pub mod regionck;
pub mod demand;
pub mod method;
+pub mod wf;
/// Fields that are part of a `FnCtxt` which are inherited by
/// closures defined within the function. For example:
item_substs: RefCell<NodeMap<ty::ItemSubsts>>,
adjustments: RefCell<NodeMap<ty::AutoAdjustment>>,
method_map: MethodMap,
- vtable_map: vtable_map,
upvar_borrow_map: RefCell<ty::UpvarBorrowMap>,
unboxed_closures: RefCell<DefIdMap<ty::UnboxedClosure>>,
+ object_cast_map: ObjectCastMap,
// A mapping from each fn's id to its signature, with all bound
// regions replaced with free ones. Unlike the other tables, this
// then in some expression `let x = Foo { ... }` it will
// instantiate the type parameter `T` with a fresh type `$0`. At
// the same time, it will record a region obligation of
- // `$0:'static`. This will get checked later by regionck. (We
+ // `$0:'static`. This will get checked later by regionck. (We
// can't generally check these things right away because we have
// to wait until types are resolved.)
//
// obligations (otherwise, it's easy to fail to walk to a
// particular node-id).
region_obligations: RefCell<NodeMap<Vec<RegionObligation>>>,
+
+ // Tracks trait obligations incurred during this function body.
+ fulfillment_cx: RefCell<traits::FulfillmentContext>,
}
struct RegionObligation {
self.ccx.tcx.upvar_borrow(upvar_id)
}
fn capture_mode(&self, closure_expr_id: ast::NodeId)
- -> freevars::CaptureMode {
+ -> ast::CaptureClause {
self.ccx.tcx.capture_mode(closure_expr_id)
}
fn unboxed_closures<'a>(&'a self)
item_substs: RefCell::new(NodeMap::new()),
adjustments: RefCell::new(NodeMap::new()),
method_map: RefCell::new(FnvHashMap::new()),
- vtable_map: RefCell::new(FnvHashMap::new()),
+ object_cast_map: RefCell::new(NodeMap::new()),
upvar_borrow_map: RefCell::new(HashMap::new()),
unboxed_closures: RefCell::new(DefIdMap::new()),
fn_sig_map: RefCell::new(NodeMap::new()),
region_obligations: RefCell::new(NodeMap::new()),
+ fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()),
}
}
}
free_substs: subst::Substs::empty(),
bounds: subst::VecPerParamSpace::empty(),
implicit_region_bound: ty::ReStatic,
+ caller_obligations: subst::VecPerParamSpace::empty(),
};
Inherited::new(ccx.tcx, param_env)
}
struct CheckItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
-struct CheckTypeWellFormedVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
-
-impl<'a, 'tcx, 'v> Visitor<'v> for CheckTypeWellFormedVisitor<'a, 'tcx> {
- fn visit_item(&mut self, i: &ast::Item) {
- check_type_well_formed(self.ccx, i);
- visit::walk_item(self, i);
- }
-}
-
impl<'a, 'tcx, 'v> Visitor<'v> for CheckItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item) {
pub fn check_item_types(ccx: &CrateCtxt) {
let krate = ccx.tcx.map.krate();
-
- let mut visit = CheckTypeWellFormedVisitor { ccx: ccx };
+ let mut visit = wf::CheckTypeWellFormedVisitor::new(ccx);
visit::walk_crate(&mut visit, krate);
// If types are not well-formed, it leads to all manner of errors
let fcx = check_fn(ccx, fn_ty.fn_style, id, &fn_ty.sig,
decl, id, body, &inh);
- vtable::resolve_in_block(&fcx, body);
+ vtable2::select_all_fcx_obligations_or_error(&fcx);
regionck::regionck_fn(&fcx, id, body);
writeback::resolve_type_vars_in_fn(&fcx, decl, body);
+ vtable2::check_builtin_bound_obligations(&fcx); // must happen after writeback
}
_ => ccx.tcx.sess.impossible_case(body.span,
"check_bare_fn: function type expected")
}
impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
- fn assign(&mut self, nid: ast::NodeId, ty_opt: Option<ty::t>) {
- match ty_opt {
- None => {
- // infer the variable's type
- let var_id = self.fcx.infcx().next_ty_var_id();
- let var_ty = ty::mk_var(self.fcx.tcx(), var_id);
- self.fcx.inh.locals.borrow_mut().insert(nid, var_ty);
- }
- Some(typ) => {
- // take type that the user specified
- self.fcx.inh.locals.borrow_mut().insert(nid, typ);
- }
+ fn assign(&mut self, _span: Span, nid: ast::NodeId, ty_opt: Option<ty::t>) -> ty::t {
+ match ty_opt {
+ None => {
+ // infer the variable's type
+ let var_id = self.fcx.infcx().next_ty_var_id();
+ let var_ty = ty::mk_var(self.fcx.tcx(), var_id);
+ self.fcx.inh.locals.borrow_mut().insert(nid, var_ty);
+ var_ty
}
+ Some(typ) => {
+ // take type that the user specified
+ self.fcx.inh.locals.borrow_mut().insert(nid, typ);
+ typ
+ }
+ }
}
}
ast::TyInfer => None,
_ => Some(self.fcx.to_ty(&*local.ty))
};
- self.assign(local.id, o_ty);
+ self.assign(local.span, local.id, o_ty);
debug!("Local variable {} is assigned type {}",
self.fcx.pat_to_string(&*local.pat),
self.fcx.infcx().ty_to_string(
// Add pattern bindings.
fn visit_pat(&mut self, p: &ast::Pat) {
- match p.node {
- ast::PatIdent(_, ref path1, _)
- if pat_util::pat_is_binding(&self.fcx.ccx.tcx.def_map, p) => {
- self.assign(p.id, None);
- debug!("Pattern binding {} is assigned to {}",
- token::get_ident(path1.node),
- self.fcx.infcx().ty_to_string(
- self.fcx.inh.locals.borrow().get_copy(&p.id)));
- }
- _ => {}
- }
- visit::walk_pat(self, p);
-
+ match p.node {
+ ast::PatIdent(_, ref path1, _)
+ if pat_util::pat_is_binding(&self.fcx.ccx.tcx.def_map, p) => {
+ let var_ty = self.assign(p.span, p.id, None);
+
+ self.fcx.require_type_is_sized(var_ty, p.span,
+ traits::VariableType(p.id));
+
+ debug!("Pattern binding {} is assigned to {} with type {}",
+ token::get_ident(path1.node),
+ self.fcx.infcx().ty_to_string(
+ self.fcx.inh.locals.borrow().get_copy(&p.id)),
+ var_ty.repr(self.fcx.tcx()));
+ }
+ _ => {}
+ }
+ visit::walk_pat(self, p);
}
fn visit_block(&mut self, b: &ast::Block) {
// Add formal parameters.
for (arg_ty, input) in arg_tys.iter().zip(decl.inputs.iter()) {
// Create type variables for each argument.
- pat_util::pat_bindings(&tcx.def_map,
- &*input.pat,
- |_bm, pat_id, _sp, _path| {
- visit.assign(pat_id, None);
- });
+ pat_util::pat_bindings(
+ &tcx.def_map,
+ &*input.pat,
+ |_bm, pat_id, sp, _path| {
+ let var_ty = visit.assign(sp, pat_id, None);
+ fcx.require_type_is_sized(var_ty, sp,
+ traits::VariableType(pat_id));
+ });
// Check the pattern.
let pcx = pat_ctxt {
}
}
-fn check_type_well_formed(ccx: &CrateCtxt, item: &ast::Item) {
- /*!
- * Checks that the field types (in a struct def'n) or
- * argument types (in an enum def'n) are well-formed,
- * meaning that they do not require any constraints not
- * declared in the struct definition itself.
- * For example, this definition would be illegal:
- *
- * struct Ref<'a, T> { x: &'a T }
- *
- * because the type did not declare that `T:'a`.
- *
- * We do this check as a pre-pass before checking fn bodies
- * because if these constraints are not included it frequently
- * leads to confusing errors in fn bodies. So it's better to check
- * the types first.
- */
-
- debug!("check_type_well_formed(it.id={}, it.ident={})",
- item.id,
- ty::item_path_str(ccx.tcx, local_def(item.id)));
-
- match item.node {
- ast::ItemStruct(..) => {
- check_type_defn(ccx, item, |fcx| {
- ty::struct_fields(ccx.tcx, local_def(item.id),
- &fcx.inh.param_env.free_substs)
- .iter()
- .map(|f| f.mt.ty)
- .collect()
- });
- }
- ast::ItemEnum(..) => {
- check_type_defn(ccx, item, |fcx| {
- ty::substd_enum_variants(ccx.tcx, local_def(item.id),
- &fcx.inh.param_env.free_substs)
- .iter()
- .flat_map(|variant| {
- variant.args
- .iter()
- .map(|&arg_ty| arg_ty)
- })
- .collect()
- });
- }
- _ => {}
- }
-
- fn check_type_defn(ccx: &CrateCtxt,
- item: &ast::Item,
- lookup_fields: |&FnCtxt| -> Vec<ty::t>)
- {
- let item_def_id = local_def(item.id);
- let polytype = ty::lookup_item_type(ccx.tcx, item_def_id);
- let param_env =
- ty::construct_parameter_environment(ccx.tcx,
- &polytype.generics,
- item.id);
- let inh = Inherited::new(ccx.tcx, param_env);
- let fcx = blank_fn_ctxt(ccx, &inh, polytype.ty, item.id);
- let field_tys = lookup_fields(&fcx);
- regionck::regionck_type_defn(&fcx, item.span, field_tys.as_slice());
- }
-}
-
pub fn check_item_sized(ccx: &CrateCtxt, it: &ast::Item) {
debug!("check_item(it.id={}, it.ident={})",
it.id,
ast::MethodImplItem(ref m) => {
check_method_body(ccx, &impl_pty.generics, &**m);
}
+ ast::TypeImplItem(_) => {
+ // Nothing to do here.
+ }
}
}
ast_trait_ref,
&*impl_trait_ref,
impl_items.as_slice());
- vtable::resolve_impl(ccx.tcx, it, &impl_pty.generics, &*impl_trait_ref);
}
None => { }
}
ProvidedMethod(ref m) => {
check_method_body(ccx, &trait_def.generics, &**m);
}
+ TypeTraitItem(_) => {
+ // Nothing to do.
+ }
}
}
}
&**trait_method_ty,
&impl_trait_ref.substs);
}
+ _ => {
+ // This is span_bug as it should have already been
+ // caught in resolve.
+ tcx.sess
+ .span_bug(impl_method.span,
+ format!("item `{}` is of a \
+ different kind from \
+ its trait `{}`",
+ token::get_ident(
+ impl_item_ty.ident()),
+ pprust::path_to_string(
+ &ast_trait_ref.path))
+ .as_slice());
+ }
}
}
None => {
}
}
}
+ ast::TypeImplItem(ref typedef) => {
+ let typedef_def_id = local_def(typedef.id);
+ let typedef_ty = ty::impl_or_trait_item(ccx.tcx,
+ typedef_def_id);
+
+ // If this is an impl of an associated type, find the
+ // corresponding type definition in the trait.
+ let opt_associated_type =
+ trait_items.iter()
+ .find(|ti| {
+ ti.ident().name == typedef_ty.ident().name
+ });
+ match opt_associated_type {
+ Some(associated_type) => {
+ match (associated_type, &typedef_ty) {
+ (&ty::TypeTraitItem(_),
+ &ty::TypeTraitItem(_)) => {}
+ _ => {
+ // This is `span_bug` as it should have
+ // already been caught in resolve.
+ tcx.sess
+ .span_bug(typedef.span,
+ format!("item `{}` is of a \
+ different kind from \
+ its trait `{}`",
+ token::get_ident(
+ typedef_ty.ident()),
+ pprust::path_to_string(
+ &ast_trait_ref.path))
+ .as_slice());
+ }
+ }
+ }
+ None => {
+ // This is `span_bug` as it should have already been
+ // caught in resolve.
+ tcx.sess.span_bug(
+ typedef.span,
+ format!(
+ "associated type `{}` is not a member of \
+ trait `{}`",
+ token::get_ident(typedef_ty.ident()),
+ pprust::path_to_string(
+ &ast_trait_ref.path)).as_slice());
+ }
+ }
+ }
}
}
- // Check for missing methods from trait
+ // Check for missing items from trait
let provided_methods = ty::provided_trait_methods(tcx,
impl_trait_ref.def_id);
let mut missing_methods = Vec::new();
ast::MethodImplItem(ref m) => {
m.pe_ident().name == trait_method.ident.name
}
+ ast::TypeImplItem(_) => false,
}
});
let is_provided =
token::get_ident(trait_method.ident)));
}
}
+ ty::TypeTraitItem(ref associated_type) => {
+ let is_implemented = impl_items.iter().any(|ii| {
+ match *ii {
+ ast::TypeImplItem(ref typedef) => {
+ typedef.ident.name == associated_type.ident.name
+ }
+ ast::MethodImplItem(_) => false,
+ }
+ });
+ if !is_implemented {
+ missing_methods.push(
+ format!("`{}`",
+ token::get_ident(associated_type.ident)));
+ }
+ }
}
}
if !missing_methods.is_empty() {
span_err!(tcx.sess, impl_span, E0046,
- "not all trait methods implemented, missing: {}",
+ "not all trait items implemented, missing: {}",
missing_methods.connect(", "));
}
}
impl_m_body_id: ast::NodeId,
trait_m: &ty::Method,
trait_to_impl_substs: &subst::Substs) {
- debug!("compare_impl_method()");
+ debug!("compare_impl_method(trait_to_impl_substs={})",
+ trait_to_impl_substs.repr(tcx));
let infcx = infer::new_infer_ctxt(tcx);
// Try to give more informative error messages about self typing
// FIXME(pcwalton): We could be laxer here regarding sub- and super-
// traits, but I doubt that'll be wanted often, so meh.
for impl_trait_bound in impl_param_def.bounds.trait_bounds.iter() {
+ debug!("compare_impl_method(): impl-trait-bound subst");
let impl_trait_bound =
impl_trait_bound.subst(tcx, &impl_to_skol_substs);
let mut ok = false;
for trait_bound in trait_param_def.bounds.trait_bounds.iter() {
+ debug!("compare_impl_method(): trait-bound subst");
let trait_bound =
trait_bound.subst(tcx, &trait_to_skol_substs);
let infcx = infer::new_infer_ctxt(tcx);
// other words, anyone expecting to call a method with the type
// from the trait, can safely call a method with the type from the
// impl instead.
+ debug!("checking trait method for compatibility: impl ty {}, trait ty {}",
+ impl_fty.repr(tcx),
+ trait_fty.repr(tcx));
match infer::mk_subty(&infcx, false, infer::MethodCompatCheck(impl_m_span),
impl_fty, trait_fty) {
Ok(()) => {}
}
fn check_cast(fcx: &FnCtxt,
+ cast_expr: &ast::Expr,
e: &ast::Expr,
- t: &ast::Ty,
- id: ast::NodeId,
- span: Span) {
+ t: &ast::Ty) {
+ let id = cast_expr.id;
+ let span = cast_expr.span;
+
// Find the type of `e`. Supply hints based on the type we are casting to,
// if appropriate.
let t_1 = fcx.to_ty(t);
if ty::type_is_trait(t_1) {
// This will be looked up later on.
+ vtable2::check_object_cast(fcx, cast_expr, e, t_1);
fcx.write_ty(id, t_1);
return
}
fn ty_infer(&self, _span: Span) -> ty::t {
self.infcx().next_ty_var()
}
+
+ fn associated_types_of_trait_are_valid(&self, _: ty::t, _: ast::DefId)
+ -> bool {
+ false
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ _: Option<ty::t>,
+ _: ast::DefId,
+ _: ast::DefId)
+ -> ty::t {
+ self.tcx().sess.span_err(span, "unsupported associated type binding");
+ ty::mk_err()
+ }
}
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub fn err_count_since_creation(&self) -> uint {
self.ccx.tcx.sess.err_count() - self.err_count_on_creation
}
-
- pub fn vtable_context<'a>(&'a self) -> VtableContext<'a, 'tcx> {
- VtableContext {
- infcx: self.infcx(),
- param_env: &self.inh.param_env,
- unboxed_closures: &self.inh.unboxed_closures,
- }
- }
}
impl<'a, 'tcx> RegionScope for infer::InferCtxt<'a, 'tcx> {
self.inh.node_types.borrow_mut().insert(node_id, ty);
}
+ /// Records, in the inherited `object_cast_map`, the trait that the
+ /// expression node `key` was cast to. The map is keyed by the
+ /// expression's node id.
+ pub fn write_object_cast(&self,
+ key: ast::NodeId,
+ trait_ref: Rc<ty::TraitRef>) {
+ debug!("write_object_cast key={} trait_ref={}",
+ key, trait_ref.repr(self.tcx()));
+ self.inh.object_cast_map.borrow_mut().insert(key, trait_ref);
+ }
+
pub fn write_substs(&self, node_id: ast::NodeId, substs: ty::ItemSubsts) {
if !substs.substs.is_noop() {
debug!("write_substs({}, {}) in fcx {}",
pub fn write_autoderef_adjustment(&self,
node_id: ast::NodeId,
+ span: Span,
derefs: uint) {
if derefs == 0 { return; }
self.write_adjustment(
node_id,
- ty::AutoDerefRef(ty::AutoDerefRef {
+ span,
+ ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: derefs,
autoref: None })
);
pub fn write_adjustment(&self,
node_id: ast::NodeId,
+ span: Span,
adj: ty::AutoAdjustment) {
debug!("write_adjustment(node_id={:?}, adj={:?})", node_id, adj);
+
+ // Careful: adjustments can imply trait obligations if we are
+ // casting from a concrete type to an object type. I think
+ // it'd probably be nicer to move the logic that creates the
+ // obligation into the code that creates the adjustment, but
+ // that's a bit awkward, so instead we go digging and pull the
+ // obligation out here.
+ self.register_adjustment_obligations(span, &adj);
self.inh.adjustments.borrow_mut().insert(node_id, adj);
}
+ /// Walks an auto-adjustment and registers any trait obligations it
+ /// implies. Only a deref/ref adjustment carrying an autoref can
+ /// produce obligations; `AdjustAddEnv` is a no-op here.
+ fn register_adjustment_obligations(&self,
+ span: Span,
+ adj: &ty::AutoAdjustment) {
+ match *adj {
+ ty::AdjustAddEnv(..) => { }
+ ty::AdjustDerefRef(ref d_r) => {
+ match d_r.autoref {
+ Some(ref a_r) => {
+ self.register_autoref_obligations(span, a_r);
+ }
+ None => {}
+ }
+ }
+ }
+ }
+
+ /// Registers obligations implied by an autoref. Unsizing autorefs
+ /// delegate to `register_unsize_obligations`; pointer autorefs with a
+ /// nested autoref recurse into it; plain pointer autorefs need nothing.
+ fn register_autoref_obligations(&self,
+ span: Span,
+ autoref: &ty::AutoRef) {
+ match *autoref {
+ ty::AutoUnsize(ref unsize) => {
+ self.register_unsize_obligations(span, unsize);
+ }
+ ty::AutoPtr(_, _, None) |
+ ty::AutoUnsafe(_, None) => {
+ }
+ ty::AutoPtr(_, _, Some(ref a_r)) |
+ ty::AutoUnsafe(_, Some(ref a_r)) => {
+ self.register_autoref_obligations(span, &**a_r)
+ }
+ ty::AutoUnsizeUniq(ref unsize) => {
+ self.register_unsize_obligations(span, unsize);
+ }
+ }
+ }
+
+ /// Registers obligations implied by an unsize operation. Length
+ /// unsizing ([T, ..n] -> [T]) needs none; struct unsizing recurses
+ /// into the field's unsize kind; vtable unsizing (concrete type ->
+ /// trait object) registers the object-cast obligations.
+ fn register_unsize_obligations(&self,
+ span: Span,
+ unsize: &ty::UnsizeKind) {
+ debug!("register_unsize_obligations: unsize={:?}", unsize);
+
+ match *unsize {
+ ty::UnsizeLength(..) => {}
+ ty::UnsizeStruct(ref u, _) => {
+ self.register_unsize_obligations(span, &**u)
+ }
+ ty::UnsizeVtable(ref ty_trait, self_ty) => {
+ vtable2::register_object_cast_obligations(self,
+ span,
+ ty_trait,
+ self_ty);
+ }
+ }
+ }
+
+ pub fn instantiate_item_type(&self,
+ span: Span,
+ def_id: ast::DefId)
+ -> TypeAndSubsts
+ {
+ /*!
+ * Returns the type of `def_id` with all generics replaced by
+ * fresh type/region variables. Also returns the
+ * substitution from the type parameters on `def_id` to the
+ * fresh variables. Registers any trait obligations specified
+ * on `def_id` at the same time.
+ */
+
+ let polytype =
+ ty::lookup_item_type(self.tcx(), def_id);
+ let substs =
+ self.infcx().fresh_substs_for_generics(
+ span,
+ &polytype.generics);
+ // Bounds declared on the item's generics become obligations
+ // against the freshly created substitutions.
+ self.add_obligations_for_parameters(
+ traits::ObligationCause::new(
+ span,
+ traits::ItemObligation(def_id)),
+ &substs,
+ &polytype.generics);
+ let monotype =
+ polytype.ty.subst(self.tcx(), &substs);
+
+ TypeAndSubsts {
+ ty: monotype,
+ substs: substs
+ }
+ }
+
pub fn write_nil(&self, node_id: ast::NodeId) {
self.write_ty(node_id, ty::mk_nil());
}
self.write_ty(node_id, ty::mk_err());
}
+ /// Registers an obligation that `ty` must satisfy the builtin bound
+ /// `bound`. `code` records why the requirement arose and `span` where,
+ /// presumably for use in error reporting.
+ pub fn require_type_meets(&self,
+ ty: ty::t,
+ span: Span,
+ code: traits::ObligationCauseCode,
+ bound: ty::BuiltinBound)
+ {
+ self.register_obligation(
+ traits::obligation_for_builtin_bound(
+ self.tcx(),
+ traits::ObligationCause::new(span, code),
+ ty,
+ bound));
+ }
+
+ /// Convenience wrapper: requires that `ty` satisfy `ty::BoundSized`.
+ pub fn require_type_is_sized(&self,
+ ty: ty::t,
+ span: Span,
+ code: traits::ObligationCauseCode)
+ {
+ self.require_type_meets(ty, span, code, ty::BoundSized);
+ }
+
+ /// Requires that the type already recorded for `expr` be `Sized`,
+ /// reporting at the expression's own span.
+ pub fn require_expr_have_sized_type(&self,
+ expr: &ast::Expr,
+ code: traits::ObligationCauseCode)
+ {
+ self.require_type_is_sized(self.expr_ty(expr), expr.span, code);
+ }
+
+ /// Adds a trait obligation to the inherited fulfillment context,
+ /// to be selected/resolved later.
+ pub fn register_obligation(&self,
+ obligation: traits::Obligation)
+ {
+ debug!("register_obligation({})",
+ obligation.repr(self.tcx()));
+
+ self.inh.fulfillment_cx
+ .borrow_mut()
+ .register_obligation(self.tcx(), obligation);
+ }
+
pub fn to_ty(&self, ast_t: &ast::Ty) -> ty::t {
- ast_ty_to_ty(self, self.infcx(), ast_t)
+ let t = ast_ty_to_ty(self, self.infcx(), ast_t);
+
+ let mut bounds_checker = wf::BoundsChecker::new(self,
+ ast_t.span,
+ self.body_id,
+ None);
+ bounds_checker.check_ty(t);
+
+ t
}
pub fn pat_to_string(&self, pat: &ast::Pat) -> String {
Ok(None) => Ok(()),
Err(ref e) => Err((*e)),
Ok(Some(adjustment)) => {
- self.write_adjustment(expr.id, adjustment);
+ self.write_adjustment(expr.id, expr.span, adjustment);
Ok(())
}
}
origin: origin });
}
- pub fn add_region_obligations_for_parameters(&self,
- span: Span,
- substs: &Substs,
- generics: &ty::Generics)
+ pub fn add_obligations_for_parameters(&self,
+ cause: traits::ObligationCause,
+ substs: &Substs,
+ generics: &ty::Generics)
{
/*!
* Given a set of generic parameter definitions (`generics`)
* locally.
*/
- debug!("add_region_obligations_for_parameters(substs={}, generics={})",
+ debug!("add_obligations_for_parameters(substs={}, generics={})",
substs.repr(self.tcx()),
generics.repr(self.tcx()));
+ self.add_trait_obligations_for_generics(cause, substs, generics);
+ self.add_region_obligations_for_generics(cause, substs, generics);
+ }
+
+ fn add_trait_obligations_for_generics(&self,
+ cause: traits::ObligationCause,
+ substs: &Substs,
+ generics: &ty::Generics) {
+ let obligations =
+ traits::obligations_for_generics(self.tcx(),
+ cause,
+ generics,
+ substs);
+ obligations.map_move(|o| self.register_obligation(o));
+ }
+
+ fn add_region_obligations_for_generics(&self,
+ cause: traits::ObligationCause,
+ substs: &Substs,
+ generics: &ty::Generics)
+ {
assert_eq!(generics.types.iter().len(),
substs.types.iter().len());
for (type_def, &type_param) in
idx: type_def.index,
def_id: type_def.def_id };
let bounds = type_def.bounds.subst(self.tcx(), substs);
- add_region_obligations_for_type_parameter(
- self, span, param_ty, &bounds, type_param);
+ self.add_region_obligations_for_type_parameter(
+ cause.span, param_ty, &bounds, type_param);
}
assert_eq!(generics.regions.iter().len(),
substs.regions().iter())
{
let bounds = region_def.bounds.subst(self.tcx(), substs);
- add_region_obligations_for_region_parameter(
- self, span, bounds.as_slice(), region_param);
- }
-
- fn add_region_obligations_for_type_parameter(
- fcx: &FnCtxt,
- span: Span,
- param_ty: ty::ParamTy,
- param_bound: &ty::ParamBounds,
- ty: ty::t)
- {
- // For each declared region bound `T:r`, `T` must outlive `r`.
- let region_bounds =
- ty::required_region_bounds(
- fcx.tcx(),
- param_bound.opt_region_bound.as_slice(),
- param_bound.builtin_bounds,
- param_bound.trait_bounds.as_slice());
- for &r in region_bounds.iter() {
- let origin = infer::RelateParamBound(span, param_ty, ty);
- fcx.register_region_obligation(origin, ty, r);
- }
+ self.add_region_obligations_for_region_parameter(
+ cause.span, bounds.as_slice(), region_param);
}
+ }
- fn add_region_obligations_for_region_parameter(
- fcx: &FnCtxt,
- span: Span,
- region_bounds: &[ty::Region],
- region_param: ty::Region)
- {
- for &b in region_bounds.iter() {
- // For each bound `region:b`, `b <= region` must hold
- // (i.e., `region` must outlive `b`).
- let origin = infer::RelateRegionParamBound(span);
- fcx.mk_subr(origin, b, region_param);
- }
+ fn add_region_obligations_for_type_parameter(&self,
+ span: Span,
+ param_ty: ty::ParamTy,
+ param_bound: &ty::ParamBounds,
+ ty: ty::t)
+ {
+ // For each declared region bound `T:r`, `T` must outlive `r`.
+ let region_bounds =
+ ty::required_region_bounds(
+ self.tcx(),
+ param_bound.region_bounds.as_slice(),
+ param_bound.builtin_bounds,
+ param_bound.trait_bounds.as_slice());
+ for &r in region_bounds.iter() {
+ let origin = infer::RelateParamBound(span, param_ty, ty);
+ self.register_region_obligation(origin, ty, r);
+ }
+ }
+
+ fn add_region_obligations_for_region_parameter(&self,
+ span: Span,
+ region_bounds: &[ty::Region],
+ region_param: ty::Region)
+ {
+ for &b in region_bounds.iter() {
+ // For each bound `region:b`, `b <= region` must hold
+ // (i.e., `region` must outlive `b`).
+ let origin = infer::RelateRegionParamBound(span);
+ self.mk_subr(origin, b, region_param);
}
}
}
}
/// Attempts to resolve a call expression as an overloaded call.
-fn try_overloaded_call(fcx: &FnCtxt,
- call_expression: &ast::Expr,
- callee: &ast::Expr,
- callee_type: ty::t,
- args: &[P<ast::Expr>])
- -> bool {
+fn try_overloaded_call<'a>(fcx: &FnCtxt,
+ call_expression: &ast::Expr,
+ callee: &ast::Expr,
+ callee_type: ty::t,
+ args: &[&'a P<ast::Expr>])
+ -> bool {
// Bail out if the callee is a bare function or a closure. We check those
// manually.
match *structure_of(fcx, callee.span, callee_type) {
fcx.inh.method_map.borrow_mut().insert(method_call, method_callee);
write_call(fcx, call_expression, output_type);
- if !fcx.tcx().sess.features.overloaded_calls.get() {
+ if !fcx.tcx().sess.features.borrow().overloaded_calls {
span_err!(fcx.tcx().sess, call_expression.span, E0056,
"overloaded calls are experimental");
span_note!(fcx.tcx().sess, call_expression.span,
(method, _) => method
};
+ make_return_type(fcx, method_call, method)
+}
+
+/// Returns the type of a resolved method callee, or `ty::mk_err()` when
+/// the lookup failed, so that argument checking can still proceed with
+/// *some* type.
+fn get_method_ty(method: &Option<MethodCallee>) -> ty::t {
+ match method {
+ &Some(ref method) => method.ty,
+ &None => ty::mk_err()
+ }
+}
+
+fn make_return_type(fcx: &FnCtxt,
+ method_call: Option<MethodCall>,
+ method: Option<MethodCallee>)
+ -> Option<ty::mt> {
match method {
Some(method) => {
let ref_ty = ty::ty_fn_ret(method.ty);
match method_call {
Some(method_call) => {
- fcx.inh.method_map.borrow_mut().insert(method_call, method);
+ fcx.inh.method_map.borrow_mut().insert(method_call,
+ method);
}
None => {}
}
ty::deref(ref_ty, true)
}
- None => None
+ None => None,
+ }
+}
+
+/// Attempts to resolve a slice expression (`ast::ExprSlice`) as an
+/// overloaded call. For a mutable slice the `SliceMut` lang-item trait is
+/// tried; otherwise the `Slice` trait. The method name is chosen from the
+/// presence/absence of the start and end index expressions. On success the
+/// resolved method is recorded under `method_call` (if given) and the
+/// method's return type is produced; returns `None` if no method matched.
+fn try_overloaded_slice(fcx: &FnCtxt,
+ method_call: Option<MethodCall>,
+ expr: &ast::Expr,
+ base_expr: &ast::Expr,
+ base_ty: ty::t,
+ start_expr: &Option<P<ast::Expr>>,
+ end_expr: &Option<P<ast::Expr>>,
+ mutbl: &ast::Mutability)
+ -> Option<ty::mt> {
+ let method = if mutbl == &ast::MutMutable {
+ // Try `SliceMut` first, if preferred.
+ match fcx.tcx().lang_items.slice_mut_trait() {
+ Some(trait_did) => {
+ let method_name = match (start_expr, end_expr) {
+ (&Some(_), &Some(_)) => "slice_mut_",
+ (&Some(_), &None) => "slice_from_mut_",
+ (&None, &Some(_)) => "slice_to_mut_",
+ (&None, &None) => "as_mut_slice_",
+ };
+
+ method::lookup_in_trait(fcx,
+ expr.span,
+ Some(&*base_expr),
+ token::intern(method_name),
+ trait_did,
+ base_ty,
+ [],
+ DontAutoderefReceiver,
+ IgnoreStaticMethods)
+ }
+ _ => None,
+ }
+ } else {
+ // Otherwise, fall back to `Slice`.
+ // FIXME(#17293) this will not coerce base_expr, so we miss the Slice
+ // trait for `&mut [T]`.
+ match fcx.tcx().lang_items.slice_trait() {
+ Some(trait_did) => {
+ let method_name = match (start_expr, end_expr) {
+ (&Some(_), &Some(_)) => "slice_",
+ (&Some(_), &None) => "slice_from_",
+ (&None, &Some(_)) => "slice_to_",
+ (&None, &None) => "as_slice_",
+ };
+
+ method::lookup_in_trait(fcx,
+ expr.span,
+ Some(&*base_expr),
+ token::intern(method_name),
+ trait_did,
+ base_ty,
+ [],
+ DontAutoderefReceiver,
+ IgnoreStaticMethods)
+ }
+ _ => None,
+ }
+ };
+
+
+ // Regardless of whether the lookup succeeds, check the method arguments
+ // so that we have *some* type for each argument.
+ let method_type = get_method_ty(&method);
+
+ // Only the index expressions that are actually present become arguments.
+ let mut args = vec![];
+ start_expr.as_ref().map(|x| args.push(x));
+ end_expr.as_ref().map(|x| args.push(x));
+
+ check_method_argument_types(fcx,
+ expr.span,
+ method_type,
+ expr,
+ args.as_slice(),
+ DoDerefArgs,
+ DontTupleArguments);
+
+ // On success, record the resolved method and report its return type.
+ match method {
+ Some(method) => {
+ let result_ty = ty::ty_fn_ret(method.ty);
+ match method_call {
+ Some(method_call) => {
+ fcx.inh.method_map.borrow_mut().insert(method_call,
+ method);
+ }
+ None => {}
+ }
+ Some(ty::mt { ty: result_ty, mutbl: ast::MutImmutable })
+ }
+ None => None,
+ }
+}
// Regardless of whether the lookup succeeds, check the method arguments
// so that we have *some* type for each argument.
- let method_type = match method {
- Some(ref method) => method.ty,
- None => ty::mk_err()
- };
+ let method_type = get_method_ty(&method);
check_method_argument_types(fcx,
expr.span,
method_type,
expr,
- slice::ref_slice(index_expr),
+ &[index_expr],
DoDerefArgs,
DontTupleArguments);
- match method {
- Some(method) => {
- let ref_ty = ty::ty_fn_ret(method.ty);
- match method_call {
- Some(method_call) => {
- fcx.inh.method_map.borrow_mut().insert(method_call,
- method);
- }
- None => {}
- }
- ty::deref(ref_ty, true)
- }
- None => None,
- }
+ make_return_type(fcx, method_call, method)
}
/// Given the head of a `for` expression, looks up the `next` method in the
}
}
-fn check_method_argument_types(fcx: &FnCtxt,
- sp: Span,
- method_fn_ty: ty::t,
- callee_expr: &ast::Expr,
- args_no_rcvr: &[P<ast::Expr>],
- deref_args: DerefArgs,
- tuple_arguments: TupleArgumentsFlag)
- -> ty::t {
+fn check_method_argument_types<'a>(fcx: &FnCtxt,
+ sp: Span,
+ method_fn_ty: ty::t,
+ callee_expr: &ast::Expr,
+ args_no_rcvr: &[&'a P<ast::Expr>],
+ deref_args: DerefArgs,
+ tuple_arguments: TupleArgumentsFlag)
+ -> ty::t {
if ty::type_is_error(method_fn_ty) {
let err_inputs = err_args(args_no_rcvr.len());
check_argument_types(fcx,
}
}
-fn check_argument_types(fcx: &FnCtxt,
- sp: Span,
- fn_inputs: &[ty::t],
- callee_expr: &ast::Expr,
- args: &[P<ast::Expr>],
- deref_args: DerefArgs,
- variadic: bool,
- tuple_arguments: TupleArgumentsFlag) {
+fn check_argument_types<'a>(fcx: &FnCtxt,
+ sp: Span,
+ fn_inputs: &[ty::t],
+ _callee_expr: &ast::Expr,
+ args: &[&'a P<ast::Expr>],
+ deref_args: DerefArgs,
+ variadic: bool,
+ tuple_arguments: TupleArgumentsFlag) {
/*!
*
* Generic function that factors out common logic from
// an "opportunistic" vtable resolution of any trait
// bounds on the call.
if check_blocks {
- vtable::early_resolve_expr(callee_expr, fcx, true);
+ vtable2::select_fcx_obligations_where_possible(fcx);
}
// For variadic functions, we don't have a declared type for all of
DontDerefArgs => {}
}
- check_expr_coercable_to_type(fcx, &**arg, formal_ty);
+ check_expr_coercable_to_type(fcx, &***arg, formal_ty);
}
}
}
// arguments which we skipped above.
if variadic {
for arg in args.iter().skip(expected_arg_count) {
- check_expr(fcx, &**arg);
+ check_expr(fcx, &***arg);
// There are a few types which get autopromoted when passed via varargs
// in C but we just error out instead and require explicit casts.
let arg_ty = structurally_resolved_type(fcx, arg.span,
- fcx.expr_ty(&**arg));
+ fcx.expr_ty(&***arg));
match ty::get(arg_ty).sty {
ty::ty_float(ast::TyF32) => {
fcx.type_error_message(arg.span,
check_expr_with_unifier(fcx, expr, NoExpectation, lvalue_pref, || ())
}
-
// determine the `self` type, using fresh variables for all variables
// declared on the impl declaration e.g., `impl<A,B> for ~[(A,B)]`
// would return ($0, $1) where $0 and $1 are freshly instantiated type
// variables.
-pub fn impl_self_ty(vcx: &VtableContext,
+pub fn impl_self_ty(fcx: &FnCtxt,
span: Span, // (potential) receiver for this impl
did: ast::DefId)
-> TypeAndSubsts {
- let tcx = vcx.tcx();
+ let tcx = fcx.tcx();
let ity = ty::lookup_item_type(tcx, did);
let (n_tps, rps, raw_ty) =
ity.generics.regions.get_slice(subst::TypeSpace),
ity.ty);
- let rps = vcx.infcx.region_vars_for_defs(span, rps);
- let tps = vcx.infcx.next_ty_vars(n_tps);
+ let rps = fcx.inh.infcx.region_vars_for_defs(span, rps);
+ let tps = fcx.inh.infcx.next_ty_vars(n_tps);
let substs = subst::Substs::new_type(tps, rps);
let substd_ty = raw_ty.subst(tcx, &substs);
expr.repr(fcx.tcx()), expected.repr(fcx.tcx()));
// A generic function for doing all of the checking for call expressions
- fn check_call(fcx: &FnCtxt,
- call_expr: &ast::Expr,
- f: &ast::Expr,
- args: &[P<ast::Expr>]) {
+ fn check_call<'a>(fcx: &FnCtxt,
+ call_expr: &ast::Expr,
+ f: &ast::Expr,
+ args: &[&'a P<ast::Expr>]) {
// Store the type of `f` as the type of the callee
let fn_ty = fcx.expr_ty(f);
};
// Call the generic checker.
+ let args: Vec<_> = args.slice_from(1).iter().map(|x| x).collect();
let ret_ty = check_method_argument_types(fcx,
method_name.span,
fn_ty,
expr,
- args.slice_from(1),
+ args.as_slice(),
DontDerefArgs,
DontTupleArguments);
None => None
};
let args = match rhs {
- Some(rhs) => slice::ref_slice(rhs),
- None => {
- // Work around the lack of coercion.
- let empty: &[_] = &[];
- empty
- }
+ Some(rhs) => vec![rhs],
+ None => vec![]
};
match method {
Some(method) => {
op_ex.span,
method_ty,
op_ex,
- args,
+ args.as_slice(),
DoDerefArgs,
DontTupleArguments)
}
op_ex.span,
expected_ty,
op_ex,
- args,
+ args.as_slice(),
DoDerefArgs,
DontTupleArguments);
ty::mk_err()
match field_ty {
Some(field_ty) => {
fcx.write_ty(expr.id, field_ty);
- fcx.write_autoderef_adjustment(base.id, autoderefs);
+ fcx.write_autoderef_adjustment(base.id, base.span, autoderefs);
return;
}
None => {}
match field_ty {
Some(field_ty) => {
fcx.write_ty(expr.id, field_ty);
- fcx.write_autoderef_adjustment(base.id, autoderefs);
+ fcx.write_autoderef_adjustment(base.id, base.span, autoderefs);
return;
}
None => {}
base_expr: Option<&ast::Expr>) {
let tcx = fcx.ccx.tcx;
- // Look up the number of type parameters and the raw type, and
- // determine whether the class is region-parameterized.
- let item_type = ty::lookup_item_type(tcx, class_id);
- let raw_type = item_type.ty;
-
// Generate the struct type.
- let substitutions = fcx.infcx().fresh_substs_for_type(
- span, &item_type.generics);
- let mut struct_type = raw_type.subst(tcx, &substitutions);
+ let TypeAndSubsts {
+ ty: mut struct_type,
+ substs: struct_substs
+ } = fcx.instantiate_item_type(span, class_id);
// Look up and check the fields.
let class_fields = ty::lookup_struct_fields(tcx, class_id);
span,
class_id,
id,
- substitutions,
+ struct_substs,
class_fields.as_slice(),
fields,
base_expr.is_none());
// Look up the number of type parameters and the raw type, and
// determine whether the enum is region-parameterized.
- let item_type = ty::lookup_item_type(tcx, enum_id);
- let substitutions = fcx.infcx().fresh_substs_for_type(span, &item_type.generics);
- let enum_type = item_type.ty.subst(tcx, &substitutions);
+ let TypeAndSubsts {
+ ty: enum_type,
+ substs: substitutions
+ } = fcx.instantiate_item_type(span, enum_id);
// Look up and check the enum variant fields.
let variant_fields = ty::lookup_struct_fields(tcx, variant_id);
span_err!(tcx.sess, lhs.span, E0067, "illegal left-hand side expression");
}
+ fcx.require_expr_have_sized_type(&**lhs, traits::AssignmentLhsSized);
+
// Overwrite result of check_binop...this preserves existing behavior
// but seems quite dubious with regard to user-defined methods
// and so forth. - Niko
check_expr_has_type(fcx, &**rhs, lhs_ty);
let rhs_ty = fcx.expr_ty(&**rhs);
+ fcx.require_expr_have_sized_type(&**lhs, traits::AssignmentLhsSized);
+
if ty::type_is_error(lhs_ty) || ty::type_is_error(rhs_ty) {
fcx.write_error(id);
} else if ty::type_is_bot(lhs_ty) || ty::type_is_bot(rhs_ty) {
ast::ExprForLoop(ref pat, ref head, ref block, _) => {
check_expr(fcx, &**head);
let typ = lookup_method_for_for_loop(fcx, &**head, expr.id);
- vtable::early_resolve_expr(expr, fcx, true);
+ vtable2::select_fcx_obligations_where_possible(fcx);
let pcx = pat_ctxt {
fcx: fcx,
check_expr(fcx, &**f);
let f_ty = fcx.expr_ty(&**f);
+ let args: Vec<_> = args.iter().map(|x| x).collect();
if !try_overloaded_call(fcx, expr, &**f, f_ty, args.as_slice()) {
check_call(fcx, expr, &**f, args.as_slice());
let (args_bot, args_err) = args.iter().fold((false, false),
|(rest_bot, rest_err), a| {
// is this not working?
- let a_ty = fcx.expr_ty(&**a);
+ let a_ty = fcx.expr_ty(&***a);
(rest_bot || ty::type_is_bot(a_ty),
rest_err || ty::type_is_error(a_ty))});
if ty::type_is_error(f_ty) || args_err {
}
_ => {}
}
- check_cast(fcx, &**e, &**t, id, expr.span);
+ check_cast(fcx, expr, &**e, &**t);
}
ast::ExprVec(ref args) => {
let uty = match expected {
}
};
+ if count > 1 {
+ // For [foo, ..n] where n > 1, `foo` must have
+ // Copy type:
+ fcx.require_type_meets(
+ t,
+ expr.span,
+ traits::RepeatVec,
+ ty::BoundCopy);
+ }
+
if ty::type_is_error(element_ty) {
fcx.write_error(id);
} else if ty::type_is_bot(element_ty) {
}
}
}
+
+ fcx.require_expr_have_sized_type(expr, traits::StructInitializerSized);
}
ast::ExprField(ref base, ref field, ref tys) => {
check_field(fcx, expr, lvalue_pref, &**base, field, tys.as_slice());
Some(ty) => {
check_expr_has_type(fcx, &**idx, ty::mk_uint());
fcx.write_ty(id, ty);
- fcx.write_autoderef_adjustment(base.id, autoderefs);
+ fcx.write_autoderef_adjustment(base.id, base.span, autoderefs);
}
None => {
// This is an overloaded method.
}
}
}
+ ast::ExprSlice(ref base, ref start, ref end, ref mutbl) => {
+ check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
+ let raw_base_t = fcx.expr_ty(&**base);
+
+ let mut some_err = false;
+ if ty::type_is_error(raw_base_t) || ty::type_is_bot(raw_base_t) {
+ fcx.write_ty(id, raw_base_t);
+ some_err = true;
+ }
+
+ {
+ let check_slice_idx = |e: &ast::Expr| {
+ check_expr(fcx, e);
+ let e_t = fcx.expr_ty(e);
+ if ty::type_is_error(e_t) || ty::type_is_bot(e_t) {
+ fcx.write_ty(id, e_t);
+ some_err = true;
+ }
+ };
+ start.as_ref().map(|e| check_slice_idx(&**e));
+ end.as_ref().map(|e| check_slice_idx(&**e));
+ }
+
+ if !some_err {
+ let base_t = structurally_resolved_type(fcx,
+ expr.span,
+ raw_base_t);
+ let method_call = MethodCall::expr(expr.id);
+ match try_overloaded_slice(fcx,
+ Some(method_call),
+ expr,
+ &**base,
+ base_t,
+ start,
+ end,
+ mutbl) {
+ Some(mt) => fcx.write_ty(id, mt.ty),
+ None => {
+ fcx.type_error_message(expr.span,
+ |actual| {
+ format!("cannot take a {}slice of a value with type `{}`",
+ if mutbl == &ast::MutMutable {
+ "mutable "
+ } else {
+ ""
+ },
+ actual)
+ },
+ base_t,
+ None);
+ fcx.write_ty(id, ty::mk_err())
+ }
+ }
+ }
+ }
}
debug!("type of expr({}) {} is...", expr.id,
check_expr_with_hint(fcx, e, declty);
demand::coerce(fcx, e.span, declty, e);
+ vtable2::select_all_fcx_obligations_or_error(fcx);
regionck::regionck_expr(fcx, e);
writeback::resolve_type_vars_in_expr(fcx, e);
+ vtable2::check_builtin_bound_obligations(fcx);
}
/// Checks whether a type can be represented in memory. In particular, it
defn: def::Def)
-> Polytype {
match defn {
- def::DefArg(nid, _) | def::DefLocal(nid, _) |
- def::DefBinding(nid, _) => {
+ def::DefLocal(nid) | def::DefUpvar(nid, _, _) => {
let typ = fcx.local_ty(sp, nid);
return no_params(typ);
}
def::DefStruct(id) => {
return ty::lookup_item_type(fcx.ccx.tcx, id);
}
- def::DefUpvar(_, inner, _, _) => {
- return polytype_for_def(fcx, sp, *inner);
- }
def::DefTrait(_) |
- def::DefTy(_) |
+ def::DefTy(..) |
+ def::DefAssociatedTy(..) |
def::DefPrimTy(_) |
def::DefTyParam(..)=> {
fcx.ccx.tcx.sess.span_bug(sp, "expected value, found type");
def::DefVariant(..) |
def::DefTyParamBinder(..) |
def::DefTy(..) |
+ def::DefAssociatedTy(..) |
def::DefTrait(..) |
def::DefPrimTy(..) |
def::DefTyParam(..) => {
// elsewhere. (I hope)
def::DefMod(..) |
def::DefForeignMod(..) |
- def::DefArg(..) |
def::DefLocal(..) |
def::DefMethod(..) |
- def::DefBinding(..) |
def::DefUse(..) |
def::DefRegion(..) |
def::DefLabel(..) |
assert_eq!(substs.regions().len(space), region_defs.len(space));
}
- fcx.add_region_obligations_for_parameters(
- span, &substs, &polytype.generics);
+ fcx.add_obligations_for_parameters(
+ traits::ObligationCause::new(span,
+ traits::ItemObligation(def.def_id())),
+ &substs,
+ &polytype.generics);
fcx.write_ty_substs(node_id, polytype.ty, ty::ItemSubsts {
substs: substs,
*/
use middle::def;
-use middle::def::{DefArg, DefBinding, DefLocal, DefUpvar};
-use middle::freevars;
use middle::mem_categorization as mc;
use middle::ty::{ReScope};
use middle::ty;
fcx.infcx().resolve_regions_and_report_errors();
}
-pub fn regionck_type_defn(fcx: &FnCtxt,
- span: Span,
- component_tys: &[ty::t]) {
- let mut rcx = Rcx::new(fcx, 0);
- for &component_ty in component_tys.iter() {
- // Check that each type outlives the empty region. Since the
- // empty region is a subregion of all others, this can't fail
- // unless the type does not meet the well-formedness
- // requirements.
- type_must_outlive(&mut rcx, infer::RelateRegionParamBound(span),
- component_ty, ty::ReEmpty);
- }
+pub fn regionck_item(fcx: &FnCtxt, item: &ast::Item) {
+ let mut rcx = Rcx::new(fcx, item.id);
+ rcx.visit_region_obligations(item.id);
fcx.infcx().resolve_regions_and_report_errors();
}
fcx.infcx().resolve_regions_and_report_errors();
}
+pub fn regionck_ensure_component_tys_wf(fcx: &FnCtxt,
+ span: Span,
+ component_tys: &[ty::t]) {
+ /*!
+ * Checks that the types in `component_tys` are well-formed.
+ * This will add constraints into the region graph.
+ * Does *not* run `resolve_regions_and_report_errors` and so forth.
+ */
+
+ let mut rcx = Rcx::new(fcx, 0);
+ for &component_ty in component_tys.iter() {
+ // Check that each type outlives the empty region. Since the
+ // empty region is a subregion of all others, this can't fail
+ // unless the type does not meet the well-formedness
+ // requirements.
+ type_must_outlive(&mut rcx, infer::RelateRegionParamBound(span),
+ component_ty, ty::ReEmpty);
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// INTERNALS
let tcx = fcx.tcx();
match def {
- DefLocal(node_id, _) | DefArg(node_id, _) |
- DefBinding(node_id, _) => {
+ def::DefLocal(node_id) => {
tcx.region_maps.var_region(node_id)
}
- DefUpvar(_, subdef, closure_id, body_id) => {
- match ty::ty_closure_store(fcx.node_ty(closure_id)) {
- ty::RegionTraitStore(..) => region_of_def(fcx, *subdef),
- ty::UniqTraitStore => ReScope(body_id)
+ def::DefUpvar(node_id, _, body_id) => {
+ if body_id == ast::DUMMY_NODE_ID {
+ tcx.region_maps.var_region(node_id)
+ } else {
+ ReScope(body_id)
}
}
_ => {
}
fn capture_mode(&self, closure_expr_id: ast::NodeId)
- -> freevars::CaptureMode {
+ -> ast::CaptureClause {
self.tcx().capture_modes.borrow().get_copy(&closure_expr_id)
}
for &adjustment in rcx.fcx.inh.adjustments.borrow().find(&expr.id).iter() {
debug!("adjustment={:?}", adjustment);
match *adjustment {
- ty::AutoDerefRef(ty::AutoDerefRef {autoderefs, autoref: ref opt_autoref}) => {
+ ty::AdjustDerefRef(ty::AutoDerefRef {autoderefs, autoref: ref opt_autoref}) => {
let expr_ty = rcx.resolve_node_type(expr.id);
constrain_autoderefs(rcx, expr, autoderefs, expr_ty);
for autoref in opt_autoref.iter() {
ast::ExprAssignOp(_, ref lhs, ref rhs) => {
if has_method_map {
constrain_call(rcx, expr, Some(&**lhs),
- Some(&**rhs).move_iter(), true);
+ Some(&**rhs).into_iter(), true);
}
adjust_borrow_kind_for_assignment_lhs(rcx, &**lhs);
// implicit "by ref" sort of passing style here. This
// should be converted to an adjustment!
constrain_call(rcx, expr, Some(&**lhs),
- Some(&**rhs).move_iter(), true);
+ Some(&**rhs).into_iter(), true);
visit::walk_expr(rcx, expr);
}
..}) => {
// For closure, ensure that the variables outlive region
// bound, since they are captured by reference.
- freevars::with_freevars(tcx, expr.id, |freevars| {
+ ty::with_freevars(tcx, expr.id, |freevars| {
if freevars.is_empty() {
// No free variables means that the environment
// will be NULL at runtime and hence the closure
..}) => {
// For proc, ensure that the *types* of the variables
// outlive region bound, since they are captured by value.
- freevars::with_freevars(tcx, expr.id, |freevars| {
+ ty::with_freevars(tcx, expr.id, |freevars| {
ensure_free_variable_types_outlive_closure_bound(
rcx, bounds.region_bound, expr, freevars);
});
}
ty::ty_unboxed_closure(_, region) => {
- freevars::with_freevars(tcx, expr.id, |freevars| {
+ ty::with_freevars(tcx, expr.id, |freevars| {
// No free variables means that there is no environment and
// hence the closure has static lifetime. Otherwise, the
// closure must not outlive the variables it closes over
store: ty::RegionTraitStore(..),
..
}) => {
- freevars::with_freevars(tcx, expr.id, |freevars| {
+ ty::with_freevars(tcx, expr.id, |freevars| {
propagate_upupvar_borrow_kind(rcx, expr, freevars);
})
}
rcx: &mut Rcx,
region_bound: ty::Region,
expr: &ast::Expr,
- freevars: &[freevars::freevar_entry])
+ freevars: &[ty::Freevar])
{
/*!
* Make sure that the type of all free variables referenced
rcx: &mut Rcx,
region_bound: ty::Region,
expr: &ast::Expr,
- freevars: &[freevars::freevar_entry])
+ freevars: &[ty::Freevar])
{
/*!
* Make sure that all free variables referenced inside the
fn propagate_upupvar_borrow_kind(rcx: &mut Rcx,
expr: &ast::Expr,
- freevars: &[freevars::freevar_entry]) {
+ freevars: &[ty::Freevar]) {
let tcx = rcx.fcx.ccx.tcx;
debug!("propagate_upupvar_borrow_kind({})", expr.repr(tcx));
for freevar in freevars.iter() {
// determining the final borrow_kind) and propagate that as
// a constraint on the outer closure.
match freevar.def {
- def::DefUpvar(var_id, _, outer_closure_id, _) => {
+ def::DefUpvar(var_id, outer_closure_id, _) => {
// thing being captured is itself an upvar:
let outer_upvar_id = ty::UpvarId {
var_id: var_id,
mc::cat_static_item |
mc::cat_copied_upvar(..) |
mc::cat_local(..) |
- mc::cat_arg(..) |
mc::cat_upvar(..) |
mc::cat_rvalue(..) => {
// These are all "base cases" with independent lifetimes
mc::cat_rvalue(_) |
mc::cat_copied_upvar(_) |
mc::cat_local(_) |
- mc::cat_arg(_) |
mc::cat_upvar(..) => {
return;
}
mc::cat_rvalue(_) |
mc::cat_copied_upvar(_) |
mc::cat_local(_) |
- mc::cat_arg(_) |
mc::cat_upvar(..) => {
return;
}
let param_bound = param_env.bounds.get(param_ty.space, param_ty.idx);
param_bounds =
ty::required_region_bounds(rcx.tcx(),
- param_bound.opt_region_bound.as_slice(),
+ param_bound.region_bounds.as_slice(),
param_bound.builtin_bounds,
param_bound.trait_bounds.as_slice());
// Inspect bounds on this type parameter for any
// region bounds.
- for &r in type_param_def.bounds.opt_region_bound.iter() {
+ for &r in type_param_def.bounds.region_bounds.iter() {
self.stack.push((r, Some(ty)));
self.accumulate_from_ty(type_param_ty);
self.stack.pop().unwrap();
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-
-use middle::ty;
-use middle::ty::{AutoDerefRef, ParamTy};
-use middle::ty_fold::TypeFolder;
-use middle::typeck::astconv::AstConv;
-use middle::typeck::check::{FnCtxt, impl_self_ty};
-use middle::typeck::check::{structurally_resolved_type};
-use middle::typeck::check::regionmanip;
-use middle::typeck::check::writeback;
-use middle::typeck::infer::fixup_err_to_string;
-use middle::typeck::infer::{resolve_and_force_all_but_regions, resolve_type};
-use middle::typeck::infer;
-use middle::typeck::{MethodCall, TypeAndSubsts};
-use middle::typeck::{param_index, vtable_error, vtable_origin, vtable_param};
-use middle::typeck::{vtable_param_res, vtable_res, vtable_static};
-use middle::typeck::{vtable_unboxed_closure};
-use middle::subst;
-use middle::subst::{Subst, VecPerParamSpace};
-use util::common::indenter;
-use util::nodemap::DefIdMap;
-use util::ppaux;
-use util::ppaux::Repr;
-
-use std::cell::RefCell;
-use std::rc::Rc;
-use std::collections::HashSet;
-use syntax::ast;
-use syntax::ast_util;
-use syntax::codemap::Span;
-use syntax::print::pprust::expr_to_string;
-use syntax::visit;
-use syntax::visit::Visitor;
-
-// vtable resolution looks for places where trait bounds are
-// substituted in and figures out which vtable is used. There is some
-// extra complication thrown in to support early "opportunistic"
-// vtable resolution. This is a hacky mechanism that is invoked while
-// typechecking function calls (after typechecking non-closure
-// arguments and before typechecking closure arguments) in the hope of
-// solving for the trait parameters from the impl. (For example,
-// determining that if a parameter bounded by BaseIter<A> is
-// instantiated with Option<int>, that A = int.)
-//
-// In early resolution mode, no vtables are recorded, and a number of
-// errors are ignored. Early resolution only works if a type is
-// *fully* resolved. (We could be less restrictive than that, but it
-// would require much more care, and this seems to work decently in
-// practice.)
-//
-// While resolution on a single type requires the type to be fully
-// resolved, when resolving a substitution against a list of bounds,
-// we do not require all of the types to be resolved in advance.
-// Furthermore, we process substitutions in reverse order, which
-// allows resolution on later parameters to give information on
-// earlier params referenced by the typeclass bounds.
-// It may be better to do something more clever, like processing fully
-// resolved types first.
-
-/// A vtable context includes an inference context, a parameter environment,
-/// and a list of unboxed closure types.
-pub struct VtableContext<'a, 'tcx: 'a> {
- pub infcx: &'a infer::InferCtxt<'a, 'tcx>,
- pub param_env: &'a ty::ParameterEnvironment,
- pub unboxed_closures: &'a RefCell<DefIdMap<ty::UnboxedClosure>>,
-}
-
-impl<'a, 'tcx> VtableContext<'a, 'tcx> {
- pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.infcx.tcx }
-}
-
-fn lookup_vtables(vcx: &VtableContext,
- span: Span,
- type_param_defs: &VecPerParamSpace<ty::TypeParameterDef>,
- substs: &subst::Substs,
- is_early: bool)
- -> VecPerParamSpace<vtable_param_res> {
- debug!("lookup_vtables(\
- type_param_defs={}, \
- substs={}",
- type_param_defs.repr(vcx.tcx()),
- substs.repr(vcx.tcx()));
-
- // We do this backwards for reasons discussed above.
- let result = type_param_defs.map_rev(|def| {
- let ty = *substs.types.get(def.space, def.index);
- lookup_vtables_for_param(vcx, span, Some(substs),
- &def.bounds, ty, is_early)
- });
-
- debug!("lookup_vtables result(\
- type_param_defs={}, \
- substs={}, \
- result={})",
- type_param_defs.repr(vcx.tcx()),
- substs.repr(vcx.tcx()),
- result.repr(vcx.tcx()));
-
- result
-}
-
-fn lookup_vtables_for_param(vcx: &VtableContext,
- span: Span,
- // None for substs means the identity
- substs: Option<&subst::Substs>,
- type_param_bounds: &ty::ParamBounds,
- ty: ty::t,
- is_early: bool)
- -> vtable_param_res {
- let tcx = vcx.tcx();
-
- debug!("lookup_vtables_for_param(ty={}, type_param_bounds={}, is_early={})",
- ty.repr(vcx.tcx()),
- type_param_bounds.repr(vcx.tcx()),
- is_early);
-
- // ty is the value supplied for the type parameter A...
- let mut param_result = Vec::new();
-
- ty::each_bound_trait_and_supertraits(tcx,
- type_param_bounds.trait_bounds
- .as_slice(),
- |trait_ref| {
- // ...and here trait_ref is each bound that was declared on A,
- // expressed in terms of the type parameters.
-
- debug!("matching ty={} trait_ref={}",
- ty.repr(vcx.tcx()),
- trait_ref.repr(vcx.tcx()));
-
- ty::populate_implementations_for_trait_if_necessary(tcx,
- trait_ref.def_id);
-
- // Substitute the values of the type parameters that may
- // appear in the bound.
- let trait_ref = substs.as_ref().map_or(trait_ref.clone(), |substs| {
- debug!("about to subst: {}, {}",
- trait_ref.repr(tcx), substs.repr(tcx));
- trait_ref.subst(tcx, *substs)
- });
-
- debug!("after subst: {}", trait_ref.repr(tcx));
-
- match lookup_vtable(vcx, span, ty, trait_ref.clone(), is_early) {
- Some(vtable) => param_result.push(vtable),
- None => {
- vcx.tcx().sess.span_err(span,
- format!("failed to find an implementation of \
- trait {} for {}",
- vcx.infcx.trait_ref_to_string(&*trait_ref),
- vcx.infcx.ty_to_string(ty)).as_slice());
- param_result.push(vtable_error)
- }
- }
- true
- });
-
- debug!("lookup_vtables_for_param result(\
- type_param_bounds={}, \
- ty={}, \
- result={})",
- type_param_bounds.repr(vcx.tcx()),
- ty.repr(vcx.tcx()),
- param_result.repr(vcx.tcx()));
-
- param_result
-}
-
-fn relate_trait_refs(vcx: &VtableContext,
- span: Span,
- act_trait_ref: Rc<ty::TraitRef>,
- exp_trait_ref: Rc<ty::TraitRef>) {
- /*!
- *
- * Checks that an implementation of `act_trait_ref` is suitable
- * for use where `exp_trait_ref` is required and reports an
- * error otherwise.
- */
-
- match infer::mk_sub_trait_refs(vcx.infcx,
- false,
- infer::RelateTraitRefs(span),
- act_trait_ref.clone(),
- exp_trait_ref.clone()) {
- Ok(()) => {} // Ok.
- Err(ref err) => {
- // There is an error, but we need to do some work to make
- // the message good.
- // Resolve any type vars in the trait refs
- let r_act_trait_ref =
- vcx.infcx.resolve_type_vars_in_trait_ref_if_possible(&*act_trait_ref);
- let r_exp_trait_ref =
- vcx.infcx.resolve_type_vars_in_trait_ref_if_possible(&*exp_trait_ref);
- // Only print the message if there aren't any previous type errors
- // inside the types.
- if !ty::trait_ref_contains_error(&r_act_trait_ref) &&
- !ty::trait_ref_contains_error(&r_exp_trait_ref)
- {
- let tcx = vcx.tcx();
- span_err!(tcx.sess, span, E0095, "expected {}, found {} ({})",
- ppaux::trait_ref_to_string(tcx, &r_exp_trait_ref),
- ppaux::trait_ref_to_string(tcx, &r_act_trait_ref),
- ty::type_err_to_str(tcx, err));
- }
- }
- }
-}
-
-// Look up the vtable implementing the trait `trait_ref` at type `t`
-fn lookup_vtable(vcx: &VtableContext,
- span: Span,
- ty: ty::t,
- trait_ref: Rc<ty::TraitRef>,
- is_early: bool)
- -> Option<vtable_origin>
-{
- debug!("lookup_vtable(ty={}, trait_ref={})",
- ty.repr(vcx.tcx()),
- trait_ref.repr(vcx.tcx()));
- let _i = indenter();
-
- let ty = match fixup_ty(vcx, span, ty, is_early) {
- Some(ty) => ty,
- None => {
- // fixup_ty can only fail if this is early resolution
- assert!(is_early);
- // The type has unconstrained type variables in it, so we can't
- // do early resolution on it. Return some completely bogus vtable
- // information: we aren't storing it anyways.
- return Some(vtable_error);
- }
- };
-
- if ty::type_is_error(ty) {
- return Some(vtable_error);
- }
-
- // If the type is self or a param, we look at the trait/supertrait
- // bounds to see if they include the trait we are looking for.
- let vtable_opt = match ty::get(ty).sty {
- ty::ty_param(ParamTy {space, idx: n, ..}) => {
- let env_bounds = &vcx.param_env.bounds;
- let type_param_bounds = &env_bounds.get(space, n).trait_bounds;
- lookup_vtable_from_bounds(vcx,
- span,
- type_param_bounds.as_slice(),
- param_index {
- space: space,
- index: n,
- },
- trait_ref.clone())
- }
-
- // Default case just falls through
- _ => None
- };
-
- if vtable_opt.is_some() { return vtable_opt; }
-
- // If we aren't a self type or param, or it was, but we didn't find it,
- // do a search.
- search_for_vtable(vcx, span, ty, trait_ref, is_early)
-}
-
-// Given a list of bounds on a type, search those bounds to see if any
-// of them are the vtable we are looking for.
-fn lookup_vtable_from_bounds(vcx: &VtableContext,
- span: Span,
- bounds: &[Rc<ty::TraitRef>],
- param: param_index,
- trait_ref: Rc<ty::TraitRef>)
- -> Option<vtable_origin> {
- let tcx = vcx.tcx();
-
- let mut n_bound = 0;
- let mut ret = None;
- ty::each_bound_trait_and_supertraits(tcx, bounds, |bound_trait_ref| {
- debug!("checking bounds trait {}",
- bound_trait_ref.repr(vcx.tcx()));
-
- if bound_trait_ref.def_id == trait_ref.def_id {
- relate_trait_refs(vcx, span, bound_trait_ref, trait_ref.clone());
- let vtable = vtable_param(param, n_bound);
- debug!("found param vtable: {:?}",
- vtable);
- ret = Some(vtable);
- false
- } else {
- n_bound += 1;
- true
- }
- });
- ret
-}
-
-fn search_for_unboxed_closure_vtable(vcx: &VtableContext,
- span: Span,
- ty: ty::t,
- trait_ref: Rc<ty::TraitRef>)
- -> Option<vtable_origin> {
- let tcx = vcx.tcx();
- let closure_def_id = match ty::get(ty).sty {
- ty::ty_unboxed_closure(closure_def_id, _) => closure_def_id,
- _ => return None,
- };
-
- let fn_traits = [
- (ty::FnUnboxedClosureKind, tcx.lang_items.fn_trait()),
- (ty::FnMutUnboxedClosureKind, tcx.lang_items.fn_mut_trait()),
- (ty::FnOnceUnboxedClosureKind, tcx.lang_items.fn_once_trait()),
- ];
- for tuple in fn_traits.iter() {
- let kind = match tuple {
- &(kind, Some(ref fn_trait)) if *fn_trait == trait_ref.def_id => {
- kind
- }
- _ => continue,
- };
-
- // Check to see whether the argument and return types match.
- let unboxed_closures = tcx.unboxed_closures.borrow();
- let closure_type = match unboxed_closures.find(&closure_def_id) {
- Some(closure) => {
- if closure.kind != kind {
- continue
- }
- closure.closure_type.clone()
- }
- None => {
- // Try the inherited unboxed closure type map.
- let unboxed_closures = vcx.unboxed_closures.borrow();
- match unboxed_closures.find(&closure_def_id) {
- Some(closure) => {
- if closure.kind != kind {
- continue
- }
- closure.closure_type.clone()
- }
- None => {
- tcx.sess.span_bug(span,
- "didn't find unboxed closure type \
- in tcx map or inh map")
- }
- }
- }
- };
-
- // FIXME(pcwalton): This is a bogus thing to do, but
- // it'll do for now until we get the new trait-bound
- // region skolemization working.
- let (_, new_signature) =
- regionmanip::replace_late_bound_regions_in_fn_sig(
- tcx,
- &closure_type.sig,
- |br| {
- vcx.infcx.next_region_var(infer::LateBoundRegion(span,
- br))
- });
-
- let arguments_tuple = *new_signature.inputs.get(0);
- let corresponding_trait_ref = Rc::new(ty::TraitRef {
- def_id: trait_ref.def_id,
- substs: subst::Substs::new_trait(
- vec![arguments_tuple, new_signature.output],
- Vec::new(),
- ty)
- });
-
- relate_trait_refs(vcx, span, corresponding_trait_ref, trait_ref);
- return Some(vtable_unboxed_closure(closure_def_id))
- }
-
- None
-}
-
-fn search_for_vtable(vcx: &VtableContext,
- span: Span,
- ty: ty::t,
- trait_ref: Rc<ty::TraitRef>,
- is_early: bool)
- -> Option<vtable_origin> {
- let tcx = vcx.tcx();
-
- // First, check to see whether this is a call to the `call` method of an
- // unboxed closure. If so, and the arguments match, we're done.
- match search_for_unboxed_closure_vtable(vcx,
- span,
- ty,
- trait_ref.clone()) {
- Some(vtable_origin) => return Some(vtable_origin),
- None => {}
- }
-
- // Nope. Continue.
-
- let mut found = Vec::new();
- let mut impls_seen = HashSet::new();
-
- // Load the implementations from external metadata if necessary.
- ty::populate_implementations_for_trait_if_necessary(tcx,
- trait_ref.def_id);
-
- let impls = match tcx.trait_impls.borrow().find_copy(&trait_ref.def_id) {
- Some(impls) => impls,
- None => {
- return None;
- }
- };
- // impls is the list of all impls in scope for trait_ref.
- for &impl_did in impls.borrow().iter() {
- // im is one specific impl of trait_ref.
-
- // First, ensure we haven't processed this impl yet.
- if impls_seen.contains(&impl_did) {
- continue;
- }
- impls_seen.insert(impl_did);
-
- // ty::impl_traits gives us the trait im implements.
- //
- // If foo implements a trait t, and if t is the same trait as
- // trait_ref, we need to unify it with trait_ref in order to
- // get all the ty vars sorted out.
- let r = ty::impl_trait_ref(tcx, impl_did);
- let of_trait_ref = r.expect("trait_ref missing on trait impl");
- if of_trait_ref.def_id != trait_ref.def_id { continue; }
-
- // At this point, we know that of_trait_ref is the same trait
- // as trait_ref, but possibly applied to different substs.
- //
- // Next, we check whether the "for" ty in the impl is
- // compatible with the type that we're casting to a
- // trait. That is, if im is:
- //
- // impl<T> some_trait<T> for self_ty<T> { ... }
- //
- // we check whether self_ty<T> is the type of the thing that
- // we're trying to cast to some_trait. If not, then we try
- // the next impl.
- //
- // FIXME: document a bit more what this means
- let TypeAndSubsts {
- substs: substs,
- ty: for_ty
- } = impl_self_ty(vcx, span, impl_did);
- match infer::mk_eqty(vcx.infcx,
- false,
- infer::RelateSelfType(span),
- ty,
- for_ty) {
- Err(_) => continue,
- Ok(()) => ()
- }
-
- // Now, in the previous example, for_ty is bound to
- // the type self_ty, and substs is bound to [T].
- debug!("The self ty is {} and its substs are {}",
- for_ty.repr(tcx),
- substs.types.repr(tcx));
-
- // Next, we unify trait_ref -- the type that we want to cast
- // to -- with of_trait_ref -- the trait that im implements. At
- // this point, we require that they be unifiable with each
- // other -- that's what relate_trait_refs does.
- //
- // For example, in the above example, of_trait_ref would be
- // some_trait<T>, so we would be unifying trait_ref<U> (for
- // some value of U) with some_trait<T>. This would fail if T
- // and U weren't compatible.
-
- let of_trait_ref = of_trait_ref.subst(tcx, &substs);
-
- debug!("(checking vtable) num 2 relating trait \
- ty {} to of_trait_ref {}",
- vcx.infcx.trait_ref_to_string(&*trait_ref),
- vcx.infcx.trait_ref_to_string(&*of_trait_ref));
-
- relate_trait_refs(vcx, span, of_trait_ref, trait_ref.clone());
-
-
- // Recall that trait_ref -- the trait type we're casting to --
- // is the trait with id trait_ref.def_id applied to the substs
- // trait_ref.substs.
-
- // Resolve any sub bounds. Note that there still may be free
- // type variables in substs. This might still be OK: the
- // process of looking up bounds might constrain some of them.
- //
- // This does not check built-in traits because those are handled
- // later in the kind checking pass.
- let im_generics =
- ty::lookup_item_type(tcx, impl_did).generics;
- let subres = lookup_vtables(vcx,
- span,
- &im_generics.types,
- &substs,
- is_early);
-
- // substs might contain type variables, so we call
- // fixup_substs to resolve them.
- let substs_f = match fixup_substs(vcx, span,
- trait_ref.def_id,
- substs,
- is_early) {
- Some(ref substs) => (*substs).clone(),
- None => {
- assert!(is_early);
- // Bail out with a bogus answer
- return Some(vtable_error);
- }
- };
-
- debug!("The fixed-up substs are {} - \
- they will be unified with the bounds for \
- the target ty, {}",
- substs_f.types.repr(tcx),
- trait_ref.repr(tcx));
-
- // Next, we unify the fixed-up substitutions for the impl self
- // ty with the substitutions from the trait type that we're
- // trying to cast to. connect_trait_tps requires these lists
- // of types to unify pairwise.
- // I am a little confused about this, since it seems to be
- // very similar to the relate_trait_refs we already do,
- // but problems crop up if it is removed, so... -sully
- connect_trait_tps(vcx, span, &substs_f, trait_ref.clone(), impl_did);
-
- // Finally, we register that we found a matching impl, and
- // record the def ID of the impl as well as the resolved list
- // of type substitutions for the target trait.
- found.push(vtable_static(impl_did, substs_f, subres));
- }
-
- match found.len() {
- 0 => { return None }
- 1 => return Some(found.get(0).clone()),
- _ => {
- if !is_early {
- span_err!(vcx.tcx().sess, span, E0096,
- "multiple applicable methods in scope");
- }
- return Some(found.get(0).clone());
- }
- }
-}
-
-
-fn fixup_substs(vcx: &VtableContext,
- span: Span,
- id: ast::DefId,
- substs: subst::Substs,
- is_early: bool)
- -> Option<subst::Substs> {
- let tcx = vcx.tcx();
- // use a dummy type just to package up the substs that need fixing up
- let t = ty::mk_trait(tcx,
- id, substs,
- ty::region_existential_bound(ty::ReStatic));
- fixup_ty(vcx, span, t, is_early).map(|t_f| {
- match ty::get(t_f).sty {
- ty::ty_trait(ref inner) => inner.substs.clone(),
- _ => fail!("t_f should be a trait")
- }
- })
-}
-
-fn fixup_ty(vcx: &VtableContext,
- span: Span,
- ty: ty::t,
- is_early: bool)
- -> Option<ty::t> {
- let tcx = vcx.tcx();
- match resolve_type(vcx.infcx, Some(span), ty, resolve_and_force_all_but_regions) {
- Ok(new_type) => Some(new_type),
- Err(e) if !is_early => {
- tcx.sess.span_err(span,
- format!("cannot determine a type for this bounded type \
- parameter: {}",
- fixup_err_to_string(e)).as_slice());
- Some(ty::mk_err())
- }
- Err(_) => {
- None
- }
- }
-}
-
-fn connect_trait_tps(vcx: &VtableContext,
- span: Span,
- impl_substs: &subst::Substs,
- trait_ref: Rc<ty::TraitRef>,
- impl_did: ast::DefId) {
- let tcx = vcx.tcx();
-
- let impl_trait_ref = match ty::impl_trait_ref(tcx, impl_did) {
- Some(t) => t,
- None => vcx.tcx().sess.span_bug(span,
- "connect_trait_tps invoked on a type impl")
- };
-
- let impl_trait_ref = impl_trait_ref.subst(tcx, impl_substs);
- relate_trait_refs(vcx, span, impl_trait_ref, trait_ref);
-}
-
-fn insert_vtables(fcx: &FnCtxt, vtable_key: MethodCall, vtables: vtable_res) {
- debug!("insert_vtables(vtable_key={}, vtables={})",
- vtable_key, vtables.repr(fcx.tcx()));
- fcx.inh.vtable_map.borrow_mut().insert(vtable_key, vtables);
-}
-
-pub fn early_resolve_expr(ex: &ast::Expr, fcx: &FnCtxt, is_early: bool) {
- fn mutability_allowed(a_mutbl: ast::Mutability,
- b_mutbl: ast::Mutability) -> bool {
- a_mutbl == b_mutbl ||
- (a_mutbl == ast::MutMutable && b_mutbl == ast::MutImmutable)
- }
-
- debug!("vtable: early_resolve_expr() ex with id {:?} (early: {}): {}",
- ex.id, is_early, expr_to_string(ex));
- let _indent = indenter();
-
- let cx = fcx.ccx;
- let check_object_cast = |src_ty: ty::t, target_ty: ty::t| {
- debug!("check_object_cast {} to {}",
- fcx.infcx().ty_to_string(src_ty),
- fcx.infcx().ty_to_string(target_ty));
- // Check that a cast is of correct types.
- match (&ty::get(target_ty).sty, &ty::get(src_ty).sty) {
- (&ty::ty_rptr(_, ty::mt{ty, mutbl}), &ty::ty_rptr(_, mt))
- | (&ty::ty_ptr(ty::mt{ty, mutbl}), &ty::ty_rptr(_, mt))
- if !mutability_allowed(mt.mutbl, mutbl) => {
- match ty::get(ty).sty {
- ty::ty_trait(..) => {
- span_err!(fcx.tcx().sess, ex.span, E0097, "types differ in mutability");
- }
- _ => {}
- }
- }
- (&ty::ty_uniq(..), &ty::ty_uniq(..) )
- | (&ty::ty_ptr(..), &ty::ty_ptr(..) )
- | (&ty::ty_ptr(..), &ty::ty_rptr(..)) => {}
- (&ty::ty_rptr(r_t, _), &ty::ty_rptr(r_s, _)) => {
- infer::mk_subr(fcx.infcx(),
- infer::RelateObjectBound(ex.span),
- r_t,
- r_s);
- }
- (&ty::ty_uniq(ty), _) => {
- match ty::get(ty).sty {
- ty::ty_trait(..) => {
- span_err!(fcx.ccx.tcx.sess, ex.span, E0098,
- "can only cast an boxed pointer to a boxed object, not a {}",
- ty::ty_sort_string(fcx.tcx(), src_ty));
- }
- _ => {}
- }
-
- }
- (&ty::ty_rptr(_, ty::mt{ty, ..}), _) => {
- match ty::get(ty).sty {
- ty::ty_trait(..) => {
- span_err!(fcx.ccx.tcx.sess, ex.span, E0099,
- "can only cast an &-pointer to an &-object, not a {}",
- ty::ty_sort_string(fcx.tcx(), src_ty));
- }
- _ => {}
- }
- }
- (&ty::ty_ptr(ty::mt{ty, ..}), _) => {
- match ty::get(ty).sty {
- ty::ty_trait(..) => {
- span_err!(fcx.ccx.tcx.sess, ex.span, E0160,
- "can only cast an *-pointer or &-pointer to an *-object, not a {}",
- ty::ty_sort_string(fcx.tcx(), src_ty));
- }
- _ => {}
- }
- }
- _ => {}
- }
- };
- let resolve_object_cast = |src_ty: ty::t, target_ty: ty::t, key: MethodCall| {
- // Look up vtables for the type we're casting to,
- // passing in the source and target type. The source
- // must be a pointer type suitable to the object sigil,
- // e.g.: `&x as &Trait` or `box x as Box<Trait>`
- // Bounds of type's contents are not checked here, but in kind.rs.
- match ty::get(target_ty).sty {
- ty::ty_trait(box ty::TyTrait {
- def_id: target_def_id, substs: ref target_substs, ..
- }) => {
- let vcx = fcx.vtable_context();
-
- // Take the type parameters from the object
- // type, but set the Self type (which is
- // unknown, for the object type) to be the type
- // we are casting from.
- let mut target_types = target_substs.types.clone();
- assert!(target_types.get_self().is_none());
- target_types.push(subst::SelfSpace, src_ty);
-
- let target_trait_ref = Rc::new(ty::TraitRef {
- def_id: target_def_id,
- substs: subst::Substs {
- regions: target_substs.regions.clone(),
- types: target_types
- }
- });
-
- let param_bounds = ty::ParamBounds {
- opt_region_bound: None,
- builtin_bounds: ty::empty_builtin_bounds(),
- trait_bounds: vec!(target_trait_ref)
- };
-
- let vtables =
- lookup_vtables_for_param(&vcx,
- ex.span,
- None,
- ¶m_bounds,
- src_ty,
- is_early);
-
- if !is_early {
- let mut r = VecPerParamSpace::empty();
- r.push(subst::SelfSpace, vtables);
- insert_vtables(fcx, key, r);
- }
- }
- _ => {}
- }
- };
- match ex.node {
- ast::ExprPath(..) => {
- fcx.opt_node_ty_substs(ex.id, |item_substs| {
- debug!("vtable resolution on parameter bounds for expr {}",
- ex.repr(fcx.tcx()));
- let def = cx.tcx.def_map.borrow().get_copy(&ex.id);
- let did = def.def_id();
- let item_ty = ty::lookup_item_type(cx.tcx, did);
- debug!("early resolve expr: def {:?} {:?}, {:?}, {}", ex.id, did, def,
- fcx.infcx().ty_to_string(item_ty.ty));
- debug!("early_resolve_expr: looking up vtables for type params {}",
- item_ty.generics.types.repr(fcx.tcx()));
- let vcx = fcx.vtable_context();
- let vtbls = lookup_vtables(&vcx, ex.span,
- &item_ty.generics.types,
- &item_substs.substs, is_early);
- if !is_early {
- insert_vtables(fcx, MethodCall::expr(ex.id), vtbls);
- }
- });
- }
-
- // Must resolve bounds on methods with bounded params
- ast::ExprBinary(_, _, _) |
- ast::ExprUnary(_, _) |
- ast::ExprAssignOp(_, _, _) |
- ast::ExprIndex(_, _) |
- ast::ExprMethodCall(_, _, _) |
- ast::ExprForLoop(..) |
- ast::ExprCall(..) => {
- match fcx.inh.method_map.borrow().find(&MethodCall::expr(ex.id)) {
- Some(method) => {
- debug!("vtable resolution on parameter bounds for method call {}",
- ex.repr(fcx.tcx()));
- let type_param_defs =
- ty::method_call_type_param_defs(fcx, method.origin);
- let substs = fcx.method_ty_substs(ex.id);
- let vcx = fcx.vtable_context();
- let vtbls = lookup_vtables(&vcx, ex.span,
- &type_param_defs,
- &substs, is_early);
- if !is_early {
- insert_vtables(fcx, MethodCall::expr(ex.id), vtbls);
- }
- }
- None => {}
- }
- }
- ast::ExprCast(ref src, _) => {
- debug!("vtable resolution on expr {}", ex.repr(fcx.tcx()));
- let target_ty = fcx.expr_ty(ex);
- let src_ty = structurally_resolved_type(fcx, ex.span,
- fcx.expr_ty(&**src));
- check_object_cast(src_ty, target_ty);
- match (ty::deref(src_ty, false), ty::deref(target_ty, false)) {
- (Some(s), Some(t)) => {
- let key = MethodCall::expr(ex.id);
- resolve_object_cast(s.ty, t.ty, key)
- }
- _ => {}
- }
- }
- _ => ()
- }
-
- // Search for auto-adjustments to find trait coercions
- match fcx.inh.adjustments.borrow().find(&ex.id) {
- Some(adjustment) => {
- match *adjustment {
- _ if ty::adjust_is_object(adjustment) => {
- let src_ty = structurally_resolved_type(fcx, ex.span,
- fcx.expr_ty(ex));
- match ty::type_of_adjust(fcx.tcx(), adjustment) {
- Some(target_ty) => {
- check_object_cast(src_ty, target_ty)
- }
- None => {}
- }
-
- match trait_cast_types(fcx, adjustment, src_ty, ex.span) {
- Some((s, t)) => {
- let key = MethodCall::autoobject(ex.id);
- resolve_object_cast(s, t, key)
- }
- None => fail!("Couldn't extract types from adjustment")
- }
- }
- AutoDerefRef(ref adj) => {
- for autoderef in range(0, adj.autoderefs) {
- let method_call = MethodCall::autoderef(ex.id, autoderef);
- match fcx.inh.method_map.borrow().find(&method_call) {
- Some(method) => {
- debug!("vtable resolution on parameter bounds for autoderef {}",
- ex.repr(fcx.tcx()));
- let type_param_defs =
- ty::method_call_type_param_defs(cx.tcx, method.origin);
- let vcx = fcx.vtable_context();
- let vtbls = lookup_vtables(&vcx, ex.span,
- &type_param_defs,
- &method.substs, is_early);
- if !is_early {
- insert_vtables(fcx, method_call, vtbls);
- }
- }
- None => {}
- }
- }
- }
- _ => {}
- }
- }
- None => {}
- }
-}
-
-// When we coerce (possibly implicitly) from a concrete type to a trait type, this
-// function returns the concrete type and trait. This might happen arbitrarily
-// deep in the adjustment. This function will fail if the adjustment does not
-// match the source type.
-// This function will always return types if ty::adjust_is_object is true for the
-// adjustment
-fn trait_cast_types(fcx: &FnCtxt,
- adj: &ty::AutoAdjustment,
- src_ty: ty::t,
- sp: Span)
- -> Option<(ty::t, ty::t)> {
- fn trait_cast_types_autoref(fcx: &FnCtxt,
- autoref: &ty::AutoRef,
- src_ty: ty::t,
- sp: Span)
- -> Option<(ty::t, ty::t)> {
- fn trait_cast_types_unsize(fcx: &FnCtxt,
- k: &ty::UnsizeKind,
- src_ty: ty::t,
- sp: Span)
- -> Option<(ty::t, ty::t)> {
- match k {
- &ty::UnsizeVtable(bounds, def_id, ref substs) => {
- Some((src_ty, ty::mk_trait(fcx.tcx(), def_id, substs.clone(), bounds)))
- }
- &ty::UnsizeStruct(box ref k, tp_index) => match ty::get(src_ty).sty {
- ty::ty_struct(_, ref substs) => {
- let ty_substs = substs.types.get_slice(subst::TypeSpace);
- let field_ty = structurally_resolved_type(fcx, sp, ty_substs[tp_index]);
- trait_cast_types_unsize(fcx, k, field_ty, sp)
- }
- _ => fail!("Failed to find a ty_struct to correspond with \
- UnsizeStruct whilst walking adjustment. Found {}",
- ppaux::ty_to_string(fcx.tcx(), src_ty))
- },
- _ => None
- }
- }
-
- match autoref {
- &ty::AutoUnsize(ref k) |
- &ty::AutoUnsizeUniq(ref k) => trait_cast_types_unsize(fcx, k, src_ty, sp),
- &ty::AutoPtr(_, _, Some(box ref autoref)) |
- &ty::AutoUnsafe(_, Some(box ref autoref)) => {
- trait_cast_types_autoref(fcx, autoref, src_ty, sp)
- }
- _ => None
- }
- }
-
- match adj {
- &ty::AutoDerefRef(AutoDerefRef{autoref: Some(ref autoref), autoderefs}) => {
- let mut derefed_type = src_ty;
- for _ in range(0, autoderefs) {
- derefed_type = ty::deref(derefed_type, true).unwrap().ty;
- derefed_type = structurally_resolved_type(fcx, sp, derefed_type)
- }
- trait_cast_types_autoref(fcx, autoref, derefed_type, sp)
- }
- _ => None
- }
-}
-
-pub fn resolve_impl(tcx: &ty::ctxt,
- impl_item: &ast::Item,
- impl_generics: &ty::Generics,
- impl_trait_ref: &ty::TraitRef) {
- /*!
- * The situation is as follows. We have some trait like:
- *
- * trait Foo<A:Clone> : Bar {
- * fn method() { ... }
- * }
- *
- * and an impl like:
- *
- * impl<B:Clone> Foo<B> for int { ... }
- *
- * We want to validate that the various requirements of the trait
- * are met:
- *
- * A:Clone, Self:Bar
- *
- * But of course after substituting the types from the impl:
- *
- * B:Clone, int:Bar
- *
- * We store these results away as the "impl_res" for use by the
- * default methods.
- */
-
- debug!("resolve_impl(impl_item.id={})",
- impl_item.id);
-
- let param_env = ty::construct_parameter_environment(tcx,
- impl_generics,
- impl_item.id);
-
- // The impl_trait_ref in our example above would be
- // `Foo<B> for int`
- let impl_trait_ref = impl_trait_ref.subst(tcx, ¶m_env.free_substs);
- debug!("impl_trait_ref={}", impl_trait_ref.repr(tcx));
-
- let infcx = &infer::new_infer_ctxt(tcx);
- let unboxed_closures = RefCell::new(DefIdMap::new());
- let vcx = VtableContext {
- infcx: infcx,
- param_env: ¶m_env,
- unboxed_closures: &unboxed_closures,
- };
-
- // Resolve the vtables for the trait reference on the impl. This
- // serves many purposes, best explained by example. Imagine we have:
- //
- // trait A<T:B> : C { fn x(&self) { ... } }
- //
- // and
- //
- // impl A<int> for uint { ... }
- //
- // In that case, the trait ref will be `A<int> for uint`. Resolving
- // this will first check that the various types meet their requirements:
- //
- // 1. Because of T:B, int must implement the trait B
- // 2. Because of the supertrait C, uint must implement the trait C.
- //
- // Simultaneously, the result of this resolution (`vtbls`), is precisely
- // the set of vtable information needed to compile the default method
- // `x()` adapted to the impl. (After all, a default method is basically
- // the same as:
- //
- // fn default_x<T:B, Self:A>(...) { .. .})
-
- let trait_def = ty::lookup_trait_def(tcx, impl_trait_ref.def_id);
- let vtbls = lookup_vtables(&vcx,
- impl_item.span,
- &trait_def.generics.types,
- &impl_trait_ref.substs,
- false);
-
- infcx.resolve_regions_and_report_errors();
-
- let vtbls = writeback::resolve_impl_res(infcx, impl_item.span, &vtbls);
- let impl_def_id = ast_util::local_def(impl_item.id);
-
- debug!("impl_vtables for {} are {}",
- impl_def_id.repr(tcx),
- vtbls.repr(tcx));
-
- tcx.impl_vtables.borrow_mut().insert(impl_def_id, vtbls);
-}
-
-/// Resolve vtables for a method call after typeck has finished.
-/// Used by trans to monomorphize artificial method callees (e.g. drop).
-pub fn trans_resolve_method(tcx: &ty::ctxt, id: ast::NodeId,
- substs: &subst::Substs) -> vtable_res {
- let generics = ty::lookup_item_type(tcx, ast_util::local_def(id)).generics;
- let unboxed_closures = RefCell::new(DefIdMap::new());
- let vcx = VtableContext {
- infcx: &infer::new_infer_ctxt(tcx),
- param_env: &ty::construct_parameter_environment(tcx, &ty::Generics::empty(), id),
- unboxed_closures: &unboxed_closures,
- };
-
- lookup_vtables(&vcx,
- tcx.map.span(id),
- &generics.types,
- substs,
- false)
-}
-
-impl<'a, 'b, 'tcx, 'v> Visitor<'v> for &'a FnCtxt<'b, 'tcx> {
- fn visit_expr(&mut self, ex: &ast::Expr) {
- early_resolve_expr(ex, *self, false);
- visit::walk_expr(self, ex);
- }
- fn visit_item(&mut self, _: &ast::Item) {
- // no-op
- }
-}
-
-// Detect points where a trait-bounded type parameter is
-// instantiated, resolve the impls for the parameters.
-pub fn resolve_in_block(mut fcx: &FnCtxt, bl: &ast::Block) {
- visit::walk_block(&mut fcx, bl);
-}
-
-/// Used in the kind checker after typechecking has finished. Calls
-/// `any_missing` if any bounds were missing.
-pub fn check_param_bounds(tcx: &ty::ctxt,
- span: Span,
- parameter_environment: &ty::ParameterEnvironment,
- type_param_defs:
- &VecPerParamSpace<ty::TypeParameterDef>,
- substs: &subst::Substs,
- any_missing: |&ty::TraitRef|) {
- let unboxed_closures = RefCell::new(DefIdMap::new());
- let vcx = VtableContext {
- infcx: &infer::new_infer_ctxt(tcx),
- param_env: parameter_environment,
- unboxed_closures: &unboxed_closures,
- };
- let vtable_param_results =
- lookup_vtables(&vcx, span, type_param_defs, substs, false);
- for (vtable_param_result, type_param_def) in
- vtable_param_results.iter().zip(type_param_defs.iter()) {
- for (vtable_result, trait_ref) in
- vtable_param_result.iter()
- .zip(type_param_def.bounds
- .trait_bounds
- .iter()) {
- match *vtable_result {
- vtable_error => any_missing(&**trait_ref),
- vtable_static(..) |
- vtable_param(..) |
- vtable_unboxed_closure(..) => {}
- }
- }
- }
-}
-
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::subst::{SelfSpace};
+use middle::traits;
+use middle::traits::{SelectionError, Overflow,
+ OutputTypeParameterMismatch, Unimplemented};
+use middle::traits::{Obligation, obligation_for_builtin_bound};
+use middle::traits::{FulfillmentError, CodeSelectionError, CodeAmbiguity};
+use middle::traits::{ObligationCause};
+use middle::ty;
+use middle::typeck::check::{FnCtxt,
+ structurally_resolved_type};
+use middle::typeck::infer;
+use std::rc::Rc;
+use syntax::ast;
+use syntax::codemap::Span;
+use util::ppaux::UserString;
+use util::ppaux::Repr;
+
+/// When reporting an error about a failed trait obligation, it's nice
+/// to include some context indicating why we were checking that
+/// obligation in the first place. The span is often enough but
+/// sometimes it's not. Currently this enum is a bit of a hack and I
+/// suspect it should be carried in the obligation or more deeply
+/// integrated somehow.
+pub enum ErrorReportingContext {
+ GenericContext,
+ ImplSupertraitCheck,
+}
+
+pub fn check_object_cast(fcx: &FnCtxt,
+ cast_expr: &ast::Expr,
+ source_expr: &ast::Expr,
+ target_object_ty: ty::t)
+{
+ debug!("check_object_cast(cast_expr={}, target_object_ty={})",
+ cast_expr.repr(fcx.tcx()),
+ target_object_ty.repr(fcx.tcx()));
+
+ // Look up vtables for the type we're casting to,
+ // passing in the source and target type. The source
+ // must be a pointer type suitable to the object sigil,
+ // e.g.: `&x as &Trait` or `box x as Box<Trait>`
+ let source_ty = fcx.expr_ty(source_expr);
+ let source_ty = structurally_resolved_type(fcx, source_expr.span, source_ty);
+ debug!("source_ty={}", source_ty.repr(fcx.tcx()));
+ match (&ty::get(source_ty).sty, &ty::get(target_object_ty).sty) {
+ (&ty::ty_uniq(referent_ty), &ty::ty_uniq(object_trait_ty)) => {
+ let object_trait = object_trait(&object_trait_ty);
+
+ // Ensure that if ~T is cast to ~Trait, then T : Trait
+ push_cast_obligation(fcx, cast_expr, object_trait, referent_ty);
+ }
+
+ (&ty::ty_rptr(referent_region, ty::mt { ty: referent_ty,
+ mutbl: referent_mutbl }),
+ &ty::ty_rptr(target_region, ty::mt { ty: object_trait_ty,
+ mutbl: target_mutbl })) =>
+ {
+ let object_trait = object_trait(&object_trait_ty);
+ if !mutability_allowed(referent_mutbl, target_mutbl) {
+ fcx.tcx().sess.span_err(source_expr.span,
+ "types differ in mutability");
+ } else {
+ // Ensure that if &'a T is cast to &'b Trait, then T : Trait
+ push_cast_obligation(fcx, cast_expr,
+ object_trait,
+ referent_ty);
+
+ // Ensure that if &'a T is cast to &'b Trait, then 'b <= 'a
+ infer::mk_subr(fcx.infcx(),
+ infer::RelateObjectBound(source_expr.span),
+ target_region,
+ referent_region);
+ }
+ }
+
+ (_, &ty::ty_uniq(..)) => {
+ fcx.ccx.tcx.sess.span_err(
+ source_expr.span,
+ format!("can only cast an boxed pointer \
+ to a boxed object, not a {}",
+ ty::ty_sort_string(fcx.tcx(), source_ty)).as_slice());
+ }
+
+ (_, &ty::ty_rptr(..)) => {
+ fcx.ccx.tcx.sess.span_err(
+ source_expr.span,
+ format!("can only cast a &-pointer \
+ to an &-object, not a {}",
+ ty::ty_sort_string(fcx.tcx(), source_ty)).as_slice());
+ }
+
+ _ => {
+ fcx.tcx().sess.span_bug(
+ source_expr.span,
+ "expected object type");
+ }
+ }
+
+ // Because we currently give unsound lifetimes to the "ty_box", I
+ // could have written &'static ty::TyTrait here, but it seems
+ // gratuitously unsafe.
+ fn object_trait<'a>(t: &'a ty::t) -> &'a ty::TyTrait {
+ match ty::get(*t).sty {
+ ty::ty_trait(ref ty_trait) => &**ty_trait,
+ _ => fail!("expected ty_trait")
+ }
+ }
+
+ fn mutability_allowed(a_mutbl: ast::Mutability,
+ b_mutbl: ast::Mutability)
+ -> bool {
+ a_mutbl == b_mutbl ||
+ (a_mutbl == ast::MutMutable && b_mutbl == ast::MutImmutable)
+ }
+
+ fn push_cast_obligation(fcx: &FnCtxt,
+ cast_expr: &ast::Expr,
+ object_trait: &ty::TyTrait,
+ referent_ty: ty::t) {
+ let object_trait_ref =
+ register_object_cast_obligations(fcx,
+ cast_expr.span,
+ object_trait,
+ referent_ty);
+
+ // Finally record the object_trait_ref for use during trans
+ // (it would prob be better not to do this, but it's just kind
+ // of a pain to have to reconstruct it).
+ fcx.write_object_cast(cast_expr.id, object_trait_ref);
+ }
+}
+
+pub fn register_object_cast_obligations(fcx: &FnCtxt,
+ span: Span,
+ object_trait: &ty::TyTrait,
+ referent_ty: ty::t)
+ -> Rc<ty::TraitRef>
+{
+ // This is just for better error reporting. Kinda goofy. The object type stuff
+ // needs some refactoring so there is a more convenient type to pass around.
+ let object_trait_ty =
+ ty::mk_trait(fcx.tcx(),
+ object_trait.def_id,
+ object_trait.substs.clone(),
+ object_trait.bounds);
+
+ debug!("register_object_cast_obligations: referent_ty={} object_trait_ty={}",
+ referent_ty.repr(fcx.tcx()),
+ object_trait_ty.repr(fcx.tcx()));
+
+ // Take the type parameters from the object type, but set
+ // the Self type (which is unknown, for the object type)
+ // to be the type we are casting from.
+ let mut object_substs = object_trait.substs.clone();
+ assert!(object_substs.self_ty().is_none());
+ object_substs.types.push(SelfSpace, referent_ty);
+
+ // Create the obligation for casting from T to Trait.
+ let object_trait_ref =
+ Rc::new(ty::TraitRef { def_id: object_trait.def_id,
+ substs: object_substs });
+ let object_obligation =
+ Obligation::new(
+ ObligationCause::new(span,
+ traits::ObjectCastObligation(object_trait_ty)),
+ object_trait_ref.clone());
+ fcx.register_obligation(object_obligation);
+
+ // Create additional obligations for all the various builtin
+ // bounds attached to the object cast. (In other words, if the
+ // object type is Foo+Send, this would create an obligation
+ // for the Send check.)
+ for builtin_bound in object_trait.bounds.builtin_bounds.iter() {
+ fcx.register_obligation(
+ obligation_for_builtin_bound(
+ fcx.tcx(),
+ ObligationCause::new(span,
+ traits::ObjectCastObligation(object_trait_ty)),
+ referent_ty,
+ builtin_bound));
+ }
+
+ object_trait_ref
+}
+
+pub fn select_all_fcx_obligations_or_error(fcx: &FnCtxt) {
+ debug!("select_all_fcx_obligations_or_error");
+
+ let mut fulfillment_cx = fcx.inh.fulfillment_cx.borrow_mut();
+ let r =
+ fulfillment_cx.select_all_or_error(
+ fcx.infcx(),
+ &fcx.inh.param_env,
+ &*fcx.inh.unboxed_closures.borrow());
+ match r {
+ Ok(()) => { }
+ Err(errors) => { report_fulfillment_errors(fcx, &errors); }
+ }
+}
+
+pub fn check_builtin_bound_obligations(fcx: &FnCtxt) {
+ /*!
+ * Hacky second pass to check builtin-bounds obligations *after*
+ * writeback occurs.
+ */
+
+ match
+ fcx.inh.fulfillment_cx.borrow()
+ .check_builtin_bound_obligations(fcx.infcx())
+ {
+ Ok(()) => { }
+ Err(errors) => { report_fulfillment_errors(fcx, &errors); }
+ }
+}
+
+fn resolve_trait_ref(fcx: &FnCtxt, obligation: &Obligation)
+ -> (ty::TraitRef, ty::t)
+{
+ let trait_ref =
+ fcx.infcx().resolve_type_vars_in_trait_ref_if_possible(
+ &*obligation.trait_ref);
+ let self_ty =
+ trait_ref.substs.self_ty().unwrap();
+ (trait_ref, self_ty)
+}
+
+pub fn report_fulfillment_errors(fcx: &FnCtxt,
+ errors: &Vec<FulfillmentError>) {
+ for error in errors.iter() {
+ report_fulfillment_error(fcx, error);
+ }
+}
+
+pub fn report_fulfillment_error(fcx: &FnCtxt,
+ error: &FulfillmentError) {
+ match error.code {
+ CodeSelectionError(ref e) => {
+ report_selection_error(fcx, &error.obligation, e);
+ }
+ CodeAmbiguity => {
+ maybe_report_ambiguity(fcx, &error.obligation);
+ }
+ }
+}
+
+pub fn report_selection_error(fcx: &FnCtxt,
+ obligation: &Obligation,
+ error: &SelectionError) {
+ match *error {
+ Unimplemented => {
+ let (trait_ref, self_ty) = resolve_trait_ref(fcx, obligation);
+ if !ty::type_is_error(self_ty) {
+ fcx.tcx().sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the trait `{}` is not implemented for the type `{}`",
+ trait_ref.user_string(fcx.tcx()),
+ self_ty.user_string(fcx.tcx())).as_slice());
+ note_obligation_cause(fcx, obligation);
+ }
+ }
+ Overflow => {
+ report_overflow(fcx, obligation);
+ }
+ OutputTypeParameterMismatch(ref expected_trait_ref, ref e) => {
+ let expected_trait_ref =
+ fcx.infcx().resolve_type_vars_in_trait_ref_if_possible(
+ &**expected_trait_ref);
+ let (trait_ref, self_ty) = resolve_trait_ref(fcx, obligation);
+ if !ty::type_is_error(self_ty) {
+ fcx.tcx().sess.span_err(
+ obligation.cause.span,
+ format!(
+ "type mismatch: the type `{}` implements the trait `{}`, \
+ but the trait `{}` is required ({})",
+ self_ty.user_string(fcx.tcx()),
+ expected_trait_ref.user_string(fcx.tcx()),
+ trait_ref.user_string(fcx.tcx()),
+ ty::type_err_to_str(fcx.tcx(), e)).as_slice());
+ note_obligation_cause(fcx, obligation);
+ }
+ }
+ }
+}
+
+pub fn report_overflow(fcx: &FnCtxt, obligation: &Obligation) {
+ let (trait_ref, self_ty) = resolve_trait_ref(fcx, obligation);
+ if ty::type_is_error(self_ty) {
+ fcx.tcx().sess.span_err(
+ obligation.cause.span,
+ format!(
+ "could not locate an impl of the trait `{}` for \
+ the type `{}` due to overflow; possible cyclic \
+ dependency between impls",
+ trait_ref.user_string(fcx.tcx()),
+ self_ty.user_string(fcx.tcx())).as_slice());
+ note_obligation_cause(fcx, obligation);
+ }
+}
+
+pub fn maybe_report_ambiguity(fcx: &FnCtxt, obligation: &Obligation) {
+ // Unable to successfully determine, probably means
+ // insufficient type information, but could mean
+ // ambiguous impls. The latter *ought* to be a
+ // coherence violation, so we don't report it here.
+ let (trait_ref, self_ty) = resolve_trait_ref(fcx, obligation);
+ debug!("maybe_report_ambiguity(trait_ref={}, self_ty={}, obligation={})",
+ trait_ref.repr(fcx.tcx()),
+ self_ty.repr(fcx.tcx()),
+ obligation.repr(fcx.tcx()));
+ if ty::type_is_error(self_ty) {
+ } else if ty::type_needs_infer(self_ty) {
+ fcx.tcx().sess.span_err(
+ obligation.cause.span,
+ format!(
+ "unable to infer enough type information to \
+ locate the impl of the trait `{}` for \
+ the type `{}`; type annotations required",
+ trait_ref.user_string(fcx.tcx()),
+ self_ty.user_string(fcx.tcx())).as_slice());
+ note_obligation_cause(fcx, obligation);
+ } else if fcx.tcx().sess.err_count() == 0 {
+ // Ambiguity. Coherence should have reported an error.
+ fcx.tcx().sess.span_bug(
+ obligation.cause.span,
+ format!(
+ "coherence failed to report ambiguity: \
+ cannot locate the impl of the trait `{}` for \
+ the type `{}`",
+ trait_ref.user_string(fcx.tcx()),
+ self_ty.user_string(fcx.tcx())).as_slice());
+ }
+}
+
+pub fn select_fcx_obligations_where_possible(fcx: &FnCtxt) {
+ /*! Select as many obligations as we can at present. */
+
+ match
+ fcx.inh.fulfillment_cx
+ .borrow_mut()
+ .select_where_possible(fcx.infcx(),
+ &fcx.inh.param_env,
+ &*fcx.inh.unboxed_closures.borrow())
+ {
+ Ok(()) => { }
+ Err(errors) => { report_fulfillment_errors(fcx, &errors); }
+ }
+}
+
+fn note_obligation_cause(fcx: &FnCtxt,
+ obligation: &Obligation) {
+ let tcx = fcx.tcx();
+ let trait_name = ty::item_path_str(tcx, obligation.trait_ref.def_id);
+ match obligation.cause.code {
+ traits::MiscObligation => { }
+ traits::ItemObligation(item_def_id) => {
+ let item_name = ty::item_path_str(tcx, item_def_id);
+ tcx.sess.span_note(
+ obligation.cause.span,
+ format!(
+ "the trait `{}` must be implemented because it is required by `{}`",
+ trait_name,
+ item_name).as_slice());
+ }
+ traits::ObjectCastObligation(object_ty) => {
+ tcx.sess.span_note(
+ obligation.cause.span,
+ format!(
+ "the trait `{}` must be implemented for the cast \
+ to the object type `{}`",
+ trait_name,
+ fcx.infcx().ty_to_string(object_ty)).as_slice());
+ }
+ traits::RepeatVec => {
+ tcx.sess.span_note(
+ obligation.cause.span,
+ format!(
+ "the `Copy` trait is required because the \
+ repeated element will be copied").as_slice());
+ }
+ traits::VariableType(_) => {
+ tcx.sess.span_note(
+ obligation.cause.span,
+ "all local variables must have a statically known size");
+ }
+ traits::AssignmentLhsSized => {
+ tcx.sess.span_note(
+ obligation.cause.span,
+ "the left-hand-side of an assignment must have a statically known size");
+ }
+ traits::StructInitializerSized => {
+ tcx.sess.span_note(
+ obligation.cause.span,
+ "structs must have a statically known size to be initialized");
+ }
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::subst::{Subst};
+use middle::traits;
+use middle::ty;
+use middle::ty_fold::{TypeFolder, TypeFoldable};
+use middle::typeck::astconv::AstConv;
+use middle::typeck::check::{FnCtxt, Inherited, blank_fn_ctxt, vtable2, regionck};
+use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
+use middle::typeck::CrateCtxt;
+use util::ppaux::Repr;
+
+use std::collections::HashSet;
+use syntax::ast;
+use syntax::ast_util::{local_def};
+use syntax::codemap::Span;
+use syntax::visit;
+use syntax::visit::Visitor;
+
+pub struct CheckTypeWellFormedVisitor<'ccx, 'tcx:'ccx> {
+ ccx: &'ccx CrateCtxt<'ccx, 'tcx>,
+ cache: HashSet<ty::t>
+}
+
+impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
+ pub fn new(ccx: &'ccx CrateCtxt<'ccx, 'tcx>) -> CheckTypeWellFormedVisitor<'ccx, 'tcx> {
+ CheckTypeWellFormedVisitor { ccx: ccx, cache: HashSet::new() }
+ }
+
+ fn check_item_well_formed(&mut self, ccx: &CrateCtxt, item: &ast::Item) {
+ /*!
+ * Checks that the field types (in a struct def'n) or
+ * argument types (in an enum def'n) are well-formed,
+ * meaning that they do not require any constraints not
+ * declared in the struct definition itself.
+ * For example, this definition would be illegal:
+ *
+ * struct Ref<'a, T> { x: &'a T }
+ *
+ * because the type did not declare that `T:'a`.
+ *
+ * We do this check as a pre-pass before checking fn bodies
+ * because if these constraints are not included it frequently
+ * leads to confusing errors in fn bodies. So it's better to check
+ * the types first.
+ */
+
+ debug!("check_item_well_formed(it.id={}, it.ident={})",
+ item.id,
+ ty::item_path_str(ccx.tcx, local_def(item.id)));
+
+ let ccx = self.ccx;
+ match item.node {
+ ast::ItemImpl(..) => {
+ self.check_impl(item);
+ }
+ ast::ItemFn(..) => {
+ self.check_item_type(item);
+ }
+ ast::ItemStatic(..) => {
+ self.check_item_type(item);
+ }
+ ast::ItemStruct(..) => {
+ self.check_type_defn(item, |fcx| {
+ ty::struct_fields(ccx.tcx, local_def(item.id),
+ &fcx.inh.param_env.free_substs)
+ .iter()
+ .map(|f| f.mt.ty)
+ .collect()
+ });
+ }
+ ast::ItemEnum(..) => {
+ self.check_type_defn(item, |fcx| {
+ ty::substd_enum_variants(ccx.tcx, local_def(item.id),
+ &fcx.inh.param_env.free_substs)
+ .iter()
+ .flat_map(|variant| {
+ variant.args
+ .iter()
+ .map(|&arg_ty| arg_ty)
+ })
+ .collect()
+ });
+ }
+ _ => {}
+ }
+ }
+
+ fn with_fcx(&mut self,
+ ccx: &CrateCtxt,
+ item: &ast::Item,
+ f: |&mut CheckTypeWellFormedVisitor, &FnCtxt|) {
+ let item_def_id = local_def(item.id);
+ let polytype = ty::lookup_item_type(ccx.tcx, item_def_id);
+ let param_env =
+ ty::construct_parameter_environment(ccx.tcx,
+ item.span,
+ &polytype.generics,
+ item.id);
+ let inh = Inherited::new(ccx.tcx, param_env);
+ let fcx = blank_fn_ctxt(ccx, &inh, polytype.ty, item.id);
+ f(self, &fcx);
+ vtable2::select_all_fcx_obligations_or_error(&fcx);
+ regionck::regionck_item(&fcx, item);
+ vtable2::check_builtin_bound_obligations(&fcx);
+ }
+
+ fn check_type_defn(&mut self,
+ item: &ast::Item,
+ lookup_fields: |&FnCtxt| -> Vec<ty::t>)
+ {
+ /*!
+ * In a type definition, we check that the types of the fields are
+ * well-formed.
+ */
+
+ self.with_fcx(self.ccx, item, |this, fcx| {
+ let field_tys = lookup_fields(fcx);
+ let mut bounds_checker = BoundsChecker::new(fcx, item.span,
+ item.id, Some(&mut this.cache));
+ for &ty in field_tys.iter() {
+ // Regions are checked below.
+ bounds_checker.check_traits_in_ty(ty);
+ }
+
+ regionck::regionck_ensure_component_tys_wf(
+ fcx, item.span, field_tys.as_slice());
+ });
+ }
+
+ fn check_item_type(&mut self,
+ item: &ast::Item)
+ {
+ self.with_fcx(self.ccx, item, |this, fcx| {
+ let mut bounds_checker = BoundsChecker::new(fcx, item.span,
+ item.id, Some(&mut this.cache));
+ let polytype = ty::lookup_item_type(fcx.tcx(), local_def(item.id));
+ let item_ty = polytype.ty.subst(fcx.tcx(), &fcx.inh.param_env.free_substs);
+ bounds_checker.check_traits_in_ty(item_ty);
+ });
+ }
+
+ fn check_impl(&mut self,
+ item: &ast::Item)
+ {
+ self.with_fcx(self.ccx, item, |this, fcx| {
+ let mut bounds_checker = BoundsChecker::new(fcx, item.span,
+ item.id, Some(&mut this.cache));
+
+ let self_ty = ty::node_id_to_type(fcx.tcx(), item.id);
+ let self_ty = self_ty.subst(fcx.tcx(), &fcx.inh.param_env.free_substs);
+
+ bounds_checker.check_traits_in_ty(self_ty);
+
+ let trait_ref = match ty::impl_trait_ref(fcx.tcx(), local_def(item.id)) {
+ None => { return; }
+ Some(t) => { t }
+ };
+ let trait_ref = (*trait_ref).subst(fcx.tcx(), &fcx.inh.param_env.free_substs);
+
+ // We are stricter on the trait-ref in an impl than the
+ // self-type. In particular, we enforce region
+ // relationships. The reason for this is that (at least
+ // presently) "applying" an impl does not require that the
+ // application site check the well-formedness constraints on the
+ // trait reference. Instead, this is done at the impl site.
+ // Arguably this is wrong and we should treat the trait-reference
+ // the same way as we treat the self-type.
+ bounds_checker.check_trait_ref(&trait_ref);
+
+ let trait_def = ty::lookup_trait_def(fcx.tcx(), trait_ref.def_id);
+
+ let cause =
+ traits::ObligationCause::new(
+ item.span,
+ traits::ItemObligation(trait_ref.def_id));
+
+ // Find the supertrait bounds. This will add `int:Bar`.
+ //
+ // FIXME -- This is a bit ill-factored. There is very similar
+ // code in traits::util::obligations_for_generics.
+ fcx.add_region_obligations_for_type_parameter(item.span,
+ ty::ParamTy::for_self(trait_ref.def_id),
+ &trait_def.bounds,
+ trait_ref.self_ty());
+ for builtin_bound in trait_def.bounds.builtin_bounds.iter() {
+ fcx.register_obligation(
+ traits::obligation_for_builtin_bound(fcx.tcx(),
+ cause,
+ trait_ref.self_ty(),
+ builtin_bound));
+ }
+ for trait_bound in trait_def.bounds.trait_bounds.iter() {
+ let trait_bound = trait_bound.subst(fcx.tcx(), &trait_ref.substs);
+ fcx.register_obligation(
+ traits::Obligation::new(cause, trait_bound));
+ }
+ });
+ }
+}
+
+impl<'ccx, 'tcx, 'v> Visitor<'v> for CheckTypeWellFormedVisitor<'ccx, 'tcx> {
+ fn visit_item(&mut self, i: &'v ast::Item) {
+ self.check_item_well_formed(self.ccx, i);
+ visit::walk_item(self, i);
+ }
+}
+
+pub struct BoundsChecker<'cx,'tcx:'cx> {
+ fcx: &'cx FnCtxt<'cx,'tcx>,
+ span: Span,
+ scope_id: ast::NodeId,
+ binding_count: uint,
+ cache: Option<&'cx mut HashSet<ty::t>>,
+}
+
+impl<'cx,'tcx> BoundsChecker<'cx,'tcx> {
+ pub fn new(fcx: &'cx FnCtxt<'cx,'tcx>,
+ span: Span,
+ scope_id: ast::NodeId,
+ cache: Option<&'cx mut HashSet<ty::t>>)
+ -> BoundsChecker<'cx,'tcx> {
+ BoundsChecker { fcx: fcx, span: span, scope_id: scope_id,
+ cache: cache, binding_count: 0 }
+ }
+
+ pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef) {
+ /*!
+ * Given a trait ref like `A : Trait<B>`, where `Trait` is
+ * defined as (say):
+ *
+ * trait Trait<B:OtherTrait> : Copy { ... }
+ *
+ * This routine will check that `B : OtherTrait` and `A :
+ * Trait<B>`. It will also recursively check that the types
+ * `A` and `B` are well-formed.
+ *
+ * Note that it does not (currently, at least)
+ * check that `A : Copy` (that check is delegated to the point
+ * where impl `A : Trait<B>` is implemented).
+ */
+
+ let trait_def = ty::lookup_trait_def(self.fcx.tcx(), trait_ref.def_id);
+
+ self.fcx.add_obligations_for_parameters(
+ traits::ObligationCause::new(
+ self.span,
+ traits::ItemObligation(trait_ref.def_id)),
+ &trait_ref.substs,
+ &trait_def.generics);
+
+ for &ty in trait_ref.substs.types.iter() {
+ self.check_traits_in_ty(ty);
+ }
+ }
+
+ pub fn check_ty(&mut self, ty: ty::t) {
+ ty.fold_with(self);
+ }
+
+ fn check_traits_in_ty(&mut self, ty: ty::t) {
+ // When checking types outside of a type def'n, we ignore
+ // region obligations. See discussion below in fold_ty().
+ self.binding_count += 1;
+ ty.fold_with(self);
+ self.binding_count -= 1;
+ }
+}
+
+impl<'cx,'tcx> TypeFolder<'tcx> for BoundsChecker<'cx,'tcx> {
+ fn tcx(&self) -> &ty::ctxt<'tcx> {
+ self.fcx.tcx()
+ }
+
+ fn fold_ty(&mut self, t: ty::t) -> ty::t {
+ debug!("BoundsChecker t={}",
+ t.repr(self.tcx()));
+
+ match self.cache {
+ Some(ref mut cache) => {
+ if !cache.insert(t) {
+ // Already checked this type! Don't check again.
+ debug!("cached");
+ return t;
+ }
+ }
+ None => { }
+ }
+
+ match ty::get(t).sty{
+ ty::ty_struct(type_id, ref substs) |
+ ty::ty_enum(type_id, ref substs) => {
+ let polytype = ty::lookup_item_type(self.fcx.tcx(), type_id);
+
+ if self.binding_count == 0 {
+ self.fcx.add_obligations_for_parameters(
+ traits::ObligationCause::new(self.span,
+ traits::ItemObligation(type_id)),
+ substs,
+ &polytype.generics);
+ } else {
+ // There are two circumstances in which we ignore
+ // region obligations.
+ //
+ // The first is when we are inside of a closure
+ // type. This is because in that case the region
+ // obligations for the parameter types are things
+ // that the closure body gets to assume and the
+ // caller must prove at the time of call. In other
+ // words, if there is a type like `<'a, 'b> | &'a
+ // &'b int |`, it is well-formed, and caller will
+ // have to show that `'b : 'a` at the time of
+ // call.
+ //
+ // The second is when we are checking for
+ // well-formedness outside of a type def'n or fn
+ // body. This is for a similar reason: in general,
+ // we only do WF checking for regions in the
+ // result of expressions and type definitions, so
+ // as to allow for implicit where clauses.
+ //
+ // (I believe we should do the same for traits, but
+ // that will require an RFC. -nmatsakis)
+ self.fcx.add_trait_obligations_for_generics(
+ traits::ObligationCause::new(self.span,
+ traits::ItemObligation(type_id)),
+ substs,
+ &polytype.generics);
+ }
+
+ self.fold_substs(substs);
+ }
+ ty::ty_bare_fn(ty::BareFnTy{sig: ref fn_sig, ..}) |
+ ty::ty_closure(box ty::ClosureTy{sig: ref fn_sig, ..}) => {
+ self.binding_count += 1;
+
+ let (_, fn_sig) =
+ replace_late_bound_regions_in_fn_sig(
+ self.fcx.tcx(), fn_sig,
+ |br| ty::ReFree(ty::FreeRegion{scope_id: self.scope_id,
+ bound_region: br}));
+
+ debug!("late-bound regions replaced: {}",
+ fn_sig.repr(self.tcx()));
+
+ self.fold_sig(&fn_sig);
+
+ self.binding_count -= 1;
+ }
+ ref sty => {
+ self.fold_sty(sty);
+ }
+ }
+
+ t // we're not folding to produce a new type, so just return `t` here
+ }
+}
wbcx.visit_expr(e);
wbcx.visit_upvar_borrow_map();
wbcx.visit_unboxed_closures();
+ wbcx.visit_object_cast_map();
}
pub fn resolve_type_vars_in_fn(fcx: &FnCtxt,
}
wbcx.visit_upvar_borrow_map();
wbcx.visit_unboxed_closures();
+ wbcx.visit_object_cast_map();
}
pub fn resolve_impl_res(infcx: &infer::InferCtxt,
self.visit_node_id(ResolvingExpr(e.span), e.id);
self.visit_method_map_entry(ResolvingExpr(e.span),
MethodCall::expr(e.id));
- self.visit_vtable_map_entry(ResolvingExpr(e.span),
- MethodCall::expr(e.id));
match e.node {
ast::ExprFnBlock(_, ref decl, _) |
}
}
+ fn visit_object_cast_map(&self) {
+ if self.fcx.writeback_errors.get() {
+ return
+ }
+
+ for (&node_id, trait_ref) in self.fcx
+ .inh
+ .object_cast_map
+ .borrow()
+ .iter()
+ {
+ let span = ty::expr_span(self.tcx(), node_id);
+ let reason = ResolvingExpr(span);
+ let closure_ty = self.resolve(trait_ref, reason);
+ self.tcx()
+ .object_cast_map
+ .borrow_mut()
+ .insert(node_id, closure_ty);
+ }
+ }
+
fn visit_node_id(&self, reason: ResolveReason, id: ast::NodeId) {
// Resolve any borrowings for the node with id `id`
self.visit_adjustments(reason, id);
Some(adjustment) => {
let adj_object = ty::adjust_is_object(&adjustment);
let resolved_adjustment = match adjustment {
- ty::AutoAddEnv(store) => {
+ ty::AdjustAddEnv(store) => {
// FIXME(eddyb) #2190 Allow only statically resolved
// bare functions to coerce to a closure to avoid
// constructing (slower) indirect call wrappers.
}
}
- ty::AutoAddEnv(self.resolve(&store, reason))
+ ty::AdjustAddEnv(self.resolve(&store, reason))
}
- ty::AutoDerefRef(adj) => {
+ ty::AdjustDerefRef(adj) => {
for autoderef in range(0, adj.autoderefs) {
let method_call = MethodCall::autoderef(id, autoderef);
self.visit_method_map_entry(reason, method_call);
- self.visit_vtable_map_entry(reason, method_call);
}
if adj_object {
let method_call = MethodCall::autoobject(id);
self.visit_method_map_entry(reason, method_call);
- self.visit_vtable_map_entry(reason, method_call);
}
- ty::AutoDerefRef(ty::AutoDerefRef {
+ ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: adj.autoderefs,
autoref: self.resolve(&adj.autoref, reason),
})
}
}
- fn visit_vtable_map_entry(&self,
- reason: ResolveReason,
- vtable_key: MethodCall) {
- // Resolve any vtable map entry
- match self.fcx.inh.vtable_map.borrow_mut().pop(&vtable_key) {
- Some(origins) => {
- let r_origins = self.resolve(&origins, reason);
- debug!("writeback::resolve_vtable_map_entry(\
- vtable_key={}, vtables={:?})",
- vtable_key, r_origins.repr(self.tcx()));
- self.tcx().vtable_map.borrow_mut().insert(vtable_key, r_origins);
- }
- None => {}
- }
- }
-
fn resolve<T:ResolveIn>(&self, t: &T, reason: ResolveReason) -> T {
t.resolve_in(&mut Resolver::new(self.fcx, reason))
}
}
}
}
+
+///////////////////////////////////////////////////////////////////////////
+// During type check, we store promises with the result of trait
+// lookup rather than the actual results (because the results are not
+// necessarily available immediately). These routines unwind the
+// promises. It is expected that we will have already reported any
+// errors that may be encountered, so if the promises store an error,
+// a dummy result is returned.
+++ /dev/null
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Coherence phase
-//
-// The job of the coherence phase of typechecking is to ensure that each trait
-// has at most one implementation for each type. Then we build a mapping from
-// each trait in the system to its implementations.
-
-
-use metadata::csearch::{each_impl, get_impl_trait, each_implementation_for_trait};
-use metadata::csearch;
-use middle::subst;
-use middle::subst::{Substs};
-use middle::ty::get;
-use middle::ty::{ImplContainer, ImplOrTraitItemId, MethodTraitItemId};
-use middle::ty::{lookup_item_type};
-use middle::ty::{t, ty_bool, ty_char, ty_bot, ty_box, ty_enum, ty_err};
-use middle::ty::{ty_str, ty_vec, ty_float, ty_infer, ty_int, ty_nil, ty_open};
-use middle::ty::{ty_param, Polytype, ty_ptr};
-use middle::ty::{ty_rptr, ty_struct, ty_trait, ty_tup};
-use middle::ty::{ty_uint, ty_unboxed_closure, ty_uniq, ty_bare_fn};
-use middle::ty::{ty_closure};
-use middle::ty::type_is_ty_var;
-use middle::subst::Subst;
-use middle::ty;
-use middle::typeck::CrateCtxt;
-use middle::typeck::infer::combine::Combine;
-use middle::typeck::infer::InferCtxt;
-use middle::typeck::infer::{new_infer_ctxt, resolve_ivar, resolve_type};
-use middle::typeck::infer;
-use util::ppaux::Repr;
-use middle::def::{DefStruct, DefTy};
-use syntax::ast::{Crate, DefId};
-use syntax::ast::{Item, ItemEnum, ItemImpl, ItemMod, ItemStruct};
-use syntax::ast::{LOCAL_CRATE, TraitRef, TyPath};
-use syntax::ast;
-use syntax::ast_map::NodeItem;
-use syntax::ast_map;
-use syntax::ast_util::{local_def};
-use syntax::codemap::{Span, DUMMY_SP};
-use syntax::parse::token;
-use syntax::visit;
-
-use std::collections::HashSet;
-use std::cell::RefCell;
-use std::rc::Rc;
-
-struct UniversalQuantificationResult {
- monotype: t
-}
-
-fn get_base_type(inference_context: &InferCtxt,
- span: Span,
- original_type: t)
- -> Option<t> {
- let resolved_type = match resolve_type(inference_context,
- Some(span),
- original_type,
- resolve_ivar) {
- Ok(resulting_type) if !type_is_ty_var(resulting_type) => resulting_type,
- _ => {
- inference_context.tcx.sess.span_fatal(span,
- "the type of this value must be known in order \
- to determine the base type");
- }
- };
-
- match get(resolved_type).sty {
- ty_enum(..) | ty_struct(..) | ty_unboxed_closure(..) => {
- debug!("(getting base type) found base type");
- Some(resolved_type)
- }
-
- _ if ty::type_is_trait(resolved_type) => {
- debug!("(getting base type) found base type (trait)");
- Some(resolved_type)
- }
-
- ty_nil | ty_bot | ty_bool | ty_char | ty_int(..) | ty_uint(..) | ty_float(..) |
- ty_str(..) | ty_vec(..) | ty_bare_fn(..) | ty_closure(..) | ty_tup(..) |
- ty_infer(..) | ty_param(..) | ty_err | ty_open(..) |
- ty_box(_) | ty_uniq(_) | ty_ptr(_) | ty_rptr(_, _) => {
- debug!("(getting base type) no base type; found {:?}",
- get(original_type).sty);
- None
- }
- ty_trait(..) => fail!("should have been caught")
- }
-}
-
-fn type_is_defined_in_local_crate(tcx: &ty::ctxt, original_type: t) -> bool {
- /*!
- *
- * For coherence, when we have `impl Trait for Type`, we need to
- * guarantee that `Type` is "local" to the
- * crate. For our purposes, this means that it must contain
- * some nominal type defined in this crate.
- */
-
- let mut found_nominal = false;
- ty::walk_ty(original_type, |t| {
- match get(t).sty {
- ty_enum(def_id, _) |
- ty_struct(def_id, _) |
- ty_unboxed_closure(def_id, _) => {
- if def_id.krate == ast::LOCAL_CRATE {
- found_nominal = true;
- }
- }
- ty_trait(box ty::TyTrait { def_id, .. }) => {
- if def_id.krate == ast::LOCAL_CRATE {
- found_nominal = true;
- }
- }
- ty_uniq(..) => {
- match tcx.lang_items.owned_box() {
- Some(did) if did.krate == ast::LOCAL_CRATE => {
- found_nominal = true;
- }
- _ => {}
- }
- }
- ty_box(..) => {
- match tcx.lang_items.gc() {
- Some(did) if did.krate == ast::LOCAL_CRATE => {
- found_nominal = true;
- }
- _ => {}
- }
- }
-
- _ => { }
- }
- });
- return found_nominal;
-}
-
-// Returns the def ID of the base type, if there is one.
-fn get_base_type_def_id(inference_context: &InferCtxt,
- span: Span,
- original_type: t)
- -> Option<DefId> {
- match get_base_type(inference_context, span, original_type) {
- None => None,
- Some(base_type) => {
- match get(base_type).sty {
- ty_enum(def_id, _) |
- ty_struct(def_id, _) |
- ty_unboxed_closure(def_id, _) => {
- Some(def_id)
- }
- ty_ptr(ty::mt {ty, ..}) |
- ty_rptr(_, ty::mt {ty, ..}) |
- ty_uniq(ty) => {
- match ty::get(ty).sty {
- ty_trait(box ty::TyTrait { def_id, .. }) => {
- Some(def_id)
- }
- _ => {
- fail!("get_base_type() returned a type that wasn't an \
- enum, struct, or trait");
- }
- }
- }
- ty_trait(box ty::TyTrait { def_id, .. }) => {
- Some(def_id)
- }
- _ => {
- fail!("get_base_type() returned a type that wasn't an \
- enum, struct, or trait");
- }
- }
- }
- }
-}
-
-struct CoherenceChecker<'a, 'tcx: 'a> {
- crate_context: &'a CrateCtxt<'a, 'tcx>,
- inference_context: InferCtxt<'a, 'tcx>,
-}
-
-struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
- cc: &'a CoherenceChecker<'a, 'tcx>
-}
-
-impl<'a, 'tcx, 'v> visit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
- fn visit_item(&mut self, item: &Item) {
-
- //debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
-
- match item.node {
- ItemImpl(_, ref opt_trait, _, _) => {
- match opt_trait.clone() {
- Some(opt_trait) => {
- self.cc.check_implementation(item, [opt_trait]);
- }
- None => self.cc.check_implementation(item, [])
- }
- }
- _ => {
- // Nothing to do.
- }
- };
-
- visit::walk_item(self, item);
- }
-}
-
-struct PrivilegedScopeVisitor<'a, 'tcx: 'a> {
- cc: &'a CoherenceChecker<'a, 'tcx>
-}
-
-impl<'a, 'tcx, 'v> visit::Visitor<'v> for PrivilegedScopeVisitor<'a, 'tcx> {
- fn visit_item(&mut self, item: &Item) {
-
- match item.node {
- ItemMod(ref module_) => {
- // Then visit the module items.
- visit::walk_mod(self, module_);
- }
- ItemImpl(_, None, ref ast_ty, _) => {
- if !self.cc.ast_type_is_defined_in_local_crate(&**ast_ty) {
- // This is an error.
- let session = &self.cc.crate_context.tcx.sess;
- span_err!(session, item.span, E0116,
- "cannot associate methods with a type outside the \
- crate the type is defined in; define and implement \
- a trait or new type instead");
- }
- }
- ItemImpl(_, Some(ref trait_ref), _, _) => {
- let tcx = self.cc.crate_context.tcx;
- // `for_ty` is `Type` in `impl Trait for Type`
- let for_ty = ty::node_id_to_type(tcx, item.id);
- if !type_is_defined_in_local_crate(tcx, for_ty) {
- // This implementation is not in scope of its base
- // type. This still might be OK if the trait is
- // defined in the same crate.
-
- let trait_def_id =
- self.cc.trait_ref_to_trait_def_id(trait_ref);
-
- if trait_def_id.krate != LOCAL_CRATE {
- let session = &self.cc.crate_context.tcx.sess;
- span_err!(session, item.span, E0117,
- "cannot provide an extension implementation \
- where both trait and type are not defined in this crate");
- }
- }
-
- visit::walk_item(self, item);
- }
- _ => {
- visit::walk_item(self, item);
- }
- }
- }
-}
-
-impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
- fn check(&self, krate: &Crate) {
- // Check implementations and traits. This populates the tables
- // containing the inherent methods and extension methods. It also
- // builds up the trait inheritance table.
- let mut visitor = CoherenceCheckVisitor { cc: self };
- visit::walk_crate(&mut visitor, krate);
-
- // Check that there are no overlapping trait instances
- self.check_implementation_coherence();
-
- // Check whether traits with base types are in privileged scopes.
- self.check_privileged_scopes(krate);
-
- // Bring in external crates. It's fine for this to happen after the
- // coherence checks, because we ensure by construction that no errors
- // can happen at link time.
- self.add_external_crates();
-
- // Populate the table of destructors. It might seem a bit strange to
- // do this here, but it's actually the most convenient place, since
- // the coherence tables contain the trait -> type mappings.
- self.populate_destructor_table();
- }
-
- fn check_implementation(&self, item: &Item,
- associated_traits: &[TraitRef]) {
- let tcx = self.crate_context.tcx;
- let impl_did = local_def(item.id);
- let self_type = ty::lookup_item_type(tcx, impl_did);
-
- // If there are no traits, then this implementation must have a
- // base type.
-
- if associated_traits.len() == 0 {
- debug!("(checking implementation) no associated traits for item '{}'",
- token::get_ident(item.ident));
-
- match get_base_type_def_id(&self.inference_context,
- item.span,
- self_type.ty) {
- None => {
- let session = &self.crate_context.tcx.sess;
- span_err!(session, item.span, E0118,
- "no base type found for inherent implementation; \
- implement a trait or new type instead");
- }
- Some(_) => {
- // Nothing to do.
- }
- }
- }
-
- let impl_items = self.create_impl_from_item(item);
-
- for associated_trait in associated_traits.iter() {
- let trait_ref = ty::node_id_to_trait_ref(
- self.crate_context.tcx, associated_trait.ref_id);
- debug!("(checking implementation) adding impl for trait '{}', item '{}'",
- trait_ref.repr(self.crate_context.tcx),
- token::get_ident(item.ident));
-
- self.add_trait_impl(trait_ref.def_id, impl_did);
- }
-
- // Add the implementation to the mapping from implementation to base
- // type def ID, if there is a base type for this implementation and
- // the implementation does not have any associated traits.
- match get_base_type_def_id(&self.inference_context,
- item.span,
- self_type.ty) {
- None => {
- // Nothing to do.
- }
- Some(base_type_def_id) => {
- // FIXME: Gather up default methods?
- if associated_traits.len() == 0 {
- self.add_inherent_impl(base_type_def_id, impl_did);
- }
- }
- }
-
- tcx.impl_items.borrow_mut().insert(impl_did, impl_items);
- }
-
- // Creates default method IDs and performs type substitutions for an impl
- // and trait pair. Then, for each provided method in the trait, inserts a
- // `ProvidedMethodInfo` instance into the `provided_method_sources` map.
- fn instantiate_default_methods(
- &self,
- impl_id: DefId,
- trait_ref: &ty::TraitRef,
- all_impl_items: &mut Vec<ImplOrTraitItemId>) {
- let tcx = self.crate_context.tcx;
- debug!("instantiate_default_methods(impl_id={:?}, trait_ref={})",
- impl_id, trait_ref.repr(tcx));
-
- let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
-
- let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
- for trait_method in prov.iter() {
- // Synthesize an ID.
- let new_id = tcx.sess.next_node_id();
- let new_did = local_def(new_id);
-
- debug!("new_did={:?} trait_method={}", new_did, trait_method.repr(tcx));
-
- // Create substitutions for the various trait parameters.
- let new_method_ty =
- Rc::new(subst_receiver_types_in_method_ty(
- tcx,
- impl_id,
- &impl_poly_type,
- trait_ref,
- new_did,
- &**trait_method,
- Some(trait_method.def_id)));
-
- debug!("new_method_ty={}", new_method_ty.repr(tcx));
- all_impl_items.push(MethodTraitItemId(new_did));
-
- // construct the polytype for the method based on the
- // method_ty. it will have all the generics from the
- // impl, plus its own.
- let new_polytype = ty::Polytype {
- generics: new_method_ty.generics.clone(),
- ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
- };
- debug!("new_polytype={}", new_polytype.repr(tcx));
-
- tcx.tcache.borrow_mut().insert(new_did, new_polytype);
- tcx.impl_or_trait_items
- .borrow_mut()
- .insert(new_did, ty::MethodTraitItem(new_method_ty));
-
- // Pair the new synthesized ID up with the
- // ID of the method.
- self.crate_context.tcx.provided_method_sources.borrow_mut()
- .insert(new_did, trait_method.def_id);
- }
- }
-
- fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
- let tcx = self.crate_context.tcx;
- match tcx.inherent_impls.borrow().find(&base_def_id) {
- Some(implementation_list) => {
- implementation_list.borrow_mut().push(impl_def_id);
- return;
- }
- None => {}
- }
-
- tcx.inherent_impls.borrow_mut().insert(base_def_id,
- Rc::new(RefCell::new(vec!(impl_def_id))));
- }
-
- fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
- ty::record_trait_implementation(self.crate_context.tcx,
- base_def_id,
- impl_def_id);
- }
-
- fn check_implementation_coherence(&self) {
- for trait_id in self.crate_context.tcx.trait_impls.borrow().keys() {
- self.check_implementation_coherence_of(*trait_id);
- }
- }
-
- fn check_implementation_coherence_of(&self, trait_def_id: DefId) {
- // Unify pairs of polytypes.
- self.iter_impls_of_trait_local(trait_def_id, |impl_a| {
- let polytype_a =
- self.get_self_type_for_implementation(impl_a);
-
- // "We have an impl of trait <trait_def_id> for type <polytype_a>,
- // and that impl is <impl_a>"
- self.iter_impls_of_trait(trait_def_id, |impl_b| {
-
- // An impl is coherent with itself
- if impl_a != impl_b {
- let polytype_b = self.get_self_type_for_implementation(
- impl_b);
-
- if self.polytypes_unify(polytype_a.clone(), polytype_b) {
- let session = &self.crate_context.tcx.sess;
- span_err!(session, self.span_of_impl(impl_a), E0119,
- "conflicting implementations for trait `{}`",
- ty::item_path_str(self.crate_context.tcx, trait_def_id));
- if impl_b.krate == LOCAL_CRATE {
- span_note!(session, self.span_of_impl(impl_b),
- "note conflicting implementation here");
- } else {
- let crate_store = &self.crate_context.tcx.sess.cstore;
- let cdata = crate_store.get_crate_data(impl_b.krate);
- span_note!(session, self.span_of_impl(impl_a),
- "conflicting implementation in crate `{}`",
- cdata.name);
- }
- }
- }
- })
- })
- }
-
- fn iter_impls_of_trait(&self, trait_def_id: DefId, f: |DefId|) {
- self.iter_impls_of_trait_local(trait_def_id, |x| f(x));
-
- if trait_def_id.krate == LOCAL_CRATE {
- return;
- }
-
- let crate_store = &self.crate_context.tcx.sess.cstore;
- csearch::each_implementation_for_trait(crate_store, trait_def_id, |impl_def_id| {
- // Is this actually necessary?
- let _ = lookup_item_type(self.crate_context.tcx, impl_def_id);
- f(impl_def_id);
- });
- }
-
- fn iter_impls_of_trait_local(&self, trait_def_id: DefId, f: |DefId|) {
- match self.crate_context.tcx.trait_impls.borrow().find(&trait_def_id) {
- Some(impls) => {
- for &impl_did in impls.borrow().iter() {
- f(impl_did);
- }
- }
- None => { /* no impls? */ }
- }
- }
-
- fn polytypes_unify(&self,
- polytype_a: Polytype,
- polytype_b: Polytype)
- -> bool {
- let universally_quantified_a =
- self.universally_quantify_polytype(polytype_a);
- let universally_quantified_b =
- self.universally_quantify_polytype(polytype_b);
-
- return self.can_unify_universally_quantified(
- &universally_quantified_a, &universally_quantified_b) ||
- self.can_unify_universally_quantified(
- &universally_quantified_b, &universally_quantified_a);
- }
-
- // Converts a polytype to a monotype by replacing all parameters with
- // type variables. Returns the monotype and the type variables created.
- fn universally_quantify_polytype(&self, polytype: Polytype)
- -> UniversalQuantificationResult
- {
- let substitutions =
- self.inference_context.fresh_substs_for_type(DUMMY_SP,
- &polytype.generics);
- let monotype = polytype.ty.subst(self.crate_context.tcx, &substitutions);
-
- UniversalQuantificationResult {
- monotype: monotype
- }
- }
-
- fn can_unify_universally_quantified<'a>(&self,
- a: &'a UniversalQuantificationResult,
- b: &'a UniversalQuantificationResult)
- -> bool
- {
- infer::can_mk_subty(&self.inference_context,
- a.monotype,
- b.monotype).is_ok()
- }
-
- fn get_self_type_for_implementation(&self, impl_did: DefId)
- -> Polytype {
- self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
- }
-
- // Privileged scope checking
- fn check_privileged_scopes(&self, krate: &Crate) {
- let mut visitor = PrivilegedScopeVisitor{ cc: self };
- visit::walk_crate(&mut visitor, krate);
- }
-
- fn trait_ref_to_trait_def_id(&self, trait_ref: &TraitRef) -> DefId {
- let def_map = &self.crate_context.tcx.def_map;
- let trait_def = def_map.borrow().get_copy(&trait_ref.ref_id);
- let trait_id = trait_def.def_id();
- return trait_id;
- }
-
- /// For coherence, when we have `impl Type`, we need to guarantee that
- /// `Type` is "local" to the crate. For our purposes, this means that it
- /// must precisely name some nominal type defined in this crate.
- fn ast_type_is_defined_in_local_crate(&self, original_type: &ast::Ty) -> bool {
- match original_type.node {
- TyPath(_, _, path_id) => {
- match self.crate_context.tcx.def_map.borrow().get_copy(&path_id) {
- DefTy(def_id) | DefStruct(def_id) => {
- if def_id.krate != LOCAL_CRATE {
- return false;
- }
-
- // Make sure that this type precisely names a nominal
- // type.
- match self.crate_context.tcx.map.find(def_id.node) {
- None => {
- self.crate_context.tcx.sess.span_bug(
- original_type.span,
- "resolve didn't resolve this type?!");
- }
- Some(NodeItem(item)) => {
- match item.node {
- ItemStruct(..) | ItemEnum(..) => true,
- _ => false,
- }
- }
- Some(_) => false,
- }
- }
- _ => false
- }
- }
- _ => false
- }
- }
-
- // Converts an implementation in the AST to a vector of items.
- fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> {
- match item.node {
- ItemImpl(_, ref trait_refs, _, ref ast_items) => {
- let mut items: Vec<ImplOrTraitItemId> =
- ast_items.iter()
- .map(|ast_item| {
- match *ast_item {
- ast::MethodImplItem(ref ast_method) => {
- MethodTraitItemId(
- local_def(ast_method.id))
- }
- }
- }).collect();
-
- for trait_ref in trait_refs.iter() {
- let ty_trait_ref = ty::node_id_to_trait_ref(
- self.crate_context.tcx,
- trait_ref.ref_id);
-
- self.instantiate_default_methods(local_def(item.id),
- &*ty_trait_ref,
- &mut items);
- }
-
- items
- }
- _ => {
- self.crate_context.tcx.sess.span_bug(item.span,
- "can't convert a non-impl to an impl");
- }
- }
- }
-
- fn span_of_impl(&self, impl_did: DefId) -> Span {
- assert_eq!(impl_did.krate, LOCAL_CRATE);
- self.crate_context.tcx.map.span(impl_did.node)
- }
-
- // External crate handling
-
- fn add_external_impl(&self,
- impls_seen: &mut HashSet<DefId>,
- impl_def_id: DefId) {
- let tcx = self.crate_context.tcx;
- let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
- impl_def_id);
-
- // Make sure we don't visit the same implementation multiple times.
- if !impls_seen.insert(impl_def_id) {
- // Skip this one.
- return
- }
- // Good. Continue.
-
- let _ = lookup_item_type(tcx, impl_def_id);
- let associated_traits = get_impl_trait(tcx, impl_def_id);
-
- // Do a sanity check.
- assert!(associated_traits.is_some());
-
- // Record all the trait items.
- for trait_ref in associated_traits.iter() {
- self.add_trait_impl(trait_ref.def_id, impl_def_id);
- }
-
- // For any methods that use a default implementation, add them to
- // the map. This is a bit unfortunate.
- for item_def_id in impl_items.iter() {
- let impl_item = ty::impl_or_trait_item(tcx, item_def_id.def_id());
- match impl_item {
- ty::MethodTraitItem(ref method) => {
- for &source in method.provided_source.iter() {
- tcx.provided_method_sources
- .borrow_mut()
- .insert(item_def_id.def_id(), source);
- }
- }
- }
- }
-
- tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
- }
-
- // Adds implementations and traits from external crates to the coherence
- // info.
- fn add_external_crates(&self) {
- let mut impls_seen = HashSet::new();
-
- let crate_store = &self.crate_context.tcx.sess.cstore;
- crate_store.iter_crate_data(|crate_number, _crate_metadata| {
- each_impl(crate_store, crate_number, |def_id| {
- assert_eq!(crate_number, def_id.krate);
- self.add_external_impl(&mut impls_seen, def_id)
- })
- })
- }
-
- //
- // Destructors
- //
-
- fn populate_destructor_table(&self) {
- let tcx = self.crate_context.tcx;
- let drop_trait = match tcx.lang_items.drop_trait() {
- Some(id) => id, None => { return }
- };
-
- let impl_items = tcx.impl_items.borrow();
- let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
- None => return, // No types with (new-style) dtors present.
- Some(found_impls) => found_impls
- };
-
- for &impl_did in trait_impls.borrow().iter() {
- let items = impl_items.get(&impl_did);
- if items.len() < 1 {
- // We'll error out later. For now, just don't ICE.
- continue;
- }
- let method_def_id = *items.get(0);
-
- let self_type = self.get_self_type_for_implementation(impl_did);
- match ty::get(self_type.ty).sty {
- ty::ty_enum(type_def_id, _) |
- ty::ty_struct(type_def_id, _) |
- ty::ty_unboxed_closure(type_def_id, _) => {
- tcx.destructor_for_type
- .borrow_mut()
- .insert(type_def_id, method_def_id.def_id());
- tcx.destructors
- .borrow_mut()
- .insert(method_def_id.def_id());
- }
- _ => {
- // Destructors only work on nominal types.
- if impl_did.krate == ast::LOCAL_CRATE {
- {
- match tcx.map.find(impl_did.node) {
- Some(ast_map::NodeItem(item)) => {
- span_err!(tcx.sess, item.span, E0120,
- "the Drop trait may only be implemented on structures");
- }
- _ => {
- tcx.sess.bug("didn't find impl in ast \
- map");
- }
- }
- }
- } else {
- tcx.sess.bug("found external impl of Drop trait on \
- something other than a struct");
- }
- }
- }
- }
- }
-}
-
-pub fn make_substs_for_receiver_types(tcx: &ty::ctxt,
- trait_ref: &ty::TraitRef,
- method: &ty::Method)
- -> subst::Substs
-{
- /*!
- * Substitutes the values for the receiver's type parameters
- * that are found in method, leaving the method's type parameters
- * intact.
- */
-
- let meth_tps: Vec<ty::t> =
- method.generics.types.get_slice(subst::FnSpace)
- .iter()
- .map(|def| ty::mk_param_from_def(tcx, def))
- .collect();
- let meth_regions: Vec<ty::Region> =
- method.generics.regions.get_slice(subst::FnSpace)
- .iter()
- .map(|def| ty::ReEarlyBound(def.def_id.node, def.space,
- def.index, def.name))
- .collect();
- trait_ref.substs.clone().with_method(meth_tps, meth_regions)
-}
-
-fn subst_receiver_types_in_method_ty(tcx: &ty::ctxt,
- impl_id: ast::DefId,
- impl_poly_type: &ty::Polytype,
- trait_ref: &ty::TraitRef,
- new_def_id: ast::DefId,
- method: &ty::Method,
- provided_source: Option<ast::DefId>)
- -> ty::Method
-{
- let combined_substs = make_substs_for_receiver_types(tcx, trait_ref, method);
-
- debug!("subst_receiver_types_in_method_ty: combined_substs={}",
- combined_substs.repr(tcx));
-
- let mut method_generics = method.generics.subst(tcx, &combined_substs);
-
- // replace the type parameters declared on the trait with those
- // from the impl
- for &space in [subst::TypeSpace, subst::SelfSpace].iter() {
- method_generics.types.replace(
- space,
- Vec::from_slice(impl_poly_type.generics.types.get_slice(space)));
- method_generics.regions.replace(
- space,
- Vec::from_slice(impl_poly_type.generics.regions.get_slice(space)));
- }
-
- debug!("subst_receiver_types_in_method_ty: method_generics={}",
- method_generics.repr(tcx));
-
- let method_fty = method.fty.subst(tcx, &combined_substs);
-
- debug!("subst_receiver_types_in_method_ty: method_ty={}",
- method.fty.repr(tcx));
-
- ty::Method::new(
- method.ident,
- method_generics,
- method_fty,
- method.explicit_self,
- method.vis,
- new_def_id,
- ImplContainer(impl_id),
- provided_source
- )
-}
-
-pub fn check_coherence(crate_context: &CrateCtxt) {
- CoherenceChecker {
- crate_context: crate_context,
- inference_context: new_infer_ctxt(crate_context.tcx),
- }.check(crate_context.tcx.map.krate());
-}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Coherence phase
+//
+// The job of the coherence phase of typechecking is to ensure that
+// each trait has at most one implementation for each type. This is
+// done by the orphan and overlap modules. Then we build up various
+// mappings. That mapping code resides here.
+
+
+use metadata::csearch::{each_impl, get_impl_trait};
+use metadata::csearch;
+use middle::subst;
+use middle::subst::{Substs};
+use middle::ty::get;
+use middle::ty::{ImplContainer, ImplOrTraitItemId, MethodTraitItemId};
+use middle::ty::{TypeTraitItemId, lookup_item_type};
+use middle::ty::{t, ty_bool, ty_char, ty_bot, ty_box, ty_enum, ty_err};
+use middle::ty::{ty_str, ty_vec, ty_float, ty_infer, ty_int, ty_nil, ty_open};
+use middle::ty::{ty_param, Polytype, ty_ptr};
+use middle::ty::{ty_rptr, ty_struct, ty_trait, ty_tup};
+use middle::ty::{ty_uint, ty_unboxed_closure, ty_uniq, ty_bare_fn};
+use middle::ty::{ty_closure};
+use middle::ty::type_is_ty_var;
+use middle::subst::Subst;
+use middle::ty;
+use middle::typeck::CrateCtxt;
+use middle::typeck::infer::combine::Combine;
+use middle::typeck::infer::InferCtxt;
+use middle::typeck::infer::{new_infer_ctxt, resolve_ivar, resolve_type};
+use std::collections::{HashSet};
+use std::cell::RefCell;
+use std::rc::Rc;
+use syntax::ast::{Crate, DefId};
+use syntax::ast::{Item, ItemImpl};
+use syntax::ast::{LOCAL_CRATE, TraitRef};
+use syntax::ast;
+use syntax::ast_map::NodeItem;
+use syntax::ast_map;
+use syntax::ast_util::{local_def};
+use syntax::codemap::{Span};
+use syntax::parse::token;
+use syntax::visit;
+use util::nodemap::{DefIdMap, FnvHashMap};
+use util::ppaux::Repr;
+
+mod orphan;
+mod overlap;
+
+fn get_base_type(inference_context: &InferCtxt,
+ span: Span,
+ original_type: t)
+ -> Option<t> {
+ let resolved_type = match resolve_type(inference_context,
+ Some(span),
+ original_type,
+ resolve_ivar) {
+ Ok(resulting_type) if !type_is_ty_var(resulting_type) => resulting_type,
+ _ => {
+ inference_context.tcx.sess.span_fatal(span,
+ "the type of this value must be known in order \
+ to determine the base type");
+ }
+ };
+
+ match get(resolved_type).sty {
+ ty_enum(..) | ty_struct(..) | ty_unboxed_closure(..) => {
+ debug!("(getting base type) found base type");
+ Some(resolved_type)
+ }
+
+ _ if ty::type_is_trait(resolved_type) => {
+ debug!("(getting base type) found base type (trait)");
+ Some(resolved_type)
+ }
+
+ ty_nil | ty_bot | ty_bool | ty_char | ty_int(..) | ty_uint(..) | ty_float(..) |
+ ty_str(..) | ty_vec(..) | ty_bare_fn(..) | ty_closure(..) | ty_tup(..) |
+ ty_infer(..) | ty_param(..) | ty_err | ty_open(..) |
+ ty_box(_) | ty_uniq(_) | ty_ptr(_) | ty_rptr(_, _) => {
+ debug!("(getting base type) no base type; found {:?}",
+ get(original_type).sty);
+ None
+ }
+ ty_trait(..) => fail!("should have been caught")
+ }
+}
+
+// Returns the def ID of the base type, if there is one.
+fn get_base_type_def_id(inference_context: &InferCtxt,
+ span: Span,
+ original_type: t)
+ -> Option<DefId> {
+ match get_base_type(inference_context, span, original_type) {
+ None => None,
+ Some(base_type) => {
+ match get(base_type).sty {
+ ty_enum(def_id, _) |
+ ty_struct(def_id, _) |
+ ty_unboxed_closure(def_id, _) => {
+ Some(def_id)
+ }
+ ty_ptr(ty::mt {ty, ..}) |
+ ty_rptr(_, ty::mt {ty, ..}) |
+ ty_uniq(ty) => {
+ match ty::get(ty).sty {
+ ty_trait(box ty::TyTrait { def_id, .. }) => {
+ Some(def_id)
+ }
+ _ => {
+ fail!("get_base_type() returned a type that wasn't an \
+ enum, struct, or trait");
+ }
+ }
+ }
+ ty_trait(box ty::TyTrait { def_id, .. }) => {
+ Some(def_id)
+ }
+ _ => {
+ fail!("get_base_type() returned a type that wasn't an \
+ enum, struct, or trait");
+ }
+ }
+ }
+ }
+}
+
+struct CoherenceChecker<'a, 'tcx: 'a> {
+ crate_context: &'a CrateCtxt<'a, 'tcx>,
+ inference_context: InferCtxt<'a, 'tcx>,
+ inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<ast::DefId>>>>>,
+}
+
+struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'tcx>
+}
+
+impl<'a, 'tcx, 'v> visit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
+ fn visit_item(&mut self, item: &Item) {
+
+ //debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
+
+ match item.node {
+ ItemImpl(_, ref opt_trait, _, _) => {
+ match opt_trait.clone() {
+ Some(opt_trait) => {
+ self.cc.check_implementation(item, [opt_trait]);
+ }
+ None => self.cc.check_implementation(item, [])
+ }
+ }
+ _ => {
+ // Nothing to do.
+ }
+ };
+
+ visit::walk_item(self, item);
+ }
+}
+
+impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
+ fn check(&self, krate: &Crate) {
+ // Check implementations and traits. This populates the tables
+ // containing the inherent methods and extension methods. It also
+ // builds up the trait inheritance table.
+ let mut visitor = CoherenceCheckVisitor { cc: self };
+ visit::walk_crate(&mut visitor, krate);
+
+ // Copy over the inherent impls we gathered up during the walk into
+ // the tcx.
+ let mut tcx_inherent_impls =
+ self.crate_context.tcx.inherent_impls.borrow_mut();
+ for (k, v) in self.inherent_impls.borrow().iter() {
+ tcx_inherent_impls.insert((*k).clone(),
+ Rc::new((*v.borrow()).clone()));
+ }
+
+ // Bring in external crates. It's fine for this to happen after the
+ // coherence checks, because we ensure by construction that no errors
+ // can happen at link time.
+ self.add_external_crates();
+
+ // Populate the table of destructors. It might seem a bit strange to
+ // do this here, but it's actually the most convenient place, since
+ // the coherence tables contain the trait -> type mappings.
+ self.populate_destructor_table();
+ }
+
+ fn check_implementation(&self,
+ item: &Item,
+ associated_traits: &[TraitRef]) {
+ let tcx = self.crate_context.tcx;
+ let impl_did = local_def(item.id);
+ let self_type = ty::lookup_item_type(tcx, impl_did);
+
+ // If there are no traits, then this implementation must have a
+ // base type.
+
+ let impl_items = self.create_impl_from_item(item);
+
+ for associated_trait in associated_traits.iter() {
+ let trait_ref = ty::node_id_to_trait_ref(
+ self.crate_context.tcx, associated_trait.ref_id);
+ debug!("(checking implementation) adding impl for trait '{}', item '{}'",
+ trait_ref.repr(self.crate_context.tcx),
+ token::get_ident(item.ident));
+
+ self.add_trait_impl(trait_ref.def_id, impl_did);
+ }
+
+ // Add the implementation to the mapping from implementation to base
+ // type def ID, if there is a base type for this implementation and
+ // the implementation does not have any associated traits.
+ match get_base_type_def_id(&self.inference_context,
+ item.span,
+ self_type.ty) {
+ None => {
+ // Nothing to do.
+ }
+ Some(base_type_def_id) => {
+ // FIXME: Gather up default methods?
+ if associated_traits.len() == 0 {
+ self.add_inherent_impl(base_type_def_id, impl_did);
+ }
+ }
+ }
+
+ tcx.impl_items.borrow_mut().insert(impl_did, impl_items);
+ }
+
+    // Creates default method IDs and performs type substitutions for an impl
+    // and trait pair. Then, for each provided method in the trait, records the
+    // synthesized method ID -> trait method def ID in `provided_method_sources`.
+ fn instantiate_default_methods(
+ &self,
+ impl_id: DefId,
+ trait_ref: &ty::TraitRef,
+ all_impl_items: &mut Vec<ImplOrTraitItemId>) {
+ let tcx = self.crate_context.tcx;
+ debug!("instantiate_default_methods(impl_id={:?}, trait_ref={})",
+ impl_id, trait_ref.repr(tcx));
+
+ let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
+
+ let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
+ for trait_method in prov.iter() {
+ // Synthesize an ID.
+ let new_id = tcx.sess.next_node_id();
+ let new_did = local_def(new_id);
+
+ debug!("new_did={:?} trait_method={}", new_did, trait_method.repr(tcx));
+
+ // Create substitutions for the various trait parameters.
+ let new_method_ty =
+ Rc::new(subst_receiver_types_in_method_ty(
+ tcx,
+ impl_id,
+ &impl_poly_type,
+ trait_ref,
+ new_did,
+ &**trait_method,
+ Some(trait_method.def_id)));
+
+ debug!("new_method_ty={}", new_method_ty.repr(tcx));
+ all_impl_items.push(MethodTraitItemId(new_did));
+
+ // construct the polytype for the method based on the
+ // method_ty. it will have all the generics from the
+ // impl, plus its own.
+ let new_polytype = ty::Polytype {
+ generics: new_method_ty.generics.clone(),
+ ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
+ };
+ debug!("new_polytype={}", new_polytype.repr(tcx));
+
+ tcx.tcache.borrow_mut().insert(new_did, new_polytype);
+ tcx.impl_or_trait_items
+ .borrow_mut()
+ .insert(new_did, ty::MethodTraitItem(new_method_ty));
+
+ // Pair the new synthesized ID up with the
+ // ID of the method.
+ self.crate_context.tcx.provided_method_sources.borrow_mut()
+ .insert(new_did, trait_method.def_id);
+ }
+ }
+
+ fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
+ match self.inherent_impls.borrow().find(&base_def_id) {
+ Some(implementation_list) => {
+ implementation_list.borrow_mut().push(impl_def_id);
+ return;
+ }
+ None => {}
+ }
+
+ self.inherent_impls.borrow_mut().insert(
+ base_def_id,
+ Rc::new(RefCell::new(vec!(impl_def_id))));
+ }
+
+ fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
+ debug!("add_trait_impl: base_def_id={} impl_def_id={}",
+ base_def_id, impl_def_id);
+ ty::record_trait_implementation(self.crate_context.tcx,
+ base_def_id,
+ impl_def_id);
+ }
+
+ fn get_self_type_for_implementation(&self, impl_did: DefId)
+ -> Polytype {
+ self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
+ }
+
+ // Converts an implementation in the AST to a vector of items.
+ fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> {
+ match item.node {
+ ItemImpl(_, ref trait_refs, _, ref ast_items) => {
+ let mut items: Vec<ImplOrTraitItemId> =
+ ast_items.iter()
+ .map(|ast_item| {
+ match *ast_item {
+ ast::MethodImplItem(ref ast_method) => {
+ MethodTraitItemId(
+ local_def(ast_method.id))
+ }
+ ast::TypeImplItem(ref typedef) => {
+ TypeTraitItemId(local_def(typedef.id))
+ }
+ }
+ }).collect();
+
+ for trait_ref in trait_refs.iter() {
+ let ty_trait_ref = ty::node_id_to_trait_ref(
+ self.crate_context.tcx,
+ trait_ref.ref_id);
+
+ self.instantiate_default_methods(local_def(item.id),
+ &*ty_trait_ref,
+ &mut items);
+ }
+
+ items
+ }
+ _ => {
+ self.crate_context.tcx.sess.span_bug(item.span,
+ "can't convert a non-impl to an impl");
+ }
+ }
+ }
+
+ // External crate handling
+
+    fn add_external_impl(&self,
+                         impls_seen: &mut HashSet<DefId>,
+                         impl_def_id: DefId) {
+        let tcx = self.crate_context.tcx;
+
+        // Make sure we don't visit the same implementation multiple times.
+        // Do this before decoding the impl's items from crate metadata so
+        // that duplicates skip that work entirely.
+        if !impls_seen.insert(impl_def_id) {
+            // Skip this one.
+            return
+        }
+
+        let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
+                                                 impl_def_id);
+        let _ = lookup_item_type(tcx, impl_def_id);
+        let associated_traits = get_impl_trait(tcx, impl_def_id);
+
+        // Do a sanity check.
+        assert!(associated_traits.is_some());
+
+        // Record all the trait items.
+        for trait_ref in associated_traits.iter() {
+            self.add_trait_impl(trait_ref.def_id, impl_def_id);
+        }
+
+        // For any methods that use a default implementation, record the
+        // source of that default in `provided_method_sources`.
+        for item_def_id in impl_items.iter() {
+            let impl_item = ty::impl_or_trait_item(tcx, item_def_id.def_id());
+            match impl_item {
+                ty::MethodTraitItem(ref method) => {
+                    for &source in method.provided_source.iter() {
+                        tcx.provided_method_sources
+                           .borrow_mut()
+                           .insert(item_def_id.def_id(), source);
+                    }
+                }
+                ty::TypeTraitItem(_) => {}
+            }
+        }
+
+        tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
+    }
+
+ // Adds implementations and traits from external crates to the coherence
+ // info.
+ fn add_external_crates(&self) {
+ let mut impls_seen = HashSet::new();
+
+ let crate_store = &self.crate_context.tcx.sess.cstore;
+ crate_store.iter_crate_data(|crate_number, _crate_metadata| {
+ each_impl(crate_store, crate_number, |def_id| {
+ assert_eq!(crate_number, def_id.krate);
+ self.add_external_impl(&mut impls_seen, def_id)
+ })
+ })
+ }
+
+ //
+ // Destructors
+ //
+
+ fn populate_destructor_table(&self) {
+ let tcx = self.crate_context.tcx;
+ let drop_trait = match tcx.lang_items.drop_trait() {
+ Some(id) => id, None => { return }
+ };
+
+ let impl_items = tcx.impl_items.borrow();
+ let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
+ None => return, // No types with (new-style) dtors present.
+ Some(found_impls) => found_impls
+ };
+
+ for &impl_did in trait_impls.borrow().iter() {
+ let items = impl_items.get(&impl_did);
+ if items.len() < 1 {
+ // We'll error out later. For now, just don't ICE.
+ continue;
+ }
+ let method_def_id = *items.get(0);
+
+ let self_type = self.get_self_type_for_implementation(impl_did);
+ match ty::get(self_type.ty).sty {
+ ty::ty_enum(type_def_id, _) |
+ ty::ty_struct(type_def_id, _) |
+ ty::ty_unboxed_closure(type_def_id, _) => {
+ tcx.destructor_for_type
+ .borrow_mut()
+ .insert(type_def_id, method_def_id.def_id());
+ tcx.destructors
+ .borrow_mut()
+ .insert(method_def_id.def_id());
+ }
+ _ => {
+ // Destructors only work on nominal types.
+ if impl_did.krate == ast::LOCAL_CRATE {
+ {
+ match tcx.map.find(impl_did.node) {
+ Some(ast_map::NodeItem(item)) => {
+ span_err!(tcx.sess, item.span, E0120,
+ "the Drop trait may only be implemented on structures");
+ }
+ _ => {
+ tcx.sess.bug("didn't find impl in ast \
+ map");
+ }
+ }
+ }
+ } else {
+ tcx.sess.bug("found external impl of Drop trait on \
+ something other than a struct");
+ }
+ }
+ }
+ }
+ }
+}
+
+pub fn make_substs_for_receiver_types(tcx: &ty::ctxt,
+ trait_ref: &ty::TraitRef,
+ method: &ty::Method)
+ -> subst::Substs
+{
+ /*!
+ * Substitutes the values for the receiver's type parameters
+ * that are found in method, leaving the method's type parameters
+ * intact.
+ */
+
+ let meth_tps: Vec<ty::t> =
+ method.generics.types.get_slice(subst::FnSpace)
+ .iter()
+ .map(|def| ty::mk_param_from_def(tcx, def))
+ .collect();
+ let meth_regions: Vec<ty::Region> =
+ method.generics.regions.get_slice(subst::FnSpace)
+ .iter()
+ .map(|def| ty::ReEarlyBound(def.def_id.node, def.space,
+ def.index, def.name))
+ .collect();
+ trait_ref.substs.clone().with_method(meth_tps, meth_regions)
+}
+
+fn subst_receiver_types_in_method_ty(tcx: &ty::ctxt,
+                                     impl_id: ast::DefId,
+                                     impl_poly_type: &ty::Polytype,
+                                     trait_ref: &ty::TraitRef,
+                                     new_def_id: ast::DefId,
+                                     method: &ty::Method,
+                                     provided_source: Option<ast::DefId>)
+                                     -> ty::Method
+{
+    let combined_substs = make_substs_for_receiver_types(tcx, trait_ref, method);
+
+    debug!("subst_receiver_types_in_method_ty: combined_substs={}",
+           combined_substs.repr(tcx));
+
+    let mut method_generics = method.generics.subst(tcx, &combined_substs);
+
+    // replace the type parameters declared on the trait with those
+    // from the impl
+    for &space in [subst::TypeSpace, subst::SelfSpace].iter() {
+        method_generics.types.replace(
+            space,
+            Vec::from_slice(impl_poly_type.generics.types.get_slice(space)));
+        method_generics.regions.replace(
+            space,
+            Vec::from_slice(impl_poly_type.generics.regions.get_slice(space)));
+    }
+
+    debug!("subst_receiver_types_in_method_ty: method_generics={}",
+           method_generics.repr(tcx));
+
+    let method_fty = method.fty.subst(tcx, &combined_substs);
+
+    debug!("subst_receiver_types_in_method_ty: method_ty={}",
+           method_fty.repr(tcx));
+
+    ty::Method::new(
+        method.ident,
+        method_generics,
+        method_fty,
+        method.explicit_self,
+        method.vis,
+        new_def_id,
+        ImplContainer(impl_id),
+        provided_source
+    )
+}
+
+pub fn check_coherence(crate_context: &CrateCtxt) {
+ CoherenceChecker {
+ crate_context: crate_context,
+ inference_context: new_infer_ctxt(crate_context.tcx),
+ inherent_impls: RefCell::new(FnvHashMap::new()),
+ }.check(crate_context.tcx.map.krate());
+ orphan::check(crate_context.tcx);
+ overlap::check(crate_context.tcx);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * Orphan checker: every impl either implements a trait defined in this
+ * crate or pertains to a type defined in this crate.
+ */
+
+use middle::traits;
+use middle::ty;
+use syntax::ast::{Item, ItemImpl};
+use syntax::ast;
+use syntax::ast_util;
+use syntax::visit;
+use util::ppaux::Repr;
+
+pub fn check(tcx: &ty::ctxt) {
+ let mut orphan = OrphanChecker { tcx: tcx };
+ visit::walk_crate(&mut orphan, tcx.map.krate());
+}
+
+struct OrphanChecker<'cx, 'tcx:'cx> {
+ tcx: &'cx ty::ctxt<'tcx>
+}
+
+impl<'cx, 'tcx,'v> visit::Visitor<'v> for OrphanChecker<'cx, 'tcx> {
+ fn visit_item(&mut self, item: &'v ast::Item) {
+ let def_id = ast_util::local_def(item.id);
+ match item.node {
+ ast::ItemImpl(_, None, _, _) => {
+ // For inherent impls, self type must be a nominal type
+ // defined in this crate.
+ debug!("coherence2::orphan check: inherent impl {}", item.repr(self.tcx));
+ let self_ty = ty::lookup_item_type(self.tcx, def_id).ty;
+ match ty::get(self_ty).sty {
+ ty::ty_enum(def_id, _) |
+ ty::ty_struct(def_id, _) => {
+ if def_id.krate != ast::LOCAL_CRATE {
+ span_err!(self.tcx.sess, item.span, E0116,
+ "cannot associate methods with a type outside the \
+ crate the type is defined in; define and implement \
+ a trait or new type instead");
+ }
+ }
+ _ => {
+ span_err!(self.tcx.sess, item.span, E0118,
+ "no base type found for inherent implementation; \
+ implement a trait or new type instead");
+ }
+ }
+ }
+ ast::ItemImpl(_, Some(_), _, _) => {
+ // "Trait" impl
+ debug!("coherence2::orphan check: trait impl {}", item.repr(self.tcx));
+ if traits::is_orphan_impl(self.tcx, def_id) {
+ span_err!(self.tcx.sess, item.span, E0117,
+ "cannot provide an extension implementation \
+ where both trait and type are not defined in this crate");
+ }
+ }
+ _ => {
+ // Not an impl
+ }
+ }
+
+ visit::walk_item(self, item);
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * Overlap: No two impls for the same trait are implemented for the
+ * same type.
+ */
+
+use middle::traits;
+use middle::ty;
+use middle::typeck::infer::{new_infer_ctxt};
+use middle::typeck::infer;
+use syntax::ast::{DefId};
+use syntax::ast::{LOCAL_CRATE};
+use syntax::ast;
+use syntax::codemap::{Span};
+use util::ppaux::Repr;
+
+pub fn check(tcx: &ty::ctxt) {
+ let overlap = OverlapChecker { tcx: tcx };
+ overlap.check_for_overlapping_impls();
+}
+
+struct OverlapChecker<'cx, 'tcx:'cx> {
+ tcx: &'cx ty::ctxt<'tcx>
+}
+
+impl<'cx, 'tcx> OverlapChecker<'cx, 'tcx> {
+ fn check_for_overlapping_impls(&self) {
+ debug!("check_for_overlapping_impls");
+ let trait_impls = self.tcx.trait_impls.borrow();
+ for trait_def_id in trait_impls.keys() {
+ self.check_for_overlapping_impls_of_trait(*trait_def_id);
+ }
+ }
+
+ fn check_for_overlapping_impls_of_trait(&self,
+ trait_def_id: ast::DefId)
+ {
+ debug!("check_for_overlapping_impls_of_trait(trait_def_id={})",
+ trait_def_id.repr(self.tcx));
+
+ // FIXME -- it seems like this method actually pushes
+ // duplicate impls onto the list
+ ty::populate_implementations_for_type_if_necessary(self.tcx,
+ trait_def_id);
+
+ let mut impls = Vec::new();
+ self.push_impls_of_trait(trait_def_id, &mut impls);
+
+ for (i, &impl1_def_id) in impls.iter().enumerate() {
+ if impl1_def_id.krate != ast::LOCAL_CRATE {
+ // we don't need to check impls if both are external;
+ // that's the other crate's job.
+ continue;
+ }
+
+ for &impl2_def_id in impls.slice_from(i+1).iter() {
+ self.check_if_impls_overlap(trait_def_id,
+ impl1_def_id,
+ impl2_def_id);
+ }
+ }
+ }
+
+ fn check_if_impls_overlap(&self,
+ trait_def_id: ast::DefId,
+ impl1_def_id: ast::DefId,
+ impl2_def_id: ast::DefId)
+ {
+ assert_eq!(impl1_def_id.krate, ast::LOCAL_CRATE);
+
+ debug!("check_if_impls_overlap({}, {}, {})",
+ trait_def_id.repr(self.tcx),
+ impl1_def_id.repr(self.tcx),
+ impl2_def_id.repr(self.tcx));
+
+ let infcx = infer::new_infer_ctxt(self.tcx);
+ if !traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id) {
+ return;
+ }
+
+ span_err!(self.tcx.sess, self.span_of_impl(impl1_def_id), E0119,
+ "conflicting implementations for trait `{}`",
+ ty::item_path_str(self.tcx, trait_def_id));
+
+ if impl2_def_id.krate == ast::LOCAL_CRATE {
+ span_note!(self.tcx.sess, self.span_of_impl(impl2_def_id),
+ "note conflicting implementation here");
+ } else {
+ let crate_store = &self.tcx.sess.cstore;
+ let cdata = crate_store.get_crate_data(impl2_def_id.krate);
+ span_note!(self.tcx.sess, self.span_of_impl(impl1_def_id),
+ "conflicting implementation in crate `{}`",
+ cdata.name);
+ }
+ }
+
+ fn push_impls_of_trait(&self,
+ trait_def_id: ast::DefId,
+ out: &mut Vec<ast::DefId>) {
+ match self.tcx.trait_impls.borrow().find(&trait_def_id) {
+ Some(impls) => { out.push_all(impls.borrow().as_slice()); }
+ None => { /* no impls */ }
+ }
+ }
+
+ fn span_of_impl(&self, impl_did: ast::DefId) -> Span {
+ assert_eq!(impl_did.krate, ast::LOCAL_CRATE);
+ self.tcx.map.span(impl_did.node)
+ }
+}
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t;
}
-impl<'a, 'tcx> ToTy for CrateCtxt<'a, 'tcx> {
+impl<'a,'tcx> ToTy for ImplCtxt<'a,'tcx> {
+ fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t {
+ ast_ty_to_ty(self, rs, ast_ty)
+ }
+}
+
+impl<'a,'tcx> ToTy for CrateCtxt<'a,'tcx> {
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t {
ast_ty_to_ty(self, rs, ast_ty)
}
let abi = self.tcx.map.get_foreign_abi(id.node);
ty_of_foreign_item(self, &*foreign_item, abi)
}
+ Some(ast_map::NodeTraitItem(trait_item)) => {
+ ty_of_trait_item(self, &*trait_item)
+ }
x => {
self.tcx.sess.bug(format!("unexpected sort of node \
in get_item_ty(): {:?}",
"the type placeholder `_` is not allowed within types on item signatures.");
ty::mk_err()
}
+
+ fn associated_types_of_trait_are_valid(&self, _: ty::t, _: ast::DefId)
+ -> bool {
+ false
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ _: Option<ty::t>,
+ _: ast::DefId,
+ _: ast::DefId)
+ -> ty::t {
+ self.tcx().sess.span_err(span, "associated types may not be \
+ referenced here");
+ ty::mk_err()
+ }
}
pub fn get_enum_variant_types(ccx: &CrateCtxt,
ast::StructVariantKind(ref struct_def) => {
let pty = Polytype {
- generics: ty_generics_for_type(ccx, generics),
+ generics: ty_generics_for_type(
+ ccx,
+ generics,
+ DontCreateTypeParametersForAssociatedTypes),
ty: enum_ty
};
};
let pty = Polytype {
- generics: ty_generics_for_type(ccx, generics),
+ generics: ty_generics_for_type(
+ ccx,
+ generics,
+ DontCreateTypeParametersForAssociatedTypes),
ty: result_ty
};
ccx,
trait_id,
&trait_def.generics,
+ trait_items.as_slice(),
&m.id,
&m.ident,
&m.explicit_self,
ccx,
trait_id,
&trait_def.generics,
+ trait_items.as_slice(),
&m.id,
&m.pe_ident(),
m.pe_explicit_self(),
&m.pe_fn_style(),
&*m.pe_fn_decl())
}
+ ast::TypeTraitItem(ref at) => {
+ tcx.sess.span_bug(at.span,
+ "there shouldn't \
+ be a type trait \
+ item here")
+ }
});
if ty_method.explicit_self ==
.insert(ty_method.def_id,
ty::MethodTraitItem(ty_method));
}
+ ast::TypeTraitItem(ref ast_associated_type) => {
+ let trait_did = local_def(trait_id);
+ let associated_type = ty::AssociatedType {
+ ident: ast_associated_type.ident,
+ vis: ast::Public,
+ def_id: local_def(ast_associated_type.id),
+ container: TraitContainer(trait_did),
+ };
+
+ let trait_item = ty::TypeTraitItem(Rc::new(
+ associated_type));
+ tcx.impl_or_trait_items
+ .borrow_mut()
+ .insert(associated_type.def_id,
+ trait_item);
+ }
}
}
ty::MethodTraitItemId(local_def(
method.id))
}
+ ast::TypeTraitItem(ref typedef) => {
+ ty::TypeTraitItemId(local_def(typedef.id))
+ }
}
}).collect());
ty: ty::mk_bare_fn(ccx.tcx, m.fty.clone()) });
}
- fn ty_method_of_trait_method(this: &CrateCtxt,
+ fn ty_method_of_trait_method(ccx: &CrateCtxt,
trait_id: ast::NodeId,
trait_generics: &ty::Generics,
+ trait_items: &[ast::TraitItem],
m_id: &ast::NodeId,
m_ident: &ast::Ident,
m_explicit_self: &ast::ExplicitSelf,
m_fn_style: &ast::FnStyle,
m_decl: &ast::FnDecl)
-> ty::Method {
- let trait_self_ty = ty::mk_self_type(this.tcx, local_def(trait_id));
-
- let (fty, explicit_self_category) =
- astconv::ty_of_method(this,
+ let ty_generics =
+ ty_generics_for_fn_or_method(
+ ccx,
+ m_generics,
+ (*trait_generics).clone(),
+ DontCreateTypeParametersForAssociatedTypes);
+
+ let (fty, explicit_self_category) = {
+ let tmcx = TraitMethodCtxt {
+ ccx: ccx,
+ trait_id: local_def(trait_id),
+ trait_items: trait_items.as_slice(),
+ method_generics: &ty_generics,
+ };
+ let trait_self_ty = ty::mk_self_type(tmcx.tcx(),
+ local_def(trait_id));
+ astconv::ty_of_method(&tmcx,
*m_id,
*m_fn_style,
trait_self_ty,
m_explicit_self,
m_decl,
- m_abi);
- let ty_generics =
- ty_generics_for_fn_or_method(this,
- m_generics,
- (*trait_generics).clone());
+ m_abi)
+ };
+
ty::Method::new(
*m_ident,
ty_generics,
}
}
-fn convert_methods<'a, I: Iterator<&'a ast::Method>>(ccx: &CrateCtxt,
- container: ImplOrTraitItemContainer,
- mut ms: I,
- untransformed_rcvr_ty: ty::t,
- rcvr_ty_generics: &ty::Generics,
- rcvr_visibility: ast::Visibility) {
+fn convert_associated_type(ccx: &CrateCtxt,
+ trait_def: &ty::TraitDef,
+ associated_type: &ast::AssociatedType)
+ -> ty::Polytype {
+ // Find the type parameter ID corresponding to this
+ // associated type.
+ let type_parameter_def = trait_def.generics
+ .types
+ .get_slice(subst::TypeSpace)
+ .iter()
+ .find(|def| {
+ def.def_id == local_def(associated_type.id)
+ });
+ let type_parameter_def = match type_parameter_def {
+ Some(type_parameter_def) => type_parameter_def,
+ None => {
+ ccx.tcx().sess.span_bug(associated_type.span,
+ "`convert_associated_type()` didn't find \
+ a type parameter ID corresponding to \
+ this type")
+ }
+ };
+ let param_type = ty::mk_param(ccx.tcx,
+ subst::TypeSpace,
+ type_parameter_def.index,
+ local_def(associated_type.id));
+ ccx.tcx.tcache.borrow_mut().insert(local_def(associated_type.id),
+ Polytype {
+ generics: ty::Generics::empty(),
+ ty: param_type,
+ });
+ write_ty_to_tcx(ccx.tcx, associated_type.id, param_type);
+
+ let associated_type = Rc::new(ty::AssociatedType {
+ ident: associated_type.ident,
+ vis: ast::Public,
+ def_id: local_def(associated_type.id),
+ container: TraitContainer(trait_def.trait_ref.def_id),
+ });
+ ccx.tcx
+ .impl_or_trait_items
+ .borrow_mut()
+ .insert(associated_type.def_id,
+ ty::TypeTraitItem(associated_type));
+
+ Polytype {
+ generics: ty::Generics::empty(),
+ ty: param_type,
+ }
+}
+
+enum ConvertMethodContext<'a> {
+ /// Used when converting implementation methods.
+ ImplConvertMethodContext,
+ /// Used when converting method signatures. The def ID is the def ID of
+ /// the trait we're translating.
+ TraitConvertMethodContext(ast::DefId, &'a [ast::TraitItem]),
+}
+
+fn convert_methods<'a,I>(ccx: &CrateCtxt,
+ convert_method_context: ConvertMethodContext,
+ container: ImplOrTraitItemContainer,
+ mut ms: I,
+ untransformed_rcvr_ty: ty::t,
+ rcvr_ty_generics: &ty::Generics,
+ rcvr_visibility: ast::Visibility)
+ where I: Iterator<&'a ast::Method> {
debug!("convert_methods(untransformed_rcvr_ty={}, \
rcvr_ty_generics={})",
untransformed_rcvr_ty.repr(ccx.tcx),
let tcx = ccx.tcx;
let mut seen_methods = HashSet::new();
for m in ms {
- if !seen_methods.insert(m.pe_ident().repr(ccx.tcx)) {
+ if !seen_methods.insert(m.pe_ident().repr(tcx)) {
tcx.sess.span_err(m.span, "duplicate method in trait impl");
}
let mty = Rc::new(ty_of_method(ccx,
+ convert_method_context,
container,
m,
untransformed_rcvr_ty,
rcvr_visibility));
let fty = ty::mk_bare_fn(tcx, mty.fty.clone());
debug!("method {} (id {}) has type {}",
- m.pe_ident().repr(ccx.tcx),
+ m.pe_ident().repr(tcx),
m.id,
- fty.repr(ccx.tcx));
+ fty.repr(tcx));
tcx.tcache.borrow_mut().insert(
local_def(m.id),
Polytype {
}
fn ty_of_method(ccx: &CrateCtxt,
+ convert_method_context: ConvertMethodContext,
container: ImplOrTraitItemContainer,
m: &ast::Method,
untransformed_rcvr_ty: ty::t,
_ => m.pe_abi(),
};
- let (fty, explicit_self_category) =
- astconv::ty_of_method(ccx,
- m.id,
- m.pe_fn_style(),
- untransformed_rcvr_ty,
- m.pe_explicit_self(),
- &*m.pe_fn_decl(),
- real_abi);
+ let m_ty_generics =
+ ty_generics_for_fn_or_method(
+ ccx,
+ m.pe_generics(),
+ (*rcvr_ty_generics).clone(),
+ CreateTypeParametersForAssociatedTypes);
+
+ let (fty, explicit_self_category) = match convert_method_context {
+ ImplConvertMethodContext => {
+ let imcx = ImplMethodCtxt {
+ ccx: ccx,
+ method_generics: &m_ty_generics,
+ };
+ astconv::ty_of_method(&imcx,
+ m.id,
+ m.pe_fn_style(),
+ untransformed_rcvr_ty,
+ m.pe_explicit_self(),
+ &*m.pe_fn_decl(),
+ real_abi)
+ }
+ TraitConvertMethodContext(trait_id, trait_items) => {
+ let tmcx = TraitMethodCtxt {
+ ccx: ccx,
+ trait_id: trait_id,
+ trait_items: trait_items,
+ method_generics: &m_ty_generics,
+ };
+ astconv::ty_of_method(&tmcx,
+ m.id,
+ m.pe_fn_style(),
+ untransformed_rcvr_ty,
+ m.pe_explicit_self(),
+ &*m.pe_fn_decl(),
+ real_abi)
+ }
+ };
// if the method specifies a visibility, use that, otherwise
// inherit the visibility from the impl (so `foo` in `pub impl
// foo(); }`).
let method_vis = m.pe_vis().inherit_from(rcvr_visibility);
- let m_ty_generics =
- ty_generics_for_fn_or_method(ccx, m.pe_generics(),
- (*rcvr_ty_generics).clone());
ty::Method::new(m.pe_ident(),
m_ty_generics,
fty,
}
}
+fn is_associated_type_valid_for_param(ty: ty::t,
+ trait_id: ast::DefId,
+ generics: &ty::Generics)
+ -> bool {
+ match ty::get(ty).sty {
+ ty::ty_param(param_ty) => {
+ let type_parameter = generics.types.get(param_ty.space,
+ param_ty.idx);
+ for trait_bound in type_parameter.bounds.trait_bounds.iter() {
+ if trait_bound.def_id == trait_id {
+ return true
+ }
+ }
+ }
+ _ => {}
+ }
+
+ false
+}
+
+fn find_associated_type_in_generics(tcx: &ty::ctxt,
+ span: Span,
+ ty: Option<ty::t>,
+ associated_type_id: ast::DefId,
+ generics: &ty::Generics)
+ -> ty::t {
+ let ty = match ty {
+ None => {
+ tcx.sess.span_bug(span,
+ "find_associated_type_in_generics(): no self \
+ type")
+ }
+ Some(ty) => ty,
+ };
+
+ match ty::get(ty).sty {
+ ty::ty_param(ref param_ty) => {
+            // NOTE(review): an earlier draft looked the parameter up in
+            // `generics` to obtain its def ID; `param_ty.def_id` is used
+            // directly instead -- confirm the two always agree.
+ let param_id = param_ty.def_id;
+ for type_parameter in generics.types.iter() {
+ if type_parameter.def_id == associated_type_id
+ && type_parameter.associated_with == Some(param_id) {
+ return ty::mk_param_from_def(tcx, type_parameter)
+ }
+ }
+
+ tcx.sess.span_bug(span,
+ "find_associated_type_in_generics(): didn't \
+ find associated type anywhere in the generics \
+ list")
+ }
+ _ => {
+ tcx.sess.span_bug(span,
+ "find_associated_type_in_generics(): self type \
+ is not a parameter")
+
+ }
+ }
+}
+
+fn type_is_self(ty: ty::t) -> bool {
+ match ty::get(ty).sty {
+ ty::ty_param(ref param_ty) if param_ty.is_self() => true,
+ _ => false,
+ }
+}
+
+struct ImplCtxt<'a,'tcx:'a> {
+ ccx: &'a CrateCtxt<'a,'tcx>,
+ opt_trait_ref_id: Option<ast::DefId>,
+ impl_items: &'a [ast::ImplItem],
+ impl_generics: &'a ty::Generics,
+}
+
+impl<'a,'tcx> AstConv<'tcx> for ImplCtxt<'a,'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ self.ccx.tcx
+ }
+
+ fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
+ self.ccx.get_item_ty(id)
+ }
+
+ fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef> {
+ self.ccx.get_trait_def(id)
+ }
+
+ fn ty_infer(&self, span: Span) -> ty::t {
+ self.ccx.ty_infer(span)
+ }
+
+ fn associated_types_of_trait_are_valid(&self,
+ ty: ty::t,
+ trait_id: ast::DefId)
+ -> bool {
+ // OK if the trait with the associated type is the trait we're
+ // implementing.
+ match self.opt_trait_ref_id {
+ Some(trait_ref_id) if trait_ref_id == trait_id => {
+ if type_is_self(ty) {
+ return true
+ }
+ }
+ Some(_) | None => {}
+ }
+
+ // OK if the trait with the associated type is one of the traits in
+ // our bounds.
+ is_associated_type_valid_for_param(ty, trait_id, self.impl_generics)
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ ty: Option<ty::t>,
+ trait_id: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t {
+ ensure_associated_types(self, trait_id);
+ let associated_type_ids = ty::associated_types_for_trait(self.ccx.tcx,
+ trait_id);
+ match self.opt_trait_ref_id {
+ Some(trait_ref_id) if trait_ref_id == trait_id => {
+ // It's an associated type on the trait that we're
+ // implementing.
+ let associated_type_id =
+ associated_type_ids.iter()
+ .find(|id| {
+ id.def_id == associated_type_id
+ })
+ .expect("associated_type_binding(): \
+ expected associated type ID \
+ in trait");
+ let associated_type =
+ ty::impl_or_trait_item(self.ccx.tcx,
+ associated_type_id.def_id);
+ for impl_item in self.impl_items.iter() {
+ match *impl_item {
+ ast::MethodImplItem(_) => {}
+ ast::TypeImplItem(ref typedef) => {
+ if associated_type.ident().name == typedef.ident
+ .name {
+ return self.ccx.to_ty(&ExplicitRscope,
+ &*typedef.typ)
+ }
+ }
+ }
+ }
+ self.ccx
+ .tcx
+ .sess
+ .span_bug(span,
+ "ImplCtxt::associated_type_binding(): didn't \
+ find associated type")
+ }
+ Some(_) | None => {}
+ }
+
+ // OK then, it should be an associated type on one of the traits in
+ // our bounds.
+ find_associated_type_in_generics(self.ccx.tcx,
+ span,
+ ty,
+ associated_type_id,
+ self.impl_generics)
+ }
+}
+
+struct FnCtxt<'a,'tcx:'a> {
+ ccx: &'a CrateCtxt<'a,'tcx>,
+ generics: &'a ty::Generics,
+}
+
+impl<'a,'tcx> AstConv<'tcx> for FnCtxt<'a,'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ self.ccx.tcx
+ }
+
+ fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
+ self.ccx.get_item_ty(id)
+ }
+
+ fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef> {
+ self.ccx.get_trait_def(id)
+ }
+
+ fn ty_infer(&self, span: Span) -> ty::t {
+ self.ccx.ty_infer(span)
+ }
+
+ fn associated_types_of_trait_are_valid(&self,
+ ty: ty::t,
+ trait_id: ast::DefId)
+ -> bool {
+ // OK if the trait with the associated type is one of the traits in
+ // our bounds.
+ is_associated_type_valid_for_param(ty, trait_id, self.generics)
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ ty: Option<ty::t>,
+ _: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t {
+ debug!("collect::FnCtxt::associated_type_binding()");
+
+ // The ID should map to an associated type on one of the traits in
+ // our bounds.
+ find_associated_type_in_generics(self.ccx.tcx,
+ span,
+ ty,
+ associated_type_id,
+ self.generics)
+ }
+}
+
+struct ImplMethodCtxt<'a,'tcx:'a> {
+ ccx: &'a CrateCtxt<'a,'tcx>,
+ method_generics: &'a ty::Generics,
+}
+
+impl<'a,'tcx> AstConv<'tcx> for ImplMethodCtxt<'a,'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ self.ccx.tcx
+ }
+
+ fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
+ self.ccx.get_item_ty(id)
+ }
+
+ fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef> {
+ self.ccx.get_trait_def(id)
+ }
+
+ fn ty_infer(&self, span: Span) -> ty::t {
+ self.ccx.ty_infer(span)
+ }
+
+ fn associated_types_of_trait_are_valid(&self,
+ ty: ty::t,
+ trait_id: ast::DefId)
+ -> bool {
+ is_associated_type_valid_for_param(ty, trait_id, self.method_generics)
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ ty: Option<ty::t>,
+ _: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t {
+ debug!("collect::ImplMethodCtxt::associated_type_binding()");
+
+ // The ID should map to an associated type on one of the traits in
+ // our bounds.
+ find_associated_type_in_generics(self.ccx.tcx,
+ span,
+ ty,
+ associated_type_id,
+ self.method_generics)
+ }
+}
+
+struct TraitMethodCtxt<'a,'tcx:'a> {
+ ccx: &'a CrateCtxt<'a,'tcx>,
+ trait_id: ast::DefId,
+ trait_items: &'a [ast::TraitItem],
+ method_generics: &'a ty::Generics,
+}
+
+impl<'a,'tcx> AstConv<'tcx> for TraitMethodCtxt<'a,'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ self.ccx.tcx
+ }
+
+ fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
+ self.ccx.get_item_ty(id)
+ }
+
+ fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef> {
+ self.ccx.get_trait_def(id)
+ }
+
+ fn ty_infer(&self, span: Span) -> ty::t {
+ self.ccx.ty_infer(span)
+ }
+
+ fn associated_types_of_trait_are_valid(&self,
+ ty: ty::t,
+ trait_id: ast::DefId)
+ -> bool {
+ // OK if the trait with the associated type is this trait.
+ if self.trait_id == trait_id && type_is_self(ty) {
+ return true
+ }
+
+ // OK if the trait with the associated type is one of the traits in
+ // our bounds.
+ is_associated_type_valid_for_param(ty, trait_id, self.method_generics)
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ ty: Option<ty::t>,
+ trait_id: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t {
+ debug!("collect::TraitMethodCtxt::associated_type_binding()");
+
+ // If this is one of our own associated types, return it.
+ if trait_id == self.trait_id {
+ let mut index = 0;
+ for item in self.trait_items.iter() {
+ match *item {
+ ast::RequiredMethod(_) | ast::ProvidedMethod(_) => {}
+ ast::TypeTraitItem(ref item) => {
+ if local_def(item.id) == associated_type_id {
+ return ty::mk_param(self.tcx(),
+ subst::TypeSpace,
+ index,
+ associated_type_id)
+ }
+ index += 1;
+ }
+ }
+ }
+ self.ccx
+ .tcx
+ .sess
+ .span_bug(span,
+ "TraitMethodCtxt::associated_type_binding(): \
+ didn't find associated type anywhere in the item \
+ list")
+ }
+
+ // The ID should map to an associated type on one of the traits in
+ // our bounds.
+ find_associated_type_in_generics(self.ccx.tcx,
+ span,
+ ty,
+ associated_type_id,
+ self.method_generics)
+ }
+}
+
+struct GenericsCtxt<'a,AC:'a> {
+ chain: &'a AC,
+ associated_types_generics: &'a ty::Generics,
+}
+
+impl<'a,'tcx,AC:AstConv<'tcx>> AstConv<'tcx> for GenericsCtxt<'a,AC> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ self.chain.tcx()
+ }
+
+ fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
+ self.chain.get_item_ty(id)
+ }
+
+ fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef> {
+ self.chain.get_trait_def(id)
+ }
+
+ fn ty_infer(&self, span: Span) -> ty::t {
+ self.chain.ty_infer(span)
+ }
+
+ fn associated_types_of_trait_are_valid(&self,
+ ty: ty::t,
+ trait_id: ast::DefId)
+ -> bool {
+ // OK if the trait with the associated type is one of the traits in
+ // our bounds.
+ is_associated_type_valid_for_param(ty,
+ trait_id,
+ self.associated_types_generics)
+ }
+
+ fn associated_type_binding(&self,
+ span: Span,
+ ty: Option<ty::t>,
+ _: ast::DefId,
+ associated_type_id: ast::DefId)
+ -> ty::t {
+ debug!("collect::GenericsCtxt::associated_type_binding()");
+
+ // The ID should map to an associated type on one of the traits in
+ // our bounds.
+ find_associated_type_in_generics(self.chain.tcx(),
+ span,
+ ty,
+ associated_type_id,
+ self.associated_types_generics)
+ }
+}
+
pub fn convert(ccx: &CrateCtxt, it: &ast::Item) {
let tcx = ccx.tcx;
debug!("convert: item {} with id {}", token::get_ident(it.ident), it.id);
ref opt_trait_ref,
ref selfty,
ref impl_items) => {
- let ty_generics = ty_generics_for_type(ccx, generics);
+ // Create generics from the generics specified in the impl head.
+ let ty_generics = ty_generics_for_type(
+ ccx,
+ generics,
+ CreateTypeParametersForAssociatedTypes);
+
let selfty = ccx.to_ty(&ExplicitRscope, &**selfty);
write_ty_to_tcx(tcx, it.id, selfty);
- tcx.tcache.borrow_mut().insert(local_def(it.id),
- Polytype {
- generics: ty_generics.clone(),
- ty: selfty});
+ tcx.tcache
+ .borrow_mut()
+ .insert(local_def(it.id),
+ Polytype {
+ generics: ty_generics.clone(),
+ ty: selfty,
+ });
// If there is a trait reference, treat the methods as always public.
// This is to work around some incorrect behavior in privacy checking:
it.vis
};
+ let icx = ImplCtxt {
+ ccx: ccx,
+ opt_trait_ref_id: match *opt_trait_ref {
+ None => None,
+ Some(ref ast_trait_ref) => {
+ Some(lookup_def_tcx(tcx,
+ ast_trait_ref.path.span,
+ ast_trait_ref.ref_id).def_id())
+ }
+ },
+ impl_items: impl_items.as_slice(),
+ impl_generics: &ty_generics,
+ };
+
let mut methods = Vec::new();
for impl_item in impl_items.iter() {
match *impl_item {
method.pe_explicit_self());
methods.push(&**method);
}
+ ast::TypeImplItem(ref typedef) => {
+ let typ = icx.to_ty(&ExplicitRscope, &*typedef.typ);
+ tcx.tcache
+ .borrow_mut()
+ .insert(local_def(typedef.id),
+ Polytype {
+ generics: ty::Generics::empty(),
+ ty: typ,
+ });
+ write_ty_to_tcx(ccx.tcx, typedef.id, typ);
+
+ let associated_type = Rc::new(ty::AssociatedType {
+ ident: typedef.ident,
+ vis: typedef.vis,
+ def_id: local_def(typedef.id),
+ container: ty::ImplContainer(local_def(it.id)),
+ });
+ tcx.impl_or_trait_items
+ .borrow_mut()
+ .insert(local_def(typedef.id),
+ ty::TypeTraitItem(associated_type));
+ }
}
}
convert_methods(ccx,
+ ImplConvertMethodContext,
ImplContainer(local_def(it.id)),
- methods.move_iter(),
+ methods.into_iter(),
selfty,
&ty_generics,
parent_visibility);
for trait_ref in opt_trait_ref.iter() {
- instantiate_trait_ref(ccx, trait_ref, selfty);
+ instantiate_trait_ref(&icx, trait_ref, selfty, None);
}
},
ast::ItemTrait(_, _, _, ref trait_methods) => {
self_type,
method.pe_explicit_self())
}
+ ast::TypeTraitItem(ref associated_type) => {
+ convert_associated_type(ccx,
+ &*trait_def,
+ &**associated_type);
+ }
}
}
// Run convert_methods on the provided methods.
- let untransformed_rcvr_ty = ty::mk_self_type(tcx, local_def(it.id));
+ let untransformed_rcvr_ty = ty::mk_self_type(tcx,
+ local_def(it.id));
+ let convert_method_context =
+ TraitConvertMethodContext(local_def(it.id),
+ trait_methods.as_slice());
convert_methods(ccx,
+ convert_method_context,
TraitContainer(local_def(it.id)),
trait_methods.iter().filter_map(|m| match *m {
ast::RequiredMethod(_) => None,
- ast::ProvidedMethod(ref m) => Some(&**m)
+ ast::ProvidedMethod(ref m) => Some(&**m),
+ ast::TypeTraitItem(_) => None,
}),
untransformed_rcvr_ty,
&trait_def.generics,
ccx.tcx.tcache.borrow_mut().insert(local_def(i.id), pty);
}
-pub fn instantiate_trait_ref(ccx: &CrateCtxt,
- ast_trait_ref: &ast::TraitRef,
- self_ty: ty::t) -> Rc<ty::TraitRef> {
+pub fn instantiate_trait_ref<'tcx,AC>(this: &AC,
+ ast_trait_ref: &ast::TraitRef,
+ self_ty: ty::t,
+ associated_type: Option<ty::t>)
+ -> Rc<ty::TraitRef>
+ where AC: AstConv<'tcx> {
/*!
* Instantiates the path for the given trait reference, assuming that
* it's bound to a valid trait type. Returns the def_id for the defining
// FIXME(#5121) -- distinguish early vs late lifetime params
let rscope = ExplicitRscope;
- match lookup_def_tcx(ccx.tcx, ast_trait_ref.path.span, ast_trait_ref.ref_id) {
+ match lookup_def_tcx(this.tcx(),
+ ast_trait_ref.path.span,
+ ast_trait_ref.ref_id) {
def::DefTrait(trait_did) => {
let trait_ref =
- astconv::ast_path_to_trait_ref(
- ccx, &rscope, trait_did, Some(self_ty), &ast_trait_ref.path);
+ astconv::ast_path_to_trait_ref(this,
+ &rscope,
+ trait_did,
+ Some(self_ty),
+ associated_type,
+ &ast_trait_ref.path);
- ccx.tcx.trait_refs.borrow_mut().insert(ast_trait_ref.ref_id,
- trait_ref.clone());
+ this.tcx().trait_refs.borrow_mut().insert(ast_trait_ref.ref_id,
+ trait_ref.clone());
trait_ref
}
_ => {
- ccx.tcx.sess.span_fatal(
+ this.tcx().sess.span_fatal(
ast_trait_ref.path.span,
format!("`{}` is not a trait",
path_to_string(&ast_trait_ref.path)).as_slice());
}
}
-pub fn instantiate_unboxed_fn_ty(ccx: &CrateCtxt,
- unboxed_function: &ast::UnboxedFnTy,
- param_ty: ty::ParamTy)
- -> Rc<ty::TraitRef>
-{
+pub fn instantiate_unboxed_fn_ty<'tcx,AC>(this: &AC,
+ unboxed_function: &ast::UnboxedFnTy,
+ param_ty: ty::ParamTy)
+ -> Rc<ty::TraitRef>
+ where AC: AstConv<'tcx> {
let rscope = ExplicitRscope;
- let param_ty = param_ty.to_ty(ccx.tcx);
- Rc::new(astconv::trait_ref_for_unboxed_function(ccx,
+ let param_ty = param_ty.to_ty(this.tcx());
+ Rc::new(astconv::trait_ref_for_unboxed_function(this,
&rscope,
- unboxed_function,
+ unboxed_function.kind,
+ &*unboxed_function.decl,
Some(param_ty)))
}
_ => {}
}
- let (generics, unbound, bounds) = match it.node {
- ast::ItemTrait(ref generics, ref unbound, ref bounds, _) => {
- (generics, unbound, bounds)
+ let (generics, unbound, bounds, items) = match it.node {
+ ast::ItemTrait(ref generics,
+ ref unbound,
+ ref supertraits,
+ ref items) => {
+ (generics, unbound, supertraits, items.as_slice())
}
ref s => {
tcx.sess.span_bug(
}
};
- let substs = mk_trait_substs(ccx, it.id, generics);
+ let substs = mk_trait_substs(ccx, it.id, generics, items);
let ty_generics = ty_generics_for_trait(ccx,
it.id,
&substs,
- generics);
+ generics,
+ items);
let self_param_ty = ty::ParamTy::for_self(def_id);
fn mk_trait_substs(ccx: &CrateCtxt,
trait_id: ast::NodeId,
- generics: &ast::Generics)
- -> subst::Substs
- {
+ generics: &ast::Generics,
+ items: &[ast::TraitItem])
+ -> subst::Substs {
// Creates a no-op substitution for the trait's type parameters.
let regions =
generics.lifetimes
def.lifetime.name))
.collect();
- let types =
+ // Start with the generics in the type parameters...
+ let mut types: Vec<_> =
generics.ty_params
.iter()
.enumerate()
i, local_def(def.id)))
.collect();
+ // ...and add generics synthesized from the associated types.
+ for item in items.iter() {
+ match *item {
+ ast::TypeTraitItem(ref trait_item) => {
+ let index = types.len();
+ types.push(ty::mk_param(ccx.tcx,
+ subst::TypeSpace,
+ index,
+ local_def(trait_item.id)))
+ }
+ ast::RequiredMethod(_) | ast::ProvidedMethod(_) => {}
+ }
+ }
+
let self_ty =
ty::mk_param(ccx.tcx, subst::SelfSpace, 0, local_def(trait_id));
return pty;
}
ast::ItemFn(ref decl, fn_style, abi, ref generics, _) => {
- let ty_generics = ty_generics_for_fn_or_method(ccx, generics,
- ty::Generics::empty());
- let tofd = astconv::ty_of_bare_fn(ccx,
- it.id,
- fn_style,
- abi,
- &**decl);
+ let ty_generics = ty_generics_for_fn_or_method(
+ ccx,
+ generics,
+ ty::Generics::empty(),
+ CreateTypeParametersForAssociatedTypes);
+ let tofd = {
+ let fcx = FnCtxt {
+ ccx: ccx,
+ generics: &ty_generics,
+ };
+ astconv::ty_of_bare_fn(&fcx,
+ it.id,
+ fn_style,
+ abi,
+ &**decl)
+ };
let pty = Polytype {
generics: ty_generics,
ty: ty::mk_bare_fn(ccx.tcx, tofd)
debug!("type of {} (id {}) is {}",
token::get_ident(it.ident),
it.id,
- ppaux::ty_to_string(tcx, pty.ty));
+ pty.repr(tcx));
ccx.tcx.tcache.borrow_mut().insert(local_def(it.id), pty.clone());
return pty;
let pty = {
let ty = ccx.to_ty(&ExplicitRscope, &**t);
Polytype {
- generics: ty_generics_for_type(ccx, generics),
+ generics: ty_generics_for_type(
+ ccx,
+ generics,
+ DontCreateTypeParametersForAssociatedTypes),
ty: ty
}
};
}
ast::ItemEnum(_, ref generics) => {
// Create a new generic polytype.
- let ty_generics = ty_generics_for_type(ccx, generics);
+ let ty_generics = ty_generics_for_type(
+ ccx,
+ generics,
+ DontCreateTypeParametersForAssociatedTypes);
let substs = mk_item_substs(ccx, &ty_generics);
let t = ty::mk_enum(tcx, local_def(it.id), substs);
let pty = Polytype {
tcx.sess.span_bug(it.span, "invoked ty_of_item on trait");
}
ast::ItemStruct(_, ref generics) => {
- let ty_generics = ty_generics_for_type(ccx, generics);
+ let ty_generics = ty_generics_for_type(
+ ccx,
+ generics,
+ DontCreateTypeParametersForAssociatedTypes);
let substs = mk_item_substs(ccx, &ty_generics);
let t = ty::mk_struct(tcx, local_def(it.id), substs);
let pty = Polytype {
}
}
+fn ty_of_trait_item(ccx: &CrateCtxt, trait_item: &ast::TraitItem)
+ -> ty::Polytype {
+ match *trait_item {
+ ast::RequiredMethod(ref m) => {
+ ccx.tcx.sess.span_bug(m.span,
+ "ty_of_trait_item() on required method")
+ }
+ ast::ProvidedMethod(ref m) => {
+ ccx.tcx.sess.span_bug(m.span,
+ "ty_of_trait_item() on provided method")
+ }
+ ast::TypeTraitItem(ref associated_type) => {
+ let parent = ccx.tcx.map.get_parent(associated_type.id);
+ let trait_def = match ccx.tcx.map.get(parent) {
+ ast_map::NodeItem(item) => trait_def_of_item(ccx, &*item),
+ _ => {
+ ccx.tcx.sess.span_bug(associated_type.span,
+ "associated type's parent wasn't \
+ an item?!")
+ }
+ };
+ convert_associated_type(ccx, &*trait_def, &**associated_type)
+ }
+ }
+}
+
fn ty_generics_for_type(ccx: &CrateCtxt,
- generics: &ast::Generics)
- -> ty::Generics
-{
+ generics: &ast::Generics,
+ create_type_parameters_for_associated_types:
+ CreateTypeParametersForAssociatedTypesFlag)
+ -> ty::Generics {
ty_generics(ccx,
subst::TypeSpace,
generics.lifetimes.as_slice(),
generics.ty_params.as_slice(),
ty::Generics::empty(),
- &generics.where_clause)
+ &generics.where_clause,
+ create_type_parameters_for_associated_types)
}
fn ty_generics_for_trait(ccx: &CrateCtxt,
trait_id: ast::NodeId,
substs: &subst::Substs,
- generics: &ast::Generics)
+ generics: &ast::Generics,
+ items: &[ast::TraitItem])
-> ty::Generics {
- let mut generics = ty_generics(ccx,
- subst::TypeSpace,
- generics.lifetimes.as_slice(),
- generics.ty_params.as_slice(),
- ty::Generics::empty(),
- &generics.where_clause);
+ let mut generics =
+ ty_generics(ccx,
+ subst::TypeSpace,
+ generics.lifetimes.as_slice(),
+ generics.ty_params.as_slice(),
+ ty::Generics::empty(),
+ &generics.where_clause,
+ DontCreateTypeParametersForAssociatedTypes);
+
+ // Add in type parameters for any associated types.
+ for item in items.iter() {
+ match *item {
+ ast::TypeTraitItem(ref associated_type) => {
+ let def = ty::TypeParameterDef {
+ space: subst::TypeSpace,
+ index: generics.types.len(subst::TypeSpace),
+ ident: associated_type.ident,
+ def_id: local_def(associated_type.id),
+ bounds: ty::ParamBounds {
+ builtin_bounds: ty::empty_builtin_bounds(),
+ trait_bounds: Vec::new(),
+ region_bounds: Vec::new(),
+ },
+ associated_with: Some(local_def(trait_id)),
+ default: None,
+ };
+ ccx.tcx.ty_param_defs.borrow_mut().insert(associated_type.id,
+ def.clone());
+ generics.types.push(subst::TypeSpace, def);
+ }
+ ast::ProvidedMethod(_) | ast::RequiredMethod(_) => {}
+ }
+ }
+ // Add in the self type parameter.
+ //
// Something of a hack: use the node id for the trait, also as
// the node id for the Self type parameter.
let param_id = trait_id;
ident: special_idents::type_self,
def_id: local_def(param_id),
bounds: ty::ParamBounds {
- opt_region_bound: None,
+ region_bounds: vec!(),
builtin_bounds: ty::empty_builtin_bounds(),
trait_bounds: vec!(self_trait_ref),
},
+ associated_with: None,
default: None
};
generics
}
-fn ty_generics_for_fn_or_method(ccx: &CrateCtxt,
- generics: &ast::Generics,
- base_generics: ty::Generics)
- -> ty::Generics {
+fn ty_generics_for_fn_or_method<'tcx,AC>(
+ this: &AC,
+ generics: &ast::Generics,
+ base_generics: ty::Generics,
+ create_type_parameters_for_associated_types:
+ CreateTypeParametersForAssociatedTypesFlag)
+ -> ty::Generics
+ where AC: AstConv<'tcx> {
let early_lifetimes = resolve_lifetime::early_bound_lifetimes(generics);
- ty_generics(ccx,
+ ty_generics(this,
subst::FnSpace,
early_lifetimes.as_slice(),
generics.ty_params.as_slice(),
base_generics,
- &generics.where_clause)
+ &generics.where_clause,
+ create_type_parameters_for_associated_types)
}
// Add the Sized bound, unless the type parameter is marked as `Sized?`.
-fn add_unsized_bound(ccx: &CrateCtxt,
- unbound: &Option<ast::TyParamBound>,
- bounds: &mut ty::BuiltinBounds,
- desc: &str,
- span: Span) {
- let kind_id = ccx.tcx.lang_items.require(SizedTraitLangItem);
-
+fn add_unsized_bound<'tcx,AC>(this: &AC,
+ unbound: &Option<ast::TyParamBound>,
+ bounds: &mut ty::BuiltinBounds,
+ desc: &str,
+ span: Span)
+ where AC: AstConv<'tcx> {
+ let kind_id = this.tcx().lang_items.require(SizedTraitLangItem);
match unbound {
&Some(ast::TraitTyParamBound(ref tpb)) => {
// FIXME(#8559) currently requires the unbound to be built-in.
- let trait_def_id = ty::trait_ref_to_def_id(ccx.tcx, tpb);
+ let trait_def_id = ty::trait_ref_to_def_id(this.tcx(), tpb);
match kind_id {
Ok(kind_id) if trait_def_id != kind_id => {
- ccx.tcx.sess.span_warn(span,
- format!("default bound relaxed \
- for a {}, but this does \
- nothing because the given \
- bound is not a default. \
- Only `Sized?` is supported.",
- desc).as_slice());
- ty::try_add_builtin_trait(ccx.tcx,
+ this.tcx().sess.span_warn(span,
+ format!("default bound relaxed \
+ for a {}, but this \
+ does nothing because \
+ the given bound is not \
+ a default. \
+ Only `Sized?` is \
+ supported.",
+ desc).as_slice());
+ ty::try_add_builtin_trait(this.tcx(),
kind_id,
bounds);
}
}
}
_ if kind_id.is_ok() => {
- ty::try_add_builtin_trait(ccx.tcx,
- kind_id.unwrap(),
- bounds);
+ ty::try_add_builtin_trait(this.tcx(), kind_id.unwrap(), bounds);
}
// No lang item for Sized, so we can't add it as a bound.
_ => {}
}
}
-fn ty_generics(ccx: &CrateCtxt,
- space: subst::ParamSpace,
- lifetime_defs: &[ast::LifetimeDef],
- types: &[ast::TyParam],
- base_generics: ty::Generics,
- where_clause: &ast::WhereClause)
- -> ty::Generics
-{
+#[deriving(Clone, PartialEq, Eq)]
+enum CreateTypeParametersForAssociatedTypesFlag {
+ DontCreateTypeParametersForAssociatedTypes,
+ CreateTypeParametersForAssociatedTypes,
+}
+
+fn ensure_associated_types<'tcx,AC>(this: &AC, trait_id: ast::DefId)
+ where AC: AstConv<'tcx> {
+ if this.tcx().trait_associated_types.borrow().contains_key(&trait_id) {
+ return
+ }
+
+ if trait_id.krate == ast::LOCAL_CRATE {
+ match this.tcx().map.find(trait_id.node) {
+ Some(ast_map::NodeItem(item)) => {
+ match item.node {
+ ast::ItemTrait(_, _, _, ref trait_items) => {
+ let mut result = Vec::new();
+ let mut index = 0;
+ for trait_item in trait_items.iter() {
+ match *trait_item {
+ ast::RequiredMethod(_) |
+ ast::ProvidedMethod(_) => {}
+ ast::TypeTraitItem(ref associated_type) => {
+ let info = ty::AssociatedTypeInfo {
+ def_id: local_def(associated_type.id),
+ index: index,
+ ident: associated_type.ident,
+ };
+ result.push(info);
+ index += 1;
+ }
+ }
+ }
+ this.tcx()
+ .trait_associated_types
+ .borrow_mut()
+ .insert(trait_id, Rc::new(result));
+ return
+ }
+ _ => {
+ this.tcx().sess.bug("ensure_associated_types() \
+ called on non-trait")
+ }
+ }
+ }
+ _ => {
+ this.tcx().sess.bug("ensure_associated_types() called on \
+ non-trait")
+ }
+ }
+
+ }
+
+ // Cross-crate case.
+ let mut result = Vec::new();
+ let mut index = 0;
+ let trait_items = ty::trait_items(this.tcx(), trait_id);
+ for trait_item in trait_items.iter() {
+ match *trait_item {
+ ty::MethodTraitItem(_) => {}
+ ty::TypeTraitItem(ref associated_type) => {
+ let info = ty::AssociatedTypeInfo {
+ def_id: associated_type.def_id,
+ index: index,
+ ident: associated_type.ident
+ };
+ result.push(info);
+ index += 1;
+ }
+ }
+ }
+ this.tcx().trait_associated_types.borrow_mut().insert(trait_id,
+ Rc::new(result));
+}
+
+fn ty_generics<'tcx,AC>(this: &AC,
+ space: subst::ParamSpace,
+ lifetime_defs: &[ast::LifetimeDef],
+ types: &[ast::TyParam],
+ base_generics: ty::Generics,
+ where_clause: &ast::WhereClause,
+ create_type_parameters_for_associated_types:
+ CreateTypeParametersForAssociatedTypesFlag)
+ -> ty::Generics
+ where AC: AstConv<'tcx> {
let mut result = base_generics;
for (i, l) in lifetime_defs.iter().enumerate() {
let bounds = l.bounds.iter()
- .map(|l| ast_region_to_region(ccx.tcx, l))
+ .map(|l| ast_region_to_region(this.tcx(), l))
.collect();
let def = ty::RegionParameterDef { name: l.lifetime.name,
space: space,
result.regions.push(space, def);
}
+ assert!(result.types.is_empty_in(space));
+
+ // First, create the virtual type parameters for associated types if
+ // necessary.
+ let mut associated_types_generics = ty::Generics::empty();
+ match create_type_parameters_for_associated_types {
+ DontCreateTypeParametersForAssociatedTypes => {}
+ CreateTypeParametersForAssociatedTypes => {
+ let mut index = 0;
+ for param in types.iter() {
+ for bound in param.bounds.iter() {
+ match *bound {
+ ast::TraitTyParamBound(ref trait_bound) => {
+ match lookup_def_tcx(this.tcx(),
+ trait_bound.path.span,
+ trait_bound.ref_id) {
+ def::DefTrait(trait_did) => {
+ ensure_associated_types(this, trait_did);
+ let associated_types =
+ ty::associated_types_for_trait(
+ this.tcx(),
+ trait_did);
+ for associated_type_info in
+ associated_types.iter() {
+ let associated_type_trait_item =
+ ty::impl_or_trait_item(
+ this.tcx(),
+ associated_type_info.def_id);
+ let def = ty::TypeParameterDef {
+ ident: associated_type_trait_item
+ .ident(),
+ def_id:
+ associated_type_info.def_id,
+ space: space,
+ index: types.len() + index,
+ bounds: ty::ParamBounds {
+ builtin_bounds:
+ ty::empty_builtin_bounds(),
+ trait_bounds: Vec::new(),
+ region_bounds: Vec::new(),
+ },
+ associated_with: {
+ Some(local_def(param.id))
+ },
+ default: None,
+ };
+ associated_types_generics.types
+ .push(space,
+ def);
+ index += 1;
+ }
+ }
+ _ => {
+ this.tcx().sess.span_bug(trait_bound.path
+ .span,
+ "not a trait?!")
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+
+ // Now create the real type parameters.
+ let gcx = GenericsCtxt {
+ chain: this,
+ associated_types_generics: &associated_types_generics,
+ };
for (i, param) in types.iter().enumerate() {
- let def = get_or_create_type_parameter_def(ccx,
+ let def = get_or_create_type_parameter_def(&gcx,
space,
param,
i,
where_clause);
- debug!("ty_generics: def for type param: {}", def.repr(ccx.tcx));
+ debug!("ty_generics: def for type param: {}, {}",
+ def.repr(this.tcx()),
+ space);
result.types.push(space, def);
}
+ // Append the associated types to the result.
+ for associated_type_param in associated_types_generics.types
+ .get_slice(space)
+ .iter() {
+ assert!(result.types.get_slice(space).len() ==
+ associated_type_param.index);
+ debug!("ty_generics: def for associated type: {}, {}",
+ associated_type_param.repr(this.tcx()),
+ space);
+ result.types.push(space, (*associated_type_param).clone());
+ }
+
return result;
- fn get_or_create_type_parameter_def(ccx: &CrateCtxt,
+ fn get_or_create_type_parameter_def<'tcx,AC>(
+ this: &AC,
space: subst::ParamSpace,
param: &ast::TyParam,
index: uint,
where_clause: &ast::WhereClause)
- -> ty::TypeParameterDef {
- match ccx.tcx.ty_param_defs.borrow().find(&param.id) {
+ -> ty::TypeParameterDef
+ where AC: AstConv<'tcx> {
+ match this.tcx().ty_param_defs.borrow().find(&param.id) {
Some(d) => { return (*d).clone(); }
None => { }
}
let param_ty = ty::ParamTy::new(space, index, local_def(param.id));
- let bounds = compute_bounds(ccx,
+ let bounds = compute_bounds(this,
param.ident.name,
param_ty,
param.bounds.as_slice(),
&param.unbound,
param.span,
where_clause);
- let default = param.default.as_ref().map(|path| {
- let ty = ast_ty_to_ty(ccx, &ExplicitRscope, &**path);
- let cur_idx = index;
-
- ty::walk_ty(ty, |t| {
- match ty::get(t).sty {
- ty::ty_param(p) => if p.idx > cur_idx {
- span_err!(ccx.tcx.sess, path.span, E0128,
- "type parameters with a default cannot use \
- forward declared identifiers");
- },
- _ => {}
- }
- });
+ let default = match param.default {
+ None => None,
+ Some(ref path) => {
+ let ty = ast_ty_to_ty(this, &ExplicitRscope, &**path);
+ let cur_idx = index;
+
+ ty::walk_ty(ty, |t| {
+ match ty::get(t).sty {
+ ty::ty_param(p) => if p.idx > cur_idx {
+ span_err!(this.tcx().sess, path.span, E0128,
+ "type parameters with a default cannot use \
+ forward declared identifiers");
+ },
+ _ => {}
+ }
+ });
- ty
- });
+ Some(ty)
+ }
+ };
let def = ty::TypeParameterDef {
space: space,
index: index,
ident: param.ident,
def_id: local_def(param.id),
+ associated_with: None,
bounds: bounds,
default: default
};
- ccx.tcx.ty_param_defs.borrow_mut().insert(param.id, def.clone());
+ this.tcx().ty_param_defs.borrow_mut().insert(param.id, def.clone());
def
}
}
-fn compute_bounds(
- ccx: &CrateCtxt,
- name_of_bounded_thing: ast::Name,
- param_ty: ty::ParamTy,
- ast_bounds: &[ast::TyParamBound],
- unbound: &Option<ast::TyParamBound>,
- span: Span,
- where_clause: &ast::WhereClause)
- -> ty::ParamBounds
-{
+fn compute_bounds<'tcx,AC>(this: &AC,
+ name_of_bounded_thing: ast::Name,
+ param_ty: ty::ParamTy,
+ ast_bounds: &[ast::TyParamBound],
+ unbound: &Option<ast::TyParamBound>,
+ span: Span,
+ where_clause: &ast::WhereClause)
+ -> ty::ParamBounds
+ where AC: AstConv<'tcx> {
/*!
* Translate the AST's notion of ty param bounds (which are an
* enum consisting of a newtyped Ty or a region) to ty's
* traits, or the built-in trait (formerly known as kind): Send.
*/
- let mut param_bounds = conv_param_bounds(ccx,
+ let mut param_bounds = conv_param_bounds(this,
span,
param_ty,
ast_bounds,
where_clause);
- add_unsized_bound(ccx,
+ add_unsized_bound(this,
unbound,
&mut param_bounds.builtin_bounds,
"type parameter",
span);
- check_bounds_compatible(ccx.tcx, name_of_bounded_thing,
- &param_bounds, span);
+ check_bounds_compatible(this.tcx(),
+ name_of_bounded_thing,
+ &param_bounds,
+ span);
param_bounds.trait_bounds.sort_by(|a,b| a.def_id.cmp(&b.def_id));
}
}
-fn conv_param_bounds(ccx: &CrateCtxt,
- span: Span,
- param_ty: ty::ParamTy,
- ast_bounds: &[ast::TyParamBound],
- where_clause: &ast::WhereClause)
- -> ty::ParamBounds
-{
+fn conv_param_bounds<'tcx,AC>(this: &AC,
+ span: Span,
+ param_ty: ty::ParamTy,
+ ast_bounds: &[ast::TyParamBound],
+ where_clause: &ast::WhereClause)
+ -> ty::ParamBounds
+ where AC: AstConv<'tcx> {
let all_bounds =
- merge_param_bounds(ccx, param_ty, ast_bounds, where_clause);
+ merge_param_bounds(this.tcx(), param_ty, ast_bounds, where_clause);
let astconv::PartitionedBounds { builtin_bounds,
trait_bounds,
region_bounds,
unboxed_fn_ty_bounds } =
- astconv::partition_bounds(ccx.tcx, span, all_bounds.as_slice());
- let unboxed_fn_ty_bounds =
- unboxed_fn_ty_bounds.move_iter()
- .map(|b| instantiate_unboxed_fn_ty(ccx, b, param_ty));
+ astconv::partition_bounds(this.tcx(), span, all_bounds.as_slice());
+
+ let unboxed_fn_ty_bounds = unboxed_fn_ty_bounds.move_iter().map(|b| {
+ let trait_id = this.tcx().def_map.borrow().get(&b.ref_id).def_id();
+ let mut kind = None;
+ for &(lang_item, this_kind) in [
+ (this.tcx().lang_items.fn_trait(), ast::FnUnboxedClosureKind),
+ (this.tcx().lang_items.fn_mut_trait(),
+ ast::FnMutUnboxedClosureKind),
+ (this.tcx().lang_items.fn_once_trait(),
+ ast::FnOnceUnboxedClosureKind)
+ ].iter() {
+ if Some(trait_id) == lang_item {
+ kind = Some(this_kind);
+ break
+ }
+ }
+
+ let kind = match kind {
+ Some(kind) => kind,
+ None => {
+ this.tcx().sess.span_err(b.path.span,
+ "unboxed function trait must be one \
+ of `Fn`, `FnMut`, or `FnOnce`");
+ ast::FnMutUnboxedClosureKind
+ }
+ };
+
+ let rscope = ExplicitRscope;
+ let param_ty = param_ty.to_ty(this.tcx());
+ Rc::new(astconv::trait_ref_for_unboxed_function(this,
+ &rscope,
+ kind,
+ &*b.decl,
+ Some(param_ty)))
+ });
+
let trait_bounds: Vec<Rc<ty::TraitRef>> =
- trait_bounds.move_iter()
- .map(|b| instantiate_trait_ref(ccx, b, param_ty.to_ty(ccx.tcx)))
+ trait_bounds.into_iter()
+ .map(|b| {
+ instantiate_trait_ref(this,
+ b,
+ param_ty.to_ty(this.tcx()),
+ Some(param_ty.to_ty(this.tcx())))
+ })
.chain(unboxed_fn_ty_bounds)
.collect();
- let opt_region_bound =
- astconv::compute_opt_region_bound(
- ccx.tcx, span, builtin_bounds, region_bounds.as_slice(),
- trait_bounds.as_slice());
+ let region_bounds: Vec<ty::Region> =
+ region_bounds.move_iter()
+ .map(|r| ast_region_to_region(this.tcx(), r))
+ .collect();
ty::ParamBounds {
- opt_region_bound: opt_region_bound,
+ region_bounds: region_bounds,
builtin_bounds: builtin_bounds,
trait_bounds: trait_bounds,
}
}
-fn merge_param_bounds<'a>(ccx: &CrateCtxt,
+fn merge_param_bounds<'a>(tcx: &ty::ctxt,
param_ty: ty::ParamTy,
ast_bounds: &'a [ast::TyParamBound],
where_clause: &'a ast::WhereClause)
- -> Vec<&'a ast::TyParamBound>
-{
+ -> Vec<&'a ast::TyParamBound> {
/*!
* Merges the bounds declared on a type parameter with those
* found from where clauses into a single list.
}
for predicate in where_clause.predicates.iter() {
- let predicate_param_id = ccx.tcx
- .def_map
- .borrow()
- .find(&predicate.id)
- .expect("compute_bounds(): resolve \
- didn't resolve the type \
- parameter identifier in a \
- `where` clause")
- .def_id();
+ let predicate_param_id =
+ tcx.def_map
+ .borrow()
+ .find(&predicate.id)
+ .expect("compute_bounds(): resolve didn't resolve the type \
+ parameter identifier in a `where` clause")
+ .def_id();
if param_ty.def_id != predicate_param_id {
continue
}
def_id: ast::DefId,
ast_generics: &ast::Generics,
abi: abi::Abi)
- -> ty::Polytype {
-
+ -> ty::Polytype {
for i in decl.inputs.iter() {
match (*i).pat.node {
ast::PatIdent(_, _, _) => (),
}
}
- let ty_generics_for_fn_or_method =
- ty_generics_for_fn_or_method(ccx, ast_generics,
- ty::Generics::empty());
+ let ty_generics_for_fn_or_method = ty_generics_for_fn_or_method(
+ ccx,
+ ast_generics,
+ ty::Generics::empty(),
+ DontCreateTypeParametersForAssociatedTypes);
let rb = BindingRscope::new(def_id.node);
let input_tys = decl.inputs
.iter()
*/
use middle::subst;
-use middle::ty::{AutoPtr, AutoDerefRef, AutoUnsize, AutoUnsafe};
+use middle::ty::{AutoPtr, AutoDerefRef, AdjustDerefRef, AutoUnsize, AutoUnsafe};
use middle::ty::{mt};
use middle::ty;
use middle::typeck::infer::{CoerceResult, resolve_type, Coercion};
mt {ty: inner_ty, mutbl: mutbl_b});
try!(sub.tys(a_borrowed, b));
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(AutoPtr(r_borrow, mutbl_b, None))
})))
let unsized_ty = ty::mk_slice(self.get_ref().infcx.tcx, r_borrow,
mt {ty: t_a, mutbl: mutbl_b});
try!(self.get_ref().infcx.try(|| sub.tys(unsized_ty, b)));
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 0,
autoref: Some(ty::AutoPtr(r_borrow,
mutbl_b,
let sty_b = &ty::get(b).sty;
match (sty_a, sty_b) {
- (&ty::ty_rptr(_, ty::mt{ty: t_a, ..}), &ty::ty_rptr(_, mt_b)) => {
+ (&ty::ty_rptr(_, ty::mt{ty: t_a, mutbl: mutbl_a}), &ty::ty_rptr(_, mt_b)) => {
self.unpack_actual_value(t_a, |sty_a| {
- match self.unsize_ty(sty_a, mt_b.ty) {
+ match self.unsize_ty(t_a, sty_a, mt_b.ty) {
Some((ty, kind)) => {
+ if !can_coerce_mutbls(mutbl_a, mt_b.mutbl) {
+ return Err(ty::terr_mutability);
+ }
+
let coercion = Coercion(self.get_ref().trace.clone());
let r_borrow = self.get_ref().infcx.next_region_var(coercion);
let ty = ty::mk_rptr(self.get_ref().infcx.tcx,
try!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
AutoPtr(AutoUnsize({:?})))", kind);
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoPtr(r_borrow, mt_b.mutbl,
Some(box AutoUnsize(kind))))
}
})
}
- (&ty::ty_rptr(_, ty::mt{ty: t_a, ..}), &ty::ty_ptr(mt_b)) => {
+ (&ty::ty_rptr(_, ty::mt{ty: t_a, mutbl: mutbl_a}), &ty::ty_ptr(mt_b)) => {
self.unpack_actual_value(t_a, |sty_a| {
- match self.unsize_ty(sty_a, mt_b.ty) {
+ match self.unsize_ty(t_a, sty_a, mt_b.ty) {
Some((ty, kind)) => {
+ if !can_coerce_mutbls(mutbl_a, mt_b.mutbl) {
+ return Err(ty::terr_mutability);
+ }
+
let ty = ty::mk_ptr(self.get_ref().infcx.tcx,
ty::mt{ty: ty, mutbl: mt_b.mutbl});
try!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
AutoPtr(AutoUnsize({:?})))", kind);
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsafe(mt_b.mutbl,
Some(box AutoUnsize(kind))))
}
(&ty::ty_uniq(t_a), &ty::ty_uniq(t_b)) => {
self.unpack_actual_value(t_a, |sty_a| {
- match self.unsize_ty(sty_a, t_b) {
+ match self.unsize_ty(t_a, sty_a, t_b) {
Some((ty, kind)) => {
let ty = ty::mk_uniq(self.get_ref().infcx.tcx, ty);
try!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
AutoUnsizeUniq({:?}))", kind);
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsizeUniq(kind))
})))
// performed to unsize it.
// E.g., `[T, ..n]` -> `([T], UnsizeLength(n))`
fn unsize_ty(&self,
+ ty_a: ty::t,
sty_a: &ty::sty,
ty_b: ty::t)
-> Option<(ty::t, ty::UnsizeKind)> {
def_id,
substs.clone(),
bounds);
- Some((ty, ty::UnsizeVtable(bounds,
- def_id,
- substs.clone())))
+ Some((ty, ty::UnsizeVtable(ty::TyTrait { def_id: def_id,
+ bounds: bounds,
+ substs: substs.clone() },
+ ty_a)))
}
(&ty::ty_struct(did_a, ref substs_a), &ty::ty_struct(did_b, ref substs_b))
if did_a == did_b => {
if self.get_ref().infcx.try(|| sub.tys(*tp_a, *tp_b)).is_ok() {
continue;
}
- match self.unpack_actual_value(*tp_a, |tp| self.unsize_ty(tp, *tp_b)) {
+ match
+ self.unpack_actual_value(
+ *tp_a,
+ |tp| self.unsize_ty(*tp_a, tp, *tp_b))
+ {
Some((new_tp, k)) => {
// Check that the whole types match.
let mut new_substs = substs_a.clone();
{
let tcx = self.get_ref().infcx.tcx;
- debug!("coerce_borrowed_object(a={}, sty_a={:?}, b={})",
+ debug!("coerce_borrowed_object(a={}, sty_a={:?}, b={}, b_mutbl={})",
a.repr(tcx), sty_a,
- b.repr(tcx));
+ b.repr(tcx), b_mutbl);
let coercion = Coercion(self.get_ref().trace.clone());
let r_a = self.get_ref().infcx.next_region_var(coercion);
- self.coerce_object(a, sty_a, b,
+ self.coerce_object(a, sty_a, b, b_mutbl,
|tr| ty::mk_rptr(tcx, r_a, ty::mt{ mutbl: b_mutbl, ty: tr }),
|| AutoPtr(r_a, b_mutbl, None))
}
{
let tcx = self.get_ref().infcx.tcx;
- debug!("coerce_unsafe_object(a={}, sty_a={:?}, b={})",
+ debug!("coerce_unsafe_object(a={}, sty_a={:?}, b={}, b_mutbl={})",
a.repr(tcx), sty_a,
- b.repr(tcx));
+ b.repr(tcx), b_mutbl);
- self.coerce_object(a, sty_a, b,
+ self.coerce_object(a, sty_a, b, b_mutbl,
|tr| ty::mk_ptr(tcx, ty::mt{ mutbl: b_mutbl, ty: tr }),
|| AutoUnsafe(b_mutbl, None))
}
a: ty::t,
sty_a: &ty::sty,
b: ty::t,
+ b_mutbl: ast::Mutability,
mk_ty: |ty::t| -> ty::t,
mk_adjust: || -> ty::AutoRef) -> CoerceResult
{
let tcx = self.get_ref().infcx.tcx;
match *sty_a {
- ty::ty_rptr(_, ty::mt{ty, ..}) => match ty::get(ty).sty {
+ ty::ty_rptr(_, ty::mt{ty, mutbl}) => match ty::get(ty).sty {
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
bounds,
..
- }) => {
+ }) =>
+ {
+ debug!("mutbl={} b_mutbl={}", mutbl, b_mutbl);
+
let tr = ty::mk_trait(tcx, def_id, substs.clone(), bounds);
try!(self.subtype(mk_ty(tr), b));
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(mk_adjust())
})))
_ => return self.subtype(a, b)
};
- let adj = ty::AutoAddEnv(fn_ty_b.store);
+ let adj = ty::AdjustAddEnv(fn_ty_b.store);
let a_closure = ty::mk_closure(self.get_ref().infcx.tcx,
ty::ClosureTy {
sig: fn_ty_a.sig.clone(),
// Although references and unsafe ptrs have the same
// representation, we still register an AutoDerefRef so that
// regionck knows that the region for `a` must be valid here.
- Ok(Some(AutoDerefRef(AutoDerefRef {
+ Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsafe(mutbl_b, None))
})))
}
}
+
+fn can_coerce_mutbls(from_mutbl: ast::Mutability,
+ to_mutbl: ast::Mutability)
+ -> bool {
+ match (from_mutbl, to_mutbl) {
+ (ast::MutMutable, ast::MutMutable) => true,
+ (ast::MutImmutable, ast::MutImmutable) => true,
+ (ast::MutMutable, ast::MutImmutable) => true,
+ (ast::MutImmutable, ast::MutMutable) => false,
+ }
+}
use middle::subst;
-use middle::subst::Substs;
+use middle::subst::{ErasedRegions, NonerasedRegions, Substs};
use middle::ty::{FloatVar, FnSig, IntVar, TyVar};
use middle::ty::{IntType, UintType};
use middle::ty::{BuiltinBounds};
let a_tps = a_subst.types.get_slice(space);
let b_tps = b_subst.types.get_slice(space);
let tps = try!(self.tps(space, a_tps, b_tps));
+ substs.types.replace(space, tps);
+ }
- let a_regions = a_subst.regions().get_slice(space);
- let b_regions = b_subst.regions().get_slice(space);
-
- let mut invariance = Vec::new();
- let r_variances = match variances {
- Some(ref variances) => variances.regions.get_slice(space),
- None => {
- for _ in a_regions.iter() {
- invariance.push(ty::Invariant);
- }
- invariance.as_slice()
- }
- };
+ match (&a_subst.regions, &b_subst.regions) {
+ (&ErasedRegions, _) | (_, &ErasedRegions) => {
+ substs.regions = ErasedRegions;
+ }
- let regions = try!(relate_region_params(self,
- item_def_id,
- r_variances,
- a_regions,
- b_regions));
+ (&NonerasedRegions(ref a), &NonerasedRegions(ref b)) => {
+ for &space in subst::ParamSpace::all().iter() {
+ let a_regions = a.get_slice(space);
+ let b_regions = b.get_slice(space);
- substs.types.replace(space, tps);
- substs.mut_regions().replace(space, regions);
+ let mut invariance = Vec::new();
+ let r_variances = match variances {
+ Some(ref variances) => {
+ variances.regions.get_slice(space)
+ }
+ None => {
+ for _ in a_regions.iter() {
+ invariance.push(ty::Invariant);
+ }
+ invariance.as_slice()
+ }
+ };
+
+ let regions = try!(relate_region_params(self,
+ item_def_id,
+ r_variances,
+ a_regions,
+ b_regions));
+ substs.mut_regions().replace(space, regions);
+ }
+ }
}
return Ok(substs);
same_frs: &FreeRegionsFromSameFn) {
let scope_id = same_frs.scope_id;
let (sub_fr, sup_fr) = (same_frs.sub_fr, same_frs.sup_fr);
- for sr in same_regions.mut_iter() {
+ for sr in same_regions.iter_mut() {
if sr.contains(&sup_fr.bound_region)
&& scope_id == sr.scope_id {
sr.push(sub_fr.bound_region);
infer::ExprAssignable(_) => "mismatched types",
infer::RelateTraitRefs(_) => "mismatched traits",
infer::RelateSelfType(_) => "mismatched types",
+ infer::RelateOutputImplTypes(_) => "mismatched types",
infer::MatchExpressionArm(_, _) => "match arms have incompatible types",
infer::IfExpression(_) => "if and else have incompatible types",
};
Some(&m.pe_explicit_self().node),
m.span))
}
+ ast::TypeImplItem(_) => None,
}
},
_ => None
Some(&d) => d
};
match a_def {
- def::DefTy(did) | def::DefStruct(did) => {
+ def::DefTy(did, _) | def::DefStruct(did) => {
let generics = ty::lookup_item_type(self.tcx, did).generics;
let expected =
ast::TyFixedLengthVec(build_to(ty, to), e)
}
ast::TyTup(tys) => {
- ast::TyTup(tys.move_iter().map(|ty| build_to(ty, to)).collect())
+ ast::TyTup(tys.into_iter().map(|ty| build_to(ty, to)).collect())
}
ast::TyParen(typ) => ast::TyParen(build_to(typ, to)),
other => other
format!("traits are compatible")
}
infer::RelateSelfType(_) => {
- format!("type matches impl")
+ format!("self type matches impl self type")
+ }
+ infer::RelateOutputImplTypes(_) => {
+ format!("trait type parameters matches those \
+ specified on the impl")
}
infer::MatchExpressionArm(_, _) => {
format!("match arms have compatible types")
taken.push_all(m.pe_generics().lifetimes.as_slice());
Some(m.id)
}
+ ast::TypeImplItem(_) => None,
}
}
_ => None
use middle::ty::{TyVid, IntVid, FloatVid, RegionVid};
use middle::ty;
use middle::ty_fold;
+use middle::ty_fold::TypeFoldable;
use middle::ty_fold::TypeFolder;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::coercion::Coerce;
pub mod lub;
pub mod region_inference;
pub mod resolve;
+mod skolemize;
pub mod sub;
pub mod test;
pub mod type_variable;
// Relating trait refs when resolving vtables
RelateTraitRefs(Span),
- // Relating trait refs when resolving vtables
+ // Relating self types when resolving vtables
RelateSelfType(Span),
+ // Relating trait type parameters to those found in impl etc
+ RelateOutputImplTypes(Span),
+
// Computing common supertype in the arms of a match expression
MatchExpressionArm(Span, Span),
BoundRegionInCoherence(ast::Name),
}
+#[deriving(Show)]
pub enum fixup_err {
unresolved_int_ty(IntVid),
unresolved_float_ty(FloatVid),
origin: TypeOrigin,
a: ty::t,
b: ty::t)
- -> ures {
+ -> ures
+{
debug!("mk_subty({} <: {})", a.repr(cx.tcx), b.repr(cx.tcx));
- indent(|| {
- cx.commit_if_ok(|| {
- let trace = TypeTrace {
- origin: origin,
- values: Types(expected_found(a_is_expected, a, b))
- };
- cx.sub(a_is_expected, trace).tys(a, b)
- })
- }).to_ures()
+ cx.commit_if_ok(|| {
+ cx.sub_types(a_is_expected, origin, a, b)
+ })
}
pub fn can_mk_subty(cx: &InferCtxt, a: ty::t, b: ty::t) -> ures {
origin: Misc(codemap::DUMMY_SP),
values: Types(expected_found(true, a, b))
};
- cx.sub(true, trace).tys(a, b)
- }).to_ures()
+ cx.sub(true, trace).tys(a, b).to_ures()
+ })
}
pub fn can_mk_eqty(cx: &InferCtxt, a: ty::t, b: ty::t) -> ures {
cx.region_vars.verify_param_bound(origin, param_ty, a, bs);
}
+
+pub fn skolemize<T:TypeFoldable+Repr>(cx: &InferCtxt, a: T) -> T {
+ let mut skol = skolemize::TypeSkolemizer::new(cx);
+ let b = a.fold_with(&mut skol);
+ debug!("skol(a={}) -> {}", a.repr(cx.tcx), b.repr(cx.tcx));
+ b
+}
+
pub fn mk_eqty(cx: &InferCtxt,
a_is_expected: bool,
origin: TypeOrigin,
-> ures
{
debug!("mk_eqty({} <: {})", a.repr(cx.tcx), b.repr(cx.tcx));
- cx.commit_if_ok(|| {
- let trace = TypeTrace {
- origin: origin,
- values: Types(expected_found(a_is_expected, a, b))
- };
- try!(cx.equate(a_is_expected, trace).tys(a, b));
- Ok(())
- })
+ cx.commit_if_ok(
+ || cx.eq_types(a_is_expected, origin, a, b))
}
pub fn mk_sub_trait_refs(cx: &InferCtxt,
origin: TypeOrigin,
a: Rc<ty::TraitRef>,
b: Rc<ty::TraitRef>)
- -> ures
+ -> ures
{
debug!("mk_sub_trait_refs({} <: {})",
a.repr(cx.tcx), b.repr(cx.tcx));
- indent(|| {
- cx.commit_if_ok(|| {
- let trace = TypeTrace {
- origin: origin,
- values: TraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
- };
- let suber = cx.sub(a_is_expected, trace);
- suber.trait_refs(&*a, &*b)
- })
- }).to_ures()
+ cx.commit_if_ok(
+ || cx.sub_trait_refs(a_is_expected, origin, a.clone(), b.clone()))
}
fn expected_found<T>(a_is_expected: bool,
a: T,
- b: T) -> ty::expected_found<T> {
+ b: T)
+ -> ty::expected_found<T>
+{
if a_is_expected {
ty::expected_found {expected: a, found: b}
} else {
}
/// Execute `f` then unroll any bindings it creates
- pub fn probe<T,E>(&self, f: || -> Result<T,E>) -> Result<T,E> {
+ pub fn probe<R>(&self, f: || -> R) -> R {
debug!("probe()");
let snapshot = self.start_snapshot();
let r = f();
{
self.region_vars.add_given(sub, sup);
}
+
+ pub fn sub_types(&self,
+ a_is_expected: bool,
+ origin: TypeOrigin,
+ a: ty::t,
+ b: ty::t)
+ -> ures
+ {
+ debug!("sub_types({} <: {})", a.repr(self.tcx), b.repr(self.tcx));
+ let trace = TypeTrace {
+ origin: origin,
+ values: Types(expected_found(a_is_expected, a, b))
+ };
+ self.sub(a_is_expected, trace).tys(a, b).to_ures()
+ }
+
+ pub fn eq_types(&self,
+ a_is_expected: bool,
+ origin: TypeOrigin,
+ a: ty::t,
+ b: ty::t)
+ -> ures
+ {
+ let trace = TypeTrace {
+ origin: origin,
+ values: Types(expected_found(a_is_expected, a, b))
+ };
+ self.equate(a_is_expected, trace).tys(a, b).to_ures()
+ }
+
+ pub fn sub_trait_refs(&self,
+ a_is_expected: bool,
+ origin: TypeOrigin,
+ a: Rc<ty::TraitRef>,
+ b: Rc<ty::TraitRef>)
+ -> ures
+ {
+ debug!("sub_trait_refs({} <: {})",
+ a.repr(self.tcx),
+ b.repr(self.tcx));
+ let trace = TypeTrace {
+ origin: origin,
+ values: TraitRefs(expected_found(a_is_expected,
+ a.clone(), b.clone()))
+ };
+ let suber = self.sub(a_is_expected, trace);
+ suber.trait_refs(&*a, &*b).to_ures()
+ }
}
impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
.collect()
}
- pub fn fresh_substs_for_type(&self,
- span: Span,
- generics: &ty::Generics)
- -> subst::Substs
+ pub fn fresh_substs_for_generics(&self,
+ span: Span,
+ generics: &ty::Generics)
+ -> subst::Substs
{
/*!
* Given a set of generics defined on a type or impl, returns
* a substitution mapping each type/region parameter to a
* fresh inference variable.
*/
- assert!(generics.types.len(subst::SelfSpace) == 0);
+
+ let type_params =
+ generics.types.map(
+ |_| self.next_ty_var());
+ let region_params =
+ generics.regions.map(
+ |d| self.next_region_var(EarlyBoundRegion(span, d.name)));
+ subst::Substs::new(type_params, region_params)
+ }
+
+ pub fn fresh_substs_for_trait(&self,
+ span: Span,
+ generics: &ty::Generics,
+ self_ty: ty::t)
+ -> subst::Substs
+ {
+ /*!
+ * Given a set of generics defined on a trait, returns a
+ * substitution mapping each output type/region parameter to a
+ * fresh inference variable, and mapping the self type to
+ * `self_ty`.
+ */
+
+ assert!(generics.types.len(subst::SelfSpace) == 1);
assert!(generics.types.len(subst::FnSpace) == 0);
assert!(generics.regions.len(subst::SelfSpace) == 0);
assert!(generics.regions.len(subst::FnSpace) == 0);
let region_param_defs = generics.regions.get_slice(subst::TypeSpace);
let regions = self.region_vars_for_defs(span, region_param_defs);
let type_parameters = self.next_ty_vars(type_parameter_count);
- subst::Substs::new_type(type_parameters, regions)
+ subst::Substs::new_trait(type_parameters, regions, self_ty)
}
pub fn fresh_bound_region(&self, binder_id: ast::NodeId) -> ty::Region {
trait_ref_to_string(self.tcx, &t)
}
+ pub fn contains_unbound_type_variables(&self, typ: ty::t) -> ty::t {
+ match resolve_type(self,
+ None,
+ typ, resolve_nested_tvar | resolve_ivar) {
+ Ok(new_type) => new_type,
+ Err(_) => typ
+ }
+ }
+
pub fn resolve_type_vars_if_possible(&self, typ: ty::t) -> ty::t {
match resolve_type(self,
None,
Misc(span) => span,
RelateTraitRefs(span) => span,
RelateSelfType(span) => span,
+ RelateOutputImplTypes(span) => span,
MatchExpressionArm(match_span, _) => match_span,
IfExpression(span) => span,
}
RelateSelfType(a) => {
format!("RelateSelfType({})", a.repr(tcx))
}
+ RelateOutputImplTypes(a) => {
+ format!("RelateOutputImplTypes({})", a.repr(tcx))
+ }
MatchExpressionArm(a, b) => {
format!("MatchExpressionArm({}, {})", a.repr(tcx), b.repr(tcx))
}
// future). If you want to resolve everything but one type, you are
// probably better off writing `resolve_all - resolve_ivar`.
-
use middle::ty::{FloatVar, FloatVid, IntVar, IntVid, RegionVid, TyVar, TyVid};
use middle::ty::{IntType, UintType};
use middle::ty;
use middle::typeck::infer::{fixup_err, fres, InferCtxt};
use middle::typeck::infer::{unresolved_int_ty,unresolved_float_ty,unresolved_ty};
use syntax::codemap::Span;
-use util::common::indent;
use util::ppaux::{Repr, ty_to_string};
pub static resolve_nested_tvar: uint = 0b0000000001;
}
impl<'a, 'tcx> ty_fold::TypeFolder<'tcx> for ResolveState<'a, 'tcx> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ fn tcx(&self) -> &ty::ctxt<'tcx> {
self.infcx.tcx
}
pub fn resolve_type_chk(&mut self,
typ: ty::t)
- -> fres<ty::t> {
+ -> fres<ty::t>
+ {
self.err = None;
debug!("Resolving {} (modes={:x})",
let rty = self.resolve_type(typ);
match self.err {
- None => {
- debug!("Resolved {} to {} (modes={:x})",
- ty_to_string(self.infcx.tcx, typ),
- ty_to_string(self.infcx.tcx, rty),
- self.modes);
- return Ok(rty);
- }
- Some(e) => return Err(e)
+ None => {
+ debug!("Resolved {} to {} (modes={:x})",
+ ty_to_string(self.infcx.tcx, typ),
+ ty_to_string(self.infcx.tcx, rty),
+ self.modes);
+ return Ok(rty);
+ }
+ Some(e) => {
+ return Err(e);
+ }
}
}
orig: ty::Region)
-> fres<ty::Region> {
self.err = None;
- let resolved = indent(|| self.resolve_region(orig) );
+ let resolved = self.resolve_region(orig);
match self.err {
None => Ok(resolved),
Some(e) => Err(e)
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * Skolemization is the process of replacing unknown variables with
+ * fresh types. The idea is that the type, after skolemization,
+ * contains no inference variables but instead contains either a value
+ * for each variable (if the variable had already been resolved) or
+ * fresh "arbitrary" types wherever a variable would have been.
+ *
+ * Skolemization is used wherever we want to test what the type
+ * inferencer knows "so far". The primary place it is used right now
+ * is in the trait matching algorithm, which needs to be able to test
+ * whether an `impl` self type matches some other type X -- *without*
+ * affecting `X`. That means that if the type `X` is in fact an
+ * unbound type variable, we want the match to be regarded as
+ * ambiguous, because depending on what type that type variable is
+ * ultimately assigned, the match may or may not succeed.
+ *
+ * Note that you should be careful not to allow the output of
+ * skolemization to leak to the user in error messages or in any other
+ * form. Skolemization is only really useful as an internal detail.
+ *
+ * __An important detail concerning regions.__ The skolemizer also
+ * replaces *all* regions with 'static. The reason behind this is
+ * that, in general, we do not take region relationships into account
+ * when making type-overloaded decisions. This is important because of
+ * the design of the region inferencer, which is not based on
+ * unification but rather on accumulating and then solving a set of
+ * constraints. In contrast, the type inferencer assigns a value to
+ * each type variable only once, and it does so as soon as it can, so
+ * it is reasonable to ask what the type inferencer knows "so far".
+ */
+
+use middle::ty;
+use middle::ty_fold;
+use middle::ty_fold::TypeFoldable;
+use middle::ty_fold::TypeFolder;
+
+use super::InferCtxt;
+use super::unify::InferCtxtMethodsForSimplyUnifiableTypes;
+use super::unify::SimplyUnifiable;
+use super::unify::UnifyKey;
+
+// Folder that replaces each inference variable in a type with either
+// its already-known value or a fresh skolemized type, numbering the
+// fresh types from zero (see the module comment above).
+pub struct TypeSkolemizer<'a, 'tcx:'a> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ // Next index to hand out for a fresh skolemized type.
+ skolemization_count: uint
+}
+
+impl<'a, 'tcx> TypeSkolemizer<'a, 'tcx> {
+ // NOTE(review): the `<'tcx>` on `new` shadows the impl's `'tcx`
+ // lifetime parameter -- confirm the shadowing is intended.
+ pub fn new<'tcx>(infcx: &'a InferCtxt<'a, 'tcx>) -> TypeSkolemizer<'a, 'tcx> {
+ TypeSkolemizer { infcx: infcx, skolemization_count: 0 }
+ }
+
+ // Resolve a type variable through the inference context's variable
+ // table; skolemize it if no value is known yet.
+ fn probe_ty(&mut self, v: ty::TyVid) -> ty::t {
+ self.skolemize_if_none(self.infcx.type_variables.borrow().probe(v), ty::SkolemizedTy)
+ }
+
+ // Same, but for variables kept in the simple unification tables.
+ // NOTE(review): `ty::SkolemizedIntTy` is used for float variables as
+ // well as int variables -- confirm that is intended.
+ fn probe_unifiable<V:SimplyUnifiable,K:UnifyKey<Option<V>>>(&mut self, k: K) -> ty::t {
+ self.skolemize_if_none(self.infcx.probe_var(k), ty::SkolemizedIntTy)
+ }
+
+ // If the variable already has a value, fold it (it may itself contain
+ // further inference variables); otherwise mint a fresh skolemized
+ // type carrying the next available index.
+ fn skolemize_if_none(&mut self, o: Option<ty::t>,
+ skolemizer: |uint| -> ty::InferTy)
+ -> ty::t {
+ match o {
+ Some(t) => t.fold_with(self),
+ None => {
+ let index = self.skolemization_count;
+ self.skolemization_count += 1;
+ ty::mk_infer(self.tcx(), skolemizer(index))
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for TypeSkolemizer<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> &'b ty::ctxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ // Regions do not participate in type-overloaded decisions (see the
+ // module comment), so every free region is collapsed to 'static;
+ // bound regions are preserved.
+ fn fold_region(&mut self, r: ty::Region) -> ty::Region {
+ match r {
+ ty::ReEarlyBound(..) |
+ ty::ReLateBound(..) => {
+ // leave bound regions alone
+ r
+ }
+
+ ty::ReStatic |
+ ty::ReFree(_) |
+ ty::ReScope(_) |
+ ty::ReInfer(_) |
+ ty::ReEmpty => {
+ // replace all free regions with 'static
+ ty::ReStatic
+ }
+ }
+ }
+
+ // Replace inference variables with their value or a fresh skolemized
+ // type; every other type constructor is folded structurally.  Feeding
+ // an already-skolemized or open existential type back in is a bug.
+ fn fold_ty(&mut self, t: ty::t) -> ty::t {
+ match ty::get(t).sty {
+ ty::ty_infer(ty::TyVar(v)) => {
+ self.probe_ty(v)
+ }
+
+ ty::ty_infer(ty::IntVar(v)) => {
+ self.probe_unifiable(v)
+ }
+
+ ty::ty_infer(ty::FloatVar(v)) => {
+ self.probe_unifiable(v)
+ }
+
+ ty::ty_infer(ty::SkolemizedTy(_)) |
+ ty::ty_infer(ty::SkolemizedIntTy(_)) => {
+ self.tcx().sess.bug("Cannot skolemize a skolemized type");
+ }
+
+ ty::ty_open(..) => {
+ self.tcx().sess.bug("Cannot skolemize an open existential type");
+ }
+
+ ty::ty_nil |
+ ty::ty_bot |
+ ty::ty_bool |
+ ty::ty_char |
+ ty::ty_int(..) |
+ ty::ty_uint(..) |
+ ty::ty_float(..) |
+ ty::ty_enum(..) |
+ ty::ty_box(..) |
+ ty::ty_uniq(..) |
+ ty::ty_str |
+ ty::ty_err |
+ ty::ty_vec(..) |
+ ty::ty_ptr(..) |
+ ty::ty_rptr(..) |
+ ty::ty_bare_fn(..) |
+ ty::ty_closure(..) |
+ ty::ty_trait(..) |
+ ty::ty_struct(..) |
+ ty::ty_unboxed_closure(..) |
+ ty::ty_tup(..) |
+ ty::ty_param(..) => {
+ ty_fold::super_fold_ty(self, t)
+ }
+ }
+ }
+}
use driver::diagnostic::Emitter;
use driver::driver;
use driver::session;
-use middle::freevars;
use middle::lang_items;
use middle::region;
use middle::resolve;
// run just enough stuff to build a tcx:
let lang_items = lang_items::collect_language_items(krate, &sess);
- let resolve::CrateMap { def_map: def_map, .. } =
+ let resolve::CrateMap { def_map, freevars, capture_mode_map, .. } =
resolve::resolve_crate(&sess, &lang_items, krate);
- let (freevars_map, captures_map) = freevars::annotate_freevars(&def_map,
- krate);
let named_region_map = resolve_lifetime::krate(&sess, krate);
let region_map = region::resolve_crate(&sess, krate);
let stability_index = stability::Index::build(krate);
def_map,
named_region_map,
ast_map,
- freevars_map,
- captures_map,
+ freevars,
+ capture_mode_map,
region_map,
lang_items,
stability_index);
* relationship.
*/
pub trait SimplyUnifiable : Clone + PartialEq + Repr {
+ fn to_type(&self) -> ty::t;
fn to_type_err(expected_found<Self>) -> ty::type_err;
}
a_id: K,
b: V)
-> ures;
+ fn probe_var(&self, a_id: K) -> Option<ty::t>;
}
impl<'a,'tcx,V:SimplyUnifiable,K:UnifyKey<Option<V>>>
}
}
}
+
+ fn probe_var(&self, a_id: K) -> Option<ty::t> {
+ let tcx = self.tcx;
+ let table = UnifyKey::unification_table(self);
+ let node_a = table.borrow_mut().get(tcx, a_id);
+ match node_a.value {
+ None => None,
+ Some(ref a_t) => Some(a_t.to_type())
+ }
+ }
}
///////////////////////////////////////////////////////////////////////////
}
impl SimplyUnifiable for IntVarValue {
+ fn to_type(&self) -> ty::t {
+ match *self {
+ ty::IntType(i) => ty::mk_mach_int(i),
+ ty::UintType(i) => ty::mk_mach_uint(i),
+ }
+ }
+
fn to_type_err(err: expected_found<IntVarValue>) -> ty::type_err {
return ty::terr_int_mismatch(err);
}
}
impl SimplyUnifiable for ast::FloatTy {
+ fn to_type(&self) -> ty::t {
+ ty::mk_mach_float(*self)
+ }
+
fn to_type_err(err: expected_found<ast::FloatTy>) -> ty::type_err {
return ty::terr_float_mismatch(err);
}
use util::common::time;
use util::ppaux::Repr;
use util::ppaux;
-use util::nodemap::{DefIdMap, FnvHashMap};
+use util::nodemap::{NodeMap, FnvHashMap};
use std::cell::RefCell;
+use std::rc::Rc;
use syntax::codemap::Span;
use syntax::print::pprust::*;
use syntax::{ast, ast_map, abi};
pub index: uint
}
-#[deriving(Clone, Encodable, Decodable)]
+#[deriving(Clone)]
pub enum MethodOrigin {
// fully statically resolved method
MethodStatic(ast::DefId),
MethodStaticUnboxedClosure(ast::DefId),
// method invoked on a type parameter with a bounded trait
- MethodParam(MethodParam),
+ MethodTypeParam(MethodParam),
// method invoked on a trait instance
- MethodObject(MethodObject),
+ MethodTraitObject(MethodObject),
}
// details for a method invoked with a receiver whose type is a type parameter
// with a bounded trait.
-#[deriving(Clone, Encodable, Decodable)]
+#[deriving(Clone)]
pub struct MethodParam {
- // the trait containing the method to be invoked
- pub trait_id: ast::DefId,
+ // the precise trait reference that occurs as a bound -- this may
+ // be a supertrait of what the user actually typed.
+ pub trait_ref: Rc<ty::TraitRef>,
- // index of the method to be invoked amongst the trait's methods
+ // index of the method in the list of the trait's methods
pub method_num: uint,
-
- // index of the type parameter (from those that are in scope) that is
- // the type of the receiver
- pub param_num: param_index,
-
- // index of the bound for this type parameter which specifies the trait
- pub bound_num: uint,
}
// details for a method invoked with a receiver whose type is an object
-#[deriving(Clone, Encodable, Decodable)]
+#[deriving(Clone)]
pub struct MethodObject {
// the (super)trait containing the method to be invoked
- pub trait_id: ast::DefId,
+ pub trait_ref: Rc<ty::TraitRef>,
// the actual base trait id of the object
pub object_trait_id: ast::DefId,
/**
* With method calls, we store some extra information in
- * side tables (i.e method_map, vtable_map). We use
+ * side tables (i.e. method_map). We use
* MethodCall as a key to index into these tables instead of
* just directly using the expression's NodeId. The reason
* for this being that we may apply adjustments (coercions)
}
}
-pub type vtable_map = RefCell<FnvHashMap<MethodCall, vtable_res>>;
-
-
-pub type impl_vtable_map = RefCell<DefIdMap<vtable_res>>;
+// For every explicit cast into an object type, maps from the cast
+// expr to the associated trait ref.
+pub type ObjectCastMap = RefCell<NodeMap<Rc<ty::TraitRef>>>;
pub struct CrateCtxt<'a, 'tcx: 'a> {
// A mapping from method call sites to traits that have that method.
self.add_constraints_from_sig(&method.fty.sig,
self.covariant);
}
+ ty::TypeTraitItem(_) => {}
}
}
}
use lint::{LintPassObject, LintId, Lint};
use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT};
-use syntax::ext::base::{IdentTT, LetSyntaxTT, ItemDecorator, ItemModifier};
+use syntax::ext::base::{IdentTT, LetSyntaxTT, Decorator, Modifier};
use syntax::ext::base::{MacroExpanderFn};
use syntax::codemap::Span;
use syntax::parse::token;
self.syntax_exts.push((name, match extension {
NormalTT(ext, _) => NormalTT(ext, Some(self.krate_span)),
IdentTT(ext, _) => IdentTT(ext, Some(self.krate_span)),
- ItemDecorator(ext) => ItemDecorator(ext),
- ItemModifier(ext) => ItemModifier(ext),
+ Decorator(ext) => Decorator(ext),
+ Modifier(ext) => Modifier(ext),
// there's probably a nicer way to signal this:
LetSyntaxTT(_, _) => fail!("can't register a new LetSyntax!"),
}));
/// Register a lint group.
pub fn register_lint_group(&mut self, name: &'static str, to: Vec<&'static Lint>) {
- self.lint_groups.insert(name, to.move_iter().map(|x| LintId::of(x)).collect());
+ self.lint_groups.insert(name, to.into_iter().map(|x| LintId::of(x)).collect());
}
}
if cx.sess.verbose() {
for t in substs.types.get_slice(subst::SelfSpace).iter() {
- strs.push(format!("for {}", t.repr(cx)));
+ strs.push(format!("self {}", t.repr(cx)));
+ }
+
+ // generally there shouldn't be any substs in the fn param
+ // space, but in verbose mode, print them out.
+ for t in substs.types.get_slice(subst::FnSpace).iter() {
+ strs.push(format!("fn {}", t.repr(cx)));
}
}
}
}
-impl<T:Repr> Repr for Rc<T> {
+impl<'a,T:Repr> Repr for &'a T {
fn repr(&self, tcx: &ctxt) -> String {
(&**self).repr(tcx)
}
}
-impl<'a, T:Repr> Repr for &'a T {
+impl<T:Repr> Repr for Rc<T> {
fn repr(&self, tcx: &ctxt) -> String {
- (*self).repr(tcx)
+ (&**self).repr(tcx)
}
}
impl Repr for ty::TypeParameterDef {
fn repr(&self, tcx: &ctxt) -> String {
- format!("TypeParameterDef({}, {})",
- self.def_id.repr(tcx),
- self.bounds.repr(tcx))
+ format!("TypeParameterDef({}, {}, {}/{})",
+ self.def_id,
+ self.bounds.repr(tcx),
+ self.space,
+ self.index)
}
}
impl Repr for ty::TraitRef {
fn repr(&self, tcx: &ctxt) -> String {
- trait_ref_to_string(tcx, self)
+ let base = ty::item_path_str(tcx, self.def_id);
+ let trait_def = ty::lookup_trait_def(tcx, self.def_id);
+ format!("<{} as {}>",
+ self.substs.self_ty().repr(tcx),
+ parameterized(tcx, base.as_slice(), &self.substs, &trait_def.generics))
}
}
&typeck::MethodStaticUnboxedClosure(def_id) => {
format!("MethodStaticUnboxedClosure({})", def_id.repr(tcx))
}
- &typeck::MethodParam(ref p) => {
+ &typeck::MethodTypeParam(ref p) => {
p.repr(tcx)
}
- &typeck::MethodObject(ref p) => {
+ &typeck::MethodTraitObject(ref p) => {
p.repr(tcx)
}
}
impl Repr for typeck::MethodParam {
fn repr(&self, tcx: &ctxt) -> String {
- format!("MethodParam({},{:?},{:?},{:?})",
- self.trait_id.repr(tcx),
- self.method_num,
- self.param_num,
- self.bound_num)
+ format!("MethodParam({},{})",
+ self.trait_ref.repr(tcx),
+ self.method_num)
}
}
impl Repr for typeck::MethodObject {
fn repr(&self, tcx: &ctxt) -> String {
format!("MethodObject({},{:?},{:?})",
- self.trait_id.repr(tcx),
+ self.trait_ref.repr(tcx),
self.method_num,
self.real_index)
}
format!("({},{})", a.repr(tcx), b.repr(tcx))
}
}
+
// except according to those terms.
pub static box_field_refcnt: uint = 0u;
-pub static box_field_tydesc: uint = 1u;
+pub static box_field_drop_glue: uint = 1u;
pub static box_field_body: uint = 4u;
pub static tydesc_field_visit_glue: uint = 3u;
debug!("preparing the RPATH!");
let libs = config.used_crates.clone();
- let libs = libs.move_iter().filter_map(|(_, l)| {
+ let libs = libs.into_iter().filter_map(|(_, l)| {
l.map(|p| p.clone())
}).collect::<Vec<_>>();
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
- self.buffer.mut_slice(self.buffer_idx, size),
+ self.buffer.slice_mut(self.buffer_idx, size),
input.slice_to(buffer_remaining));
self.buffer_idx = 0;
func(self.buffer);
i += buffer_remaining;
} else {
copy_memory(
- self.buffer.mut_slice(self.buffer_idx, self.buffer_idx + input.len()),
+ self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
// be empty.
let input_remaining = input.len() - i;
copy_memory(
- self.buffer.mut_slice(0, input_remaining),
+ self.buffer.slice_mut(0, input_remaining),
input.slice_from(i));
self.buffer_idx += input_remaining;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
- self.buffer.mut_slice(self.buffer_idx, idx).set_memory(0);
+ self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
- return self.buffer.mut_slice(self.buffer_idx - len, self.buffer_idx);
+ return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
)
)
- read_u32v_be(w.mut_slice(0, 16), data);
+ read_u32v_be(w.slice_mut(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
- write_u32_be(out.mut_slice(0, 4), self.engine.state.h0);
- write_u32_be(out.mut_slice(4, 8), self.engine.state.h1);
- write_u32_be(out.mut_slice(8, 12), self.engine.state.h2);
- write_u32_be(out.mut_slice(12, 16), self.engine.state.h3);
- write_u32_be(out.mut_slice(16, 20), self.engine.state.h4);
- write_u32_be(out.mut_slice(20, 24), self.engine.state.h5);
- write_u32_be(out.mut_slice(24, 28), self.engine.state.h6);
- write_u32_be(out.mut_slice(28, 32), self.engine.state.h7);
+ write_u32_be(out.slice_mut(0, 4), self.engine.state.h0);
+ write_u32_be(out.slice_mut(4, 8), self.engine.state.h1);
+ write_u32_be(out.slice_mut(8, 12), self.engine.state.h2);
+ write_u32_be(out.slice_mut(12, 16), self.engine.state.h3);
+ write_u32_be(out.slice_mut(16, 20), self.engine.state.h4);
+ write_u32_be(out.slice_mut(20, 24), self.engine.state.h5);
+ write_u32_be(out.slice_mut(24, 28), self.engine.state.h6);
+ write_u32_be(out.slice_mut(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
let expected_vec: Vec<u8> = expected.from_hex()
.unwrap()
- .move_iter()
+ .into_iter()
.collect();
assert_eq!(expected_vec, result_bytes);
}
SawExprAssign,
SawExprAssignOp(ast::BinOp),
SawExprIndex,
+ SawExprSlice,
SawExprPath,
SawExprAddrOf(ast::Mutability),
SawExprRet,
ExprField(_, id, _) => SawExprField(content(id.node)),
ExprTupField(_, id, _) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
+ ExprSlice(..) => SawExprSlice,
ExprPath(..) => SawExprPath,
ExprAddrOf(m, _) => SawExprAddrOf(m),
ExprBreak(id) => SawExprBreak(id.map(content)),
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! LLVM diagnostic reports.
+
+use libc::c_char;
+
+use {ValueRef, TwineRef, DebugLocRef, DiagnosticInfoRef};
+
+// The kinds of optimization diagnostics LLVM can report back to us.
+pub enum OptimizationDiagnosticKind {
+ OptimizationRemark,
+ OptimizationMissed,
+ OptimizationAnalysis,
+ OptimizationFailure,
+}
+
+impl OptimizationDiagnosticKind {
+ // Short human-readable label for this diagnostic kind.
+ pub fn describe(self) -> &'static str {
+ match self {
+ OptimizationRemark => "remark",
+ OptimizationMissed => "missed",
+ OptimizationAnalysis => "analysis",
+ OptimizationFailure => "failure",
+ }
+ }
+}
+
+// An LLVM optimization diagnostic, unpacked into raw components.  The
+// pointer fields are owned by LLVM -- presumably valid only for the
+// duration of the diagnostic callback; confirm before storing them.
+pub struct OptimizationDiagnostic {
+ pub kind: OptimizationDiagnosticKind,
+ pub pass_name: *const c_char,
+ pub function: ValueRef,
+ pub debug_loc: DebugLocRef,
+ pub message: TwineRef,
+}
+
+impl OptimizationDiagnostic {
+ // Fill an `OptimizationDiagnostic` of the given `kind` from the raw
+ // handle via the C shim.  Unsafe because `di` must be a valid
+ // diagnostic reference supplied by LLVM.
+ unsafe fn unpack(kind: OptimizationDiagnosticKind, di: DiagnosticInfoRef)
+ -> OptimizationDiagnostic {
+
+ // Start with null/zeroed fields; the shim overwrites each of them.
+ let mut opt = OptimizationDiagnostic {
+ kind: kind,
+ pass_name: 0 as *const c_char,
+ function: 0 as ValueRef,
+ debug_loc: 0 as DebugLocRef,
+ message: 0 as TwineRef,
+ };
+
+ super::LLVMUnpackOptimizationDiagnostic(di,
+ &mut opt.pass_name,
+ &mut opt.function,
+ &mut opt.debug_loc,
+ &mut opt.message);
+
+ opt
+ }
+}
+
+// Rust-side view of an LLVM diagnostic report.
+pub enum Diagnostic {
+ Optimization(OptimizationDiagnostic),
+
+ /// LLVM has other types that we do not wrap here.
+ UnknownDiagnostic(DiagnosticInfoRef),
+}
+
+impl Diagnostic {
+ // Classify `di` by its LLVM kind and unpack the optimization kinds we
+ // understand; anything else is passed through untouched.  Unsafe
+ // because `di` must be a valid LLVM diagnostic handle.
+ pub unsafe fn unpack(di: DiagnosticInfoRef) -> Diagnostic {
+ let kind = super::LLVMGetDiagInfoKind(di);
+
+ match kind {
+ super::DK_OptimizationRemark
+ => Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di)),
+
+ super::DK_OptimizationRemarkMissed
+ => Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di)),
+
+ super::DK_OptimizationRemarkAnalysis
+ => Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di)),
+
+ super::DK_OptimizationFailure
+ => Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di)),
+
+ _ => UnknownDiagnostic(di)
+ }
+ }
+}
extern crate libc;
use std::c_str::ToCStr;
+use std::cell::RefCell;
+use std::{raw, mem};
use libc::{c_uint, c_ushort, uint64_t, c_int, size_t, c_char};
-use libc::{c_longlong, c_ulonglong};
+use libc::{c_longlong, c_ulonglong, c_void};
use debuginfo::{DIBuilderRef, DIDescriptor,
DIFile, DILexicalBlock, DISubprogram, DIType,
DIBasicType, DIDerivedType, DICompositeType,
DIVariable, DIGlobalVariable, DIArray, DISubrange};
pub mod archive_ro;
+pub mod diagnostic;
pub type Opcode = u32;
pub type Bool = c_uint;
CommonLinkage = 14,
}
-#[deriving(Clone)]
-pub enum Attribute {
- ZExtAttribute = 1 << 0,
- SExtAttribute = 1 << 1,
- NoReturnAttribute = 1 << 2,
- InRegAttribute = 1 << 3,
- StructRetAttribute = 1 << 4,
- NoUnwindAttribute = 1 << 5,
- NoAliasAttribute = 1 << 6,
- ByValAttribute = 1 << 7,
- NestAttribute = 1 << 8,
- ReadNoneAttribute = 1 << 9,
- ReadOnlyAttribute = 1 << 10,
- NoInlineAttribute = 1 << 11,
- AlwaysInlineAttribute = 1 << 12,
- OptimizeForSizeAttribute = 1 << 13,
- StackProtectAttribute = 1 << 14,
- StackProtectReqAttribute = 1 << 15,
- AlignmentAttribute = 31 << 16,
- NoCaptureAttribute = 1 << 21,
- NoRedZoneAttribute = 1 << 22,
- NoImplicitFloatAttribute = 1 << 23,
- NakedAttribute = 1 << 24,
- InlineHintAttribute = 1 << 25,
- StackAttribute = 7 << 26,
- ReturnsTwiceAttribute = 1 << 29,
- UWTableAttribute = 1 << 30,
- NonLazyBindAttribute = 1 << 31,
+#[repr(C)]
+#[deriving(Show)]
+pub enum DiagnosticSeverity {
+ Error,
+ Warning,
+ Remark,
+ Note,
+}
+
+bitflags! {
+ flags Attribute : u32 {
+ static ZExtAttribute = 1 << 0,
+ static SExtAttribute = 1 << 1,
+ static NoReturnAttribute = 1 << 2,
+ static InRegAttribute = 1 << 3,
+ static StructRetAttribute = 1 << 4,
+ static NoUnwindAttribute = 1 << 5,
+ static NoAliasAttribute = 1 << 6,
+ static ByValAttribute = 1 << 7,
+ static NestAttribute = 1 << 8,
+ static ReadNoneAttribute = 1 << 9,
+ static ReadOnlyAttribute = 1 << 10,
+ static NoInlineAttribute = 1 << 11,
+ static AlwaysInlineAttribute = 1 << 12,
+ static OptimizeForSizeAttribute = 1 << 13,
+ static StackProtectAttribute = 1 << 14,
+ static StackProtectReqAttribute = 1 << 15,
+ static AlignmentAttribute = 31 << 16,
+ static NoCaptureAttribute = 1 << 21,
+ static NoRedZoneAttribute = 1 << 22,
+ static NoImplicitFloatAttribute = 1 << 23,
+ static NakedAttribute = 1 << 24,
+ static InlineHintAttribute = 1 << 25,
+ static StackAttribute = 7 << 26,
+ static ReturnsTwiceAttribute = 1 << 29,
+ static UWTableAttribute = 1 << 30,
+ static NonLazyBindAttribute = 1 << 31,
+ }
}
#[repr(u64)]
impl AttrHelper for Attribute {
fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
unsafe {
- LLVMAddFunctionAttribute(llfn, idx, *self as uint64_t);
+ LLVMAddFunctionAttribute(llfn, idx, self.bits() as uint64_t);
}
}
fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
unsafe {
- LLVMAddCallSiteAttribute(callsite, idx, *self as uint64_t);
+ LLVMAddCallSiteAttribute(callsite, idx, self.bits() as uint64_t);
}
}
}
// Consts for the LLVMCodeGenFileType type (in include/llvm/c/TargetMachine.h)
#[repr(C)]
pub enum FileType {
- AssemblyFile = 0,
- ObjectFile = 1
+ AssemblyFileType = 0,
+ ObjectFileType = 1
}
-pub enum Metadata {
+pub enum MetadataType {
MD_dbg = 0,
MD_tbaa = 1,
MD_prof = 2,
CodeModelLarge = 5,
}
+#[repr(C)]
+pub enum DiagnosticKind {
+ DK_InlineAsm = 0,
+ DK_StackSize,
+ DK_DebugMetadataVersion,
+ DK_SampleProfile,
+ DK_OptimizationRemark,
+ DK_OptimizationRemarkMissed,
+ DK_OptimizationRemarkAnalysis,
+ DK_OptimizationFailure,
+}
+
// Opaque pointer types
pub enum Module_opaque {}
pub type ModuleRef = *mut Module_opaque;
pub type TargetMachineRef = *mut TargetMachine_opaque;
pub enum Archive_opaque {}
pub type ArchiveRef = *mut Archive_opaque;
+pub enum Twine_opaque {}
+pub type TwineRef = *mut Twine_opaque;
+pub enum DiagnosticInfo_opaque {}
+pub type DiagnosticInfoRef = *mut DiagnosticInfo_opaque;
+pub enum DebugLoc_opaque {}
+pub type DebugLocRef = *mut DebugLoc_opaque;
+
+pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void);
pub mod debuginfo {
use super::{ValueRef};
-> ValueRef;
pub fn LLVMDICompositeTypeSetTypeArray(CompositeType: ValueRef, TypeArray: ValueRef);
- pub fn LLVMTypeToString(Type: TypeRef) -> *const c_char;
- pub fn LLVMValueToString(value_ref: ValueRef) -> *const c_char;
+ pub fn LLVMWriteTypeToString(Type: TypeRef, s: RustStringRef);
+ pub fn LLVMWriteValueToString(value_ref: ValueRef, s: RustStringRef);
pub fn LLVMIsAArgument(value_ref: ValueRef) -> ValueRef;
pub fn LLVMRustGetSectionName(SI: SectionIteratorRef,
data: *mut *const c_char) -> c_int;
+
+ pub fn LLVMWriteTwineToString(T: TwineRef, s: RustStringRef);
+
+ pub fn LLVMContextSetDiagnosticHandler(C: ContextRef,
+ Handler: DiagnosticHandler,
+ DiagnosticContext: *mut c_void);
+
+ pub fn LLVMUnpackOptimizationDiagnostic(DI: DiagnosticInfoRef,
+ pass_name_out: *mut *const c_char,
+ function_out: *mut ValueRef,
+ debugloc_out: *mut DebugLocRef,
+ message_out: *mut TwineRef);
+
+ pub fn LLVMWriteDiagnosticInfoToString(DI: DiagnosticInfoRef, s: RustStringRef);
+ pub fn LLVMGetDiagInfoSeverity(DI: DiagnosticInfoRef) -> DiagnosticSeverity;
+ pub fn LLVMGetDiagInfoKind(DI: DiagnosticInfoRef) -> DiagnosticKind;
+
+ pub fn LLVMWriteDebugLocToString(C: ContextRef, DL: DebugLocRef, s: RustStringRef);
}
pub fn SetInstructionCallConv(instr: ValueRef, cc: CallConv) {
pub fn SetFunctionAttribute(fn_: ValueRef, attr: Attribute) {
unsafe {
- LLVMAddFunctionAttribute(fn_, FunctionIndex as c_uint, attr as uint64_t)
+ LLVMAddFunctionAttribute(fn_, FunctionIndex as c_uint, attr.bits() as uint64_t)
}
}
}
}
+pub enum RustString_opaque {}
+pub type RustStringRef = *mut RustString_opaque;
+type RustStringRepr = *mut RefCell<Vec<u8>>;
+
+/// Appending to a Rust string -- used by raw_rust_string_ostream.
+#[no_mangle]
+pub unsafe extern "C" fn rust_llvm_string_write_impl(sr: RustStringRef,
+ ptr: *const c_char,
+ size: size_t) {
+ let slice: &[u8] = mem::transmute(raw::Slice {
+ data: ptr as *const u8,
+ len: size as uint,
+ });
+
+ let sr: RustStringRepr = mem::transmute(sr);
+ (*sr).borrow_mut().push_all(slice);
+}
+
+pub fn build_string(f: |RustStringRef|) -> Option<String> {
+ let mut buf = RefCell::new(Vec::new());
+ f(&mut buf as RustStringRepr as RustStringRef);
+ String::from_utf8(buf.unwrap()).ok()
+}
+
+pub unsafe fn twine_to_string(tr: TwineRef) -> String {
+ build_string(|s| LLVMWriteTwineToString(tr, s))
+ .expect("got a non-UTF8 Twine from LLVM")
+}
+
+pub unsafe fn debug_loc_to_string(c: ContextRef, tr: DebugLocRef) -> String {
+ build_string(|s| LLVMWriteDebugLocToString(c, tr, s))
+ .expect("got a non-UTF8 DebugLoc from LLVM")
+}
+
// FIXME #15460 - create a public function that actually calls our
// static LLVM symbols. Otherwise the linker will just throw llvm
// away. We're just calling lots of stuff until we transitively get
let did = def.def_id();
if ast_util::is_local(did) { return None }
try_inline_def(cx, tcx, def).map(|vec| {
- vec.move_iter().map(|mut item| {
+ vec.into_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
item.name = Some(into.clean(cx));
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
- ret.extend(build_impls(cx, tcx, did).move_iter());
+ ret.extend(build_impls(cx, tcx, did).into_iter());
clean::StructItem(build_struct(cx, tcx, did))
}
- def::DefTy(did) => {
+ def::DefTy(did, false) => {
+ record_extern_fqn(cx, did, clean::TypeTypedef);
+ ret.extend(build_impls(cx, tcx, did).into_iter());
+ build_type(cx, tcx, did)
+ }
+ def::DefTy(did, true) => {
record_extern_fqn(cx, did, clean::TypeEnum);
- ret.extend(build_impls(cx, tcx, did).move_iter());
+ ret.extend(build_impls(cx, tcx, did).into_iter());
build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
- attrs.extend(v.move_iter().map(|a| {
+ attrs.extend(v.into_iter().map(|a| {
a.clean(cx)
}));
});
match cx.tcx_opt() {
Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
- let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
+ let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
None => {}
let def = ty::lookup_trait_def(tcx, did);
let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
- let mut items = trait_items.move_iter().map(|trait_item| {
+ let mut items = trait_items.into_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
clean::ProvidedMethod(trait_item)
} else {
match tcx.inherent_impls.borrow().find(&did) {
None => {}
Some(i) => {
- impls.extend(i.borrow().iter().map(|&did| { build_impl(cx, tcx, did) }));
+ impls.extend(i.iter().map(|&did| { build_impl(cx, tcx, did) }));
}
}
}
}
- impls.move_iter().filter_map(|a| a).collect()
+ impls.into_iter().filter_map(|a| a).collect()
}
fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
};
Some(item)
}
+ ty::TypeTraitItem(_) => {
+ // FIXME(pcwalton): Implement.
+ None
+ }
}
}).collect();
return Some(clean::Item {
}
decoder::DlDef(def) if vis == ast::Public => {
match try_inline_def(cx, tcx, def) {
- Some(i) => items.extend(i.move_iter()),
+ Some(i) => items.extend(i.into_iter()),
None => {}
}
}
use syntax::ast_util::PostExpansionMethod;
use syntax::attr;
use syntax::attr::{AttributeMethods, AttrMetaMethods};
-use syntax::codemap::Pos;
+use syntax::codemap::{DUMMY_SP, Pos};
use syntax::parse::token::InternedString;
use syntax::parse::token;
use syntax::ptr::P;
pub name: String,
pub module: Option<Item>,
pub externs: Vec<(ast::CrateNum, ExternalCrate)>,
- pub primitives: Vec<Primitive>,
+ pub primitives: Vec<PrimitiveType>,
}
impl<'a, 'tcx> Clean<Crate> for visit_ast::RustdocVisitor<'a, 'tcx> {
_ => unreachable!(),
};
let mut tmp = Vec::new();
- for child in m.items.mut_iter() {
+ for child in m.items.iter_mut() {
let inner = match child.inner {
ModuleItem(ref mut m) => m,
_ => continue,
};
- let prim = match Primitive::find(child.attrs.as_slice()) {
+ let prim = match PrimitiveType::find(child.attrs.as_slice()) {
Some(prim) => prim,
None => continue,
};
inner.items.push(i);
}
- m.items.extend(tmp.move_iter());
+ m.items.extend(tmp.into_iter());
}
Crate {
pub struct ExternalCrate {
pub name: String,
pub attrs: Vec<Attribute>,
- pub primitives: Vec<Primitive>,
+ pub primitives: Vec<PrimitiveType>,
}
impl Clean<ExternalCrate> for cstore::crate_metadata {
_ => return
};
let attrs = inline::load_attrs(cx, tcx, did);
- Primitive::find(attrs.as_slice()).map(|prim| primitives.push(prim));
+ PrimitiveType::find(attrs.as_slice()).map(|prim| primitives.push(prim));
})
});
ExternalCrate {
/// `static`s from an extern block
ForeignStaticItem(Static),
MacroItem(Macro),
- PrimitiveItem(Primitive),
+ PrimitiveItem(PrimitiveType),
+ AssociatedTypeItem,
}
#[deriving(Clone, Encodable, Decodable)]
"".to_string()
};
let mut foreigns = Vec::new();
- for subforeigns in self.foreigns.clean(cx).move_iter() {
- for foreign in subforeigns.move_iter() {
+ for subforeigns in self.foreigns.clean(cx).into_iter() {
+ for foreign in subforeigns.into_iter() {
foreigns.push(foreign)
}
}
self.statics.clean(cx),
self.traits.clean(cx),
self.impls.clean(cx),
- self.view_items.clean(cx).move_iter()
- .flat_map(|s| s.move_iter()).collect(),
+ self.view_items.clean(cx).into_iter()
+ .flat_map(|s| s.into_iter()).collect(),
self.macros.clean(cx),
);
external_path(cx, "Sync", &empty)),
};
let fqn = csearch::get_item_path(tcx, did);
- let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
+ let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did,
(fqn, TypeTrait));
TraitBound(ResolvedPath {
None => return RegionBound,
};
let fqn = csearch::get_item_path(tcx, self.def_id);
- let fqn = fqn.move_iter().map(|i| i.to_string())
+ let fqn = fqn.into_iter().map(|i| i.to_string())
.collect::<Vec<String>>();
let path = external_path(cx, fqn.last().unwrap().as_slice(),
&self.substs);
fn clean(&self, cx: &DocContext) -> FnDecl {
let (did, sig) = *self;
let mut names = if did.node != 0 {
- csearch::get_method_arg_names(&cx.tcx().sess.cstore, did).move_iter()
+ csearch::get_method_arg_names(&cx.tcx().sess.cstore, did).into_iter()
} else {
- Vec::new().move_iter()
+ Vec::new().into_iter()
}.peekable();
if names.peek().map(|s| s.as_slice()) == Some("self") {
let _ = names.next();
#[deriving(Clone, Encodable, Decodable)]
pub struct Trait {
- pub items: Vec<TraitItem>,
+ pub items: Vec<TraitMethod>,
pub generics: Generics,
pub bounds: Vec<TyParamBound>,
}
}
#[deriving(Clone, Encodable, Decodable)]
-pub enum TraitItem {
+pub enum TraitMethod {
RequiredMethod(Item),
ProvidedMethod(Item),
+ TypeTraitItem(Item),
}
-impl TraitItem {
+impl TraitMethod {
pub fn is_req(&self) -> bool {
match self {
&RequiredMethod(..) => true,
match *self {
RequiredMethod(ref item) => item,
ProvidedMethod(ref item) => item,
+ TypeTraitItem(ref item) => item,
}
}
}
-impl Clean<TraitItem> for ast::TraitItem {
- fn clean(&self, cx: &DocContext) -> TraitItem {
+impl Clean<TraitMethod> for ast::TraitItem {
+ fn clean(&self, cx: &DocContext) -> TraitMethod {
match self {
&ast::RequiredMethod(ref t) => RequiredMethod(t.clean(cx)),
&ast::ProvidedMethod(ref t) => ProvidedMethod(t.clean(cx)),
+ &ast::TypeTraitItem(ref t) => TypeTraitItem(t.clean(cx)),
}
}
}
#[deriving(Clone, Encodable, Decodable)]
-pub enum ImplItem {
+pub enum ImplMethod {
MethodImplItem(Item),
+ TypeImplItem(Item),
}
-impl Clean<ImplItem> for ast::ImplItem {
- fn clean(&self, cx: &DocContext) -> ImplItem {
+impl Clean<ImplMethod> for ast::ImplItem {
+ fn clean(&self, cx: &DocContext) -> ImplMethod {
match self {
&ast::MethodImplItem(ref t) => MethodImplItem(t.clean(cx)),
+ &ast::TypeImplItem(ref t) => TypeImplItem(t.clean(cx)),
}
}
}
fn clean(&self, cx: &DocContext) -> Item {
match *self {
ty::MethodTraitItem(ref mti) => mti.clean(cx),
+ ty::TypeTraitItem(ref tti) => tti.clean(cx),
}
}
}
/// For references to self
Self(ast::DefId),
/// Primitives are just the fixed-size numeric types (plus int/uint/float), and char.
- Primitive(Primitive),
+ Primitive(PrimitiveType),
Closure(Box<ClosureDecl>),
Proc(Box<ClosureDecl>),
/// extern "ABI" fn
}
#[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash)]
-pub enum Primitive {
+pub enum PrimitiveType {
Int, I8, I16, I32, I64,
Uint, U8, U16, U32, U64,
F32, F64,
TypeStruct,
TypeTrait,
TypeVariant,
+ TypeTypedef,
}
-impl Primitive {
- fn from_str(s: &str) -> Option<Primitive> {
+impl PrimitiveType {
+ fn from_str(s: &str) -> Option<PrimitiveType> {
match s.as_slice() {
"int" => Some(Int),
"i8" => Some(I8),
}
}
- fn find(attrs: &[Attribute]) -> Option<Primitive> {
+ fn find(attrs: &[Attribute]) -> Option<PrimitiveType> {
for attr in attrs.iter() {
let list = match *attr {
List(ref k, ref l) if k.as_slice() == "doc" => l,
if k.as_slice() == "primitive" => v.as_slice(),
_ => continue,
};
- match Primitive::from_str(value) {
+ match PrimitiveType::from_str(value) {
Some(p) => return Some(p),
None => {}
}
ty::ty_enum(did, ref substs) |
ty::ty_trait(box ty::TyTrait { def_id: did, ref substs, .. }) => {
let fqn = csearch::get_item_path(cx.tcx(), did);
- let fqn: Vec<String> = fqn.move_iter().map(|i| {
+ let fqn: Vec<String> = fqn.into_iter().map(|i| {
i.to_string()
}).collect();
let kind = match ty::get(*self).sty {
generics: self.generics.clean(cx),
trait_: self.trait_.clean(cx),
for_: self.for_.clean(cx),
- items: self.items.clean(cx).move_iter().map(|ti| {
+ items: self.items.clean(cx).into_iter().map(|ti| {
match ti {
MethodImplItem(i) => i,
+ TypeImplItem(i) => i,
}
}).collect(),
derived: detect_derived(self.attrs.as_slice()),
let remaining = list.iter().filter(|path| {
match inline::try_inline(cx, path.node.id(), None) {
Some(items) => {
- ret.extend(items.move_iter()); false
+ ret.extend(items.into_iter()); false
}
None => true,
}
}
ast::ViewPathSimple(ident, _, id) => {
match inline::try_inline(cx, id, Some(ident)) {
- Some(items) => ret.extend(items.move_iter()),
+ Some(items) => ret.extend(items.into_iter()),
None => ret.push(convert(&self.node)),
}
}
fn register_def(cx: &DocContext, def: def::Def) -> ast::DefId {
let (did, kind) = match def {
def::DefFn(i, _) => (i, TypeFunction),
- def::DefTy(i) => (i, TypeEnum),
+ def::DefTy(i, false) => (i, TypeTypedef),
+ def::DefTy(i, true) => (i, TypeEnum),
def::DefTrait(i) => (i, TypeTrait),
def::DefStruct(i) => (i, TypeStruct),
def::DefMod(i) => (i, TypeModule),
}
}
+impl Clean<Item> for ast::AssociatedType {
+ fn clean(&self, cx: &DocContext) -> Item {
+ Item {
+ source: self.span.clean(cx),
+ name: Some(self.ident.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ inner: AssociatedTypeItem,
+ visibility: None,
+ def_id: ast_util::local_def(self.id),
+ stability: None,
+ }
+ }
+}
+
+impl Clean<Item> for ty::AssociatedType {
+ fn clean(&self, cx: &DocContext) -> Item {
+ Item {
+ source: DUMMY_SP.clean(cx),
+ name: Some(self.ident.clean(cx)),
+ attrs: Vec::new(),
+ inner: AssociatedTypeItem,
+ visibility: None,
+ def_id: self.def_id,
+ stability: None,
+ }
+ }
+}
+
+impl Clean<Item> for ast::Typedef {
+ fn clean(&self, cx: &DocContext) -> Item {
+ Item {
+ source: self.span.clean(cx),
+ name: Some(self.ident.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ inner: TypedefItem(Typedef {
+ type_: self.typ.clean(cx),
+ generics: Generics {
+ lifetimes: Vec::new(),
+ type_params: Vec::new(),
+ },
+ }),
+ visibility: None,
+ def_id: ast_util::local_def(self.id),
+ stability: None,
+ }
+ }
+}
+
fn lang_struct(cx: &DocContext, did: Option<ast::DefId>,
t: ty::t, name: &str,
fallback: fn(Box<Type>) -> Type) -> Type {
None => return fallback(box t.clean(cx)),
};
let fqn = csearch::get_item_path(cx.tcx(), did);
- let fqn: Vec<String> = fqn.move_iter().map(|i| {
+ let fqn: Vec<String> = fqn.into_iter().map(|i| {
i.to_string()
}).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, TypeStruct));
span_diagnostic_handler);
let mut cfg = config::build_configuration(&sess);
- for cfg_ in cfgs.move_iter() {
+ for cfg_ in cfgs.into_iter() {
let cfg_ = token::intern_and_get_ident(cfg_.as_slice());
cfg.push(P(codemap::dummy_spanned(ast::MetaWord(cfg_))));
}
libc::FILE_SHARE_READ |
libc::FILE_SHARE_DELETE |
libc::FILE_SHARE_WRITE,
- ptr::mut_null(),
+ ptr::null_mut(),
libc::CREATE_ALWAYS,
libc::FILE_ATTRIBUTE_NORMAL,
- ptr::mut_null())
+ ptr::null_mut())
};
if handle == libc::INVALID_HANDLE_VALUE {
fail!("create file error: {}", os::last_os_error());
StructItem(mut i) => {
let mut foo = Vec::new(); swap(&mut foo, &mut i.fields);
let num_fields = foo.len();
- i.fields.extend(foo.move_iter().filter_map(|x| self.fold_item(x)));
+ i.fields.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
i.fields_stripped |= num_fields != i.fields.len();
StructItem(i)
},
EnumItem(mut i) => {
let mut foo = Vec::new(); swap(&mut foo, &mut i.variants);
let num_variants = foo.len();
- i.variants.extend(foo.move_iter().filter_map(|x| self.fold_item(x)));
+ i.variants.extend(foo.into_iter().filter_map(|x| self.fold_item(x)));
i.variants_stripped |= num_variants != i.variants.len();
EnumItem(i)
},
TraitItem(mut i) => {
- fn vtrm<T: DocFolder>(this: &mut T, trm: TraitItem)
- -> Option<TraitItem> {
+ fn vtrm<T: DocFolder>(this: &mut T, trm: TraitMethod)
+ -> Option<TraitMethod> {
match trm {
RequiredMethod(it) => {
match this.fold_item(it) {
None => return None,
}
},
+ TypeTraitItem(it) => {
+ match this.fold_item(it) {
+ Some(x) => return Some(TypeTraitItem(x)),
+ None => return None,
+ }
+ }
}
}
let mut foo = Vec::new(); swap(&mut foo, &mut i.items);
- i.items.extend(foo.move_iter().filter_map(|x| vtrm(self, x)));
+ i.items.extend(foo.into_iter().filter_map(|x| vtrm(self, x)));
TraitItem(i)
},
ImplItem(mut i) => {
let mut foo = Vec::new(); swap(&mut foo, &mut i.items);
- i.items.extend(foo.move_iter()
+ i.items.extend(foo.into_iter()
.filter_map(|x| self.fold_item(x)));
ImplItem(i)
},
let mut foo = Vec::new(); swap(&mut foo, &mut j.fields);
let num_fields = foo.len();
let c = |x| self.fold_item(x);
- j.fields.extend(foo.move_iter().filter_map(c));
+ j.fields.extend(foo.into_iter().filter_map(c));
j.fields_stripped |= num_fields != j.fields.len();
VariantItem(Variant {kind: StructVariant(j), ..i2})
},
fn fold_mod(&mut self, m: Module) -> Module {
Module {
is_crate: m.is_crate,
- items: m.items.move_iter().filter_map(|i| self.fold_item(i)).collect()
+ items: m.items.into_iter().filter_map(|i| self.fold_item(i)).collect()
}
}
}
fn primitive_link(f: &mut fmt::Formatter,
- prim: clean::Primitive,
+ prim: clean::PrimitiveType,
name: &str) -> fmt::Result {
let m = cache_key.get().unwrap();
let mut needs_termination = false;
ForeignStatic = 14,
Macro = 15,
Primitive = 16,
+ AssociatedType = 17,
}
impl ItemType {
match *self {
Module => "mod",
Struct => "struct",
- Enum => "type",
+ Enum => "enum",
Function => "fn",
Typedef => "type",
Static => "static",
ForeignStatic => "ffs",
Macro => "macro",
Primitive => "primitive",
+ AssociatedType => "associatedtype",
}
}
}
clean::ForeignStaticItem(..) => ForeignStatic,
clean::MacroItem(..) => Macro,
clean::PrimitiveItem(..) => Primitive,
+ clean::AssociatedTypeItem => AssociatedType,
}
}
pub extern_locations: HashMap<ast::CrateNum, ExternalLocation>,
/// Cache of where documentation for primitives can be found.
- pub primitive_locations: HashMap<clean::Primitive, ast::CrateNum>,
+ pub primitive_locations: HashMap<clean::PrimitiveType, ast::CrateNum>,
/// Set of definitions which have been inlined from external crates.
pub inlined: HashSet<ast::DefId>,
let paths: HashMap<ast::DefId, (Vec<String>, ItemType)> =
analysis.as_ref().map(|a| {
let paths = a.external_paths.borrow_mut().take().unwrap();
- paths.move_iter().map(|(k, (v, t))| {
+ paths.into_iter().map(|(k, (v, t))| {
(k, (v, match t {
clean::TypeStruct => item_type::Struct,
clean::TypeEnum => item_type::Enum,
clean::TypeModule => item_type::Module,
clean::TypeStatic => item_type::Static,
clean::TypeVariant => item_type::Variant,
+ clean::TypeTypedef => item_type::Typedef,
}))
}).collect()
}).unwrap_or(HashMap::new());
use clean::{FixedVector, Slice, Tuple, PrimitiveTuple};
// extract relevant documentation for this impl
- let dox = match attrs.move_iter().find(|a| {
+ let dox = match attrs.into_iter().find(|a| {
match *a {
clean::NameValue(ref x, _)
if "doc" == x.as_slice() => {
_ => unreachable!()
};
this.sidebar = build_sidebar(&m);
- for item in m.items.move_iter() {
+ for item in m.items.into_iter() {
f(this,item);
}
Ok(())
clean::ForeignStaticItem(..) => ("ffi-statics", "Foreign Statics"),
clean::MacroItem(..) => ("macros", "Macros"),
clean::PrimitiveItem(..) => ("primitives", "Primitive Types"),
+ clean::AssociatedTypeItem(..) => ("associated-types", "Associated Types"),
};
try!(write!(w,
"<h2 id='{id}' class='section-header'>\
_ => false,
}
})
- .collect::<Vec<&clean::TraitItem>>();
+ .collect::<Vec<&clean::TraitMethod>>();
let provided = t.items.iter()
.filter(|m| {
match **m {
_ => false,
}
})
- .collect::<Vec<&clean::TraitItem>>();
+ .collect::<Vec<&clean::TraitMethod>>();
if t.items.len() == 0 {
try!(write!(w, "{{ }}"));
// Trait documentation
try!(document(w, it));
- fn trait_item(w: &mut fmt::Formatter, m: &clean::TraitItem)
+ fn trait_item(w: &mut fmt::Formatter, m: &clean::TraitMethod)
-> fmt::Result {
try!(write!(w, "<h3 id='{}.{}' class='method'>{}<code>",
shortty(m.item()),
v.push(myname);
}
- for (_, items) in map.mut_iter() {
+ for (_, items) in map.iter_mut() {
items.as_mut_slice().sort();
}
return map;
fn item_primitive(w: &mut fmt::Formatter,
it: &clean::Item,
- _p: &clean::Primitive) -> fmt::Result {
+ _p: &clean::PrimitiveType) -> fmt::Result {
try!(document(w, it));
render_methods(w, it)
}
.content .highlighted.enum { background-color: #b4d1b9; }
.content .highlighted.struct { background-color: #e7b1a0; }
.content .highlighted.fn { background-color: #c6afb3; }
+.content .highlighted.method { background-color: #c6afb3; }
+.content .highlighted.ffi { background-color: #c6afb3; }
.docblock.short.nowrap {
display: block;
p a { color: #4e8bca; }
p a:hover { text-decoration: underline; }
-.content span.trait, .block a.current.trait { color: #ed9603; }
-.content span.mod, .block a.current.mod { color: #4d76ae; }
-.content span.enum, .block a.current.enum { color: #5e9766; }
-.content span.struct, .block a.current.struct { color: #e53700; }
-.content span.fn, .block a.current.fn { color: #8c6067; }
+.content span.trait, .content a.trait, .block a.current.trait { color: #ed9603; }
+.content span.mod, .content a.mod, .block a.current.mod { color: #4d76ae; }
+.content span.enum, .content a.enum, .block a.current.enum { color: #5e9766; }
+.content span.struct, .content a.struct, .block a.current.struct { color: #e53700; }
+.content span.fn, .content a.fn, .block a.current.fn { color: #8c6067; }
+.content span.method, .content a.method, .block a.current.method { color: #8c6067; }
+.content span.ffi, .content a.ffi, .block a.current.ffi { color: #8c6067; }
.content .fnname { color: #8c6067; }
.search-input {
// `rustdoc::html::item_type::ItemType` type in Rust.
var itemTypes = ["mod",
"struct",
- "type",
+ "enum",
"fn",
"type",
"static",
$(".method").each(function() {
if ($(this).next().is(".docblock")) {
- $(this).children().first().after(toggle[0]);
+ $(this).children().first().after(toggle.clone());
}
});
// get the thing we just pushed, so we can borrow the string
// out of it with the right lifetime
- let just_inserted = self.chain.mut_last().unwrap();
+ let just_inserted = self.chain.last_mut().unwrap();
just_inserted.sec_number.as_slice()
}
}
pm.add_plugin(plugin);
}
info!("loading plugins...");
- for pname in plugins.move_iter() {
+ for pname in plugins.into_iter() {
pm.load_plugin(pname);
}
// }
let mut json = std::collections::TreeMap::new();
json.insert("schema".to_string(), json::String(SCHEMA_VERSION.to_string()));
- let plugins_json = res.move_iter()
+ let plugins_json = res.into_iter()
.filter_map(|opt| {
match opt {
None => None,
// Primitives are never stripped
clean::PrimitiveItem(..) => {}
+
+ // Associated types are never stripped
+ clean::AssociatedTypeItem(..) => {}
}
let fastreturn = match i.inner {
use syntax::ast::Public;
use clean::{Crate, Item, ModuleItem, Module, StructItem, Struct, EnumItem, Enum};
-use clean::{ImplItem, Impl, Trait, TraitItem, ProvidedMethod, RequiredMethod};
-use clean::{ViewItemItem, PrimitiveItem};
+use clean::{ImplItem, Impl, Trait, TraitItem, TraitMethod, ProvidedMethod, RequiredMethod};
+use clean::{TypeTraitItem, ViewItemItem, PrimitiveItem};
#[deriving(Zero, Encodable, Decodable, PartialEq, Eq)]
/// The counts for each stability level.
items: ref trait_items,
..
}) => {
- fn extract_item<'a>(trait_item: &'a TraitItem) -> &'a Item {
+ fn extract_item<'a>(trait_item: &'a TraitMethod) -> &'a Item {
match *trait_item {
ProvidedMethod(ref item) |
- RequiredMethod(ref item) => item
+ RequiredMethod(ref item) |
+ TypeTraitItem(ref item) => item
}
}
let subcounts = trait_items.iter()
span_diagnostic_handler);
let mut cfg = config::build_configuration(&sess);
- cfg.extend(cfgs.move_iter().map(|cfg_| {
+ cfg.extend(cfgs.into_iter().map(|cfg_| {
let cfg_ = token::intern_and_get_ident(cfg_.as_slice());
P(dummy_spanned(ast::MetaWord(cfg_)))
}));
test_args.insert(0, "rustdoctest".to_string());
testing::test_main(test_args.as_slice(),
- collector.tests.move_iter().collect());
+ collector.tests.into_iter().collect());
0
}
v
};
- for to_run in cur.move_iter() {
+ for to_run in cur.into_iter() {
to_run();
}
}
});
};
- for f in futures.mut_iter() { f.recv() }
+ for f in futures.iter_mut() { f.recv() }
assert_eq!(**total.lock(), num_tasks * count);
}
pub type Key<T> = &'static KeyValue<T>;
#[allow(missing_doc)]
-pub enum KeyValue<T> { Key }
+pub enum KeyValue<T> { KeyValueKey }
// The task-local-map stores all TLD information for the currently running
// task. It is stored as an owned pointer into the runtime, and it's only
#[test]
fn test_tls_multitask() {
- static my_key: Key<String> = &Key;
+ static my_key: Key<String> = &KeyValueKey;
my_key.replace(Some("parent data".to_string()));
task::spawn(proc() {
// TLD shouldn't carry over.
#[test]
fn test_tls_overwrite() {
- static my_key: Key<String> = &Key;
+ static my_key: Key<String> = &KeyValueKey;
my_key.replace(Some("first data".to_string()));
my_key.replace(Some("next data".to_string())); // Shouldn't leak.
assert!(my_key.get().unwrap().as_slice() == "next data");
#[test]
fn test_tls_pop() {
- static my_key: Key<String> = &Key;
+ static my_key: Key<String> = &KeyValueKey;
my_key.replace(Some("weasel".to_string()));
assert!(my_key.replace(None).unwrap() == "weasel".to_string());
// Pop must remove the data from the map.
// to get recorded as something within a rust stack segment. Then a
// subsequent upcall (esp. for logging, think vsnprintf) would run on
// a stack smaller than 1 MB.
- static my_key: Key<String> = &Key;
+ static my_key: Key<String> = &KeyValueKey;
task::spawn(proc() {
my_key.replace(Some("hax".to_string()));
});
#[test]
fn test_tls_multiple_types() {
- static str_key: Key<String> = &Key;
- static box_key: Key<Gc<()>> = &Key;
- static int_key: Key<int> = &Key;
+ static str_key: Key<String> = &KeyValueKey;
+ static box_key: Key<Gc<()>> = &KeyValueKey;
+ static int_key: Key<int> = &KeyValueKey;
task::spawn(proc() {
str_key.replace(Some("string data".to_string()));
box_key.replace(Some(box(GC) ()));
#[test]
fn test_tls_overwrite_multiple_types() {
- static str_key: Key<String> = &Key;
- static box_key: Key<Gc<()>> = &Key;
- static int_key: Key<int> = &Key;
+ static str_key: Key<String> = &KeyValueKey;
+ static box_key: Key<Gc<()>> = &KeyValueKey;
+ static int_key: Key<int> = &KeyValueKey;
task::spawn(proc() {
str_key.replace(Some("string data".to_string()));
str_key.replace(Some("string data 2".to_string()));
#[test]
#[should_fail]
fn test_tls_cleanup_on_failure() {
- static str_key: Key<String> = &Key;
- static box_key: Key<Gc<()>> = &Key;
- static int_key: Key<int> = &Key;
+ static str_key: Key<String> = &KeyValueKey;
+ static box_key: Key<Gc<()>> = &KeyValueKey;
+ static int_key: Key<int> = &KeyValueKey;
str_key.replace(Some("parent data".to_string()));
box_key.replace(Some(box(GC) ()));
task::spawn(proc() {
self.tx.send(());
}
}
- static key: Key<Dropper> = &Key;
+ static key: Key<Dropper> = &KeyValueKey;
let _ = task::try(proc() {
key.replace(Some(Dropper{ tx: tx }));
});
#[test]
fn test_static_pointer() {
- static key: Key<&'static int> = &Key;
+ static key: Key<&'static int> = &KeyValueKey;
static VALUE: int = 0;
key.replace(Some(&VALUE));
}
#[test]
fn test_owned() {
- static key: Key<Box<int>> = &Key;
+ static key: Key<Box<int>> = &KeyValueKey;
key.replace(Some(box 1));
{
#[test]
fn test_same_key_type() {
- static key1: Key<int> = &Key;
- static key2: Key<int> = &Key;
- static key3: Key<int> = &Key;
- static key4: Key<int> = &Key;
- static key5: Key<int> = &Key;
+ static key1: Key<int> = &KeyValueKey;
+ static key2: Key<int> = &KeyValueKey;
+ static key3: Key<int> = &KeyValueKey;
+ static key4: Key<int> = &KeyValueKey;
+ static key5: Key<int> = &KeyValueKey;
key1.replace(Some(1));
key2.replace(Some(2));
key3.replace(Some(3));
#[test]
#[should_fail]
fn test_nested_get_set1() {
- static key: Key<int> = &Key;
+ static key: Key<int> = &KeyValueKey;
assert_eq!(key.replace(Some(4)), None);
let _k = key.get();
#[bench]
fn bench_replace_none(b: &mut test::Bencher) {
- static key: Key<uint> = &Key;
+ static key: Key<uint> = &KeyValueKey;
let _clear = ClearKey(key);
key.replace(None);
b.iter(|| {
#[bench]
fn bench_replace_some(b: &mut test::Bencher) {
- static key: Key<uint> = &Key;
+ static key: Key<uint> = &KeyValueKey;
let _clear = ClearKey(key);
key.replace(Some(1u));
b.iter(|| {
#[bench]
fn bench_replace_none_some(b: &mut test::Bencher) {
- static key: Key<uint> = &Key;
+ static key: Key<uint> = &KeyValueKey;
let _clear = ClearKey(key);
key.replace(Some(0u));
b.iter(|| {
#[bench]
fn bench_100_keys_replace_last(b: &mut test::Bencher) {
- static keys: [KeyValue<uint>, ..100] = [Key, ..100];
+ static keys: [KeyValue<uint>, ..100] = [KeyValueKey, ..100];
let _clear = keys.iter().map(ClearKey).collect::<Vec<ClearKey<uint>>>();
for (i, key) in keys.iter().enumerate() {
key.replace(Some(i));
#[bench]
fn bench_1000_keys_replace_last(b: &mut test::Bencher) {
- static keys: [KeyValue<uint>, ..1000] = [Key, ..1000];
+ static keys: [KeyValue<uint>, ..1000] = [KeyValueKey, ..1000];
let _clear = keys.iter().map(ClearKey).collect::<Vec<ClearKey<uint>>>();
for (i, key) in keys.iter().enumerate() {
key.replace(Some(i));
#[bench]
fn bench_get(b: &mut test::Bencher) {
- static key: Key<uint> = &Key;
+ static key: Key<uint> = &KeyValueKey;
let _clear = ClearKey(key);
key.replace(Some(42));
b.iter(|| {
#[bench]
fn bench_100_keys_get_last(b: &mut test::Bencher) {
- static keys: [KeyValue<uint>, ..100] = [Key, ..100];
+ static keys: [KeyValue<uint>, ..100] = [KeyValueKey, ..100];
let _clear = keys.iter().map(ClearKey).collect::<Vec<ClearKey<uint>>>();
for (i, key) in keys.iter().enumerate() {
key.replace(Some(i));
#[bench]
fn bench_1000_keys_get_last(b: &mut test::Bencher) {
- static keys: [KeyValue<uint>, ..1000] = [Key, ..1000];
+ static keys: [KeyValue<uint>, ..1000] = [KeyValueKey, ..1000];
let _clear = keys.iter().map(ClearKey).collect::<Vec<ClearKey<uint>>>();
for (i, key) in keys.iter().enumerate() {
key.replace(Some(i));
pub fn new() -> LocalHeap {
LocalHeap {
memory_region: MemoryRegion { live_allocations: 0 },
- live_allocs: ptr::mut_null(),
+ live_allocs: ptr::null_mut(),
}
}
// allocations list
mybox.drop_glue = drop_glue;
mybox.ref_count = 1;
- mybox.prev = ptr::mut_null();
+ mybox.prev = ptr::null_mut();
mybox.next = self.live_allocs;
if !self.live_allocs.is_null() {
unsafe { (*self.live_allocs).prev = alloc; }
//! Walks the internal list of allocations
let mut alloc = self.live_allocs;
- while alloc != ptr::mut_null() {
+ while alloc != ptr::null_mut() {
let next_before = (*alloc).next;
f(self, alloc);
rtabort!("thread-local pointer is null. bogus!");
}
let ptr: Box<T> = mem::transmute(void_ptr);
- tls::set(key, ptr::mut_null());
+ tls::set(key, ptr::null_mut());
return ptr;
}
None
} else {
let ptr: Box<T> = mem::transmute(void_ptr);
- tls::set(key, ptr::mut_null());
+ tls::set(key, ptr::null_mut());
Some(ptr)
}
}
}
pub unsafe fn init_cond() -> uint {
- return CreateEventA(ptr::mut_null(), libc::FALSE, libc::FALSE,
+ return CreateEventA(ptr::null_mut(), libc::FALSE, libc::FALSE,
ptr::null()) as uint;
}
// kernel does, might as well make it explicit. With the current
// 20 kB red zone, that makes for a 64 kB minimum stack.
let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
- let ret = CreateThread(ptr::mut_null(), stack_size as libc::size_t,
- super::thread_start, arg, 0, ptr::mut_null());
+ let ret = CreateThread(ptr::null_mut(), stack_size as libc::size_t,
+ super::thread_start, arg, 0, ptr::null_mut());
if ret as uint == 0 {
// be sure to not leak the closure
}
pub unsafe fn join(native: rust_thread) {
- assert_eq!(pthread_join(native, ptr::mut_null()), 0);
+ assert_eq!(pthread_join(native, ptr::null_mut()), 0);
}
pub unsafe fn detach(native: rust_thread) {
}
impl<'a> FormatWriter for BufWriter<'a> {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
- let left = self.buf.mut_slice_from(self.pos);
+ let left = self.buf.slice_from_mut(self.pos);
let to_write = bytes.slice_to(cmp::min(bytes.len(), left.len()));
slice::bytes::copy_memory(left, to_write);
self.pos += to_write.len();
}
#[unsafe_destructor]
-impl<'a, T> Drop for Guard<'a, T> {
+impl<'a, T:Send> Drop for Guard<'a, T> {
fn drop(&mut self) {
// This guard's homing missile is still armed, so we're guaranteed to be
// on the same I/O event loop, so this unsafety should be ok.
use libc::c_int;
use libc;
use std::mem;
-use std::ptr::{null, mut_null};
+use std::ptr::{null, null_mut};
use std::rt::task::BlockedTask;
use std::rt::rtio;
ai_socktype: 0,
ai_protocol: 0,
ai_addrlen: 0,
- ai_canonname: mut_null(),
- ai_addr: mut_null(),
- ai_next: mut_null(),
+ ai_canonname: null_mut(),
+ ai_addr: null_mut(),
+ ai_next: null_mut(),
}
});
let hint_ptr = hint.as_ref().map_or(null(), |x| {
}
unsafe {
- uvll::set_data_for_uv_handle(self.uv_handle(), ptr::mut_null::<()>());
+ uvll::set_data_for_uv_handle(self.uv_handle(), ptr::null_mut::<()>());
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb)
}
}
unsafe {
uvll::uv_close(self.uv_handle() as *mut uvll::uv_handle_t, close_cb);
uvll::set_data_for_uv_handle(self.uv_handle(),
- ptr::mut_null::<()>());
+ ptr::null_mut::<()>());
wait_until_woken_after(&mut slot, &self.uv_loop(), || {
uvll::set_data_for_uv_handle(self.uv_handle(), &mut slot);
unsafe {
let data = uvll::get_data_for_uv_handle(handle);
uvll::free_handle(handle);
- if data == ptr::mut_null() { return }
+ if data == ptr::null_mut() { return }
let slot: &mut Option<BlockedTask> = mem::transmute(data);
wakeup(slot);
}
pub fn new(ty: uvll::uv_req_type) -> Request {
unsafe {
let handle = uvll::malloc_req(ty);
- uvll::set_data_for_req(handle, ptr::mut_null::<()>());
+ uvll::set_data_for_req(handle, ptr::null_mut::<()>());
Request::wrap(handle)
}
}
pub unsafe fn get_data<T>(&self) -> &'static mut T {
let data = uvll::get_data_for_req(self.handle);
- assert!(data != ptr::mut_null());
+ assert!(data != ptr::null_mut());
mem::transmute(data)
}
pub fn empty_buf() -> Buf {
uvll::uv_buf_t {
- base: ptr::mut_null(),
+ base: ptr::null_mut(),
len: 0,
}
}
let mut ret_io = Vec::with_capacity(io.len());
unsafe {
stdio.set_len(io.len());
- for (slot, other) in stdio.mut_iter().zip(io.iter()) {
+ for (slot, other) in stdio.iter_mut().zip(io.iter()) {
let io = set_stdio(slot as *mut uvll::uv_stdio_container_t, other,
io_loop);
ret_io.push(io);
let mut req = match self.last_write_req.take() {
Some(req) => req, None => Request::new(uvll::UV_WRITE),
};
- req.set_data(ptr::mut_null::<()>());
+ req.set_data(ptr::null_mut::<()>());
// And here's where timeouts get a little interesting. Currently, libuv
// does not support canceling an in-flight write request. Consequently,
// handle, so our only cleanup is to free the handle itself
if cfg!(windows) {
unsafe { uvll::free_handle(handle); }
- watcher.tty = ptr::mut_null();
+ watcher.tty = ptr::null_mut();
}
Err(UvError(n))
}
match Process::spawn(self, cfg) {
Ok((p, io)) => {
Ok((p as Box<rtio::RtioProcess + Send>,
- io.move_iter().map(|i| i.map(|p| {
+ io.into_iter().map(|i| i.map(|p| {
box p as Box<rtio::RtioPipe + Send>
})).collect()))
}
}
match modulus {
- 0 => Ok(b.move_iter().collect()),
+ 0 => Ok(b.into_iter().collect()),
_ => Err(InvalidHexLength),
}
}
* `Boolean`: equivalent to rust's `bool`
* `Number`: equivalent to rust's `f64`
* `String`: equivalent to rust's `String`
-* `Array`: equivalent to rust's `Vec<T>`, but also allowing objects of different types in the same
+* `List`: equivalent to rust's `Vec<T>`, but also allowing objects of different types in the same
array
* `Object`: equivalent to rust's `Treemap<String, json::Json>`
* `Null`
use std::mem::{swap, transmute};
use std::num::{FPNaN, FPInfinite};
use std::str::ScalarValue;
-use std::string::String;
+use std::string;
use std::vec::Vec;
use Encodable;
I64(i64),
U64(u64),
F64(f64),
- String(String),
+ String(string::String),
Boolean(bool),
- List(List),
- Object(Object),
+ List(JsonList),
+ Object(JsonObject),
Null,
}
-pub type List = Vec<Json>;
-pub type Object = TreeMap<String, Json>;
+pub type JsonList = Vec<Json>;
+pub type JsonObject = TreeMap<string::String, Json>;
/// The errors that can arise while parsing a JSON stream.
#[deriving(Clone, PartialEq)]
KeyMustBeAString,
ExpectedColon,
TrailingCharacters,
+ TrailingComma,
InvalidEscape,
InvalidUnicodeCodePoint,
LoneLeadingSurrogateInHexEscape,
#[deriving(Clone, PartialEq, Show)]
pub enum DecoderError {
ParseError(ParserError),
- ExpectedError(String, String),
- MissingFieldError(String),
- UnknownVariantError(String),
- ApplicationError(String)
+ ExpectedError(string::String, string::String),
+ MissingFieldError(string::String),
+ UnknownVariantError(string::String),
+ ApplicationError(string::String)
}
/// Returns a readable error string for a given error code.
KeyMustBeAString => "key must be a string",
ExpectedColon => "expected `:`",
TrailingCharacters => "trailing characters",
+ TrailingComma => "trailing comma",
InvalidEscape => "invalid escape",
UnrecognizedHex => "invalid \\u escape (unrecognized hex)",
NotFourDigit => "invalid \\u escape (not four digits)",
}
/// Shortcut function to encode a `T` into a JSON `String`
-pub fn encode<'a, T: Encodable<Encoder<'a>, io::IoError>>(object: &T) -> String {
+pub fn encode<'a, T: Encodable<Encoder<'a>, io::IoError>>(object: &T) -> string::String {
let buff = Encoder::buffer_encode(object);
- String::from_utf8(buff).unwrap()
+ string::String::from_utf8(buff).unwrap()
}
impl fmt::Show for ErrorCode {
}
}
-fn fmt_number_or_null(v: f64) -> String {
+fn fmt_number_or_null(v: f64) -> string::String {
match v.classify() {
- FPNaN | FPInfinite => String::from_str("null"),
+ FPNaN | FPInfinite => string::String::from_str("null"),
_ => f64::to_str_digits(v, 6u)
}
}
///
/// Note: this function is deprecated. Consider using `json::encode` instead.
#[deprecated = "Replaced by `json::encode`"]
- pub fn str_encode<T: Encodable<Encoder<'a>, io::IoError>>(object: &T) -> String {
+ pub fn str_encode<T: Encodable<Encoder<'a>, io::IoError>>(object: &T) -> string::String {
encode(object)
}
}
}
/// Encodes a json value into a string
- pub fn to_pretty_str(&self) -> String {
+ pub fn to_pretty_str(&self) -> string::String {
let mut s = MemWriter::new();
self.to_pretty_writer(&mut s as &mut io::Writer).unwrap();
- String::from_utf8(s.unwrap()).unwrap()
+ string::String::from_utf8(s.unwrap()).unwrap()
}
/// If the Json value is an Object, returns the value associated with the provided key.
/// Otherwise, returns None.
- pub fn find<'a>(&'a self, key: &String) -> Option<&'a Json>{
+ pub fn find<'a>(&'a self, key: &string::String) -> Option<&'a Json>{
match self {
&Object(ref map) => map.find(key),
_ => None
/// Attempts to get a nested Json Object for each key in `keys`.
/// If any key is found not to exist, find_path will return None.
/// Otherwise, it will return the Json value associated with the final key.
- pub fn find_path<'a>(&'a self, keys: &[&String]) -> Option<&'a Json>{
+ pub fn find_path<'a>(&'a self, keys: &[&string::String]) -> Option<&'a Json>{
let mut target = self;
for key in keys.iter() {
match target.find(*key) {
/// If the Json value is an Object, performs a depth-first search until
/// a value associated with the provided key is found. If no value is found
/// or the Json value is not an Object, returns None.
- pub fn search<'a>(&'a self, key: &String) -> Option<&'a Json> {
+ pub fn search<'a>(&'a self, key: &string::String) -> Option<&'a Json> {
match self {
&Object(ref map) => {
match map.find(key) {
/// If the Json value is an Object, returns the associated TreeMap.
/// Returns None otherwise.
- pub fn as_object<'a>(&'a self) -> Option<&'a Object> {
+ pub fn as_object<'a>(&'a self) -> Option<&'a JsonObject> {
match self {
&Object(ref map) => Some(map),
_ => None
/// If the Json value is a List, returns the associated vector.
/// Returns None otherwise.
- pub fn as_list<'a>(&'a self) -> Option<&'a List> {
+ pub fn as_list<'a>(&'a self) -> Option<&'a JsonList> {
match self {
&List(ref list) => Some(&*list),
_ => None
I64Value(i64),
U64Value(u64),
F64Value(f64),
- StringValue(String),
+ StringValue(string::String),
NullValue,
Error(ParserError),
}
#[deriving(PartialEq, Show)]
enum ParserState {
// Parse a value in a list, true means first element.
- ParseList(bool),
+ ParseArray(bool),
// Parse ',' or ']' after an element in a list.
ParseListComma,
// Parse a key:value in an object, true means first element.
}
// Used by Parser to insert Key elements at the top of the stack.
- fn push_key(&mut self, key: String) {
+ fn push_key(&mut self, key: string::String) {
self.stack.push(InternalKey(self.str_buffer.len() as u16, key.len() as u16));
for c in key.as_bytes().iter() {
self.str_buffer.push(*c);
Ok(n)
}
- fn parse_str(&mut self) -> Result<String, ParserError> {
+ fn parse_str(&mut self) -> Result<string::String, ParserError> {
let mut escape = false;
- let mut res = String::new();
+ let mut res = string::String::new();
loop {
self.bump();
// The only paths where the loop can spin a new iteration
// are in the cases ParseListComma and ParseObjectComma if ','
// is parsed. In these cases the state is set to (respectively)
- // ParseList(false) and ParseObject(false), which always return,
+ // ParseArray(false) and ParseObject(false), which always return,
// so there is no risk of getting stuck in an infinite loop.
// All other paths return before the end of the loop's iteration.
self.parse_whitespace();
ParseStart => {
return self.parse_start();
}
- ParseList(first) => {
+ ParseArray(first) => {
return self.parse_list(first);
}
ParseListComma => {
let val = self.parse_value();
self.state = match val {
Error(_) => { ParseFinished }
- ListStart => { ParseList(true) }
+ ListStart => { ParseArray(true) }
ObjectStart => { ParseObject(true) }
_ => { ParseBeforeFinish }
};
self.state = match val {
Error(_) => { ParseFinished }
- ListStart => { ParseList(true) }
+ ListStart => { ParseArray(true) }
ObjectStart => { ParseObject(true) }
_ => { ParseListComma }
};
fn parse_list_comma_or_end(&mut self) -> Option<JsonEvent> {
if self.ch_is(',') {
self.stack.bump_index();
- self.state = ParseList(false);
+ self.state = ParseArray(false);
self.bump();
return None;
} else if self.ch_is(']') {
fn parse_object(&mut self, first: bool) -> JsonEvent {
if self.ch_is('}') {
if !first {
- self.stack.pop();
+ if self.stack.is_empty() {
+ return self.error_event(TrailingComma);
+ } else {
+ self.stack.pop();
+ }
}
if self.stack.is_empty() {
self.state = ParseBeforeFinish;
self.state = match val {
Error(_) => { ParseFinished }
- ListStart => { ParseList(true) }
+ ListStart => { ParseArray(true) }
ObjectStart => { ParseObject(true) }
_ => { ParseObjectComma }
};
Some(F64Value(n)) => { Ok(F64(n)) }
Some(BooleanValue(b)) => { Ok(Boolean(b)) }
Some(StringValue(ref mut s)) => {
- let mut temp = String::new();
+ let mut temp = string::String::new();
swap(s, &mut temp);
Ok(String(temp))
}
loop {
if self.token == Some(ListEnd) {
- return Ok(List(values.move_iter().collect()));
+ return Ok(List(values.into_iter().collect()));
}
match self.build_value() {
Ok(v) => values.push(v),
Err(ExpectedError("single character string".to_string(), format!("{}", s)))
}
- fn read_str(&mut self) -> DecodeResult<String> {
+ fn read_str(&mut self) -> DecodeResult<string::String> {
debug!("read_str");
expect!(self.pop(), String)
}
};
match o.pop(&"fields".to_string()) {
Some(List(l)) => {
- for field in l.move_iter().rev() {
+ for field in l.into_iter().rev() {
self.stack.push(field);
}
},
debug!("read_seq()");
let list = try!(expect!(self.pop(), List));
let len = list.len();
- for v in list.move_iter().rev() {
+ for v in list.into_iter().rev() {
self.stack.push(v);
}
f(self, len)
debug!("read_map()");
let obj = try!(expect!(self.pop(), Object));
let len = obj.len();
- for (key, value) in obj.move_iter() {
+ for (key, value) in obj.into_iter() {
self.stack.push(value);
self.stack.push(String(key));
}
fn to_json(&self) -> Json { Boolean(*self) }
}
-impl ToJson for String {
+impl ToJson for string::String {
fn to_json(&self) -> Json { String((*self).clone()) }
}
fn to_json(&self) -> Json { List(self.iter().map(|elt| elt.to_json()).collect()) }
}
-impl<A: ToJson> ToJson for TreeMap<String, A> {
+impl<A: ToJson> ToJson for TreeMap<string::String, A> {
fn to_json(&self) -> Json {
let mut d = TreeMap::new();
for (key, value) in self.iter() {
}
}
-impl<A: ToJson> ToJson for HashMap<String, A> {
+impl<A: ToJson> ToJson for HashMap<string::String, A> {
fn to_json(&self) -> Json {
let mut d = TreeMap::new();
for (key, value) in self.iter() {
extern crate test;
use self::test::Bencher;
use {Encodable, Decodable};
- use super::{Encoder, Decoder, Error, Boolean, I64, U64, F64, List, String, Null,
+ use super::{List, Encoder, Decoder, Error, Boolean, I64, U64, F64, String, Null,
PrettyEncoder, Object, Json, from_str, ParseError, ExpectedError,
MissingFieldError, UnknownVariantError, DecodeResult, DecoderError,
JsonEvent, Parser, StackElement,
F64Value, StringValue, NullValue, SyntaxError, Key, Index, Stack,
InvalidSyntax, InvalidNumber, EOFWhileParsingObject, EOFWhileParsingList,
EOFWhileParsingValue, EOFWhileParsingString, KeyMustBeAString, ExpectedColon,
- TrailingCharacters};
+ TrailingCharacters, TrailingComma};
use std::{i64, u64, f32, f64, io};
use std::collections::TreeMap;
+ use std::string;
#[deriving(Decodable, Eq, PartialEq, Show)]
struct OptionData {
#[deriving(PartialEq, Encodable, Decodable, Show)]
enum Animal {
Dog,
- Frog(String, int)
+ Frog(string::String, int)
}
#[deriving(PartialEq, Encodable, Decodable, Show)]
struct Inner {
a: (),
b: uint,
- c: Vec<String>,
+ c: Vec<string::String>,
}
#[deriving(PartialEq, Encodable, Decodable, Show)]
inner: Vec<Inner>,
}
- fn mk_object(items: &[(String, Json)]) -> Json {
+ fn mk_object(items: &[(string::String, Json)]) -> Json {
let mut d = TreeMap::new();
for item in items.iter() {
from_str(a.to_pretty_str().as_slice()).unwrap());
}
- fn with_str_writer(f: |&mut io::Writer|) -> String {
+ fn with_str_writer(f: |&mut io::Writer|) -> string::String {
use std::io::MemWriter;
use std::str;
#[test]
fn test_write_none() {
- let value: Option<String> = None;
+ let value: Option<string::String> = None;
let s = with_str_writer(|writer| {
let mut encoder = Encoder::new(writer);
value.encode(&mut encoder).unwrap();
("\"\\uAB12\"", "\uAB12")];
for &(i, o) in s.iter() {
- let v: String = super::decode(i).unwrap();
+ let v: string::String = super::decode(i).unwrap();
assert_eq!(v.as_slice(), o);
}
}
#[test]
fn test_decode_option() {
- let value: Option<String> = super::decode("null").unwrap();
+ let value: Option<string::String> = super::decode("null").unwrap();
assert_eq!(value, None);
- let value: Option<String> = super::decode("\"jodhpurs\"").unwrap();
+ let value: Option<string::String> = super::decode("\"jodhpurs\"").unwrap();
assert_eq!(value, Some("jodhpurs".to_string()));
}
fn test_decode_map() {
let s = "{\"a\": \"Dog\", \"b\": {\"variant\":\"Frog\",\
\"fields\":[\"Henry\", 349]}}";
- let mut map: TreeMap<String, Animal> = super::decode(s).unwrap();
+ let mut map: TreeMap<string::String, Animal> = super::decode(s).unwrap();
assert_eq!(map.pop(&"a".to_string()), Some(Dog));
assert_eq!(map.pop(&"b".to_string()), Some(Frog("Henry".to_string(), 349)));
struct DecodeStruct {
x: f64,
y: bool,
- z: String,
+ z: string::String,
w: Vec<DecodeStruct>
}
#[deriving(Decodable)]
enum DecodeEnum {
A(f64),
- B(String)
+ B(string::String)
}
fn check_err<T: Decodable<Decoder, DecoderError>>(to_parse: &'static str,
expected: DecoderError) {
}
}
}
+
#[test]
#[ignore(cfg(target_word_size = "32"))] // FIXME(#14064)
fn test_read_object_streaming() {
assert_eq!(last_event("{\"a\":1"), Error(SyntaxError(EOFWhileParsingObject, 1, 7)));
assert_eq!(last_event("{\"a\":1 1"), Error(SyntaxError(InvalidSyntax, 1, 8)));
assert_eq!(last_event("{\"a\":1,"), Error(SyntaxError(EOFWhileParsingObject, 1, 8)));
+ assert_eq!(last_event("{\"a\":1,}"), Error(SyntaxError(TrailingComma, 1, 8)));
assert_stream_equal(
"{}",
});
}
- fn big_json() -> String {
+ fn big_json() -> string::String {
let mut src = "[\n".to_string();
for _ in range(0i, 500) {
src.push_str(r#"{ "a": true, "b": null, "c":3.1415, "d": "Hello world", "e": \
impl OwnedAsciiExt for Vec<u8> {
#[inline]
fn into_ascii_upper(mut self) -> Vec<u8> {
- for byte in self.mut_iter() {
+ for byte in self.iter_mut() {
*byte = ASCII_UPPER_MAP[*byte as uint];
}
self
#[inline]
fn into_ascii_lower(mut self) -> Vec<u8> {
- for byte in self.mut_iter() {
+ for byte in self.iter_mut() {
*byte = ASCII_LOWER_MAP[*byte as uint];
}
self
/// * base - A raw pointer to a buffer
/// * len - The number of elements in the buffer
pub unsafe fn new(base: *mut T, len: uint) -> CVec<T> {
- assert!(base != ptr::mut_null());
+ assert!(base != ptr::null_mut());
CVec {
base: base,
len: len,
/// for freeing the buffer, etc.
pub unsafe fn new_with_dtor(base: *mut T, len: uint,
dtor: proc():Send) -> CVec<T> {
- assert!(base != ptr::mut_null());
+ assert!(base != ptr::null_mut());
CVec {
base: base,
len: len,
#[should_fail]
fn test_fail_at_null() {
unsafe {
- CVec::new(ptr::mut_null::<u8>(), 9);
+ CVec::new(ptr::null_mut::<u8>(), 9);
}
}
if new_capacity < old_table.capacity() {
// Shrink the table. Naive algorithm for resizing:
- for (h, k, v) in old_table.move_iter() {
+ for (h, k, v) in old_table.into_iter() {
self.insert_hashed_nocheck(h, k, v);
}
} else {
///
/// let new = vec!["a key", "b key", "z key"];
///
- /// for k in new.move_iter() {
+ /// for k in new.into_iter() {
/// map.find_with_or_insert_with(
/// k, "new value",
/// // if the key does exist either prepend or append this
Entries { inner: self.table.iter() }
}
+ /// Deprecated: use `iter_mut`.
+ #[deprecated = "use iter_mut"]
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ self.iter_mut()
+ }
+
/// An iterator visiting all key-value pairs in arbitrary order,
/// with mutable references to the values.
/// Iterator element type is `(&'a K, &'a mut V)`.
/// map.insert("c", 3);
///
/// // Update all values
- /// for (_, val) in map.mut_iter() {
+ /// for (_, val) in map.iter_mut() {
/// *val *= 2;
/// }
///
/// println!("key: {} val: {}", key, val);
/// }
/// ```
- pub fn mut_iter(&mut self) -> MutEntries<K, V> {
- MutEntries { inner: self.table.mut_iter() }
+ pub fn iter_mut(&mut self) -> MutEntries<K, V> {
+ MutEntries { inner: self.table.iter_mut() }
+ }
+
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ self.into_iter()
}
/// Creates a consuming iterator, that is, one that moves each key-value
/// map.insert("c", 3);
///
/// // Not possible with .iter()
- /// let vec: Vec<(&str, int)> = map.move_iter().collect();
+ /// let vec: Vec<(&str, int)> = map.into_iter().collect();
/// ```
- pub fn move_iter(self) -> MoveEntries<K, V> {
+ pub fn into_iter(self) -> MoveEntries<K, V> {
MoveEntries {
- inner: self.table.move_iter().map(|(_, k, v)| (k, v))
+ inner: self.table.into_iter().map(|(_, k, v)| (k, v))
}
}
}
drop(hm.clone());
{
- let mut half = hm.move_iter().take(50);
+ let mut half = hm.into_iter().take(50);
let v = drop_vector.get().unwrap();
for i in range(0u, 200) {
#[test]
fn test_keys() {
let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let map = vec.into_iter().collect::<HashMap<int, char>>();
let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
#[test]
fn test_values() {
let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let map = vec.into_iter().collect::<HashMap<int, char>>();
let values = map.values().map(|&v| v).collect::<Vec<char>>();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
- let mut iter = map.mut_iter();
+ let mut iter = map.iter_mut();
for _ in iter.by_ref().take(3) {}
self.map.keys()
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
+ pub fn move_iter(self) -> SetMoveItems<T> {
+ self.into_iter()
+ }
+
/// Creates a consuming iterator, that is, one that moves each value out
/// of the set in arbitrary order. The set cannot be used after calling
/// this.
/// set.insert("b".to_string());
///
/// // Not possible to collect to a Vec<String> with a regular `.iter()`.
- /// let v: Vec<String> = set.move_iter().collect();
+ /// let v: Vec<String> = set.into_iter().collect();
///
/// // Will print in an arbitrary order.
/// for x in v.iter() {
/// println!("{}", x);
/// }
/// ```
- pub fn move_iter(self) -> SetMoveItems<T> {
- self.map.move_iter().map(|(k, _)| k)
+ pub fn into_iter(self) -> SetMoveItems<T> {
+ self.map.into_iter().map(|(k, _)| k)
}
/// Visit the values representing the difference.
hs
};
- let v = hs.move_iter().collect::<Vec<char>>();
+ let v = hs.into_iter().collect::<Vec<char>>();
assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
}
}
}
- pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ pub fn iter_mut(&mut self) -> MutEntries<K, V> {
MutEntries {
iter: self.raw_buckets(),
elems_left: self.size(),
}
}
- pub fn move_iter(self) -> MoveEntries<K, V> {
+ pub fn into_iter(self) -> MoveEntries<K, V> {
MoveEntries {
iter: self.raw_buckets(),
table: self,
return;
}
// This is done in reverse because we've likely partially taken
- // some elements out with `.move_iter()` from the front.
+ // some elements out with `.into_iter()` from the front.
// Check if the size is 0, so we don't do a useless scan when
// dropping empty tables such as on resize.
// Also avoid double drop of elements that have been already moved out.
LruEntry {
key: k,
value: v,
- next: ptr::mut_null(),
- prev: ptr::mut_null(),
+ next: ptr::null_mut(),
+ prev: ptr::null_mut(),
}
}
}
}
pub unsafe fn open_internal() -> *mut u8 {
- let mut handle = ptr::mut_null();
+ let mut handle = ptr::null_mut();
GetModuleHandleExW(0 as libc::DWORD, ptr::null(), &mut handle);
handle as *mut u8
}
if buf.len() > self.buf.len() {
self.inner.get_mut_ref().write(buf)
} else {
- let dst = self.buf.mut_slice_from(self.pos);
+ let dst = self.buf.slice_from_mut(self.pos);
slice::bytes::copy_memory(dst, buf);
self.pos += buf.len();
Ok(())
struct InternalBufferedWriter<W>(BufferedWriter<W>);
impl<W> InternalBufferedWriter<W> {
- fn get_mut_ref<'a>(&'a mut self) -> &'a mut BufferedWriter<W> {
+ fn get_mut<'a>(&'a mut self) -> &'a mut BufferedWriter<W> {
let InternalBufferedWriter(ref mut w) = *self;
return w;
}
impl<W: Reader> Reader for InternalBufferedWriter<W> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
- self.get_mut_ref().inner.get_mut_ref().read(buf)
+ self.get_mut().inner.get_mut_ref().read(buf)
}
}
impl<S: Stream> Writer for BufferedStream<S> {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
- self.inner.inner.get_mut_ref().write(buf)
+ self.inner.inner.get_mut().write(buf)
}
fn flush(&mut self) -> IoResult<()> {
- self.inner.inner.get_mut_ref().flush()
+ self.inner.inner.get_mut().flush()
}
}
loop {
match self.buf {
Some(ref prev) => {
- let dst = buf.mut_slice_from(num_read);
+ let dst = buf.slice_from_mut(num_read);
let src = prev.slice_from(self.pos);
let count = cmp::min(dst.len(), src.len());
bytes::copy_memory(dst, src.slice_to(count));
/// at a non-directory file
pub fn readdir(path: &Path) -> IoResult<Vec<Path>> {
let err = LocalIo::maybe_raise(|io| {
- Ok(try!(io.fs_readdir(&path.to_c_str(), 0)).move_iter().map(|a| {
+ Ok(try!(io.fs_readdir(&path.to_c_str(), 0)).into_iter().map(|a| {
Path::new(a)
}).collect())
}).map_err(IoError::from_rtio_error);
// delete all regular files in the way and push subdirs
// on the stack
- for child in children.move_iter() {
+ for child in children.into_iter() {
// FIXME(#12795) we should use lstat in all cases
let child_type = match cfg!(windows) {
true => try!(update_err(stat(&child), path)),
{
let mut read_stream = File::open_mode(filename, Open, Read);
{
- let read_buf = read_mem.mut_slice(0, 4);
+ let read_buf = read_mem.slice_mut(0, 4);
check!(read_stream.read(read_buf));
}
{
- let read_buf = read_mem.mut_slice(4, 8);
+ let read_buf = read_mem.slice_mut(4, 8);
check!(read_stream.read(read_buf));
}
}
let write_len = min(buf.len(), self.buf.len() - self.pos);
{
let input = self.buf.slice(self.pos, self.pos + write_len);
- let output = buf.mut_slice(0, write_len);
+ let output = buf.slice_mut(0, write_len);
assert_eq!(input.len(), output.len());
slice::bytes::copy_memory(output, input);
}
})
}
- slice::bytes::copy_memory(self.buf.mut_slice_from(self.pos), buf);
+ slice::bytes::copy_memory(self.buf.slice_from_mut(self.pos), buf);
self.pos += buf.len();
Ok(())
}
let write_len = min(buf.len(), self.buf.len() - self.pos);
{
let input = self.buf.slice(self.pos, self.pos + write_len);
- let output = buf.mut_slice(0, write_len);
+ let output = buf.slice_mut(0, write_len);
assert_eq!(input.len(), output.len());
slice::bytes::copy_memory(output, input);
}
assert!(r.read_at_least(buf.len(), buf).is_ok());
let b: &[_] = &[1, 2, 3];
assert_eq!(buf.as_slice(), b);
- assert!(r.read_at_least(0, buf.mut_slice_to(0)).is_ok());
+ assert!(r.read_at_least(0, buf.slice_to_mut(0)).is_ok());
assert_eq!(buf.as_slice(), b);
assert!(r.read_at_least(buf.len(), buf).is_ok());
let b: &[_] = &[4, 5, 6];
while read < min {
let mut zeroes = 0;
loop {
- match self.read(buf.mut_slice_from(read)) {
+ match self.read(buf.slice_from_mut(read)) {
Ok(0) => {
zeroes += 1;
if zeroes >= NO_PROGRESS_LIMIT {
{
let mut start = 1;
while start < width {
- match try!(self.read(buf.mut_slice(start, width))) {
+ match try!(self.read(buf.slice_mut(start, width))) {
n if n == width - start => break,
n if n < width - start => { start += n; }
_ => return Err(standard_error(InvalidInput)),
/// Easy name resolution. Given a hostname, returns the list of IP addresses for
/// that hostname.
pub fn get_host_addresses(host: &str) -> IoResult<Vec<IpAddr>> {
- lookup(Some(host), None, None).map(|a| a.move_iter().map(|i| i.address.ip).collect())
+ lookup(Some(host), None, None).map(|a| a.into_iter().map(|i| i.address.ip).collect())
}
/// Full-fledged resolution. This function will perform a synchronous call to
match LocalIo::maybe_raise(|io| {
io.get_host_addresses(hostname, servname, hint)
}) {
- Ok(v) => Ok(v.move_iter().map(|info| {
+ Ok(v) => Ok(v.into_iter().map(|info| {
Info {
address: SocketAddr {
ip: super::from_rtio(info.address.ip),
// Return result of first successful parser
fn read_or<T>(&mut self, parsers: &mut [|&mut Parser| -> Option<T>])
-> Option<T> {
- for pf in parsers.mut_iter() {
+ for pf in parsers.iter_mut() {
match self.read_atomically(|p: &mut Parser| (*pf)(p)) {
Some(r) => return Some(r),
None => {}
assert!(head.len() + tail.len() <= 8);
let mut gs = [0u16, ..8];
gs.copy_from(head);
- gs.mut_slice(8 - tail.len(), 8).copy_from(tail);
+ gs.slice_mut(8 - tail.len(), 8).copy_from(tail);
Ipv6Addr(gs[0], gs[1], gs[2], gs[3], gs[4], gs[5], gs[6], gs[7])
}
pub mod tcp;
pub mod udp;
pub mod ip;
-// FIXME(#12093) - this should not be called unix
-pub mod unix;
+pub mod pipe;
fn to_rtio(ip: IpAddr) -> rtio::IpAddr {
match ip {
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+
+Named pipes
+
+This module contains the ability to communicate over named pipes with
+synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
+while on Unix it corresponds to UNIX domain sockets.
+
+These pipes are similar to TCP in the sense that you can have both a stream to a
+server and a server itself. The server provided accepts other `UnixStream`
+instances as clients.
+
+*/
+
+#![allow(missing_doc)]
+
+use prelude::*;
+
+use io::{Listener, Acceptor, IoResult, IoError, TimedOut, standard_error};
+use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
+use rt::rtio::{RtioUnixAcceptor, RtioPipe};
+use time::Duration;
+
+/// A stream which communicates over a named pipe.
+pub struct UnixStream {
+ obj: Box<RtioPipe + Send>,
+}
+
+impl UnixStream {
+ /// Connect to a pipe named by `path`. This will attempt to open a
+ /// connection to the underlying socket.
+ ///
+ /// The returned stream will be closed when the object falls out of scope.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # #![allow(unused_must_use)]
+ /// use std::io::net::pipe::UnixStream;
+ ///
+ /// let server = Path::new("path/to/my/socket");
+ /// let mut stream = UnixStream::connect(&server);
+ /// stream.write([1, 2, 3]);
+ /// ```
+ pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
+ LocalIo::maybe_raise(|io| {
+ io.unix_connect(&path.to_c_str(), None).map(|p| UnixStream { obj: p })
+ }).map_err(IoError::from_rtio_error)
+ }
+
+    /// Connect to a pipe named by `path`, timing out if a connection cannot be
+    /// made within the specified duration.
+ ///
+ /// This function is similar to `connect`, except that if `timeout`
+ /// elapses the function will return an error of kind `TimedOut`.
+ ///
+ /// If a `timeout` with zero or negative duration is specified then
+ /// the function returns `Err`, with the error kind set to `TimedOut`.
+ #[experimental = "the timeout argument is likely to change types"]
+ pub fn connect_timeout<P: ToCStr>(path: &P,
+ timeout: Duration) -> IoResult<UnixStream> {
+ if timeout <= Duration::milliseconds(0) {
+ return Err(standard_error(TimedOut));
+ }
+
+ LocalIo::maybe_raise(|io| {
+ let s = io.unix_connect(&path.to_c_str(), Some(timeout.num_milliseconds() as u64));
+ s.map(|p| UnixStream { obj: p })
+ }).map_err(IoError::from_rtio_error)
+ }
+
+
+ /// Closes the reading half of this connection.
+ ///
+ /// This method will close the reading portion of this connection, causing
+ /// all pending and future reads to immediately return with an error.
+ ///
+ /// Note that this method affects all cloned handles associated with this
+ /// stream, not just this one handle.
+ pub fn close_read(&mut self) -> IoResult<()> {
+ self.obj.close_read().map_err(IoError::from_rtio_error)
+ }
+
+ /// Closes the writing half of this connection.
+ ///
+ /// This method will close the writing portion of this connection, causing
+ /// all pending and future writes to immediately return with an error.
+ ///
+ /// Note that this method affects all cloned handles associated with this
+ /// stream, not just this one handle.
+ pub fn close_write(&mut self) -> IoResult<()> {
+ self.obj.close_write().map_err(IoError::from_rtio_error)
+ }
+
+ /// Sets the read/write timeout for this socket.
+ ///
+ /// For more information, see `TcpStream::set_timeout`
+ #[experimental = "the timeout argument may change in type and value"]
+ pub fn set_timeout(&mut self, timeout_ms: Option<u64>) {
+ self.obj.set_timeout(timeout_ms)
+ }
+
+ /// Sets the read timeout for this socket.
+ ///
+ /// For more information, see `TcpStream::set_timeout`
+ #[experimental = "the timeout argument may change in type and value"]
+ pub fn set_read_timeout(&mut self, timeout_ms: Option<u64>) {
+ self.obj.set_read_timeout(timeout_ms)
+ }
+
+ /// Sets the write timeout for this socket.
+ ///
+ /// For more information, see `TcpStream::set_timeout`
+ #[experimental = "the timeout argument may change in type and value"]
+ pub fn set_write_timeout(&mut self, timeout_ms: Option<u64>) {
+ self.obj.set_write_timeout(timeout_ms)
+ }
+}
+
+impl Clone for UnixStream {
+ fn clone(&self) -> UnixStream {
+ UnixStream { obj: self.obj.clone() }
+ }
+}
+
+impl Reader for UnixStream {
+ fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
+ self.obj.read(buf).map_err(IoError::from_rtio_error)
+ }
+}
+
+impl Writer for UnixStream {
+ fn write(&mut self, buf: &[u8]) -> IoResult<()> {
+ self.obj.write(buf).map_err(IoError::from_rtio_error)
+ }
+}
+
+/// A value that can listen for incoming named pipe connection requests.
+pub struct UnixListener {
+ /// The internal, opaque runtime Unix listener.
+ obj: Box<RtioUnixListener + Send>,
+}
+
+impl UnixListener {
+
+ /// Creates a new listener, ready to receive incoming connections on the
+ /// specified socket. The server will be named by `path`.
+ ///
+ /// This listener will be closed when it falls out of scope.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # fn main() {}
+ /// # fn foo() {
+ /// # #![allow(unused_must_use)]
+ /// use std::io::net::pipe::UnixListener;
+ /// use std::io::{Listener, Acceptor};
+ ///
+ /// let server = Path::new("/path/to/my/socket");
+ /// let stream = UnixListener::bind(&server);
+ /// for mut client in stream.listen().incoming() {
+ /// client.write([1, 2, 3, 4]);
+ /// }
+ /// # }
+ /// ```
+ pub fn bind<P: ToCStr>(path: &P) -> IoResult<UnixListener> {
+ LocalIo::maybe_raise(|io| {
+ io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
+ }).map_err(IoError::from_rtio_error)
+ }
+}
+
+impl Listener<UnixStream, UnixAcceptor> for UnixListener {
+ fn listen(self) -> IoResult<UnixAcceptor> {
+ self.obj.listen().map(|obj| {
+ UnixAcceptor { obj: obj }
+ }).map_err(IoError::from_rtio_error)
+ }
+}
+
+/// A value that can accept named pipe connections, returned from `listen()`.
+pub struct UnixAcceptor {
+ /// The internal, opaque runtime Unix acceptor.
+ obj: Box<RtioUnixAcceptor + Send>,
+}
+
+impl UnixAcceptor {
+ /// Sets a timeout for this acceptor, after which accept() will no longer
+ /// block indefinitely.
+ ///
+ /// The argument specified is the amount of time, in milliseconds, into the
+ /// future after which all invocations of accept() will not block (and any
+ /// pending invocation will return). A value of `None` will clear any
+ /// existing timeout.
+ ///
+ /// When using this method, it is likely necessary to reset the timeout as
+ /// appropriate, the timeout specified is specific to this object, not
+ /// specific to the next request.
+ #[experimental = "the name and arguments to this function are likely \
+ to change"]
+ pub fn set_timeout(&mut self, timeout_ms: Option<u64>) {
+ self.obj.set_timeout(timeout_ms)
+ }
+
+ /// Closes the accepting capabilities of this acceptor.
+ ///
+ /// This function has the same semantics as `TcpAcceptor::close_accept`, and
+ /// more information can be found in that documentation.
+ #[experimental]
+ pub fn close_accept(&mut self) -> IoResult<()> {
+ self.obj.close_accept().map_err(IoError::from_rtio_error)
+ }
+}
+
+impl Acceptor<UnixStream> for UnixAcceptor {
+ fn accept(&mut self) -> IoResult<UnixStream> {
+ self.obj.accept().map(|s| {
+ UnixStream { obj: s }
+ }).map_err(IoError::from_rtio_error)
+ }
+}
+
+impl Clone for UnixAcceptor {
+ /// Creates a new handle to this unix acceptor, allowing for simultaneous
+ /// accepts.
+ ///
+ /// The underlying unix acceptor will not be closed until all handles to the
+    /// acceptor have been deallocated. Incoming connections will be received on
+    /// at most one acceptor; the same connection will not be accepted twice.
+ ///
+ /// The `close_accept` method will shut down *all* acceptors cloned from the
+ /// same original acceptor, whereas the `set_timeout` method only affects
+ /// the selector that it is called on.
+ ///
+ /// This function is useful for creating a handle to invoke `close_accept`
+ /// on to wake up any other task blocked in `accept`.
+ fn clone(&self) -> UnixAcceptor {
+ UnixAcceptor { obj: self.obj.clone() }
+ }
+}
+
+#[cfg(test)]
+#[allow(experimental)]
+mod tests {
+ use prelude::*;
+ use super::*;
+ use io::*;
+ use io::test::*;
+
+ pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
+ let path1 = next_test_unix();
+ let path2 = path1.clone();
+
+ let mut acceptor = UnixListener::bind(&path1).listen();
+
+ spawn(proc() {
+ match UnixStream::connect(&path2) {
+ Ok(c) => client(c),
+ Err(e) => fail!("failed connect: {}", e),
+ }
+ });
+
+ match acceptor.accept() {
+ Ok(c) => server(c),
+ Err(e) => fail!("failed accept: {}", e),
+ }
+ }
+
+ iotest!(fn bind_error() {
+ let path = "path/to/nowhere";
+ match UnixListener::bind(&path) {
+ Ok(..) => fail!(),
+ Err(e) => {
+ assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
+ e.kind == InvalidInput);
+ }
+ }
+ })
+
+ iotest!(fn connect_error() {
+ let path = if cfg!(windows) {
+ r"\\.\pipe\this_should_not_exist_ever"
+ } else {
+ "path/to/nowhere"
+ };
+ match UnixStream::connect(&path) {
+ Ok(..) => fail!(),
+ Err(e) => {
+ assert!(e.kind == FileNotFound || e.kind == OtherIoError);
+ }
+ }
+ })
+
+ iotest!(fn smoke() {
+ smalltest(proc(mut server) {
+ let mut buf = [0];
+ server.read(buf).unwrap();
+ assert!(buf[0] == 99);
+ }, proc(mut client) {
+ client.write([99]).unwrap();
+ })
+ })
+
+ iotest!(fn read_eof() {
+ smalltest(proc(mut server) {
+ let mut buf = [0];
+ assert!(server.read(buf).is_err());
+ assert!(server.read(buf).is_err());
+ }, proc(_client) {
+ // drop the client
+ })
+ } #[ignore(cfg(windows))]) // FIXME(#12516)
+
+ iotest!(fn write_begone() {
+ smalltest(proc(mut server) {
+ let buf = [0];
+ loop {
+ match server.write(buf) {
+ Ok(..) => {}
+ Err(e) => {
+ assert!(e.kind == BrokenPipe ||
+ e.kind == NotConnected ||
+ e.kind == ConnectionReset,
+ "unknown error {:?}", e);
+ break;
+ }
+ }
+ }
+ }, proc(_client) {
+ // drop the client
+ })
+ })
+
+ iotest!(fn accept_lots() {
+ let times = 10;
+ let path1 = next_test_unix();
+ let path2 = path1.clone();
+
+ let mut acceptor = match UnixListener::bind(&path1).listen() {
+ Ok(a) => a,
+ Err(e) => fail!("failed listen: {}", e),
+ };
+
+ spawn(proc() {
+ for _ in range(0u, times) {
+ let mut stream = UnixStream::connect(&path2);
+ match stream.write([100]) {
+ Ok(..) => {}
+ Err(e) => fail!("failed write: {}", e)
+ }
+ }
+ });
+
+ for _ in range(0, times) {
+ let mut client = acceptor.accept();
+ let mut buf = [0];
+ match client.read(buf) {
+ Ok(..) => {}
+ Err(e) => fail!("failed read/accept: {}", e),
+ }
+ assert_eq!(buf[0], 100);
+ }
+ })
+
+ #[cfg(unix)]
+ iotest!(fn path_exists() {
+ let path = next_test_unix();
+ let _acceptor = UnixListener::bind(&path).listen();
+ assert!(path.exists());
+ })
+
+ iotest!(fn unix_clone_smoke() {
+ let addr = next_test_unix();
+ let mut acceptor = UnixListener::bind(&addr).listen();
+
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr);
+ let mut buf = [0, 0];
+ debug!("client reading");
+ assert_eq!(s.read(buf), Ok(1));
+ assert_eq!(buf[0], 1);
+ debug!("client writing");
+ s.write([2]).unwrap();
+ debug!("client dropping");
+ });
+
+ let mut s1 = acceptor.accept().unwrap();
+ let s2 = s1.clone();
+
+ let (tx1, rx1) = channel();
+ let (tx2, rx2) = channel();
+ spawn(proc() {
+ let mut s2 = s2;
+ rx1.recv();
+ debug!("writer writing");
+ s2.write([1]).unwrap();
+ debug!("writer done");
+ tx2.send(());
+ });
+ tx1.send(());
+ let mut buf = [0, 0];
+ debug!("reader reading");
+ assert_eq!(s1.read(buf), Ok(1));
+ debug!("reader done");
+ rx2.recv();
+ })
+
+ iotest!(fn unix_clone_two_read() {
+ let addr = next_test_unix();
+ let mut acceptor = UnixListener::bind(&addr).listen();
+ let (tx1, rx) = channel();
+ let tx2 = tx1.clone();
+
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr);
+ s.write([1]).unwrap();
+ rx.recv();
+ s.write([2]).unwrap();
+ rx.recv();
+ });
+
+ let mut s1 = acceptor.accept().unwrap();
+ let s2 = s1.clone();
+
+ let (done, rx) = channel();
+ spawn(proc() {
+ let mut s2 = s2;
+ let mut buf = [0, 0];
+ s2.read(buf).unwrap();
+ tx2.send(());
+ done.send(());
+ });
+ let mut buf = [0, 0];
+ s1.read(buf).unwrap();
+ tx1.send(());
+
+ rx.recv();
+ })
+
+ iotest!(fn unix_clone_two_write() {
+ let addr = next_test_unix();
+ let mut acceptor = UnixListener::bind(&addr).listen();
+
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr);
+ let mut buf = [0, 1];
+ s.read(buf).unwrap();
+ s.read(buf).unwrap();
+ });
+
+ let mut s1 = acceptor.accept().unwrap();
+ let s2 = s1.clone();
+
+ let (tx, rx) = channel();
+ spawn(proc() {
+ let mut s2 = s2;
+ s2.write([1]).unwrap();
+ tx.send(());
+ });
+ s1.write([2]).unwrap();
+
+ rx.recv();
+ })
+
+ iotest!(fn drop_removes_listener_path() {
+ let path = next_test_unix();
+ let l = UnixListener::bind(&path).unwrap();
+ assert!(path.exists());
+ drop(l);
+ assert!(!path.exists());
+ } #[cfg(not(windows))])
+
+ iotest!(fn drop_removes_acceptor_path() {
+ let path = next_test_unix();
+ let l = UnixListener::bind(&path).unwrap();
+ assert!(path.exists());
+ drop(l.listen().unwrap());
+ assert!(!path.exists());
+ } #[cfg(not(windows))])
+
+ iotest!(fn accept_timeout() {
+ let addr = next_test_unix();
+ let mut a = UnixListener::bind(&addr).unwrap().listen().unwrap();
+
+ a.set_timeout(Some(10));
+
+ // Make sure we time out once and future invocations also time out
+ let err = a.accept().err().unwrap();
+ assert_eq!(err.kind, TimedOut);
+ let err = a.accept().err().unwrap();
+ assert_eq!(err.kind, TimedOut);
+
+ // Also make sure that even though the timeout is expired that we will
+ // continue to receive any pending connections.
+ let (tx, rx) = channel();
+ let addr2 = addr.clone();
+ spawn(proc() {
+ tx.send(UnixStream::connect(&addr2).unwrap());
+ });
+ let l = rx.recv();
+ for i in range(0u, 1001) {
+ match a.accept() {
+ Ok(..) => break,
+ Err(ref e) if e.kind == TimedOut => {}
+ Err(e) => fail!("error: {}", e),
+ }
+ ::task::deschedule();
+ if i == 1000 { fail!("should have a pending connection") }
+ }
+ drop(l);
+
+ // Unset the timeout and make sure that this always blocks.
+ a.set_timeout(None);
+ let addr2 = addr.clone();
+ spawn(proc() {
+ drop(UnixStream::connect(&addr2).unwrap());
+ });
+ a.accept().unwrap();
+ })
+
+ iotest!(fn connect_timeout_error() {
+ let addr = next_test_unix();
+ assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(100)).is_err());
+ })
+
+ iotest!(fn connect_timeout_success() {
+ let addr = next_test_unix();
+ let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
+ assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(100)).is_ok());
+ })
+
+ iotest!(fn connect_timeout_zero() {
+ let addr = next_test_unix();
+ let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
+ assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(0)).is_err());
+ })
+
+ iotest!(fn connect_timeout_negative() {
+ let addr = next_test_unix();
+ let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
+ assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(-1)).is_err());
+ })
+
+ iotest!(fn close_readwrite_smoke() {
+ let addr = next_test_unix();
+ let a = UnixListener::bind(&addr).listen().unwrap();
+ let (_tx, rx) = channel::<()>();
+ spawn(proc() {
+ let mut a = a;
+ let _s = a.accept().unwrap();
+ let _ = rx.recv_opt();
+ });
+
+ let mut b = [0];
+ let mut s = UnixStream::connect(&addr).unwrap();
+ let mut s2 = s.clone();
+
+ // closing should prevent reads/writes
+ s.close_write().unwrap();
+ assert!(s.write([0]).is_err());
+ s.close_read().unwrap();
+ assert!(s.read(b).is_err());
+
+ // closing should affect previous handles
+ assert!(s2.write([0]).is_err());
+ assert!(s2.read(b).is_err());
+
+ // closing should affect new handles
+ let mut s3 = s.clone();
+ assert!(s3.write([0]).is_err());
+ assert!(s3.read(b).is_err());
+
+ // make sure these don't die
+ let _ = s2.close_read();
+ let _ = s2.close_write();
+ let _ = s3.close_read();
+ let _ = s3.close_write();
+ })
+
+ iotest!(fn close_read_wakes_up() {
+ let addr = next_test_unix();
+ let a = UnixListener::bind(&addr).listen().unwrap();
+ let (_tx, rx) = channel::<()>();
+ spawn(proc() {
+ let mut a = a;
+ let _s = a.accept().unwrap();
+ let _ = rx.recv_opt();
+ });
+
+ let mut s = UnixStream::connect(&addr).unwrap();
+ let s2 = s.clone();
+ let (tx, rx) = channel();
+ spawn(proc() {
+ let mut s2 = s2;
+ assert!(s2.read([0]).is_err());
+ tx.send(());
+ });
+ // this should wake up the child task
+ s.close_read().unwrap();
+
+ // this test will never finish if the child doesn't wake up
+ rx.recv();
+ })
+
+ iotest!(fn readwrite_timeouts() {
+ let addr = next_test_unix();
+ let mut a = UnixListener::bind(&addr).listen().unwrap();
+ let (tx, rx) = channel::<()>();
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr).unwrap();
+ rx.recv();
+ assert!(s.write([0]).is_ok());
+ let _ = rx.recv_opt();
+ });
+
+ let mut s = a.accept().unwrap();
+ s.set_timeout(Some(20));
+ assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
+ assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
+
+ s.set_timeout(Some(20));
+ for i in range(0u, 1001) {
+ match s.write([0, .. 128 * 1024]) {
+ Ok(()) | Err(IoError { kind: ShortWrite(..), .. }) => {},
+ Err(IoError { kind: TimedOut, .. }) => break,
+ Err(e) => fail!("{}", e),
+ }
+ if i == 1000 { fail!("should have filled up?!"); }
+ }
+
+ // I'm not sure as to why, but apparently the write on windows always
+ // succeeds after the previous timeout. Who knows?
+ if !cfg!(windows) {
+ assert_eq!(s.write([0]).err().unwrap().kind, TimedOut);
+ }
+
+ tx.send(());
+ s.set_timeout(None);
+ assert_eq!(s.read([0, 0]), Ok(1));
+ })
+
+ iotest!(fn read_timeouts() {
+ let addr = next_test_unix();
+ let mut a = UnixListener::bind(&addr).listen().unwrap();
+ let (tx, rx) = channel::<()>();
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr).unwrap();
+ rx.recv();
+ let mut amt = 0;
+ while amt < 100 * 128 * 1024 {
+ match s.read([0, ..128 * 1024]) {
+ Ok(n) => { amt += n; }
+ Err(e) => fail!("{}", e),
+ }
+ }
+ let _ = rx.recv_opt();
+ });
+
+ let mut s = a.accept().unwrap();
+ s.set_read_timeout(Some(20));
+ assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
+ assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
+
+ tx.send(());
+ for _ in range(0u, 100) {
+ assert!(s.write([0, ..128 * 1024]).is_ok());
+ }
+ })
+
+ iotest!(fn write_timeouts() {
+ let addr = next_test_unix();
+ let mut a = UnixListener::bind(&addr).listen().unwrap();
+ let (tx, rx) = channel::<()>();
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr).unwrap();
+ rx.recv();
+ assert!(s.write([0]).is_ok());
+ let _ = rx.recv_opt();
+ });
+
+ let mut s = a.accept().unwrap();
+ s.set_write_timeout(Some(20));
+ for i in range(0u, 1001) {
+ match s.write([0, .. 128 * 1024]) {
+ Ok(()) | Err(IoError { kind: ShortWrite(..), .. }) => {},
+ Err(IoError { kind: TimedOut, .. }) => break,
+ Err(e) => fail!("{}", e),
+ }
+ if i == 1000 { fail!("should have filled up?!"); }
+ }
+
+ tx.send(());
+ assert!(s.read([0]).is_ok());
+ })
+
+ iotest!(fn timeout_concurrent_read() {
+ let addr = next_test_unix();
+ let mut a = UnixListener::bind(&addr).listen().unwrap();
+ let (tx, rx) = channel::<()>();
+ spawn(proc() {
+ let mut s = UnixStream::connect(&addr).unwrap();
+ rx.recv();
+ assert!(s.write([0]).is_ok());
+ let _ = rx.recv_opt();
+ });
+
+ let mut s = a.accept().unwrap();
+ let s2 = s.clone();
+ let (tx2, rx2) = channel();
+ spawn(proc() {
+ let mut s2 = s2;
+ assert!(s2.read([0]).is_ok());
+ tx2.send(());
+ });
+
+ s.set_read_timeout(Some(20));
+ assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
+ tx.send(());
+
+ rx2.recv();
+ })
+
+ #[cfg(not(windows))]
+ iotest!(fn clone_accept_smoke() {
+ let addr = next_test_unix();
+ let l = UnixListener::bind(&addr);
+ let mut a = l.listen().unwrap();
+ let mut a2 = a.clone();
+
+ let addr2 = addr.clone();
+ spawn(proc() {
+ let _ = UnixStream::connect(&addr2);
+ });
+ spawn(proc() {
+ let _ = UnixStream::connect(&addr);
+ });
+
+ assert!(a.accept().is_ok());
+ drop(a);
+ assert!(a2.accept().is_ok());
+ })
+
+ iotest!(fn clone_accept_concurrent() {
+ let addr = next_test_unix();
+ let l = UnixListener::bind(&addr);
+ let a = l.listen().unwrap();
+ let a2 = a.clone();
+
+ let (tx, rx) = channel();
+ let tx2 = tx.clone();
+
+ spawn(proc() { let mut a = a; tx.send(a.accept()) });
+ spawn(proc() { let mut a = a2; tx2.send(a.accept()) });
+
+ let addr2 = addr.clone();
+ spawn(proc() {
+ let _ = UnixStream::connect(&addr2);
+ });
+ spawn(proc() {
+ let _ = UnixStream::connect(&addr);
+ });
+
+ assert!(rx.recv().is_ok());
+ assert!(rx.recv().is_ok());
+ })
+
+ iotest!(fn close_accept_smoke() {
+ let addr = next_test_unix();
+ let l = UnixListener::bind(&addr);
+ let mut a = l.listen().unwrap();
+
+ a.close_accept().unwrap();
+ assert_eq!(a.accept().err().unwrap().kind, EndOfFile);
+ })
+
+ iotest!(fn close_accept_concurrent() {
+ let addr = next_test_unix();
+ let l = UnixListener::bind(&addr);
+ let a = l.listen().unwrap();
+ let mut a2 = a.clone();
+
+ let (tx, rx) = channel();
+ spawn(proc() {
+ let mut a = a;
+ tx.send(a.accept());
+ });
+ a2.close_accept().unwrap();
+
+ assert_eq!(rx.recv().err().unwrap().kind, EndOfFile);
+ })
+}
/// match socket.recv_from(buf) {
/// Ok((amt, src)) => {
/// // Send a reply to the socket we received data from
-/// let buf = buf.mut_slice_to(amt);
+/// let buf = buf.slice_to_mut(amt);
/// buf.reverse();
/// socket.send_to(buf, src);
/// }
+++ /dev/null
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*!
-
-Named pipes
-
-This module contains the ability to communicate over named pipes with
-synchronous I/O. On windows, this corresponds to talking over a Named Pipe,
-while on Unix it corresponds to UNIX domain sockets.
-
-These pipes are similar to TCP in the sense that you can have both a stream to a
-server and a server itself. The server provided accepts other `UnixStream`
-instances as clients.
-
-*/
-
-#![allow(missing_doc)]
-
-use prelude::*;
-
-use io::{Listener, Acceptor, IoResult, IoError, TimedOut, standard_error};
-use rt::rtio::{IoFactory, LocalIo, RtioUnixListener};
-use rt::rtio::{RtioUnixAcceptor, RtioPipe};
-use time::Duration;
-
-/// A stream which communicates over a named pipe.
-pub struct UnixStream {
- obj: Box<RtioPipe + Send>,
-}
-
-impl UnixStream {
- /// Connect to a pipe named by `path`. This will attempt to open a
- /// connection to the underlying socket.
- ///
- /// The returned stream will be closed when the object falls out of scope.
- ///
- /// # Example
- ///
- /// ```rust
- /// # #![allow(unused_must_use)]
- /// use std::io::net::unix::UnixStream;
- ///
- /// let server = Path::new("path/to/my/socket");
- /// let mut stream = UnixStream::connect(&server);
- /// stream.write([1, 2, 3]);
- /// ```
- pub fn connect<P: ToCStr>(path: &P) -> IoResult<UnixStream> {
- LocalIo::maybe_raise(|io| {
- io.unix_connect(&path.to_c_str(), None).map(|p| UnixStream { obj: p })
- }).map_err(IoError::from_rtio_error)
- }
-
- /// Connect to a pipe named by `path`, timing out if the specified number of
- /// milliseconds.
- ///
- /// This function is similar to `connect`, except that if `timeout`
- /// elapses the function will return an error of kind `TimedOut`.
- ///
- /// If a `timeout` with zero or negative duration is specified then
- /// the function returns `Err`, with the error kind set to `TimedOut`.
- #[experimental = "the timeout argument is likely to change types"]
- pub fn connect_timeout<P: ToCStr>(path: &P,
- timeout: Duration) -> IoResult<UnixStream> {
- if timeout <= Duration::milliseconds(0) {
- return Err(standard_error(TimedOut));
- }
-
- LocalIo::maybe_raise(|io| {
- let s = io.unix_connect(&path.to_c_str(), Some(timeout.num_milliseconds() as u64));
- s.map(|p| UnixStream { obj: p })
- }).map_err(IoError::from_rtio_error)
- }
-
-
- /// Closes the reading half of this connection.
- ///
- /// This method will close the reading portion of this connection, causing
- /// all pending and future reads to immediately return with an error.
- ///
- /// Note that this method affects all cloned handles associated with this
- /// stream, not just this one handle.
- pub fn close_read(&mut self) -> IoResult<()> {
- self.obj.close_read().map_err(IoError::from_rtio_error)
- }
-
- /// Closes the writing half of this connection.
- ///
- /// This method will close the writing portion of this connection, causing
- /// all pending and future writes to immediately return with an error.
- ///
- /// Note that this method affects all cloned handles associated with this
- /// stream, not just this one handle.
- pub fn close_write(&mut self) -> IoResult<()> {
- self.obj.close_write().map_err(IoError::from_rtio_error)
- }
-
- /// Sets the read/write timeout for this socket.
- ///
- /// For more information, see `TcpStream::set_timeout`
- #[experimental = "the timeout argument may change in type and value"]
- pub fn set_timeout(&mut self, timeout_ms: Option<u64>) {
- self.obj.set_timeout(timeout_ms)
- }
-
- /// Sets the read timeout for this socket.
- ///
- /// For more information, see `TcpStream::set_timeout`
- #[experimental = "the timeout argument may change in type and value"]
- pub fn set_read_timeout(&mut self, timeout_ms: Option<u64>) {
- self.obj.set_read_timeout(timeout_ms)
- }
-
- /// Sets the write timeout for this socket.
- ///
- /// For more information, see `TcpStream::set_timeout`
- #[experimental = "the timeout argument may change in type and value"]
- pub fn set_write_timeout(&mut self, timeout_ms: Option<u64>) {
- self.obj.set_write_timeout(timeout_ms)
- }
-}
-
-impl Clone for UnixStream {
- fn clone(&self) -> UnixStream {
- UnixStream { obj: self.obj.clone() }
- }
-}
-
-impl Reader for UnixStream {
- fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
- self.obj.read(buf).map_err(IoError::from_rtio_error)
- }
-}
-
-impl Writer for UnixStream {
- fn write(&mut self, buf: &[u8]) -> IoResult<()> {
- self.obj.write(buf).map_err(IoError::from_rtio_error)
- }
-}
-
-/// A value that can listen for incoming named pipe connection requests.
-pub struct UnixListener {
- /// The internal, opaque runtime Unix listener.
- obj: Box<RtioUnixListener + Send>,
-}
-
-impl UnixListener {
-
- /// Creates a new listener, ready to receive incoming connections on the
- /// specified socket. The server will be named by `path`.
- ///
- /// This listener will be closed when it falls out of scope.
- ///
- /// # Example
- ///
- /// ```
- /// # fn main() {}
- /// # fn foo() {
- /// # #![allow(unused_must_use)]
- /// use std::io::net::unix::UnixListener;
- /// use std::io::{Listener, Acceptor};
- ///
- /// let server = Path::new("/path/to/my/socket");
- /// let stream = UnixListener::bind(&server);
- /// for mut client in stream.listen().incoming() {
- /// client.write([1, 2, 3, 4]);
- /// }
- /// # }
- /// ```
- pub fn bind<P: ToCStr>(path: &P) -> IoResult<UnixListener> {
- LocalIo::maybe_raise(|io| {
- io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
- }).map_err(IoError::from_rtio_error)
- }
-}
-
-impl Listener<UnixStream, UnixAcceptor> for UnixListener {
- fn listen(self) -> IoResult<UnixAcceptor> {
- self.obj.listen().map(|obj| {
- UnixAcceptor { obj: obj }
- }).map_err(IoError::from_rtio_error)
- }
-}
-
-/// A value that can accept named pipe connections, returned from `listen()`.
-pub struct UnixAcceptor {
- /// The internal, opaque runtime Unix acceptor.
- obj: Box<RtioUnixAcceptor + Send>,
-}
-
-impl UnixAcceptor {
- /// Sets a timeout for this acceptor, after which accept() will no longer
- /// block indefinitely.
- ///
- /// The argument specified is the amount of time, in milliseconds, into the
- /// future after which all invocations of accept() will not block (and any
- /// pending invocation will return). A value of `None` will clear any
- /// existing timeout.
- ///
- /// When using this method, it is likely necessary to reset the timeout as
- /// appropriate, the timeout specified is specific to this object, not
- /// specific to the next request.
- #[experimental = "the name and arguments to this function are likely \
- to change"]
- pub fn set_timeout(&mut self, timeout_ms: Option<u64>) {
- self.obj.set_timeout(timeout_ms)
- }
-
- /// Closes the accepting capabilities of this acceptor.
- ///
- /// This function has the same semantics as `TcpAcceptor::close_accept`, and
- /// more information can be found in that documentation.
- #[experimental]
- pub fn close_accept(&mut self) -> IoResult<()> {
- self.obj.close_accept().map_err(IoError::from_rtio_error)
- }
-}
-
-impl Acceptor<UnixStream> for UnixAcceptor {
- fn accept(&mut self) -> IoResult<UnixStream> {
- self.obj.accept().map(|s| {
- UnixStream { obj: s }
- }).map_err(IoError::from_rtio_error)
- }
-}
-
-impl Clone for UnixAcceptor {
- /// Creates a new handle to this unix acceptor, allowing for simultaneous
- /// accepts.
- ///
- /// The underlying unix acceptor will not be closed until all handles to the
- /// acceptor have been deallocated. Incoming connections will be received on
- /// at most once acceptor, the same connection will not be accepted twice.
- ///
- /// The `close_accept` method will shut down *all* acceptors cloned from the
- /// same original acceptor, whereas the `set_timeout` method only affects
- /// the selector that it is called on.
- ///
- /// This function is useful for creating a handle to invoke `close_accept`
- /// on to wake up any other task blocked in `accept`.
- fn clone(&self) -> UnixAcceptor {
- UnixAcceptor { obj: self.obj.clone() }
- }
-}
-
-#[cfg(test)]
-#[allow(experimental)]
-mod tests {
- use prelude::*;
- use super::*;
- use io::*;
- use io::test::*;
-
- pub fn smalltest(server: proc(UnixStream):Send, client: proc(UnixStream):Send) {
- let path1 = next_test_unix();
- let path2 = path1.clone();
-
- let mut acceptor = UnixListener::bind(&path1).listen();
-
- spawn(proc() {
- match UnixStream::connect(&path2) {
- Ok(c) => client(c),
- Err(e) => fail!("failed connect: {}", e),
- }
- });
-
- match acceptor.accept() {
- Ok(c) => server(c),
- Err(e) => fail!("failed accept: {}", e),
- }
- }
-
- iotest!(fn bind_error() {
- let path = "path/to/nowhere";
- match UnixListener::bind(&path) {
- Ok(..) => fail!(),
- Err(e) => {
- assert!(e.kind == PermissionDenied || e.kind == FileNotFound ||
- e.kind == InvalidInput);
- }
- }
- })
-
- iotest!(fn connect_error() {
- let path = if cfg!(windows) {
- r"\\.\pipe\this_should_not_exist_ever"
- } else {
- "path/to/nowhere"
- };
- match UnixStream::connect(&path) {
- Ok(..) => fail!(),
- Err(e) => {
- assert!(e.kind == FileNotFound || e.kind == OtherIoError);
- }
- }
- })
-
- iotest!(fn smoke() {
- smalltest(proc(mut server) {
- let mut buf = [0];
- server.read(buf).unwrap();
- assert!(buf[0] == 99);
- }, proc(mut client) {
- client.write([99]).unwrap();
- })
- })
-
- iotest!(fn read_eof() {
- smalltest(proc(mut server) {
- let mut buf = [0];
- assert!(server.read(buf).is_err());
- assert!(server.read(buf).is_err());
- }, proc(_client) {
- // drop the client
- })
- } #[ignore(cfg(windows))]) // FIXME(#12516)
-
- iotest!(fn write_begone() {
- smalltest(proc(mut server) {
- let buf = [0];
- loop {
- match server.write(buf) {
- Ok(..) => {}
- Err(e) => {
- assert!(e.kind == BrokenPipe ||
- e.kind == NotConnected ||
- e.kind == ConnectionReset,
- "unknown error {:?}", e);
- break;
- }
- }
- }
- }, proc(_client) {
- // drop the client
- })
- })
-
- iotest!(fn accept_lots() {
- let times = 10;
- let path1 = next_test_unix();
- let path2 = path1.clone();
-
- let mut acceptor = match UnixListener::bind(&path1).listen() {
- Ok(a) => a,
- Err(e) => fail!("failed listen: {}", e),
- };
-
- spawn(proc() {
- for _ in range(0u, times) {
- let mut stream = UnixStream::connect(&path2);
- match stream.write([100]) {
- Ok(..) => {}
- Err(e) => fail!("failed write: {}", e)
- }
- }
- });
-
- for _ in range(0, times) {
- let mut client = acceptor.accept();
- let mut buf = [0];
- match client.read(buf) {
- Ok(..) => {}
- Err(e) => fail!("failed read/accept: {}", e),
- }
- assert_eq!(buf[0], 100);
- }
- })
-
- #[cfg(unix)]
- iotest!(fn path_exists() {
- let path = next_test_unix();
- let _acceptor = UnixListener::bind(&path).listen();
- assert!(path.exists());
- })
-
- iotest!(fn unix_clone_smoke() {
- let addr = next_test_unix();
- let mut acceptor = UnixListener::bind(&addr).listen();
-
- spawn(proc() {
- let mut s = UnixStream::connect(&addr);
- let mut buf = [0, 0];
- debug!("client reading");
- assert_eq!(s.read(buf), Ok(1));
- assert_eq!(buf[0], 1);
- debug!("client writing");
- s.write([2]).unwrap();
- debug!("client dropping");
- });
-
- let mut s1 = acceptor.accept().unwrap();
- let s2 = s1.clone();
-
- let (tx1, rx1) = channel();
- let (tx2, rx2) = channel();
- spawn(proc() {
- let mut s2 = s2;
- rx1.recv();
- debug!("writer writing");
- s2.write([1]).unwrap();
- debug!("writer done");
- tx2.send(());
- });
- tx1.send(());
- let mut buf = [0, 0];
- debug!("reader reading");
- assert_eq!(s1.read(buf), Ok(1));
- debug!("reader done");
- rx2.recv();
- })
-
- iotest!(fn unix_clone_two_read() {
- let addr = next_test_unix();
- let mut acceptor = UnixListener::bind(&addr).listen();
- let (tx1, rx) = channel();
- let tx2 = tx1.clone();
-
- spawn(proc() {
- let mut s = UnixStream::connect(&addr);
- s.write([1]).unwrap();
- rx.recv();
- s.write([2]).unwrap();
- rx.recv();
- });
-
- let mut s1 = acceptor.accept().unwrap();
- let s2 = s1.clone();
-
- let (done, rx) = channel();
- spawn(proc() {
- let mut s2 = s2;
- let mut buf = [0, 0];
- s2.read(buf).unwrap();
- tx2.send(());
- done.send(());
- });
- let mut buf = [0, 0];
- s1.read(buf).unwrap();
- tx1.send(());
-
- rx.recv();
- })
-
- iotest!(fn unix_clone_two_write() {
- let addr = next_test_unix();
- let mut acceptor = UnixListener::bind(&addr).listen();
-
- spawn(proc() {
- let mut s = UnixStream::connect(&addr);
- let mut buf = [0, 1];
- s.read(buf).unwrap();
- s.read(buf).unwrap();
- });
-
- let mut s1 = acceptor.accept().unwrap();
- let s2 = s1.clone();
-
- let (tx, rx) = channel();
- spawn(proc() {
- let mut s2 = s2;
- s2.write([1]).unwrap();
- tx.send(());
- });
- s1.write([2]).unwrap();
-
- rx.recv();
- })
-
- iotest!(fn drop_removes_listener_path() {
- let path = next_test_unix();
- let l = UnixListener::bind(&path).unwrap();
- assert!(path.exists());
- drop(l);
- assert!(!path.exists());
- } #[cfg(not(windows))])
-
- iotest!(fn drop_removes_acceptor_path() {
- let path = next_test_unix();
- let l = UnixListener::bind(&path).unwrap();
- assert!(path.exists());
- drop(l.listen().unwrap());
- assert!(!path.exists());
- } #[cfg(not(windows))])
-
- iotest!(fn accept_timeout() {
- let addr = next_test_unix();
- let mut a = UnixListener::bind(&addr).unwrap().listen().unwrap();
-
- a.set_timeout(Some(10));
-
- // Make sure we time out once and future invocations also time out
- let err = a.accept().err().unwrap();
- assert_eq!(err.kind, TimedOut);
- let err = a.accept().err().unwrap();
- assert_eq!(err.kind, TimedOut);
-
- // Also make sure that even though the timeout is expired that we will
- // continue to receive any pending connections.
- let (tx, rx) = channel();
- let addr2 = addr.clone();
- spawn(proc() {
- tx.send(UnixStream::connect(&addr2).unwrap());
- });
- let l = rx.recv();
- for i in range(0u, 1001) {
- match a.accept() {
- Ok(..) => break,
- Err(ref e) if e.kind == TimedOut => {}
- Err(e) => fail!("error: {}", e),
- }
- ::task::deschedule();
- if i == 1000 { fail!("should have a pending connection") }
- }
- drop(l);
-
- // Unset the timeout and make sure that this always blocks.
- a.set_timeout(None);
- let addr2 = addr.clone();
- spawn(proc() {
- drop(UnixStream::connect(&addr2).unwrap());
- });
- a.accept().unwrap();
- })
-
- iotest!(fn connect_timeout_error() {
- let addr = next_test_unix();
- assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(100)).is_err());
- })
-
- iotest!(fn connect_timeout_success() {
- let addr = next_test_unix();
- let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
- assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(100)).is_ok());
- })
-
- iotest!(fn connect_timeout_zero() {
- let addr = next_test_unix();
- let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
- assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(0)).is_err());
- })
-
- iotest!(fn connect_timeout_negative() {
- let addr = next_test_unix();
- let _a = UnixListener::bind(&addr).unwrap().listen().unwrap();
- assert!(UnixStream::connect_timeout(&addr, Duration::milliseconds(-1)).is_err());
- })
-
- iotest!(fn close_readwrite_smoke() {
- let addr = next_test_unix();
- let a = UnixListener::bind(&addr).listen().unwrap();
- let (_tx, rx) = channel::<()>();
- spawn(proc() {
- let mut a = a;
- let _s = a.accept().unwrap();
- let _ = rx.recv_opt();
- });
-
- let mut b = [0];
- let mut s = UnixStream::connect(&addr).unwrap();
- let mut s2 = s.clone();
-
- // closing should prevent reads/writes
- s.close_write().unwrap();
- assert!(s.write([0]).is_err());
- s.close_read().unwrap();
- assert!(s.read(b).is_err());
-
- // closing should affect previous handles
- assert!(s2.write([0]).is_err());
- assert!(s2.read(b).is_err());
-
- // closing should affect new handles
- let mut s3 = s.clone();
- assert!(s3.write([0]).is_err());
- assert!(s3.read(b).is_err());
-
- // make sure these don't die
- let _ = s2.close_read();
- let _ = s2.close_write();
- let _ = s3.close_read();
- let _ = s3.close_write();
- })
-
- iotest!(fn close_read_wakes_up() {
- let addr = next_test_unix();
- let a = UnixListener::bind(&addr).listen().unwrap();
- let (_tx, rx) = channel::<()>();
- spawn(proc() {
- let mut a = a;
- let _s = a.accept().unwrap();
- let _ = rx.recv_opt();
- });
-
- let mut s = UnixStream::connect(&addr).unwrap();
- let s2 = s.clone();
- let (tx, rx) = channel();
- spawn(proc() {
- let mut s2 = s2;
- assert!(s2.read([0]).is_err());
- tx.send(());
- });
- // this should wake up the child task
- s.close_read().unwrap();
-
- // this test will never finish if the child doesn't wake up
- rx.recv();
- })
-
- iotest!(fn readwrite_timeouts() {
- let addr = next_test_unix();
- let mut a = UnixListener::bind(&addr).listen().unwrap();
- let (tx, rx) = channel::<()>();
- spawn(proc() {
- let mut s = UnixStream::connect(&addr).unwrap();
- rx.recv();
- assert!(s.write([0]).is_ok());
- let _ = rx.recv_opt();
- });
-
- let mut s = a.accept().unwrap();
- s.set_timeout(Some(20));
- assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
- assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
-
- s.set_timeout(Some(20));
- for i in range(0u, 1001) {
- match s.write([0, .. 128 * 1024]) {
- Ok(()) | Err(IoError { kind: ShortWrite(..), .. }) => {},
- Err(IoError { kind: TimedOut, .. }) => break,
- Err(e) => fail!("{}", e),
- }
- if i == 1000 { fail!("should have filled up?!"); }
- }
-
- // I'm not sure as to why, but apparently the write on windows always
- // succeeds after the previous timeout. Who knows?
- if !cfg!(windows) {
- assert_eq!(s.write([0]).err().unwrap().kind, TimedOut);
- }
-
- tx.send(());
- s.set_timeout(None);
- assert_eq!(s.read([0, 0]), Ok(1));
- })
-
- iotest!(fn read_timeouts() {
- let addr = next_test_unix();
- let mut a = UnixListener::bind(&addr).listen().unwrap();
- let (tx, rx) = channel::<()>();
- spawn(proc() {
- let mut s = UnixStream::connect(&addr).unwrap();
- rx.recv();
- let mut amt = 0;
- while amt < 100 * 128 * 1024 {
- match s.read([0, ..128 * 1024]) {
- Ok(n) => { amt += n; }
- Err(e) => fail!("{}", e),
- }
- }
- let _ = rx.recv_opt();
- });
-
- let mut s = a.accept().unwrap();
- s.set_read_timeout(Some(20));
- assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
- assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
-
- tx.send(());
- for _ in range(0u, 100) {
- assert!(s.write([0, ..128 * 1024]).is_ok());
- }
- })
-
- iotest!(fn write_timeouts() {
- let addr = next_test_unix();
- let mut a = UnixListener::bind(&addr).listen().unwrap();
- let (tx, rx) = channel::<()>();
- spawn(proc() {
- let mut s = UnixStream::connect(&addr).unwrap();
- rx.recv();
- assert!(s.write([0]).is_ok());
- let _ = rx.recv_opt();
- });
-
- let mut s = a.accept().unwrap();
- s.set_write_timeout(Some(20));
- for i in range(0u, 1001) {
- match s.write([0, .. 128 * 1024]) {
- Ok(()) | Err(IoError { kind: ShortWrite(..), .. }) => {},
- Err(IoError { kind: TimedOut, .. }) => break,
- Err(e) => fail!("{}", e),
- }
- if i == 1000 { fail!("should have filled up?!"); }
- }
-
- tx.send(());
- assert!(s.read([0]).is_ok());
- })
-
- iotest!(fn timeout_concurrent_read() {
- let addr = next_test_unix();
- let mut a = UnixListener::bind(&addr).listen().unwrap();
- let (tx, rx) = channel::<()>();
- spawn(proc() {
- let mut s = UnixStream::connect(&addr).unwrap();
- rx.recv();
- assert!(s.write([0]).is_ok());
- let _ = rx.recv_opt();
- });
-
- let mut s = a.accept().unwrap();
- let s2 = s.clone();
- let (tx2, rx2) = channel();
- spawn(proc() {
- let mut s2 = s2;
- assert!(s2.read([0]).is_ok());
- tx2.send(());
- });
-
- s.set_read_timeout(Some(20));
- assert_eq!(s.read([0]).err().unwrap().kind, TimedOut);
- tx.send(());
-
- rx2.recv();
- })
-
- #[cfg(not(windows))]
- iotest!(fn clone_accept_smoke() {
- let addr = next_test_unix();
- let l = UnixListener::bind(&addr);
- let mut a = l.listen().unwrap();
- let mut a2 = a.clone();
-
- let addr2 = addr.clone();
- spawn(proc() {
- let _ = UnixStream::connect(&addr2);
- });
- spawn(proc() {
- let _ = UnixStream::connect(&addr);
- });
-
- assert!(a.accept().is_ok());
- drop(a);
- assert!(a2.accept().is_ok());
- })
-
- iotest!(fn clone_accept_concurrent() {
- let addr = next_test_unix();
- let l = UnixListener::bind(&addr);
- let a = l.listen().unwrap();
- let a2 = a.clone();
-
- let (tx, rx) = channel();
- let tx2 = tx.clone();
-
- spawn(proc() { let mut a = a; tx.send(a.accept()) });
- spawn(proc() { let mut a = a2; tx2.send(a.accept()) });
-
- let addr2 = addr.clone();
- spawn(proc() {
- let _ = UnixStream::connect(&addr2);
- });
- spawn(proc() {
- let _ = UnixStream::connect(&addr);
- });
-
- assert!(rx.recv().is_ok());
- assert!(rx.recv().is_ok());
- })
-
- iotest!(fn close_accept_smoke() {
- let addr = next_test_unix();
- let l = UnixListener::bind(&addr);
- let mut a = l.listen().unwrap();
-
- a.close_accept().unwrap();
- assert_eq!(a.accept().err().unwrap().kind, EndOfFile);
- })
-
- iotest!(fn close_accept_concurrent() {
- let addr = next_test_unix();
- let l = UnixListener::bind(&addr);
- let a = l.listen().unwrap();
- let mut a2 = a.clone();
-
- let (tx, rx) = channel();
- spawn(proc() {
- let mut a = a;
- tx.send(a.accept());
- });
- a2.close_accept().unwrap();
-
- assert_eq!(rx.recv().err().unwrap().kind, EndOfFile);
- })
-}
use rt::rtio;
use c_str::CString;
use collections::HashMap;
+use hash::Hash;
+use clone::Clone;
+#[cfg(windows)]
+use std::hash::sip::SipState;
/// Signal a process to exit, without forcibly killing it. Corresponds to
/// SIGTERM on unix platforms.
pub extra_io: Vec<Option<io::PipeStream>>,
}
+/// A representation of an environment variable name.
+/// It compares case-insensitively on Windows and case-sensitively everywhere else.
+#[cfg(not(windows))]
+#[deriving(PartialEq, Eq, Hash, Clone, Show)]
+struct EnvKey(CString);
+
+#[doc(hidden)]
+#[cfg(windows)]
+#[deriving(Eq, Clone, Show)]
+struct EnvKey(CString);
+
+#[cfg(windows)]
+impl Hash for EnvKey {
+ fn hash(&self, state: &mut SipState) {
+ let &EnvKey(ref x) = self;
+ match x.as_str() {
+ Some(s) => for ch in s.chars() {
+ (ch as u8 as char).to_lowercase().hash(state);
+ },
+ None => x.hash(state)
+ }
+ }
+}
+
+#[cfg(windows)]
+impl PartialEq for EnvKey {
+ fn eq(&self, other: &EnvKey) -> bool {
+ let &EnvKey(ref x) = self;
+ let &EnvKey(ref y) = other;
+ match (x.as_str(), y.as_str()) {
+ (Some(xs), Some(ys)) => {
+ if xs.len() != ys.len() {
+ return false
+ } else {
+ for (xch, ych) in xs.chars().zip(ys.chars()) {
+ if xch.to_lowercase() != ych.to_lowercase() {
+ return false;
+ }
+ }
+ return true;
+ }
+ },
+            // If either is not a valid UTF-8 string, just compare them byte-wise
+ _ => return x.eq(y)
+ }
+ }
+}
+
/// A HashMap representation of environment variables.
-pub type EnvMap = HashMap<CString, CString>;
+pub type EnvMap = HashMap<EnvKey, CString>;
/// The `Command` type acts as a process builder, providing fine-grained control
/// over how a new process should be spawned. A default configuration can be
self
}
// Get a mutable borrow of the environment variable map for this `Command`.
- fn get_env_map<'a>(&'a mut self) -> &'a mut EnvMap {
+ fn get_env_map<'a>(&'a mut self) -> &'a mut EnvMap {
match self.env {
Some(ref mut map) => map,
None => {
// if the env is currently just inheriting from the parent's,
// materialize the parent's env into a hashtable.
- self.env = Some(os::env_as_bytes().move_iter()
- .map(|(k, v)| (k.as_slice().to_c_str(),
+ self.env = Some(os::env_as_bytes().into_iter()
+ .map(|(k, v)| (EnvKey(k.as_slice().to_c_str()),
v.as_slice().to_c_str()))
.collect());
self.env.as_mut().unwrap()
}
/// Inserts or updates an environment variable mapping.
+ ///
+ /// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
+ /// and case-sensitive on all other platforms.
pub fn env<'a, T: ToCStr, U: ToCStr>(&'a mut self, key: T, val: U)
-> &'a mut Command {
- self.get_env_map().insert(key.to_c_str(), val.to_c_str());
+ self.get_env_map().insert(EnvKey(key.to_c_str()), val.to_c_str());
self
}
/// Removes an environment variable mapping.
pub fn env_remove<'a, T: ToCStr>(&'a mut self, key: T) -> &'a mut Command {
- self.get_env_map().remove(&key.to_c_str());
+ self.get_env_map().remove(&EnvKey(key.to_c_str()));
self
}
/// variable, the *rightmost* instance will determine the value.
pub fn env_set_all<'a, T: ToCStr, U: ToCStr>(&'a mut self, env: &[(T,U)])
-> &'a mut Command {
- self.env = Some(env.iter().map(|&(ref k, ref v)| (k.to_c_str(), v.to_c_str()))
+ self.env = Some(env.iter().map(|&(ref k, ref v)| (EnvKey(k.to_c_str()), v.to_c_str()))
.collect());
self
}
let env = match self.env {
None => None,
Some(ref env_map) =>
- Some(env_map.iter().collect::<Vec<_>>())
+ Some(env_map.iter()
+ .map(|(&EnvKey(ref key), val)| (key, val))
+ .collect::<Vec<_>>())
};
let cfg = ProcessConfig {
program: &self.program,
detach: self.detach,
};
io.spawn(cfg).map(|(p, io)| {
- let mut io = io.move_iter().map(|p| {
+ let mut io = io.into_iter().map(|p| {
p.map(|p| io::PipeStream::new(p))
});
Process {
assert!(cmd.status().unwrap().success());
assert!(fdes.inner_write("extra write\n".as_bytes()).is_ok());
})
+
+ #[test]
+ #[cfg(windows)]
+ fn env_map_keys_ci() {
+ use super::EnvKey;
+ let mut cmd = Command::new("");
+ cmd.env("path", "foo");
+ cmd.env("Path", "bar");
+ let env = &cmd.env.unwrap();
+ let val = env.find(&EnvKey("PATH".to_c_str()));
+ assert!(val.unwrap() == &"bar".to_c_str());
+ }
}
use io::net::ip::*;
use io::net::udp::*;
#[cfg(unix)]
- use io::net::unix::*;
+ use io::net::pipe::*;
use io::timer::*;
use io::process::*;
use rt::running_on_valgrind;
pub unsafe fn raise_fd_limit() {
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
- use ptr::mut_null;
+ use ptr::null_mut;
use mem::size_of_val;
use os::last_os_error;
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(&mut mib[0], 2, &mut maxfiles as *mut libc::c_int as *mut libc::c_void, &mut size,
- mut_null(), 0) != 0 {
+ null_mut(), 0) != 0 {
let err = last_os_error();
fail!("raise_fd_limit: error calling sysctl: {}", err);
}
}
let len = cmp::min(self.limit, buf.len());
- let res = self.inner.read(buf.mut_slice_to(len));
+ let res = self.inner.read(buf.slice_to_mut(len));
match res {
Ok(len) => self.limit -= len,
_ => {}
impl Writer for MultiWriter {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
- for writer in self.writers.mut_iter() {
+ for writer in self.writers.iter_mut() {
try!(writer.write(buf));
}
Ok(())
#[inline]
fn flush(&mut self) -> io::IoResult<()> {
- for writer in self.writers.mut_iter() {
+ for writer in self.writers.iter_mut() {
try!(writer.flush());
}
Ok(())
#[inline]
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
let mut len = 0;
- for (slot, elt) in buf.mut_iter().zip(self.iter.by_ref()) {
+ for (slot, elt) in buf.iter_mut().zip(self.iter.by_ref()) {
*slot = elt;
len += 1;
}
fn test_chained_reader() {
let rs = vec!(MemReader::new(vec!(0, 1)), MemReader::new(vec!()),
MemReader::new(vec!(2, 3)));
- let mut r = ChainedReader::new(rs.move_iter());
+ let mut r = ChainedReader::new(rs.into_iter());
assert_eq!(vec!(0, 1, 2, 3), r.read_to_end().unwrap());
}
// FIXME #7809: This shouldn't be pub, and it should be reexported under 'unstable'
// but name resolution doesn't work without it being pub.
-#[unstable]
pub mod rt;
mod failure;
#[macro_export]
macro_rules! local_data_key(
($name:ident: $ty:ty) => (
- static $name: ::std::local_data::Key<$ty> = &::std::local_data::Key;
+ static $name: ::std::local_data::Key<$ty> = &::std::local_data::KeyValueKey;
);
(pub $name:ident: $ty:ty) => (
- pub static $name: ::std::local_data::Key<$ty> = &::std::local_data::Key;
+ pub static $name: ::std::local_data::Key<$ty> = &::std::local_data::KeyValueKey;
);
)
/// # Example
///
/// ```rust
- /// let home: &'static str = env!("HOME");
- /// println!("the home directory at the time of compiling was: {}", home);
+ /// let path: &'static str = env!("PATH");
+ /// println!("the $PATH variable at the time of compiling was: {}", path);
/// ```
#[macro_export]
macro_rules! env( ($name:expr) => ({ /* compiler built-in */ }) )
/// }
/// ```
pub fn env() -> Vec<(String,String)> {
- env_as_bytes().move_iter().map(|(k,v)| {
+ env_as_bytes().into_iter().map(|(k,v)| {
let k = String::from_utf8_lossy(k.as_slice()).into_string();
let v = String::from_utf8_lossy(v.as_slice()).into_string();
(k,v)
-1 as c_int];
let mut sz: libc::size_t = 0;
let err = sysctl(mib.as_mut_ptr(), mib.len() as ::libc::c_uint,
- ptr::mut_null(), &mut sz, ptr::mut_null(),
+ ptr::null_mut(), &mut sz, ptr::null_mut(),
0u as libc::size_t);
if err != 0 { return None; }
if sz == 0 { return None; }
let mut v: Vec<u8> = Vec::with_capacity(sz as uint);
let err = sysctl(mib.as_mut_ptr(), mib.len() as ::libc::c_uint,
v.as_mut_ptr() as *mut c_void, &mut sz,
- ptr::mut_null(), 0u as libc::size_t);
+ ptr::null_mut(), 0u as libc::size_t);
if err != 0 { return None; }
if sz == 0 { return None; }
v.set_len(sz as uint - 1); // chop off trailing NUL
unsafe {
use libc::funcs::extra::_NSGetExecutablePath;
let mut sz: u32 = 0;
- _NSGetExecutablePath(ptr::mut_null(), &mut sz);
+ _NSGetExecutablePath(ptr::null_mut(), &mut sz);
if sz == 0 { return None; }
let mut v: Vec<u8> = Vec::with_capacity(sz as uint);
let err = _NSGetExecutablePath(v.as_mut_ptr() as *mut i8, &mut sz);
unsafe {
let res = FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
- ptr::mut_null(),
+ ptr::null_mut(),
errnum as DWORD,
langId,
buf.as_mut_ptr(),
#[cfg(not(windows))]
fn real_args() -> Vec<String> {
- real_args_as_bytes().move_iter()
+ real_args_as_bytes().into_iter()
.map(|v| {
String::from_utf8_lossy(v.as_slice()).into_string()
}).collect()
#[cfg(windows)]
fn real_args_as_bytes() -> Vec<Vec<u8>> {
- real_args().move_iter().map(|s| s.into_bytes()).collect()
+ real_args().into_iter().map(|s| s.into_bytes()).collect()
}
type LPCWSTR = *const u16;
pub fn new(min_len: uint, options: &[MapOption]) -> Result<MemoryMap, MapError> {
use libc::types::os::arch::extra::{LPVOID, DWORD, SIZE_T, HANDLE};
- let mut lpAddress: LPVOID = ptr::mut_null();
+ let mut lpAddress: LPVOID = ptr::null_mut();
let mut readable = false;
let mut writable = false;
let mut executable = false;
unsafe {
let hFile = libc::get_osfhandle(fd) as HANDLE;
let mapping = libc::CreateFileMappingW(hFile,
- ptr::mut_null(),
+ ptr::null_mut(),
flProtect,
0,
0,
ptr::null());
- if mapping == ptr::mut_null() {
+ if mapping == ptr::null_mut() {
return Err(ErrCreateFileMappingW(errno()));
}
if errno() as c_int == libc::ERROR_ALREADY_EXISTS {
let n = if is_abs { comps.len() } else { comps.len() - 1} +
comps.iter().map(|v| v.len()).sum();
let mut v = Vec::with_capacity(n);
- let mut it = comps.move_iter();
+ let mut it = comps.into_iter();
if !is_abs {
match it.next() {
None => (),
assert!(comps == exps, "components: Expected {:?}, found {:?}",
comps, exps);
let comps = path.components().rev().collect::<Vec<&[u8]>>();
- let exps = exps.move_iter().rev().collect::<Vec<&[u8]>>();
+ let exps = exps.into_iter().rev().collect::<Vec<&[u8]>>();
assert!(comps == exps, "rev_components: Expected {:?}, found {:?}",
comps, exps);
}
Some(_) => s.push_str(prefix_),
None => ()
}
- let mut it = comps.move_iter();
+ let mut it = comps.into_iter();
if !is_abs {
match it.next() {
None => (),
//! // where the car is. The game host will never open the door with the car.
//! fn game_host_open<R: Rng>(car: uint, choice: uint, rng: &mut R) -> uint {
//! let choices = free_doors(&[car, choice]);
-//! rand::sample(rng, choices.move_iter(), 1)[0]
+//! rand::sample(rng, choices.into_iter(), 1)[0]
//! }
//!
//! // Returns the door we switch to, given our current choice and
let bytes = path.as_vec();
if bytes.len() < LAST_FILENAME.len() {
let i = bytes.iter();
- for (slot, val) in LAST_FILENAME.mut_iter().zip(i) {
+ for (slot, val) in LAST_FILENAME.iter_mut().zip(i) {
*slot = *val as libc::c_char;
}
LAST_FILENAME.as_ptr()
None => ptr::null(),
};
STATE = backtrace_create_state(filename, 0, error_cb,
- ptr::mut_null());
+ ptr::null_mut());
return STATE
}
#[unsafe_destructor]
impl<T> Drop for TaskPool<T> {
fn drop(&mut self) {
- for channel in self.channels.mut_iter() {
+ for channel in self.channels.iter_mut() {
channel.send(Quit);
}
}
inner: UnsafeCell<Flavor<T>>,
receives: Cell<uint>,
// can't share in an arc
- marker: marker::NoSync,
+ _marker: marker::NoSync,
}
/// An iterator over messages on a receiver, this iterator will block
inner: UnsafeCell<Flavor<T>>,
sends: Cell<uint>,
// can't share in an arc
- marker: marker::NoSync,
+ _marker: marker::NoSync,
}
/// The sending-half of Rust's synchronous channel type. This half can only be
pub struct SyncSender<T> {
inner: Arc<UnsafeCell<sync::Packet<T>>>,
// can't share in an arc
- marker: marker::NoSync,
+ _marker: marker::NoSync,
}
/// This enumeration is the list of the possible reasons that try_recv could not
#[doc(hidden)]
trait UnsafeFlavor<T> {
fn inner_unsafe<'a>(&'a self) -> &'a UnsafeCell<Flavor<T>>;
- unsafe fn mut_inner<'a>(&'a self) -> &'a mut Flavor<T> {
+ unsafe fn inner_mut<'a>(&'a self) -> &'a mut Flavor<T> {
&mut *self.inner_unsafe().get()
}
unsafe fn inner<'a>(&'a self) -> &'a Flavor<T> {
Sender {
inner: UnsafeCell::new(inner),
sends: Cell::new(0),
- marker: marker::NoSync,
+ _marker: marker::NoSync,
}
}
unsafe {
let tmp = Sender::new(Stream(new_inner));
- mem::swap(self.mut_inner(), tmp.mut_inner());
+ mem::swap(self.inner_mut(), tmp.inner_mut());
}
return ret;
}
(*packet.get()).inherit_blocker(sleeper);
let tmp = Sender::new(Shared(packet.clone()));
- mem::swap(self.mut_inner(), tmp.mut_inner());
+ mem::swap(self.inner_mut(), tmp.inner_mut());
}
Sender::new(Shared(packet))
}
#[unsafe_destructor]
impl<T: Send> Drop for Sender<T> {
fn drop(&mut self) {
- match *unsafe { self.mut_inner() } {
+ match *unsafe { self.inner_mut() } {
Oneshot(ref mut p) => unsafe { (*p.get()).drop_chan(); },
Stream(ref mut p) => unsafe { (*p.get()).drop_chan(); },
Shared(ref mut p) => unsafe { (*p.get()).drop_chan(); },
impl<T: Send> SyncSender<T> {
fn new(inner: Arc<UnsafeCell<sync::Packet<T>>>) -> SyncSender<T> {
- SyncSender { inner: inner, marker: marker::NoSync }
+ SyncSender { inner: inner, _marker: marker::NoSync }
}
/// Sends a value on this synchronous channel.
impl<T: Send> Receiver<T> {
fn new(inner: Flavor<T>) -> Receiver<T> {
- Receiver { inner: UnsafeCell::new(inner), receives: Cell::new(0), marker: marker::NoSync }
+ Receiver { inner: UnsafeCell::new(inner), receives: Cell::new(0), _marker: marker::NoSync }
}
/// Blocks waiting for a value on this receiver
}
};
unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
}
}
}
Sync(ref p) => return unsafe { (*p.get()).recv() }
};
unsafe {
- mem::swap(self.mut_inner(), new_port.mut_inner());
+ mem::swap(self.inner_mut(), new_port.inner_mut());
}
}
}
}
};
unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
}
}
}
};
task = t;
unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
}
}
}
let new_port = match result { Ok(b) => return b, Err(p) => p };
was_upgrade = true;
unsafe {
- mem::swap(self.mut_inner(),
- new_port.mut_inner());
+ mem::swap(self.inner_mut(),
+ new_port.inner_mut());
}
}
}
#[unsafe_destructor]
impl<T: Send> Drop for Receiver<T> {
fn drop(&mut self) {
- match *unsafe { self.mut_inner() } {
+ match *unsafe { self.inner_mut() } {
Oneshot(ref mut p) => unsafe { (*p.get()).drop_port(); },
Stream(ref mut p) => unsafe { (*p.get()).drop_port(); },
Shared(ref mut p) => unsafe { (*p.get()).drop_port(); },
/// There may only be one worker per deque.
pub struct Worker<T> {
deque: Arc<Deque<T>>,
- noshare: marker::NoSync,
+ _noshare: marker::NoSync,
}
/// The stealing half of the work-stealing deque. Stealers have access to the
/// `steal` method.
pub struct Stealer<T> {
deque: Arc<Deque<T>>,
- noshare: marker::NoSync,
+ _noshare: marker::NoSync,
}
/// When stealing some data, this is an enumeration of the possible outcomes.
pub fn deque(&self) -> (Worker<T>, Stealer<T>) {
let a = Arc::new(Deque::new(self.clone()));
let b = a.clone();
- (Worker { deque: a, noshare: marker::NoSync },
- Stealer { deque: b, noshare: marker::NoSync })
+ (Worker { deque: a, _noshare: marker::NoSync },
+ Stealer { deque: b, _noshare: marker::NoSync })
}
fn alloc(&mut self, bits: uint) -> Box<Buffer<T>> {
impl<T: Send> Clone for Stealer<T> {
fn clone(&self) -> Stealer<T> {
- Stealer { deque: self.deque.clone(), noshare: marker::NoSync }
+ Stealer { deque: self.deque.clone(), _noshare: marker::NoSync }
}
}
}
}
- for thread in threads.move_iter() {
+ for thread in threads.into_iter() {
thread.join();
}
}
})
}).collect::<Vec<Thread<()>>>();
- for thread in threads.move_iter() {
+ for thread in threads.into_iter() {
thread.join();
}
}
DONE.store(true, SeqCst);
}
- for thread in threads.move_iter() {
+ for thread in threads.into_iter() {
thread.join();
}
unsafe { DONE.store(true, SeqCst); }
- for thread in threads.move_iter() {
+ for thread in threads.into_iter() {
thread.join();
}
}
}
// Wait for children to pass their asserts
- for r in children.mut_iter() {
+ for r in children.iter_mut() {
assert!(r.get_ref().is_ok());
}
assert_eq!(*lock, 42);
*lock = 31337;
// send to other readers
- for &(ref mut rc, _) in reader_convos.mut_iter() {
+ for &(ref mut rc, _) in reader_convos.iter_mut() {
rc.send(())
}
let lock = lock.downgrade();
// complete handshake with other readers
- for &(_, ref mut rp) in reader_convos.mut_iter() {
+ for &(_, ref mut rp) in reader_convos.iter_mut() {
rp.recv()
}
tx1.send(()); // tell writer to try again
});
}
- for rx in completion_rxs.mut_iter() {
+ for rx in completion_rxs.iter_mut() {
assert_eq!(nmsgs, rx.recv());
}
for _ in range(0, nthreads) {
}
// wait until all children get in the mutex
- for rx in rxs.mut_iter() { rx.recv(); }
+ for rx in rxs.iter_mut() { rx.recv(); }
{
let lock = m.lock();
let num_woken = lock.cond.broadcast();
assert_eq!(num_woken, num_waiters);
}
// wait until all children wake up
- for rx in rxs.mut_iter() { rx.recv(); }
+ for rx in rxs.iter_mut() { rx.recv(); }
}
#[test]
fn test_mutex_cond_broadcast() {
}
// wait until all children get in the mutex
- for rx in rxs.mut_iter() { let _ = rx.recv(); }
+ for rx in rxs.iter_mut() { let _ = rx.recv(); }
lock_cond(&x, |cond| {
let num_woken = cond.broadcast();
assert_eq!(num_woken, num_waiters);
});
// wait until all children wake up
- for rx in rxs.mut_iter() { let _ = rx.recv(); }
+ for rx in rxs.iter_mut() { let _ = rx.recv(); }
}
#[test]
fn test_rwlock_cond_broadcast() {
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum TyParamBound {
TraitTyParamBound(TraitRef),
- UnboxedFnTyParamBound(UnboxedFnTy),
+ UnboxedFnTyParamBound(P<UnboxedFnBound>),
RegionTyParamBound(Lifetime)
}
pub type TyParamBounds = OwnedSlice<TyParamBound>;
+#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
+pub struct UnboxedFnBound {
+ pub path: Path,
+ pub decl: P<FnDecl>,
+ pub ref_id: NodeId,
+}
+
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct TyParam {
pub ident: Ident,
ExprField(P<Expr>, SpannedIdent, Vec<P<Ty>>),
ExprTupField(P<Expr>, Spanned<uint>, Vec<P<Ty>>),
ExprIndex(P<Expr>, P<Expr>),
+ ExprSlice(P<Expr>, Option<P<Expr>>, Option<P<Expr>>, Mutability),
/// Variable reference, possibly containing `::` and/or
/// type parameters, e.g. foo::bar::<baz>
ExprParen(P<Expr>)
}
+/// A "qualified path":
+///
+/// <Vec<T> as SomeTrait>::SomeAssociatedItem
+/// ^~~~~ ^~~~~~~~~ ^~~~~~~~~~~~~~~~~~
+/// for_type trait_name item_name
+#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
+pub struct QPath {
+ pub for_type: P<Ty>,
+ pub trait_name: Path,
+ pub item_name: Ident,
+}
+
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum CaptureClause {
CaptureByValue,
pub enum TraitItem {
RequiredMethod(TypeMethod),
ProvidedMethod(P<Method>),
+ TypeTraitItem(P<AssociatedType>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub enum ImplItem {
MethodImplItem(P<Method>),
+ TypeImplItem(P<Typedef>),
+}
+
+#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
+pub struct AssociatedType {
+ pub id: NodeId,
+ pub span: Span,
+ pub ident: Ident,
+ pub attrs: Vec<Attribute>,
+}
+
+#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
+pub struct Typedef {
+ pub id: NodeId,
+ pub span: Span,
+ pub ident: Ident,
+ pub vis: Visibility,
+ pub attrs: Vec<Attribute>,
+ pub typ: P<Ty>,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
TyUnboxedFn(P<UnboxedFnTy>),
TyTup(Vec<P<Ty>> ),
TyPath(Path, Option<TyParamBounds>, NodeId), // for #7264; see above
+ /// A "qualified path", e.g. `<Vec<T> as SomeTrait>::SomeType`
+ TyQPath(P<QPath>),
/// No-op; kept solely so that we can pretty-print faithfully
TyParen(P<Ty>),
TyTypeof(P<Expr>),
inner: Span {
lo: BytePos(11),
hi: BytePos(19),
- expn_info: None,
+ expn_id: NO_EXPANSION,
},
view_items: Vec::new(),
items: Vec::new(),
span: Span {
lo: BytePos(10),
hi: BytePos(20),
- expn_info: None,
+ expn_id: NO_EXPANSION,
},
exported_macros: Vec::new(),
};
ast_map::NodeImplItem(ii) => {
match *ii {
ast::MethodImplItem(ref m) => method(&**m),
+ ast::TypeImplItem(_) => {
+ fail!("impl method FnLikeNode that is not fn-like")
+ }
}
}
ast_map::NodeExpr(e) => match e.node {
use abi;
use ast::*;
use ast_util;
-use ast_util::PostExpansionMethod;
use codemap::{DUMMY_SP, Span, Spanned};
use fold::Folder;
use parse::token;
use print::pprust;
+use ptr::P;
use visit::{mod, Visitor};
use arena::TypedArena;
}
}
}
+ TypeImplItem(ref t) => PathName(t.ident.name),
}
},
NodeTraitItem(tm) => match *tm {
RequiredMethod(ref m) => PathName(m.ident.name),
- ProvidedMethod(ref m) => match m.node {
- MethDecl(ident, _, _, _, _, _, _, _) => {
- PathName(ident.name)
+ ProvidedMethod(ref m) => {
+ match m.node {
+ MethDecl(ident, _, _, _, _, _, _, _) => {
+ PathName(ident.name)
+ }
+ MethMac(_) => fail!("no path elem for {:?}", node),
}
- MethMac(_) => fail!("no path elem for {:?}", node),
}
+ TypeTraitItem(ref m) => PathName(m.ident.name),
},
NodeVariant(v) => PathName(v.node.name.name),
_ => fail!("no path elem for {:?}", node)
fn path_to_str_with_ident(&self, id: NodeId, i: Ident) -> String {
self.with_path(id, |path| {
- path_to_string(path.chain(Some(PathName(i.name)).move_iter()))
+ path_to_string(path.chain(Some(PathName(i.name)).into_iter()))
})
}
NodeForeignItem(fi) => Some(fi.attrs.as_slice()),
NodeTraitItem(ref tm) => match **tm {
RequiredMethod(ref type_m) => Some(type_m.attrs.as_slice()),
- ProvidedMethod(ref m) => Some(m.attrs.as_slice())
+ ProvidedMethod(ref m) => Some(m.attrs.as_slice()),
+ TypeTraitItem(ref typ) => Some(typ.attrs.as_slice()),
},
NodeImplItem(ref ii) => {
match **ii {
MethodImplItem(ref m) => Some(m.attrs.as_slice()),
+ TypeImplItem(ref t) => Some(t.attrs.as_slice()),
}
}
NodeVariant(ref v) => Some(v.node.attrs.as_slice()),
match *trait_method {
RequiredMethod(ref type_method) => type_method.span,
ProvidedMethod(ref method) => method.span,
+ TypeTraitItem(ref typedef) => typedef.span,
}
}
Some(NodeImplItem(ref impl_item)) => {
match **impl_item {
MethodImplItem(ref method) => method.span,
+ TypeImplItem(ref typedef) => typedef.span,
}
}
Some(NodeVariant(variant)) => variant.span,
match *self {
RequiredMethod(ref tm) => tm.ident.name,
ProvidedMethod(ref m) => m.name(),
+ TypeTraitItem(ref at) => at.ident.name,
}
}
}
fn name(&self) -> Name {
match *self {
MethodImplItem(ref m) => m.name(),
+ TypeImplItem(ref td) => td.ident.name,
}
}
}
match i.node {
ItemImpl(_, _, _, ref impl_items) => {
for impl_item in impl_items.iter() {
- let id = match *impl_item {
- MethodImplItem(ref m) => m.id
- };
- self.insert(id, NodeImplItem(impl_item));
+ match *impl_item {
+ MethodImplItem(ref m) => {
+ self.insert(m.id, NodeImplItem(impl_item));
+ }
+ TypeImplItem(ref t) => {
+ self.insert(t.id, NodeImplItem(impl_item));
+ }
+ }
}
}
ItemEnum(ref enum_definition, _) => {
None => {}
}
}
- ItemTrait(_, _, _, ref methods) => {
- for tm in methods.iter() {
- let id = match *tm {
- RequiredMethod(ref m) => m.id,
- ProvidedMethod(ref m) => m.id
- };
- self.insert(id, NodeTraitItem(tm));
+ ItemTrait(_, _, ref bounds, ref trait_items) => {
+ for b in bounds.iter() {
+ match *b {
+ TraitTyParamBound(ref t) => {
+ self.insert(t.ref_id, NodeItem(i));
+ }
+ _ => {}
+ }
+ }
+
+ for tm in trait_items.iter() {
+ match *tm {
+ RequiredMethod(ref m) => {
+ self.insert(m.id, NodeTraitItem(tm));
+ }
+ ProvidedMethod(ref m) => {
+ self.insert(m.id, NodeTraitItem(tm));
+ }
+ TypeTraitItem(ref typ) => {
+ self.insert(typ.id, NodeTraitItem(tm));
+ }
+ }
}
}
_ => {}
IITraitItem(fld.fold_ops.new_def_id(d),
RequiredMethod(fld.fold_type_method(ty_m)))
}
+ TypeTraitItem(at) => {
+ IITraitItem(
+ fld.fold_ops.new_def_id(d),
+ TypeTraitItem(P(fld.fold_associated_type((*at).clone()))))
+ }
},
IIImplItem(d, m) => match m {
MethodImplItem(m) => {
MethodImplItem(fld.fold_method(m)
.expect_one("expected one method")))
}
+ TypeImplItem(t) => {
+ IIImplItem(fld.fold_ops.new_def_id(d),
+ TypeImplItem(P(fld.fold_typedef((*t).clone()))))
+ }
},
IIForeign(i) => IIForeign(fld.fold_foreign_item(i))
};
IITraitItem(_, ref trait_item) => {
let trait_item_id = match *trait_item {
ProvidedMethod(ref m) => m.id,
- RequiredMethod(ref m) => m.id
+ RequiredMethod(ref m) => m.id,
+ TypeTraitItem(ref ty) => ty.id,
};
collector.insert(trait_item_id, NodeTraitItem(trait_item));
}
IIImplItem(_, ref impl_item) => {
let impl_item_id = match *impl_item {
- MethodImplItem(ref m) => m.id
+ MethodImplItem(ref m) => m.id,
+ TypeImplItem(ref ti) => ti.id,
};
collector.insert(impl_item_id, NodeImplItem(impl_item));
pprust::mac_to_string(mac), id)
}
}
+ TypeImplItem(ref t) => {
+ format!("typedef {} in {} (id={})",
+ token::get_ident(t.ident),
+ map.path_to_string(id),
+ id)
+ }
}
}
- Some(NodeTraitItem(ref ti)) => {
- let ident = match **ti {
- ProvidedMethod(ref m) => m.pe_ident(),
- RequiredMethod(ref m) => m.ident
- };
- format!("method {} in {} (id={})",
- token::get_ident(ident),
- map.path_to_string(id), id)
+ Some(NodeTraitItem(ref tm)) => {
+ match **tm {
+ RequiredMethod(_) | ProvidedMethod(_) => {
+ let m = ast_util::trait_item_to_ty_method(&**tm);
+ format!("method {} in {} (id={})",
+ token::get_ident(m.ident),
+ map.path_to_string(id),
+ id)
+ }
+ TypeTraitItem(ref t) => {
+ format!("type item {} in {} (id={})",
+ token::get_ident(t.ident),
+ map.path_to_string(id),
+ id)
+ }
+ }
}
Some(NodeVariant(ref variant)) => {
format!("variant {} in {} (id={})",
token::gensym_ident(pretty.as_slice())
}
+/// Build a declaration-only `TypeMethod` from a provided trait `Method`,
+/// cloning the signature pieces (ident, generics, abi, explicit self,
+/// fn style, decl, visibility) along with the method's attrs, id and span.
+///
+/// Fails on `MethMac` — only usable once macro expansion is complete.
+pub fn trait_method_to_ty_method(method: &Method) -> TypeMethod {
+    match method.node {
+        MethDecl(ident,
+                 ref generics,
+                 abi,
+                 ref explicit_self,
+                 fn_style,
+                 ref decl,
+                 _,
+                 vis) => {
+            TypeMethod {
+                ident: ident,
+                attrs: method.attrs.clone(),
+                fn_style: fn_style,
+                decl: (*decl).clone(),
+                generics: generics.clone(),
+                explicit_self: (*explicit_self).clone(),
+                id: method.id,
+                span: method.span,
+                vis: vis,
+                abi: abi,
+            }
+        },
+        MethMac(_) => fail!("expected non-macro method declaration")
+    }
+}
+
+/// Extract a `TypeMethod` from a `TraitItem`: required methods are
+/// cloned directly; provided (default) methods have their signature
+/// pulled out via `trait_method_to_ty_method`.
+//
+// NB: to be used only after expansion is complete, and macros are gone.
+pub fn trait_item_to_ty_method(method: &TraitItem) -> TypeMethod {
+    match *method {
+        RequiredMethod(ref m) => (*m).clone(),
+        ProvidedMethod(ref m) => trait_method_to_ty_method(&**m),
+        TypeTraitItem(_) => {
+            // Associated types carry no method signature to extract.
+            fail!("trait_item_to_ty_method(): expected method but found \
+                   typedef")
+        }
+    }
+}
+
+/// Partition a trait's items into (required method signatures, provided
+/// methods). Associated types (`TypeTraitItem`) appear in neither list.
+pub fn split_trait_methods(trait_methods: &[TraitItem])
+                           -> (Vec<TypeMethod>, Vec<P<Method>> ) {
+    let mut reqd = Vec::new();
+    let mut provd = Vec::new();
+    for trt_method in trait_methods.iter() {
+        match *trt_method {
+            RequiredMethod(ref tm) => reqd.push((*tm).clone()),
+            ProvidedMethod(ref m) => provd.push((*m).clone()),
+            TypeTraitItem(_) => {}
+        }
+    };
+    (reqd, provd)
+}
+
pub fn struct_field_visibility(field: ast::StructField) -> Visibility {
match field.node.kind {
ast::NamedField(_, v) | ast::UnnamedField(v) => v
match *tm {
ast::RequiredMethod(ref m) => self.operation.visit_id(m.id),
ast::ProvidedMethod(ref m) => self.operation.visit_id(m.id),
+ ast::TypeTraitItem(ref typ) => self.operation.visit_id(typ.id),
}
visit::walk_trait_item(self, tm);
}
pub fn sort_meta_items(items: Vec<P<MetaItem>>) -> Vec<P<MetaItem>> {
// This is sort of stupid here, but we need to sort by
// human-readable strings.
- let mut v = items.move_iter()
+ let mut v = items.into_iter()
.map(|mi| (mi.name(), mi))
.collect::<Vec<(InternedString, P<MetaItem>)>>();
v.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
// There doesn't seem to be a more optimal way to do this
- v.move_iter().map(|(_, m)| m.map(|Spanned {node, span}| {
+ v.into_iter().map(|(_, m)| m.map(|Spanned {node, span}| {
Spanned {
node: match node {
MetaList(n, mis) => MetaList(n, sort_meta_items(mis)),
use serialize::{Encodable, Decodable, Encoder, Decoder};
use std::cell::RefCell;
-use std::gc::Gc;
use std::rc::Rc;
pub trait Pos {
pub hi: BytePos,
/// Information about where the macro came from, if this piece of
/// code was created by a macro expansion.
- pub expn_info: Option<Gc<ExpnInfo>>
+ pub expn_id: ExpnId
}
-pub static DUMMY_SP: Span = Span { lo: BytePos(0), hi: BytePos(0), expn_info: None };
+pub static DUMMY_SP: Span = Span { lo: BytePos(0), hi: BytePos(0), expn_id: NO_EXPANSION };
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash, Show)]
pub struct Spanned<T> {
/* assuming that we're not in macro expansion */
pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
- Span {lo: lo, hi: hi, expn_info: None}
+ Span {lo: lo, hi: hi, expn_id: NO_EXPANSION}
}
/// Return the span itself if it doesn't come from a macro expansion,
/// otherwise return the call site span up to the `enclosing_sp` by
/// following the `expn_info` chain.
-pub fn original_sp(sp: Span, enclosing_sp: Span) -> Span {
- match (sp.expn_info, enclosing_sp.expn_info) {
+pub fn original_sp(cm: &CodeMap, sp: Span, enclosing_sp: Span) -> Span {
+ let call_site1 = cm.with_expn_info(sp.expn_id, |ei| ei.map(|ei| ei.call_site));
+ let call_site2 = cm.with_expn_info(enclosing_sp.expn_id, |ei| ei.map(|ei| ei.call_site));
+ match (call_site1, call_site2) {
(None, _) => sp,
- (Some(expn1), Some(expn2)) if expn1.call_site == expn2.call_site => sp,
- (Some(expn1), _) => original_sp(expn1.call_site, enclosing_sp),
+ (Some(call_site1), Some(call_site2)) if call_site1 == call_site2 => sp,
+ (Some(call_site1), _) => original_sp(cm, call_site1, enclosing_sp),
}
}
pub callee: NameAndSpan
}
+/// Index into the `CodeMap`'s side-table of `ExpnInfo` records; replaces
+/// the old per-span `Option<Gc<ExpnInfo>>` pointer.
+#[deriving(PartialEq, Eq, Clone, Show, Hash)]
+pub struct ExpnId(u32);
+
+/// Sentinel meaning "not produced by any macro expansion"
+/// (the `-1` wraps to the maximum `u32` value).
+pub static NO_EXPANSION: ExpnId = ExpnId(-1);
+
pub type FileName = String;
pub struct FileLines {
}
pub struct CodeMap {
- pub files: RefCell<Vec<Rc<FileMap>>>
+ pub files: RefCell<Vec<Rc<FileMap>>>,
+ expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
+ expansions: RefCell::new(Vec::new()),
}
}
col: chpos - linechpos
}
}
+
+    /// Append `expn_info` to the expansions side-table and return its
+    /// index as a fresh `ExpnId`.
+    pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
+        let mut expansions = self.expansions.borrow_mut();
+        expansions.push(expn_info);
+        // Index of the entry just pushed; fails if the count overflows u32.
+        ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
+    }
+
+    /// Look up the `ExpnInfo` for `id` and hand it to `f`; `NO_EXPANSION`
+    /// yields `None`. The expansions table stays borrowed for the
+    /// duration of `f`.
+    pub fn with_expn_info<T>(&self, id: ExpnId, f: |Option<&ExpnInfo>| -> T) -> T {
+        match id {
+            NO_EXPANSION => f(None),
+            ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as uint]))
+        }
+    }
}
#[cfg(test)]
fn t7() {
// Test span_to_lines for a span ending at the end of filemap
let cm = init_code_map();
- let span = Span {lo: BytePos(12), hi: BytePos(23), expn_info: None};
+ let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let file_lines = cm.span_to_lines(span);
assert_eq!(file_lines.file.name, "blork.rs".to_string());
fn t8() {
// Test span_to_snippet for a span ending at the end of filemap
let cm = init_code_map();
- let span = Span {lo: BytePos(12), hi: BytePos(23), expn_info: None};
+ let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let snippet = cm.span_to_snippet(span);
assert_eq!(snippet, Some("second line".to_string()));
fn t9() {
// Test span_to_str for a span ending at the end of filemap
let cm = init_code_map();
- let span = Span {lo: BytePos(12), hi: BytePos(23), expn_info: None};
+ let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let sstr = cm.span_to_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12".to_string());
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use fold::Folder;
+use {ast, fold, attr};
+use codemap::Spanned;
+use ptr::P;
+
+/// A folder that strips out items that do not belong in the current
+/// configuration.
+struct Context<'a> {
+    // Predicate deciding whether a set of attributes keeps its owner
+    // in the active configuration.
+    in_cfg: |attrs: &[ast::Attribute]|: 'a -> bool,
+}
+
+// Support conditional compilation by transforming the AST, stripping out
+// any items that do not belong in the current configuration
+pub fn strip_unconfigured_items(krate: ast::Crate) -> ast::Crate {
+    // The crate's own cfg meta items drive the keep-predicate.
+    let config = krate.config.clone();
+    strip_items(krate, |attrs| in_cfg(config.as_slice(), attrs))
+}
+
+// Override only the fold hooks whose children can be cfg-stripped; every
+// other AST node takes the default `Folder` path.
+impl<'a> fold::Folder for Context<'a> {
+    fn fold_mod(&mut self, module: ast::Mod) -> ast::Mod {
+        fold_mod(self, module)
+    }
+    fn fold_block(&mut self, block: P<ast::Block>) -> P<ast::Block> {
+        fold_block(self, block)
+    }
+    fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
+        fold_foreign_mod(self, foreign_mod)
+    }
+    fn fold_item_underscore(&mut self, item: ast::Item_) -> ast::Item_ {
+        fold_item_underscore(self, item)
+    }
+    fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
+        fold_expr(self, expr)
+    }
+    fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
+        fold::noop_fold_mac(mac, self)
+    }
+}
+
+/// Fold `krate` with `in_cfg` as the keep-predicate; this is the entry
+/// point `strip_unconfigured_items` uses, exposed for custom predicates.
+pub fn strip_items(krate: ast::Crate,
+                   in_cfg: |attrs: &[ast::Attribute]| -> bool)
+                   -> ast::Crate {
+    let mut ctxt = Context {
+        in_cfg: in_cfg,
+    };
+    ctxt.fold_crate(krate)
+}
+
+// Pass the view item through iff its attributes match the configuration.
+fn filter_view_item(cx: &mut Context, view_item: ast::ViewItem) -> Option<ast::ViewItem> {
+    if view_item_in_cfg(cx, &view_item) {
+        Some(view_item)
+    } else {
+        None
+    }
+}
+
+// Strip a module: filter view items and items by cfg, then fold each
+// survivor (folding an item can yield several items, hence flat_map).
+fn fold_mod(cx: &mut Context, ast::Mod {inner, view_items, items}: ast::Mod) -> ast::Mod {
+    ast::Mod {
+        inner: inner,
+        view_items: view_items.into_iter().filter_map(|a| {
+            filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
+        }).collect(),
+        items: items.into_iter().filter_map(|a| {
+            if item_in_cfg(cx, &*a) {
+                Some(cx.fold_item(a))
+            } else {
+                None
+            }
+        }).flat_map(|x| x.into_iter()).collect()
+    }
+}
+
+// Pass the foreign item through iff its attributes match the configuration.
+fn filter_foreign_item(cx: &mut Context, item: P<ast::ForeignItem>)
+                       -> Option<P<ast::ForeignItem>> {
+    if foreign_item_in_cfg(cx, &*item) {
+        Some(item)
+    } else {
+        None
+    }
+}
+
+// Strip an `extern` block: filter its view items and foreign items by cfg.
+fn fold_foreign_mod(cx: &mut Context, ast::ForeignMod {abi, view_items, items}: ast::ForeignMod)
+                    -> ast::ForeignMod {
+    ast::ForeignMod {
+        abi: abi,
+        view_items: view_items.into_iter().filter_map(|a| {
+            filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
+        }).collect(),
+        items: items.into_iter()
+                    .filter_map(|a| filter_foreign_item(cx, a))
+                    .collect()
+    }
+}
+
+// Strip cfg'd-out pieces nested one level inside an item — impl items,
+// trait items, struct fields and enum variants — then let the default
+// fold recurse into whatever survives.
+fn fold_item_underscore(cx: &mut Context, item: ast::Item_) -> ast::Item_ {
+    let item = match item {
+        ast::ItemImpl(a, b, c, impl_items) => {
+            let impl_items = impl_items.into_iter()
+                                       .filter(|ii| impl_item_in_cfg(cx, ii))
+                                       .collect();
+            ast::ItemImpl(a, b, c, impl_items)
+        }
+        ast::ItemTrait(a, b, c, methods) => {
+            let methods = methods.into_iter()
+                                 .filter(|m| trait_method_in_cfg(cx, m))
+                                 .collect();
+            ast::ItemTrait(a, b, c, methods)
+        }
+        ast::ItemStruct(def, generics) => {
+            ast::ItemStruct(fold_struct(cx, def), generics)
+        }
+        ast::ItemEnum(def, generics) => {
+            let mut variants = def.variants.into_iter().filter_map(|v| {
+                if !(cx.in_cfg)(v.node.attrs.as_slice()) {
+                    None
+                } else {
+                    // Variant survives; still strip the fields of any
+                    // struct-like variant.
+                    Some(v.map(|Spanned {node: ast::Variant_ {id, name, attrs, kind,
+                                                              disr_expr, vis}, span}| {
+                        Spanned {
+                            node: ast::Variant_ {
+                                id: id,
+                                name: name,
+                                attrs: attrs,
+                                kind: match kind {
+                                    ast::TupleVariantKind(..) => kind,
+                                    ast::StructVariantKind(def) => {
+                                        ast::StructVariantKind(fold_struct(cx, def))
+                                    }
+                                },
+                                disr_expr: disr_expr,
+                                vis: vis
+                            },
+                            span: span
+                        }
+                    }))
+                }
+            });
+            ast::ItemEnum(ast::EnumDef {
+                variants: variants.collect(),
+            }, generics)
+        }
+        item => item,
+    };
+
+    fold::noop_fold_item_underscore(item, cx)
+}
+
+// Drop struct fields whose attributes reject the configuration.
+fn fold_struct(cx: &mut Context, def: P<ast::StructDef>) -> P<ast::StructDef> {
+    def.map(|ast::StructDef {fields, ctor_id, super_struct, is_virtual}| {
+        ast::StructDef {
+            fields: fields.into_iter().filter(|m| {
+                (cx.in_cfg)(m.node.attrs.as_slice())
+            }).collect(),
+            ctor_id: ctor_id,
+            super_struct: super_struct,
+            is_virtual: is_virtual,
+        }
+    })
+}
+
+// A statement is dropped only when it declares an item that is cfg'd out;
+// every other statement kind is kept.
+fn retain_stmt(cx: &mut Context, stmt: &ast::Stmt) -> bool {
+    match stmt.node {
+        ast::StmtDecl(ref decl, _) => {
+            match decl.node {
+                ast::DeclItem(ref item) => {
+                    item_in_cfg(cx, &**item)
+                }
+                _ => true
+            }
+        }
+        _ => true
+    }
+}
+
+// Filter a block's statements (dropping cfg'd-out item declarations) and
+// view items, then fold the survivors; folding one stmt may produce
+// several, hence the flat_map.
+fn fold_block(cx: &mut Context, b: P<ast::Block>) -> P<ast::Block> {
+    b.map(|ast::Block {id, view_items, stmts, expr, rules, span}| {
+        let resulting_stmts: Vec<P<ast::Stmt>> =
+            stmts.into_iter().filter(|a| retain_stmt(cx, &**a)).collect();
+        let resulting_stmts = resulting_stmts.into_iter()
+            .flat_map(|stmt| cx.fold_stmt(stmt).into_iter())
+            .collect();
+        let filtered_view_items = view_items.into_iter().filter_map(|a| {
+            filter_view_item(cx, a).map(|x| cx.fold_view_item(x))
+        }).collect();
+        ast::Block {
+            id: id,
+            view_items: filtered_view_items,
+            stmts: resulting_stmts,
+            expr: expr.map(|x| cx.fold_expr(x)),
+            rules: rules,
+            span: span,
+        }
+    })
+}
+
+// Expressions pass through unchanged except `match`, whose arms carry
+// attributes and are filtered against the configuration here.
+fn fold_expr(cx: &mut Context, expr: P<ast::Expr>) -> P<ast::Expr> {
+    expr.map(|ast::Expr {id, span, node}| {
+        fold::noop_fold_expr(ast::Expr {
+            id: id,
+            node: match node {
+                ast::ExprMatch(m, arms) => {
+                    ast::ExprMatch(m, arms.into_iter()
+                                        .filter(|a| (cx.in_cfg)(a.attrs.as_slice()))
+                                        .collect())
+                }
+                _ => node
+            },
+            span: span
+        }, cx)
+    })
+}
+
+// An item is kept iff its attributes match the active configuration.
+fn item_in_cfg(cx: &mut Context, item: &ast::Item) -> bool {
+    return (cx.in_cfg)(item.attrs.as_slice());
+}
+
+// A foreign item is kept iff its attributes match the configuration.
+fn foreign_item_in_cfg(cx: &mut Context, item: &ast::ForeignItem) -> bool {
+    return (cx.in_cfg)(item.attrs.as_slice());
+}
+
+// A view item is kept iff its attributes match the configuration.
+fn view_item_in_cfg(cx: &mut Context, item: &ast::ViewItem) -> bool {
+    return (cx.in_cfg)(item.attrs.as_slice());
+}
+
+// Check a trait item's own attributes against the configuration; each of
+// the three item kinds stores its attrs in a different place.
+fn trait_method_in_cfg(cx: &mut Context, meth: &ast::TraitItem) -> bool {
+    match *meth {
+        ast::RequiredMethod(ref meth) => (cx.in_cfg)(meth.attrs.as_slice()),
+        ast::ProvidedMethod(ref meth) => (cx.in_cfg)(meth.attrs.as_slice()),
+        ast::TypeTraitItem(ref typ) => (cx.in_cfg)(typ.attrs.as_slice()),
+    }
+}
+
+// Check an impl item's own attributes against the configuration.
+fn impl_item_in_cfg(cx: &mut Context, impl_item: &ast::ImplItem) -> bool {
+    match *impl_item {
+        ast::MethodImplItem(ref meth) => (cx.in_cfg)(meth.attrs.as_slice()),
+        ast::TypeImplItem(ref typ) => (cx.in_cfg)(typ.attrs.as_slice()),
+    }
+}
+
+// Determine if an item should be translated in the current crate
+// configuration based on the item's attributes; the actual cfg-matching
+// rules are delegated to `attr::test_cfg`.
+fn in_cfg(cfg: &[P<ast::MetaItem>], attrs: &[ast::Attribute]) -> bool {
+    attr::test_cfg(cfg, attrs.iter())
+}
+
// we want to tell compiletest/runtest to look at the last line of the
// span (since `custom_highlight_lines` displays an arrow to the end of
// the span)
- let span_end = Span { lo: sp.hi, hi: sp.hi, expn_info: sp.expn_info};
+ let span_end = Span { lo: sp.hi, hi: sp.hi, expn_id: sp.expn_id};
let ses = cm.span_to_string(span_end);
try!(print_diagnostic(dst, ses.as_slice(), lvl, msg, code));
if rsp.is_full_span() {
cm: &codemap::CodeMap,
sp: Span)
-> io::IoResult<()> {
- for ei in sp.expn_info.iter() {
- let ss = ei.callee
- .span
- .as_ref()
- .map_or("".to_string(), |span| cm.span_to_string(*span));
- let (pre, post) = match ei.callee.format {
- codemap::MacroAttribute => ("#[", "]"),
- codemap::MacroBang => ("", "!")
- };
- try!(print_diagnostic(w, ss.as_slice(), Note,
- format!("in expansion of {}{}{}", pre,
- ei.callee.name,
- post).as_slice(), None));
- let ss = cm.span_to_string(ei.call_site);
- try!(print_diagnostic(w, ss.as_slice(), Note, "expansion site", None));
- try!(print_macro_backtrace(w, cm, ei.call_site));
- }
- Ok(())
+ let cs = try!(cm.with_expn_info(sp.expn_id, |expn_info| match expn_info {
+ Some(ei) => {
+ let ss = ei.callee.span.map_or(String::new(), |span| cm.span_to_string(span));
+ let (pre, post) = match ei.callee.format {
+ codemap::MacroAttribute => ("#[", "]"),
+ codemap::MacroBang => ("", "!")
+ };
+ try!(print_diagnostic(w, ss.as_slice(), Note,
+ format!("in expansion of {}{}{}", pre,
+ ei.callee.name,
+ post).as_slice(), None));
+ let ss = cm.span_to_string(ei.call_site);
+ try!(print_diagnostic(w, ss.as_slice(), Note, "expansion site", None));
+ Ok(Some(ei.call_site))
+ }
+ None => Ok(None)
+ }));
+ cs.map_or(Ok(()), |call_site| print_macro_backtrace(w, cm, call_site))
}
pub fn expect<T>(diag: &SpanHandler, opt: Option<T>, msg: || -> String) -> T {
use ast;
use ast::{Ident, Name, TokenTree};
use codemap::Span;
-use ext::base::{ExtCtxt, MacExpr, MacItem, MacResult};
+use ext::base::{ExtCtxt, MacExpr, MacResult, MacItems};
use ext::build::AstBuilder;
use parse::token;
use ptr::P;
let sym = Ident::new(token::gensym((
"__register_diagnostic_".to_string() + token::get_ident(*code).get()
).as_slice()));
- MacItem::new(quote_item!(ecx, mod $sym {}).unwrap())
+ MacItems::new(vec![quote_item!(ecx, mod $sym {}).unwrap()].into_iter())
}
pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt,
(descriptions.len(), ecx.expr_vec(span, descriptions))
})
});
- MacItem::new(quote_item!(ecx,
+ MacItems::new(vec![quote_item!(ecx,
pub static $name: [(&'static str, &'static str), ..$count] = $expr;
- ).unwrap())
+ ).unwrap()].into_iter())
}
use ast;
use ast::Name;
use codemap;
-use codemap::{CodeMap, Span, ExpnInfo};
+use codemap::{CodeMap, Span, ExpnId, ExpnInfo, NO_EXPANSION};
use ext;
use ext::expand;
use parse;
use fold::Folder;
use std::collections::HashMap;
-use std::gc::{Gc, GC};
use std::rc::Rc;
// new-style macro! tt code:
Some(self.p)
}
}
-/// A convenience type for macros that return a single item.
-pub struct MacItem {
- i: P<ast::Item>
+/// A type for macros that return multiple items.
+pub struct MacItems {
+ items: SmallVector<P<ast::Item>>
}
-impl MacItem {
- pub fn new(i: P<ast::Item>) -> Box<MacResult+'static> {
- box MacItem { i: i } as Box<MacResult+'static>
+
+impl MacItems {
+ pub fn new<I: Iterator<P<ast::Item>>>(mut it: I) -> Box<MacResult+'static> {
+ box MacItems { items: it.collect() } as Box<MacResult+'static>
}
}
-impl MacResult for MacItem {
- fn make_items(self: Box<MacItem>) -> Option<SmallVector<P<ast::Item>>> {
- Some(SmallVector::one(self.i))
- }
- fn make_stmt(self: Box<MacItem>) -> Option<P<ast::Stmt>> {
- Some(P(codemap::respan(
- self.i.span,
- ast::StmtDecl(
- P(codemap::respan(self.i.span, ast::DeclItem(self.i))),
- ast::DUMMY_NODE_ID))))
+
+impl MacResult for MacItems {
+ fn make_items(self: Box<MacItems>) -> Option<SmallVector<P<ast::Item>>> {
+ Some(self.items)
}
}
/// based upon it.
///
/// `#[deriving(...)]` is an `ItemDecorator`.
- ItemDecorator(Box<ItemDecorator + 'static>),
+ Decorator(Box<ItemDecorator + 'static>),
/// A syntax extension that is attached to an item and modifies it
/// in-place.
- ItemModifier(Box<ItemModifier + 'static>),
+ Modifier(Box<ItemModifier + 'static>),
/// A normal, function-like syntax extension.
///
builtin_normal_expander(
ext::log_syntax::expand_syntax_ext));
syntax_expanders.insert(intern("deriving"),
- ItemDecorator(box ext::deriving::expand_meta_deriving));
+ Decorator(box ext::deriving::expand_meta_deriving));
// Quasi-quoting expanders
syntax_expanders.insert(intern("quote_tokens"),
pub struct ExtCtxt<'a> {
pub parse_sess: &'a parse::ParseSess,
pub cfg: ast::CrateConfig,
- pub backtrace: Option<Gc<ExpnInfo>>,
+ pub backtrace: ExpnId,
pub ecfg: expand::ExpansionConfig,
pub mod_path: Vec<ast::Ident> ,
ExtCtxt {
parse_sess: parse_sess,
cfg: cfg,
- backtrace: None,
+ backtrace: NO_EXPANSION,
mod_path: Vec::new(),
ecfg: ecfg,
trace_mac: false,
pub fn parse_sess(&self) -> &'a parse::ParseSess { self.parse_sess }
pub fn cfg(&self) -> ast::CrateConfig { self.cfg.clone() }
pub fn call_site(&self) -> Span {
- match self.backtrace {
+ self.codemap().with_expn_info(self.backtrace, |ei| match ei {
Some(expn_info) => expn_info.call_site,
None => self.bug("missing top span")
- }
+ })
}
pub fn print_backtrace(&self) { }
- pub fn backtrace(&self) -> Option<Gc<ExpnInfo>> { self.backtrace }
+ pub fn backtrace(&self) -> ExpnId { self.backtrace }
+    /// Walk the expansion backtrace (`self.backtrace`) to its outermost
+    /// frame and return that frame's call site. Fails (via `expect`) if
+    /// there is no expansion info at all.
+    pub fn original_span(&self) -> Span {
+        let mut expn_id = self.backtrace;
+        let mut call_site = None;
+        loop {
+            match self.codemap().with_expn_info(expn_id, |ei| ei.map(|ei| ei.call_site)) {
+                None => break,
+                Some(cs) => {
+                    call_site = Some(cs);
+                    expn_id = cs.expn_id;
+                }
+            }
+        }
+        call_site.expect("missing expansion backtrace")
+    }
+    /// Like `original_span`, but stops walking at an `include` expansion
+    /// so the returned call site stays inside the included file.
+    pub fn original_span_in_file(&self) -> Span {
+        let mut expn_id = self.backtrace;
+        let mut call_site = None;
+        loop {
+            let expn_info = self.codemap().with_expn_info(expn_id, |ei| {
+                ei.map(|ei| (ei.call_site, ei.callee.name.as_slice() == "include"))
+            });
+            match expn_info {
+                None => break,
+                Some((cs, is_include)) => {
+                    if is_include {
+                        // Don't recurse into file using "include!".
+                        break;
+                    }
+                    call_site = Some(cs);
+                    expn_id = cs.expn_id;
+                }
+            }
+        }
+        call_site.expect("missing expansion backtrace")
+    }
+
pub fn mod_push(&mut self, i: ast::Ident) { self.mod_path.push(i); }
pub fn mod_pop(&mut self) { self.mod_path.pop().unwrap(); }
pub fn mod_path(&self) -> Vec<ast::Ident> {
v.extend(self.mod_path.iter().map(|a| *a));
return v;
}
- pub fn bt_push(&mut self, ei: codemap::ExpnInfo) {
- match ei {
- ExpnInfo {call_site: cs, callee: ref callee} => {
- self.backtrace =
- Some(box(GC) ExpnInfo {
- call_site: Span {lo: cs.lo, hi: cs.hi,
- expn_info: self.backtrace.clone()},
- callee: (*callee).clone()
- });
- }
- }
+    /// Push an expansion frame: rewrite the frame's call-site span so its
+    /// `expn_id` points at the current backtrace, then record the frame
+    /// in the codemap and make it the new backtrace head.
+    pub fn bt_push(&mut self, ei: ExpnInfo) {
+        let mut call_site = ei.call_site;
+        call_site.expn_id = self.backtrace;
+        self.backtrace = self.codemap().record_expansion(ExpnInfo {
+            call_site: call_site,
+            callee: ei.callee
+        });
    }
pub fn bt_pop(&mut self) {
match self.backtrace {
- Some(expn_info) => self.backtrace = expn_info.call_site.expn_info,
- _ => self.bug("tried to pop without a push")
+ NO_EXPANSION => self.bug("tried to pop without a push"),
+ expn_id => {
+ self.backtrace = self.codemap().with_expn_info(expn_id, |expn_info| {
+ expn_info.map_or(NO_EXPANSION, |ei| ei.call_site.expn_id)
+ });
+ }
}
}
/// Emit `msg` attached to `sp`, and stop compilation immediately.
}
fn find_escape_frame<'a>(&'a mut self) -> &'a mut MapChainFrame {
- for (i, frame) in self.chain.mut_iter().enumerate().rev() {
+ for (i, frame) in self.chain.iter_mut().enumerate().rev() {
if !frame.info.macros_escape || i == 0 {
return frame
}
types: Vec<P<ast::Ty>> )
-> ast::Path {
let last_identifier = idents.pop().unwrap();
- let mut segments: Vec<ast::PathSegment> = idents.move_iter()
+ let mut segments: Vec<ast::PathSegment> = idents.into_iter()
.map(|ident| {
ast::PathSegment {
identifier: ident,
let field_span = Span {
lo: sp.lo - Pos::from_uint(field_name.get().len()),
hi: sp.hi,
- expn_info: sp.expn_info,
+ expn_id: sp.expn_id,
};
let id = Spanned { node: ident, span: field_span };
let field_span = Span {
lo: sp.lo - Pos::from_uint(idx.to_string().len()),
hi: sp.hi,
- expn_info: sp.expn_info,
+ expn_id: sp.expn_id,
};
let id = Spanned { node: idx, span: field_span };
}
fn variant(&self, span: Span, name: Ident, tys: Vec<P<ast::Ty>> ) -> ast::Variant {
- let args = tys.move_iter().map(|ty| {
+ let args = tys.into_iter().map(|ty| {
ast::VariantArg { ty: ty, id: ast::DUMMY_NODE_ID }
}).collect();
None => return base::DummyResult::expr(sp)
};
let mut accumulator = String::new();
- for e in es.move_iter() {
+ for e in es.into_iter() {
match e.node {
ast::ExprLit(ref lit) => {
match lit.node {
cs_same_method(|cx, span, exprs| {
// create `a.<method>(); b.<method>(); c.<method>(); ...`
// (where method is `assert_receiver_is_total_eq`)
- let stmts = exprs.move_iter().map(|e| cx.stmt_expr(e)).collect();
+ let stmts = exprs.into_iter().map(|e| cx.stmt_expr(e)).collect();
let block = cx.block(span, stmts, None);
cx.expr_block(block)
},
//! ~~~
use std::cell::RefCell;
-use std::gc::GC;
use std::vec;
use abi::Abi;
ast::ItemImpl(trait_generics,
opt_trait_ref,
self_type,
- methods.move_iter()
+ methods.into_iter()
.map(|method| {
ast::MethodImplItem(method)
}).collect()))
_ => Some(ast::Arg::new_self(trait_.span, ast::MutImmutable, special_idents::self_))
};
let args = {
- let args = arg_types.move_iter().map(|(name, ty)| {
+ let args = arg_types.into_iter().map(|(name, ty)| {
cx.arg(trait_.span, name, ty)
});
- self_arg.move_iter().chain(args).collect()
+ self_arg.into_iter().chain(args).collect()
};
let ret_type = self.get_ret_ty(cx, trait_, generics, type_ident);
// transpose raw_fields
let fields = if raw_fields.len() > 0 {
- let mut raw_fields = raw_fields.move_iter().map(|v| v.move_iter());
+ let mut raw_fields = raw_fields.into_iter().map(|v| v.into_iter());
let first_field = raw_fields.next().unwrap();
let mut other_fields: Vec<vec::MoveItems<(Span, Option<Ident>, P<Expr>)>>
= raw_fields.collect();
span: span,
name: opt_id,
self_: field,
- other: other_fields.mut_iter().map(|l| {
+ other: other_fields.iter_mut().map(|l| {
match l.next().unwrap() {
(_, _, ex) => ex
}
// The transposition is driven by walking across the
// arg fields of the variant for the first self pat.
- let field_tuples = first_self_pat_idents.move_iter().enumerate()
+ let field_tuples = first_self_pat_idents.into_iter().enumerate()
// For each arg field of self, pull out its getter expr ...
.map(|(field_index, (sp, opt_ident, self_getter_expr))| {
// ... but FieldInfo also wants getter expr
None => cx.span_bug(self.span, "trait with empty path in generic `deriving`"),
Some(name) => *name
};
- to_set.expn_info = Some(box(GC) codemap::ExpnInfo {
+ to_set.expn_id = cx.codemap().record_expansion(codemap::ExpnInfo {
call_site: to_set,
callee: codemap::NameAndSpan {
name: format!("deriving({})", trait_name),
// struct_type is definitely not Unknown, since struct_def.fields
// must be nonempty to reach here
let pattern = if struct_type == Record {
- let field_pats = subpats.move_iter().zip(ident_expr.iter()).map(|(pat, &(_, id, _))| {
+ let field_pats = subpats.into_iter().zip(ident_expr.iter()).map(|(pat, &(_, id, _))| {
// id is guaranteed to be Some
ast::FieldPat { ident: id.unwrap(), pat: pat }
}).collect();
cs_same_method(
|cx, span, vals| {
if use_foldl {
- vals.move_iter().fold(base.clone(), |old, new| {
+ vals.into_iter().fold(base.clone(), |old, new| {
f(cx, span, old, new)
})
} else {
- vals.move_iter().rev().fold(base.clone(), |old, new| {
+ vals.into_iter().rev().fold(base.clone(), |old, new| {
f(cx, span, old, new)
})
}
return DummyResult::expr(sp);
}
None => return DummyResult::expr(sp),
- Some(exprs) => exprs.move_iter()
+ Some(exprs) => exprs.into_iter()
};
let var = match expr_to_string(cx,
use visit;
use visit::Visitor;
-use std::gc::Gc;
-
enum Either<L,R> {
Left(L),
Right(R)
// be the root of the call stack. That's the most
// relevant span and it's the actual invocation of
// the macro.
- let mac_span = original_span(fld.cx);
+ let mac_span = fld.cx.original_span();
let opt_parsed = {
let expanded = expandfun.expand(fld.cx,
- mac_span.call_site,
+ mac_span,
marked_before.as_slice());
parse_thunk(expanded)
};
match fld.cx.syntax_env.find(&intern(mname.get())) {
Some(rc) => match *rc {
- ItemDecorator(ref dec) => {
+ Decorator(ref dec) => {
attr::mark_used(attr);
fld.cx.bt_push(ExpnInfo {
let mut items: SmallVector<P<ast::Item>> = SmallVector::zero();
dec.expand(fld.cx, attr.span, &*attr.node.value, &*it,
|item| items.push(item));
- decorator_items.extend(items.move_iter()
- .flat_map(|item| expand_item(item, fld).move_iter()));
+ decorator_items.extend(items.into_iter()
+ .flat_map(|item| expand_item(item, fld).into_iter()));
fld.cx.bt_pop();
}
// partition the attributes into ItemModifiers and others
let (modifiers, other_attrs) = it.attrs.partitioned(|attr| {
match fld.cx.syntax_env.find(&intern(attr.name().get())) {
- Some(rc) => match *rc { ItemModifier(_) => true, _ => false },
+ Some(rc) => match *rc { Modifier(_) => true, _ => false },
_ => false
}
});
match fld.cx.syntax_env.find(&intern(mname.get())) {
Some(rc) => match *rc {
- ItemModifier(ref mac) => {
+ Modifier(ref mac) => {
attr::mark_used(attr);
fld.cx.bt_push(ExpnInfo {
call_site: attr.span,
SmallVector::zero()
}
Right(Some(items)) => {
- items.move_iter()
+ items.into_iter()
.map(|i| mark_item(i, fm))
- .flat_map(|i| fld.fold_item(i).move_iter())
+ .flat_map(|i| fld.fold_item(i).into_iter())
.collect()
}
Right(None) => {
fld.cx.bt_pop();
if semi {
- fully_expanded.move_iter().map(|s| s.map(|Spanned {node, span}| {
+ fully_expanded.into_iter().map(|s| s.map(|Spanned {node, span}| {
Spanned {
node: match node {
StmtExpr(e, stmt_id) => StmtSemi(e, stmt_id),
// all of the pats must have the same set of bindings, so use the
// first one to extract them and generate new names:
let idents = pattern_bindings(&**expanded_pats.get(0));
- let new_renames = idents.move_iter().map(|id| (id, fresh_name(&id))).collect();
+ let new_renames = idents.into_iter().map(|id| (id, fresh_name(&id))).collect();
// apply the renaming, but only to the PatIdents:
let mut rename_pats_fld = PatIdentRenamer{renames:&new_renames};
let rewritten_pats = expanded_pats.move_map(|pat| rename_pats_fld.fold_pat(pat));
// expand the elements of a block.
pub fn expand_block_elts(b: P<Block>, fld: &mut MacroExpander) -> P<Block> {
b.map(|Block {id, view_items, stmts, expr, rules, span}| {
- let new_view_items = view_items.move_iter().map(|x| fld.fold_view_item(x)).collect();
- let new_stmts = stmts.move_iter().flat_map(|x| {
+ let new_view_items = view_items.into_iter().map(|x| fld.fold_view_item(x)).collect();
+ let new_stmts = stmts.into_iter().flat_map(|x| {
// perform all pending renames
let renamed_stmt = {
let pending_renames = &mut fld.cx.syntax_env.info().pending_renames;
rename_fld.fold_stmt(x).expect_one("rename_fold didn't return one value")
};
// expand macros in the statement
- fld.fold_stmt(renamed_stmt).move_iter()
+ fld.fold_stmt(renamed_stmt).into_iter()
}).collect();
let new_expr = expr.map(|x| {
let expr = {
let fm = fresh_mark();
let marked_before = mark_tts(tts.as_slice(), fm);
- let mac_span = original_span(fld.cx);
+ let mac_span = fld.cx.original_span();
let expanded = match expander.expand(fld.cx,
- mac_span.call_site,
+ mac_span,
marked_before.as_slice()).make_pat() {
Some(e) => e,
None => {
};
// expand again if necessary
- new_methods.move_iter().flat_map(|m| fld.fold_method(m).move_iter()).collect()
+ new_methods.into_iter().flat_map(|m| fld.fold_method(m).into_iter()).collect()
}
})
}
Span {
lo: sp.lo,
hi: sp.hi,
- expn_info: cx.backtrace(),
+ expn_id: cx.backtrace(),
}
}
cx: &mut cx,
};
- for ExportedMacros { crate_name, macros } in imported_macros.move_iter() {
+ for ExportedMacros { crate_name, macros } in imported_macros.into_iter() {
let name = format!("<{} macros>", token::get_ident(crate_name))
.into_string();
- for source in macros.move_iter() {
+ for source in macros.into_iter() {
let item = parse::parse_item_from_source_str(name.clone(),
source,
expander.cx.cfg(),
}
}
- for (name, extension) in user_exts.move_iter() {
+ for (name, extension) in user_exts.into_iter() {
expander.cx.syntax_env.insert(name, extension);
}
.expect_one("marking an item didn't return exactly one method")
}
-fn original_span(cx: &ExtCtxt) -> Gc<codemap::ExpnInfo> {
- let mut relevant_info = cx.backtrace();
- let mut einfo = relevant_info.unwrap();
- loop {
- match relevant_info {
- None => { break }
- Some(e) => {
- einfo = e;
- relevant_info = einfo.call_site.expn_info;
- }
- }
- }
- return einfo;
-}
-
/// Check that there are no macro invocations left in the AST:
pub fn check_for_macros(sess: &parse::ParseSess, krate: &ast::Crate) {
visit::walk_crate(&mut MacroExterminator{sess:sess}, krate);
use ptr::P;
use std::collections::HashMap;
+use std::string;
#[deriving(PartialEq)]
enum ArgumentType {
- Known(String),
+ Known(string::String),
Unsigned,
String,
}
enum Position {
Exact(uint),
- Named(String),
+ Named(string::String),
}
struct Context<'a, 'b:'a> {
/// Note that we keep a side-array of the ordering of the named arguments
/// found to be sure that we can translate them in the same order that they
/// were declared in.
- names: HashMap<String, P<ast::Expr>>,
- name_types: HashMap<String, ArgumentType>,
- name_ordering: Vec<String>,
+ names: HashMap<string::String, P<ast::Expr>>,
+ name_types: HashMap<string::String, ArgumentType>,
+ name_ordering: Vec<string::String>,
/// The latest consecutive literal strings, or empty if there weren't any.
- literal: String,
+ literal: string::String,
/// Collection of the compiled `rt::Argument` structures
pieces: Vec<P<ast::Expr>>,
/// Stays `true` if all formatting parameters are default (as in "{}{}").
all_pieces_simple: bool,
- name_positions: HashMap<String, uint>,
+ name_positions: HashMap<string::String, uint>,
method_statics: Vec<P<ast::Item>>,
/// Updated as arguments are consumed or methods are entered
/// named arguments))
fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool,
tts: &[ast::TokenTree])
- -> (Invocation, Option<(P<ast::Expr>, Vec<P<ast::Expr>>, Vec<String>,
- HashMap<String, P<ast::Expr>>)>) {
+ -> (Invocation, Option<(P<ast::Expr>, Vec<P<ast::Expr>>, Vec<string::String>,
+ HashMap<string::String, P<ast::Expr>>)>) {
let mut args = Vec::new();
- let mut names = HashMap::<String, P<ast::Expr>>::new();
+ let mut names = HashMap::<string::String, P<ast::Expr>>::new();
let mut order = Vec::new();
let mut p = ecx.new_parser_from_tts(tts);
fn verify_piece(&mut self, p: &parse::Piece) {
match *p {
parse::String(..) => {}
- parse::Argument(ref arg) => {
+ parse::NextArgument(ref arg) => {
// width/precision first, if they have implicit positional
// parameters it makes more sense to consume them first.
self.verify_count(arg.format.width);
}
}
- fn describe_num_args(&self) -> String {
+ fn describe_num_args(&self) -> string::String {
match self.args.len() {
0 => "no arguments given".to_string(),
1 => "there is 1 argument".to_string(),
self.literal.push_str(s);
None
}
- parse::Argument(ref arg) => {
+ parse::NextArgument(ref arg) => {
// Translate the position
let pos = match arg.position {
// These two have a direct mapping
let mut heads = Vec::new();
// First, declare all of our methods that are statics
- for method in self.method_statics.move_iter() {
+ for method in self.method_statics.into_iter() {
let decl = respan(self.fmtsp, ast::DeclItem(method));
lets.push(P(respan(self.fmtsp,
ast::StmtDecl(P(decl), ast::DUMMY_NODE_ID))));
// format! string are shoved into locals. Furthermore, we shove the address
// of each variable because we don't want to move out of the arguments
// passed to this function.
- for (i, e) in self.args.move_iter().enumerate() {
+ for (i, e) in self.args.into_iter().enumerate() {
let arg_ty = match self.arg_types.get(i).as_ref() {
Some(ty) => ty,
None => continue // error already generated
// Now create a vector containing all the arguments
let slicename = self.ecx.ident_of("__args_vec");
{
- let args = names.move_iter().map(|a| a.unwrap());
- let mut args = locals.move_iter().chain(args);
+ let args = names.into_iter().map(|a| a.unwrap());
+ let mut args = locals.into_iter().chain(args);
let args = self.ecx.expr_vec_slice(self.fmtsp, args.collect());
lets.push(self.ecx.stmt_let(self.fmtsp, false, slicename, args));
}
invocation: Invocation,
efmt: P<ast::Expr>,
args: Vec<P<ast::Expr>>,
- name_ordering: Vec<String>,
- names: HashMap<String, P<ast::Expr>>)
+ name_ordering: Vec<string::String>,
+ names: HashMap<string::String, P<ast::Expr>>)
-> P<ast::Expr> {
let arg_types = Vec::from_fn(args.len(), |_| None);
let mut cx = Context {
name_ordering: name_ordering,
nest_level: 0,
next_arg: 0,
- literal: String::new(),
+ literal: string::String::new(),
pieces: Vec::new(),
str_pieces: Vec::new(),
all_pieces_simple: true,
impl<T: ToTokens> ToTokens for Vec<T> {
fn to_tokens(&self, cx: &ExtCtxt) -> Vec<TokenTree> {
- let a = self.iter().flat_map(|t| t.to_tokens(cx).move_iter());
+ let a = self.iter().flat_map(|t| t.to_tokens(cx).into_iter());
FromIterator::from_iter(a)
}
}
use ast;
use codemap;
use codemap::{Pos, Span};
-use codemap::{ExpnInfo, NameAndSpan};
use ext::base::*;
use ext::base;
use ext::build::AstBuilder;
use parse::token;
use print::pprust;
-use std::gc::Gc;
use std::io::File;
use std::rc::Rc;
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "line!");
- let topmost = topmost_expn_info(cx.backtrace().unwrap());
- let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
+ let topmost = cx.original_span_in_file();
+ let loc = cx.codemap().lookup_char_pos(topmost.lo);
- base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.line))
+ base::MacExpr::new(cx.expr_uint(topmost, loc.line))
}
/* col!(): expands to the current column number */
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "col!");
- let topmost = topmost_expn_info(cx.backtrace().unwrap());
- let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
- base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint()))
+ let topmost = cx.original_span_in_file();
+ let loc = cx.codemap().lookup_char_pos(topmost.lo);
+ base::MacExpr::new(cx.expr_uint(topmost, loc.col.to_uint()))
}
/// file!(): expands to the current filename */
-> Box<base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "file!");
- let topmost = topmost_expn_info(cx.backtrace().unwrap());
- let loc = cx.codemap().lookup_char_pos(topmost.call_site.lo);
+ let topmost = cx.original_span_in_file();
+ let loc = cx.codemap().lookup_char_pos(topmost.lo);
let filename = token::intern_and_get_ident(loc.file.name.as_slice());
- base::MacExpr::new(cx.expr_str(topmost.call_site, filename))
+ base::MacExpr::new(cx.expr_str(topmost, filename))
}
pub fn expand_stringify(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
}
}
-// recur along an ExpnInfo chain to find the original expression
-fn topmost_expn_info(expn_info: Gc<codemap::ExpnInfo>) -> Gc<codemap::ExpnInfo> {
- match *expn_info {
- ExpnInfo { call_site: ref call_site, .. } => {
- match call_site.expn_info {
- Some(next_expn_info) => {
- match *next_expn_info {
- ExpnInfo {
- callee: NameAndSpan { name: ref name, .. },
- ..
- } => {
- // Don't recurse into file using "include!"
- if "include" == name.as_slice() {
- expn_info
- } else {
- topmost_expn_info(next_expn_info)
- }
- }
- }
- },
- None => expn_info
- }
- }
- }
-}
-
// resolve a file-system path to an absolute file-system path (if it
// isn't already)
fn res_rel_file(cx: &mut ExtCtxt, sp: codemap::Span, arg: &Path) -> Path {
if token_name_eq(&tok, &EOF) {
if eof_eis.len() == 1u {
let mut v = Vec::new();
- for dv in eof_eis.get_mut(0).matches.mut_iter() {
+ for dv in eof_eis.get_mut(0).matches.iter_mut() {
v.push(dv.pop().unwrap());
}
return Success(nameize(sess, ms, v.as_slice()));
let mut parser = self.parser.borrow_mut();
match parser.token {
EOF => break,
- _ => ret.push(parser.parse_method(None))
+ _ => {
+ let attrs = parser.parse_outer_attributes();
+ ret.push(parser.parse_method(attrs, ast::Inherited))
+ }
}
}
self.ensure_complete_parse(false);
/* done with this set; pop or repeat? */
if should_pop {
let prev = r.stack.pop().unwrap();
- match r.stack.mut_last() {
+ match r.stack.last_mut() {
None => {
r.cur_tok = EOF;
return ret_val;
r.repeat_len.pop();
}
} else { /* repeat */
- *r.repeat_idx.mut_last().unwrap() += 1u;
- r.stack.mut_last().unwrap().idx = 0;
+ *r.repeat_idx.last_mut().unwrap() += 1u;
+ r.stack.last_mut().unwrap().idx = 0;
match r.stack.last().unwrap().sep.clone() {
Some(tk) => {
r.cur_tok = tk; /* repeat same span, I guess */
TTTok(sp, tok) => {
r.cur_span = sp;
r.cur_tok = tok;
- r.stack.mut_last().unwrap().idx += 1;
+ r.stack.last_mut().unwrap().idx += 1;
return ret_val;
}
TTSeq(sp, tts, sep, zerok) => {
"this must repeat at least once");
}
- r.stack.mut_last().unwrap().idx += 1;
+ r.stack.last_mut().unwrap().idx += 1;
return tt_next_token(r);
}
r.repeat_len.push(len);
}
// FIXME #2887: think about span stuff here
TTNonterminal(sp, ident) => {
- r.stack.mut_last().unwrap().idx += 1;
+ r.stack.last_mut().unwrap().idx += 1;
match *lookup_cur_matched(r, ident) {
/* sidestep the interpolation tricks for ident because
(a) idents can be in lots of places, so it'd be a pain
--- /dev/null
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Feature gating
+//!
+//! This modules implements the gating necessary for preventing certain compiler
+//! features from being used by default. This module will crawl a pre-expanded
+//! AST to ensure that there are no features which are used that are not
+//! enabled.
+//!
+//! Features are enabled in programs via the crate-level attributes of
+//! `#![feature(...)]` with a comma-separated list of features.
+
+use abi::RustIntrinsic;
+use ast::NodeId;
+use ast;
+use attr;
+use attr::AttrMetaMethods;
+use codemap::Span;
+use diagnostic::SpanHandler;
+use visit;
+use visit::Visitor;
+use parse::token;
+
+use std::slice;
+
+/// This is a list of all known features since the beginning of time. This list
+/// can never shrink, it may only be expanded (in order to prevent old programs
+/// from failing to compile). The status of each feature may change, however.
+///
+/// Each entry is a `(name, status)` pair; `check_crate` below matches the
+/// words of crate-level `#![feature(...)]` attributes against these names.
+static KNOWN_FEATURES: &'static [(&'static str, Status)] = &[
+    ("globs", Active),
+    ("macro_rules", Active),
+    ("struct_variant", Active),
+    ("once_fns", Active),
+    ("asm", Active),
+    ("managed_boxes", Active),
+    ("non_ascii_idents", Active),
+    ("thread_local", Active),
+    ("link_args", Active),
+    ("phase", Active),
+    ("plugin_registrar", Active),
+    ("log_syntax", Active),
+    ("trace_macros", Active),
+    ("concat_idents", Active),
+    ("unsafe_destructor", Active),
+    ("intrinsics", Active),
+    ("lang_items", Active),
+
+    ("simd", Active),
+    ("default_type_params", Active),
+    ("quote", Active),
+    ("linkage", Active),
+    ("struct_inherit", Active),
+    ("overloaded_calls", Active),
+    ("unboxed_closure_sugar", Active),
+
+    ("quad_precision_float", Removed),
+
+    ("rustc_diagnostic_macros", Active),
+    ("unboxed_closures", Active),
+    ("import_shadowing", Active),
+    ("advanced_slice_patterns", Active),
+    ("tuple_indexing", Active),
+    ("associated_types", Active),
+
+    // if you change this list without updating src/doc/rust.md, cmr will be sad
+
+    // A temporary feature gate used to enable parser extensions needed
+    // to bootstrap fix for #5723.
+    ("issue_5723_bootstrap", Accepted),
+
+    // These are used to test this portion of the compiler, they don't actually
+    // mean anything
+    ("test_accepted_feature", Accepted),
+    ("test_removed_feature", Removed),
+];
+
+/// Maturity status of a feature gate; determines how `check_crate` reacts
+/// when the feature is named in a `#![feature(...)]` attribute.
+enum Status {
+    /// Represents an active feature that is currently being implemented or
+    /// currently being considered for addition/removal.
+    Active,
+
+    /// Represents a feature which has since been removed (it was once Active)
+    Removed,
+
+    /// This language feature has since been Accepted (it was once Active)
+    Accepted,
+}
+
+/// A set of features to be used by later passes.
+///
+/// Only the features that downstream compiler passes actually consult are
+/// surfaced here; the full enabled set lives in `Context::features` during
+/// the walk.
+pub struct Features {
+    pub default_type_params: bool,
+    pub overloaded_calls: bool,
+    pub rustc_diagnostic_macros: bool,
+    pub import_shadowing: bool,
+}
+
+impl Features {
+    /// All features disabled; flags are flipped on by `check_crate`.
+    pub fn new() -> Features {
+        Features {
+            default_type_params: false,
+            overloaded_calls: false,
+            rustc_diagnostic_macros: false,
+            import_shadowing: false,
+        }
+    }
+}
+
+/// Visitor state: the feature names enabled via `#![feature(...)]` plus the
+/// handler used to emit gating diagnostics.
+struct Context<'a> {
+    features: Vec<&'static str>,
+    span_handler: &'a SpanHandler,
+}
+
+impl<'a> Context<'a> {
+    /// Emit an error (and a how-to-enable note) at `span` unless `feature`
+    /// has been enabled for this crate.
+    fn gate_feature(&self, feature: &str, span: Span, explain: &str) {
+        if !self.has_feature(feature) {
+            self.span_handler.span_err(span, explain);
+            self.span_handler.span_note(span, format!("add #![feature({})] to the \
+                                                       crate attributes to enable",
+                                                      feature).as_slice());
+        }
+    }
+
+    /// Shared gate for managed-box (`@`) syntax, used from both the type and
+    /// expression visitors.
+    fn gate_box(&self, span: Span) {
+        self.gate_feature("managed_boxes", span,
+                          "The managed box syntax is being replaced by the \
+                           `std::gc::Gc` and `std::rc::Rc` types. Equivalent \
+                           functionality to managed trait objects will be \
+                           implemented but is currently missing.");
+    }
+
+    /// Linear scan is fine: the enabled-feature list is tiny.
+    fn has_feature(&self, feature: &str) -> bool {
+        self.features.iter().any(|n| n.as_slice() == feature)
+    }
+}
+
+impl<'a, 'v> Visitor<'v> for Context<'a> {
+ fn visit_ident(&mut self, sp: Span, id: ast::Ident) {
+ if !token::get_ident(id).get().is_ascii() {
+ self.gate_feature("non_ascii_idents", sp,
+ "non-ascii idents are not fully supported.");
+ }
+ }
+
+ fn visit_view_item(&mut self, i: &ast::ViewItem) {
+ match i.node {
+ ast::ViewItemUse(ref path) => {
+ match path.node {
+ ast::ViewPathGlob(..) => {
+ self.gate_feature("globs", path.span,
+ "glob import statements are \
+ experimental and possibly buggy");
+ }
+ _ => {}
+ }
+ }
+ ast::ViewItemExternCrate(..) => {
+ for attr in i.attrs.iter() {
+ if attr.name().get() == "phase"{
+ self.gate_feature("phase", attr.span,
+ "compile time crate loading is \
+ experimental and possibly buggy");
+ }
+ }
+ }
+ }
+ visit::walk_view_item(self, i)
+ }
+
+ fn visit_item(&mut self, i: &ast::Item) {
+ for attr in i.attrs.iter() {
+ if attr.name().equiv(&("thread_local")) {
+ self.gate_feature("thread_local", i.span,
+ "`#[thread_local]` is an experimental feature, and does not \
+ currently handle destructors. There is no corresponding \
+ `#[task_local]` mapping to the task model");
+ }
+ }
+ match i.node {
+ ast::ItemEnum(ref def, _) => {
+ for variant in def.variants.iter() {
+ match variant.node.kind {
+ ast::StructVariantKind(..) => {
+ self.gate_feature("struct_variant", variant.span,
+ "enum struct variants are \
+ experimental and possibly buggy");
+ }
+ _ => {}
+ }
+ }
+ }
+
+ ast::ItemForeignMod(ref foreign_module) => {
+ if attr::contains_name(i.attrs.as_slice(), "link_args") {
+ self.gate_feature("link_args", i.span,
+ "the `link_args` attribute is not portable \
+ across platforms, it is recommended to \
+ use `#[link(name = \"foo\")]` instead")
+ }
+ if foreign_module.abi == RustIntrinsic {
+ self.gate_feature("intrinsics",
+ i.span,
+ "intrinsics are subject to change")
+ }
+ }
+
+ ast::ItemFn(..) => {
+ if attr::contains_name(i.attrs.as_slice(), "plugin_registrar") {
+ self.gate_feature("plugin_registrar", i.span,
+ "compiler plugins are experimental and possibly buggy");
+ }
+ }
+
+ ast::ItemStruct(ref struct_definition, _) => {
+ if attr::contains_name(i.attrs.as_slice(), "simd") {
+ self.gate_feature("simd", i.span,
+ "SIMD types are experimental and possibly buggy");
+ }
+ match struct_definition.super_struct {
+ Some(ref path) => self.gate_feature("struct_inherit", path.span,
+ "struct inheritance is experimental \
+ and possibly buggy"),
+ None => {}
+ }
+ if struct_definition.is_virtual {
+ self.gate_feature("struct_inherit", i.span,
+ "struct inheritance (`virtual` keyword) is \
+ experimental and possibly buggy");
+ }
+ }
+
+ ast::ItemImpl(_, _, _, ref items) => {
+ if attr::contains_name(i.attrs.as_slice(),
+ "unsafe_destructor") {
+ self.gate_feature("unsafe_destructor",
+ i.span,
+ "`#[unsafe_destructor]` allows too \
+ many unsafe patterns and may be \
+ removed in the future");
+ }
+
+ for item in items.iter() {
+ match *item {
+ ast::MethodImplItem(_) => {}
+ ast::TypeImplItem(ref typedef) => {
+ self.gate_feature("associated_types",
+ typedef.span,
+ "associated types are \
+ experimental")
+ }
+ }
+ }
+ }
+
+ _ => {}
+ }
+
+ visit::walk_item(self, i);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) {
+ match *trait_item {
+ ast::RequiredMethod(_) | ast::ProvidedMethod(_) => {}
+ ast::TypeTraitItem(ref ti) => {
+ self.gate_feature("associated_types",
+ ti.span,
+ "associated types are experimental")
+ }
+ }
+ }
+
+ fn visit_mac(&mut self, macro: &ast::Mac) {
+ let ast::MacInvocTT(ref path, _, _) = macro.node;
+ let id = path.segments.last().unwrap().identifier;
+ let quotes = ["quote_tokens", "quote_expr", "quote_ty",
+ "quote_item", "quote_pat", "quote_stmt"];
+ let msg = " is not stable enough for use and are subject to change";
+
+
+ if id == token::str_to_ident("macro_rules") {
+ self.gate_feature("macro_rules", path.span, "macro definitions are \
+ not stable enough for use and are subject to change");
+ }
+
+ else if id == token::str_to_ident("asm") {
+ self.gate_feature("asm", path.span, "inline assembly is not \
+ stable enough for use and is subject to change");
+ }
+
+ else if id == token::str_to_ident("log_syntax") {
+ self.gate_feature("log_syntax", path.span, "`log_syntax!` is not \
+ stable enough for use and is subject to change");
+ }
+
+ else if id == token::str_to_ident("trace_macros") {
+ self.gate_feature("trace_macros", path.span, "`trace_macros` is not \
+ stable enough for use and is subject to change");
+ }
+
+ else if id == token::str_to_ident("concat_idents") {
+ self.gate_feature("concat_idents", path.span, "`concat_idents` is not \
+ stable enough for use and is subject to change");
+ }
+
+ else {
+ for "e in quotes.iter() {
+ if id == token::str_to_ident(quote) {
+ self.gate_feature("quote",
+ path.span,
+ format!("{}{}", quote, msg).as_slice());
+ }
+ }
+ }
+ }
+
+ fn visit_foreign_item(&mut self, i: &ast::ForeignItem) {
+ if attr::contains_name(i.attrs.as_slice(), "linkage") {
+ self.gate_feature("linkage", i.span,
+ "the `linkage` attribute is experimental \
+ and not portable across platforms")
+ }
+ visit::walk_foreign_item(self, i)
+ }
+
+ fn visit_ty(&mut self, t: &ast::Ty) {
+ match t.node {
+ ast::TyClosure(ref closure) if closure.onceness == ast::Once => {
+ self.gate_feature("once_fns", t.span,
+ "once functions are \
+ experimental and likely to be removed");
+
+ },
+ ast::TyBox(_) => { self.gate_box(t.span); }
+ ast::TyUnboxedFn(..) => {
+ self.gate_feature("unboxed_closure_sugar",
+ t.span,
+ "unboxed closure trait sugar is experimental");
+ }
+ _ => {}
+ }
+
+ visit::walk_ty(self, t);
+ }
+
+ fn visit_expr(&mut self, e: &ast::Expr) {
+ match e.node {
+ ast::ExprUnary(ast::UnBox, _) => {
+ self.gate_box(e.span);
+ }
+ ast::ExprUnboxedFn(..) => {
+ self.gate_feature("unboxed_closures",
+ e.span,
+ "unboxed closures are a work-in-progress \
+ feature with known bugs");
+ }
+ ast::ExprTupField(..) => {
+ self.gate_feature("tuple_indexing",
+ e.span,
+ "tuple indexing is experimental");
+ }
+ _ => {}
+ }
+ visit::walk_expr(self, e);
+ }
+
+ fn visit_generics(&mut self, generics: &ast::Generics) {
+ for type_parameter in generics.ty_params.iter() {
+ match type_parameter.default {
+ Some(ref ty) => {
+ self.gate_feature("default_type_params", ty.span,
+ "default type parameters are \
+ experimental and possibly buggy");
+ }
+ None => {}
+ }
+ }
+ visit::walk_generics(self, generics);
+ }
+
+ fn visit_attribute(&mut self, attr: &ast::Attribute) {
+ if attr::contains_name(slice::ref_slice(attr), "lang") {
+ self.gate_feature("lang_items",
+ attr.span,
+ "language items are subject to change");
+ }
+ }
+
+ fn visit_pat(&mut self, pattern: &ast::Pat) {
+ match pattern.node {
+ ast::PatVec(_, Some(_), ref last) if !last.is_empty() => {
+ self.gate_feature("advanced_slice_patterns",
+ pattern.span,
+ "multiple-element slice matches anywhere \
+ but at the end of a slice (e.g. \
+ `[0, ..xs, 0]` are experimental")
+ }
+ _ => {}
+ }
+ visit::walk_pat(self, pattern)
+ }
+
+ fn visit_fn(&mut self,
+ fn_kind: visit::FnKind<'v>,
+ fn_decl: &'v ast::FnDecl,
+ block: &'v ast::Block,
+ span: Span,
+ _: NodeId) {
+ match fn_kind {
+ visit::FkItemFn(_, _, _, abi) if abi == RustIntrinsic => {
+ self.gate_feature("intrinsics",
+ span,
+ "intrinsics are subject to change")
+ }
+ _ => {}
+ }
+ visit::walk_fn(self, fn_kind, fn_decl, block, span);
+ }
+}
+
+/// Walk `krate` checking every feature-gated construct against the crate's
+/// `#![feature(...)]` attributes.  Returns the `Features` set consulted by
+/// later passes plus the spans of any unknown feature names (the caller
+/// decides how to report those).
+pub fn check_crate(span_handler: &SpanHandler, krate: &ast::Crate) -> (Features, Vec<Span>) {
+    let mut cx = Context {
+        features: Vec::new(),
+        span_handler: span_handler,
+    };
+
+    let mut unknown_features = Vec::new();
+
+    // First collect the declared features from the crate attributes, so the
+    // gating walk below can consult the complete set.
+    for attr in krate.attrs.iter() {
+        if !attr.check_name("feature") {
+            continue
+        }
+
+        match attr.meta_item_list() {
+            None => {
+                span_handler.span_err(attr.span, "malformed feature attribute, \
+                                                  expected #![feature(...)]");
+            }
+            Some(list) => {
+                for mi in list.iter() {
+                    // Each list entry must be a bare word, e.g. `globs`.
+                    let name = match mi.node {
+                        ast::MetaWord(ref word) => (*word).clone(),
+                        _ => {
+                            span_handler.span_err(mi.span,
+                                                  "malformed feature, expected just \
+                                                   one word");
+                            continue
+                        }
+                    };
+                    // Dispatch on the feature's status in KNOWN_FEATURES:
+                    // Active is recorded, Removed errors, Accepted warns,
+                    // and unrecognized names are returned to the caller.
+                    match KNOWN_FEATURES.iter()
+                                        .find(|& &(n, _)| name.equiv(&n)) {
+                        Some(&(name, Active)) => { cx.features.push(name); }
+                        Some(&(_, Removed)) => {
+                            span_handler.span_err(mi.span, "feature has been removed");
+                        }
+                        Some(&(_, Accepted)) => {
+                            span_handler.span_warn(mi.span, "feature has been added to Rust, \
+                                                             directive not necessary");
+                        }
+                        None => {
+                            unknown_features.push(mi.span);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    visit::walk_crate(&mut cx, krate);
+
+    // Distill the enabled set down to the flags later passes care about.
+    (Features {
+        default_type_params: cx.has_feature("default_type_params"),
+        overloaded_calls: cx.has_feature("overloaded_calls"),
+        rustc_diagnostic_macros: cx.has_feature("rustc_diagnostic_macros"),
+        import_shadowing: cx.has_feature("import_shadowing"),
+    },
+    unknown_features)
+}
+
impl<T> MoveMap<T> for Vec<T> {
fn move_map(mut self, f: |T| -> T) -> Vec<T> {
use std::{mem, ptr};
- for p in self.mut_iter() {
+ for p in self.iter_mut() {
unsafe {
// FIXME(#5016) this shouldn't need to zero to be safe.
mem::move_val_init(p, f(ptr::read_and_zero(p)));
noop_fold_where_predicate(where_predicate, self)
}
+    /// Fold an impl-item type definition; defaults to the free function
+    /// `noop_fold_typedef` so implementors only override when needed.
+    fn fold_typedef(&mut self, typedef: Typedef) -> Typedef {
+        noop_fold_typedef(typedef, self)
+    }
+
+    /// Fold a trait-item associated-type declaration; defaults to
+    /// `noop_fold_associated_type`.
+    fn fold_associated_type(&mut self, associated_type: AssociatedType)
+                            -> AssociatedType {
+        noop_fold_associated_type(associated_type, self)
+    }
+
fn new_id(&mut self, i: NodeId) -> NodeId {
i
}
node: DeclLocal(fld.fold_local(l)),
span: fld.new_span(span)
})),
- DeclItem(it) => fld.fold_item(it).move_iter().map(|i| P(Spanned {
+ DeclItem(it) => fld.fold_item(it).into_iter().map(|i| P(Spanned {
node: DeclItem(i),
span: fld.new_span(span)
})).collect()
fld.fold_opt_bounds(bounds),
id)
}
+ TyQPath(ref qpath) => {
+ TyQPath(P(QPath {
+ for_type: fld.fold_ty(qpath.for_type.clone()),
+ trait_name: fld.fold_path(qpath.trait_name.clone()),
+ item_name: fld.fold_ident(qpath.item_name.clone()),
+ }))
+ }
TyFixedLengthVec(ty, e) => {
TyFixedLengthVec(fld.fold_ty(ty), fld.fold_expr(e))
}
})
}
-pub fn noop_fold_ty_param_bound<T: Folder>(tpb: TyParamBound, fld: &mut T)
- -> TyParamBound {
+pub fn noop_fold_ty_param_bound<T>(tpb: TyParamBound, fld: &mut T)
+ -> TyParamBound
+ where T: Folder {
match tpb {
TraitTyParamBound(ty) => TraitTyParamBound(fld.fold_trait_ref(ty)),
RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)),
- UnboxedFnTyParamBound(UnboxedFnTy {decl, kind}) => {
- UnboxedFnTyParamBound(UnboxedFnTy {
- decl: fld.fold_fn_decl(decl),
- kind: kind,
- })
+ UnboxedFnTyParamBound(bound) => {
+ match *bound {
+ UnboxedFnBound {
+ ref path,
+ ref decl,
+ ref_id
+ } => {
+ UnboxedFnTyParamBound(P(UnboxedFnBound {
+ path: fld.fold_path(path.clone()),
+ decl: fld.fold_fn_decl(decl.clone()),
+ ref_id: fld.new_id(ref_id),
+ }))
+ }
+ }
}
}
}
}
}
+/// Default fold for an impl-item `Typedef`: renumber the node id, fold the
+/// span, attributes, ident and type, and carry visibility through unchanged.
+pub fn noop_fold_typedef<T>(t: Typedef, folder: &mut T)
+                            -> Typedef
+                            where T: Folder {
+    let new_id = folder.new_id(t.id);
+    let new_span = folder.new_span(t.span);
+    // Attributes are cloned because the folder takes them by value.
+    let new_attrs = t.attrs.iter().map(|attr| {
+        folder.fold_attribute((*attr).clone())
+    }).collect();
+    let new_ident = folder.fold_ident(t.ident);
+    let new_type = folder.fold_ty(t.typ);
+    ast::Typedef {
+        ident: new_ident,
+        typ: new_type,
+        id: new_id,
+        span: new_span,
+        vis: t.vis,
+        attrs: new_attrs,
+    }
+}
+
+/// Default fold for a trait-item `AssociatedType` declaration: renumber the
+/// node id and fold span, ident and attributes.  (There is no type to fold —
+/// the declaration only names the associated type.)
+pub fn noop_fold_associated_type<T>(at: AssociatedType, folder: &mut T)
+                                    -> AssociatedType
+                                    where T: Folder {
+    let new_id = folder.new_id(at.id);
+    let new_span = folder.new_span(at.span);
+    let new_ident = folder.fold_ident(at.ident);
+    let new_attrs = at.attrs
+                      .iter()
+                      .map(|attr| folder.fold_attribute((*attr).clone()))
+                      .collect();
+    ast::AssociatedType {
+        ident: new_ident,
+        attrs: new_attrs,
+        id: new_id,
+        span: new_span,
+    }
+}
+
pub fn noop_fold_struct_def<T: Folder>(struct_def: P<StructDef>, fld: &mut T) -> P<StructDef> {
struct_def.map(|StructDef {fields, ctor_id, super_struct, is_virtual}| StructDef {
fields: fields.move_map(|f| fld.fold_struct_field(f)),
b.map(|Block {id, view_items, stmts, expr, rules, span}| Block {
id: folder.new_id(id),
view_items: view_items.move_map(|x| folder.fold_view_item(x)),
- stmts: stmts.move_iter().flat_map(|s| folder.fold_stmt(s).move_iter()).collect(),
+ stmts: stmts.into_iter().flat_map(|s| folder.fold_stmt(s).into_iter()).collect(),
expr: expr.map(|x| folder.fold_expr(x)),
rules: rules,
span: folder.new_span(span),
ItemStruct(struct_def, folder.fold_generics(generics))
}
ItemImpl(generics, ifce, ty, impl_items) => {
+ let mut new_impl_items = Vec::new();
+ for impl_item in impl_items.iter() {
+ match *impl_item {
+ MethodImplItem(ref x) => {
+ for method in folder.fold_method((*x).clone())
+ .move_iter() {
+ new_impl_items.push(MethodImplItem(method))
+ }
+ }
+ TypeImplItem(ref t) => {
+ new_impl_items.push(TypeImplItem(
+ P(folder.fold_typedef((**t).clone()))));
+ }
+ }
+ }
+ let ifce = match ifce {
+ None => None,
+ Some(ref trait_ref) => {
+ Some(folder.fold_trait_ref((*trait_ref).clone()))
+ }
+ };
ItemImpl(folder.fold_generics(generics),
- ifce.map(|p| folder.fold_trait_ref(p)),
+ ifce,
folder.fold_ty(ty),
- impl_items.move_iter().flat_map(|impl_item| match impl_item {
- MethodImplItem(x) => {
- folder.fold_method(x).move_iter().map(|x| MethodImplItem(x))
- }
- }).collect())
+ new_impl_items)
}
ItemTrait(generics, unbound, bounds, methods) => {
let bounds = folder.fold_bounds(bounds);
- let methods = methods.move_iter().flat_map(|method| match method {
- RequiredMethod(m) => {
- SmallVector::one(RequiredMethod(folder.fold_type_method(m))).move_iter()
- }
- ProvidedMethod(method) => {
- // the awkward collect/iter idiom here is because
- // even though an iter and a map satisfy the same trait bound,
- // they're not actually the same type, so the method arms
- // don't unify.
- let methods: SmallVector<ast::TraitItem> =
- folder.fold_method(method).move_iter()
- .map(|m| ProvidedMethod(m)).collect();
- methods.move_iter()
- }
+ let methods = methods.into_iter().flat_map(|method| {
+ let r = match method {
+ RequiredMethod(m) => {
+ SmallVector::one(RequiredMethod(
+ folder.fold_type_method(m)))
+ .move_iter()
+ }
+ ProvidedMethod(method) => {
+ // the awkward collect/iter idiom here is because
+ // even though an iter and a map satisfy the same
+ // trait bound, they're not actually the same type, so
+ // the method arms don't unify.
+ let methods: SmallVector<ast::TraitItem> =
+ folder.fold_method(method).move_iter()
+ .map(|m| ProvidedMethod(m)).collect();
+ methods.move_iter()
+ }
+ TypeTraitItem(at) => {
+ SmallVector::one(TypeTraitItem(P(
+ folder.fold_associated_type(
+ (*at).clone()))))
+ .move_iter()
+ }
+ };
+ r
}).collect();
ItemTrait(folder.fold_generics(generics),
unbound,
}
pub fn noop_fold_type_method<T: Folder>(m: TypeMethod, fld: &mut T) -> TypeMethod {
- let TypeMethod {id, ident, attrs, fn_style, abi, decl, generics, explicit_self, vis, span} = m;
+ let TypeMethod {
+ id,
+ ident,
+ attrs,
+ fn_style,
+ abi,
+ decl,
+ generics,
+ explicit_self,
+ vis,
+ span
+ } = m;
TypeMethod {
id: fld.new_id(id),
ident: fld.fold_ident(ident),
Mod {
inner: folder.new_span(inner),
view_items: view_items.move_map(|x| folder.fold_view_item(x)),
- items: items.move_iter().flat_map(|x| folder.fold_item(x).move_iter()).collect(),
+ items: items.into_iter().flat_map(|x| folder.fold_item(x).into_iter()).collect(),
}
}
ExprIndex(el, er) => {
ExprIndex(folder.fold_expr(el), folder.fold_expr(er))
}
+ ExprSlice(e, e1, e2, m) => {
+ ExprSlice(folder.fold_expr(e),
+ e1.map(|x| folder.fold_expr(x)),
+ e2.map(|x| folder.fold_expr(x)),
+ m)
+ }
ExprPath(pth) => ExprPath(folder.fold_path(pth)),
ExprBreak(opt_ident) => ExprBreak(opt_ident.map(|x| folder.fold_ident(x))),
ExprAgain(opt_ident) => ExprAgain(opt_ident.map(|x| folder.fold_ident(x))),
match node {
StmtDecl(d, id) => {
let id = folder.new_id(id);
- folder.fold_decl(d).move_iter().map(|d| P(Spanned {
+ folder.fold_decl(d).into_iter().map(|d| P(Spanned {
node: StmtDecl(d, id),
span: span
})).collect()
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/")]
-#![feature(macro_rules, globs, managed_boxes, default_type_params, phase)]
+#![feature(macro_rules, globs, default_type_params, phase)]
#![feature(quote, struct_variant, unsafe_destructor, import_shadowing)]
#![allow(deprecated)]
pub mod ast_util;
pub mod attr;
pub mod codemap;
+pub mod config;
pub mod crateid;
pub mod diagnostic;
+pub mod feature_gate;
pub mod fold;
pub mod owned_slice;
pub mod parse;
pub mod ptr;
+pub mod show_span;
+pub mod std_inject;
+pub mod test;
pub mod visit;
pub mod print {
use std::default::Default;
use std::hash;
use std::{mem, raw, ptr, slice, vec};
+use std::rt::heap::EMPTY;
use serialize::{Encodable, Decodable, Encoder, Decoder};
/// A non-growable owned slice. This would preferably become `~[T]`
impl<T> OwnedSlice<T> {
pub fn empty() -> OwnedSlice<T> {
- OwnedSlice { data: ptr::mut_null(), len: 0 }
+ OwnedSlice { data: ptr::null_mut(), len: 0 }
}
#[inline(never)]
}
pub fn as_slice<'a>(&'a self) -> &'a [T] {
- static PTR_MARKER: u8 = 0;
let ptr = if self.data.is_null() {
// length zero, i.e. this will never be read as a T.
- &PTR_MARKER as *const u8 as *const T
+ EMPTY as *const T
} else {
self.data as *const T
};
}
pub fn move_iter(self) -> vec::MoveItems<T> {
- self.into_vec().move_iter()
+ self.into_vec().into_iter()
}
pub fn map<U>(&self, f: |&T| -> U) -> OwnedSlice<U> {
mod test {
use super::*;
- use codemap::{BytePos, CodeMap, Span};
+ use codemap::{BytePos, CodeMap, Span, NO_EXPANSION};
use diagnostic;
use parse::token;
use parse::token::{str_to_ident};
let tok1 = string_reader.next_token();
let tok2 = TokenAndSpan{
tok:token::IDENT(id, false),
- sp:Span {lo:BytePos(21),hi:BytePos(23),expn_info: None}};
+ sp:Span {lo:BytePos(21),hi:BytePos(23),expn_id: NO_EXPANSION}};
assert_eq!(tok1,tok2);
assert_eq!(string_reader.next_token().tok, token::WS);
// the 'main' id is already read:
let tok3 = string_reader.next_token();
let tok4 = TokenAndSpan{
tok:token::IDENT(str_to_ident("main"), false),
- sp:Span {lo:BytePos(24),hi:BytePos(28),expn_info: None}};
+ sp:Span {lo:BytePos(24),hi:BytePos(28),expn_id: NO_EXPANSION}};
assert_eq!(tok3,tok4);
// the lparen is already read:
assert_eq!(string_reader.last_pos.clone(), BytePos(29))
use parse::parser::Parser;
use ptr::P;
-use std::cell::RefCell;
+use std::cell::{Cell, RefCell};
use std::io::File;
use std::rc::Rc;
use std::str;
pub span_diagnostic: SpanHandler, // better be the same as the one in the reader!
/// Used to determine and report recursive mod inclusions
included_mod_stack: RefCell<Vec<Path>>,
+ pub node_id: Cell<ast::NodeId>,
}
pub fn new_parse_sess() -> ParseSess {
ParseSess {
span_diagnostic: mk_span_handler(default_handler(Auto, None), CodeMap::new()),
included_mod_stack: RefCell::new(Vec::new()),
+ node_id: Cell::new(1),
}
}
ParseSess {
span_diagnostic: sh,
included_mod_stack: RefCell::new(Vec::new()),
+ node_id: Cell::new(1),
+ }
+}
+
+impl ParseSess {
+ pub fn next_node_id(&self) -> ast::NodeId {
+ self.reserve_node_ids(1)
+ }
+ pub fn reserve_node_ids(&self, count: ast::NodeId) -> ast::NodeId {
+ let v = self.node_id.get();
+
+ match v.checked_add(&count) {
+ Some(next) => { self.node_id.set(next); }
+ None => fail!("Input too large, ran out of node ids!")
+ }
+
+ v
}
}
mod test {
use super::*;
use serialize::json;
- use codemap::{Span, BytePos, Spanned};
+ use codemap::{Span, BytePos, Spanned, NO_EXPANSION};
use owned_slice::OwnedSlice;
use ast;
use abi;
// produce a codemap::span
fn sp(a: u32, b: u32) -> Span {
- Span{lo:BytePos(a),hi:BytePos(b),expn_info:None}
+ Span {lo: BytePos(a), hi: BytePos(b), expn_id: NO_EXPANSION}
}
#[test] fn path_exprs_1() {
#![macro_escape]
use abi;
-use ast::{BareFnTy, ClosureTy};
+use ast::{AssociatedType, BareFnTy, ClosureTy};
use ast::{RegionTyParamBound, TraitTyParamBound};
use ast::{ProvidedMethod, Public, FnStyle};
use ast::{Mod, BiAdd, Arg, Arm, Attribute, BindByRef, BindByValue};
-use ast::{BiBitAnd, BiBitOr, BiBitXor, Block};
+use ast::{BiBitAnd, BiBitOr, BiBitXor, BiRem, Block};
use ast::{BlockCheckMode, UnBox};
use ast::{CaptureByRef, CaptureByValue, CaptureClause};
use ast::{Crate, CrateConfig, Decl, DeclItem};
use ast::{Expr, Expr_, ExprAddrOf, ExprMatch, ExprAgain};
use ast::{ExprAssign, ExprAssignOp, ExprBinary, ExprBlock, ExprBox};
use ast::{ExprBreak, ExprCall, ExprCast};
-use ast::{ExprField, ExprTupField, ExprFnBlock, ExprIf, ExprIndex};
+use ast::{ExprField, ExprTupField, ExprFnBlock, ExprIf, ExprIndex, ExprSlice};
use ast::{ExprLit, ExprLoop, ExprMac};
use ast::{ExprMethodCall, ExprParen, ExprPath, ExprProc};
use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary, ExprUnboxedFn};
use ast::{MethodImplItem, NamedField, UnNeg, NoReturn, UnNot};
use ast::{Pat, PatEnum, PatIdent, PatLit, PatRange, PatRegion, PatStruct};
use ast::{PatTup, PatBox, PatWild, PatWildMulti, PatWildSingle};
-use ast::{BiRem, RequiredMethod};
+use ast::{QPath, RequiredMethod};
use ast::{RetStyle, Return, BiShl, BiShr, Stmt, StmtDecl};
use ast::{StmtExpr, StmtSemi, StmtMac, StructDef, StructField};
use ast::{StructVariantKind, BiSub};
use ast::{TTNonterminal, TupleVariantKind, Ty, Ty_, TyBot, TyBox};
use ast::{TypeField, TyFixedLengthVec, TyClosure, TyProc, TyBareFn};
use ast::{TyTypeof, TyInfer, TypeMethod};
-use ast::{TyNil, TyParam, TyParamBound, TyParen, TyPath, TyPtr, TyRptr};
-use ast::{TyTup, TyU32, TyUnboxedFn, TyUniq, TyVec, UnUniq};
-use ast::{UnboxedClosureKind, UnboxedFnTy, UnboxedFnTyParamBound};
+use ast::{TyNil, TyParam, TyParamBound, TyParen, TyPath, TyPtr, TyQPath};
+use ast::{TyRptr, TyTup, TyU32, TyUnboxedFn, TyUniq, TyVec, UnUniq};
+use ast::{TypeImplItem, TypeTraitItem, Typedef, UnboxedClosureKind};
+use ast::{UnboxedFnBound, UnboxedFnTy, UnboxedFnTyParamBound};
use ast::{UnnamedField, UnsafeBlock};
use ast::{UnsafeFn, ViewItem, ViewItem_, ViewItemExternCrate, ViewItemUse};
use ast::{ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple};
use std::rc::Rc;
use std::iter;
-#[allow(non_camel_case_types)]
-#[deriving(PartialEq)]
-pub enum restriction {
- UNRESTRICTED,
- RESTRICT_STMT_EXPR,
- RESTRICT_NO_BAR_OP,
- RESTRICT_NO_BAR_OR_DOUBLEBAR_OP,
- RESTRICT_NO_STRUCT_LITERAL,
+bitflags! {
+ flags Restrictions: u8 {
+ static Unrestricted = 0b0000,
+ static RestrictionStmtExpr = 0b0001,
+ static RestrictionNoBarOp = 0b0010,
+ static RestrictionNoStructLiteral = 0b0100
+ }
}
type ItemInfo = (Ident, Item_, Option<Vec<Attribute> >);
pub buffer_start: int,
pub buffer_end: int,
pub tokens_consumed: uint,
- pub restriction: restriction,
+ pub restrictions: Restrictions,
pub quote_depth: uint, // not (yet) related to the quasiquoter
pub reader: Box<Reader+'a>,
pub interner: Rc<token::IdentInterner>,
buffer_start: 0,
buffer_end: 0,
tokens_consumed: 0,
- restriction: UNRESTRICTED,
+ restrictions: Unrestricted,
quote_depth: 0,
obsolete_set: HashSet::new(),
mod_path_stack: Vec::new(),
(decl, lifetime_defs)
}
- /// Parse the methods in a trait declaration
- pub fn parse_trait_methods(&mut self) -> Vec<TraitItem> {
+ /// Parses `type Foo;` in a trait declaration only. The `type` keyword has
+ /// already been parsed.
+ fn parse_associated_type(&mut self, attrs: Vec<Attribute>)
+ -> AssociatedType {
+ let lo = self.span.lo;
+ let ident = self.parse_ident();
+ let hi = self.span.hi;
+ self.expect(&token::SEMI);
+ AssociatedType {
+ id: ast::DUMMY_NODE_ID,
+ span: mk_sp(lo, hi),
+ ident: ident,
+ attrs: attrs,
+ }
+ }
+
+ /// Parses `type Foo = TYPE;` in an implementation declaration only. The
+ /// `type` keyword has already been parsed.
+ fn parse_typedef(&mut self, attrs: Vec<Attribute>, vis: Visibility)
+ -> Typedef {
+ let lo = self.span.lo;
+ let ident = self.parse_ident();
+ self.expect(&token::EQ);
+ let typ = self.parse_ty(true);
+ let hi = self.span.hi;
+ self.expect(&token::SEMI);
+ Typedef {
+ id: ast::DUMMY_NODE_ID,
+ span: mk_sp(lo, hi),
+ ident: ident,
+ vis: vis,
+ attrs: attrs,
+ typ: typ,
+ }
+ }
+
+ /// Parse the items in a trait declaration
+ pub fn parse_trait_items(&mut self) -> Vec<TraitItem> {
self.parse_unspanned_seq(
&token::LBRACE,
&token::RBRACE,
seq_sep_none(),
|p| {
let attrs = p.parse_outer_attributes();
- let lo = p.span.lo;
-
- // NB: at the moment, trait methods are public by default; this
- // could change.
- let vis = p.parse_visibility();
- let abi = if p.eat_keyword(keywords::Extern) {
- p.parse_opt_abi().unwrap_or(abi::C)
- } else if attr::contains_name(attrs.as_slice(),
- "rust_call_abi_hack") {
- // FIXME(stage0, pcwalton): Remove this awful hack after a
- // snapshot, and change to `extern "rust-call" fn`.
- abi::RustCall
+
+ if p.eat_keyword(keywords::Type) {
+ TypeTraitItem(P(p.parse_associated_type(attrs)))
} else {
- abi::Rust
- };
- let style = p.parse_fn_style();
- let ident = p.parse_ident();
+ let lo = p.span.lo;
- let mut generics = p.parse_generics();
+ let vis = p.parse_visibility();
+ let abi = if p.eat_keyword(keywords::Extern) {
+ p.parse_opt_abi().unwrap_or(abi::C)
+ } else if attr::contains_name(attrs.as_slice(),
+ "rust_call_abi_hack") {
+ // FIXME(stage0, pcwalton): Remove this awful hack after a
+ // snapshot, and change to `extern "rust-call" fn`.
+ abi::RustCall
+ } else {
+ abi::Rust
+ };
- let (explicit_self, d) = p.parse_fn_decl_with_self(|p| {
- // This is somewhat dubious; We don't want to allow argument
- // names to be left off if there is a definition...
- p.parse_arg_general(false)
- });
+ let style = p.parse_fn_style();
+ let ident = p.parse_ident();
+ let mut generics = p.parse_generics();
- p.parse_where_clause(&mut generics);
+ let (explicit_self, d) = p.parse_fn_decl_with_self(|p| {
+ // This is somewhat dubious; We don't want to allow
+ // argument names to be left off if there is a
+ // definition...
+ p.parse_arg_general(false)
+ });
- let hi = p.last_span.hi;
- match p.token {
- token::SEMI => {
- p.bump();
- debug!("parse_trait_methods(): parsing required method");
- RequiredMethod(TypeMethod {
- ident: ident,
- attrs: attrs,
- fn_style: style,
- decl: d,
- generics: generics,
- abi: abi,
- explicit_self: explicit_self,
- id: ast::DUMMY_NODE_ID,
- span: mk_sp(lo, hi),
- vis: vis,
- })
- }
- token::LBRACE => {
- debug!("parse_trait_methods(): parsing provided method");
- let (inner_attrs, body) =
- p.parse_inner_attrs_and_block();
- let mut attrs = attrs;
- attrs.extend(inner_attrs.move_iter());
- ProvidedMethod(P(ast::Method {
- attrs: attrs,
- id: ast::DUMMY_NODE_ID,
- span: mk_sp(lo, hi),
- node: ast::MethDecl(ident,
- generics,
- abi,
- explicit_self,
- style,
- d,
- body,
- vis)
- }))
- }
+ p.parse_where_clause(&mut generics);
+
+ let hi = p.last_span.hi;
+ match p.token {
+ token::SEMI => {
+ p.bump();
+ debug!("parse_trait_methods(): parsing required method");
+ RequiredMethod(TypeMethod {
+ ident: ident,
+ attrs: attrs,
+ fn_style: style,
+ decl: d,
+ generics: generics,
+ abi: abi,
+ explicit_self: explicit_self,
+ id: ast::DUMMY_NODE_ID,
+ span: mk_sp(lo, hi),
+ vis: vis,
+ })
+ }
+ token::LBRACE => {
+ debug!("parse_trait_methods(): parsing provided method");
+ let (inner_attrs, body) =
+ p.parse_inner_attrs_and_block();
+ let attrs = attrs.append(inner_attrs.as_slice());
+ ProvidedMethod(P(ast::Method {
+ attrs: attrs,
+ id: ast::DUMMY_NODE_ID,
+ span: mk_sp(lo, hi),
+ node: ast::MethDecl(ident,
+ generics,
+ abi,
+ explicit_self,
+ style,
+ d,
+ body,
+ vis)
+ }))
+ }
- _ => {
- let token_str = p.this_token_to_string();
- p.fatal((format!("expected `;` or `{{`, found `{}`",
- token_str)).as_slice())
- }
+ _ => {
+ let token_str = p.this_token_to_string();
+ p.fatal((format!("expected `;` or `{{`, found `{}`",
+ token_str)).as_slice())
+ }
+ }
}
})
}
if ts.len() == 1 && !one_tuple {
self.expect(&token::RPAREN);
- TyParen(ts.move_iter().nth(0).unwrap())
+ TyParen(ts.into_iter().nth(0).unwrap())
} else {
let t = TyTup(ts);
self.expect(&token::RPAREN);
} else if self.token_is_closure_keyword() ||
self.token == token::BINOP(token::OR) ||
self.token == token::OROR ||
- self.token == token::LT {
+ (self.token == token::LT &&
+ self.look_ahead(1, |t| {
+ *t == token::GT || Parser::token_is_lifetime(t)
+ })) {
// CLOSURE
- //
- // FIXME(pcwalton): Eventually `token::LT` will not unambiguously
- // introduce a closure, once procs can have lifetime bounds. We
- // will need to refactor the grammar a little bit at that point.
self.parse_ty_closure()
} else if self.eat_keyword(keywords::Typeof) {
TyTypeof(e)
} else if self.eat_keyword(keywords::Proc) {
self.parse_proc_type()
+ } else if self.token == token::LT {
+ // QUALIFIED PATH
+ self.bump();
+ let for_type = self.parse_ty(true);
+ self.expect_keyword(keywords::As);
+ let trait_name = self.parse_path(LifetimeAndTypesWithoutColons);
+ self.expect(&token::GT);
+ self.expect(&token::MOD_SEP);
+ let item_name = self.parse_ident();
+ TyQPath(P(QPath {
+ for_type: for_type,
+ trait_name: trait_name.path,
+ item_name: item_name,
+ }))
} else if self.token == token::MOD_SEP
|| is_ident_or_path(&self.token) {
// NAMED TYPE
ExprIndex(expr, idx)
}
+ pub fn mk_slice(&mut self, expr: P<Expr>,
+ start: Option<P<Expr>>,
+ end: Option<P<Expr>>,
+ mutbl: Mutability)
+ -> ast::Expr_ {
+ ExprSlice(expr, start, end, mutbl)
+ }
+
pub fn mk_field(&mut self, expr: P<Expr>, ident: ast::SpannedIdent,
tys: Vec<P<Ty>>) -> ast::Expr_ {
ExprField(expr, ident, tys)
self.commit_expr_expecting(&**es.last().unwrap(), token::RPAREN);
return if es.len() == 1 && !trailing_comma {
- self.mk_expr(lo, hi, ExprParen(es.move_iter().nth(0).unwrap()))
+ self.mk_expr(lo, hi, ExprParen(es.into_iter().nth(0).unwrap()))
} else {
self.mk_expr(lo, hi, ExprTup(es))
}
}
}
hi = self.last_span.hi;
- },
+ }
_ => {
if self.eat_keyword(keywords::Ref) {
return self.parse_lambda_expr(CaptureByRef);
if self.token == token::LBRACE {
// This is a struct literal, unless we're prohibited
// from parsing struct literals here.
- if self.restriction != RESTRICT_NO_STRUCT_LITERAL {
+ if !self.restrictions.contains(RestrictionNoStructLiteral) {
// It's a struct literal.
self.bump();
let mut fields = Vec::new();
}
// expr[...]
+ // Could be either an index expression or a slicing expression.
+ // Any slicing non-terminal can have a mutable version with `mut`
+ // after the opening square bracket.
token::LBRACKET => {
self.bump();
- let ix = self.parse_expr();
- hi = self.span.hi;
- self.commit_expr_expecting(&*ix, token::RBRACKET);
- let index = self.mk_index(e, ix);
- e = self.mk_expr(lo, hi, index)
+ let mutbl = if self.eat_keyword(keywords::Mut) {
+ MutMutable
+ } else {
+ MutImmutable
+ };
+ match self.token {
+ // e[]
+ token::RBRACKET => {
+ self.bump();
+ hi = self.span.hi;
+ let slice = self.mk_slice(e, None, None, mutbl);
+ e = self.mk_expr(lo, hi, slice)
+ }
+ // e[..e]
+ token::DOTDOT => {
+ self.bump();
+ match self.token {
+ // e[..]
+ token::RBRACKET => {
+ self.bump();
+ hi = self.span.hi;
+ let slice = self.mk_slice(e, None, None, mutbl);
+ e = self.mk_expr(lo, hi, slice);
+
+ self.span_err(e.span, "incorrect slicing expression: `[..]`");
+ self.span_note(e.span,
+ "use `expr[]` to construct a slice of the whole of expr");
+ }
+ // e[..e]
+ _ => {
+ hi = self.span.hi;
+ let e2 = self.parse_expr();
+ self.commit_expr_expecting(&*e2, token::RBRACKET);
+ let slice = self.mk_slice(e, None, Some(e2), mutbl);
+ e = self.mk_expr(lo, hi, slice)
+ }
+ }
+ }
+ // e[e] | e[e..] | e[e..e]
+ _ => {
+ let ix = self.parse_expr();
+ match self.token {
+ // e[e..] | e[e..e]
+ token::DOTDOT => {
+ self.bump();
+ let e2 = match self.token {
+ // e[e..]
+ token::RBRACKET => {
+ self.bump();
+ None
+ }
+ // e[e..e]
+ _ => {
+ let e2 = self.parse_expr();
+ self.commit_expr_expecting(&*e2, token::RBRACKET);
+ Some(e2)
+ }
+ };
+ hi = self.span.hi;
+ let slice = self.mk_slice(e, Some(ix), e2, mutbl);
+ e = self.mk_expr(lo, hi, slice)
+ }
+ // e[e]
+ _ => {
+ if mutbl == ast::MutMutable {
+ self.span_err(e.span,
+ "`mut` keyword is invalid in index expressions");
+ }
+ hi = self.span.hi;
+ self.commit_expr_expecting(&*ix, token::RBRACKET);
+ let index = self.mk_index(e, ix);
+ e = self.mk_expr(lo, hi, index)
+ }
+ }
+ }
+ }
}
_ => return e
// Prevent dynamic borrow errors later on by limiting the
// scope of the borrows.
- match (&self.token, &self.restriction) {
- (&token::BINOP(token::OR), &RESTRICT_NO_BAR_OP) => return lhs,
- (&token::BINOP(token::OR),
- &RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) => return lhs,
- (&token::OROR, &RESTRICT_NO_BAR_OR_DOUBLEBAR_OP) => return lhs,
- _ => { }
+ if self.token == token::BINOP(token::OR) &&
+ self.restrictions.contains(RestrictionNoBarOp) {
+ return lhs;
}
let cur_opt = token_to_binop(&self.token);
pub fn parse_assign_expr(&mut self) -> P<Expr> {
let lo = self.span.lo;
let lhs = self.parse_binops();
+ let restrictions = self.restrictions & RestrictionNoStructLiteral;
match self.token {
token::EQ => {
self.bump();
- let rhs = self.parse_expr();
+ let rhs = self.parse_expr_res(restrictions);
self.mk_expr(lo, rhs.span.hi, ExprAssign(lhs, rhs))
}
token::BINOPEQ(op) => {
self.bump();
- let rhs = self.parse_expr();
+ let rhs = self.parse_expr_res(restrictions);
let aop = match op {
token::PLUS => BiAdd,
token::MINUS => BiSub,
/// Parse an 'if' expression ('if' token already eaten)
pub fn parse_if_expr(&mut self) -> P<Expr> {
let lo = self.last_span.lo;
- let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL);
+ let cond = self.parse_expr_res(RestrictionNoStructLiteral);
let thn = self.parse_block();
let mut els: Option<P<Expr>> = None;
let mut hi = thn.span.hi;
let lo = self.last_span.lo;
let pat = self.parse_pat();
self.expect_keyword(keywords::In);
- let expr = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL);
+ let expr = self.parse_expr_res(RestrictionNoStructLiteral);
let loop_block = self.parse_block();
let hi = self.span.hi;
pub fn parse_while_expr(&mut self, opt_ident: Option<ast::Ident>) -> P<Expr> {
let lo = self.last_span.lo;
- let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL);
+ let cond = self.parse_expr_res(RestrictionNoStructLiteral);
let body = self.parse_block();
let hi = body.span.hi;
return self.mk_expr(lo, hi, ExprWhile(cond, body, opt_ident));
fn parse_match_expr(&mut self) -> P<Expr> {
let lo = self.last_span.lo;
- let discriminant = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL);
+ let discriminant = self.parse_expr_res(RestrictionNoStructLiteral);
self.commit_expr_expecting(&*discriminant, token::LBRACE);
let mut arms: Vec<Arm> = Vec::new();
while self.token != token::RBRACE {
guard = Some(self.parse_expr());
}
self.expect(&token::FAT_ARROW);
- let expr = self.parse_expr_res(RESTRICT_STMT_EXPR);
+ let expr = self.parse_expr_res(RestrictionStmtExpr);
let require_comma =
!classify::expr_is_simple_block(&*expr)
/// Parse an expression
pub fn parse_expr(&mut self) -> P<Expr> {
- return self.parse_expr_res(UNRESTRICTED);
+ return self.parse_expr_res(Unrestricted);
}
- /// Parse an expression, subject to the given restriction
- pub fn parse_expr_res(&mut self, r: restriction) -> P<Expr> {
- let old = self.restriction;
- self.restriction = r;
+ /// Parse an expression, subject to the given restrictions
+ pub fn parse_expr_res(&mut self, r: Restrictions) -> P<Expr> {
+ let old = self.restrictions;
+ self.restrictions = r;
let e = self.parse_assign_expr();
- self.restriction = old;
+ self.restrictions = old;
return e;
}
// These expressions are limited to literals (possibly
// preceded by unary-minus) or identifiers.
let val = self.parse_literal_maybe_minus();
- if self.token == token::DOTDOT &&
+ // FIXME(#17295) remove the DOTDOT option.
+ if (self.token == token::DOTDOTDOT || self.token == token::DOTDOT) &&
self.look_ahead(1, |t| {
*t != token::COMMA && *t != token::RBRACKET
}) {
}
});
- if self.look_ahead(1, |t| *t == token::DOTDOT) &&
+ // FIXME(#17295) remove the DOTDOT option.
+ if self.look_ahead(1, |t| *t == token::DOTDOTDOT || *t == token::DOTDOT) &&
self.look_ahead(2, |t| {
*t != token::COMMA && *t != token::RBRACKET
}) {
- let start = self.parse_expr_res(RESTRICT_NO_BAR_OP);
- self.eat(&token::DOTDOT);
- let end = self.parse_expr_res(RESTRICT_NO_BAR_OP);
+ let start = self.parse_expr_res(RestrictionNoBarOp);
+ // FIXME(#17295) remove the DOTDOT option (self.eat(&token::DOTDOTDOT)).
+ if self.token == token::DOTDOTDOT || self.token == token::DOTDOT {
+ self.bump();
+ }
+ let end = self.parse_expr_res(RestrictionNoBarOp);
pat = PatRange(start, end);
} else if is_plain_ident(&self.token) && !can_be_enum_or_struct {
let id = self.parse_ident();
check_expected_item(self, found_attrs);
// Remainder are line-expr stmts.
- let e = self.parse_expr_res(RESTRICT_STMT_EXPR);
+ let e = self.parse_expr_res(RestrictionStmtExpr);
P(spanned(lo, e.span.hi, StmtExpr(e, ast::DUMMY_NODE_ID)))
}
}
/// Is this expression a successfully-parsed statement?
fn expr_is_complete(&mut self, e: &Expr) -> bool {
- self.restriction == RESTRICT_STMT_EXPR &&
+ self.restrictions.contains(RestrictionStmtExpr) &&
!classify::expr_requires_semi_to_be_stmt(e)
}
} = self.parse_items_and_view_items(first_item_attrs,
false, false);
- for item in items.move_iter() {
+ for item in items.into_iter() {
let span = item.span;
let decl = P(spanned(span.lo, span.hi, DeclItem(item)));
stmts.push(P(spanned(span.lo, span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID))));
let span_with_semi = Span {
lo: span.lo,
hi: self.last_span.hi,
- expn_info: span.expn_info,
+ expn_id: span.expn_id,
};
stmts.push(P(Spanned {
node: StmtSemi(e, stmt_id),
})
}
- fn parse_unboxed_function_type(&mut self) -> UnboxedFnTy {
- let (optional_unboxed_closure_kind, inputs) =
- if self.eat(&token::OROR) {
- (None, Vec::new())
- } else {
- self.expect_or();
-
- let optional_unboxed_closure_kind =
- self.parse_optional_unboxed_closure_kind();
-
- let inputs = self.parse_seq_to_before_or(&token::COMMA,
- |p| {
- p.parse_arg_general(false)
- });
- self.expect_or();
- (optional_unboxed_closure_kind, inputs)
- };
-
- let (return_style, output) = self.parse_ret_ty();
- UnboxedFnTy {
- decl: P(FnDecl {
- inputs: inputs,
- output: output,
- cf: return_style,
- variadic: false,
- }),
- kind: match optional_unboxed_closure_kind {
- Some(kind) => kind,
- None => FnMutUnboxedClosureKind,
- },
- }
- }
-
// Parses a sequence of bounds if a `:` is found,
// otherwise returns empty list.
fn parse_colon_then_ty_param_bounds(&mut self)
self.bump();
}
token::MOD_SEP | token::IDENT(..) => {
- let tref = self.parse_trait_ref();
- result.push(TraitTyParamBound(tref));
- }
- token::BINOP(token::OR) | token::OROR => {
- let unboxed_function_type =
- self.parse_unboxed_function_type();
- result.push(UnboxedFnTyParamBound(unboxed_function_type));
+ let path =
+ self.parse_path(LifetimeAndTypesWithoutColons).path;
+ if self.token == token::LPAREN {
+ self.bump();
+ let inputs = self.parse_seq_to_end(
+ &token::RPAREN,
+ seq_sep_trailing_allowed(token::COMMA),
+ |p| p.parse_arg_general(false));
+ let (return_style, output) = self.parse_ret_ty();
+ result.push(UnboxedFnTyParamBound(P(UnboxedFnBound {
+ path: path,
+ decl: P(FnDecl {
+ inputs: inputs,
+ output: output,
+ cf: return_style,
+ variadic: false,
+ }),
+ ref_id: ast::DUMMY_NODE_ID,
+ })));
+ } else {
+ result.push(TraitTyParamBound(ast::TraitRef {
+ path: path,
+ ref_id: ast::DUMMY_NODE_ID,
+ }))
+ }
}
_ => break,
}
"variadic function must be declared with at least one named argument");
}
- let args = args.move_iter().map(|x| x.unwrap()).collect();
+ let args = args.into_iter().map(|x| x.unwrap()).collect();
(args, variadic)
}
(optional_unboxed_closure_kind, args)
}
};
- let output = if self.eat(&token::RARROW) {
- self.parse_ty(true)
+ let (style, output) = if self.token == token::RARROW {
+ self.parse_ret_ty()
} else {
- P(Ty {
+ (Return, P(Ty {
id: ast::DUMMY_NODE_ID,
node: TyInfer,
span: self.span,
- })
+ }))
};
(P(FnDecl {
inputs: inputs_captures,
output: output,
- cf: Return,
+ cf: style,
variadic: false
}), optional_unboxed_closure_kind)
}
seq_sep_trailing_allowed(token::COMMA),
|p| p.parse_fn_block_arg());
- let output = if self.eat(&token::RARROW) {
- self.parse_ty(true)
+ let (style, output) = if self.token == token::RARROW {
+ self.parse_ret_ty()
} else {
- P(Ty {
+ (Return, P(Ty {
id: ast::DUMMY_NODE_ID,
node: TyInfer,
span: self.span,
- })
+ }))
};
P(FnDecl {
inputs: inputs,
output: output,
- cf: Return,
+ cf: style,
variadic: false
})
}
/// Parse a method in a trait impl, starting with `attrs` attributes.
pub fn parse_method(&mut self,
- already_parsed_attrs: Option<Vec<Attribute>>)
+ attrs: Vec<Attribute>,
+ visa: Visibility)
-> P<Method> {
- let next_attrs = self.parse_outer_attributes();
- let attrs = match already_parsed_attrs {
- Some(mut a) => { a.push_all_move(next_attrs); a }
- None => next_attrs
- };
-
let lo = self.span.lo;
// code copied from parse_macro_use_or_failure... abstraction!
self.span.hi) };
(ast::MethMac(m), self.span.hi, attrs)
} else {
- let visa = self.parse_visibility();
let abi = if self.eat_keyword(keywords::Extern) {
self.parse_opt_abi().unwrap_or(abi::C)
} else if attr::contains_name(attrs.as_slice(),
self.parse_where_clause(&mut tps);
- let meths = self.parse_trait_methods();
+ let meths = self.parse_trait_items();
(ident, ItemTrait(tps, sized, bounds, meths), None)
}
fn parse_impl_items(&mut self) -> (Vec<ImplItem>, Vec<Attribute>) {
let mut impl_items = Vec::new();
self.expect(&token::LBRACE);
- let (inner_attrs, next) = self.parse_inner_attrs_and_next();
- let mut method_attrs = Some(next);
+ let (inner_attrs, mut method_attrs) =
+ self.parse_inner_attrs_and_next();
while !self.eat(&token::RBRACE) {
- impl_items.push(MethodImplItem(self.parse_method(method_attrs)));
- method_attrs = None;
+ method_attrs.push_all_move(self.parse_outer_attributes());
+ let vis = self.parse_visibility();
+ if self.eat_keyword(keywords::Type) {
+ impl_items.push(TypeImplItem(P(self.parse_typedef(
+ method_attrs,
+ vis))))
+ } else {
+ impl_items.push(MethodImplItem(self.parse_method(
+ method_attrs,
+ vis)));
+ }
+ method_attrs = self.parse_outer_attributes();
}
(impl_items, inner_attrs)
}
Some(attrs))
}
- /// Parse a::B<String,int>
- fn parse_trait_ref(&mut self) -> TraitRef {
- ast::TraitRef {
- path: self.parse_path(LifetimeAndTypesWithoutColons).path,
- ref_id: ast::DUMMY_NODE_ID,
- }
- }
-
/// Parse struct Foo { ... }
fn parse_item_struct(&mut self, is_virtual: bool) -> ItemInfo {
let class_name = self.parse_ident();
seq_sep_trailing_allowed(token::COMMA),
|p| p.parse_ty(true)
);
- for ty in arg_tys.move_iter() {
+ for ty in arg_tys.into_iter() {
args.push(ast::VariantArg {
ty: ty,
id: ast::DUMMY_NODE_ID,
self.bump();
let mut attrs = attrs;
mem::swap(&mut item.attrs, &mut attrs);
- item.attrs.extend(attrs.move_iter());
+ item.attrs.extend(attrs.into_iter());
return IoviItem(P(item));
}
None => {}
let path = ast::Path {
span: span,
global: false,
- segments: path.move_iter().map(|identifier| {
+ segments: path.into_iter().map(|identifier| {
ast::PathSegment {
identifier: identifier,
lifetimes: Vec::new(),
let path = ast::Path {
span: mk_sp(lo, self.span.hi),
global: false,
- segments: path.move_iter().map(|identifier| {
+ segments: path.into_iter().map(|identifier| {
ast::PathSegment {
identifier: identifier,
lifetimes: Vec::new(),
let path = ast::Path {
span: mk_sp(lo, self.span.hi),
global: false,
- segments: path.move_iter().map(|identifier| {
+ segments: path.into_iter().map(|identifier| {
ast::PathSegment {
identifier: identifier,
lifetimes: Vec::new(),
let path = ast::Path {
span: mk_sp(lo, self.span.hi),
global: false,
- segments: path.move_iter().map(|identifier| {
+ segments: path.into_iter().map(|identifier| {
ast::PathSegment {
identifier: identifier,
lifetimes: Vec::new(),
pub fn to_string(t: &Token) -> String {
match *t {
- EQ => "=".to_string(),
- LT => "<".to_string(),
- LE => "<=".to_string(),
- EQEQ => "==".to_string(),
- NE => "!=".to_string(),
- GE => ">=".to_string(),
- GT => ">".to_string(),
- NOT => "!".to_string(),
- TILDE => "~".to_string(),
- OROR => "||".to_string(),
- ANDAND => "&&".to_string(),
- BINOP(op) => binop_to_string(op).to_string(),
+ EQ => "=".into_string(),
+ LT => "<".into_string(),
+ LE => "<=".into_string(),
+ EQEQ => "==".into_string(),
+ NE => "!=".into_string(),
+ GE => ">=".into_string(),
+ GT => ">".into_string(),
+ NOT => "!".into_string(),
+ TILDE => "~".into_string(),
+ OROR => "||".into_string(),
+ ANDAND => "&&".into_string(),
+ BINOP(op) => binop_to_string(op).into_string(),
BINOPEQ(op) => {
- let mut s = binop_to_string(op).to_string();
+ let mut s = binop_to_string(op).into_string();
s.push_str("=");
s
}
/* Structural symbols */
- AT => "@".to_string(),
- DOT => ".".to_string(),
- DOTDOT => "..".to_string(),
- DOTDOTDOT => "...".to_string(),
- COMMA => ",".to_string(),
- SEMI => ";".to_string(),
- COLON => ":".to_string(),
- MOD_SEP => "::".to_string(),
- RARROW => "->".to_string(),
- LARROW => "<-".to_string(),
- FAT_ARROW => "=>".to_string(),
- LPAREN => "(".to_string(),
- RPAREN => ")".to_string(),
- LBRACKET => "[".to_string(),
- RBRACKET => "]".to_string(),
- LBRACE => "{".to_string(),
- RBRACE => "}".to_string(),
- POUND => "#".to_string(),
- DOLLAR => "$".to_string(),
- QUESTION => "?".to_string(),
+ AT => "@".into_string(),
+ DOT => ".".into_string(),
+ DOTDOT => "..".into_string(),
+ DOTDOTDOT => "...".into_string(),
+ COMMA => ",".into_string(),
+ SEMI => ";".into_string(),
+ COLON => ":".into_string(),
+ MOD_SEP => "::".into_string(),
+ RARROW => "->".into_string(),
+ LARROW => "<-".into_string(),
+ FAT_ARROW => "=>".into_string(),
+ LPAREN => "(".into_string(),
+ RPAREN => ")".into_string(),
+ LBRACKET => "[".into_string(),
+ RBRACKET => "]".into_string(),
+ LBRACE => "{".into_string(),
+ RBRACE => "}".into_string(),
+ POUND => "#".into_string(),
+ DOLLAR => "$".into_string(),
+ QUESTION => "?".into_string(),
/* Literals */
LIT_BYTE(b) => {
format!("'{}'", c.as_str())
}
LIT_INTEGER(c) | LIT_FLOAT(c) => {
- c.as_str().to_string()
+ c.as_str().into_string()
}
LIT_STR(s) => {
}
/* Name components */
- IDENT(s, _) => get_ident(s).get().to_string(),
+ IDENT(s, _) => get_ident(s).get().into_string(),
LIFETIME(s) => {
format!("{}", get_ident(s))
}
- UNDERSCORE => "_".to_string(),
+ UNDERSCORE => "_".into_string(),
/* Other */
- DOC_COMMENT(s) => s.as_str().to_string(),
- EOF => "<eof>".to_string(),
- WS => " ".to_string(),
- COMMENT => "/* */".to_string(),
+ DOC_COMMENT(s) => s.as_str().into_string(),
+ EOF => "<eof>".into_string(),
+ WS => " ".into_string(),
+ COMMENT => "/* */".into_string(),
SHEBANG(s) => format!("/* shebang: {}*/", s.as_str()),
INTERPOLATED(ref nt) => {
&NtTy(ref e) => ::print::pprust::ty_to_string(&**e),
&NtPath(ref e) => ::print::pprust::path_to_string(&**e),
_ => {
- let mut s = "an interpolated ".to_string();
+ let mut s = "an interpolated ".into_string();
match *nt {
NtItem(..) => s.push_str("item"),
NtBlock(..) => s.push_str("block"),
//! avoid combining it with other lines and making matters even worse.
use std::io;
-use std::string::String;
+use std::string;
#[deriving(Clone, PartialEq)]
pub enum Breaks {
#[deriving(Clone)]
pub enum Token {
- String(String, int),
+ String(string::String, int),
Break(BreakToken),
Begin(BeginToken),
End,
}
}
-pub fn tok_str(t: Token) -> String {
+pub fn tok_str(t: Token) -> string::String {
match t {
String(s, len) => return format!("STR({},{})", s, len),
Break(_) => return "BREAK".to_string(),
left: uint,
right: uint,
lim: uint)
- -> String {
+ -> string::String {
let n = toks.len();
assert_eq!(n, szs.len());
let mut i = left;
let mut l = lim;
- let mut s = String::from_str("[");
+ let mut s = string::String::from_str("[");
while i != right && l != 0u {
l -= 1u;
if i != left {
// except according to those terms.
use abi;
-use ast::{FnMutUnboxedClosureKind, FnOnceUnboxedClosureKind};
-use ast::{FnUnboxedClosureKind, MethodImplItem};
-use ast::{RegionTyParamBound, TraitTyParamBound, UnboxedClosureKind};
-use ast::{UnboxedFnTyParamBound, RequiredMethod, ProvidedMethod};
+use ast::{FnUnboxedClosureKind, FnMutUnboxedClosureKind};
+use ast::{FnOnceUnboxedClosureKind};
+use ast::{MethodImplItem, RegionTyParamBound, TraitTyParamBound};
+use ast::{RequiredMethod, ProvidedMethod, TypeImplItem, TypeTraitItem};
+use ast::{UnboxedClosureKind, UnboxedFnTyParamBound};
use ast;
use ast_util;
use owned_slice::OwnedSlice;
ast::TyPath(ref path, ref bounds, _) => {
try!(self.print_bounded_path(path, bounds));
}
+ ast::TyQPath(ref qpath) => {
+ try!(word(&mut self.s, "<"));
+ try!(self.print_type(&*qpath.for_type));
+ try!(space(&mut self.s));
+ try!(self.word_space("as"));
+ try!(self.print_path(&qpath.trait_name, false));
+ try!(word(&mut self.s, ">"));
+ try!(word(&mut self.s, "::"));
+ try!(self.print_ident(qpath.item_name));
+ }
ast::TyFixedLengthVec(ref ty, ref v) => {
try!(word(&mut self.s, "["));
try!(self.print_type(&**ty));
}
}
+    // Prints a trait-side associated type declaration as `type Ident;`.
+    fn print_associated_type(&mut self, typedef: &ast::AssociatedType)
+                             -> IoResult<()> {
+        try!(self.word_space("type"));
+        try!(self.print_ident(typedef.ident));
+        word(&mut self.s, ";")
+    }
+
+    // Prints an impl-side associated type definition as `type Ident = Type;`.
+    fn print_typedef(&mut self, typedef: &ast::Typedef) -> IoResult<()> {
+        try!(self.word_space("type"));
+        try!(self.print_ident(typedef.ident));
+        try!(space(&mut self.s));
+        try!(self.word_space("="));
+        try!(self.print_type(&*typedef.typ));
+        word(&mut self.s, ";")
+    }
+
/// Pretty-print an item
pub fn print_item(&mut self, item: &ast::Item) -> IoResult<()> {
try!(self.hardbreak_if_not_bol());
ast::MethodImplItem(ref meth) => {
try!(self.print_method(&**meth));
}
+ ast::TypeImplItem(ref typ) => {
+ try!(self.print_typedef(&**typ));
+ }
}
}
try!(self.bclose(item.span));
m: &ast::TraitItem) -> IoResult<()> {
match *m {
RequiredMethod(ref ty_m) => self.print_ty_method(ty_m),
- ProvidedMethod(ref m) => self.print_method(&**m)
+ ProvidedMethod(ref m) => self.print_method(&**m),
+ TypeTraitItem(ref t) => self.print_associated_type(&**t),
}
}
pub fn print_impl_item(&mut self, ii: &ast::ImplItem) -> IoResult<()> {
match *ii {
MethodImplItem(ref m) => self.print_method(&**m),
+ TypeImplItem(ref td) => self.print_typedef(&**td),
}
}
try!(self.print_expr(&**index));
try!(word(&mut self.s, "]"));
}
+ ast::ExprSlice(ref e, ref start, ref end, ref mutbl) => {
+ try!(self.print_expr(&**e));
+ try!(word(&mut self.s, "["));
+ if mutbl == &ast::MutMutable {
+ try!(word(&mut self.s, "mut"));
+ if start.is_some() || end.is_some() {
+ try!(space(&mut self.s));
+ }
+ }
+ match start {
+ &Some(ref e) => try!(self.print_expr(&**e)),
+ _ => {}
+ }
+ if start.is_some() || end.is_some() {
+ try!(word(&mut self.s, ".."));
+ }
+ match end {
+ &Some(ref e) => try!(self.print_expr(&**e)),
+ _ => {}
+ }
+ try!(word(&mut self.s, "]"));
+ }
ast::ExprPath(ref path) => try!(self.print_path(path, true)),
ast::ExprBreak(opt_ident) => {
try!(word(&mut self.s, "break"));
ast::PatRange(ref begin, ref end) => {
try!(self.print_expr(&**begin));
try!(space(&mut self.s));
- try!(word(&mut self.s, ".."));
+ try!(word(&mut self.s, "..."));
try!(self.print_expr(&**end));
}
ast::PatVec(ref before, ref slice, ref after) => {
self.print_lifetime(lt)
}
UnboxedFnTyParamBound(ref unboxed_function_type) => {
- self.print_ty_fn(None,
- None,
- ast::NormalFn,
- ast::Many,
- &*unboxed_function_type.decl,
- None,
- &OwnedSlice::empty(),
- None,
- None,
- Some(unboxed_function_type.kind))
+ try!(self.print_path(&unboxed_function_type.path,
+ false));
+ try!(self.popen());
+ try!(self.print_fn_args(&*unboxed_function_type.decl,
+ None));
+ try!(self.pclose());
+ self.print_fn_output(&*unboxed_function_type.decl)
}
})
}
self.end()
}
+    // Prints the `-> Type` portion of a fn declaration. Prints nothing for
+    // a `()` return type, and `-> !` for a diverging (NoReturn) function.
+    pub fn print_fn_output(&mut self, decl: &ast::FnDecl) -> IoResult<()> {
+        match decl.output.node {
+            ast::TyNil => Ok(()),
+            _ => {
+                try!(self.space_if_not_bol());
+                try!(self.ibox(indent_unit));
+                try!(self.word_space("->"));
+                if decl.cf == ast::NoReturn {
+                    try!(self.word_nbsp("!"));
+                } else {
+                    try!(self.print_type(&*decl.output));
+                }
+                self.end()
+            }
+        }
+    }
+
pub fn print_ty_fn(&mut self,
opt_abi: Option<abi::Abi>,
opt_sigil: Option<char>,
try!(self.maybe_print_comment(decl.output.span.lo));
- match decl.output.node {
- ast::TyNil => {}
- _ => {
- try!(self.space_if_not_bol());
- try!(self.ibox(indent_unit));
- try!(self.word_space("->"));
- if decl.cf == ast::NoReturn {
- try!(self.word_nbsp("!"));
- } else {
- try!(self.print_type(&*decl.output));
- }
- try!(self.end());
- }
- }
+ try!(self.print_fn_output(decl));
match generics {
Some(generics) => try!(self.print_where_clause(generics)),
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Span debugger
+//!
+//! This module shows spans for all expressions in the crate
+//! to help with compiler debugging.
+
+use ast;
+use diagnostic;
+use visit;
+use visit::Visitor;
+
+// Visitor that reports the span of every expression it encounters via
+// the wrapped diagnostic handler.
+struct ShowSpanVisitor<'a> {
+    span_diagnostic: &'a diagnostic::SpanHandler,
+}
+
+impl<'a, 'v> Visitor<'v> for ShowSpanVisitor<'a> {
+    // Emits a note pointing at each expression's span, then recurses.
+    fn visit_expr(&mut self, e: &ast::Expr) {
+        self.span_diagnostic.span_note(e.span, "expression");
+        visit::walk_expr(self, e);
+    }
+
+    // NOTE(review): overridden so macros are walked explicitly — presumably the
+    // default Visitor::visit_mac does not descend; confirm against visit.rs.
+    fn visit_mac(&mut self, macro: &ast::Mac) {
+        visit::walk_mac(self, macro);
+    }
+}
+
+/// Walks `krate`, printing a diagnostic note at the span of every expression.
+pub fn run(span_diagnostic: &diagnostic::SpanHandler, krate: &ast::Crate) {
+    let mut v = ShowSpanVisitor { span_diagnostic: span_diagnostic };
+    visit::walk_crate(&mut v, krate);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ast;
+use attr;
+use codemap::DUMMY_SP;
+use codemap;
+use fold::Folder;
+use fold;
+use owned_slice::OwnedSlice;
+use parse::token::InternedString;
+use parse::token::special_idents;
+use parse::token;
+use ptr::P;
+use util::small_vector::SmallVector;
+
+use std::mem;
+
+/// Adds the implicit `extern crate std` (and, for executables, the runtime
+/// crate) to `krate`, unless the crate opts out with `#![no_std]`.
+pub fn maybe_inject_crates_ref(krate: ast::Crate, alt_std_name: Option<String>, any_exe: bool)
+                               -> ast::Crate {
+    if use_std(&krate) {
+        inject_crates_ref(krate, alt_std_name, any_exe)
+    } else {
+        krate
+    }
+}
+
+/// Injects the std prelude import into every module, unless the crate
+/// opts out with `#![no_std]`.
+pub fn maybe_inject_prelude(krate: ast::Crate) -> ast::Crate {
+    if use_std(&krate) {
+        inject_prelude(krate)
+    } else {
+        krate
+    }
+}
+
+// True unless the crate carries `#![no_std]`.
+fn use_std(krate: &ast::Crate) -> bool {
+    !attr::contains_name(krate.attrs.as_slice(), "no_std")
+}
+
+// True unless the crate carries `#![no_start]`.
+fn use_start(krate: &ast::Crate) -> bool {
+    !attr::contains_name(krate.attrs.as_slice(), "no_start")
+}
+
+// True if the attribute list contains `#![no_implicit_prelude]`.
+fn no_prelude(attrs: &[ast::Attribute]) -> bool {
+    attr::contains_name(attrs, "no_implicit_prelude")
+}
+
+// Folder that prepends the `extern crate std` (and, for executables, the
+// runtime crate) view items to the crate root.
+struct StandardLibraryInjector<'a> {
+    // Substitute crate name to link `std` against, if any.
+    alt_std_name: Option<String>,
+    // Whether any executable output is being produced.
+    any_exe: bool,
+}
+
+impl<'a> fold::Folder for StandardLibraryInjector<'a> {
+    fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
+
+        // The name to use in `extern crate "name" as std;`
+        let actual_crate_name = match self.alt_std_name {
+            Some(ref s) => token::intern_and_get_ident(s.as_slice()),
+            None => token::intern_and_get_ident("std"),
+        };
+
+        // `extern crate std`, tagged #[phase(plugin, link)] so std's macros
+        // are also made available.
+        let mut vis = vec!(ast::ViewItem {
+            node: ast::ViewItemExternCrate(token::str_to_ident("std"),
+                                           Some((actual_crate_name, ast::CookedStr)),
+                                           ast::DUMMY_NODE_ID),
+            attrs: vec!(
+                attr::mk_attr_outer(attr::mk_attr_id(), attr::mk_list_item(
+                        InternedString::new("phase"),
+                        vec!(
+                            attr::mk_word_item(InternedString::new("plugin")),
+                            attr::mk_word_item(InternedString::new("link")
+                        ))))),
+            vis: ast::Inherited,
+            span: DUMMY_SP
+        });
+
+        if use_start(&krate) && self.any_exe {
+            let visible_rt_name = "rt";
+            let actual_rt_name = "native";
+            // Gensym the ident so it can't be named
+            let visible_rt_name = token::gensym_ident(visible_rt_name);
+            let actual_rt_name = token::intern_and_get_ident(actual_rt_name);
+
+            vis.push(ast::ViewItem {
+                node: ast::ViewItemExternCrate(visible_rt_name,
+                                               Some((actual_rt_name, ast::CookedStr)),
+                                               ast::DUMMY_NODE_ID),
+                attrs: Vec::new(),
+                vis: ast::Inherited,
+                span: DUMMY_SP
+            });
+        }
+
+        // `extern crate` must precede `use` items
+        mem::swap(&mut vis, &mut krate.module.view_items);
+        krate.module.view_items.push_all_move(vis);
+
+        // don't add #![no_std] here, that will block the prelude injection later.
+        // Add it during the prelude injection instead.
+
+        // Add #![feature(phase)] here, because we use #[phase] on extern crate std.
+        let feat_phase_attr = attr::mk_attr_inner(attr::mk_attr_id(),
+                                                  attr::mk_list_item(
+                                  InternedString::new("feature"),
+                                  vec![attr::mk_word_item(InternedString::new("phase"))],
+                              ));
+        // std_inject runs after feature checking so manually mark this attr
+        attr::mark_used(&feat_phase_attr);
+        krate.attrs.push(feat_phase_attr);
+
+        krate
+    }
+}
+
+// Runs the StandardLibraryInjector fold over the crate.
+fn inject_crates_ref(krate: ast::Crate,
+                     alt_std_name: Option<String>,
+                     any_exe: bool) -> ast::Crate {
+    let mut fold = StandardLibraryInjector {
+        alt_std_name: alt_std_name,
+        any_exe: any_exe,
+    };
+    fold.fold_crate(krate)
+}
+
+// Folder that adds `#![no_std]` and the per-module prelude glob import.
+struct PreludeInjector<'a>;
+
+
+impl<'a> fold::Folder for PreludeInjector<'a> {
+    fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
+        // Add #![no_std] here, so we don't re-inject when compiling pretty-printed source.
+        // This must happen here and not in StandardLibraryInjector because this
+        // fold happens second.
+
+        let no_std_attr = attr::mk_attr_inner(attr::mk_attr_id(),
+                                              attr::mk_word_item(InternedString::new("no_std")));
+        // std_inject runs after feature checking so manually mark this attr
+        attr::mark_used(&no_std_attr);
+        krate.attrs.push(no_std_attr);
+
+        if !no_prelude(krate.attrs.as_slice()) {
+            // only add `use std::prelude::*;` if there wasn't a
+            // `#![no_implicit_prelude]` at the crate level.
+            // fold_mod() will insert glob path.
+            let globs_attr = attr::mk_attr_inner(attr::mk_attr_id(),
+                                                 attr::mk_list_item(
+                InternedString::new("feature"),
+                vec!(
+                    attr::mk_word_item(InternedString::new("globs")),
+                )));
+            // std_inject runs after feature checking so manually mark this attr
+            attr::mark_used(&globs_attr);
+            krate.attrs.push(globs_attr);
+
+            krate.module = self.fold_mod(krate.module);
+        }
+        krate
+    }
+
+    fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
+        if !no_prelude(item.attrs.as_slice()) {
+            // only recur if there wasn't `#![no_implicit_prelude]`
+            // on this item, i.e. this means that the prelude is not
+            // implicitly imported through the whole subtree
+            fold::noop_fold_item(item, self)
+        } else {
+            SmallVector::one(item)
+        }
+    }
+
+    // Rebuilds the module's view items as: extern crates, then the prelude
+    // glob import, then the remaining `use` items.
+    fn fold_mod(&mut self, ast::Mod {inner, view_items, items}: ast::Mod) -> ast::Mod {
+        let prelude_path = ast::Path {
+            span: DUMMY_SP,
+            global: false,
+            segments: vec!(
+                ast::PathSegment {
+                    identifier: token::str_to_ident("std"),
+                    lifetimes: Vec::new(),
+                    types: OwnedSlice::empty(),
+                },
+                ast::PathSegment {
+                    identifier: token::str_to_ident("prelude"),
+                    lifetimes: Vec::new(),
+                    types: OwnedSlice::empty(),
+                }),
+        };
+
+        let (crates, uses) = view_items.partitioned(|x| {
+            match x.node {
+                ast::ViewItemExternCrate(..) => true,
+                _ => false,
+            }
+        });
+
+        // add prelude after any `extern crate` but before any `use`
+        let mut view_items = crates;
+        let vp = P(codemap::dummy_spanned(ast::ViewPathGlob(prelude_path, ast::DUMMY_NODE_ID)));
+        view_items.push(ast::ViewItem {
+            node: ast::ViewItemUse(vp),
+            attrs: vec![ast::Attribute {
+                span: DUMMY_SP,
+                node: ast::Attribute_ {
+                    id: attr::mk_attr_id(),
+                    style: ast::AttrOuter,
+                    value: P(ast::MetaItem {
+                        span: DUMMY_SP,
+                        node: ast::MetaWord(token::get_name(
+                                special_idents::prelude_import.name)),
+                    }),
+                    is_sugared_doc: false,
+                },
+            }],
+            vis: ast::Inherited,
+            span: DUMMY_SP,
+        });
+        view_items.push_all_move(uses);
+
+        fold::noop_fold_mod(ast::Mod {
+            inner: inner,
+            view_items: view_items,
+            items: items
+        }, self)
+    }
+}
+
+// Runs the PreludeInjector fold over the crate.
+fn inject_prelude(krate: ast::Crate) -> ast::Crate {
+    let mut fold = PreludeInjector;
+    fold.fold_crate(krate)
+}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Code that generates a test runner to run all the tests in a crate
+
+#![allow(dead_code)]
+#![allow(unused_imports)]
+
+use std::gc::{Gc, GC};
+use std::slice;
+use std::mem;
+use std::vec;
+use ast_util::*;
+use attr::AttrMetaMethods;
+use attr;
+use codemap::{DUMMY_SP, Span, ExpnInfo, NameAndSpan, MacroAttribute};
+use codemap;
+use diagnostic;
+use config;
+use ext::base::ExtCtxt;
+use ext::build::AstBuilder;
+use ext::expand::ExpansionConfig;
+use fold::{Folder, MoveMap};
+use fold;
+use owned_slice::OwnedSlice;
+use parse::token::InternedString;
+use parse::{token, ParseSess};
+use print::pprust;
+use {ast, ast_util};
+use ptr::P;
+use util::small_vector::SmallVector;
+
+// Everything recorded about a single #[test] or #[bench] function.
+struct Test {
+    span: Span,
+    path: Vec<ast::Ident> ,
+    bench: bool,
+    ignore: bool,
+    should_fail: bool
+}
+
+// Shared state accumulated while generating the test harness.
+struct TestCtxt<'a> {
+    sess: &'a ParseSess,
+    span_diagnostic: &'a diagnostic::SpanHandler,
+    // Module path to the item currently being folded.
+    path: Vec<ast::Ident>,
+    ext_cx: ExtCtxt<'a>,
+    // All tests collected so far, in crate order.
+    testfns: Vec<Test>,
+    reexport_test_harness_main: Option<InternedString>,
+    is_test_crate: bool,
+    config: ast::CrateConfig,
+
+    // top-level re-export submodule, filled out after folding is finished
+    toplevel_reexport: Option<ast::Ident>,
+}
+
+// Traverse the crate, collecting all the test functions, eliding any
+// existing main functions, and synthesizing a main test harness
+pub fn modify_for_testing(sess: &ParseSess,
+                          cfg: &ast::CrateConfig,
+                          krate: ast::Crate,
+                          span_diagnostic: &diagnostic::SpanHandler) -> ast::Crate {
+    // We generate the test harness when building in the 'test'
+    // configuration, either with the '--test' or '--cfg test'
+    // command line options.
+    let should_test = attr::contains_name(krate.config.as_slice(), "test");
+
+    // Check for #[reexport_test_harness_main = "some_name"] which
+    // creates a `use some_name = __test::main;`. This needs to be
+    // unconditional, so that the attribute is still marked as used in
+    // non-test builds.
+    let reexport_test_harness_main =
+        attr::first_attr_value_str_by_name(krate.attrs.as_slice(),
+                                           "reexport_test_harness_main");
+
+    if should_test {
+        generate_test_harness(sess, reexport_test_harness_main, krate, cfg, span_diagnostic)
+    } else {
+        // Not testing: strip #[test]/#[bench] items out instead.
+        strip_test_functions(krate)
+    }
+}
+
+// Folder that collects test functions and appends the synthesized
+// __test module to the crate.
+struct TestHarnessGenerator<'a> {
+    cx: TestCtxt<'a>,
+    // Tests found in the module currently being folded.
+    tests: Vec<ast::Ident>,
+
+    // submodule name, gensym'd identifier for re-exports
+    tested_submods: Vec<(ast::Ident, ast::Ident)>,
+}
+
+impl<'a> fold::Folder for TestHarnessGenerator<'a> {
+    // Fold the whole crate first, then append the generated __test module
+    // (and the optional harness-main re-export) at the crate root.
+    fn fold_crate(&mut self, c: ast::Crate) -> ast::Crate {
+        let mut folded = fold::noop_fold_crate(c, self);
+
+        // Add a special __test module to the crate that will contain code
+        // generated for the test harness
+        let (mod_, reexport) = mk_test_module(&mut self.cx);
+        folded.module.items.push(mod_);
+        match reexport {
+            Some(re) => folded.module.view_items.push(re),
+            None => {}
+        }
+        folded
+    }
+
+    fn fold_item(&mut self, i: P<ast::Item>) -> SmallVector<P<ast::Item>> {
+        self.cx.path.push(i.ident);
+        debug!("current path: {}",
+               ast_util::path_name_i(self.cx.path.as_slice()));
+
+        if is_test_fn(&self.cx, &*i) || is_bench_fn(&self.cx, &*i) {
+            match i.node {
+                ast::ItemFn(_, ast::UnsafeFn, _, _, _) => {
+                    let diag = self.cx.span_diagnostic;
+                    diag.span_fatal(i.span,
+                                    "unsafe functions cannot be used for \
+                                     tests");
+                }
+                _ => {
+                    debug!("this is a test function");
+                    let test = Test {
+                        span: i.span,
+                        path: self.cx.path.clone(),
+                        bench: is_bench_fn(&self.cx, &*i),
+                        ignore: is_ignored(&self.cx, &*i),
+                        should_fail: should_fail(&*i)
+                    };
+                    self.cx.testfns.push(test);
+                    self.tests.push(i.ident);
+                    // debug!("have {} test/bench functions",
+                    //        cx.testfns.len());
+                }
+            }
+        }
+
+        // We don't want to recurse into anything other than mods, since
+        // mods or tests inside of functions will break things
+        let res = match i.node {
+            ast::ItemMod(..) => fold::noop_fold_item(i, self),
+            _ => SmallVector::one(i),
+        };
+        self.cx.path.pop();
+        res
+    }
+
+    fn fold_mod(&mut self, m: ast::Mod) -> ast::Mod {
+        // Swap out the per-module lists, fold, then swap back, so that
+        // `tests`/`tested_submods` afterwards hold only this module's entries.
+        let tests = mem::replace(&mut self.tests, Vec::new());
+        let tested_submods = mem::replace(&mut self.tested_submods, Vec::new());
+        let mut mod_folded = fold::noop_fold_mod(m, self);
+        let tests = mem::replace(&mut self.tests, tests);
+        let tested_submods = mem::replace(&mut self.tested_submods, tested_submods);
+
+        // Remove any #[main] from the AST so it doesn't clash with
+        // the one we're going to add. Only if compiling an executable.
+
+        mod_folded.items = mem::replace(&mut mod_folded.items, vec![]).move_map(|item| {
+            item.map(|ast::Item {id, ident, attrs, node, vis, span}| {
+                ast::Item {
+                    id: id,
+                    ident: ident,
+                    attrs: attrs.into_iter().filter_map(|attr| {
+                        if !attr.check_name("main") {
+                            Some(attr)
+                        } else {
+                            None
+                        }
+                    }).collect(),
+                    node: node,
+                    vis: vis,
+                    span: span
+                }
+            })
+        });
+
+        if !tests.is_empty() || !tested_submods.is_empty() {
+            let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
+            mod_folded.items.push(it);
+
+            if !self.cx.path.is_empty() {
+                self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
+            } else {
+                debug!("pushing nothing, sym: {}", sym);
+                self.cx.toplevel_reexport = Some(sym);
+            }
+        }
+
+        mod_folded
+    }
+}
+
+// Builds the gensym'd `__test_reexports` module that publicly re-exports
+// this module's tests and the re-export modules of tested submodules.
+// Returns the item and its (gensym'd) name.
+fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
+                   tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (P<ast::Item>, ast::Ident) {
+    let mut view_items = Vec::new();
+    let super_ = token::str_to_ident("super");
+
+    view_items.extend(tests.into_iter().map(|r| {
+        cx.ext_cx.view_use_simple(DUMMY_SP, ast::Public,
+                                  cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
+    }));
+    view_items.extend(tested_submods.into_iter().map(|(r, sym)| {
+        let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
+        cx.ext_cx.view_use_simple_(DUMMY_SP, ast::Public, r, path)
+    }));
+
+    let reexport_mod = ast::Mod {
+        inner: DUMMY_SP,
+        view_items: view_items,
+        items: Vec::new(),
+    };
+
+    let sym = token::gensym_ident("__test_reexports");
+    let it = P(ast::Item {
+        ident: sym.clone(),
+        attrs: Vec::new(),
+        id: ast::DUMMY_NODE_ID,
+        node: ast::ItemMod(reexport_mod),
+        vis: ast::Public,
+        span: DUMMY_SP,
+    });
+
+    (it, sym)
+}
+
+// Sets up the TestCtxt and runs the TestHarnessGenerator fold over the crate.
+fn generate_test_harness(sess: &ParseSess,
+                         reexport_test_harness_main: Option<InternedString>,
+                         krate: ast::Crate,
+                         cfg: &ast::CrateConfig,
+                         sd: &diagnostic::SpanHandler) -> ast::Crate {
+    let mut cx: TestCtxt = TestCtxt {
+        sess: sess,
+        span_diagnostic: sd,
+        ext_cx: ExtCtxt::new(sess, cfg.clone(),
+                             ExpansionConfig {
+                                 deriving_hash_type_parameter: false,
+                                 crate_name: "test".to_string(),
+                             }),
+        path: Vec::new(),
+        testfns: Vec::new(),
+        reexport_test_harness_main: reexport_test_harness_main,
+        is_test_crate: is_test_crate(&krate),
+        config: krate.config.clone(),
+        toplevel_reexport: None,
+    };
+
+    // Push expansion info so spans of generated code trace back to "test".
+    cx.ext_cx.bt_push(ExpnInfo {
+        call_site: DUMMY_SP,
+        callee: NameAndSpan {
+            name: "test".to_string(),
+            format: MacroAttribute,
+            span: None
+        }
+    });
+
+    let mut fold = TestHarnessGenerator {
+        cx: cx,
+        tests: Vec::new(),
+        tested_submods: Vec::new(),
+    };
+    let res = fold.fold_crate(krate);
+    fold.cx.ext_cx.bt_pop();
+    return res;
+}
+
+fn strip_test_functions(krate: ast::Crate) -> ast::Crate {
+    // When not compiling with --test we should not compile the
+    // #[test] functions
+    config::strip_items(krate, |attrs| {
+        !attr::contains_name(attrs.as_slice(), "test") &&
+        !attr::contains_name(attrs.as_slice(), "bench")
+    })
+}
+
+// True if `i` is a #[test] function; reports an error for a #[test]
+// item whose signature is not `fn() -> ()` with no type parameters.
+fn is_test_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
+    let has_test_attr = attr::contains_name(i.attrs.as_slice(), "test");
+
+    fn has_test_signature(i: &ast::Item) -> bool {
+        match &i.node {
+            &ast::ItemFn(ref decl, _, _, ref generics, _) => {
+                let no_output = match decl.output.node {
+                    ast::TyNil => true,
+                    _ => false
+                };
+                decl.inputs.is_empty()
+                    && no_output
+                    && !generics.is_parameterized()
+            }
+            _ => false
+        }
+    }
+
+    if has_test_attr && !has_test_signature(i) {
+        let diag = cx.span_diagnostic;
+        diag.span_err(
+            i.span,
+            "functions used as tests must have signature fn() -> ()."
+        );
+    }
+
+    return has_test_attr && has_test_signature(i);
+}
+
+// True if `i` is a #[bench] function; reports an error for a #[bench]
+// item that does not take exactly one argument and return ().
+fn is_bench_fn(cx: &TestCtxt, i: &ast::Item) -> bool {
+    let has_bench_attr = attr::contains_name(i.attrs.as_slice(), "bench");
+
+    fn has_test_signature(i: &ast::Item) -> bool {
+        match i.node {
+            ast::ItemFn(ref decl, _, _, ref generics, _) => {
+                let input_cnt = decl.inputs.len();
+                let no_output = match decl.output.node {
+                    ast::TyNil => true,
+                    _ => false
+                };
+                let tparm_cnt = generics.ty_params.len();
+                // NB: inadequate check, but we're running
+                // well before resolve, can't get too deep.
+                input_cnt == 1u
+                    && no_output && tparm_cnt == 0u
+            }
+            _ => false
+        }
+    }
+
+    if has_bench_attr && !has_test_signature(i) {
+        let diag = cx.span_diagnostic;
+        diag.span_err(i.span, "functions used as benches must have signature \
+                      `fn(&mut Bencher) -> ()`");
+    }
+
+    return has_bench_attr && has_test_signature(i);
+}
+
+// True if `i` carries #[ignore], honoring an optional cfg list such as
+// #[ignore(cfg(foo, bar))] against the crate's configuration.
+fn is_ignored(cx: &TestCtxt, i: &ast::Item) -> bool {
+    i.attrs.iter().any(|attr| {
+        // check ignore(cfg(foo, bar))
+        attr.check_name("ignore") && match attr.meta_item_list() {
+            Some(ref cfgs) => {
+                attr::test_cfg(cx.config.as_slice(), cfgs.iter())
+            }
+            None => true
+        }
+    })
+}
+
+// True if `i` carries #[should_fail].
+fn should_fail(i: &ast::Item) -> bool {
+    attr::contains_name(i.attrs.as_slice(), "should_fail")
+}
+
+/*
+
+We're going to be building a module that looks more or less like:
+
+mod __test {
+ extern crate test (name = "test", vers = "...");
+ fn main() {
+ test::test_main_static(::os::args().as_slice(), tests)
+ }
+
+ static tests : &'static [test::TestDescAndFn] = &[
+ ... the list of tests in the crate ...
+ ];
+}
+
+*/
+
+// Builds the view item that links the test crate: a plain
+// `extern crate test` normally, or `pub use test = self::test` when the
+// crate under test *is* the test crate itself.
+fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
+    let id_test = token::str_to_ident("test");
+    let (vi, vis) = if cx.is_test_crate {
+        (ast::ViewItemUse(
+            P(nospan(ast::ViewPathSimple(id_test,
+                                         path_node(vec!(id_test)),
+                                         ast::DUMMY_NODE_ID)))),
+         ast::Public)
+    } else {
+        (ast::ViewItemExternCrate(id_test, None, ast::DUMMY_NODE_ID),
+         ast::Inherited)
+    };
+    ast::ViewItem {
+        node: vi,
+        attrs: Vec::new(),
+        vis: vis,
+        span: DUMMY_SP
+    }
+}
+
+// Builds the `__test` module (test-crate link, TESTS static, generated
+// main) and, if requested, the view item re-exporting `__test::main`.
+fn mk_test_module(cx: &mut TestCtxt) -> (P<ast::Item>, Option<ast::ViewItem>) {
+    // Link to test crate
+    let view_items = vec!(mk_std(cx));
+
+    // A constant vector of test descriptors.
+    let tests = mk_tests(cx);
+
+    // The synthesized main function which will call the console test runner
+    // with our list of tests
+    let mainfn = (quote_item!(&mut cx.ext_cx,
+        pub fn main() {
+            #![main]
+            use std::slice::Slice;
+            test::test_main_static(::std::os::args().as_slice(), TESTS);
+        }
+    )).unwrap();
+
+    let testmod = ast::Mod {
+        inner: DUMMY_SP,
+        view_items: view_items,
+        items: vec!(mainfn, tests),
+    };
+    let item_ = ast::ItemMod(testmod);
+
+    // Gensym the module name so user code cannot reference it.
+    let mod_ident = token::gensym_ident("__test");
+    let item = ast::Item {
+        ident: mod_ident,
+        attrs: Vec::new(),
+        id: ast::DUMMY_NODE_ID,
+        node: item_,
+        vis: ast::Public,
+        span: DUMMY_SP,
+    };
+    let reexport = cx.reexport_test_harness_main.as_ref().map(|s| {
+        // building `use <ident> = __test::main`
+        let reexport_ident = token::str_to_ident(s.get());
+
+        let use_path =
+            nospan(ast::ViewPathSimple(reexport_ident,
+                                       path_node(vec![mod_ident, token::str_to_ident("main")]),
+                                       ast::DUMMY_NODE_ID));
+
+        ast::ViewItem {
+            node: ast::ViewItemUse(P(use_path)),
+            attrs: vec![],
+            vis: ast::Inherited,
+            span: DUMMY_SP
+        }
+    });
+
+    debug!("Synthetic test module:\n{}\n", pprust::item_to_string(&item));
+
+    (P(item), reexport)
+}
+
+// Wraps a value in a Spanned with a dummy span.
+fn nospan<T>(t: T) -> codemap::Spanned<T> {
+    codemap::Spanned { node: t, span: DUMMY_SP }
+}
+
+// Builds a non-global ast::Path from a list of idents, dummy-spanned,
+// with no lifetimes or type parameters on any segment.
+fn path_node(ids: Vec<ast::Ident> ) -> ast::Path {
+    ast::Path {
+        span: DUMMY_SP,
+        global: false,
+        segments: ids.into_iter().map(|identifier| ast::PathSegment {
+            identifier: identifier,
+            lifetimes: Vec::new(),
+            types: OwnedSlice::empty(),
+        }).collect()
+    }
+}
+
+// Builds `static TESTS: &'static [self::test::TestDescAndFn] = &[...];`.
+fn mk_tests(cx: &TestCtxt) -> P<ast::Item> {
+    // The vector of test_descs for this crate
+    let test_descs = mk_test_descs(cx);
+
+    // FIXME #15962: should be using quote_item, but that stringifies
+    // __test_reexports, causing it to be reinterned, losing the
+    // gensym information.
+    let sp = DUMMY_SP;
+    let ecx = &cx.ext_cx;
+    let struct_type = ecx.ty_path(ecx.path(sp, vec![ecx.ident_of("self"),
+                                                    ecx.ident_of("test"),
+                                                    ecx.ident_of("TestDescAndFn")]),
+                                  None);
+    let static_lt = ecx.lifetime(sp, token::special_idents::static_lifetime.name);
+    // &'static [self::test::TestDescAndFn]
+    let static_type = ecx.ty_rptr(sp,
+                                  ecx.ty(sp, ast::TyVec(struct_type)),
+                                  Some(static_lt),
+                                  ast::MutImmutable);
+    // static TESTS: $static_type = &[...];
+    ecx.item_static(sp,
+                    ecx.ident_of("TESTS"),
+                    static_type,
+                    ast::MutImmutable,
+                    test_descs)
+}
+
+// True if the crate being compiled is itself named "test".
+fn is_test_crate(krate: &ast::Crate) -> bool {
+    match attr::find_crate_name(krate.attrs.as_slice()) {
+        Some(ref s) if "test" == s.get().as_slice() => true,
+        _ => false
+    }
+}
+
+// Builds the `&[TestDescAndFn { ... }, ...]` expression covering every
+// collected test.
+fn mk_test_descs(cx: &TestCtxt) -> P<ast::Expr> {
+    debug!("building test vector from {} tests", cx.testfns.len());
+
+    P(ast::Expr {
+        id: ast::DUMMY_NODE_ID,
+        node: ast::ExprAddrOf(ast::MutImmutable,
+            P(ast::Expr {
+                id: ast::DUMMY_NODE_ID,
+                node: ast::ExprVec(cx.testfns.iter().map(|test| {
+                    mk_test_desc_and_fn_rec(cx, test)
+                }).collect()),
+                span: DUMMY_SP,
+            })),
+        span: DUMMY_SP,
+    })
+}
+
+// Builds the `self::test::TestDescAndFn { desc, testfn }` expression
+// for a single collected test.
+fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> P<ast::Expr> {
+    // FIXME #15962: should be using quote_expr, but that stringifies
+    // __test_reexports, causing it to be reinterned, losing the
+    // gensym information.
+
+    let span = test.span;
+    let path = test.path.clone();
+    let ecx = &cx.ext_cx;
+    let self_id = ecx.ident_of("self");
+    let test_id = ecx.ident_of("test");
+
+    // creates self::test::$name
+    let test_path = |name| {
+        ecx.path(span, vec![self_id, test_id, ecx.ident_of(name)])
+    };
+    // creates $name: $expr
+    let field = |name, expr| ecx.field_imm(span, ecx.ident_of(name), expr);
+
+    debug!("encoding {}", ast_util::path_name_i(path.as_slice()));
+
+    // path to the #[test] function: "foo::bar::baz"
+    let path_string = ast_util::path_name_i(path.as_slice());
+    let name_expr = ecx.expr_str(span, token::intern_and_get_ident(path_string.as_slice()));
+
+    // self::test::StaticTestName($name_expr)
+    let name_expr = ecx.expr_call(span,
+                                  ecx.expr_path(test_path("StaticTestName")),
+                                  vec![name_expr]);
+
+    let ignore_expr = ecx.expr_bool(span, test.ignore);
+    let fail_expr = ecx.expr_bool(span, test.should_fail);
+
+    // self::test::TestDesc { ... }
+    let desc_expr = ecx.expr_struct(
+        span,
+        test_path("TestDesc"),
+        vec![field("name", name_expr),
+             field("ignore", ignore_expr),
+             field("should_fail", fail_expr)]);
+
+    // Route the function reference through the gensym'd re-export modules
+    // so private tests remain reachable from __test.
+    let mut visible_path = match cx.toplevel_reexport {
+        Some(id) => vec![id],
+        None => {
+            let diag = cx.span_diagnostic;
+            diag.handler.bug("expected to find top-level re-export name, but found None");
+        }
+    };
+    visible_path.extend(path.into_iter());
+
+    let fn_expr = ecx.expr_path(ecx.path_global(span, visible_path));
+
+    let variant_name = if test.bench { "StaticBenchFn" } else { "StaticTestFn" };
+    // self::test::$variant_name($fn_expr)
+    let testfn_expr = ecx.expr_call(span, ecx.expr_path(test_path(variant_name)), vec![fn_expr]);
+
+    // self::test::TestDescAndFn { ... }
+    ecx.expr_struct(span,
+                    test_path("TestDescAndFn"),
+                    vec![field("desc", desc_expr),
+                         field("testfn", testfn_expr)])
+}
use std::cmp::Equiv;
use std::fmt;
use std::hash::Hash;
-use std::mem;
use std::rc::Rc;
pub struct Interner<T> {
impl RcStr {
pub fn new(string: &str) -> RcStr {
RcStr {
- string: Rc::new(string.to_string()),
+ string: Rc::new(string.into_string()),
}
}
}
(*self.vect.borrow().get(idx.uint())).clone()
}
- /// Returns this string with lifetime tied to the interner. Since
- /// strings may never be removed from the interner, this is safe.
- pub fn get_ref<'a>(&'a self, idx: Name) -> &'a str {
- let vect = self.vect.borrow();
- let s: &str = vect.get(idx.uint()).as_slice();
- unsafe {
- mem::transmute(s)
- }
- }
-
pub fn len(&self) -> uint {
self.vect.borrow().len()
}
}
pub fn push_all(&mut self, other: SmallVector<T>) {
- for v in other.move_iter() {
+ for v in other.into_iter() {
self.push(v);
}
}
One(v) => v,
Many(v) => {
if v.len() == 1 {
- v.move_iter().next().unwrap()
+ v.into_iter().next().unwrap()
} else {
fail!(err)
}
}
}
+ /// Deprecated: use `into_iter`.
+ #[deprecated = "use into_iter"]
pub fn move_iter(self) -> MoveItems<T> {
+ self.into_iter()
+ }
+
+ pub fn into_iter(self) -> MoveItems<T> {
let repr = match self.repr {
Zero => ZeroIterator,
One(v) => OneIterator(v),
- Many(vs) => ManyIterator(vs.move_iter())
+ Many(vs) => ManyIterator(vs.into_iter())
};
MoveItems { repr: repr }
}
#[test]
fn test_from_iter() {
- let v: SmallVector<int> = (vec!(1i, 2, 3)).move_iter().collect();
+ let v: SmallVector<int> = (vec!(1i, 2, 3)).into_iter().collect();
assert_eq!(3, v.len());
assert_eq!(&1, v.get(0));
assert_eq!(&2, v.get(1));
#[test]
fn test_move_iter() {
let v = SmallVector::zero();
- let v: Vec<int> = v.move_iter().collect();
+ let v: Vec<int> = v.into_iter().collect();
assert_eq!(Vec::new(), v);
let v = SmallVector::one(1i);
- assert_eq!(vec!(1i), v.move_iter().collect());
+ assert_eq!(vec!(1i), v.into_iter().collect());
let v = SmallVector::many(vec!(1i, 2i, 3i));
- assert_eq!(vec!(1i, 2i, 3i), v.move_iter().collect());
+ assert_eq!(vec!(1i, 2i, 3i), v.into_iter().collect());
}
#[test]
fn visit_attribute(&mut self, _attr: &'v Attribute) {}
}
-pub fn walk_inlined_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v InlinedItem) {
+pub fn walk_inlined_item<'v,V>(visitor: &mut V, item: &'v InlinedItem)
+ where V: Visitor<'v> {
match *item {
IIItem(ref i) => visitor.visit_item(&**i),
IIForeign(ref i) => visitor.visit_foreign_item(&**i),
IITraitItem(_, ref ti) => visitor.visit_trait_item(ti),
- IIImplItem(_, MethodImplItem(ref m)) => walk_method_helper(visitor, &**m)
+ IIImplItem(_, MethodImplItem(ref m)) => {
+ walk_method_helper(visitor, &**m)
+ }
+ IIImplItem(_, TypeImplItem(ref typedef)) => {
+ visitor.visit_ident(typedef.span, typedef.ident);
+ visitor.visit_ty(&*typedef.typ);
+ }
}
}
MethodImplItem(ref method) => {
walk_method_helper(visitor, &**method)
}
+ TypeImplItem(ref typedef) => {
+ visitor.visit_ident(typedef.span, typedef.ident);
+ visitor.visit_ty(&*typedef.typ);
+ }
}
}
}
None => { }
}
}
+ TyQPath(ref qpath) => {
+ visitor.visit_ty(&*qpath.for_type);
+ visitor.visit_path(&qpath.trait_name, typ.id);
+ visitor.visit_ident(typ.span, qpath.item_name);
+ }
TyFixedLengthVec(ref ty, ref expression) => {
visitor.visit_ty(&**ty);
visitor.visit_expr(&**expression)
pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_method: &'v TraitItem) {
match *trait_method {
- RequiredMethod(ref method_type) => {
- visitor.visit_ty_method(method_type)
- }
+ RequiredMethod(ref method_type) => visitor.visit_ty_method(method_type),
ProvidedMethod(ref method) => walk_method_helper(visitor, &**method),
+ TypeTraitItem(ref associated_type) => {
+ visitor.visit_ident(associated_type.span, associated_type.ident)
+ }
}
}
visitor.visit_expr(&**main_expression);
visitor.visit_expr(&**index_expression)
}
+ ExprSlice(ref main_expression, ref start, ref end, _) => {
+ visitor.visit_expr(&**main_expression);
+ walk_expr_opt(visitor, start);
+ walk_expr_opt(visitor, end)
+ }
ExprPath(ref path) => {
visitor.visit_path(path, expression.id)
}
#[allow(missing_doc)]
#[deriving(Clone)]
pub enum Param {
- String(String),
+ Words(String),
Number(int)
}
Number(0), Number(0), Number(0), Number(0), Number(0),
Number(0), Number(0), Number(0), Number(0),
];
- for (dst, src) in mparams.mut_iter().zip(params.iter()) {
+ for (dst, src) in mparams.iter_mut().zip(params.iter()) {
*dst = (*src).clone();
}
'{' => state = IntConstant(0),
'l' => if stack.len() > 0 {
match stack.pop().unwrap() {
- String(s) => stack.push(Number(s.len() as int)),
- _ => return Err("a non-str was used with %l".to_string())
+ Words(s) => stack.push(Number(s.len() as int)),
+ _ => return Err("a non-str was used with %l".to_string())
}
} else { return Err("stack is empty".to_string()) },
'+' => if stack.len() > 1 {
return Err("non-number on stack with %s".to_string())
}
};
- let mut s: Vec<u8> = s.move_iter().collect();
+ let mut s: Vec<u8> = s.into_iter().collect();
if flags.precision > s.len() {
let mut s_ = Vec::with_capacity(flags.precision);
let n = flags.precision - s.len();
.to_ascii()
.to_upper()
.into_bytes()
- .move_iter()
+ .into_iter()
.collect();
if flags.alternate {
let s_ = replace(&mut s, vec!(b'0', b'X'));
}
s
}
- String(s) => {
+ Words(s) => {
match op {
FormatString => {
let mut s = Vec::from_slice(s.as_bytes());
#[cfg(test)]
mod test {
- use super::{expand,String,Variables,Number};
+ use super::{expand,Words,Variables,Number};
use std::result::Ok;
#[test]
assert!(res.is_err(),
"Op {} succeeded incorrectly with 0 stack entries", *cap);
let p = if *cap == "%s" || *cap == "%l" {
- String("foo".to_string())
+ Words("foo".to_string())
} else {
Number(97)
};
let mut varstruct = Variables::new();
let vars = &mut varstruct;
assert_eq!(expand(b"%p1%s%p2%2s%p3%2s%p4%.2s",
- [String("foo".to_string()),
- String("foo".to_string()),
- String("f".to_string()),
- String("foo".to_string())], vars),
+ [Words("foo".to_string()),
+ Words("foo".to_string()),
+ Words("f".to_string()),
+ Words("foo".to_string())], vars),
Ok("foofoo ffo".bytes().collect()));
- assert_eq!(expand(b"%p1%:-4.2s", [String("foo".to_string())], vars),
+ assert_eq!(expand(b"%p1%:-4.2s", [Words("foo".to_string())], vars),
Ok("fo ".bytes().collect()));
assert_eq!(expand(b"%p1%d%p1%.3d%p1%5d%p1%:+d", [Number(1)], vars),
// All benchmarks run at the end, in serial.
// (this includes metric fns)
- for b in filtered_benchs_and_metrics.move_iter() {
+ for b in filtered_benchs_and_metrics.into_iter() {
try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
run_test(opts, !opts.run_benchmarks, b, tx.clone());
let (test, result, stdout) = rx.recv();
filtered = match opts.filter {
None => filtered,
Some(ref re) => {
- filtered.move_iter()
+ filtered.into_iter()
.filter(|test| re.is_match(test.desc.name.as_slice())).collect()
}
};
None
}
};
- filtered.move_iter().filter_map(|x| filter(x)).collect()
+ filtered.into_iter().filter_map(|x| filter(x)).collect()
};
// Sort the tests alphabetically
match opts.test_shard {
None => filtered,
Some((a,b)) => {
- filtered.move_iter().enumerate()
+ filtered.into_iter().enumerate()
// note: using a - 1 so that the valid shards, for example, are
// 1.2 and 2.2 instead of 0.2 and 1.2
.filter(|&(i,_)| i % b == (a - 1))
}
let result_future = task.try_future(testfn);
- let stdout = reader.read_to_end().unwrap().move_iter().collect();
+ let stdout = reader.read_to_end().unwrap().into_iter().collect();
let task_result = result_future.unwrap();
let test_result = calc_result(&desc, task_result.is_ok());
monitor_ch.send((desc.clone(), test_result, stdout));
loop {
let loop_start = precise_time_ns();
- for p in samples.mut_iter() {
+ for p in samples.iter_mut() {
self.bench_n(n, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
let summ = stats::Summary::new(samples);
- for p in samples.mut_iter() {
+ for p in samples.iter_mut() {
self.bench_n(5 * n, |x| f(x));
*p = self.ns_per_iter() as f64;
};
let lo = percentile_of_sorted(tmp.as_slice(), pct);
let hundred: T = FromPrimitive::from_uint(100).unwrap();
let hi = percentile_of_sorted(tmp.as_slice(), hundred-pct);
- for samp in samples.mut_iter() {
+ for samp in samples.iter_mut() {
if *samp > hi {
*samp = hi
} else if *samp < lo {
unsafe fn os_get_time() -> (i64, i32) {
use std::ptr;
let mut tv = libc::timeval { tv_sec: 0, tv_usec: 0 };
- imp::gettimeofday(&mut tv, ptr::mut_null());
+ imp::gettimeofday(&mut tv, ptr::null_mut());
(tv.tv_sec as i64, tv.tv_usec * 1000)
}
#include "rustllvm.h"
#include "llvm/Object/Archive.h"
#include "llvm/Object/ObjectFile.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
#if LLVM_VERSION_MINOR >= 5
#include "llvm/IR/CallSite.h"
#endif
}
-extern "C" char *LLVMTypeToString(LLVMTypeRef Type) {
- std::string s;
- llvm::raw_string_ostream os(s);
+extern "C" void LLVMWriteTypeToString(LLVMTypeRef Type, RustStringRef str) {
+ raw_rust_string_ostream os(str);
unwrap<llvm::Type>(Type)->print(os);
- return strdup(os.str().data());
}
-extern "C" char *LLVMValueToString(LLVMValueRef Value) {
- std::string s;
- llvm::raw_string_ostream os(s);
+extern "C" void LLVMWriteValueToString(LLVMValueRef Value, RustStringRef str) {
+ raw_rust_string_ostream os(str);
os << "(";
unwrap<llvm::Value>(Value)->getType()->print(os);
os << ":";
unwrap<llvm::Value>(Value)->print(os);
os << ")";
- return strdup(os.str().data());
}
#if LLVM_VERSION_MINOR >= 5
LLVMRustArrayType(LLVMTypeRef ElementType, uint64_t ElementCount) {
return wrap(ArrayType::get(unwrap(ElementType), ElementCount));
}
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Twine, LLVMTwineRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DebugLoc, LLVMDebugLocRef)
+
+extern "C" void
+LLVMWriteTwineToString(LLVMTwineRef T, RustStringRef str) {
+ raw_rust_string_ostream os(str);
+ unwrap(T)->print(os);
+}
+
+extern "C" void
+LLVMUnpackOptimizationDiagnostic(
+ LLVMDiagnosticInfoRef di,
+ const char **pass_name_out,
+ LLVMValueRef *function_out,
+ LLVMDebugLocRef *debugloc_out,
+ LLVMTwineRef *message_out)
+{
+ // Undefined to call this not on an optimization diagnostic!
+ llvm::DiagnosticInfoOptimizationBase *opt
+ = static_cast<llvm::DiagnosticInfoOptimizationBase*>(unwrap(di));
+
+ *pass_name_out = opt->getPassName();
+ *function_out = wrap(&opt->getFunction());
+ *debugloc_out = wrap(&opt->getDebugLoc());
+ *message_out = wrap(&opt->getMsg());
+}
+
+extern "C" void LLVMWriteDiagnosticInfoToString(LLVMDiagnosticInfoRef di, RustStringRef str) {
+ raw_rust_string_ostream os(str);
+ DiagnosticPrinterRawOStream dp(os);
+ unwrap(di)->print(dp);
+}
+
+extern "C" int LLVMGetDiagInfoKind(LLVMDiagnosticInfoRef di) {
+ return unwrap(di)->getKind();
+}
+
+extern "C" void LLVMWriteDebugLocToString(
+ LLVMContextRef C,
+ LLVMDebugLocRef dl,
+ RustStringRef str)
+{
+ raw_rust_string_ostream os(str);
+ unwrap(dl)->print(*unwrap(C), os);
+}
#endif
void LLVMRustSetLastError(const char*);
+
+typedef struct OpaqueRustString *RustStringRef;
+typedef struct LLVMOpaqueTwine *LLVMTwineRef;
+typedef struct LLVMOpaqueDebugLoc *LLVMDebugLocRef;
+
+extern "C" void
+rust_llvm_string_write_impl(RustStringRef str, const char *ptr, size_t size);
+
+class raw_rust_string_ostream : public llvm::raw_ostream {
+ RustStringRef str;
+ uint64_t pos;
+
+ void write_impl(const char *ptr, size_t size) override {
+ rust_llvm_string_write_impl(str, ptr, size);
+ pos += size;
+ }
+
+ uint64_t current_pos() const override {
+ return pos;
+ }
+
+public:
+ explicit raw_rust_string_ostream(RustStringRef str)
+ : str(str), pos(0) { }
+
+ ~raw_rust_string_ostream() {
+ // LLVM requires this.
+ flush();
+ }
+};
+S 2014-09-16 828e075
+ winnt-x86_64 ce1e9d7f6967bfa368853e7c968e1626cc319951
+ winnt-i386 a8bd994666dfe683a5d7922c7998500255780724
+ linux-x86_64 88ff474db96c6ffc5c1dc7a43442cbe1cd88c8a2
+ linux-i386 7a731891f726c8a0590b142a4e8924c5e8b22e8d
+ freebsd-x86_64 e67a56f76484f775cd4836dedb2d1069ab5d7921
+ macos-i386 f48023648a77e89086f4a2b39d76b09e4fff032d
+ macos-x86_64 2ad6457b2b3036f87eae7581d64ee5341a07fb06
+
S 2014-09-10 6faa4f3
winnt-x86_64 939eb546469cb936441cff3b6f2478f562f77c46
winnt-i386 cfe4f8b519bb9d62588f9310a8f94bc919d5423b
- linux-x86_64 72c92895fa9a1dba7880073f2b2b5d0e3e1a2ab6
+ linux-x86_64 40e2ab1b67d0a2859f7da15e13bfd2748b50f0c7
linux-i386 6f5464c9ab191d93bfea0894ca7c6f90c3506f2b
freebsd-x86_64 648f35800ba98f1121d418b6d0c13c63b7a8951b
macos-i386 545fc45a0071142714639c6be377e6d308c3a4e1
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-stage1
+#![feature(plugin_registrar, managed_boxes, quote)]
+#![crate_type = "dylib"]
+
+extern crate syntax;
+extern crate rustc;
+
+use syntax::ast;
+use syntax::codemap;
+use syntax::ext::base::{ExtCtxt, MacResult, MacItems};
+use rustc::plugin::Registry;
+
+#[plugin_registrar]
+pub fn plugin_registrar(reg: &mut Registry) {
+ reg.register_macro("multiple_items", expand)
+}
+
+fn expand(cx: &mut ExtCtxt, _: codemap::Span, _: &[ast::TokenTree]) -> Box<MacResult+'static> {
+ MacItems::new(vec![
+ quote_item!(cx, struct Struct1;).unwrap(),
+ quote_item!(cx, struct Struct2;).unwrap()
+ ].into_iter())
+}
reg.register_macro("identity", expand_identity);
reg.register_syntax_extension(
token::intern("into_foo"),
- ItemModifier(box expand_into_foo));
+ Modifier(box expand_into_foo));
}
fn expand_make_a_1(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree])
pub fn b() {}
pub struct c;
pub enum d {}
+ pub type e = int;
pub struct A(());
pub fn reexported_b() {}
pub struct reexported_c;
pub enum reexported_d {}
+ pub type reexported_e = int;
}
pub mod bar {
pub use foo::reexported_b as f;
pub use foo::reexported_c as g;
pub use foo::reexported_d as h;
+ pub use foo::reexported_e as i;
}
pub static a: int = 0;
pub fn b() {}
pub struct c;
pub enum d {}
+pub type e = int;
-static i: int = 0;
-fn j() {}
-struct k;
-enum l {}
+static j: int = 0;
+fn k() {}
+struct l;
+enum m {}
+type n = int;
pub struct Struct;
pub enum Unit {
- Unit,
+ UnitVariant,
Argument(Struct)
}
} else if args.len() <= 1u {
vec!("".to_string(), "100000".to_string())
} else {
- args.move_iter().collect()
+ args.into_iter().collect()
};
let n = from_str::<uint>(args.get(1).as_slice()).unwrap();
server(&from_parent, &to_parent);
});
- for r in worker_results.move_iter() {
+ for r in worker_results.into_iter() {
r.unwrap();
}
} else if args.len() <= 1u {
vec!("".to_string(), "10000".to_string(), "4".to_string())
} else {
- args.move_iter().map(|x| x.to_string()).collect()
+ args.into_iter().map(|x| x.to_string()).collect()
};
println!("{}", args);
server(&from_parent, &to_parent);
});
- for r in worker_results.move_iter() {
+ for r in worker_results.into_iter() {
r.unwrap();
}
} else if args.len() <= 1u {
vec!("".to_string(), "10000".to_string(), "4".to_string())
} else {
- args.clone().move_iter().map(|x| x.to_string()).collect()
+ args.clone().into_iter().map(|x| x.to_string()).collect()
};
println!("{:?}", args);
} else if args.len() <= 1u {
vec!("".to_string(), "10".to_string(), "100".to_string())
} else {
- args.clone().move_iter().collect()
+ args.clone().into_iter().collect()
};
let num_tasks = from_str::<uint>(args.get(1).as_slice()).unwrap();
thread_ring(0, msg_per_task, num_chan, num_port);
// synchronize
- for f in futures.mut_iter() {
+ for f in futures.iter_mut() {
f.get()
}
} else if args.len() <= 1u {
vec!("".to_string(), "10".to_string(), "100".to_string())
} else {
- args.clone().move_iter().collect()
+ args.clone().into_iter().collect()
};
let num_tasks = from_str::<uint>(args.get(1).as_slice()).unwrap();
thread_ring(0, msg_per_task, num_chan, num_port);
// synchronize
- for f in futures.mut_iter() {
+ for f in futures.iter_mut() {
let _ = f.get();
}
let mut rng = StdRng::new().unwrap();
let mut rgradients = [Vec2 { x: 0.0, y: 0.0 }, ..256];
- for x in rgradients.mut_iter() {
+ for x in rgradients.iter_mut() {
*x = random_gradient(&mut rng);
}
let mut permutations = [0i32, ..256];
- for (i, x) in permutations.mut_iter().enumerate() {
+ for (i, x) in permutations.iter_mut().enumerate() {
*x = i as i32;
}
rng.shuffle(permutations);
} else if args.len() <= 1u {
vec!("".to_string(), "8".to_string())
} else {
- args.move_iter().collect()
+ args.into_iter().collect()
};
let n = from_str::<int>(args.get(1).as_slice()).unwrap();
println!("Ack(3,{}): {}\n", n, ack(3, n));
})
}).collect::<Vec<Future<String>>>();
- for message in messages.mut_iter() {
+ for message in messages.iter_mut() {
println!("{}", *message.get_ref());
}
fn rotate(x: &mut [i32]) {
let mut prev = x[0];
- for place in x.mut_iter().rev() {
+ for place in x.iter_mut().rev() {
prev = mem::replace(place, prev)
}
}
fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
for i in range(1, perm.len()) {
- rotate(perm.mut_slice_to(i + 1));
+ rotate(perm.slice_to_mut(i + 1));
let count_i = &mut count[i];
if *count_i >= i as i32 {
*count_i = 0;
fn get(&mut self, mut idx: i32) -> P {
let mut pp = [0u8, .. 16];
self.permcount = idx as u32;
- for (i, place) in self.perm.p.mut_iter().enumerate() {
+ for (i, place) in self.perm.p.iter_mut().enumerate() {
*place = i as i32 + 1;
}
let d = idx / self.fact[i] as i32;
self.cnt[i] = d;
idx %= self.fact[i] as i32;
- for (place, val) in pp.mut_iter().zip(self.perm.p.slice_to(i + 1).iter()) {
+ for (place, val) in pp.iter_mut().zip(self.perm.p.slice_to(i + 1).iter()) {
*place = (*val) as u8
}
fn reverse(tperm: &mut [i32], mut k: uint) {
- tperm.mut_slice_to(k).reverse()
+ tperm.slice_to_mut(k).reverse()
}
fn work(mut perm: Perm, n: uint, max: uint) -> (i32, i32) {
let mut checksum = 0;
let mut maxflips = 0;
- for fut in futures.mut_iter() {
+ for fut in futures.iter_mut() {
let (cs, mf) = fut.get();
checksum += cs;
maxflips = cmp::max(maxflips, mf);
copy_memory(buf.as_mut_slice(), alu);
let buf_len = buf.len();
- copy_memory(buf.mut_slice(alu_len, buf_len),
+ copy_memory(buf.slice_mut(alu_len, buf_len),
alu.slice_to(LINE_LEN));
let mut pos = 0;
fn make_lookup(a: &[AminoAcid]) -> [AminoAcid, ..LOOKUP_SIZE] {
let mut lookup = [ NULL_AMINO_ACID, ..LOOKUP_SIZE ];
let mut j = 0;
- for (i, slot) in lookup.mut_iter().enumerate() {
+ for (i, slot) in lookup.iter_mut().enumerate() {
while a[j].p < (i as f32) {
j += 1;
}
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// The Computer Language Benchmarks Game
+// http://benchmarksgame.alioth.debian.org/
//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// contributed by the Rust Project Developers
-/* -*- mode: rust; indent-tabs-mode: nil -*-
- * Implementation of 'fasta' benchmark from
- * Computer Language Benchmarks Game
- * http://shootout.alioth.debian.org/
- */
+// Copyright (c) 2012-2014 The Rust Project Developers
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// - Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of "The Computer Language Benchmarks Game" nor
+// the name of "The Computer Language Shootout Benchmarks" nor the
+// names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::io;
use std::io::{BufferedWriter, File};
struct AAGen<'a> {
rng: &'a mut MyRandom,
- data: Vec<(u32, u8)> }
+ data: Vec<(u32, u8)>
+}
impl<'a> AAGen<'a> {
fn new<'b>(rng: &'b mut MyRandom, aa: &[(char, f32)]) -> AAGen<'b> {
let mut cum = 0.;
} else if args.len() <= 1u {
vec!("".to_string(), "30".to_string())
} else {
- args.move_iter().collect()
+ args.into_iter().collect()
};
let n = from_str::<int>(args.get(1).as_slice()).unwrap();
println!("{}\n", fib(n));
let sizes = vec!(1u,2,3,4,6,12,18);
let mut streams = Vec::from_fn(sizes.len(), |_| Some(channel::<String>()));
let mut from_child = Vec::new();
- let to_child = sizes.iter().zip(streams.mut_iter()).map(|(sz, stream_ref)| {
+ let to_child = sizes.iter().zip(streams.iter_mut()).map(|(sz, stream_ref)| {
let sz = *sz;
let stream = replace(stream_ref, None);
let (to_parent_, from_child_) = stream.unwrap();
{
res.push_all(l.as_slice().trim().as_bytes());
}
- for b in res.mut_iter() {
+ for b in res.iter_mut() {
*b = b.to_ascii().to_upper().to_byte();
}
res
Future::spawn(proc() generate_frequencies(input.as_slice(), occ.len()))
}).collect();
- for (i, freq) in nb_freqs.move_iter() {
+ for (i, freq) in nb_freqs.into_iter() {
print_frequencies(&freq.unwrap(), i);
}
- for (&occ, freq) in OCCURRENCES.iter().zip(occ_freqs.move_iter()) {
+ for (&occ, freq) in OCCURRENCES.iter().zip(occ_freqs.into_iter()) {
print_occurrences(&mut freq.unwrap(), occ);
}
}
})
});
- for res in precalc_futures.move_iter() {
+ for res in precalc_futures.into_iter() {
let (rs, is) = res.unwrap();
precalc_r.push_all_move(rs);
precalc_i.push_all_move(is);
});
try!(writeln!(&mut out as &mut Writer, "P4\n{} {}", w, h));
- for res in data.move_iter() {
+ for res in data.into_iter() {
try!(out.write(res.unwrap().as_slice()));
}
out.flush()
}).collect();
// translating to (0, 0) as minimum coordinates.
- for cur_piece in res.mut_iter() {
+ for cur_piece in res.iter_mut() {
let (dy, dx) = *cur_piece.iter().min_by(|e| *e).unwrap();
- for &(ref mut y, ref mut x) in cur_piece.mut_iter() {
+ for &(ref mut y, ref mut x) in cur_piece.iter_mut() {
*y -= dy; *x -= dx;
}
}
// transformation must be taken except for one piece (piece 3
// here).
let transforms: Vec<Vec<Vec<(int, int)>>> =
- pieces.move_iter().enumerate()
+ pieces.into_iter().enumerate()
.map(|(id, p)| transform(p, id != 3))
.collect();
Some(bi) => bi,
None => break
};
- for bj in b_slice.mut_iter() {
+ for bj in b_slice.iter_mut() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
stress_task(i);
}));
}
- for r in results.move_iter() {
+ for r in results.into_iter() {
r.unwrap();
}
}
} else if args.len() <= 1u {
vec!("".to_string(), "8".to_string())
} else {
- args.move_iter().map(|x| x.to_string()).collect()
+ args.into_iter().map(|x| x.to_string()).collect()
};
let opts = parse_opts(args.clone());
+++ /dev/null
-// The Computer Language Benchmarks Game
-// http://benchmarksgame.alioth.debian.org/
-//
-// contributed by the Rust Project Developers
-
-// Copyright (c) 2013-2014 The Rust Project Developers
-//
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// - Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in
-// the documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of "The Computer Language Benchmarks Game" nor
-// the name of "The Computer Language Shootout Benchmarks" nor the
-// names of its contributors may be used to endorse or promote
-// products derived from this software without specific prior
-// written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-extern crate num;
-
-use std::from_str::FromStr;
-use std::num::One;
-use std::num::Zero;
-use std::num::FromPrimitive;
-use num::Integer;
-use num::bigint::BigInt;
-
-struct Context {
- numer: BigInt,
- accum: BigInt,
- denom: BigInt,
-}
-
-impl Context {
- fn new() -> Context {
- Context {
- numer: One::one(),
- accum: Zero::zero(),
- denom: One::one(),
- }
- }
-
- fn from_int(i: int) -> BigInt {
- FromPrimitive::from_int(i).unwrap()
- }
-
- fn extract_digit(&self) -> int {
- if self.numer > self.accum {return -1;}
- let (q, r) =
- (self.numer * Context::from_int(3) + self.accum)
- .div_rem(&self.denom);
- if r + self.numer >= self.denom {return -1;}
- q.to_int().unwrap()
- }
-
- fn next_term(&mut self, k: int) {
- let y2 = Context::from_int(k * 2 + 1);
- self.accum = (self.accum + (self.numer << 1)) * y2;
- self.numer = self.numer * Context::from_int(k);
- self.denom = self.denom * y2;
- }
-
- fn eliminate_digit(&mut self, d: int) {
- let d = Context::from_int(d);
- let ten = Context::from_int(10);
- self.accum = (self.accum - self.denom * d) * ten;
- self.numer = self.numer * ten;
- }
-}
-
-fn pidigits(n: int) {
- let mut k = 0;
- let mut context = Context::new();
-
- for i in range(1, n + 1) {
- let mut d;
- loop {
- k += 1;
- context.next_term(k);
- d = context.extract_digit();
- if d != -1 {break;}
- }
-
- print!("{}", d);
- if i % 10 == 0 {print!("\t:{}\n", i);}
-
- context.eliminate_digit(d);
- }
-
- let m = n % 10;
- if m != 0 {
- for _ in range(m, 10) { print!(" "); }
- print!("\t:{}\n", n);
- }
-}
-
-fn main() {
- let args = std::os::args();
- let args = args.as_slice();
- let n = if args.len() < 2 {
- 512
- } else {
- FromStr::from_str(args[1].as_slice()).unwrap()
- };
- pidigits(n);
-}
(regex!("Y"), "(c|t)"),
];
let mut seq = seq;
- for (re, replacement) in substs.move_iter() {
+ for (re, replacement) in substs.into_iter() {
seq = re.replace_all(seq.as_slice(), NoExpand(replacement));
}
seq.len()
regex!("agggtaa[cgt]|[acg]ttaccct"),
];
let (mut variant_strs, mut counts) = (vec!(), vec!());
- for variant in variants.move_iter() {
+ for variant in variants.into_iter() {
let seq_arc_copy = seq_arc.clone();
variant_strs.push(variant.to_string());
counts.push(Future::spawn(proc() {
('H', 'D'), ('D', 'H'), ('B', 'V'), ('N', 'N'),
('\n', '\n')];
let mut complements: [u8, ..256] = [0, ..256];
- for (i, c) in complements.mut_iter().enumerate() {
+ for (i, c) in complements.iter_mut().enumerate() {
*c = i as u8;
}
let lower = 'A' as u8 - 'a' as u8;
};
let mut data = data.unwrap();
- for seq in data.as_mut_slice().mut_split(|c| *c == '>' as u8) {
+ for seq in data.as_mut_slice().split_mut(|c| *c == '>' as u8) {
// skip header and last \n
let begin = match seq.iter().position(|c| *c == '\n' as u8) {
None => continue,
Some(c) => c
};
let len = seq.len();
- let seq = seq.mut_slice(begin + 1, len - 1);
+ let seq = seq.slice_mut(begin + 1, len - 1);
// arrange line breaks
let len = seq.len();
}
// reverse complement, as
- // seq.reverse(); for c in seq.mut_iter() {*c = complements[*c]}
+ // seq.reverse(); for c in seq.iter_mut() {*c = complements[*c]}
// but faster:
- let mut it = seq.mut_iter();
+ let mut it = seq.iter_mut();
loop {
match (it.next(), it.next_back()) {
(Some(front), Some(back)) => {
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
+// The Computer Language Benchmarks Game
+// http://benchmarksgame.alioth.debian.org/
//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
+// contributed by the Rust Project Developers
+
+// Copyright (c) 2012-2014 The Rust Project Developers
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// - Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in
+// the documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of "The Computer Language Benchmarks Game" nor
+// the name of "The Computer Language Shootout Benchmarks" nor the
+// names of its contributors may be used to endorse or promote
+// products derived from this software without specific prior
+// written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
// no-pretty-expanded FIXME #15189
} else if args.len() <= 1u {
vec!("".to_string(), "10000".to_string(), "50".to_string())
} else {
- args.move_iter().collect()
+ args.into_iter().collect()
};
let max = from_str::<uint>(args.get(1).as_slice()).unwrap();
let rep = from_str::<uint>(args.get(2).as_slice()).unwrap();
} else if args.len() <= 1 {
vec!("".to_string(), "100".to_string())
} else {
- args.clone().move_iter().collect()
+ args.clone().into_iter().collect()
};
let (tx, rx) = channel();
});
let child_start_chans: Vec<Sender<Sender<int>>> =
- wait_ports.move_iter().map(|port| port.recv()).collect();
+ wait_ports.into_iter().map(|port| port.recv()).collect();
let (start_port, start_chan) = stream::<Sender<int>>();
parent_wait_chan.send(start_chan);
let parent_result_chan: Sender<int> = start_port.recv();
let child_sum_ports: Vec<Reciever<int>> =
- child_start_chans.move_iter().map(|child_start_chan| {
+ child_start_chans.into_iter().map(|child_start_chan| {
let (child_sum_port, child_sum_chan) = stream::<int>();
child_start_chan.send(child_sum_chan);
child_sum_port
}).collect();
- let sum = child_sum_ports.move_iter().fold(0, |sum, sum_port| sum + sum_port.recv() );
+ let sum = child_sum_ports.into_iter().fold(0, |sum, sum_port| sum + sum_port.recv() );
parent_result_chan.send(sum + 1);
}
} else if args.len() <= 1u {
vec!("".to_string(), "10".to_string())
} else {
- args.move_iter().collect()
+ args.into_iter().collect()
};
let n = from_str::<uint>(args.get(1).as_slice()).unwrap();
let mut i = 0u;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let _x: int = [1i, 2, 3]; //~ ERROR expected int, found array
+
+ let x: &[int] = &[1, 2, 3];
+ let _y: &int = x; //~ ERROR expected int, found unsized array
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Get {
+ type Value; //~ ERROR associated types are experimental
+ fn get(&self) -> Get::Value;
+}
+
+struct Struct {
+ x: int,
+}
+
+impl Get for Struct {
+ type Value = int; //~ ERROR associated types are experimental
+ fn get(&self) -> int {
+ self.x
+ }
+}
+
+fn main() {
+ let s = Struct {
+ x: 100,
+ };
+ assert_eq!(s.get(), 100);
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> <Self as Get>::Value;
+}
+
+fn get<T:Get,U:Get>(x: T, y: U) -> Get::Value {}
+//~^ ERROR ambiguous associated type
+
+trait Other {
+ fn uhoh<U:Get>(&self, foo: U, bar: <Self as Get>::Value) {}
+ //~^ ERROR this associated type is not allowed in this context
+}
+
+impl<T:Get> Other for T {
+ fn uhoh<U:Get>(&self, foo: U, bar: <(T, U) as Get>::Value) {}
+ //~^ ERROR this associated type is not allowed in this context
+}
+
+trait Grab {
+ type Value;
+ fn grab(&self) -> Grab::Value;
+ //~^ ERROR ambiguous associated type
+}
+
+fn main() {
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> <Self as Get>::Value;
+}
+
+fn get(x: int) -> <int as Get>::Value {}
+//~^ ERROR this associated type is not allowed in this context
+
+struct Struct {
+ x: int,
+}
+
+impl Struct {
+ fn uhoh<T>(foo: <T as Get>::Value) {}
+ //~^ ERROR this associated type is not allowed in this context
+}
+
+fn main() {
+}
+
// except according to those terms.
fn foo<T:'static>() {
- 1u.bar::<T>(); //~ ERROR: does not fulfill `Send`
+ 1u.bar::<T>(); //~ ERROR `core::kinds::Send` is not implemented
}
trait bar {
pub fn main() {
let x: Vec<Trait + Sized> = Vec::new();
- //~^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
- //~^^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
- //~^^^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
+ //~^^ ERROR the trait `core::kinds::Sized` is not implemented
let x: Vec<Box<RefCell<Trait + Sized>>> = Vec::new();
- //~^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
- //~^^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
use std::rc::Rc;
pub fn main() {
- let _x = Rc::new(vec!(1i, 2)).move_iter();
+ let _x = Rc::new(vec!(1i, 2)).into_iter();
//~^ ERROR cannot move out of dereference of `&`-pointer
}
#![feature(overloaded_calls)]
-fn a<F:|&: int, int| -> int>(mut f: F) {
+fn a<F:Fn(int, int) -> int>(mut f: F) {
let g = &mut f;
f(1, 2); //~ ERROR cannot borrow `f` as immutable
//~^ ERROR cannot borrow `f` as immutable
}
-fn b<F:|&mut: int, int| -> int>(f: F) {
+fn b<F:FnMut(int, int) -> int>(f: F) {
f(1, 2); //~ ERROR cannot borrow immutable argument
}
-fn c<F:|: int, int| -> int>(f: F) {
+fn c<F:FnOnce(int, int) -> int>(f: F) {
f(1, 2);
f(1, 2); //~ ERROR use of moved value
}
trait Foo : Send+Sync { }
-impl <T: Sync> Foo for (T,) { } //~ ERROR cannot implement this trait
+impl <T: Sync+'static> Foo for (T,) { } //~ ERROR the trait `core::kinds::Send` is not implemented
-impl <T: Send> Foo for (T,T) { } //~ ERROR cannot implement this trait
+impl <T: Send> Foo for (T,T) { } //~ ERROR the trait `core::kinds::Sync` is not implemented
impl <T: Send+Sync> Foo for (T,T,T) { } // (ok)
impl <T:Sync> RequiresShare for X<T> { }
-impl <T:Sync> RequiresRequiresShareAndSend for X<T> { } //~ ERROR cannot implement this trait
+impl <T:Sync+'static> RequiresRequiresShareAndSend for X<T> { }
+//~^ ERROR the trait `core::kinds::Send` is not implemented
fn main() { }
// to use capabilities granted by builtin kinds as supertraits.
trait Foo : Sync+'static {
- fn foo(self, mut chan: Sender<Self>) {
- chan.send(self); //~ ERROR does not fulfill `Send`
- }
+ fn foo(self, mut chan: Sender<Self>) { }
}
impl <T: Sync> Foo for T { }
+//~^ ERROR the parameter type `T` may not live long enough
+//~^^ ERROR the parameter type `T` may not live long enough
fn main() {
let (tx, rx) = channel();
trait Foo : Send { }
impl <'a> Foo for &'a mut () { }
-//~^ ERROR which does not fulfill `Send`, cannot implement this trait
+//~^ ERROR does not fulfill the required lifetime
fn main() { }
trait Foo : Send { }
-impl <T: Sync> Foo for T { } //~ ERROR cannot implement this trait
+impl <T: Sync+'static> Foo for T { } //~ ERROR the trait `core::kinds::Send` is not implemented
fn main() { }
}
impl animal for cat {
- //~^ ERROR not all trait methods implemented, missing: `eat`
+ //~^ ERROR not all trait items implemented, missing: `eat`
}
fn cat(in_x : uint) -> cat {
fn main() {
// Type inference didn't use to be able to handle this:
foo(|| fail!());
+ foo(|| -> ! fail!());
foo(|| 22); //~ ERROR mismatched types
+ foo(|| -> ! 22); //~ ERROR mismatched types
+ let x = || -> ! 1; //~ ERROR mismatched types
}
fn test<T: Sync>() {}
fn main() {
- test::<Sender<int>>(); //~ ERROR: does not fulfill `Sync`
- test::<Receiver<int>>(); //~ ERROR: does not fulfill `Sync`
- test::<Sender<int>>(); //~ ERROR: does not fulfill `Sync`
+ test::<Sender<int>>(); //~ ERROR: `core::kinds::Sync` is not implemented
+ test::<Receiver<int>>(); //~ ERROR: `core::kinds::Sync` is not implemented
+ test::<Sender<int>>(); //~ ERROR: `core::kinds::Sync` is not implemented
}
extern crate trait_impl_conflict;
use trait_impl_conflict::Foo;
-impl<A> Foo for A {
-//~^ ERROR conflicting implementations for trait `trait_impl_conflict::Foo`
-//~^^ ERROR cannot provide an extension implementation where both trait and type
-// are not defined in this crate
+impl<A> Foo for A { //~ ERROR E0117
}
fn main() {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: conflicting implementations for trait `Foo`
trait Foo {
}
-impl Foo for int {
+impl Foo for int { //~ ERROR conflicting implementations
}
-impl<A> Foo for A {
+impl<A> Foo for A { //~ NOTE conflicting implementation here
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(unreachable_code)]
+
+fn main() {
+ let x: || -> ! = || fail!();
+ x();
+ println!("Foo bar"); //~ ERROR: unreachable statement
+}
#[deriving(Default)]
struct Struct {
- x: Error //~ ERROR
+ x: Error //~ ERROR `core::default::Default` is not implemented
}
fn main() {}
struct Error;
-#[deriving(Zero)] //~ ERROR failed to find an implementation
+#[deriving(Zero)] //~ ERROR not implemented
struct Struct {
- x: Error //~ ERROR failed to find an implementation
- //~^ ERROR failed to find an implementation
- //~^^ ERROR type `Error` does not implement any method in scope
+ x: Error
}
fn main() {}
struct Error;
-#[deriving(Zero)] //~ ERROR failed to find an implementation
+#[deriving(Zero)] //~ ERROR not implemented
struct Struct(
- Error //~ ERROR
- //~^ ERROR failed to find an implementation
- //~^^ ERROR type `Error` does not implement any method in scope
+ Error
);
fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
-
-type Foo = Vec<u8>;
-
-impl Drop for Foo {
-//~^ ERROR cannot provide an extension implementation
+impl Drop for int {
+ //~^ ERROR the Drop trait may only be implemented on structures
+ //~^^ ERROR cannot provide an extension implementation
fn drop(&mut self) {
println!("kaboom");
}
// Assignment.
let f5: &mut Fat<ToBar> = &mut Fat { f1: 5, f2: "some str", ptr: Bar1 {f :42} };
let z: Box<ToBar> = box Bar1 {f: 36};
- f5.ptr = *z; //~ ERROR dynamically sized type on lhs of assignment
- //~^ ERROR E0161
+ f5.ptr = *z;
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
let f5: &mut Fat<ToBar> = &mut Fat { f1: 5, f2: "some str", ptr: Bar1 {f :42} };
let z: Box<ToBar> = box Bar1 {f: 36};
f5.ptr = Bar1 {f: 36}; //~ ERROR mismatched types: expected `ToBar`, found `Bar1`
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented for the type `ToBar`
}
let f1 = Fat { ptr: Foo };
let f2: &Fat<Foo> = &f1;
let f3: &Fat<Bar> = f2;
- //~^ ERROR failed to find an implementation of trait Bar for Foo
+ //~^ ERROR the trait `Bar` is not implemented for the type `Foo`
}
// With a vec of ints.
let f1 = Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int, ..3]> = &f1;
- let f3: &mut Fat<[int]> = f2; //~ ERROR cannot borrow immutable dereference
+ let f3: &mut Fat<[int]> = f2; //~ ERROR mismatched types
// With a trait.
let f1 = Fat { ptr: Foo };
let f2: &Fat<Foo> = &f1;
- let f3: &mut Fat<Bar> = f2; //~ ERROR cannot borrow immutable dereference
+ let f3: &mut Fat<Bar> = f2; //~ ERROR mismatched types
}
let y: &T = x; //~ ERROR mismatched types
// Test that we cannot convert an immutable ptr to a mutable one using *-ptrs
- let x: &mut T = &S; //~ ERROR types differ in mutability
- let x: *mut T = &S; //~ ERROR types differ in mutability
- let x: *mut S = &S;
- //~^ ERROR mismatched types
+ let x: &mut T = &S; //~ ERROR mismatched types
+ let x: *mut T = &S; //~ ERROR mismatched types
+ let x: *mut S = &S; //~ ERROR mismatched types
// The below four sets of tests test that we cannot implicitly deref a *-ptr
// during a coercion.
let f: Fat<[int, ..3]> = Fat { ptr: [5i, 6, 7] };
let g: &Fat<[int]> = &f;
let h: &Fat<Fat<[int]>> = &Fat { ptr: *g };
- //~^ ERROR trying to initialise a dynamically sized struct
- //~^^ ERROR E0161
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that when you implement a trait that has a sized type
+// parameter, the corresponding value must be sized. Also that the
+// self type must be sized if appropriate.
+
+trait Foo<T> { fn take(self, x: &T) { } } // Note: T is sized
+
+impl Foo<[int]> for uint { }
+//~^ ERROR the trait `core::kinds::Sized` is not implemented for the type `[int]`
+
+impl Foo<int> for [uint] { }
+//~^ ERROR the trait `core::kinds::Sized` is not implemented for the type `[uint]`
+
+pub fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that enum variants are not actually types.
+
+enum Foo {
+ Bar
+}
+
+fn foo(x: Bar) {} //~ERROR found value name used as a type
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that enum variants are in the type namespace.
+
+enum Foo {
+ Foo //~ERROR duplicate definition of type or module `Foo`
+}
+
+enum Bar {
+ Baz
+}
+
+trait Baz {} //~ERROR duplicate definition of type or module `Baz`
+
+pub fn main() {}
fn check_bound<T:Copy>(_: T) {}
fn main() {
- check_bound("nocopy".to_string()); //~ ERROR does not fulfill `Copy`
+ check_bound("nocopy".to_string()); //~ ERROR the trait `core::kinds::Copy` is not implemented
}
fn main() {
format!("{:d}", "3");
- //~^ ERROR: failed to find an implementation of trait core::fmt::Signed
+ //~^ ERROR: the trait `core::fmt::Signed` is not implemented
}
fn get(&self) -> T;
}
-impl Getter<int> for int { //~ ERROR failed to find an implementation of trait Clone2 for int
+impl Getter<int> for int { //~ ERROR the trait `Clone2` is not implemented
fn get(&self) -> int { *self }
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::io;
+use std::vec;
+
+pub struct Container<'a> {
+ reader: &'a mut Reader //~ ERROR explicit lifetime bound required
+}
+
+impl<'a> Container<'a> {
+ pub fn wrap<'s>(reader: &'s mut Reader) -> Container<'s> {
+ Container { reader: reader }
+ }
+
+ pub fn read_to(&mut self, vec: &mut [u8]) {
+ self.reader.read(vec);
+ }
+}
+
+pub fn for_stdin<'a>() -> Container<'a> {
+ let mut r = io::stdin();
+ Container::wrap(&mut r as &mut Reader)
+}
+
+fn main() {
+ let mut c = for_stdin();
+ let mut v = vec::Vec::from_elem(10, 0u8);
+ c.read_to(v.as_mut_slice());
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn blah() -> int { //~ ERROR not all control paths return a value
+ 1i
+
+ ; //~ NOTE consider removing this semicolon:
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(struct_variant)]
+
+mod a {
+ pub enum Enum {
+ EnumStructVariant { x: u8, y: u8, z: u8 }
+ }
+
+ pub fn get_enum_struct_variant() -> () {
+ EnumStructVariant { x: 1, y: 2, z: 3 }
+//~^ ERROR mismatched types: expected `()`, found `a::Enum` (expected (), found enum a::Enum)
+ }
+}
+
+mod b {
+ mod test {
+ use a;
+
+ fn test_enum_struct_variant() {
+ let enum_struct_variant = ::a::get_enum_struct_variant();
+ match enum_struct_variant {
+ a::EnumStructVariant { x, y, z } => {
+ //~^ ERROR mismatched types: expected `()`, found a structure pattern
+ }
+ }
+ }
+ }
+}
+
+fn main() {}
let y: Gc<int> = box (GC) 0;
println!("{}", x + 1); //~ ERROR binary operation `+` cannot be applied to type `Box<int>`
- //~^ ERROR cannot determine a type for this bounded type parameter: unconstrained type
+ //~^ ERROR unable to infer enough type information
println!("{}", y + 1);
//~^ ERROR binary operation `+` cannot be applied to type `Gc<int>`
- //~^^ ERROR cannot determine a type for this bounded type parameter: unconstrained type
+ //~^^ ERROR unable to infer enough type information
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let mut array = [1, 2, 3];
+//~^ ERROR cannot determine a type for this local variable: cannot determine the type of this integ
+ let pie_slice = array.slice(1, 2);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub fn foo(params: Option<&[&str]>) -> uint {
+ params.unwrap().head().unwrap().len()
+}
+
+fn main() {
+ let name = "Foo";
+ let msg = foo(Some(&[name.as_slice()]));
+//~^ ERROR mismatched types: expected `core::option::Option<&[&str]>`
+ assert_eq!(msg, 3);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static FOO: uint = FOO; //~ ERROR recursive constant
+
+fn main() {
+ let _x: [u8, ..FOO]; // caused stack overflow prior to fix
+ let _y: uint = 1 + {
+ static BAR: uint = BAR; //~ ERROR recursive constant
+ let _z: [u8, ..BAR]; // caused stack overflow prior to fix
+ 1
+ };
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that the parser does not attempt to parse struct literals
+// within assignments in if expressions.
+
+struct Foo {
+ foo: uint
+}
+
+fn main() {
+ let x = 1u;
+ let y: Foo;
+
+ // `x { ... }` should not be interpreted as a struct literal here
+ if x = x {
+ //~^ ERROR mismatched types: expected `bool`, found `()` (expected bool, found ())
+ println!("{}", x);
+ }
+ // Explicit parentheses on the left should match behavior of above
+ if (x = x) {
+ //~^ ERROR mismatched types: expected `bool`, found `()` (expected bool, found ())
+ println!("{}", x);
+ }
+ // The struct literal interpretation is fine with explicit parentheses on the right
+ if y = (Foo { foo: x }) {
+ //~^ ERROR mismatched types: expected `bool`, found `()` (expected bool, found ())
+ println!("{}", x);
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-enum foo { foo(bar) }
+enum foo { foo_(bar) }
enum bar { bar_none, bar_some(bar) }
//~^ ERROR illegal recursive enum type; wrap the inner value in a box to make it representable
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-enum foo { foo(bar) }
+enum foo { foo_(bar) }
struct bar { x: bar }
//~^ ERROR illegal recursive struct type; wrap the inner value in a box to make it representable
//~^^ ERROR this type cannot be instantiated without an instance of itself
#[deriving(PartialEq)]
struct thing(uint);
-impl PartialOrd for thing { //~ ERROR not all trait methods implemented, missing: `partial_cmp`
+impl PartialOrd for thing { //~ ERROR not all trait items implemented, missing: `partial_cmp`
fn le(&self, other: &thing) -> bool { true }
fn ge(&self, other: &thing) -> bool { true }
}
name: int
}
-fn bar(_x: Foo) {} //~ ERROR variable `_x` has dynamically sized type
+fn bar(_x: Foo) {} //~ ERROR the trait `core::kinds::Sized` is not implemented
fn main() {}
trait I {}
type K = I+'static;
-fn foo(_x: K) {} //~ ERROR: variable `_x` has dynamically sized type
+fn foo(_x: K) {} //~ ERROR: the trait `core::kinds::Sized` is not implemented
fn main() {}
}
fn new_struct(r: A+'static) -> Struct {
- //~^ ERROR variable `r` has dynamically sized type
- Struct { r: r } //~ ERROR trying to initialise a dynamically sized struct
- //~^ ERROR E0161
- //~^^ ERROR E0161
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
+ Struct { r: r }
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
trait Curve {}
extern crate debug;
fn main() {
- format!("{:?}", None); //~ ERROR: cannot determine a type for this bounded
+ // Unconstrained type:
+ format!("{:?}", None); //~ ERROR: E0101
}
fn main() {
let a = A {v: box B{v: None} as Box<Foo+Send>};
- //~^ ERROR cannot pack type `Box<B>`, which does not fulfill `Send`, as a trait bounded by Send
- let v = Rc::new(RefCell::new(a));
- let w = v.clone();
- let b = &*v;
- let mut b = b.borrow_mut();
- b.v.set(w.clone());
+ //~^ ERROR the trait `core::kinds::Send` is not implemented for the type `B`
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let v = &[]; //~ ERROR cannot determine a type for this local variable: unconstrained type
+ let it = v.iter();
+}
assert_copy::<&'a [int]>();
// ...unless they are mutable
- assert_copy::<&'static mut int>(); //~ ERROR does not fulfill
- assert_copy::<&'a mut int>(); //~ ERROR does not fulfill
+ assert_copy::<&'static mut int>(); //~ ERROR `core::kinds::Copy` is not implemented
+ assert_copy::<&'a mut int>(); //~ ERROR `core::kinds::Copy` is not implemented
// ~ pointers are not ok
- assert_copy::<Box<int>>(); //~ ERROR does not fulfill
- assert_copy::<String>(); //~ ERROR does not fulfill
- assert_copy::<Vec<int> >(); //~ ERROR does not fulfill
- assert_copy::<Box<&'a mut int>>(); //~ ERROR does not fulfill
+ assert_copy::<Box<int>>(); //~ ERROR `core::kinds::Copy` is not implemented
+ assert_copy::<String>(); //~ ERROR `core::kinds::Copy` is not implemented
+ assert_copy::<Vec<int> >(); //~ ERROR `core::kinds::Copy` is not implemented
+ assert_copy::<Box<&'a mut int>>(); //~ ERROR `core::kinds::Copy` is not implemented
// borrowed object types are generally ok
assert_copy::<&'a Dummy>();
assert_copy::<&'static Dummy+Copy>();
// owned object types are not ok
- assert_copy::<Box<Dummy>>(); //~ ERROR does not fulfill
- assert_copy::<Box<Dummy+Copy>>(); //~ ERROR does not fulfill
+ assert_copy::<Box<Dummy>>(); //~ ERROR `core::kinds::Copy` is not implemented
+ assert_copy::<Box<Dummy+Copy>>(); //~ ERROR `core::kinds::Copy` is not implemented
// mutable object types are not ok
- assert_copy::<&'a mut Dummy+Copy>(); //~ ERROR does not fulfill
+ assert_copy::<&'a mut Dummy+Copy>(); //~ ERROR `core::kinds::Copy` is not implemented
// closures are like an `&mut` object
- assert_copy::<||>(); //~ ERROR does not fulfill
+ assert_copy::<||>(); //~ ERROR `core::kinds::Copy` is not implemented
// unsafe ptrs are ok
assert_copy::<*const int>();
assert_copy::<MyStruct>();
// structs containing non-POD are not ok
- assert_copy::<MyNoncopyStruct>(); //~ ERROR does not fulfill
+ assert_copy::<MyNoncopyStruct>(); //~ ERROR `core::kinds::Copy` is not implemented
// managed or ref counted types are not ok
- assert_copy::<Gc<int>>(); //~ ERROR does not fulfill
- assert_copy::<Rc<int>>(); //~ ERROR does not fulfill
+ assert_copy::<Gc<int>>(); //~ ERROR `core::kinds::Copy` is not implemented
+ assert_copy::<Rc<int>>(); //~ ERROR `core::kinds::Copy` is not implemented
}
pub fn main() {
fn main() {
let x = box 3i;
take_param(&x);
- //~^ ERROR instantiating a type parameter with an incompatible type
+ //~^ ERROR the trait `core::kinds::Copy` is not implemented
}
fn f<T>(val: T) {
let t: S<T> = S;
let a = &t as &Gettable<T>;
- //~^ ERROR instantiating a type parameter with an incompatible type `T`
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
+ //~^^ ERROR the trait `core::kinds::Copy` is not implemented
let a: &Gettable<T> = &t;
- //~^ ERROR instantiating a type parameter with an incompatible type `T`
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
+ //~^^ ERROR the trait `core::kinds::Copy` is not implemented
}
-fn main() {
- let t: S<&int> = S;
- let a = &t as &Gettable<&int>;
- //~^ ERROR instantiating a type parameter with an incompatible type
+fn foo<'a>() {
+ let t: S<&'a int> = S;
+ let a = &t as &Gettable<&'a int>;
let t: Box<S<String>> = box S;
let a = t as Box<Gettable<String>>;
- //~^ ERROR instantiating a type parameter with an incompatible type
+ //~^ ERROR the trait `core::kinds::Copy` is not implemented
let t: Box<S<String>> = box S;
let a: Box<Gettable<String>> = t;
- //~^ ERROR instantiating a type parameter with an incompatible type
+ //~^ ERROR the trait `core::kinds::Copy` is not implemented
}
+fn main() { }
fn main() {
let x = box 3i;
- take_param(&x); //~ ERROR does not fulfill `Copy`
+ take_param(&x); //~ ERROR `core::kinds::Copy` is not implemented
let y = &x;
- let z = &x as &Foo; //~ ERROR does not fulfill `Copy`
+ let z = &x as &Foo; //~ ERROR `core::kinds::Copy` is not implemented
}
fn foo<'a>() {
is_send::<proc()>();
- //~^ ERROR: instantiating a type parameter with an incompatible type
+ //~^ ERROR: the trait `core::kinds::Send` is not implemented
is_freeze::<proc()>();
- //~^ ERROR: instantiating a type parameter with an incompatible type
+ //~^ ERROR: the trait `core::kinds::Sync` is not implemented
}
fn main() { }
// careful with object types, who knows what they close over...
fn object_ref_with_static_bound_not_ok() {
- assert_send::<&'static Dummy+'static>(); //~ ERROR does not fulfill
+ assert_send::<&'static Dummy+'static>();
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
}
fn box_object_with_no_bound_not_ok<'a>() {
- assert_send::<Box<Dummy>>(); //~ ERROR does not fulfill
+ assert_send::<Box<Dummy>>(); //~ ERROR the trait `core::kinds::Send` is not implemented
}
fn proc_with_no_bound_not_ok<'a>() {
- assert_send::<proc()>(); //~ ERROR does not fulfill
+ assert_send::<proc()>(); //~ ERROR the trait `core::kinds::Send` is not implemented
}
fn closure_with_no_bound_not_ok<'a>() {
- assert_send::<||:'static>(); //~ ERROR does not fulfill
+ assert_send::<||:'static>(); //~ ERROR the trait `core::kinds::Send` is not implemented
}
fn object_with_send_bound_ok() {
// careful with object types, who knows what they close over...
fn test51<'a>() {
assert_send::<&'a Dummy>(); //~ ERROR does not fulfill the required lifetime
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
}
fn test52<'a>() {
assert_send::<&'a Dummy+Send>(); //~ ERROR does not fulfill the required lifetime
// them not ok
fn test_70<'a>() {
assert_send::<proc():'a>(); //~ ERROR does not fulfill the required lifetime
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
}
fn test_71<'a>() {
assert_send::<Box<Dummy+'a>>(); //~ ERROR does not fulfill the required lifetime
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
}
fn main() { }
trait Dummy { }
fn test50() {
- assert_send::<&'static Dummy>(); //~ ERROR does not fulfill `Send`
+ assert_send::<&'static Dummy>(); //~ ERROR the trait `core::kinds::Send` is not implemented
}
fn test53() {
- assert_send::<Box<Dummy>>(); //~ ERROR does not fulfill `Send`
+ assert_send::<Box<Dummy>>(); //~ ERROR the trait `core::kinds::Send` is not implemented
}
// ...unless they are properly bounded
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn assert_send<T:Send>() { }
+
+// unsafe ptrs are ok unless they point at unsendable things
+fn test70() {
+ assert_send::<*mut int>();
+}
+fn test71<'a>() {
+ assert_send::<*mut &'a int>(); //~ ERROR does not fulfill the required lifetime
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(unnecessary_import_braces)]
+#![allow(dead_code)]
+#![allow(unused_imports)]
+
+use test::{A}; //~ ERROR braces around A is unnecessary
+
+mod test {
+ pub struct A;
+}
+
+fn main() {}
}
}
+fn f5a() {
+ for x in range(1i, 10) { }
+ //~^ ERROR unused variable: `x`
+}
+
+fn f5b() {
+ for (x, _) in [1i, 2, 3].iter().enumerate() { }
+ //~^ ERROR unused variable: `x`
+}
+
+fn f5c() {
+ for (_, x) in [1i, 2, 3].iter().enumerate() {
+ //~^ ERROR unused variable: `x`
+ continue;
+ std::os::set_exit_status(*x); //~ WARNING unreachable statement
+ }
+}
+
fn main() {
}
let x: Box<HashMap<int, int>> = box HashMap::new();
let x: Box<Map<int, int>> = x;
let y: Box<Map<uint, int>> = box x;
- //~^ ERROR failed to find an implementation of trait collections::Map<uint,int>
- //~^^ ERROR failed to find an implementation of trait core::collections::Collection
+ //~^ ERROR the trait `collections::Map<uint,int>` is not implemented
}
fn main()
{
- foo(marker::NoCopy); //~ ERROR does not fulfill
+ foo(marker::NoCopy); //~ ERROR the trait `core::kinds::Copy` is not implemented
}
fn main()
{
- foo(marker::NoSend); //~ ERROR does not fulfill `Send`
+ foo(marker::NoSend); //~ ERROR the trait `core::kinds::Send` is not implemented
}
fn main()
{
- foo(marker::NoSync); //~ ERROR does not fulfill `Sync`
+ foo(marker::NoSync); //~ ERROR the trait `core::kinds::Sync` is not implemented
}
fn eq(&self, other: &int) -> bool { *self == *other }
}
-impl MyEq for A {} //~ ERROR not all trait methods implemented, missing: `eq`
+impl MyEq for A {} //~ ERROR not all trait items implemented, missing: `eq`
fn main() {
}
fn f20() {
let x = vec!("hi".to_string());
- consume(x.move_iter().next().unwrap());
+ consume(x.into_iter().next().unwrap());
touch(x.get(0)); //~ ERROR use of moved value: `x`
}
fn f100() {
let x = vec!("hi".to_string());
- let _y = x.move_iter().next().unwrap();
+ let _y = x.into_iter().next().unwrap();
touch(&x); //~ ERROR use of moved value: `x`
}
fn f110() {
let x = vec!("hi".to_string());
- let _y = [x.move_iter().next().unwrap(), ..1];
+ let _y = [x.into_iter().next().unwrap(), ..1];
touch(&x); //~ ERROR use of moved value: `x`
}
fn main() {
let x = RefCell::new(0i);
- f(x); //~ ERROR: which does not fulfill `Sync`
+ f(x); //~ ERROR `core::kinds::Sync` is not implemented
}
fn main() {
let x = A(marker::NoSync);
- bar(&x); //~ ERROR type parameter with an incompatible type
+ bar(&x); //~ ERROR the trait `core::kinds::Sync` is not implemented
}
let x = foo(Port(box(GC) ()));
task::spawn(proc() {
- let y = x; //~ ERROR does not fulfill `Send`
+ let y = x;
+ //~^ ERROR does not fulfill `Send`
println!("{:?}", y);
});
}
fn main() {
let x = A(marker::NoSend);
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type `Foo`,
- // which does not fulfill `Send`
+ //~^ ERROR `core::kinds::Send` is not implemented
}
fn main() {
let x = Rc::new(5i);
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type `alloc::rc::Rc<int>`,
- // which does not fulfill `Send`
+ //~^ ERROR `core::kinds::Send` is not implemented
}
fn main() {
let x = Foo { a: 5, ns: marker::NoSend };
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type `Foo`,
- // which does not fulfill `Send`
+ //~^ ERROR the trait `core::kinds::Send` is not implemented
}
fn main() {
let x = A(marker::NoSync);
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type `Foo`,
- // which does not fulfill `Sync`
+ //~^ ERROR the trait `core::kinds::Sync` is not implemented
}
fn main() {
let x = Rc::new(RefCell::new(5i));
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type
- // `std::rc::Rc<std::cell::RefCell<int>>`, which does not fulfill `Sync`
+ //~^ ERROR the trait `core::kinds::Sync` is not implemented
}
fn main() {
let x = Foo { a: 5, m: marker::NoSync };
bar(x);
- //~^ ERROR instantiating a type parameter with an incompatible type `Foo`,
- // which does not fulfill `Sync`
+ //~^ ERROR the trait `core::kinds::Sync` is not implemented
}
trait Foo {}
fn take_foo<F:Foo>(f: F) {}
-fn take_object(f: Box<Foo>) { take_foo(f); } //~ ERROR failed to find an implementation of trait
-//~^ ERROR failed to find an implementation
+fn take_object(f: Box<Foo>) { take_foo(f); }
+//~^ ERROR the trait `Foo` is not implemented
fn main() {}
{
// Can't do this copy
let x = box box box A {y: r(i)};
- let _z = x.clone(); //~ ERROR failed to find an implementation
- //~^ ERROR failed to find an implementation
+ let _z = x.clone(); //~ ERROR not implemented
println!("{:?}", x);
}
println!("{:?}", *i);
codemap::span {
lo: codemap::BytePos(0),
hi: codemap::BytePos(0),
- expn_info: None
+ expn_id: NO_EXPANSION
}
}
fn ident_of(st: &str) -> ast::ident {
codemap::span {
lo: codemap::BytePos(0),
hi: codemap::BytePos(0),
- expn_info: None
+ expn_id: codemap::NO_EXPANSION
}
}
fn ident_of(st: &str) -> ast::ident {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-enum Nil {Nil}
+enum Nil {NilValue}
struct Cons<T> {head:int, tail:T}
trait Dot {fn dot(&self, other:Self) -> int;}
impl Dot for Nil {
}
}
pub fn main() {
- let n = test(1, 0, Nil, Nil);
+ let n = test(1, 0, NilValue, NilValue);
println!("{}", n);
}
'a,
'b,
A:IsStatic,
- B:Is<'a>+Is2<'b>, //~ ERROR ambiguous lifetime bound
+ B:Is<'a>+Is2<'b>, // OK in a parameter, but not an object type.
C:'b+Is<'a>+Is2<'b>,
D:Is<'a>+Is2<'static>,
- E:'a+'b //~ ERROR only a single explicit lifetime bound is permitted
+ E:'a+'b // OK in a parameter, but not an object type.
>() { }
fn main() { }
// nominal types (but not on other types) and that they are type
// checked.
-#![no_std]
-
struct Inv<'a> { // invariant w/r/t 'a
x: &'a mut &'a int
}
fn object_with_random_bound_not_ok<'a>() {
assert_send::<&'a Dummy+'a>(); //~ ERROR does not fulfill
+ //~^ ERROR not implemented
}
fn object_with_send_bound_not_ok<'a>() {
fn proc_with_lifetime_not_ok<'a>() {
assert_send::<proc():'a>(); //~ ERROR does not fulfill
+ //~^ ERROR not implemented
}
fn closure_with_lifetime_not_ok<'a>() {
assert_send::<||:'a>(); //~ ERROR does not fulfill
+ //~^ ERROR not implemented
}
// unsafe pointers are ok unless they point at unsendable things
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![no_std]
-
// Check that explicit region bounds are allowed on the various
// nominal types (but not on other types) and that they are type
// checked.
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Various tests where we close over type parameters with multiple
+// lifetime bounds.
+
+trait SomeTrait { fn get(&self) -> int; }
+
+fn make_object_good1<'a,'b,A:SomeTrait+'a+'b>(v: A) -> Box<SomeTrait+'a> {
+ // A outlives 'a AND 'b...
+ box v as Box<SomeTrait+'a> // ...hence this type is safe.
+}
+
+fn make_object_good2<'a,'b,A:SomeTrait+'a+'b>(v: A) -> Box<SomeTrait+'b> {
+ // A outlives 'a AND 'b...
+ box v as Box<SomeTrait+'b> // ...hence this type is safe.
+}
+
+fn make_object_bad<'a,'b,'c,A:SomeTrait+'a+'b>(v: A) -> Box<SomeTrait+'c> {
+ // A outlives 'a AND 'b...but not 'c.
+ box v as Box<SomeTrait+'a> //~ ERROR mismatched types
+}
+
+fn main() {
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![no_std]
-
#![allow(dead_code)]
trait Deref {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![no_std]
-
fn a<'a, 'b:'a>(x: &mut &'a int, y: &mut &'b int) {
// Note: this is legal because of the `'b:'a` declaration.
*x = *y;
fn main() {
let a = Foo { x: 3 };
- let _ = [ a, ..5 ]; //~ ERROR copying a value of non-copyable type
+ let _ = [ a, ..5 ];
+ //~^ ERROR the trait `core::kinds::Copy` is not implemented for the type `Foo`
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test slicing expr[..] is an error and gives a helpful error message.
+
+struct Foo;
+
+fn main() {
+ let x = Foo;
+ x[..]; //~ ERROR incorrect slicing expression: `[..]`
+ //~^ NOTE use `expr[]` to construct a slice of the whole of expr
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that slicing syntax gives errors if we have not implemented the trait.
+
+struct Foo;
+
+fn main() {
+ let x = Foo;
+ x[]; //~ ERROR cannot take a slice of a value with type `Foo`
+ x[Foo..]; //~ ERROR cannot take a slice of a value with type `Foo`
+ x[..Foo]; //~ ERROR cannot take a slice of a value with type `Foo`
+ x[Foo..Foo]; //~ ERROR cannot take a slice of a value with type `Foo`
+ x[mut]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
+ x[mut Foo..]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
+ x[mut ..Foo]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
+ x[mut Foo..Foo]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that slicing expressions don't defeat the borrow checker.
+
+fn main() {
+ let y;
+ {
+ let x: &[int] = &[1, 2, 3, 4, 5]; //~ ERROR borrowed value does not live long enough
+ y = x[1..];
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test mutability and slicing syntax.
+
+fn main() {
+ let x: &[int] = &[1, 2, 3, 4, 5];
+ // Can't mutably slice an immutable slice
+ let y = x[mut 2..4]; //~ ERROR cannot take a mutable slice of a value with type `&[int]`
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test mutability and slicing syntax.
+
+fn main() {
+ let x: &[int] = &[1, 2, 3, 4, 5];
+ // Immutable slices are not mutable.
+ let y: &mut[_] = x[2..4]; //~ ERROR cannot borrow immutable dereference of `&`-pointer as mutabl
+
+ let x: &mut [int] = &mut [1, 2, 3, 4, 5];
+ // Can't borrow mutably twice
+ let y = x[mut 1..2];
+ let y = x[mut 4..5]; //~ERROR cannot borrow
+}
let x = *&x; //~ ERROR: cannot move out of dereference
let x = INIT_ATOMIC_UINT; //~ ERROR cannot move out of static item
let x = *&x; //~ ERROR: cannot move out of dereference
- let x: AtomicPtr<uint> = AtomicPtr::new(ptr::mut_null());
+ let x: AtomicPtr<uint> = AtomicPtr::new(ptr::null_mut());
let x = *&x; //~ ERROR: cannot move out of dereference
let x: AtomicOption<uint> = AtomicOption::empty();
let x = *&x; //~ ERROR: cannot move out of dereference
pub fn main() {
test_send::<rand::TaskRng>();
- //~^ ERROR: incompatible type `std::rand::TaskRng`, which does not fulfill `Send`
+ //~^ ERROR `core::kinds::Send` is not implemented
}
// This should emit the less confusing error, not the more confusing one.
fn foo(_x: Foo + Send) {
- //~^ERROR variable `_x` has dynamically sized type `Foo+Send`
+ //~^ERROR the trait `core::kinds::Sized` is not implemented
}
fn main() { }
fn main() {
let foo = Foo {
- //~^ ERROR failed to find an implementation
- //~^^ ERROR instantiating a type parameter with an incompatible type
+ //~^ ERROR not implemented
x: 3i
};
+
let baz: Foo<uint> = fail!();
- //~^ ERROR failed to find an implementation
- //~^^ ERROR instantiating a type parameter with an incompatible type
+ //~^ ERROR not implemented
}
}
static X: Foo<uint> = Foo {
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
x: 1,
};
use trait_bounds_on_structs_and_enums_xc::{Bar, Foo, Trait};
fn explode(x: Foo<uint>) {}
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
fn kaboom(y: Bar<f32>) {}
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
fn main() {
- let foo = Foo {
- //~^ ERROR failed to find an implementation
- //~^^ ERROR instantiating a type parameter with an incompatible type
- x: 3i
- };
- let bar: Bar<f64> = return;
- //~^ ERROR failed to find an implementation
- //~^^ ERROR instantiating a type parameter with an incompatible type
- let _ = bar;
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:trait_bounds_on_structs_and_enums_xc.rs
+
+extern crate trait_bounds_on_structs_and_enums_xc;
+
+use trait_bounds_on_structs_and_enums_xc::{Bar, Foo, Trait};
+
+fn main() {
+ let foo = Foo {
+ //~^ ERROR not implemented
+ x: 3i
+ };
+ let bar: Bar<f64> = return;
+ //~^ ERROR not implemented
+ let _ = bar;
+}
+
CBar(uint),
}
-fn explode(x: Foo<uint>) {}
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+fn explode(x: Foo<u32>) {}
+//~^ ERROR not implemented
fn kaboom(y: Bar<f32>) {}
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
-impl<T> Foo<T> { //~ ERROR failed to find an implementation
-//~^ ERROR instantiating a type parameter with an incompatible type
+impl<T> Foo<T> {
+//~^ ERROR the trait `Trait` is not implemented
fn uhoh() {}
}
struct Baz {
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
a: Foo<int>,
}
enum Boo {
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
Quux(Bar<uint>),
}
struct Badness<T> {
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
b: Foo<T>,
}
enum MoreBadness<T> {
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
EvenMoreBadness(Bar<T>),
}
struct Struct;
impl PolyTrait<Foo<uint>> for Struct {
-//~^ ERROR failed to find an implementation
-//~^^ ERROR instantiating a type parameter with an incompatible type
+//~^ ERROR not implemented
fn whatever() {}
}
fn main() {
- let bar: Bar<f64> = return;
- //~^ ERROR failed to find an implementation
- //~^^ ERROR instantiating a type parameter with an incompatible type
- let _ = bar;
}
fn main() {
let s: Box<Trait<int>> = box Struct { person: "Fred" };
- //~^ ERROR expected Trait<int>, found Trait<&'static str>
- //~^^ ERROR expected Trait<int>, found Trait<&'static str>
+ //~^ ERROR type mismatch
s.f(1);
}
// these compile as if Self: Tr<U>, even tho only Self: Tr<Self or T>
trait A: Tr<Self> {
fn test<U>(u: U) -> Self {
- Tr::op(u) //~ ERROR expected Tr<U>, found Tr<Self>
+ Tr::op(u) //~ ERROR type mismatch
}
}
trait B<T>: Tr<T> {
fn test<U>(u: U) -> Self {
- Tr::op(u) //~ ERROR expected Tr<U>, found Tr<T>
+ Tr::op(u) //~ ERROR type mismatch
}
}
let ns = NoSync{m: marker::NoSync};
test(ns);
- //~^ ERROR instantiating a type parameter with an incompatible type `NoSync`, which does not fulfill `Sync`
+ //~^ ERROR `core::kinds::Sync` is not implemented
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn f<F:Nonexist(int) -> int>(x: F) {} //~ ERROR unresolved trait
+
+type Typedef = int;
+
+fn g<F:Typedef(int) -> int>(x: F) {} //~ ERROR `Typedef` is not a trait
+
+fn main() {}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Trait {}
+
+fn f<F:Trait(int) -> int>(x: F) {}
+//~^ ERROR unboxed function trait must be one of `Fn`, `FnMut`, or `FnOnce`
+
+fn main() {}
+
pub fn main() {
let f = |&mut: x: uint, y: int| -> int { (x as int) + y };
- let z = call_it(3, f); //~ ERROR expected core::ops::FnMut
- //~^ ERROR expected core::ops::FnMut
+ let z = call_it(3, f); //~ ERROR type mismatch
println!("{}", z);
}
#![feature(lang_items, overloaded_calls, unboxed_closures)]
-fn c<F:|: int, int| -> int>(f: F) -> int {
+fn c<F:FnOnce(int, int) -> int>(f: F) -> int {
f(5, 6)
}
fn main() {
let z: int = 7;
assert_eq!(c(|&: x: int, y| x + y + z), 10);
- //~^ ERROR failed to find an implementation
+ //~^ ERROR not implemented
}
fn main() {
let i = box r { b: true };
- let _j = i.clone(); //~ ERROR failed to find an implementation
- //~^ ERROR failed to find an implementation
+ let _j = i.clone(); //~ ERROR not implemented
println!("{:?}", i);
}
fn main() {
let i = box box(GC) 100i;
- f(i); //~ ERROR does not fulfill `Send`
+ f(i); //~ ERROR `core::kinds::Send` is not implemented
}
let r1 = vec!(box r { i: i1 });
let r2 = vec!(box r { i: i2 });
f(r1.clone(), r2.clone());
- //~^ ERROR failed to find an implementation of
- //~^^ ERROR failed to find an implementation of
- //~^^^ ERROR failed to find an implementation of
- //~^^^^ ERROR failed to find an implementation of
+ //~^ ERROR the trait `core::clone::Clone` is not implemented
+ //~^^ ERROR the trait `core::clone::Clone` is not implemented
println!("{:?}", (r2, i1.get()));
println!("{:?}", (r1, i2.get()));
}
fn main() {
let cat = "kitty".to_string();
- let (tx, _) = channel(); //~ ERROR does not fulfill `Send`
- tx.send(foo(42, box(GC) (cat))); //~ ERROR does not fulfill `Send`
+ let (tx, _) = channel(); //~ ERROR `core::kinds::Send` is not implemented
+ tx.send(foo(42, box(GC) (cat))); //~ ERROR `core::kinds::Send` is not implemented
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: instantiating a type parameter with an incompatible type
fn bar<T: Sized>() { }
-fn foo<Sized? T>() { bar::<T>() }
+fn foo<Sized? T>() { bar::<T>() } //~ ERROR the trait `core::kinds::Sized` is not implemented
fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: instantiating a type parameter with an incompatible type
+enum Foo<T> { FooSome(T), FooNone }
+
fn bar<T: Sized>() { }
-fn foo<Sized? T>() { bar::<Option<T>>() }
+fn foo<Sized? T>() { bar::<Foo<T>>() }
+//~^ ERROR the trait `core::kinds::Sized` is not implemented
+//~^^ ERROR the trait `core::kinds::Sized` is not implemented
+//
+// One error is for T being provided to Foo<T>, the other is
+// for Foo<T> being provided to bar.
+
fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: instantiating a type parameter with an incompatible type
-
struct Foo<T> { data: T }
fn bar<T: Sized>() { }
fn foo<Sized? T>() { bar::<Foo<T>>() }
+//~^ ERROR the trait `core::kinds::Sized` is not implemented
+//~^^ ERROR the trait `core::kinds::Sized` is not implemented
+// One error is for the T in Foo<T>, the other is for Foo<T> as a value
+// for bar's type parameter.
+
fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Test sized-ness checking in substitution.
+// Test sized-ness checking in substitution within fn bodies.
// Unbounded.
fn f1<Sized? X>(x: &X) {
- f2::<X>(x); //~ ERROR instantiating a type parameter with an incompatible type `X`, which does n
+ f2::<X>(x);
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
fn f2<X>(x: &X) {
}
// Bounded.
trait T for Sized? {}
fn f3<Sized? X: T>(x: &X) {
- f4::<X>(x); //~ ERROR instantiating a type parameter with an incompatible type `X`, which does n
+ f4::<X>(x);
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
fn f4<X: T>(x: &X) {
}
fn f5<Y>(x: &Y) {}
fn f6<Sized? X>(x: &X) {}
fn f7<Sized? X>(x1: &E<X>, x2: &E<X>) {
- f5(x1); //~ERROR instantiating a type parameter with an incompatible type `E<X>`, which does not
+ f5(x1);
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
f6(x2); // ok
}
}
fn f8<Sized? X>(x1: &S<X>, x2: &S<X>) {
- f5(x1); //~ERROR instantiating a type parameter with an incompatible type `S<X>`, which does not
+ f5(x1);
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
f6(x2); // ok
}
// Test some tuples.
fn f9<Sized? X>(x1: Box<S<X>>, x2: Box<E<X>>) {
- f5(&(*x1, 34i)); //~ERROR E0161
- //~^ ERROR instantiating a type parameter with an incompatible type
- f5(&(32i, *x2)); //~ERROR E0161
- //~^ ERROR instantiating a type parameter with an incompatible type
+ f5(&(*x1, 34i));
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
+ f5(&(32i, *x2));
+ //~^ ERROR the trait `core::kinds::Sized` is not implemented
}
-// impl - bounded
-trait T1<Z: T> {
-}
-struct S3<Sized? Y>;
-impl<Sized? X: T> T1<X> for S3<X> { //~ ERROR instantiating a type parameter with an incompatible
-}
-
-// impl - unbounded
-trait T2<Z> {
-}
-impl<Sized? X> T2<X> for S3<X> { //~ ERROR instantiating a type parameter with an incompatible type
-}
-
-// impl - struct
-trait T3<Sized? Z> {
-}
-struct S4<Y>;
-impl<Sized? X> T3<X> for S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
-}
-impl<Sized? X> S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
-}
-
-
pub fn main() {
}
trait T for Sized? {}
fn f1<Sized? X>(x: &X) {
- let _: X; //~ERROR variable `_` has dynamically sized type `X`
- let _: (int, (X, int)); //~ERROR variable `_` has dynamically sized type `(int,(X,int))`
- let y: X; //~ERROR variable `y` has dynamically sized type `X`
- let y: (int, (X, int)); //~ERROR variable `y` has dynamically sized type `(int,(X,int))`
+ let _: X; // <-- this is OK, no bindings created, no initializer.
+ let _: (int, (X, int)); // same
+ let y: X; //~ERROR the trait `core::kinds::Sized` is not implemented
+ let y: (int, (X, int)); //~ERROR the trait `core::kinds::Sized` is not implemented
}
fn f2<Sized? X: T>(x: &X) {
- let _: X; //~ERROR variable `_` has dynamically sized type `X`
- let _: (int, (X, int)); //~ERROR variable `_` has dynamically sized type `(int,(X,int))`
- let y: X; //~ERROR variable `y` has dynamically sized type `X`
- let y: (int, (X, int)); //~ERROR variable `y` has dynamically sized type `(int,(X,int))`
+ let y: X; //~ERROR the trait `core::kinds::Sized` is not implemented
+ let y: (int, (X, int)); //~ERROR the trait `core::kinds::Sized` is not implemented
}
fn f3<Sized? X>(x1: Box<X>, x2: Box<X>, x3: Box<X>) {
- let y: X = *x1; //~ERROR variable `y` has dynamically sized type `X`
- let y = *x2; //~ERROR variable `y` has dynamically sized type `X`
- let (y, z) = (*x3, 4i); //~ERROR variable `y` has dynamically sized type `X`
- //~^ ERROR E0161
+ let y: X = *x1; //~ERROR the trait `core::kinds::Sized` is not implemented
+ let y = *x2; //~ERROR the trait `core::kinds::Sized` is not implemented
+ let (y, z) = (*x3, 4i); //~ERROR the trait `core::kinds::Sized` is not implemented
}
fn f4<Sized? X: T>(x1: Box<X>, x2: Box<X>, x3: Box<X>) {
- let y: X = *x1; //~ERROR variable `y` has dynamically sized type `X`
- let y = *x2; //~ERROR variable `y` has dynamically sized type `X`
- let (y, z) = (*x3, 4i); //~ERROR variable `y` has dynamically sized type `X`
- //~^ ERROR E0161
+ let y: X = *x1; //~ERROR the trait `core::kinds::Sized` is not implemented
+ let y = *x2; //~ERROR the trait `core::kinds::Sized` is not implemented
+ let (y, z) = (*x3, 4i); //~ERROR the trait `core::kinds::Sized` is not implemented
}
-fn g1<Sized? X>(x: X) {} //~ERROR variable `x` has dynamically sized type `X`
-fn g2<Sized? X: T>(x: X) {} //~ERROR variable `x` has dynamically sized type `X`
+fn g1<Sized? X>(x: X) {} //~ERROR the trait `core::kinds::Sized` is not implemented
+fn g2<Sized? X: T>(x: X) {} //~ERROR the trait `core::kinds::Sized` is not implemented
pub fn main() {
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test sized-ness checking in substitution in impls.
+
+trait T for Sized? {}
+
+// I would like these to fail eventually.
+// impl - bounded
+trait T1<Z: T> {
+}
+struct S3<Sized? Y>;
+impl<Sized? X: T> T1<X> for S3<X> {
+ //~^ ERROR `core::kinds::Sized` is not implemented for the type `X`
+}
+
+// impl - unbounded
+trait T2<Z> {
+}
+struct S4<Sized? Y>;
+impl<Sized? X> T2<X> for S4<X> {
+ //~^ ERROR `core::kinds::Sized` is not implemented for the type `X`
+}
+
+// impl - struct
+trait T3<Sized? Z> {
+}
+struct S5<Y>;
+impl<Sized? X> T3<X> for S5<X> { //~ ERROR not implemented
+}
+
+impl<Sized? X> S5<X> { //~ ERROR not implemented
+}
+
+
+fn main() { }
fn main() {
let mut xs: Vec<int> = vec!();
- for x in xs.mut_iter() {
+ for x in xs.iter_mut() {
xs.push(1i) //~ ERROR cannot borrow `xs`
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// error-pattern: failed to find an implementation
-
struct r {
i:int
}
let i = vec!(r(0));
let j = vec!(r(1));
let k = i + j;
+ //~^ ERROR not implemented
println!("{}", j);
+ //~^ ERROR not implemented
}
fn call_it<B:TraitB>(b: B) -> int {
let y = 4u;
- b.gimme_an_a(y) //~ ERROR failed to find an implementation of trait TraitA
- //~^ ERROR failed to find an implementation of trait TraitA
+ b.gimme_an_a(y) //~ ERROR the trait `TraitA` is not implemented
}
fn main() {
fn main() {
drop(equal(&Struct, &Struct))
- //~^ ERROR failed to find an implementation of trait core::cmp::Eq
- //~^^ ERROR failed to find an implementation of trait core::cmp::PartialEq
- //~^^^ ERROR failed to find an implementation of trait core::cmp::Eq
- //~^^^^ ERROR failed to find an implementation of trait core::cmp::PartialEq
+ //~^ ERROR the trait `core::cmp::Eq` is not implemented
}
static_priv_by_default::b;
static_priv_by_default::c;
foo::<static_priv_by_default::d>();
+ foo::<static_priv_by_default::e>();
// publicly re-exported items should be available
static_priv_by_default::bar::e;
static_priv_by_default::bar::f;
static_priv_by_default::bar::g;
foo::<static_priv_by_default::bar::h>();
+ foo::<static_priv_by_default::bar::i>();
// private items at the top should be inaccessible
- static_priv_by_default::i;
- //~^ ERROR: static `i` is private
static_priv_by_default::j;
- //~^ ERROR: function `j` is private
+ //~^ ERROR: static `j` is private
static_priv_by_default::k;
- //~^ ERROR: struct `k` is private
- foo::<static_priv_by_default::l>();
- //~^ ERROR: type `l` is private
+ //~^ ERROR: function `k` is private
+ static_priv_by_default::l;
+ //~^ ERROR: struct `l` is private
+ foo::<static_priv_by_default::m>();
+ //~^ ERROR: enum `m` is private
+ foo::<static_priv_by_default::n>();
+ //~^ ERROR: type `n` is private
// public items in a private mod should be inaccessible
static_priv_by_default::foo::a;
static_priv_by_default::foo::c;
//~^ ERROR: struct `c` is private
foo::<static_priv_by_default::foo::d>();
- //~^ ERROR: type `d` is private
+ //~^ ERROR: enum `d` is private
+ foo::<static_priv_by_default::foo::e>();
+ //~^ ERROR: type `e` is private
}
// older versions of GDB too. A more extensive test can be found in
// gdb-pretty-struct-and-enums.rs
-// ignore-test FIXME(#16919)
// ignore-tidy-linelength
// ignore-lldb
// ignore-android: FIXME(#10381)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-test FIXME(#16919)
// ignore-tidy-linelength
// ignore-lldb
// ignore-android: FIXME(#10381)
let mut_ref2 = (&mut mut_generic_struct, 0i32);
// Raw Pointers
- let mut_ptr1: (*mut Struct1, int) = (ptr::mut_null(), 0);
- let mut_ptr2: (*mut int, int) = (ptr::mut_null(), 0);
- let mut_ptr3: (*mut Mod1::Mod2::Enum3<Struct1>, int) = (ptr::mut_null(), 0);
+ let mut_ptr1: (*mut Struct1, int) = (ptr::null_mut(), 0);
+ let mut_ptr2: (*mut int, int) = (ptr::null_mut(), 0);
+ let mut_ptr3: (*mut Mod1::Mod2::Enum3<Struct1>, int) = (ptr::null_mut(), 0);
let const_ptr1: (*const Struct1, int) = (ptr::null(), 0);
let const_ptr2: (*const int, int) = (ptr::null(), 0);
// pp-exact
enum foo {
- foo, // a foo.
- bar,
+ bar, // a bar.
+ baz,
}
fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:explicit failure
+
+pub fn main() {
+ fail!(); println!("{}", 1i);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:bad input
+
+fn main() {
+ Some("foo").unwrap_or(fail!("bad input")).to_string();
+}
use std::sync::Arc;
-enum e<T> { e(Arc<T>) }
+enum e<T> { ee(Arc<T>) }
fn foo() -> e<int> {fail!();}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-stage1
+// aux-build:issue_16723_multiple_items_syntax_ext.rs
+#![feature(phase)]
+
+#[phase(plugin)] extern crate issue_16723_multiple_items_syntax_ext;
+
+multiple_items!()
+
+impl Struct1 {
+ fn foo() {}
+}
+impl Struct2 {
+ fn foo() {}
+}
+
+fn main() {
+ Struct1::foo();
+ Struct2::foo();
+ println!("hallo");
+}
codemap::span {
lo: codemap::BytePos(0),
hi: codemap::BytePos(0),
- expn_info: None
+ expn_id: codemap::NO_EXPANSION
}
}
fn ident_of(st: &str) -> ast::ident {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> &<Self as Get>::Value;
+ fn grab(&self) -> &<Self as Get>::Value {
+ self.get()
+ }
+}
+
+struct Struct {
+ x: int,
+}
+
+impl Get for Struct {
+ type Value = int;
+ fn get(&self) -> &int {
+ &self.x
+ }
+}
+
+fn main() {
+ let s = Struct {
+ x: 100,
+ };
+ assert_eq!(*s.grab(), 100);
+}
+
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> &<Self as Get>::Value;
+}
+
+struct Struct {
+ x: int,
+}
+
+impl Get for Struct {
+ type Value = int;
+ fn get(&self) -> &int {
+ &self.x
+ }
+}
+
+fn grab<T:Get>(x: &T) -> &<T as Get>::Value {
+ x.get()
+}
+
+fn main() {
+ let s = Struct {
+ x: 100,
+ };
+ assert_eq!(*grab(&s), 100);
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> &<Self as Get>::Value;
+}
+
+struct Struct {
+ x: int,
+}
+
+impl Get for Struct {
+ type Value = int;
+ fn get(&self) -> &int {
+ &self.x
+ }
+}
+
+trait Grab {
+ type U;
+ fn grab(&self) -> &<Self as Grab>::U;
+}
+
+impl<T:Get> Grab for T {
+ type U = <T as Get>::Value;
+ fn grab(&self) -> &<T as Get>::Value {
+ self.get()
+ }
+}
+
+fn main() {
+ let s = Struct {
+ x: 100,
+ };
+ assert_eq!(*s.grab(), 100);
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> &<Self as Get>::Value;
+}
+
+struct Struct {
+ x: int,
+}
+
+impl Get for Struct {
+ type Value = int;
+ fn get(&self) -> &int {
+ &self.x
+ }
+}
+
+impl Struct {
+ fn grab<T:Get>(x: &T) -> &<T as Get>::Value {
+ x.get()
+ }
+}
+
+fn main() {
+ let s = Struct {
+ x: 100,
+ };
+ assert_eq!(*Struct::grab(&s), 100);
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(associated_types)]
+
+trait Get {
+ type Value;
+ fn get(&self) -> &<Self as Get>::Value;
+}
+
+struct Struct {
+ x: int,
+}
+
+impl Get for Struct {
+ type Value = int;
+ fn get(&self) -> &int {
+ &self.x
+ }
+}
+
+fn main() {
+ let s = Struct {
+ x: 100,
+ };
+ assert_eq!(*s.get(), 100);
+}
+
// except according to those terms.
-fn f<T>(x: Vec<T>) -> T { return x.move_iter().next().unwrap(); }
+fn f<T>(x: Vec<T>) -> T { return x.into_iter().next().unwrap(); }
fn g(act: |Vec<int> | -> int) -> int { return act(vec!(1, 2, 3)); }
use std::gc::GC;
enum newtype {
- newtype(int)
+ newvar(int)
}
pub fn main() {
// specially.
let x = box(GC) Cell::new(5);
- let y = box(GC) Cell::new(newtype(3));
+ let y = box(GC) Cell::new(newvar(3));
let z = match y.get() {
- newtype(b) => {
+ newvar(b) => {
x.set(x.get() + 1);
x.get() * b
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+fn f(x: || -> !) -> ! {
+ x();
+}
+
+fn main() {
+ let x: || -> ! = || fail!();
+ let _y: || -> ! = || x();
+}
let a = A;
a.foo::<<'a>||>();
+
+ // issue #13490
+ let _ = || -> ! loop {};
+ let _ = proc() -> ! loop {};
}
struct B<T>;
#[deriving(PartialEq, Show)]
enum Bar<T> {
- Bar(T)
+ Baz(T)
}
pub fn main() {
let f: proc(int) -> Foo<int> = Foo;
assert_eq!(f(5), Foo(5));
- let f: |int| -> Bar<int> = Bar;
- assert_eq!(f(5), Bar(5));
+ let f: |int| -> Bar<int> = Baz;
+ assert_eq!(f(5), Baz(5));
- let f: proc(int) -> Bar<int> = Bar;
- assert_eq!(f(5), Bar(5));
+ let f: proc(int) -> Bar<int> = Baz;
+ assert_eq!(f(5), Baz(5));
let f: |int| -> Option<int> = Some;
assert_eq!(f(5), Some(5));
pub fn main() {
unsafe {
for &bare_fn in bare_fns.iter() { bare_fn() }
- for closure in closures.mut_iter() {
+ for closure in closures.iter_mut() {
let S(ref mut closure) = *closure;
(*closure)()
}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that destructor on a struct runs successfully after the struct
+// is boxed and converted to an object.
+
+static mut value: uint = 0;
+
+struct Cat {
+ name : uint,
+}
+
+trait Dummy {
+ fn get(&self) -> uint;
+}
+
+impl Dummy for Cat {
+ fn get(&self) -> uint { self.name }
+}
+
+impl Drop for Cat {
+ fn drop(&mut self) {
+ unsafe { value = self.name; }
+ }
+}
+
+pub fn main() {
+ {
+ let x = box Cat {name: 22};
+ let nyan: Box<Dummy> = x as Box<Dummy>;
+ }
+ unsafe {
+ assert_eq!(value, 22);
+ }
+}
let len = (&*c).f.len();
assert!(len == 3);
}
-}
\ No newline at end of file
+}
}
}
-fn call_it<F:|int|->int>(mut f: F, x: int) -> int {
+fn call_it<F:FnMut(int)->int>(mut f: F, x: int) -> int {
f.call_mut((x,)) + 3
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub mod two_tuple {
+ trait T {}
+ struct P<'a>(&'a T + 'a, &'a T + 'a);
+ pub fn f<'a>(car: &'a T, cdr: &'a T) -> P<'a> {
+ P(car, cdr)
+ }
+}
+
+pub mod two_fields {
+ trait T {}
+ struct P<'a> { car: &'a T + 'a, cdr: &'a T + 'a }
+ pub fn f<'a>(car: &'a T, cdr: &'a T) -> P<'a> {
+ P{ car: car, cdr: cdr }
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ if true {
+ proc(_) {}
+ } else {
+ proc(_: &mut ()) {}
+ };
+}
{
let mut fragments = vec![Fragment, Fragment, Fragment];
let _new_fragments: Vec<Fragment> = mem::replace(&mut fragments, vec![])
- .move_iter()
+ .into_iter()
.skip_while(|_fragment| {
true
}).collect();
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unsafe_destructor)]
+
+struct Leak<'a> {
+ dropped: &'a mut bool
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for Leak<'a> {
+ fn drop(&mut self) {
+ *self.dropped = true;
+ }
+}
+
+fn main() {
+ let mut dropped = false;
+ {
+ let leak = Leak { dropped: &mut dropped };
+ for ((), leaked) in Some(((),leak)).move_iter() {}
+ }
+
+ assert!(dropped);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static mut DROPPED: [bool, ..2] = [false, false];
+
+struct A(uint);
+struct Foo { _a: A, _b: int }
+
+impl Drop for A {
+ fn drop(&mut self) {
+ let A(i) = *self;
+ unsafe { DROPPED[i] = true; }
+ }
+}
+
+fn main() {
+ {
+ Foo {
+ _a: A(0),
+ ..Foo { _a: A(1), _b: 2 }
+ };
+ }
+ unsafe {
+ assert!(DROPPED[0]);
+ assert!(DROPPED[1]);
+ }
+}
int_value(i64),
}
-fn lookup(table: json::Object, key: String, default: String) -> String
+fn lookup(table: json::JsonObject, key: String, default: String) -> String
{
match table.find(&key.to_string()) {
option::Some(&json::String(ref s)) => {
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-enum y { x }
-
-enum x {}
-
-pub fn main() {}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-enum PureCounter { PureCounter(uint) }
+enum PureCounter { PureCounterVariant(uint) }
fn each(thing: PureCounter, blk: |v: &uint|) {
- let PureCounter(ref x) = thing;
+ let PureCounterVariant(ref x) = thing;
blk(x);
}
struct A;
-macro_rules! make_thirteen_method {() => (pub fn thirteen(&self)->int {13})}
+macro_rules! make_thirteen_method {() => (fn thirteen(&self)->int {13})}
impl A { make_thirteen_method!() }
fn main() {
}
fn issue_13731() {
- enum A { A(()) }
- static B: A = A(());
+ enum A { AA(()) }
+ static B: A = AA(());
- match A(()) {
+ match AA(()) {
B => ()
}
}
pub fn main() {
match 7 {
- s..e => (),
+ s...e => (),
_ => (),
}
}
fn test2() {
let mut ints = [0i, ..32];
- for i in ints.mut_iter() { *i += 22; }
+ for i in ints.iter_mut() { *i += 22; }
for i in ints.iter() { assert!(*i == 22); }
}
fn myvec_elt<X>(mv: myvec<X>) -> X {
let myvec(v) = mv;
- return v.move_iter().next().unwrap();
+ return v.into_iter().next().unwrap();
}
pub fn main() {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Testing creating two vtables with the same self type, but different
+// traits.
+
+use std::any::Any;
+use std::any::AnyRefExt;
+
+trait Wrap {
+ fn get(&self) -> int;
+ fn wrap(self: Box<Self>) -> Box<Any+'static>;
+}
+
+impl Wrap for int {
+ fn get(&self) -> int {
+ *self
+ }
+ fn wrap(self: Box<int>) -> Box<Any+'static> {
+ self as Box<Any+'static>
+ }
+}
+
+fn is<T:'static>(x: &Any) -> bool {
+ x.is::<T>()
+}
+
+fn main() {
+ let x = box 22i as Box<Wrap>;
+ println!("x={}", x.get());
+ let y = x.wrap();
+}
unsafe fn test_triangle() -> bool {
static COUNT : uint = 16;
- let mut ascend = Vec::from_elem(COUNT, ptr::mut_null());
+ let mut ascend = Vec::from_elem(COUNT, ptr::null_mut());
let ascend = ascend.as_mut_slice();
static ALIGN : uint = 1;
println!("type: {}", (*s).clone());
}
- let vec_types: Vec<String> = v.types.clone().move_iter().collect();
+ let vec_types: Vec<String> = v.types.clone().into_iter().collect();
assert_eq!(vec_types, vec!("bool".to_string(), "int".to_string(),
"i8".to_string(), "i16".to_string()));
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// A test where we (successfully) close over a reference into
+// an object.
+
+trait SomeTrait { fn get(&self) -> int; }
+
+impl<'a> SomeTrait for &'a int {
+ fn get(&self) -> int {
+ **self
+ }
+}
+
+fn make_object<'a,A:SomeTrait+'a>(v: A) -> Box<SomeTrait+'a> {
+ box v as Box<SomeTrait+'a>
+}
+
+fn main() {
+ let i: int = 22;
+ let obj = make_object(&i);
+ assert_eq!(22, obj.get());
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// exec-env:RUST_LOG=rust-log-filter/f.o
+
+#![feature(phase)]
+#[phase(plugin,link)]
+extern crate log;
+
+pub struct ChannelLogger {
+ tx: Sender<String>
+}
+
+impl ChannelLogger {
+ pub fn new() -> (Box<ChannelLogger>, Receiver<String>) {
+ let (tx, rx) = channel();
+ (box ChannelLogger { tx: tx }, rx)
+ }
+}
+
+impl log::Logger for ChannelLogger {
+ fn log(&mut self, record: &log::LogRecord) {
+ self.tx.send(format!("{}", record.args));
+ }
+}
+
+pub fn main() {
+ let (logger, rx) = ChannelLogger::new();
+
+ spawn(proc() {
+ log::set_logger(logger);
+
+ // our regex is "f.o"
+ // ensure it is a regex, and isn't anchored
+ info!("foo");
+ info!("bar");
+ info!("foo bar");
+ info!("bar foo");
+ info!("f1o");
+ });
+
+ assert_eq!(rx.recv().as_slice(), "foo");
+ assert_eq!(rx.recv().as_slice(), "foo bar");
+ assert_eq!(rx.recv().as_slice(), "bar foo");
+ assert_eq!(rx.recv().as_slice(), "f1o");
+ assert!(rx.recv_opt().is_err());
+}
assert_eq!(map.find(&Owned("def".to_string())), Some(&d));
assert!(map.pop(&Slice("foo")).is_some());
- assert_eq!(map.move_iter().map(|(k, v)| format!("{}{}", k, v))
+ assert_eq!(map.into_iter().map(|(k, v)| format!("{}{}", k, v))
.collect::<Vec<String>>()
.concat(),
"abc50bcd51cde52def53".to_string());
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test slicing expressions on slices and Vecs.
+
+fn main() {
+ let x: &[int] = &[1, 2, 3, 4, 5];
+ let cmp: &[int] = &[1, 2, 3, 4, 5];
+ assert!(x[] == cmp)
+ let cmp: &[int] = &[3, 4, 5];
+ assert!(x[2..] == cmp)
+ let cmp: &[int] = &[1, 2, 3];
+ assert!(x[..3] == cmp)
+ let cmp: &[int] = &[2, 3, 4];
+ assert!(x[1..4] == cmp)
+
+ let x: Vec<int> = vec![1, 2, 3, 4, 5];
+ let cmp: &[int] = &[1, 2, 3, 4, 5];
+ assert!(x[] == cmp)
+ let cmp: &[int] = &[3, 4, 5];
+ assert!(x[2..] == cmp)
+ let cmp: &[int] = &[1, 2, 3];
+ assert!(x[..3] == cmp)
+ let cmp: &[int] = &[2, 3, 4];
+ assert!(x[1..4] == cmp)
+
+ let x: &mut [int] = &mut [1, 2, 3, 4, 5];
+ {
+ let cmp: &mut [int] = &mut [1, 2, 3, 4, 5];
+ assert!(x[mut] == cmp)
+ }
+ {
+ let cmp: &mut [int] = &mut [3, 4, 5];
+ assert!(x[mut 2..] == cmp)
+ }
+ {
+ let cmp: &mut [int] = &mut [1, 2, 3];
+ assert!(x[mut ..3] == cmp)
+ }
+ {
+ let cmp: &mut [int] = &mut [2, 3, 4];
+ assert!(x[mut 1..4] == cmp)
+ }
+
+ let mut x: Vec<int> = vec![1, 2, 3, 4, 5];
+ {
+ let cmp: &mut [int] = &mut [1, 2, 3, 4, 5];
+ assert!(x[mut] == cmp)
+ }
+ {
+ let cmp: &mut [int] = &mut [3, 4, 5];
+ assert!(x[mut 2..] == cmp)
+ }
+ {
+ let cmp: &mut [int] = &mut [1, 2, 3];
+ assert!(x[mut ..3] == cmp)
+ }
+ {
+ let cmp: &mut [int] = &mut [2, 3, 4];
+ assert!(x[mut 1..4] == cmp)
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that if a slicing expr[..] fails, the correct cleanups happen.
+
+use std::task;
+
+struct Foo;
+
+static mut DTOR_COUNT: int = 0;
+
+impl Drop for Foo {
+ fn drop(&mut self) { unsafe { DTOR_COUNT += 1; } }
+}
+
+fn foo() {
+ let x: &[_] = &[Foo, Foo];
+ x[3..4];
+}
+
+fn main() {
+ let _ = task::try(proc() foo());
+ unsafe { assert!(DTOR_COUNT == 2); }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that if a slicing expr[..] fails, the correct cleanups happen.
+
+use std::task;
+
+struct Foo;
+
+static mut DTOR_COUNT: int = 0;
+
+impl Drop for Foo {
+ fn drop(&mut self) { unsafe { DTOR_COUNT += 1; } }
+}
+
+fn bar() -> uint {
+ fail!();
+}
+
+fn foo() {
+ let x: &[_] = &[Foo, Foo];
+ x[3..bar()];
+}
+
+fn main() {
+ let _ = task::try(proc() foo());
+ unsafe { assert!(DTOR_COUNT == 2); }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test slicing sugar.
+
+extern crate core;
+use core::ops::{Slice,SliceMut};
+
+static mut COUNT: uint = 0;
+
+struct Foo;
+
+impl Slice<Foo, Foo> for Foo {
+ fn as_slice_<'a>(&'a self) -> &'a Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+ fn slice_from_<'a>(&'a self, _from: &Foo) -> &'a Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+ fn slice_to_<'a>(&'a self, _to: &Foo) -> &'a Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+ fn slice_<'a>(&'a self, _from: &Foo, _to: &Foo) -> &'a Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+}
+
+impl SliceMut<Foo, Foo> for Foo {
+ fn as_mut_slice_<'a>(&'a mut self) -> &'a mut Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+ fn slice_from_mut_<'a>(&'a mut self, _from: &Foo) -> &'a mut Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+ fn slice_to_mut_<'a>(&'a mut self, _to: &Foo) -> &'a mut Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+ fn slice_mut_<'a>(&'a mut self, _from: &Foo, _to: &Foo) -> &'a mut Foo {
+ unsafe { COUNT += 1; }
+ self
+ }
+}
+fn main() {
+ let mut x = Foo;
+ x[];
+ x[Foo..];
+ x[..Foo];
+ x[Foo..Foo];
+ x[mut];
+ x[mut Foo..];
+ x[mut ..Foo];
+ x[mut Foo..Foo];
+ unsafe {
+ assert!(COUNT == 8);
+ }
+}
extern crate debug;
enum a_tag {
- a_tag(u64)
+ a_tag_var(u64)
}
struct t_rec {
}
pub fn main() {
- let x = t_rec {c8: 22u8, t: a_tag(44u64)};
+ let x = t_rec {c8: 22u8, t: a_tag_var(44u64)};
let y = format!("{:?}", x);
println!("y = {}", y);
- assert_eq!(y, "t_rec{c8: 22u8, t: a_tag(44u64)}".to_string());
+ assert_eq!(y, "t_rec{c8: 22u8, t: a_tag_var(44u64)}".to_string());
}
use std::mem;
enum Tag {
- Tag(u64)
+ TagInner(u64)
}
struct Rec {
}
fn mk_rec() -> Rec {
- return Rec { c8:0u8, t:Tag(0u64) };
+ return Rec { c8:0u8, t:TagInner(0u64) };
}
fn is_8_byte_aligned(u: &Tag) -> bool {
}
// Join spawned tasks...
- for r in results.mut_iter() { r.get_ref(); }
+ for r in results.iter_mut() { r.get_ref(); }
println!("Completed: Final number is: ");
println!("{:?}", sum);
#![feature(lang_items, overloaded_calls, unboxed_closures)]
-fn a<F:|&: int, int| -> int>(f: F) -> int {
+fn a<F:Fn(int, int) -> int>(f: F) -> int {
f(1, 2)
}
-fn b<F:|&mut: int, int| -> int>(mut f: F) -> int {
+fn b<F:FnMut(int, int) -> int>(mut f: F) -> int {
f(3, 4)
}
-fn c<F:|: int, int| -> int>(f: F) -> int {
+fn c<F:FnOnce(int, int) -> int>(f: F) -> int {
f(5, 6)
}
}
}
-fn a<F:|&: int, int| -> int>(f: F) -> int {
+fn a<F:Fn(int, int) -> int>(f: F) -> int {
f(1, 2)
}
-fn b<F:|&mut: int, int| -> int>(mut f: F) -> int {
+fn b<F:FnMut(int, int) -> int>(mut f: F) -> int {
f(3, 4)
}
-fn c<F:|: int, int| -> int>(f: F) -> int {
+fn c<F:FnOnce(int, int) -> int>(f: F) -> int {
f(5, 6)
}
#![feature(overloaded_calls, unboxed_closures)]
-fn a<F:|&: int, int| -> int>(f: F) -> int {
+fn a<F:Fn(int, int) -> int>(f: F) -> int {
f(1, 2)
}
-fn b<F:|&mut: int, int| -> int>(mut f: F) -> int {
+fn b<F:FnMut(int, int) -> int>(mut f: F) -> int {
f(3, 4)
}
-fn c<F:|: int, int| -> int>(f: F) -> int {
+fn c<F:FnOnce(int, int) -> int>(f: F) -> int {
f(5, 6)
}
use std::ptr;
-pub fn replace_map<'a, T, F>(src: &mut T, prod: F)
-where F: |: T| -> T {
+pub fn replace_map<'a, T, F>(src: &mut T, prod: F) where F: FnOnce(T) -> T {
unsafe { *src = prod(ptr::read(src as *mut T as *const T)); }
}
fn sub_expr() {
// Test for a &[T] => &&[T] coercion in sub-expression position
// (surpisingly, this can cause errors which are not caused by either of:
- // `let x = vec.mut_slice(0, 2);`
- // `foo(vec.mut_slice(0, 2));` ).
+ // `let x = vec.slice_mut(0, 2);`
+ // `foo(vec.slice_mut(0, 2));` ).
let mut vec: Vec<int> = vec!(1, 2, 3, 4);
let b: &mut [int] = [1, 2];
- assert!(vec.mut_slice(0, 2) == b);
+ assert!(vec.slice_mut(0, 2) == b);
}
fn index() {
struct Bencher;
// ICE
-fn warm_up<'a, F>(f: F) where F: |&: &'a mut Bencher| {
+fn warm_up<'a, F>(f: F) where F: Fn(&'a mut Bencher) {
}
fn main() {
extern crate xcrate_unit_struct;
static s1: xcrate_unit_struct::Struct = xcrate_unit_struct::Struct;
-static s2: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit;
+static s2: xcrate_unit_struct::Unit = xcrate_unit_struct::UnitVariant;
static s3: xcrate_unit_struct::Unit =
xcrate_unit_struct::Argument(xcrate_unit_struct::Struct);
static s4: xcrate_unit_struct::Unit = xcrate_unit_struct::Argument(s1);
pub fn main() {
f1(xcrate_unit_struct::Struct);
- f2(xcrate_unit_struct::Unit);
+ f2(xcrate_unit_struct::UnitVariant);
f2(xcrate_unit_struct::Argument(xcrate_unit_struct::Struct));
f1(s1);