#
# * `VERBOSE=1` - Print all commands. Use this to see what's going on.
# * `RUSTFLAGS=...` - Add compiler flags to all `rustc` invocations
+# * `JEMALLOC_FLAGS=...` - Pass flags to jemalloc's configure script
#
# * `TESTNAME=...` - Specify the name of tests to run
# * `CHECK_IGNORED=1` - Run normally-ignored tests
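#
# For example, a hypothetical invocation combining several of these
# (assuming an already-configured build tree):
#
#   make check VERBOSE=1 TESTNAME=vec RUSTFLAGS=-g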
CFG_RUSTC_FLAGS := $(RUSTFLAGS)
CFG_GCCISH_CFLAGS :=
CFG_GCCISH_LINK_FLAGS :=
+CFG_JEMALLOC_FLAGS :=
ifdef CFG_DISABLE_OPTIMIZE
$(info cfg: disabling rustc optimization (CFG_DISABLE_OPTIMIZE))
CFG_RUSTC_FLAGS +=
+ CFG_JEMALLOC_FLAGS += --enable-debug
else
# The rtopt cfg turns off runtime sanity checks
CFG_RUSTC_FLAGS += -O --cfg rtopt
endif
+CFG_JEMALLOC_FLAGS += $(JEMALLOC_FLAGS)
+
ifdef CFG_DISABLE_DEBUG
CFG_RUSTC_FLAGS += --cfg ndebug
CFG_GCCISH_CFLAGS += -DRUST_NDEBUG
$$(JEMALLOC_LOCAL_$(1)): $$(JEMALLOC_DEPS) $$(MKFILE_DEPS)
@$$(call E, make: jemalloc)
cd "$$(JEMALLOC_BUILD_DIR_$(1))"; "$(S)src/jemalloc/configure" \
- $$(JEMALLOC_ARGS_$(1)) --with-jemalloc-prefix=je_ \
+ $$(JEMALLOC_ARGS_$(1)) --with-jemalloc-prefix=je_ $(CFG_JEMALLOC_FLAGS) \
--build=$(CFG_BUILD) --host=$(1) \
CC="$$(CC_$(1))" \
AR="$$(AR_$(1))" \
// codegen tests (vs. clang)
-fn make_o_name(config: &Config, testfile: &Path) -> Path {
- output_base_name(config, testfile).with_extension("o")
-}
-
fn append_suffix_to_stem(p: &Path, suffix: &str) -> Path {
if suffix.len() == 0 {
(*p).clone()
// FIXME (#9639): This needs to handle non-utf8 paths
let link_args = vec!("-L".to_string(),
aux_dir.as_str().unwrap().to_string());
- let llvm_args = vec!("--emit=obj".to_string(),
- "--crate-type=lib".to_string(),
- "-C".to_string(),
- "save-temps".to_string());
+ let llvm_args = vec!("--emit=bc,obj".to_string(),
+ "--crate-type=lib".to_string());
let args = make_compile_args(config,
props,
link_args.append(llvm_args.as_slice()),
- |a, b| ThisFile(make_o_name(a, b)), testfile);
+ |a, b| ThisDirectory(output_base_name(a, b).dir_path()),
+ testfile);
compose_and_run_compiler(config, props, testfile, args, None)
}
common thing to do.
The first thing that we need to do is make a file to put our code in. I like
-to make a projects directory in my home directory, and keep all my projects
+to make a `projects` directory in my home directory, and keep all my projects
there. Rust does not care where your code lives.
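
A sketch of the shell commands this implies (the `projects` and `hello_world`
names are just examples; pick whatever you like):

```{bash}
$ mkdir ~/projects
$ cd ~/projects
$ mkdir hello_world
$ cd hello_world
```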
This actually leads to one other concern we should address: this tutorial will
languages which have it, like Haskell, often suggest that documenting your
types explicitly is a best-practice. We agree that forcing functions to declare
types while allowing for inference inside of function bodies is a wonderful
-compromise between full inference and no inference.
+sweet spot between full inference and no inference.
What about returning a value? Here's a function that adds one to an integer:
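
A minimal sketch of such a function (the `add_one` name is just for
illustration; the guide's actual listing is elided from this excerpt):

```{rust}
fn add_one(x: int) -> int {
    x + 1
}
```

Note that the return type is declared explicitly, while the body is a single
expression whose value is returned.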
% Rust Documentation
-<!-- Completely hide the TOC and the section numbers -->
-<style type="text/css">
-#TOC { display: none; }
-.header-section-number { display: none; }
-li {list-style-type: none; }
-</style>
+Welcome to the Rust documentation! You can use the section headings above
+to jump to any particular section.
+
+# Getting Started
+
+If you haven't seen Rust at all yet, the first thing you should read is the [30
+minute intro](intro.html). It will give you an overview of the basic ideas of Rust
+at a high level.
+
+Once you know you really want to learn Rust, the next step is reading [the
+guide](guide.html). It is a lengthy explanation of Rust, its syntax, and its
+concepts. Upon completing the guide, you'll be an intermediate Rust developer,
+and will have a good grasp of the fundamental ideas behind Rust.
+
+# Community & Getting Help
+
+If you need help with something, or just want to talk about Rust with others,
+there are a few places you can do that:
+
+The Rust IRC channels on [irc.mozilla.org](http://irc.mozilla.org/) are the
+fastest way to get help.
+[`#rust`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust) is
+the general discussion channel, and you'll find people willing to help you with
+any questions you may have.
+
+There are also three specialty channels:
+[`#rust-gamedev`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-gamedev)
+and
+[`#rust-osdev`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-osdev)
+are for game development and operating system development, respectively.
+There's also
+[`#rust-internals`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-internals), which is for discussion of the development of Rust itself.
+
+You can also get help on [Stack
+Overflow](http://stackoverflow.com/questions/tagged/rust). Searching for your
+problem might reveal someone who has asked it before!
+
+There is an active [subreddit](http://reddit.com/r/rust) with lots of
+discussion about Rust.
+
+There is also a [developer forum](http://discuss.rust-lang.org/), where the
+development of Rust itself is discussed.
-* [A 30-minute Intro to Rust](intro.html)
-* [The Rust tutorial](tutorial.html) (* [PDF](tutorial.pdf))
-* [The Rust reference manual](rust.html) (* [PDF](rust.pdf))
# Guides
+Rust Guides are in-depth looks at particular topics that are relevant to Rust
+development. If you're trying to figure out how to do something, there may be
+a guide that can help you out:
+
* [Strings](guide-strings.html)
* [Pointers](guide-pointers.html)
* [References and Lifetimes](guide-lifetimes.html)
* [Testing](guide-testing.html)
* [Rust's Runtime](guide-runtime.html)
+# Tools
+
+Rust is still a young language, so there isn't a ton of tooling yet, but the
+tools we have are really nice.
+
+[Cargo](http://crates.io) is Rust's package manager, and its website contains
+lots of good documentation.
+
+[The `rustdoc` manual](rustdoc.html) contains information about Rust's
+documentation tool.
+
# FAQs
+Some questions are asked quite often, so we've made FAQs for them:
+
* [Language Design FAQ](complement-design-faq.html)
* [Language FAQ](complement-lang-faq.html)
* [Project FAQ](complement-project-faq.html)
* [How to submit a bug report](complement-bugreport.html)
-# Libraries
-
-* [The standard library, `std`](std/index.html)
-
-<!-- force the two lists to be separate -->
-
-* [The `arena` allocation library](arena/index.html)
-* [The `collections` library](collections/index.html)
-* [The `flate` compression library](flate/index.html)
-* [The `fourcc` four-character code library](fourcc/index.html)
-* [The `getopts` argument parsing library](getopts/index.html)
-* [The `glob` file path matching library](glob/index.html)
-* [The `green` M:N runtime library](green/index.html)
-* [The `hexfloat` library for hexadecimal floating-point literals](hexfloat/index.html)
-* [The `libc` bindings](libc/index.html)
-* [The `native` 1:1 threading runtime](native/index.html)
-* [The `num` arbitrary precision numerics library](num/index.html)
-* [The `rand` library for random numbers and distributions](rand/index.html)
-* [The `regex` library for regular expressions](regex/index.html)
-* [The `rustc` compiler](rustc/index.html)
-* [The `rustuv` M:N I/O library](rustuv/index.html)
-* [The `semver` version collation library](semver/index.html)
-* [The `serialize` value encoding/decoding library](serialize/index.html)
-* [The `sync` library for concurrency-enabled mechanisms and primitives](sync/index.html)
-* [The `syntax` library, the Rust parser](syntax/index.html)
-* [The `term` terminal-handling library](term/index.html)
-* [The `test` library containing the unit-testing & micro-benchmark framework](test/index.html)
-* [The `time` library](time/index.html)
-* [The `uuid` 128-bit universally unique identifier library](uuid/index.html)
-* [The `url` library](url/index.html)
-* [The `log` library](log/index.html)
-
-# Tooling
-
-* [The `rustdoc` manual](rustdoc.html)
+# The standard library
+
+You can find function-level documentation for the entire standard library
+[here](std/index.html). There's a list of crates on the left with more specific
+sections, or you can use the search bar at the top to search for something if
+you know its name.
# External documentation
-*Note: While these are great resources for learning Rust, they may
-track a particular version of Rust that is likely not exactly the same
-as that for which this documentation was generated.*
+*Note: While these are great resources for learning Rust, they may track a
+particular version of Rust that is likely not exactly the same as that for
+which this documentation was generated.*
-* [Rust for Rubyists] - An excellent introduction for Rust; not just for Rubyists (tracks the most recent release).
-* [Rust by Example] - Short examples of common tasks in Rust (tracks the master branch).
-* [The Rust wiki](http://github.com/rust-lang/rust/wiki)
+* [Rust by Example] - Short examples of common tasks in Rust (tracks the master
+ branch).
+* [Rust for Rubyists] - The first community tutorial for Rust. Tracks the last
+ stable release. Not just for Ruby programmers.
-[Rust for Rubyists]: http://www.rustforrubyists.com/
[Rust by Example]: http://rustbyexample.com/
-
-# Community
-
-* [Reddit](http://reddit.com/r/rust)
-* [Stack Overflow](http://stackoverflow.com/questions/tagged/rust)
-* [Developer Forum](http://discuss.rust-lang.org/)
-* The Rust IRC channels on [irc.mozilla.org](http://irc.mozilla.org/):
- * [`#rust`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust) - general discussion
- * [`#rust-gamedev`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-gamedev) - game development
- * [`#rust-internals`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-internals) - compiler and libraries
- * [`#rust-osdev`](http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-osdev) - operating system development
-
+[Rust for Rubyists]: http://www.rustforrubyists.com/
: This type does not implement "copy", even if eligible
* `no_send_bound`
: This type does not implement "send", even if eligible
-* `no_share_bound`
- : This type does not implement "share", even if eligible
+* `no_sync_bound`
+ : This type does not implement "sync", even if eligible
* `eh_personality`
: ___Needs filling in___
* `exchange_free`
assert!(b != "world");
~~~~
-### Vector types
+### Vector, Array, and Slice types
-The vector type constructor represents a homogeneous array of values of a given type.
-A vector has a fixed size.
-(Operations like `vec.push` operate solely on owned vectors.)
-A vector type can be annotated with a _definite_ size, such as `[int, ..10]`.
-Such a definite-sized vector type is a first-class type, since its size is known statically.
-A vector without such a size is said to be of _indefinite_ size,
-and is therefore not a _first-class_ type.
-An indefinite-size vector can only be instantiated through a pointer type,
-such as `&[T]` or `Vec<T>`.
-The kind of a vector type depends on the kind of its element type,
-as with other simple structural types.
+Rust has three different types for a list of items:
-Expressions producing vectors of definite size cannot be evaluated in a
-context expecting a vector of indefinite size; one must copy the
-definite-sized vector contents into a distinct vector of indefinite size.
+* `Vec<T>`, a 'vector'
+* `[T, ..N]`, an 'array'
+* `&[T]`, a 'slice'
-An example of a vector type and its use:
+A vector is a heap-allocated list of `T`. A vector has ownership over the
+data inside of it, and is also able to grow and change in size. It's important
+to note that `Vec<T>` is a library type; it's not actually part of the core
+language.
-~~~~
-let v: &[int] = &[7, 5, 3];
-let i: int = v[2];
-assert!(i == 3);
-~~~~
+An array has a fixed size, and can be allocated on either the stack or the heap.
+
+A slice is a 'view' into a vector or array. It doesn't own the data it points
+to; it borrows it.
+
+An example of each kind:
+
+```{rust}
+let vec: Vec<int> = vec![1, 2, 3];
+let arr: [int, ..3] = [1, 2, 3];
+let s: &[int] = vec.as_slice();
+```
+
+As you can see, the `vec!` macro allows you to create a `Vec<T>` easily. Like
+`Vec<T>` itself, the `vec!` macro is part of the standard library rather than
+the language.
-All in-bounds elements of a vector are always initialized,
-and access to a vector is always bounds-checked.
+All in-bounds elements of vectors, arrays, and slices are always initialized,
+and access to a vector, array, or slice is always bounds-checked.
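+
+For instance, a small sketch of the bounds check in action (an out-of-bounds
+index fails the running task at runtime rather than reading bad memory):
+
+```{rust}
+let arr = [7i, 5, 3];
+let i = arr[2]; // in bounds: `i` is 3
+// `arr[3]` would fail the bounds check at runtime
+```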
### Structure types
use core::ptr::RawPtr;
#[cfg(not(test))] use core::raw;
-#[cfg(not(test))] use util;
+#[cfg(stage0, not(test))] use util;
/// Returns a pointer to `size` bytes of memory.
///
}
// FIXME: #7496
-#[cfg(not(test))]
+#[cfg(stage0, not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
#[allow(deprecated)]
alloc as *mut u8
}
+// FIXME: #7496
+#[cfg(not(stage0), not(test))]
+#[lang="closure_exchange_malloc"]
+#[inline]
+#[allow(deprecated)]
+unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
+ align: uint) -> *mut u8 {
+ let p = allocate(size, align);
+
+ let alloc = p as *mut raw::Box<()>;
+ (*alloc).drop_glue = drop_glue;
+
+ alloc as *mut u8
+}
+
#[cfg(jemalloc)]
mod imp {
use core::option::{None, Option};
use std::num;
use std::ptr;
use std::rc::Rc;
-use std::rt::heap::allocate;
+use std::rt::heap::{allocate, deallocate};
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
- data: Rc<RefCell<Vec<u8> >>,
+ data: Rc<RefCell<Vec<u8>>>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
+
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
end: Cell<*const T>,
/// A pointer to the first arena segment.
- first: RefCell<TypedArenaChunkRef<T>>,
+ first: RefCell<*mut TypedArenaChunk<T>>,
}
-type TypedArenaChunkRef<T> = Option<Box<TypedArenaChunk<T>>>;
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
- next: TypedArenaChunkRef<T>,
+ next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
+fn calculate_size<T>(capacity: uint) -> uint {
+    // Start with the size of the chunk header, rounded up so that the
+    // elements stored after it are properly aligned.
+    let mut size = mem::size_of::<TypedArenaChunk<T>>();
+    size = round_up(size, mem::min_align_of::<T>());
+    // Add storage for `capacity` elements, using checked arithmetic so
+    // that overflow fails loudly instead of undersizing the allocation.
+    let elem_size = mem::size_of::<T>();
+    let elems_size = elem_size.checked_mul(&capacity).unwrap();
+    size = size.checked_add(&elems_size).unwrap();
+    size
+}
+
impl<T> TypedArenaChunk<T> {
#[inline]
- fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
- -> Box<TypedArenaChunk<T>> {
- let mut size = mem::size_of::<TypedArenaChunk<T>>();
- size = round_up(size, mem::min_align_of::<T>());
- let elem_size = mem::size_of::<T>();
- let elems_size = elem_size.checked_mul(&capacity).unwrap();
- size = size.checked_add(&elems_size).unwrap();
-
- let mut chunk = unsafe {
- let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
- let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
- ptr::write(&mut chunk.next, next);
- chunk
- };
-
- chunk.capacity = capacity;
+ unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
+ -> *mut TypedArenaChunk<T> {
+ let size = calculate_size::<T>(capacity);
+ let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
+ as *mut TypedArenaChunk<T>;
+ (*chunk).next = next;
+ (*chunk).capacity = capacity;
chunk
}
}
// Destroy the next chunk.
- let next_opt = mem::replace(&mut self.next, None);
- match next_opt {
- None => {}
- Some(mut next) => {
- // We assume that the next chunk is completely filled.
- let capacity = next.capacity;
- next.destroy(capacity)
- }
+ let next = self.next;
+ let size = calculate_size::<T>(self.capacity);
+ deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
+ mem::min_align_of::<TypedArenaChunk<T>>());
+ if next.is_not_null() {
+ let capacity = (*next).capacity;
+ (*next).destroy(capacity);
}
}
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
- let chunk = TypedArenaChunk::<T>::new(None, capacity);
- TypedArena {
- ptr: Cell::new(chunk.start() as *const T),
- end: Cell::new(chunk.end() as *const T),
- first: RefCell::new(Some(chunk)),
+ unsafe {
+ let chunk = TypedArenaChunk::<T>::new(ptr::mut_null(), capacity);
+ TypedArena {
+ ptr: Cell::new((*chunk).start() as *const T),
+ end: Cell::new((*chunk).end() as *const T),
+ first: RefCell::new(chunk),
+ }
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
- let chunk = self.first.borrow_mut().take().unwrap();
- let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
- let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
- self.ptr.set(chunk.start() as *const T);
- self.end.set(chunk.end() as *const T);
- *self.first.borrow_mut() = Some(chunk)
+ unsafe {
+ let chunk = *self.first.borrow_mut();
+ let new_capacity = (*chunk).capacity.checked_mul(&2).unwrap();
+ let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
+ self.ptr.set((*chunk).start() as *const T);
+ self.end.set((*chunk).end() as *const T);
+ *self.first.borrow_mut() = chunk
+ }
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
- // Determine how much was filled.
- let start = self.first.borrow().as_ref().unwrap().start() as uint;
- let end = self.ptr.get() as uint;
- let diff = (end - start) / mem::size_of::<T>();
-
- // Pass that to the `destroy` method.
unsafe {
- self.first.borrow_mut().as_mut().unwrap().destroy(diff)
+ // Determine how much was filled.
+ let start = self.first.borrow().as_ref().unwrap().start() as uint;
+ let end = self.ptr.get() as uint;
+ let diff = (end - start) / mem::size_of::<T>();
+
+ // Pass that to the `destroy` method.
+ (**self.first.borrow_mut()).destroy(diff)
}
}
}
pub use core::slice::{Chunks, Slice, ImmutableSlice, ImmutablePartialEqSlice};
pub use core::slice::{ImmutableOrdSlice, MutableSlice, Items, MutItems};
pub use core::slice::{MutSplits, MutChunks, Splits};
-pub use core::slice::{bytes, ref_slice, MutableCloneableSlice};
+pub use core::slice::{bytes, mut_ref_slice, ref_slice, MutableCloneableSlice};
pub use core::slice::{Found, NotFound};
// Functional utilities
iter: Items<'static, T>
}
+impl<T> MoveItems<T> {
+    #[inline]
+    /// Drops all items that have not yet been moved and returns the empty vector.
+    pub fn unwrap(mut self) -> Vec<T> {
+        unsafe {
+            // Exhaust the iterator, dropping any elements that have not
+            // yet been moved out.
+            while self.next().is_some() {}
+            let allocation = self.allocation;
+            let cap = self.cap;
+            // Skip `MoveItems`'s destructor: ownership of the allocation
+            // is handed back to the vector we return.
+            mem::forget(self);
+            Vec { ptr: allocation, cap: cap, len: 0 }
+        }
+    }
+}
+
impl<T> Iterator<T> for MoveItems<T> {
#[inline]
fn next<'a>(&'a mut self) -> Option<T> {
assert_eq!(vec.swap_remove(0), None);
}
+ #[test]
+ fn test_move_iter_unwrap() {
+ let mut vec: Vec<uint> = Vec::with_capacity(7);
+ vec.push(1);
+ vec.push(2);
+ let ptr = vec.as_ptr();
+ vec = vec.move_iter().unwrap();
+ assert_eq!(vec.as_ptr(), ptr);
+ assert_eq!(vec.capacity(), 7);
+ assert_eq!(vec.len(), 0);
+ }
+
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| {
/// A type which is considered "not sync", meaning that
/// its contents are not threadsafe, hence they cannot be
/// shared between tasks.
- #[lang="no_share_bound"]
+ #[lang="no_sync_bound"]
#[deriving(PartialEq,Clone)]
pub struct NoSync;
///
/// assert_eq!(n.count_ones(), 3);
/// ```
- fn count_ones(self) -> Self;
+ fn count_ones(self) -> uint;
/// Returns the number of zeros in the binary representation of the integer.
///
/// assert_eq!(n.count_zeros(), 5);
/// ```
#[inline]
- fn count_zeros(self) -> Self {
+ fn count_zeros(self) -> uint {
(!self).count_ones()
}
///
/// assert_eq!(n.leading_zeros(), 10);
/// ```
- fn leading_zeros(self) -> Self;
+ fn leading_zeros(self) -> uint;
/// Returns the number of trailing zeros in the binary representation
/// of the integer.
///
/// assert_eq!(n.trailing_zeros(), 3);
/// ```
- fn trailing_zeros(self) -> Self;
+ fn trailing_zeros(self) -> uint;
    /// Shifts the bits to the left by a specified amount, `n`, wrapping
/// the truncated bits to the end of the resulting integer.
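    ///
    /// # Example
    ///
    /// A sketch, assuming a `u64` value (any type implementing `Int`
    /// behaves the same way):
    ///
    /// ```
    /// let n = 0x0123456789ABCDEFu64;
    /// assert_eq!(n.rotate_left(12), 0x3456789ABCDEF012u64);
    /// ```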
($T:ty, $BITS:expr, $ctpop:path, $ctlz:path, $cttz:path, $bswap:path) => {
impl Int for $T {
#[inline]
- fn count_ones(self) -> $T { unsafe { $ctpop(self) } }
+ fn count_ones(self) -> uint { unsafe { $ctpop(self) as uint } }
#[inline]
- fn leading_zeros(self) -> $T { unsafe { $ctlz(self) } }
+ fn leading_zeros(self) -> uint { unsafe { $ctlz(self) as uint } }
#[inline]
- fn trailing_zeros(self) -> $T { unsafe { $cttz(self) } }
+ fn trailing_zeros(self) -> uint { unsafe { $cttz(self) as uint } }
#[inline]
fn rotate_left(self, n: uint) -> $T {
($T:ty, $U:ty) => {
impl Int for $T {
#[inline]
- fn count_ones(self) -> $T { (self as $U).count_ones() as $T }
+ fn count_ones(self) -> uint { (self as $U).count_ones() }
#[inline]
- fn leading_zeros(self) -> $T { (self as $U).leading_zeros() as $T }
+ fn leading_zeros(self) -> uint { (self as $U).leading_zeros() }
#[inline]
- fn trailing_zeros(self) -> $T { (self as $U).trailing_zeros() as $T }
+ fn trailing_zeros(self) -> uint { (self as $U).trailing_zeros() }
#[inline]
fn rotate_left(self, n: uint) -> $T { (self as $U).rotate_left(n) as $T }
*
*/
+use kinds::Sized;
+
/**
*
* The `Drop` trait is used to run some code when a value goes out of scope. This
* ```
*/
#[lang="deref"]
-pub trait Deref<Result> {
+pub trait Deref<Sized? Result> {
/// The method called to dereference a value
fn deref<'a>(&'a self) -> &'a Result;
}
* ```
*/
#[lang="deref_mut"]
-pub trait DerefMut<Result>: Deref<Result> {
+pub trait DerefMut<Sized? Result>: Deref<Result> {
/// The method called to mutably dereference a value
fn deref_mut<'a>(&'a mut self) -> &'a mut Result;
}
let mut i: uint = 0;
let ln = self.len();
while i < ln / 2 {
- self.swap(i, ln - i - 1);
+ // Unsafe swap to avoid the bounds check in safe swap.
+ unsafe {
+ let pa: *mut T = self.unsafe_mut_ref(i);
+ let pb: *mut T = self.unsafe_mut_ref(ln - i - 1);
+ ptr::swap(pa, pb);
+ }
i += 1;
}
}
#[test]
fn test_count_zeros() {
- assert!(A.count_zeros() == BITS as $T - 3);
- assert!(B.count_zeros() == BITS as $T - 2);
- assert!(C.count_zeros() == BITS as $T - 5);
+ assert!(A.count_zeros() == BITS - 3);
+ assert!(B.count_zeros() == BITS - 2);
+ assert!(C.count_zeros() == BITS - 5);
}
#[test]
#[test]
fn test_count_zeros() {
- assert!(A.count_zeros() == BITS as $T - 3);
- assert!(B.count_zeros() == BITS as $T - 2);
- assert!(C.count_zeros() == BITS as $T - 5);
+ assert!(A.count_zeros() == BITS - 3);
+ assert!(B.count_zeros() == BITS - 2);
+ assert!(C.count_zeros() == BITS - 5);
}
#[test]
}
#[test]
+ #[ignore(cfg(windows))] // FIXME (#9406)
fn test_lots_of_files() {
// this is a good test because it touches lots of differently named files
glob("/*/*/*/*").skip(10000).next();
#[cfg(windows)] pub use types::os::arch::extra::{LARGE_INTEGER, LPVOID, LONG};
#[cfg(windows)] pub use types::os::arch::extra::{time64_t, OVERLAPPED, LPCWSTR};
#[cfg(windows)] pub use types::os::arch::extra::{LPOVERLAPPED, SIZE_T, LPDWORD};
-#[cfg(windows)] pub use types::os::arch::extra::{SECURITY_ATTRIBUTES};
+#[cfg(windows)] pub use types::os::arch::extra::{SECURITY_ATTRIBUTES, WIN32_FIND_DATAW};
#[cfg(windows)] pub use funcs::c95::string::{wcslen};
#[cfg(windows)] pub use funcs::posix88::stat_::{wstat, wutime, wchmod, wrmdir};
#[cfg(windows)] pub use funcs::bsd43::{closesocket};
pub type LPWSAPROTOCOL_INFO = *mut WSAPROTOCOL_INFO;
pub type GROUP = c_uint;
+
+ #[repr(C)]
+ pub struct WIN32_FIND_DATAW {
+ pub dwFileAttributes: DWORD,
+ pub ftCreationTime: FILETIME,
+ pub ftLastAccessTime: FILETIME,
+ pub ftLastWriteTime: FILETIME,
+ pub nFileSizeHigh: DWORD,
+ pub nFileSizeLow: DWORD,
+ pub dwReserved0: DWORD,
+ pub dwReserved1: DWORD,
+ pub cFileName: [wchar_t, ..260], // #define MAX_PATH 260
+ pub cAlternateFileName: [wchar_t, ..14],
+ }
+
+ pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
}
}
}
LPMEMORY_BASIC_INFORMATION,
LPSYSTEM_INFO, HANDLE, LPHANDLE,
LARGE_INTEGER, PLARGE_INTEGER,
- LPFILETIME};
+ LPFILETIME, LPWIN32_FIND_DATAW};
extern "system" {
pub fn GetEnvironmentVariableW(n: LPCWSTR,
-> DWORD;
pub fn SetCurrentDirectoryW(lpPathName: LPCWSTR) -> BOOL;
pub fn GetLastError() -> DWORD;
- pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: HANDLE)
+ pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: LPWIN32_FIND_DATAW)
-> HANDLE;
- pub fn FindNextFileW(findFile: HANDLE, findFileData: HANDLE)
+ pub fn FindNextFileW(findFile: HANDLE, findFileData: LPWIN32_FIND_DATAW)
-> BOOL;
pub fn FindClose(findFile: HANDLE) -> BOOL;
pub fn DuplicateHandle(hSourceProcessHandle: HANDLE,
//! Blocking Windows-based file I/O
use alloc::arc::Arc;
-use libc::{c_int, c_void};
-use libc;
+use libc::{mod, c_int};
use std::c_str::CString;
use std::mem;
use std::os::windows::fill_utf16_buf_and_decode;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::str;
-use std::vec;
pub type fd_t = libc::c_int;
}
pub fn readdir(p: &CString) -> IoResult<Vec<CString>> {
- use std::rt::libc_heap::malloc_raw;
-
fn prune(root: &CString, dirs: Vec<Path>) -> Vec<CString> {
let root = unsafe { CString::new(root.as_ptr(), false) };
let root = Path::new(root);
}).map(|path| root.join(path).to_c_str()).collect()
}
- extern {
- fn rust_list_dir_wfd_size() -> libc::size_t;
- fn rust_list_dir_wfd_fp_buf(wfd: *mut libc::c_void) -> *const u16;
- }
let star = Path::new(unsafe {
CString::new(p.as_ptr(), false)
}).join("*");
let path = try!(to_utf16(&star.to_c_str()));
unsafe {
- let wfd_ptr = malloc_raw(rust_list_dir_wfd_size() as uint);
- let find_handle = libc::FindFirstFileW(path.as_ptr(),
- wfd_ptr as libc::HANDLE);
+ let mut wfd = mem::zeroed();
+ let find_handle = libc::FindFirstFileW(path.as_ptr(), &mut wfd);
if find_handle != libc::INVALID_HANDLE_VALUE {
- let mut paths = vec!();
- let mut more_files = 1 as libc::c_int;
+ let mut paths = vec![];
+ let mut more_files = 1 as libc::BOOL;
while more_files != 0 {
- let fp_buf = rust_list_dir_wfd_fp_buf(wfd_ptr as *mut c_void);
- if fp_buf as uint == 0 {
- fail!("os::list_dir() failure: got null ptr from wfd");
- } else {
- let fp_vec = vec::raw::from_buf(fp_buf, libc::wcslen(fp_buf) as uint);
- let fp_trimmed = str::truncate_utf16_at_nul(fp_vec.as_slice());
- let fp_str = String::from_utf16(fp_trimmed)
- .expect("rust_list_dir_wfd_fp_buf returned invalid UTF-16");
- paths.push(Path::new(fp_str));
+ {
+ let filename = str::truncate_utf16_at_nul(wfd.cFileName);
+ match String::from_utf16(filename) {
+ Some(filename) => paths.push(Path::new(filename)),
+ None => {
+ assert!(libc::FindClose(find_handle) != 0);
+ return Err(IoError {
+ code: super::c::ERROR_ILLEGAL_CHARACTER as uint,
+ extra: 0,
+ detail: Some(format!("path was not valid UTF-16: {}", filename)),
+ })
+ }, // FIXME #12056: Convert the UCS-2 to invalid utf-8 instead of erroring
+ }
}
- more_files = libc::FindNextFileW(find_handle,
- wfd_ptr as libc::HANDLE);
+ more_files = libc::FindNextFileW(find_handle, &mut wfd);
}
assert!(libc::FindClose(find_handle) != 0);
- libc::free(wfd_ptr as *mut c_void);
Ok(prune(p, paths))
} else {
Err(super::last_error())
// wait for the socket to become readable again.
let _guard = lock();
match retry(|| read(deadline.is_some())) {
- -1 if util::wouldblock() => { assert!(deadline.is_some()); }
+ -1 if util::wouldblock() => {}
-1 => return Err(os::last_error()),
n => { ret = n; break }
}
pub fn bits(&self) -> uint {
if self.is_zero() { return 0; }
let zeros = self.data.last().unwrap().leading_zeros();
- return self.data.len()*BigDigit::bits - (zeros as uint);
+ return self.data.len()*BigDigit::bits - zeros;
}
}
Ratio::from_integer(self.numer / self.denom)
}
- ///Returns the fractional part of a number.
+ /// Returns the fractional part of a number.
#[inline]
pub fn fract(&self) -> Ratio<T> {
Ratio::new_raw(self.numer % self.denom, self.denom.clone())
}
}
-// a/b + c/d = (a*d + b*c)/(b*d
+// a/b + c/d = (a*d + b*c)/(b*d)
arith_impl!(impl Add, add)
// a/b - c/d = (a*d - b*c)/(b*d)
self.isaac();
}
self.cnt -= 1;
- self.rsl[self.cnt as uint]
+
+ // self.cnt is at most RAND_SIZE, but that is before the
+ // subtraction above. We want to index without bounds
+ // checking, but this could lead to incorrect code if someone
+ // misrefactors, so we check, sometimes.
+ //
+ // (Changes here should be reflected in Isaac64Rng.next_u64.)
+ debug_assert!(self.cnt < RAND_SIZE);
+
+ // (the % is cheaply telling the optimiser that we're always
+ // in bounds, without unsafe. NB. this is a power of two, so
+ // it optimises to a bitwise mask).
+ self.rsl[(self.cnt % RAND_SIZE) as uint]
}
}
self.isaac64();
}
self.cnt -= 1;
- unsafe { *self.rsl.unsafe_get(self.cnt) }
+
+ // See corresponding location in IsaacRng.next_u32 for
+ // explanation.
+        debug_assert!(self.cnt < RAND_SIZE_64);
+ self.rsl[(self.cnt % RAND_SIZE_64) as uint]
}
}
use super::rpath;
use super::rpath::RPathConfig;
use super::svh::Svh;
+use super::write::{OutputTypeBitcode, OutputTypeExe, OutputTypeObject};
use driver::driver::{CrateTranslation, OutputFilenames, Input, FileInput};
use driver::config::NoDebugInfo;
use driver::session::Session;
use driver::config;
-use llvm;
-use llvm::ModuleRef;
use metadata::common::LinkMeta;
use metadata::{encoder, cstore, filesearch, csearch, loader, creader};
use middle::trans::context::CrateContext;
use util::ppaux;
use util::sha2::{Digest, Sha256};
-use std::c_str::{ToCStr, CString};
use std::char;
-use std::collections::HashSet;
use std::io::{fs, TempDir, Command};
use std::io;
use std::mem;
-use std::ptr;
use std::str;
use std::string::String;
use flate;
RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8;
-#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
-pub enum OutputType {
- OutputTypeBitcode,
- OutputTypeAssembly,
- OutputTypeLlvmAssembly,
- OutputTypeObject,
- OutputTypeExe,
-}
-
-pub fn llvm_err(sess: &Session, msg: String) -> ! {
- unsafe {
- let cstr = llvm::LLVMRustGetLastError();
- if cstr == ptr::null() {
- sess.fatal(msg.as_slice());
- } else {
- let err = CString::new(cstr, true);
- let err = String::from_utf8_lossy(err.as_bytes());
- sess.fatal(format!("{}: {}",
- msg.as_slice(),
- err.as_slice()).as_slice());
- }
- }
-}
-
-pub fn write_output_file(
- sess: &Session,
- target: llvm::TargetMachineRef,
- pm: llvm::PassManagerRef,
- m: ModuleRef,
- output: &Path,
- file_type: llvm::FileType) {
- unsafe {
- output.with_c_str(|output| {
- let result = llvm::LLVMRustWriteOutputFile(
- target, pm, m, output, file_type);
- if !result {
- llvm_err(sess, "could not write output".to_string());
- }
- })
- }
-}
-
-pub mod write {
-
- use super::super::lto;
- use super::{write_output_file, OutputType};
- use super::{OutputTypeAssembly, OutputTypeBitcode};
- use super::{OutputTypeExe, OutputTypeLlvmAssembly};
- use super::{OutputTypeObject};
- use driver::driver::{CrateTranslation, OutputFilenames};
- use driver::config::NoDebugInfo;
- use driver::session::Session;
- use driver::config;
- use llvm;
- use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
- use util::common::time;
- use syntax::abi;
-
- use std::c_str::ToCStr;
- use std::io::{Command};
- use libc::{c_uint, c_int};
- use std::str;
-
- // On android, we by default compile for armv7 processors. This enables
- // things like double word CAS instructions (rather than emulating them)
- // which are *far* more efficient. This is obviously undesirable in some
- // cases, so if any sort of target feature is specified we don't append v7
- // to the feature list.
- //
- // On iOS only armv7 and newer are supported. So it is useful to
- // get all hardware potential via VFP3 (hardware floating point)
- // and NEON (SIMD) instructions supported by LLVM.
- // Note that without those flags various linking errors might
- // arise as some of intrinsics are converted into function calls
- // and nobody provides implementations those functions
- fn target_feature<'a>(sess: &'a Session) -> &'a str {
- match sess.targ_cfg.os {
- abi::OsAndroid => {
- if "" == sess.opts.cg.target_feature.as_slice() {
- "+v7"
- } else {
- sess.opts.cg.target_feature.as_slice()
- }
- },
- abi::OsiOS if sess.targ_cfg.arch == abi::Arm => {
- "+v7,+thumb2,+vfp3,+neon"
- },
- _ => sess.opts.cg.target_feature.as_slice()
- }
- }
-
- pub fn run_passes(sess: &Session,
- trans: &CrateTranslation,
- output_types: &[OutputType],
- output: &OutputFilenames) {
- let llmod = trans.module;
- let llcx = trans.context;
- unsafe {
- configure_llvm(sess);
-
- if sess.opts.cg.save_temps {
- output.with_extension("no-opt.bc").with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
-
- let opt_level = match sess.opts.optimize {
- config::No => llvm::CodeGenLevelNone,
- config::Less => llvm::CodeGenLevelLess,
- config::Default => llvm::CodeGenLevelDefault,
- config::Aggressive => llvm::CodeGenLevelAggressive,
- };
- let use_softfp = sess.opts.cg.soft_float;
-
- // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a parameter.
- // FIXME: #11954: mac64 unwinding may not work with fp elim
- let no_fp_elim = (sess.opts.debuginfo != NoDebugInfo) ||
- (sess.targ_cfg.os == abi::OsMacos &&
- sess.targ_cfg.arch == abi::X86_64);
-
- // OSX has -dead_strip, which doesn't rely on ffunction_sections
- // FIXME(#13846) this should be enabled for windows
- let ffunction_sections = sess.targ_cfg.os != abi::OsMacos &&
- sess.targ_cfg.os != abi::OsWindows;
- let fdata_sections = ffunction_sections;
-
- let reloc_model = match sess.opts.cg.relocation_model.as_slice() {
- "pic" => llvm::RelocPIC,
- "static" => llvm::RelocStatic,
- "default" => llvm::RelocDefault,
- "dynamic-no-pic" => llvm::RelocDynamicNoPic,
- _ => {
- sess.err(format!("{} is not a valid relocation mode",
- sess.opts
- .cg
- .relocation_model).as_slice());
- sess.abort_if_errors();
- return;
- }
- };
-
- let code_model = match sess.opts.cg.code_model.as_slice() {
- "default" => llvm::CodeModelDefault,
- "small" => llvm::CodeModelSmall,
- "kernel" => llvm::CodeModelKernel,
- "medium" => llvm::CodeModelMedium,
- "large" => llvm::CodeModelLarge,
- _ => {
- sess.err(format!("{} is not a valid code model",
- sess.opts
- .cg
- .code_model).as_slice());
- sess.abort_if_errors();
- return;
- }
- };
-
- let tm = sess.targ_cfg
- .target_strs
- .target_triple
- .as_slice()
- .with_c_str(|t| {
- sess.opts.cg.target_cpu.as_slice().with_c_str(|cpu| {
- target_feature(sess).with_c_str(|features| {
- llvm::LLVMRustCreateTargetMachine(
- t, cpu, features,
- code_model,
- reloc_model,
- opt_level,
- true /* EnableSegstk */,
- use_softfp,
- no_fp_elim,
- ffunction_sections,
- fdata_sections,
- )
- })
- })
- });
-
- // Create the two optimizing pass managers. These mirror what clang
- // does, and are by populated by LLVM's default PassManagerBuilder.
- // Each manager has a different set of passes, but they also share
- // some common passes.
- let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
- let mpm = llvm::LLVMCreatePassManager();
-
- // If we're verifying or linting, add them to the function pass
- // manager.
- let addpass = |pass: &str| {
- pass.as_slice().with_c_str(|s| llvm::LLVMRustAddPass(fpm, s))
- };
- if !sess.no_verify() { assert!(addpass("verify")); }
-
- if !sess.opts.cg.no_prepopulate_passes {
- llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
- llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
- populate_llvm_passes(fpm, mpm, llmod, opt_level,
- trans.no_builtins);
- }
-
- for pass in sess.opts.cg.passes.iter() {
- pass.as_slice().with_c_str(|s| {
- if !llvm::LLVMRustAddPass(mpm, s) {
- sess.warn(format!("unknown pass {}, ignoring",
- *pass).as_slice());
- }
- })
- }
-
- // Finally, run the actual optimization passes
- time(sess.time_passes(), "llvm function passes", (), |()|
- llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
- time(sess.time_passes(), "llvm module passes", (), |()|
- llvm::LLVMRunPassManager(mpm, llmod));
-
- // Deallocate managers that we're now done with
- llvm::LLVMDisposePassManager(fpm);
- llvm::LLVMDisposePassManager(mpm);
-
- // Emit the bytecode if we're either saving our temporaries or
- // emitting an rlib. Whenever an rlib is created, the bytecode is
- // inserted into the archive in order to allow LTO against it.
- if sess.opts.cg.save_temps ||
- (sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
- sess.opts.output_types.contains(&OutputTypeExe)) {
- output.temp_path(OutputTypeBitcode).with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
-
- if sess.lto() {
- time(sess.time_passes(), "all lto passes", (), |()|
- lto::run(sess, llmod, tm, trans.reachable.as_slice()));
-
- if sess.opts.cg.save_temps {
- output.with_extension("lto.bc").with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
- }
-
- // A codegen-specific pass manager is used to generate object
- // files for an LLVM module.
- //
- // Apparently each of these pass managers is a one-shot kind of
- // thing, so we create a new one for each type of output. The
- // pass manager passed to the closure should be ensured to not
- // escape the closure itself, and the manager should only be
- // used once.
- fn with_codegen(tm: TargetMachineRef, llmod: ModuleRef,
- no_builtins: bool, f: |PassManagerRef|) {
- unsafe {
- let cpm = llvm::LLVMCreatePassManager();
- llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
- llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
- f(cpm);
- llvm::LLVMDisposePassManager(cpm);
- }
- }
-
- let mut object_file = None;
- let mut needs_metadata = false;
- for output_type in output_types.iter() {
- let path = output.path(*output_type);
- match *output_type {
- OutputTypeBitcode => {
- path.with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
- OutputTypeLlvmAssembly => {
- path.with_c_str(|output| {
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- llvm::LLVMRustPrintModule(cpm, llmod, output);
- })
- })
- }
- OutputTypeAssembly => {
- // If we're not using the LLVM assembler, this function
- // could be invoked specially with output_type_assembly,
- // so in this case we still want the metadata object
- // file.
- let ty = OutputTypeAssembly;
- let path = if sess.opts.output_types.contains(&ty) {
- path
- } else {
- needs_metadata = true;
- output.temp_path(OutputTypeAssembly)
- };
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- write_output_file(sess, tm, cpm, llmod, &path,
- llvm::AssemblyFile);
- });
- }
- OutputTypeObject => {
- object_file = Some(path);
- }
- OutputTypeExe => {
- object_file = Some(output.temp_path(OutputTypeObject));
- needs_metadata = true;
- }
- }
- }
-
- time(sess.time_passes(), "codegen passes", (), |()| {
- match object_file {
- Some(ref path) => {
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- write_output_file(sess, tm, cpm, llmod, path,
- llvm::ObjectFile);
- });
- }
- None => {}
- }
- if needs_metadata {
- with_codegen(tm, trans.metadata_module,
- trans.no_builtins, |cpm| {
- let out = output.temp_path(OutputTypeObject)
- .with_extension("metadata.o");
- write_output_file(sess, tm, cpm,
- trans.metadata_module, &out,
- llvm::ObjectFile);
- })
- }
- });
-
- llvm::LLVMRustDisposeTargetMachine(tm);
- llvm::LLVMDisposeModule(trans.metadata_module);
- llvm::LLVMDisposeModule(llmod);
- llvm::LLVMContextDispose(llcx);
- if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
- }
- }
-
- pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
- let pname = super::get_cc_prog(sess);
- let mut cmd = Command::new(pname.as_slice());
-
- cmd.arg("-c").arg("-o").arg(outputs.path(OutputTypeObject))
- .arg(outputs.temp_path(OutputTypeAssembly));
- debug!("{}", &cmd);
-
- match cmd.output() {
- Ok(prog) => {
- if !prog.status.success() {
- sess.err(format!("linking with `{}` failed: {}",
- pname,
- prog.status).as_slice());
- sess.note(format!("{}", &cmd).as_slice());
- let mut note = prog.error.clone();
- note.push_all(prog.output.as_slice());
- sess.note(str::from_utf8(note.as_slice()).unwrap());
- sess.abort_if_errors();
- }
- },
- Err(e) => {
- sess.err(format!("could not exec the linker `{}`: {}",
- pname,
- e).as_slice());
- sess.abort_if_errors();
- }
- }
- }
-
- unsafe fn configure_llvm(sess: &Session) {
- use std::sync::{Once, ONCE_INIT};
- static mut INIT: Once = ONCE_INIT;
-
- // Copy what clang does by turning on loop vectorization at O2 and
- // slp vectorization at O3
- let vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
- (sess.opts.optimize == config::Default ||
- sess.opts.optimize == config::Aggressive);
- let vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
- sess.opts.optimize == config::Aggressive;
-
- let mut llvm_c_strs = Vec::new();
- let mut llvm_args = Vec::new();
- {
- let add = |arg: &str| {
- let s = arg.to_c_str();
- llvm_args.push(s.as_ptr());
- llvm_c_strs.push(s);
- };
- add("rustc"); // fake program name
- if vectorize_loop { add("-vectorize-loops"); }
- if vectorize_slp { add("-vectorize-slp"); }
- if sess.time_llvm_passes() { add("-time-passes"); }
- if sess.print_llvm_passes() { add("-debug-pass=Structure"); }
-
- for arg in sess.opts.cg.llvm_args.iter() {
- add((*arg).as_slice());
- }
- }
-
- INIT.doit(|| {
- llvm::LLVMInitializePasses();
-
- // Only initialize the platforms supported by Rust here, because
- // using --llvm-root will have multiple platforms that rustllvm
- // doesn't actually link to and it's pointless to put target info
- // into the registry that Rust cannot generate machine code for.
- llvm::LLVMInitializeX86TargetInfo();
- llvm::LLVMInitializeX86Target();
- llvm::LLVMInitializeX86TargetMC();
- llvm::LLVMInitializeX86AsmPrinter();
- llvm::LLVMInitializeX86AsmParser();
-
- llvm::LLVMInitializeARMTargetInfo();
- llvm::LLVMInitializeARMTarget();
- llvm::LLVMInitializeARMTargetMC();
- llvm::LLVMInitializeARMAsmPrinter();
- llvm::LLVMInitializeARMAsmParser();
-
- llvm::LLVMInitializeMipsTargetInfo();
- llvm::LLVMInitializeMipsTarget();
- llvm::LLVMInitializeMipsTargetMC();
- llvm::LLVMInitializeMipsAsmPrinter();
- llvm::LLVMInitializeMipsAsmParser();
-
- llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
- llvm_args.as_ptr());
- });
- }
-
- unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
- mpm: llvm::PassManagerRef,
- llmod: ModuleRef,
- opt: llvm::CodeGenOptLevel,
- no_builtins: bool) {
- // Create the PassManagerBuilder for LLVM. We configure it with
- // reasonable defaults and prepare it to actually populate the pass
- // manager.
- let builder = llvm::LLVMPassManagerBuilderCreate();
- match opt {
- llvm::CodeGenLevelNone => {
- // Don't add lifetime intrinsics at O0
- llvm::LLVMRustAddAlwaysInlinePass(builder, false);
- }
- llvm::CodeGenLevelLess => {
- llvm::LLVMRustAddAlwaysInlinePass(builder, true);
- }
- // numeric values copied from clang
- llvm::CodeGenLevelDefault => {
- llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
- 225);
- }
- llvm::CodeGenLevelAggressive => {
- llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
- 275);
- }
- }
- llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt as c_uint);
- llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, no_builtins);
-
- // Use the builder to populate the function/module pass managers.
- llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
- llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
- llvm::LLVMPassManagerBuilderDispose(builder);
-
- match opt {
- llvm::CodeGenLevelDefault | llvm::CodeGenLevelAggressive => {
- "mergefunc".with_c_str(|s| llvm::LLVMRustAddPass(mpm, s));
- }
- _ => {}
- };
- }
-}
-
-
/*
* Name mangling and its relationship to metadata. This is complex. Read
* carefully.
}
fn get_symbol_hash(ccx: &CrateContext, t: ty::t) -> String {
- match ccx.type_hashcodes.borrow().find(&t) {
+ match ccx.type_hashcodes().borrow().find(&t) {
Some(h) => return h.to_string(),
None => {}
}
- let mut symbol_hasher = ccx.symbol_hasher.borrow_mut();
- let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, &ccx.link_meta);
- ccx.type_hashcodes.borrow_mut().insert(t, hash.clone());
+ let mut symbol_hasher = ccx.symbol_hasher().borrow_mut();
+ let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, ccx.link_meta());
+ ccx.type_hashcodes().borrow_mut().insert(t, hash.clone());
hash
}
}
}
-fn remove(sess: &Session, path: &Path) {
+pub fn remove(sess: &Session, path: &Path) {
match fs::unlink(path) {
Ok(..) => {}
Err(e) => {
fn archive_search_paths(sess: &Session) -> Vec<Path> {
let mut rustpath = filesearch::rust_path();
rustpath.push(sess.target_filesearch().get_lib_path());
- // FIXME: Addl lib search paths are an unordered HashSet?
- // Shouldn't this search be done in some order?
- let addl_lib_paths: HashSet<Path> = sess.opts.addl_lib_search_paths.borrow().clone();
- let mut search: Vec<Path> = addl_lib_paths.move_iter().collect();
+ let mut search: Vec<Path> = sess.opts.addl_lib_search_paths.borrow().clone();
search.push_all(rustpath.as_slice());
return search;
}
// contain the metadata in a separate file. We use a temp directory
// here so concurrent builds in the same directory don't try to use
// the same filename for metadata (stomping over one another)
- let tmpdir = TempDir::new("rustc").expect("needs a temp dir");
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
let metadata = tmpdir.path().join(METADATA_FILENAME);
match fs::File::create(&metadata).write(trans.metadata
.as_slice()) {
ab.add_file(&metadata).unwrap();
remove(sess, &metadata);
- // For LTO purposes, the bytecode of this library is also inserted
- // into the archive.
- //
- // Note that we make sure that the bytecode filename in the archive
- // is never exactly 16 bytes long by adding a 16 byte extension to
- // it. This is to work around a bug in LLDB that would cause it to
- // crash if the name of a file in an archive was exactly 16 bytes.
- let bc_filename = obj_filename.with_extension("bc");
- let bc_deflated_filename = obj_filename.with_extension("bytecode.deflate");
-
- let bc_data = match fs::File::open(&bc_filename).read_to_end() {
- Ok(buffer) => buffer,
- Err(e) => sess.fatal(format!("failed to read bytecode: {}",
- e).as_slice())
- };
+ if sess.opts.cg.codegen_units == 1 {
+ // For LTO purposes, the bytecode of this library is also
+ // inserted into the archive. We currently do this only when
+ // codegen_units == 1, so we don't have to deal with multiple
+ // bitcode files per crate.
+ //
+ // Note that we make sure that the bytecode filename in the
+ // archive is never exactly 16 bytes long by adding a 16 byte
+ // extension to it. This is to work around a bug in LLDB that
+ // would cause it to crash if the name of a file in an archive
+ // was exactly 16 bytes.
+ let bc_filename = obj_filename.with_extension("bc");
+ let bc_deflated_filename = obj_filename.with_extension("bytecode.deflate");
+
+ let bc_data = match fs::File::open(&bc_filename).read_to_end() {
+ Ok(buffer) => buffer,
+ Err(e) => sess.fatal(format!("failed to read bytecode: {}",
+ e).as_slice())
+ };
- let bc_data_deflated = match flate::deflate_bytes(bc_data.as_slice()) {
- Some(compressed) => compressed,
- None => sess.fatal(format!("failed to compress bytecode from {}",
- bc_filename.display()).as_slice())
- };
+ let bc_data_deflated = match flate::deflate_bytes(bc_data.as_slice()) {
+ Some(compressed) => compressed,
+ None => sess.fatal(format!("failed to compress bytecode from {}",
+ bc_filename.display()).as_slice())
+ };
- let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) {
- Ok(file) => file,
- Err(e) => {
- sess.fatal(format!("failed to create compressed bytecode \
- file: {}", e).as_slice())
- }
- };
+ let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) {
+ Ok(file) => file,
+ Err(e) => {
+ sess.fatal(format!("failed to create compressed bytecode \
+ file: {}", e).as_slice())
+ }
+ };
- match write_rlib_bytecode_object_v1(&mut bc_file_deflated,
- bc_data_deflated.as_slice()) {
- Ok(()) => {}
- Err(e) => {
- sess.err(format!("failed to write compressed bytecode: \
- {}", e).as_slice());
- sess.abort_if_errors()
- }
- };
+ match write_rlib_bytecode_object_v1(&mut bc_file_deflated,
+ bc_data_deflated.as_slice()) {
+ Ok(()) => {}
+ Err(e) => {
+ sess.err(format!("failed to write compressed bytecode: \
+ {}", e).as_slice());
+ sess.abort_if_errors()
+ }
+ };
- ab.add_file(&bc_deflated_filename).unwrap();
- remove(sess, &bc_deflated_filename);
- if !sess.opts.cg.save_temps &&
- !sess.opts.output_types.contains(&OutputTypeBitcode) {
- remove(sess, &bc_filename);
+ ab.add_file(&bc_deflated_filename).unwrap();
+ remove(sess, &bc_deflated_filename);
+ if !sess.opts.cg.save_temps &&
+ !sess.opts.output_types.contains(&OutputTypeBitcode) {
+ remove(sess, &bc_filename);
+ }
}
}
// links to all upstream files as well.
fn link_natively(sess: &Session, trans: &CrateTranslation, dylib: bool,
obj_filename: &Path, out_filename: &Path) {
- let tmpdir = TempDir::new("rustc").expect("needs a temp dir");
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
// The invocations of cc share some flags across platforms
let pname = get_cc_prog(sess);
// Mark all dynamic libraries and executables as compatible with ASLR
cmd.arg("-Wl,--dynamicbase");
+
+ // Mark all dynamic libraries and executables as compatible with the larger 4GiB address
+ // space available to x86 Windows binaries on x86_64.
+ if sess.targ_cfg.arch == abi::X86 {
+ cmd.arg("-Wl,--large-address-aware");
+ }
}
if sess.targ_cfg.os == abi::OsAndroid {
// except according to those terms.
use super::link;
+use super::write;
use driver::session;
use driver::config;
use llvm;
archive.read(format!("{}.bytecode.deflate",
file).as_slice())
});
- let bc_encoded = bc_encoded.expect("missing compressed bytecode in archive!");
+ let bc_encoded = match bc_encoded {
+ Some(data) => data,
+ None => {
+ sess.fatal(format!("missing compressed bytecode in {} \
+ (perhaps it was compiled with -C codegen-units > 1)",
+ path.display()).as_slice());
+ },
+ };
let bc_extractor = if is_versioned_bytecode_format(bc_encoded) {
|_| {
// Read the version
if !llvm::LLVMRustLinkInExternalBitcode(llmod,
ptr as *const libc::c_char,
bc_decoded.len() as libc::size_t) {
- link::llvm_err(sess,
- format!("failed to load bc of `{}`",
- name.as_slice()));
+ write::llvm_err(sess.diagnostic().handler(),
+ format!("failed to load bc of `{}`",
+ name.as_slice()));
}
});
}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::lto;
+use back::link::{get_cc_prog, remove};
+use driver::driver::{CrateTranslation, ModuleTranslation, OutputFilenames};
+use driver::config::NoDebugInfo;
+use driver::session::Session;
+use driver::config;
+use llvm;
+use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
+use util::common::time;
+use syntax::abi;
+use syntax::codemap;
+use syntax::diagnostic;
+use syntax::diagnostic::{Emitter, Handler, Level, mk_handler};
+
+use std::c_str::{ToCStr, CString};
+use std::io::Command;
+use std::io::fs;
+use std::iter::Unfold;
+use std::ptr;
+use std::str;
+use std::sync::{Arc, Mutex};
+use std::task::TaskBuilder;
+use libc::{c_uint, c_int};
+
+
+#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
+pub enum OutputType {
+ OutputTypeBitcode,
+ OutputTypeAssembly,
+ OutputTypeLlvmAssembly,
+ OutputTypeObject,
+ OutputTypeExe,
+}
+
+
+pub fn llvm_err(handler: &diagnostic::Handler, msg: String) -> ! {
+ unsafe {
+ let cstr = llvm::LLVMRustGetLastError();
+ if cstr == ptr::null() {
+ handler.fatal(msg.as_slice());
+ } else {
+ let err = CString::new(cstr, true);
+ let err = String::from_utf8_lossy(err.as_bytes());
+ handler.fatal(format!("{}: {}",
+ msg.as_slice(),
+ err.as_slice()).as_slice());
+ }
+ }
+}
+
+pub fn write_output_file(
+ handler: &diagnostic::Handler,
+ target: llvm::TargetMachineRef,
+ pm: llvm::PassManagerRef,
+ m: ModuleRef,
+ output: &Path,
+ file_type: llvm::FileType) {
+ unsafe {
+ output.with_c_str(|output| {
+ let result = llvm::LLVMRustWriteOutputFile(
+ target, pm, m, output, file_type);
+ if !result {
+ llvm_err(handler, "could not write output".to_string());
+ }
+ })
+ }
+}
+
+
+struct Diagnostic {
+ msg: String,
+ code: Option<String>,
+ lvl: Level,
+}
+
+// We use an Arc instead of just returning a list of diagnostics from the
+// child task because we need to make sure that the messages are seen even
+// if the child task fails (for example, when `fatal` is called).
+#[deriving(Clone)]
+struct SharedEmitter {
+ buffer: Arc<Mutex<Vec<Diagnostic>>>,
+}
+
+impl SharedEmitter {
+ fn new() -> SharedEmitter {
+ SharedEmitter {
+ buffer: Arc::new(Mutex::new(Vec::new())),
+ }
+ }
+
+ fn dump(&mut self, handler: &Handler) {
+ let mut buffer = self.buffer.lock();
+ for diag in buffer.iter() {
+ match diag.code {
+ Some(ref code) => {
+ handler.emit_with_code(None,
+ diag.msg.as_slice(),
+ code.as_slice(),
+ diag.lvl);
+ },
+ None => {
+ handler.emit(None,
+ diag.msg.as_slice(),
+ diag.lvl);
+ },
+ }
+ }
+ buffer.clear();
+ }
+}
+
+impl Emitter for SharedEmitter {
+ fn emit(&mut self, cmsp: Option<(&codemap::CodeMap, codemap::Span)>,
+ msg: &str, code: Option<&str>, lvl: Level) {
+ assert!(cmsp.is_none(), "SharedEmitter doesn't support spans");
+
+ self.buffer.lock().push(Diagnostic {
+ msg: msg.to_string(),
+ code: code.map(|s| s.to_string()),
+ lvl: lvl,
+ });
+ }
+
+ fn custom_emit(&mut self, _cm: &codemap::CodeMap,
+ _sp: diagnostic::RenderSpan, _msg: &str, _lvl: Level) {
+ fail!("SharedEmitter doesn't support custom_emit");
+ }
+}
+
+
+// On android, we by default compile for armv7 processors. This enables
+// things like double word CAS instructions (rather than emulating them)
+// which are *far* more efficient. This is obviously undesirable in some
+// cases, so if any sort of target feature is specified we don't append v7
+// to the feature list.
+//
+// On iOS only armv7 and newer are supported. So it is useful to
+// get all hardware potential via VFP3 (hardware floating point)
+// and NEON (SIMD) instructions supported by LLVM.
+// Note that without those flags various linking errors might
+// arise, as some of the intrinsics are converted into function calls
+// and nobody provides implementations of those functions.
+fn target_feature<'a>(sess: &'a Session) -> &'a str {
+ match sess.targ_cfg.os {
+ abi::OsAndroid => {
+ if "" == sess.opts.cg.target_feature.as_slice() {
+ "+v7"
+ } else {
+ sess.opts.cg.target_feature.as_slice()
+ }
+ },
+ abi::OsiOS if sess.targ_cfg.arch == abi::Arm => {
+ "+v7,+thumb2,+vfp3,+neon"
+ },
+ _ => sess.opts.cg.target_feature.as_slice()
+ }
+}
+
+fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel {
+ match optimize {
+ config::No => llvm::CodeGenLevelNone,
+ config::Less => llvm::CodeGenLevelLess,
+ config::Default => llvm::CodeGenLevelDefault,
+ config::Aggressive => llvm::CodeGenLevelAggressive,
+ }
+}
+
+fn create_target_machine(sess: &Session) -> TargetMachineRef {
+ let reloc_model = match sess.opts.cg.relocation_model.as_slice() {
+ "pic" => llvm::RelocPIC,
+ "static" => llvm::RelocStatic,
+ "default" => llvm::RelocDefault,
+ "dynamic-no-pic" => llvm::RelocDynamicNoPic,
+ _ => {
+ sess.err(format!("{} is not a valid relocation mode",
+ sess.opts
+ .cg
+ .relocation_model).as_slice());
+ sess.abort_if_errors();
+ unreachable!();
+ }
+ };
+
+ let opt_level = get_llvm_opt_level(sess.opts.optimize);
+ let use_softfp = sess.opts.cg.soft_float;
+
+ // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a parameter.
+ // FIXME: #11954: mac64 unwinding may not work with fp elim
+ let no_fp_elim = (sess.opts.debuginfo != NoDebugInfo) ||
+ (sess.targ_cfg.os == abi::OsMacos &&
+ sess.targ_cfg.arch == abi::X86_64);
+
+ // OSX has -dead_strip, which doesn't rely on ffunction_sections
+ // FIXME(#13846) this should be enabled for windows
+ let ffunction_sections = sess.targ_cfg.os != abi::OsMacos &&
+ sess.targ_cfg.os != abi::OsWindows;
+ let fdata_sections = ffunction_sections;
+
+ let code_model = match sess.opts.cg.code_model.as_slice() {
+ "default" => llvm::CodeModelDefault,
+ "small" => llvm::CodeModelSmall,
+ "kernel" => llvm::CodeModelKernel,
+ "medium" => llvm::CodeModelMedium,
+ "large" => llvm::CodeModelLarge,
+ _ => {
+ sess.err(format!("{} is not a valid code model",
+ sess.opts
+ .cg
+ .code_model).as_slice());
+ sess.abort_if_errors();
+ unreachable!();
+ }
+ };
+
+ unsafe {
+ sess.targ_cfg
+ .target_strs
+ .target_triple
+ .as_slice()
+ .with_c_str(|t| {
+ sess.opts.cg.target_cpu.as_slice().with_c_str(|cpu| {
+ target_feature(sess).with_c_str(|features| {
+ llvm::LLVMRustCreateTargetMachine(
+ t, cpu, features,
+ code_model,
+ reloc_model,
+ opt_level,
+ true /* EnableSegstk */,
+ use_softfp,
+ no_fp_elim,
+ ffunction_sections,
+ fdata_sections,
+ )
+ })
+ })
+ })
+ }
+}
+
+
+/// Module-specific configuration for `optimize_and_codegen`.
+#[deriving(Clone)]
+struct ModuleConfig {
+ /// LLVM TargetMachine to use for codegen.
+ tm: TargetMachineRef,
+ /// Names of additional optimization passes to run.
+ passes: Vec<String>,
+ /// Some(level) to optimize at a certain level, or None to run
+ /// absolutely no optimizations (used for the metadata module).
+ opt_level: Option<llvm::CodeGenOptLevel>,
+
+ // Flags indicating which outputs to produce.
+ emit_no_opt_bc: bool,
+ emit_bc: bool,
+ emit_lto_bc: bool,
+ emit_ir: bool,
+ emit_asm: bool,
+ emit_obj: bool,
+
+ // Miscellaneous flags. These are mostly copied from command-line
+ // options.
+ no_verify: bool,
+ no_prepopulate_passes: bool,
+ no_builtins: bool,
+ time_passes: bool,
+}
+
+impl ModuleConfig {
+ fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig {
+ ModuleConfig {
+ tm: tm,
+ passes: passes,
+ opt_level: None,
+
+ emit_no_opt_bc: false,
+ emit_bc: false,
+ emit_lto_bc: false,
+ emit_ir: false,
+ emit_asm: false,
+ emit_obj: false,
+
+ no_verify: false,
+ no_prepopulate_passes: false,
+ no_builtins: false,
+ time_passes: false,
+ }
+ }
+
+ fn set_flags(&mut self, sess: &Session, trans: &CrateTranslation) {
+ self.no_verify = sess.no_verify();
+ self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes;
+ self.no_builtins = trans.no_builtins;
+ self.time_passes = sess.time_passes();
+ }
+}
+
+/// Additional resources used by optimize_and_codegen (not module specific)
+struct CodegenContext<'a> {
+ // Extra resources used for LTO: (sess, reachable). This will be `None`
+ // when running in a worker thread.
+ lto_ctxt: Option<(&'a Session, &'a [String])>,
+ // Handler to use for diagnostics produced during codegen.
+ handler: &'a Handler,
+}
+
+impl<'a> CodegenContext<'a> {
+ fn new(handler: &'a Handler) -> CodegenContext<'a> {
+ CodegenContext {
+ lto_ctxt: None,
+ handler: handler,
+ }
+ }
+
+ fn new_with_session(sess: &'a Session, reachable: &'a [String]) -> CodegenContext<'a> {
+ CodegenContext {
+ lto_ctxt: Some((sess, reachable)),
+ handler: sess.diagnostic().handler(),
+ }
+ }
+}
+
+// Unsafe due to LLVM calls.
+unsafe fn optimize_and_codegen(cgcx: &CodegenContext,
+ mtrans: ModuleTranslation,
+ config: ModuleConfig,
+ name_extra: String,
+ output_names: OutputFilenames) {
+ let ModuleTranslation { llmod, llcx } = mtrans;
+ let tm = config.tm;
+
+ if config.emit_no_opt_bc {
+ let ext = format!("{}.no-opt.bc", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+
+ match config.opt_level {
+ Some(opt_level) => {
+ // Create the two optimizing pass managers. These mirror what clang
+            // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+            // If we're verifying, add the "verify" pass to the function
+            // pass manager.
+ let addpass = |pass: &str| {
+ pass.as_slice().with_c_str(|s| llvm::LLVMRustAddPass(fpm, s))
+ };
+ if !config.no_verify { assert!(addpass("verify")); }
+
+ if !config.no_prepopulate_passes {
+ llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
+ llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
+ populate_llvm_passes(fpm, mpm, llmod, opt_level,
+ config.no_builtins);
+ }
+
+ for pass in config.passes.iter() {
+ pass.as_slice().with_c_str(|s| {
+ if !llvm::LLVMRustAddPass(mpm, s) {
+ cgcx.handler.warn(format!("unknown pass {}, ignoring",
+ *pass).as_slice());
+ }
+ })
+ }
+
+ // Finally, run the actual optimization passes
+ time(config.time_passes, "llvm function passes", (), |()|
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
+ time(config.time_passes, "llvm module passes", (), |()|
+ llvm::LLVMRunPassManager(mpm, llmod));
+
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
+
+ match cgcx.lto_ctxt {
+ Some((sess, reachable)) if sess.lto() => {
+ time(sess.time_passes(), "all lto passes", (), |()|
+ lto::run(sess, llmod, tm, reachable));
+
+ if config.emit_lto_bc {
+ let name = format!("{}.lto.bc", name_extra);
+ output_names.with_extension(name.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+ },
+ _ => {},
+ }
+ },
+ None => {},
+ }
+
+ // A codegen-specific pass manager is used to generate object
+ // files for an LLVM module.
+ //
+    // Apparently each of these pass managers is a one-shot kind of
+    // thing, so we create a new one for each type of output. The
+    // pass manager passed to the closure must not escape the closure
+    // itself, and each manager should only be used once.
+ unsafe fn with_codegen(tm: TargetMachineRef, llmod: ModuleRef,
+ no_builtins: bool, f: |PassManagerRef|) {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm);
+ llvm::LLVMDisposePassManager(cpm);
+ }
+
+ if config.emit_bc {
+ let ext = format!("{}.bc", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+
+ time(config.time_passes, "codegen passes", (), |()| {
+ if config.emit_ir {
+ let ext = format!("{}.ll", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|output| {
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ llvm::LLVMRustPrintModule(cpm, llmod, output);
+ })
+ })
+ }
+
+ if config.emit_asm {
+ let path = output_names.with_extension(format!("{}.s", name_extra).as_slice());
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::AssemblyFile);
+ });
+ }
+
+ if config.emit_obj {
+ let path = output_names.with_extension(format!("{}.o", name_extra).as_slice());
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::ObjectFile);
+ });
+ }
+ });
+
+ llvm::LLVMDisposeModule(llmod);
+ llvm::LLVMContextDispose(llcx);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+}
+
+pub fn run_passes(sess: &Session,
+ trans: &CrateTranslation,
+ output_types: &[OutputType],
+ crate_output: &OutputFilenames) {
+ // It's possible that we have `codegen_units > 1` but only one item in
+ // `trans.modules`. We could theoretically proceed and do LTO in that
+ // case, but it would be confusing to have the validity of
+ // `-Z lto -C codegen-units=2` depend on details of the crate being
+ // compiled, so we complain regardless.
+ if sess.lto() && sess.opts.cg.codegen_units > 1 {
+ // This case is impossible to handle because LTO expects to be able
+ // to combine the entire crate and all its dependencies into a
+ // single compilation unit, but each codegen unit is in a separate
+ // LLVM context, so they can't easily be combined.
+ sess.fatal("can't perform LTO when using multiple codegen units");
+ }
+
+ // Sanity check
+ assert!(trans.modules.len() == sess.opts.cg.codegen_units);
+
+ unsafe {
+ configure_llvm(sess);
+ }
+
+ let tm = create_target_machine(sess);
+
+ // Figure out what we actually need to build.
+
+ let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone());
+ let mut metadata_config = ModuleConfig::new(tm, vec!());
+
+ modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize));
+
+ // Save all versions of the bytecode if we're saving our temporaries.
+ if sess.opts.cg.save_temps {
+ modules_config.emit_no_opt_bc = true;
+ modules_config.emit_bc = true;
+ modules_config.emit_lto_bc = true;
+ metadata_config.emit_bc = true;
+ }
+
+ // Emit a bitcode file for the crate if we're emitting an rlib.
+ // Whenever an rlib is created, the bitcode is inserted into the
+ // archive in order to allow LTO against it.
+ let needs_crate_bitcode =
+ sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
+ sess.opts.output_types.contains(&OutputTypeExe) &&
+ sess.opts.cg.codegen_units == 1;
+ if needs_crate_bitcode {
+ modules_config.emit_bc = true;
+ }
+
+ for output_type in output_types.iter() {
+ match *output_type {
+ OutputTypeBitcode => { modules_config.emit_bc = true; },
+ OutputTypeLlvmAssembly => { modules_config.emit_ir = true; },
+ OutputTypeAssembly => {
+ modules_config.emit_asm = true;
+                // If the user didn't actually request assembly output,
+                // this function was invoked specially (via the external
+                // assembler path) with OutputTypeAssembly, and we still
+                // want the metadata object file.
+ if !sess.opts.output_types.contains(&OutputTypeAssembly) {
+ metadata_config.emit_obj = true;
+ }
+ },
+ OutputTypeObject => { modules_config.emit_obj = true; },
+ OutputTypeExe => {
+ modules_config.emit_obj = true;
+ metadata_config.emit_obj = true;
+ },
+ }
+ }
+
+ modules_config.set_flags(sess, trans);
+ metadata_config.set_flags(sess, trans);
+
+
+ // Populate a buffer with a list of codegen tasks. Items are processed in
+ // LIFO order, just because it's a tiny bit simpler that way. (The order
+ // doesn't actually matter.)
+ let mut work_items = Vec::with_capacity(1 + trans.modules.len());
+
+ {
+ let work = build_work_item(sess,
+ trans.metadata_module,
+ metadata_config.clone(),
+ crate_output.clone(),
+ "metadata".to_string());
+ work_items.push(work);
+ }
+
+ for (index, mtrans) in trans.modules.iter().enumerate() {
+ let work = build_work_item(sess,
+ *mtrans,
+ modules_config.clone(),
+ crate_output.clone(),
+ format!("{}", index));
+ work_items.push(work);
+ }
+
+ // Process the work items, optionally using worker threads.
+ if sess.opts.cg.codegen_units == 1 {
+ run_work_singlethreaded(sess, trans.reachable.as_slice(), work_items);
+
+ if needs_crate_bitcode {
+            // The only bitcode file produced (aside from metadata) was
+            // "crate.0.bc". Copy it to "crate.bc", since that's what
+            // `link_rlib` expects to find.
+ fs::copy(&crate_output.with_extension("0.bc"),
+ &crate_output.temp_path(OutputTypeBitcode)).unwrap();
+ }
+ } else {
+ run_work_multithreaded(sess, work_items, sess.opts.cg.codegen_units);
+
+ assert!(!needs_crate_bitcode,
+ "can't produce a crate bitcode file from multiple compilation units");
+ }
+
+ // All codegen is finished.
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(tm);
+ }
+
+ // Produce final compile outputs.
+
+ let copy_if_one_unit = |ext: &str, output_type: OutputType| {
+ // Three cases:
+ if sess.opts.cg.codegen_units == 1 {
+        // 1) Only one codegen unit. In this case we simply copy
+        //    `foo.0.x` to `foo.x`.
+ fs::copy(&crate_output.with_extension(ext),
+ &crate_output.path(output_type)).unwrap();
+ if !sess.opts.cg.save_temps {
+ // The user just wants `foo.x`, not `foo.0.x`.
+ remove(sess, &crate_output.with_extension(ext));
+ }
+ } else {
+ if crate_output.single_output_file.is_some() {
+ // 2) Multiple codegen units, with `-o some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(format!("ignoring -o because multiple .{} files were produced",
+ ext).as_slice());
+ } else {
+ // 3) Multiple codegen units, but no `-o some_name`. We
+ // just leave the `foo.0.x` files in place.
+ // (We don't have to do any work in this case.)
+ }
+ }
+ };
+
+ let link_obj = |output_path: &Path| {
+ // Running `ld -r` on a single input is kind of pointless.
+ if sess.opts.cg.codegen_units == 1 {
+ fs::copy(&crate_output.with_extension("0.o"),
+ output_path).unwrap();
+ // Leave the .0.o file around, to mimic the behavior of the normal
+ // code path.
+ return;
+ }
+
+ // Some builds of MinGW GCC will pass --force-exe-suffix to ld, which
+ // will automatically add a .exe extension if the extension is not
+ // already .exe or .dll. To ensure consistent behavior on Windows, we
+ // add the .exe suffix explicitly and then rename the output file to
+ // the desired path. This will give the correct behavior whether or
+ // not GCC adds --force-exe-suffix.
+ let windows_output_path =
+ if sess.targ_cfg.os == abi::OsWindows {
+ Some(output_path.with_extension("o.exe"))
+ } else {
+ None
+ };
+
+ let pname = get_cc_prog(sess);
+ let mut cmd = Command::new(pname.as_slice());
+
+ cmd.args(sess.targ_cfg.target_strs.cc_args.as_slice());
+ cmd.arg("-nostdlib");
+
+ for index in range(0, trans.modules.len()) {
+ cmd.arg(crate_output.with_extension(format!("{}.o", index).as_slice()));
+ }
+
+ cmd.arg("-r")
+ .arg("-o")
+ .arg(windows_output_path.as_ref().unwrap_or(output_path));
+
+ if (sess.opts.debugging_opts & config::PRINT_LINK_ARGS) != 0 {
+ println!("{}", &cmd);
+ }
+
+ cmd.stdin(::std::io::process::Ignored)
+ .stdout(::std::io::process::InheritFd(1))
+ .stderr(::std::io::process::InheritFd(2));
+ match cmd.status() {
+ Ok(_) => {},
+ Err(e) => {
+ sess.err(format!("could not exec the linker `{}`: {}",
+ pname,
+ e).as_slice());
+ sess.abort_if_errors();
+ },
+ }
+
+ match windows_output_path {
+ Some(ref windows_path) => {
+ fs::rename(windows_path, output_path).unwrap();
+ },
+ None => {
+ // The file is already named according to `output_path`.
+ }
+ }
+ };
+
+ // Flag to indicate whether the user explicitly requested bitcode.
+ // Otherwise, we produced it only as a temporary output, and will need
+ // to get rid of it.
+ // FIXME: Since we don't support LTO anyway, maybe we can avoid
+ // producing the temporary .0.bc's in the first place?
+ let mut save_bitcode = false;
+ for output_type in output_types.iter() {
+ match *output_type {
+ OutputTypeBitcode => {
+ save_bitcode = true;
+ copy_if_one_unit("0.bc", OutputTypeBitcode);
+ },
+ OutputTypeLlvmAssembly => { copy_if_one_unit("0.ll", OutputTypeLlvmAssembly); },
+ OutputTypeAssembly => { copy_if_one_unit("0.s", OutputTypeAssembly); },
+ OutputTypeObject => { link_obj(&crate_output.path(OutputTypeObject)); },
+ OutputTypeExe => {
+ // If OutputTypeObject is already in the list, then
+ // `crate.o` will be handled by the OutputTypeObject case.
+ // Otherwise, we need to create the temporary object so we
+ // can run the linker.
+ if !sess.opts.output_types.contains(&OutputTypeObject) {
+ link_obj(&crate_output.temp_path(OutputTypeObject));
+ }
+ },
+ }
+ }
+ let save_bitcode = save_bitcode;
+
+ // Clean up unwanted temporary files.
+
+ // We create the following files by default:
+ // - crate.0.bc
+ // - crate.0.o
+ // - crate.metadata.bc
+ // - crate.metadata.o
+ // - crate.o (linked from crate.##.o)
+ // - crate.bc (copied from crate.0.bc)
+ // We may create additional files if requested by the user (through
+ // `-C save-temps` or `--emit=` flags).
+
+ if !sess.opts.cg.save_temps {
+ // Remove the temporary .0.o objects. If the user didn't
+ // explicitly request bitcode (with --emit=bc), we must remove
+ // .0.bc as well. (We don't touch the crate.bc that may have been
+ // produced earlier.)
+ for i in range(0, trans.modules.len()) {
+ if modules_config.emit_obj {
+ let ext = format!("{}.o", i);
+ remove(sess, &crate_output.with_extension(ext.as_slice()));
+ }
+
+ if modules_config.emit_bc && !save_bitcode {
+ let ext = format!("{}.bc", i);
+ remove(sess, &crate_output.with_extension(ext.as_slice()));
+ }
+ }
+
+ if metadata_config.emit_bc && !save_bitcode {
+ remove(sess, &crate_output.with_extension("metadata.bc"));
+ }
+ }
+
+ // We leave the following files around by default:
+ // - crate.o
+ // - crate.metadata.o
+ // - crate.bc
+ // These are used in linking steps and will be cleaned up afterward.
+
+ // FIXME: time_llvm_passes support - does this use a global context or
+ // something?
+ //if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
+}
+
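+/// A `WorkItem` is a sendable closure that runs `optimize_and_codegen`
+/// over a single module; see `build_work_item` below.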
+type WorkItem = proc(&CodegenContext):Send;
+
+fn build_work_item(sess: &Session,
+ mtrans: ModuleTranslation,
+ config: ModuleConfig,
+ output_names: OutputFilenames,
+ name_extra: String) -> WorkItem {
+ let mut config = config;
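+    // Give each work item its own TargetMachine: the one created in
+    // `run_passes` stays on the main thread and is not sent to workers.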
+ config.tm = create_target_machine(sess);
+
+ proc(cgcx) unsafe {
+ optimize_and_codegen(cgcx, mtrans, config, name_extra, output_names);
+ }
+}
+
+fn run_work_singlethreaded(sess: &Session,
+ reachable: &[String],
+ work_items: Vec<WorkItem>) {
+ let cgcx = CodegenContext::new_with_session(sess, reachable);
+ let mut work_items = work_items;
+
+ // Since we're running single-threaded, we can pass the session to
+ // the proc, allowing `optimize_and_codegen` to perform LTO.
+ for work in Unfold::new((), |_| work_items.pop()) {
+ work(&cgcx);
+ }
+}
+
+fn run_work_multithreaded(sess: &Session,
+ work_items: Vec<WorkItem>,
+ num_workers: uint) {
+ // Run some workers to process the work items.
+ let work_items_arc = Arc::new(Mutex::new(work_items));
+ let mut diag_emitter = SharedEmitter::new();
+ let mut futures = Vec::with_capacity(num_workers);
+
+ for i in range(0, num_workers) {
+ let work_items_arc = work_items_arc.clone();
+ let diag_emitter = diag_emitter.clone();
+
+ let future = TaskBuilder::new().named(format!("codegen-{}", i)).try_future(proc() {
+ let diag_handler = mk_handler(box diag_emitter);
+
+ // Must construct cgcx inside the proc because it has non-Send
+ // fields.
+ let cgcx = CodegenContext::new(&diag_handler);
+
+ loop {
+ // Avoid holding the lock for the entire duration of the match.
+ let maybe_work = work_items_arc.lock().pop();
+ match maybe_work {
+ Some(work) => {
+ work(&cgcx);
+
+ // Make sure to fail the worker so the main thread can
+ // tell that there were errors.
+ cgcx.handler.abort_if_errors();
+ }
+ None => break,
+ }
+ }
+ });
+ futures.push(future);
+ }
+
+ let mut failed = false;
+ for future in futures.move_iter() {
+ match future.unwrap() {
+ Ok(()) => {},
+ Err(_) => {
+ failed = true;
+ },
+ }
+ // Display any new diagnostics.
+ diag_emitter.dump(sess.diagnostic().handler());
+ }
+ if failed {
+ sess.fatal("aborting due to worker thread failure");
+ }
+}
+
+pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
+ let pname = get_cc_prog(sess);
+ let mut cmd = Command::new(pname.as_slice());
+
+ cmd.arg("-c").arg("-o").arg(outputs.path(OutputTypeObject))
+ .arg(outputs.temp_path(OutputTypeAssembly));
+ debug!("{}", &cmd);
+
+ match cmd.output() {
+ Ok(prog) => {
+ if !prog.status.success() {
+ sess.err(format!("linking with `{}` failed: {}",
+ pname,
+ prog.status).as_slice());
+ sess.note(format!("{}", &cmd).as_slice());
+ let mut note = prog.error.clone();
+ note.push_all(prog.output.as_slice());
+ sess.note(str::from_utf8(note.as_slice()).unwrap());
+ sess.abort_if_errors();
+ }
+ },
+ Err(e) => {
+ sess.err(format!("could not exec the linker `{}`: {}",
+ pname,
+ e).as_slice());
+ sess.abort_if_errors();
+ }
+ }
+}
+
+unsafe fn configure_llvm(sess: &Session) {
+ use std::sync::{Once, ONCE_INIT};
+ static mut INIT: Once = ONCE_INIT;
+
+    // Copy what clang does by turning on loop vectorization at O2 and
+    // SLP vectorization at O3.
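+    //
+    // For example, `rustc -O` (opt-level 2) passes -vectorize-loops to
+    // LLVM, while `-C opt-level=3` passes both -vectorize-loops and
+    // -vectorize-slp.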
+ let vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
+ (sess.opts.optimize == config::Default ||
+ sess.opts.optimize == config::Aggressive);
+ let vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
+ sess.opts.optimize == config::Aggressive;
+
+ let mut llvm_c_strs = Vec::new();
+ let mut llvm_args = Vec::new();
+ {
+ let add = |arg: &str| {
+ let s = arg.to_c_str();
+ llvm_args.push(s.as_ptr());
+ llvm_c_strs.push(s);
+ };
+ add("rustc"); // fake program name
+ if vectorize_loop { add("-vectorize-loops"); }
+ if vectorize_slp { add("-vectorize-slp"); }
+ if sess.time_llvm_passes() { add("-time-passes"); }
+ if sess.print_llvm_passes() { add("-debug-pass=Structure"); }
+
+ for arg in sess.opts.cg.llvm_args.iter() {
+ add((*arg).as_slice());
+ }
+ }
+
+ INIT.doit(|| {
+ llvm::LLVMInitializePasses();
+
+        // Only initialize the platforms supported by Rust here, because
+        // a build against --llvm-root may include targets that rustllvm
+        // doesn't actually link to, and it's pointless to register target
+        // info for platforms Rust cannot generate machine code for.
+ llvm::LLVMInitializeX86TargetInfo();
+ llvm::LLVMInitializeX86Target();
+ llvm::LLVMInitializeX86TargetMC();
+ llvm::LLVMInitializeX86AsmPrinter();
+ llvm::LLVMInitializeX86AsmParser();
+
+ llvm::LLVMInitializeARMTargetInfo();
+ llvm::LLVMInitializeARMTarget();
+ llvm::LLVMInitializeARMTargetMC();
+ llvm::LLVMInitializeARMAsmPrinter();
+ llvm::LLVMInitializeARMAsmParser();
+
+ llvm::LLVMInitializeMipsTargetInfo();
+ llvm::LLVMInitializeMipsTarget();
+ llvm::LLVMInitializeMipsTargetMC();
+ llvm::LLVMInitializeMipsAsmPrinter();
+ llvm::LLVMInitializeMipsAsmParser();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
+ llvm_args.as_ptr());
+ });
+}
+
+unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
+ mpm: llvm::PassManagerRef,
+ llmod: ModuleRef,
+ opt: llvm::CodeGenOptLevel,
+ no_builtins: bool) {
+ // Create the PassManagerBuilder for LLVM. We configure it with
+ // reasonable defaults and prepare it to actually populate the pass
+ // manager.
+ let builder = llvm::LLVMPassManagerBuilderCreate();
+ match opt {
+ llvm::CodeGenLevelNone => {
+ // Don't add lifetime intrinsics at O0
+ llvm::LLVMRustAddAlwaysInlinePass(builder, false);
+ }
+ llvm::CodeGenLevelLess => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, true);
+ }
+ // numeric values copied from clang
+ llvm::CodeGenLevelDefault => {
+ llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
+ 225);
+ }
+ llvm::CodeGenLevelAggressive => {
+ llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
+ 275);
+ }
+ }
+ llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt as c_uint);
+ llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, no_builtins);
+
+ // Use the builder to populate the function/module pass managers.
+ llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
+ llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
+ llvm::LLVMPassManagerBuilderDispose(builder);
+
+ match opt {
+ llvm::CodeGenLevelDefault | llvm::CodeGenLevelAggressive => {
+ "mergefunc".with_c_str(|s| llvm::LLVMRustAddPass(mpm, s));
+ }
+ _ => {}
+ };
+}
E0146,
E0147,
E0148,
- E0149,
- E0150,
E0151,
E0152,
E0153,
E0157,
E0158,
E0159,
- E0160
+ E0160,
+ E0161
)
use driver::session::Session;
use back;
-use back::link;
+use back::write;
use back::target_strs;
use back::{arm, x86, x86_64, mips, mipsel};
use lint;
use syntax::parse;
use syntax::parse::token::InternedString;
-use std::collections::{HashSet, HashMap};
+use std::collections::HashMap;
use getopts::{optopt, optmulti, optflag, optflagopt};
use getopts;
use std::cell::{RefCell};
pub debuginfo: DebugInfoLevel,
pub lint_opts: Vec<(String, lint::Level)>,
pub describe_lints: bool,
- pub output_types: Vec<back::link::OutputType> ,
+ pub output_types: Vec<back::write::OutputType> ,
// This was mutable for rustpkg, which updates search paths based on the
    // parsed code. It remains mutable in case its replacements want to use
// this.
- pub addl_lib_search_paths: RefCell<HashSet<Path>>,
+ pub addl_lib_search_paths: RefCell<Vec<Path>>,
pub maybe_sysroot: Option<Path>,
pub target_triple: String,
// User-specified cfg meta items. The compiler itself will add additional
lint_opts: Vec::new(),
describe_lints: false,
output_types: Vec::new(),
- addl_lib_search_paths: RefCell::new(HashSet::new()),
+ addl_lib_search_paths: RefCell::new(Vec::new()),
maybe_sysroot: None,
target_triple: driver::host_triple().to_string(),
cfg: Vec::new(),
}
}
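+    // Parses an unsigned integer option value; e.g. Some("4") sets the
+    // slot to 4 and returns true, while None or a non-numeric value
+    // leaves the slot alone and returns false.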
+ fn parse_uint(slot: &mut uint, v: Option<&str>) -> bool {
+ use std::from_str::FromStr;
+ match v.and_then(FromStr::from_str) {
+ Some(i) => { *slot = i; true },
+ None => false
+ }
+ }
}
) )
"metadata to mangle symbol names with"),
extra_filename: String = ("".to_string(), parse_string,
"extra data to put in each output filename"),
+ codegen_units: uint = (1, parse_uint,
+ "divide crate into N units to optimize in parallel"),
)
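+// For example, `rustc -C codegen-units=4 foo.rs` divides the crate into
+// four units that are optimized in parallel. Note that this cannot be
+// combined with `-Z lto` (see `run_passes` in back::write).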
pub fn build_codegen_options(matches: &getopts::Matches) -> CodegenOptions
for unparsed_output_type in unparsed_output_types.iter() {
for part in unparsed_output_type.as_slice().split(',') {
let output_type = match part.as_slice() {
- "asm" => link::OutputTypeAssembly,
- "ir" => link::OutputTypeLlvmAssembly,
- "bc" => link::OutputTypeBitcode,
- "obj" => link::OutputTypeObject,
- "link" => link::OutputTypeExe,
+ "asm" => write::OutputTypeAssembly,
+ "ir" => write::OutputTypeLlvmAssembly,
+ "bc" => write::OutputTypeBitcode,
+ "obj" => write::OutputTypeObject,
+ "link" => write::OutputTypeExe,
_ => {
early_error(format!("unknown emission type: `{}`",
part).as_slice())
output_types.as_mut_slice().sort();
output_types.dedup();
if output_types.len() == 0 {
- output_types.push(link::OutputTypeExe);
+ output_types.push(write::OutputTypeExe);
}
let sysroot_opt = matches.opt_str("sysroot").map(|m| Path::new(m));
use back::link;
+use back::write;
use driver::session::Session;
use driver::config;
use front;
use std::io;
use std::io::fs;
+use arena::TypedArena;
use syntax::ast;
use syntax::attr;
use syntax::attr::{AttrMetaMethods};
if stop_after_phase_2(&sess) { return; }
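+    // The arena owns every type node (`ty::t_box_`) allocated during
+    // analysis; the `'tcx` lifetime on `CrateAnalysis` and `ty::ctxt`
+    // borrows from it.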
+ let type_arena = TypedArena::new();
let analysis = phase_3_run_analysis_passes(sess, &expanded_crate,
- ast_map, id);
+ ast_map, &type_arena, id);
phase_save_analysis(&analysis.ty_cx.sess, &expanded_crate, &analysis, outdir);
if stop_after_phase_3(&analysis.ty_cx.sess) { return; }
let (tcx, trans) = phase_4_translate_to_llvm(expanded_crate, analysis);
Some((krate, map))
}
-pub struct CrateAnalysis {
+pub struct CrateAnalysis<'tcx> {
pub exp_map2: middle::resolve::ExportMap2,
pub exported_items: middle::privacy::ExportedItems,
pub public_items: middle::privacy::PublicItems,
- pub ty_cx: ty::ctxt,
+ pub ty_cx: ty::ctxt<'tcx>,
pub reachable: NodeSet,
pub name: String,
}
/// Run the resolution, typechecking, region checking and other
/// miscellaneous analysis passes on the crate. Return various
/// structures carrying the results of the analysis.
-pub fn phase_3_run_analysis_passes(sess: Session,
- krate: &ast::Crate,
- ast_map: syntax::ast_map::Map,
- name: String) -> CrateAnalysis {
+pub fn phase_3_run_analysis_passes<'tcx>(sess: Session,
+ krate: &ast::Crate,
+ ast_map: syntax::ast_map::Map,
+ type_arena: &'tcx TypedArena<ty::t_box_>,
+ name: String) -> CrateAnalysis<'tcx> {
let time_passes = sess.time_passes();
time(time_passes, "external crate/lib resolution", (), |_|
stability::Index::build(krate));
let ty_cx = ty::mk_ctxt(sess,
+ type_arena,
def_map,
named_region_map,
ast_map,
time(time_passes, "borrow checking", (), |_|
middle::borrowck::check_crate(&ty_cx, krate));
+ time(time_passes, "rvalue checking", (), |_|
+ middle::check_rvalues::check_crate(&ty_cx, krate));
+
time(time_passes, "kind checking", (), |_|
kind::check_crate(&ty_cx, krate));
middle::save::process_crate(sess, krate, analysis, odir));
}
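+/// The LLVM module (and its associated context) produced for a single
+/// codegen unit; `CrateTranslation` carries one per unit, plus one for
+/// the metadata.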
+pub struct ModuleTranslation {
+ pub llcx: ContextRef,
+ pub llmod: ModuleRef,
+}
+
pub struct CrateTranslation {
- pub context: ContextRef,
- pub module: ModuleRef,
- pub metadata_module: ModuleRef,
+ pub modules: Vec<ModuleTranslation>,
+ pub metadata_module: ModuleTranslation,
pub link: LinkMeta,
pub metadata: Vec<u8>,
pub reachable: Vec<String>,
trans: &CrateTranslation,
outputs: &OutputFilenames) {
if sess.opts.cg.no_integrated_as {
- let output_type = link::OutputTypeAssembly;
+ let output_type = write::OutputTypeAssembly;
time(sess.time_passes(), "LLVM passes", (), |_|
- link::write::run_passes(sess, trans, [output_type], outputs));
+ write::run_passes(sess, trans, [output_type], outputs));
- link::write::run_assembler(sess, outputs);
+ write::run_assembler(sess, outputs);
// Remove assembly source, unless --save-temps was specified
if !sess.opts.cg.save_temps {
- fs::unlink(&outputs.temp_path(link::OutputTypeAssembly)).unwrap();
+ fs::unlink(&outputs.temp_path(write::OutputTypeAssembly)).unwrap();
}
} else {
time(sess.time_passes(), "LLVM passes", (), |_|
- link::write::run_passes(sess,
- trans,
- sess.opts.output_types.as_slice(),
- outputs));
+ write::run_passes(sess,
+ trans,
+ sess.opts.output_types.as_slice(),
+ outputs));
}
}
}
pub fn stop_after_phase_5(sess: &Session) -> bool {
- if !sess.opts.output_types.iter().any(|&i| i == link::OutputTypeExe) {
+ if !sess.opts.output_types.iter().any(|&i| i == write::OutputTypeExe) {
debug!("not building executable, returning early from compile_input");
return true;
}
for output_type in sess.opts.output_types.iter() {
let file = outputs.path(*output_type);
match *output_type {
- link::OutputTypeExe => {
+ write::OutputTypeExe => {
for output in sess.crate_types.borrow().iter() {
let p = link::filename_for_input(sess, *output,
id, &file);
session.opts.cg.metadata.clone()
}
+#[deriving(Clone)]
pub struct OutputFilenames {
pub out_directory: Path,
pub out_filestem: String,
}
impl OutputFilenames {
- pub fn path(&self, flavor: link::OutputType) -> Path {
+ pub fn path(&self, flavor: write::OutputType) -> Path {
match self.single_output_file {
Some(ref path) => return path.clone(),
None => {}
self.temp_path(flavor)
}
- pub fn temp_path(&self, flavor: link::OutputType) -> Path {
+ pub fn temp_path(&self, flavor: write::OutputType) -> Path {
let base = self.out_directory.join(self.filestem());
match flavor {
- link::OutputTypeBitcode => base.with_extension("bc"),
- link::OutputTypeAssembly => base.with_extension("s"),
- link::OutputTypeLlvmAssembly => base.with_extension("ll"),
- link::OutputTypeObject => base.with_extension("o"),
- link::OutputTypeExe => base,
+ write::OutputTypeBitcode => base.with_extension("bc"),
+ write::OutputTypeAssembly => base.with_extension("s"),
+ write::OutputTypeLlvmAssembly => base.with_extension("ll"),
+ write::OutputTypeObject => base.with_extension("o"),
+ write::OutputTypeExe => base,
}
}
use std::io::{mod, MemReader};
use std::from_str::FromStr;
use std::option;
-
+use arena::TypedArena;
#[deriving(PartialEq, Show)]
pub enum PpSourceMode {
}
PpmTyped => {
let ast_map = ast_map.expect("--pretty=typed missing ast_map");
- let analysis = driver::phase_3_run_analysis_passes(sess, krate, ast_map, id);
+ let type_arena = TypedArena::new();
+ let analysis = driver::phase_3_run_analysis_passes(sess, krate, ast_map,
+ &type_arena, id);
let annotation = TypedAnnotation { analysis: analysis };
f(&annotation, payload)
}
}
-struct TypedAnnotation {
- analysis: CrateAnalysis,
+struct TypedAnnotation<'tcx> {
+ analysis: CrateAnalysis<'tcx>,
}
-impl PrinterSupport for TypedAnnotation {
+impl<'tcx> PrinterSupport for TypedAnnotation<'tcx> {
fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
}
-impl SessionCarrier for TypedAnnotation {
+impl<'tcx> SessionCarrier for TypedAnnotation<'tcx> {
fn sess<'a>(&'a self) -> &'a Session { &self.analysis.ty_cx.sess }
}
-impl AstMapCarrier for TypedAnnotation {
+impl<'tcx> AstMapCarrier for TypedAnnotation<'tcx> {
fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
Some(&self.analysis.ty_cx.map)
}
}
-impl pprust::PpAnn for TypedAnnotation {
+impl<'tcx> pprust::PpAnn for TypedAnnotation<'tcx> {
fn pre(&self,
s: &mut pprust::State,
node: pprust::AnnNode) -> io::IoResult<()> {
match code {
Some(code) => {
let variants = gather_flowgraph_variants(&sess);
+ let type_arena = TypedArena::new();
let analysis = driver::phase_3_run_analysis_passes(sess, &krate,
- ast_map, id);
+ ast_map, &type_arena, id);
print_flowgraph(variants, analysis, code, out)
}
None => {
pub mod link;
pub mod lto;
+ pub mod write;
}
pub mod check_const;
pub mod check_loop;
pub mod check_match;
+ pub mod check_rvalues;
pub mod check_static;
pub mod const_eval;
pub mod dataflow;
ast::ExprLit(lit) => {
match ty::get(ty::expr_ty(cx.tcx, e)).sty {
ty::ty_int(t) => {
- let int_type = if t == ast::TyI {
- cx.sess().targ_cfg.int_type
- } else { t };
- let (min, max) = int_ty_range(int_type);
- let mut lit_val: i64 = match lit.node {
+ match lit.node {
ast::LitInt(v, ast::SignedIntLit(_, ast::Plus)) |
ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => {
- if v > i64::MAX as u64{
+ let int_type = if t == ast::TyI {
+ cx.sess().targ_cfg.int_type
+ } else { t };
+ let (min, max) = int_ty_range(int_type);
+ let negative = self.negated_expr_id == e.id;
+
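+                    // For example, with `t == i8` (min = -128,
+                    // max = 127): `-128i8` is accepted, since
+                    // 128 <= min.abs(), while `128i8` is flagged,
+                    // since 128 > max.abs().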
+ if (negative && v > (min.abs() as u64)) ||
+ (!negative && v > (max.abs() as u64)) {
cx.span_lint(TYPE_OVERFLOW, e.span,
"literal out of range for its type");
return;
}
- v as i64
- }
- ast::LitInt(v, ast::SignedIntLit(_, ast::Minus)) |
- ast::LitInt(v, ast::UnsuffixedIntLit(ast::Minus)) => {
- -(v as i64)
}
_ => fail!()
};
- if self.negated_expr_id == e.id {
- lit_val *= -1;
- }
- if lit_val < min || lit_val > max {
- cx.span_lint(TYPE_OVERFLOW, e.span,
- "literal out of range for its type");
- }
},
ty::ty_uint(t) => {
let uint_type = if t == ast::TyU {
declare_lint!(CTYPES, Warn,
"proper use of libc types in foreign modules")
-struct CTypesVisitor<'a> {
- cx: &'a Context<'a>
+struct CTypesVisitor<'a, 'tcx: 'a> {
+ cx: &'a Context<'a, 'tcx>
}
-impl<'a> CTypesVisitor<'a> {
+impl<'a, 'tcx> CTypesVisitor<'a, 'tcx> {
fn check_def(&mut self, sp: Span, ty_id: ast::NodeId, path_id: ast::NodeId) {
match self.cx.tcx.def_map.borrow().get_copy(&path_id) {
def::DefPrimTy(ast::TyInt(ast::TyI)) => {
}
}
-impl<'a> Visitor<()> for CTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CTypesVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty, _: ()) {
match ty.node {
ast::TyPath(_, _, id) => self.check_def(ty.span, ty.id, id),
declare_lint!(RAW_POINTER_DERIVING, Warn,
"uses of #[deriving] with raw pointers are rarely correct")
-struct RawPtrDerivingVisitor<'a> {
- cx: &'a Context<'a>
+struct RawPtrDerivingVisitor<'a, 'tcx: 'a> {
+ cx: &'a Context<'a, 'tcx>
}
-impl<'a> Visitor<()> for RawPtrDerivingVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for RawPtrDerivingVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty, _: ()) {
static MSG: &'static str = "use of `#[deriving]` with a raw pointer";
match ty.node {
}
pub fn get_lint_groups<'t>(&'t self) -> Vec<(&'static str, Vec<LintId>, bool)> {
- self.lint_groups.iter().map(|(k, &(ref v, b))| (*k, v.clone(), b)).collect()
+ self.lint_groups.iter().map(|(k, v)| (*k,
+ v.ref0().clone(),
+ *v.ref1())).collect()
}
pub fn register_pass(&mut self, sess: Option<&Session>,
match self.by_name.find_equiv(&lint_name.as_slice()) {
Some(&lint_id) => self.set_level(lint_id, (level, CommandLine)),
None => {
- match self.lint_groups.iter().map(|(&x, &(ref y, _))| (x, y.clone()))
+ match self.lint_groups.iter().map(|(&x, pair)| (x, pair.ref0().clone()))
.collect::<HashMap<&'static str, Vec<LintId>>>()
.find_equiv(&lint_name.as_slice()) {
Some(v) => {
}
/// Context for lint checking.
-pub struct Context<'a> {
+pub struct Context<'a, 'tcx: 'a> {
/// Type context we're checking in.
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
/// The crate being checked.
pub krate: &'a ast::Crate,
}
}
-impl<'a> Context<'a> {
- fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> Context<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
krate: &'a ast::Crate,
- exported_items: &'a ExportedItems) -> Context<'a> {
+ exported_items: &'a ExportedItems) -> Context<'a, 'tcx> {
// We want to own the lint store, so move it out of the session.
let lint_store = mem::replace(&mut *tcx.sess.lint_store.borrow_mut(),
LintStore::new());
}
}
-impl<'a> AstConv for Context<'a>{
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> AstConv<'tcx> for Context<'a, 'tcx>{
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
ty::lookup_item_type(self.tcx, id)
}
}
-impl<'a> Visitor<()> for Context<'a> {
+impl<'a, 'tcx> Visitor<()> for Context<'a, 'tcx> {
fn visit_item(&mut self, it: &ast::Item, _: ()) {
self.with_lint_attrs(it.attrs.as_slice(), |cx| {
run_lints!(cx, check_item, it);
}
// Output any lints that were previously added to the session.
-impl<'a> IdVisitingOperation for Context<'a> {
+impl<'a, 'tcx> IdVisitingOperation for Context<'a, 'tcx> {
fn visit_id(&self, id: ast::NodeId) {
match self.tcx.sess.lints.borrow_mut().pop(&id) {
None => {}
rbml_w: &mut Encoder,
ii: InlinedItemRef|: 'a;
-pub struct EncodeParams<'a> {
+pub struct EncodeParams<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
pub reexports2: &'a middle::resolve::ExportMap2,
pub item_symbols: &'a RefCell<NodeMap<String>>,
pub non_inlineable_statics: &'a RefCell<NodeSet>,
pub reachable: &'a NodeSet,
}
-pub struct EncodeContext<'a> {
+pub struct EncodeContext<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
pub reexports2: &'a middle::resolve::ExportMap2,
pub item_symbols: &'a RefCell<NodeMap<String>>,
pub non_inlineable_statics: &'a RefCell<NodeSet>,
IITraitItemRef(local_def(parent_id),
RequiredInlinedTraitItemRef(
&*ast_method)));
- } else {
+ }
+ if !any_types {
encode_symbol(ecx, rbml_w, m.def_id.node);
}
encode_method_argument_names(rbml_w, &*ast_method.pe_fn_decl());
encode_attributes(rbml_w, item.attrs.as_slice());
if tps_len > 0u || should_inline(item.attrs.as_slice()) {
encode_inlined_item(ecx, rbml_w, IIItemRef(item));
- } else {
+ }
+ if tps_len == 0 {
encode_symbol(ecx, rbml_w, item.id);
}
encode_visibility(rbml_w, vis);
encode_name(rbml_w, nitem.ident.name);
if abi == abi::RustIntrinsic {
encode_inlined_item(ecx, rbml_w, IIForeignRef(nitem));
- } else {
- encode_symbol(ecx, rbml_w, nitem.id);
}
+ encode_symbol(ecx, rbml_w, nitem.id);
}
ForeignItemStatic(_, mutbl) => {
if mutbl {
-struct ImplVisitor<'a,'b:'a,'c:'a> {
- ecx: &'a EncodeContext<'b>,
+struct ImplVisitor<'a, 'b:'a, 'c:'a, 'tcx:'b> {
+ ecx: &'a EncodeContext<'b, 'tcx>,
rbml_w: &'a mut Encoder<'c>,
}
-impl<'a,'b,'c> Visitor<()> for ImplVisitor<'a,'b,'c> {
+impl<'a, 'b, 'c, 'tcx> Visitor<()> for ImplVisitor<'a, 'b, 'c, 'tcx> {
fn visit_item(&mut self, item: &Item, _: ()) {
match item.node {
ItemImpl(_, Some(ref trait_ref), _, _) => {
pub struct FileSearch<'a> {
pub sysroot: &'a Path,
- pub addl_lib_search_paths: &'a RefCell<HashSet<Path>>,
+ pub addl_lib_search_paths: &'a RefCell<Vec<Path>>,
pub triple: &'a str,
}
pub fn new(sysroot: &'a Path,
triple: &'a str,
- addl_lib_search_paths: &'a RefCell<HashSet<Path>>) -> FileSearch<'a> {
+ addl_lib_search_paths: &'a RefCell<Vec<Path>>) -> FileSearch<'a> {
debug!("using sysroot = {}, triple = {}", sysroot.display(), triple);
FileSearch {
sysroot: sysroot,
pub type conv_did<'a> =
|source: DefIdSource, ast::DefId|: 'a -> ast::DefId;
-pub struct PState<'a> {
+pub struct PState<'a, 'tcx: 'a> {
data: &'a [u8],
krate: ast::CrateNum,
pos: uint,
- tcx: &'a ty::ctxt
+ tcx: &'a ty::ctxt<'tcx>
}
fn peek(st: &PState) -> char {
})
}
-pub fn parse_state_from_data<'a>(data: &'a [u8], crate_num: ast::CrateNum,
- pos: uint, tcx: &'a ty::ctxt) -> PState<'a> {
+pub fn parse_state_from_data<'a, 'tcx>(data: &'a [u8], crate_num: ast::CrateNum,
+ pos: uint, tcx: &'a ty::ctxt<'tcx>)
+ -> PState<'a, 'tcx> {
PState {
data: data,
krate: crate_num,
macro_rules! mywrite( ($($arg:tt)*) => ({ write!($($arg)*); }) )
-pub struct ctxt<'a> {
+pub struct ctxt<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
// Def -> str Callback:
pub ds: fn(DefId) -> String,
// The type context.
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
pub abbrevs: &'a abbrev_map
}
#[cfg(test)] use syntax::print::pprust;
#[cfg(test)] use std::gc::Gc;
-struct DecodeContext<'a> {
+struct DecodeContext<'a, 'tcx: 'a> {
cdata: &'a cstore::crate_metadata,
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
}
-struct ExtendedDecodeContext<'a> {
- dcx: &'a DecodeContext<'a>,
+struct ExtendedDecodeContext<'a, 'tcx: 'a> {
+ dcx: &'a DecodeContext<'a, 'tcx>,
from_id_range: ast_util::IdRange,
to_id_range: ast_util::IdRange
}
ast_util::IdRange { min: to_id_min, max: to_id_max }
}
-impl<'a> ExtendedDecodeContext<'a> {
+impl<'a, 'tcx> ExtendedDecodeContext<'a, 'tcx> {
pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
/*!
* Translates an internal id, meaning a node id that is known
Decodable::decode(&mut d).unwrap()
}
-struct AstRenumberer<'a> {
- xcx: &'a ExtendedDecodeContext<'a>,
+struct AstRenumberer<'a, 'tcx: 'a> {
+ xcx: &'a ExtendedDecodeContext<'a, 'tcx>,
}
-impl<'a> ast_map::FoldOps for AstRenumberer<'a> {
+impl<'a, 'tcx> ast_map::FoldOps for AstRenumberer<'a, 'tcx> {
fn new_id(&self, id: ast::NodeId) -> ast::NodeId {
if id == ast::DUMMY_NODE_ID {
// Used by ast_map to map the NodeInlinedParent.
// ______________________________________________________________________
// Encoding and decoding the side tables
-trait get_ty_str_ctxt {
- fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a>;
+trait get_ty_str_ctxt<'tcx> {
+ fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a, 'tcx>;
}
-impl<'a> get_ty_str_ctxt for e::EncodeContext<'a> {
- fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a> {
+impl<'a, 'tcx> get_ty_str_ctxt<'tcx> for e::EncodeContext<'a, 'tcx> {
+ fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a, 'tcx> {
tyencode::ctxt {
diag: self.tcx.sess.diagnostic(),
ds: e::def_to_string,
}
}
-struct CheckLoanCtxt<'a> {
- bccx: &'a BorrowckCtxt<'a>,
- dfcx_loans: &'a LoanDataFlow<'a>,
- move_data: move_data::FlowedMoveData<'a>,
+struct CheckLoanCtxt<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
+ dfcx_loans: &'a LoanDataFlow<'a, 'tcx>,
+ move_data: move_data::FlowedMoveData<'a, 'tcx>,
all_loans: &'a [Loan],
}
-impl<'a> euv::Delegate for CheckLoanCtxt<'a> {
+impl<'a, 'tcx> euv::Delegate for CheckLoanCtxt<'a, 'tcx> {
fn consume(&mut self,
consume_id: ast::NodeId,
consume_span: Span,
fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) { }
}
-pub fn check_loans(bccx: &BorrowckCtxt,
- dfcx_loans: &LoanDataFlow,
- move_data: move_data::FlowedMoveData,
- all_loans: &[Loan],
- decl: &ast::FnDecl,
- body: &ast::Block) {
+pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
+ dfcx_loans: &LoanDataFlow<'b, 'tcx>,
+ move_data: move_data::FlowedMoveData<'c, 'tcx>,
+ all_loans: &[Loan],
+ decl: &ast::FnDecl,
+ body: &ast::Block) {
debug!("check_loans(body id={:?})", body.id);
let mut clcx = CheckLoanCtxt {
borrow_kind1 == ty::ImmBorrow && borrow_kind2 == ty::ImmBorrow
}
-impl<'a> CheckLoanCtxt<'a> {
- pub fn tcx(&self) -> &'a ty::ctxt { self.bccx.tcx }
+impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
pub fn each_issued_loan(&self, scope_id: ast::NodeId, op: |&Loan| -> bool)
-> bool {
///////////////////////////////////////////////////////////////////////////
// Private
-struct GuaranteeLifetimeContext<'a> {
- bccx: &'a BorrowckCtxt<'a>,
+struct GuaranteeLifetimeContext<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
// the node id of the function body for the enclosing item
item_scope_id: ast::NodeId,
cmt_original: mc::cmt
}
-impl<'a> GuaranteeLifetimeContext<'a> {
+impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> {
fn check(&self, cmt: &mc::cmt, discr_scope: Option<ast::NodeId>) -> R {
//! Main routine. Walks down `cmt` until we find the "guarantor".
(all_loans, move_data)
}
-struct GatherLoanCtxt<'a> {
- bccx: &'a BorrowckCtxt<'a>,
+struct GatherLoanCtxt<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
move_data: move_data::MoveData,
move_error_collector: move_error::MoveErrorCollector,
all_loans: Vec<Loan>,
item_ub: ast::NodeId,
}
-impl<'a> euv::Delegate for GatherLoanCtxt<'a> {
+impl<'a, 'tcx> euv::Delegate for GatherLoanCtxt<'a, 'tcx> {
fn consume(&mut self,
consume_id: ast::NodeId,
_consume_span: Span,
}
}
-impl<'a> GatherLoanCtxt<'a> {
- pub fn tcx(&self) -> &'a ty::ctxt { self.bccx.tcx }
+impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
fn guarantee_valid(&mut self,
borrow_id: ast::NodeId,
///
/// This visitor walks a static initializer's expressions and makes
/// sure the loans being taken are sound.
-struct StaticInitializerCtxt<'a> {
- bccx: &'a BorrowckCtxt<'a>
+struct StaticInitializerCtxt<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for StaticInitializerCtxt<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for StaticInitializerCtxt<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
match ex.node {
ast::ExprAddrOf(mutbl, ref base) => {
///////////////////////////////////////////////////////////////////////////
// Private
-struct RestrictionsContext<'a> {
- bccx: &'a BorrowckCtxt<'a>,
+struct RestrictionsContext<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
span: Span,
loan_region: ty::Region,
cause: euv::LoanCause,
}
-impl<'a> RestrictionsContext<'a> {
+impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> {
fn restrict(&self,
cmt: mc::cmt) -> RestrictionResult {
debug!("restrict(cmt={})", cmt.repr(self.bccx.tcx));
}
}
-pub struct DataflowLabeller<'a> {
+pub struct DataflowLabeller<'a, 'tcx: 'a> {
pub inner: cfg_dot::LabelledCFG<'a>,
pub variants: Vec<Variant>,
- pub borrowck_ctxt: &'a BorrowckCtxt<'a>,
- pub analysis_data: &'a borrowck::AnalysisData<'a>,
+ pub borrowck_ctxt: &'a BorrowckCtxt<'a, 'tcx>,
+ pub analysis_data: &'a borrowck::AnalysisData<'a, 'tcx>,
}
-impl<'a> DataflowLabeller<'a> {
+impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> {
fn dataflow_for(&self, e: EntryOrExit, n: &Node<'a>) -> String {
let id = n.val1().data.id;
debug!("dataflow_for({}, id={}) {}", e, id, self.variants);
fn build_set<O:DataFlowOperator>(&self,
e: EntryOrExit,
cfgidx: CFGIndex,
- dfcx: &DataFlowContext<'a, O>,
+ dfcx: &DataFlowContext<'a, 'tcx, O>,
to_lp: |uint| -> Rc<LoanPath>) -> String {
let mut saw_some = false;
let mut set = "{".to_string();
}
}
-impl<'a> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a> {
+impl<'a, 'tcx> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> {
fn graph_id(&'a self) -> dot::Id<'a> { self.inner.graph_id() }
fn node_id(&'a self, n: &Node<'a>) -> dot::Id<'a> { self.inner.node_id(n) }
fn node_label(&'a self, n: &Node<'a>) -> dot::LabelText<'a> {
fn edge_label(&'a self, e: &Edge<'a>) -> dot::LabelText<'a> { self.inner.edge_label(e) }
}
-impl<'a> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a> {
+impl<'a, 'tcx> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> {
fn nodes(&self) -> dot::Nodes<'a, Node<'a>> { self.inner.nodes() }
fn edges(&self) -> dot::Edges<'a, Edge<'a>> { self.inner.edges() }
fn source(&self, edge: &Edge<'a>) -> Node<'a> { self.inner.source(edge) }
#[deriving(Clone)]
pub struct LoanDataFlowOperator;
-pub type LoanDataFlow<'a> = DataFlowContext<'a, LoanDataFlowOperator>;
+pub type LoanDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, LoanDataFlowOperator>;
-impl<'a> Visitor<()> for BorrowckCtxt<'a> {
+impl<'a, 'tcx> Visitor<()> for BorrowckCtxt<'a, 'tcx> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl,
b: &Block, s: Span, n: NodeId, _: ()) {
borrowck_fn(self, fk, fd, b, s, n);
}
/// Collection of conclusions determined via borrow checker analyses.
-pub struct AnalysisData<'a> {
+pub struct AnalysisData<'a, 'tcx: 'a> {
pub all_loans: Vec<Loan>,
- pub loans: DataFlowContext<'a, LoanDataFlowOperator>,
- pub move_data: move_data::FlowedMoveData<'a>,
+ pub loans: DataFlowContext<'a, 'tcx, LoanDataFlowOperator>,
+ pub move_data: move_data::FlowedMoveData<'a, 'tcx>,
}
fn borrowck_fn(this: &mut BorrowckCtxt,
visit::walk_fn(this, fk, decl, body, sp, ());
}
-fn build_borrowck_dataflow_data<'a>(this: &mut BorrowckCtxt<'a>,
- fk: &FnKind,
- decl: &ast::FnDecl,
- cfg: &cfg::CFG,
- body: &ast::Block,
- sp: Span,
- id: ast::NodeId) -> AnalysisData<'a> {
+fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
+ fk: &FnKind,
+ decl: &ast::FnDecl,
+ cfg: &cfg::CFG,
+ body: &ast::Block,
+ sp: Span,
+ id: ast::NodeId) -> AnalysisData<'a, 'tcx> {
// Check the body of fn items.
let id_range = ast_util::compute_id_range_for_fn_body(fk, decl, body, sp, id);
let (all_loans, move_data) =
/// Accessor for introspective clients inspecting `AnalysisData` and
/// the `BorrowckCtxt` itself, e.g. the flowgraph visualizer.
-pub fn build_borrowck_dataflow_data_for_fn<'a>(
- tcx: &'a ty::ctxt,
- input: FnPartsWithCFG<'a>) -> (BorrowckCtxt<'a>, AnalysisData<'a>) {
+pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
+ tcx: &'a ty::ctxt<'tcx>,
+ input: FnPartsWithCFG<'a>) -> (BorrowckCtxt<'a, 'tcx>, AnalysisData<'a, 'tcx>) {
let mut bccx = BorrowckCtxt {
tcx: tcx,
// ----------------------------------------------------------------------
// Type definitions
-pub struct BorrowckCtxt<'a> {
- tcx: &'a ty::ctxt,
+pub struct BorrowckCtxt<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
// Statistics:
stats: Gc<BorrowStats>,
///////////////////////////////////////////////////////////////////////////
// Misc
-impl<'a> BorrowckCtxt<'a> {
+impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region)
-> bool {
self.tcx.region_maps.is_subregion_of(r_sub, r_sup)
self.tcx.region_maps.is_subscope_of(r_sub, r_sup)
}
- pub fn mc(&self) -> mc::MemCategorizationContext<'a,ty::ctxt> {
+ pub fn mc(&self) -> mc::MemCategorizationContext<'a, ty::ctxt<'tcx>> {
mc::MemCategorizationContext::new(self.tcx)
}
pub assignee_ids: RefCell<HashSet<ast::NodeId>>,
}
-pub struct FlowedMoveData<'a> {
+pub struct FlowedMoveData<'a, 'tcx: 'a> {
pub move_data: MoveData,
- pub dfcx_moves: MoveDataFlow<'a>,
+ pub dfcx_moves: MoveDataFlow<'a, 'tcx>,
// We could (and maybe should, for efficiency) combine both move
// and assign data flow into one, but this way it's easier to
// distinguish the bits that correspond to moves and assignments.
- pub dfcx_assign: AssignDataFlow<'a>
+ pub dfcx_assign: AssignDataFlow<'a, 'tcx>
}
/// Index into `MoveData.paths`, used like a pointer
#[deriving(Clone)]
pub struct MoveDataFlowOperator;
-pub type MoveDataFlow<'a> = DataFlowContext<'a, MoveDataFlowOperator>;
+pub type MoveDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, MoveDataFlowOperator>;
#[deriving(Clone)]
pub struct AssignDataFlowOperator;
-pub type AssignDataFlow<'a> = DataFlowContext<'a, AssignDataFlowOperator>;
+pub type AssignDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, AssignDataFlowOperator>;
fn loan_path_is_precise(loan_path: &LoanPath) -> bool {
match *loan_path {
}
}
-impl<'a> FlowedMoveData<'a> {
+impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
pub fn new(move_data: MoveData,
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
cfg: &cfg::CFG,
id_range: ast_util::IdRange,
decl: &ast::FnDecl,
body: &ast::Block)
- -> FlowedMoveData<'a> {
+ -> FlowedMoveData<'a, 'tcx> {
let mut dfcx_moves =
DataFlowContext::new(tcx,
"flowed_move_data_moves",
use std::gc::Gc;
-struct CFGBuilder<'a> {
- tcx: &'a ty::ctxt,
+struct CFGBuilder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
exit_map: NodeMap<CFGIndex>,
graph: CFGGraph,
fn_exit: CFGIndex,
g.add_node(CFGNodeData { id: ast::DUMMY_NODE_ID })
}
-impl<'a> CFGBuilder<'a> {
+impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
fn block(&mut self, blk: &ast::Block, pred: CFGIndex) -> CFGIndex {
let mut stmts_exit = pred;
for stmt in blk.stmts.iter() {
use syntax::visit::Visitor;
use syntax::visit;
-pub struct CheckCrateVisitor<'a> {
- tcx: &'a ty::ctxt,
+pub struct CheckCrateVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
}
-impl<'a> Visitor<bool> for CheckCrateVisitor<'a> {
+impl<'a, 'tcx> Visitor<bool> for CheckCrateVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &Item, env: bool) {
check_item(self, i, env);
}
}
}
-pub struct MatchCheckCtxt<'a> {
- pub tcx: &'a ty::ctxt
+pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>
}
#[deriving(Clone, PartialEq)]
LeaveOutWitness
}
-impl<'a> Visitor<()> for MatchCheckCtxt<'a> {
+impl<'a, 'tcx> Visitor<()> for MatchCheckCtxt<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
check_expr(self, ex);
}
}
}
-pub struct StaticInliner<'a> {
- pub tcx: &'a ty::ctxt,
+pub struct StaticInliner<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
pub failed: bool
}
-impl<'a> StaticInliner<'a> {
- pub fn new<'a>(tcx: &'a ty::ctxt) -> StaticInliner<'a> {
+impl<'a, 'tcx> StaticInliner<'a, 'tcx> {
+ pub fn new<'a>(tcx: &'a ty::ctxt<'tcx>) -> StaticInliner<'a, 'tcx> {
StaticInliner {
tcx: tcx,
failed: false
}
}
-impl<'a> Folder for StaticInliner<'a> {
+impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
fn fold_pat(&mut self, pat: Gc<Pat>) -> Gc<Pat> {
match pat.node {
PatIdent(..) | PatEnum(..) => {
/// Ensures that a pattern guard doesn't borrow by mutable reference or
/// assign.
-fn check_for_mutation_in_guard<'a>(cx: &'a MatchCheckCtxt<'a>, guard: &Expr) {
+fn check_for_mutation_in_guard<'a, 'tcx>(cx: &'a MatchCheckCtxt<'a, 'tcx>, guard: &Expr) {
let mut checker = MutationChecker {
cx: cx,
};
visitor.walk_expr(guard);
}
-struct MutationChecker<'a> {
- cx: &'a MatchCheckCtxt<'a>,
+struct MutationChecker<'a, 'tcx: 'a> {
+ cx: &'a MatchCheckCtxt<'a, 'tcx>,
}
-impl<'a> Delegate for MutationChecker<'a> {
+impl<'a, 'tcx> Delegate for MutationChecker<'a, 'tcx> {
fn consume(&mut self, _: NodeId, _: Span, _: cmt, _: ConsumeMode) {}
fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {}
fn borrow(&mut self,
visitor.visit_pat(pat, true);
}
-struct AtBindingPatternVisitor<'a,'b:'a> {
- cx: &'a MatchCheckCtxt<'b>,
+struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> {
+ cx: &'a MatchCheckCtxt<'b, 'tcx>,
}
-impl<'a,'b> Visitor<bool> for AtBindingPatternVisitor<'a,'b> {
+impl<'a, 'b, 'tcx> Visitor<bool> for AtBindingPatternVisitor<'a, 'b, 'tcx> {
fn visit_pat(&mut self, pat: &Pat, bindings_allowed: bool) {
if !bindings_allowed && pat_is_binding(&self.cx.tcx.def_map, pat) {
self.cx.tcx.sess.span_err(pat.span,
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks that all rvalues in a crate have statically known size. check_crate
+// is the public starting point.
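+//
+// For example, given `s: Box<[int]>`, `let x = *s;` attempts to move a
+// value of type `[int]`, whose size is not statically known, and is
+// rejected with error E0161.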
+
+use middle::expr_use_visitor as euv;
+use middle::mem_categorization as mc;
+use middle::ty;
+use util::ppaux::ty_to_string;
+
+use syntax::ast;
+use syntax::codemap::Span;
+use syntax::visit;
+
+pub fn check_crate(tcx: &ty::ctxt,
+ krate: &ast::Crate) {
+ let mut rvcx = RvalueContext { tcx: tcx };
+ visit::walk_crate(&mut rvcx, krate, ());
+}
+
+struct RvalueContext<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>
+}
+
+impl<'a, 'tcx> visit::Visitor<()> for RvalueContext<'a, 'tcx> {
+ fn visit_fn(&mut self,
+ _: &visit::FnKind,
+ fd: &ast::FnDecl,
+ b: &ast::Block,
+ _: Span,
+ _: ast::NodeId,
+ _: ()) {
+ let mut euv = euv::ExprUseVisitor::new(self, self.tcx);
+ euv.walk_fn(fd, b);
+ }
+}
+
+impl<'a, 'tcx> euv::Delegate for RvalueContext<'a, 'tcx> {
+ fn consume(&mut self,
+ _: ast::NodeId,
+ span: Span,
+ cmt: mc::cmt,
+ _: euv::ConsumeMode) {
+ debug!("consume; cmt: {:?}; type: {}", *cmt, ty_to_string(self.tcx, cmt.ty));
+ if !ty::type_is_sized(self.tcx, cmt.ty) {
+ span_err!(self.tcx.sess, span, E0161,
+ "cannot move a value of type {0}: the size of {0} cannot be statically determined",
+ ty_to_string(self.tcx, cmt.ty));
+ }
+ }
+
+ fn consume_pat(&mut self,
+ _consume_pat: &ast::Pat,
+ _cmt: mc::cmt,
+ _mode: euv::ConsumeMode) {
+ }
+
+ fn borrow(&mut self,
+ _borrow_id: ast::NodeId,
+ _borrow_span: Span,
+ _cmt: mc::cmt,
+ _loan_region: ty::Region,
+ _bk: ty::BorrowKind,
+ _loan_cause: euv::LoanCause) {
+ }
+
+ fn decl_without_init(&mut self,
+ _id: ast::NodeId,
+ _span: Span) {
+ }
+
+ fn mutate(&mut self,
+ _assignment_id: ast::NodeId,
+ _assignment_span: Span,
+ _assignee_cmt: mc::cmt,
+ _mode: euv::MutateMode) {
+ }
+}
Some(format!("mutable static items are not allowed to have {}", suffix))
}
-struct CheckStaticVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct CheckStaticVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
}
pub fn check_crate(tcx: &ty::ctxt, krate: &ast::Crate) {
visit::walk_crate(&mut CheckStaticVisitor { tcx: tcx }, krate, false)
}
-impl<'a> CheckStaticVisitor<'a> {
+impl<'a, 'tcx> CheckStaticVisitor<'a, 'tcx> {
fn report_error(&self, span: Span, result: Option<String>) -> bool {
match result {
None => { false }
}
}
-impl<'a> Visitor<bool> for CheckStaticVisitor<'a> {
+impl<'a, 'tcx> Visitor<bool> for CheckStaticVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _is_const: bool) {
debug!("visit_item(item={})", pprust::item_to_string(i));
}
}
-struct ConstEvalVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct ConstEvalVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
ccache: constness_cache,
}
-impl<'a> ConstEvalVisitor<'a> {
+impl<'a, 'tcx> ConstEvalVisitor<'a, 'tcx> {
fn classify(&mut self, e: &Expr) -> constness {
let did = ast_util::local_def(e.id);
match self.ccache.find(&did) {
}
-impl<'a> Visitor<()> for ConstEvalVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for ConstEvalVisitor<'a, 'tcx> {
fn visit_ty(&mut self, t: &Ty, _: ()) {
match t.node {
TyFixedLengthVec(_, expr) => {
}
}
-pub fn eval_const_expr_partial<T: ty::ExprTyProvider>(tcx: &T, e: &Expr)
- -> Result<const_val, String> {
+pub fn eval_const_expr_partial(tcx: &ty::ctxt, e: &Expr) -> Result<const_val, String> {
fn fromb(b: bool) -> Result<const_val, String> { Ok(const_int(b as i64)) }
match e.node {
ExprUnary(UnNeg, ref inner) => {
    // This tends to get called without the type actually having been
// populated in the ctxt, which was causing things to blow up
// (#5900). Fall back to doing a limited lookup to get past it.
- let ety = ty::expr_ty_opt(tcx.ty_ctxt(), e)
- .or_else(|| astconv::ast_ty_to_prim_ty(tcx.ty_ctxt(), &**target_ty))
+ let ety = ty::expr_ty_opt(tcx, e)
+ .or_else(|| astconv::ast_ty_to_prim_ty(tcx, &**target_ty))
.unwrap_or_else(|| {
- tcx.ty_ctxt().sess.span_fatal(target_ty.span,
- "target type not found for \
- const cast")
+ tcx.sess.span_fatal(target_ty.span,
+ "target type not found for const cast")
});
let base = eval_const_expr_partial(tcx, &**base);
}
}
ExprPath(_) => {
- match lookup_const(tcx.ty_ctxt(), e) {
- Some(actual_e) => eval_const_expr_partial(tcx.ty_ctxt(), &*actual_e),
+ match lookup_const(tcx, e) {
+ Some(actual_e) => eval_const_expr_partial(tcx, &*actual_e),
None => Err("non-constant path in constant expr".to_string())
}
}
pub enum EntryOrExit { Entry, Exit }
#[deriving(Clone)]
-pub struct DataFlowContext<'a, O> {
- tcx: &'a ty::ctxt,
+pub struct DataFlowContext<'a, 'tcx: 'a, O> {
+ tcx: &'a ty::ctxt<'tcx>,
/// a name for the analysis using this dataflow instance
analysis_name: &'static str,
fn initial_value(&self) -> bool;
}
-struct PropagationContext<'a, 'b:'a, O:'a> {
- dfcx: &'a mut DataFlowContext<'b, O>,
+struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O: 'a> {
+ dfcx: &'a mut DataFlowContext<'b, 'tcx, O>,
changed: bool
}
})
}
-impl<'a, O:DataFlowOperator> DataFlowContext<'a, O> {
+impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
fn has_bitset_for_nodeid(&self, n: ast::NodeId) -> bool {
assert!(n != ast::DUMMY_NODE_ID);
self.nodeid_to_index.contains_key(&n)
}
}
-impl<'a, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, O> {
+impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O> {
fn pre(&self,
ps: &mut pprust::State,
node: pprust::AnnNode) -> io::IoResult<()> {
}
}
-impl<'a, O:DataFlowOperator> DataFlowContext<'a, O> {
- pub fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
+ pub fn new(tcx: &'a ty::ctxt<'tcx>,
analysis_name: &'static str,
decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG,
oper: O,
id_range: IdRange,
- bits_per_id: uint) -> DataFlowContext<'a, O> {
+ bits_per_id: uint) -> DataFlowContext<'a, 'tcx, O> {
let words_per_id = (bits_per_id + uint::BITS - 1) / uint::BITS;
let num_nodes = cfg.graph.all_nodes().len();
}
}
-impl<'a, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, O> {
-// ^^^^^^^^^^^^^ only needed for pretty printing
+impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
+// ^^^^^^^^^^^^^ only needed for pretty printing
pub fn propagate(&mut self, cfg: &cfg::CFG, blk: &ast::Block) {
//! Performs the data flow analysis.
}
}
-impl<'a, 'b, O:DataFlowOperator> PropagationContext<'a, 'b, O> {
+impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
in_out: &mut [uint]) {
}
}
-struct MarkSymbolVisitor<'a> {
+struct MarkSymbolVisitor<'a, 'tcx: 'a> {
worklist: Vec<ast::NodeId>,
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
struct_has_extern_repr: bool
}
-impl<'a> MarkSymbolVisitor<'a> {
- fn new(tcx: &'a ty::ctxt,
- worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a> {
+impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
+ worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a, 'tcx> {
MarkSymbolVisitor {
worklist: worklist,
tcx: tcx,
}
}
-impl<'a> Visitor<MarkSymbolVisitorContext> for MarkSymbolVisitor<'a> {
+impl<'a, 'tcx> Visitor<MarkSymbolVisitorContext> for MarkSymbolVisitor<'a, 'tcx> {
fn visit_struct_def(&mut self, def: &ast::StructDef, _: ast::Ident, _: &ast::Generics,
_: ast::NodeId, ctxt: MarkSymbolVisitorContext) {
}
}
-struct DeadVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct DeadVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
-impl<'a> DeadVisitor<'a> {
+impl<'a, 'tcx> DeadVisitor<'a, 'tcx> {
fn should_warn_about_field(&mut self, node: &ast::StructField_) -> bool {
let is_named = node.ident().is_some();
let field_type = ty::node_id_to_type(self.tcx, node.id);
}
}
-impl<'a> Visitor<()> for DeadVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for DeadVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let ctor_id = get_struct_ctor_id(item);
if !self.symbol_is_live(item.id, ctor_id) && should_warn(item) {
}
}
-struct EffectCheckVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct EffectCheckVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
/// Whether we're in an unsafe context.
unsafe_context: UnsafeContext,
}
-impl<'a> EffectCheckVisitor<'a> {
+impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> {
fn require_unsafe(&mut self, span: Span, description: &str) {
match self.unsafe_context {
SafeContext => {
}
}
-impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for EffectCheckVisitor<'a, 'tcx> {
fn visit_fn(&mut self, fn_kind: &visit::FnKind, fn_decl: &ast::FnDecl,
block: &ast::Block, span: Span, _: ast::NodeId, _:()) {
)
)
-impl<'d,'t,TYPER:mc::Typer> ExprUseVisitor<'d,'t,TYPER> {
+impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,TYPER> {
pub fn new(delegate: &'d mut Delegate,
typer: &'t TYPER)
-> ExprUseVisitor<'d,'t,TYPER> {
}
}
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+ fn tcx(&self) -> &'t ty::ctxt<'tcx> {
self.typer.tcx()
}
}
}
-pub fn get_capture_mode<T:Typer>(tcx: &T, closure_expr_id: ast::NodeId)
- -> CaptureMode {
+pub fn get_capture_mode<'tcx, T:Typer<'tcx>>(tcx: &T, closure_expr_id: ast::NodeId)
+ -> CaptureMode {
tcx.capture_mode(closure_expr_id)
}
result
}
-struct IntrinsicCheckingVisitor<'a> {
- tcx: &'a ctxt,
+struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ctxt<'tcx>,
}
-impl<'a> IntrinsicCheckingVisitor<'a> {
+impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
let intrinsic = match ty::get(ty::lookup_item_type(self.tcx, def_id).ty).sty {
ty::ty_bare_fn(ref bfty) => bfty.abi == RustIntrinsic,
}
}
-impl<'a> Visitor<()> for IntrinsicCheckingVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for IntrinsicCheckingVisitor<'a, 'tcx> {
fn visit_expr(&mut self, expr: &ast::Expr, (): ()) {
match expr.node {
ast::ExprPath(..) => {
// primitives in the stdlib are explicitly annotated to only take sendable
// types.
-pub struct Context<'a> {
- tcx: &'a ty::ctxt,
+pub struct Context<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
struct_and_enum_bounds_checked: HashSet<ty::t>,
parameter_environments: Vec<ParameterEnvironment>,
}
-impl<'a> Visitor<()> for Context<'a> {
+impl<'a, 'tcx> Visitor<()> for Context<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
check_expr(self, ex);
}
tcx.sess.abort_if_errors();
}
-struct EmptySubstsFolder<'a> {
- tcx: &'a ty::ctxt
+struct EmptySubstsFolder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>
}
-impl<'a> ty_fold::TypeFolder for EmptySubstsFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'a, 'tcx> ty_fold::TypeFolder<'tcx> for EmptySubstsFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx
}
fn fold_substs(&mut self, _: &subst::Substs) -> subst::Substs {
fn check_item(cx: &mut Context, item: &Item) {
if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
- ItemImpl(_, Some(ref trait_ref), ref self_type, _) => {
- check_impl_of_trait(cx, item, trait_ref, &**self_type);
-
+ ItemImpl(_, ref trait_ref, ref self_type, _) => {
let parameter_environment =
ParameterEnvironment::for_item(cx.tcx, item.id);
cx.parameter_environments.push(parameter_environment);
item.span,
ty::node_id_to_type(cx.tcx, item.id));
- // Check bounds on the trait ref.
- match ty::impl_trait_ref(cx.tcx,
- ast_util::local_def(item.id)) {
- None => {}
- Some(trait_ref) => {
- check_bounds_on_structs_or_enums_in_trait_ref(
- cx,
- item.span,
- &*trait_ref);
+ match trait_ref {
+ &Some(ref trait_ref) => {
+ check_impl_of_trait(cx, item, trait_ref, &**self_type);
+
+ // Check bounds on the trait ref.
+ match ty::impl_trait_ref(cx.tcx,
+ ast_util::local_def(item.id)) {
+ None => {}
+ Some(trait_ref) => {
+ check_bounds_on_structs_or_enums_in_trait_ref(
+ cx,
+ item.span,
+ &*trait_ref);
+
+ let trait_def = ty::lookup_trait_def(cx.tcx, trait_ref.def_id);
+ for (ty, type_param_def) in trait_ref.substs.types
+ .iter()
+ .zip(trait_def.generics
+ .types
+ .iter()) {
+ check_typaram_bounds(cx, item.span, *ty, type_param_def);
+ }
+ }
+ }
}
+ &None => {}
}
drop(cx.parameter_environments.pop());
NoSendItem, "no_send_bound", no_send_bound;
NoCopyItem, "no_copy_bound", no_copy_bound;
- NoSyncItem, "no_share_bound", no_share_bound;
+ NoSyncItem, "no_sync_bound", no_sync_bound;
ManagedItem, "managed_bound", managed_bound;
IteratorItem, "iterator", iterator;
}
}
-impl<'a> Visitor<()> for IrMaps<'a> {
+impl<'a, 'tcx> Visitor<()> for IrMaps<'a, 'tcx> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl, b: &Block, s: Span, n: NodeId, _: ()) {
visit_fn(self, fk, fd, b, s, n);
}
ImplicitRet
}
-struct IrMaps<'a> {
- tcx: &'a ty::ctxt,
+struct IrMaps<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
num_live_nodes: uint,
num_vars: uint,
lnks: Vec<LiveNodeKind>,
}
-impl<'a> IrMaps<'a> {
- fn new(tcx: &'a ty::ctxt) -> IrMaps<'a> {
+impl<'a, 'tcx> IrMaps<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>) -> IrMaps<'a, 'tcx> {
IrMaps {
tcx: tcx,
num_live_nodes: 0,
}
}
-impl<'a> Visitor<()> for Liveness<'a> {
+impl<'a, 'tcx> Visitor<()> for Liveness<'a, 'tcx> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl, b: &Block, s: Span, n: NodeId, _: ()) {
check_fn(self, fk, fd, b, s, n);
}
static ACC_WRITE: uint = 2u;
static ACC_USE: uint = 4u;
-struct Liveness<'a> {
- ir: &'a mut IrMaps<'a>,
+struct Liveness<'a, 'tcx: 'a> {
+ ir: &'a mut IrMaps<'a, 'tcx>,
s: Specials,
successors: Vec<LiveNode>,
users: Vec<Users>,
cont_ln: NodeMap<LiveNode>
}
-impl<'a> Liveness<'a> {
- fn new(ir: &'a mut IrMaps<'a>, specials: Specials) -> Liveness<'a> {
+impl<'a, 'tcx> Liveness<'a, 'tcx> {
+ fn new(ir: &'a mut IrMaps<'a, 'tcx>, specials: Specials) -> Liveness<'a, 'tcx> {
let num_live_nodes = ir.num_live_nodes;
let num_vars = ir.num_vars;
Liveness {
fn pat_bindings(&mut self,
pat: &Pat,
- f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) {
+ f: |&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId|) {
pat_util::pat_bindings(&self.ir.tcx.def_map, pat, |_bm, p_id, sp, _n| {
let ln = self.live_node(p_id, sp);
let var = self.variable(p_id, sp);
fn arm_pats_bindings(&mut self,
pats: &[Gc<Pat>],
- f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) {
+ f: |&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId|) {
// only consider the first pattern; any later patterns must have
// the same bindings, and we also consider the first pattern to be
// the "authoritative" set of ids
fn indices2(&mut self,
ln: LiveNode,
succ_ln: LiveNode,
- op: |&mut Liveness<'a>, uint, uint|) {
+ op: |&mut Liveness<'a, 'tcx>, uint, uint|) {
let node_base_idx = self.idx(ln, Variable(0u));
let succ_base_idx = self.idx(succ_ln, Variable(0u));
for var_idx in range(0u, self.ir.num_vars) {
loop_node_id: NodeId,
break_ln: LiveNode,
cont_ln: LiveNode,
- f: |&mut Liveness<'a>| -> R)
+ f: |&mut Liveness<'a, 'tcx>| -> R)
-> R {
debug!("with_loop_nodes: {} {}", loop_node_id, break_ln.get());
self.loop_scope.push(loop_node_id);
// do not check contents of nested fns
}
-impl<'a> Liveness<'a> {
+impl<'a, 'tcx> Liveness<'a, 'tcx> {
fn check_ret(&self,
id: NodeId,
sp: Span,
// like `*x`, the type of this deref node is the deref'd type (`T`),
// but in a pattern like `@x`, the `@x` pattern is again a
// dereference, yet its type is the type *before* the dereference
-// (`@T`). So use `cmt.type` to find the type of the value in a consistent
+// (`@T`). So use `cmt.ty` to find the type of the value in a consistent
// fashion. For more details, see the method `cat_pattern`
#[deriving(Clone, PartialEq)]
pub struct cmt_ {
* know that no errors have occurred, so we simply consult the tcx and we
* can be sure that only `Ok` results will occur.
*/
-pub trait Typer {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt;
+pub trait Typer<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn node_ty(&self, id: ast::NodeId) -> McResult<ty::t>;
fn node_method_ty(&self, method_call: typeck::MethodCall) -> Option<ty::t>;
fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment>>;
)
)
-impl<'t,TYPER:Typer> MemCategorizationContext<'t,TYPER> {
+impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
pub fn new(typer: &'t TYPER) -> MemCategorizationContext<'t,TYPER> {
MemCategorizationContext { typer: typer }
}
- fn tcx(&self) -> &'t ty::ctxt {
+ fn tcx(&self) -> &'t ty::ctxt<'tcx> {
self.typer.tcx()
}
/// The embargo visitor, used to determine the exports of the ast
////////////////////////////////////////////////////////////////////////////////
-struct EmbargoVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct EmbargoVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
exp_map2: &'a resolve::ExportMap2,
// This flag is an indicator of whether the previous item in the
prev_public: bool,
}
-impl<'a> EmbargoVisitor<'a> {
+impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> {
// There are checks inside of privacy which depend on knowing whether a
// trait should be exported or not. The two current consumers of this are:
//
}
}
-impl<'a> Visitor<()> for EmbargoVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for EmbargoVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let orig_all_pub = self.prev_public;
self.prev_public = orig_all_pub && item.vis == ast::Public;
/// The privacy visitor, where privacy checks take place (violations reported)
////////////////////////////////////////////////////////////////////////////////
-struct PrivacyVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct PrivacyVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
curitem: ast::NodeId,
in_foreign: bool,
parents: NodeMap<ast::NodeId>,
NamedField(ast::Ident),
}
-impl<'a> PrivacyVisitor<'a> {
+impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> {
// used when debugging
fn nodestr(&self, id: ast::NodeId) -> String {
self.tcx.map.node_to_string(id).to_string()
}
}
-impl<'a> Visitor<()> for PrivacyVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for PrivacyVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let orig_curitem = replace(&mut self.curitem, item.id);
visit::walk_item(self, item, ());
/// The privacy sanity check visitor, ensures unnecessary visibility isn't here
////////////////////////////////////////////////////////////////////////////////
-struct SanePrivacyVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct SanePrivacyVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
in_fn: bool,
}
-impl<'a> Visitor<()> for SanePrivacyVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for SanePrivacyVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
if self.in_fn {
self.check_all_inherited(item);
}
}
-impl<'a> SanePrivacyVisitor<'a> {
+impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> {
/// Validates all of the visibility qualifiers placed on the item given. This
/// ensures that there are no extraneous qualifiers that don't actually do
/// anything. In theory these qualifiers wouldn't parse, but that may happen
}
}
-struct VisiblePrivateTypesVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct VisiblePrivateTypesVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
exported_items: &'a ExportedItems,
public_items: &'a PublicItems,
}
-struct CheckTypeForPrivatenessVisitor<'a, 'b:'a> {
- inner: &'a VisiblePrivateTypesVisitor<'b>,
+struct CheckTypeForPrivatenessVisitor<'a, 'b: 'a, 'tcx: 'b> {
+ inner: &'a VisiblePrivateTypesVisitor<'b, 'tcx>,
/// whether the type refers to private types.
contains_private: bool,
/// whether we've recurred at all (i.e. if we're pointing at the
outer_type_is_public_path: bool,
}
-impl<'a> VisiblePrivateTypesVisitor<'a> {
+impl<'a, 'tcx> VisiblePrivateTypesVisitor<'a, 'tcx> {
fn path_is_private_type(&self, path_id: ast::NodeId) -> bool {
let did = match self.tcx.def_map.borrow().find_copy(&path_id) {
// `int` etc. (None doesn't seem to occur.)
}
}
-impl<'a, 'b> Visitor<()> for CheckTypeForPrivatenessVisitor<'a, 'b> {
+impl<'a, 'b, 'tcx> Visitor<()> for CheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty, _: ()) {
match ty.node {
ast::TyPath(_, _, path_id) => {
fn visit_expr(&mut self, _: &ast::Expr, _: ()) {}
}
-impl<'a> Visitor<()> for VisiblePrivateTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for VisiblePrivateTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
match item.node {
// contents of a private mod can be reexported, so we need
}
// Information needed while computing reachability.
-struct ReachableContext<'a> {
+struct ReachableContext<'a, 'tcx: 'a> {
// The type context.
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
// The set of items which must be exported in the linkage sense.
reachable_symbols: NodeSet,
// A worklist of item IDs. Each item ID in this worklist will be inlined
any_library: bool,
}
-impl<'a> Visitor<()> for ReachableContext<'a> {
+impl<'a, 'tcx> Visitor<()> for ReachableContext<'a, 'tcx> {
fn visit_expr(&mut self, expr: &ast::Expr, _: ()) {
}
}
-impl<'a> ReachableContext<'a> {
+impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
// Creates a new reachability computation context.
- fn new(tcx: &'a ty::ctxt) -> ReachableContext<'a> {
+ fn new(tcx: &'a ty::ctxt<'tcx>) -> ReachableContext<'a, 'tcx> {
let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| {
*ty != config::CrateTypeExecutable
});
span.expn_info.is_some() || span == DUMMY_SP
}
-struct DxrVisitor<'l> {
+struct DxrVisitor<'l, 'tcx: 'l> {
sess: &'l Session,
- analysis: &'l CrateAnalysis,
+ analysis: &'l CrateAnalysis<'tcx>,
collected_paths: Vec<(NodeId, ast::Path, bool, recorder::Row)>,
collecting: bool,
fmt: FmtStrs<'l>,
}
-impl <'l> DxrVisitor<'l> {
+impl<'l, 'tcx> DxrVisitor<'l, 'tcx> {
fn dump_crate_info(&mut self, name: &str, krate: &ast::Crate) {
// the current crate
self.fmt.crate_str(krate.span, name);
}
}
-impl<'l> Visitor<DxrVisitorEnv> for DxrVisitor<'l> {
+impl<'l, 'tcx> Visitor<DxrVisitorEnv> for DxrVisitor<'l, 'tcx> {
fn visit_item(&mut self, item:&ast::Item, e: DxrVisitorEnv) {
if generated_code(item.span) {
return
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
-struct SubstFolder<'a> {
- tcx: &'a ty::ctxt,
+struct SubstFolder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
substs: &'a Substs,
// The location for which the substitution is performed, if available.
ty_stack_depth: uint,
}
-impl<'a> TypeFolder for SubstFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
// Note: This routine only handles regions that are bound on
use syntax::codemap::Span;
use syntax::fold::Folder;
-struct ConstantExpr<'a>(&'a ty::ctxt, Gc<ast::Expr>);
+struct ConstantExpr<'a, 'tcx: 'a>(&'a ty::ctxt<'tcx>, Gc<ast::Expr>);
-impl<'a> Eq for ConstantExpr<'a> {
+impl<'a, 'tcx> Eq for ConstantExpr<'a, 'tcx> {
fn assert_receiver_is_total_eq(&self) {}
}
-impl<'a> PartialEq for ConstantExpr<'a> {
- fn eq(&self, other: &ConstantExpr<'a>) -> bool {
+impl<'a, 'tcx> PartialEq for ConstantExpr<'a, 'tcx> {
+ fn eq(&self, other: &ConstantExpr<'a, 'tcx>) -> bool {
let &ConstantExpr(tcx, expr) = self;
let &ConstantExpr(_, other_expr) = other;
match const_eval::compare_lit_exprs(tcx, &*expr, &*other_expr) {
// An option identifying a branch (either a literal, an enum variant or a range)
#[deriving(Eq, PartialEq)]
-enum Opt<'a> {
- ConstantValue(ConstantExpr<'a>),
- ConstantRange(ConstantExpr<'a>, ConstantExpr<'a>),
+enum Opt<'blk, 'tcx: 'blk> {
+ ConstantValue(ConstantExpr<'blk, 'tcx>),
+ ConstantRange(ConstantExpr<'blk, 'tcx>, ConstantExpr<'blk, 'tcx>),
Variant(ty::Disr, Rc<adt::Repr>, ast::DefId),
SliceLengthEqual(uint),
SliceLengthGreaterOrEqual(/* prefix length */ uint, /* suffix length */ uint),
}
-impl<'a> Opt<'a> {
- fn trans(&self, mut bcx: &'a Block<'a>) -> OptResult<'a> {
+impl<'blk, 'tcx> Opt<'blk, 'tcx> {
+ fn trans(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
let _icx = push_ctxt("match::trans_opt");
let ccx = bcx.ccx();
match *self {
CompareSliceLength
}
-pub enum OptResult<'a> {
- SingleResult(Result<'a>),
- RangeResult(Result<'a>, Result<'a>),
- LowerBound(Result<'a>)
+pub enum OptResult<'blk, 'tcx: 'blk> {
+ SingleResult(Result<'blk, 'tcx>),
+ RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
+ LowerBound(Result<'blk, 'tcx>)
}
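
From here on, `Block` parameters are taken by value rather than as `&'a Block<'a>`. That only works if `Block` is itself a copyable reference type; here is a self-contained sketch of the presumed arrangement (the real definitions live in `middle::trans::common`, whose renamed `BlockS` shows up in the `raw_block` hunk further below):

    struct TyCtxt<'tcx> { krate_name: &'tcx str }

    struct BlockS<'blk, 'tcx: 'blk> {
        terminated: bool,
        tcx: &'blk TyCtxt<'tcx>,
    }

    // The alias that lets blocks be passed by value: a reference is Copy.
    type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;

    fn inspect<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) {
        let _ = bcx.terminated;
    }

    fn use_twice<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) {
        inspect(bcx); // passing by value just copies the reference,
        inspect(bcx); // so bcx is still usable afterwards
    }

    fn main() {
        let name = "example".to_string();
        let tcx = TyCtxt { krate_name: name.as_slice() };
        let block = BlockS { terminated: false, tcx: &tcx };
        use_twice(&block);
    }
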
#[deriving(Clone)]
type BindingsMap = HashMap<Ident, BindingInfo>;
-struct ArmData<'a, 'b> {
- bodycx: &'b Block<'b>,
+struct ArmData<'a, 'blk, 'tcx: 'blk> {
+ bodycx: Block<'blk, 'tcx>,
arm: &'a ast::Arm,
bindings_map: BindingsMap
}
 * As we proceed, `bound_ptrs` is filled with pointers to the values to be
 * bound; these pointers are stored in the llmatch variables just before
 * executing the `data` arm.
*/
-struct Match<'a, 'b:'a> {
+struct Match<'a, 'blk: 'a, 'tcx: 'blk> {
pats: Vec<Gc<ast::Pat>>,
- data: &'a ArmData<'a, 'b>,
+ data: &'a ArmData<'a, 'blk, 'tcx>,
bound_ptrs: Vec<(Ident, ValueRef)>
}
-impl<'a, 'b> Repr for Match<'a, 'b> {
+impl<'a, 'blk, 'tcx> Repr for Match<'a, 'blk, 'tcx> {
fn repr(&self, tcx: &ty::ctxt) -> String {
if tcx.sess.verbose() {
            // for many programs, this just takes too long to serialize
return false;
}
-fn expand_nested_bindings<'a, 'b>(
- bcx: &'b Block<'b>,
- m: &'a [Match<'a, 'b>],
- col: uint,
- val: ValueRef)
- -> Vec<Match<'a, 'b>> {
+fn expand_nested_bindings<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ col: uint,
+ val: ValueRef)
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("expand_nested_bindings(bcx={}, m={}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
type EnterPatterns<'a> = |&[Gc<ast::Pat>]|: 'a -> Option<Vec<Gc<ast::Pat>>>;
-fn enter_match<'a, 'b>(
- bcx: &'b Block<'b>,
- dm: &DefMap,
- m: &'a [Match<'a, 'b>],
- col: uint,
- val: ValueRef,
- e: EnterPatterns)
- -> Vec<Match<'a, 'b>> {
+fn enter_match<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ dm: &DefMap,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ col: uint,
+ val: ValueRef,
+ e: EnterPatterns)
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("enter_match(bcx={}, m={}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
}).collect()
}
-fn enter_default<'a, 'b>(
- bcx: &'b Block<'b>,
- dm: &DefMap,
- m: &'a [Match<'a, 'b>],
- col: uint,
- val: ValueRef)
- -> Vec<Match<'a, 'b>> {
+fn enter_default<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ dm: &DefMap,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ col: uint,
+ val: ValueRef)
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("enter_default(bcx={}, m={}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
/// takes the complete row of patterns rather than just the first one.
/// Also, most of the enter_() family functions have been unified with
/// the check_match specialization step.
-fn enter_opt<'a, 'b>(
- bcx: &'b Block<'b>,
+fn enter_opt<'a, 'blk, 'tcx>(
+ bcx: Block<'blk, 'tcx>,
_: ast::NodeId,
dm: &DefMap,
- m: &'a [Match<'a, 'b>],
+ m: &'a [Match<'a, 'blk, 'tcx>],
opt: &Opt,
col: uint,
variant_size: uint,
val: ValueRef)
- -> Vec<Match<'a, 'b>> {
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("enter_opt(bcx={}, m={}, opt={:?}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
// Returns the options in one column of matches. An option is something that
// needs to be conditionally matched at runtime; for example, the discriminant
// on a set of enum variants or a literal.
-fn get_branches<'a>(bcx: &'a Block, m: &[Match], col: uint) -> Vec<Opt<'a>> {
+fn get_branches<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ m: &[Match], col: uint)
+ -> Vec<Opt<'blk, 'tcx>> {
let ccx = bcx.ccx();
- fn add_to_set<'a>(set: &mut Vec<Opt<'a>>, opt: Opt<'a>) {
+ fn add_to_set<'blk, 'tcx>(set: &mut Vec<Opt<'blk, 'tcx>>, opt: Opt<'blk, 'tcx>) {
if !set.contains(&opt) {
set.push(opt);
}
}
ast::PatIdent(..) | ast::PatEnum(..) | ast::PatStruct(..) => {
// This is either an enum variant or a variable binding.
- let opt_def = ccx.tcx.def_map.borrow().find_copy(&cur.id);
+ let opt_def = ccx.tcx().def_map.borrow().find_copy(&cur.id);
match opt_def {
Some(def::DefVariant(enum_id, var_id, _)) => {
let variant = ty::enum_variant_with_id(ccx.tcx(), enum_id, var_id);
found
}
-struct ExtractedBlock<'a> {
- vals: Vec<ValueRef> ,
- bcx: &'a Block<'a>,
+struct ExtractedBlock<'blk, 'tcx: 'blk> {
+ vals: Vec<ValueRef>,
+ bcx: Block<'blk, 'tcx>,
}
-fn extract_variant_args<'a>(
- bcx: &'a Block<'a>,
- repr: &adt::Repr,
- disr_val: ty::Disr,
- val: ValueRef)
- -> ExtractedBlock<'a> {
+fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ repr: &adt::Repr,
+ disr_val: ty::Disr,
+ val: ValueRef)
+ -> ExtractedBlock<'blk, 'tcx> {
let _icx = push_ctxt("match::extract_variant_args");
let args = Vec::from_fn(adt::num_args(repr, disr_val), |i| {
adt::trans_field_ptr(bcx, repr, val, disr_val, i)
Datum::new(val, left_ty, Lvalue)
}
-fn bind_subslice_pat<'a>(
- bcx: &'a Block<'a>,
- pat_id: ast::NodeId,
- val: ValueRef,
- offset_left: uint,
- offset_right: uint) -> ValueRef {
+fn bind_subslice_pat(bcx: Block,
+ pat_id: ast::NodeId,
+ val: ValueRef,
+ offset_left: uint,
+ offset_right: uint) -> ValueRef {
let _icx = push_ctxt("match::bind_subslice_pat");
let vec_ty = node_id_type(bcx, pat_id);
let vt = tvec::vec_types(bcx, ty::sequence_element_type(bcx.tcx(), ty::type_content(vec_ty)));
scratch.val
}
-fn extract_vec_elems<'a>(
- bcx: &'a Block<'a>,
- left_ty: ty::t,
- before: uint,
- after: uint,
- val: ValueRef)
- -> ExtractedBlock<'a> {
+fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ left_ty: ty::t,
+ before: uint,
+ after: uint,
+ val: ValueRef)
+ -> ExtractedBlock<'blk, 'tcx> {
let _icx = push_ctxt("match::extract_vec_elems");
let vec_datum = match_datum(val, left_ty);
let (base, len) = vec_datum.get_vec_base_and_len(bcx);
}
/// What to do when the pattern match fails.
-enum FailureHandler<'a> {
+enum FailureHandler {
Infallible,
JumpToBasicBlock(BasicBlockRef),
Unreachable
}
-impl<'a> FailureHandler<'a> {
+impl FailureHandler {
fn is_fallible(&self) -> bool {
match *self {
Infallible => false,
!self.is_fallible()
}
- fn handle_fail(&self, bcx: &Block) {
+ fn handle_fail(&self, bcx: Block) {
match *self {
Infallible =>
fail!("attempted to fail in an infallible failure handler!"),
}
// Compiles a comparison between two things.
-fn compare_values<'a>(
- cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: ty::t)
- -> Result<'a> {
- fn compare_str<'a>(cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: ty::t)
- -> Result<'a> {
+fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: ty::t)
+ -> Result<'blk, 'tcx> {
+ fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: ty::t)
+ -> Result<'blk, 'tcx> {
let did = langcall(cx,
None,
format!("comparison of `{}`",
}
}
-fn insert_lllocals<'a>(mut bcx: &'a Block<'a>, bindings_map: &BindingsMap,
- cs: Option<cleanup::ScopeId>)
- -> &'a Block<'a> {
+fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ bindings_map: &BindingsMap,
+ cs: Option<cleanup::ScopeId>)
+ -> Block<'blk, 'tcx> {
/*!
* For each binding in `data.bindings_map`, adds an appropriate entry into
* the `fcx.lllocals` map
bcx
}
-fn compile_guard<'a, 'b>(
- bcx: &'b Block<'b>,
- guard_expr: &ast::Expr,
- data: &ArmData,
- m: &'a [Match<'a, 'b>],
- vals: &[ValueRef],
- chk: &FailureHandler,
- has_genuine_default: bool)
- -> &'b Block<'b> {
+fn compile_guard<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ guard_expr: &ast::Expr,
+ data: &ArmData,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ vals: &[ValueRef],
+ chk: &FailureHandler,
+ has_genuine_default: bool)
+ -> Block<'blk, 'tcx> {
debug!("compile_guard(bcx={}, guard_expr={}, m={}, vals={})",
bcx.to_str(),
bcx.expr_to_string(guard_expr),
})
}
-fn compile_submatch<'a, 'b>(
- bcx: &'b Block<'b>,
- m: &'a [Match<'a, 'b>],
- vals: &[ValueRef],
- chk: &FailureHandler,
- has_genuine_default: bool) {
+fn compile_submatch<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ vals: &[ValueRef],
+ chk: &FailureHandler,
+ has_genuine_default: bool) {
debug!("compile_submatch(bcx={}, m={}, vals={})",
bcx.to_str(),
m.repr(bcx.tcx()),
}
}
-fn compile_submatch_continue<'a, 'b>(
- mut bcx: &'b Block<'b>,
- m: &'a [Match<'a, 'b>],
- vals: &[ValueRef],
- chk: &FailureHandler,
- col: uint,
- val: ValueRef,
- has_genuine_default: bool) {
+fn compile_submatch_continue<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ vals: &[ValueRef],
+ chk: &FailureHandler,
+ col: uint,
+ val: ValueRef,
+ has_genuine_default: bool) {
let fcx = bcx.fcx;
let tcx = bcx.tcx();
let dm = &tcx.def_map;
}
}
-pub fn trans_match<'a>(
- bcx: &'a Block<'a>,
- match_expr: &ast::Expr,
- discr_expr: &ast::Expr,
- arms: &[ast::Arm],
- dest: Dest)
- -> &'a Block<'a> {
+pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ match_expr: &ast::Expr,
+ discr_expr: &ast::Expr,
+ arms: &[ast::Arm],
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::trans_match");
trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
}
/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
-fn is_discr_reassigned(bcx: &Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
+fn is_discr_reassigned(bcx: Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
match discr.node {
ast::ExprPath(..) => match bcx.def(discr.id) {
def::DefArg(vid, _) | def::DefBinding(vid, _) |
}
}
-fn create_bindings_map(bcx: &Block, pat: Gc<ast::Pat>,
+fn create_bindings_map(bcx: Block, pat: Gc<ast::Pat>,
discr: &ast::Expr, body: &ast::Expr) -> BindingsMap {
// Create the bindings map, which is a mapping from each binding name
// to an alloca() that will be the value for that local variable.
return bindings_map;
}
-fn trans_match_inner<'a>(scope_cx: &'a Block<'a>,
- match_id: ast::NodeId,
- discr_expr: &ast::Expr,
- arms: &[ast::Arm],
- dest: Dest) -> &'a Block<'a> {
+fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
+ match_id: ast::NodeId,
+ discr_expr: &ast::Expr,
+ arms: &[ast::Arm],
+ dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::trans_match_inner");
let fcx = scope_cx.fcx;
let mut bcx = scope_cx;
BindArgument
}
-pub fn store_local<'a>(bcx: &'a Block<'a>,
- local: &ast::Local)
- -> &'a Block<'a> {
+pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ local: &ast::Local)
+ -> Block<'blk, 'tcx> {
/*!
* Generates code for a local variable declaration like
* `let <pat>;` or `let <pat> = <opt_init_expr>`.
}
};
- fn create_dummy_locals<'a>(mut bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>)
- -> &'a Block<'a> {
+ fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>)
+ -> Block<'blk, 'tcx> {
// create dummy memory for the variables if we have no
// value to store into them immediately
let tcx = bcx.tcx();
}
}
-pub fn store_arg<'a>(mut bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>,
- arg: Datum<Rvalue>,
- arg_scope: cleanup::ScopeId)
- -> &'a Block<'a> {
+pub fn store_arg<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>,
+ arg: Datum<Rvalue>,
+ arg_scope: cleanup::ScopeId)
+ -> Block<'blk, 'tcx> {
/*!
* Generates code for argument patterns like `fn foo(<pat>: T)`.
* Creates entries in the `llargs` map for each of the bindings
/// Generates code for the pattern binding in a `for` loop like
/// `for <pat> in <expr> { ... }`.
-pub fn store_for_loop_binding<'a>(
- bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>,
- llvalue: ValueRef,
- body_scope: cleanup::ScopeId)
- -> &'a Block<'a> {
+pub fn store_for_loop_binding<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>,
+ llvalue: ValueRef,
+ body_scope: cleanup::ScopeId)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::store_for_loop_binding");
if simple_identifier(&*pat).is_some() {
bind_irrefutable_pat(bcx, pat, llvalue, BindLocal, body_scope)
}
-fn mk_binding_alloca<'a,A>(bcx: &'a Block<'a>,
- p_id: ast::NodeId,
- ident: &ast::Ident,
- binding_mode: IrrefutablePatternBindingMode,
- cleanup_scope: cleanup::ScopeId,
- arg: A,
- populate: |A, &'a Block<'a>, ValueRef, ty::t| -> &'a Block<'a>)
- -> &'a Block<'a> {
+fn mk_binding_alloca<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
+ p_id: ast::NodeId,
+ ident: &ast::Ident,
+ binding_mode: IrrefutablePatternBindingMode,
+ cleanup_scope: cleanup::ScopeId,
+ arg: A,
+ populate: |A, Block<'blk, 'tcx>, ValueRef, ty::t|
+ -> Block<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let var_ty = node_id_type(bcx, p_id);
    // Allocate memory on the stack for the binding.
bcx
}
-fn bind_irrefutable_pat<'a>(
- bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>,
- val: ValueRef,
- binding_mode: IrrefutablePatternBindingMode,
- cleanup_scope: cleanup::ScopeId)
- -> &'a Block<'a> {
+fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>,
+ val: ValueRef,
+ binding_mode: IrrefutablePatternBindingMode,
+ cleanup_scope: cleanup::ScopeId)
+ -> Block<'blk, 'tcx> {
/*!
* A simple version of the pattern matching code that only handles
* irrefutable patterns. This is used in let/argument patterns,
* these, for places in trans where the `ty::t` isn't directly
* available.
*/
-pub fn represent_node(bcx: &Block, node: ast::NodeId) -> Rc<Repr> {
+pub fn represent_node(bcx: Block, node: ast::NodeId) -> Rc<Repr> {
represent_type(bcx.ccx(), node_id_type(bcx, node))
}
/// Decides how to represent a given type.
pub fn represent_type(cx: &CrateContext, t: ty::t) -> Rc<Repr> {
debug!("Representing: {}", ty_to_string(cx.tcx(), t));
- match cx.adt_reprs.borrow().find(&t) {
+ match cx.adt_reprs().borrow().find(&t) {
Some(repr) => return repr.clone(),
None => {}
}
let repr = Rc::new(represent_type_uncached(cx, t));
debug!("Represented as: {:?}", repr)
- cx.adt_reprs.borrow_mut().insert(t, repr.clone());
+ cx.adt_reprs().borrow_mut().insert(t, repr.clone());
repr
}
attempts = choose_shortest;
},
attr::ReprPacked => {
- cx.tcx.sess.bug("range_to_inttype: found ReprPacked on an enum");
+ cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
}
}
for &ity in attempts.iter() {
*
* This should ideally be less tightly tied to `_match`.
*/
-pub fn trans_switch(bcx: &Block, r: &Repr, scrutinee: ValueRef)
+pub fn trans_switch(bcx: Block, r: &Repr, scrutinee: ValueRef)
-> (_match::BranchKind, Option<ValueRef>) {
match *r {
CEnum(..) | General(..) |
/// Obtain the actual discriminant of a value.
-pub fn trans_get_discr(bcx: &Block, r: &Repr, scrutinee: ValueRef, cast_to: Option<Type>)
+pub fn trans_get_discr(bcx: Block, r: &Repr, scrutinee: ValueRef, cast_to: Option<Type>)
-> ValueRef {
let signed;
let val;
}
}
-fn struct_wrapped_nullable_bitdiscr(bcx: &Block, nndiscr: Disr, ptrfield: PointerField,
+fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, ptrfield: PointerField,
scrutinee: ValueRef) -> ValueRef {
let llptrptr = match ptrfield {
ThinPointer(field) => GEPi(bcx, scrutinee, [0, field]),
}
/// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: &Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
+fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
-> ValueRef {
let llty = ll_inttype(bcx.ccx(), ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
*
* This should ideally be less tightly tied to `_match`.
*/
-pub fn trans_case<'a>(bcx: &'a Block<'a>, r: &Repr, discr: Disr)
- -> _match::OptResult<'a> {
+pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
+ -> _match::OptResult<'blk, 'tcx> {
match *r {
CEnum(ity, _, _) => {
_match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
* Set the discriminant for a new value of the given case of the given
* representation.
*/
-pub fn trans_set_discr(bcx: &Block, r: &Repr, val: ValueRef, discr: Disr) {
+pub fn trans_set_discr(bcx: Block, r: &Repr, val: ValueRef, discr: Disr) {
match *r {
CEnum(ity, min, max) => {
assert_discr_in_range(ity, min, max, discr);
}
/// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr(bcx: &Block, r: &Repr, val: ValueRef, discr: Disr,
+pub fn trans_field_ptr(bcx: Block, r: &Repr, val: ValueRef, discr: Disr,
ix: uint) -> ValueRef {
// Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr
}
}
-pub fn struct_field_ptr(bcx: &Block, st: &Struct, val: ValueRef,
+pub fn struct_field_ptr(bcx: Block, st: &Struct, val: ValueRef,
ix: uint, needs_cast: bool) -> ValueRef {
let val = if needs_cast {
let ccx = bcx.ccx();
GEPi(bcx, val, [0, ix])
}
-pub fn fold_variants<'r, 'b>(
- bcx: &'b Block<'b>, r: &Repr, value: ValueRef,
- f: |&'b Block<'b>, &Struct, ValueRef|: 'r -> &'b Block<'b>
-) -> &'b Block<'b> {
+pub fn fold_variants<'blk, 'tcx>(
+ bcx: Block<'blk, 'tcx>, r: &Repr, value: ValueRef,
+ f: |Block<'blk, 'tcx>, &Struct, ValueRef| -> Block<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
match *r {
Univariant(ref st, _) => {
}
/// Access the struct drop flag, if present.
-pub fn trans_drop_flag_ptr<'b>(mut bcx: &'b Block<'b>, r: &Repr,
- val: ValueRef) -> datum::DatumBlock<'b, datum::Expr> {
+pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr, val: ValueRef)
+ -> datum::DatumBlock<'blk, 'tcx, datum::Expr> {
let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), ty::mk_bool());
match *r {
Univariant(ref st, true) => {
use syntax::ast;
// Take an inline assembly expression and splat it out via LLVM
-pub fn trans_inline_asm<'a>(bcx: &'a Block<'a>, ia: &ast::InlineAsm)
- -> &'a Block<'a> {
+pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
+ -> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
use back::{link, abi};
use driver::config;
use driver::config::{NoDebugInfo, FullDebugInfo};
-use driver::driver::{CrateAnalysis, CrateTranslation};
+use driver::driver::{CrateAnalysis, CrateTranslation, ModuleTranslation};
use driver::session::Session;
use lint;
use llvm::{BasicBlockRef, ModuleRef, ValueRef, Vector, get_param};
use middle::trans::callee;
use middle::trans::cleanup::{CleanupMethods, ScopeId};
use middle::trans::cleanup;
-use middle::trans::common::{Block, C_bool, C_bytes, C_i32, C_integral, C_nil};
-use middle::trans::common::{C_null, C_struct, C_u64, C_u8, C_uint, C_undef};
+use middle::trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_integral, C_nil};
+use middle::trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_uint, C_undef};
use middle::trans::common::{CrateContext, ExternMap, FunctionContext};
use middle::trans::common::{NodeInfo, Result, SubstP, monomorphize_type};
use middle::trans::common::{node_id_type, param_substs, return_type_is_void};
use middle::trans::common::{type_is_zero_size, val_ty};
use middle::trans::common;
use middle::trans::consts;
+use middle::trans::context::SharedCrateContext;
use middle::trans::controlflow;
use middle::trans::datum;
use middle::trans::debuginfo;
use middle::trans::inline;
use middle::trans::intrinsic;
use middle::trans::machine;
-use middle::trans::machine::{llsize_of, llsize_of_real};
+use middle::trans::machine::{llsize_of, llsize_of_real, llalign_of_min};
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::tvec;
use libc::{c_uint, uint64_t};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
+use std::collections::HashSet;
use std::rc::Rc;
use std::{i8, i16, i32, i64};
use syntax::abi::{X86, X86_64, Arm, Mips, Mipsel, Rust, RustCall};
_InsnCtxt { _cannot_construct_outside_of_this_module: () }
}
-pub struct StatRecorder<'a> {
- ccx: &'a CrateContext,
+pub struct StatRecorder<'a, 'tcx: 'a> {
+ ccx: &'a CrateContext<'a, 'tcx>,
name: Option<String>,
start: u64,
istart: uint,
}
-impl<'a> StatRecorder<'a> {
- pub fn new(ccx: &'a CrateContext, name: String) -> StatRecorder<'a> {
+impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
+ pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
+ -> StatRecorder<'a, 'tcx> {
let start = if ccx.sess().trans_stats() {
time::precise_time_ns()
} else {
0
};
- let istart = ccx.stats.n_llvm_insns.get();
+ let istart = ccx.stats().n_llvm_insns.get();
StatRecorder {
ccx: ccx,
name: Some(name),
}
#[unsafe_destructor]
-impl<'a> Drop for StatRecorder<'a> {
+impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
fn drop(&mut self) {
if self.ccx.sess().trans_stats() {
let end = time::precise_time_ns();
let elapsed = ((end - self.start) / 1_000_000) as uint;
- let iend = self.ccx.stats.n_llvm_insns.get();
- self.ccx.stats.fn_stats.borrow_mut().push((self.name.take().unwrap(),
+ let iend = self.ccx.stats().n_llvm_insns.get();
+ self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
elapsed,
iend - self.istart));
- self.ccx.stats.n_fns.set(self.ccx.stats.n_fns.get() + 1);
+ self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
// Reset LLVM insn count to avoid compound costs.
- self.ccx.stats.n_llvm_insns.set(self.istart);
+ self.ccx.stats().n_llvm_insns.set(self.istart);
}
}
}
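
The `ccx.stats` to `ccx.stats()` rewrites in this impl, and the `tcx()`, `externs()`, `llmod()`, and `tydescs()` calls below, all follow one pattern: direct field access becomes an accessor method, presumably so `CrateContext` can delegate to the newly imported `SharedCrateContext` without touching these call sites again. A self-contained analogy with hypothetical types:

    struct Stats { n_fns: uint }

    struct Ccx { stats: Stats }

    impl Ccx {
        // Once callers go through the method, the field can later move into
        // shared state without another sweep over the call sites.
        fn stats<'a>(&'a self) -> &'a Stats { &self.stats }
    }

    fn main() {
        let ccx = Ccx { stats: Stats { n_fns: 0 } };
        println!("{}", ccx.stats().n_fns);
    }
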
let llfn: ValueRef = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMGetOrInsertFunction(ccx.llmod, buf, ty.to_ref())
+ llvm::LLVMGetOrInsertFunction(ccx.llmod(), buf, ty.to_ref())
}
});
_ => {}
}
- if ccx.tcx.sess.opts.cg.no_redzone {
+ if ccx.tcx().sess.opts.cg.no_redzone {
unsafe {
llvm::LLVMAddFunctionAttribute(llfn,
llvm::FunctionIndex as c_uint,
}
fn get_extern_rust_fn(ccx: &CrateContext, fn_ty: ty::t, name: &str, did: ast::DefId) -> ValueRef {
- match ccx.externs.borrow().find_equiv(&name) {
+ match ccx.externs().borrow().find_equiv(&name) {
Some(n) => return *n,
None => ()
}
set_llvm_fn_attrs(attrs.as_slice(), f)
});
- ccx.externs.borrow_mut().insert(name.to_string(), f);
+ ccx.externs().borrow_mut().insert(name.to_string(), f);
f
}
let unboxed_closure_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_id);
match unboxed_closure.kind {
ty::FnUnboxedClosureKind => {
- ty::mk_imm_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnMutUnboxedClosureKind => {
- ty::mk_mut_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnOnceUnboxedClosureKind => unboxed_closure_type,
}
pub fn kind_for_unboxed_closure(ccx: &CrateContext, closure_id: ast::DefId)
-> ty::UnboxedClosureKind {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
unboxed_closures.get(&closure_id).kind
}
(f.sig.inputs.clone(), f.sig.output, f.abi, Some(Type::i8p(ccx)))
}
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_did);
let function_type = unboxed_closure.closure_type.clone();
let self_type = self_type_for_unboxed_closure(ccx, closure_did);
let llfty = type_of_rust_fn(ccx, env, inputs.as_slice(), output, abi);
debug!("decl_rust_fn(input count={},type={})",
inputs.len(),
- ccx.tn.type_to_string(llfty));
+ ccx.tn().type_to_string(llfty));
let llfn = decl_fn(ccx, name, llvm::CCallConv, llfty, output);
let attrs = get_fn_llvm_attributes(ccx, fn_ty);
// Returns a pointer to the body for the box. The box may be an opaque
// box. The result will be cast to the type of body_t, if it is statically
// known.
-pub fn at_box_body(bcx: &Block, body_t: ty::t, boxptr: ValueRef) -> ValueRef {
+pub fn at_box_body(bcx: Block, body_t: ty::t, boxptr: ValueRef) -> ValueRef {
let _icx = push_ctxt("at_box_body");
let ccx = bcx.ccx();
let ty = Type::at_box(ccx, type_of(ccx, body_t));
GEPi(bcx, boxptr, [0u, abi::box_field_body])
}
-fn require_alloc_fn(bcx: &Block, info_ty: ty::t, it: LangItem) -> ast::DefId {
+fn require_alloc_fn(bcx: Block, info_ty: ty::t, it: LangItem) -> ast::DefId {
match bcx.tcx().lang_items.require(it) {
Ok(id) => id,
Err(s) => {
// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.
-pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>,
- llty_ptr: Type,
- info_ty: ty::t,
- size: ValueRef,
- align: ValueRef)
- -> Result<'a> {
+pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ llty_ptr: Type,
+ info_ty: ty::t,
+ size: ValueRef,
+ align: ValueRef)
+ -> Result<'blk, 'tcx> {
let _icx = push_ctxt("malloc_raw_exchange");
// Allocate space:
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
-pub fn malloc_raw_dyn_managed<'a>(
- bcx: &'a Block<'a>,
- t: ty::t,
- alloc_fn: LangItem,
- size: ValueRef)
- -> Result<'a> {
- let _icx = push_ctxt("malloc_raw_managed");
+pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t, alloc_fn: LangItem)
+ -> Result<'blk, 'tcx> {
+ let _icx = push_ctxt("malloc_raw_dyn_proc");
+ let ccx = bcx.ccx();
+
+ let langcall = require_alloc_fn(bcx, t, alloc_fn);
+
+ // Grab the TypeRef type of ptr_ty.
+ let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
+ let ptr_llty = type_of(ccx, ptr_ty);
+
+ let llty = type_of(bcx.ccx(), t);
+ let size = llsize_of(bcx.ccx(), llty);
+ let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
+
+ // Allocate space:
+ let drop_glue = glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t));
+ let r = callee::trans_lang_call(
+ bcx,
+ langcall,
+ [
+ PointerCast(bcx, drop_glue, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to()),
+ size,
+ llalign
+ ],
+ None);
+ Result::new(r.bcx, PointerCast(r.bcx, r.val, ptr_llty))
+}
+
+
+pub fn malloc_raw_dyn_managed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ alloc_fn: LangItem,
+ size: ValueRef)
+ -> Result<'blk, 'tcx> {
+ let _icx = push_ctxt("malloc_raw_dyn_managed");
let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn);
// Type descriptor and type glue stuff
pub fn get_tydesc(ccx: &CrateContext, t: ty::t) -> Rc<tydesc_info> {
- match ccx.tydescs.borrow().find(&t) {
+ match ccx.tydescs().borrow().find(&t) {
Some(inf) => return inf.clone(),
_ => { }
}
- ccx.stats.n_static_tydescs.set(ccx.stats.n_static_tydescs.get() + 1u);
+ ccx.stats().n_static_tydescs.set(ccx.stats().n_static_tydescs.get() + 1u);
let inf = Rc::new(glue::declare_tydesc(ccx, t));
- ccx.tydescs.borrow_mut().insert(t, inf.clone());
+ ccx.tydescs().borrow_mut().insert(t, inf.clone());
inf
}
// Double-check that we never ask LLVM to declare the same symbol twice. It
// silently mangles such symbols, breaking our linkage model.
pub fn note_unique_llvm_symbol(ccx: &CrateContext, sym: String) {
- if ccx.all_llvm_symbols.borrow().contains(&sym) {
+ if ccx.all_llvm_symbols().borrow().contains(&sym) {
ccx.sess().bug(format!("duplicate LLVM symbol: {}", sym).as_slice());
}
- ccx.all_llvm_symbols.borrow_mut().insert(sym);
+ ccx.all_llvm_symbols().borrow_mut().insert(sym);
}
substs: &subst::Substs)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
- let did = if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(ccx, did)
- } else {
- did
- };
+ let did = inline::maybe_instantiate_inline(ccx, did);
if !substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
let dtor_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[glue::get_drop_glue_type(ccx, t)], ty::mk_nil());
get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name.as_slice(),
llvm::CCallConv,
llty,
// Used only for creating scalar comparison glue.
pub enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, }
-pub fn compare_scalar_types<'a>(
- cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- t: ty::t,
- op: ast::BinOp)
- -> Result<'a> {
+pub fn compare_scalar_types<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ t: ty::t,
+ op: ast::BinOp)
+ -> Result<'blk, 'tcx> {
let f = |a| Result::new(cx, compare_scalar_values(cx, lhs, rhs, a, op));
match ty::get(t).sty {
// A helper function to do the actual comparison of scalar values.
-pub fn compare_scalar_values<'a>(
- cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- nt: scalar_type,
- op: ast::BinOp)
- -> ValueRef {
+pub fn compare_scalar_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ nt: scalar_type,
+ op: ast::BinOp)
+ -> ValueRef {
let _icx = push_ctxt("compare_scalar_values");
- fn die(cx: &Block) -> ! {
+ fn die(cx: Block) -> ! {
cx.sess().bug("compare_scalar_values: must be a comparison operator");
}
match nt {
}
pub fn compare_simd_types(
- cx: &Block,
+ cx: Block,
lhs: ValueRef,
rhs: ValueRef,
t: ty::t,
}
}
-pub type val_and_ty_fn<'r,'b> =
- |&'b Block<'b>, ValueRef, ty::t|: 'r -> &'b Block<'b>;
+pub type val_and_ty_fn<'a, 'blk, 'tcx> =
+ |Block<'blk, 'tcx>, ValueRef, ty::t|: 'a -> Block<'blk, 'tcx>;
// Iterates through the elements of a structural type.
-pub fn iter_structural_ty<'r,
- 'b>(
- cx: &'b Block<'b>,
- av: ValueRef,
- t: ty::t,
- f: val_and_ty_fn<'r,'b>)
- -> &'b Block<'b> {
+pub fn iter_structural_ty<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ av: ValueRef,
+ t: ty::t,
+ f: val_and_ty_fn<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("iter_structural_ty");
- fn iter_variant<'r,
- 'b>(
- cx: &'b Block<'b>,
- repr: &adt::Repr,
- av: ValueRef,
- variant: &ty::VariantInfo,
- substs: &subst::Substs,
- f: val_and_ty_fn<'r,'b>)
- -> &'b Block<'b> {
+ fn iter_variant<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ repr: &adt::Repr,
+ av: ValueRef,
+ variant: &ty::VariantInfo,
+ substs: &subst::Substs,
+ f: val_and_ty_fn<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx();
let mut cx = cx;
return cx;
}
-pub fn cast_shift_expr_rhs<'a>(
- cx: &'a Block<'a>,
+pub fn cast_shift_expr_rhs(cx: Block,
op: ast::BinOp,
lhs: ValueRef,
rhs: ValueRef)
}
}
-pub fn fail_if_zero_or_overflows<'a>(
- cx: &'a Block<'a>,
- span: Span,
- divrem: ast::BinOp,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: ty::t)
- -> &'a Block<'a> {
+pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
+ cx: Block<'blk, 'tcx>,
+ span: Span,
+ divrem: ast::BinOp,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: ty::t)
+ -> Block<'blk, 'tcx> {
let (zero_text, overflow_text) = if divrem == ast::BiDiv {
("attempted to divide by zero",
"attempted to divide with overflow")
}
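
For user code, this guard means integer `/` and `%` are preceded by an emitted runtime check rather than trapping inside LLVM. A hypothetical program that trips the first message (the divisor is a variable, so constant evaluation cannot reject it at compile time):

    fn main() {
        let divisor = 0i;
        let _quotient = 1i / divisor; // fails at runtime with
                                      // "attempted to divide by zero"
    }
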
_ => {
let llty = type_of(ccx, t);
- get_extern_const(&mut *ccx.externs.borrow_mut(),
- ccx.llmod,
+ get_extern_const(&mut *ccx.externs().borrow_mut(),
+ ccx.llmod(),
name.as_slice(),
llty)
}
}
}
-pub fn invoke<'a>(
- bcx: &'a Block<'a>,
- llfn: ValueRef,
- llargs: Vec<ValueRef> ,
- fn_ty: ty::t,
- call_info: Option<NodeInfo>,
- // FIXME(15064) is_lang_item is a horrible hack, please remove it
- // at the soonest opportunity.
- is_lang_item: bool)
- -> (ValueRef, &'a Block<'a>) {
+pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ llfn: ValueRef,
+                          llargs: Vec<ValueRef>,
+ fn_ty: ty::t,
+ call_info: Option<NodeInfo>,
+ // FIXME(15064) is_lang_item is a horrible hack, please remove it
+ // at the soonest opportunity.
+ is_lang_item: bool)
+ -> (ValueRef, Block<'blk, 'tcx>) {
let _icx = push_ctxt("invoke_");
if bcx.unreachable.get() {
return (C_null(Type::i8(bcx.ccx())), bcx);
}
}
-pub fn need_invoke(bcx: &Block) -> bool {
+pub fn need_invoke(bcx: Block) -> bool {
if bcx.sess().no_landing_pads() {
return false;
}
bcx.fcx.needs_invoke()
}
-pub fn load_if_immediate(cx: &Block, v: ValueRef, t: ty::t) -> ValueRef {
+pub fn load_if_immediate(cx: Block, v: ValueRef, t: ty::t) -> ValueRef {
let _icx = push_ctxt("load_if_immediate");
if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
return v;
}
-pub fn load_ty(cx: &Block, ptr: ValueRef, t: ty::t) -> ValueRef {
+pub fn load_ty(cx: Block, ptr: ValueRef, t: ty::t) -> ValueRef {
/*!
* Helper for loading values from memory. Does the necessary conversion if
* the in-memory type differs from the type used for SSA values. Also
}
}
-pub fn store_ty(cx: &Block, v: ValueRef, dst: ValueRef, t: ty::t) {
+pub fn store_ty(cx: Block, v: ValueRef, dst: ValueRef, t: ty::t) {
/*!
* Helper for storing values in memory. Does the necessary conversion if
* the in-memory type differs from the type used for SSA values.
};
}
-pub fn ignore_lhs(_bcx: &Block, local: &ast::Local) -> bool {
+pub fn ignore_lhs(_bcx: Block, local: &ast::Local) -> bool {
match local.pat.node {
ast::PatWild(ast::PatWildSingle) => true, _ => false
}
}
-pub fn init_local<'a>(bcx: &'a Block<'a>, local: &ast::Local)
- -> &'a Block<'a> {
+pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local)
+ -> Block<'blk, 'tcx> {
debug!("init_local(bcx={}, local.id={:?})", bcx.to_str(), local.id);
let _indenter = indenter();
let _icx = push_ctxt("init_local");
_match::store_local(bcx, local)
}
-pub fn raw_block<'a>(
- fcx: &'a FunctionContext<'a>,
- is_lpad: bool,
- llbb: BasicBlockRef)
- -> &'a Block<'a> {
- common::Block::new(llbb, is_lpad, None, fcx)
+pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
+ is_lpad: bool,
+ llbb: BasicBlockRef)
+ -> Block<'blk, 'tcx> {
+ common::BlockS::new(llbb, is_lpad, None, fcx)
}
-pub fn with_cond<'a>(
- bcx: &'a Block<'a>,
- val: ValueRef,
- f: |&'a Block<'a>| -> &'a Block<'a>)
- -> &'a Block<'a> {
+pub fn with_cond<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ f: |Block<'blk, 'tcx>| -> Block<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("with_cond");
let fcx = bcx.fcx;
let next_cx = fcx.new_temp_block("next");
next_cx
}
-pub fn call_lifetime_start(cx: &Block, ptr: ValueRef) {
+pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
if cx.sess().opts.optimize == config::No {
return;
}
Call(cx, lifetime_start, [llsize, ptr], None);
}
-pub fn call_lifetime_end(cx: &Block, ptr: ValueRef) {
+pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
if cx.sess().opts.optimize == config::No {
return;
}
Call(cx, lifetime_end, [llsize, ptr], None);
}
-pub fn call_memcpy(cx: &Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
+pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
let _icx = push_ctxt("call_memcpy");
let ccx = cx.ccx();
let key = match ccx.sess().targ_cfg.arch {
let memcpy = ccx.get_intrinsic(&key);
let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
- let size = IntCast(cx, n_bytes, ccx.int_type);
+ let size = IntCast(cx, n_bytes, ccx.int_type());
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
Call(cx, memcpy, [dst_ptr, src_ptr, size, align, volatile], None);
}
-pub fn memcpy_ty(bcx: &Block, dst: ValueRef, src: ValueRef, t: ty::t) {
+pub fn memcpy_ty(bcx: Block, dst: ValueRef, src: ValueRef, t: ty::t) {
let _icx = push_ctxt("memcpy_ty");
let ccx = bcx.ccx();
if ty::type_is_structural(t) {
}
}
-pub fn zero_mem(cx: &Block, llptr: ValueRef, t: ty::t) {
+pub fn zero_mem(cx: Block, llptr: ValueRef, t: ty::t) {
if cx.unreachable.get() { return; }
let _icx = push_ctxt("zero_mem");
let bcx = cx;
b.call(llintrinsicfn, [llptr, llzeroval, size, align, volatile], None);
}
-pub fn alloc_ty(bcx: &Block, t: ty::t, name: &str) -> ValueRef {
+pub fn alloc_ty(bcx: Block, t: ty::t, name: &str) -> ValueRef {
let _icx = push_ctxt("alloc_ty");
let ccx = bcx.ccx();
let ty = type_of::type_of(ccx, t);
return val;
}
-pub fn alloca(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
let p = alloca_no_lifetime(cx, ty, name);
call_lifetime_start(cx, p);
p
}
-pub fn alloca_no_lifetime(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef {
let _icx = push_ctxt("alloca");
if cx.unreachable.get() {
unsafe {
Alloca(cx, ty, name)
}
-pub fn alloca_zeroed(cx: &Block, ty: ty::t, name: &str) -> ValueRef {
+pub fn alloca_zeroed(cx: Block, ty: ty::t, name: &str) -> ValueRef {
let llty = type_of::type_of(cx.ccx(), ty);
if cx.unreachable.get() {
unsafe {
p
}
-pub fn arrayalloca(cx: &Block, ty: Type, v: ValueRef) -> ValueRef {
+pub fn arrayalloca(cx: Block, ty: Type, v: ValueRef) -> ValueRef {
let _icx = push_ctxt("arrayalloca");
if cx.unreachable.get() {
unsafe {
//
// Be warned! You must call `init_function` before doing anything with the
// returned function context.
-pub fn new_fn_ctxt<'a>(ccx: &'a CrateContext,
- llfndecl: ValueRef,
- id: ast::NodeId,
- has_env: bool,
- output_type: ty::t,
- param_substs: &'a param_substs,
- sp: Option<Span>,
- block_arena: &'a TypedArena<Block<'a>>)
- -> FunctionContext<'a> {
+pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
+ llfndecl: ValueRef,
+ id: ast::NodeId,
+ has_env: bool,
+ output_type: ty::t,
+ param_substs: &'a param_substs,
+ sp: Option<Span>,
+ block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
+ -> FunctionContext<'a, 'tcx> {
param_substs.validate();
debug!("new_fn_ctxt(path={}, id={}, param_substs={})",
if id == -1 {
"".to_string()
} else {
- ccx.tcx.map.path_to_string(id).to_string()
+ ccx.tcx().map.path_to_string(id).to_string()
},
id, param_substs.repr(ccx.tcx()));
/// Performs setup on a newly created function, creating the entry scope block
/// and allocating space for the return pointer.
-pub fn init_function<'a>(fcx: &'a FunctionContext<'a>,
- skip_retptr: bool,
- output_type: ty::t) -> &'a Block<'a> {
+pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
+ skip_retptr: bool,
+ output_type: ty::t) -> Block<'a, 'tcx> {
let entry_bcx = fcx.new_temp_block("entry-block");
// Use a dummy instruction as the insertion point for all allocas.
/// datums.
///
/// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
-fn create_datums_for_fn_args_under_call_abi<
- 'a>(
- mut bcx: &'a Block<'a>,
+fn create_datums_for_fn_args_under_call_abi(
+ mut bcx: Block,
arg_scope: cleanup::CustomScopeIndex,
arg_tys: &[ty::t])
-> Vec<RvalueDatum> {
result
}
-fn copy_args_to_allocas<'a>(fcx: &FunctionContext<'a>,
- arg_scope: cleanup::CustomScopeIndex,
- bcx: &'a Block<'a>,
- args: &[ast::Arg],
- arg_datums: Vec<RvalueDatum> )
- -> &'a Block<'a> {
+fn copy_args_to_allocas<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
+ arg_scope: cleanup::CustomScopeIndex,
+ bcx: Block<'blk, 'tcx>,
+ args: &[ast::Arg],
+                                    arg_datums: Vec<RvalueDatum>)
+ -> Block<'blk, 'tcx> {
debug!("copy_args_to_allocas");
let _icx = push_ctxt("copy_args_to_allocas");
bcx
}
-fn copy_unboxed_closure_args_to_allocas<'a>(
- mut bcx: &'a Block<'a>,
+fn copy_unboxed_closure_args_to_allocas<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
arg_scope: cleanup::CustomScopeIndex,
args: &[ast::Arg],
arg_datums: Vec<RvalueDatum>,
monomorphized_arg_types: &[ty::t])
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("copy_unboxed_closure_args_to_allocas");
let arg_scope_id = cleanup::CustomScope(arg_scope);
// Ties up the llstaticallocas -> llloadenv -> lltop edges,
// and builds the return block.
-pub fn finish_fn<'a>(fcx: &'a FunctionContext<'a>,
- last_bcx: &'a Block<'a>,
- retty: ty::t) {
+pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
+ last_bcx: Block<'blk, 'tcx>,
+ retty: ty::t) {
let _icx = push_ctxt("finish_fn");
// This shouldn't need to recompute the return type,
}
// Builds the return block for a function.
-pub fn build_return_block(fcx: &FunctionContext, ret_cx: &Block, retty: ty::t) {
+pub fn build_return_block(fcx: &FunctionContext, ret_cx: Block, retty: ty::t) {
if fcx.llretslotptr.get().is_none() ||
(!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
return RetVoid(ret_cx);
abi: Abi,
has_env: bool,
is_unboxed_closure: IsUnboxedClosureFlag,
- maybe_load_env: <'a>|&'a Block<'a>, ScopeId|
- -> &'a Block<'a>) {
- ccx.stats.n_closures.set(ccx.stats.n_closures.get() + 1);
+ maybe_load_env: <'blk, 'tcx> |Block<'blk, 'tcx>, ScopeId|
+ -> Block<'blk, 'tcx>) {
+ ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
set_uwtable(llfndecl);
ty_to_string(ccx.tcx(), *monomorphized_arg_type));
}
debug!("trans_closure: function lltype: {}",
- bcx.fcx.ccx.tn.val_to_string(bcx.fcx.llfn));
+ bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
let arg_datums = if abi != RustCall {
create_datums_for_fn_args(&fcx,
                param_substs: &param_substs,
id: ast::NodeId,
attrs: &[ast::Attribute]) {
- let _s = StatRecorder::new(ccx, ccx.tcx.map.path_to_string(id).to_string());
+ let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
let _icx = push_ctxt("trans_fn");
let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
llfndecl);
}
-pub fn trans_named_tuple_constructor<'a>(mut bcx: &'a Block<'a>,
- ctor_ty: ty::t,
- disr: ty::Disr,
- args: callee::CallArgs,
- dest: expr::Dest) -> Result<'a> {
+pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ ctor_ty: ty::t,
+ disr: ty::Disr,
+ args: callee::CallArgs,
+ dest: expr::Dest) -> Result<'blk, 'tcx> {
let ccx = bcx.fcx.ccx;
- let tcx = &ccx.tcx;
+ let tcx = ccx.tcx();
let result_ty = match ty::get(ctor_ty).sty {
ty::ty_bare_fn(ref bft) => bft.sig.output,
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
- let levels = ccx.tcx.node_lint_levels.borrow();
+ let levels = ccx.tcx().node_lint_levels.borrow();
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCE);
let lvlsrc = match levels.find(&(id, lint_id)) {
None | Some(&(lint::Allow, _)) => return,
}
}
-pub struct TransItemVisitor<'a> {
- pub ccx: &'a CrateContext,
+pub struct TransItemVisitor<'a, 'tcx: 'a> {
+ pub ccx: &'a CrateContext<'a, 'tcx>,
}
-impl<'a> Visitor<()> for TransItemVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for TransItemVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _:()) {
trans_item(self.ccx, i);
}
}
+/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
+pub enum ValueOrigin {
+ /// The LLVM `Value` is in this context because the corresponding item was
+ /// assigned to the current compilation unit.
+ OriginalTranslation,
+ /// The `Value`'s corresponding item was assigned to some other compilation
+ /// unit, but the `Value` was translated in this context anyway because the
+ /// item is marked `#[inline]`.
+ InlinedCopy,
+}
+
+/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
+/// If `llval` is the direct translation of a specific Rust item, `id`
+/// should be set to the `NodeId` of that item. (This mapping should be
+/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
+/// `None`.) `llval_origin` indicates whether `llval` is the translation of an
+/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
+/// assigned to a different compilation unit.
+pub fn update_linkage(ccx: &CrateContext,
+ llval: ValueRef,
+ id: Option<ast::NodeId>,
+ llval_origin: ValueOrigin) {
+ match llval_origin {
+ InlinedCopy => {
+ // `llval` is a translation of an item defined in a separate
+ // compilation unit. This only makes sense if there are at least
+ // two compilation units.
+ assert!(ccx.sess().opts.cg.codegen_units > 1);
+ // `llval` is a copy of something defined elsewhere, so use
+ // `AvailableExternallyLinkage` to avoid duplicating code in the
+ // output.
+ llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
+ return;
+ },
+ OriginalTranslation => {},
+ }
+
+ match id {
+ Some(id) if ccx.reachable().contains(&id) => {
+ llvm::SetLinkage(llval, llvm::ExternalLinkage);
+ },
+ _ => {
+ // `id` does not refer to an item in `ccx.reachable`.
+ if ccx.sess().opts.cg.codegen_units > 1 {
+ llvm::SetLinkage(llval, llvm::ExternalLinkage);
+ } else {
+ llvm::SetLinkage(llval, llvm::InternalLinkage);
+ }
+ },
+ }
+}
+
pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
let _icx = push_ctxt("trans_item");
+
+ let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
+
match item.node {
ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
if !generics.is_type_parameterized() {
- let llfn = get_item_val(ccx, item.id);
- if abi != Rust {
- foreign::trans_rust_fn_with_foreign_abi(ccx,
- &**decl,
- &**body,
- item.attrs.as_slice(),
- llfn,
-                                                        &param_substs::empty(),
- item.id,
- None);
- } else {
- trans_fn(ccx,
- &**decl,
- &**body,
- llfn,
-                         &param_substs::empty(),
- item.id,
- item.attrs.as_slice());
+ let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
+ // Ignore `trans_everywhere` for cross-crate inlined items
+ // (`from_external`). `trans_item` will be called once for each
+ // compilation unit that references the item, so it will still get
+ // translated everywhere it's needed.
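+            // `maybe_iter(true)` yields each codegen unit in turn, with
+            // `is_origin` marking the unit the item was actually assigned
+            // to; `maybe_iter(false)` yields only the current unit.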
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ let llfn = get_item_val(ccx, item.id);
+ if abi != Rust {
+ foreign::trans_rust_fn_with_foreign_abi(ccx,
+ &**decl,
+ &**body,
+ item.attrs.as_slice(),
+ llfn,
+                                                        &param_substs::empty(),
+ item.id,
+ None);
+ } else {
+ trans_fn(ccx,
+ &**decl,
+ &**body,
+ llfn,
+                             &param_substs::empty(),
+ item.id,
+ item.attrs.as_slice());
+ }
+ update_linkage(ccx,
+ llfn,
+ Some(item.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
}
}
item.id);
}
ast::ItemMod(ref m) => {
- trans_mod(ccx, m);
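+        // Rotate to another codegen unit before descending into the module,
+        // so its items end up distributed across compilation units.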
+ trans_mod(&ccx.rotate(), m);
}
ast::ItemEnum(ref enum_definition, _) => {
enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
// Recurse on the expression to catch items in blocks
let mut v = TransItemVisitor{ ccx: ccx };
v.visit_expr(&**expr, ());
- consts::trans_const(ccx, m, item.id);
+
+ let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ consts::trans_const(ccx, m, item.id);
+
+ let g = get_item_val(ccx, item.id);
+ update_linkage(ccx,
+ g,
+ Some(item.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
+ }
+
// Do static_assert checking. It can't really be done much earlier
// because we need to get the value of the bool out of LLVM
if attr::contains_name(item.attrs.as_slice(), "static_assert") {
static");
}
- let v = ccx.const_values.borrow().get_copy(&item.id);
+ let v = ccx.const_values().borrow().get_copy(&item.id);
unsafe {
if !(llvm::LLVMConstIntGetZExtValue(v) != 0) {
ccx.sess().span_fatal(expr.span, "static assertion failed");
fn finish_register_fn(ccx: &CrateContext, sp: Span, sym: String, node_id: ast::NodeId,
llfn: ValueRef) {
- ccx.item_symbols.borrow_mut().insert(node_id, sym);
-
- if !ccx.reachable.contains(&node_id) {
- llvm::SetLinkage(llfn, llvm::InternalLinkage);
- }
+ ccx.item_symbols().borrow_mut().insert(node_id, sym);
// The stack exhaustion lang item shouldn't have a split stack because
// otherwise it would continue to be exhausted (bad), and both it and the
// eh_personality functions need to be externally linkable.
let def = ast_util::local_def(node_id);
- if ccx.tcx.lang_items.stack_exhausted() == Some(def) {
+ if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
unset_split_stack(llfn);
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
- if ccx.tcx.lang_items.eh_personality() == Some(def) {
+ if ccx.tcx().lang_items.eh_personality() == Some(def) {
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
ty::ty_bare_fn(ref f) => (f.sig.clone(), f.abi, false),
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let ref function_type = unboxed_closures.get(&closure_did)
.closure_type;
fn create_entry_fn(ccx: &CrateContext,
rust_main: ValueRef,
use_start_lang_item: bool) {
- let llfty = Type::func([ccx.int_type, Type::i8p(ccx).ptr_to()],
- &ccx.int_type);
+ let llfty = Type::func([ccx.int_type(), Type::i8p(ccx).ptr_to()],
+ &ccx.int_type());
let llfn = decl_cdecl_fn(ccx, "main", llfty, ty::mk_nil());
let llbb = "top".with_c_str(|buf| {
unsafe {
- llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, buf)
}
});
- let bld = ccx.builder.b;
+ let bld = ccx.raw_builder();
unsafe {
llvm::LLVMPositionBuilderAtEnd(bld, llbb);
let (start_fn, args) = if use_start_lang_item {
- let start_def_id = match ccx.tcx.lang_items.require(StartFnLangItem) {
+ let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
Ok(id) => id,
Err(s) => { ccx.sess().fatal(s.as_slice()); }
};
fn exported_name(ccx: &CrateContext, id: ast::NodeId,
ty: ty::t, attrs: &[ast::Attribute]) -> String {
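+    // An item inlined from another crate must reuse the symbol of the
+    // original crate's copy rather than minting a fresh one.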
+ match ccx.external_srcs().borrow().find(&id) {
+ Some(&did) => {
+ let sym = csearch::get_symbol(&ccx.sess().cstore, did);
+ debug!("found item {} in other crate...", sym);
+ return sym;
+ }
+ None => {}
+ }
+
match attr::first_attr_value_str_by_name(attrs, "export_name") {
// Use provided name
Some(name) => name.get().to_string(),
- _ => ccx.tcx.map.with_path(id, |mut path| {
+ _ => ccx.tcx().map.with_path(id, |mut path| {
if attr::contains_name(attrs, "no_mangle") {
// Don't mangle
path.last().unwrap().to_string()
pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
debug!("get_item_val(id=`{:?}`)", id);
- match ccx.item_vals.borrow().find_copy(&id) {
+ match ccx.item_vals().borrow().find_copy(&id) {
Some(v) => return v,
None => {}
}
- let mut foreign = false;
- let item = ccx.tcx.map.get(id);
+ let item = ccx.tcx().map.get(id);
let val = match item {
ast_map::NodeItem(i) => {
let ty = ty::node_id_to_type(ccx.tcx(), i.id);
// using the current crate's name/version
// information in the hash of the symbol
debug!("making {}", sym);
- let (sym, is_local) = {
- match ccx.external_srcs.borrow().find(&i.id) {
- Some(&did) => {
- debug!("but found in other crate...");
- (csearch::get_symbol(&ccx.sess().cstore,
- did), false)
- }
- None => (sym, true)
- }
- };
+ let is_local = !ccx.external_srcs().borrow().contains_key(&id);
// We need the translated value here, because for enums the
// LLVM type is not fully determined by the Rust type.
let (v, inlineable, _) = consts::const_expr(ccx, &**expr, is_local);
- ccx.const_values.borrow_mut().insert(id, v);
+ ccx.const_values().borrow_mut().insert(id, v);
let mut inlineable = inlineable;
unsafe {
let llty = llvm::LLVMTypeOf(v);
let g = sym.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty, buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty, buf)
});
- if !ccx.reachable.contains(&id) {
- llvm::SetLinkage(g, llvm::InternalLinkage);
- }
-
// Apply the `unnamed_addr` attribute if
// requested
if !ast_util::static_has_significant_address(
if !inlineable {
debug!("{} not inlined", sym);
- ccx.non_inlineable_statics.borrow_mut()
+ ccx.non_inlineable_statics().borrow_mut()
.insert(id);
}
- ccx.item_symbols.borrow_mut().insert(i.id, sym);
+ ccx.item_symbols().borrow_mut().insert(i.id, sym);
g
}
}
}
ast_map::NodeForeignItem(ni) => {
- foreign = true;
-
match ni.node {
ast::ForeignItemFn(..) => {
- let abi = ccx.tcx.map.get_foreign_abi(id);
+ let abi = ccx.tcx().map.get_foreign_abi(id);
let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
let name = foreign::link_name(&*ni);
foreign::register_foreign_item_fn(ccx, abi, ty,
};
assert!(args.len() != 0u);
let ty = ty::node_id_to_type(ccx.tcx(), id);
- let parent = ccx.tcx.map.get_parent(id);
- let enm = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let enm = ccx.tcx().map.expect_item(parent);
let sym = exported_name(ccx,
id,
ty,
}
Some(ctor_id) => ctor_id,
};
- let parent = ccx.tcx.map.get_parent(id);
- let struct_item = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let struct_item = ccx.tcx().map.expect_item(parent);
let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
let sym = exported_name(ccx,
id,
}
};
- // foreign items (extern fns and extern statics) don't have internal
- // linkage b/c that doesn't quite make sense. Otherwise items can
- // have internal linkage if they're not reachable.
- if !foreign && !ccx.reachable.contains(&id) {
- llvm::SetLinkage(val, llvm::InternalLinkage);
- }
+ // All LLVM globals and functions are initially created as external-linkage
+ // declarations. If `trans_item`/`trans_fn` later turns the declaration
+ // into a definition, it adjusts the linkage then (using `update_linkage`).
+ //
+ // The exception is foreign items, which have their linkage set inside the
+ // call to `foreign::register_*` above. We don't touch the linkage after
+ // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
+ // other item translation functions do).
- ccx.item_vals.borrow_mut().insert(id, val);
+ ccx.item_vals().borrow_mut().insert(id, val);
val
}
pub fn p2i(ccx: &CrateContext, v: ValueRef) -> ValueRef {
unsafe {
- return llvm::LLVMConstPtrToInt(v, ccx.int_type.to_ref());
+ return llvm::LLVMConstPtrToInt(v, ccx.int_type().to_ref());
}
}
-pub fn crate_ctxt_to_encode_parms<'r>(cx: &'r CrateContext, ie: encoder::EncodeInlinedItem<'r>)
- -> encoder::EncodeParams<'r> {
- encoder::EncodeParams {
- diag: cx.sess().diagnostic(),
- tcx: cx.tcx(),
- reexports2: &cx.exp_map2,
- item_symbols: &cx.item_symbols,
- non_inlineable_statics: &cx.non_inlineable_statics,
- link_meta: &cx.link_meta,
- cstore: &cx.sess().cstore,
- encode_inlined_item: ie,
- reachable: &cx.reachable,
- }
+pub fn crate_ctxt_to_encode_parms<'a, 'tcx>(cx: &'a SharedCrateContext<'tcx>,
+ ie: encoder::EncodeInlinedItem<'a>)
+ -> encoder::EncodeParams<'a, 'tcx> {
+ encoder::EncodeParams {
+ diag: cx.sess().diagnostic(),
+ tcx: cx.tcx(),
+ reexports2: cx.exp_map2(),
+ item_symbols: cx.item_symbols(),
+ non_inlineable_statics: cx.non_inlineable_statics(),
+ link_meta: cx.link_meta(),
+ cstore: &cx.sess().cstore,
+ encode_inlined_item: ie,
+ reachable: cx.reachable(),
+ }
}
-pub fn write_metadata(cx: &CrateContext, krate: &ast::Crate) -> Vec<u8> {
+pub fn write_metadata(cx: &SharedCrateContext, krate: &ast::Crate) -> Vec<u8> {
use flate;
let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
cx.sess().fatal("failed to compress metadata")
}
}.as_slice());
- let llmeta = C_bytes(cx, compressed.as_slice());
- let llconst = C_struct(cx, [llmeta], false);
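+    // The metadata constant lives in a dedicated LLVM module and context,
+    // separate from the per-unit codegen modules, so build it there directly.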
+ let llmeta = C_bytes_in_context(cx.metadata_llcx(), compressed.as_slice());
+ let llconst = C_struct_in_context(cx.metadata_llcx(), [llmeta], false);
let name = format!("rust_metadata_{}_{}",
- cx.link_meta.crate_name,
- cx.link_meta.crate_hash);
+ cx.link_meta().crate_name,
+ cx.link_meta().crate_hash);
let llglobal = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(cx.metadata_llmod, val_ty(llconst).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf)
}
});
unsafe {
return metadata;
}
+/// Find any symbols that are defined in one compilation unit, but not declared
+/// in any other compilation unit. Give these symbols internal linkage.
+fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<String>) {
+ use std::c_str::CString;
+
+ unsafe {
+ let mut declared = HashSet::new();
+
+ let iter_globals = |llmod| {
+ ValueIter {
+ cur: llvm::LLVMGetFirstGlobal(llmod),
+ step: llvm::LLVMGetNextGlobal,
+ }
+ };
+
+ let iter_functions = |llmod| {
+ ValueIter {
+ cur: llvm::LLVMGetFirstFunction(llmod),
+ step: llvm::LLVMGetNextFunction,
+ }
+ };
+
+ // Collect all external declarations in all compilation units.
+ for ccx in cx.iter() {
+ for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+ let linkage = llvm::LLVMGetLinkage(val);
+ // We only care about external declarations (not definitions)
+ // and available_externally definitions.
+ if !(linkage == llvm::ExternalLinkage as c_uint &&
+ llvm::LLVMIsDeclaration(val) != 0) &&
+ !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
+ continue
+ }
+
+ let name = CString::new(llvm::LLVMGetValueName(val), false);
+ declared.insert(name);
+ }
+ }
+
+ // Examine each external definition. If the definition is not used in
+ // any other compilation unit, and is not reachable from other crates,
+ // then give it internal linkage.
+ for ccx in cx.iter() {
+ for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+ // We only care about external definitions.
+ if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
+ llvm::LLVMIsDeclaration(val) == 0) {
+ continue
+ }
+
+ let name = CString::new(llvm::LLVMGetValueName(val), false);
+ if !declared.contains(&name) &&
+ !reachable.contains_equiv(&name.as_str().unwrap()) {
+ llvm::SetLinkage(val, llvm::InternalLinkage);
+ }
+ }
+ }
+ }
+
+
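+    // Walks an LLVM module's globals or functions, driven by a matching
+    // first/next pair of LLVM accessors.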
+ struct ValueIter {
+ cur: ValueRef,
+ step: unsafe extern "C" fn(ValueRef) -> ValueRef,
+ }
+
+ impl Iterator<ValueRef> for ValueIter {
+ fn next(&mut self) -> Option<ValueRef> {
+ let old = self.cur;
+ if !old.is_null() {
+ self.cur = unsafe { (self.step)(old) };
+ Some(old)
+ } else {
+ None
+ }
+ }
+ }
+}
+
pub fn trans_crate(krate: ast::Crate,
analysis: CrateAnalysis) -> (ty::ctxt, CrateTranslation) {
let CrateAnalysis { ty_cx: tcx, exp_map2, reachable, name, .. } = analysis;
let link_meta = link::build_link_meta(&tcx.sess, &krate, name);
- // Append ".rs" to crate name as LLVM module identifier.
- //
- // LLVM code generator emits a ".file filename" directive
- // for ELF backends. Value of the "filename" is set as the
- // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
- // crashes if the module identifier is same as other symbols
- // such as a function name in the module.
- // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
- let mut llmod_id = link_meta.crate_name.clone();
- llmod_id.push_str(".rs");
-
- let ccx = CrateContext::new(llmod_id.as_slice(), tcx, exp_map2,
- Sha256::new(), link_meta, reachable);
-
- // First, verify intrinsics.
- intrinsic::check_intrinsics(&ccx);
-
- // Next, translate the module.
+ let codegen_units = tcx.sess.opts.cg.codegen_units;
+ let shared_ccx = SharedCrateContext::new(link_meta.crate_name.as_slice(),
+ codegen_units,
+ tcx,
+ exp_map2,
+ Sha256::new(),
+ link_meta.clone(),
+ reachable);
+
{
- let _icx = push_ctxt("text");
- trans_mod(&ccx, &krate.module);
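+        // Translation starts in codegen unit 0; `trans_mod` rotates to the
+        // other units as it recurses into modules.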
+ let ccx = shared_ccx.get_ccx(0);
+
+ // First, verify intrinsics.
+ intrinsic::check_intrinsics(&ccx);
+
+ // Next, translate the module.
+ {
+ let _icx = push_ctxt("text");
+ trans_mod(&ccx, &krate.module);
+ }
}
- glue::emit_tydescs(&ccx);
- if ccx.sess().opts.debuginfo != NoDebugInfo {
- debuginfo::finalize(&ccx);
+ for ccx in shared_ccx.iter() {
+ glue::emit_tydescs(&ccx);
+ if ccx.sess().opts.debuginfo != NoDebugInfo {
+ debuginfo::finalize(&ccx);
+ }
}
// Translate the metadata.
- let metadata = write_metadata(&ccx, &krate);
- if ccx.sess().trans_stats() {
+ let metadata = write_metadata(&shared_ccx, &krate);
+
+ if shared_ccx.sess().trans_stats() {
+ let stats = shared_ccx.stats();
println!("--- trans stats ---");
- println!("n_static_tydescs: {}", ccx.stats.n_static_tydescs.get());
- println!("n_glues_created: {}", ccx.stats.n_glues_created.get());
- println!("n_null_glues: {}", ccx.stats.n_null_glues.get());
- println!("n_real_glues: {}", ccx.stats.n_real_glues.get());
-
- println!("n_fns: {}", ccx.stats.n_fns.get());
- println!("n_monos: {}", ccx.stats.n_monos.get());
- println!("n_inlines: {}", ccx.stats.n_inlines.get());
- println!("n_closures: {}", ccx.stats.n_closures.get());
+ println!("n_static_tydescs: {}", stats.n_static_tydescs.get());
+ println!("n_glues_created: {}", stats.n_glues_created.get());
+ println!("n_null_glues: {}", stats.n_null_glues.get());
+ println!("n_real_glues: {}", stats.n_real_glues.get());
+
+ println!("n_fns: {}", stats.n_fns.get());
+ println!("n_monos: {}", stats.n_monos.get());
+ println!("n_inlines: {}", stats.n_inlines.get());
+ println!("n_closures: {}", stats.n_closures.get());
println!("fn stats:");
- ccx.stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
+ stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
insns_b.cmp(&insns_a)
});
- for tuple in ccx.stats.fn_stats.borrow().iter() {
+ for tuple in stats.fn_stats.borrow().iter() {
match *tuple {
(ref name, ms, insns) => {
println!("{} insns, {} ms, {}", insns, ms, *name);
}
}
}
- if ccx.sess().count_llvm_insns() {
- for (k, v) in ccx.stats.llvm_insns.borrow().iter() {
+ if shared_ccx.sess().count_llvm_insns() {
+ for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
println!("{:7u} {}", *v, *k);
}
}
- let llcx = ccx.llcx;
- let link_meta = ccx.link_meta.clone();
- let llmod = ccx.llmod;
+ let modules = shared_ccx.iter()
+ .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
+ .collect();
- let mut reachable: Vec<String> = ccx.reachable.iter().filter_map(|id| {
- ccx.item_symbols.borrow().find(id).map(|s| s.to_string())
+ let mut reachable: Vec<String> = shared_ccx.reachable().iter().filter_map(|id| {
+ shared_ccx.item_symbols().borrow().find(id).map(|s| s.to_string())
}).collect();
// For the purposes of LTO, we add to the reachable set all of the upstream
// reachable extern fns. These functions are all part of the public ABI of
// the final product, so LTO needs to preserve them.
- ccx.sess().cstore.iter_crate_data(|cnum, _| {
- let syms = csearch::get_reachable_extern_fns(&ccx.sess().cstore, cnum);
+ shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
+ let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
reachable.extend(syms.move_iter().map(|did| {
- csearch::get_symbol(&ccx.sess().cstore, did)
+ csearch::get_symbol(&shared_ccx.sess().cstore, did)
}));
});
// referenced from rt/rust_try.ll
reachable.push("rust_eh_personality_catch".to_string());
- let metadata_module = ccx.metadata_llmod;
- let formats = ccx.tcx.dependency_formats.borrow().clone();
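+    // With a single codegen unit, `update_linkage` internalizes unreachable
+    // items directly; with several, cross-unit references forced external
+    // linkage, so prune to internal linkage now that every unit is built.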
+ if codegen_units > 1 {
+ internalize_symbols(&shared_ccx, &reachable.iter().map(|x| x.clone()).collect());
+ }
+
+ let metadata_module = ModuleTranslation {
+ llcx: shared_ccx.metadata_llcx(),
+ llmod: shared_ccx.metadata_llmod(),
+ };
+ let formats = shared_ccx.tcx().dependency_formats.borrow().clone();
let no_builtins = attr::contains_name(krate.attrs.as_slice(), "no_builtins");
- (ccx.tcx, CrateTranslation {
- context: llcx,
- module: llmod,
- link: link_meta,
+ let translation = CrateTranslation {
+ modules: modules,
metadata_module: metadata_module,
+ link: link_meta,
metadata: metadata,
reachable: reachable,
crate_formats: formats,
no_builtins: no_builtins,
- })
+ };
+
+ (shared_ccx.take_tcx(), translation)
}
use libc::{c_uint, c_ulonglong, c_char};
-pub fn terminate(cx: &Block, _: &str) {
+pub fn terminate(cx: Block, _: &str) {
debug!("terminate({})", cx.to_str());
cx.terminated.set(true);
}
-pub fn check_not_terminated(cx: &Block) {
+pub fn check_not_terminated(cx: Block) {
if cx.terminated.get() {
fail!("already terminated!");
}
}
-pub fn B<'a>(cx: &'a Block) -> Builder<'a> {
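+// Returns a Builder positioned at the end of `cx`'s basic block, ready to
+// append instructions there.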
+pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> {
let b = cx.fcx.ccx.builder();
b.position_at_end(cx.llbb);
b
// for (fail/break/return statements, call to diverging functions, etc), and
// further instructions to the block should simply be ignored.
-pub fn RetVoid(cx: &Block) {
+pub fn RetVoid(cx: Block) {
if cx.unreachable.get() { return; }
check_not_terminated(cx);
terminate(cx, "RetVoid");
B(cx).ret_void();
}
-pub fn Ret(cx: &Block, v: ValueRef) {
+pub fn Ret(cx: Block, v: ValueRef) {
if cx.unreachable.get() { return; }
check_not_terminated(cx);
terminate(cx, "Ret");
B(cx).ret(v);
}
-pub fn AggregateRet(cx: &Block, ret_vals: &[ValueRef]) {
+pub fn AggregateRet(cx: Block, ret_vals: &[ValueRef]) {
if cx.unreachable.get() { return; }
check_not_terminated(cx);
terminate(cx, "AggregateRet");
B(cx).aggregate_ret(ret_vals);
}
-pub fn Br(cx: &Block, dest: BasicBlockRef) {
+pub fn Br(cx: Block, dest: BasicBlockRef) {
if cx.unreachable.get() { return; }
check_not_terminated(cx);
terminate(cx, "Br");
B(cx).br(dest);
}
-pub fn CondBr(cx: &Block,
+pub fn CondBr(cx: Block,
if_: ValueRef,
then: BasicBlockRef,
else_: BasicBlockRef) {
B(cx).cond_br(if_, then, else_);
}
-pub fn Switch(cx: &Block, v: ValueRef, else_: BasicBlockRef, num_cases: uint)
+pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: uint)
-> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
check_not_terminated(cx);
}
}
-pub fn IndirectBr(cx: &Block, addr: ValueRef, num_dests: uint) {
+pub fn IndirectBr(cx: Block, addr: ValueRef, num_dests: uint) {
if cx.unreachable.get() { return; }
check_not_terminated(cx);
terminate(cx, "IndirectBr");
B(cx).indirect_br(addr, num_dests);
}
-pub fn Invoke(cx: &Block,
+pub fn Invoke(cx: Block,
fn_: ValueRef,
args: &[ValueRef],
then: BasicBlockRef,
B(cx).invoke(fn_, args, then, catch, attributes)
}
-pub fn Unreachable(cx: &Block) {
+pub fn Unreachable(cx: Block) {
if cx.unreachable.get() {
return
}
}
/* Arithmetic */
-pub fn Add(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Add(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).add(lhs, rhs)
}
-pub fn NSWAdd(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NSWAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nswadd(lhs, rhs)
}
-pub fn NUWAdd(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NUWAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nuwadd(lhs, rhs)
}
-pub fn FAdd(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fadd(lhs, rhs)
}
-pub fn Sub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Sub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).sub(lhs, rhs)
}
-pub fn NSWSub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NSWSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nswsub(lhs, rhs)
}
-pub fn NUWSub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NUWSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nuwsub(lhs, rhs)
}
-pub fn FSub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fsub(lhs, rhs)
}
-pub fn Mul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Mul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).mul(lhs, rhs)
}
-pub fn NSWMul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NSWMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nswmul(lhs, rhs)
}
-pub fn NUWMul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NUWMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nuwmul(lhs, rhs)
}
-pub fn FMul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fmul(lhs, rhs)
}
-pub fn UDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn UDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).udiv(lhs, rhs)
}
-pub fn SDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn SDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).sdiv(lhs, rhs)
}
-pub fn ExactSDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn ExactSDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).exactsdiv(lhs, rhs)
}
-pub fn FDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fdiv(lhs, rhs)
}
-pub fn URem(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn URem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).urem(lhs, rhs)
}
-pub fn SRem(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn SRem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).srem(lhs, rhs)
}
-pub fn FRem(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FRem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).frem(lhs, rhs)
}
-pub fn Shl(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Shl(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).shl(lhs, rhs)
}
-pub fn LShr(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn LShr(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).lshr(lhs, rhs)
}
-pub fn AShr(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn AShr(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).ashr(lhs, rhs)
}
-pub fn And(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn And(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).and(lhs, rhs)
}
-pub fn Or(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Or(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).or(lhs, rhs)
}
-pub fn Xor(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Xor(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).xor(lhs, rhs)
}
-pub fn BinOp(cx: &Block, op: Opcode, lhs: ValueRef, rhs: ValueRef)
+pub fn BinOp(cx: Block, op: Opcode, lhs: ValueRef, rhs: ValueRef)
-> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).binop(op, lhs, rhs)
}
-pub fn Neg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn Neg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).neg(v)
}
-pub fn NSWNeg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn NSWNeg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).nswneg(v)
}
-pub fn NUWNeg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn NUWNeg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).nuwneg(v)
}
-pub fn FNeg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn FNeg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).fneg(v)
}
-pub fn Not(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn Not(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).not(v)
}
/* Memory */
-pub fn Malloc(cx: &Block, ty: Type) -> ValueRef {
+pub fn Malloc(cx: Block, ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
}
-pub fn ArrayMalloc(cx: &Block, ty: Type, val: ValueRef) -> ValueRef {
+pub fn ArrayMalloc(cx: Block, ty: Type, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
}
-pub fn Alloca(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
AllocaFcx(cx.fcx, ty, name)
b.alloca(ty, name)
}
-pub fn ArrayAlloca(cx: &Block, ty: Type, val: ValueRef) -> ValueRef {
+pub fn ArrayAlloca(cx: Block, ty: Type, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
let b = cx.fcx.ccx.builder();
}
}
-pub fn Free(cx: &Block, pointer_val: ValueRef) {
+pub fn Free(cx: Block, pointer_val: ValueRef) {
if cx.unreachable.get() { return; }
B(cx).free(pointer_val)
}
-pub fn Load(cx: &Block, pointer_val: ValueRef) -> ValueRef {
+pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
if cx.unreachable.get() {
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
return llvm::LLVMGetUndef(eltty.to_ref());
}
}
}
-pub fn VolatileLoad(cx: &Block, pointer_val: ValueRef) -> ValueRef {
+pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn AtomicLoad(cx: &Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
+pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
if cx.unreachable.get() {
- return llvm::LLVMGetUndef(ccx.int_type.to_ref());
+ return llvm::LLVMGetUndef(ccx.int_type().to_ref());
}
B(cx).atomic_load(pointer_val, order)
}
}
-pub fn LoadRangeAssert(cx: &Block, pointer_val: ValueRef, lo: c_ulonglong,
+pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: c_ulonglong,
hi: c_ulonglong, signed: llvm::Bool) -> ValueRef {
if cx.unreachable.get() {
let ccx = cx.fcx.ccx;
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
unsafe {
llvm::LLVMGetUndef(eltty.to_ref())
}
}
-pub fn Store(cx: &Block, val: ValueRef, ptr: ValueRef) {
+pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) {
if cx.unreachable.get() { return; }
B(cx).store(val, ptr)
}
-pub fn VolatileStore(cx: &Block, val: ValueRef, ptr: ValueRef) {
+pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) {
if cx.unreachable.get() { return; }
B(cx).volatile_store(val, ptr)
}
-pub fn AtomicStore(cx: &Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
+pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
if cx.unreachable.get() { return; }
B(cx).atomic_store(val, ptr, order)
}
-pub fn GEP(cx: &Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
+pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
// Simple wrapper around GEP that takes an array of ints and wraps them
// in C_i32()
#[inline]
-pub fn GEPi(cx: &Block, base: ValueRef, ixs: &[uint]) -> ValueRef {
+pub fn GEPi(cx: Block, base: ValueRef, ixs: &[uint]) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
}
}
-pub fn InBoundsGEP(cx: &Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
+pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
}
}
-pub fn StructGEP(cx: &Block, pointer: ValueRef, idx: uint) -> ValueRef {
+pub fn StructGEP(cx: Block, pointer: ValueRef, idx: uint) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
}
}
-pub fn GlobalString(cx: &Block, _str: *const c_char) -> ValueRef {
+pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
}
-pub fn GlobalStringPtr(cx: &Block, _str: *const c_char) -> ValueRef {
+pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
/* Casts */
-pub fn Trunc(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).trunc(val, dest_ty)
}
}
-pub fn ZExt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).zext(val, dest_ty)
}
}
-pub fn SExt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).sext(val, dest_ty)
}
}
-pub fn FPToUI(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fptoui(val, dest_ty)
}
}
-pub fn FPToSI(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fptosi(val, dest_ty)
}
}
-pub fn UIToFP(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).uitofp(val, dest_ty)
}
}
-pub fn SIToFP(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).sitofp(val, dest_ty)
}
}
-pub fn FPTrunc(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fptrunc(val, dest_ty)
}
}
-pub fn FPExt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fpext(val, dest_ty)
}
}
-pub fn PtrToInt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).ptrtoint(val, dest_ty)
}
}
-pub fn IntToPtr(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).inttoptr(val, dest_ty)
}
}
-pub fn BitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).bitcast(val, dest_ty)
}
}
-pub fn ZExtOrBitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).zext_or_bitcast(val, dest_ty)
}
}
-pub fn SExtOrBitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).sext_or_bitcast(val, dest_ty)
}
}
-pub fn TruncOrBitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).trunc_or_bitcast(val, dest_ty)
}
}
-pub fn Cast(cx: &Block, op: Opcode, val: ValueRef, dest_ty: Type,
+pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type,
_: *const u8)
-> ValueRef {
unsafe {
}
}
-pub fn PointerCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).pointercast(val, dest_ty)
}
}
-pub fn IntCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).intcast(val, dest_ty)
}
}
-pub fn FPCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fpcast(val, dest_ty)
/* Comparisons */
-pub fn ICmp(cx: &Block, op: IntPredicate, lhs: ValueRef, rhs: ValueRef)
+pub fn ICmp(cx: Block, op: IntPredicate, lhs: ValueRef, rhs: ValueRef)
-> ValueRef {
unsafe {
if cx.unreachable.get() {
}
}
-pub fn FCmp(cx: &Block, op: RealPredicate, lhs: ValueRef, rhs: ValueRef)
+pub fn FCmp(cx: Block, op: RealPredicate, lhs: ValueRef, rhs: ValueRef)
-> ValueRef {
unsafe {
if cx.unreachable.get() {
}
/* Miscellaneous instructions */
-pub fn EmptyPhi(cx: &Block, ty: Type) -> ValueRef {
+pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
B(cx).empty_phi(ty)
}
}
-pub fn Phi(cx: &Block, ty: Type, vals: &[ValueRef],
+pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef],
bbs: &[BasicBlockRef]) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
}
}
-pub fn _UndefReturn(cx: &Block, fn_: ValueRef) -> ValueRef {
+pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
let ty = val_ty(fn_);
let retty = if ty.kind() == llvm::Integer {
ty.return_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
B(cx).count_insn("ret_undef");
llvm::LLVMGetUndef(retty.to_ref())
}
}
-pub fn add_span_comment(cx: &Block, sp: Span, text: &str) {
+pub fn add_span_comment(cx: Block, sp: Span, text: &str) {
B(cx).add_span_comment(sp, text)
}
-pub fn add_comment(cx: &Block, text: &str) {
+pub fn add_comment(cx: Block, text: &str) {
B(cx).add_comment(text)
}
-pub fn InlineAsmCall(cx: &Block, asm: *const c_char, cons: *const c_char,
+pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char,
inputs: &[ValueRef], output: Type,
volatile: bool, alignstack: bool,
dia: AsmDialect) -> ValueRef {
B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
}
-pub fn Call(cx: &Block, fn_: ValueRef, args: &[ValueRef],
+pub fn Call(cx: Block, fn_: ValueRef, args: &[ValueRef],
attributes: Option<AttrBuilder>) -> ValueRef {
if cx.unreachable.get() { return _UndefReturn(cx, fn_); }
B(cx).call(fn_, args, attributes)
}
-pub fn CallWithConv(cx: &Block, fn_: ValueRef, args: &[ValueRef], conv: CallConv,
+pub fn CallWithConv(cx: Block, fn_: ValueRef, args: &[ValueRef], conv: CallConv,
attributes: Option<AttrBuilder>) -> ValueRef {
if cx.unreachable.get() { return _UndefReturn(cx, fn_); }
B(cx).call_with_conv(fn_, args, conv, attributes)
}
-pub fn AtomicFence(cx: &Block, order: AtomicOrdering) {
+pub fn AtomicFence(cx: Block, order: AtomicOrdering) {
if cx.unreachable.get() { return; }
B(cx).atomic_fence(order)
}
-pub fn Select(cx: &Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
+pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(then); }
B(cx).select(if_, then, else_)
}
-pub fn VAArg(cx: &Block, list: ValueRef, ty: Type) -> ValueRef {
+pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
B(cx).va_arg(list, ty)
}
}
-pub fn ExtractElement(cx: &Block, vec_val: ValueRef, index: ValueRef) -> ValueRef {
+pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn InsertElement(cx: &Block, vec_val: ValueRef, elt_val: ValueRef,
+pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef,
index: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
}
}
-pub fn ShuffleVector(cx: &Block, v1: ValueRef, v2: ValueRef,
+pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef,
mask: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
}
}
-pub fn VectorSplat(cx: &Block, num_elts: uint, elt_val: ValueRef) -> ValueRef {
+pub fn VectorSplat(cx: Block, num_elts: uint, elt_val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn ExtractValue(cx: &Block, agg_val: ValueRef, index: uint) -> ValueRef {
+pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: uint) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn InsertValue(cx: &Block, agg_val: ValueRef, elt_val: ValueRef, index: uint) -> ValueRef {
+pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: uint) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn IsNull(cx: &Block, val: ValueRef) -> ValueRef {
+pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
}
}
-pub fn IsNotNull(cx: &Block, val: ValueRef) -> ValueRef {
+pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
}
}
-pub fn PtrDiff(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
- if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type.to_ref()); }
+ if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); }
B(cx).ptrdiff(lhs, rhs)
}
}
-pub fn Trap(cx: &Block) {
+pub fn Trap(cx: Block) {
if cx.unreachable.get() { return; }
B(cx).trap();
}
-pub fn LandingPad(cx: &Block, ty: Type, pers_fn: ValueRef,
+pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef,
num_clauses: uint) -> ValueRef {
check_not_terminated(cx);
assert!(!cx.unreachable.get());
B(cx).landing_pad(ty, pers_fn, num_clauses)
}
-pub fn SetCleanup(cx: &Block, landing_pad: ValueRef) {
+pub fn SetCleanup(cx: Block, landing_pad: ValueRef) {
B(cx).set_cleanup(landing_pad)
}
-pub fn Resume(cx: &Block, exn: ValueRef) -> ValueRef {
+pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef {
check_not_terminated(cx);
terminate(cx, "Resume");
B(cx).resume(exn)
}
// Atomic Operations
-pub fn AtomicCmpXchg(cx: &Block, dst: ValueRef,
+pub fn AtomicCmpXchg(cx: Block, dst: ValueRef,
cmp: ValueRef, src: ValueRef,
order: AtomicOrdering,
failure_order: AtomicOrdering) -> ValueRef {
B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order)
}
-pub fn AtomicRMW(cx: &Block, op: AtomicBinOp,
+pub fn AtomicRMW(cx: Block, op: AtomicBinOp,
dst: ValueRef, src: ValueRef,
order: AtomicOrdering) -> ValueRef {
B(cx).atomic_rmw(op, dst, src, order)
use std::string::String;
use syntax::codemap::Span;
-pub struct Builder<'a> {
+pub struct Builder<'a, 'tcx: 'a> {
pub llbuilder: BuilderRef,
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a, 'tcx>,
}
// This is a really awful way to get a zero-length c-string, but better (and a
&cnull as *const c_char
}
-impl<'a> Builder<'a> {
- pub fn new(ccx: &'a CrateContext) -> Builder<'a> {
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
Builder {
- llbuilder: ccx.builder.b,
+ llbuilder: ccx.raw_builder(),
ccx: ccx,
}
}
pub fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
- self.ccx.stats.n_llvm_insns.set(self.ccx
- .stats
+ self.ccx.stats().n_llvm_insns.set(self.ccx
+ .stats()
.n_llvm_insns
.get() + 1);
}
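+        // Also bump this context's local instruction count, which the
+        // rotation between codegen units uses as a load measure.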
+ self.ccx.count_llvm_insn();
if self.ccx.sess().count_llvm_insns() {
base::with_insn_ctxt(|v| {
- let mut h = self.ccx.stats.llvm_insns.borrow_mut();
+ let mut h = self.ccx.stats().llvm_insns.borrow_mut();
// Build version of path with cycles removed.
self.count_insn("invoke");
debug!("Invoke {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let v = [min, max];
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.ccx.llcx,
+ llvm::LLVMMDNodeInContext(self.ccx.llcx(),
v.as_ptr(), v.len() as c_uint));
}
pub fn store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store");
unsafe {
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store.volatile");
unsafe {
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
self.count_insn("store.atomic");
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
else { llvm::False };
let argtys = inputs.iter().map(|v| {
- debug!("Asm Input Type: {:?}", self.ccx.tn.val_to_string(*v));
+ debug!("Asm Input Type: {:?}", self.ccx.tn().val_to_string(*v));
val_ty(*v)
}).collect::<Vec<_>>();
- debug!("Asm Output Type: {:?}", self.ccx.tn.type_to_string(output));
+ debug!("Asm Output Type: {:?}", self.ccx.tn().type_to_string(output));
let fty = Type::func(argtys.as_slice(), &output);
unsafe {
let v = llvm::LLVMInlineAsm(
self.count_insn("call");
debug!("Call {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let r = size % 32;
if r > 0 {
unsafe {
- args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx, r as c_uint)));
+ args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
}
}
TraitItem(MethodData)
}
-pub struct Callee<'a> {
- pub bcx: &'a Block<'a>,
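+/// A translated callee: the (possibly advanced) block together with the
+/// kind of callable that was found.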
+pub struct Callee<'blk, 'tcx: 'blk> {
+ pub bcx: Block<'blk, 'tcx>,
pub data: CalleeData,
}
-fn trans<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> Callee<'a> {
+fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("trans_callee");
debug!("callee::trans(expr={})", expr.repr(bcx.tcx()));
// any other expressions are closures:
return datum_callee(bcx, expr);
- fn datum_callee<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> Callee<'a> {
+ fn datum_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
+ -> Callee<'blk, 'tcx> {
let DatumBlock {bcx: mut bcx, datum} = expr::trans(bcx, expr);
match ty::get(datum.ty).sty {
ty::ty_bare_fn(..) => {
}
}
- fn fn_callee<'a>(bcx: &'a Block<'a>, llfn: ValueRef) -> Callee<'a> {
+ fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef)
+ -> Callee<'blk, 'tcx> {
return Callee {
bcx: bcx,
data: Fn(llfn),
};
}
- fn trans_def<'a>(bcx: &'a Block<'a>, def: def::Def, ref_expr: &ast::Expr)
- -> Callee<'a> {
+ fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: def::Def, ref_expr: &ast::Expr)
+ -> Callee<'blk, 'tcx> {
debug!("trans_def(def={}, ref_expr={})", def.repr(bcx.tcx()), ref_expr.repr(bcx.tcx()));
let expr_ty = node_id_type(bcx, ref_expr.id);
match def {
def::DefFn(did, _) if {
- let def_id = if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(bcx.ccx(), did)
- } else {
- did
- };
- match bcx.tcx().map.find(def_id.node) {
+ let maybe_def_id = inline::get_local_instance(bcx.ccx(), did);
+ let maybe_ast_node = maybe_def_id.and_then(|def_id| bcx.tcx().map
+ .find(def_id.node));
+ match maybe_ast_node {
Some(ast_map::NodeStructCtor(_)) => true,
_ => false
}
_ => false
} => {
let substs = node_id_substs(bcx, ExprId(ref_expr.id));
- let def_id = if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(bcx.ccx(), did)
- } else {
- did
- };
+ let def_id = inline::maybe_instantiate_inline(bcx.ccx(), did);
Callee { bcx: bcx, data: Intrinsic(def_id.node, substs) }
}
def::DefFn(did, _) |
}
}
-pub fn trans_fn_ref(bcx: &Block, def_id: ast::DefId, node: ExprOrMethodCall) -> ValueRef {
+pub fn trans_fn_ref(bcx: Block, def_id: ast::DefId, node: ExprOrMethodCall) -> ValueRef {
/*!
* Translates a reference (with id `ref_id`) to the fn/method
* with id `def_id` into a function pointer. This may require
trans_fn_ref_with_vtables(bcx, def_id, node, substs, vtables)
}
-fn trans_fn_ref_with_vtables_to_callee<'a>(bcx: &'a Block<'a>,
- def_id: ast::DefId,
- ref_id: ast::NodeId,
- substs: subst::Substs,
- vtables: typeck::vtable_res)
- -> Callee<'a> {
+fn trans_fn_ref_with_vtables_to_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ def_id: ast::DefId,
+ ref_id: ast::NodeId,
+ substs: subst::Substs,
+ vtables: typeck::vtable_res)
+ -> Callee<'blk, 'tcx> {
Callee {
bcx: bcx,
data: Fn(trans_fn_ref_with_vtables(bcx,
}
}
-fn resolve_default_method_vtables(bcx: &Block,
+fn resolve_default_method_vtables(bcx: Block,
impl_id: ast::DefId,
substs: &subst::Substs,
impl_vtables: typeck::vtable_res)
/// Translates the adapter that deconstructs a `Box<Trait>` object into
/// `Trait` so that a by-value self method can be called.
-pub fn trans_unboxing_shim(bcx: &Block,
+pub fn trans_unboxing_shim(bcx: Block,
llshimmedfn: ValueRef,
fty: &ty::BareFnTy,
method_id: ast::DefId,
}
pub fn trans_fn_ref_with_vtables(
- bcx: &Block, //
+ bcx: Block, //
def_id: ast::DefId, // def id of fn
node: ExprOrMethodCall, // node id of use of fn; may be zero if N/A
substs: subst::Substs, // values for fn's ty params
// Check whether this fn has an inlined copy and, if so, redirect
// def_id to the local id of the inlined copy.
- let def_id = {
- if def_id.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(ccx, def_id)
- } else {
- def_id
- }
- };
+ let def_id = inline::maybe_instantiate_inline(ccx, def_id);
// We must monomorphise if the fn has type parameters, is a default method,
// or is a named tuple constructor.
// ______________________________________________________________________
// Translating calls
-pub fn trans_call<'a>(
- in_cx: &'a Block<'a>,
- call_ex: &ast::Expr,
- f: &ast::Expr,
- args: CallArgs,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_call<'blk, 'tcx>(in_cx: Block<'blk, 'tcx>,
+ call_ex: &ast::Expr,
+ f: &ast::Expr,
+ args: CallArgs,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_call");
trans_call_inner(in_cx,
Some(common::expr_info(call_ex)),
Some(dest)).bcx
}
-pub fn trans_method_call<'a>(
- bcx: &'a Block<'a>,
- call_ex: &ast::Expr,
- rcvr: &ast::Expr,
- args: CallArgs,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_method_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ call_ex: &ast::Expr,
+ rcvr: &ast::Expr,
+ args: CallArgs,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_method_call");
debug!("trans_method_call(call_ex={})", call_ex.repr(bcx.tcx()));
let method_call = MethodCall::expr(call_ex.id);
Some(dest)).bcx
}
-pub fn trans_lang_call<'a>(
- bcx: &'a Block<'a>,
- did: ast::DefId,
- args: &[ValueRef],
- dest: Option<expr::Dest>)
- -> Result<'a> {
+pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ did: ast::DefId,
+ args: &[ValueRef],
+ dest: Option<expr::Dest>)
+ -> Result<'blk, 'tcx> {
let fty = if did.krate == ast::LOCAL_CRATE {
ty::node_id_to_type(bcx.tcx(), did.node)
} else {
dest)
}
-pub fn trans_call_inner<'a>(
- bcx: &'a Block<'a>,
- call_info: Option<NodeInfo>,
- callee_ty: ty::t,
- get_callee: |bcx: &'a Block<'a>,
- arg_cleanup_scope: cleanup::ScopeId|
- -> Callee<'a>,
- args: CallArgs,
- dest: Option<expr::Dest>)
- -> Result<'a> {
+pub fn trans_call_inner<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ call_info: Option<NodeInfo>,
+ callee_ty: ty::t,
+ get_callee: |bcx: Block<'blk, 'tcx>,
+ arg_cleanup_scope: cleanup::ScopeId|
+ -> Callee<'blk, 'tcx>,
+ args: CallArgs,
+ dest: Option<expr::Dest>)
+ -> Result<'blk, 'tcx> {
/*!
* This behemoth of a function translates function calls.
* Unfortunately, in order to generate more efficient LLVM
ArgOverloadedCall(&'a [Gc<ast::Expr>]),
}
-fn trans_args_under_call_abi<'a>(
- mut bcx: &'a Block<'a>,
+fn trans_args_under_call_abi<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
arg_exprs: &[Gc<ast::Expr>],
fn_ty: ty::t,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId,
ignore_self: bool)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
// Translate the `self` argument first.
let arg_tys = ty::ty_fn_args(fn_ty);
if !ignore_self {
bcx
}
-fn trans_overloaded_call_args<'a>(
- mut bcx: &'a Block<'a>,
+fn trans_overloaded_call_args<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
arg_exprs: &[Gc<ast::Expr>],
fn_ty: ty::t,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId,
ignore_self: bool)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
// Translate the `self` argument first.
let arg_tys = ty::ty_fn_args(fn_ty);
if !ignore_self {
bcx
}
-pub fn trans_args<'a>(
- cx: &'a Block<'a>,
- args: CallArgs,
- fn_ty: ty::t,
- llargs: &mut Vec<ValueRef> ,
- arg_cleanup_scope: cleanup::ScopeId,
- ignore_self: bool,
- abi: synabi::Abi)
- -> &'a Block<'a> {
+pub fn trans_args<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ args: CallArgs,
+ fn_ty: ty::t,
+                              llargs: &mut Vec<ValueRef>,
+ arg_cleanup_scope: cleanup::ScopeId,
+ ignore_self: bool,
+ abi: synabi::Abi)
+ -> Block<'blk, 'tcx> {
debug!("trans_args(abi={})", abi);
let _icx = push_ctxt("trans_args");
DoAutorefArg(ast::NodeId)
}
-pub fn trans_arg_datum<'a>(
- bcx: &'a Block<'a>,
- formal_arg_ty: ty::t,
- arg_datum: Datum<Expr>,
- arg_cleanup_scope: cleanup::ScopeId,
- autoref_arg: AutorefArg)
- -> Result<'a> {
+pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ formal_arg_ty: ty::t,
+ arg_datum: Datum<Expr>,
+ arg_cleanup_scope: cleanup::ScopeId,
+ autoref_arg: AutorefArg)
+ -> Result<'blk, 'tcx> {
let _icx = push_ctxt("trans_arg_datum");
let mut bcx = bcx;
let ccx = bcx.ccx();
use syntax::ast;
use util::ppaux::Repr;
-pub struct CleanupScope<'a> {
+pub struct CleanupScope<'blk, 'tcx: 'blk> {
// The id of this cleanup scope. If the id is None,
// this is a *temporary scope* that is pushed during trans to
// cleanup miscellaneous garbage that trans may generate whose
// lifetime is a subset of some expression. See module doc for
// more details.
- kind: CleanupScopeKind<'a>,
+ kind: CleanupScopeKind<'blk, 'tcx>,
// Cleanups to run upon scope exit.
cleanups: Vec<CleanupObj>,
pub static EXIT_LOOP: uint = 1;
pub static EXIT_MAX: uint = 2;
-pub enum CleanupScopeKind<'a> {
+pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
CustomScopeKind,
AstScopeKind(ast::NodeId),
- LoopScopeKind(ast::NodeId, [&'a Block<'a>, ..EXIT_MAX])
+ LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>, ..EXIT_MAX])
}
#[deriving(PartialEq)]
pub trait Cleanup {
fn must_unwind(&self) -> bool;
fn clean_on_unwind(&self) -> bool;
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a>;
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx>;
}
pub type CleanupObj = Box<Cleanup+'static>;
CustomScope(CustomScopeIndex)
}
-impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
+impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
fn push_ast_cleanup_scope(&self, id: ast::NodeId) {
/*!
* Invoked when we start to trans the code contained
*/
debug!("push_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
// FIXME(#2202) -- currently closure bodies have a parent
// region, which messes up the assertion below, since there
// this new AST scope had better be its immediate child.
let top_scope = self.top_ast_scope();
if top_scope.is_some() {
- assert_eq!(self.ccx.tcx.region_maps.opt_encl_scope(id), top_scope);
+ assert_eq!(self.ccx.tcx().region_maps.opt_encl_scope(id), top_scope);
}
self.push_scope(CleanupScope::new(AstScopeKind(id)));
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
- exits: [&'a Block<'a>, ..EXIT_MAX]) {
+ exits: [Block<'blk, 'tcx>, ..EXIT_MAX]) {
debug!("push_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
assert_eq!(Some(id), self.top_ast_scope());
self.push_scope(CleanupScope::new(LoopScopeKind(id, exits)));
}
fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
/*!
* Removes the cleanup scope for id `cleanup_scope`, which
* must be at the top of the cleanup stack, and generates the
*/
debug!("pop_and_trans_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
*/
debug!("pop_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
}
fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
/*!
* Removes the top cleanup scope from the stack, which must be
* a temporary scope, and generates the code to do its
self.ccx.sess().bug("no loop scope found");
}
- fn normal_exit_block(&'a self,
+ fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: uint) -> BasicBlockRef {
/*!
self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
}
- fn return_exit_block(&'a self) -> BasicBlockRef {
+ fn return_exit_block(&'blk self) -> BasicBlockRef {
/*!
* Returns a block to branch to which will perform all pending
* cleanups and then return from this function
debug!("schedule_lifetime_end({:?}, val={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val));
+ self.ccx.tn().val_to_string(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
debug!("schedule_drop_mem({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
true);
debug!("schedule_drop_immediate({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_free_value({:?}, val={}, heap={:?})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
+ heap);
+
+ self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ }
+
+ fn schedule_free_slice(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap) {
+ /*!
+ * Schedules a call to `free(val)`. Note that this is a shallow
+ * operation.
+ */
+
+ let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
+
+ debug!("schedule_free_slice({:?}, val={}, heap={:?})",
+ cleanup_scope,
+ self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
self.ccx.sess().bug(
format!("no cleanup scope {} found",
- self.ccx.tcx.map.node_to_string(cleanup_scope)).as_slice());
+ self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
}
fn schedule_clean_in_custom_scope(&self,
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
- fn get_landing_pad(&'a self) -> BasicBlockRef {
+ fn get_landing_pad(&'blk self) -> BasicBlockRef {
/*!
* Returns a basic block to branch to in the event of a failure.
* This block will run the failure cleanups and eventually
}
}
-impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> {
+impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
fn top_ast_scope(&self) -> Option<ast::NodeId> {
/*!
* Returns the id of the current top-most AST scope, if any.
}
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
- bcx: &'a Block<'a>,
- scope: &CleanupScope) -> &'a Block<'a> {
+ bcx: Block<'blk, 'tcx>,
+ scope: &CleanupScope) -> Block<'blk, 'tcx> {
/*! Generates the cleanups for `scope` into `bcx` */
let mut bcx = bcx;
self.scopes.borrow().len()
}
- fn push_scope(&self, scope: CleanupScope<'a>) {
+ fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
self.scopes.borrow_mut().push(scope)
}
- fn pop_scope(&self) -> CleanupScope<'a> {
+ fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);
self.scopes.borrow_mut().pop().unwrap()
}
- fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R {
+ fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R {
f(self.scopes.borrow().last().unwrap())
}
- fn trans_cleanups_to_exit_scope(&'a self,
+ fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef {
/*!
prev_llbb
}
- fn get_or_create_landing_pad(&'a self) -> BasicBlockRef {
+ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
/*!
* Creates a landing pad for the top scope, if one does not
* exist. The landing pad will perform all cleanups necessary
let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
None => {
- let mut personality = self.ccx.eh_personality.borrow_mut();
+ let mut personality = self.ccx.eh_personality().borrow_mut();
match *personality {
Some(llpersonality) => llpersonality,
None => {
}
}
-impl<'a> CleanupScope<'a> {
- fn new(kind: CleanupScopeKind<'a>) -> CleanupScope<'a> {
+impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
+ fn new(kind: CleanupScopeKind<'blk, 'tcx>) -> CleanupScope<'blk, 'tcx> {
CleanupScope {
kind: kind,
cleanups: vec!(),
}
}
-impl<'a> CleanupScopeKind<'a> {
+impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
fn is_temp(&self) -> bool {
match *self {
CustomScopeKind => true,
self.must_unwind
}
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty)
} else {
true
}
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
match self.heap {
HeapManaged => {
glue::trans_free(bcx, self.ptr)
}
}
+pub struct FreeSlice {
+ ptr: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap,
+}
+
+impl Cleanup for FreeSlice {
+ fn must_unwind(&self) -> bool {
+ true
+ }
+
+ fn clean_on_unwind(&self) -> bool {
+ true
+ }
+
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
+ match self.heap {
+ HeapManaged => {
+ glue::trans_free(bcx, self.ptr)
+ }
+ HeapExchange => {
+ glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
+ }
+ }
+ }
+}
+
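+// A minimal usage sketch (hypothetical caller, not part of this hunk):
+// after allocating the backing store for a slice, schedule a `FreeSlice`
+// cleanup so the buffer is released with its dynamic size and alignment
+// when the scope exits or unwinds:
+//
+//     fcx.schedule_free_slice(cleanup_scope, llptr, llsize, llalign,
+//                             HeapExchange);
+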
pub struct LifetimeEnd {
ptr: ValueRef,
}
true
}
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
base::call_lifetime_end(bcx, self.ptr);
bcx
}
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
-pub trait CleanupMethods<'a> {
+pub trait CleanupMethods<'blk, 'tcx> {
fn push_ast_cleanup_scope(&self, id: ast::NodeId);
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
- exits: [&'a Block<'a>, ..EXIT_MAX]);
+ exits: [Block<'blk, 'tcx>, ..EXIT_MAX]);
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
- -> &'a Block<'a>;
+ -> Block<'blk, 'tcx>;
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId);
fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex);
fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
- -> &'a Block<'a>;
+ -> Block<'blk, 'tcx>;
fn top_loop_scope(&self) -> ast::NodeId;
- fn normal_exit_block(&'a self,
+ fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: uint) -> BasicBlockRef;
- fn return_exit_block(&'a self) -> BasicBlockRef;
+ fn return_exit_block(&'blk self) -> BasicBlockRef;
fn schedule_lifetime_end(&self,
cleanup_scope: ScopeId,
val: ValueRef);
val: ValueRef,
heap: Heap,
content_ty: ty::t);
+ fn schedule_free_slice(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj);
custom_scope: CustomScopeIndex,
cleanup: CleanupObj);
fn needs_invoke(&self) -> bool;
- fn get_landing_pad(&'a self) -> BasicBlockRef;
+ fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
-trait CleanupHelperMethods<'a> {
+trait CleanupHelperMethods<'blk, 'tcx> {
fn top_ast_scope(&self) -> Option<ast::NodeId>;
fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn trans_scope_cleanups(&self,
- bcx: &'a Block<'a>,
- scope: &CleanupScope<'a>) -> &'a Block<'a>;
- fn trans_cleanups_to_exit_scope(&'a self,
+ bcx: Block<'blk, 'tcx>,
+ scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
+ fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef;
- fn get_or_create_landing_pad(&'a self) -> BasicBlockRef;
+ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
fn scopes_len(&self) -> uint;
- fn push_scope(&self, scope: CleanupScope<'a>);
- fn pop_scope(&self) -> CleanupScope<'a>;
- fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R;
+ fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
+ fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
+ fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R;
}
use middle::trans::datum::{Datum, DatumBlock, Expr, Lvalue, rvalue_scratch_datum};
use middle::trans::debuginfo;
use middle::trans::expr;
-use middle::trans::machine::llsize_of;
use middle::trans::type_of::*;
use middle::trans::type_::Type;
use middle::ty;
ty::mk_tup(tcx, vec!(ty::mk_uint(), ty::mk_nil_ptr(tcx), ptr, ptr, t))
}
-fn allocate_cbox<'a>(bcx: &'a Block<'a>,
- store: ty::TraitStore,
- cdata_ty: ty::t)
- -> Result<'a> {
+fn allocate_cbox<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ store: ty::TraitStore,
+ cdata_ty: ty::t)
+ -> Result<'blk, 'tcx> {
let _icx = push_ctxt("closure::allocate_cbox");
let tcx = bcx.tcx();
// Allocate and initialize the box:
+ let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
match store {
ty::UniqTraitStore => {
- let ty = type_of(bcx.ccx(), cdata_ty);
- let size = llsize_of(bcx.ccx(), ty);
- // we treat proc as @ here, which isn't ideal
- malloc_raw_dyn_managed(bcx, cdata_ty, ClosureExchangeMallocFnLangItem, size)
+ malloc_raw_dyn_proc(bcx, cbox_ty, ClosureExchangeMallocFnLangItem)
}
ty::RegionTraitStore(..) => {
- let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = alloc_ty(bcx, cbox_ty, "__closure");
Result::new(bcx, llbox)
}
}
}
-pub struct ClosureResult<'a> {
+pub struct ClosureResult<'blk, 'tcx: 'blk> {
llbox: ValueRef, // llvalue of ptr to closure
cdata_ty: ty::t, // type of the closure data
- bcx: &'a Block<'a> // final bcx
+ bcx: Block<'blk, 'tcx> // final bcx
}
// Given a block context and a list of tydescs and values to bind, construct
// a closure out of them. If copying is true, it is a heap-allocated closure
// that copies the upvars into the environment. Otherwise, it is stack
// allocated and copies pointers to the upvars.
-pub fn store_environment<'a>(
- bcx: &'a Block<'a>,
- bound_values: Vec<EnvValue> ,
- store: ty::TraitStore)
- -> ClosureResult<'a> {
+pub fn store_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+                                     bound_values: Vec<EnvValue>,
+ store: ty::TraitStore)
+ -> ClosureResult<'blk, 'tcx> {
let _icx = push_ctxt("closure::store_environment");
let ccx = bcx.ccx();
let tcx = ccx.tcx();
// Given a context and a list of upvars, build a closure. This just
// collects the upvars and packages them up for store_environment.
-fn build_closure<'a>(bcx0: &'a Block<'a>,
- freevar_mode: freevars::CaptureMode,
- freevars: &Vec<freevars::freevar_entry>,
- store: ty::TraitStore)
- -> ClosureResult<'a>
-{
+fn build_closure<'blk, 'tcx>(bcx0: Block<'blk, 'tcx>,
+ freevar_mode: freevars::CaptureMode,
+ freevars: &Vec<freevars::freevar_entry>,
+ store: ty::TraitStore)
+ -> ClosureResult<'blk, 'tcx> {
let _icx = push_ctxt("closure::build_closure");
// If we need to, package up the iterator body to call
// Given an enclosing block context, a new function context, a closure type,
// and a list of upvars, generate code to load and populate the environment
// with the upvars and type descriptors.
-fn load_environment<'a>(bcx: &'a Block<'a>,
- cdata_ty: ty::t,
- freevars: &Vec<freevars::freevar_entry>,
- store: ty::TraitStore)
- -> &'a Block<'a> {
+fn load_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ cdata_ty: ty::t,
+ freevars: &Vec<freevars::freevar_entry>,
+ store: ty::TraitStore)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::load_environment");
// Don't bother to create the block if there's nothing to load
bcx
}
-fn load_unboxed_closure_environment<'a>(
- bcx: &'a Block<'a>,
+fn load_unboxed_closure_environment<'blk, 'tcx>(
+ bcx: Block<'blk, 'tcx>,
arg_scope_id: ScopeId,
freevars: &Vec<freevars::freevar_entry>,
closure_id: ast::DefId)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::load_environment");
if freevars.len() == 0 {
bcx
}
-fn fill_fn_pair(bcx: &Block, pair: ValueRef, llfn: ValueRef, llenvptr: ValueRef) {
+fn fill_fn_pair(bcx: Block, pair: ValueRef, llfn: ValueRef, llenvptr: ValueRef) {
Store(bcx, llfn, GEPi(bcx, pair, [0u, abi::fn_field_code]));
let llenvptr = PointerCast(bcx, llenvptr, Type::i8p(bcx.ccx()));
Store(bcx, llenvptr, GEPi(bcx, pair, [0u, abi::fn_field_box]));
}
-pub fn trans_expr_fn<'a>(
- bcx: &'a Block<'a>,
- store: ty::TraitStore,
- decl: &ast::FnDecl,
- body: &ast::Block,
- id: ast::NodeId,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_expr_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ store: ty::TraitStore,
+ decl: &ast::FnDecl,
+ body: &ast::Block,
+ id: ast::NodeId,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
/*!
*
* Translates the body of a closure expression.
pub fn get_or_create_declaration_if_unboxed_closure(ccx: &CrateContext,
closure_id: ast::DefId)
-> Option<ValueRef> {
- if !ccx.tcx.unboxed_closures.borrow().contains_key(&closure_id) {
+ if !ccx.tcx().unboxed_closures.borrow().contains_key(&closure_id) {
// Not an unboxed closure.
return None
}
- match ccx.unboxed_closure_vals.borrow().find(&closure_id) {
+ match ccx.unboxed_closure_vals().borrow().find(&closure_id) {
Some(llfn) => {
debug!("get_or_create_declaration_if_unboxed_closure(): found \
closure");
None => {}
}
- let function_type = ty::mk_unboxed_closure(&ccx.tcx,
+ let function_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let symbol = ccx.tcx.map.with_path(closure_id.node, |path| {
+ let symbol = ccx.tcx().map.with_path(closure_id.node, |path| {
mangle_internal_name_by_path_and_seq(path, "unboxed_closure")
});
debug!("get_or_create_declaration_if_unboxed_closure(): inserting new \
closure {} (type {})",
closure_id,
- ccx.tn.type_to_string(val_ty(llfn)));
- ccx.unboxed_closure_vals.borrow_mut().insert(closure_id, llfn);
+ ccx.tn().type_to_string(val_ty(llfn)));
+ ccx.unboxed_closure_vals().borrow_mut().insert(closure_id, llfn);
Some(llfn)
}
-pub fn trans_unboxed_closure<'a>(
- mut bcx: &'a Block<'a>,
+pub fn trans_unboxed_closure<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
id: ast::NodeId,
dest: expr::Dest)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::trans_unboxed_closure");
debug!("trans_unboxed_closure()");
}
};
- match ccx.closure_bare_wrapper_cache.borrow().find(&fn_ptr) {
+ match ccx.closure_bare_wrapper_cache().borrow().find(&fn_ptr) {
Some(&llval) => return llval,
None => {}
}
decl_rust_fn(ccx, closure_ty, name.as_slice())
};
- ccx.closure_bare_wrapper_cache.borrow_mut().insert(fn_ptr, llfn);
+ ccx.closure_bare_wrapper_cache().borrow_mut().insert(fn_ptr, llfn);
// This is only used by statics inlined from a different crate.
if !is_local {
llfn
}
-pub fn make_closure_from_bare_fn<'a>(bcx: &'a Block<'a>,
- closure_ty: ty::t,
- def: def::Def,
- fn_ptr: ValueRef)
- -> DatumBlock<'a, Expr> {
+pub fn make_closure_from_bare_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ closure_ty: ty::t,
+ def: def::Def,
+ fn_ptr: ValueRef)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let scratch = rvalue_scratch_datum(bcx, closure_ty, "__adjust");
let wrapper = get_wrapper_for_bare_fn(bcx.ccx(), closure_ty, def, fn_ptr, true);
fill_fn_pair(bcx, scratch.val, wrapper, C_null(Type::i8p(bcx.ccx())));
use driver::session::Session;
use llvm;
-use llvm::{ValueRef, BasicBlockRef, BuilderRef};
+use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef};
use llvm::{True, False, Bool};
use middle::def;
use middle::freevars;
ty::ty_struct(..) | ty::ty_enum(..) | ty::ty_tup(..) |
ty::ty_unboxed_closure(..) => {
let llty = sizing_type_of(ccx, ty);
- llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type)
+ llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
}
_ => type_is_zero_size(ccx, ty)
}
// Function context. Every LLVM function we create will have one of
// these.
-pub struct FunctionContext<'a> {
+pub struct FunctionContext<'a, 'tcx: 'a> {
// The ValueRef returned from a call to llvm::LLVMAddFunction; the
// address of the first instruction in the sequence of
// instructions for this function that will go in the .text
pub span: Option<Span>,
// The arena that blocks are allocated from.
- pub block_arena: &'a TypedArena<Block<'a>>,
+ pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
// This function's enclosing crate context.
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a, 'tcx>,
// Used and maintained by the debuginfo module.
pub debug_context: debuginfo::FunctionDebugContext,
// Cleanup scopes.
- pub scopes: RefCell<Vec<cleanup::CleanupScope<'a>> >,
+ pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
}
-impl<'a> FunctionContext<'a> {
+impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
pub fn arg_pos(&self, arg: uint) -> uint {
let arg = self.env_arg_pos() + arg;
if self.llenv.is_some() {
self.llreturn.set(Some(unsafe {
"return".with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx, self.llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, buf)
})
}))
}
self.llreturn.get().unwrap()
}
- pub fn get_ret_slot(&self, bcx: &Block, ty: ty::t, name: &str) -> ValueRef {
+ pub fn get_ret_slot(&self, bcx: Block, ty: ty::t, name: &str) -> ValueRef {
if self.needs_ret_allocas {
base::alloca_no_lifetime(bcx, type_of::type_of(bcx.ccx(), ty), name)
} else {
is_lpad: bool,
name: &str,
opt_node_id: Option<ast::NodeId>)
- -> &'a Block<'a> {
+ -> Block<'a, 'tcx> {
unsafe {
let llbb = name.with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx,
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
buf)
});
- Block::new(llbb, is_lpad, opt_node_id, self)
+ BlockS::new(llbb, is_lpad, opt_node_id, self)
}
}
pub fn new_id_block(&'a self,
name: &str,
node_id: ast::NodeId)
- -> &'a Block<'a> {
+ -> Block<'a, 'tcx> {
self.new_block(false, name, Some(node_id))
}
pub fn new_temp_block(&'a self,
name: &str)
- -> &'a Block<'a> {
+ -> Block<'a, 'tcx> {
self.new_block(false, name, None)
}
pub fn join_blocks(&'a self,
id: ast::NodeId,
- in_cxs: &[&'a Block<'a>])
- -> &'a Block<'a> {
+ in_cxs: &[Block<'a, 'tcx>])
+ -> Block<'a, 'tcx> {
let out = self.new_id_block("join", id);
let mut reachable = false;
for bcx in in_cxs.iter() {
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
-pub struct Block<'a> {
+pub struct BlockS<'blk, 'tcx: 'blk> {
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
// block to the function pointed to by llfn. We insert
// The function context for the function to which this block is
// attached.
- pub fcx: &'a FunctionContext<'a>,
+ pub fcx: &'blk FunctionContext<'blk, 'tcx>,
}
-impl<'a> Block<'a> {
- pub fn new<'a>(
- llbb: BasicBlockRef,
+pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
+
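+// `Block` is now an alias for a shared reference, so block pointers are
+// implicitly copyable. A minimal sketch (hypothetical helper, not part of
+// this patch) of the by-value calling convention this enables:
+//
+//     fn inspect<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) {
+//         let _ccx = bcx.ccx(); // `bcx` is freely copied,
+//         let _tcx = bcx.tcx(); // no reborrowing needed
+//     }
+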
+impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
+ pub fn new(llbb: BasicBlockRef,
is_lpad: bool,
opt_node_id: Option<ast::NodeId>,
- fcx: &'a FunctionContext<'a>)
- -> &'a Block<'a> {
- fcx.block_arena.alloc(Block {
+ fcx: &'blk FunctionContext<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
+ fcx.block_arena.alloc(BlockS {
llbb: llbb,
terminated: Cell::new(false),
unreachable: Cell::new(false),
})
}
- pub fn ccx(&self) -> &'a CrateContext { self.fcx.ccx }
- pub fn tcx(&self) -> &'a ty::ctxt {
- &self.fcx.ccx.tcx
+ pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
+ self.fcx.ccx
+ }
+ pub fn tcx(&self) -> &'blk ty::ctxt<'tcx> {
+ self.fcx.ccx.tcx()
}
- pub fn sess(&self) -> &'a Session { self.fcx.ccx.sess() }
+ pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
pub fn ident(&self, ident: Ident) -> String {
token::get_ident(ident).get().to_string()
}
pub fn val_to_string(&self, val: ValueRef) -> String {
- self.ccx().tn.val_to_string(val)
+ self.ccx().tn().val_to_string(val)
}
pub fn llty_str(&self, ty: Type) -> String {
- self.ccx().tn.type_to_string(ty)
+ self.ccx().tn().type_to_string(ty)
}
pub fn ty_to_string(&self, t: ty::t) -> String {
}
pub fn to_str(&self) -> String {
- let blk: *const Block = self;
- format!("[block {}]", blk)
+ format!("[block {:p}]", self)
}
}
-impl<'a> mc::Typer for Block<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'blk, 'tcx> mc::Typer<'tcx> for BlockS<'blk, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx()
}
}
}
-pub struct Result<'a> {
- pub bcx: &'a Block<'a>,
+pub struct Result<'blk, 'tcx: 'blk> {
+ pub bcx: Block<'blk, 'tcx>,
pub val: ValueRef
}
-impl<'a> Result<'a> {
- pub fn new(bcx: &'a Block<'a>, val: ValueRef) -> Result<'a> {
+impl<'b, 'tcx> Result<'b, 'tcx> {
+ pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
Result {
bcx: bcx,
val: val,
}
pub fn C_int(ccx: &CrateContext, i: int) -> ValueRef {
- C_integral(ccx.int_type, i as u64, true)
+ C_integral(ccx.int_type(), i as u64, true)
}
pub fn C_uint(ccx: &CrateContext, i: uint) -> ValueRef {
- C_integral(ccx.int_type, i as u64, false)
+ C_integral(ccx.int_type(), i as u64, false)
}
pub fn C_u8(ccx: &CrateContext, i: uint) -> ValueRef {
// our boxed-and-length-annotated strings.
pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
unsafe {
- match cx.const_cstr_cache.borrow().find(&s) {
+ match cx.const_cstr_cache().borrow().find(&s) {
Some(&llval) => return llval,
None => ()
}
- let sc = llvm::LLVMConstStringInContext(cx.llcx,
+ let sc = llvm::LLVMConstStringInContext(cx.llcx(),
s.get().as_ptr() as *const c_char,
s.get().len() as c_uint,
!null_terminated as Bool);
let gsym = token::gensym("str");
let g = format!("str{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(sc).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(sc).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::SetLinkage(g, llvm::InternalLinkage);
- cx.const_cstr_cache.borrow_mut().insert(s, g);
+ cx.const_cstr_cache().borrow_mut().insert(s, g);
g
}
}
let len = s.get().len();
let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s, false),
Type::i8p(cx).to_ref());
- C_named_struct(cx.tn.find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
+ C_named_struct(cx.tn().find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
}
}
let gsym = token::gensym("binary");
let g = format!("binary{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(lldata).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(lldata).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, lldata);
llvm::LLVMSetGlobalConstant(g, True);
}
}
-pub fn C_struct(ccx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+ C_struct_in_context(cx.llcx(), elts, packed)
+}
+
+pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
unsafe {
- llvm::LLVMConstStructInContext(ccx.llcx,
+ llvm::LLVMConstStructInContext(llcx,
elts.as_ptr(), elts.len() as c_uint,
packed as Bool)
}
}
}
-pub fn C_bytes(ccx: &CrateContext, bytes: &[u8]) -> ValueRef {
+pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
+ C_bytes_in_context(cx.llcx(), bytes)
+}
+
+pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
- return llvm::LLVMConstStringInContext(ccx.llcx, ptr, bytes.len() as c_uint, True);
+ return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
}
}
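
// The `_in_context` variants exist because metadata is now emitted into its
// own LLVM context; a hedged sketch (assumed caller, not shown in this hunk)
// of building a metadata blob against a raw `ContextRef`:
//
//     let llconst = C_bytes_in_context(shared.metadata_llcx(), bytes);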
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={}, us={:?}, r={})",
- cx.tn.val_to_string(v), us, cx.tn.val_to_string(r));
+ cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
return r;
}
}
}
-pub fn monomorphize_type(bcx: &Block, t: ty::t) -> ty::t {
+pub fn monomorphize_type(bcx: &BlockS, t: ty::t) -> ty::t {
t.subst(bcx.tcx(), &bcx.fcx.param_substs.substs)
}
-pub fn node_id_type(bcx: &Block, id: ast::NodeId) -> ty::t {
+pub fn node_id_type(bcx: &BlockS, id: ast::NodeId) -> ty::t {
let tcx = bcx.tcx();
let t = ty::node_id_to_type(tcx, id);
monomorphize_type(bcx, t)
}
-pub fn expr_ty(bcx: &Block, ex: &ast::Expr) -> ty::t {
+pub fn expr_ty(bcx: Block, ex: &ast::Expr) -> ty::t {
node_id_type(bcx, ex.id)
}
-pub fn expr_ty_adjusted(bcx: &Block, ex: &ast::Expr) -> ty::t {
+pub fn expr_ty_adjusted(bcx: Block, ex: &ast::Expr) -> ty::t {
monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex))
}
MethodCall(typeck::MethodCall)
}
-pub fn node_id_substs(bcx: &Block,
+pub fn node_id_substs(bcx: Block,
node: ExprOrMethodCall)
-> subst::Substs {
let tcx = bcx.tcx();
substs.substp(tcx, bcx.fcx.param_substs)
}
-pub fn node_vtables(bcx: &Block, id: typeck::MethodCall)
+pub fn node_vtables(bcx: Block, id: typeck::MethodCall)
-> typeck::vtable_res {
bcx.tcx().vtable_map.borrow().find(&id).map(|vts| {
resolve_vtables_in_fn_ctxt(bcx.fcx, vts)
param_bounds.get(n_bound).clone()
}
-pub fn langcall(bcx: &Block,
+pub fn langcall(bcx: Block,
span: Option<Span>,
msg: &str,
li: LangItem)
pub fn const_ptrcast(cx: &CrateContext, a: ValueRef, t: Type) -> ValueRef {
unsafe {
let b = llvm::LLVMConstPointerCast(a, t.ptr_to().to_ref());
- assert!(cx.const_globals.borrow_mut().insert(b as int, a));
+ assert!(cx.const_globals().borrow_mut().insert(b as int, a));
b
}
}
pub fn const_addr_of(cx: &CrateContext, cv: ValueRef, mutbl: ast::Mutability) -> ValueRef {
unsafe {
let gv = "const".with_c_str(|name| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(cv).to_ref(), name)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(cv).to_ref(), name)
});
llvm::LLVMSetInitializer(gv, cv);
llvm::LLVMSetGlobalConstant(gv,
}
fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef {
- let v = match cx.const_globals.borrow().find(&(v as int)) {
+ let v = match cx.const_globals().borrow().find(&(v as int)) {
Some(&v) => v,
None => v
};
pub fn get_const_val(cx: &CrateContext,
mut def_id: ast::DefId) -> (ValueRef, bool) {
- let contains_key = cx.const_values.borrow().contains_key(&def_id.node);
+ let contains_key = cx.const_values().borrow().contains_key(&def_id.node);
if !ast_util::is_local(def_id) || !contains_key {
if !ast_util::is_local(def_id) {
def_id = inline::maybe_instantiate_inline(cx, def_id);
}
- match cx.tcx.map.expect_item(def_id.node).node {
+ match cx.tcx().map.expect_item(def_id.node).node {
ast::ItemStatic(_, ast::MutImmutable, _) => {
trans_const(cx, ast::MutImmutable, def_id.node);
}
}
}
- (cx.const_values.borrow().get_copy(&def_id.node),
- !cx.non_inlineable_statics.borrow().contains(&def_id.node))
+ (cx.const_values().borrow().get_copy(&def_id.node),
+ !cx.non_inlineable_statics().borrow().contains(&def_id.node))
}
pub fn const_expr(cx: &CrateContext, e: &ast::Expr, is_local: bool) -> (ValueRef, bool, ty::t) {
let mut inlineable = inlineable;
let ety = ty::expr_ty(cx.tcx(), e);
let mut ety_adjusted = ty::expr_ty_adjusted(cx.tcx(), e);
- let opt_adj = cx.tcx.adjustments.borrow().find_copy(&e.id);
+ let opt_adj = cx.tcx().adjustments.borrow().find_copy(&e.id);
match opt_adj {
None => { }
Some(adj) => {
(expr::cast_enum, expr::cast_integral) => {
let repr = adt::represent_type(cx, basety);
let discr = adt::const_get_discrim(cx, &*repr, v);
- let iv = C_integral(cx.int_type, discr, false);
+ let iv = C_integral(cx.int_type(), discr, false);
let ety_cast = expr::cast_type_kind(cx.tcx(), ety);
match ety_cast {
expr::cast_integral => {
let g = base::get_item_val(ccx, id);
// At this point, get_item_val has already translated the
// constant's initializer to determine its LLVM type.
- let v = ccx.const_values.borrow().get_copy(&id);
+ let v = ccx.const_values().borrow().get_copy(&id);
llvm::LLVMSetInitializer(g, v);
+
+    // `get_item_val` left `g` with external linkage, but we just set an
+    // initializer for it. We don't know yet whether `g` should really be
+    // defined in this compilation unit, so for now we give it
+    // `AvailableExternallyLinkage`. (It's still a definition, but acts
+    // like a declaration for most purposes.) If `g` really should be
+    // defined here, `trans_item` will fix up the linkage later on.
+ llvm::SetLinkage(g, llvm::AvailableExternallyLinkage);
+
if m != ast::MutMutable {
llvm::LLVMSetGlobalConstant(g, True);
}
use driver::config::NoDebugInfo;
use driver::session::Session;
use llvm;
-use llvm::{ContextRef, ModuleRef, ValueRef};
+use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
use llvm::{TargetData};
use llvm::mk_target_data;
use metadata::common::LinkMeta;
pub fn_stats: RefCell<Vec<(String, uint, uint)> >,
}
-pub struct CrateContext {
- pub llmod: ModuleRef,
- pub llcx: ContextRef,
- pub metadata_llmod: ModuleRef,
- pub td: TargetData,
- pub tn: TypeNames,
- pub externs: RefCell<ExternMap>,
- pub item_vals: RefCell<NodeMap<ValueRef>>,
- pub exp_map2: resolve::ExportMap2,
- pub reachable: NodeSet,
- pub item_symbols: RefCell<NodeMap<String>>,
- pub link_meta: LinkMeta,
- pub drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
- pub tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
+/// The shared portion of a `CrateContext`. There is one `SharedCrateContext`
+/// per crate. The data here is shared between all compilation units of the
+/// crate, so it must not contain references to any LLVM data structures
+/// (aside from metadata-related ones).
+pub struct SharedCrateContext<'tcx> {
+ local_ccxs: Vec<LocalCrateContext>,
+
+ metadata_llmod: ModuleRef,
+ metadata_llcx: ContextRef,
+
+ exp_map2: resolve::ExportMap2,
+ reachable: NodeSet,
+ item_symbols: RefCell<NodeMap<String>>,
+ link_meta: LinkMeta,
+ /// A set of static items which cannot be inlined into other crates. This
+    /// will prevent `IIItem()` structures from being encoded into the
+    /// metadata that is generated.
+ non_inlineable_statics: RefCell<NodeSet>,
+ symbol_hasher: RefCell<Sha256>,
+ tcx: ty::ctxt<'tcx>,
+ stats: Stats,
+
+ available_monomorphizations: RefCell<HashSet<String>>,
+ available_drop_glues: RefCell<HashMap<ty::t, String>>,
+ available_visit_glues: RefCell<HashMap<ty::t, String>>,
+}
+
+/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
+/// per compilation unit. Each one has its own LLVM `ContextRef` so that
+/// several compilation units may be optimized in parallel. All other LLVM
+/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
+pub struct LocalCrateContext {
+ llmod: ModuleRef,
+ llcx: ContextRef,
+ td: TargetData,
+ tn: TypeNames,
+ externs: RefCell<ExternMap>,
+ item_vals: RefCell<NodeMap<ValueRef>>,
+ drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
+ tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
/// Set when running emit_tydescs to enforce that no more tydescs are
/// created.
- pub finished_tydescs: Cell<bool>,
+ finished_tydescs: Cell<bool>,
/// Track mapping of external ids to local items imported for inlining
- pub external: RefCell<DefIdMap<Option<ast::NodeId>>>,
+ external: RefCell<DefIdMap<Option<ast::NodeId>>>,
/// Backwards version of the `external` map (inlined items to where they
/// came from)
- pub external_srcs: RefCell<NodeMap<ast::DefId>>,
- /// A set of static items which cannot be inlined into other crates. This
- /// will prevent in IIItem() structures from being encoded into the metadata
- /// that is generated
- pub non_inlineable_statics: RefCell<NodeSet>,
+ external_srcs: RefCell<NodeMap<ast::DefId>>,
/// Cache instances of monomorphized functions
- pub monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
- pub monomorphizing: RefCell<DefIdMap<uint>>,
+ monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
+ monomorphizing: RefCell<DefIdMap<uint>>,
/// Cache generated vtables
- pub vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
+ vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
/// Cache of constant strings,
- pub const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
+ const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is an int, cast from a ValueRef holding a *T,
/// when we ptrcast, and we have to ptrcast during translation
/// of a [T] const because we form a slice, a [*T,int] pair, not
/// a pointer to an LLVM array type.
- pub const_globals: RefCell<HashMap<int, ValueRef>>,
+ const_globals: RefCell<HashMap<int, ValueRef>>,
/// Cache of emitted const values
- pub const_values: RefCell<NodeMap<ValueRef>>,
+ const_values: RefCell<NodeMap<ValueRef>>,
/// Cache of external const values
- pub extern_const_values: RefCell<DefIdMap<ValueRef>>,
+ extern_const_values: RefCell<DefIdMap<ValueRef>>,
- pub impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
+ impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
/// Cache of closure wrappers for bare fn's.
- pub closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
-
- pub lltypes: RefCell<HashMap<ty::t, Type>>,
- pub llsizingtypes: RefCell<HashMap<ty::t, Type>>,
- pub adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
- pub symbol_hasher: RefCell<Sha256>,
- pub type_hashcodes: RefCell<HashMap<ty::t, String>>,
- pub all_llvm_symbols: RefCell<HashSet<String>>,
- pub tcx: ty::ctxt,
- pub stats: Stats,
- pub int_type: Type,
- pub opaque_vec_type: Type,
- pub builder: BuilderRef_res,
+ closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
+
+ lltypes: RefCell<HashMap<ty::t, Type>>,
+ llsizingtypes: RefCell<HashMap<ty::t, Type>>,
+ adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
+ type_hashcodes: RefCell<HashMap<ty::t, String>>,
+ all_llvm_symbols: RefCell<HashSet<String>>,
+ int_type: Type,
+ opaque_vec_type: Type,
+ builder: BuilderRef_res,
/// Holds the LLVM values for closure IDs.
- pub unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
+ unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
- pub dbg_cx: Option<debuginfo::CrateDebugContext>,
+ dbg_cx: Option<debuginfo::CrateDebugContext>,
- pub eh_personality: RefCell<Option<ValueRef>>,
+ eh_personality: RefCell<Option<ValueRef>>,
intrinsics: RefCell<HashMap<&'static str, ValueRef>>,
+
+ /// Number of LLVM instructions translated into this `LocalCrateContext`.
+ /// This is used to perform some basic load-balancing to keep all LLVM
+ /// contexts around the same size.
+ n_llvm_insns: Cell<uint>,
+}
+
+pub struct CrateContext<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'tcx>,
+ local: &'a LocalCrateContext,
+ /// The index of `local` in `shared.local_ccxs`. This is used in
+ /// `maybe_iter(true)` to identify the original `LocalCrateContext`.
+ index: uint,
+}
+
+pub struct CrateContextIterator<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'tcx>,
+ index: uint,
+}
+
+impl<'a, 'tcx> Iterator<CrateContext<'a, 'tcx>> for CrateContextIterator<'a, 'tcx> {
+ fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
+ if self.index >= self.shared.local_ccxs.len() {
+ return None;
+ }
+
+ let index = self.index;
+ self.index += 1;
+
+ Some(CrateContext {
+ shared: self.shared,
+ local: &self.shared.local_ccxs[index],
+ index: index,
+ })
+ }
+}
+
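+// Iteration sketch (hypothetical driver loop): visit each compilation unit
+// of the crate in turn through the shared context.
+//
+//     for ccx in shared_ccx.iter() {
+//         // ... emit per-unit declarations into ccx.llmod() ...
+//     }
+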
+/// The iterator produced by `CrateContext::maybe_iter`.
+pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'tcx>,
+ index: uint,
+ single: bool,
+ origin: uint,
+}
+
+impl<'a, 'tcx> Iterator<(CrateContext<'a, 'tcx>, bool)> for CrateContextMaybeIterator<'a, 'tcx> {
+ fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> {
+ if self.index >= self.shared.local_ccxs.len() {
+ return None;
+ }
+
+ let index = self.index;
+ self.index += 1;
+ if self.single {
+ self.index = self.shared.local_ccxs.len();
+ }
+
+ let ccx = CrateContext {
+ shared: self.shared,
+ local: &self.shared.local_ccxs[index],
+ index: index,
+ };
+ Some((ccx, index == self.origin))
+ }
+}
+
+
+unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
+ let llcx = llvm::LLVMContextCreate();
+ let llmod = mod_name.with_c_str(|buf| {
+ llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
+ });
+ sess.targ_cfg
+ .target_strs
+ .data_layout
+ .as_slice()
+ .with_c_str(|buf| {
+ llvm::LLVMSetDataLayout(llmod, buf);
+ });
+ sess.targ_cfg
+ .target_strs
+ .target_triple
+ .as_slice()
+ .with_c_str(|buf| {
+ llvm::LLVMRustSetNormalizedTarget(llmod, buf);
+ });
+ (llcx, llmod)
}
-impl CrateContext {
- pub fn new(name: &str,
- tcx: ty::ctxt,
+impl<'tcx> SharedCrateContext<'tcx> {
+ pub fn new(crate_name: &str,
+ local_count: uint,
+ tcx: ty::ctxt<'tcx>,
emap2: resolve::ExportMap2,
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet)
- -> CrateContext {
+ -> SharedCrateContext<'tcx> {
+ let (metadata_llcx, metadata_llmod) = unsafe {
+ create_context_and_module(&tcx.sess, "metadata")
+ };
+
+ let mut shared_ccx = SharedCrateContext {
+ local_ccxs: Vec::with_capacity(local_count),
+ metadata_llmod: metadata_llmod,
+ metadata_llcx: metadata_llcx,
+ exp_map2: emap2,
+ reachable: reachable,
+ item_symbols: RefCell::new(NodeMap::new()),
+ link_meta: link_meta,
+ non_inlineable_statics: RefCell::new(NodeSet::new()),
+ symbol_hasher: RefCell::new(symbol_hasher),
+ tcx: tcx,
+ stats: Stats {
+ n_static_tydescs: Cell::new(0u),
+ n_glues_created: Cell::new(0u),
+ n_null_glues: Cell::new(0u),
+ n_real_glues: Cell::new(0u),
+ n_fns: Cell::new(0u),
+ n_monos: Cell::new(0u),
+ n_inlines: Cell::new(0u),
+ n_closures: Cell::new(0u),
+ n_llvm_insns: Cell::new(0u),
+ llvm_insns: RefCell::new(HashMap::new()),
+ fn_stats: RefCell::new(Vec::new()),
+ },
+ available_monomorphizations: RefCell::new(HashSet::new()),
+ available_drop_glues: RefCell::new(HashMap::new()),
+ available_visit_glues: RefCell::new(HashMap::new()),
+ };
+
+ for i in range(0, local_count) {
+ // Append ".rs" to crate name as LLVM module identifier.
+ //
+            // The LLVM code generator emits a ".file filename" directive
+            // for ELF backends. The value of "filename" is set to the
+            // LLVM module identifier. Due to an LLVM MC bug[1], LLVM
+            // crashes if the module identifier is the same as another
+            // symbol in the module, such as a function name.
+ // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
+ let llmod_id = format!("{}.{}.rs", crate_name, i);
+ let local_ccx = LocalCrateContext::new(&shared_ccx, llmod_id.as_slice());
+ shared_ccx.local_ccxs.push(local_ccx);
+ }
+
+ shared_ccx
+ }
+
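+    // Construction sketch (hypothetical driver code; argument names assumed
+    // from the signature above): one shared context owning `local_count`
+    // compilation units.
+    //
+    //     let shared_ccx = SharedCrateContext::new("mycrate", local_count,
+    //                                              tcx, emap2, symbol_hasher,
+    //                                              link_meta, reachable);
+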
+ pub fn iter<'a>(&'a self) -> CrateContextIterator<'a, 'tcx> {
+ CrateContextIterator {
+ shared: self,
+ index: 0,
+ }
+ }
+
+ pub fn get_ccx<'a>(&'a self, index: uint) -> CrateContext<'a, 'tcx> {
+ CrateContext {
+ shared: self,
+ local: &self.local_ccxs[index],
+ index: index,
+ }
+ }
+
+ fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a, 'tcx> {
+ let (local_ccx, index) =
+ self.local_ccxs
+ .iter()
+ .zip(range(0, self.local_ccxs.len()))
+ .min_by(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
+ .unwrap();
+ CrateContext {
+ shared: self,
+ local: local_ccx,
+ index: index,
+ }
+ }
+
+
+ pub fn metadata_llmod(&self) -> ModuleRef {
+ self.metadata_llmod
+ }
+
+ pub fn metadata_llcx(&self) -> ContextRef {
+ self.metadata_llcx
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.link_meta
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.non_inlineable_statics
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.symbol_hasher
+ }
+
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ &self.tcx
+ }
+
+ pub fn take_tcx(self) -> ty::ctxt<'tcx> {
+ self.tcx
+ }
+
+ pub fn sess<'a>(&'a self) -> &'a Session {
+ &self.tcx.sess
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.stats
+ }
+}
+
+impl LocalCrateContext {
+ fn new(shared: &SharedCrateContext,
+ name: &str)
+ -> LocalCrateContext {
unsafe {
- let llcx = llvm::LLVMContextCreate();
- let llmod = name.with_c_str(|buf| {
- llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
- });
- let metadata_llmod = format!("{}_metadata", name).with_c_str(|buf| {
- llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
- });
- tcx.sess
- .targ_cfg
- .target_strs
- .data_layout
- .as_slice()
- .with_c_str(|buf| {
- llvm::LLVMSetDataLayout(llmod, buf);
- llvm::LLVMSetDataLayout(metadata_llmod, buf);
- });
- tcx.sess
- .targ_cfg
- .target_strs
- .target_triple
- .as_slice()
- .with_c_str(|buf| {
- llvm::LLVMRustSetNormalizedTarget(llmod, buf);
- llvm::LLVMRustSetNormalizedTarget(metadata_llmod, buf);
- });
-
- let td = mk_target_data(tcx.sess
- .targ_cfg
- .target_strs
- .data_layout
- .as_slice());
-
- let dbg_cx = if tcx.sess.opts.debuginfo != NoDebugInfo {
+ let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
+
+ let td = mk_target_data(shared.tcx
+ .sess
+ .targ_cfg
+ .target_strs
+ .data_layout
+ .as_slice());
+
+ let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
Some(debuginfo::CrateDebugContext::new(llmod))
} else {
None
};
- let mut ccx = CrateContext {
+ let mut local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
- metadata_llmod: metadata_llmod,
td: td,
tn: TypeNames::new(),
externs: RefCell::new(HashMap::new()),
item_vals: RefCell::new(NodeMap::new()),
- exp_map2: emap2,
- reachable: reachable,
- item_symbols: RefCell::new(NodeMap::new()),
- link_meta: link_meta,
drop_glues: RefCell::new(HashMap::new()),
tydescs: RefCell::new(HashMap::new()),
finished_tydescs: Cell::new(false),
external: RefCell::new(DefIdMap::new()),
external_srcs: RefCell::new(NodeMap::new()),
- non_inlineable_statics: RefCell::new(NodeSet::new()),
monomorphized: RefCell::new(HashMap::new()),
monomorphizing: RefCell::new(DefIdMap::new()),
vtables: RefCell::new(HashMap::new()),
lltypes: RefCell::new(HashMap::new()),
llsizingtypes: RefCell::new(HashMap::new()),
adt_reprs: RefCell::new(HashMap::new()),
- symbol_hasher: RefCell::new(symbol_hasher),
type_hashcodes: RefCell::new(HashMap::new()),
all_llvm_symbols: RefCell::new(HashSet::new()),
- tcx: tcx,
- stats: Stats {
- n_static_tydescs: Cell::new(0u),
- n_glues_created: Cell::new(0u),
- n_null_glues: Cell::new(0u),
- n_real_glues: Cell::new(0u),
- n_fns: Cell::new(0u),
- n_monos: Cell::new(0u),
- n_inlines: Cell::new(0u),
- n_closures: Cell::new(0u),
- n_llvm_insns: Cell::new(0u),
- llvm_insns: RefCell::new(HashMap::new()),
- fn_stats: RefCell::new(Vec::new()),
- },
int_type: Type::from_ref(ptr::mut_null()),
opaque_vec_type: Type::from_ref(ptr::mut_null()),
builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
dbg_cx: dbg_cx,
eh_personality: RefCell::new(None),
intrinsics: RefCell::new(HashMap::new()),
+ n_llvm_insns: Cell::new(0u),
};
- ccx.int_type = Type::int(&ccx);
- ccx.opaque_vec_type = Type::opaque_vec(&ccx);
+ local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
+ local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared));
- let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
- str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type], false);
- ccx.tn.associate_type("str_slice", &str_slice_ty);
+ // Done mutating local_ccx directly. (The rest of the
+ // initialization goes through RefCell.)
+ {
+ let ccx = local_ccx.dummy_ccx(shared);
- ccx.tn.associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+ let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
+ str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type()], false);
+ ccx.tn().associate_type("str_slice", &str_slice_ty);
- if ccx.sess().count_llvm_insns() {
- base::init_insn_ctxt()
+ ccx.tn().associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+
+ if ccx.sess().count_llvm_insns() {
+ base::init_insn_ctxt()
+ }
}
- ccx
+ local_ccx
}
}
- pub fn tcx<'a>(&'a self) -> &'a ty::ctxt {
- &self.tcx
+ /// Create a dummy `CrateContext` from `self` and the provided
+ /// `SharedCrateContext`. This is somewhat dangerous because `self` may
+ /// not actually be an element of `shared.local_ccxs`, which can cause some
+ /// operations to `fail` unexpectedly.
+ ///
+ /// This is used in the `LocalCrateContext` constructor to allow calling
+ /// functions that expect a complete `CrateContext`, even before the local
+ /// portion is fully initialized and attached to the `SharedCrateContext`.
+ fn dummy_ccx<'a, 'tcx>(&'a self, shared: &'a SharedCrateContext<'tcx>)
+ -> CrateContext<'a, 'tcx> {
+ CrateContext {
+ shared: shared,
+ local: self,
+ index: -1 as uint,
+ }
+ }
+}
+
+impl<'b, 'tcx> CrateContext<'b, 'tcx> {
+ pub fn shared(&self) -> &'b SharedCrateContext<'tcx> {
+ self.shared
+ }
+
+ pub fn local(&self) -> &'b LocalCrateContext {
+ self.local
+ }
+
+
+ /// Get a (possibly) different `CrateContext` from the same
+ /// `SharedCrateContext`.
+ pub fn rotate(&self) -> CrateContext<'b, 'tcx> {
+ self.shared.get_smallest_ccx()
+ }
+
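+    // Load-balancing sketch (hypothetical call site): pick the least-full
+    // context before translating each item so the per-unit LLVM modules
+    // stay roughly the same size.
+    //
+    //     let ccx = ccx.rotate();
+    //     // ... translate the next item into `ccx` ...
+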
+ /// Either iterate over only `self`, or iterate over all `CrateContext`s in
+ /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)`
+ /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false`
+ /// otherwise. This method is useful for avoiding code duplication in
+ /// cases where it may or may not be necessary to translate code into every
+ /// context.
+ pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> {
+ CrateContextMaybeIterator {
+ shared: self.shared,
+ index: if iter_all { 0 } else { self.index },
+ single: !iter_all,
+ origin: self.index,
+ }
+ }
+
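+    // Usage sketch (hypothetical caller; `translate_everywhere`,
+    // `declare_item`, and `record` are assumed helpers): run in one unit or
+    // in all of them, doing origin-only bookkeeping exactly once.
+    //
+    //     for (ccx, is_origin) in ccx.maybe_iter(translate_everywhere) {
+    //         let llval = declare_item(&ccx);
+    //         if is_origin { record(llval); }
+    //     }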
+
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ &self.shared.tcx
}
pub fn sess<'a>(&'a self) -> &'a Session {
- &self.tcx.sess
+ &self.shared.tcx.sess
}
- pub fn builder<'a>(&'a self) -> Builder<'a> {
+ pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> {
Builder::new(self)
}
+ pub fn raw_builder<'a>(&'a self) -> BuilderRef {
+ self.local.builder.b
+ }
+
pub fn tydesc_type(&self) -> Type {
- self.tn.find_type("tydesc").unwrap()
+ self.local.tn.find_type("tydesc").unwrap()
}
pub fn get_intrinsic(&self, key: & &'static str) -> ValueRef {
- match self.intrinsics.borrow().find_copy(key) {
+ match self.intrinsics().borrow().find_copy(key) {
Some(v) => return v,
_ => {}
}
let ref cfg = self.sess().targ_cfg;
cfg.os != abi::OsiOS || cfg.arch != abi::Arm
}
+
+
+ pub fn llmod(&self) -> ModuleRef {
+ self.local.llmod
+ }
+
+ pub fn llcx(&self) -> ContextRef {
+ self.local.llcx
+ }
+
+ pub fn td<'a>(&'a self) -> &'a TargetData {
+ &self.local.td
+ }
+
+ pub fn tn<'a>(&'a self) -> &'a TypeNames {
+ &self.local.tn
+ }
+
+ pub fn externs<'a>(&'a self) -> &'a RefCell<ExternMap> {
+ &self.local.externs
+ }
+
+ pub fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.local.item_vals
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.shared.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.shared.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.shared.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.shared.link_meta
+ }
+
+ pub fn drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, ValueRef>> {
+ &self.local.drop_glues
+ }
+
+ pub fn tydescs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<tydesc_info>>> {
+ &self.local.tydescs
+ }
+
+ pub fn finished_tydescs<'a>(&'a self) -> &'a Cell<bool> {
+ &self.local.finished_tydescs
+ }
+
+ pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
+ &self.local.external
+ }
+
+ pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<ast::DefId>> {
+ &self.local.external_srcs
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.shared.non_inlineable_statics
+ }
+
+ pub fn monomorphized<'a>(&'a self) -> &'a RefCell<HashMap<MonoId, ValueRef>> {
+ &self.local.monomorphized
+ }
+
+ pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<uint>> {
+ &self.local.monomorphizing
+ }
+
+ pub fn vtables<'a>(&'a self) -> &'a RefCell<HashMap<(ty::t, MonoId), ValueRef>> {
+ &self.local.vtables
+ }
+
+ pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<HashMap<InternedString, ValueRef>> {
+ &self.local.const_cstr_cache
+ }
+
+ pub fn const_globals<'a>(&'a self) -> &'a RefCell<HashMap<int, ValueRef>> {
+ &self.local.const_globals
+ }
+
+ pub fn const_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.local.const_values
+ }
+
+ pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.local.extern_const_values
+ }
+
+ pub fn impl_method_cache<'a>(&'a self)
+ -> &'a RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>> {
+ &self.local.impl_method_cache
+ }
+
+ pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<HashMap<ValueRef, ValueRef>> {
+ &self.local.closure_bare_wrapper_cache
+ }
+
+ pub fn lltypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.local.lltypes
+ }
+
+ pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.local.llsizingtypes
+ }
+
+ pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<adt::Repr>>> {
+ &self.local.adt_reprs
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.shared.symbol_hasher
+ }
+
+ pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.local.type_hashcodes
+ }
+
+ pub fn all_llvm_symbols<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.local.all_llvm_symbols
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.shared.stats
+ }
+
+ pub fn available_monomorphizations<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.shared.available_monomorphizations
+ }
+
+ pub fn available_drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.shared.available_drop_glues
+ }
+
+ pub fn available_visit_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.shared.available_visit_glues
+ }
+
+ pub fn int_type(&self) -> Type {
+ self.local.int_type
+ }
+
+ pub fn opaque_vec_type(&self) -> Type {
+ self.local.opaque_vec_type
+ }
+
+ pub fn unboxed_closure_vals<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.local.unboxed_closure_vals
+ }
+
+ pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext> {
+ &self.local.dbg_cx
+ }
+
+ pub fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
+ &self.local.eh_personality
+ }
+
+ fn intrinsics<'a>(&'a self) -> &'a RefCell<HashMap<&'static str, ValueRef>> {
+ &self.local.intrinsics
+ }
+
+ pub fn count_llvm_insn(&self) {
+ self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
+ }
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {
($name:expr fn() -> $ret:expr) => (
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name, Type::func([], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
);
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name,
Type::func([$($arg),*], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
let f = base::decl_cdecl_fn(ccx, stringify!($cname),
Type::func([$($arg),*], &$ret),
ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
use std::gc::Gc;
-pub fn trans_stmt<'a>(cx: &'a Block<'a>,
- s: &ast::Stmt)
- -> &'a Block<'a> {
+pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ s: &ast::Stmt)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_stmt");
let fcx = cx.fcx;
debug!("trans_stmt({})", s.repr(cx.tcx()));
return bcx;
}
-pub fn trans_stmt_semi<'a>(cx: &'a Block<'a>, e: &ast::Expr) -> &'a Block<'a> {
+pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &ast::Expr)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_stmt_semi");
let ty = expr_ty(cx, e);
if ty::type_needs_drop(cx.tcx(), ty) {
}
}
-pub fn trans_block<'a>(bcx: &'a Block<'a>,
- b: &ast::Block,
- mut dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ b: &ast::Block,
+ mut dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_block");
let fcx = bcx.fcx;
let mut bcx = bcx;
return bcx;
}
-pub fn trans_if<'a>(bcx: &'a Block<'a>,
- if_id: ast::NodeId,
- cond: &ast::Expr,
- thn: ast::P<ast::Block>,
- els: Option<Gc<ast::Expr>>,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ if_id: ast::NodeId,
+ cond: &ast::Expr,
+ thn: ast::P<ast::Block>,
+ els: Option<Gc<ast::Expr>>,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
debug!("trans_if(bcx={}, if_id={}, cond={}, thn={:?}, dest={})",
bcx.to_str(), if_id, bcx.expr_to_string(cond), thn.id,
dest.to_string(bcx.ccx()));
next_bcx
}
-pub fn trans_while<'a>(bcx: &'a Block<'a>,
- loop_id: ast::NodeId,
- cond: &ast::Expr,
- body: &ast::Block)
- -> &'a Block<'a> {
+pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ loop_id: ast::NodeId,
+ cond: &ast::Expr,
+ body: &ast::Block)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_while");
let fcx = bcx.fcx;
}
/// Translates a `for` loop.
-pub fn trans_for<'a>(
- mut bcx: &'a Block<'a>,
- loop_info: NodeInfo,
- pat: Gc<ast::Pat>,
- head: &ast::Expr,
- body: &ast::Block)
- -> &'a Block<'a> {
+pub fn trans_for<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ loop_info: NodeInfo,
+ pat: Gc<ast::Pat>,
+ head: &ast::Expr,
+ body: &ast::Block)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_for");
// bcx
next_bcx_in
}
-pub fn trans_loop<'a>(bcx:&'a Block<'a>,
- loop_id: ast::NodeId,
- body: &ast::Block)
- -> &'a Block<'a> {
+pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ loop_id: ast::NodeId,
+ body: &ast::Block)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_loop");
let fcx = bcx.fcx;
return next_bcx_in;
}
-pub fn trans_break_cont<'a>(bcx: &'a Block<'a>,
- expr_id: ast::NodeId,
- opt_label: Option<Ident>,
- exit: uint)
- -> &'a Block<'a> {
+pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId,
+ opt_label: Option<Ident>,
+ exit: uint)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_break_cont");
let fcx = bcx.fcx;
return bcx;
}
-pub fn trans_break<'a>(bcx: &'a Block<'a>,
- expr_id: ast::NodeId,
- label_opt: Option<Ident>)
- -> &'a Block<'a> {
+pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId,
+ label_opt: Option<Ident>)
+ -> Block<'blk, 'tcx> {
return trans_break_cont(bcx, expr_id, label_opt, cleanup::EXIT_BREAK);
}
-pub fn trans_cont<'a>(bcx: &'a Block<'a>,
- expr_id: ast::NodeId,
- label_opt: Option<Ident>)
- -> &'a Block<'a> {
+pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId,
+ label_opt: Option<Ident>)
+ -> Block<'blk, 'tcx> {
return trans_break_cont(bcx, expr_id, label_opt, cleanup::EXIT_LOOP);
}
-pub fn trans_ret<'a>(bcx: &'a Block<'a>,
- e: Option<Gc<ast::Expr>>)
- -> &'a Block<'a> {
+pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ e: Option<Gc<ast::Expr>>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_ret");
let fcx = bcx.fcx;
let mut bcx = bcx;
return bcx;
}
-pub fn trans_fail<'a>(
- bcx: &'a Block<'a>,
- sp: Span,
- fail_str: InternedString)
- -> &'a Block<'a> {
+pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ sp: Span,
+ fail_str: InternedString)
+ -> Block<'blk, 'tcx> {
let ccx = bcx.ccx();
let _icx = push_ctxt("trans_fail_value");
return bcx;
}
-pub fn trans_fail_bounds_check<'a>(
- bcx: &'a Block<'a>,
- sp: Span,
- index: ValueRef,
- len: ValueRef)
- -> &'a Block<'a> {
+pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ sp: Span,
+ index: ValueRef,
+ len: ValueRef)
+ -> Block<'blk, 'tcx> {
let ccx = bcx.ccx();
let _icx = push_ctxt("trans_fail_bounds_check");
pub kind: K,
}
-pub struct DatumBlock<'a, K> {
- pub bcx: &'a Block<'a>,
+pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
+ pub bcx: Block<'blk, 'tcx>,
pub datum: Datum<K>,
}
return Datum::new(val, ty, Rvalue::new(ByValue));
}
-pub fn immediate_rvalue_bcx<'a>(bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> DatumBlock<'a, Rvalue> {
+pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
return DatumBlock::new(bcx, immediate_rvalue(val, ty))
}
-pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>,
- ty: ty::t,
- name: &str,
- zero: bool,
- scope: cleanup::ScopeId,
- arg: A,
- populate: |A, &'a Block<'a>, ValueRef|
- -> &'a Block<'a>)
- -> DatumBlock<'a, Lvalue> {
+pub fn lvalue_scratch_datum<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
+ ty: ty::t,
+ name: &str,
+ zero: bool,
+ scope: cleanup::ScopeId,
+ arg: A,
+ populate: |A, Block<'blk, 'tcx>, ValueRef|
+ -> Block<'blk, 'tcx>)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. The memory will be
DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue))
}
-pub fn rvalue_scratch_datum(bcx: &Block,
+pub fn rvalue_scratch_datum(bcx: Block,
ty: ty::t,
name: &str)
-> Datum<Rvalue> {
* Take appropriate action after the value in `datum` has been
* stored to a new location.
*/
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> &'a Block<'a>;
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> Block<'blk, 'tcx>;
/**
* True if this mode is a reference mode, meaning that the datum's
}
impl KindOps for Rvalue {
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- _val: ValueRef,
- _ty: ty::t)
- -> &'a Block<'a> {
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ _val: ValueRef,
+ _ty: ty::t)
+ -> Block<'blk, 'tcx> {
// No cleanup is scheduled for an rvalue, so we don't have
// to do anything after a move to cancel or duplicate it.
bcx
}
impl KindOps for Lvalue {
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> &'a Block<'a> {
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> Block<'blk, 'tcx> {
/*!
* If an lvalue is moved, we must zero out the memory in which
* it resides so as to cancel cleanup. If an @T lvalue is
}
impl KindOps for Expr {
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> &'a Block<'a> {
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> Block<'blk, 'tcx> {
match *self {
LvalueExpr => Lvalue.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
self.val
}
- pub fn to_lvalue_datum_in_scope<'a>(self,
- bcx: &'a Block<'a>,
- name: &str,
- scope: cleanup::ScopeId)
- -> DatumBlock<'a, Lvalue> {
+ pub fn to_lvalue_datum_in_scope<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &str,
+ scope: cleanup::ScopeId)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
/*!
* Returns an lvalue datum (that is, a by ref datum with
* cleanup scheduled). If `self` is not already an lvalue,
}
}
- pub fn to_ref_datum<'a>(self, bcx: &'a Block<'a>) -> DatumBlock<'a, Rvalue> {
+ pub fn to_ref_datum<'blk, 'tcx>(self, bcx: Block<'blk, 'tcx>)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
let mut bcx = bcx;
match self.kind.mode {
ByRef => DatumBlock::new(bcx, self),
}
}
- pub fn to_appropriate_datum<'a>(self,
- bcx: &'a Block<'a>)
- -> DatumBlock<'a, Rvalue> {
+ pub fn to_appropriate_datum<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
match self.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
self.to_ref_datum(bcx)
}
#[allow(dead_code)] // potentially useful
- pub fn assert_lvalue(self, bcx: &Block) -> Datum<Lvalue> {
+ pub fn assert_lvalue(self, bcx: Block) -> Datum<Lvalue> {
/*!
* Asserts that this datum *is* an lvalue and returns it.
*/
|_| bcx.sess().bug("assert_lvalue given rvalue"))
}
- pub fn assert_rvalue(self, bcx: &Block) -> Datum<Rvalue> {
+ pub fn assert_rvalue(self, bcx: Block) -> Datum<Rvalue> {
/*!
         * Asserts that this datum *is* an rvalue and returns it.
*/
|r| r)
}
- pub fn store_to_dest<'a>(self,
- bcx: &'a Block<'a>,
- dest: expr::Dest,
- expr_id: ast::NodeId)
- -> &'a Block<'a> {
+ pub fn store_to_dest<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ dest: expr::Dest,
+ expr_id: ast::NodeId)
+ -> Block<'blk, 'tcx> {
match dest {
expr::Ignore => {
self.add_clean_if_rvalue(bcx, expr_id);
}
}
- pub fn add_clean_if_rvalue<'a>(self,
- bcx: &'a Block<'a>,
- expr_id: ast::NodeId) {
+ pub fn add_clean_if_rvalue<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId) {
/*!
* Arranges cleanup for `self` if it is an rvalue. Use when
* you are done working with a value that may need drop.
})
}
- pub fn clean<'a>(self,
- bcx: &'a Block<'a>,
- name: &'static str,
- expr_id: ast::NodeId)
- -> &'a Block<'a> {
+ pub fn clean<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &'static str,
+ expr_id: ast::NodeId)
+ -> Block<'blk, 'tcx> {
/*!
* Ensures that `self` will get cleaned up, if it is not an lvalue
* already.
self.to_lvalue_datum(bcx, name, expr_id).bcx
}
- pub fn to_lvalue_datum<'a>(self,
- bcx: &'a Block<'a>,
- name: &str,
- expr_id: ast::NodeId)
- -> DatumBlock<'a, Lvalue> {
+ pub fn to_lvalue_datum<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &str,
+ expr_id: ast::NodeId)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx()));
assert!(ty::lltype_is_sized(bcx.tcx(), self.ty),
})
}
- pub fn to_rvalue_datum<'a>(self,
- bcx: &'a Block<'a>,
- name: &'static str)
- -> DatumBlock<'a, Rvalue> {
+ pub fn to_rvalue_datum<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &'static str)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
/*!
* Ensures that we have an rvalue datum (that is, a datum with
* no cleanup scheduled).
// datum may also be unsized _without the size information_. It is the
    // caller's responsibility to package the result in some way to make a valid
// datum in that case (e.g., by making a fat pointer or opened pair).
- pub fn get_element<'a>(&self,
- bcx: &'a Block<'a>,
- ty: ty::t,
- gep: |ValueRef| -> ValueRef)
- -> Datum<Lvalue> {
+ pub fn get_element(&self, bcx: Block, ty: ty::t,
+ gep: |ValueRef| -> ValueRef)
+ -> Datum<Lvalue> {
let val = match ty::get(self.ty).sty {
_ if ty::type_is_sized(bcx.tcx(), self.ty) => gep(self.val),
ty::ty_open(_) => {
}
}
- pub fn get_vec_base_and_len<'a>(&self, bcx: &'a Block<'a>) -> (ValueRef, ValueRef) {
+ pub fn get_vec_base_and_len(&self, bcx: Block) -> (ValueRef, ValueRef) {
//! Converts a vector into the slice pair.
tvec::get_base_and_len(bcx, self.val, self.ty)
Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
}
- pub fn store_to<'a>(self,
- bcx: &'a Block<'a>,
- dst: ValueRef)
- -> &'a Block<'a> {
+ pub fn store_to<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ dst: ValueRef)
+ -> Block<'blk, 'tcx> {
/*!
* Moves or copies this value into a new home, as appropriate
* depending on the type of the datum. This method consumes
self.kind.post_store(bcx, self.val, self.ty)
}
- fn shallow_copy<'a>(&self,
- bcx: &'a Block<'a>,
- dst: ValueRef)
- -> &'a Block<'a> {
+ fn shallow_copy<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ dst: ValueRef)
+ -> Block<'blk, 'tcx> {
/*!
* Helper function that performs a shallow copy of this value
* into `dst`, which should be a pointer to a memory location
return bcx;
}
- pub fn shallow_copy_and_take<'a>(&self,
- bcx: &'a Block<'a>,
- dst: ValueRef)
- -> &'a Block<'a> {
+ pub fn shallow_copy_and_take<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ dst: ValueRef)
+ -> Block<'blk, 'tcx> {
/*!
* Copies the value into a new location and runs any necessary
* take glue on the new location. This function always
#[allow(dead_code)] // useful for debugging
pub fn to_string(&self, ccx: &CrateContext) -> String {
format!("Datum({}, {}, {:?})",
- ccx.tn.val_to_string(self.val),
+ ccx.tn().val_to_string(self.val),
ty_to_string(ccx.tcx(), self.ty),
self.kind)
}
appropriate_rvalue_mode(ccx, self.ty)
}
- pub fn to_llscalarish<'a>(self, bcx: &'a Block<'a>) -> ValueRef {
+ pub fn to_llscalarish(self, bcx: Block) -> ValueRef {
/*!
* Converts `self` into a by-value `ValueRef`. Consumes this
* datum (i.e., absolves you of responsibility to cleanup the
}
}
- pub fn to_llbool<'a>(self, bcx: &'a Block<'a>) -> ValueRef {
+ pub fn to_llbool(self, bcx: Block) -> ValueRef {
        assert!(ty::type_is_bool(self.ty) || ty::type_is_bot(self.ty));
self.to_llscalarish(bcx)
}
}
-impl <'a, K> DatumBlock<'a, K> {
- pub fn new(bcx: &'a Block<'a>, datum: Datum<K>) -> DatumBlock<'a, K> {
+impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
+ pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<K>) -> DatumBlock<'blk, 'tcx, K> {
DatumBlock { bcx: bcx, datum: datum }
}
}
-impl<'a, K:KindOps> DatumBlock<'a, K> {
- pub fn to_expr_datumblock(self) -> DatumBlock<'a, Expr> {
+impl<'blk, 'tcx, K:KindOps> DatumBlock<'blk, 'tcx, K> {
+ pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
DatumBlock::new(self.bcx, self.datum.to_expr_datum())
}
}
-impl<'a> DatumBlock<'a, Expr> {
+impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
pub fn store_to_dest(self,
dest: expr::Dest,
- expr_id: ast::NodeId) -> &'a Block<'a> {
+ expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
let DatumBlock { bcx, datum } = self;
datum.store_to_dest(bcx, dest, expr_id)
}
- pub fn to_llbool(self) -> Result<'a> {
+ pub fn to_llbool(self) -> Result<'blk, 'tcx> {
let DatumBlock { datum, bcx } = self;
Result::new(bcx, datum.to_llbool(bcx))
}
// First, find out the 'real' def_id of the type. Items inlined from
// other crates have to be mapped back to their source.
let source_def_id = if def_id.krate == ast::LOCAL_CRATE {
- match cx.external_srcs.borrow().find_copy(&def_id.node) {
+ match cx.external_srcs().borrow().find_copy(&def_id.node) {
Some(source_def_id) => {
// The given def_id identifies the inlined copy of a
// type definition, let's take the source of the copy.
// Get the crate hash as first part of the identifier.
let crate_hash = if source_def_id.krate == ast::LOCAL_CRATE {
- cx.link_meta.crate_hash.clone()
+ cx.link_meta().crate_hash.clone()
} else {
cx.sess().cstore.get_crate_hash(source_def_id.krate)
};
/// Create any deferred debug metadata nodes
pub fn finalize(cx: &CrateContext) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
if cx.sess().targ_cfg.os == abi::OsMacos ||
cx.sess().targ_cfg.os == abi::OsiOS {
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 2));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 2));
} else {
// FIXME(#13611) this is a kludge fix because the Linux bots have
// gdb 7.4 which doesn't understand dwarf4, we should
// do something more graceful here.
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 3));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 3));
}
// Prevent bitcode readers from deleting the debug info.
"Debug Info Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s,
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s,
llvm::LLVMRustDebugMetadataVersion));
};
}
pub fn create_global_var_metadata(cx: &CrateContext,
node_id: ast::NodeId,
global: ValueRef) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
// crate should already contain debuginfo for it. More importantly, the
    // global might not even exist in un-inlined form anywhere, which would
    // lead to linker errors.
- if cx.external_srcs.borrow().contains_key(&node_id) {
+ if cx.external_srcs().borrow().contains_key(&node_id) {
return;
}
- let var_item = cx.tcx.map.get(node_id);
+ let var_item = cx.tcx().map.get(node_id);
let (ident, span) = match var_item {
ast_map::NodeItem(item) => {
/// Creates debug information for the given local variable.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_local_var_metadata(bcx: &Block, local: &ast::Local) {
+pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) {
if fn_should_be_ignored(bcx.fcx) {
return;
}
let cx = bcx.ccx();
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, path1| {
let var_ident = path1.node;
/// Creates debug information for a variable captured in a closure.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_captured_var_metadata(bcx: &Block,
+pub fn create_captured_var_metadata(bcx: Block,
node_id: ast::NodeId,
env_data_type: ty::t,
env_pointer: ValueRef,
let cx = bcx.ccx();
- let ast_item = cx.tcx.map.find(node_id);
+ let ast_item = cx.tcx().map.find(node_id);
let variable_ident = match ast_item {
None => {
/// match-statement arm.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_match_binding_metadata(bcx: &Block,
+pub fn create_match_binding_metadata(bcx: Block,
variable_ident: ast::Ident,
binding: BindingInfo) {
if fn_should_be_ignored(bcx.fcx) {
let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
let aops = unsafe {
- [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type.to_ref())]
+ [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type().to_ref())]
};
// Regardless of the actual type (`T`) we're always passed the stack slot (alloca)
// for the binding. For ByRef bindings that's a `T*` but for ByMove bindings we
/// Creates debug information for the given function argument.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_argument_metadata(bcx: &Block, arg: &ast::Arg) {
+pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) {
if fn_should_be_ignored(bcx.fcx) {
return;
}
let fcx = bcx.fcx;
let cx = fcx.ccx;
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path1| {
let empty_generics = ast_util::empty_generics();
- let fnitem = cx.tcx.map.get(fn_ast_id);
+ let fnitem = cx.tcx().map.get(fn_ast_id);
let (ident, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
ast_map::NodeItem(ref item) => {
// externally visible or by being inlined into something externally visible).
    // It might be better to use the `exported_items` set from `driver::CrateAnalysis`
// in the future, but (atm) this set is not available in the translation pass.
- !cx.reachable.contains(&node_id)
+ !cx.reachable().contains(&node_id)
}
#[allow(non_snake_case)]
});
fn fallback_path(cx: &CrateContext) -> CString {
- cx.link_meta.crate_name.as_slice().to_c_str()
+ cx.link_meta().crate_name.as_slice().to_c_str()
}
}
-fn declare_local(bcx: &Block,
+fn declare_local(bcx: Block,
variable_ident: ast::Ident,
variable_type: ty::t,
scope_metadata: DIScope,
match scope_map.borrow().find_copy(&node_id) {
Some(scope_metadata) => scope_metadata,
None => {
- let node = fcx.ccx.tcx.map.get(node_id);
+ let node = fcx.ccx.tcx().map.get(node_id);
fcx.ccx.sess().span_bug(span,
format!("debuginfo: Could not find scope info for node {:?}",
def_id: ast::DefId)
-> token::InternedString {
let name = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.get_path_elem(def_id.node).name()
+ cx.tcx().map.get_path_elem(def_id.node).name()
} else {
- csearch::get_item_path(&cx.tcx, def_id).last().unwrap().name()
+ csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
token::get_name(name)
content_llvm_type: Type)
-> bool {
member_llvm_types.len() == 5 &&
- member_llvm_types[0] == cx.int_type &&
+ member_llvm_types[0] == cx.int_type() &&
member_llvm_types[1] == Type::generic_glue_fn(cx).ptr_to() &&
member_llvm_types[2] == Type::i8(cx).ptr_to() &&
member_llvm_types[3] == Type::i8(cx).ptr_to() &&
-> bool {
member_llvm_types.len() == 2 &&
member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
- member_llvm_types[1] == cx.int_type
+ member_llvm_types[1] == cx.int_type()
}
}
};
unsafe {
- llvm::LLVMSetCurrentDebugLocation(cx.builder.b, metadata_node);
+ llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
}
debug_context(cx).current_debug_location.set(debug_location);
#[inline]
fn debug_context<'a>(cx: &'a CrateContext) -> &'a CrateDebugContext {
- let debug_context: &'a CrateDebugContext = cx.dbg_cx.get_ref();
+ let debug_context: &'a CrateDebugContext = cx.dbg_cx().get_ref();
debug_context
}
#[inline]
#[allow(non_snake_case)]
fn DIB(cx: &CrateContext) -> DIBuilderRef {
- cx.dbg_cx.get_ref().builder
+ cx.dbg_cx().get_ref().builder
}
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
}
fn assert_type_for_node_id(cx: &CrateContext, node_id: ast::NodeId, error_span: Span) {
- if !cx.tcx.node_types.borrow().contains_key(&(node_id as uint)) {
+ if !cx.tcx().node_types.borrow().contains_key(&(node_id as uint)) {
cx.sess().span_bug(error_span, "debuginfo: Could not find type for node id!");
}
}
-> (DIScope, Span) {
let containing_scope = namespace_for_item(cx, def_id).scope;
let definition_span = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.span(def_id.node)
+ cx.tcx().map.span(def_id.node)
} else {
// For external items there is no span information
codemap::DUMMY_SP
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
scope_metadata: DIScope,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
}
fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str {
- cx.link_meta.crate_name.as_slice()
+ cx.link_meta().crate_name.as_slice()
}
fn namespace_for_item(cx: &CrateContext, def_id: ast::DefId) -> Rc<NamespaceTreeNode> {
impl Dest {
pub fn to_string(&self, ccx: &CrateContext) -> String {
match *self {
- SaveIn(v) => format!("SaveIn({})", ccx.tn.val_to_string(v)),
+ SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
Ignore => "Ignore".to_string()
}
}
}
-pub fn trans_into<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
+pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
/*!
* This function is equivalent to `trans(bcx, expr).store_to_dest(dest)`
* but it may generate better optimized LLVM code.
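     *
     * Illustrative sketch (with a hypothetical destination `dst`): for an
     * aggregate expression,
     *
     *     trans_into(bcx, expr, SaveIn(dst))
     *
     * can write the fields directly into `dst`, whereas the equivalent
     * `trans(bcx, expr).store_to_dest(SaveIn(dst), expr.id)` may build the
     * value in a temporary and then copy it over.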
bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}
-pub fn trans<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* Translates an expression, returning a datum (and new block)
* encapsulating the result. When possible, it is preferred to
return DatumBlock::new(bcx, datum);
}
-pub fn get_len(bcx: &Block, fat_ptr: ValueRef) -> ValueRef {
+pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, [0u, abi::slice_elt_len])
}
-pub fn get_dataptr(bcx: &Block, fat_ptr: ValueRef) -> ValueRef {
+pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, [0u, abi::slice_elt_base])
}
-fn apply_adjustments<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* Helper for trans that apply adjustments from `expr` to `datum`,
* which should be the unadjusted translation of `expr`.
debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
return DatumBlock::new(bcx, datum);
- fn apply_autoref<'a>(autoref: &ty::AutoRef,
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn apply_autoref<'blk, 'tcx>(autoref: &ty::AutoRef,
+ bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let mut datum = datum;
DatumBlock::new(bcx, datum)
}
- fn ref_ptr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn ref_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
if !ty::type_is_sized(bcx.tcx(), datum.ty) {
debug!("Taking address of unsized type {}",
bcx.ty_to_string(datum.ty));
// into a type to be destructed. If we want to end up with a Box pointer,
// then mk_ty should make a Box pointer (T -> Box<T>), if we want a
// borrowed reference then it should be T -> &T.
- fn unsized_info<'a>(bcx: &'a Block<'a>,
- kind: &ty::UnsizeKind,
- id: ast::NodeId,
- unsized_ty: ty::t,
- mk_ty: |ty::t| -> ty::t) -> ValueRef {
+ fn unsized_info<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ kind: &ty::UnsizeKind,
+ id: ast::NodeId,
+ unsized_ty: ty::t,
+ mk_ty: |ty::t| -> ty::t) -> ValueRef {
match kind {
&ty::UnsizeLength(len) => C_uint(bcx.ccx(), len),
&ty::UnsizeStruct(box ref k, tp_index) => match ty::get(unsized_ty).sty {
}
}
- fn unsize_expr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- k: &ty::UnsizeKind)
- -> DatumBlock<'a, Expr> {
+ fn unsize_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ k: &ty::UnsizeKind)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let tcx = bcx.tcx();
let datum_ty = datum.ty;
let unsized_ty = ty::unsize_ty(tcx, datum_ty, k, expr.span);
into_fat_ptr(bcx, expr, datum, dest_ty, base, info)
}
- fn ref_fat_ptr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let tcx = bcx.tcx();
let dest_ty = ty::close_type(tcx, datum.ty);
let base = |bcx, val| Load(bcx, get_dataptr(bcx, val));
into_fat_ptr(bcx, expr, datum, dest_ty, base, len)
}
- fn into_fat_ptr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- dest_ty: ty::t,
- base: |&'a Block<'a>, ValueRef| -> ValueRef,
- info: |&'a Block<'a>, ValueRef| -> ValueRef)
- -> DatumBlock<'a, Expr> {
+ fn into_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ dest_ty: ty::t,
+ base: |Block<'blk, 'tcx>, ValueRef| -> ValueRef,
+ info: |Block<'blk, 'tcx>, ValueRef| -> ValueRef)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
// Arrange cleanup
DatumBlock::new(bcx, scratch.to_expr_datum())
}
- fn unsize_unique_vec<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- len: uint)
- -> DatumBlock<'a, Expr> {
+ fn unsize_unique_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ len: uint)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let tcx = bcx.tcx();
DatumBlock::new(bcx, scratch.to_expr_datum())
}
- fn unsize_unique_expr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- k: &ty::UnsizeKind)
- -> DatumBlock<'a, Expr> {
+ fn unsize_unique_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ k: &ty::UnsizeKind)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let tcx = bcx.tcx();
DatumBlock::new(bcx, scratch.to_expr_datum())
}
- fn add_env<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn add_env<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
// This is not the most efficient thing possible; since closures
// are two words it'd be better if this were compiled in
// 'dest' mode, but I can't find a nice way to structure the
}
}
-pub fn trans_to_lvalue<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- name: &str)
- -> DatumBlock<'a, Lvalue> {
+pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ name: &str)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
/*!
* Translates an expression in "lvalue" mode -- meaning that it
* returns a reference to the memory that the expr represents.
return datum.to_lvalue_datum(bcx, name, expr.id);
}
-fn trans_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* A version of `trans` that ignores adjustments. You almost
* certainly do not want to call this directly.
}
};
- fn nil<'a>(bcx: &'a Block<'a>, ty: ty::t) -> DatumBlock<'a, Expr> {
+ fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
let datum = immediate_rvalue(llval, ty);
DatumBlock::new(bcx, datum.to_expr_datum())
}
}
-fn trans_datum_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let fcx = bcx.fcx;
let _icx = push_ctxt("trans_datum_unadjusted");
}
}
-fn trans_rec_field<'a>(bcx: &'a Block<'a>,
- base: &ast::Expr,
- field: ast::Ident)
- -> DatumBlock<'a, Expr> {
+fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ base: &ast::Expr,
+ field: ast::Ident)
+ -> DatumBlock<'blk, 'tcx, Expr> {
//! Translates `base.field`.
let mut bcx = bcx;
})
}
-fn trans_index<'a>(bcx: &'a Block<'a>,
- index_expr: &ast::Expr,
- base: &ast::Expr,
- idx: &ast::Expr,
- method_call: MethodCall)
- -> DatumBlock<'a, Expr> {
+fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ index_expr: &ast::Expr,
+ base: &ast::Expr,
+ idx: &ast::Expr,
+ method_call: MethodCall)
+ -> DatumBlock<'blk, 'tcx, Expr> {
//! Translates `base[idx]`.
let _icx = push_ctxt("trans_index");
let mut bcx = bcx;
// Check for overloaded index.
- let method_ty = ccx.tcx
+ let method_ty = ccx.tcx()
.method_map
.borrow()
.find(&method_call)
let ix_size = machine::llbitsize_of_real(bcx.ccx(),
val_ty(ix_val));
let int_size = machine::llbitsize_of_real(bcx.ccx(),
- ccx.int_type);
+ ccx.int_type());
let ix_val = {
if ix_size < int_size {
if ty::type_is_signed(expr_ty(bcx, idx)) {
- SExt(bcx, ix_val, ccx.int_type)
- } else { ZExt(bcx, ix_val, ccx.int_type) }
+ SExt(bcx, ix_val, ccx.int_type())
+ } else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
- Trunc(bcx, ix_val, ccx.int_type)
+ Trunc(bcx, ix_val, ccx.int_type())
} else {
ix_val
}
DatumBlock::new(bcx, elt_datum)
}
-fn trans_def<'a>(bcx: &'a Block<'a>,
- ref_expr: &ast::Expr,
- def: def::Def)
- -> DatumBlock<'a, Expr>
-{
+fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ ref_expr: &ast::Expr,
+ def: def::Def)
+ -> DatumBlock<'blk, 'tcx, Expr> {
//! Translates a reference to a path.
let _icx = push_ctxt("trans_def_lvalue");
trans_def_fn_unadjusted(bcx, ref_expr, def)
}
def::DefStatic(did, _) => {
+ // There are three things that may happen here:
+ // 1) If the static item is defined in this crate, it will be
+ // translated using `get_item_val`, and we return a pointer to
+ // the result.
+ // 2) If the static item is defined in another crate, but is
+ // marked inlineable, then it will be inlined into this crate
+ // and then translated with `get_item_val`. Again, we return a
+ // pointer to the result.
+ // 3) If the static item is defined in another crate and is not
+ // marked inlineable, then we add (or reuse) a declaration of
+ // an external global, and return a pointer to that.
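+            //
+            // Illustrative sketch of case 3 (assumed IR shape, not
+            // verbatim): a non-inlineable cross-crate `static FOO: int`
+            // becomes a single declaration along the lines of
+            //     @FOO = external global i64
+            // whose ValueRef is cached in `extern_const_values`.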
let const_ty = expr_ty(bcx, ref_expr);
- fn get_did(ccx: &CrateContext, did: ast::DefId)
- -> ast::DefId {
- if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(ccx, did)
- } else {
- did
- }
- }
-
- fn get_val<'a>(bcx: &'a Block<'a>, did: ast::DefId, const_ty: ty::t)
- -> ValueRef {
+ fn get_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, did: ast::DefId, const_ty: ty::t)
+ -> ValueRef {
// For external constants, we don't inline.
if did.krate == ast::LOCAL_CRATE {
+ // Case 1 or 2. (The inlining in case 2 produces a new
+ // DefId in LOCAL_CRATE.)
+
// The LLVM global has the type of its initializer,
// which may not be equal to the enum's type for
// non-C-like enums.
let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
PointerCast(bcx, val, pty)
} else {
- match bcx.ccx().extern_const_values.borrow().find(&did) {
+ // Case 3.
+ match bcx.ccx().extern_const_values().borrow().find(&did) {
None => {} // Continue.
Some(llval) => {
return *llval;
&bcx.ccx().sess().cstore,
did);
let llval = symbol.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(bcx.ccx().llmod,
+ llvm::LLVMAddGlobal(bcx.ccx().llmod(),
llty.to_ref(),
buf)
});
- bcx.ccx().extern_const_values.borrow_mut()
+ bcx.ccx().extern_const_values().borrow_mut()
.insert(did, llval);
llval
}
}
}
-
- let did = get_did(bcx.ccx(), did);
+    // The DefId produced by `maybe_instantiate_inline` may or may not
+    // be in LOCAL_CRATE.
+ let did = inline::maybe_instantiate_inline(bcx.ccx(), did);
let val = get_val(bcx, did, const_ty);
DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
}
}
}
-fn trans_rvalue_stmt_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> &'a Block<'a> {
+fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> Block<'blk, 'tcx> {
let mut bcx = bcx;
let _icx = push_ctxt("trans_rvalue_stmt");
}
}
-fn trans_rvalue_dps_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
+fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
let mut bcx = bcx;
let tcx = bcx.tcx();
}
}
-fn trans_def_dps_unadjusted<'a>(
- bcx: &'a Block<'a>,
- ref_expr: &ast::Expr,
- def: def::Def,
- dest: Dest)
- -> &'a Block<'a> {
+fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ ref_expr: &ast::Expr,
+ def: def::Def,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_def_dps_unadjusted");
let lldest = match dest {
}
}
-fn trans_def_fn_unadjusted<'a>(bcx: &'a Block<'a>,
- ref_expr: &ast::Expr,
- def: def::Def) -> DatumBlock<'a, Expr> {
+fn trans_def_fn_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ ref_expr: &ast::Expr,
+ def: def::Def)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_def_datum_unadjusted");
let llfn = match def {
DatumBlock::new(bcx, Datum::new(llfn, fn_ty, RvalueExpr(Rvalue::new(ByValue))))
}
-pub fn trans_local_var<'a>(bcx: &'a Block<'a>,
- def: def::Def)
- -> Datum<Lvalue> {
+pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ def: def::Def)
+ -> Datum<Lvalue> {
/*!
* Translates a reference to a local variable or argument.
* This always results in an lvalue datum.
}
};
- fn take_local<'a>(bcx: &'a Block<'a>,
- table: &NodeMap<Datum<Lvalue>>,
- nid: ast::NodeId)
- -> Datum<Lvalue> {
+ fn take_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ table: &NodeMap<Datum<Lvalue>>,
+ nid: ast::NodeId)
+ -> Datum<Lvalue> {
let datum = match table.find(&nid) {
Some(&v) => v,
None => {
}
}
-fn trans_struct<'a>(bcx: &'a Block<'a>,
- fields: &[ast::Field],
- base: Option<Gc<ast::Expr>>,
- expr_span: codemap::Span,
- id: ast::NodeId,
- dest: Dest) -> &'a Block<'a> {
+fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ fields: &[ast::Field],
+ base: Option<Gc<ast::Expr>>,
+ expr_span: codemap::Span,
+ id: ast::NodeId,
+ dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rec");
let ty = node_id_type(bcx, id);
* - `optbase` contains information on the base struct (if any) from
* which remaining fields are copied; see comments on `StructBaseInfo`.
*/
-pub fn trans_adt<'a>(mut bcx: &'a Block<'a>,
- ty: ty::t,
- discr: ty::Disr,
- fields: &[(uint, Gc<ast::Expr>)],
- optbase: Option<StructBaseInfo>,
- dest: Dest) -> &'a Block<'a> {
+pub fn trans_adt<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ ty: ty::t,
+ discr: ty::Disr,
+ fields: &[(uint, Gc<ast::Expr>)],
+ optbase: Option<StructBaseInfo>,
+ dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_adt");
let fcx = bcx.fcx;
let repr = adt::represent_type(bcx.ccx(), ty);
}
-fn trans_immediate_lit<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- lit: ast::Lit)
- -> DatumBlock<'a, Expr> {
+fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ lit: ast::Lit)
+ -> DatumBlock<'blk, 'tcx, Expr> {
// must not be a string constant, that is a RvalueDpsExpr
let _icx = push_ctxt("trans_immediate_lit");
let ty = expr_ty(bcx, expr);
immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}
-fn trans_unary<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- op: ast::UnOp,
- sub_expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ op: ast::UnOp,
+ sub_expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
let mut bcx = bcx;
let _icx = push_ctxt("trans_unary_datum");
// Otherwise, we should be in the RvalueDpsExpr path.
assert!(
op == ast::UnDeref ||
- !ccx.tcx.method_map.borrow().contains_key(&method_call));
+ !ccx.tcx().method_map.borrow().contains_key(&method_call));
let un_ty = expr_ty(bcx, expr);
}
}
-fn trans_uniq_expr<'a>(bcx: &'a Block<'a>,
- box_ty: ty::t,
- contents: &ast::Expr,
- contents_ty: ty::t)
- -> DatumBlock<'a, Expr> {
+fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ty: ty::t,
+ contents: &ast::Expr,
+ contents_ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_uniq_expr");
let fcx = bcx.fcx;
assert!(ty::type_is_sized(bcx.tcx(), contents_ty));
immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}
-fn trans_managed_expr<'a>(bcx: &'a Block<'a>,
- box_ty: ty::t,
- contents: &ast::Expr,
- contents_ty: ty::t)
- -> DatumBlock<'a, Expr> {
+fn trans_managed_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ty: ty::t,
+ contents: &ast::Expr,
+ contents_ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_managed_expr");
let fcx = bcx.fcx;
let ty = type_of::type_of(bcx.ccx(), contents_ty);
immediate_rvalue_bcx(bcx, bx, box_ty).to_expr_datumblock()
}
-fn trans_addr_of<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- subexpr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ subexpr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_addr_of");
let mut bcx = bcx;
let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
// Important to get types for both lhs and rhs, because one might be _|_
// and the other not.
-fn trans_eager_binop<'a>(
- bcx: &'a Block<'a>,
- binop_expr: &ast::Expr,
- binop_ty: ty::t,
- op: ast::BinOp,
- lhs_t: ty::t,
- lhs: ValueRef,
- rhs_t: ty::t,
- rhs: ValueRef)
- -> DatumBlock<'a, Expr> {
+fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ binop_expr: &ast::Expr,
+ binop_ty: ty::t,
+ op: ast::BinOp,
+ lhs_t: ty::t,
+ lhs: ValueRef,
+ rhs_t: ty::t,
+ rhs: ValueRef)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_eager_binop");
let tcx = bcx.tcx();
lazy_or,
}
-fn trans_lazy_binop<'a>(
- bcx: &'a Block<'a>,
- binop_expr: &ast::Expr,
- op: lazy_binop_ty,
- a: &ast::Expr,
- b: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ binop_expr: &ast::Expr,
+ op: lazy_binop_ty,
+ a: &ast::Expr,
+ b: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_lazy_binop");
let binop_ty = expr_ty(bcx, binop_expr);
let fcx = bcx.fcx;
return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
}
-fn trans_binary<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- op: ast::BinOp,
- lhs: &ast::Expr,
- rhs: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ op: ast::BinOp,
+ lhs: &ast::Expr,
+ rhs: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_binary");
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx.method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
+ assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
match op {
ast::BiAnd => {
}
}
-fn trans_overloaded_op<'a, 'b>(
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- method_call: MethodCall,
- lhs: Datum<Expr>,
- rhs: Option<(Datum<Expr>, ast::NodeId)>,
- dest: Option<Dest>)
- -> Result<'a> {
+fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ method_call: MethodCall,
+ lhs: Datum<Expr>,
+ rhs: Option<(Datum<Expr>, ast::NodeId)>,
+ dest: Option<Dest>)
+ -> Result<'blk, 'tcx> {
let method_ty = bcx.tcx().method_map.borrow().get(&method_call).ty;
callee::trans_call_inner(bcx,
Some(expr_info(expr)),
dest)
}
-fn trans_overloaded_call<'a>(
- mut bcx: &'a Block<'a>,
- expr: &ast::Expr,
- callee: Gc<ast::Expr>,
- args: &[Gc<ast::Expr>],
- dest: Option<Dest>)
- -> &'a Block<'a> {
+fn trans_overloaded_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ callee: Gc<ast::Expr>,
+ args: &[Gc<ast::Expr>],
+ dest: Option<Dest>)
+ -> Block<'blk, 'tcx> {
let method_call = MethodCall::expr(expr.id);
let method_type = bcx.tcx()
.method_map
bcx
}
-fn int_cast(bcx: &Block,
+fn int_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef,
}
}
-fn float_cast(bcx: &Block,
+fn float_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef)
}
}
-fn trans_imm_cast<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- id: ast::NodeId)
- -> DatumBlock<'a, Expr> {
+fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ id: ast::NodeId)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_cast");
let mut bcx = bcx;
let ccx = bcx.ccx();
return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
}
-fn trans_assign_op<'a>(
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- op: ast::BinOp,
- dst: &ast::Expr,
- src: Gc<ast::Expr>)
- -> &'a Block<'a> {
+fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ op: ast::BinOp,
+ dst: &ast::Expr,
+ src: Gc<ast::Expr>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_assign_op");
let mut bcx = bcx;
return result_datum.store_to(bcx, dst_datum.val);
}
-fn auto_ref<'a>(bcx: &'a Block<'a>,
- datum: Datum<Expr>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ datum: Datum<Expr>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
// Ensure cleanup of `datum` if not already scheduled and obtain
DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(ByValue))))
}
-fn deref_multiple<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- times: uint)
- -> DatumBlock<'a, Expr> {
+fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ times: uint)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let mut datum = datum;
for i in range(0, times) {
DatumBlock { bcx: bcx, datum: datum }
}
-fn deref_once<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- method_call: MethodCall)
- -> DatumBlock<'a, Expr> {
+fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ method_call: MethodCall)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
debug!("deref_once(expr={}, datum={}, method_call={})",
let mut bcx = bcx;
// Check for overloaded deref.
- let method_ty = ccx.tcx.method_map.borrow()
+ let method_ty = ccx.tcx().method_map.borrow()
.find(&method_call).map(|method| method.ty);
let datum = match method_ty {
Some(method_ty) => {
return r;
- fn deref_owned_pointer<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- content_ty: ty::t)
- -> DatumBlock<'a, Expr> {
+ fn deref_owned_pointer<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ content_ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* We microoptimize derefs of owned pointers a bit here.
* Basically, the idea is to make the deref of an rvalue
}
};
unsafe {
+ // Declare a symbol `foo` with the desired linkage.
let g1 = ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty2.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty2.to_ref(), buf)
});
llvm::SetLinkage(g1, linkage);
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
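+            //
+            // Illustrative sketch of the resulting IR when `foo` has weak
+            // linkage (assumed shape, not verbatim output):
+            //     @foo = extern_weak global i32
+            //     @_rust_extern_with_linkage_foo = internal global i32* @foo
+            // so a `foo` discarded at link time is observed as a null
+            // pointer rather than as a link error.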
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(ident.get());
let g2 = real_name.with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
}
}
None => unsafe {
+ // Generate an external declaration.
ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
})
}
}
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let llfn = base::get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name,
cc,
llfn_ty,
llfn
}
-pub fn trans_native_call<'a>(
- bcx: &'a Block<'a>,
- callee_ty: ty::t,
- llfn: ValueRef,
- llretptr: ValueRef,
- llargs_rust: &[ValueRef],
- passed_arg_tys: Vec<ty::t> )
- -> &'a Block<'a> {
+pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ callee_ty: ty::t,
+ llfn: ValueRef,
+ llretptr: ValueRef,
+ llargs_rust: &[ValueRef],
+                                     passed_arg_tys: Vec<ty::t>)
+ -> Block<'blk, 'tcx> {
/*!
* Prepares a call to a native function. This requires adapting
* from the Rust argument passing rules to the native rules.
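     *
     * Rough sketch of the adaptation (illustrative, not verbatim): a
     * Rust-level call `f(x)` returning a struct may, on the foreign
     * side, take `x` by pointer and write its result through an out
     * pointer; each value in `llargs_rust` is spilled or bitcast as the
     * foreign ABI requires, and the foreign return value is stored back
     * through `llretptr`.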
llfn={}, \
llretptr={})",
callee_ty.repr(tcx),
- ccx.tn.val_to_string(llfn),
- ccx.tn.val_to_string(llretptr));
+ ccx.tn().val_to_string(llfn),
+ ccx.tn().val_to_string(llretptr));
let (fn_abi, fn_sig) = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref fn_ty) => (fn_ty.abi, fn_ty.sig.clone()),
debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
i,
- ccx.tn.val_to_string(llarg_rust),
+ ccx.tn().val_to_string(llarg_rust),
rust_indirect,
- ccx.tn.type_to_string(arg_tys[i].ty));
+ ccx.tn().type_to_string(arg_tys[i].ty));
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
}
debug!("llarg_rust={} (after indirection)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Check whether we need to do any casting
match arg_tys[i].cast {
}
debug!("llarg_rust={} (after casting)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_tys[i].is_indirect();
};
debug!("argument {}, llarg_foreign={}",
- i, ccx.tn.val_to_string(llarg_foreign));
+ i, ccx.tn().val_to_string(llarg_foreign));
// fill padding with undef value
match arg_tys[i].pad {
None => fn_type.ret_ty.ty
};
- debug!("llretptr={}", ccx.tn.val_to_string(llretptr));
- debug!("llforeign_retval={}", ccx.tn.val_to_string(llforeign_retval));
- debug!("llrust_ret_ty={}", ccx.tn.type_to_string(llrust_ret_ty));
- debug!("llforeign_ret_ty={}", ccx.tn.type_to_string(llforeign_ret_ty));
+ debug!("llretptr={}", ccx.tn().val_to_string(llretptr));
+ debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval));
+ debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty));
+ debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty));
if llrust_ret_ty == llforeign_ret_ty {
base::store_ty(bcx, llforeign_retval, llretptr, fn_sig.output)
register_foreign_item_fn(ccx, abi, ty,
lname.get().as_slice(),
Some(foreign_item.span));
+ // Unlike for other items, we shouldn't call
+ // `base::update_linkage` here. Foreign items have
+ // special linkage requirements, which are handled
+ // inside `foreign::register_*`.
}
}
}
_ => {}
}
- ccx.item_symbols.borrow_mut().insert(foreign_item.id,
+ ccx.item_symbols().borrow_mut().insert(foreign_item.id,
lname.get().to_string());
}
}
let llfn = base::decl_fn(ccx, name, cconv, llfn_ty, ty::mk_nil());
add_argument_attributes(&tys, llfn);
debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
- ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
add_argument_attributes(&tys, llfn);
debug!("register_rust_fn_with_foreign_abi(node_id={:?}, llfn_ty={}, llfn={})",
- node_id, ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let t = ty::node_id_to_type(tcx, id).subst(
ccx.tcx(), ¶m_substs.substs);
- let ps = ccx.tcx.map.with_path(id, |path| {
+ let ps = ccx.tcx().map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
link::mangle(path.chain(abi.move_iter()), hash)
});
_ => {
ccx.sess().bug(format!("build_rust_fn: extern fn {} has ty {}, \
expected a bare fn ty",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
t.repr(tcx)).as_slice());
}
};
debug!("build_rust_fn: path={} id={} t={}",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
id, t.repr(tcx));
let llfn = base::decl_internal_rust_fn(ccx, t, ps.as_slice());
let tcx = ccx.tcx();
debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={})",
- ccx.tn.val_to_string(llrustfn),
- ccx.tn.val_to_string(llwrapfn),
+ ccx.tn().val_to_string(llrustfn),
+ ccx.tn().val_to_string(llwrapfn),
t.repr(ccx.tcx()));
// Avoid all the Rust generation stuff and just generate raw
let the_block =
"the block".with_c_str(
- |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llwrapfn, s));
+ |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn, s));
let builder = ccx.builder();
builder.position_at_end(the_block);
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={}",
- ccx.tn.val_to_string(llforeign_outptr));
+ ccx.tn().val_to_string(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={} (casted)",
- ccx.tn.val_to_string(llrust_retptr));
+ ccx.tn().val_to_string(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
allocad={}, \
llrust_ret_ty={}, \
return_ty={}",
- ccx.tn.val_to_string(slot),
- ccx.tn.type_to_string(llrust_ret_ty),
+ ccx.tn().val_to_string(slot),
+ ccx.tn().type_to_string(llrust_ret_ty),
tys.fn_sig.output.repr(tcx));
llrust_args.push(slot);
return_alloca = Some(slot);
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
debug!("llforeign_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llforeign_arg));
+ i, ccx.tn().val_to_string(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
};
debug!("llrust_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llrust_arg));
+ i, ccx.tn().val_to_string(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
- debug!("calling llrustfn = {}, t = {}", ccx.tn.val_to_string(llrustfn), t.repr(ccx.tcx()));
+ debug!("calling llrustfn = {}, t = {}",
+ ccx.tn().val_to_string(llrustfn), t.repr(ccx.tcx()));
let attributes = base::get_fn_llvm_attributes(ccx, t);
let llrust_ret_val = builder.call(llrustfn, llrust_args.as_slice(), Some(attributes));
fn_ty={} -> {}, \
ret_def={}",
ty.repr(ccx.tcx()),
- ccx.tn.types_to_str(llsig.llarg_tys.as_slice()),
- ccx.tn.type_to_string(llsig.llret_ty),
- ccx.tn.types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
- ccx.tn.type_to_string(fn_ty.ret_ty.ty),
+ ccx.tn().types_to_str(llsig.llarg_tys.as_slice()),
+ ccx.tn().type_to_string(llsig.llret_ty),
+ ccx.tn().types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
+ ccx.tn().type_to_string(fn_ty.ret_ty.ty),
ret_def);
ForeignTypes {
use syntax::ast;
use syntax::parse::token;
-pub fn trans_free<'a>(cx: &'a Block<'a>, v: ValueRef) -> &'a Block<'a> {
+pub fn trans_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_free");
callee::trans_lang_call(cx,
langcall(cx, None, "", FreeFnLangItem),
Some(expr::Ignore)).bcx
}
-fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
- align: ValueRef) -> &'a Block<'a> {
+pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
+ size: ValueRef, align: ValueRef)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx,
Some(expr::Ignore)).bcx
}
-pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
- align: u64) -> &'a Block<'a> {
- trans_exchange_free_internal(cx,
- v,
- C_uint(cx.ccx(), size as uint),
- C_uint(cx.ccx(), align as uint))
+pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
+ size: u64, align: u64) -> Block<'blk, 'tcx> {
+ trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
+ C_uint(cx.ccx(), align as uint))
}
-pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
- content_ty: ty::t) -> &'a Block<'a> {
+pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef,
+ content_ty: ty::t) -> Block<'blk, 'tcx> {
assert!(ty::type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
}
}
-pub fn take_ty<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+pub fn take_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("take_ty");
match ty::get(t).sty {
return ty::mk_i8();
}
match ty::get(t).sty {
- ty::ty_box(typ) if !ty::type_needs_drop(tcx, typ) =>
- ty::mk_box(tcx, ty::mk_i8()),
-
ty::ty_uniq(typ) if !ty::type_needs_drop(tcx, typ)
&& ty::type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8()
} else {
- ty::mk_uniq(tcx, ty::mk_i8())
+ t
}
}
_ => t
}
}
-pub fn drop_ty<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty(t={})", t.repr(bcx.tcx()));
let _icx = push_ctxt("drop_ty");
bcx
}
-pub fn drop_ty_immediate<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
Store(bcx, v, vp);
debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
let t = get_drop_glue_type(ccx, t);
debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
- match ccx.drop_glues.borrow().find(&t) {
+ match ccx.drop_glues().borrow().find(&t) {
Some(&glue) => return glue,
_ => { }
}
};
let llfnty = Type::glue_fn(ccx, llty);
- let glue = declare_generic_glue(ccx, t, llfnty, "drop");
- ccx.drop_glues.borrow_mut().insert(t, glue);
+ let (glue, new_sym) = match ccx.available_drop_glues().borrow().find(&t) {
+ Some(old_sym) => {
+ let glue = decl_cdecl_fn(ccx, old_sym.as_slice(), llfnty, ty::mk_nil());
+ (glue, None)
+ },
+ None => {
+ let (sym, glue) = declare_generic_glue(ccx, t, llfnty, "drop");
+ (glue, Some(sym))
+ },
+ };
- make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
+ ccx.drop_glues().borrow_mut().insert(t, glue);
+
+ // To avoid infinite recursion, don't `make_drop_glue` until after we've
+ // added the entry to the `drop_glues` cache.
+ match new_sym {
+ Some(sym) => {
+ ccx.available_drop_glues().borrow_mut().insert(t, sym);
+ // We're creating a new drop glue, so also generate a body.
+ make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
+ },
+ None => {},
+ }
glue
}
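
The hunk above turns drop-glue creation into a declare-then-define step: the freshly declared glue is inserted into the `drop_glues` cache before its body is generated, so a recursive type that needs its own glue while the body is being built finds the cache entry instead of recursing forever. A minimal sketch of that memoization pattern (hypothetical names, not the compiler's API):

    use std::collections::HashMap;

    // Sketch of "declare, cache, then define" for recursive structures.
    #[derive(Clone)]
    struct Glue { symbol: String }

    struct Ctx { cache: HashMap<String, Glue> }

    impl Ctx {
        fn get_glue(&mut self, ty: &str) -> Glue {
            if let Some(g) = self.cache.get(ty) {
                return g.clone();
            }
            // Declare first and publish the declaration in the cache...
            let glue = Glue { symbol: format!("drop.{}", ty) };
            self.cache.insert(ty.to_string(), glue.clone());
            // ...then emit the body; if the body asks for glue for `ty`
            // again (a recursive type), the lookup above breaks the cycle.
            self.make_body(ty);
            glue
        }

        fn make_body(&mut self, _ty: &str) { /* would call get_glue recursively */ }
    }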
Some(visit_glue) => visit_glue,
None => {
debug!("+++ lazily_emit_tydesc_glue VISIT {}", ppaux::ty_to_string(ccx.tcx(), ti.ty));
- let glue_fn = declare_generic_glue(ccx, ti.ty, llfnty, "visit");
+
+ let (glue_fn, new_sym) = match ccx.available_visit_glues().borrow().find(&ti.ty) {
+ Some(old_sym) => {
+ let glue_fn = decl_cdecl_fn(ccx, old_sym.as_slice(), llfnty, ty::mk_nil());
+ (glue_fn, None)
+ },
+ None => {
+ let (sym, glue_fn) = declare_generic_glue(ccx, ti.ty, llfnty, "visit");
+ (glue_fn, Some(sym))
+ },
+ };
+
ti.visit_glue.set(Some(glue_fn));
- make_generic_glue(ccx, ti.ty, glue_fn, make_visit_glue, "visit");
+
+ match new_sym {
+ Some(sym) => {
+ ccx.available_visit_glues().borrow_mut().insert(ti.ty, sym);
+ make_generic_glue(ccx, ti.ty, glue_fn, make_visit_glue, "visit");
+ },
+ None => {},
+ }
+
debug!("--- lazily_emit_tydesc_glue VISIT {}", ppaux::ty_to_string(ccx.tcx(), ti.ty));
glue_fn
}
}
// See [Note-arg-mode]
-pub fn call_visit_glue(bcx: &Block, v: ValueRef, tydesc: ValueRef) {
+pub fn call_visit_glue(bcx: Block, v: ValueRef, tydesc: ValueRef) {
let _icx = push_ctxt("call_visit_glue");
// Select the glue function to call from the tydesc
Call(bcx, llfn, [llrawptr], None);
}
-fn make_visit_glue<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+fn make_visit_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("make_visit_glue");
let mut bcx = bcx;
let (visitor_trait, object_ty) = match ty::visitor_object_ty(bcx.tcx(),
bcx
}
-fn trans_struct_drop_flag<'a>(mut bcx: &'a Block<'a>,
- t: ty::t,
- v0: ValueRef,
- dtor_did: ast::DefId,
- class_did: ast::DefId,
- substs: &subst::Substs)
- -> &'a Block<'a> {
+fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ v0: ValueRef,
+ dtor_did: ast::DefId,
+ class_did: ast::DefId,
+ substs: &subst::Substs)
+ -> Block<'blk, 'tcx> {
let repr = adt::represent_type(bcx.ccx(), t);
let struct_data = if ty::type_is_sized(bcx.tcx(), t) {
v0
})
}
-fn trans_struct_drop<'a>(bcx: &'a Block<'a>,
- t: ty::t,
- v0: ValueRef,
- dtor_did: ast::DefId,
- class_did: ast::DefId,
- substs: &subst::Substs)
- -> &'a Block<'a> {
+fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ v0: ValueRef,
+ dtor_did: ast::DefId,
+ class_did: ast::DefId,
+ substs: &subst::Substs)
+ -> Block<'blk, 'tcx> {
let repr = adt::represent_type(bcx.ccx(), t);
// Find and call the actual destructor
})
}
-fn size_and_align_of_dst<'a>(bcx: &'a Block<'a>, t :ty::t, info: ValueRef) -> (ValueRef, ValueRef) {
+fn size_and_align_of_dst(bcx: Block, t: ty::t, info: ValueRef) -> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
bcx.ty_to_string(t), bcx.val_to_string(info));
if ty::type_is_sized(bcx.tcx(), t) {
}
}
-fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
+fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
match ty::get(t).sty {
let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
- trans_exchange_free_internal(bcx, llbox, llsize, llalign)
+ trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
})
}
_ => {
with_cond(bcx, IsNotNull(bcx, env), |bcx| {
let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]);
let dtor = Load(bcx, dtor_ptr);
- let cdata = GEPi(bcx, env, [0u, abi::box_field_body]);
- Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], None);
-
- // Free the environment itself
- // FIXME: #13994: pass align and size here
- trans_exchange_free(bcx, env, 0, 8)
+ Call(bcx, dtor, [PointerCast(bcx, box_cell_v, Type::i8p(bcx.ccx()))], None);
+ bcx
})
}
ty::ty_trait(..) => {
}
}
-fn decr_refcnt_maybe_free<'a>(bcx: &'a Block<'a>,
- box_ptr_ptr: ValueRef,
- t: ty::t) -> &'a Block<'a> {
+fn decr_refcnt_maybe_free<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ptr_ptr: ValueRef,
+ t: ty::t) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("decr_refcnt_maybe_free");
let fcx = bcx.fcx;
let ccx = bcx.ccx();
next_bcx
}
-fn incr_refcnt_of_boxed<'a>(bcx: &'a Block<'a>,
- box_ptr_ptr: ValueRef) -> &'a Block<'a> {
+fn incr_refcnt_of_boxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ptr_ptr: ValueRef) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("incr_refcnt_of_boxed");
let ccx = bcx.ccx();
let box_ptr = Load(bcx, box_ptr_ptr);
pub fn declare_tydesc(ccx: &CrateContext, t: ty::t) -> tydesc_info {
// If emit_tydescs already ran, then we shouldn't be creating any new
// tydescs.
- assert!(!ccx.finished_tydescs.get());
+ assert!(!ccx.finished_tydescs().get());
let llty = type_of(ccx, t);
debug!("+++ declare_tydesc {} {}", ppaux::ty_to_string(ccx.tcx(), t), name);
let gvar = name.as_slice().with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(ccx.llmod, ccx.tydesc_type().to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), ccx.tydesc_type().to_ref(), buf)
}
});
note_unique_llvm_symbol(ccx, name);
}
fn declare_generic_glue(ccx: &CrateContext, t: ty::t, llfnty: Type,
- name: &str) -> ValueRef {
+ name: &str) -> (String, ValueRef) {
let _icx = push_ctxt("declare_generic_glue");
let fn_nm = mangle_internal_name_by_type_and_seq(
ccx,
t,
format!("glue_{}", name).as_slice());
let llfn = decl_cdecl_fn(ccx, fn_nm.as_slice(), llfnty, ty::mk_nil());
- note_unique_llvm_symbol(ccx, fn_nm);
- return llfn;
+ note_unique_llvm_symbol(ccx, fn_nm.clone());
+ return (fn_nm, llfn);
}
fn make_generic_glue(ccx: &CrateContext,
t: ty::t,
llfn: ValueRef,
- helper: <'a> |&'a Block<'a>, ValueRef, ty::t|
- -> &'a Block<'a>,
+ helper: <'blk, 'tcx> |Block<'blk, 'tcx>, ValueRef, ty::t|
+ -> Block<'blk, 'tcx>,
name: &str)
-> ValueRef {
let _icx = push_ctxt("make_generic_glue");
let bcx = init_function(&fcx, false, ty::mk_nil());
- llvm::SetLinkage(llfn, llvm::InternalLinkage);
- ccx.stats.n_glues_created.set(ccx.stats.n_glues_created.get() + 1u);
+ update_linkage(ccx, llfn, None, OriginalTranslation);
+
+ ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1u);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
pub fn emit_tydescs(ccx: &CrateContext) {
let _icx = push_ctxt("emit_tydescs");
// As of this point, allow no more tydescs to be created.
- ccx.finished_tydescs.set(true);
+ ccx.finished_tydescs().set(true);
let glue_fn_ty = Type::generic_glue_fn(ccx).ptr_to();
- for (_, ti) in ccx.tydescs.borrow().iter() {
+ for (_, ti) in ccx.tydescs().borrow().iter() {
// Each of the glue functions needs to be cast to a generic type
// before being put into the tydesc because we only have a singleton
// tydesc type. Then we'll recast each function to its real type when
let drop_glue = unsafe {
llvm::LLVMConstPointerCast(get_drop_glue(ccx, ti.ty), glue_fn_ty.to_ref())
};
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() + 1);
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() + 1);
let visit_glue =
match ti.visit_glue.get() {
None => {
- ccx.stats.n_null_glues.set(ccx.stats.n_null_glues.get() +
+ ccx.stats().n_null_glues.set(ccx.stats().n_null_glues.get() +
1u);
C_null(glue_fn_ty)
}
Some(v) => {
unsafe {
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() +
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() +
1);
llvm::LLVMConstPointerCast(v, glue_fn_ty.to_ref())
}
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use llvm::{AvailableExternallyLinkage, SetLinkage};
+use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage};
use metadata::csearch;
use middle::astencode;
use middle::trans::base::{push_ctxt, trans_item, get_item_val, trans_fn};
use syntax::ast_util::{local_def, PostExpansionMethod};
use syntax::ast_util;
-pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
- -> ast::DefId {
+fn instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
+ -> Option<ast::DefId> {
let _icx = push_ctxt("maybe_instantiate_inline");
- match ccx.external.borrow().find(&fn_id) {
+ match ccx.external().borrow().find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline({}): already inline as node id {}",
ty::item_path_str(ccx.tcx(), fn_id), node_id);
- return local_def(node_id);
+ return Some(local_def(node_id));
}
Some(&None) => {
- return fn_id; // Not inlinable
+ return None; // Not inlinable
}
None => {
// Not seen yet
csearch::maybe_get_item_ast(
ccx.tcx(), fn_id,
|a,b,c,d| astencode::decode_inlined_item(a, b, c, d));
- return match csearch_result {
+
+ let inline_def = match csearch_result {
csearch::not_found => {
- ccx.external.borrow_mut().insert(fn_id, None);
- fn_id
+ ccx.external().borrow_mut().insert(fn_id, None);
+ return None;
}
csearch::found(ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
trans_item(ccx, &*item);
- // We're bringing an external global into this crate, but we don't
- // want to create two copies of the global. If we do this, then if
- // you take the address of the global in two separate crates you get
- // two different addresses. This is bad for things like conditions,
- // but it could possibly have other adverse side effects. We still
- // want to achieve the optimizations related to this global,
- // however, so we use the available_externally linkage which llvm
- // provides
- match item.node {
+ let linkage = match item.node {
+ ast::ItemFn(_, _, _, ref generics, _) => {
+ if generics.is_type_parameterized() {
+ // Generics have no symbol, so they can't be given any
+ // linkage.
+ None
+ } else {
+ if ccx.sess().opts.cg.codegen_units == 1 {
+ // We could use AvailableExternallyLinkage here,
+ // but InternalLinkage allows LLVM to optimize more
+ // aggressively (at the cost of sometimes
+ // duplicating code).
+ Some(InternalLinkage)
+ } else {
+ // With multiple compilation units, duplicated code
+ // is more of a problem. Also, `codegen_units > 1`
+ // means the user is okay with losing some
+ // performance.
+ Some(AvailableExternallyLinkage)
+ }
+ }
+ }
ast::ItemStatic(_, mutbl, _) => {
- let g = get_item_val(ccx, item.id);
- // see the comment in get_item_val() as to why this check is
- // performed here.
- if ast_util::static_has_significant_address(
- mutbl,
- item.attrs.as_slice()) {
- SetLinkage(g, AvailableExternallyLinkage);
+ if !ast_util::static_has_significant_address(mutbl, item.attrs.as_slice()) {
+ // Inlined static items use internal linkage when
+ // possible, so that LLVM will coalesce globals with
+ // identical initializers. (It only does this for
+ // globals with unnamed_addr and either internal or
+ // private linkage.)
+ Some(InternalLinkage)
+ } else {
+ // The address is significant, so we can't create an
+ // internal copy of the static. (The copy would have a
+ // different address from the original.)
+ Some(AvailableExternallyLinkage)
}
}
- _ => {}
+ _ => unreachable!(),
+ };
+
+ match linkage {
+ Some(linkage) => {
+ let g = get_item_val(ccx, item.id);
+ SetLinkage(g, linkage);
+ }
+ None => {}
}
local_def(item.id)
}
csearch::found(ast::IIForeign(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
local_def(item.id)
}
csearch::found_parent(parent_id, ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(parent_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, parent_id);
+ ccx.external().borrow_mut().insert(parent_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
let mut my_id = 0;
match item.node {
let vs_there = ty::enum_variants(ccx.tcx(), parent_id);
for (here, there) in vs_here.iter().zip(vs_there.iter()) {
if there.id == fn_id { my_id = here.id.node; }
- ccx.external.borrow_mut().insert(there.id, Some(here.id.node));
+ ccx.external().borrow_mut().insert(there.id, Some(here.id.node));
}
}
ast::ItemStruct(ref struct_def, _) => {
match struct_def.ctor_id {
None => {}
Some(ctor_id) => {
- ccx.external.borrow_mut().insert(fn_id, Some(ctor_id));
+ ccx.external().borrow_mut().insert(fn_id, Some(ctor_id));
my_id = ctor_id;
}
}
match impl_item {
ast::ProvidedInlinedTraitItem(mth) |
ast::RequiredInlinedTraitItem(mth) => {
- ccx.external.borrow_mut().insert(fn_id, Some(mth.id));
- ccx.external_srcs.borrow_mut().insert(mth.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(mth.id));
+ ccx.external_srcs().borrow_mut().insert(mth.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
}
}
¶m_substs::empty(),
mth.id,
[]);
+ // Use InternalLinkage so LLVM can optimize more
+ // aggressively.
+ SetLinkage(llfn, InternalLinkage);
}
local_def(mth.id)
}
}
}
};
+
+ return Some(inline_def);
+}
+
+pub fn get_local_instance(ccx: &CrateContext, fn_id: ast::DefId)
+ -> Option<ast::DefId> {
+ if fn_id.krate == ast::LOCAL_CRATE {
+ Some(fn_id)
+ } else {
+ instantiate_inline(ccx, fn_id)
+ }
+}
+
+pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId) -> ast::DefId {
+ get_local_instance(ccx, fn_id).unwrap_or(fn_id)
}
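
To summarize the linkage policy that `instantiate_inline` now applies to items inlined from other crates: internal linkage when LLVM may duplicate the code, available_externally when a unique address must be preserved or multiple codegen units make duplication costly. A hedged sketch of the decision (illustrative names, not rustc's types):

    #[derive(Debug, PartialEq)]
    enum Linkage { Internal, AvailableExternally }

    // With one codegen unit, internal linkage lets LLVM optimize more
    // aggressively, at the cost of possibly duplicating code.
    fn linkage_for_inlined_fn(codegen_units: usize) -> Linkage {
        if codegen_units == 1 { Linkage::Internal } else { Linkage::AvailableExternally }
    }

    // A static whose address is significant cannot get an internal copy,
    // since the copy would have a different address from the original.
    fn linkage_for_inlined_static(address_significant: bool) -> Linkage {
        if address_significant { Linkage::AvailableExternally } else { Linkage::Internal }
    }

    fn main() {
        assert_eq!(linkage_for_inlined_fn(1), Linkage::Internal);
        assert_eq!(linkage_for_inlined_static(true), Linkage::AvailableExternally);
    }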
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
- for transmute_restriction in ccx.tcx
+ for transmute_restriction in ccx.tcx()
.transmute_restrictions
.borrow()
.iter() {
ccx.sess().abort_if_errors();
}
-pub fn trans_intrinsic_call<'a>(mut bcx: &'a Block<'a>, node: ast::NodeId,
- callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
- args: callee::CallArgs, dest: expr::Dest,
- substs: subst::Substs, call_info: NodeInfo) -> Result<'a> {
+pub fn trans_intrinsic_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, node: ast::NodeId,
+ callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
+ args: callee::CallArgs, dest: expr::Dest,
+ substs: subst::Substs, call_info: NodeInfo)
+ -> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let hash = ty::hash_crate_independent(
ccx.tcx(),
*substs.types.get(FnSpace, 0),
- &ccx.link_meta.crate_hash);
+ &ccx.link_meta().crate_hash);
// NB: This needs to be kept in lockstep with the TypeId struct in
// the intrinsic module
C_named_struct(llret_ty, [C_u64(ccx, hash)])
Result::new(bcx, llresult)
}
-fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool,
+fn copy_intrinsic(bcx: Block, allow_overlap: bool, volatile: bool,
tp_ty: ty::t, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
+ let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
let name = if allow_overlap {
if int_size == 32 {
"llvm.memmove.p0i8.p0i8.i32"
C_bool(ccx, volatile)], None)
}
-fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t,
+fn memset_intrinsic(bcx: Block, volatile: bool, tp_ty: ty::t,
dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
+ let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
"llvm.memset.p0i8.i32"
} else {
"llvm.memset.p0i8.i64"
C_bool(ccx, volatile)], None)
}
-fn count_zeros_intrinsic(bcx: &Block, name: &'static str, val: ValueRef) -> ValueRef {
+fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
let y = C_bool(bcx.ccx(), false);
let llfn = bcx.ccx().get_intrinsic(&name);
Call(bcx, llfn, [val, y], None)
}
-fn with_overflow_intrinsic(bcx: &Block, name: &'static str, t: ty::t,
+fn with_overflow_intrinsic(bcx: Block, name: &'static str, t: ty::t,
a: ValueRef, b: ValueRef) -> ValueRef {
let llfn = bcx.ccx().get_intrinsic(&name);
impl LlvmRepr for Type {
fn llrepr(&self, ccx: &CrateContext) -> String {
- ccx.tn.type_to_string(*self)
+ ccx.tn().type_to_string(*self)
}
}
impl LlvmRepr for ValueRef {
fn llrepr(&self, ccx: &CrateContext) -> String {
- ccx.tn.val_to_string(*self)
+ ccx.tn().val_to_string(*self)
}
}
// Returns the number of bytes clobbered by a Store to this type.
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMStoreSizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABISizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// below.
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- let nbits = llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64;
+ let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64;
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
/// Returns the "real" size of the type in bits.
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64
+ llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64
}
}
// space to be consumed.
pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
if llbitsize_of_real(cx, ty) == 0 {
- unsafe { llvm::LLVMConstInt(cx.int_type.to_ref(), 1, False) }
+ unsafe { llvm::LLVMConstInt(cx.int_type().to_ref(), 1, False) }
} else {
llsize_of(cx, ty)
}
// allocations inside a stack frame, which LLVM has a free hand in.
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// and similar ABI-mandated things.
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABIAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
pub fn llalign_of(cx: &CrateContext, ty: Type) -> ValueRef {
unsafe {
return llvm::LLVMConstIntCast(
- llvm::LLVMAlignOf(ty.to_ref()), cx.int_type.to_ref(), False);
+ llvm::LLVMAlignOf(ty.to_ref()), cx.int_type().to_ref(), False);
}
}
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: uint) -> u64 {
unsafe {
- return llvm::LLVMOffsetOfElement(cx.td.lltd, struct_ty.to_ref(), element as u32) as u64;
+ return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(), element as u32) as u64;
}
}
use std::c_str::ToCStr;
use syntax::abi::{Rust, RustCall};
use syntax::parse::token;
-use syntax::{ast, ast_map, visit};
+use syntax::{ast, ast_map, attr, visit};
use syntax::ast_util::PostExpansionMethod;
// drop_glue pointer, size, align.
match *impl_item {
ast::MethodImplItem(method) => {
if method.pe_generics().ty_params.len() == 0u {
- let llfn = get_item_val(ccx, method.id);
- trans_fn(ccx,
- &*method.pe_fn_decl(),
- &*method.pe_body(),
- llfn,
- ¶m_substs::empty(),
- method.id,
- []);
+ let trans_everywhere = attr::requests_inline(method.attrs.as_slice());
+ for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
+ let llfn = get_item_val(ccx, method.id);
+ trans_fn(ccx,
+ &*method.pe_fn_decl(),
+ &*method.pe_body(),
+ llfn,
+ ¶m_substs::empty(),
+ method.id,
+ []);
+ update_linkage(ccx,
+ llfn,
+ Some(method.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
+ }
}
let mut v = TransItemVisitor {
ccx: ccx,
}
}
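
The `maybe_iter` loop above translates an `#[inline]` method into every compilation unit, marking one copy as the original and the rest as inlined copies with weaker linkage. A rough model of that iteration, under the assumption that `maybe_iter(true)` visits each codegen unit once:

    // Sketch: yield (unit, is_origin) pairs; the first unit owns the
    // OriginalTranslation copy, later units get InlinedCopy linkage.
    fn units_to_translate(n_units: usize, trans_everywhere: bool) -> Vec<(usize, bool)> {
        let n = if trans_everywhere { n_units } else { 1 };
        (0..n).map(|u| (u, u == 0)).collect()
    }

    fn main() {
        assert_eq!(units_to_translate(3, true), vec![(0, true), (1, false), (2, false)]);
        assert_eq!(units_to_translate(3, false), vec![(0, true)]);
    }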
-pub fn trans_method_callee<'a>(
- bcx: &'a Block<'a>,
- method_call: MethodCall,
- self_expr: Option<&ast::Expr>,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Callee<'a> {
+pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ method_call: MethodCall,
+ self_expr: Option<&ast::Expr>,
+ arg_cleanup_scope: cleanup::ScopeId)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_method_callee");
let (origin, method_ty) = match bcx.tcx().method_map
}
}
-pub fn trans_static_method_callee(bcx: &Block,
+pub fn trans_static_method_callee(bcx: Block,
method_id: ast::DefId,
trait_id: ast::DefId,
expr_id: ast::NodeId)
let vtable_key = MethodCall::expr(expr_id);
let vtbls = resolve_vtables_in_fn_ctxt(
bcx.fcx,
- ccx.tcx.vtable_map.borrow().get(&vtable_key));
+ ccx.tcx().vtable_map.borrow().get(&vtable_key));
match *vtbls.get_self().unwrap().get(0) {
typeck::vtable_static(impl_did, ref rcvr_substs, ref rcvr_origins) => {
fn method_with_name(ccx: &CrateContext, impl_id: ast::DefId, name: ast::Name)
-> ast::DefId {
- match ccx.impl_method_cache.borrow().find_copy(&(impl_id, name)) {
+ match ccx.impl_method_cache().borrow().find_copy(&(impl_id, name)) {
Some(m) => return m,
None => {}
}
- let impl_items = ccx.tcx.impl_items.borrow();
+ let impl_items = ccx.tcx().impl_items.borrow();
let impl_items =
impl_items.find(&impl_id)
.expect("could not find impl while translating");
.find(|&did| {
match *did {
ty::MethodTraitItemId(did) => {
- ty::impl_or_trait_item(&ccx.tcx,
+ ty::impl_or_trait_item(ccx.tcx(),
did).ident()
.name ==
name
}).expect("could not find method while \
translating");
- ccx.impl_method_cache.borrow_mut().insert((impl_id, name),
+ ccx.impl_method_cache().borrow_mut().insert((impl_id, name),
meth_did.def_id());
meth_did.def_id()
}
-fn trans_monomorphized_callee<'a>(
- bcx: &'a Block<'a>,
- method_call: MethodCall,
- trait_id: ast::DefId,
- n_method: uint,
- vtbl: typeck::vtable_origin)
- -> Callee<'a> {
+fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ method_call: MethodCall,
+ trait_id: ast::DefId,
+ n_method: uint,
+ vtbl: typeck::vtable_origin)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_monomorphized_callee");
match vtbl {
typeck::vtable_static(impl_did, rcvr_substs, rcvr_origins) => {
}
}
-fn combine_impl_and_methods_tps(bcx: &Block,
+fn combine_impl_and_methods_tps(bcx: Block,
node: ExprOrMethodCall,
rcvr_substs: subst::Substs,
rcvr_origins: typeck::vtable_res)
(ty_substs, vtables)
}
-fn trans_trait_callee<'a>(bcx: &'a Block<'a>,
- method_ty: ty::t,
- n_method: uint,
- self_expr: &ast::Expr,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Callee<'a> {
+fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ method_ty: ty::t,
+ n_method: uint,
+ self_expr: &ast::Expr,
+ arg_cleanup_scope: cleanup::ScopeId)
+ -> Callee<'blk, 'tcx> {
/*!
* Create a method callee where the method is coming from a trait
* object (e.g., Box<Trait> type). In this case, we must pull the fn
trans_trait_callee_from_llval(bcx, method_ty, n_method, llval)
}
-pub fn trans_trait_callee_from_llval<'a>(bcx: &'a Block<'a>,
- callee_ty: ty::t,
- n_method: uint,
- llpair: ValueRef)
- -> Callee<'a> {
+pub fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ callee_ty: ty::t,
+ n_method: uint,
+ llpair: ValueRef)
+ -> Callee<'blk, 'tcx> {
/*!
* Same as `trans_trait_callee()` above, except that it is given
* a by-ref pointer to the object pair.
/// Creates the self type and (fake) callee substitutions for an unboxed
/// closure with the given def ID. The static region and type parameters are
/// lies, but we're in trans so it doesn't matter.
-fn get_callee_substitutions_for_unboxed_closure(bcx: &Block,
+fn get_callee_substitutions_for_unboxed_closure(bcx: Block,
def_id: ast::DefId)
-> subst::Substs {
let self_ty = ty::mk_unboxed_closure(bcx.tcx(), def_id, ty::ReStatic);
/// Creates and returns a dynamic vtable for the given type and vtable origin.
/// This is used only for objects.
-fn get_vtable(bcx: &Block,
+fn get_vtable(bcx: Block,
self_ty: ty::t,
origins: typeck::vtable_param_res)
-> ValueRef
// Check the cache.
let hash_id = (self_ty, monomorphize::make_vtable_id(ccx, origins.get(0)));
- match ccx.vtables.borrow().find(&hash_id) {
+ match ccx.vtables().borrow().find(&hash_id) {
Some(&val) => { return val }
None => { }
}
let drop_glue = glue::get_drop_glue(ccx, self_ty);
let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
- ccx.vtables.borrow_mut().insert(hash_id, vtable);
+ ccx.vtables().borrow_mut().insert(hash_id, vtable);
vtable
}
let tbl = C_struct(ccx, components.as_slice(), false);
let sym = token::gensym("vtable");
let vt_gvar = format!("vtable{}", sym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, val_ty(tbl).to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), val_ty(tbl).to_ref(), buf)
});
llvm::LLVMSetInitializer(vt_gvar, tbl);
llvm::LLVMSetGlobalConstant(vt_gvar, llvm::True);
}
}
-fn emit_vtable_methods(bcx: &Block,
+fn emit_vtable_methods(bcx: Block,
impl_id: ast::DefId,
substs: subst::Substs,
vtables: typeck::vtable_res)
}).collect()
}
-pub fn vtable_ptr<'a>(bcx: &'a Block<'a>,
- id: ast::NodeId,
- self_ty: ty::t) -> ValueRef {
+pub fn vtable_ptr(bcx: Block,
+ id: ast::NodeId,
+ self_ty: ty::t) -> ValueRef {
let ccx = bcx.ccx();
let origins = {
- let vtable_map = ccx.tcx.vtable_map.borrow();
+ let vtable_map = ccx.tcx().vtable_map.borrow();
// This trait cast might be because of implicit coercion
- let adjs = ccx.tcx.adjustments.borrow();
+ let adjs = ccx.tcx().adjustments.borrow();
let adjust = adjs.find(&id);
let method_call = if adjust.is_some() && ty::adjust_is_object(adjust.unwrap()) {
MethodCall::autoobject(id)
get_vtable(bcx, self_ty, origins)
}
-pub fn trans_trait_cast<'a>(bcx: &'a Block<'a>,
- datum: Datum<Expr>,
- id: ast::NodeId,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_trait_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ datum: Datum<Expr>,
+ id: ast::NodeId,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
/*!
* Generates the code to convert from a pointer (`Box<T>`, `&T`, etc)
* into an object (`Box<Trait>`, `&Trait`, etc). This means creating a
use back::link::exported_name;
use driver::session;
use llvm::ValueRef;
+use llvm;
use middle::subst;
use middle::subst::Subst;
use middle::trans::base::{set_llvm_fn_attrs, set_inline_hint};
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util::{local_def, PostExpansionMethod};
+use syntax::attr;
use std::hash::{sip, Hash};
pub fn monomorphic_fn(ccx: &CrateContext,
params: real_substs.types.clone()
};
- match ccx.monomorphized.borrow().find(&hash_id) {
+ match ccx.monomorphized().borrow().find(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ty::item_path_str(ccx.tcx(), fn_id));
let map_node = session::expect(
ccx.sess(),
- ccx.tcx.map.find(fn_id.node),
+ ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
match map_node {
ast_map::NodeForeignItem(_) => {
- if ccx.tcx.map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
+ if ccx.tcx().map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), true);
}
debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx()));
let mono_ty = llitem_ty.subst(ccx.tcx(), real_substs);
- ccx.stats.n_monos.set(ccx.stats.n_monos.get() + 1);
+ ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
- let mut monomorphizing = ccx.monomorphizing.borrow_mut();
+ let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.find(&fn_id) {
Some(&d) => d, None => 0
};
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
- ccx.sess().span_fatal(ccx.tcx.map.span(fn_id.node),
+ ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
mono_ty.hash(&mut state);
hash = format!("h{}", state.result());
- ccx.tcx.map.with_path(fn_id.node, |path| {
+ ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, hash.as_slice())
})
};
decl_internal_rust_fn(ccx, mono_ty, s.as_slice())
};
- ccx.monomorphized.borrow_mut().insert(hash_id.take().unwrap(), lldecl);
+ ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
+ let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
+ base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
+ set_llvm_fn_attrs(attrs, lldecl);
+
+ let is_first = !ccx.available_monomorphizations().borrow().contains(&s);
+ if is_first {
+ ccx.available_monomorphizations().borrow_mut().insert(s.clone());
+ }
+
+ let trans_everywhere = attr::requests_inline(attrs);
+ if trans_everywhere && !is_first {
+ llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
+ }
+
+ // If `true`, then `lldecl` should be given a function body.
+ // Otherwise, it should be left as a declaration of an external
+ // function, with no definition in the current compilation unit.
+ trans_everywhere || is_first
+ };
let lldecl = match map_node {
ast_map::NodeItem(i) => {
..
} => {
let d = mk_lldecl(abi);
- set_llvm_fn_attrs(i.attrs.as_slice(), d);
-
- if abi != abi::Rust {
- foreign::trans_rust_fn_with_foreign_abi(
- ccx, &**decl, &**body, [], d, &psubsts, fn_id.node,
- Some(hash.as_slice()));
- } else {
- trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
+ let needs_body = setup_lldecl(d, i.attrs.as_slice());
+ if needs_body {
+ if abi != abi::Rust {
+ foreign::trans_rust_fn_with_foreign_abi(
+ ccx, &**decl, &**body, [], d, &psubsts, fn_id.node,
+ Some(hash.as_slice()));
+ } else {
+ trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
+ }
}
d
}
}
ast_map::NodeVariant(v) => {
- let parent = ccx.tcx.map.get_parent(fn_id.node);
+ let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ty::enum_variants(ccx.tcx(), local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
match *ii {
ast::MethodImplItem(mth) => {
let d = mk_lldecl(abi::Rust);
- set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx,
- &*mth.pe_fn_decl(),
- &*mth.pe_body(),
- d,
- &psubsts,
- mth.id,
- []);
+ let needs_body = setup_lldecl(d, mth.attrs.as_slice());
+ if needs_body {
+ trans_fn(ccx,
+ &*mth.pe_fn_decl(),
+ &*mth.pe_body(),
+ d,
+ &psubsts,
+ mth.id,
+ []);
+ }
d
}
}
match *method {
ast::ProvidedMethod(mth) => {
let d = mk_lldecl(abi::Rust);
- set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx, &*mth.pe_fn_decl(), &*mth.pe_body(), d,
- &psubsts, mth.id, []);
+ let needs_body = setup_lldecl(d, mth.attrs.as_slice());
+ if needs_body {
+ trans_fn(ccx, &*mth.pe_fn_decl(), &*mth.pe_body(), d,
+ &psubsts, mth.id, []);
+ }
d
}
_ => {
}
};
- ccx.monomorphizing.borrow_mut().insert(fn_id, depth);
+ ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id));
(lldecl, true)
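
The `setup_lldecl` closure above decides whether a monomorphized symbol gets a body in this compilation unit: the first instantiation anywhere in the crate always does, and `#[inline]` requests get a body in every unit (downgraded to available_externally after the first). A compact sketch of `trans_everywhere || is_first`, with invented names:

    use std::collections::HashSet;

    // Sketch: emit a body for the first instantiation of `sym`, and for
    // every unit when the function requests inlining; otherwise leave a
    // bare external declaration.
    fn needs_body(seen: &mut HashSet<String>, sym: &str, requests_inline: bool) -> bool {
        let is_first = seen.insert(sym.to_string());
        requests_inline || is_first
    }

    fn main() {
        let mut seen = HashSet::new();
        assert!(needs_body(&mut seen, "foo", false));  // first sighting
        assert!(!needs_body(&mut seen, "foo", false)); // duplicate, not inline
        assert!(needs_body(&mut seen, "foo", true));   // duplicate, but #[inline]
    }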
use syntax::parse::token::{InternedString, special_idents};
use syntax::parse::token;
-pub struct Reflector<'a, 'b> {
+pub struct Reflector<'a, 'blk, 'tcx: 'blk> {
visitor_val: ValueRef,
visitor_items: &'a [ty::ImplOrTraitItem],
- final_bcx: &'b Block<'b>,
+ final_bcx: Block<'blk, 'tcx>,
tydesc_ty: Type,
- bcx: &'b Block<'b>
+ bcx: Block<'blk, 'tcx>
}
-impl<'a, 'b> Reflector<'a, 'b> {
+impl<'a, 'blk, 'tcx> Reflector<'a, 'blk, 'tcx> {
pub fn c_uint(&mut self, u: uint) -> ValueRef {
C_uint(self.bcx.ccx(), u)
}
let sym = mangle_internal_name_by_path_and_seq(
ast_map::Values([].iter()).chain(None), "get_disr");
- let fn_ty = ty::mk_ctor_fn(&ccx.tcx, ast::DUMMY_NODE_ID,
+ let fn_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[opaqueptrty], ty::mk_u64());
let llfdecl = decl_internal_rust_fn(ccx,
fn_ty,
}
// Emit a sequence of calls to visit_ty::visit_foo
-pub fn emit_calls_to_trait_visit_ty<'a>(
- bcx: &'a Block<'a>,
- t: ty::t,
- visitor_val: ValueRef,
- visitor_trait_id: DefId)
- -> &'a Block<'a> {
+pub fn emit_calls_to_trait_visit_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ visitor_val: ValueRef,
+ visitor_trait_id: DefId)
+ -> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let final = fcx.new_temp_block("final");
let tydesc_ty = ty::get_tydesc_ty(bcx.tcx()).unwrap();
use middle::trans::expr::{Dest, Ignore, SaveIn};
use middle::trans::expr;
use middle::trans::glue;
+use middle::trans::machine;
use middle::trans::machine::{nonzero_llsize_of, llsize_of_alloc};
use middle::trans::type_::Type;
use middle::trans::type_of;
use syntax::ast;
use syntax::parse::token::InternedString;
-fn get_len(bcx: &Block, vptr: ValueRef) -> ValueRef {
+fn get_len(bcx: Block, vptr: ValueRef) -> ValueRef {
let _icx = push_ctxt("tvec::get_lenl");
Load(bcx, expr::get_len(bcx, vptr))
}
-fn get_dataptr(bcx: &Block, vptr: ValueRef) -> ValueRef {
+fn get_dataptr(bcx: Block, vptr: ValueRef) -> ValueRef {
let _icx = push_ctxt("tvec::get_dataptr");
Load(bcx, expr::get_dataptr(bcx, vptr))
}
-pub fn pointer_add_byte(bcx: &Block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
+pub fn pointer_add_byte(bcx: Block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
let _icx = push_ctxt("tvec::pointer_add_byte");
let old_ty = val_ty(ptr);
let bptr = PointerCast(bcx, ptr, Type::i8p(bcx.ccx()));
return PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]), old_ty);
}
-pub fn make_drop_glue_unboxed<'a>(
- bcx: &'a Block<'a>,
- vptr: ValueRef,
- unit_ty: ty::t,
- should_deallocate: bool)
- -> &'a Block<'a> {
+pub fn make_drop_glue_unboxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ vptr: ValueRef,
+ unit_ty: ty::t,
+ should_deallocate: bool)
+ -> Block<'blk, 'tcx> {
let not_null = IsNotNull(bcx, vptr);
with_cond(bcx, not_null, |bcx| {
+ let ccx = bcx.ccx();
let tcx = bcx.tcx();
let _icx = push_ctxt("tvec::make_drop_glue_unboxed");
if should_deallocate {
let not_null = IsNotNull(bcx, dataptr);
with_cond(bcx, not_null, |bcx| {
- // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation
- glue::trans_exchange_free(bcx, dataptr, 0, 8)
+ let llty = type_of::type_of(ccx, unit_ty);
+ let llsize = machine::llsize_of(ccx, llty);
+ let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
+ let size = Mul(bcx, llsize, get_len(bcx, vptr));
+ glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
})
} else {
bcx
format!("VecTypes {{unit_ty={}, llunit_ty={}, \
llunit_size={}, llunit_alloc_size={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
- ccx.tn.type_to_string(self.llunit_ty),
- ccx.tn.val_to_string(self.llunit_size),
+ ccx.tn().type_to_string(self.llunit_ty),
+ ccx.tn().val_to_string(self.llunit_size),
self.llunit_alloc_size)
}
}
-pub fn trans_fixed_vstore<'a>(
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
//!
//
// [...] allocates a fixed-size array and moves it around "by value".
};
}
-pub fn trans_slice_vec<'a>(bcx: &'a Block<'a>,
- slice_expr: &ast::Expr,
- content_expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ slice_expr: &ast::Expr,
+ content_expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* &[...] allocates memory on the stack and writes the values into it,
* returning the vector (the caller must make the reference). "..." is
immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
}
-pub fn trans_lit_str<'a>(
- bcx: &'a Block<'a>,
- lit_expr: &ast::Expr,
- str_lit: InternedString,
- dest: Dest)
- -> &'a Block<'a> {
+pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ lit_expr: &ast::Expr,
+ str_lit: InternedString,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
/*!
* Literal strings translate to slices into static memory. This is
* different from trans_slice_vstore() above because it doesn't need to copy
}
}
-pub fn trans_uniq_vec<'a>(bcx: &'a Block<'a>,
- uniq_expr: &ast::Expr,
- content_expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+pub fn trans_uniq_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ uniq_expr: &ast::Expr,
+ content_expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* Box<[...]> and "...".to_string() allocate boxes in the exchange heap and write
* the array elements into them.
debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let vec_ty = node_id_type(bcx, uniq_expr.id);
- let unit_sz = nonzero_llsize_of(ccx, type_of::type_of(ccx, vt.unit_ty));
+ let llty = type_of::type_of(ccx, vt.unit_ty);
+ let unit_sz = nonzero_llsize_of(ccx, llty);
let llcount = if count < 4u {
C_int(ccx, 4)
} else {
C_uint(ccx, count)
};
let alloc = Mul(bcx, llcount, unit_sz);
- let llty_ptr = type_of::type_of(ccx, vt.unit_ty).ptr_to();
- let align = C_uint(ccx, 8);
+ let llty_ptr = llty.ptr_to();
+ let align = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx,
llty_ptr,
vec_ty,
// Create a temporary scope lest execution should fail while
// constructing the vector.
let temp_scope = fcx.push_custom_cleanup_scope();
- // FIXME: #13994: the old `Box<[T]> will not support sized deallocation,
- // this is a placeholder
- fcx.schedule_free_value(cleanup::CustomScope(temp_scope),
- dataptr, cleanup::HeapExchange, vt.unit_ty);
- debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
- bcx.val_to_string(dataptr), count);
+ fcx.schedule_free_slice(cleanup::CustomScope(temp_scope),
+ dataptr, alloc, align, cleanup::HeapExchange);
- let bcx = write_content(bcx, &vt, uniq_expr,
- content_expr, SaveIn(dataptr));
+ debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
+ bcx.val_to_string(dataptr), count);
+
+ let bcx = write_content(bcx, &vt, uniq_expr,
+ content_expr, SaveIn(dataptr));
fcx.pop_custom_cleanup_scope(temp_scope);
}
}
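
Both this hunk and the `make_drop_glue_unboxed` change earlier replace the `(0, 8)` placeholder from #13994 with the real size and alignment: the byte count is the element count times the element's allocation size, and the alignment is the element type's minimum alignment. In plain Rust terms, a sketch (not the trans API):

    use std::mem;

    // Sketch: the (size, align) pair a sized deallocator needs for a
    // heap-allocated slice of `len` elements of type T.
    fn slice_dealloc_info<T>(len: usize) -> (usize, usize) {
        (len * mem::size_of::<T>(), mem::align_of::<T>())
    }

    fn main() {
        assert_eq!(slice_dealloc_info::<u32>(4), (16, 4));
    }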
-pub fn write_content<'a>(
- bcx: &'a Block<'a>,
- vt: &VecTypes,
- vstore_expr: &ast::Expr,
- content_expr: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
+pub fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ vt: &VecTypes,
+ vstore_expr: &ast::Expr,
+ content_expr: &ast::Expr,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::write_content");
let fcx = bcx.fcx;
let mut bcx = bcx;
}
}
-pub fn vec_types_from_expr(bcx: &Block, vec_expr: &ast::Expr) -> VecTypes {
+pub fn vec_types_from_expr(bcx: Block, vec_expr: &ast::Expr) -> VecTypes {
let vec_ty = node_id_type(bcx, vec_expr.id);
vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
}
-pub fn vec_types(bcx: &Block, unit_ty: ty::t) -> VecTypes {
+pub fn vec_types(bcx: Block, unit_ty: ty::t) -> VecTypes {
let ccx = bcx.ccx();
let llunit_ty = type_of::type_of(ccx, unit_ty);
let llunit_size = nonzero_llsize_of(ccx, llunit_ty);
}
}
-pub fn elements_required(bcx: &Block, content_expr: &ast::Expr) -> uint {
+pub fn elements_required(bcx: Block, content_expr: &ast::Expr) -> uint {
//! Figure out the number of elements we need to store this content
match content_expr.node {
}
}
-pub fn get_fixed_base_and_len(bcx: &Block,
+pub fn get_fixed_base_and_len(bcx: Block,
llval: ValueRef,
vec_length: uint)
-> (ValueRef, ValueRef) {
(base, len)
}
-fn get_slice_base_and_len(bcx: &Block,
+fn get_slice_base_and_len(bcx: Block,
llval: ValueRef)
-> (ValueRef, ValueRef) {
let base = Load(bcx, GEPi(bcx, llval, [0u, abi::slice_elt_base]));
(base, len)
}
-pub fn get_base_and_len(bcx: &Block,
+pub fn get_base_and_len(bcx: Block,
llval: ValueRef,
vec_ty: ty::t)
-> (ValueRef, ValueRef) {
}
}
-pub type iter_vec_block<'r,'b> =
- |&'b Block<'b>, ValueRef, ty::t|: 'r -> &'b Block<'b>;
-
-pub fn iter_vec_loop<'r,
- 'b>(
- bcx: &'b Block<'b>,
- data_ptr: ValueRef,
- vt: &VecTypes,
- count: ValueRef,
- f: iter_vec_block<'r,'b>)
- -> &'b Block<'b> {
+pub type iter_vec_block<'a, 'blk, 'tcx> =
+ |Block<'blk, 'tcx>, ValueRef, ty::t|: 'a -> Block<'blk, 'tcx>;
+
+pub fn iter_vec_loop<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ data_ptr: ValueRef,
+ vt: &VecTypes,
+ count: ValueRef,
+ f: iter_vec_block<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::iter_vec_loop");
let fcx = bcx.fcx;
let loop_counter = {
// i = 0
- let i = alloca(loop_bcx, bcx.ccx().int_type, "__i");
+ let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
Store(loop_bcx, C_uint(bcx.ccx(), 0), i);
Br(loop_bcx, cond_bcx.llbb);
next_bcx
}
-pub fn iter_vec_raw<'r,
- 'b>(
- bcx: &'b Block<'b>,
- data_ptr: ValueRef,
- unit_ty: ty::t,
- len: ValueRef,
- f: iter_vec_block<'r,'b>)
- -> &'b Block<'b> {
+pub fn iter_vec_raw<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ data_ptr: ValueRef,
+ unit_ty: ty::t,
+ len: ValueRef,
+ f: iter_vec_block<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
}
pub fn void(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMVoidTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
}
pub fn nil(ccx: &CrateContext) -> Type {
}
pub fn metadata(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx()))
}
pub fn i1(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt1TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
}
pub fn i8(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt8TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
}
pub fn i16(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt16TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
}
pub fn i32(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt32TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
}
pub fn i64(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt64TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
}
pub fn f32(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMFloatTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
}
pub fn f64(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
}
pub fn bool(ccx: &CrateContext) -> Type {
}
pub fn int(ccx: &CrateContext) -> Type {
- match ccx.tcx.sess.targ_cfg.arch {
+ match ccx.tcx().sess.targ_cfg.arch {
X86 | Arm | Mips | Mipsel => Type::i32(ccx),
X86_64 => Type::i64(ccx)
}
pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
match t {
- ast::TyI => ccx.int_type,
+ ast::TyI => ccx.int_type(),
ast::TyI8 => Type::i8(ccx),
ast::TyI16 => Type::i16(ccx),
ast::TyI32 => Type::i32(ccx),
pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
match t {
- ast::TyU => ccx.int_type,
+ ast::TyU => ccx.int_type(),
ast::TyU8 => Type::i8(ccx),
ast::TyU16 => Type::i16(ccx),
ast::TyU32 => Type::i32(ccx),
pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
let els : &[TypeRef] = unsafe { mem::transmute(els) };
- ty!(llvm::LLVMStructTypeInContext(ccx.llcx, els.as_ptr(),
+ ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
els.len() as c_uint,
packed as Bool))
}
pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
- ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx, s)))
+ ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx(), s)))
}
pub fn empty_struct(ccx: &CrateContext) -> Type {
}
pub fn generic_glue_fn(cx: &CrateContext) -> Type {
- match cx.tn.find_type("glue_fn") {
+ match cx.tn().find_type("glue_fn") {
Some(ty) => return ty,
None => ()
}
let ty = Type::glue_fn(cx, Type::i8p(cx));
- cx.tn.associate_type("glue_fn", &ty);
+ cx.tn().associate_type("glue_fn", &ty);
ty
}
// The box pointed to by @T.
pub fn at_box(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, [
- ccx.int_type, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
+ ccx.int_type(), Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
Type::i8p(ccx), Type::i8p(ccx), ty
], false)
}
// recursive types. For example, enum types rely on this behavior.
pub fn sizing_type_of(cx: &CrateContext, t: ty::t) -> Type {
- match cx.llsizingtypes.borrow().find_copy(&t) {
+ match cx.llsizingtypes().borrow().find_copy(&t) {
Some(t) => return t,
None => ()
}
ty::ty_vec(_, None) | ty::ty_trait(..) | ty::ty_str => fail!("unreachable")
};
- cx.llsizingtypes.borrow_mut().insert(t, llsizingty);
+ cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
llsizingty
}
}
// Check the cache.
- match cx.lltypes.borrow().find(&t) {
+ match cx.lltypes().borrow().find(&t) {
Some(&llty) => return llty,
None => ()
}
t,
t_norm.repr(cx.tcx()),
t_norm,
- cx.tn.type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.tn().type_to_string(llty));
+ cx.lltypes().borrow_mut().insert(t, llty);
return llty;
}
ty::ty_str => {
// This means we get a nicer name in the output (str is always
// unsized).
- cx.tn.find_type("str_slice").unwrap()
+ cx.tn().find_type("str_slice").unwrap()
}
ty::ty_trait(..) => Type::opaque_trait(cx),
_ if !ty::type_is_sized(cx.tcx(), ty) => {
debug!("--> mapped t={} {:?} to llty={}",
t.repr(cx.tcx()),
t,
- cx.tn.type_to_string(llty));
+ cx.tn().type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.lltypes().borrow_mut().insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
/// This only performs a search for a trivially dominating store. The store
/// must be the only user of this value, and there must not be any conditional
/// branches between the store and the given block.
- pub fn get_dominating_store(self, bcx: &Block) -> Option<Value> {
+ pub fn get_dominating_store(self, bcx: Block) -> Option<Value> {
match self.get_single_user().and_then(|user| user.as_store_inst()) {
Some(store) => {
store.get_parent().and_then(|store_bb| {
use std::ops;
use std::rc::Rc;
use std::collections::{HashMap, HashSet};
+use arena::TypedArena;
use syntax::abi;
use syntax::ast::{CrateNum, DefId, FnStyle, Ident, ItemTrait, LOCAL_CRATE};
use syntax::ast::{MutImmutable, MutMutable, Name, NamedField, NodeId};
/// The data structure to keep track of all the information that typechecker
/// generates so that it can be reused and doesn't have to be redone
/// later on.
-pub struct ctxt {
+pub struct ctxt<'tcx> {
+ /// The arena that types are allocated from.
+ type_arena: &'tcx TypedArena<t_box_>,
+
/// Specifically use a speedy hash algorithm for this hash map, it's used
/// quite often.
- pub interner: RefCell<FnvHashMap<intern_key, Box<t_box_>>>,
+ interner: RefCell<FnvHashMap<intern_key, &'tcx t_box_>>,
pub next_id: Cell<uint>,
pub sess: Session,
pub def_map: resolve::DefMap,
}
}
-pub fn mk_ctxt(s: Session,
- dm: resolve::DefMap,
- named_region_map: resolve_lifetime::NamedRegionMap,
- map: ast_map::Map,
- freevars: freevars::freevar_map,
- capture_modes: freevars::CaptureModeMap,
- region_maps: middle::region::RegionMaps,
- lang_items: middle::lang_items::LanguageItems,
- stability: stability::Index)
- -> ctxt {
+pub fn mk_ctxt<'tcx>(s: Session,
+ type_arena: &'tcx TypedArena<t_box_>,
+ dm: resolve::DefMap,
+ named_region_map: resolve_lifetime::NamedRegionMap,
+ map: ast_map::Map,
+ freevars: freevars::freevar_map,
+ capture_modes: freevars::CaptureModeMap,
+ region_maps: middle::region::RegionMaps,
+ lang_items: middle::lang_items::LanguageItems,
+ stability: stability::Index) -> ctxt<'tcx> {
ctxt {
+ type_arena: type_arena,
+ interner: RefCell::new(FnvHashMap::new()),
named_region_map: named_region_map,
item_variance_map: RefCell::new(DefIdMap::new()),
variance_computed: Cell::new(false),
- interner: RefCell::new(FnvHashMap::new()),
next_id: Cell::new(primitives::LAST_PRIMITIVE_ID),
sess: s,
def_map: dm,
}
}
- let t = box t_box_ {
+ let t = cx.type_arena.alloc(t_box_ {
sty: st,
id: cx.next_id.get(),
flags: flags,
- };
+ });
let sty_ptr = &t.sty as *const sty;
tc | TC::Managed
} else if Some(did) == cx.lang_items.no_copy_bound() {
tc | TC::OwnsAffine
- } else if Some(did) == cx.lang_items.no_share_bound() {
+ } else if Some(did) == cx.lang_items.no_sync_bound() {
tc | TC::ReachesNoSync
} else if Some(did) == cx.lang_items.unsafe_type() {
// FIXME(#13231): This shouldn't be needed after
}
}
-pub fn method_call_type_param_defs<T>(typer: &T,
- origin: typeck::MethodOrigin)
- -> VecPerParamSpace<TypeParameterDef>
- where T: mc::Typer {
+pub fn method_call_type_param_defs<'tcx, T>(typer: &T,
+ origin: typeck::MethodOrigin)
+ -> VecPerParamSpace<TypeParameterDef>
+ where T: mc::Typer<'tcx> {
match origin {
typeck::MethodStatic(did) => {
ty::lookup_item_type(typer.tcx(), did).generics.types.clone()
let u = TypeNormalizer(cx).fold_ty(t);
return u;
- struct TypeNormalizer<'a>(&'a ctxt);
+ struct TypeNormalizer<'a, 'tcx: 'a>(&'a ctxt<'tcx>);
- impl<'a> TypeFolder for TypeNormalizer<'a> {
- fn tcx<'a>(&'a self) -> &'a ctxt { let TypeNormalizer(c) = *self; c }
+ impl<'a, 'tcx> TypeFolder<'tcx> for TypeNormalizer<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ctxt<'tcx> { let TypeNormalizer(c) = *self; c }
fn fold_ty(&mut self, t: ty::t) -> ty::t {
match self.tcx().normalized_cache.borrow().find_copy(&t) {
}
}
-pub trait ExprTyProvider {
- fn expr_ty(&self, ex: &ast::Expr) -> t;
- fn ty_ctxt<'a>(&'a self) -> &'a ctxt;
-}
-
-impl ExprTyProvider for ctxt {
- fn expr_ty(&self, ex: &ast::Expr) -> t {
- expr_ty(self, ex)
- }
-
- fn ty_ctxt<'a>(&'a self) -> &'a ctxt {
- self
- }
-}
-
// Returns the repeat count for a repeating vector expression.
-pub fn eval_repeat_count<T: ExprTyProvider>(tcx: &T, count_expr: &ast::Expr) -> uint {
+pub fn eval_repeat_count(tcx: &ctxt, count_expr: &ast::Expr) -> uint {
match const_eval::eval_const_expr_partial(tcx, count_expr) {
Ok(ref const_val) => match *const_val {
const_eval::const_int(count) => if count < 0 {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found negative integer");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found negative integer");
+ 0
} else {
- return count as uint
+ count as uint
},
- const_eval::const_uint(count) => return count as uint,
+ const_eval::const_uint(count) => count as uint,
const_eval::const_float(count) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found float");
- return count as uint;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found float");
+ count as uint
}
const_eval::const_str(_) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found string");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found string");
+ 0
}
const_eval::const_bool(_) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found boolean");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found boolean");
+ 0
}
const_eval::const_binary(_) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found binary array");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found binary array");
+ 0
}
const_eval::const_nil => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found ()");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found ()");
+ 0
}
},
Err(..) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected constant integer for repeat count, \
- found variable");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected constant integer for repeat count, \
+ found variable");
+ 0
}
}
}
}
}
-impl mc::Typer for ty::ctxt {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'tcx> mc::Typer<'tcx> for ty::ctxt<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self
}
/// The TypeFoldable trait is implemented for every type that can be folded.
/// Basically, every type that has a corresponding method in TypeFolder.
pub trait TypeFoldable {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Self;
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self;
}
/// The TypeFolder trait defines the actual *folding*. There is a
/// default implementation that does an "identity" fold. Within each
/// identity fold, it should invoke `foo.fold_with(self)` to fold each
/// sub-item.
-pub trait TypeFolder {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt;
+pub trait TypeFolder<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn fold_ty(&mut self, t: ty::t) -> ty::t {
super_fold_ty(self, t)
// needed.
impl<T:TypeFoldable> TypeFoldable for Option<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Option<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Option<T> {
self.as_ref().map(|t| t.fold_with(folder))
}
}
impl<T:TypeFoldable> TypeFoldable for Rc<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Rc<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Rc<T> {
Rc::new((**self).fold_with(folder))
}
}
impl<T:TypeFoldable> TypeFoldable for Vec<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Vec<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Vec<T> {
self.iter().map(|t| t.fold_with(folder)).collect()
}
}
impl<T:TypeFoldable> TypeFoldable for OwnedSlice<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> OwnedSlice<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> OwnedSlice<T> {
self.iter().map(|t| t.fold_with(folder)).collect()
}
}
impl<T:TypeFoldable> TypeFoldable for VecPerParamSpace<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> VecPerParamSpace<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> VecPerParamSpace<T> {
self.map(|t| t.fold_with(folder))
}
}
impl TypeFoldable for ty::TraitStore {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TraitStore {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TraitStore {
folder.fold_trait_store(*self)
}
}
impl TypeFoldable for ty::t {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::t {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::t {
folder.fold_ty(*self)
}
}
impl TypeFoldable for ty::BareFnTy {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::BareFnTy {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::BareFnTy {
folder.fold_bare_fn_ty(self)
}
}
impl TypeFoldable for ty::ClosureTy {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ClosureTy {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ClosureTy {
folder.fold_closure_ty(self)
}
}
impl TypeFoldable for ty::mt {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::mt {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::mt {
folder.fold_mt(self)
}
}
impl TypeFoldable for ty::FnSig {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::FnSig {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::FnSig {
folder.fold_sig(self)
}
}
impl TypeFoldable for ty::sty {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::sty {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::sty {
folder.fold_sty(self)
}
}
impl TypeFoldable for ty::TraitRef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TraitRef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TraitRef {
folder.fold_trait_ref(self)
}
}
impl TypeFoldable for ty::Region {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::Region {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::Region {
folder.fold_region(*self)
}
}
impl TypeFoldable for subst::Substs {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> subst::Substs {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> subst::Substs {
folder.fold_substs(self)
}
}
impl TypeFoldable for ty::ItemSubsts {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ItemSubsts {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ItemSubsts {
ty::ItemSubsts {
substs: self.substs.fold_with(folder),
}
}
impl TypeFoldable for ty::AutoRef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::AutoRef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::AutoRef {
folder.fold_autoref(self)
}
}
impl TypeFoldable for typeck::vtable_origin {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> typeck::vtable_origin {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> typeck::vtable_origin {
match *self {
typeck::vtable_static(def_id, ref substs, ref origins) => {
let r_substs = substs.fold_with(folder);
}
impl TypeFoldable for ty::BuiltinBounds {
- fn fold_with<F:TypeFolder>(&self, _folder: &mut F) -> ty::BuiltinBounds {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> ty::BuiltinBounds {
*self
}
}
impl TypeFoldable for ty::ExistentialBounds {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ExistentialBounds {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ExistentialBounds {
folder.fold_existential_bounds(*self)
}
}
impl TypeFoldable for ty::ParamBounds {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ParamBounds {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ParamBounds {
ty::ParamBounds {
opt_region_bound: self.opt_region_bound.fold_with(folder),
builtin_bounds: self.builtin_bounds.fold_with(folder),
}
impl TypeFoldable for ty::TypeParameterDef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TypeParameterDef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TypeParameterDef {
ty::TypeParameterDef {
ident: self.ident,
def_id: self.def_id,
}
impl TypeFoldable for ty::RegionParameterDef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::RegionParameterDef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::RegionParameterDef {
ty::RegionParameterDef {
name: self.name,
def_id: self.def_id,
}
impl TypeFoldable for ty::Generics {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::Generics {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::Generics {
ty::Generics {
types: self.types.fold_with(folder),
regions: self.regions.fold_with(folder),
}
impl TypeFoldable for ty::UnsizeKind {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::UnsizeKind {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::UnsizeKind {
match *self {
ty::UnsizeLength(len) => ty::UnsizeLength(len),
ty::UnsizeStruct(box ref k, n) => ty::UnsizeStruct(box k.fold_with(folder), n),
// "super" routines: these are the default implementations for TypeFolder.
//
// They should invoke `foo.fold_with()` to do recursive folding.
-
-pub fn super_fold_ty<T:TypeFolder>(this: &mut T,
- t: ty::t)
- -> ty::t {
+pub fn super_fold_ty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ t: ty::t)
+ -> ty::t {
let sty = ty::get(t).sty.fold_with(this);
ty::mk_t(this.tcx(), sty)
}
-pub fn super_fold_substs<T:TypeFolder>(this: &mut T,
- substs: &subst::Substs)
- -> subst::Substs {
+pub fn super_fold_substs<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ substs: &subst::Substs)
+ -> subst::Substs {
let regions = match substs.regions {
subst::ErasedRegions => {
subst::ErasedRegions
types: substs.types.fold_with(this) }
}
-pub fn super_fold_sig<T:TypeFolder>(this: &mut T,
- sig: &ty::FnSig)
- -> ty::FnSig {
+pub fn super_fold_sig<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ sig: &ty::FnSig)
+ -> ty::FnSig {
ty::FnSig { binder_id: sig.binder_id,
inputs: sig.inputs.fold_with(this),
output: sig.output.fold_with(this),
variadic: sig.variadic }
}
-pub fn super_fold_bare_fn_ty<T:TypeFolder>(this: &mut T,
- fty: &ty::BareFnTy)
- -> ty::BareFnTy
+pub fn super_fold_bare_fn_ty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ fty: &ty::BareFnTy)
+ -> ty::BareFnTy
{
ty::BareFnTy { sig: fty.sig.fold_with(this),
abi: fty.abi,
fn_style: fty.fn_style }
}
-pub fn super_fold_closure_ty<T:TypeFolder>(this: &mut T,
- fty: &ty::ClosureTy)
- -> ty::ClosureTy
+pub fn super_fold_closure_ty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ fty: &ty::ClosureTy)
+ -> ty::ClosureTy
{
ty::ClosureTy {
store: fty.store.fold_with(this),
abi: fty.abi,
}
}
-
-pub fn super_fold_trait_ref<T:TypeFolder>(this: &mut T,
- t: &ty::TraitRef)
- -> ty::TraitRef {
+pub fn super_fold_trait_ref<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ t: &ty::TraitRef)
+ -> ty::TraitRef {
ty::TraitRef {
def_id: t.def_id,
substs: t.substs.fold_with(this),
}
}
-pub fn super_fold_mt<T:TypeFolder>(this: &mut T,
- mt: &ty::mt) -> ty::mt {
+pub fn super_fold_mt<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ mt: &ty::mt) -> ty::mt {
ty::mt {ty: mt.ty.fold_with(this),
mutbl: mt.mutbl}
}
-pub fn super_fold_sty<T:TypeFolder>(this: &mut T,
- sty: &ty::sty) -> ty::sty {
+pub fn super_fold_sty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ sty: &ty::sty) -> ty::sty {
match *sty {
ty::ty_box(typ) => {
ty::ty_box(typ.fold_with(this))
}
}
-pub fn super_fold_trait_store<T:TypeFolder>(this: &mut T,
- trait_store: ty::TraitStore)
- -> ty::TraitStore {
+pub fn super_fold_trait_store<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ trait_store: ty::TraitStore)
+ -> ty::TraitStore {
match trait_store {
ty::UniqTraitStore => ty::UniqTraitStore,
ty::RegionTraitStore(r, m) => {
}
}
-pub fn super_fold_existential_bounds<T:TypeFolder>(this: &mut T,
- bounds: ty::ExistentialBounds)
- -> ty::ExistentialBounds {
+pub fn super_fold_existential_bounds<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ bounds: ty::ExistentialBounds)
+ -> ty::ExistentialBounds {
ty::ExistentialBounds {
region_bound: bounds.region_bound.fold_with(this),
builtin_bounds: bounds.builtin_bounds,
}
}
-pub fn super_fold_autoref<T:TypeFolder>(this: &mut T,
- autoref: &ty::AutoRef)
- -> ty::AutoRef
+pub fn super_fold_autoref<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ autoref: &ty::AutoRef)
+ -> ty::AutoRef
{
match *autoref {
ty::AutoPtr(r, m, None) => ty::AutoPtr(this.fold_region(r), m, None),
}
}
-pub fn super_fold_item_substs<T:TypeFolder>(this: &mut T,
- substs: ty::ItemSubsts)
- -> ty::ItemSubsts
+pub fn super_fold_item_substs<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ substs: ty::ItemSubsts)
+ -> ty::ItemSubsts
{
ty::ItemSubsts {
substs: substs.substs.fold_with(this),
///////////////////////////////////////////////////////////////////////////
// Some sample folders
-pub struct BottomUpFolder<'a> {
- pub tcx: &'a ty::ctxt,
+pub struct BottomUpFolder<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
pub fldop: |ty::t|: 'a -> ty::t,
}
-impl<'a> TypeFolder for BottomUpFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> TypeFolder<'tcx> for BottomUpFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn fold_ty(&mut self, ty: ty::t) -> ty::t {
let t1 = super_fold_ty(self, ty);
/// (The distinction between "free" and "bound" is represented by
/// keeping track of each `FnSig` in the lexical context of the
/// current position of the fold.)
-pub struct RegionFolder<'a> {
- tcx: &'a ty::ctxt,
+pub struct RegionFolder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
fld_t: |ty::t|: 'a -> ty::t,
fld_r: |ty::Region|: 'a -> ty::Region,
within_binder_ids: Vec<ast::NodeId>,
}
-impl<'a> RegionFolder<'a> {
- pub fn general(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
+ pub fn general(tcx: &'a ty::ctxt<'tcx>,
fld_r: |ty::Region|: 'a -> ty::Region,
fld_t: |ty::t|: 'a -> ty::t)
- -> RegionFolder<'a> {
+ -> RegionFolder<'a, 'tcx> {
RegionFolder {
tcx: tcx,
fld_t: fld_t,
}
}
- pub fn regions(tcx: &'a ty::ctxt, fld_r: |ty::Region|: 'a -> ty::Region)
- -> RegionFolder<'a> {
+ pub fn regions(tcx: &'a ty::ctxt<'tcx>, fld_r: |ty::Region|: 'a -> ty::Region)
+ -> RegionFolder<'a, 'tcx> {
fn noop(t: ty::t) -> ty::t { t }
RegionFolder {
}
}
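// Hypothetical usage of the sample folder above: rewrite every region to
// 'static while leaving the types themselves untouched (`tcx` and `t`
// assumed in scope).
//
//     let mut folder = RegionFolder::regions(tcx, |_| ty::ReStatic);
//     let folded = t.fold_with(&mut folder);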
-impl<'a> TypeFolder for RegionFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn fold_ty(&mut self, ty: ty::t) -> ty::t {
debug!("RegionFolder.fold_ty({})", ty.repr(self.tcx()));
use syntax::{ast, ast_util};
use syntax::codemap::Span;
-pub trait AstConv {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt;
+pub trait AstConv<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype;
fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef>;
r
}
-pub fn opt_ast_region_to_region<AC:AstConv,RS:RegionScope>(
+pub fn opt_ast_region_to_region<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
default_span: Span,
r
}
-fn ast_path_substs<AC:AstConv,RS:RegionScope>(
+fn ast_path_substs<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
decl_generics: &ty::Generics,
substs
}
-pub fn ast_path_to_trait_ref<AC:AstConv,RS:RegionScope>(
+pub fn ast_path_to_trait_ref<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
trait_def_id: ast::DefId,
})
}
-pub fn ast_path_to_ty<AC:AstConv,RS:RegionScope>(
+pub fn ast_path_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
did: ast::DefId,
/// and/or region variables are substituted.
///
/// This is used when checking the constructor in struct literals.
-pub fn ast_path_to_ty_relaxed<AC:AstConv,
+pub fn ast_path_to_ty_relaxed<'tcx, AC: AstConv<'tcx>,
RS:RegionScope>(
this: &AC,
rscope: &RS,
/// Converts the given AST type to a built-in type. A "built-in type" is, at
/// present, either a core numeric type, a string, or `Box`.
-pub fn ast_ty_to_builtin_ty<AC:AstConv,
- RS:RegionScope>(
- this: &AC,
- rscope: &RS,
- ast_ty: &ast::Ty)
- -> Option<ty::t> {
+pub fn ast_ty_to_builtin_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
+ this: &AC,
+ rscope: &RS,
+ ast_ty: &ast::Ty)
+ -> Option<ty::t> {
match ast_ty_to_prim_ty(this.tcx(), ast_ty) {
Some(typ) => return Some(typ),
None => {}
}
}
-pub fn trait_ref_for_unboxed_function<AC:AstConv,
+pub fn trait_ref_for_unboxed_function<'tcx, AC: AstConv<'tcx>,
RS:RegionScope>(
this: &AC,
rscope: &RS,
// Handle `~`, `Box`, and `&` being able to mean strs and vecs.
// If a_seq_ty is a str or a vec, make it a str/vec.
// Also handle first-class trait types.
-fn mk_pointer<AC:AstConv,
- RS:RegionScope>(
- this: &AC,
- rscope: &RS,
- a_seq_ty: &ast::MutTy,
- ptr_ty: PointerTy,
- constr: |ty::t| -> ty::t)
- -> ty::t {
+fn mk_pointer<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
+ this: &AC,
+ rscope: &RS,
+ a_seq_ty: &ast::MutTy,
+ ptr_ty: PointerTy,
+ constr: |ty::t| -> ty::t)
+ -> ty::t {
let tcx = this.tcx();
debug!("mk_pointer(ptr_ty={})", ptr_ty);
// Parses the programmer's textual representation of a type into our
// internal notion of a type.
-pub fn ast_ty_to_ty<AC:AstConv, RS:RegionScope>(
- this: &AC, rscope: &RS, ast_ty: &ast::Ty) -> ty::t {
+pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
+ this: &AC, rscope: &RS, ast_ty: &ast::Ty) -> ty::t {
let tcx = this.tcx();
return typ;
}
-pub fn ty_of_arg<AC: AstConv, RS: RegionScope>(this: &AC, rscope: &RS, a: &ast::Arg,
- expected_ty: Option<ty::t>) -> ty::t {
+pub fn ty_of_arg<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(this: &AC, rscope: &RS,
+ a: &ast::Arg,
+ expected_ty: Option<ty::t>)
+ -> ty::t {
match a.ty.node {
ast::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
ast::TyInfer => this.ty_infer(a.ty.span),
explicit_self: ast::ExplicitSelf,
}
-pub fn ty_of_method<AC:AstConv>(
+pub fn ty_of_method<'tcx, AC: AstConv<'tcx>>(
this: &AC,
id: ast::NodeId,
fn_style: ast::FnStyle,
(bare_fn_ty, optional_explicit_self_category.unwrap())
}
-pub fn ty_of_bare_fn<AC:AstConv>(this: &AC, id: ast::NodeId,
- fn_style: ast::FnStyle, abi: abi::Abi,
- decl: &ast::FnDecl) -> ty::BareFnTy {
+pub fn ty_of_bare_fn<'tcx, AC: AstConv<'tcx>>(this: &AC, id: ast::NodeId,
+ fn_style: ast::FnStyle, abi: abi::Abi,
+ decl: &ast::FnDecl) -> ty::BareFnTy {
let (bare_fn_ty, _) =
ty_of_method_or_bare_fn(this, id, fn_style, abi, None, decl);
bare_fn_ty
}
-fn ty_of_method_or_bare_fn<AC:AstConv>(
+fn ty_of_method_or_bare_fn<'tcx, AC: AstConv<'tcx>>(
this: &AC,
id: ast::NodeId,
fn_style: ast::FnStyle,
}, explicit_self_category_result)
}
-fn determine_explicit_self_category<AC:AstConv,
+fn determine_explicit_self_category<'tcx, AC: AstConv<'tcx>,
RS:RegionScope>(
this: &AC,
rscope: &RS,
}
}
-pub fn ty_of_closure<AC:AstConv>(
+pub fn ty_of_closure<'tcx, AC: AstConv<'tcx>>(
this: &AC,
id: ast::NodeId,
fn_style: ast::FnStyle,
}
}
-pub fn conv_existential_bounds<AC:AstConv, RS:RegionScope>(
+pub fn conv_existential_bounds<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
this: &AC,
rscope: &RS,
span: Span,
return Some(r);
}
-fn compute_region_bound<AC:AstConv, RS:RegionScope>(
+fn compute_region_bound<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
this: &AC,
rscope: &RS,
span: Span,
fcx.write_ty(expr.id, result_ty);
}
-pub struct pat_ctxt<'a> {
- pub fcx: &'a FnCtxt<'a>,
+pub struct pat_ctxt<'a, 'tcx: 'a> {
+ pub fcx: &'a FnCtxt<'a, 'tcx>,
pub map: PatIdMap,
}
IgnoreStaticMethods,
}
-pub fn lookup<'a>(
- fcx: &'a FnCtxt<'a>,
+pub fn lookup<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
// In a call `a.b::<X, Y, ...>(...)`:
expr: &ast::Expr, // The expression `a.b(...)`.
lcx.search(self_ty)
}
-pub fn lookup_in_trait<'a>(
- fcx: &'a FnCtxt<'a>,
+pub fn lookup_in_trait<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
// In a call `a.b::<X, Y, ...>(...)`:
span: Span, // The expression `a.b(...)`'s span.
}
}
-struct LookupContext<'a> {
- fcx: &'a FnCtxt<'a>,
+struct LookupContext<'a, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
span: Span,
// The receiver to the method call. Only `None` in the case of
RcvrMatchesIfSubtype(ty::t),
}
-impl<'a> LookupContext<'a> {
+impl<'a, 'tcx> LookupContext<'a, 'tcx> {
fn search(&self, self_ty: ty::t) -> Option<MethodCallee> {
let span = self.self_expr.map_or(self.span, |e| e.span);
let self_expr_id = self.self_expr.map(|e| e.id);
idx + 1u, ty::item_path_str(self.tcx(), did));
}
- fn infcx(&'a self) -> &'a infer::InferCtxt<'a> {
+ fn infcx(&'a self) -> &'a infer::InferCtxt<'a, 'tcx> {
&self.fcx.inh.infcx
}
- fn tcx(&self) -> &'a ty::ctxt {
+ fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.fcx.tcx()
}
use middle::subst::{Subst, Substs, VecPerParamSpace, ParamSpace};
use middle::ty::{FnSig, VariantInfo};
use middle::ty::{Polytype};
-use middle::ty::{Disr, ExprTyProvider, ParamTy, ParameterEnvironment};
+use middle::ty::{Disr, ParamTy, ParameterEnvironment};
use middle::ty;
use middle::ty_fold::TypeFolder;
use middle::typeck::astconv::AstConv;
/// Here, the function `foo()` and the closure passed to
/// `bar()` will each have their own `FnCtxt`, but they will
/// share the inherited fields.
-pub struct Inherited<'a> {
- infcx: infer::InferCtxt<'a>,
+pub struct Inherited<'a, 'tcx: 'a> {
+ infcx: infer::InferCtxt<'a, 'tcx>,
locals: RefCell<NodeMap<ty::t>>,
param_env: ty::ParameterEnvironment,
}
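// The situation the comment above describes, at the source level
// (illustrative function names only):
//
//     fn foo() {
//         let x = 3i;
//         bar(|| x + 1);   // the closure body is checked in its own
//                          // FnCtxt, but shares foo's Inherited
//                          // (infcx, locals, param_env, ...)
//     }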
#[deriving(Clone)]
-pub struct FnCtxt<'a> {
+pub struct FnCtxt<'a, 'tcx: 'a> {
body_id: ast::NodeId,
// This flag is set to true if, during the writeback phase, we encounter
ps: RefCell<FnStyleState>,
- inh: &'a Inherited<'a>,
+ inh: &'a Inherited<'a, 'tcx>,
- ccx: &'a CrateCtxt<'a>,
+ ccx: &'a CrateCtxt<'a, 'tcx>,
}
-impl<'a> mem_categorization::Typer for FnCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'a, 'tcx> mem_categorization::Typer<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.ccx.tcx
}
fn node_ty(&self, id: ast::NodeId) -> McResult<ty::t> {
}
}
-impl<'a> Inherited<'a> {
- fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> Inherited<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
param_env: ty::ParameterEnvironment)
- -> Inherited<'a> {
+ -> Inherited<'a, 'tcx> {
Inherited {
infcx: infer::new_infer_ctxt(tcx),
locals: RefCell::new(NodeMap::new()),
}
// Used by check_const and check_enum_variants
-pub fn blank_fn_ctxt<'a>(
- ccx: &'a CrateCtxt<'a>,
- inh: &'a Inherited<'a>,
- rty: ty::t,
- body_id: ast::NodeId)
- -> FnCtxt<'a> {
+pub fn blank_fn_ctxt<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
+ inh: &'a Inherited<'a, 'tcx>,
+ rty: ty::t,
+ body_id: ast::NodeId)
+ -> FnCtxt<'a, 'tcx> {
FnCtxt {
body_id: body_id,
writeback_errors: Cell::new(false),
}
}
-fn static_inherited_fields<'a>(ccx: &'a CrateCtxt<'a>) -> Inherited<'a> {
+fn static_inherited_fields<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>)
+ -> Inherited<'a, 'tcx> {
// It's kind of a kludge to manufacture a fake function context
    // and statement context, but we might as well write the code only once
let param_env = ty::ParameterEnvironment {
Inherited::new(ccx.tcx, param_env)
}
-impl<'a> ExprTyProvider for FnCtxt<'a> {
- fn expr_ty(&self, ex: &ast::Expr) -> ty::t {
- self.expr_ty(ex)
- }
-
- fn ty_ctxt<'a>(&'a self) -> &'a ty::ctxt {
- self.ccx.tcx
- }
-}
-
-struct CheckTypeWellFormedVisitor<'a> { ccx: &'a CrateCtxt<'a> }
+struct CheckItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
+struct CheckTypeWellFormedVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
-impl<'a> Visitor<()> for CheckTypeWellFormedVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CheckTypeWellFormedVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
check_type_well_formed(self.ccx, i);
visit::walk_item(self, i, ());
}
}
-struct CheckItemTypesVisitor<'a> { ccx: &'a CrateCtxt<'a> }
-impl<'a> Visitor<()> for CheckItemTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CheckItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
check_item(self.ccx, i);
visit::walk_item(self, i, ());
}
}
-struct CheckItemSizedTypesVisitor<'a> { ccx: &'a CrateCtxt<'a> }
+struct CheckItemSizedTypesVisitor<'a, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'tcx>
+}
-impl<'a> Visitor<()> for CheckItemSizedTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CheckItemSizedTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
check_item_sized(self.ccx, i);
visit::walk_item(self, i, ());
}
}
-struct GatherLocalsVisitor<'a> {
- fcx: &'a FnCtxt<'a>
+struct GatherLocalsVisitor<'a, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'tcx>
}
-impl<'a> GatherLocalsVisitor<'a> {
+impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
fn assign(&mut self, nid: ast::NodeId, ty_opt: Option<ty::t>) {
match ty_opt {
None => {
}
}
-impl<'a> Visitor<()> for GatherLocalsVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for GatherLocalsVisitor<'a, 'tcx> {
// Add explicitly-declared locals.
fn visit_local(&mut self, local: &ast::Local, _: ()) {
let o_ty = match local.ty.node {
}
-fn check_fn<'a>(
- ccx: &'a CrateCtxt<'a>,
- fn_style: ast::FnStyle,
- fn_style_id: ast::NodeId,
- fn_sig: &ty::FnSig,
- decl: &ast::FnDecl,
- fn_id: ast::NodeId,
- body: &ast::Block,
- inherited: &'a Inherited<'a>)
- -> FnCtxt<'a>
-{
+fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
+ fn_style: ast::FnStyle,
+ fn_style_id: ast::NodeId,
+ fn_sig: &ty::FnSig,
+ decl: &ast::FnDecl,
+ fn_id: ast::NodeId,
+ body: &ast::Block,
+ inherited: &'a Inherited<'a, 'tcx>)
+ -> FnCtxt<'a, 'tcx> {
/*!
* Helper used by check_bare_fn and check_expr_fn. Does the
* grungy work of checking a function body and returns the
fcx.write_ty(id, t_1);
}
-impl<'a> AstConv for FnCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.ccx.tcx }
+impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.ccx.tcx }
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
ty::lookup_item_type(self.tcx(), id)
}
}
-impl<'a> FnCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.ccx.tcx }
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.ccx.tcx }
- pub fn infcx<'b>(&'b self) -> &'b infer::InferCtxt<'a> {
+ pub fn infcx<'b>(&'b self) -> &'b infer::InferCtxt<'a, 'tcx> {
&self.inh.infcx
}
self.ccx.tcx.sess.err_count() - self.err_count_on_creation
}
- pub fn vtable_context<'a>(&'a self) -> VtableContext<'a> {
+ pub fn vtable_context<'a>(&'a self) -> VtableContext<'a, 'tcx> {
VtableContext {
infcx: self.infcx(),
param_env: &self.inh.param_env,
}
}
-impl<'a> RegionScope for infer::InferCtxt<'a> {
+impl<'a, 'tcx> RegionScope for infer::InferCtxt<'a, 'tcx> {
fn default_region_bound(&self, span: Span) -> Option<ty::Region> {
Some(self.next_region_var(infer::MiscVariable(span)))
}
}
}
-impl<'a> FnCtxt<'a> {
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub fn tag(&self) -> String {
format!("{}", self as *const FnCtxt)
}
}
ast::ExprRepeat(ref element, ref count_expr) => {
check_expr_has_type(fcx, &**count_expr, ty::mk_uint());
- let count = ty::eval_repeat_count(fcx, &**count_expr);
+ let count = ty::eval_repeat_count(fcx.tcx(), &**count_expr);
let uty = match expected {
ExpectHasType(uty) => {
)
)
-pub struct Rcx<'a> {
- fcx: &'a FnCtxt<'a>,
+pub struct Rcx<'a, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
region_param_pairs: Vec<(ty::Region, ty::ParamTy)>,
}
}
-impl<'a> Rcx<'a> {
- pub fn new(fcx: &'a FnCtxt<'a>,
- initial_repeating_scope: ast::NodeId) -> Rcx<'a> {
+impl<'a, 'tcx> Rcx<'a, 'tcx> {
+ pub fn new(fcx: &'a FnCtxt<'a, 'tcx>,
+ initial_repeating_scope: ast::NodeId) -> Rcx<'a, 'tcx> {
Rcx { fcx: fcx,
repeating_scope: initial_repeating_scope,
region_param_pairs: Vec::new() }
}
- pub fn tcx(&self) -> &'a ty::ctxt {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.fcx.ccx.tcx
}
}
}
-impl<'fcx> mc::Typer for Rcx<'fcx> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'fcx, 'tcx> mc::Typer<'tcx> for Rcx<'fcx, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.fcx.ccx.tcx
}
}
}
-impl<'a> Visitor<()> for Rcx<'a> {
+impl<'a, 'tcx> Visitor<()> for Rcx<'a, 'tcx> {
// (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local,
// However, right now we run into an issue whereby some free
// regions are not properly related if they appear within the
RegionSubParamConstraint(Option<ty::t>, ty::Region, ty::ParamTy),
}
-struct Wf<'a> {
- tcx: &'a ty::ctxt,
+struct Wf<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
stack: Vec<(ty::Region, Option<ty::t>)>,
out: Vec<WfConstraint>,
}
wf.out
}
-impl<'a> Wf<'a> {
+impl<'a, 'tcx> Wf<'a, 'tcx> {
fn accumulate_from_ty(&mut self, ty: ty::t) {
debug!("Wf::accumulate_from_ty(ty={})",
ty.repr(self.tcx));
/// A vtable context includes an inference context, a parameter environment,
/// and a list of unboxed closure types.
-pub struct VtableContext<'a> {
- pub infcx: &'a infer::InferCtxt<'a>,
+pub struct VtableContext<'a, 'tcx: 'a> {
+ pub infcx: &'a infer::InferCtxt<'a, 'tcx>,
pub param_env: &'a ty::ParameterEnvironment,
pub unboxed_closures: &'a RefCell<DefIdMap<ty::UnboxedClosure>>,
}
-impl<'a> VtableContext<'a> {
- pub fn tcx(&self) -> &'a ty::ctxt { self.infcx.tcx }
+impl<'a, 'tcx> VtableContext<'a, 'tcx> {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.infcx.tcx }
}
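// Hypothetical usage, via the constructor shown earlier in this patch
// (FnCtxt::vtable_context): the context borrows the inference context and
// reaches the ty::ctxt<'tcx> through it.
//
//     let vcx = fcx.vtable_context();
//     let tcx = vcx.tcx();          // == vcx.infcx.tcx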
fn lookup_vtables(vcx: &VtableContext,
false)
}
-impl<'a, 'b> visit::Visitor<()> for &'a FnCtxt<'b> {
+impl<'a, 'b, 'tcx> visit::Visitor<()> for &'a FnCtxt<'b, 'tcx> {
fn visit_expr(&mut self, ex: &ast::Expr, _: ()) {
early_resolve_expr(ex, *self, false);
visit::walk_expr(self, ex, ());
// there, it applies a few ad-hoc checks that were not convenient to
// do elsewhere.
-struct WritebackCx<'cx> {
- fcx: &'cx FnCtxt<'cx>,
+struct WritebackCx<'cx, 'tcx: 'cx> {
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
}
-impl<'cx> WritebackCx<'cx> {
- fn new(fcx: &'cx FnCtxt) -> WritebackCx<'cx> {
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn new(fcx: &'cx FnCtxt<'cx, 'tcx>) -> WritebackCx<'cx, 'tcx> {
WritebackCx { fcx: fcx }
}
- fn tcx(&self) -> &'cx ty::ctxt {
+ fn tcx(&self) -> &'cx ty::ctxt<'tcx> {
self.fcx.tcx()
}
}
// below. In general, a function is made into a `visitor` if it must
// traffic in node-ids or update tables in the type context etc.
-impl<'cx> Visitor<()> for WritebackCx<'cx> {
+impl<'cx, 'tcx> Visitor<()> for WritebackCx<'cx, 'tcx> {
fn visit_item(&mut self, _: &ast::Item, _: ()) {
// Ignore items
}
}
}
-impl<'cx> WritebackCx<'cx> {
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fn visit_upvar_borrow_map(&self) {
if self.fcx.writeback_errors.get() {
return;
// The Resolver. This is the type folding engine that detects
// unresolved types and so forth.
-struct Resolver<'cx> {
- tcx: &'cx ty::ctxt,
- infcx: &'cx infer::InferCtxt<'cx>,
+struct Resolver<'cx, 'tcx: 'cx> {
+ tcx: &'cx ty::ctxt<'tcx>,
+ infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason,
}
-impl<'cx> Resolver<'cx> {
- fn new(fcx: &'cx FnCtxt<'cx>,
+impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
+ fn new(fcx: &'cx FnCtxt<'cx, 'tcx>,
reason: ResolveReason)
- -> Resolver<'cx>
+ -> Resolver<'cx, 'tcx>
{
Resolver { infcx: fcx.infcx(),
tcx: fcx.tcx(),
reason: reason }
}
- fn from_infcx(infcx: &'cx infer::InferCtxt<'cx>,
+ fn from_infcx(infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason)
- -> Resolver<'cx>
+ -> Resolver<'cx, 'tcx>
{
Resolver { infcx: infcx,
tcx: infcx.tcx,
}
}
-impl<'cx> TypeFolder for Resolver<'cx> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx
}
}
}
-struct CoherenceChecker<'a> {
- crate_context: &'a CrateCtxt<'a>,
- inference_context: InferCtxt<'a>,
+struct CoherenceChecker<'a, 'tcx: 'a> {
+ crate_context: &'a CrateCtxt<'a, 'tcx>,
+ inference_context: InferCtxt<'a, 'tcx>,
}
-struct CoherenceCheckVisitor<'a> {
- cc: &'a CoherenceChecker<'a>
+struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for CoherenceCheckVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for CoherenceCheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item, _: ()) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
}
}
-struct PrivilegedScopeVisitor<'a> { cc: &'a CoherenceChecker<'a> }
+struct PrivilegedScopeVisitor<'a, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'tcx>
+}
-impl<'a> visit::Visitor<()> for PrivilegedScopeVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for PrivilegedScopeVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item, _: ()) {
match item.node {
}
}
-impl<'a> CoherenceChecker<'a> {
+impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// of type parameters and supertraits. This is information we need to
// know later when parsing field defs.
-struct CollectTraitDefVisitor<'a> {
- ccx: &'a CrateCtxt<'a>
+struct CollectTraitDefVisitor<'a, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for CollectTraitDefVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for CollectTraitDefVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
match i.node {
ast::ItemTrait(..) => {
///////////////////////////////////////////////////////////////////////////
// Second phase: collection proper.
-struct CollectItemTypesVisitor<'a> {
- ccx: &'a CrateCtxt<'a>
+struct CollectItemTypesVisitor<'a, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for CollectItemTypesVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for CollectItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
convert(self.ccx, i);
visit::walk_item(self, i, ());
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t;
}
-impl<'a> ToTy for CrateCtxt<'a> {
+impl<'a, 'tcx> ToTy for CrateCtxt<'a, 'tcx> {
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t {
ast_ty_to_ty(self, rs, ast_ty)
}
}
-impl<'a> AstConv for CrateCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> AstConv<'tcx> for CrateCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
if id.krate != ast::LOCAL_CRATE {
// Note: Coerce is not actually a combiner, in that it does not
// conform to the same interface, though it performs a similar
// function.
-pub struct Coerce<'f>(pub CombineFields<'f>);
+pub struct Coerce<'f, 'tcx: 'f>(pub CombineFields<'f, 'tcx>);
-impl<'f> Coerce<'f> {
- pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> {
+impl<'f, 'tcx> Coerce<'f, 'tcx> {
+ pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f, 'tcx> {
let Coerce(ref v) = *self; v
}
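// The kind of adjustment Coerce computes, at the source level
// (illustrative): using a `&mut int` where a `&int` is expected inserts an
// AutoPtr adjustment rather than requiring an exact type match.
//
//     fn takes_ref(_: &int) {}
//     let mut x = 5i;
//     takes_ref(&mut x);    // coerced: &mut int -> &int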
use syntax::ast;
use syntax::abi;
-pub trait Combine {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a>;
+pub trait Combine<'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx>;
fn tag(&self) -> String;
fn a_is_expected(&self) -> bool;
fn trace(&self) -> TypeTrace;
- fn equate<'a>(&'a self) -> Equate<'a>;
- fn sub<'a>(&'a self) -> Sub<'a>;
- fn lub<'a>(&'a self) -> Lub<'a>;
- fn glb<'a>(&'a self) -> Glb<'a>;
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx>;
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx>;
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx>;
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx>;
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt>;
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t>;
return Ok(substs);
- fn relate_region_params<C:Combine>(this: &C,
- item_def_id: ast::DefId,
- variances: &[ty::Variance],
- a_rs: &[ty::Region],
- b_rs: &[ty::Region])
- -> cres<Vec<ty::Region>>
- {
+ fn relate_region_params<'tcx, C: Combine<'tcx>>(this: &C,
+ item_def_id: ast::DefId,
+ variances: &[ty::Variance],
+ a_rs: &[ty::Region],
+ b_rs: &[ty::Region])
+ -> cres<Vec<ty::Region>> {
let tcx = this.infcx().tcx;
let num_region_params = variances.len();
}
#[deriving(Clone)]
-pub struct CombineFields<'a> {
- pub infcx: &'a InferCtxt<'a>,
+pub struct CombineFields<'a, 'tcx: 'a> {
+ pub infcx: &'a InferCtxt<'a, 'tcx>,
pub a_is_expected: bool,
pub trace: TypeTrace,
}
-pub fn expected_found<C:Combine,T>(
+pub fn expected_found<'tcx, C: Combine<'tcx>, T>(
this: &C, a: T, b: T) -> ty::expected_found<T> {
if this.a_is_expected() {
ty::expected_found {expected: a, found: b}
}
}
-pub fn super_fn_sigs<C:Combine>(this: &C, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
+pub fn super_fn_sigs<'tcx, C: Combine<'tcx>>(this: &C,
+ a: &ty::FnSig,
+ b: &ty::FnSig)
+ -> cres<ty::FnSig> {
- fn argvecs<C:Combine>(this: &C, a_args: &[ty::t], b_args: &[ty::t]) -> cres<Vec<ty::t> > {
+ fn argvecs<'tcx, C: Combine<'tcx>>(this: &C,
+ a_args: &[ty::t],
+ b_args: &[ty::t])
+ -> cres<Vec<ty::t>> {
if a_args.len() == b_args.len() {
result::collect(a_args.iter().zip(b_args.iter())
.map(|(a, b)| this.args(*a, *b)))
variadic: a.variadic})
}
-pub fn super_tys<C:Combine>(this: &C, a: ty::t, b: ty::t) -> cres<ty::t> {
+pub fn super_tys<'tcx, C: Combine<'tcx>>(this: &C, a: ty::t, b: ty::t) -> cres<ty::t> {
// This is a horrible hack - historically, [T] was not treated as a type,
// so, for example, &T and &[U] should not unify. In fact the only thing
// &[U] should unify with is &[T]. We preserve that behaviour with this
// check.
- fn check_ptr_to_unsized<C:Combine>(this: &C,
- a: ty::t,
- b: ty::t,
- a_inner: ty::t,
- b_inner: ty::t,
- result: ty::t) -> cres<ty::t> {
+ fn check_ptr_to_unsized<'tcx, C: Combine<'tcx>>(this: &C,
+ a: ty::t,
+ b: ty::t,
+ a_inner: ty::t,
+ b_inner: ty::t,
+ result: ty::t) -> cres<ty::t> {
match (&ty::get(a_inner).sty, &ty::get(b_inner).sty) {
(&ty::ty_vec(_, None), &ty::ty_vec(_, None)) |
(&ty::ty_str, &ty::ty_str) |
_ => Err(ty::terr_sorts(expected_found(this, a, b)))
};
- fn unify_integral_variable<C:Combine>(
+ fn unify_integral_variable<'tcx, C: Combine<'tcx>>(
this: &C,
vid_is_expected: bool,
vid: ty::IntVid,
}
}
- fn unify_float_variable<C:Combine>(
+ fn unify_float_variable<'tcx, C: Combine<'tcx>>(
this: &C,
vid_is_expected: bool,
vid: ty::FloatVid,
}
}
-impl<'f> CombineFields<'f> {
- pub fn switch_expected(&self) -> CombineFields<'f> {
+impl<'f, 'tcx> CombineFields<'f, 'tcx> {
+ pub fn switch_expected(&self) -> CombineFields<'f, 'tcx> {
CombineFields {
a_is_expected: !self.a_is_expected,
..(*self).clone()
}
}
- fn equate(&self) -> Equate<'f> {
+ fn equate(&self) -> Equate<'f, 'tcx> {
Equate((*self).clone())
}
- fn sub(&self) -> Sub<'f> {
+ fn sub(&self) -> Sub<'f, 'tcx> {
Sub((*self).clone())
}
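// `switch_expected` flips which side is reported as "expected" in error
// messages; it is how contravariant positions are handled, e.g. in
// Sub::contratys later in this patch:
//
//     Sub(self.fields.switch_expected()).tys(b, a)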
use syntax::ast::{Onceness, FnStyle};
-pub struct Equate<'f> {
- fields: CombineFields<'f>
+pub struct Equate<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
}
#[allow(non_snake_case)]
-pub fn Equate<'f>(cf: CombineFields<'f>) -> Equate<'f> {
+pub fn Equate<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Equate<'f, 'tcx> {
Equate { fields: cf }
}
-impl<'f> Combine for Equate<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
+impl<'f, 'tcx> Combine<'tcx> for Equate<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "eq".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
self.tys(a, b)
span: codemap::Span);
}
-impl<'a> ErrorReporting for InferCtxt<'a> {
+impl<'a, 'tcx> ErrorReporting for InferCtxt<'a, 'tcx> {
fn report_region_errors(&self,
errors: &Vec<RegionResolutionError>) {
let p_errors = self.process_errors(errors);
region_names: &'a HashSet<ast::Name>
}
-struct Rebuilder<'a> {
- tcx: &'a ty::ctxt,
+struct Rebuilder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
fn_decl: ast::P<ast::FnDecl>,
expl_self_opt: Option<ast::ExplicitSelf_>,
generics: &'a ast::Generics,
Kept
}
-impl<'a> Rebuilder<'a> {
- fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
fn_decl: ast::P<ast::FnDecl>,
expl_self_opt: Option<ast::ExplicitSelf_>,
generics: &'a ast::Generics,
same_regions: &'a [SameRegions],
life_giver: &'a LifeGiver)
- -> Rebuilder<'a> {
+ -> Rebuilder<'a, 'tcx> {
Rebuilder {
tcx: tcx,
fn_decl: fn_decl,
}
}
-impl<'a> ErrorReportingHelpers for InferCtxt<'a> {
+impl<'a, 'tcx> ErrorReportingHelpers for InferCtxt<'a, 'tcx> {
fn give_expl_lifetime_param(&self,
decl: &ast::FnDecl,
fn_style: ast::FnStyle,
use util::ppaux::Repr;
/// "Greatest lower bound" (common subtype)
-pub struct Glb<'f> {
- fields: CombineFields<'f>
+pub struct Glb<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
}
#[allow(non_snake_case)]
-pub fn Glb<'f>(cf: CombineFields<'f>) -> Glb<'f> {
+pub fn Glb<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Glb<'f, 'tcx> {
Glb { fields: cf }
}
-impl<'f> Combine for Glb<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
+impl<'f, 'tcx> Combine<'tcx> for Glb<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "glb".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
let tcx = self.fields.infcx.tcx;
fn relate_bound<'a>(&'a self, v: ty::t, a: ty::t, b: ty::t) -> cres<()>;
}
-impl<'a> LatticeDir for Lub<'a> {
+impl<'a, 'tcx> LatticeDir for Lub<'a, 'tcx> {
fn ty_bot(&self, t: ty::t) -> cres<ty::t> {
Ok(t)
}
}
}
-impl<'a> LatticeDir for Glb<'a> {
+impl<'a, 'tcx> LatticeDir for Glb<'a, 'tcx> {
fn ty_bot(&self, _: ty::t) -> cres<ty::t> {
Ok(ty::mk_bot())
}
}
}
-pub fn super_lattice_tys<L:LatticeDir+Combine>(this: &L,
- a: ty::t,
- b: ty::t)
- -> cres<ty::t>
+pub fn super_lattice_tys<'tcx, L:LatticeDir+Combine<'tcx>>(this: &L,
+ a: ty::t,
+ b: ty::t)
+ -> cres<ty::t>
{
debug!("{}.lattice_tys({}, {})",
this.tag(),
// Random utility functions used by LUB/GLB when computing LUB/GLB of
// fn types
-pub fn var_ids<T:Combine>(this: &T,
- map: &HashMap<ty::BoundRegion, ty::Region>)
- -> Vec<RegionVid> {
+pub fn var_ids<'tcx, T: Combine<'tcx>>(this: &T,
+ map: &HashMap<ty::BoundRegion, ty::Region>)
+ -> Vec<RegionVid> {
map.iter().map(|(_, r)| match *r {
ty::ReInfer(ty::ReVar(r)) => { r }
r => {
use util::ppaux::Repr;
/// "Least upper bound" (common supertype)
-pub struct Lub<'f> {
- fields: CombineFields<'f>
+pub struct Lub<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
}
#[allow(non_snake_case)]
-pub fn Lub<'f>(cf: CombineFields<'f>) -> Lub<'f> {
+pub fn Lub<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Lub<'f, 'tcx> {
Lub { fields: cf }
}
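// Hypothetical usage, mirroring the test harness later in this patch:
// build a Lub combiner from an inference context and compute a common
// supertype of two types (`infcx`, `trace`, `t1`, `t2` assumed in scope).
//
//     match Lub(infcx.combine_fields(true, trace)).tys(t1, t2) {
//         Ok(t) => t,
//         Err(ref e) => fail!("{}", ty::type_err_to_str(infcx.tcx, e)),
//     }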
-impl<'f> Combine for Lub<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
+impl<'f, 'tcx> Combine<'tcx> for Lub<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "lub".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
let tcx = self.fields.infcx.tcx;
pub type fres<T> = Result<T, fixup_err>; // "fixup result"
pub type CoerceResult = cres<Option<ty::AutoAdjustment>>;
-pub struct InferCtxt<'a> {
- pub tcx: &'a ty::ctxt,
+pub struct InferCtxt<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
// We instantiate UnificationTable with bounds<ty::t> because the
// types that might instantiate a general type variable have an
// For region variables.
region_vars:
- RegionVarBindings<'a>,
+ RegionVarBindings<'a, 'tcx>,
}
/// Why did we require that the two types be related?
}
}
-pub fn new_infer_ctxt<'a>(tcx: &'a ty::ctxt) -> InferCtxt<'a> {
+pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>)
+ -> InferCtxt<'a, 'tcx> {
InferCtxt {
tcx: tcx,
type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
region_vars_snapshot: RegionSnapshot,
}
-impl<'a> InferCtxt<'a> {
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn combine_fields<'a>(&'a self, a_is_expected: bool, trace: TypeTrace)
- -> CombineFields<'a> {
+ -> CombineFields<'a, 'tcx> {
CombineFields {infcx: self,
a_is_expected: a_is_expected,
trace: trace}
}
- pub fn equate<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Equate<'a> {
+ pub fn equate<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Equate<'a, 'tcx> {
Equate(self.combine_fields(a_is_expected, trace))
}
- pub fn sub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Sub<'a> {
+ pub fn sub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Sub<'a, 'tcx> {
Sub(self.combine_fields(a_is_expected, trace))
}
- pub fn lub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Lub<'a> {
+ pub fn lub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Lub<'a, 'tcx> {
Lub(self.combine_fields(a_is_expected, trace))
}
}
}
-impl<'a> InferCtxt<'a> {
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn next_ty_var_id(&self) -> TyVid {
self.type_variables
.borrow_mut()
pub type CombineMap = HashMap<TwoRegions, RegionVid>;
-pub struct RegionVarBindings<'a> {
- tcx: &'a ty::ctxt,
+pub struct RegionVarBindings<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
var_origins: RefCell<Vec<RegionVariableOrigin>>,
// Constraints of the form `A <= B` introduced by the region
length: uint
}
-impl<'a> RegionVarBindings<'a> {
- pub fn new(tcx: &'a ty::ctxt) -> RegionVarBindings<'a> {
+impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
+ pub fn new(tcx: &'a ty::ctxt<'tcx>) -> RegionVarBindings<'a, 'tcx> {
RegionVarBindings {
tcx: tcx,
var_origins: RefCell::new(Vec::new()),
*self.values.borrow_mut() = Some(v);
errors
}
-}
-impl<'a> RegionVarBindings<'a> {
fn is_subregion_of(&self, sub: Region, sup: Region) -> bool {
self.tcx.region_maps.is_subregion_of(sub, sup)
}
type RegionGraph = graph::Graph<(), Constraint>;
-impl<'a> RegionVarBindings<'a> {
+impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
fn infer_variable_values(&self,
errors: &mut Vec<RegionResolutionError>)
-> Vec<VarValue>
pub static resolve_and_force_all_but_regions: uint =
(resolve_all | force_all) & not_regions;
-pub struct ResolveState<'a> {
- infcx: &'a InferCtxt<'a>,
+pub struct ResolveState<'a, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
modes: uint,
err: Option<fixup_err>,
v_seen: Vec<TyVid> ,
type_depth: uint,
}
-pub fn resolver<'a>(infcx: &'a InferCtxt,
- modes: uint,
- _: Option<Span>)
- -> ResolveState<'a> {
+pub fn resolver<'a, 'tcx>(infcx: &'a InferCtxt<'a, 'tcx>,
+ modes: uint,
+ _: Option<Span>)
+ -> ResolveState<'a, 'tcx> {
ResolveState {
infcx: infcx,
modes: modes,
}
}
-impl<'a> ty_fold::TypeFolder for ResolveState<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'a, 'tcx> ty_fold::TypeFolder<'tcx> for ResolveState<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.infcx.tcx
}
}
}
-impl<'a> ResolveState<'a> {
+impl<'a, 'tcx> ResolveState<'a, 'tcx> {
pub fn should(&mut self, mode: uint) -> bool {
(self.modes & mode) == mode
}
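// The mode words are bitsets, so `should(mode)` holds only when *every*
// bit of `mode` is present in `self.modes`; e.g. (hypothetical values):
//
//     (0b0111 & 0b0011) == 0b0011   // true:  both requested bits set
//     (0b0101 & 0b0011) == 0b0011   // false: the 0b0010 bit is missing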
/// "Greatest lower bound" (common subtype)
-pub struct Sub<'f> {
- fields: CombineFields<'f>
+pub struct Sub<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
}
#[allow(non_snake_case)]
-pub fn Sub<'f>(cf: CombineFields<'f>) -> Sub<'f> {
+pub fn Sub<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Sub<'f, 'tcx> {
Sub { fields: cf }
}
-impl<'f> Combine for Sub<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.fields.infcx }
+impl<'f, 'tcx> Combine<'tcx> for Sub<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "sub".to_string() }
fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn equate<'a>(&'a self) -> Equate<'a> { Equate(self.fields.clone()) }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.fields.clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.fields.clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.fields.clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
Sub(self.fields.switch_expected()).tys(b, a)
use syntax::ast;
use util::ppaux::{ty_to_string, UserString};
-struct Env<'a> {
+use arena::TypedArena;
+
+struct Env<'a, 'tcx: 'a> {
krate: ast::Crate,
- tcx: &'a ty::ctxt,
- infcx: &'a infer::InferCtxt<'a>,
+ infcx: &'a infer::InferCtxt<'a, 'tcx>,
}
struct RH<'a> {
let named_region_map = resolve_lifetime::krate(&sess, &krate);
let region_map = region::resolve_crate(&sess, &krate);
let stability_index = stability::Index::build(&krate);
+ let type_arena = TypedArena::new();
let tcx = ty::mk_ctxt(sess,
+ &type_arena,
def_map,
named_region_map,
ast_map,
lang_items,
stability_index);
let infcx = infer::new_infer_ctxt(&tcx);
- let env = Env {krate: krate,
- tcx: &tcx,
- infcx: &infcx};
+ let env = Env {
+ krate: krate,
+ infcx: &infcx
+ };
body(env);
infcx.resolve_regions_and_report_errors();
assert_eq!(tcx.sess.err_count(), expected_err_count);
}
-impl<'a> Env<'a> {
+impl<'a, 'tcx> Env<'a, 'tcx> {
pub fn create_region_hierarchy(&self, rh: &RH) {
for child_rh in rh.sub.iter() {
self.create_region_hierarchy(child_rh);
- self.tcx.region_maps.record_encl_scope(child_rh.id, rh.id);
+ self.infcx.tcx.region_maps.record_encl_scope(child_rh.id, rh.id);
}
}
-> Option<ast::NodeId> {
assert!(idx < names.len());
for item in m.items.iter() {
- if item.ident.user_string(this.tcx) == names[idx] {
+ if item.ident.user_string(this.infcx.tcx) == names[idx] {
return search(this, &**item, idx+1, names);
}
}
match infer::mk_subty(self.infcx, true, infer::Misc(DUMMY_SP), a, b) {
Ok(_) => true,
Err(ref e) => fail!("Encountered error: {}",
- ty::type_err_to_str(self.tcx, e))
+ ty::type_err_to_str(self.infcx.tcx, e))
}
}
}
pub fn ty_to_string(&self, a: ty::t) -> String {
- ty_to_string(self.tcx, a)
+ ty_to_string(self.infcx.tcx, a)
}
pub fn t_fn(&self,
output_ty: ty::t)
-> ty::t
{
- ty::mk_ctor_fn(self.tcx, binder_id, input_tys, output_ty)
+ ty::mk_ctor_fn(self.infcx.tcx, binder_id, input_tys, output_ty)
}
pub fn t_int(&self) -> ty::t {
}
pub fn t_rptr_late_bound(&self, binder_id: ast::NodeId, id: uint) -> ty::t {
- ty::mk_imm_rptr(self.tcx, ty::ReLateBound(binder_id, ty::BrAnon(id)),
+ ty::mk_imm_rptr(self.infcx.tcx, ty::ReLateBound(binder_id, ty::BrAnon(id)),
self.t_int())
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> ty::t {
- ty::mk_imm_rptr(self.tcx, ty::ReScope(id), self.t_int())
+ ty::mk_imm_rptr(self.infcx.tcx, ty::ReScope(id), self.t_int())
}
pub fn t_rptr_free(&self, nid: ast::NodeId, id: uint) -> ty::t {
- ty::mk_imm_rptr(self.tcx,
+ ty::mk_imm_rptr(self.infcx.tcx,
ty::ReFree(ty::FreeRegion {scope_id: nid,
bound_region: ty::BrAnon(id)}),
self.t_int())
}
pub fn t_rptr_static(&self) -> ty::t {
- ty::mk_imm_rptr(self.tcx, ty::ReStatic, self.t_int())
+ ty::mk_imm_rptr(self.infcx.tcx, ty::ReStatic, self.t_int())
}
pub fn dummy_type_trace(&self) -> infer::TypeTrace {
}
}
- pub fn lub(&self) -> Lub<'a> {
+ pub fn lub(&self) -> Lub<'a, 'tcx> {
let trace = self.dummy_type_trace();
Lub(self.infcx.combine_fields(true, trace))
}
- pub fn glb(&self) -> Glb<'a> {
+ pub fn glb(&self) -> Glb<'a, 'tcx> {
let trace = self.dummy_type_trace();
Glb(self.infcx.combine_fields(true, trace))
}
match self.lub().tys(t1, t2) {
Ok(t) => t,
Err(ref e) => fail!("unexpected error computing LUB: {:?}",
- ty::type_err_to_str(self.tcx, e))
+ ty::type_err_to_str(self.infcx.tcx, e))
}
}
}
Err(ref e) => {
fail!("unexpected error in LUB: {}",
- ty::type_err_to_str(self.tcx, e))
+ ty::type_err_to_str(self.infcx.tcx, e))
}
}
}
-> ures;
}
-impl<'tcx,V:SimplyUnifiable,K:UnifyKey<Option<V>>>
- InferCtxtMethodsForSimplyUnifiableTypes<V,K> for InferCtxt<'tcx>
+impl<'a,'tcx,V:SimplyUnifiable,K:UnifyKey<Option<V>>>
+ InferCtxtMethodsForSimplyUnifiableTypes<V,K> for InferCtxt<'a, 'tcx>
{
fn simple_vars(&self,
a_is_expected: bool,
pub type impl_vtable_map = RefCell<DefIdMap<vtable_res>>;
-pub struct CrateCtxt<'a> {
+pub struct CrateCtxt<'a, 'tcx: 'a> {
// A mapping from method call sites to traits that have that method.
trait_map: resolve::TraitMap,
- tcx: &'a ty::ctxt
+ tcx: &'a ty::ctxt<'tcx>
}
// Functions that write types into the node type table
* The first pass over the crate simply builds up the set of inferreds.
*/
-struct TermsContext<'a> {
- tcx: &'a ty::ctxt,
+struct TermsContext<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
arena: &'a Arena,
empty_variances: Rc<ty::ItemVariances>,
term: VarianceTermPtr<'a>,
}
-fn determine_parameters_to_be_inferred<'a>(tcx: &'a ty::ctxt,
- arena: &'a mut Arena,
- krate: &ast::Crate)
- -> TermsContext<'a> {
+fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
+ arena: &'a mut Arena,
+ krate: &ast::Crate)
+ -> TermsContext<'a, 'tcx> {
let mut terms_cx = TermsContext {
tcx: tcx,
arena: arena,
terms_cx
}
-impl<'a> TermsContext<'a> {
+impl<'a, 'tcx> TermsContext<'a, 'tcx> {
fn add_inferred(&mut self,
item_id: ast::NodeId,
kind: ParamKind,
}
}
-impl<'a> Visitor<()> for TermsContext<'a> {
+impl<'a, 'tcx> Visitor<()> for TermsContext<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
debug!("add_inferreds for item {}", item.repr(self.tcx));
* We walk the set of items and, for each member, generate new constraints.
*/
-struct ConstraintContext<'a> {
- terms_cx: TermsContext<'a>,
+struct ConstraintContext<'a, 'tcx: 'a> {
+ terms_cx: TermsContext<'a, 'tcx>,
// These are the def-id of the std::kinds::marker::InvariantType,
// std::kinds::marker::InvariantLifetime, and so on. The arrays
variance: &'a VarianceTerm<'a>,
}
-fn add_constraints_from_crate<'a>(terms_cx: TermsContext<'a>,
- krate: &ast::Crate)
- -> ConstraintContext<'a> {
+fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>,
+ krate: &ast::Crate)
+ -> ConstraintContext<'a, 'tcx> {
let mut invariant_lang_items = [None, ..2];
let mut covariant_lang_items = [None, ..2];
let mut contravariant_lang_items = [None, ..2];
constraint_cx
}
-impl<'a> Visitor<()> for ConstraintContext<'a> {
+impl<'a, 'tcx> Visitor<()> for ConstraintContext<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let did = ast_util::local_def(item.id);
let tcx = self.terms_cx.tcx;
}
}
-impl<'a> ConstraintContext<'a> {
- fn tcx(&self) -> &'a ty::ctxt {
+impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
+ fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.terms_cx.tcx
}
* inferred is then written into the `variance_map` in the tcx.
*/
-struct SolveContext<'a> {
- terms_cx: TermsContext<'a>,
+struct SolveContext<'a, 'tcx: 'a> {
+ terms_cx: TermsContext<'a, 'tcx>,
constraints: Vec<Constraint<'a>> ,
// Maps from an InferredIndex to the inferred value for that variable.
solutions_cx.write();
}
-impl<'a> SolveContext<'a> {
+impl<'a, 'tcx> SolveContext<'a, 'tcx> {
fn solve(&mut self) {
// Propagate constraints until a fixed point is reached. Note
// that the maximum number of iterations is 2C where C is the
AD_Intel = 1
}
-#[deriving(PartialEq)]
+#[deriving(PartialEq, Clone)]
#[repr(C)]
pub enum CodeGenOptLevel {
CodeGenLevelNone = 0,
use rustc::middle::subst;
use rustc::middle::stability;
-use core;
+use core::DocContext;
use doctree;
use clean;
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
-pub fn try_inline(id: ast::NodeId, into: Option<ast::Ident>)
+pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
- let cx = ::ctxtkey.get().unwrap();
- let tcx = match cx.maybe_typed {
- core::Typed(ref tycx) => tycx,
- core::NotTyped(_) => return None,
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
- try_inline_def(&**cx, tcx, def).map(|vec| {
+ try_inline_def(cx, tcx, def).map(|vec| {
vec.move_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
- item.name = Some(into.clean());
+ item.name = Some(into.clean(cx));
}
_ => {}
}
})
}
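// Hypothetical call site under the new signature: the DocContext is
// threaded explicitly instead of being recovered from task-local storage
// via `ctxtkey`.
//
//     let inlined = try_inline(cx, id, Some(ident));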
-fn try_inline_def(cx: &core::DocContext,
- tcx: &ty::ctxt,
+fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
- clean::TraitItem(build_external_trait(tcx, did))
+ clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, style) => {
// If this function is a tuple struct constructor, we just skip it
return None
}
record_extern_fqn(cx, did, clean::TypeFunction);
- clean::FunctionItem(build_external_function(tcx, did, style))
+ clean::FunctionItem(build_external_function(cx, tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).move_iter());
- clean::StructItem(build_struct(tcx, did))
+ clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).move_iter());
- build_type(tcx, did)
+ build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
- clean::StaticItem(build_static(tcx, did, mtbl))
+ clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
_ => return None,
};
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
- attrs: load_attrs(tcx, did),
+ attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
- stability: stability::lookup(tcx, did).clean(),
+ stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
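// All of the `.clean(cx)` calls above reflect the same refactor: the
// `Clean` trait now takes the DocContext as an argument rather than
// reading it from task-local storage. A sketch of the assumed new shape
// (the trait itself lives in clean/mod.rs, outside this hunk):
//
//     pub trait Clean<T> {
//         fn clean(&self, cx: &DocContext) -> T;
//     }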
-pub fn load_attrs(tcx: &ty::ctxt, did: ast::DefId) -> Vec<clean::Attribute> {
+pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
+ did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.move_iter().map(|a| {
- a.clean()
+ a.clean(cx)
}));
});
attrs
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
-pub fn record_extern_fqn(cx: &core::DocContext,
- did: ast::DefId,
- kind: clean::TypeKind) {
- match cx.maybe_typed {
- core::Typed(ref tcx) => {
+pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
+ match cx.tcx_opt() {
+ Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
- core::NotTyped(..) => {}
+ None => {}
}
}
-pub fn build_external_trait(tcx: &ty::ctxt, did: ast::DefId) -> clean::Trait {
+pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
+ did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
- let trait_items = ty::trait_items(tcx, did).clean();
+ let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
let mut items = trait_items.move_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
- let bounds = trait_def.bounds.clean();
+ let bounds = trait_def.bounds.clean(cx);
clean::Trait {
- generics: (&def.generics, subst::TypeSpace).clean(),
+ generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
}
}
-fn build_external_function(tcx: &ty::ctxt,
+fn build_external_function(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
- ty::ty_bare_fn(ref f) => (did, &f.sig).clean(),
+ ty::ty_bare_fn(ref f) => (did, &f.sig).clean(cx),
_ => fail!("bad function"),
},
- generics: (&t.generics, subst::FnSpace).clean(),
+ generics: (&t.generics, subst::FnSpace).clean(cx),
fn_style: style,
}
}
-fn build_struct(tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
+fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
- generics: (&t.generics, subst::TypeSpace).clean(),
- fields: fields.clean(),
+ generics: (&t.generics, subst::TypeSpace).clean(cx),
+ fields: fields.clean(cx),
fields_stripped: false,
}
}
-fn build_type(tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
+fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
- generics: (&t.generics, subst::TypeSpace).clean(),
+ generics: (&t.generics, subst::TypeSpace).clean(cx),
variants_stripped: false,
- variants: ty::enum_variants(tcx, edid).clean(),
+ variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
- type_: t.ty.clean(),
- generics: (&t.generics, subst::TypeSpace).clean(),
+ type_: t.ty.clean(cx),
+ generics: (&t.generics, subst::TypeSpace).clean(cx),
})
}
-fn build_impls(cx: &core::DocContext,
- tcx: &ty::ctxt,
+fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
populate_impls(cx, tcx, def, &mut impls)
});
- fn populate_impls(cx: &core::DocContext,
- tcx: &ty::ctxt,
+ fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
impls.move_iter().filter_map(|a| a).collect()
}
-fn build_impl(cx: &core::DocContext,
- tcx: &ty::ctxt,
+fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
- let trait_attrs = load_attrs(tcx, t.def_id);
+ let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
None => {}
}
- let attrs = load_attrs(tcx, did);
+ let attrs = load_attrs(cx, tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
- let mut item = method.clean();
+ let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
- trait_: associated_trait.clean().map(|bound| {
+ trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::RegionBound => unreachable!(),
}
}),
- for_: ty.ty.clean(),
- generics: (&ty.generics, subst::TypeSpace).clean(),
+ for_: ty.ty.clean(cx),
+ generics: (&ty.generics, subst::TypeSpace).clean(cx),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
- stability: stability::lookup(tcx, did).clean(),
+ stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
}
}
-fn build_module(cx: &core::DocContext, tcx: &ty::ctxt,
+fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
- fn fill_in(cx: &core::DocContext, tcx: &ty::ctxt, did: ast::DefId,
+ fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
}
}
-fn build_static(tcx: &ty::ctxt,
+fn build_static(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
- type_: ty::lookup_item_type(tcx, did).ty.clean(),
+ type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
use std::u32;
use std::gc::{Gc, GC};
-use core;
+use core::DocContext;
use doctree;
use visit_ast;
mod inline;
-// load the current DocContext from TLD
-fn get_cx() -> Gc<core::DocContext> {
- *super::ctxtkey.get().unwrap()
-}
-
-// extract the stability index for a node from TLD, if possible
-fn get_stability(def_id: ast::DefId) -> Option<Stability> {
- get_cx().tcx_opt().and_then(|tcx| stability::lookup(tcx, def_id))
- .map(|stab| stab.clean())
+// extract the stability index for a node from tcx, if possible
+fn get_stability(cx: &DocContext, def_id: ast::DefId) -> Option<Stability> {
+ cx.tcx_opt().and_then(|tcx| stability::lookup(tcx, def_id)).clean(cx)
}
pub trait Clean<T> {
- fn clean(&self) -> T;
+ fn clean(&self, cx: &DocContext) -> T;
}
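// Editorial sketch (not part of the patch): a minimal impl under the new
// signature, using hypothetical types `MyNode`/`MyItem`. The context is
// threaded through every nested `clean` call instead of being read back
// from task-local data:
//
//     impl Clean<MyItem> for MyNode {
//         fn clean(&self, cx: &DocContext) -> MyItem {
//             MyItem { name: self.name.clean(cx) }  // name: ast::Ident -> String
//         }
//     }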
impl<T: Clean<U>, U> Clean<Vec<U>> for Vec<T> {
- fn clean(&self) -> Vec<U> {
- self.iter().map(|x| x.clean()).collect()
+ fn clean(&self, cx: &DocContext) -> Vec<U> {
+ self.iter().map(|x| x.clean(cx)).collect()
}
}
impl<T: Clean<U>, U> Clean<VecPerParamSpace<U>> for VecPerParamSpace<T> {
- fn clean(&self) -> VecPerParamSpace<U> {
- self.map(|x| x.clean())
+ fn clean(&self, cx: &DocContext) -> VecPerParamSpace<U> {
+ self.map(|x| x.clean(cx))
}
}
impl<T: 'static + Clean<U>, U> Clean<U> for Gc<T> {
- fn clean(&self) -> U {
- (**self).clean()
+ fn clean(&self, cx: &DocContext) -> U {
+ (**self).clean(cx)
}
}
impl<T: Clean<U>, U> Clean<U> for Rc<T> {
- fn clean(&self) -> U {
- (**self).clean()
+ fn clean(&self, cx: &DocContext) -> U {
+ (**self).clean(cx)
}
}
impl<T: Clean<U>, U> Clean<Option<U>> for Option<T> {
- fn clean(&self) -> Option<U> {
+ fn clean(&self, cx: &DocContext) -> Option<U> {
match self {
&None => None,
- &Some(ref v) => Some(v.clean())
+ &Some(ref v) => Some(v.clean(cx))
}
}
}
impl<T: Clean<U>, U> Clean<Vec<U>> for syntax::owned_slice::OwnedSlice<T> {
- fn clean(&self) -> Vec<U> {
- self.iter().map(|x| x.clean()).collect()
+ fn clean(&self, cx: &DocContext) -> Vec<U> {
+ self.iter().map(|x| x.clean(cx)).collect()
}
}
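// Editorial note: these blanket impls are what let later hunks in this
// patch collapse explicit maps into a single call, e.g.
//
//     values: self.inputs.clean(cx),
//     // instead of: self.inputs.iter().map(|x| x.clean(cx)).collect()
//
// (see the ast::FnDecl and TyTup hunks below).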
pub primitives: Vec<Primitive>,
}
-impl<'a> Clean<Crate> for visit_ast::RustdocVisitor<'a> {
- fn clean(&self) -> Crate {
- let cx = get_cx();
-
+impl<'a, 'tcx> Clean<Crate> for visit_ast::RustdocVisitor<'a, 'tcx> {
+ fn clean(&self, cx: &DocContext) -> Crate {
let mut externs = Vec::new();
cx.sess().cstore.iter_crate_data(|n, meta| {
- externs.push((n, meta.clean()));
+ externs.push((n, meta.clean(cx)));
});
externs.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// Clean the crate, translating the entire libsyntax AST to one that is
// understood by rustdoc.
- let mut module = self.module.clean();
+ let mut module = self.module.clean(cx);
// Collect all inner modules which are tagged as implementations of
// primitives.
}
impl Clean<ExternalCrate> for cstore::crate_metadata {
- fn clean(&self) -> ExternalCrate {
+ fn clean(&self, cx: &DocContext) -> ExternalCrate {
let mut primitives = Vec::new();
- get_cx().tcx_opt().map(|tcx| {
+ cx.tcx_opt().map(|tcx| {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
self.cnum,
|def, _, _| {
decoder::DlDef(def::DefMod(did)) => did,
_ => return
};
- let attrs = inline::load_attrs(tcx, did);
+ let attrs = inline::load_attrs(cx, tcx, did);
Primitive::find(attrs.as_slice()).map(|prim| primitives.push(prim));
})
});
ExternalCrate {
name: self.name.to_string(),
- attrs: decoder::get_crate_attributes(self.data()).clean(),
+ attrs: decoder::get_crate_attributes(self.data()).clean(cx),
primitives: primitives,
}
}
}
impl Clean<Item> for doctree::Module {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let name = if self.name.is_some() {
- self.name.unwrap().clean()
+ self.name.unwrap().clean(cx)
} else {
"".to_string()
};
let mut foreigns = Vec::new();
- for subforeigns in self.foreigns.clean().move_iter() {
+ for subforeigns in self.foreigns.clean(cx).move_iter() {
for foreign in subforeigns.move_iter() {
foreigns.push(foreign)
}
}
let items: Vec<Vec<Item> > = vec!(
- self.structs.clean(),
- self.enums.clean(),
- self.fns.clean(),
+ self.structs.clean(cx),
+ self.enums.clean(cx),
+ self.fns.clean(cx),
foreigns,
- self.mods.clean(),
- self.typedefs.clean(),
- self.statics.clean(),
- self.traits.clean(),
- self.impls.clean(),
- self.view_items.clean().move_iter()
+ self.mods.clean(cx),
+ self.typedefs.clean(cx),
+ self.statics.clean(cx),
+ self.traits.clean(cx),
+ self.impls.clean(cx),
+ self.view_items.clean(cx).move_iter()
.flat_map(|s| s.move_iter()).collect(),
- self.macros.clean(),
+ self.macros.clean(cx),
);
// determine if we should display the inner contents or
// the outer `mod` item for the source code.
let whence = {
- let ctxt = super::ctxtkey.get().unwrap();
- let cm = ctxt.sess().codemap();
+ let cm = cx.sess().codemap();
let outer = cm.lookup_char_pos(self.where_outer.lo);
let inner = cm.lookup_char_pos(self.where_inner.lo);
if outer.file.start_pos == inner.file.start_pos {
Item {
name: Some(name),
- attrs: self.attrs.clean(),
- source: whence.clean(),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ attrs: self.attrs.clean(cx),
+ source: whence.clean(cx),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: ModuleItem(Module {
is_crate: self.is_crate,
}
impl Clean<Attribute> for ast::MetaItem {
- fn clean(&self) -> Attribute {
+ fn clean(&self, cx: &DocContext) -> Attribute {
match self.node {
ast::MetaWord(ref s) => Word(s.get().to_string()),
ast::MetaList(ref s, ref l) => {
- List(s.get().to_string(), l.clean())
+ List(s.get().to_string(), l.clean(cx))
}
ast::MetaNameValue(ref s, ref v) => {
NameValue(s.get().to_string(), lit_to_string(v))
}
impl Clean<Attribute> for ast::Attribute {
- fn clean(&self) -> Attribute {
- self.desugar_doc().node.value.clean()
+ fn clean(&self, cx: &DocContext) -> Attribute {
+ self.desugar_doc().node.value.clean(cx)
}
}
}
impl Clean<TyParam> for ast::TyParam {
- fn clean(&self) -> TyParam {
+ fn clean(&self, cx: &DocContext) -> TyParam {
TyParam {
- name: self.ident.clean(),
+ name: self.ident.clean(cx),
did: ast::DefId { krate: ast::LOCAL_CRATE, node: self.id },
- bounds: self.bounds.clean(),
- default: self.default.clean()
+ bounds: self.bounds.clean(cx),
+ default: self.default.clean(cx)
}
}
}
impl Clean<TyParam> for ty::TypeParameterDef {
- fn clean(&self) -> TyParam {
- get_cx().external_typarams.borrow_mut().as_mut().unwrap()
- .insert(self.def_id, self.ident.clean());
+ fn clean(&self, cx: &DocContext) -> TyParam {
+ cx.external_typarams.borrow_mut().as_mut().unwrap()
+ .insert(self.def_id, self.ident.clean(cx));
TyParam {
- name: self.ident.clean(),
+ name: self.ident.clean(cx),
did: self.def_id,
- bounds: self.bounds.clean(),
- default: self.default.clean()
+ bounds: self.bounds.clean(cx),
+ default: self.default.clean(cx)
}
}
}
}
impl Clean<TyParamBound> for ast::TyParamBound {
- fn clean(&self) -> TyParamBound {
+ fn clean(&self, cx: &DocContext) -> TyParamBound {
match *self {
ast::RegionTyParamBound(_) => RegionBound,
ast::UnboxedFnTyParamBound(_) => {
// FIXME(pcwalton): Wrong.
RegionBound
}
- ast::TraitTyParamBound(ref t) => TraitBound(t.clean()),
+ ast::TraitTyParamBound(ref t) => TraitBound(t.clean(cx)),
}
}
}
impl Clean<Vec<TyParamBound>> for ty::ExistentialBounds {
- fn clean(&self) -> Vec<TyParamBound> {
+ fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
let mut vec = vec!(RegionBound);
for bb in self.builtin_bounds.iter() {
- vec.push(bb.clean());
+ vec.push(bb.clean(cx));
}
vec
}
}
-fn external_path(name: &str, substs: &subst::Substs) -> Path {
+fn external_path(cx: &DocContext, name: &str, substs: &subst::Substs) -> Path {
let lifetimes = substs.regions().get_slice(subst::TypeSpace)
.iter()
- .filter_map(|v| v.clean())
+ .filter_map(|v| v.clean(cx))
.collect();
let types = Vec::from_slice(substs.types.get_slice(subst::TypeSpace));
- let types = types.clean();
+ let types = types.clean(cx);
Path {
global: false,
segments: vec![PathSegment {
}
impl Clean<TyParamBound> for ty::BuiltinBound {
- fn clean(&self) -> TyParamBound {
- let cx = get_cx();
- let tcx = match cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return RegionBound,
+ fn clean(&self, cx: &DocContext) -> TyParamBound {
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return RegionBound,
};
let empty = subst::Substs::empty();
let (did, path) = match *self {
ty::BoundSend =>
(tcx.lang_items.send_trait().unwrap(),
- external_path("Send", &empty)),
+ external_path(cx, "Send", &empty)),
ty::BoundSized =>
(tcx.lang_items.sized_trait().unwrap(),
- external_path("Sized", &empty)),
+ external_path(cx, "Sized", &empty)),
ty::BoundCopy =>
(tcx.lang_items.copy_trait().unwrap(),
- external_path("Copy", &empty)),
+ external_path(cx, "Copy", &empty)),
ty::BoundSync =>
(tcx.lang_items.sync_trait().unwrap(),
- external_path("Sync", &empty)),
+ external_path(cx, "Sync", &empty)),
};
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
}
impl Clean<TyParamBound> for ty::TraitRef {
- fn clean(&self) -> TyParamBound {
- let cx = get_cx();
- let tcx = match cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return RegionBound,
+ fn clean(&self, cx: &DocContext) -> TyParamBound {
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return RegionBound,
};
let fqn = csearch::get_item_path(tcx, self.def_id);
let fqn = fqn.move_iter().map(|i| i.to_string())
.collect::<Vec<String>>();
- let path = external_path(fqn.last().unwrap().as_slice(),
+ let path = external_path(cx, fqn.last().unwrap().as_slice(),
&self.substs);
cx.external_paths.borrow_mut().as_mut().unwrap().insert(self.def_id,
(fqn, TypeTrait));
}
impl Clean<Vec<TyParamBound>> for ty::ParamBounds {
- fn clean(&self) -> Vec<TyParamBound> {
+ fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
let mut v = Vec::new();
for b in self.builtin_bounds.iter() {
if b != ty::BoundSized {
- v.push(b.clean());
+ v.push(b.clean(cx));
}
}
for t in self.trait_bounds.iter() {
- v.push(t.clean());
+ v.push(t.clean(cx));
}
return v;
}
}
impl Clean<Option<Vec<TyParamBound>>> for subst::Substs {
- fn clean(&self) -> Option<Vec<TyParamBound>> {
+ fn clean(&self, cx: &DocContext) -> Option<Vec<TyParamBound>> {
let mut v = Vec::new();
v.extend(self.regions().iter().map(|_| RegionBound));
- v.extend(self.types.iter().map(|t| TraitBound(t.clean())));
+ v.extend(self.types.iter().map(|t| TraitBound(t.clean(cx))));
if v.len() > 0 {Some(v)} else {None}
}
}
}
impl Clean<Lifetime> for ast::Lifetime {
- fn clean(&self) -> Lifetime {
+ fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(token::get_name(self.name).get().to_string())
}
}
impl Clean<Lifetime> for ast::LifetimeDef {
- fn clean(&self) -> Lifetime {
+ fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(token::get_name(self.lifetime.name).get().to_string())
}
}
impl Clean<Lifetime> for ty::RegionParameterDef {
- fn clean(&self) -> Lifetime {
+ fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(token::get_name(self.name).get().to_string())
}
}
impl Clean<Option<Lifetime>> for ty::Region {
- fn clean(&self) -> Option<Lifetime> {
+ fn clean(&self, cx: &DocContext) -> Option<Lifetime> {
match *self {
ty::ReStatic => Some(Lifetime("'static".to_string())),
ty::ReLateBound(_, ty::BrNamed(_, name)) =>
Some(Lifetime(token::get_name(name).get().to_string())),
- ty::ReEarlyBound(_, _, _, name) => Some(Lifetime(name.clean())),
+ ty::ReEarlyBound(_, _, _, name) => Some(Lifetime(name.clean(cx))),
ty::ReLateBound(..) |
ty::ReFree(..) |
}
impl Clean<Generics> for ast::Generics {
- fn clean(&self) -> Generics {
+ fn clean(&self, cx: &DocContext) -> Generics {
Generics {
- lifetimes: self.lifetimes.clean(),
- type_params: self.ty_params.clean(),
+ lifetimes: self.lifetimes.clean(cx),
+ type_params: self.ty_params.clean(cx),
}
}
}
impl<'a> Clean<Generics> for (&'a ty::Generics, subst::ParamSpace) {
- fn clean(&self) -> Generics {
+ fn clean(&self, cx: &DocContext) -> Generics {
let (me, space) = *self;
Generics {
- type_params: Vec::from_slice(me.types.get_slice(space)).clean(),
- lifetimes: Vec::from_slice(me.regions.get_slice(space)).clean(),
+ type_params: Vec::from_slice(me.types.get_slice(space)).clean(cx),
+ lifetimes: Vec::from_slice(me.regions.get_slice(space)).clean(cx),
}
}
}
}
impl Clean<Item> for ast::Method {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let all_inputs = &self.pe_fn_decl().inputs;
let inputs = match self.pe_explicit_self().node {
ast::SelfStatic => all_inputs.as_slice(),
};
let decl = FnDecl {
inputs: Arguments {
- values: inputs.iter().map(|x| x.clean()).collect(),
+ values: inputs.iter().map(|x| x.clean(cx)).collect(),
},
- output: (self.pe_fn_decl().output.clean()),
- cf: self.pe_fn_decl().cf.clean(),
+ output: (self.pe_fn_decl().output.clean(cx)),
+ cf: self.pe_fn_decl().cf.clean(cx),
attrs: Vec::new()
};
Item {
- name: Some(self.pe_ident().clean()),
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ name: Some(self.pe_ident().clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.pe_vis().clean(),
- stability: get_stability(ast_util::local_def(self.id)),
+ visibility: self.pe_vis().clean(cx),
+ stability: get_stability(cx, ast_util::local_def(self.id)),
inner: MethodItem(Method {
- generics: self.pe_generics().clean(),
- self_: self.pe_explicit_self().node.clean(),
+ generics: self.pe_generics().clean(cx),
+ self_: self.pe_explicit_self().node.clean(cx),
fn_style: self.pe_fn_style().clone(),
decl: decl,
}),
}
impl Clean<Item> for ast::TypeMethod {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let inputs = match self.explicit_self.node {
ast::SelfStatic => self.decl.inputs.as_slice(),
_ => self.decl.inputs.slice_from(1)
};
let decl = FnDecl {
inputs: Arguments {
- values: inputs.iter().map(|x| x.clean()).collect(),
+ values: inputs.iter().map(|x| x.clean(cx)).collect(),
},
- output: (self.decl.output.clean()),
- cf: self.decl.cf.clean(),
+ output: (self.decl.output.clean(cx)),
+ cf: self.decl.cf.clean(cx),
attrs: Vec::new()
};
Item {
- name: Some(self.ident.clean()),
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ name: Some(self.ident.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(self.id),
visibility: None,
- stability: get_stability(ast_util::local_def(self.id)),
+ stability: get_stability(cx, ast_util::local_def(self.id)),
inner: TyMethodItem(TyMethod {
fn_style: self.fn_style.clone(),
decl: decl,
- self_: self.explicit_self.node.clean(),
- generics: self.generics.clean(),
+ self_: self.explicit_self.node.clean(cx),
+ generics: self.generics.clean(cx),
}),
}
}
}
impl Clean<SelfTy> for ast::ExplicitSelf_ {
- fn clean(&self) -> SelfTy {
+ fn clean(&self, cx: &DocContext) -> SelfTy {
match *self {
ast::SelfStatic => SelfStatic,
ast::SelfValue(_) => SelfValue,
ast::SelfRegion(lt, mt, _) => {
- SelfBorrowed(lt.clean(), mt.clean())
+ SelfBorrowed(lt.clean(cx), mt.clean(cx))
}
- ast::SelfExplicit(typ, _) => SelfExplicit(typ.clean()),
+ ast::SelfExplicit(typ, _) => SelfExplicit(typ.clean(cx)),
}
}
}
}
impl Clean<Item> for doctree::Function {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: FunctionItem(Function {
- decl: self.decl.clean(),
- generics: self.generics.clean(),
+ decl: self.decl.clean(cx),
+ generics: self.generics.clean(cx),
fn_style: self.fn_style,
}),
}
}
impl Clean<ClosureDecl> for ast::ClosureTy {
- fn clean(&self) -> ClosureDecl {
+ fn clean(&self, cx: &DocContext) -> ClosureDecl {
ClosureDecl {
- lifetimes: self.lifetimes.clean(),
- decl: self.decl.clean(),
+ lifetimes: self.lifetimes.clean(cx),
+ decl: self.decl.clean(cx),
onceness: self.onceness,
fn_style: self.fn_style,
- bounds: self.bounds.clean()
+ bounds: self.bounds.clean(cx)
}
}
}
}
impl Clean<FnDecl> for ast::FnDecl {
- fn clean(&self) -> FnDecl {
+ fn clean(&self, cx: &DocContext) -> FnDecl {
FnDecl {
inputs: Arguments {
- values: self.inputs.iter().map(|x| x.clean()).collect(),
+ values: self.inputs.clean(cx),
},
- output: (self.output.clean()),
- cf: self.cf.clean(),
+ output: self.output.clean(cx),
+ cf: self.cf.clean(cx),
attrs: Vec::new()
}
}
}
impl<'a> Clean<FnDecl> for (ast::DefId, &'a ty::FnSig) {
- fn clean(&self) -> FnDecl {
- let cx = get_cx();
+ fn clean(&self, cx: &DocContext) -> FnDecl {
let (did, sig) = *self;
let mut names = if did.node != 0 {
csearch::get_method_arg_names(&cx.tcx().sess.cstore, did).move_iter()
let _ = names.next();
}
FnDecl {
- output: sig.output.clean(),
+ output: sig.output.clean(cx),
cf: Return,
attrs: Vec::new(),
inputs: Arguments {
values: sig.inputs.iter().map(|t| {
Argument {
- type_: t.clean(),
+ type_: t.clean(cx),
id: 0,
name: names.next().unwrap_or("".to_string()),
}
}
impl Clean<Argument> for ast::Arg {
- fn clean(&self) -> Argument {
+ fn clean(&self, cx: &DocContext) -> Argument {
Argument {
name: name_from_pat(&*self.pat),
- type_: (self.ty.clean()),
+ type_: (self.ty.clean(cx)),
id: self.id
}
}
}
impl Clean<RetStyle> for ast::RetStyle {
- fn clean(&self) -> RetStyle {
+ fn clean(&self, _: &DocContext) -> RetStyle {
match *self {
ast::Return => Return,
ast::NoReturn => NoReturn
}
impl Clean<Item> for doctree::Trait {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: TraitItem(Trait {
- items: self.items.clean(),
- generics: self.generics.clean(),
- bounds: self.bounds.clean(),
+ items: self.items.clean(cx),
+ generics: self.generics.clean(cx),
+ bounds: self.bounds.clean(cx),
}),
}
}
}
impl Clean<Type> for ast::TraitRef {
- fn clean(&self) -> Type {
- resolve_type(self.path.clean(), None, self.ref_id)
+ fn clean(&self, cx: &DocContext) -> Type {
+ resolve_type(cx, self.path.clean(cx), None, self.ref_id)
}
}
}
impl Clean<TraitItem> for ast::TraitItem {
- fn clean(&self) -> TraitItem {
+ fn clean(&self, cx: &DocContext) -> TraitItem {
match self {
- &ast::RequiredMethod(ref t) => RequiredMethod(t.clean()),
- &ast::ProvidedMethod(ref t) => ProvidedMethod(t.clean()),
+ &ast::RequiredMethod(ref t) => RequiredMethod(t.clean(cx)),
+ &ast::ProvidedMethod(ref t) => ProvidedMethod(t.clean(cx)),
}
}
}
}
impl Clean<ImplItem> for ast::ImplItem {
- fn clean(&self) -> ImplItem {
+ fn clean(&self, cx: &DocContext) -> ImplItem {
match self {
- &ast::MethodImplItem(ref t) => MethodImplItem(t.clean()),
+ &ast::MethodImplItem(ref t) => MethodImplItem(t.clean(cx)),
}
}
}
impl Clean<Item> for ty::Method {
- fn clean(&self) -> Item {
- let cx = get_cx();
+ fn clean(&self, cx: &DocContext) -> Item {
let (self_, sig) = match self.explicit_self {
- ty::StaticExplicitSelfCategory => (ast::SelfStatic.clean(),
+ ty::StaticExplicitSelfCategory => (ast::SelfStatic.clean(cx),
self.fty.sig.clone()),
s => {
let sig = ty::FnSig {
ty::ByReferenceExplicitSelfCategory(..) => {
match ty::get(self.fty.sig.inputs[0]).sty {
ty::ty_rptr(r, mt) => {
- SelfBorrowed(r.clean(), mt.mutbl.clean())
+ SelfBorrowed(r.clean(cx), mt.mutbl.clean(cx))
}
_ => unreachable!(),
}
}
ty::ByBoxExplicitSelfCategory => {
- SelfExplicit(self.fty.sig.inputs[0].clean())
+ SelfExplicit(self.fty.sig.inputs[0].clean(cx))
}
ty::StaticExplicitSelfCategory => unreachable!(),
};
};
Item {
- name: Some(self.ident.clean()),
+ name: Some(self.ident.clean(cx)),
visibility: Some(ast::Inherited),
- stability: get_stability(self.def_id),
+ stability: get_stability(cx, self.def_id),
def_id: self.def_id,
- attrs: inline::load_attrs(cx.tcx(), self.def_id),
+ attrs: inline::load_attrs(cx, cx.tcx(), self.def_id),
source: Span::empty(),
inner: TyMethodItem(TyMethod {
fn_style: self.fty.fn_style,
- generics: (&self.generics, subst::FnSpace).clean(),
+ generics: (&self.generics, subst::FnSpace).clean(cx),
self_: self_,
- decl: (self.def_id, &sig).clean(),
+ decl: (self.def_id, &sig).clean(cx),
})
}
}
}
impl Clean<Item> for ty::ImplOrTraitItem {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
match *self {
- ty::MethodTraitItem(ref mti) => mti.clean(),
+ ty::MethodTraitItem(ref mti) => mti.clean(cx),
}
}
}
}
impl Clean<Type> for ast::Ty {
- fn clean(&self) -> Type {
+ fn clean(&self, cx: &DocContext) -> Type {
use syntax::ast::*;
match self.node {
TyNil => Primitive(Unit),
- TyPtr(ref m) => RawPointer(m.mutbl.clean(), box m.ty.clean()),
+ TyPtr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)),
TyRptr(ref l, ref m) =>
- BorrowedRef {lifetime: l.clean(), mutability: m.mutbl.clean(),
- type_: box m.ty.clean()},
- TyBox(ty) => Managed(box ty.clean()),
- TyUniq(ty) => Unique(box ty.clean()),
- TyVec(ty) => Vector(box ty.clean()),
- TyFixedLengthVec(ty, ref e) => FixedVector(box ty.clean(),
- e.span.to_src()),
- TyTup(ref tys) => Tuple(tys.iter().map(|x| x.clean()).collect()),
+ BorrowedRef {lifetime: l.clean(cx), mutability: m.mutbl.clean(cx),
+ type_: box m.ty.clean(cx)},
+ TyBox(ty) => Managed(box ty.clean(cx)),
+ TyUniq(ty) => Unique(box ty.clean(cx)),
+ TyVec(ty) => Vector(box ty.clean(cx)),
+ TyFixedLengthVec(ty, ref e) => FixedVector(box ty.clean(cx),
+ e.span.to_src(cx)),
+ TyTup(ref tys) => Tuple(tys.clean(cx)),
TyPath(ref p, ref tpbs, id) => {
- resolve_type(p.clean(),
- tpbs.clean().map(|x| x),
- id)
+ resolve_type(cx, p.clean(cx), tpbs.clean(cx), id)
}
- TyClosure(ref c) => Closure(box c.clean()),
- TyProc(ref c) => Proc(box c.clean()),
- TyBareFn(ref barefn) => BareFunction(box barefn.clean()),
- TyParen(ref ty) => ty.clean(),
+ TyClosure(ref c) => Closure(box c.clean(cx)),
+ TyProc(ref c) => Proc(box c.clean(cx)),
+ TyBareFn(ref barefn) => BareFunction(box barefn.clean(cx)),
+ TyParen(ref ty) => ty.clean(cx),
TyBot => Bottom,
ref x => fail!("Unimplemented type {:?}", x),
}
}
impl Clean<Type> for ty::t {
- fn clean(&self) -> Type {
+ fn clean(&self, cx: &DocContext) -> Type {
match ty::get(*self).sty {
ty::ty_bot => Bottom,
ty::ty_nil => Primitive(Unit),
ty::ty_float(ast::TyF64) => Primitive(F64),
ty::ty_str => Primitive(Str),
ty::ty_box(t) => {
- let gc_did = get_cx().tcx_opt().and_then(|tcx| {
+ let gc_did = cx.tcx_opt().and_then(|tcx| {
tcx.lang_items.gc()
});
- lang_struct(gc_did, t, "Gc", Managed)
+ lang_struct(cx, gc_did, t, "Gc", Managed)
}
ty::ty_uniq(t) => {
- let box_did = get_cx().tcx_opt().and_then(|tcx| {
+ let box_did = cx.tcx_opt().and_then(|tcx| {
tcx.lang_items.owned_box()
});
- lang_struct(box_did, t, "Box", Unique)
+ lang_struct(cx, box_did, t, "Box", Unique)
}
- ty::ty_vec(ty, None) => Vector(box ty.clean()),
- ty::ty_vec(ty, Some(i)) => FixedVector(box ty.clean(),
+ ty::ty_vec(ty, None) => Vector(box ty.clean(cx)),
+ ty::ty_vec(ty, Some(i)) => FixedVector(box ty.clean(cx),
format!("{}", i)),
- ty::ty_ptr(mt) => RawPointer(mt.mutbl.clean(), box mt.ty.clean()),
+ ty::ty_ptr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)),
ty::ty_rptr(r, mt) => BorrowedRef {
- lifetime: r.clean(),
- mutability: mt.mutbl.clean(),
- type_: box mt.ty.clean(),
+ lifetime: r.clean(cx),
+ mutability: mt.mutbl.clean(cx),
+ type_: box mt.ty.clean(cx),
},
ty::ty_bare_fn(ref fty) => BareFunction(box BareFunctionDecl {
fn_style: fty.fn_style,
generics: Generics {
lifetimes: Vec::new(), type_params: Vec::new()
},
- decl: (ast_util::local_def(0), &fty.sig).clean(),
+ decl: (ast_util::local_def(0), &fty.sig).clean(cx),
abi: fty.abi.to_string(),
}),
ty::ty_closure(ref fty) => {
let decl = box ClosureDecl {
lifetimes: Vec::new(), // FIXME: this looks wrong...
- decl: (ast_util::local_def(0), &fty.sig).clean(),
+ decl: (ast_util::local_def(0), &fty.sig).clean(cx),
onceness: fty.onceness,
fn_style: fty.fn_style,
- bounds: fty.bounds.clean(),
+ bounds: fty.bounds.clean(cx),
};
match fty.store {
ty::UniqTraitStore => Proc(decl),
ty::ty_struct(did, ref substs) |
ty::ty_enum(did, ref substs) |
ty::ty_trait(box ty::TyTrait { def_id: did, ref substs, .. }) => {
- let fqn = csearch::get_item_path(get_cx().tcx(), did);
+ let fqn = csearch::get_item_path(cx.tcx(), did);
let fqn: Vec<String> = fqn.move_iter().map(|i| {
i.to_string()
}).collect();
ty::ty_trait(..) => TypeTrait,
_ => TypeEnum,
};
- let path = external_path(fqn.last().unwrap().to_string().as_slice(),
+ let path = external_path(cx, fqn.last().unwrap().to_string().as_slice(),
substs);
- get_cx().external_paths.borrow_mut().as_mut().unwrap()
- .insert(did, (fqn, kind));
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
ResolvedPath {
path: path,
typarams: None,
did: did,
}
}
- ty::ty_tup(ref t) => Tuple(t.iter().map(|t| t.clean()).collect()),
+ ty::ty_tup(ref t) => Tuple(t.clean(cx)),
ty::ty_param(ref p) => {
if p.space == subst::SelfSpace {
}
impl Clean<Item> for ast::StructField {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let (name, vis) = match self.node.kind {
ast::NamedField(id, vis) => (Some(id), vis),
ast::UnnamedField(vis) => (None, vis)
};
Item {
- name: name.clean(),
- attrs: self.node.attrs.clean(),
- source: self.span.clean(),
+ name: name.clean(cx),
+ attrs: self.node.attrs.clean(cx),
+ source: self.span.clean(cx),
visibility: Some(vis),
- stability: get_stability(ast_util::local_def(self.node.id)),
+ stability: get_stability(cx, ast_util::local_def(self.node.id)),
def_id: ast_util::local_def(self.node.id),
- inner: StructFieldItem(TypedStructField(self.node.ty.clean())),
+ inner: StructFieldItem(TypedStructField(self.node.ty.clean(cx))),
}
}
}
impl Clean<Item> for ty::field_ty {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
use syntax::parse::token::special_idents::unnamed_field;
use rustc::metadata::csearch;
- let cx = get_cx();
- let attrs;
-
let attr_map = csearch::get_struct_field_attrs(&cx.tcx().sess.cstore, self.id);
- let name = if self.name == unnamed_field.name {
- attrs = None;
- None
+ let (name, attrs) = if self.name == unnamed_field.name {
+ (None, None)
} else {
- attrs = Some(attr_map.find(&self.id.node).unwrap());
- Some(self.name)
+ (Some(self.name), Some(attr_map.find(&self.id.node).unwrap()))
};
let ty = ty::lookup_item_type(cx.tcx(), self.id);
Item {
- name: name.clean(),
- attrs: attrs.unwrap_or(&Vec::new()).clean(),
+ name: name.clean(cx),
+ attrs: attrs.unwrap_or(&Vec::new()).clean(cx),
source: Span::empty(),
visibility: Some(self.vis),
- stability: get_stability(self.id),
+ stability: get_stability(cx, self.id),
def_id: self.id,
- inner: StructFieldItem(TypedStructField(ty.ty.clean())),
+ inner: StructFieldItem(TypedStructField(ty.ty.clean(cx))),
}
}
}
pub type Visibility = ast::Visibility;
impl Clean<Option<Visibility>> for ast::Visibility {
- fn clean(&self) -> Option<Visibility> {
+ fn clean(&self, _: &DocContext) -> Option<Visibility> {
Some(*self)
}
}
}
impl Clean<Item> for doctree::Struct {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: StructItem(Struct {
struct_type: self.struct_type,
- generics: self.generics.clean(),
- fields: self.fields.clean(),
+ generics: self.generics.clean(cx),
+ fields: self.fields.clean(cx),
fields_stripped: false,
}),
}
}
impl Clean<VariantStruct> for syntax::ast::StructDef {
- fn clean(&self) -> VariantStruct {
+ fn clean(&self, cx: &DocContext) -> VariantStruct {
VariantStruct {
struct_type: doctree::struct_type_from_def(self),
- fields: self.fields.clean(),
+ fields: self.fields.clean(cx),
fields_stripped: false,
}
}
}
impl Clean<Item> for doctree::Enum {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: EnumItem(Enum {
- variants: self.variants.clean(),
- generics: self.generics.clean(),
+ variants: self.variants.clean(cx),
+ generics: self.generics.clean(cx),
variants_stripped: false,
}),
}
}
impl Clean<Item> for doctree::Variant {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: VariantItem(Variant {
- kind: self.kind.clean(),
+ kind: self.kind.clean(cx),
}),
}
}
}
impl Clean<Item> for ty::VariantInfo {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
// use syntax::parse::token::special_idents::unnamed_field;
- let cx = get_cx();
let kind = match self.arg_names.as_ref().map(|s| s.as_slice()) {
None | Some([]) if self.args.len() == 0 => CLikeVariant,
None | Some([]) => {
- TupleVariant(self.args.iter().map(|t| t.clean()).collect())
+ TupleVariant(self.args.clean(cx))
}
Some(s) => {
StructVariant(VariantStruct {
fields: s.iter().zip(self.args.iter()).map(|(name, ty)| {
Item {
source: Span::empty(),
- name: Some(name.clean()),
+ name: Some(name.clean(cx)),
attrs: Vec::new(),
visibility: Some(ast::Public),
// FIXME: this is not accurate, we need an id for
// more infrastructure work before we can get
// at the needed information here.
def_id: self.id,
- stability: get_stability(self.id),
+ stability: get_stability(cx, self.id),
inner: StructFieldItem(
- TypedStructField(ty.clean())
+ TypedStructField(ty.clean(cx))
)
}
}).collect()
}
};
Item {
- name: Some(self.name.clean()),
- attrs: inline::load_attrs(cx.tcx(), self.id),
+ name: Some(self.name.clean(cx)),
+ attrs: inline::load_attrs(cx, cx.tcx(), self.id),
source: Span::empty(),
visibility: Some(ast::Public),
def_id: self.id,
inner: VariantItem(Variant { kind: kind }),
- stability: get_stability(self.id),
+ stability: get_stability(cx, self.id),
}
}
}
}
impl Clean<VariantKind> for ast::VariantKind {
- fn clean(&self) -> VariantKind {
+ fn clean(&self, cx: &DocContext) -> VariantKind {
match self {
&ast::TupleVariantKind(ref args) => {
if args.len() == 0 {
CLikeVariant
} else {
- TupleVariant(args.iter().map(|x| x.ty.clean()).collect())
+ TupleVariant(args.iter().map(|x| x.ty.clean(cx)).collect())
}
},
- &ast::StructVariantKind(ref sd) => StructVariant(sd.clean()),
+ &ast::StructVariantKind(ref sd) => StructVariant(sd.clean(cx)),
}
}
}
}
impl Clean<Span> for syntax::codemap::Span {
- fn clean(&self) -> Span {
- let ctxt = super::ctxtkey.get().unwrap();
- let cm = ctxt.sess().codemap();
+ fn clean(&self, cx: &DocContext) -> Span {
+ let cm = cx.sess().codemap();
let filename = cm.span_to_filename(*self);
let lo = cm.lookup_char_pos(self.lo);
let hi = cm.lookup_char_pos(self.hi);
}
impl Clean<Path> for ast::Path {
- fn clean(&self) -> Path {
+ fn clean(&self, cx: &DocContext) -> Path {
Path {
global: self.global,
- segments: self.segments.clean(),
+ segments: self.segments.clean(cx),
}
}
}
}
impl Clean<PathSegment> for ast::PathSegment {
- fn clean(&self) -> PathSegment {
+ fn clean(&self, cx: &DocContext) -> PathSegment {
PathSegment {
- name: self.identifier.clean(),
- lifetimes: self.lifetimes.clean(),
- types: self.types.clean(),
+ name: self.identifier.clean(cx),
+ lifetimes: self.lifetimes.clean(cx),
+ types: self.types.clean(cx),
}
}
}
}
impl Clean<String> for ast::Ident {
- fn clean(&self) -> String {
+ fn clean(&self, _: &DocContext) -> String {
token::get_ident(*self).get().to_string()
}
}
impl Clean<String> for ast::Name {
- fn clean(&self) -> String {
+ fn clean(&self, _: &DocContext) -> String {
token::get_name(*self).get().to_string()
}
}
}
impl Clean<Item> for doctree::Typedef {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id.clone()),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: TypedefItem(Typedef {
- type_: self.ty.clean(),
- generics: self.gen.clean(),
+ type_: self.ty.clean(cx),
+ generics: self.gen.clean(cx),
}),
}
}
}
impl Clean<BareFunctionDecl> for ast::BareFnTy {
- fn clean(&self) -> BareFunctionDecl {
+ fn clean(&self, cx: &DocContext) -> BareFunctionDecl {
BareFunctionDecl {
fn_style: self.fn_style,
generics: Generics {
- lifetimes: self.lifetimes.clean(),
+ lifetimes: self.lifetimes.clean(cx),
type_params: Vec::new(),
},
- decl: self.decl.clean(),
+ decl: self.decl.clean(cx),
abi: self.abi.to_string(),
}
}
}
impl Clean<Item> for doctree::Static {
- fn clean(&self) -> Item {
- debug!("claning static {}: {:?}", self.name.clean(), self);
+ fn clean(&self, cx: &DocContext) -> Item {
+ debug!("claning static {}: {:?}", self.name.clean(cx), self);
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: StaticItem(Static {
- type_: self.type_.clean(),
- mutability: self.mutability.clean(),
- expr: self.expr.span.to_src(),
+ type_: self.type_.clean(cx),
+ mutability: self.mutability.clean(cx),
+ expr: self.expr.span.to_src(cx),
}),
}
}
}
impl Clean<Mutability> for ast::Mutability {
- fn clean(&self) -> Mutability {
+ fn clean(&self, _: &DocContext) -> Mutability {
match self {
&ast::MutMutable => Mutable,
&ast::MutImmutable => Immutable,
}
impl Clean<Item> for doctree::Impl {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
name: None,
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: ImplItem(Impl {
- generics: self.generics.clean(),
- trait_: self.trait_.clean(),
- for_: self.for_.clean(),
- items: self.items.clean().move_iter().map(|ti| {
+ generics: self.generics.clean(cx),
+ trait_: self.trait_.clean(cx),
+ for_: self.for_.clean(cx),
+ items: self.items.clean(cx).move_iter().map(|ti| {
match ti {
MethodImplItem(i) => i,
}
}
impl Clean<Vec<Item>> for ast::ViewItem {
- fn clean(&self) -> Vec<Item> {
+ fn clean(&self, cx: &DocContext) -> Vec<Item> {
// We consider inlining the documentation of `pub use` statements, but we
// forcefully don't inline if this is not public or if the
// #[doc(no_inline)] attribute is present.
let convert = |node: &ast::ViewItem_| {
Item {
name: None,
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(0),
- visibility: self.vis.clean(),
+ visibility: self.vis.clean(cx),
stability: None,
- inner: ViewItemItem(ViewItem { inner: node.clean() }),
+ inner: ViewItemItem(ViewItem { inner: node.clean(cx) }),
}
};
let mut ret = Vec::new();
// to keep any non-inlineable reexports so they can be
// listed in the documentation.
let remaining = list.iter().filter(|path| {
- match inline::try_inline(path.node.id(), None) {
+ match inline::try_inline(cx, path.node.id(), None) {
Some(items) => {
ret.extend(items.move_iter()); false
}
}
}
ast::ViewPathSimple(ident, _, id) => {
- match inline::try_inline(id, Some(ident)) {
+ match inline::try_inline(cx, id, Some(ident)) {
Some(items) => ret.extend(items.move_iter()),
None => ret.push(convert(&self.node)),
}
}
impl Clean<ViewItemInner> for ast::ViewItem_ {
- fn clean(&self) -> ViewItemInner {
+ fn clean(&self, cx: &DocContext) -> ViewItemInner {
match self {
&ast::ViewItemExternCrate(ref i, ref p, ref id) => {
let string = match *p {
None => None,
Some((ref x, _)) => Some(x.get().to_string()),
};
- ExternCrate(i.clean(), string, *id)
+ ExternCrate(i.clean(cx), string, *id)
}
&ast::ViewItemUse(ref vp) => {
- Import(vp.clean())
+ Import(vp.clean(cx))
}
}
}
}
impl Clean<ViewPath> for ast::ViewPath {
- fn clean(&self) -> ViewPath {
+ fn clean(&self, cx: &DocContext) -> ViewPath {
match self.node {
ast::ViewPathSimple(ref i, ref p, id) =>
- SimpleImport(i.clean(), resolve_use_source(p.clean(), id)),
+ SimpleImport(i.clean(cx), resolve_use_source(cx, p.clean(cx), id)),
ast::ViewPathGlob(ref p, id) =>
- GlobImport(resolve_use_source(p.clean(), id)),
+ GlobImport(resolve_use_source(cx, p.clean(cx), id)),
ast::ViewPathList(ref p, ref pl, id) => {
- ImportList(resolve_use_source(p.clean(), id),
- pl.clean())
+ ImportList(resolve_use_source(cx, p.clean(cx), id),
+ pl.clean(cx))
}
}
}
}
impl Clean<ViewListIdent> for ast::PathListItem {
- fn clean(&self) -> ViewListIdent {
+ fn clean(&self, cx: &DocContext) -> ViewListIdent {
match self.node {
ast::PathListIdent { id, name } => ViewListIdent {
- name: name.clean(),
- source: resolve_def(id)
+ name: name.clean(cx),
+ source: resolve_def(cx, id)
},
ast::PathListMod { id } => ViewListIdent {
name: "mod".to_string(),
- source: resolve_def(id)
+ source: resolve_def(cx, id)
}
}
}
}
impl Clean<Vec<Item>> for ast::ForeignMod {
- fn clean(&self) -> Vec<Item> {
- self.items.clean()
+ fn clean(&self, cx: &DocContext) -> Vec<Item> {
+ self.items.clean(cx)
}
}
impl Clean<Item> for ast::ForeignItem {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let inner = match self.node {
ast::ForeignItemFn(ref decl, ref generics) => {
ForeignFunctionItem(Function {
- decl: decl.clean(),
- generics: generics.clean(),
+ decl: decl.clean(cx),
+ generics: generics.clean(cx),
fn_style: ast::UnsafeFn,
})
}
ast::ForeignItemStatic(ref ty, mutbl) => {
ForeignStaticItem(Static {
- type_: ty.clean(),
+ type_: ty.clean(cx),
mutability: if mutbl {Mutable} else {Immutable},
expr: "".to_string(),
})
}
};
Item {
- name: Some(self.ident.clean()),
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ name: Some(self.ident.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: get_stability(ast_util::local_def(self.id)),
+ visibility: self.vis.clean(cx),
+ stability: get_stability(cx, ast_util::local_def(self.id)),
inner: inner,
}
}
// Utilities
trait ToSource {
- fn to_src(&self) -> String;
+ fn to_src(&self, cx: &DocContext) -> String;
}
impl ToSource for syntax::codemap::Span {
- fn to_src(&self) -> String {
- debug!("converting span {:?} to snippet", self.clean());
- let ctxt = super::ctxtkey.get().unwrap();
- let cm = ctxt.sess().codemap().clone();
- let sn = match cm.span_to_snippet(*self) {
+ fn to_src(&self, cx: &DocContext) -> String {
+ debug!("converting span {:?} to snippet", self.clean(cx));
+ let sn = match cx.sess().codemap().span_to_snippet(*self) {
Some(x) => x.to_string(),
None => "".to_string()
};
}
/// Given a Type, resolve it using the def_map
-fn resolve_type(path: Path, tpbs: Option<Vec<TyParamBound>>,
+fn resolve_type(cx: &DocContext, path: Path,
+ tpbs: Option<Vec<TyParamBound>>,
id: ast::NodeId) -> Type {
- let cx = get_cx();
- let tycx = match cx.maybe_typed {
- core::Typed(ref tycx) => tycx,
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
// If we're extracting tests, this return value doesn't matter.
- core::NotTyped(_) => return Primitive(Bool),
+ None => return Primitive(Bool),
};
debug!("searching for {:?} in defmap", id);
- let def = match tycx.def_map.borrow().find(&id) {
+ let def = match tcx.def_map.borrow().find(&id) {
Some(&k) => k,
None => fail!("unresolved id not in defmap")
};
ResolvedPath { path: path, typarams: tpbs, did: did }
}
-fn register_def(cx: &core::DocContext, def: def::Def) -> ast::DefId {
+fn register_def(cx: &DocContext, def: def::Def) -> ast::DefId {
let (did, kind) = match def {
def::DefFn(i, _) => (i, TypeFunction),
def::DefTy(i) => (i, TypeEnum),
_ => return def.def_id()
};
if ast_util::is_local(did) { return did }
- let tcx = match cx.maybe_typed {
- core::Typed(ref t) => t,
- core::NotTyped(_) => return did
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return did
};
inline::record_extern_fqn(cx, did, kind);
match kind {
TypeTrait => {
- let t = inline::build_external_trait(tcx, did);
+ let t = inline::build_external_trait(cx, tcx, did);
cx.external_traits.borrow_mut().as_mut().unwrap().insert(did, t);
}
_ => {}
return did;
}
-fn resolve_use_source(path: Path, id: ast::NodeId) -> ImportSource {
+fn resolve_use_source(cx: &DocContext, path: Path, id: ast::NodeId) -> ImportSource {
ImportSource {
path: path,
- did: resolve_def(id),
+ did: resolve_def(cx, id),
}
}
-fn resolve_def(id: ast::NodeId) -> Option<ast::DefId> {
- get_cx().tcx_opt().and_then(|tcx| {
- tcx.def_map.borrow().find(&id).map(|&def| register_def(&*get_cx(), def))
+fn resolve_def(cx: &DocContext, id: ast::NodeId) -> Option<ast::DefId> {
+ cx.tcx_opt().and_then(|tcx| {
+ tcx.def_map.borrow().find(&id).map(|&def| register_def(cx, def))
})
}
}
impl Clean<Item> for doctree::Macro {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(format!("{}!", self.name.clean())),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
- visibility: ast::Public.clean(),
- stability: self.stab.clean(),
+ name: Some(format!("{}!", self.name.clean(cx))),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
+ visibility: ast::Public.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: MacroItem(Macro {
- source: self.whence.to_src(),
+ source: self.whence.to_src(cx),
}),
}
}
}
impl Clean<Stability> for attr::Stability {
- fn clean(&self) -> Stability {
+ fn clean(&self, _: &DocContext) -> Stability {
Stability {
level: self.level,
text: self.text.as_ref().map_or("".to_string(),
}
}
-fn lang_struct(did: Option<ast::DefId>, t: ty::t, name: &str,
+fn lang_struct(cx: &DocContext, did: Option<ast::DefId>,
+ t: ty::t, name: &str,
fallback: fn(Box<Type>) -> Type) -> Type {
let did = match did {
Some(did) => did,
- None => return fallback(box t.clean()),
+ None => return fallback(box t.clean(cx)),
};
- let fqn = csearch::get_item_path(get_cx().tcx(), did);
+ let fqn = csearch::get_item_path(cx.tcx(), did);
let fqn: Vec<String> = fqn.move_iter().map(|i| {
i.to_string()
}).collect();
- get_cx().external_paths.borrow_mut().as_mut().unwrap()
- .insert(did, (fqn, TypeStruct));
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, TypeStruct));
ResolvedPath {
typarams: None,
did: did,
segments: vec![PathSegment {
name: name.to_string(),
lifetimes: vec![],
- types: vec![t.clean()],
+ types: vec![t.clean(cx)],
}],
},
}
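// Editorial sketch: with a typed context, the ty_box/ty_uniq hunks above
// resolve through lang items, e.g.
//
//     let gc_did = cx.tcx_opt().and_then(|tcx| tcx.lang_items.gc());
//     lang_struct(cx, gc_did, t, "Gc", Managed)
//
// which yields a ResolvedPath rendered as `Gc<T>`, falling back to the
// plain `Managed(box t.clean(cx))` form when no lang item is registered.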
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
use std::gc::GC;
use std::os;
use std::collections::{HashMap, HashSet};
+use arena::TypedArena;
use visit_ast::RustdocVisitor;
use clean;
use clean::Clean;
/// Are we generating documentation (`Typed`) or tests (`NotTyped`)?
-pub enum MaybeTyped {
- Typed(middle::ty::ctxt),
+pub enum MaybeTyped<'tcx> {
+ Typed(middle::ty::ctxt<'tcx>),
NotTyped(driver::session::Session)
}
pub type ExternalPaths = RefCell<Option<HashMap<ast::DefId,
(Vec<String>, clean::TypeKind)>>>;
-pub struct DocContext {
+pub struct DocContext<'tcx> {
pub krate: ast::Crate,
- pub maybe_typed: MaybeTyped,
+ pub maybe_typed: MaybeTyped<'tcx>,
pub src: Path,
pub external_paths: ExternalPaths,
pub external_traits: RefCell<Option<HashMap<ast::DefId, clean::Trait>>>,
pub populated_crate_impls: RefCell<HashSet<ast::CrateNum>>,
}
-impl DocContext {
+impl<'tcx> DocContext<'tcx> {
pub fn sess<'a>(&'a self) -> &'a driver::session::Session {
match self.maybe_typed {
Typed(ref tcx) => &tcx.sess,
}
}
- pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt> {
+ pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt<'tcx>> {
match self.maybe_typed {
Typed(ref tcx) => Some(tcx),
NotTyped(_) => None
}
}
- pub fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
let tcx_opt = self.tcx_opt();
tcx_opt.expect("tcx not present")
}
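// Editorial sketch: tcx_opt() gives callers a uniform way to bail out when
// only test extraction is running (NotTyped); the pattern used throughout
// this patch is
//
//     let tcx = match cx.tcx_opt() {
//         Some(tcx) => tcx,       // typed: full analysis available
//         None => return None,    // untyped: no type context, give up
//     };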
pub type Externs = HashMap<String, Vec<String>>;
/// Parses, resolves, and typechecks the given crate
-fn get_ast_and_resolve(cpath: &Path, libs: HashSet<Path>, cfgs: Vec<String>,
- externs: Externs, triple: Option<String>)
- -> (DocContext, CrateAnalysis) {
+fn get_ast_and_resolve<'tcx>(cpath: &Path, libs: Vec<Path>, cfgs: Vec<String>,
+ externs: Externs, triple: Option<String>,
+ type_arena: &'tcx TypedArena<ty::t_box_>)
+ -> (DocContext<'tcx>, CrateAnalysis) {
use syntax::codemap::dummy_spanned;
use rustc::driver::driver::{FileInput,
phase_1_parse_input,
let driver::driver::CrateAnalysis {
exported_items, public_items, ty_cx, ..
- } = phase_3_run_analysis_passes(sess, &krate, ast_map, name);
+ } = phase_3_run_analysis_passes(sess, &krate, ast_map, type_arena, name);
debug!("crate: {:?}", krate);
(DocContext {
})
}
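// Editorial note: the 'tcx lifetime on DocContext is the borrow of the
// TypedArena created in run_core below; the arena must therefore outlive
// the context, roughly:
//
//     let type_arena = TypedArena::new();   // owns the ty::t_box_ allocations
//     let (ctxt, analysis) = get_ast_and_resolve(path, libs, cfgs, externs,
//                                                triple, &type_arena);
//     // ctxt: DocContext<'tcx> borrows from type_arena for its whole life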
-pub fn run_core(libs: HashSet<Path>, cfgs: Vec<String>, externs: Externs,
+pub fn run_core(libs: Vec<Path>, cfgs: Vec<String>, externs: Externs,
path: &Path, triple: Option<String>)
-> (clean::Crate, CrateAnalysis) {
- let (ctxt, analysis) = get_ast_and_resolve(path, libs, cfgs, externs, triple);
- let ctxt = box(GC) ctxt;
- super::ctxtkey.replace(Some(ctxt));
+ let type_arena = TypedArena::new();
+ let (ctxt, analysis) = get_ast_and_resolve(path, libs, cfgs, externs,
+ triple, &type_arena);
let krate = {
- let mut v = RustdocVisitor::new(&*ctxt, Some(&analysis));
+ let mut v = RustdocVisitor::new(&ctxt, Some(&analysis));
v.visit(&ctxt.krate);
- v.clean()
+ v.clean(&ctxt)
};
let external_paths = ctxt.external_paths.borrow_mut().take();
}).unwrap_or(HashMap::new());
let mut cache = Cache {
impls: HashMap::new(),
- external_paths: paths.iter().map(|(&k, &(ref v, _))| (k, v.clone()))
+ external_paths: paths.iter().map(|(&k, v)| (k, v.ref0().clone()))
.collect(),
paths: paths,
implementors: HashMap::new(),
color: #333;
}
+.location a:first-child { font-weight: bold; }
+
.block {
padding: 0 10px;
margin-bottom: 14px;
* A function to compute the Levenshtein distance between two strings
* Licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported
* Full License can be found at http://creativecommons.org/licenses/by-sa/3.0/legalcode
- * This code is an unmodified version of the code written by Marco de Wit
+ * This code is an unmodified version of the code written by Marco de Wit
* and was found at http://stackoverflow.com/a/18514751/745719
*/
var levenshtein = (function() {
});
}
} else if (
- (lev_distance = levenshtein(searchWords[j], val)) <=
+ (lev_distance = levenshtein(searchWords[j], val)) <=
MAX_LEV_DISTANCE) {
if (typeFilter < 0 || typeFilter === searchIndex[j].ty) {
results.push({
function validateResult(name, path, keys, parent) {
for (var i=0; i < keys.length; ++i) {
// each check is for validation so we negate the conditions and invalidate
- if (!(
+ if (!(
// check for an exact name match
name.toLowerCase().indexOf(keys[i]) > -1 ||
// then an exact path match
path.toLowerCase().indexOf(keys[i]) > -1 ||
// next if there is a parent, check for exact parent match
- (parent !== undefined &&
+ (parent !== undefined &&
parent.name.toLowerCase().indexOf(keys[i]) > -1) ||
// lastly check to see if the name was a levenshtein match
- levenshtein(name.toLowerCase(), keys[i]) <=
+ levenshtein(name.toLowerCase(), keys[i]) <=
MAX_LEV_DISTANCE)) {
return false;
}
});
$(function() {
- var toggle = "<a href='javascript:void(0)'"
- + "class='collapse-toggle'>[<span class='inner'>-</span>]</a>";
+ var toggle = $("<a/>", {'href': 'javascript:void(0)', 'class': 'collapse-toggle'})
+ .html("[<span class='inner'>-</span>]");
$(".method").each(function() {
if ($(this).next().is(".docblock")) {
- $(this).children().first().after(toggle);
+ $(this).children().first().after(toggle[0]);
}
});
- var mainToggle = $(toggle);
- mainToggle.append("<span class='toggle-label' style='display:none'>"
- + " Expand description</span></a>")
- var wrapper = $("<div class='toggle-wrapper'>");
- wrapper.append(mainToggle);
+ var mainToggle =
+ $(toggle).append(
+ $('<span/>', {'class': 'toggle-label'})
+ .css('display', 'none')
+ .html(' Expand description'));
+ var wrapper = $("<div class='toggle-wrapper'>").append(mainToggle);
$("#main > .docblock").before(wrapper);
});
#![feature(globs, struct_variant, managed_boxes, macro_rules, phase)]
+extern crate arena;
extern crate debug;
extern crate getopts;
extern crate libc;
use std::io;
use std::io::{File, MemWriter};
-use std::gc::Gc;
use std::collections::HashMap;
use serialize::{json, Decodable, Encodable};
use externalfiles::ExternalHtml;
"unindent-comments",
];
-local_data_key!(pub ctxtkey: Gc<core::DocContext>)
local_data_key!(pub analysiskey: core::CrateAnalysis)
type Output = (clean::Crate, Vec<plugins::PluginJson> );
info!("starting to run rustc");
let (mut krate, analysis) = std::task::try(proc() {
let cr = cr;
- core::run_core(libs.move_iter().collect(),
- cfgs,
- externs,
- &cr,
- triple)
+ core::run_core(libs, cfgs, externs, &cr, triple)
}).map_err(|boxed_any|format!("{:?}", boxed_any)).unwrap();
info!("finished with rustc");
analysiskey.replace(Some(analysis));
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::collections::HashSet;
use std::io;
use std::string::String;
}
/// Run any tests/code examples in the markdown file `input`.
-pub fn test(input: &str, libs: HashSet<Path>, externs: core::Externs,
+pub fn test(input: &str, libs: Vec<Path>, externs: core::Externs,
mut test_args: Vec<String>) -> int {
let input_str = load_or_return!(input, 1, 2);
use std::collections::{HashSet, HashMap};
use testing;
-use rustc::back::link;
+use rustc::back::write;
use rustc::driver::config;
use rustc::driver::driver;
use rustc::driver::session;
pub fn run(input: &str,
cfgs: Vec<String>,
- libs: HashSet<Path>,
+ libs: Vec<Path>,
externs: core::Externs,
mut test_args: Vec<String>,
crate_name: Option<String>)
"rustdoc-test", None)
.expect("phase_2_configure_and_expand aborted in rustdoc!");
- let ctx = box(GC) core::DocContext {
+ let ctx = core::DocContext {
krate: krate,
maybe_typed: core::NotTyped(sess),
src: input_path,
inlined: RefCell::new(None),
populated_crate_impls: RefCell::new(HashSet::new()),
};
- super::ctxtkey.replace(Some(ctx));
- let mut v = RustdocVisitor::new(&*ctx, None);
+ let mut v = RustdocVisitor::new(&ctx, None);
v.visit(&ctx.krate);
- let mut krate = v.clean();
+ let mut krate = v.clean(&ctx);
match crate_name {
Some(name) => krate.name = name,
None => {}
0
}
-fn runtest(test: &str, cratename: &str, libs: HashSet<Path>, externs: core::Externs,
+fn runtest(test: &str, cratename: &str, libs: Vec<Path>, externs: core::Externs,
should_fail: bool, no_run: bool, as_test_harness: bool) {
// the test harness wants its own `main` & top level functions, so
// never wrap the test in `fn main() { ... }`
maybe_sysroot: Some(os::self_exe_path().unwrap().dir_path()),
addl_lib_search_paths: RefCell::new(libs),
crate_types: vec!(config::CrateTypeExecutable),
- output_types: vec!(link::OutputTypeExe),
+ output_types: vec!(write::OutputTypeExe),
no_trans: no_run,
externs: externs,
cg: config::CodegenOptions {
None,
span_diagnostic_handler);
- let outdir = TempDir::new("rustdoctest").expect("rustdoc needs a tempdir");
+ let outdir = TempDir::new("rustdoctest").ok().expect("rustdoc needs a tempdir");
let out = Some(outdir.path().clone());
let cfg = config::build_configuration(&sess);
let libdir = sess.target_filesearch().get_lib_path();
pub struct Collector {
pub tests: Vec<testing::TestDescAndFn>,
names: Vec<String>,
- libs: HashSet<Path>,
+ libs: Vec<Path>,
externs: core::Externs,
cnt: uint,
use_headers: bool,
}
impl Collector {
- pub fn new(cratename: String, libs: HashSet<Path>, externs: core::Externs,
+ pub fn new(cratename: String, libs: Vec<Path>, externs: core::Externs,
use_headers: bool) -> Collector {
Collector {
tests: Vec::new(),
// also, is there some reason that this doesn't use the 'visit'
// framework from syntax?
-pub struct RustdocVisitor<'a> {
+pub struct RustdocVisitor<'a, 'tcx: 'a> {
pub module: Module,
pub attrs: Vec<ast::Attribute>,
- pub cx: &'a core::DocContext,
+ pub cx: &'a core::DocContext<'tcx>,
pub analysis: Option<&'a core::CrateAnalysis>,
}
-impl<'a> RustdocVisitor<'a> {
- pub fn new<'b>(cx: &'b core::DocContext,
- analysis: Option<&'b core::CrateAnalysis>) -> RustdocVisitor<'b> {
+impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
+ pub fn new(cx: &'a core::DocContext<'tcx>,
+ analysis: Option<&'a core::CrateAnalysis>) -> RustdocVisitor<'a, 'tcx> {
RustdocVisitor {
module: Module::new(None),
attrs: Vec::new(),
}
fn stability(&self, id: ast::NodeId) -> Option<attr::Stability> {
- let tcx = match self.cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return None
- };
- stability::lookup(tcx, ast_util::local_def(id))
+ self.cx.tcx_opt().and_then(|tcx| stability::lookup(tcx, ast_util::local_def(id)))
}
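The rewritten `stability` above collapses an explicit `match` on `maybe_typed` into the `tcx_opt()` accessor plus `Option::and_then`. A standalone sketch of why the two shapes are equivalent (the types here are placeholders, not rustdoc's):

// Sketch only: `opt.and_then(f)` is the match { Some(x) => f(x), None => None }.
fn lookup(tcx: &str, id: u32) -> Option<u32> {
    if tcx.is_empty() { None } else { Some(id) }
}

fn main() {
    let tcx_opt: Option<&str> = Some("tcx");
    // Old shape: match, returning None when no tcx is available.
    let via_match = match tcx_opt {
        Some(tcx) => lookup(tcx, 7),
        None => None,
    };
    // New shape: and_then flattens Option<Option<u32>> into Option<u32>.
    let via_and_then = tcx_opt.and_then(|tcx| lookup(tcx, 7));
    assert_eq!(via_match, via_and_then);
}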
pub fn visit(&mut self, krate: &ast::Crate) {
fn resolve_id(&mut self, id: ast::NodeId, renamed: Option<ast::Ident>,
glob: bool, om: &mut Module, please_inline: bool) -> bool {
- let tcx = match self.cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return false
+ let tcx = match self.cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return false
};
let def = (*tcx.def_map.borrow())[id].def_id();
if !ast_util::is_local(def) { return false }
let mut obj = try!(expect!(self.pop(), Object));
let value = match obj.pop(&name.to_string()) {
- None => return Err(MissingFieldError(name.to_string())),
+ None => {
+ // Add a Null and try to parse it as an Option<_>
+ // to get None as a default value.
+ self.stack.push(Null);
+ match f(self) {
+ Ok(x) => x,
+ Err(_) => return Err(MissingFieldError(name.to_string())),
+ }
+ },
Some(json) => {
self.stack.push(json);
try!(f(self))
}
fn read_option<T>(&mut self, f: |&mut Decoder, bool| -> DecodeResult<T>) -> DecodeResult<T> {
+ debug!("read_option()");
match self.pop() {
Null => f(self, false),
value => { self.stack.push(value); f(self, true) }
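The two hunks above cooperate: when a struct field is absent, the decoder now pushes `Null` and retries the field's own decoder, so an `Option<_>` field falls back to `None` while any other type still yields `MissingFieldError`. A toy model of that control flow (a simplification in present-day syntax, not the serialize API itself):

// Sketch: a missing field is represented by Null, and the field's decoder
// decides whether Null is acceptable (Option maps it to None).
#[derive(PartialEq, Debug)]
enum Json { Null, Num(f64) }

// The read_option-style step: Null reads as "absent".
fn read_opt_num(stack: &mut Vec<Json>) -> Result<Option<f64>, String> {
    match stack.pop() {
        Some(Json::Null) => Ok(None),
        Some(Json::Num(n)) => Ok(Some(n)),
        None => Err("missing field".to_string()),
    }
}

fn main() {
    // Field not present in the object: push Null and retry, yielding None.
    let mut stack = vec![Json::Null];
    assert_eq!(read_opt_num(&mut stack), Ok(None));
    // Field present: decoded normally.
    stack.push(Json::Num(10.0));
    assert_eq!(read_opt_num(&mut stack), Ok(Some(10.0)));
}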
use std::{i64, u64, f32, f64, io};
use std::collections::TreeMap;
+ #[deriving(Decodable, Eq, PartialEq, Show)]
+ struct OptionData {
+ opt: Option<uint>,
+ }
+
+ #[test]
+ fn test_decode_option_none() {
+ let s = "{}";
+ let obj: OptionData = super::decode(s).unwrap();
+ assert_eq!(obj, OptionData { opt: None });
+ }
+
+ #[test]
+ fn test_decode_option_some() {
+ let s = "{ \"opt\": 10 }";
+ let obj: OptionData = super::decode(s).unwrap();
+ assert_eq!(obj, OptionData { opt: Some(10u) });
+ }
+
+ #[test]
+ fn test_decode_option_malformed() {
+ check_err::<OptionData>("{ \"opt\": [] }",
+ ExpectedError("Number".to_string(), "[]".to_string()));
+ check_err::<OptionData>("{ \"opt\": false }",
+ ExpectedError("Number".to_string(), "false".to_string()));
+ }
+
#[deriving(PartialEq, Encodable, Decodable, Show)]
enum Animal {
Dog,
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-// ignore-lexer-test FIXME #15883
-
-//! Unordered containers, implemented as hash-tables (`HashSet` and `HashMap` types)
-
-use clone::Clone;
-use cmp::{max, Eq, Equiv, PartialEq};
-use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
-use default::Default;
-use fmt::Show;
-use fmt;
-use hash::{Hash, Hasher, RandomSipHasher};
-use iter::{Iterator, FilterMap, Chain, Repeat, Zip, Extendable};
-use iter::{range, range_inclusive, FromIterator};
-use iter;
-use mem::replace;
-use num;
-use option::{Some, None, Option};
-use result::{Ok, Err};
-use ops::Index;
-
-mod table {
- use clone::Clone;
- use cmp;
- use hash::{Hash, Hasher};
- use iter::range_step_inclusive;
- use iter::{Iterator, range};
- use kinds::marker;
- use mem::{min_align_of, size_of};
- use mem::{overwrite, transmute};
- use num::{CheckedMul, is_power_of_two};
- use ops::Drop;
- use option::{Some, None, Option};
- use ptr::RawPtr;
- use ptr::set_memory;
- use ptr;
- use rt::heap::{allocate, deallocate};
-
- static EMPTY_BUCKET: u64 = 0u64;
-
- /// The raw hashtable, providing safe-ish access to the unzipped and highly
- /// optimized arrays of hashes, keys, and values.
- ///
- /// This design uses less memory and is a lot faster than the naive
- /// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
- /// option on every element, and we get a generally more cache-aware design.
- ///
- /// Key invariants of this structure:
- ///
- /// - if hashes[i] == EMPTY_BUCKET, then keys[i] and vals[i] have
- /// 'undefined' contents. Don't read from them. This invariant is
- /// enforced outside this module with the `EmptyIndex`, `FullIndex`,
- /// and `SafeHash` types.
- ///
- /// - An `EmptyIndex` is only constructed for a bucket at an index with
- /// a hash of EMPTY_BUCKET.
- ///
- /// - A `FullIndex` is only constructed for a bucket at an index with a
- /// non-EMPTY_BUCKET hash.
- ///
- /// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
- /// around hashes of zero by changing them to 0x8000_0000_0000_0000,
- /// which will likely map to the same bucket, while not being confused
- /// with "empty".
- ///
- /// - All three "arrays represented by pointers" are the same length:
- /// `capacity`. This is set at creation and never changes. The arrays
- /// are unzipped to save space (we don't have to pay for the padding
- /// between odd sized elements, such as in a map from u64 to u8), and
- /// be more cache aware (scanning through 8 hashes brings in 2 cache
- /// lines, since they're all right beside each other).
- ///
- /// You can kind of think of this module/data structure as a safe wrapper
- /// around just the "table" part of the hashtable. It enforces some
- /// invariants at the type level and employs some performance trickery,
- /// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
- ///
- /// FIXME(cgaebel):
- ///
- /// Feb 11, 2014: This hashtable was just implemented, and, hard as I tried,
- /// isn't yet totally safe. There's a "known exploit" that you can create
- /// multiple FullIndexes for a bucket, `take` one, and then still `take`
- /// the other causing undefined behavior. Currently, there's no story
- /// for how to protect against this statically. Therefore, there are asserts
- /// on `take`, `get`, `get_mut`, and `put` which check the bucket state.
- /// With time, and when we're confident this works correctly, they should
- /// be removed. Also, the bounds check in `peek` is especially painful,
- /// as that's called in the innermost loops of the hashtable and has the
- /// potential to be a major performance drain. Remove this too.
- ///
- /// Or, better than remove, only enable these checks for debug builds.
- /// There's currently no "debug-only" asserts in rust, so if you're reading
- /// this and going "what? of course there are debug-only asserts!", then
- /// please make this use them!
- #[unsafe_no_drop_flag]
- pub struct RawTable<K, V> {
- capacity: uint,
- size: uint,
- hashes: *mut u64,
- keys: *mut K,
- vals: *mut V,
- }
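A quick, standalone way to see the padding cost of the array-of-structs alternative described in the comment above (sizes shown are typical for x86-64, not guaranteed):

use std::mem::size_of;

fn main() {
    // Array-of-structs: every element carries the discriminant plus padding.
    println!("{}", size_of::<Option<(u64, u64, u8)>>()); // typically 24 or more
    // Struct-of-arrays (what RawTable does): 8 + 8 + 1 bytes per element, with
    // alignment padding paid once per array instead of once per element.
    println!("{}", size_of::<u64>() + size_of::<u64>() + size_of::<u8>()); // 17
}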
-
- /// Represents an index into a `RawTable` with no key or value in it.
- pub struct EmptyIndex {
- idx: int,
- nocopy: marker::NoCopy,
- }
-
- /// Represents an index into a `RawTable` with a key, value, and hash
- /// in it.
- pub struct FullIndex {
- idx: int,
- hash: SafeHash,
- nocopy: marker::NoCopy,
- }
-
- impl FullIndex {
- /// Since we get the hash for free whenever we check the bucket state,
- /// this function is provided for fast access, letting us avoid
- /// redundant trips back to the hashtable.
- #[inline(always)]
- pub fn hash(&self) -> SafeHash { self.hash }
-
- /// Same comment as with `hash`.
- #[inline(always)]
- pub fn raw_index(&self) -> uint { self.idx as uint }
- }
-
- /// Represents the state of a bucket: it can either have a key/value
- /// pair (be full) or not (be empty). You cannot `take` empty buckets,
- /// and you cannot `put` into full buckets.
- pub enum BucketState {
- Empty(EmptyIndex),
- Full(FullIndex),
- }
-
- /// A hash that is not zero, since we use a hash of zero to represent empty
- /// buckets.
- #[deriving(PartialEq)]
- pub struct SafeHash {
- hash: u64,
- }
-
- impl SafeHash {
- /// Peek at the hash value, which is guaranteed to be non-zero.
- #[inline(always)]
- pub fn inspect(&self) -> u64 { self.hash }
- }
-
- /// We need to remove hashes of 0. That's reserved for empty buckets.
- /// This function wraps the hasher so that it is the only way outside this
- /// module to generate a SafeHash.
- pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
- match hasher.hash(t) {
- // This constant is exceedingly likely to hash to the same
- // bucket, but it won't be counted as empty!
- EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
- h => SafeHash { hash: h },
- }
- }
-
- fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
- assert!(is_power_of_two(target_alignment));
- (unrounded + target_alignment - 1) & !(target_alignment - 1)
- }
-
- #[test]
- fn test_rounding() {
- assert_eq!(round_up_to_next(0, 4), 0);
- assert_eq!(round_up_to_next(1, 4), 4);
- assert_eq!(round_up_to_next(2, 4), 4);
- assert_eq!(round_up_to_next(3, 4), 4);
- assert_eq!(round_up_to_next(4, 4), 4);
- assert_eq!(round_up_to_next(5, 4), 8);
- }
-
- // Returns a tuple of (minimum required malloc alignment, hash_offset,
- // key_offset, val_offset, array_size), from the start of a mallocated array.
- fn calculate_offsets(
- hash_size: uint, hash_align: uint,
- keys_size: uint, keys_align: uint,
- vals_size: uint, vals_align: uint) -> (uint, uint, uint, uint, uint) {
-
- let hash_offset = 0;
- let end_of_hashes = hash_offset + hash_size;
-
- let keys_offset = round_up_to_next(end_of_hashes, keys_align);
- let end_of_keys = keys_offset + keys_size;
-
- let vals_offset = round_up_to_next(end_of_keys, vals_align);
- let end_of_vals = vals_offset + vals_size;
-
- let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
-
- (min_align, hash_offset, keys_offset, vals_offset, end_of_vals)
- }
-
- #[test]
- fn test_offset_calculation() {
- assert_eq!(calculate_offsets(128, 8, 15, 1, 4, 4 ), (8, 0, 128, 144, 148));
- assert_eq!(calculate_offsets(3, 1, 2, 1, 1, 1 ), (1, 0, 3, 5, 6));
- assert_eq!(calculate_offsets(6, 2, 12, 4, 24, 8), (8, 0, 8, 24, 48));
- }
-
- impl<K, V> RawTable<K, V> {
-
- /// Does not initialize the buckets. The caller should ensure that,
- /// at the very least, every hash is set to EMPTY_BUCKET.
- unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
- let hashes_size = capacity.checked_mul(&size_of::<u64>())
- .expect("capacity overflow");
- let keys_size = capacity.checked_mul(&size_of::< K >())
- .expect("capacity overflow");
- let vals_size = capacity.checked_mul(&size_of::< V >())
- .expect("capacity overflow");
-
- // Allocating hashmaps is a little tricky. We need to allocate three
- // arrays, but since we know their sizes and alignments up front,
- // we just allocate a single array, and then have the subarrays
- // point into it.
- //
- // This is great in theory, but in practice getting the alignment
- // right is a little subtle. Therefore, calculating offsets has been
- // factored out into a different function.
- let (malloc_alignment, hash_offset, keys_offset, vals_offset, size) =
- calculate_offsets(
- hashes_size, min_align_of::<u64>(),
- keys_size, min_align_of::< K >(),
- vals_size, min_align_of::< V >());
-
- let buffer = allocate(size, malloc_alignment);
-
- let hashes = buffer.offset(hash_offset as int) as *mut u64;
- let keys = buffer.offset(keys_offset as int) as *mut K;
- let vals = buffer.offset(vals_offset as int) as *mut V;
-
- RawTable {
- capacity: capacity,
- size: 0,
- hashes: hashes,
- keys: keys,
- vals: vals,
- }
- }
-
- /// Creates a new raw table from a given capacity. All buckets are
- /// initially empty.
- #[allow(experimental)]
- pub fn new(capacity: uint) -> RawTable<K, V> {
- unsafe {
- let ret = RawTable::new_uninitialized(capacity);
- set_memory(ret.hashes, 0u8, capacity);
- ret
- }
- }
-
- /// Reads a bucket at a given index, returning an enum indicating whether
- /// there's anything there or not. You need to match on this enum to get
- /// the appropriate types to pass on to most of the other functions in
- /// this module.
- pub fn peek(&self, index: uint) -> BucketState {
- debug_assert!(index < self.capacity);
-
- let idx = index as int;
- let hash = unsafe { *self.hashes.offset(idx) };
-
- let nocopy = marker::NoCopy;
-
- match hash {
- EMPTY_BUCKET =>
- Empty(EmptyIndex {
- idx: idx,
- nocopy: nocopy
- }),
- full_hash =>
- Full(FullIndex {
- idx: idx,
- hash: SafeHash { hash: full_hash },
- nocopy: nocopy,
- })
- }
- }
-
- /// Gets references to the key and value at a given index.
- pub fn read<'a>(&'a self, index: &FullIndex) -> (&'a K, &'a V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (&*self.keys.offset(idx), &*self.vals.offset(idx))
- }
- }
-
- /// Gets references to the key and value at a given index, with the
- /// value's reference being mutable.
- pub fn read_mut<'a>(&'a mut self, index: &FullIndex) -> (&'a K, &'a mut V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (&*self.keys.offset(idx), &mut *self.vals.offset(idx))
- }
- }
-
- /// Read everything, mutably.
- pub fn read_all_mut<'a>(&'a mut self, index: &FullIndex)
- -> (&'a mut SafeHash, &'a mut K, &'a mut V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (transmute(self.hashes.offset(idx)),
- &mut *self.keys.offset(idx), &mut *self.vals.offset(idx))
- }
- }
-
- /// Puts a key and value pair, along with the key's hash, into a given
- /// index in the hashtable. Note how the `EmptyIndex` is 'moved' into this
- /// function, because that slot will no longer be empty when we return!
- /// A FullIndex is returned for later use, pointing to the newly-filled
- /// slot in the hashtable.
- ///
- /// Use `make_hash` to construct a `SafeHash` to pass to this function.
- pub fn put(&mut self, index: EmptyIndex, hash: SafeHash, k: K, v: V) -> FullIndex {
- let idx = index.idx;
-
- unsafe {
- debug_assert_eq!(*self.hashes.offset(idx), EMPTY_BUCKET);
- *self.hashes.offset(idx) = hash.inspect();
- overwrite(&mut *self.keys.offset(idx), k);
- overwrite(&mut *self.vals.offset(idx), v);
- }
-
- self.size += 1;
-
- FullIndex { idx: idx, hash: hash, nocopy: marker::NoCopy }
- }
-
- /// Removes a key and value from the hashtable.
- ///
- /// This works similarly to `put`, building an `EmptyIndex` out of the
- /// taken FullIndex.
- pub fn take(&mut self, index: FullIndex) -> (EmptyIndex, K, V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
-
- *self.hashes.offset(idx) = EMPTY_BUCKET;
-
- // Drop the mutable constraint.
- let keys = self.keys as *const K;
- let vals = self.vals as *const V;
-
- let k = ptr::read(keys.offset(idx));
- let v = ptr::read(vals.offset(idx));
-
- self.size -= 1;
-
- (EmptyIndex { idx: idx, nocopy: marker::NoCopy }, k, v)
- }
- }
-
- /// The hashtable's capacity, similar to a vector's.
- pub fn capacity(&self) -> uint {
- self.capacity
- }
-
- /// The number of elements ever `put` in the hashtable, minus the number
- /// of elements ever `take`n.
- pub fn size(&self) -> uint {
- self.size
- }
-
- pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
- Entries { table: self, idx: 0, elems_seen: 0 }
- }
-
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
- MutEntries { table: self, idx: 0, elems_seen: 0 }
- }
-
- pub fn move_iter(self) -> MoveEntries<K, V> {
- MoveEntries { table: self, idx: 0 }
- }
- }
-
- // `read_all_mut` casts a `*u64` to a `*SafeHash`. Since we statically
- // ensure that a `FullIndex` points to an index with a non-zero hash,
- // and a `SafeHash` is just a `u64` with a different name, this is
- // safe.
- //
- // This test ensures that a `SafeHash` really IS the same size as a
- // `u64`. If you need to change the size of `SafeHash` (and
- // consequently made this test fail), `read_all_mut` needs to be
- // modified to no longer assume this.
- #[test]
- fn can_alias_safehash_as_u64() {
- assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
- }
-
- /// Iterator over shared references to entries in a table.
- pub struct Entries<'a, K:'a, V:'a> {
- table: &'a RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over mutable references to entries in a table.
- pub struct MutEntries<'a, K:'a, V:'a> {
- table: &'a mut RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over the entries in a table, consuming the table.
- pub struct MoveEntries<K, V> {
- table: RawTable<K, V>,
- idx: uint
- }
-
- impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
- fn next(&mut self) -> Option<(&'a K, &'a V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- Full(idx) => {
- self.elems_seen += 1;
- return Some(self.table.read(&idx));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size() - self.elems_seen;
- (size, Some(size))
- }
- }
-
- impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
- fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- // the transmute here fixes:
- // error: lifetime of `self` is too short to guarantee its contents
- // can be safely reborrowed
- Full(idx) => unsafe {
- self.elems_seen += 1;
- return Some(transmute(self.table.read_mut(&idx)));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size() - self.elems_seen;
- (size, Some(size))
- }
- }
-
- impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
- fn next(&mut self) -> Option<(SafeHash, K, V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- Full(idx) => {
- let h = idx.hash();
- let (_, k, v) = self.table.take(idx);
- return Some((h, k, v));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size();
- (size, Some(size))
- }
- }
-
- impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
- fn clone(&self) -> RawTable<K, V> {
- unsafe {
- let mut new_ht = RawTable::new_uninitialized(self.capacity());
-
- for i in range(0, self.capacity()) {
- match self.peek(i) {
- Empty(_) => {
- *new_ht.hashes.offset(i as int) = EMPTY_BUCKET;
- },
- Full(idx) => {
- let hash = idx.hash().inspect();
- let (k, v) = self.read(&idx);
- *new_ht.hashes.offset(i as int) = hash;
- overwrite(&mut *new_ht.keys.offset(i as int), (*k).clone());
- overwrite(&mut *new_ht.vals.offset(i as int), (*v).clone());
- }
- }
- }
-
- new_ht.size = self.size();
-
- new_ht
- }
- }
- }
-
- #[unsafe_destructor]
- impl<K, V> Drop for RawTable<K, V> {
- fn drop(&mut self) {
- // This is in reverse because we're likely to have partially taken
- // some elements out with `.move_iter()` from the front.
- for i in range_step_inclusive(self.capacity as int - 1, 0, -1) {
- // Check if the size is 0, so we don't do a useless scan when
- // dropping empty tables such as on resize.
- if self.size == 0 { break }
-
- match self.peek(i as uint) {
- Empty(_) => {},
- Full(idx) => { self.take(idx); }
- }
- }
-
- assert_eq!(self.size, 0);
-
- if self.hashes.is_not_null() {
- let hashes_size = self.capacity * size_of::<u64>();
- let keys_size = self.capacity * size_of::<K>();
- let vals_size = self.capacity * size_of::<V>();
- let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
- keys_size, min_align_of::<K>(),
- vals_size, min_align_of::<V>());
-
- unsafe {
- deallocate(self.hashes as *mut u8, size, align);
- // Remember how everything was allocated out of one buffer
- // during initialization? We only need one call to free here.
- }
-
- self.hashes = RawPtr::null();
- }
- }
- }
-}
-
-static INITIAL_LOG2_CAP: uint = 5;
-static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
-
-/// The default behavior of HashMap enforces a maximum load factor of 90.9%.
-/// This behavior is characterized by the following conditions:
-///
-/// - if `size * 1.1 < cap < size * 4` then shouldn't resize
-/// - if `cap < minimum_capacity * 2` then shouldn't shrink
-#[deriving(Clone)]
-struct DefaultResizePolicy {
- /// Doubled minimal capacity. The capacity must never drop below
- /// the minimum capacity. (The check happens before the capacity
- /// is potentially halved.)
- minimum_capacity2: uint
-}
-
-impl DefaultResizePolicy {
- fn new(new_capacity: uint) -> DefaultResizePolicy {
- DefaultResizePolicy {
- minimum_capacity2: new_capacity << 1
- }
- }
-
- #[inline]
- fn capacity_range(&self, new_size: uint) -> (uint, uint) {
- ((new_size * 11) / 10, max(new_size << 3, self.minimum_capacity2))
- }
-
- #[inline]
- fn reserve(&mut self, new_capacity: uint) {
- self.minimum_capacity2 = new_capacity << 1;
- }
-}
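Worked numbers for `capacity_range` above (a sketch of the arithmetic, not part of the original file): with 100 elements the map grows once the capacity is at or below 110, which is what keeps the load factor under roughly 90.9%, and shrinks once the capacity reaches eight times the size (subject to the reserved minimum):

fn main() {
    let new_size = 100usize;
    let minimum_capacity2 = 64usize; // 2 * a reserved minimum of 32
    let grow_at = (new_size * 11) / 10;
    let shrink_at = std::cmp::max(new_size << 3, minimum_capacity2);
    assert_eq!((grow_at, shrink_at), (110, 800));
    // grow when capacity <= 110  => load factor stays below 100/110 ≈ 90.9%
    // shrink when capacity >= 800 => the table never stays under 1/8 full
}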
-
-// The main performance trick in this hashmap is called Robin Hood Hashing.
-// It gains its excellent performance from one key invariant:
-//
-// If an insertion collides with an existing element, and that element's
-// "probe distance" (how far away the element is from its ideal location)
-// is higher than how far we've already probed, swap the elements.
-//
-// This massively lowers variance in probe distance, and allows us to get very
-// high load factors with good performance. The 90% load factor I use is rather
-// conservative.
-//
-// > Why a load factor of approximately 90%?
-//
-// In general, all the distances to initial buckets will converge on the mean.
-// At a load factor of α, the odds of finding the target bucket after k
-// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
-// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
-// this down to make the math easier on the CPU and avoid its FPU.
-// Since on average we start the probing in the middle of a cache line, this
-// strategy pulls in two cache lines of hashes on every lookup. I think that's
-// pretty good, but if you want to trade off some space, it could go down to one
-// cache line on average with an α of 0.84.
-//
-// > Wait, what? Where did you get 1-α^k from?
-//
-// On the first probe, the odds of a collision with an existing element are α.
-// The odds of this happening twice in a row are approximately α^2; for three
-// times, α^3, and so on. Therefore, the odds of colliding k times are α^k, and
-// the odds of NOT colliding after k tries are 1-α^k.
-//
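The arithmetic in the comment above can be checked directly; a standalone sketch (not from the original file) solving 1 - α^k = 0.5 for k = 8:

fn main() {
    // 1 - α^8 = 0.5  =>  α = 0.5^(1/8)
    let alpha = 0.5f64.powf(1.0 / 8.0);
    println!("α ≈ {:.3}", alpha); // ≈ 0.917, rounded down to the 90.9% used above
    // Sanity check at the implemented load factor:
    println!("P(≤ 8 probes) ≈ {:.3}", 1.0 - 0.909f64.powi(8)); // ≈ 0.53
}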
-// Future Improvements (FIXME!)
-// ============================
-//
-// Allow the load factor to be changed dynamically and/or at initialization.
-//
-// Also, would it be possible for us to reuse storage when growing the
-// underlying table? This is exactly the use case for 'realloc', and may
-// be worth exploring.
-//
-// Future Optimizations (FIXME!)
-// =============================
-//
-// The paper cited below mentions an implementation which keeps track of the
-// distance-to-initial-bucket histogram. I'm suspicious of this approach because
-// it requires maintaining an internal map. If this map were replaced with a
-// hashmap, it would be faster, but now our data structure is self-referential
-// and blows up. Also, this allows very good first guesses, but array accesses
-// are no longer linear and in one direction, as we have now. There is also
-// memory and cache pressure that this map would entail that would be very
-// difficult to properly see in a microbenchmark.
-//
-// Another possible design choice that I made without any real reason is
-// parameterizing the raw table over keys and values. Technically, all we need
-// is the size and alignment of keys and values, and the code should be just as
-// efficient (well, we might need one for power-of-two size and one for not...).
-// This has the potential to reduce code bloat in rust executables, without
-// really losing anything except 4 words (key size, key alignment, val size,
-// val alignment) which can be passed in to every call of a `RawTable` function.
-// This would definitely be an avenue worth exploring if people start complaining
-// about the size of rust executables.
-//
-// There's also an "optimization" that has been omitted regarding how the
-// hashtable allocates. The vector type has set the expectation that a hashtable
-// which never has an element inserted should not allocate. I'm suspicious of
-// implementing this for hashtables, because supporting it has no performance
-// benefit over using an `Option<HashMap<K, V>>`, and is significantly more
-// complicated.
-
-/// A hash map implementation which uses linear probing with Robin
-/// Hood bucket stealing.
-///
-/// The hashes are all keyed by the task-local random number generator
-/// on creation by default. This means that the ordering of the keys is
-/// randomized, but makes the tables more resistant to
-/// denial-of-service attacks (Hash DoS). This behaviour can be
-/// overridden with one of the constructors.
-///
-/// It is required that the keys implement the `Eq` and `Hash` traits, although
-/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
-///
-/// Relevant papers/articles:
-///
-/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
-/// 2. Emmanuel Goossaert. ["Robin Hood
-/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
-/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
-/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
-///
-/// # Example
-///
-/// ```
-/// use std::collections::HashMap;
-///
-/// // type inference lets us omit an explicit type signature (which
-/// // would be `HashMap<&str, &str>` in this example).
-/// let mut book_reviews = HashMap::new();
-///
-/// // review some books.
-/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
-/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
-/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
-/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
-///
-/// // check for a specific one.
-/// if !book_reviews.contains_key(&("Les Misérables")) {
-/// println!("We've got {} reviews, but Les Misérables ain't one.",
-/// book_reviews.len());
-/// }
-///
-/// // oops, this review has a lot of spelling mistakes, let's delete it.
-/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
-///
-/// // look up the values associated with some keys.
-/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
-/// for book in to_find.iter() {
-/// match book_reviews.find(book) {
-/// Some(review) => println!("{}: {}", *book, *review),
-/// None => println!("{} is unreviewed.", *book)
-/// }
-/// }
-///
-/// // iterate over everything.
-/// for (book, review) in book_reviews.iter() {
-/// println!("{}: \"{}\"", *book, *review);
-/// }
-/// ```
-///
-/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
-/// We must also derive `PartialEq`.
-///
-/// ```
-/// use std::collections::HashMap;
-///
-/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
-/// }
-///
-/// let mut vikings = HashMap::new();
-///
-/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
-/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
-/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
-///
-/// // Use derived implementation to print the vikings.
-/// for (land, viking) in vikings.iter() {
-/// println!("{} at {}", viking, land);
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct HashMap<K, V, H = RandomSipHasher> {
- // All hashes are keyed on these values, to prevent hash collision attacks.
- hasher: H,
-
- table: table::RawTable<K, V>,
-
- // We keep this at the end since it might as well have tail padding.
- resize_policy: DefaultResizePolicy,
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
- // Probe the `idx`th bucket for a given hash, returning the index of the
- // target bucket.
- //
- // This exploits the power-of-two size of the hashtable. As long as this
- // is always true, we can use a bitmask of cap-1 to do modular arithmetic.
- //
- // Prefer using this with increasing values of `idx` rather than repeatedly
- // calling `probe_next`. This reduces data-dependencies between loops, which
- // can help the optimizer, and certainly won't hurt it. `probe_next` is
- // simply for convenience, and is no more efficient than `probe`.
- fn probe(&self, hash: &table::SafeHash, idx: uint) -> uint {
- let hash_mask = self.table.capacity() - 1;
-
- // So I heard a rumor that unsigned overflow is safe in rust..
- ((hash.inspect() as uint) + idx) & hash_mask
- }
-
- // Generate the next probe in a sequence. Prefer using 'probe' by itself,
- // but this can sometimes be useful.
- fn probe_next(&self, probe: uint) -> uint {
- let hash_mask = self.table.capacity() - 1;
- (probe + 1) & hash_mask
- }
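A standalone check of the masking trick used by `probe` above: because the capacity is a power of two, `(hash + i) & (capacity - 1)` is modular arithmetic, and over `capacity` steps the probe sequence visits every slot exactly once:

fn main() {
    let cap = 8usize;
    let hash: usize = 0xdeadbeef;
    let mut probes: Vec<usize> = (0..cap).map(|i| (hash + i) & (cap - 1)).collect();
    probes.sort();
    assert_eq!(probes, (0..cap).collect::<Vec<_>>()); // a permutation of all slots
}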
-
- fn make_hash<X: Hash<S>>(&self, x: &X) -> table::SafeHash {
- table::make_hash(&self.hasher, x)
- }
-
- /// Get the distance of the bucket at the given index that it lies
- /// from its 'ideal' location.
- ///
- /// In the cited blog posts above, this is called the "distance to
- /// initial bucket", or DIB.
- fn bucket_distance(&self, index_of_elem: &table::FullIndex) -> uint {
- // where the hash of the element that happens to reside at
- // `index_of_elem` tried to place itself first.
- let first_probe_index = self.probe(&index_of_elem.hash(), 0);
-
- let raw_index = index_of_elem.raw_index();
-
- if first_probe_index <= raw_index {
- // probe just went forward
- raw_index - first_probe_index
- } else {
- // probe wrapped around the hashtable
- raw_index + (self.table.capacity() - first_probe_index)
- }
- }
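The wraparound case in `bucket_distance` is the subtle one; a standalone sketch of the same computation (illustrative names, capacity a power of two):

fn bucket_distance(capacity: usize, first_probe: usize, raw_index: usize) -> usize {
    if first_probe <= raw_index {
        raw_index - first_probe                // probe just went forward
    } else {
        raw_index + (capacity - first_probe)   // probe wrapped around the table
    }
}

fn main() {
    assert_eq!(bucket_distance(8, 2, 5), 3);   // 2 -> 3 -> 4 -> 5
    assert_eq!(bucket_distance(8, 6, 1), 3);   // 6 -> 7 -> 0 -> 1, wrapping
}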
-
- /// Search for a pre-hashed key.
- fn search_hashed_generic(&self, hash: &table::SafeHash, is_match: |&K| -> bool)
- -> Option<table::FullIndex> {
- for num_probes in range(0u, self.table.size()) {
- let probe = self.probe(hash, num_probes);
-
- let idx = match self.table.peek(probe) {
- table::Empty(_) => return None, // hit an empty bucket
- table::Full(idx) => idx
- };
-
- // We can finish the search early if we hit any bucket
- // with a lower distance to initial bucket than we've probed.
- if self.bucket_distance(&idx) < num_probes { return None }
-
- // If the hash doesn't match, it can't be this one..
- if *hash != idx.hash() { continue }
-
- let (k, _) = self.table.read(&idx);
-
- // If the key doesn't match, it can't be this one..
- if !is_match(k) { continue }
-
- return Some(idx);
- }
-
- return None
- }
-
- fn search_hashed(&self, hash: &table::SafeHash, k: &K) -> Option<table::FullIndex> {
- self.search_hashed_generic(hash, |k_| *k == *k_)
- }
-
- fn search_equiv<Q: Hash<S> + Equiv<K>>(&self, q: &Q) -> Option<table::FullIndex> {
- self.search_hashed_generic(&self.make_hash(q), |k| q.equiv(k))
- }
-
- /// Search for a key, yielding the index if it's found in the hashtable.
- /// If you already have the hash for the key lying around, use
- /// search_hashed.
- fn search(&self, k: &K) -> Option<table::FullIndex> {
- self.search_hashed(&self.make_hash(k), k)
- }
-
- fn pop_internal(&mut self, starting_index: table::FullIndex) -> Option<V> {
- let starting_probe = starting_index.raw_index();
-
- let ending_probe = {
- let mut probe = self.probe_next(starting_probe);
- for _ in range(0u, self.table.size()) {
- match self.table.peek(probe) {
- table::Empty(_) => {}, // empty bucket. this is the end of our shifting.
- table::Full(idx) => {
- // Bucket that isn't us, which has a non-zero probe distance.
- // This isn't the ending index, so keep searching.
- if self.bucket_distance(&idx) != 0 {
- probe = self.probe_next(probe);
- continue;
- }
-
- // if we do have a bucket_distance of zero, we're at the end
- // of what we need to shift.
- }
- }
- break;
- }
-
- probe
- };
-
- let (_, _, retval) = self.table.take(starting_index);
-
- let mut probe = starting_probe;
- let mut next_probe = self.probe_next(probe);
-
- // backwards-shift all the elements after our newly-deleted one.
- while next_probe != ending_probe {
- match self.table.peek(next_probe) {
- table::Empty(_) => {
- // nothing to shift in. just empty it out.
- match self.table.peek(probe) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
- },
- table::Full(next_idx) => {
- // something to shift. move it over!
- let next_hash = next_idx.hash();
- let (_, next_key, next_val) = self.table.take(next_idx);
- match self.table.peek(probe) {
- table::Empty(idx) => {
- self.table.put(idx, next_hash, next_key, next_val);
- },
- table::Full(idx) => {
- let (emptyidx, _, _) = self.table.take(idx);
- self.table.put(emptyidx, next_hash, next_key, next_val);
- }
- }
- }
- }
-
- probe = next_probe;
- next_probe = self.probe_next(next_probe);
- }
-
- // The backwards shift is done, but there's still an element left!
- // Empty it out.
- match self.table.peek(probe) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
-
- // Now we're done all our shifting. Return the value we grabbed
- // earlier.
- return Some(retval);
- }
-}
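`pop_internal` above implements the "backward shift deletion" from the third paper cited further down: instead of leaving a tombstone, every element after the removed one with a non-zero probe distance slides back a slot. A toy, standalone illustration on an array of `(distance, key)` pairs:

fn main() {
    // slot:          0               1             2             3
    let mut t = [Some((0u32, "a")), Some((0, "b")), Some((1, "c")), None];
    t[1] = None;                                  // remove "b"
    // "c" sat one slot past its ideal bucket (distance 1): shift it back.
    t[1] = t[2].take().map(|(d, k)| (d - 1, k));
    assert_eq!(t[1], Some((0, "c")));             // back in its ideal bucket
    assert_eq!(t[2], None);                       // the hole moved to the end
}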
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
- /// Return the number of elements in the map.
- fn len(&self) -> uint { self.table.size() }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
- /// Clear the map, removing all key-value pairs. Keeps the allocated memory
- /// for reuse.
- fn clear(&mut self) {
- // Prevent reallocations from happening from now on. Makes it possible
- // for the map to be reused but has a downside: reserves permanently.
- self.resize_policy.reserve(self.table.size());
-
- for i in range(0, self.table.capacity()) {
- match self.table.peek(i) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
- }
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
- fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
- self.search(k).map(|idx| {
- let (_, v) = self.table.read(&idx);
- v
- })
- }
-
- fn contains_key(&self, k: &K) -> bool {
- self.search(k).is_some()
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
- fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
- match self.search(k) {
- None => None,
- Some(idx) => {
- let (_, v) = self.table.read_mut(&idx);
- Some(v)
- }
- }
- }
-
- fn swap(&mut self, k: K, v: V) -> Option<V> {
- let hash = self.make_hash(&k);
- let potential_new_size = self.table.size() + 1;
- self.make_some_room(potential_new_size);
-
- for dib in range_inclusive(0u, self.table.size()) {
- let probe = self.probe(&hash, dib);
-
- let idx = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Found a hole!
- self.table.put(idx, hash, k, v);
- return None;
- },
- table::Full(idx) => idx
- };
-
- if idx.hash() == hash {
- let (bucket_k, bucket_v) = self.table.read_mut(&idx);
- if k == *bucket_k {
- // Found an existing value.
- return Some(replace(bucket_v, v));
- }
- }
-
- let probe_dib = self.bucket_distance(&idx);
-
- if probe_dib < dib {
- // Found a luckier bucket. This implies that the key does not
- // already exist in the hashtable. Just do a robin hood
- // insertion, then.
- self.robin_hood(idx, probe_dib, hash, k, v);
- return None;
- }
- }
-
- // We really shouldn't be here.
- fail!("Internal HashMap error: Out of space.");
- }
-
- fn pop(&mut self, k: &K) -> Option<V> {
- if self.table.size() == 0 {
- return None
- }
-
- let potential_new_size = self.table.size() - 1;
- self.make_some_room(potential_new_size);
-
- let starting_index = match self.search(k) {
- Some(idx) => idx,
- None => return None,
- };
-
- self.pop_internal(starting_index)
- }
-
-}
-
-impl<K: Hash + Eq, V> HashMap<K, V, RandomSipHasher> {
- /// Create an empty HashMap.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::new();
- /// ```
- #[inline]
- pub fn new() -> HashMap<K, V, RandomSipHasher> {
- HashMap::with_capacity(INITIAL_CAPACITY)
- }
-
- /// Creates an empty hash map with the given initial capacity.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
- /// ```
- #[inline]
- pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomSipHasher> {
- let hasher = RandomSipHasher::new();
- HashMap::with_capacity_and_hasher(capacity, hasher)
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
- /// Creates an empty hashmap which will use the given hasher to hash keys.
- ///
- /// The created map has the default initial capacity.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut map = HashMap::with_hasher(h);
- /// map.insert(1i, 2u);
- /// ```
- #[inline]
- pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
- HashMap::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
- }
-
- /// Create an empty HashMap with space for at least `capacity`
- /// elements, using `hasher` to hash the keys.
- ///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut map = HashMap::with_capacity_and_hasher(10, h);
- /// map.insert(1i, 2u);
- /// ```
- #[inline]
- pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
- let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
- HashMap {
- hasher: hasher,
- resize_policy: DefaultResizePolicy::new(cap),
- table: table::RawTable::new(cap),
- }
- }
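A quick check of the rounding in `with_capacity_and_hasher` above (a sketch; `next_power_of_two` here is the std method on unsigned integers rather than the old `num` free function):

fn main() {
    // Requested capacities are clamped to INITIAL_CAPACITY (32) and rounded up.
    assert_eq!(std::cmp::max(32usize, 100).next_power_of_two(), 128);
    assert_eq!(std::cmp::max(32usize, 10).next_power_of_two(), 32);
}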
-
- /// The hashtable will never try to shrink below this size. You can use
- /// this function to reduce reallocations if your hashtable frequently
- /// grows and shrinks by large amounts.
- ///
- /// This function has no effect on the operational semantics of the
- /// hashtable, only on performance.
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::new();
- /// map.reserve(10);
- /// ```
- pub fn reserve(&mut self, new_minimum_capacity: uint) {
- let cap = num::next_power_of_two(
- max(INITIAL_CAPACITY, new_minimum_capacity));
-
- self.resize_policy.reserve(cap);
-
- if self.table.capacity() < cap {
- self.resize(cap);
- }
- }
-
- /// Resizes the internal vectors to a new capacity. It's your responsibility to:
- /// 1) Make sure the new capacity is enough for all the elements, accounting
- /// for the load factor.
- /// 2) Ensure new_capacity is a power of two.
- fn resize(&mut self, new_capacity: uint) {
- assert!(self.table.size() <= new_capacity);
- assert!(num::is_power_of_two(new_capacity));
-
- let old_table = replace(&mut self.table, table::RawTable::new(new_capacity));
- let old_size = old_table.size();
-
- for (h, k, v) in old_table.move_iter() {
- self.insert_hashed_nocheck(h, k, v);
- }
-
- assert_eq!(self.table.size(), old_size);
- }
-
- /// Performs any necessary resize operations, such that there's space for
- /// new_size elements.
- fn make_some_room(&mut self, new_size: uint) {
- let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
- let cap = self.table.capacity();
-
- // An invalid value shouldn't make us run out of space.
- debug_assert!(grow_at >= new_size);
-
- if cap <= grow_at {
- let new_capacity = cap << 1;
- self.resize(new_capacity);
- } else if shrink_at <= cap {
- let new_capacity = cap >> 1;
- self.resize(new_capacity);
- }
- }
-
- /// Perform robin hood bucket stealing at the given 'index'. You must
- /// also pass that probe's "distance to initial bucket" so we don't have
- /// to recalculate it, as well as the total number of probes already done
- /// so we have some sort of upper bound on the number of probes to do.
- ///
- /// 'hash', 'k', and 'v' are the elements to robin hood into the hashtable.
- fn robin_hood(&mut self, mut index: table::FullIndex, mut dib_param: uint,
- mut hash: table::SafeHash, mut k: K, mut v: V) {
- 'outer: loop {
- let (old_hash, old_key, old_val) = {
- let (old_hash_ref, old_key_ref, old_val_ref) =
- self.table.read_all_mut(&index);
-
- let old_hash = replace(old_hash_ref, hash);
- let old_key = replace(old_key_ref, k);
- let old_val = replace(old_val_ref, v);
-
- (old_hash, old_key, old_val)
- };
-
- let mut probe = self.probe_next(index.raw_index());
-
- for dib in range(dib_param + 1, self.table.size()) {
- let full_index = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Finally. A hole!
- self.table.put(idx, old_hash, old_key, old_val);
- return;
- },
- table::Full(idx) => idx
- };
-
- let probe_dib = self.bucket_distance(&full_index);
-
- // Robin hood! Steal the spot.
- if probe_dib < dib {
- index = full_index;
- dib_param = probe_dib;
- hash = old_hash;
- k = old_key;
- v = old_val;
- continue 'outer;
- }
-
- probe = self.probe_next(probe);
- }
-
- fail!("HashMap fatal error: 100% load factor?");
- }
- }
-
- /// Insert a pre-hashed key-value pair, without first checking
- /// that there's enough room in the buckets. Returns a reference to the
- /// newly inserted value.
- ///
- /// If the key already exists, the hashtable will be returned untouched
- /// and a reference to the existing element will be returned.
- fn insert_hashed_nocheck<'a>(
- &'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
-
- for dib in range_inclusive(0u, self.table.size()) {
- let probe = self.probe(&hash, dib);
-
- let idx = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Found a hole!
- let fullidx = self.table.put(idx, hash, k, v);
- let (_, val) = self.table.read_mut(&fullidx);
- return val;
- },
- table::Full(idx) => idx
- };
-
- if idx.hash() == hash {
- let (bucket_k, bucket_v) = self.table.read_mut(&idx);
- // FIXME #12147 the conditional return confuses
- // borrowck if we return bucket_v directly
- let bv: *mut V = bucket_v;
- if k == *bucket_k {
- // Key already exists. Get its reference.
- return unsafe {&mut *bv};
- }
- }
-
- let probe_dib = self.bucket_distance(&idx);
-
- if probe_dib < dib {
- // Found a luckier bucket than me. Better steal his spot.
- self.robin_hood(idx, probe_dib, hash, k, v);
-
- // Now that it's stolen, just read the value's pointer
- // right out of the table!
- match self.table.peek(probe) {
- table::Empty(_) => fail!("Just stole a spot, but now that spot's empty."),
- table::Full(idx) => {
- let (_, v) = self.table.read_mut(&idx);
- return v;
- }
- }
- }
- }
-
- // We really shouldn't be here.
- fail!("Internal HashMap error: Out of space.");
- }
-
- /// Inserts an element which has already been hashed, returning a reference
- /// to that element inside the hashtable. This is more efficient than using
- /// `insert`, since the key will not be rehashed.
- fn insert_hashed<'a>(&'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
- let potential_new_size = self.table.size() + 1;
- self.make_some_room(potential_new_size);
- self.insert_hashed_nocheck(hash, k, v)
- }
-
- /// Return the value corresponding to the key in the map, or insert
- /// and return the value if it doesn't exist.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 1i with key "a"
- /// assert_eq!(*map.find_or_insert("a", 1i), 1);
- ///
- /// // Find the existing key
- /// assert_eq!(*map.find_or_insert("a", -2), 1);
- /// ```
- pub fn find_or_insert<'a>(&'a mut self, k: K, v: V) -> &'a mut V {
- self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
- }
-
- /// Return the value corresponding to the key in the map, or create,
- /// insert, and return a new value if it doesn't exist.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 10 with key 2
- /// assert_eq!(*map.find_or_insert_with(2i, |&key| 5 * key as uint), 10u);
- ///
- /// // Find the existing key
- /// assert_eq!(*map.find_or_insert_with(2, |&key| key as uint), 10);
- /// ```
- pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
- -> &'a mut V {
- self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
- }
-
- /// Insert a key-value pair into the map if the key is not already present.
- /// Otherwise, modify the existing value for the key.
- /// Returns the new or modified value for the key.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 2 with key "a"
- /// assert_eq!(*map.insert_or_update_with("a", 2u, |_key, val| *val = 3), 2);
- ///
- /// // Update and return the existing value
- /// assert_eq!(*map.insert_or_update_with("a", 9, |_key, val| *val = 7), 7);
- /// assert_eq!(map["a"], 7);
- /// ```
- pub fn insert_or_update_with<'a>(
- &'a mut self,
- k: K,
- v: V,
- f: |&K, &mut V|)
- -> &'a mut V {
- self.find_with_or_insert_with(k, v, |k, v, _a| f(k, v), |_k, a| a)
- }
-
- /// Modify and return the value corresponding to the key in the map, or
- /// insert and return a new value if it doesn't exist.
- ///
- /// This method allows for all insertion behaviours of a hashmap;
- /// see methods like
- /// [`insert`](../trait.MutableMap.html#tymethod.insert),
- /// [`find_or_insert`](#method.find_or_insert) and
- /// [`insert_or_update_with`](#method.insert_or_update_with)
- /// for less general and more friendly variations of this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// // map some strings to vectors of strings
- /// let mut map = HashMap::new();
- /// map.insert("a key", vec!["value"]);
- /// map.insert("z key", vec!["value"]);
- ///
- /// let new = vec!["a key", "b key", "z key"];
- ///
- /// for k in new.move_iter() {
- /// map.find_with_or_insert_with(
- /// k, "new value",
- /// // if the key does exist either prepend or append this
- /// // new value based on the first letter of the key.
- /// |key, already, new| {
- /// if key.as_slice().starts_with("z") {
- /// already.insert(0, new);
- /// } else {
- /// already.push(new);
- /// }
- /// },
- /// // if the key doesn't exist in the map yet, add it in
- /// // the obvious way.
- /// |_k, v| vec![v]);
- /// }
- ///
- /// assert_eq!(map.len(), 3);
- /// assert_eq!(map["a key"], vec!["value", "new value"]);
- /// assert_eq!(map["b key"], vec!["new value"]);
- /// assert_eq!(map["z key"], vec!["new value", "value"]);
- /// ```
- pub fn find_with_or_insert_with<'a, A>(&'a mut self,
- k: K,
- a: A,
- found: |&K, &mut V, A|,
- not_found: |&K, A| -> V)
- -> &'a mut V {
- let hash = self.make_hash(&k);
- match self.search_hashed(&hash, &k) {
- None => {
- let v = not_found(&k, a);
- self.insert_hashed(hash, k, v)
- },
- Some(idx) => {
- let (_, v_ref) = self.table.read_mut(&idx);
- found(&k, v_ref, a);
- v_ref
- }
- }
- }
-
- /// Retrieves a value for the given key.
- /// See [`find`](../trait.Map.html#tymethod.find) for a non-failing alternative.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// #![allow(deprecated)]
- ///
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// assert_eq!(map.get(&"a"), &1);
- /// ```
- #[deprecated = "prefer indexing instead, e.g., map[key]"]
- pub fn get<'a>(&'a self, k: &K) -> &'a V {
- match self.find(k) {
- Some(v) => v,
- None => fail!("no entry found for key")
- }
- }
-
- /// Retrieves a mutable value for the given key.
- /// See [`find_mut`](../trait.MutableMap.html#tymethod.find_mut) for a non-failing alternative.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// {
- /// // val will freeze map to prevent usage during its lifetime
- /// let val = map.get_mut(&"a");
- /// *val = 40;
- /// }
- /// assert_eq!(map["a"], 40);
- ///
- /// // A more direct way could be:
- /// *map.get_mut(&"a") = -2;
- /// assert_eq!(map["a"], -2);
- /// ```
- pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
- match self.find_mut(k) {
- Some(v) => v,
- None => fail!("no entry found for key")
- }
- }
-
- /// Return true if the map contains a value for the specified key,
- /// using equivalence.
- ///
- /// See [pop_equiv](#method.pop_equiv) for an extended example.
- pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
- self.search_equiv(key).is_some()
- }
-
- /// Return the value corresponding to the key in the map, using
- /// equivalence.
- ///
- /// See [pop_equiv](#method.pop_equiv) for an extended example.
- pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
- match self.search_equiv(k) {
- None => None,
- Some(idx) => {
- let (_, v_ref) = self.table.read(&idx);
- Some(v_ref)
- }
- }
- }
-
- /// Remove an equivalent key from the map, returning the value at the
- /// key if the key was previously in the map.
- ///
- /// # Example
- ///
- /// This is a slightly silly example where we define the number's parity as
- /// the equivalence class. It is important that the values hash the same,
- /// which is why we override `Hash`.
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::Hash;
- /// use std::hash::sip::SipState;
- ///
- /// #[deriving(Eq, PartialEq)]
- /// struct EvenOrOdd {
- /// num: uint
- /// };
- ///
- /// impl Hash for EvenOrOdd {
- /// fn hash(&self, state: &mut SipState) {
- /// let parity = self.num % 2;
- /// parity.hash(state);
- /// }
- /// }
- ///
- /// impl Equiv<EvenOrOdd> for EvenOrOdd {
- /// fn equiv(&self, other: &EvenOrOdd) -> bool {
- /// self.num % 2 == other.num % 2
- /// }
- /// }
- ///
- /// let mut map = HashMap::new();
- /// map.insert(EvenOrOdd { num: 3 }, "foo");
- ///
- /// assert!(map.contains_key_equiv(&EvenOrOdd { num: 1 }));
- /// assert!(!map.contains_key_equiv(&EvenOrOdd { num: 4 }));
- ///
- /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 5 }), Some(&"foo"));
- /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 2 }), None);
- ///
- /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 1 }), Some("foo"));
- /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 2 }), None);
- ///
- /// ```
- #[experimental]
- pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
- if self.table.size() == 0 {
- return None
- }
-
- let potential_new_size = self.table.size() - 1;
- self.make_some_room(potential_new_size);
-
- let starting_index = match self.search_equiv(k) {
- Some(idx) => idx,
- None => return None,
- };
-
- self.pop_internal(starting_index)
- }
-
- /// An iterator visiting all keys in arbitrary order.
- /// Iterator element type is `&'a K`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for key in map.keys() {
- /// println!("{}", key);
- /// }
- /// ```
- pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
- self.iter().map(|(k, _v)| k)
- }
-
- /// An iterator visiting all values in arbitrary order.
- /// Iterator element type is `&'a V`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for key in map.values() {
- /// println!("{}", key);
- /// }
- /// ```
- pub fn values<'a>(&'a self) -> Values<'a, K, V> {
- self.iter().map(|(_k, v)| v)
- }
-
- /// An iterator visiting all key-value pairs in arbitrary order.
- /// Iterator element type is `(&'a K, &'a V)`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for (key, val) in map.iter() {
- /// println!("key: {} val: {}", key, val);
- /// }
- /// ```
- pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
- self.table.iter()
- }
-
- /// An iterator visiting all key-value pairs in arbitrary order,
- /// with mutable references to the values.
- /// Iterator element type is `(&'a K, &'a mut V)`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// // Update all values
- /// for (_, val) in map.mut_iter() {
- /// *val *= 2;
- /// }
- ///
- /// for (key, val) in map.iter() {
- /// println!("key: {} val: {}", key, val);
- /// }
- /// ```
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
- self.table.mut_iter()
- }
-
- /// Creates a consuming iterator, that is, one that moves each key-value
- /// pair out of the map in arbitrary order. The map cannot be used after
- /// calling this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// // Not possible with .iter()
- /// let vec: Vec<(&str, int)> = map.move_iter().collect();
- /// ```
- pub fn move_iter(self) -> MoveEntries<K, V> {
- self.table.move_iter().map(|(_, k, v)| (k, v))
- }
-}
-
-impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
- /// Return a copy of the value corresponding to the key.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map: HashMap<uint, String> = HashMap::new();
- /// map.insert(1u, "foo".to_string());
- /// let s: String = map.find_copy(&1).unwrap();
- /// ```
- pub fn find_copy(&self, k: &K) -> Option<V> {
- self.find(k).map(|v| (*v).clone())
- }
-
- /// Return a copy of the value corresponding to the key.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map: HashMap<uint, String> = HashMap::new();
- /// map.insert(1u, "foo".to_string());
- /// let s: String = map.get_copy(&1);
- /// ```
- pub fn get_copy(&self, k: &K) -> V {
- (*self.get(k)).clone()
- }
-}
-
-impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
- fn eq(&self, other: &HashMap<K, V, H>) -> bool {
- if self.len() != other.len() { return false; }
-
- self.iter()
- .all(|(key, value)| {
- match other.find(key) {
- None => false,
- Some(v) => *value == *v
- }
- })
- }
-}
-
-impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
-
-impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- try!(write!(f, "{{"));
-
- for (i, (k, v)) in self.iter().enumerate() {
- if i != 0 { try!(write!(f, ", ")); }
- try!(write!(f, "{}: {}", *k, *v));
- }
-
- write!(f, "}}")
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
- fn default() -> HashMap<K, V, H> {
- HashMap::with_hasher(Default::default())
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Index<K, V> for HashMap<K, V, H> {
- #[inline]
- fn index<'a>(&'a self, index: &K) -> &'a V {
- self.get(index)
- }
-}
-
-// FIXME(#12825) Indexing will always try IndexMut first and that causes issues.
-/*impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> ops::IndexMut<K, V> for HashMap<K, V, H> {
- #[inline]
- fn index_mut<'a>(&'a mut self, index: &K) -> &'a mut V {
- self.get_mut(index)
- }
-}*/
-
-/// HashMap iterator
-pub type Entries<'a, K, V> = table::Entries<'a, K, V>;
-
-/// HashMap mutable values iterator
-pub type MutEntries<'a, K, V> = table::MutEntries<'a, K, V>;
-
-/// HashMap move iterator
-pub type MoveEntries<K, V> =
- iter::Map<'static, (table::SafeHash, K, V), (K, V), table::MoveEntries<K, V>>;
-
-/// HashMap keys iterator
-pub type Keys<'a, K, V> =
- iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
-
-/// HashMap values iterator
-pub type Values<'a, K, V> =
- iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
- fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
- let (lower, _) = iter.size_hint();
- let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
- map.extend(iter);
- map
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
- fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
- for (k, v) in iter {
- self.insert(k, v);
- }
- }
-}
-
-/// HashSet iterator
-pub type SetItems<'a, K> =
- iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
-
-/// HashSet move iterator
-pub type SetMoveItems<K> =
- iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
-
-/// An implementation of a hash set using the underlying representation of a
-/// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
-/// requires that the elements implement the `Eq` and `Hash` traits.
-///
-/// # Example
-///
-/// ```
-/// use std::collections::HashSet;
-///
-/// // Type inference lets us omit an explicit type signature (which
-/// // would be `HashSet<&str>` in this example).
-/// let mut books = HashSet::new();
-///
-/// // Add some books.
-/// books.insert("A Dance With Dragons");
-/// books.insert("To Kill a Mockingbird");
-/// books.insert("The Odyssey");
-/// books.insert("The Great Gatsby");
-///
-/// // Check for a specific one.
-/// if !books.contains(&("The Winds of Winter")) {
-/// println!("We have {} books, but The Winds of Winter ain't one.",
-/// books.len());
-/// }
-///
-/// // Remove a book.
-/// books.remove(&"The Odyssey");
-///
-/// // Iterate over everything.
-/// for book in books.iter() {
-/// println!("{}", *book);
-/// }
-/// ```
-///
-/// The easiest way to use `HashSet` with a custom type is to derive
-/// `Eq` and `Hash`. We must also derive `PartialEq`; in the future this
-/// will be implied by `Eq`.
-///
-/// ```rust
-/// use std::collections::HashSet;
-///
-/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
-/// }
-///
-/// let mut vikings = HashSet::new();
-///
-/// vikings.insert(Viking { name: "Einar", power: 9u });
-/// vikings.insert(Viking { name: "Einar", power: 9u });
-/// vikings.insert(Viking { name: "Olaf", power: 4u });
-/// vikings.insert(Viking { name: "Harald", power: 8u });
-///
-/// // Use derived implementation to print the vikings.
-/// for x in vikings.iter() {
-/// println!("{}", x);
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct HashSet<T, H = RandomSipHasher> {
- map: HashMap<T, (), H>
-}
-
-impl<T: Hash + Eq> HashSet<T, RandomSipHasher> {
- /// Create an empty HashSet.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::new();
- /// ```
- #[inline]
- pub fn new() -> HashSet<T, RandomSipHasher> {
- HashSet::with_capacity(INITIAL_CAPACITY)
- }
-
- /// Create an empty HashSet with space for at least `n` elements in
- /// the hash table.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::with_capacity(10);
- /// ```
- #[inline]
- pub fn with_capacity(capacity: uint) -> HashSet<T, RandomSipHasher> {
- HashSet { map: HashMap::with_capacity(capacity) }
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
- /// Creates a new empty hash set which will use the given hasher to hash
- /// keys.
- ///
- /// The hash set is also created with the default initial capacity.
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut set = HashSet::with_hasher(h);
- /// set.insert(2u);
- /// ```
- #[inline]
- pub fn with_hasher(hasher: H) -> HashSet<T, H> {
- HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
- }
-
- /// Create an empty HashSet with space for at least `capacity`
- /// elements in the hash table, using `hasher` to hash the keys.
- ///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut set = HashSet::with_capacity_and_hasher(10u, h);
- /// set.insert(1i);
- /// ```
- #[inline]
- pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
- HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
- }
-
- /// Reserve space for at least `n` elements in the hash table.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::new();
- /// set.reserve(10);
- /// ```
- pub fn reserve(&mut self, n: uint) {
- self.map.reserve(n)
- }
-
- /// Returns true if the hash set contains a value equivalent to the
- /// given query value.
- ///
- /// # Example
- ///
- /// This is a slightly silly example where we define the number's
- /// parity as the equivalence class. It is important that the
- /// values hash the same, which is why we implement `Hash`.
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::Hash;
- /// use std::hash::sip::SipState;
- ///
- /// #[deriving(Eq, PartialEq)]
- /// struct EvenOrOdd {
- /// num: uint
- /// };
- ///
- /// impl Hash for EvenOrOdd {
- /// fn hash(&self, state: &mut SipState) {
- /// let parity = self.num % 2;
- /// parity.hash(state);
- /// }
- /// }
- ///
- /// impl Equiv<EvenOrOdd> for EvenOrOdd {
- /// fn equiv(&self, other: &EvenOrOdd) -> bool {
- /// self.num % 2 == other.num % 2
- /// }
- /// }
- ///
- /// let mut set = HashSet::new();
- /// set.insert(EvenOrOdd { num: 3u });
- ///
- /// assert!(set.contains_equiv(&EvenOrOdd { num: 3u }));
- /// assert!(set.contains_equiv(&EvenOrOdd { num: 5u }));
- /// assert!(!set.contains_equiv(&EvenOrOdd { num: 4u }));
- /// assert!(!set.contains_equiv(&EvenOrOdd { num: 2u }));
- ///
- /// ```
- pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
- self.map.contains_key_equiv(value)
- }
-
- /// An iterator visiting all elements in arbitrary order.
- /// Iterator element type is &'a T.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set = HashSet::new();
- /// set.insert("a");
- /// set.insert("b");
- ///
- /// // Will print in an arbitrary order.
- /// for x in set.iter() {
- /// println!("{}", x);
- /// }
- /// ```
- pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
- self.map.keys()
- }
-
- /// Creates a consuming iterator, that is, one that moves each value out
- /// of the set in arbitrary order. The set cannot be used after calling
- /// this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set = HashSet::new();
- /// set.insert("a".to_string());
- /// set.insert("b".to_string());
- ///
- /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
- /// let v: Vec<String> = set.move_iter().collect();
- ///
- /// // Will print in an arbitrary order.
- /// for x in v.iter() {
- /// println!("{}", x);
- /// }
- /// ```
- pub fn move_iter(self) -> SetMoveItems<T> {
- self.map.move_iter().map(|(k, _)| k)
- }
-
- /// Visit the values representing the difference.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Can be seen as `a - b`.
- /// for x in a.difference(&b) {
- /// println!("{}", x); // Print 1
- /// }
- ///
- /// let diff: HashSet<int> = a.difference(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [1i].iter().map(|&x| x).collect());
- ///
- /// // Note that difference is not symmetric,
- /// // and `b - a` means something else:
- /// let diff: HashSet<int> = b.difference(&a).map(|&x| x).collect();
- /// assert_eq!(diff, [4i].iter().map(|&x| x).collect());
- /// ```
- pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
- Repeat::new(other).zip(self.iter())
- .filter_map(|(other, elt)| {
- if !other.contains(elt) { Some(elt) } else { None }
- })
- }
-
- /// Visit the values representing the symmetric difference.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 1, 4 in arbitrary order.
- /// for x in a.symmetric_difference(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff1: HashSet<int> = a.symmetric_difference(&b).map(|&x| x).collect();
- /// let diff2: HashSet<int> = b.symmetric_difference(&a).map(|&x| x).collect();
- ///
- /// assert_eq!(diff1, diff2);
- /// assert_eq!(diff1, [1i, 4].iter().map(|&x| x).collect());
- /// ```
- pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
- -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
- self.difference(other).chain(other.difference(self))
- }
-
- /// Visit the values representing the intersection.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 2, 3 in arbitrary order.
- /// for x in a.intersection(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff: HashSet<int> = a.intersection(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [2i, 3].iter().map(|&x| x).collect());
- /// ```
- pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
- -> SetAlgebraItems<'a, T, H> {
- Repeat::new(other).zip(self.iter())
- .filter_map(|(other, elt)| {
- if other.contains(elt) { Some(elt) } else { None }
- })
- }
-
- /// Visit the values representing the union.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order.
- /// for x in a.union(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff: HashSet<int> = a.union(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [1i, 2, 3, 4].iter().map(|&x| x).collect());
- /// ```
- pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
- -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
- self.iter().chain(other.difference(self))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
- fn eq(&self, other: &HashSet<T, H>) -> bool {
- if self.len() != other.len() { return false; }
-
- self.iter().all(|key| other.contains(key))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
- fn len(&self) -> uint { self.map.len() }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
- fn clear(&mut self) { self.map.clear() }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
- fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
-
- fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
- self.iter().all(|v| !other.contains(v))
- }
-
- fn is_subset(&self, other: &HashSet<T, H>) -> bool {
- self.iter().all(|v| other.contains(v))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
- fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
-
- fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
-}
-
-
-impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- try!(write!(f, "{{"));
-
- for (i, x) in self.iter().enumerate() {
- if i != 0 { try!(write!(f, ", ")); }
- try!(write!(f, "{}", *x));
- }
-
- write!(f, "}}")
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
- fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
- let (lower, _) = iter.size_hint();
- let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
- set.extend(iter);
- set
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
- fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
- for k in iter {
- self.insert(k);
- }
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
- fn default() -> HashSet<T, H> {
- HashSet::with_hasher(Default::default())
- }
-}
-
-// `Repeat` is used to feed the filter closure an explicit capture
-// of a reference to the other set
-/// Set operations iterator
-pub type SetAlgebraItems<'a, T, H> =
- FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
- Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
-
-#[cfg(test)]
-mod test_map {
- use prelude::*;
-
- use super::HashMap;
- use cmp::Equiv;
- use hash;
- use iter::{Iterator,range_inclusive,range_step_inclusive};
- use cell::RefCell;
-
- struct KindaIntLike(int);
-
- impl Equiv<int> for KindaIntLike {
- fn equiv(&self, other: &int) -> bool {
- let KindaIntLike(this) = *self;
- this == *other
- }
- }
- impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
- fn hash(&self, state: &mut S) {
- let KindaIntLike(this) = *self;
- this.hash(state)
- }
- }
-
- #[test]
- fn test_create_capacity_zero() {
- let mut m = HashMap::with_capacity(0);
-
- assert!(m.insert(1i, 1i));
-
- assert!(m.contains_key(&1));
- assert!(!m.contains_key(&0));
- }
-
- #[test]
- fn test_insert() {
- let mut m = HashMap::new();
- assert_eq!(m.len(), 0);
- assert!(m.insert(1i, 2i));
- assert_eq!(m.len(), 1);
- assert!(m.insert(2i, 4i));
- assert_eq!(m.len(), 2);
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&2).unwrap(), 4);
- }
-
- local_data_key!(drop_vector: RefCell<Vec<int>>)
-
- #[deriving(Hash, PartialEq, Eq)]
- struct Dropable {
- k: uint
- }
-
-
- impl Dropable {
- fn new(k: uint) -> Dropable {
- let v = drop_vector.get().unwrap();
- v.borrow_mut().as_mut_slice()[k] += 1;
-
- Dropable { k: k }
- }
- }
-
- impl Drop for Dropable {
- fn drop(&mut self) {
- let v = drop_vector.get().unwrap();
- v.borrow_mut().as_mut_slice()[self.k] -= 1;
- }
- }
-
- #[test]
- fn test_drops() {
- drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
-
- {
- let mut m = HashMap::new();
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- }
- drop(v);
-
- for i in range(0u, 100) {
- let d1 = Dropable::new(i);
- let d2 = Dropable::new(i+100);
- m.insert(d1, d2);
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 1);
- }
- drop(v);
-
- for i in range(0u, 50) {
- let k = Dropable::new(i);
- let v = m.pop(&k);
-
- assert!(v.is_some());
-
- let v = drop_vector.get().unwrap();
- assert_eq!(v.borrow().as_slice()[i], 1);
- assert_eq!(v.borrow().as_slice()[i+100], 1);
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 50) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- assert_eq!(v.borrow().as_slice()[i+100], 0);
- }
-
- for i in range(50u, 100) {
- assert_eq!(v.borrow().as_slice()[i], 1);
- assert_eq!(v.borrow().as_slice()[i+100], 1);
- }
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- }
- }
-
- #[test]
- fn test_empty_pop() {
- let mut m: HashMap<int, bool> = HashMap::new();
- assert_eq!(m.pop(&0), None);
- }
-
- #[test]
- fn test_lots_of_insertions() {
- let mut m = HashMap::new();
-
- // Try this a few times to make sure we never screw up the hashmap's
- // internal state.
- for _ in range(0i, 10) {
- assert!(m.is_empty());
-
- for i in range_inclusive(1i, 1000) {
- assert!(m.insert(i, i));
-
- for j in range_inclusive(1, i) {
- let r = m.find(&j);
- assert_eq!(r, Some(&j));
- }
-
- for j in range_inclusive(i+1, 1000) {
- let r = m.find(&j);
- assert_eq!(r, None);
- }
- }
-
- for i in range_inclusive(1001i, 2000) {
- assert!(!m.contains_key(&i));
- }
-
- // remove forwards
- for i in range_inclusive(1i, 1000) {
- assert!(m.remove(&i));
-
- for j in range_inclusive(1, i) {
- assert!(!m.contains_key(&j));
- }
-
- for j in range_inclusive(i+1, 1000) {
- assert!(m.contains_key(&j));
- }
- }
-
- for i in range_inclusive(1i, 1000) {
- assert!(!m.contains_key(&i));
- }
-
- for i in range_inclusive(1i, 1000) {
- assert!(m.insert(i, i));
- }
-
- // remove backwards
- for i in range_step_inclusive(1000i, 1, -1) {
- assert!(m.remove(&i));
-
- for j in range_inclusive(i, 1000) {
- assert!(!m.contains_key(&j));
- }
-
- for j in range_inclusive(1, i-1) {
- assert!(m.contains_key(&j));
- }
- }
- }
- }
-
- #[test]
- fn test_find_mut() {
- let mut m = HashMap::new();
- assert!(m.insert(1i, 12i));
- assert!(m.insert(2i, 8i));
- assert!(m.insert(5i, 14i));
- let new = 100;
- match m.find_mut(&5) {
- None => fail!(), Some(x) => *x = new
- }
- assert_eq!(m.find(&5), Some(&new));
- }
-
- #[test]
- fn test_insert_overwrite() {
- let mut m = HashMap::new();
- assert!(m.insert(1i, 2i));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert!(!m.insert(1i, 3i));
- assert_eq!(*m.find(&1).unwrap(), 3);
- }
-
- #[test]
- fn test_insert_conflicts() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert!(m.insert(5i, 3i));
- assert!(m.insert(9i, 4i));
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert_eq!(*m.find(&1).unwrap(), 2);
- }
-
- #[test]
- fn test_conflict_remove() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert!(m.insert(5, 3));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert!(m.insert(9, 4));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert!(m.remove(&1));
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert_eq!(*m.find(&5).unwrap(), 3);
- }
-
- #[test]
- fn test_is_empty() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert!(!m.is_empty());
- assert!(m.remove(&1));
- assert!(m.is_empty());
- }
-
- #[test]
- fn test_pop() {
- let mut m = HashMap::new();
- m.insert(1i, 2i);
- assert_eq!(m.pop(&1), Some(2));
- assert_eq!(m.pop(&1), None);
- }
-
- #[test]
- #[allow(experimental)]
- fn test_pop_equiv() {
- let mut m = HashMap::new();
- m.insert(1i, 2i);
- assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
- assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
- }
-
- #[test]
- fn test_swap() {
- let mut m = HashMap::new();
- assert_eq!(m.swap(1i, 2i), None);
- assert_eq!(m.swap(1i, 3i), Some(2));
- assert_eq!(m.swap(1i, 4i), Some(3));
- }
-
- #[test]
- fn test_move_iter() {
- let hm = {
- let mut hm = HashMap::new();
-
- hm.insert('a', 1i);
- hm.insert('b', 2i);
-
- hm
- };
-
- let v = hm.move_iter().collect::<Vec<(char, int)>>();
- assert!([('a', 1), ('b', 2)] == v.as_slice() || [('b', 2), ('a', 1)] == v.as_slice());
- }
-
- #[test]
- fn test_iterate() {
- let mut m = HashMap::with_capacity(4);
- for i in range(0u, 32) {
- assert!(m.insert(i, i*2));
- }
- assert_eq!(m.len(), 32);
-
- let mut observed: u32 = 0;
-
- for (k, v) in m.iter() {
- assert_eq!(*v, *k * 2);
- observed |= 1 << *k;
- }
- assert_eq!(observed, 0xFFFF_FFFF);
- }
-
- #[test]
- fn test_keys() {
- let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
- let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
- assert_eq!(keys.len(), 3);
- assert!(keys.contains(&1));
- assert!(keys.contains(&2));
- assert!(keys.contains(&3));
- }
-
- #[test]
- fn test_values() {
- let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
- let values = map.values().map(|&v| v).collect::<Vec<char>>();
- assert_eq!(values.len(), 3);
- assert!(values.contains(&'a'));
- assert!(values.contains(&'b'));
- assert!(values.contains(&'c'));
- }
-
- #[test]
- fn test_find() {
- let mut m = HashMap::new();
- assert!(m.find(&1i).is_none());
- m.insert(1i, 2i);
- match m.find(&1) {
- None => fail!(),
- Some(v) => assert_eq!(*v, 2)
- }
- }
-
- #[test]
- fn test_eq() {
- let mut m1 = HashMap::new();
- m1.insert(1i, 2i);
- m1.insert(2i, 3i);
- m1.insert(3i, 4i);
-
- let mut m2 = HashMap::new();
- m2.insert(1i, 2i);
- m2.insert(2i, 3i);
-
- assert!(m1 != m2);
-
- m2.insert(3i, 4i);
-
- assert_eq!(m1, m2);
- }
-
- #[test]
- fn test_show() {
- let mut map: HashMap<int, int> = HashMap::new();
- let empty: HashMap<int, int> = HashMap::new();
-
- map.insert(1i, 2i);
- map.insert(3i, 4i);
-
- let map_str = format!("{}", map);
-
- assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
- assert_eq!(format!("{}", empty), "{}".to_string());
- }
-
- #[test]
- fn test_expand() {
- let mut m = HashMap::new();
-
- assert_eq!(m.len(), 0);
- assert!(m.is_empty());
-
- let mut i = 0u;
- let old_cap = m.table.capacity();
- while old_cap == m.table.capacity() {
- m.insert(i, i);
- i += 1;
- }
-
- assert_eq!(m.len(), i);
- assert!(!m.is_empty());
- }
-
- #[test]
- fn test_resize_policy() {
- let mut m = HashMap::new();
-
- assert_eq!(m.len(), 0);
- assert!(m.is_empty());
-
- let initial_cap = m.table.capacity();
- m.reserve(initial_cap * 2);
- let cap = m.table.capacity();
-
- assert_eq!(cap, initial_cap * 2);
-
- let mut i = 0u;
- for _ in range(0, cap * 3 / 4) {
- m.insert(i, i);
- i += 1;
- }
-
- assert_eq!(m.len(), i);
- assert_eq!(m.table.capacity(), cap);
-
- for _ in range(0, cap / 4) {
- m.insert(i, i);
- i += 1;
- }
-
- let new_cap = m.table.capacity();
- assert_eq!(new_cap, cap * 2);
-
- for _ in range(0, cap / 2) {
- i -= 1;
- m.remove(&i);
- assert_eq!(m.table.capacity(), new_cap);
- }
-
- for _ in range(0, cap / 2 - 1) {
- i -= 1;
- m.remove(&i);
- }
-
- assert_eq!(m.table.capacity(), cap);
- assert_eq!(m.len(), i);
- assert!(!m.is_empty());
- }
-
- #[test]
- fn test_find_equiv() {
- let mut m = HashMap::new();
-
- let (foo, bar, baz) = (1i,2i,3i);
- m.insert("foo".to_string(), foo);
- m.insert("bar".to_string(), bar);
- m.insert("baz".to_string(), baz);
-
-
- assert_eq!(m.find_equiv(&("foo")), Some(&foo));
- assert_eq!(m.find_equiv(&("bar")), Some(&bar));
- assert_eq!(m.find_equiv(&("baz")), Some(&baz));
-
- assert_eq!(m.find_equiv(&("qux")), None);
- }
-
- #[test]
- fn test_from_iter() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- for &(k, v) in xs.iter() {
- assert_eq!(map.find(&k), Some(&v));
- }
- }
-
- #[test]
- fn test_size_hint() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- let mut iter = map.iter();
-
- for _ in iter.by_ref().take(3) {}
-
- assert_eq!(iter.size_hint(), (3, Some(3)));
- }
-
- #[test]
- fn test_mut_size_hint() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- let mut iter = map.mut_iter();
-
- for _ in iter.by_ref().take(3) {}
-
- assert_eq!(iter.size_hint(), (3, Some(3)));
- }
-
- #[test]
- fn test_index() {
- let mut map: HashMap<int, int> = HashMap::new();
-
- map.insert(1, 2);
- map.insert(2, 1);
- map.insert(3, 4);
-
- assert_eq!(map[2], 1);
- }
-
- #[test]
- #[should_fail]
- fn test_index_nonexistent() {
- let mut map: HashMap<int, int> = HashMap::new();
-
- map.insert(1, 2);
- map.insert(2, 1);
- map.insert(3, 4);
-
- map[4];
- }
-}
-
-#[cfg(test)]
-mod test_set {
- use prelude::*;
-
- use super::HashSet;
- use slice::ImmutablePartialEqSlice;
- use collections::Collection;
-
- #[test]
- fn test_disjoint() {
- let mut xs = HashSet::new();
- let mut ys = HashSet::new();
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(xs.insert(5i));
- assert!(ys.insert(11i));
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(xs.insert(7));
- assert!(xs.insert(19));
- assert!(xs.insert(4));
- assert!(ys.insert(2));
- assert!(ys.insert(-11));
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(ys.insert(7));
- assert!(!xs.is_disjoint(&ys));
- assert!(!ys.is_disjoint(&xs));
- }
-
- #[test]
- fn test_subset_and_superset() {
- let mut a = HashSet::new();
- assert!(a.insert(0i));
- assert!(a.insert(5));
- assert!(a.insert(11));
- assert!(a.insert(7));
-
- let mut b = HashSet::new();
- assert!(b.insert(0i));
- assert!(b.insert(7));
- assert!(b.insert(19));
- assert!(b.insert(250));
- assert!(b.insert(11));
- assert!(b.insert(200));
-
- assert!(!a.is_subset(&b));
- assert!(!a.is_superset(&b));
- assert!(!b.is_subset(&a));
- assert!(!b.is_superset(&a));
-
- assert!(b.insert(5));
-
- assert!(a.is_subset(&b));
- assert!(!a.is_superset(&b));
- assert!(!b.is_subset(&a));
- assert!(b.is_superset(&a));
- }
-
- #[test]
- fn test_iterate() {
- let mut a = HashSet::new();
- for i in range(0u, 32) {
- assert!(a.insert(i));
- }
- let mut observed: u32 = 0;
- for k in a.iter() {
- observed |= 1 << *k;
- }
- assert_eq!(observed, 0xFFFF_FFFF);
- }
-
- #[test]
- fn test_intersection() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(11i));
- assert!(a.insert(1));
- assert!(a.insert(3));
- assert!(a.insert(77));
- assert!(a.insert(103));
- assert!(a.insert(5));
- assert!(a.insert(-5));
-
- assert!(b.insert(2i));
- assert!(b.insert(11));
- assert!(b.insert(77));
- assert!(b.insert(-9));
- assert!(b.insert(-42));
- assert!(b.insert(5));
- assert!(b.insert(3));
-
- let mut i = 0;
- let expected = [3, 5, 11, 77];
- for x in a.intersection(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_difference() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
-
- assert!(b.insert(3i));
- assert!(b.insert(9));
-
- let mut i = 0;
- let expected = [1, 5, 11];
- for x in a.difference(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_symmetric_difference() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
-
- assert!(b.insert(-2i));
- assert!(b.insert(3));
- assert!(b.insert(9));
- assert!(b.insert(14));
- assert!(b.insert(22));
-
- let mut i = 0;
- let expected = [-2, 1, 5, 11, 14, 22];
- for x in a.symmetric_difference(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_union() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
- assert!(a.insert(16));
- assert!(a.insert(19));
- assert!(a.insert(24));
-
- assert!(b.insert(-2i));
- assert!(b.insert(1));
- assert!(b.insert(5));
- assert!(b.insert(9));
- assert!(b.insert(13));
- assert!(b.insert(19));
-
- let mut i = 0;
- let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
- for x in a.union(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_from_iter() {
- let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
-
- let set: HashSet<int> = xs.iter().map(|&x| x).collect();
-
- for x in xs.iter() {
- assert!(set.contains(x));
- }
- }
-
- #[test]
- fn test_move_iter() {
- let hs = {
- let mut hs = HashSet::new();
-
- hs.insert('a');
- hs.insert('b');
-
- hs
- };
-
- let v = hs.move_iter().collect::<Vec<char>>();
- assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
- }
-
- #[test]
- fn test_eq() {
- // These constants once happened to expose a bug in insert().
- // I'm keeping them around to prevent a regression.
- let mut s1 = HashSet::new();
-
- s1.insert(1i);
- s1.insert(2);
- s1.insert(3);
-
- let mut s2 = HashSet::new();
-
- s2.insert(1i);
- s2.insert(2);
-
- assert!(s1 != s2);
-
- s2.insert(3);
-
- assert_eq!(s1, s2);
- }
-
- #[test]
- fn test_show() {
- let mut set: HashSet<int> = HashSet::new();
- let empty: HashSet<int> = HashSet::new();
-
- set.insert(1i);
- set.insert(2);
-
- let set_str = format!("{}", set);
-
- assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
- assert_eq!(format!("{}", empty), "{}".to_string());
- }
-}
-
-#[cfg(test)]
-mod bench {
- extern crate test;
- use prelude::*;
-
- use self::test::Bencher;
- use iter::{range_inclusive};
-
- #[bench]
- fn new_drop(b : &mut Bencher) {
- use super::HashMap;
-
- b.iter(|| {
- let m : HashMap<int, int> = HashMap::new();
- assert_eq!(m.len(), 0);
- })
- }
-
- #[bench]
- fn new_insert_drop(b : &mut Bencher) {
- use super::HashMap;
-
- b.iter(|| {
- let mut m = HashMap::new();
- m.insert(0i, 0i);
- assert_eq!(m.len(), 1);
- })
- }
-
- #[bench]
- fn insert(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1001;
-
- b.iter(|| {
- m.insert(k, k);
- k += 1;
- });
- }
-
- #[bench]
- fn find_existing(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- b.iter(|| {
- for i in range_inclusive(1i, 1000) {
- m.contains_key(&i);
- }
- });
- }
-
- #[bench]
- fn find_nonexisting(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- b.iter(|| {
- for i in range_inclusive(1001i, 2000) {
- m.contains_key(&i);
- }
- });
- }
-
- #[bench]
- fn hashmap_as_queue(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1i;
-
- b.iter(|| {
- m.pop(&k);
- m.insert(k + 1000, k + 1000);
- k += 1;
- });
- }
-
- #[bench]
- fn find_pop_insert(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1i;
-
- b.iter(|| {
- m.find(&(k + 400));
- m.find(&(k + 2000));
- m.pop(&k);
- m.insert(k + 1000, k + 1000);
- k += 1;
- })
- }
-}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![cfg(test)]
+
+extern crate test;
+use prelude::*;
+
+use self::test::Bencher;
+use iter::{range_inclusive};
+
+#[bench]
+fn new_drop(b: &mut Bencher) {
+ use super::HashMap;
+
+ b.iter(|| {
+        let m: HashMap<int, int> = HashMap::new();
+ assert_eq!(m.len(), 0);
+ })
+}
+
+#[bench]
+fn new_insert_drop(b: &mut Bencher) {
+ use super::HashMap;
+
+ b.iter(|| {
+ let mut m = HashMap::new();
+ m.insert(0i, 0i);
+ assert_eq!(m.len(), 1);
+ })
+}
+
+#[bench]
+fn grow_by_insertion(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1001;
+
+ b.iter(|| {
+ m.insert(k, k);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_existing(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in range_inclusive(1i, 1000) {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn find_nonexisting(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in range_inclusive(1001i, 2000) {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn hashmap_as_queue(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1i;
+
+ b.iter(|| {
+ m.pop(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_pop_insert(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1i;
+
+ b.iter(|| {
+ m.find(&(k + 400));
+ m.find(&(k + 2000));
+ m.pop(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ })
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp::{max, Eq, Equiv, PartialEq};
+use collections::{Collection, Mutable, MutableSet, Map, MutableMap};
+use default::Default;
+use fmt::Show;
+use fmt;
+use hash::{Hash, Hasher, RandomSipHasher};
+use iter::{Iterator, FromIterator, Extendable};
+use iter;
+use mem::replace;
+use num;
+use ops::{Deref, DerefMut};
+use option::{Some, None, Option};
+use result::{Ok, Err};
+use ops::Index;
+
+use super::table;
+use super::table::{
+ Bucket,
+ Empty,
+ Full,
+ FullBucket,
+ FullBucketImm,
+ FullBucketMut,
+ RawTable,
+ SafeHash
+};
+
+static INITIAL_LOG2_CAP: uint = 5;
+pub static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
+
+/// The default behavior of HashMap enforces a maximum load factor of 90.9%.
+/// This behavior is characterized by the following conditions:
+///
+/// - if size > 0.909 * capacity: grow
+/// - if size < 0.25 * capacity: shrink (if this won't bring capacity lower
+/// than the minimum)
+#[deriving(Clone)]
+struct DefaultResizePolicy {
+ /// Doubled minimal capacity. The capacity must never drop below
+ /// the minimum capacity. (The check happens before the capacity
+ /// is potentially halved.)
+ minimum_capacity2: uint
+}
+
+impl DefaultResizePolicy {
+ fn new(new_capacity: uint) -> DefaultResizePolicy {
+ DefaultResizePolicy {
+ minimum_capacity2: new_capacity << 1
+ }
+ }
+
+ #[inline]
+ fn capacity_range(&self, new_size: uint) -> (uint, uint) {
+ // Here, we are rephrasing the logic by specifying the ranges:
+ //
+ // - if `size * 1.1 < cap < size * 4`: don't resize
+ // - if `cap < minimum_capacity * 2`: don't shrink
+ // - otherwise, resize accordingly
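+        //
+        // For example, with `new_size` of 100 (and no larger reservation)
+        // this yields (110, 400): no resize is triggered while the
+        // capacity stays inside that band.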
+ ((new_size * 11) / 10, max(new_size << 2, self.minimum_capacity2))
+ }
+
+ #[inline]
+ fn reserve(&mut self, new_capacity: uint) {
+ self.minimum_capacity2 = new_capacity << 1;
+ }
+}
+
+// The main performance trick in this hashmap is called Robin Hood Hashing.
+// It gains its excellent performance from one essential operation:
+//
+// If an insertion collides with an existing element, and that element's
+// "probe distance" (how far away the element is from its ideal location)
+// is lower than how far we've already probed, swap the elements.
+//
+// This massively lowers variance in probe distance, and allows us to get very
+// high load factors with good performance. The 90% load factor I use is rather
+// conservative.
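+//
+// For example, if the key being inserted has already probed 5 slots, and the
+// resident of the next slot sits only 2 slots from its own ideal bucket, the
+// new key takes that slot and the displaced resident continues probing.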
+//
+// > Why a load factor of approximately 90%?
+//
+// In general, all the distances to initial buckets will converge on the mean.
+// At a load factor of α, the odds of finding the target bucket after k
+// probes are approximately 1-α^k. If we set this equal to 50% (since we converge
+// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
+// this down to make the math easier on the CPU and avoid its FPU.
+// Since on average we start the probing in the middle of a cache line, this
+// strategy pulls in two cache lines of hashes on every lookup. I think that's
+// pretty good, but if you want to trade off some space, it could go down to one
+// cache line on average with an α of 0.84.
+//
+// > Wait, what? Where did you get 1-α^k from?
+//
+// On the first probe, your odds of colliding with an existing element are α.
+// The odds of doing this twice in a row are approximately α^2. For three times,
+// α^3, etc. Therefore, the odds of colliding k times in a row are α^k, and the
+// odds of NOT colliding after k tries are 1-α^k.
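+//
+// Sanity-checking the numbers above: solving 1-α^8 = 0.5 gives
+// α = 0.5^(1/8) ≈ 0.917, which rounds down to the 10/11 ≈ 0.909 maximum
+// load factor that `DefaultResizePolicy` enforces.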
+//
+// The paper from 1986 cited below mentions an implementation which keeps track
+// of the distance-to-initial-bucket histogram. This approach is not suitable
+// for modern architectures because it requires maintaining an internal data
+// structure. This allows very good first guesses, but we are most concerned
+// with guessing entire cache lines, not individual indexes. Furthermore, array
+// accesses would no longer be linear and in one direction, as they are now.
+// This approach would also entail memory and cache pressure that would be
+// very difficult to measure properly in a microbenchmark.
+//
+// Future Improvements (FIXME!)
+// ============================
+//
+// Allow the load factor to be changed dynamically and/or at initialization.
+//
+// Also, would it be possible for us to reuse storage when growing the
+// underlying table? This is exactly the use case for 'realloc', and may
+// be worth exploring.
+//
+// Future Optimizations (FIXME!)
+// =============================
+//
+// Another possible design choice that I made without any real reason is
+// parameterizing the raw table over keys and values. Technically, all we need
+// is the size and alignment of keys and values, and the code should be just as
+// efficient (well, we might need one for power-of-two size and one for not...).
+// This has the potential to reduce code bloat in rust executables, without
+// really losing anything except 4 words (key size, key alignment, val size,
+// val alignment) which can be passed in to every call of a `RawTable` function.
+// This would definitely be an avenue worth exploring if people start complaining
+// about the size of rust executables.
+//
+// Annotate exceedingly likely branches in `table::make_hash`
+// and `search_hashed_generic` to reduce instruction cache pressure
+// and mispredictions once it becomes possible (blocked on issue #11092).
+//
+// Shrinking the table could simply reallocate in place after moving buckets
+// to the first half.
+//
+// The growth algorithm (fragment of the Proof of Correctness)
+// --------------------
+//
+// The growth algorithm is basically a fast path of the naive reinsertion-
+// during-resize algorithm. Other paths should never be taken.
+//
+// Consider growing a robin hood hashtable of capacity n. Normally, we do this
+// by allocating a new table of capacity `2n`, and then individually reinserting
+// each element in the old table into the new one. This guarantees that the
+// new table is a valid robin hood hashtable with all the desired statistical
+// properties. Note that the order in which we reinsert the elements should not
+// matter. For simplicity and efficiency, we will consider only linear
+// reinsertions, which consist of reinserting all elements in the old table
+// into the new one by increasing order of index. However, we will not be
+// starting our reinsertions from index 0 in general. If we start from index
+// i, for the purpose of reinsertion we will consider all elements with real
+// index j < i to have virtual index n + j.
+//
+// Our hash generation scheme consists of generating a 64-bit hash and
+// truncating the most significant bits. When moving to the new table, we
+// simply introduce a new bit to the front of the hash. Therefore, if an
+// element has ideal index i in the old table, it can have one of two ideal
+// locations in the new table. If the new bit is 0, then the new ideal index
+// is i. If the new bit is 1, then the new ideal index is n + i. Intuitively,
+// we are producing two independent tables of size n, and for each element we
+// independently choose which table to insert it into with equal probability.
+// However, rather than wrapping around on overflowing their indexes, the
+// first table overflows into the second, and the second wraps around into
+// the first. Visually, our new table will look something like:
+//
+// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
+//
+// Where x's are elements inserted into the first table, y's are elements
+// inserted into the second, and _'s are empty sections. We now define a few
+// key concepts that we will use later. Note that this is a very abstract
+// perspective of the table. A real resized table would be at least half
+// empty.
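+//
+// For example, with n = 8 an element whose ideal index in the old table was
+// 5 gains one leading hash bit when the table doubles: its new ideal index
+// is 5 if that bit is 0, or 8 + 5 = 13 if that bit is 1.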
+//
+// Theorem: A linear robin hood reinsertion from the first ideal element
+// produces identical results to a linear naive reinsertion from the same
+// element.
+//
+// FIXME(Gankro, pczarn): review the proof and put it all in a separate doc.rs
+
+/// A hash map implementation which uses linear probing with Robin
+/// Hood bucket stealing.
+///
+/// The hashes are all keyed by the task-local random number generator
+/// on creation by default. This means that the ordering of the keys is
+/// randomized, but it also makes the tables more resistant to
+/// denial-of-service attacks (Hash DoS). This behaviour can be
+/// overridden with one of the constructors.
+///
+/// It is required that the keys implement the `Eq` and `Hash` traits, although
+/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
+///
+/// Relevant papers/articles:
+///
+/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
+/// 2. Emmanuel Goossaert. ["Robin Hood
+/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
+/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
+/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `HashMap<&str, &str>` in this example).
+/// let mut book_reviews = HashMap::new();
+///
+/// // review some books.
+/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
+/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
+/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
+/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
+///
+/// // check for a specific one.
+/// if !book_reviews.contains_key(&("Les Misérables")) {
+/// println!("We've got {} reviews, but Les Misérables ain't one.",
+/// book_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
+///
+/// // look up the values associated with some keys.
+/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
+/// for book in to_find.iter() {
+/// match book_reviews.find(book) {
+/// Some(review) => println!("{}: {}", *book, *review),
+/// None => println!("{} is unreviewed.", *book)
+/// }
+/// }
+///
+/// // iterate over everything.
+/// for (book, review) in book_reviews.iter() {
+/// println!("{}: \"{}\"", *book, *review);
+/// }
+/// ```
+///
+/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
+/// We must also derive `PartialEq`.
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// #[deriving(Hash, Eq, PartialEq, Show)]
+/// struct Viking<'a> {
+/// name: &'a str,
+/// power: uint,
+/// }
+///
+/// let mut vikings = HashMap::new();
+///
+/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
+/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
+/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
+///
+/// // Use derived implementation to print the vikings.
+/// for (land, viking) in vikings.iter() {
+/// println!("{} at {}", viking, land);
+/// }
+/// ```
+#[deriving(Clone)]
+pub struct HashMap<K, V, H = RandomSipHasher> {
+ // All hashes are keyed on these values, to prevent hash collision attacks.
+ hasher: H,
+
+ table: RawTable<K, V>,
+
+ // We keep this at the end since it might as well have tail padding.
+ resize_policy: DefaultResizePolicy,
+}
+
+/// Search for a pre-hashed key.
+fn search_hashed_generic<K, V, M: Deref<RawTable<K, V>>>(table: M,
+ hash: &SafeHash,
+ is_match: |&K| -> bool)
+ -> SearchResult<K, V, M> {
+ let size = table.size();
+ let mut probe = Bucket::new(table, hash);
+ let ib = probe.index();
+
+ while probe.index() != ib + size {
+ let full = match probe.peek() {
+ Empty(b) => return TableRef(b.into_table()), // hit an empty bucket
+ Full(b) => b
+ };
+
+ if full.distance() + ib < full.index() {
+ // We can finish the search early if we hit any bucket
+ // with a lower distance to initial bucket than we've probed.
+ return TableRef(full.into_table());
+ }
+
+        // If the hashes don't match, this bucket can't hold our key.
+ if *hash == full.hash() {
+ let matched = {
+ let (k, _) = full.read();
+ is_match(k)
+ };
+
+            // The hashes match; if the key matches too, this is our entry.
+ if matched {
+ return FoundExisting(full);
+ }
+ }
+
+ probe = full.next();
+ }
+
+ TableRef(probe.into_table())
+}
+
+fn search_hashed<K: Eq, V, M: Deref<RawTable<K, V>>>(table: M, hash: &SafeHash, k: &K)
+ -> SearchResult<K, V, M> {
+ search_hashed_generic(table, hash, |k_| *k == *k_)
+}
+
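+/// Removes the value behind `starting_bucket` and then performs "backward
+/// shift deletion": entries following the freed slot that are displaced from
+/// their ideal bucket are shifted one slot back, until an entry with a probe
+/// distance of zero ends the chain. This keeps lookups correct without
+/// needing tombstones (see the "backward shift deletion" article linked in
+/// the `HashMap` docs below).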
+fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> V {
+ let (empty, _k, retval) = starting_bucket.take();
+ let mut gap = match empty.gap_peek() {
+ Some(b) => b,
+ None => return retval
+ };
+
+ while gap.full().distance() != 0 {
+ gap = match gap.shift() {
+ Some(b) => b,
+ None => break
+ };
+ }
+
+ // Now we've done all our shifting. Return the value we grabbed earlier.
+ return retval;
+}
+
+/// Perform robin hood bucket stealing at the given `bucket`. You must
+/// also pass the position of that bucket's initial bucket so we don't have
+/// to recalculate it.
+///
+/// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable.
+fn robin_hood<'a, K: 'a, V: 'a>(mut bucket: FullBucketMut<'a, K, V>,
+ mut ib: uint,
+ mut hash: SafeHash,
+ mut k: K,
+ mut v: V)
+ -> &'a mut V {
+ let starting_index = bucket.index();
+ let size = {
+ let table = bucket.table(); // FIXME "lifetime too short".
+ table.size()
+ };
+    // There can be at most `size - distance` buckets to displace, because
+    // in the worst case there are `size` elements and we are already
+    // `distance` buckets away from the initial one.
+ let idx_end = starting_index + size - bucket.distance();
+
+ loop {
+ let (old_hash, old_key, old_val) = bucket.replace(hash, k, v);
+ loop {
+ let probe = bucket.next();
+ assert!(probe.index() != idx_end);
+
+ let full_bucket = match probe.peek() {
+ table::Empty(bucket) => {
+ // Found a hole!
+ let b = bucket.put(old_hash, old_key, old_val);
+ // Now that it's stolen, just read the value's pointer
+ // right out of the table!
+ let (_, v) = Bucket::at_index(b.into_table(), starting_index).peek()
+ .expect_full()
+ .into_mut_refs();
+ return v;
+ },
+ table::Full(bucket) => bucket
+ };
+
+ let probe_ib = full_bucket.index() - full_bucket.distance();
+
+ bucket = full_bucket;
+
+ // Robin hood! Steal the spot.
+ if ib < probe_ib {
+ ib = probe_ib;
+ hash = old_hash;
+ k = old_key;
+ v = old_val;
+ break;
+ }
+ }
+ }
+}
+
+/// A result that works like Option<FullBucket<..>> but preserves
+/// the reference that grants us access to the table in any case.
+enum SearchResult<K, V, M> {
+ // This is an entry that holds the given key:
+ FoundExisting(FullBucket<K, V, M>),
+
+ // There was no such entry. The reference is given back:
+ TableRef(M)
+}
+
+impl<K, V, M> SearchResult<K, V, M> {
+ fn into_option(self) -> Option<FullBucket<K, V, M>> {
+ match self {
+ FoundExisting(bucket) => Some(bucket),
+ TableRef(_) => None
+ }
+ }
+}
+
+/// A newtyped mutable reference to the hashmap that allows e.g. Deref to be
+/// implemented without making changes to the visible interface of HashMap.
+/// Used internally because it's accepted by the search functions above.
+struct MapMutRef<'a, K: 'a, V: 'a, H: 'a> {
+ map_ref: &'a mut HashMap<K, V, H>
+}
+
+impl<'a, K, V, H> Deref<RawTable<K, V>> for MapMutRef<'a, K, V, H> {
+ fn deref(&self) -> &RawTable<K, V> {
+ &self.map_ref.table
+ }
+}
+
+impl<'a, K, V, H> DerefMut<RawTable<K, V>> for MapMutRef<'a, K, V, H> {
+ fn deref_mut(&mut self) -> &mut RawTable<K, V> {
+ &mut self.map_ref.table
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
+ fn make_hash<X: Hash<S>>(&self, x: &X) -> SafeHash {
+ table::make_hash(&self.hasher, x)
+ }
+
+ fn search_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, q: &Q)
+ -> Option<FullBucketImm<'a, K, V>> {
+ let hash = self.make_hash(q);
+ search_hashed_generic(&self.table, &hash, |k| q.equiv(k)).into_option()
+ }
+
+ fn search_equiv_mut<'a, Q: Hash<S> + Equiv<K>>(&'a mut self, q: &Q)
+ -> Option<FullBucketMut<'a, K, V>> {
+ let hash = self.make_hash(q);
+ search_hashed_generic(&mut self.table, &hash, |k| q.equiv(k)).into_option()
+ }
+
+    /// Search for a key, yielding the matching bucket if the key is found in
+    /// the hashtable. If you already have the hash for the key lying around,
+    /// use search_hashed.
+ fn search<'a>(&'a self, k: &K) -> Option<FullBucketImm<'a, K, V>> {
+ let hash = self.make_hash(k);
+ search_hashed(&self.table, &hash, k).into_option()
+ }
+
+ fn search_mut<'a>(&'a mut self, k: &K) -> Option<FullBucketMut<'a, K, V>> {
+ let hash = self.make_hash(k);
+ search_hashed(&mut self.table, &hash, k).into_option()
+ }
+
+    // The caller should ensure that the invariants of Robin Hood Hashing hold.
+ fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
+ let cap = self.table.capacity();
+ let mut buckets = Bucket::new(&mut self.table, &hash);
+ let ib = buckets.index();
+
+ while buckets.index() != ib + cap {
+ // We don't need to compare hashes for value swap.
+ // Not even DIBs for Robin Hood.
+ buckets = match buckets.peek() {
+ Empty(empty) => {
+ empty.put(hash, k, v);
+ return;
+ }
+ Full(b) => b.into_bucket()
+ };
+ buckets.next();
+ }
+ fail!("Internal HashMap error: Out of space.");
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
+ /// Return the number of elements in the map.
+ fn len(&self) -> uint { self.table.size() }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
+ /// Clear the map, removing all key-value pairs. Keeps the allocated memory
+ /// for reuse.
+ fn clear(&mut self) {
+ // Prevent reallocations from happening from now on. Makes it possible
+ // for the map to be reused but has a downside: reserves permanently.
+ self.resize_policy.reserve(self.table.size());
+
+ let cap = self.table.capacity();
+ let mut buckets = Bucket::first(&mut self.table);
+
+ while buckets.index() != cap {
+ buckets = match buckets.peek() {
+ Empty(b) => b.next(),
+ Full(full) => {
+ let (b, _, _) = full.take();
+ b.next()
+ }
+ };
+ }
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
+ fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
+ self.search(k).map(|bucket| {
+ let (_, v) = bucket.into_refs();
+ v
+ })
+ }
+
+ fn contains_key(&self, k: &K) -> bool {
+ self.search(k).is_some()
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
+ fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
+ match self.search_mut(k) {
+ Some(bucket) => {
+ let (_, v) = bucket.into_mut_refs();
+ Some(v)
+ }
+ _ => None
+ }
+ }
+
+ fn swap(&mut self, k: K, v: V) -> Option<V> {
+ let hash = self.make_hash(&k);
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+
+ let mut retval = None;
+ self.insert_or_replace_with(hash, k, v, |_, val_ref, val| {
+ retval = Some(replace(val_ref, val));
+ });
+ retval
+ }
+
+
+ fn pop(&mut self, k: &K) -> Option<V> {
+ if self.table.size() == 0 {
+ return None
+ }
+
+ let potential_new_size = self.table.size() - 1;
+ self.make_some_room(potential_new_size);
+
+ self.search_mut(k).map(|bucket| {
+ pop_internal(bucket)
+ })
+ }
+}
+
+impl<K: Hash + Eq, V> HashMap<K, V, RandomSipHasher> {
+ /// Create an empty HashMap.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+    /// let mut map: HashMap<&str, int> = HashMap::new();
+ /// ```
+ #[inline]
+ pub fn new() -> HashMap<K, V, RandomSipHasher> {
+ let hasher = RandomSipHasher::new();
+ HashMap::with_hasher(hasher)
+ }
+
+ /// Creates an empty hash map with the given initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomSipHasher> {
+ let hasher = RandomSipHasher::new();
+ HashMap::with_capacity_and_hasher(capacity, hasher)
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
+ /// Creates an empty hashmap which will use the given hasher to hash keys.
+ ///
+ /// The created map has the default initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut map = HashMap::with_hasher(h);
+ /// map.insert(1i, 2u);
+ /// ```
+ #[inline]
+ pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
+ HashMap {
+ hasher: hasher,
+ resize_policy: DefaultResizePolicy::new(INITIAL_CAPACITY),
+ table: RawTable::new(0),
+ }
+ }
+
+ /// Create an empty HashMap with space for at least `capacity`
+ /// elements, using `hasher` to hash the keys.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow HashMaps to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut map = HashMap::with_capacity_and_hasher(10, h);
+ /// map.insert(1i, 2u);
+ /// ```
+ #[inline]
+ pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
+ let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
+ HashMap {
+ hasher: hasher,
+ resize_policy: DefaultResizePolicy::new(cap),
+ table: RawTable::new(cap),
+ }
+ }
+
+ /// The hashtable will never try to shrink below this size. You can use
+ /// this function to reduce reallocations if your hashtable frequently
+ /// grows and shrinks by large amounts.
+ ///
+ /// This function has no effect on the operational semantics of the
+ /// hashtable, only on performance.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::new();
+ /// map.reserve(10);
+ /// ```
+ pub fn reserve(&mut self, new_minimum_capacity: uint) {
+ let cap = num::next_power_of_two(
+ max(INITIAL_CAPACITY, new_minimum_capacity));
+
+ self.resize_policy.reserve(cap);
+
+ if self.table.capacity() < cap {
+ self.resize(cap);
+ }
+ }
+
+ /// Resizes the internal vectors to a new capacity. It's your responsibility to:
+ /// 1) Make sure the new capacity is enough for all the elements, accounting
+ /// for the load factor.
+ /// 2) Ensure new_capacity is a power of two.
+ fn resize(&mut self, new_capacity: uint) {
+ assert!(self.table.size() <= new_capacity);
+ assert!(num::is_power_of_two(new_capacity));
+
+ let mut old_table = replace(&mut self.table, RawTable::new(new_capacity));
+ let old_size = old_table.size();
+
+ if old_table.capacity() == 0 || old_table.size() == 0 {
+ return;
+ }
+
+ if new_capacity < old_table.capacity() {
+ // Shrink the table. Naive algorithm for resizing:
+ for (h, k, v) in old_table.move_iter() {
+ self.insert_hashed_nocheck(h, k, v);
+ }
+ } else {
+ // Grow the table.
+ // Specialization of the other branch.
+ let mut bucket = Bucket::first(&mut old_table);
+
+ // "So a few of the first shall be last: for many be called,
+ // but few chosen."
+ //
+ // We'll most likely encounter a few buckets at the beginning that
+ // have their initial buckets near the end of the table. They were
+ // placed at the beginning as the probe wrapped around the table
+ // during insertion. We must skip forward to a bucket that won't
+ // get reinserted too early and won't unfairly steal another's spot.
+ // This eliminates the need for robin hood.
+ loop {
+ bucket = match bucket.peek() {
+ Full(full) => {
+ if full.distance() == 0 {
+ // This bucket occupies its ideal spot.
+ // It indicates the start of another "cluster".
+ bucket = full.into_bucket();
+ break;
+ }
+ // Leaving this bucket in the last cluster for later.
+ full.into_bucket()
+ }
+ Empty(b) => {
+ // Encountered a hole between clusters.
+ b.into_bucket()
+ }
+ };
+ bucket.next();
+ }
+
+ // This is how the buckets might be laid out in memory:
+ // ($ marks an initialized bucket)
+ // ________________
+ // |$$$_$$$$$$_$$$$$|
+ //
+ // But we've skipped the entire initial cluster of buckets
+ // and will continue iteration in this order:
+ // ________________
+ // |$$$$$$_$$$$$
+ // ^ wrap around once end is reached
+ // ________________
+ // $$$_____________|
+ // ^ exit once table.size == 0
+ loop {
+ bucket = match bucket.peek() {
+ Full(bucket) => {
+ let h = bucket.hash();
+ let (b, k, v) = bucket.take();
+ self.insert_hashed_ordered(h, k, v);
+ {
+ let t = b.table(); // FIXME "lifetime too short".
+ if t.size() == 0 { break }
+ };
+ b.into_bucket()
+ }
+ Empty(b) => b.into_bucket()
+ };
+ bucket.next();
+ }
+ }
+
+ assert_eq!(self.table.size(), old_size);
+ }
+
+ /// Performs any necessary resize operations, such that there's space for
+ /// new_size elements.
+ fn make_some_room(&mut self, new_size: uint) {
+ let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
+ let cap = self.table.capacity();
+
+ // An invalid value shouldn't make us run out of space.
+ debug_assert!(grow_at >= new_size);
+
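+ // Doubling on growth and halving on shrinkage keeps the capacity a
+ // power of two (assuming INITIAL_CAPACITY is one), which `resize`
+ // asserts.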
+ if cap <= grow_at {
+ let new_capacity = max(cap << 1, INITIAL_CAPACITY);
+ self.resize(new_capacity);
+ } else if shrink_at <= cap {
+ let new_capacity = cap >> 1;
+ self.resize(new_capacity);
+ }
+ }
+
+ /// Insert a pre-hashed key-value pair, without first checking
+ /// that there's enough room in the buckets. Returns a reference to the
+ /// newly inserted value.
+ ///
+ /// If the key already exists, the hashtable is left untouched: a
+ /// reference to the existing element is returned and the new value is
+ /// dropped.
+ fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
+ self.insert_or_replace_with(hash, k, v, |_, _, _| ())
+ }
+
+ fn insert_or_replace_with<'a>(&'a mut self,
+ hash: SafeHash,
+ k: K,
+ v: V,
+ found_existing: |&mut K, &mut V, V|)
+ -> &'a mut V {
+ // Worst case, we'll find one empty bucket among `size + 1` buckets.
+ let size = self.table.size();
+ let mut probe = Bucket::new(&mut self.table, &hash);
+ let ib = probe.index();
+
+ loop {
+ let mut bucket = match probe.peek() {
+ Empty(bucket) => {
+ // Found a hole!
+ let bucket = bucket.put(hash, k, v);
+ let (_, val) = bucket.into_mut_refs();
+ return val;
+ },
+ Full(bucket) => bucket
+ };
+
+ if bucket.hash() == hash {
+ let found_match = {
+ let (bucket_k, _) = bucket.read_mut();
+ k == *bucket_k
+ };
+ if found_match {
+ let (bucket_k, bucket_v) = bucket.into_mut_refs();
+ debug_assert!(k == *bucket_k);
+ // Key already exists. Get its reference.
+ found_existing(bucket_k, bucket_v, v);
+ return bucket_v;
+ }
+ }
+
+ let robin_ib = bucket.index() as int - bucket.distance() as int;
+
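+ // `robin_ib` is the occupant's ideal bucket index. If it is greater
+ // than our own ideal index `ib`, the occupant is closer to its ideal
+ // spot than we would be, so Robin Hood says we displace it.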
+ if (ib as int) < robin_ib {
+ // Found a luckier bucket than me. Better steal his spot.
+ return robin_hood(bucket, robin_ib as uint, hash, k, v);
+ }
+
+ probe = bucket.next();
+ assert!(probe.index() != ib + size + 1);
+ }
+ }
+
+ /// Inserts an element which has already been hashed, returning a reference
+ /// to that element inside the hashtable. This is more efficient than using
+ /// `insert`, since the key will not be rehashed.
+ fn insert_hashed(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+ self.insert_hashed_nocheck(hash, k, v)
+ }
+
+ /// Return the value corresponding to the key in the map, or insert
+ /// and return the value if it doesn't exist.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 1i with key "a"
+ /// assert_eq!(*map.find_or_insert("a", 1i), 1);
+ ///
+ /// // Find the existing key
+ /// assert_eq!(*map.find_or_insert("a", -2), 1);
+ /// ```
+ pub fn find_or_insert(&mut self, k: K, v: V) -> &mut V {
+ self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
+ }
+
+ /// Return the value corresponding to the key in the map, or create,
+ /// insert, and return a new value if it doesn't exist.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 10 with key 2
+ /// assert_eq!(*map.find_or_insert_with(2i, |&key| 5 * key as uint), 10u);
+ ///
+ /// // Find the existing key
+ /// assert_eq!(*map.find_or_insert_with(2, |&key| key as uint), 10);
+ /// ```
+ pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
+ -> &'a mut V {
+ self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
+ }
+
+ /// Insert a key-value pair into the map if the key is not already present.
+ /// Otherwise, modify the existing value for the key.
+ /// Returns the new or modified value for the key.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 2 with key "a"
+ /// assert_eq!(*map.insert_or_update_with("a", 2u, |_key, val| *val = 3), 2);
+ ///
+ /// // Update and return the existing value
+ /// assert_eq!(*map.insert_or_update_with("a", 9, |_key, val| *val = 7), 7);
+ /// assert_eq!(map["a"], 7);
+ /// ```
+ pub fn insert_or_update_with<'a>(
+ &'a mut self,
+ k: K,
+ v: V,
+ f: |&K, &mut V|)
+ -> &'a mut V {
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+
+ let hash = self.make_hash(&k);
+ self.insert_or_replace_with(hash, k, v, |kref, vref, _v| f(kref, vref))
+ }
+
+ /// Modify and return the value corresponding to the key in the map, or
+ /// insert and return a new value if it doesn't exist.
+ ///
+ /// This method allows for all insertion behaviours of a hashmap;
+ /// see methods like
+ /// [`insert`](../trait.MutableMap.html#tymethod.insert),
+ /// [`find_or_insert`](#method.find_or_insert) and
+ /// [`insert_or_update_with`](#method.insert_or_update_with)
+ /// for less general and more friendly variations of this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// // map some strings to vectors of strings
+ /// let mut map = HashMap::new();
+ /// map.insert("a key", vec!["value"]);
+ /// map.insert("z key", vec!["value"]);
+ ///
+ /// let new = vec!["a key", "b key", "z key"];
+ ///
+ /// for k in new.move_iter() {
+ /// map.find_with_or_insert_with(
+ /// k, "new value",
+ /// // if the key already exists, either prepend or append this
+ /// // new value based on the first letter of the key.
+ /// |key, already, new| {
+ /// if key.as_slice().starts_with("z") {
+ /// already.insert(0, new);
+ /// } else {
+ /// already.push(new);
+ /// }
+ /// },
+ /// // if the key doesn't exist in the map yet, add it in
+ /// // the obvious way.
+ /// |_k, v| vec![v]);
+ /// }
+ ///
+ /// assert_eq!(map.len(), 3);
+ /// assert_eq!(map["a key"], vec!["value", "new value"]);
+ /// assert_eq!(map["b key"], vec!["new value"]);
+ /// assert_eq!(map["z key"], vec!["new value", "value"]);
+ /// ```
+ pub fn find_with_or_insert_with<'a, A>(&'a mut self,
+ k: K,
+ a: A,
+ found: |&K, &mut V, A|,
+ not_found: |&K, A| -> V)
+ -> &'a mut V
+ {
+ let hash = self.make_hash(&k);
+ let this = MapMutRef { map_ref: self };
+
+ match search_hashed(this, &hash, &k) {
+ FoundExisting(bucket) => {
+ let (_, v_ref) = bucket.into_mut_refs();
+ found(&k, v_ref, a);
+ v_ref
+ }
+ TableRef(this) => {
+ let v = not_found(&k, a);
+ this.map_ref.insert_hashed(hash, k, v)
+ }
+ }
+ }
+
+ /// Retrieves a value for the given key.
+ /// See [`find`](../trait.Map.html#tymethod.find) for a non-failing alternative.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![allow(deprecated)]
+ ///
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// assert_eq!(map.get(&"a"), &1);
+ /// ```
+ #[deprecated = "prefer indexing instead, e.g., map[key]"]
+ pub fn get<'a>(&'a self, k: &K) -> &'a V {
+ match self.find(k) {
+ Some(v) => v,
+ None => fail!("no entry found for key")
+ }
+ }
+
+ /// Retrieves a mutable value for the given key.
+ /// See [`find_mut`](../trait.MutableMap.html#tymethod.find_mut) for a non-failing alternative.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// {
+ /// // val mutably borrows map, preventing other use during its lifetime
+ /// let val = map.get_mut(&"a");
+ /// *val = 40;
+ /// }
+ /// assert_eq!(map["a"], 40);
+ ///
+ /// // A more direct way could be:
+ /// *map.get_mut(&"a") = -2;
+ /// assert_eq!(map["a"], -2);
+ /// ```
+ pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
+ match self.find_mut(k) {
+ Some(v) => v,
+ None => fail!("no entry found for key")
+ }
+ }
+
+ /// Return true if the map contains a value for the specified key,
+ /// using equivalence.
+ ///
+ /// See [pop_equiv](#method.pop_equiv) for an extended example.
+ pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
+ self.search_equiv(key).is_some()
+ }
+
+ /// Return the value corresponding to the key in the map, using
+ /// equivalence.
+ ///
+ /// See [pop_equiv](#method.pop_equiv) for an extended example.
+ pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
+ match self.search_equiv(k) {
+ None => None,
+ Some(bucket) => {
+ let (_, v_ref) = bucket.into_refs();
+ Some(v_ref)
+ }
+ }
+ }
+
+ /// Remove an equivalent key from the map, returning the value at the
+ /// key if the key was previously in the map.
+ ///
+ /// # Example
+ ///
+ /// This is a slightly silly example where we define the number's
+ /// parity as the equivalence class. It is important that the
+ /// values hash the same, which is why we implement `Hash`.
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::Hash;
+ /// use std::hash::sip::SipState;
+ ///
+ /// #[deriving(Eq, PartialEq)]
+ /// struct EvenOrOdd {
+ /// num: uint
+ /// };
+ ///
+ /// impl Hash for EvenOrOdd {
+ /// fn hash(&self, state: &mut SipState) {
+ /// let parity = self.num % 2;
+ /// parity.hash(state);
+ /// }
+ /// }
+ ///
+ /// impl Equiv<EvenOrOdd> for EvenOrOdd {
+ /// fn equiv(&self, other: &EvenOrOdd) -> bool {
+ /// self.num % 2 == other.num % 2
+ /// }
+ /// }
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(EvenOrOdd { num: 3 }, "foo");
+ ///
+ /// assert!(map.contains_key_equiv(&EvenOrOdd { num: 1 }));
+ /// assert!(!map.contains_key_equiv(&EvenOrOdd { num: 4 }));
+ ///
+ /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 5 }), Some(&"foo"));
+ /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 2 }), None);
+ ///
+ /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 1 }), Some("foo"));
+ /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 2 }), None);
+ ///
+ /// ```
+ #[experimental]
+ pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
+ if self.table.size() == 0 {
+ return None
+ }
+
+ let potential_new_size = self.table.size() - 1;
+ self.make_some_room(potential_new_size);
+
+ match self.search_equiv_mut(k) {
+ Some(bucket) => {
+ Some(pop_internal(bucket))
+ }
+ _ => None
+ }
+ }
+
+ /// An iterator visiting all keys in arbitrary order.
+ /// Iterator element type is `&'a K`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for key in map.keys() {
+ /// println!("{}", key);
+ /// }
+ /// ```
+ pub fn keys(&self) -> Keys<K, V> {
+ self.iter().map(|(k, _v)| k)
+ }
+
+ /// An iterator visiting all values in arbitrary order.
+ /// Iterator element type is `&'a V`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for val in map.values() {
+ /// println!("{}", val);
+ /// }
+ /// ```
+ pub fn values(&self) -> Values<K, V> {
+ self.iter().map(|(_k, v)| v)
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order.
+ /// Iterator element type is `(&'a K, &'a V)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {} val: {}", key, val);
+ /// }
+ /// ```
+ pub fn iter(&self) -> Entries<K, V> {
+ Entries { inner: self.table.iter() }
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order,
+ /// with mutable references to the values.
+ /// Iterator element type is `(&'a K, &'a mut V)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// // Update all values
+ /// for (_, val) in map.mut_iter() {
+ /// *val *= 2;
+ /// }
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {} val: {}", key, val);
+ /// }
+ /// ```
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ MutEntries { inner: self.table.mut_iter() }
+ }
+
+ /// Creates a consuming iterator, that is, one that moves each key-value
+ /// pair out of the map in arbitrary order. The map cannot be used after
+ /// calling this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// // Not possible with .iter()
+ /// let vec: Vec<(&str, int)> = map.move_iter().collect();
+ /// ```
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ MoveEntries {
+ inner: self.table.move_iter().map(|(_, k, v)| (k, v))
+ }
+ }
+}
+
+impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
+ /// Return a copy of the value corresponding to the key.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<uint, String> = HashMap::new();
+ /// map.insert(1u, "foo".to_string());
+ /// let s: String = map.find_copy(&1).unwrap();
+ /// ```
+ pub fn find_copy(&self, k: &K) -> Option<V> {
+ self.find(k).map(|v| (*v).clone())
+ }
+
+ /// Return a copy of the value corresponding to the key.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<uint, String> = HashMap::new();
+ /// map.insert(1u, "foo".to_string());
+ /// let s: String = map.get_copy(&1);
+ /// ```
+ pub fn get_copy(&self, k: &K) -> V {
+ (*self.get(k)).clone()
+ }
+}
+
+impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
+ fn eq(&self, other: &HashMap<K, V, H>) -> bool {
+ if self.len() != other.len() { return false; }
+
+ self.iter().all(|(key, value)|
+ other.find(key).map_or(false, |v| *value == *v)
+ )
+ }
+}
+
+impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
+
+impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "{{"));
+
+ for (i, (k, v)) in self.iter().enumerate() {
+ if i != 0 { try!(write!(f, ", ")); }
+ try!(write!(f, "{}: {}", *k, *v));
+ }
+
+ write!(f, "}}")
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
+ fn default() -> HashMap<K, V, H> {
+ HashMap::with_hasher(Default::default())
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Index<K, V> for HashMap<K, V, H> {
+ #[inline]
+ fn index<'a>(&'a self, index: &K) -> &'a V {
+ self.get(index)
+ }
+}
+
+// FIXME(#12825) Indexing will always try IndexMut first and that causes issues.
+/*impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> ops::IndexMut<K, V> for HashMap<K, V, H> {
+ #[inline]
+ fn index_mut<'a>(&'a mut self, index: &K) -> &'a mut V {
+ self.get_mut(index)
+ }
+}*/
+
+/// HashMap iterator
+pub struct Entries<'a, K: 'a, V: 'a> {
+ inner: table::Entries<'a, K, V>
+}
+
+/// HashMap mutable values iterator
+pub struct MutEntries<'a, K: 'a, V: 'a> {
+ inner: table::MutEntries<'a, K, V>
+}
+
+/// HashMap move iterator
+pub struct MoveEntries<K, V> {
+ inner: iter::Map<'static, (SafeHash, K, V), (K, V), table::MoveEntries<K, V>>
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<K, V> Iterator<(K, V)> for MoveEntries<K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(K, V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+/// HashMap keys iterator
+pub type Keys<'a, K, V> =
+ iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
+
+/// HashMap values iterator
+pub type Values<'a, K, V> =
+ iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
+ fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
+ let (lower, _) = iter.size_hint();
+ let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
+ map.extend(iter);
+ map
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
+ fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
+ for (k, v) in iter {
+ self.insert(k, v);
+ }
+ }
+}
+
+#[cfg(test)]
+mod test_map {
+ use prelude::*;
+
+ use super::HashMap;
+ use cmp::Equiv;
+ use hash;
+ use iter::{Iterator,range_inclusive,range_step_inclusive};
+ use cell::RefCell;
+
+ struct KindaIntLike(int);
+
+ impl Equiv<int> for KindaIntLike {
+ fn equiv(&self, other: &int) -> bool {
+ let KindaIntLike(this) = *self;
+ this == *other
+ }
+ }
+ impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
+ fn hash(&self, state: &mut S) {
+ let KindaIntLike(this) = *self;
+ this.hash(state)
+ }
+ }
+
+ #[test]
+ fn test_create_capacity_zero() {
+ let mut m = HashMap::with_capacity(0);
+
+ assert!(m.insert(1i, 1i));
+
+ assert!(m.contains_key(&1));
+ assert!(!m.contains_key(&0));
+ }
+
+ #[test]
+ fn test_insert() {
+ let mut m = HashMap::new();
+ assert_eq!(m.len(), 0);
+ assert!(m.insert(1i, 2i));
+ assert_eq!(m.len(), 1);
+ assert!(m.insert(2i, 4i));
+ assert_eq!(m.len(), 2);
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&2).unwrap(), 4);
+ }
+
+ local_data_key!(drop_vector: RefCell<Vec<int>>)
+
+ #[deriving(Hash, PartialEq, Eq)]
+ struct Dropable {
+ k: uint
+ }
+
+ impl Dropable {
+ fn new(k: uint) -> Dropable {
+ let v = drop_vector.get().unwrap();
+ v.borrow_mut().as_mut_slice()[k] += 1;
+
+ Dropable { k: k }
+ }
+ }
+
+ impl Drop for Dropable {
+ fn drop(&mut self) {
+ let v = drop_vector.get().unwrap();
+ v.borrow_mut().as_mut_slice()[self.k] -= 1;
+ }
+ }
+
+ impl Clone for Dropable {
+ fn clone(&self) -> Dropable {
+ Dropable::new(self.k)
+ }
+ }
+
+ #[test]
+ fn test_drops() {
+ drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
+
+ {
+ let mut m = HashMap::new();
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ drop(v);
+
+ for i in range(0u, 100) {
+ let d1 = Dropable::new(i);
+ let d2 = Dropable::new(i+100);
+ m.insert(d1, d2);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ for i in range(0u, 50) {
+ let k = Dropable::new(i);
+ let v = m.pop(&k);
+
+ assert!(v.is_some());
+
+ let v = drop_vector.get().unwrap();
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ assert_eq!(v.borrow().as_slice()[i+100], 1);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 50) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ assert_eq!(v.borrow().as_slice()[i+100], 0);
+ }
+
+ for i in range(50u, 100) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ assert_eq!(v.borrow().as_slice()[i+100], 1);
+ }
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ }
+
+ #[test]
+ fn test_move_iter_drops() {
+ drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
+
+ let hm = {
+ let mut hm = HashMap::new();
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ drop(v);
+
+ for i in range(0u, 100) {
+ let d1 = Dropable::new(i);
+ let d2 = Dropable::new(i+100);
+ hm.insert(d1, d2);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ hm
+ };
+
+ // By the way, ensure that cloning doesn't screw up the dropping.
+ drop(hm.clone());
+
+ {
+ let mut half = hm.move_iter().take(50);
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ for _ in half {}
+
+ let v = drop_vector.get().unwrap();
+ let nk = range(0u, 100).filter(|&i| {
+ v.borrow().as_slice()[i] == 1
+ }).count();
+
+ let nv = range(0u, 100).filter(|&i| {
+ v.borrow().as_slice()[i+100] == 1
+ }).count();
+
+ assert_eq!(nk, 50);
+ assert_eq!(nv, 50);
+ };
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ }
+
+ #[test]
+ fn test_empty_pop() {
+ let mut m: HashMap<int, bool> = HashMap::new();
+ assert_eq!(m.pop(&0), None);
+ }
+
+ #[test]
+ fn test_lots_of_insertions() {
+ let mut m = HashMap::new();
+
+ // Try this a few times to make sure we never screw up the hashmap's
+ // internal state.
+ for _ in range(0i, 10) {
+ assert!(m.is_empty());
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.insert(i, i));
+
+ for j in range_inclusive(1, i) {
+ let r = m.find(&j);
+ assert_eq!(r, Some(&j));
+ }
+
+ for j in range_inclusive(i+1, 1000) {
+ let r = m.find(&j);
+ assert_eq!(r, None);
+ }
+ }
+
+ for i in range_inclusive(1001i, 2000) {
+ assert!(!m.contains_key(&i));
+ }
+
+ // remove forwards
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.remove(&i));
+
+ for j in range_inclusive(1, i) {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in range_inclusive(i+1, 1000) {
+ assert!(m.contains_key(&j));
+ }
+ }
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(!m.contains_key(&i));
+ }
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.insert(i, i));
+ }
+
+ // remove backwards
+ for i in range_step_inclusive(1000i, 1, -1) {
+ assert!(m.remove(&i));
+
+ for j in range_inclusive(i, 1000) {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in range_inclusive(1, i-1) {
+ assert!(m.contains_key(&j));
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_find_mut() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1i, 12i));
+ assert!(m.insert(2i, 8i));
+ assert!(m.insert(5i, 14i));
+ let new = 100;
+ match m.find_mut(&5) {
+ None => fail!(), Some(x) => *x = new
+ }
+ assert_eq!(m.find(&5), Some(&new));
+ }
+
+ #[test]
+ fn test_insert_overwrite() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1i, 2i));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert!(!m.insert(1i, 3i));
+ assert_eq!(*m.find(&1).unwrap(), 3);
+ }
+
+ #[test]
+ fn test_insert_conflicts() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert!(m.insert(5i, 3i));
+ assert!(m.insert(9i, 4i));
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ }
+
+ #[test]
+ fn test_update_with() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+
+ for i in range(1i, 1000) {
+ assert_eq!(
+ i + 2,
+ *m.insert_or_update_with(i + 1, i + 2, |_k, _v| {
+ fail!("Key not yet present");
+ })
+ );
+ assert_eq!(
+ i + 1,
+ *m.insert_or_update_with(i, i + 3, |k, v| {
+ assert_eq!(*k, i);
+ assert_eq!(*v, i + 1);
+ })
+ );
+ }
+ }
+
+ #[test]
+ fn test_conflict_remove() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert!(m.insert(5, 3));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert!(m.insert(9, 4));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert!(m.remove(&1));
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ }
+
+ #[test]
+ fn test_is_empty() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert!(!m.is_empty());
+ assert!(m.remove(&1));
+ assert!(m.is_empty());
+ }
+
+ #[test]
+ fn test_pop() {
+ let mut m = HashMap::new();
+ m.insert(1i, 2i);
+ assert_eq!(m.pop(&1), Some(2));
+ assert_eq!(m.pop(&1), None);
+ }
+
+ #[test]
+ #[allow(experimental)]
+ fn test_pop_equiv() {
+ let mut m = HashMap::new();
+ m.insert(1i, 2i);
+ assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
+ assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
+ }
+
+ #[test]
+ fn test_swap() {
+ let mut m = HashMap::new();
+ assert_eq!(m.swap(1i, 2i), None);
+ assert_eq!(m.swap(1i, 3i), Some(2));
+ assert_eq!(m.swap(1i, 4i), Some(3));
+ }
+
+ #[test]
+ fn test_iterate() {
+ let mut m = HashMap::with_capacity(4);
+ for i in range(0u, 32) {
+ assert!(m.insert(i, i*2));
+ }
+ assert_eq!(m.len(), 32);
+
+ let mut observed: u32 = 0;
+
+ for (k, v) in m.iter() {
+ assert_eq!(*v, *k * 2);
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_keys() {
+ let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
+ let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn test_values() {
+ let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
+ let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let values = map.values().map(|&v| v).collect::<Vec<char>>();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ fn test_find() {
+ let mut m = HashMap::new();
+ assert!(m.find(&1i).is_none());
+ m.insert(1i, 2i);
+ match m.find(&1) {
+ None => fail!(),
+ Some(v) => assert_eq!(*v, 2)
+ }
+ }
+
+ #[test]
+ fn test_find_copy() {
+ let mut m = HashMap::new();
+ assert!(m.find(&1i).is_none());
+
+ for i in range(1i, 10000) {
+ m.insert(i, i + 7);
+ match m.find_copy(&i) {
+ None => fail!(),
+ Some(v) => assert_eq!(v, i + 7)
+ }
+ for j in range(1i, i/100) {
+ match m.find_copy(&j) {
+ None => fail!(),
+ Some(v) => assert_eq!(v, j + 7)
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_eq() {
+ let mut m1 = HashMap::new();
+ m1.insert(1i, 2i);
+ m1.insert(2i, 3i);
+ m1.insert(3i, 4i);
+
+ let mut m2 = HashMap::new();
+ m2.insert(1i, 2i);
+ m2.insert(2i, 3i);
+
+ assert!(m1 != m2);
+
+ m2.insert(3i, 4i);
+
+ assert_eq!(m1, m2);
+ }
+
+ #[test]
+ fn test_show() {
+ let mut map: HashMap<int, int> = HashMap::new();
+ let empty: HashMap<int, int> = HashMap::new();
+
+ map.insert(1i, 2i);
+ map.insert(3i, 4i);
+
+ let map_str = format!("{}", map);
+
+ assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
+ assert_eq!(format!("{}", empty), "{}".to_string());
+ }
+
+ #[test]
+ fn test_expand() {
+ let mut m = HashMap::new();
+
+ assert_eq!(m.len(), 0);
+ assert!(m.is_empty());
+
+ let mut i = 0u;
+ let old_cap = m.table.capacity();
+ while old_cap == m.table.capacity() {
+ m.insert(i, i);
+ i += 1;
+ }
+
+ assert_eq!(m.len(), i);
+ assert!(!m.is_empty());
+ }
+
+ #[test]
+ fn test_resize_policy() {
+ let mut m = HashMap::new();
+
+ assert_eq!(m.len(), 0);
+ assert_eq!(m.table.capacity(), 0);
+ assert!(m.is_empty());
+
+ m.insert(0, 0);
+ m.remove(&0);
+ assert!(m.is_empty());
+ let initial_cap = m.table.capacity();
+ m.reserve(initial_cap * 2);
+ let cap = m.table.capacity();
+
+ assert_eq!(cap, initial_cap * 2);
+
+ let mut i = 0u;
+ for _ in range(0, cap * 3 / 4) {
+ m.insert(i, i);
+ i += 1;
+ }
+ // three quarters full
+
+ assert_eq!(m.len(), i);
+ assert_eq!(m.table.capacity(), cap);
+
+ for _ in range(0, cap / 4) {
+ m.insert(i, i);
+ i += 1;
+ }
+ // half full
+
+ let new_cap = m.table.capacity();
+ assert_eq!(new_cap, cap * 2);
+
+ for _ in range(0, cap / 2 - 1) {
+ i -= 1;
+ m.remove(&i);
+ assert_eq!(m.table.capacity(), new_cap);
+ }
+ // A little more than one quarter full.
+ // Shrinking starts as we remove more elements:
+ for _ in range(0, cap / 2 - 1) {
+ i -= 1;
+ m.remove(&i);
+ }
+
+ assert_eq!(m.len(), i);
+ assert!(!m.is_empty());
+ assert_eq!(m.table.capacity(), cap);
+ }
+
+ #[test]
+ fn test_find_equiv() {
+ let mut m = HashMap::new();
+
+ let (foo, bar, baz) = (1i,2i,3i);
+ m.insert("foo".to_string(), foo);
+ m.insert("bar".to_string(), bar);
+ m.insert("baz".to_string(), baz);
+
+ assert_eq!(m.find_equiv(&("foo")), Some(&foo));
+ assert_eq!(m.find_equiv(&("bar")), Some(&bar));
+ assert_eq!(m.find_equiv(&("baz")), Some(&baz));
+
+ assert_eq!(m.find_equiv(&("qux")), None);
+ }
+
+ #[test]
+ fn test_from_iter() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ for &(k, v) in xs.iter() {
+ assert_eq!(map.find(&k), Some(&v));
+ }
+ }
+
+ #[test]
+ fn test_size_hint() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ let mut iter = map.iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+ }
+
+ #[test]
+ fn test_mut_size_hint() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ let mut iter = map.mut_iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+ }
+
+ #[test]
+ fn test_index() {
+ let mut map: HashMap<int, int> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ assert_eq!(map[2], 1);
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_index_nonexistent() {
+ let mut map: HashMap<int, int> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ map[4];
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unordered containers, implemented as hash-tables
+
+pub use self::map::HashMap;
+pub use self::map::Entries;
+pub use self::map::MutEntries;
+pub use self::map::MoveEntries;
+pub use self::map::Keys;
+pub use self::map::Values;
+pub use self::map::INITIAL_CAPACITY;
+pub use self::set::HashSet;
+pub use self::set::SetItems;
+pub use self::set::SetMoveItems;
+pub use self::set::SetAlgebraItems;
+
+mod bench;
+mod map;
+mod set;
+mod table;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp::{Eq, Equiv, PartialEq};
+use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
+use default::Default;
+use fmt::Show;
+use fmt;
+use hash::{Hash, Hasher, RandomSipHasher};
+use iter::{Iterator, FromIterator, FilterMap, Chain, Repeat, Zip, Extendable};
+use iter;
+use option::{Some, None};
+use result::{Ok, Err};
+
+use super::{HashMap, Entries, MoveEntries, INITIAL_CAPACITY};
+
+
+// Future Optimization (FIXME!)
+// =============================
+//
+// Iteration over zero sized values is a noop. There is no need
+// for `bucket.val` in the case of HashSet. I suppose we would need HKT
+// to get rid of it properly.
+
+/// An implementation of a hash set using the underlying representation of a
+/// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
+/// requires that the elements implement the `Eq` and `Hash` traits.
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashSet;
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `HashSet<&str>` in this example).
+/// let mut books = HashSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons");
+/// books.insert("To Kill a Mockingbird");
+/// books.insert("The Odyssey");
+/// books.insert("The Great Gatsby");
+///
+/// // Check for a specific one.
+/// if !books.contains(&("The Winds of Winter")) {
+/// println!("We have {} books, but The Winds of Winter ain't one.",
+/// books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove(&"The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in books.iter() {
+/// println!("{}", *book);
+/// }
+/// ```
+///
+/// The easiest way to use `HashSet` with a custom type is to derive
+/// `Eq` and `Hash`. We must also derive `PartialEq`; in the future this
+/// will be implied by `Eq`.
+///
+/// ```
+/// use std::collections::HashSet;
+/// #[deriving(Hash, Eq, PartialEq, Show)]
+/// struct Viking<'a> {
+/// name: &'a str,
+/// power: uint,
+/// }
+///
+/// let mut vikings = HashSet::new();
+///
+/// vikings.insert(Viking { name: "Einar", power: 9u });
+/// vikings.insert(Viking { name: "Einar", power: 9u });
+/// vikings.insert(Viking { name: "Olaf", power: 4u });
+/// vikings.insert(Viking { name: "Harald", power: 8u });
+///
+/// // Use derived implementation to print the vikings.
+/// for x in vikings.iter() {
+/// println!("{}", x);
+/// }
+/// ```
+#[deriving(Clone)]
+pub struct HashSet<T, H = RandomSipHasher> {
+ map: HashMap<T, (), H>
+}
+
+impl<T: Hash + Eq> HashSet<T, RandomSipHasher> {
+ /// Create an empty HashSet.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::new();
+ /// ```
+ #[inline]
+ pub fn new() -> HashSet<T, RandomSipHasher> {
+ HashSet::with_capacity(INITIAL_CAPACITY)
+ }
+
+ /// Create an empty HashSet with space for at least `n` elements in
+ /// the hash table.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::with_capacity(10);
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: uint) -> HashSet<T, RandomSipHasher> {
+ HashSet { map: HashMap::with_capacity(capacity) }
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
+ /// Creates a new empty hash set which will use the given hasher to hash
+ /// keys.
+ ///
+ /// The hash set is also created with the default initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut set = HashSet::with_hasher(h);
+ /// set.insert(2u);
+ /// ```
+ #[inline]
+ pub fn with_hasher(hasher: H) -> HashSet<T, H> {
+ HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
+ }
+
+ /// Create an empty HashSet with space for at least `capacity`
+ /// elements in the hash table, using `hasher` to hash the keys.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow `HashSet`s to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut set = HashSet::with_capacity_and_hasher(10u, h);
+ /// set.insert(1i);
+ /// ```
+ #[inline]
+ pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
+ HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
+ }
+
+ /// Reserve space for at least `n` elements in the hash table.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::new();
+ /// set.reserve(10);
+ /// ```
+ pub fn reserve(&mut self, n: uint) {
+ self.map.reserve(n)
+ }
+
+ /// Returns true if the hash set contains a value equivalent to the
+ /// given query value.
+ ///
+ /// # Example
+ ///
+ /// This is a slightly silly example where we define the number's
+ /// parity as the equivalence class. It is important that the
+ /// values hash the same, which is why we implement `Hash`.
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::Hash;
+ /// use std::hash::sip::SipState;
+ ///
+ /// #[deriving(Eq, PartialEq)]
+ /// struct EvenOrOdd {
+ /// num: uint
+ /// };
+ ///
+ /// impl Hash for EvenOrOdd {
+ /// fn hash(&self, state: &mut SipState) {
+ /// let parity = self.num % 2;
+ /// parity.hash(state);
+ /// }
+ /// }
+ ///
+ /// impl Equiv<EvenOrOdd> for EvenOrOdd {
+ /// fn equiv(&self, other: &EvenOrOdd) -> bool {
+ /// self.num % 2 == other.num % 2
+ /// }
+ /// }
+ ///
+ /// let mut set = HashSet::new();
+ /// set.insert(EvenOrOdd { num: 3u });
+ ///
+ /// assert!(set.contains_equiv(&EvenOrOdd { num: 3u }));
+ /// assert!(set.contains_equiv(&EvenOrOdd { num: 5u }));
+ /// assert!(!set.contains_equiv(&EvenOrOdd { num: 4u }));
+ /// assert!(!set.contains_equiv(&EvenOrOdd { num: 2u }));
+ ///
+ /// ```
+ pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
+ self.map.contains_key_equiv(value)
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+ /// Iterator element type is `&'a T`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a");
+ /// set.insert("b");
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in set.iter() {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
+ self.map.keys()
+ }
+
+ /// Creates a consuming iterator, that is, one that moves each value out
+ /// of the set in arbitrary order. The set cannot be used after calling
+ /// this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a".to_string());
+ /// set.insert("b".to_string());
+ ///
+ /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
+ /// let v: Vec<String> = set.move_iter().collect();
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in v.iter() {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ pub fn move_iter(self) -> SetMoveItems<T> {
+ self.map.move_iter().map(|(k, _)| k)
+ }
+
+ /// Visit the values representing the difference.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Can be seen as `a - b`.
+ /// for x in a.difference(&b) {
+ /// println!("{}", x); // Print 1
+ /// }
+ ///
+ /// let diff: HashSet<int> = a.difference(&b).map(|&x| x).collect();
+ /// assert_eq!(diff, [1i].iter().map(|&x| x).collect());
+ ///
+ /// // Note that difference is not symmetric,
+ /// // and `b - a` means something else:
+ /// let diff: HashSet<int> = b.difference(&a).map(|&x| x).collect();
+ /// assert_eq!(diff, [4i].iter().map(|&x| x).collect());
+ /// ```
+ pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
+ Repeat::new(other).zip(self.iter())
+ .filter_map(|(other, elt)| {
+ if !other.contains(elt) { Some(elt) } else { None }
+ })
+ }
+
+ /// Visit the values representing the symmetric difference.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Print 1, 4 in arbitrary order.
+ /// for x in a.symmetric_difference(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let diff1: HashSet<int> = a.symmetric_difference(&b).map(|&x| x).collect();
+ /// let diff2: HashSet<int> = b.symmetric_difference(&a).map(|&x| x).collect();
+ ///
+ /// assert_eq!(diff1, diff2);
+ /// assert_eq!(diff1, [1i, 4].iter().map(|&x| x).collect());
+ /// ```
+ pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
+ self.difference(other).chain(other.difference(self))
+ }
+
+ /// Visit the values representing the intersection.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Print 2, 3 in arbitrary order.
+ /// for x in a.intersection(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let intersection: HashSet<int> = a.intersection(&b).map(|&x| x).collect();
+ /// assert_eq!(intersection, [2i, 3].iter().map(|&x| x).collect());
+ /// ```
+ pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> SetAlgebraItems<'a, T, H> {
+ Repeat::new(other).zip(self.iter())
+ .filter_map(|(other, elt)| {
+ if other.contains(elt) { Some(elt) } else { None }
+ })
+ }
+
+ /// Visit the values representing the union.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order.
+ /// for x in a.union(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let union: HashSet<int> = a.union(&b).map(|&x| x).collect();
+ /// assert_eq!(union, [1i, 2, 3, 4].iter().map(|&x| x).collect());
+ /// ```
+ pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
+ self.iter().chain(other.difference(self))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
+ fn eq(&self, other: &HashSet<T, H>) -> bool {
+ if self.len() != other.len() { return false; }
+
+ self.iter().all(|key| other.contains(key))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
+ fn len(&self) -> uint { self.map.len() }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
+ fn clear(&mut self) { self.map.clear() }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
+ fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
+
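+ /// A short sketch of the set-relation queries below (condensed from
+ /// `test_disjoint` and `test_subset_and_superset` in the test module;
+ /// the `Set` trait is assumed to be in scope via the prelude):
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 5, 6].iter().map(|&x| x).collect();
+ /// assert!(a.is_disjoint(&b));
+ /// assert!(!a.is_subset(&b));
+ /// assert!(a.is_subset(&a));
+ /// ```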
+ fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
+ self.iter().all(|v| !other.contains(v))
+ }
+
+ fn is_subset(&self, other: &HashSet<T, H>) -> bool {
+ self.iter().all(|v| other.contains(v))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
+ fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
+
+ fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
+}
+
+impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "{{"));
+
+ for (i, x) in self.iter().enumerate() {
+ if i != 0 { try!(write!(f, ", ")); }
+ try!(write!(f, "{}", *x));
+ }
+
+ write!(f, "}}")
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
+ fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
+ let (lower, _) = iter.size_hint();
+ let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
+ set.extend(iter);
+ set
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
+ fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
+ for k in iter {
+ self.insert(k);
+ }
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
+ fn default() -> HashSet<T, H> {
+ HashSet::with_hasher(Default::default())
+ }
+}
+
+/// HashSet iterator
+pub type SetItems<'a, K> =
+ iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
+
+/// HashSet move iterator
+pub type SetMoveItems<K> =
+ iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
+
+// `Repeat` is used to feed the filter closure an explicit capture
+// of a reference to the other set
+/// Set operations iterator
+pub type SetAlgebraItems<'a, T, H> =
+ FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
+ Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
+
+#[cfg(test)]
+mod test_set {
+ use prelude::*;
+
+ use super::HashSet;
+ use slice::ImmutablePartialEqSlice;
+ use collections::Collection;
+
+ #[test]
+ fn test_disjoint() {
+ let mut xs = HashSet::new();
+ let mut ys = HashSet::new();
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(5i));
+ assert!(ys.insert(11i));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(7));
+ assert!(xs.insert(19));
+ assert!(xs.insert(4));
+ assert!(ys.insert(2));
+ assert!(ys.insert(-11));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(ys.insert(7));
+ assert!(!xs.is_disjoint(&ys));
+ assert!(!ys.is_disjoint(&xs));
+ }
+
+ #[test]
+ fn test_subset_and_superset() {
+ let mut a = HashSet::new();
+ assert!(a.insert(0i));
+ assert!(a.insert(5));
+ assert!(a.insert(11));
+ assert!(a.insert(7));
+
+ let mut b = HashSet::new();
+ assert!(b.insert(0i));
+ assert!(b.insert(7));
+ assert!(b.insert(19));
+ assert!(b.insert(250));
+ assert!(b.insert(11));
+ assert!(b.insert(200));
+
+ assert!(!a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(!b.is_superset(&a));
+
+ assert!(b.insert(5));
+
+ assert!(a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(b.is_superset(&a));
+ }
+
+ #[test]
+ fn test_iterate() {
+ let mut a = HashSet::new();
+ for i in range(0u, 32) {
+ assert!(a.insert(i));
+ }
+ let mut observed: u32 = 0;
+ for k in a.iter() {
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_intersection() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(11i));
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(77));
+ assert!(a.insert(103));
+ assert!(a.insert(5));
+ assert!(a.insert(-5));
+
+ assert!(b.insert(2i));
+ assert!(b.insert(11));
+ assert!(b.insert(77));
+ assert!(b.insert(-9));
+ assert!(b.insert(-42));
+ assert!(b.insert(5));
+ assert!(b.insert(3));
+
+ let mut i = 0;
+ let expected = [3, 5, 11, 77];
+ for x in a.intersection(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(3i));
+ assert!(b.insert(9));
+
+ let mut i = 0;
+ let expected = [1, 5, 11];
+ for x in a.difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_symmetric_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(-2i));
+ assert!(b.insert(3));
+ assert!(b.insert(9));
+ assert!(b.insert(14));
+ assert!(b.insert(22));
+
+ let mut i = 0;
+ let expected = [-2, 1, 5, 11, 14, 22];
+ for x in a.symmetric_difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_union() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+ assert!(a.insert(16));
+ assert!(a.insert(19));
+ assert!(a.insert(24));
+
+ assert!(b.insert(-2i));
+ assert!(b.insert(1));
+ assert!(b.insert(5));
+ assert!(b.insert(9));
+ assert!(b.insert(13));
+ assert!(b.insert(19));
+
+ let mut i = 0;
+ let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
+ for x in a.union(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_from_iter() {
+ let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
+
+ let set: HashSet<int> = xs.iter().map(|&x| x).collect();
+
+ for x in xs.iter() {
+ assert!(set.contains(x));
+ }
+ }
+
+ #[test]
+ fn test_move_iter() {
+ let hs = {
+ let mut hs = HashSet::new();
+
+ hs.insert('a');
+ hs.insert('b');
+
+ hs
+ };
+
+ let v = hs.move_iter().collect::<Vec<char>>();
+ assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
+ }
+
+ #[test]
+ fn test_eq() {
+ // These constants once happened to expose a bug in insert().
+ // I'm keeping them around to prevent a regression.
+ let mut s1 = HashSet::new();
+
+ s1.insert(1i);
+ s1.insert(2);
+ s1.insert(3);
+
+ let mut s2 = HashSet::new();
+
+ s2.insert(1i);
+ s2.insert(2);
+
+ assert!(s1 != s2);
+
+ s2.insert(3);
+
+ assert_eq!(s1, s2);
+ }
+
+ #[test]
+ fn test_show() {
+ let mut set: HashSet<int> = HashSet::new();
+ let empty: HashSet<int> = HashSet::new();
+
+ set.insert(1i);
+ set.insert(2);
+
+ let set_str = format!("{}", set);
+
+ assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
+ assert_eq!(format!("{}", empty), "{}".to_string());
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp;
+use hash::{Hash, Hasher};
+use iter::{Iterator, count};
+use kinds::marker;
+use mem::{min_align_of, size_of};
+use mem;
+use num::{CheckedAdd, CheckedMul, is_power_of_two};
+use ops::{Deref, DerefMut, Drop};
+use option::{Some, None, Option};
+use ptr::{RawPtr, copy_nonoverlapping_memory, zero_memory};
+use ptr;
+use rt::heap::{allocate, deallocate};
+
+static EMPTY_BUCKET: u64 = 0u64;
+
+/// The raw hashtable, providing safe-ish access to the unzipped and highly
+/// optimized arrays of hashes, keys, and values.
+///
+/// This design uses less memory and is a lot faster than the naive
+/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
+/// option on every element, and we get a generally more cache-aware design.
+///
+/// Essential invariants of this structure:
+///
+/// - if t.hashes[i] == EMPTY_BUCKET, then `Bucket::at_index(&t, i).raw`
+/// points to 'undefined' contents. Don't read from it. This invariant is
+/// enforced outside this module with the `EmptyBucket`, `FullBucket`,
+/// and `SafeHash` types.
+///
+/// - An `EmptyBucket` is only constructed at an index with
+/// a hash of EMPTY_BUCKET.
+///
+/// - A `FullBucket` is only constructed at an index with a
+/// non-EMPTY_BUCKET hash.
+///
+/// - A `SafeHash` is only constructed for a non-`EMPTY_BUCKET` hash. We get
+/// around hashes of zero by changing them to 0x8000_0000_0000_0000,
+/// which will likely map to the same bucket, while not being confused
+/// with "empty".
+///
+/// - All three "arrays represented by pointers" are the same length:
+/// `capacity`. This is set at creation and never changes. The arrays
+/// are unzipped to save space (we don't have to pay for the padding
+/// between odd-sized elements, such as in a map from u64 to u8), and to
+/// be more cache-aware (scanning through 8 hashes brings in at most
+/// 2 cache lines, since they're all right beside each other).
+///
+/// You can kind of think of this module/data structure as a safe wrapper
+/// around just the "table" part of the hashtable. It enforces some
+/// invariants at the type level and employs some performance trickery,
+/// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
+#[unsafe_no_drop_flag]
+pub struct RawTable<K, V> {
+ capacity: uint,
+ size: uint,
+ hashes: *mut u64,
+ // Because K/V do not appear directly in any of the types in the struct,
+ // inform rustc that in fact instances of K and V are reachable from here.
+ marker: marker::CovariantType<(K,V)>,
+}
+
+struct RawBucket<K, V> {
+ hash: *mut u64,
+ key: *mut K,
+ val: *mut V
+}
+
+pub struct Bucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub struct EmptyBucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub struct FullBucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub type EmptyBucketImm<'table, K, V> = EmptyBucket<K, V, &'table RawTable<K, V>>;
+pub type FullBucketImm<'table, K, V> = FullBucket<K, V, &'table RawTable<K, V>>;
+
+pub type EmptyBucketMut<'table, K, V> = EmptyBucket<K, V, &'table mut RawTable<K, V>>;
+pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;
+
+pub enum BucketState<K, V, M> {
+ Empty(EmptyBucket<K, V, M>),
+ Full(FullBucket<K, V, M>),
+}
+
+// A GapThenFull encapsulates the state of two consecutive buckets at once.
+// The first bucket, called the gap, is known to be empty.
+// The second bucket is full.
+struct GapThenFull<K, V, M> {
+ gap: EmptyBucket<K, V, ()>,
+ full: FullBucket<K, V, M>,
+}
+
+/// A hash that is not zero, since we use a hash of zero to represent empty
+/// buckets.
+#[deriving(PartialEq)]
+pub struct SafeHash {
+ hash: u64,
+}
+
+impl SafeHash {
+ /// Peek at the hash value, which is guaranteed to be non-zero.
+ #[inline(always)]
+ pub fn inspect(&self) -> u64 { self.hash }
+}
+
+/// We need to remove hashes of 0. That's reserved for empty buckets.
+/// This function wraps up the hasher's output so that it is the only
+/// way outside this module to generate a `SafeHash`.
+pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
+ match hasher.hash(t) {
+ // This constant is exceedingly likely to hash to the same
+ // bucket, but it won't be counted as empty! Just so we can maintain
+ // our precious uniform distribution of initial indexes.
+ EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
+ h => SafeHash { hash: h },
+ }
+}
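+
+// Illustrative sketch (with a hypothetical `hasher` and `key`): whatever the
+// raw hash comes out to, the `SafeHash` handed back is never `EMPTY_BUCKET`:
+//
+//     let sh = make_hash(&hasher, &key);
+//     assert!(sh.inspect() != EMPTY_BUCKET);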
+
+// `replace` casts a `*u64` to a `*SafeHash`. Since we statically
+// ensure that a `FullBucket` points to an index with a non-zero hash,
+// and a `SafeHash` is just a `u64` with a different name, this is
+// safe.
+//
+// This test ensures that a `SafeHash` really IS the same size as a
+// `u64`. If you need to change the size of `SafeHash` (and
+// consequently make this test fail), `replace` needs to be
+// modified to no longer assume this.
+#[test]
+fn can_alias_safehash_as_u64() {
+ assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
+}
+
+impl<K, V> RawBucket<K, V> {
+ unsafe fn offset(self, count: int) -> RawBucket<K, V> {
+ RawBucket {
+ hash: self.hash.offset(count),
+ key: self.key.offset(count),
+ val: self.val.offset(count),
+ }
+ }
+}
+
+// For parameterizing over mutability.
+impl<'t, K, V> Deref<RawTable<K, V>> for &'t RawTable<K, V> {
+ fn deref(&self) -> &RawTable<K, V> {
+ &**self
+ }
+}
+
+impl<'t, K, V> Deref<RawTable<K, V>> for &'t mut RawTable<K, V> {
+ fn deref(&self) -> &RawTable<K,V> {
+ &**self
+ }
+}
+
+impl<'t, K, V> DerefMut<RawTable<K, V>> for &'t mut RawTable<K, V> {
+ fn deref_mut(&mut self) -> &mut RawTable<K,V> {
+ &mut **self
+ }
+}
+
+// Buckets hold references to the table.
+impl<K, V, M> FullBucket<K, V, M> {
+ /// Borrow a reference to the table.
+ pub fn table(&self) -> &M {
+ &self.table
+ }
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+ /// Get the raw index.
+ pub fn index(&self) -> uint {
+ self.idx
+ }
+}
+
+impl<K, V, M> EmptyBucket<K, V, M> {
+ /// Borrow a reference to the table.
+ pub fn table(&self) -> &M {
+ &self.table
+ }
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+}
+
+impl<K, V, M> Bucket<K, V, M> {
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+ /// Get the raw index.
+ pub fn index(&self) -> uint {
+ self.idx
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> Bucket<K, V, M> {
+ pub fn new(table: M, hash: &SafeHash) -> Bucket<K, V, M> {
+ Bucket::at_index(table, hash.inspect() as uint)
+ }
+
+ pub fn at_index(table: M, ib_index: uint) -> Bucket<K, V, M> {
+ let ib_index = ib_index & (table.capacity() - 1);
+ Bucket {
+ raw: unsafe {
+ table.first_bucket_raw().offset(ib_index as int)
+ },
+ idx: ib_index,
+ table: table
+ }
+ }
+
+ pub fn first(table: M) -> Bucket<K, V, M> {
+ Bucket {
+ raw: table.first_bucket_raw(),
+ idx: 0,
+ table: table
+ }
+ }
+
+    /// Reads this bucket, returning an enum indicating whether
+ /// it's initialized or not. You need to match on this enum to get
+ /// the appropriate types to call most of the other functions in
+ /// this module.
+ pub fn peek(self) -> BucketState<K, V, M> {
+ match unsafe { *self.raw.hash } {
+ EMPTY_BUCKET =>
+ Empty(EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }),
+ _ =>
+ Full(FullBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ })
+ }
+ }
+
+ /// Modifies the bucket pointer in place to make it point to the next slot.
+ pub fn next(&mut self) {
+ // Branchless bucket iteration step.
+ // As we reach the end of the table...
+ // We take the current idx: 0111111b
+ // Xor it by its increment: ^ 1000000b
+ // ------------
+ // 1111111b
+ // Then AND with the capacity: & 1000000b
+ // ------------
+ // to get the backwards offset: 1000000b
+ // ... and it's zero at all other times.
+ let maybe_wraparound_dist = (self.idx ^ (self.idx + 1)) & self.table.capacity();
+ // Finally, we obtain the offset 1 or the offset -cap + 1.
+ let dist = 1i - (maybe_wraparound_dist as int);
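+        // Concretely, with capacity 8 and idx == 7: (7 ^ 8) & 8 == 8, so
+        // dist == 1 - 8 == -7, sending the raw pointer back to bucket 0
+        // while `idx` keeps counting upwards.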
+
+ self.idx += 1;
+
+ unsafe {
+ self.raw = self.raw.offset(dist);
+ }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> EmptyBucket<K, V, M> {
+ #[inline]
+ pub fn next(self) -> Bucket<K, V, M> {
+ let mut bucket = self.into_bucket();
+ bucket.next();
+ bucket
+ }
+
+ #[inline]
+ pub fn into_bucket(self) -> Bucket<K, V, M> {
+ Bucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }
+ }
+
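+    /// Pairs this empty bucket with the bucket after it: if that neighbour
+    /// is full, returns a `GapThenFull` whose `shift()` can walk the run
+    /// forward; otherwise returns `None`. This is the building block for
+    /// shifting a displaced run of entries back into the gap.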
+ pub fn gap_peek(self) -> Option<GapThenFull<K, V, M>> {
+ let gap = EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: ()
+ };
+
+ match self.next().peek() {
+ Full(bucket) => {
+ Some(GapThenFull {
+ gap: gap,
+ full: bucket
+ })
+ }
+ Empty(..) => None
+ }
+ }
+}
+
+impl<K, V, M: DerefMut<RawTable<K, V>>> EmptyBucket<K, V, M> {
+    /// Puts the given key and value pair, along with the key's hash,
+ /// into this bucket in the hashtable. Note how `self` is 'moved' into
+ /// this function, because this slot will no longer be empty when
+ /// we return! A `FullBucket` is returned for later use, pointing to
+ /// the newly-filled slot in the hashtable.
+ ///
+ /// Use `make_hash` to construct a `SafeHash` to pass to this function.
+ pub fn put(mut self, hash: SafeHash, key: K, value: V)
+ -> FullBucket<K, V, M> {
+ unsafe {
+ *self.raw.hash = hash.inspect();
+ ptr::write(self.raw.key, key);
+ ptr::write(self.raw.val, value);
+ }
+
+ self.table.size += 1;
+
+ FullBucket { raw: self.raw, idx: self.idx, table: self.table }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> FullBucket<K, V, M> {
+ #[inline]
+ pub fn next(self) -> Bucket<K, V, M> {
+ let mut bucket = self.into_bucket();
+ bucket.next();
+ bucket
+ }
+
+ #[inline]
+ pub fn into_bucket(self) -> Bucket<K, V, M> {
+ Bucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }
+ }
+
+ /// Get the distance between this bucket and the 'ideal' location
+ /// as determined by the key's hash stored in it.
+ ///
+    /// In the literature on Robin Hood hashing, this is called the
+    /// "distance to initial bucket", or DIB. Also known as "probe count".
+ pub fn distance(&self) -> uint {
+ // Calculates the distance one has to travel when going from
+ // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
+ // if the destination is not reached before the end of the table.
+ (self.idx - self.hash().inspect() as uint) & (self.table.capacity() - 1)
+ }
+
+ #[inline]
+ pub fn hash(&self) -> SafeHash {
+ unsafe {
+ SafeHash {
+ hash: *self.raw.hash
+ }
+ }
+ }
+
+    /// Gets references to the key and value of this full bucket.
+ pub fn read(&self) -> (&K, &V) {
+ unsafe {
+ (&*self.raw.key,
+ &*self.raw.val)
+ }
+ }
+}
+
+impl<K, V, M: DerefMut<RawTable<K, V>>> FullBucket<K, V, M> {
+ /// Removes this bucket's key and value from the hashtable.
+ ///
+ /// This works similarly to `put`, building an `EmptyBucket` out of the
+ /// taken bucket.
+ pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
+ let key = self.raw.key as *const K;
+ let val = self.raw.val as *const V;
+
+ self.table.size -= 1;
+
+ unsafe {
+ *self.raw.hash = EMPTY_BUCKET;
+ (
+ EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ },
+ ptr::read(key),
+ ptr::read(val)
+ )
+ }
+ }
+
+ pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
+ unsafe {
+ let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
+ let old_key = ptr::replace(self.raw.key, k);
+ let old_val = ptr::replace(self.raw.val, v);
+
+ (old_hash, old_key, old_val)
+ }
+ }
+
+    /// Gets mutable references to the key and value of this full bucket.
+ pub fn read_mut(&mut self) -> (&mut K, &mut V) {
+ unsafe {
+ (&mut *self.raw.key,
+ &mut *self.raw.val)
+ }
+ }
+}
+
+impl<'t, K, V, M: Deref<RawTable<K, V>> + 't> FullBucket<K, V, M> {
+ /// Exchange a bucket state for immutable references into the table.
+ /// Because the underlying reference to the table is also consumed,
+ /// no further changes to the structure of the table are possible;
+ /// in exchange for this, the returned references have a longer lifetime
+ /// than the references returned by `read()`.
+ pub fn into_refs(self) -> (&'t K, &'t V) {
+ unsafe {
+ (&*self.raw.key,
+ &*self.raw.val)
+ }
+ }
+}
+
+impl<'t, K, V, M: DerefMut<RawTable<K, V>> + 't> FullBucket<K, V, M> {
+ /// This works similarly to `into_refs`, exchanging a bucket state
+ /// for mutable references into the table.
+ pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
+ unsafe {
+ (&mut *self.raw.key,
+ &mut *self.raw.val)
+ }
+ }
+}
+
+impl<K, V, M> BucketState<K, V, M> {
+ // For convenience.
+ pub fn expect_full(self) -> FullBucket<K, V, M> {
+ match self {
+ Full(full) => full,
+ Empty(..) => fail!("Expected full bucket")
+ }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> GapThenFull<K, V, M> {
+ #[inline]
+ pub fn full(&self) -> &FullBucket<K, V, M> {
+ &self.full
+ }
+
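+    /// Moves the full bucket's contents back into the gap, then advances
+    /// both positions by one. Returns `None` once the run of full buckets
+    /// ends at an empty slot.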
+ pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
+ unsafe {
+ *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
+ copy_nonoverlapping_memory(self.gap.raw.key, self.full.raw.key as *const K, 1);
+ copy_nonoverlapping_memory(self.gap.raw.val, self.full.raw.val as *const V, 1);
+ }
+
+ let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
+
+ match self.full.next().peek() {
+ Full(bucket) => {
+ self.gap.raw = prev_raw;
+ self.gap.idx = prev_idx;
+
+ self.full = bucket;
+
+ Some(self)
+ }
+ Empty(..) => None
+ }
+ }
+}
+
+
+/// Rounds up to a multiple of a power of two. Returns the closest multiple
+/// of `target_alignment` that is greater than or equal to `unrounded`.
+///
+/// # Failure
+///
+/// Fails if `target_alignment` is not a power of two.
+fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
+ assert!(is_power_of_two(target_alignment));
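+    // Adding `target_alignment - 1` and then clearing the low bits rounds
+    // up to the next multiple; e.g. round_up_to_next(5, 4) == (5 + 3) & !3 == 8.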
+ (unrounded + target_alignment - 1) & !(target_alignment - 1)
+}
+
+#[test]
+fn test_rounding() {
+ assert_eq!(round_up_to_next(0, 4), 0);
+ assert_eq!(round_up_to_next(1, 4), 4);
+ assert_eq!(round_up_to_next(2, 4), 4);
+ assert_eq!(round_up_to_next(3, 4), 4);
+ assert_eq!(round_up_to_next(4, 4), 4);
+ assert_eq!(round_up_to_next(5, 4), 8);
+}
+
+// Returns a tuple of (key_offset, val_offset),
+// from the start of a mallocated array.
+fn calculate_offsets(hashes_size: uint,
+ keys_size: uint, keys_align: uint,
+ vals_align: uint)
+ -> (uint, uint) {
+ let keys_offset = round_up_to_next(hashes_size, keys_align);
+ let end_of_keys = keys_offset + keys_size;
+
+ let vals_offset = round_up_to_next(end_of_keys, vals_align);
+
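+    // Worked example (cf. `test_offset_calculation` below): 128 bytes of
+    // hashes followed by 15 one-byte keys gives keys_offset == 128, and
+    // 4-byte-aligned values start at round_up_to_next(143, 4) == 144.
+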
+ (keys_offset, vals_offset)
+}
+
+// Returns a tuple of (minimum required malloc alignment, hash_offset,
+// array_size), from the start of a mallocated array.
+fn calculate_allocation(hash_size: uint, hash_align: uint,
+ keys_size: uint, keys_align: uint,
+ vals_size: uint, vals_align: uint)
+ -> (uint, uint, uint) {
+ let hash_offset = 0;
+ let (_, vals_offset) = calculate_offsets(hash_size,
+ keys_size, keys_align,
+ vals_align);
+ let end_of_vals = vals_offset + vals_size;
+
+ let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
+
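+    // Worked example (cf. `test_offset_calculation` below):
+    // calculate_allocation(128, 8, 15, 1, 4, 4) == (8, 0, 148): the buffer
+    // takes the hashes' 8-byte alignment, and 148 == 144 + 4 bytes of values.
+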
+ (min_align, hash_offset, end_of_vals)
+}
+
+#[test]
+fn test_offset_calculation() {
+ assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
+ assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
+ assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
+ assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
+ assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
+ assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
+}
+
+impl<K, V> RawTable<K, V> {
+    /// Does not initialize the buckets. The caller should ensure that,
+    /// at the very least, every hash is set to EMPTY_BUCKET.
+ unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
+ if capacity == 0 {
+ return RawTable {
+ size: 0,
+ capacity: 0,
+ hashes: 0 as *mut u64,
+ marker: marker::CovariantType,
+ };
+ }
+ // No need for `checked_mul` before a more restrictive check performed
+ // later in this method.
+ let hashes_size = capacity * size_of::<u64>();
+ let keys_size = capacity * size_of::< K >();
+ let vals_size = capacity * size_of::< V >();
+
+ // Allocating hashmaps is a little tricky. We need to allocate three
+ // arrays, but since we know their sizes and alignments up front,
+ // we just allocate a single array, and then have the subarrays
+ // point into it.
+ //
+ // This is great in theory, but in practice getting the alignment
+ // right is a little subtle. Therefore, calculating offsets has been
+ // factored out into a different function.
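+        //
+        // Roughly, with the alignment padding between regions elided:
+        //
+        //     buffer: [ cap hashes | cap keys | cap vals ]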
+ let (malloc_alignment, hash_offset, size) =
+ calculate_allocation(
+ hashes_size, min_align_of::<u64>(),
+ keys_size, min_align_of::< K >(),
+ vals_size, min_align_of::< V >());
+
+ // One check for overflow that covers calculation and rounding of size.
+ let size_of_bucket = size_of::<u64>().checked_add(&size_of::<K>()).unwrap()
+ .checked_add(&size_of::<V>()).unwrap();
+ assert!(size >= capacity.checked_mul(&size_of_bucket)
+ .expect("capacity overflow"),
+ "capacity overflow");
+
+ let buffer = allocate(size, malloc_alignment);
+
+ let hashes = buffer.offset(hash_offset as int) as *mut u64;
+
+ RawTable {
+ capacity: capacity,
+ size: 0,
+ hashes: hashes,
+ marker: marker::CovariantType,
+ }
+ }
+
+ fn first_bucket_raw(&self) -> RawBucket<K, V> {
+ let hashes_size = self.capacity * size_of::<u64>();
+ let keys_size = self.capacity * size_of::<K>();
+
+ let buffer = self.hashes as *mut u8;
+ let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
+ keys_size, min_align_of::<K>(),
+ min_align_of::<V>());
+
+ unsafe {
+ RawBucket {
+ hash: self.hashes,
+ key: buffer.offset(keys_offset as int) as *mut K,
+ val: buffer.offset(vals_offset as int) as *mut V
+ }
+ }
+ }
+
+ /// Creates a new raw table from a given capacity. All buckets are
+ /// initially empty.
+ #[allow(experimental)]
+ pub fn new(capacity: uint) -> RawTable<K, V> {
+ unsafe {
+ let ret = RawTable::new_uninitialized(capacity);
+ zero_memory(ret.hashes, capacity);
+ ret
+ }
+ }
+
+ /// The hashtable's capacity, similar to a vector's.
+ pub fn capacity(&self) -> uint {
+ self.capacity
+ }
+
+ /// The number of elements ever `put` in the hashtable, minus the number
+ /// of elements ever `take`n.
+ pub fn size(&self) -> uint {
+ self.size
+ }
+
+ fn raw_buckets(&self) -> RawBuckets<K, V> {
+ RawBuckets {
+ raw: self.first_bucket_raw(),
+ hashes_end: unsafe {
+ self.hashes.offset(self.capacity as int)
+ }
+ }
+ }
+
+ pub fn iter(&self) -> Entries<K, V> {
+ Entries {
+ iter: self.raw_buckets(),
+ elems_left: self.size(),
+ }
+ }
+
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ MutEntries {
+ iter: self.raw_buckets(),
+ elems_left: self.size(),
+ }
+ }
+
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ MoveEntries {
+ iter: self.raw_buckets(),
+ table: self,
+ }
+ }
+
+    /// Returns an iterator that moves out each entry. Used only while the table
+ /// is being dropped.
+ unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> {
+ let raw_bucket = self.first_bucket_raw();
+ RevMoveBuckets {
+ raw: raw_bucket.offset(self.capacity as int),
+ hashes_end: raw_bucket.hash,
+ elems_left: self.size
+ }
+ }
+}
+
+/// A raw iterator. The basis for some other iterators in this module. Although
+/// this interface is safe, it's not used outside this module.
+struct RawBuckets<'a, K, V> {
+ raw: RawBucket<K, V>,
+ hashes_end: *mut u64
+}
+
+impl<'a, K, V> Iterator<RawBucket<K, V>> for RawBuckets<'a, K, V> {
+ fn next(&mut self) -> Option<RawBucket<K, V>> {
+ while self.raw.hash != self.hashes_end {
+ unsafe {
+ // We are swapping out the pointer to a bucket and replacing
+ // it with the pointer to the next one.
+ let prev = ptr::replace(&mut self.raw, self.raw.offset(1));
+ if *prev.hash != EMPTY_BUCKET {
+ return Some(prev);
+ }
+ }
+ }
+
+ None
+ }
+}
+
+/// An iterator that moves out buckets in reverse order. It leaves the table
+/// in an inconsistent state and should only be used for dropping
+/// the table's remaining entries. It's used in the implementation of Drop.
+struct RevMoveBuckets<'a, K, V> {
+ raw: RawBucket<K, V>,
+ hashes_end: *mut u64,
+ elems_left: uint
+}
+
+impl<'a, K, V> Iterator<(K, V)> for RevMoveBuckets<'a, K, V> {
+ fn next(&mut self) -> Option<(K, V)> {
+ if self.elems_left == 0 {
+ return None;
+ }
+
+ loop {
+ debug_assert!(self.raw.hash != self.hashes_end);
+
+ unsafe {
+ self.raw = self.raw.offset(-1);
+
+ if *self.raw.hash != EMPTY_BUCKET {
+ self.elems_left -= 1;
+ return Some((
+ ptr::read(self.raw.key as *const K),
+ ptr::read(self.raw.val as *const V)
+ ));
+ }
+ }
+ }
+ }
+}
+
+/// Iterator over shared references to entries in a table.
+pub struct Entries<'a, K: 'a, V: 'a> {
+ iter: RawBuckets<'a, K, V>,
+ elems_left: uint,
+}
+
+/// Iterator over mutable references to entries in a table.
+pub struct MutEntries<'a, K: 'a, V: 'a> {
+ iter: RawBuckets<'a, K, V>,
+ elems_left: uint,
+}
+
+/// Iterator over the entries in a table, consuming the table.
+pub struct MoveEntries<K, V> {
+ table: RawTable<K, V>,
+ iter: RawBuckets<'static, K, V>
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.iter.next().map(|bucket| {
+ self.elems_left -= 1;
+ unsafe {
+ (&*bucket.key,
+ &*bucket.val)
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ (self.elems_left, Some(self.elems_left))
+ }
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.iter.next().map(|bucket| {
+ self.elems_left -= 1;
+ unsafe {
+ (&*bucket.key,
+ &mut *bucket.val)
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ (self.elems_left, Some(self.elems_left))
+ }
+}
+
+impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
+ fn next(&mut self) -> Option<(SafeHash, K, V)> {
+ self.iter.next().map(|bucket| {
+ self.table.size -= 1;
+ unsafe {
+ (
+ SafeHash {
+ hash: *bucket.hash,
+ },
+ ptr::read(bucket.key as *const K),
+ ptr::read(bucket.val as *const V)
+ )
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ let size = self.table.size();
+ (size, Some(size))
+ }
+}
+
+impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
+ fn clone(&self) -> RawTable<K, V> {
+ unsafe {
+ let mut new_ht = RawTable::new_uninitialized(self.capacity());
+
+ {
+ let cap = self.capacity();
+ let mut new_buckets = Bucket::first(&mut new_ht);
+ let mut buckets = Bucket::first(self);
+ while buckets.index() != cap {
+ match buckets.peek() {
+ Full(full) => {
+ let (h, k, v) = {
+ let (k, v) = full.read();
+ (full.hash(), k.clone(), v.clone())
+ };
+ *new_buckets.raw.hash = h.inspect();
+ mem::overwrite(new_buckets.raw.key, k);
+ mem::overwrite(new_buckets.raw.val, v);
+ }
+ Empty(..) => {
+ *new_buckets.raw.hash = EMPTY_BUCKET;
+ }
+ }
+ new_buckets.next();
+ buckets.next();
+ }
+ };
+
+ new_ht.size = self.size();
+
+ new_ht
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<K, V> Drop for RawTable<K, V> {
+ fn drop(&mut self) {
+ if self.hashes.is_null() {
+ return;
+ }
+ // This is done in reverse because we've likely partially taken
+ // some elements out with `.move_iter()` from the front.
+ // Check if the size is 0, so we don't do a useless scan when
+ // dropping empty tables such as on resize.
+        // Also avoid double-dropping elements that have already been moved out.
+ unsafe {
+ for _ in self.rev_move_buckets() {}
+ }
+
+ let hashes_size = self.capacity * size_of::<u64>();
+ let keys_size = self.capacity * size_of::<K>();
+ let vals_size = self.capacity * size_of::<V>();
+ let (align, _, size) = calculate_allocation(hashes_size, min_align_of::<u64>(),
+ keys_size, min_align_of::<K>(),
+ vals_size, min_align_of::<V>());
+
+ unsafe {
+ deallocate(self.hashes as *mut u8, size, align);
+ // Remember how everything was allocated out of one buffer
+ // during initialization? We only need one call to free here.
+ }
+ }
+}
}
}
-#[cfg(stage0)]
-impl Reader for Box<Reader+'static> {
- fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) }
-}
-
-#[cfg(not(stage0))]
impl<'a> Reader for Box<Reader+'a> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) }
}
}
}
-#[cfg(stage0)]
-impl Writer for Box<Writer+'static> {
- #[inline]
- fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) }
-
- #[inline]
- fn flush(&mut self) -> IoResult<()> { self.flush() }
-}
-
-#[cfg(not(stage0))]
impl<'a> Writer for Box<Writer+'a> {
#[inline]
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) }
// sizes. For an example, see #14940. For this reason, chunk the output
// buffer on windows, but on unix we can just write the whole buffer all
// at once.
- let max_size = if cfg!(windows) {64 * 1024} else {uint::MAX};
+ //
+    // For reference, this problem appears to have been encountered by
+    // others [1] [2]. We choose 8KB simply because
+ // libuv does the same.
+ //
+ // [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
+ // [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
+ let max_size = if cfg!(windows) {8192} else {uint::MAX};
for chunk in buf.chunks(max_size) {
try!(match self.inner {
TTY(ref mut tty) => tty.write(chunk),
use io::{fs, IoResult};
use io;
-use iter::range;
use libc;
use ops::Drop;
use option::{Option, None, Some};
/// will have the suffix `suffix`. The directory will be automatically
/// deleted once the returned wrapper is destroyed.
///
- /// If no directory can be created, None is returned.
- pub fn new_in(tmpdir: &Path, suffix: &str) -> Option<TempDir> {
+ /// If no directory can be created, `Err` is returned.
+ pub fn new_in(tmpdir: &Path, suffix: &str) -> IoResult<TempDir> {
if !tmpdir.is_absolute() {
return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
}
static mut CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
- for _ in range(0u, 1000) {
+ let mut attempts = 0u;
+ loop {
let filename =
format!("rs-{}-{}-{}",
unsafe { libc::getpid() },
suffix);
let p = tmpdir.join(filename);
match fs::mkdir(&p, io::UserRWX) {
- Err(..) => {}
- Ok(()) => return Some(TempDir { path: Some(p), disarmed: false })
+ Err(error) => {
+ if attempts >= 1000 {
+ return Err(error)
+ }
+ attempts += 1;
+ }
+ Ok(()) => return Ok(TempDir { path: Some(p), disarmed: false })
}
}
- None
}
/// Attempts to make a temporary directory inside of `os::tmpdir()` whose
/// name will have the suffix `suffix`. The directory will be automatically
/// deleted once the returned wrapper is destroyed.
///
- /// If no directory can be created, None is returned.
- pub fn new(suffix: &str) -> Option<TempDir> {
+ /// If no directory can be created, `Err` is returned.
+ pub fn new(suffix: &str) -> IoResult<TempDir> {
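+        // Sketch of typical use of the new `IoResult`-based API (the name
+        // "scratch" is illustrative); the directory is deleted on drop:
+        //
+        //     let tmp = TempDir::new("scratch").ok().expect("couldn't create temp dir");
+        //     // ... work inside tmp.path() ...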
TempDir::new_in(&os::tmpdir(), suffix)
}
InlineNever,
}
-/// True if something like #[inline] is found in the list of attrs.
+/// Determine what `#[inline]` attribute is present in `attrs`, if any.
pub fn find_inline_attr(attrs: &[Attribute]) -> InlineAttr {
// FIXME (#2809)---validate the usage of #[inline] and #[inline]
attrs.iter().fold(InlineNone, |ia,attr| {
})
}
+/// True if `#[inline]` or `#[inline(always)]` is present in `attrs`.
+pub fn requests_inline(attrs: &[Attribute]) -> bool {
+ match find_inline_attr(attrs) {
+ InlineHint | InlineAlways => true,
+ InlineNone | InlineNever => false,
+ }
+}
+
/// Tests if any `cfg(...)` meta items in `metas` match `cfg`. e.g.
///
/// test_cfg(`[foo="a", bar]`, `[cfg(foo), cfg(bar)]`) == true
if len == 0 {
OwnedSlice::empty()
} else {
+ // drop excess capacity to avoid breaking sized deallocation
+ v.shrink_to_fit();
+
let p = v.as_mut_ptr();
// we own the allocation now
- unsafe {mem::forget(v)}
+ unsafe { mem::forget(v) }
OwnedSlice { data: p, len: len }
}
let span = self.span;
self.span_warn(span,
format!("this extern crate syntax is deprecated. \
- Use: extern create \"{}\" as {};",
- the_ident.as_str(), path.ref0().get() ).as_slice()
+ Use: extern crate \"{}\" as {};",
+ path.ref0().get(), the_ident.as_str() ).as_slice()
);
Some(path)
} else {None};
#[test]
pub fn ratchet_test() {
- let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
+ let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
let pth = dpth.path().join("ratchet.json");
let mut m1 = MetricMap::new();
-Subproject commit 90a314162053a0c51a50a1c603c9203bef241e0d
+Subproject commit e9d037419441d51ccb0f41aacbc64080b0c6e81b
}
#endif
-size_t
-#if defined(__WIN32__)
-rust_list_dir_wfd_size() {
- return sizeof(WIN32_FIND_DATAW);
-}
-#else
-rust_list_dir_wfd_size() {
- return 0;
-}
-#endif
-
-void*
-#if defined(__WIN32__)
-rust_list_dir_wfd_fp_buf(WIN32_FIND_DATAW* wfd) {
- if(wfd == NULL) {
- return 0;
- }
- else {
- return wfd->cFileName;
- }
-}
-#else
-rust_list_dir_wfd_fp_buf(void* wfd) {
- return 0;
-}
-#endif
-
typedef struct {
int32_t tm_sec;
int32_t tm_min;
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2014-08-05
+2014-09-08
+S 2014-09-05 67b97ab
+ freebsd-x86_64 5ed208394cb2a378ddfaa005b6298d2f142ad47f
+ linux-i386 d90866947bfa09738cf8540d17a8eedc70988fcc
+ linux-x86_64 52955b8f7a3b1bf664345060f421101979ced9f2
+ macos-i386 2a38d39afa94ad6d274464ee4e82b1b98c2b3a11
+ macos-x86_64 51df6e27c7d0776f83023e30a976525934ddb93f
+ winnt-i386 3b0bc6d5c1435f22a3782ae25acd19bc27b2cff4
+
S 2014-08-29 6025926
freebsd-x86_64 285330b798eefcc929fc94c9d0604b6172ce3309
linux-i386 5b57ab2dc32952dc78551a955f3c1746b2d915a3
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[no_mangle]
+pub extern "C" fn foo() -> uint {
+ 1234
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline]
+pub fn cci_fn() -> uint {
+ 1200
+}
+
+#[inline]
+pub static CCI_STATIC: uint = 34;
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3 --crate-type=rlib,dylib
+
+pub mod a {
+ pub fn one() -> uint {
+ 1
+ }
+}
+
+pub mod b {
+ pub fn two() -> uint {
+ 2
+ }
+}
+
+pub mod c {
+ use a::one;
+ use b::two;
+ pub fn three() -> uint {
+ one() + two()
+ }
+}
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-use std::cmp::max;
+use std::{cmp, iter, mem};
+use std::sync::Future;
-fn fact(n: uint) -> uint {
- range(1, n + 1).fold(1, |accu, i| accu * i)
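+// Rotates the slice left by one position: [a, b, c] becomes [b, c, a].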
+fn rotate(x: &mut [i32]) {
+ let mut prev = x[0];
+ for place in x.mut_iter().rev() {
+ prev = mem::replace(place, prev)
+ }
}
-fn fannkuch(n: uint, i: uint) -> (int, int) {
- let mut perm = Vec::from_fn(n, |e| ((n + e - i) % n + 1) as i32);
- let mut tperm = perm.clone();
- let mut count = Vec::from_elem(n, 0u);
- let mut perm_count = 0i;
- let mut checksum = 0;
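+// Advances `perm` to its successor in the enumeration order used here,
+// updating the per-position rotation counters in `count`.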
+fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
+ for i in range(1, perm.len()) {
+ rotate(perm.mut_slice_to(i + 1));
+ let count_i = &mut count[i];
+ if *count_i >= i as i32 {
+ *count_i = 0;
+ } else {
+ *count_i += 1;
+ break
+ }
+ }
+}
+
+struct P {
+ p: [i32, .. 16],
+}
+
+struct Perm {
+ cnt: [i32, .. 16],
+ fact: [u32, .. 16],
+ n: u32,
+ permcount: u32,
+ perm: P,
+}
+
+impl Perm {
+ fn new(n: u32) -> Perm {
+ let mut fact = [1, .. 16];
+ for i in range(1, n as uint + 1) {
+ fact[i] = fact[i - 1] * i as u32;
+ }
+ Perm {
+ cnt: [0, .. 16],
+ fact: fact,
+ n: n,
+ permcount: 0,
+ perm: P { p: [0, .. 16 ] }
+ }
+ }
+
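+    // Reconstructs the `idx`-th permutation directly via the factorial
+    // number system, so each worker can start at its own offset without
+    // stepping through all earlier permutations.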
+ fn get(&mut self, mut idx: i32) -> P {
+ let mut pp = [0u8, .. 16];
+ self.permcount = idx as u32;
+ for (i, place) in self.perm.p.mut_iter().enumerate() {
+ *place = i as i32 + 1;
+ }
- for countdown in range(1, fact(n - 1) + 1).rev() {
- for i in range(1, n) {
- let perm0 = *perm.get(0);
- for j in range(0, i) {
- *perm.get_mut(j) = *perm.get(j + 1);
+ for i in range(1, self.n as uint).rev() {
+ let d = idx / self.fact[i] as i32;
+ self.cnt[i] = d;
+ idx %= self.fact[i] as i32;
+ for (place, val) in pp.mut_iter().zip(self.perm.p.slice_to(i + 1).iter()) {
+ *place = (*val) as u8
}
- *perm.get_mut(i) = perm0;
-
- let count_i = count.get_mut(i);
- if *count_i >= i {
- *count_i = 0;
- } else {
- *count_i += 1;
- break;
+
+ let d = d as uint;
+ for j in range(0, i + 1) {
+ self.perm.p[j] = if j + d <= i {pp[j + d]} else {pp[j+d-i-1]} as i32;
}
}
- tperm.clone_from(&perm);
- let mut flips_count = 0;
- loop {
- let k = *tperm.get(0);
- if k == 1 { break; }
- tperm.mut_slice_to(k as uint).reverse();
- flips_count += 1;
+ self.perm
+ }
+
+ fn count(&self) -> u32 { self.permcount }
+ fn max(&self) -> u32 { self.fact[self.n as uint] }
+
+ fn next(&mut self) -> P {
+ next_permutation(self.perm.p, self.cnt);
+ self.permcount += 1;
+
+ self.perm
+ }
+}
+
+
+fn reverse(tperm: &mut [i32], k: uint) {
+ tperm.mut_slice_to(k).reverse()
+}
+
+fn work(mut perm: Perm, n: uint, max: uint) -> (i32, i32) {
+ let mut checksum = 0;
+ let mut maxflips = 0;
+
+ let mut p = perm.get(n as i32);
+
+ while perm.count() < max as u32 {
+ let mut flips = 0;
+
+ while p.p[0] != 1 {
+ let k = p.p[0] as uint;
+ reverse(p.p, k);
+ flips += 1;
}
- perm_count = max(perm_count, flips_count);
- checksum += if countdown & 1 == 1 {flips_count} else {-flips_count}
+
+ checksum += if perm.count() % 2 == 0 {flips} else {-flips};
+ maxflips = cmp::max(maxflips, flips);
+
+ p = perm.next();
}
- (checksum, perm_count)
+
+ (checksum, maxflips)
}
-fn main() {
- let n = std::os::args().as_slice()
- .get(1)
- .and_then(|arg| from_str(arg.as_slice()))
- .unwrap_or(2u);
-
- let (tx, rx) = channel();
- for i in range(0, n) {
- let tx = tx.clone();
- spawn(proc() tx.send(fannkuch(n, i)));
+fn fannkuch(n: i32) -> (i32, i32) {
+ let perm = Perm::new(n as u32);
+
+ let N = 4;
+ let mut futures = vec![];
+ let k = perm.max() / N;
+
+ for (i, j) in range(0, N).zip(iter::count(0, k)) {
+ let max = cmp::min(j+k, perm.max());
+
+ futures.push(Future::spawn(proc() {
+ work(perm, j as uint, max as uint)
+ }))
}
- drop(tx);
let mut checksum = 0;
- let mut perm = 0;
- for (cur_cks, cur_perm) in rx.iter() {
- checksum += cur_cks;
- perm = max(perm, cur_perm);
+ let mut maxflips = 0;
+ for fut in futures.mut_iter() {
+ let (cs, mf) = fut.get();
+ checksum += cs;
+ maxflips = cmp::max(maxflips, mf);
}
- println!("{}\nPfannkuchen({}) = {}", checksum, n, perm);
+ (checksum, maxflips)
+}
+
+fn main() {
+ let n = std::os::args().as_slice()
+ .get(1)
+ .and_then(|arg| from_str(arg.as_slice()))
+ .unwrap_or(2i32);
+
+ let (checksum, maxflips) = fannkuch(n);
+ println!("{}\nPfannkuchen({}) = {}", checksum, n, maxflips);
}
let f5: &mut Fat<ToBar> = &mut Fat { f1: 5, f2: "some str", ptr: Bar1 {f :42} };
let z: Box<ToBar> = box Bar1 {f: 36};
f5.ptr = *z; //~ ERROR dynamically sized type on lhs of assignment
+ //~^ ERROR E0161
}
let g: &Fat<[int]> = &f;
let h: &Fat<Fat<[int]>> = &Fat { ptr: *g };
//~^ ERROR trying to initialise a dynamically sized struct
+ //~^^ ERROR E0161
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that dynamically sized rvalues are forbidden
+
+pub fn main() {
+ let _x: Box<str> = box *"hello world";
+ //~^ ERROR E0161
+
+ let array: &[int] = &[1, 2, 3];
+ let _x: Box<[int]> = box *array;
+ //~^ ERROR E0161
+}
fn new_struct(r: A+'static) -> Struct {
//~^ ERROR variable `r` has dynamically sized type
Struct { r: r } //~ ERROR trying to initialise a dynamically sized struct
+ //~^ ERROR E0161
+ //~^^ ERROR E0161
}
trait Curve {}
let x = -2147483649_i32; //~ error: literal out of range for its type
let x = 9223372036854775808_i64; //~ error: literal out of range for its type
+ let x = -9223372036854775808_i64; // should be OK
let x = 18446744073709551615_i64; //~ error: literal out of range for its type
let x = -3.40282348e+38_f32; //~ error: literal out of range for its type
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure we give a sane error message when the user requests LTO with a
+// library built with -C codegen-units > 1.
+
+// aux-build:sepcomp_lib.rs
+// compile-flags: -Z lto
+// error-pattern:missing compressed bytecode
+// no-prefer-dynamic
+
+extern crate sepcomp_lib;
+use sepcomp_lib::a::one;
+use sepcomp_lib::b::two;
+use sepcomp_lib::c::three;
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(two(), 2);
+ assert_eq!(three(), 3);
+}
//~^ ERROR failed to find an implementation
//~^^ ERROR instantiating a type parameter with an incompatible type
-impl<T> Foo<T> {
+impl<T> Foo<T> { //~ ERROR failed to find an implementation
+//~^ ERROR instantiating a type parameter with an incompatible type
fn uhoh() {}
}
// Test some tuples.
fn f9<Sized? X>(x1: Box<S<X>>, x2: Box<E<X>>) {
- f5(&(*x1, 34i)); //~ERROR instantiating a type parameter with an incompatible type `(S<X>,int)`,
- f5(&(32i, *x2)); //~ERROR instantiating a type parameter with an incompatible type `(int,E<X>)`,
+ f5(&(*x1, 34i)); //~ERROR E0161
+ //~^ ERROR instantiating a type parameter with an incompatible type
+ f5(&(32i, *x2)); //~ERROR E0161
+ //~^ ERROR instantiating a type parameter with an incompatible type
}
-// I would like these to fail eventually.
-/*
// impl - bounded
trait T1<Z: T> {
}
struct S3<Sized? Y>;
-impl<Sized? X: T> T1<X> for S3<X> { //ERROR instantiating a type parameter with an incompatible type
+impl<Sized? X: T> T1<X> for S3<X> { //~ ERROR instantiating a type parameter with an incompatible
}
// impl - unbounded
trait T2<Z> {
}
-impl<Sized? X> T2<X> for S3<X> { //ERROR instantiating a type parameter with an incompatible type `X
-*/
+impl<Sized? X> T2<X> for S3<X> { //~ ERROR instantiating a type parameter with an incompatible type
+}
// impl - struct
trait T3<Sized? Z> {
struct S4<Y>;
impl<Sized? X> T3<X> for S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
}
+impl<Sized? X> S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
+}
pub fn main() {
let y: X = *x1; //~ERROR variable `y` has dynamically sized type `X`
let y = *x2; //~ERROR variable `y` has dynamically sized type `X`
let (y, z) = (*x3, 4i); //~ERROR variable `y` has dynamically sized type `X`
+ //~^ ERROR E0161
}
fn f4<Sized? X: T>(x1: Box<X>, x2: Box<X>, x3: Box<X>) {
let y: X = *x1; //~ERROR variable `y` has dynamically sized type `X`
let y = *x2; //~ERROR variable `y` has dynamically sized type `X`
let (y, z) = (*x3, 4i); //~ERROR variable `y` has dynamically sized type `X`
+ //~^ ERROR E0161
}
fn g1<Sized? X>(x: X) {} //~ERROR variable `x` has dynamically sized type `X`
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:capacity overflow
+
+use std::collections::hashmap::HashMap;
+use std::uint;
+use std::mem::size_of;
+
+fn main() {
+ let threshold = uint::MAX / size_of::<(u64, u64, u64)>();
+ let mut h = HashMap::<u64, u64>::with_capacity(threshold + 100);
+ h.insert(0, 0);
+}
--- /dev/null
+-include ../tools.mk
+
+# Verifies that the -L arguments given to the linker are in the same order
+# as the -L arguments on the rustc command line.
+
+CORRECT_DIR=$(TMPDIR)/correct
+WRONG_DIR=$(TMPDIR)/wrong
+
+all: $(TMPDIR)/libcorrect.a $(TMPDIR)/libwrong.a
+ mkdir -p $(CORRECT_DIR) $(WRONG_DIR)
+ mv $(TMPDIR)/libcorrect.a $(CORRECT_DIR)/libfoo.a
+ mv $(TMPDIR)/libwrong.a $(WRONG_DIR)/libfoo.a
+ $(RUSTC) main.rs -o $(TMPDIR)/should_succeed -L $(CORRECT_DIR) -L $(WRONG_DIR)
+ $(call RUN,should_succeed)
+ $(RUSTC) main.rs -o $(TMPDIR)/should_fail -L $(WRONG_DIR) -L $(CORRECT_DIR)
+ $(call FAIL,should_fail)
+
--- /dev/null
+int should_return_one() { return 1; }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate libc;
+
+#[link(name="foo")]
+extern {
+ fn should_return_one() -> libc::c_int;
+}
+
+fn main() {
+ let result = unsafe {
+ should_return_one()
+ };
+
+ if result != 1 {
+ std::os::set_exit_status(255);
+ }
+}
--- /dev/null
+int should_return_one() { return 0; }
$(call REMOVE_RLIBS,bar)
$(call REMOVE_DYLIBS,bar)
rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
+ # Check that $(TMPDIR) is empty.
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=bin
rm $(TMPDIR)/$(call BIN,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=asm,ir,bc,obj,link
rm $(TMPDIR)/bar.ll
rm $(TMPDIR)/bar.bc
rm $(TMPDIR)/bar.s
rm $(TMPDIR)/bar.o
rm $(TMPDIR)/$(call BIN,bar)
- $(RUSTC) foo.rs --emit=asm,ir,bc,obj,link --crate-type=staticlib
- rm $(TMPDIR)/bar.ll
- rm $(TMPDIR)/bar.s
- rm $(TMPDIR)/bar.o
- rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=asm -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=bc -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=ir -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=obj -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=link -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=rlib -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=dylib -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo) # FIXME 13794
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=staticlib -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=bin -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
+ $(RUSTC) foo.rs --emit=asm,ir,bc,obj,link --crate-type=staticlib
+ rm $(TMPDIR)/bar.ll
+ rm $(TMPDIR)/bar.s
+ rm $(TMPDIR)/bar.o
+ rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
mv $(TMPDIR)/bar.bc $(TMPDIR)/foo.bc
+ # Don't check that the $(TMPDIR) is empty - we left `foo.bc` for later
+ # comparison.
+
$(RUSTC) foo.rs --emit=bc,link --crate-type=rlib
cmp $(TMPDIR)/foo.bc $(TMPDIR)/bar.bc
rm $(TMPDIR)/bar.bc
rm $(TMPDIR)/foo.bc
$(call REMOVE_RLIBS,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
--- /dev/null
+-include ../tools.mk
+
+# Check that cross-crate inlined items are inlined in all compilation units
+# that refer to them, and not in any other compilation units.
+
+all:
+ $(RUSTC) cci_lib.rs
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ .*cci_fn)" -eq "2" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c CCI_STATIC.*=.*constant)" -eq "2" ]
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "rlib"]
+
+#[inline]
+pub fn cci_fn() -> uint {
+ 1234
+}
+
+#[inline]
+pub static CCI_STATIC: uint = 2345;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate cci_lib;
+use cci_lib::{cci_fn, CCI_STATIC};
+
+fn call1() -> uint {
+ cci_fn() + CCI_STATIC
+}
+
+mod a {
+ use cci_lib::cci_fn;
+ pub fn call2() -> uint {
+ cci_fn()
+ }
+}
+
+mod b {
+ use cci_lib::CCI_STATIC;
+ pub fn call3() -> uint {
+ CCI_STATIC
+ }
+}
+
+fn main() {
+ call1();
+ a::call2();
+ b::call3();
+}
--- /dev/null
+-include ../tools.mk
+
+# Test that #[inline(always)] functions still get inlined across compilation
+# unit boundaries. Compilation should produce three IR files, with each one
+# containing a definition of the inlined function. Also, the non-#[inline]
+# function should be defined in only one compilation unit.
+
+all:
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ i32\ .*inlined)" -eq "1" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ available_externally\ i32\ .*inlined)" -eq "2" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ i32\ .*normal)" -eq "1" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c declare\ i32\ .*normal)" -eq "2" ]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline]
+fn inlined() -> u32 {
+ 1234
+}
+
+fn normal() -> u32 {
+ 2345
+}
+
+mod a {
+ pub fn f() -> u32 {
+ ::inlined() + ::normal()
+ }
+}
+
+mod b {
+ pub fn f() -> u32 {
+ ::inlined() + ::normal()
+ }
+}
+
+fn main() {
+ a::f();
+ b::f();
+}
--- /dev/null
+-include ../tools.mk
+
+# Test that separate compilation actually puts code into separate compilation
+# units. `foo.rs` defines `magic_fn` in three different modules, which should
+# wind up in three different compilation units.
+
+all:
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ .*magic_fn)" -eq "3" ]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn magic_fn() -> uint {
+ 1234
+}
+
+mod a {
+ pub fn magic_fn() -> uint {
+ 2345
+ }
+}
+
+mod b {
+ pub fn magic_fn() -> uint {
+ 3456
+ }
+}
+
+fn main() { }
unsafe {
static U_RWX: i32 = (libc::S_IRUSR | libc::S_IWUSR | libc::S_IXUSR) as i32;
- let tmpdir = TempDir::new("rename_directory").expect("rename_directory failed");
+ let tmpdir = TempDir::new("rename_directory").ok().expect("rename_directory failed");
let tmpdir = tmpdir.path();
let old_path = tmpdir.join_many(["foo", "bar", "baz"]);
fs::mkdir_recursive(&old_path, io::UserRWX);
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+// aux-build:sepcomp_cci_lib.rs
+
+// Test accessing cross-crate inlined items from multiple compilation units.
+
+extern crate sepcomp_cci_lib;
+use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+
+fn call1() -> uint {
+ cci_fn() + CCI_STATIC
+}
+
+mod a {
+ use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+ pub fn call2() -> uint {
+ cci_fn() + CCI_STATIC
+ }
+}
+
+mod b {
+ use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+ pub fn call3() -> uint {
+ cci_fn() + CCI_STATIC
+ }
+}
+
+fn main() {
+ assert_eq!(call1(), 1234);
+ assert_eq!(a::call2(), 1234);
+ assert_eq!(b::call3(), 1234);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+// aux-build:sepcomp-extern-lib.rs
+
+// Test accessing external items from multiple compilation units.
+
+#[link(name = "sepcomp-extern-lib")]
+extern {
+ #[allow(ctypes)]
+ fn foo() -> uint;
+}
+
+fn call1() -> uint {
+ unsafe { foo() }
+}
+
+mod a {
+ pub fn call2() -> uint {
+ unsafe { ::foo() }
+ }
+}
+
+mod b {
+ pub fn call3() -> uint {
+ unsafe { ::foo() }
+ }
+}
+
+fn main() {
+ assert_eq!(call1(), 1234);
+ assert_eq!(a::call2(), 1234);
+ assert_eq!(b::call3(), 1234);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test references to items that haven't been translated yet.
+
+// Generate some code in the first compilation unit before declaring any
+// modules. This ensures that the first module doesn't go into the same
+// compilation unit as the top-level module.
+fn pad() -> uint { 0 }
+
+mod b {
+ pub fn three() -> uint {
+ ::one() + ::a::two()
+ }
+}
+
+mod a {
+ pub fn two() -> uint {
+ ::one() + ::one()
+ }
+}
+
+fn one() -> uint {
+ 1
+}
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(a::two(), 2);
+ assert_eq!(b::three(), 3);
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test basic separate compilation functionality. The functions should be able
+// to call each other even though they will be placed in different compilation
+// units.
+
+// Generate some code in the first compilation unit before declaring any
+// modules. This ensures that the first module doesn't go into the same
+// compilation unit as the top-level module.
+fn one() -> uint { 1 }
+
+mod a {
+ pub fn two() -> uint {
+ ::one() + ::one()
+ }
+}
+
+mod b {
+ pub fn three() -> uint {
+ ::one() + ::a::two()
+ }
+}
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(a::two(), 2);
+ assert_eq!(b::three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:sepcomp_lib.rs
+
+// Test linking against a library built with -C codegen-units > 1
+
+extern crate sepcomp_lib;
+use sepcomp_lib::a::one;
+use sepcomp_lib::b::two;
+use sepcomp_lib::c::three;
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(two(), 2);
+ assert_eq!(three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test references to static items across compilation units.
+
+fn pad() -> uint { 0 }
+
+static ONE: uint = 1;
+
+mod b {
+ // Separate compilation always switches to the LLVM module with the fewest
+ // instructions. Make sure we have some instructions in this module so
+ // that `a` and `b` don't go into the same compilation unit.
+ fn pad() -> uint { 0 }
+
+ pub static THREE: uint = ::ONE + ::a::TWO;
+}
+
+mod a {
+ fn pad() -> uint { 0 }
+
+ pub static TWO: uint = ::ONE + ::ONE;
+}
+
+fn main() {
+ assert_eq!(ONE, 1);
+ assert_eq!(a::TWO, 2);
+ assert_eq!(b::THREE, 3);
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test unwinding through multiple compilation units.
+
+// According to acrichto, in the distant past `ld -r` (which is used during
+// linking when codegen-units > 1) was known to produce object files with
+// damaged unwinding tables. This may be related to GNU binutils bug #6893
+// ("Partial linking results in corrupt .eh_frame_hdr"), but I'm not certain.
+// In any case, this test should let us know if enabling parallel codegen ever
+// breaks unwinding.
+
+fn pad() -> uint { 0 }
+
+mod a {
+ pub fn f() {
+ fail!();
+ }
+}
+
+mod b {
+ pub fn g() {
+ ::a::f();
+ }
+}
+
+fn main() {
+ std::task::try(proc() { ::b::g() }).unwrap_err();
+}
pub fn test_rmdir_recursive_ok() {
let rwx = io::UserRWX;
- let tmpdir = TempDir::new("test").expect("test_rmdir_recursive_ok: \
- couldn't create temp dir");
+ let tmpdir = TempDir::new("test").ok().expect("test_rmdir_recursive_ok: \
+ couldn't create temp dir");
let tmpdir = tmpdir.path();
let root = tmpdir.join("foo");
}
fn in_tmpdir(f: ||) {
- let tmpdir = TempDir::new("test").expect("can't make tmpdir");
+ let tmpdir = TempDir::new("test").ok().expect("can't make tmpdir");
assert!(os::change_dir(tmpdir.path()));
f();