CFG_CPUTYPE=aarch64
;;
+ powerpc)
+ CFG_CPUTYPE=powerpc
+ ;;
+
x86_64 | x86-64 | x64 | amd64)
CFG_CPUTYPE=x86_64
;;
make_dir $t/rt/jemalloc
for i in \
isaac sync test \
- arch/i386 arch/x86_64 arch/arm arch/aarch64 arch/mips
+ arch/i386 arch/x86_64 arch/arm arch/aarch64 arch/mips arch/powerpc
do
make_dir $t/rt/stage$s/$i
done
msg "configuring LLVM for $gnu_t"
- LLVM_TARGETS="--enable-targets=x86,x86_64,arm,aarch64,mips"
+ LLVM_TARGETS="--enable-targets=x86,x86_64,arm,aarch64,mips,powerpc"
LLVM_BUILD="--build=$gnu_t"
LLVM_HOST="--host=$gnu_t"
LLVM_TARGET="--target=$gnu_t"
--- /dev/null
+# powerpc-unknown-linux-gnu configuration
+# New cross-compilation stanza; mirrors the other 32-bit Linux targets in
+# this file (same variable set, `.so` shared libs, GNU toolchain flags).
+CROSS_PREFIX_powerpc-unknown-linux-gnu=powerpc-linux-gnu-
+CC_powerpc-unknown-linux-gnu=$(CC)
+CXX_powerpc-unknown-linux-gnu=$(CXX)
+CPP_powerpc-unknown-linux-gnu=$(CPP)
+AR_powerpc-unknown-linux-gnu=$(AR)
+CFG_LIB_NAME_powerpc-unknown-linux-gnu=lib$(1).so
+CFG_STATIC_LIB_NAME_powerpc-unknown-linux-gnu=lib$(1).a
+CFG_LIB_GLOB_powerpc-unknown-linux-gnu=lib$(1)-*.so
+# NOTE(review): the .dylib.dSYM glob is a Mac-ism; harmless on Linux, but
+# confirm it matches what the other Linux stanzas in this file declare.
+CFG_LIB_DSYM_GLOB_powerpc-unknown-linux-gnu=lib$(1)-*.dylib.dSYM
+# -m32 forces 32-bit code generation in case the toolchain defaults to 64-bit.
+CFG_CFLAGS_powerpc-unknown-linux-gnu := -m32 $(CFLAGS)
+CFG_GCCISH_CFLAGS_powerpc-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32 $(CFLAGS)
+CFG_GCCISH_CXXFLAGS_powerpc-unknown-linux-gnu := -fno-rtti $(CXXFLAGS)
+CFG_GCCISH_LINK_FLAGS_powerpc-unknown-linux-gnu := -shared -fPIC -ldl -pthread  -lrt -g -m32
+CFG_GCCISH_DEF_FLAG_powerpc-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list=
+CFG_GCCISH_PRE_LIB_FLAGS_powerpc-unknown-linux-gnu := -Wl,-whole-archive
+CFG_GCCISH_POST_LIB_FLAGS_powerpc-unknown-linux-gnu := -Wl,-no-whole-archive
+CFG_DEF_SUFFIX_powerpc-unknown-linux-gnu := .linux.def
+CFG_LLC_FLAGS_powerpc-unknown-linux-gnu :=
+CFG_INSTALL_NAME_powerpc-unknown-linux-gnu =
+CFG_EXE_SUFFIX_powerpc-unknown-linux-gnu =
+CFG_WINDOWSY_powerpc-unknown-linux-gnu :=
+CFG_UNIXY_powerpc-unknown-linux-gnu := 1
+CFG_PATH_MUNGE_powerpc-unknown-linux-gnu := true
+CFG_LDPATH_powerpc-unknown-linux-gnu :=
+# CFG_RUN is empty-prefixed: binaries run directly (no emulator wrapper).
+CFG_RUN_powerpc-unknown-linux-gnu=$(2)
+CFG_RUN_TARG_powerpc-unknown-linux-gnu=$(call CFG_RUN_powerpc-unknown-linux-gnu,,$(2))
+CFG_GNU_TRIPLE_powerpc-unknown-linux-gnu := powerpc-unknown-linux-gnu
######################################################################
# FIXME: x86-ism
-LLVM_COMPONENTS=x86 arm aarch64 mips ipo bitreader bitwriter linker asmparser mcjit \
+LLVM_COMPONENTS=x86 arm aarch64 mips powerpc ipo bitreader bitwriter linker asmparser mcjit \
interpreter instrumentation
# Only build these LLVM tools
$$(CFG_GCCISH_DEF_FLAG_$(1))$$(3) $$(2) \
$$(call CFG_INSTALL_NAME_$(1),$$(4))
- ifeq ($$(findstring $(HOST_$(1)),arm aarch64 mips mipsel),)
+ ifeq ($$(findstring $(HOST_$(1)),arm aarch64 mips mipsel powerpc),)
# We're using llvm-mc as our assembler because it supports
# .cfi pseudo-ops on mac
-o=$$(1)
else
- # For the ARM, AARCH64 and MIPS crosses, use the toolchain assembler
+ # For the ARM, AARCH64, MIPS and POWER crosses, use the toolchain assembler
# FIXME: We should be able to use the LLVM assembler
CFG_ASSEMBLE_$(1)=$$(CC_$(1)) $$(CFG_GCCISH_CFLAGS_$(1)) \
$$(CFG_DEPEND_FLAGS) $$(2) -c -o $$(1)
The following configurations must be defined by the implementation:
* `target_arch = "..."`. Target CPU architecture, such as `"x86"`, `"x86_64"`
- `"mips"`, `"arm"`, or `"aarch64"`.
+ `"mips"`, `"powerpc"`, `"arm"`, or `"aarch64"`.
* `target_endian = "..."`. Endianness of the target CPU, either `"little"` or
`"big"`.
* `target_family = "..."`. Operating system family of the target, e. g.
not(feature = "external_crate"),
any(target_arch = "arm",
target_arch = "mips",
- target_arch = "mipsel")))]
+ target_arch = "mipsel",
+ target_arch = "powerpc")))]
const MIN_ALIGN: uint = 8;
#[cfg(all(not(feature = "external_funcs"),
not(feature = "external_crate"),
#[test]
fn test_slice_from() {
let vec: &[int] = &[1, 2, 3, 4];
- assert_eq!(&vec[0..], vec);
+ assert_eq!(&vec[], vec);
let b: &[int] = &[3, 4];
assert_eq!(&vec[2..], b);
let b: &[int] = &[];
#[test]
fn test_slice_to() {
let vec: &[int] = &[1, 2, 3, 4];
- assert_eq!(&vec[0..4], vec);
+ assert_eq!(&vec[..4], vec);
let b: &[int] = &[1, 2];
- assert_eq!(&vec[0..2], b);
+ assert_eq!(&vec[..2], b);
let b: &[int] = &[];
- assert_eq!(&vec[0..0], b);
+ assert_eq!(&vec[..0], b);
}
let (left, right) = values.split_at_mut(2);
{
let left: &[_] = left;
- assert!(left[0..left.len()] == [1, 2][]);
+ assert!(left[..left.len()] == [1, 2][]);
}
for p in left.iter_mut() {
*p += 1;
{
let right: &[_] = right;
- assert!(right[0..right.len()] == [3, 4, 5][]);
+ assert!(right[..right.len()] == [3, 4, 5][]);
}
for p in right.iter_mut() {
*p += 2;
/// out of bounds.
///
/// See also `slice`, `slice_from` and `slice_chars`.
- #[unstable = "use slice notation [0..a] instead"]
+ #[unstable = "use slice notation [..a] instead"]
fn slice_to(&self, end: uint) -> &str {
core_str::StrExt::slice_to(&self[], end)
}
if i > 0 {
unsafe {
- res.as_mut_vec().push_all(&v[0..i])
+ res.as_mut_vec().push_all(&v[..i])
};
}
//! # }
//! ```
-#![unstable]
+#![deprecated = "It is unclear if this module is more robust than implementing \
+ Drop on a custom type, and this module is being removed with no \
+ replacement. Use a custom Drop implementation to regain existing \
+ functionality."]
+#![allow(deprecated)]
use ops::{Drop, FnMut, FnOnce};
}
}
- f(unsafe { str::from_utf8_unchecked(&buf[0..end]) })
+ f(unsafe { str::from_utf8_unchecked(&buf[..end]) })
}
for c in sign.into_iter() {
let mut b = [0; 4];
let n = c.encode_utf8(&mut b).unwrap_or(0);
- let b = unsafe { str::from_utf8_unchecked(&b[0..n]) };
+ let b = unsafe { str::from_utf8_unchecked(&b[..n]) };
try!(f.buf.write_str(b));
}
if prefixed { f.buf.write_str(prefix) }
fn fmt(&self, f: &mut Formatter) -> Result {
let mut utf8 = [0u8; 4];
let amt = self.encode_utf8(&mut utf8).unwrap_or(0);
- let s: &str = unsafe { mem::transmute(&utf8[0..amt]) };
+ let s: &str = unsafe { mem::transmute(&utf8[..amt]) };
String::fmt(s, f)
}
}
#[inline]
fn split_at(&self, mid: uint) -> (&[T], &[T]) {
- (&self[0..mid], &self[mid..])
+ (&self[..mid], &self[mid..])
}
#[inline]
#[inline]
fn init(&self) -> &[T] {
- &self[0..(self.len() - 1)]
+ &self[..(self.len() - 1)]
}
#[inline]
#[inline]
fn starts_with(&self, needle: &[T]) -> bool where T: PartialEq {
let n = needle.len();
- self.len() >= n && needle == &self[0..n]
+ self.len() >= n && needle == &self[..n]
}
#[inline]
match self.v.iter().position(|x| (self.pred)(x)) {
None => self.finish(),
Some(idx) => {
- let ret = Some(&self.v[0..idx]);
+ let ret = Some(&self.v[..idx]);
self.v = &self.v[(idx + 1)..];
ret
}
None => self.finish(),
Some(idx) => {
let ret = Some(&self.v[(idx + 1)..]);
- self.v = &self.v[0..idx];
+ self.v = &self.v[..idx];
ret
}
}
if self.size > self.v.len() {
None
} else {
- let ret = Some(&self.v[0..self.size]);
+ let ret = Some(&self.v[..self.size]);
self.v = &self.v[1..];
ret
}
//
// What's going on is we have some critical factorization (u, v) of the
// needle, and we want to determine whether u is a suffix of
- // &v[0..period]. If it is, we use "Algorithm CP1". Otherwise we use
+ // &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
// "Algorithm CP2", which is optimized for when the period of the needle
// is large.
- if &needle[0..crit_pos] == &needle[period.. period + crit_pos] {
+ if &needle[..crit_pos] == &needle[period.. period + crit_pos] {
TwoWaySearcher {
crit_pos: crit_pos,
period: period,
#[inline]
fn starts_with(&self, needle: &str) -> bool {
let n = needle.len();
- self.len() >= n && needle.as_bytes() == &self.as_bytes()[0..n]
+ self.len() >= n && needle.as_bytes() == &self.as_bytes()[..n]
}
#[inline]
fn check(input: char, expect: &[u8]) {
let mut buf = [0u8; 4];
let n = input.encode_utf8(buf.as_mut_slice()).unwrap_or(0);
- assert_eq!(&buf[0..n], expect);
+ assert_eq!(&buf[..n], expect);
}
check('x', &[0x78]);
fn check(input: char, expect: &[u16]) {
let mut buf = [0u16; 2];
let n = input.encode_utf16(buf.as_mut_slice()).unwrap_or(0);
- assert_eq!(&buf[0..n], expect);
+ assert_eq!(&buf[..n], expect);
}
check('x', &[0x0078]);
let v: &[_] = &[0i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
assert_eq!(v[..4].iter().count(), 4);
assert_eq!(v[..10].iter().count(), 10);
- assert_eq!(v[0..0].iter().count(), 0);
+ assert_eq!(v[..0].iter().count(), 0);
}
#[test]
let v: &[_] = &[0i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
assert_eq!(v[..4].iter().map(|&x| x).sum(), 6);
assert_eq!(v.iter().map(|&x| x).sum(), 55);
- assert_eq!(v[0..0].iter().map(|&x| x).sum(), 0);
+ assert_eq!(v[..0].iter().map(|&x| x).sum(), 0);
}
#[test]
fn test_iterator_product() {
let v: &[_] = &[0i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- assert_eq!(v[0..4].iter().map(|&x| x).product(), 0);
+ assert_eq!(v[..4].iter().map(|&x| x).product(), 0);
assert_eq!(v[1..5].iter().map(|&x| x).product(), 24);
- assert_eq!(v[0..0].iter().map(|&x| x).product(), 1);
+ assert_eq!(v[..0].iter().map(|&x| x).product(), 1);
}
#[test]
fn test_iterator_max() {
let v: &[_] = &[0i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- assert_eq!(v[0..4].iter().map(|&x| x).max(), Some(3));
+ assert_eq!(v[..4].iter().map(|&x| x).max(), Some(3));
assert_eq!(v.iter().map(|&x| x).max(), Some(10));
- assert_eq!(v[0..0].iter().map(|&x| x).max(), None);
+ assert_eq!(v[..0].iter().map(|&x| x).max(), None);
}
#[test]
fn test_iterator_min() {
let v: &[_] = &[0i, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- assert_eq!(v[0..4].iter().map(|&x| x).min(), Some(0));
+ assert_eq!(v[..4].iter().map(|&x| x).min(), Some(0));
assert_eq!(v.iter().map(|&x| x).min(), Some(0));
- assert_eq!(v[0..0].iter().map(|&x| x).min(), None);
+ assert_eq!(v[..0].iter().map(|&x| x).min(), None);
}
#[test]
assert!(v.iter().all(|&x| x < 10));
assert!(!v.iter().all(|&x| x % 2 == 0));
assert!(!v.iter().all(|&x| x > 100));
- assert!(v[0..0].iter().all(|_| panic!()));
+ assert!(v[..0].iter().all(|_| panic!()));
}
#[test]
assert!(v.iter().any(|&x| x < 10));
assert!(v.iter().any(|&x| x % 2 == 0));
assert!(!v.iter().any(|&x| x > 100));
- assert!(!v[0..0].iter().any(|_| panic!()));
+ assert!(!v[..0].iter().any(|_| panic!()));
}
#[test]
}
#[test]
-#[cfg(any(target_arch = "x86",
- target_arch = "arm",
- target_arch = "mips",
- target_arch = "mipsel"))]
+#[cfg(target_pointer_width = "32")]
fn size_of_32() {
assert_eq!(size_of::<uint>(), 4u);
assert_eq!(size_of::<*const uint>(), 4u);
}
#[test]
-#[cfg(any(target_arch = "x86_64",
- target_arch = "aarch64"))]
+#[cfg(target_pointer_width = "64")]
fn size_of_64() {
assert_eq!(size_of::<uint>(), 8u);
assert_eq!(size_of::<*const uint>(), 8u);
}
#[test]
-#[cfg(any(target_arch = "x86",
- target_arch = "arm",
- target_arch = "mips",
- target_arch = "mipsel"))]
+#[cfg(target_pointer_width = "32")]
fn align_of_32() {
assert_eq!(align_of::<uint>(), 4u);
assert_eq!(align_of::<*const uint>(), 4u);
}
#[test]
-#[cfg(any(target_arch = "x86_64",
- target_arch = "aarch64"))]
+#[cfg(target_pointer_width = "64")]
fn align_of_64() {
assert_eq!(align_of::<uint>(), 8u);
assert_eq!(align_of::<*const uint>(), 8u);
flags: 0,
precision: CountImplied,
width: CountImplied,
- ty: &self.input[0..0],
+ ty: &self.input[..0],
};
if !self.consume(':') { return spec }
self.cur.next();
pos
}
- Some(..) | None => { return &self.input[0..0]; }
+ Some(..) | None => { return &self.input[..0]; }
};
let mut end;
loop {
#[cfg(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
- target_arch = "mipsel"))]
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
pub mod arch {
pub mod c95 {
pub type c_char = i8;
}
#[cfg(any(target_arch = "x86",
target_arch = "mips",
- target_arch = "mipsel"))]
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
pub mod posix88 {
pub type off_t = i32;
pub type dev_t = u64;
pub __size: [u32; 9]
}
}
- #[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+ #[cfg(any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
pub mod posix01 {
use types::os::arch::c95::{c_long, c_ulong, time_t};
use types::os::arch::posix88::{gid_t, ino_t};
pub const EHWPOISON: c_int = 133;
}
- #[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+ #[cfg(any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
pub mod posix88 {
use types::os::arch::c95::c_int;
use types::common::c95::c_void;
#[cfg(all(target_os = "linux",
any(target_arch = "mips",
target_arch = "mipsel",
- target_arch = "aarch64")))]
+ target_arch = "aarch64",
+ target_arch = "powerpc")))]
pub const PTHREAD_STACK_MIN: size_t = 131072;
pub const CLOCK_REALTIME: c_int = 0;
pub const SHUT_WR: c_int = 1;
pub const SHUT_RDWR: c_int = 2;
}
- #[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+ #[cfg(any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
pub mod bsd44 {
use types::os::arch::c95::c_int;
pub const MAP_NONBLOCK : c_int = 0x010000;
pub const MAP_STACK : c_int = 0x020000;
}
- #[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
+ #[cfg(any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
pub mod extra {
use types::os::arch::c95::c_int;
/// let choices = [1i, 2, 4, 8, 16, 32];
/// let mut rng = thread_rng();
/// println!("{:?}", rng.choose(&choices));
- /// # // uncomment when slicing syntax is stable
- /// //assert_eq!(rng.choose(&choices[0..0]), None);
+ /// assert_eq!(rng.choose(&choices[..0]), None);
/// ```
fn choose<'a, T>(&mut self, values: &'a [T]) -> Option<&'a T> {
if values.is_empty() {
// there (left), and what will be appended on the end (right)
let cap = self.buf.len() - self.pos;
let (left, right) = if cap <= buf.len() {
- (&buf[0..cap], &buf[cap..])
+ (&buf[..cap], &buf[cap..])
} else {
let result: (_, &[_]) = (buf, &[]);
result
warned |= check_must_use(cx, &it.attrs[], s.span);
}
} else {
- csearch::get_item_attrs(&cx.sess().cstore, did, |attrs| {
- warned |= check_must_use(cx, &attrs[], s.span);
- });
+ let attrs = csearch::get_item_attrs(&cx.sess().cstore, did);
+ warned |= check_must_use(cx, &attrs[], s.span);
}
}
_ => {}
let mut span = e.span;
let id = match e.node {
- ast::ExprPath(..) | ast::ExprStruct(..) => {
+ ast::ExprPath(..) | ast::ExprQPath(..) | ast::ExprStruct(..) => {
match cx.tcx.def_map.borrow().get(&e.id) {
Some(&def) => def.def_id(),
None => return
decoder::get_methods_if_impl(cstore.intr.clone(), &*cdata, def.node)
}
-pub fn get_item_attrs<F>(cstore: &cstore::CStore,
- def_id: ast::DefId,
- f: F) where
- F: FnOnce(Vec<ast::Attribute>),
-{
+pub fn get_item_attrs(cstore: &cstore::CStore,
+ def_id: ast::DefId)
+ -> Vec<ast::Attribute> {
let cdata = cstore.get_crate_data(def_id.krate);
- decoder::get_item_attrs(&*cdata, def_id.node, f)
+ decoder::get_item_attrs(&*cdata, def_id.node)
}
pub fn get_struct_fields(cstore: &cstore::CStore,
ret
}
-pub fn get_item_attrs<F>(cdata: Cmd,
- orig_node_id: ast::NodeId,
- f: F) where
- F: FnOnce(Vec<ast::Attribute>),
-{
+pub fn get_item_attrs(cdata: Cmd,
+ orig_node_id: ast::NodeId)
+ -> Vec<ast::Attribute> {
// The attributes for a tuple struct are attached to the definition, not the ctor;
// we assume that someone passing in a tuple struct ctor is actually wanting to
// look at the definition
let node_id = get_tuple_struct_definition_if_ctor(cdata, orig_node_id);
let node_id = node_id.map(|x| x.node).unwrap_or(orig_node_id);
let item = lookup_item(node_id, cdata.data());
- f(get_attributes(item));
+ get_attributes(item)
}
pub fn get_struct_field_attrs(cdata: Cmd) -> HashMap<ast::NodeId, Vec<ast::Attribute>> {
debug!("filesearch: searching lib path");
let tlib_path = make_target_lib_path(self.sysroot,
- self.triple);
+ self.triple);
if !visited_dirs.contains(tlib_path.as_vec()) {
match f(&tlib_path) {
FileMatches => found = true,
ast::ExprMac(..) |
ast::ExprClosure(..) |
ast::ExprLit(..) |
- ast::ExprPath(..) => {
+ ast::ExprPath(..) |
+ ast::ExprQPath(..) => {
self.straightline(expr, pred, None::<ast::Expr>.iter())
}
}
expression");
}
}
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
match v.tcx.def_map.borrow()[e.id] {
DefStatic(..) | DefConst(..) |
DefFn(..) | DefStaticMethod(..) | DefMethod(..) |
}
};
head.map(|mut head| {
- head.push_all(&r[0..col]);
+ head.push_all(&r[..col]);
head.push_all(&r[(col + 1)..]);
head
})
"{} are not allowed to have custom pointers",
self.msg());
}
- ast::ExprPath(..) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
match ty::resolve_expr(self.tcx, e) {
def::DefStatic(..) if self.mode == InConstant => {
let msg = "constants cannot refer to other statics, \
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
- ast::ExprPath(..) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
match self.def_map.borrow().get(&e.id) {
Some(&DefStatic(def_id, _)) |
Some(&DefConst(def_id)) if
// FIXME: (#3728) we can probably do something CCI-ish
// surrounding nonlocal constants. But we don't yet.
- ast::ExprPath(_) => self.lookup_constness(e),
+ ast::ExprPath(_) | ast::ExprQPath(_) => self.lookup_constness(e),
ast::ExprRepeat(..) => general_const,
}
}
+ ast::ExprQPath(_) => {
+ match lookup_const(tcx, expr) {
+ Some(actual) => return const_expr_to_pat(tcx, actual),
+ _ => unreachable!()
+ }
+ }
+
_ => ast::PatLit(P(expr.clone()))
};
P(ast::Pat { id: expr.id, node: pat, span: expr.span })
ty::ty_float(ast::TyF64) => (f64, const_float, f64)
}))
}
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
match lookup_const(tcx, e) {
Some(actual_e) => eval_const_expr_partial(tcx, &*actual_e),
None => Err("non-constant path in constant expr".to_string())
ast::ExprInlineAsm(..) => {
self.require_unsafe(expr.span, "use of inline assembly");
}
- ast::ExprPath(..) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
if let def::DefStatic(_, true) = ty::resolve_expr(self.tcx, expr) {
self.require_unsafe(expr.span, "use of mutable static");
}
self.walk_expr(&**subexpr)
}
- ast::ExprPath(..) => { }
+ ast::ExprPath(_) | ast::ExprQPath(_) => { }
ast::ExprUnary(ast::UnDeref, ref base) => { // *base
if !self.walk_overloaded_operator(expr, &**base, Vec::new(), PassArgs::ByRef) {
fn visit_expr(ir: &mut IrMaps, expr: &Expr) {
match expr.node {
// live nodes required for uses or definitions of variables:
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
let def = ir.tcx.def_map.borrow()[expr.id].clone();
debug!("expr {}: path that leads to {:?}", expr.id, def);
if let DefLocal(..) = def {
match expr.node {
// Interesting cases with control flow or which gen/kill
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
self.access_path(expr, succ, ACC_READ | ACC_USE)
}
// just ignore such cases and treat them as reads.
match expr.node {
- ast::ExprPath(_) => succ,
+ ast::ExprPath(_) | ast::ExprQPath(_) => succ,
ast::ExprField(ref e, _) => self.propagate_through_expr(&**e, succ),
ast::ExprTupField(ref e, _) => self.propagate_through_expr(&**e, succ),
_ => self.propagate_through_expr(expr, succ)
fn write_lvalue(&mut self, expr: &Expr, succ: LiveNode, acc: uint)
-> LiveNode {
match expr.node {
- ast::ExprPath(_) => self.access_path(expr, succ, acc),
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
+ self.access_path(expr, succ, acc)
+ }
// We do not track other lvalues, so just propagate through
// to their subcomponents. Also, it may happen that
ast::ExprBlock(..) | ast::ExprMac(..) | ast::ExprAddrOf(..) |
ast::ExprStruct(..) | ast::ExprRepeat(..) | ast::ExprParen(..) |
ast::ExprClosure(..) | ast::ExprPath(..) | ast::ExprBox(..) |
- ast::ExprRange(..) => {
+ ast::ExprRange(..) | ast::ExprQPath(..) => {
visit::walk_expr(this, expr);
}
ast::ExprIfLet(..) => {
fn check_lvalue(&mut self, expr: &Expr) {
match expr.node {
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
if let DefLocal(nid) = self.ir.tcx.def_map.borrow()[expr.id].clone() {
// Assignment to an immutable variable or argument: only legal
// if there is no later assignment. If this local is actually
}
}
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
let def = (*self.tcx().def_map.borrow())[expr.id];
self.cat_def(expr.id, expr.span, expr_ty, def)
}
struct type?!"),
}
}
- ast::ExprPath(..) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
let guard = |&: did: ast::DefId| {
let fields = ty::lookup_struct_fields(self.tcx, did);
let any_priv = fields.iter().any(|f| {
fn visit_expr(&mut self, expr: &ast::Expr) {
match expr.node {
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
let def = match self.tcx.def_map.borrow().get(&expr.id) {
Some(&def) => def,
None => {
use std::fmt;
use std::slice::Iter;
-use std::vec::Vec;
+use std::vec::{Vec, IntoIter};
use syntax::codemap::{Span, DUMMY_SP};
///////////////////////////////////////////////////////////////////////////
self.content.iter()
}
+ pub fn into_iter(self) -> IntoIter<T> {
+ self.content.into_iter()
+ }
+
pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> {
EnumeratedItems::new(self)
}
span: Span) -> Option<String> {
let def_id = trait_ref.def_id;
let mut report = None;
- ty::each_attr(infcx.tcx, def_id, |item| {
+ for item in ty::get_attrs(infcx.tcx, def_id).iter() {
if item.check_name("rustc_on_unimplemented") {
let err_sp = if item.meta().span == DUMMY_SP {
span
eg `#[rustc_on_unimplemented = \"foo\"]`",
trait_str).as_slice());
}
- false
- } else {
- true
+ break;
}
- });
+ }
report
}
note_obligation_cause(infcx, obligation);
}
- SelectionError::Unimplemented => {
- match obligation.predicate {
- ty::Predicate::Trait(ref trait_predicate) => {
- let trait_predicate =
- infcx.resolve_type_vars_if_possible(trait_predicate);
- if !trait_predicate.references_error() {
- let trait_ref = trait_predicate.to_poly_trait_ref();
- infcx.tcx.sess.span_err(
- obligation.cause.span,
- format!(
- "the trait `{}` is not implemented for the type `{}`",
- trait_ref.user_string(infcx.tcx),
- trait_ref.self_ty().user_string(infcx.tcx)).as_slice());
- // Check if it has a custom "#[rustc_on_unimplemented]" error message,
- // report with that message if it does
- let custom_note = report_on_unimplemented(infcx, &*trait_ref.0,
- obligation.cause.span);
- if let Some(s) = custom_note {
- infcx.tcx.sess.span_note(obligation.cause.span,
- s.as_slice());
- }
- }
- }
- ty::Predicate::Equate(ref predicate) => {
- let predicate = infcx.resolve_type_vars_if_possible(predicate);
- let err = infcx.equality_predicate(obligation.cause.span,
- &predicate).unwrap_err();
+ SelectionError::Unimplemented => {
+ match &obligation.cause.code {
+ &ObligationCauseCode::CompareImplMethodObligation => {
infcx.tcx.sess.span_err(
obligation.cause.span,
format!(
- "the requirement `{}` is not satisfied (`{}`)",
- predicate.user_string(infcx.tcx),
- ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ "the requirement `{}` appears on the impl \
+ method but not on the corresponding trait method",
+ obligation.predicate.user_string(infcx.tcx)).as_slice());
}
+ _ => {
+ match obligation.predicate {
+ ty::Predicate::Trait(ref trait_predicate) => {
+ let trait_predicate =
+ infcx.resolve_type_vars_if_possible(trait_predicate);
- ty::Predicate::RegionOutlives(ref predicate) => {
- let predicate = infcx.resolve_type_vars_if_possible(predicate);
- let err = infcx.region_outlives_predicate(obligation.cause.span,
- &predicate).unwrap_err();
- infcx.tcx.sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied (`{}`)",
- predicate.user_string(infcx.tcx),
- ty::type_err_to_str(infcx.tcx, &err)).as_slice());
- }
+ if !trait_predicate.references_error() {
+ let trait_ref = trait_predicate.to_poly_trait_ref();
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the trait `{}` is not implemented for the type `{}`",
+ trait_ref.user_string(infcx.tcx),
+ trait_ref.self_ty().user_string(infcx.tcx)).as_slice());
+ // Check if it has a custom "#[rustc_on_unimplemented]"
+ // error message, report with that message if it does
+ let custom_note = report_on_unimplemented(infcx, &*trait_ref.0,
+ obligation.cause.span);
+ if let Some(s) = custom_note {
+ infcx.tcx.sess.span_note(obligation.cause.span,
+ s.as_slice());
+ }
+ }
+ }
- ty::Predicate::Projection(..) |
- ty::Predicate::TypeOutlives(..) => {
- let predicate =
- infcx.resolve_type_vars_if_possible(&obligation.predicate);
- infcx.tcx.sess.span_err(
- obligation.cause.span,
- format!(
- "the requirement `{}` is not satisfied",
- predicate.user_string(infcx.tcx)).as_slice());
+ ty::Predicate::Equate(ref predicate) => {
+ let predicate = infcx.resolve_type_vars_if_possible(predicate);
+ let err = infcx.equality_predicate(obligation.cause.span,
+ &predicate).unwrap_err();
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ }
+
+ ty::Predicate::RegionOutlives(ref predicate) => {
+ let predicate = infcx.resolve_type_vars_if_possible(predicate);
+ let err = infcx.region_outlives_predicate(obligation.cause.span,
+ &predicate).unwrap_err();
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied (`{}`)",
+ predicate.user_string(infcx.tcx),
+ ty::type_err_to_str(infcx.tcx, &err)).as_slice());
+ }
+
+ ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) => {
+ let predicate =
+ infcx.resolve_type_vars_if_possible(&obligation.predicate);
+ infcx.tcx.sess.span_err(
+ obligation.cause.span,
+ format!(
+ "the requirement `{}` is not satisfied",
+ predicate.user_string(infcx.tcx)).as_slice());
+ }
+ }
}
}
}
+
OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => {
let expected_trait_ref = infcx.resolve_type_vars_if_possible(&*expected_trait_ref);
let actual_trait_ref = infcx.resolve_type_vars_if_possible(&*actual_trait_ref);
obligation.cause.span,
format!(
"type mismatch: the type `{}` implements the trait `{}`, \
- but the trait `{}` is required ({})",
+ but the trait `{}` is required ({})",
expected_trait_ref.self_ty().user_string(infcx.tcx),
expected_trait_ref.user_string(infcx.tcx),
actual_trait_ref.user_string(infcx.tcx),
ty::type_err_to_str(infcx.tcx, e)).as_slice());
- note_obligation_cause(infcx, obligation);
+ note_obligation_cause(infcx, obligation);
}
}
}
}
fn note_obligation_cause_code<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>,
- _predicate: &ty::Predicate<'tcx>,
+ predicate: &ty::Predicate<'tcx>,
cause_span: Span,
cause_code: &ObligationCauseCode<'tcx>)
{
let parent_predicate = parent_trait_ref.as_predicate();
note_obligation_cause_code(infcx, &parent_predicate, cause_span, &*data.parent_code);
}
+        ObligationCauseCode::CompareImplMethodObligation => {
+            // NOTE: a trailing `\` in a Rust string literal swallows the
+            // newline AND the next line's leading whitespace, so a space is
+            // required before it — otherwise this renders "...methodbut not...".
+            span_note!(tcx.sess, cause_span,
+                       "the requirement `{}` appears on the impl method \
+                       but not on the corresponding trait method",
+                       predicate.user_string(infcx.tcx));
+        }
}
}
// static items must have `Sync` type
SharedStatic,
+
BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
ImplDerivedObligation(DerivedObligationCause<'tcx>),
+
+ CompareImplMethodObligation,
}
#[derive(Clone)]
.is_ok()
})
}
+ (&BuiltinCandidate(_), &ParamCandidate(_)) => {
+ // If we have a where-clause like `Option<K> : Send`,
+ // then we wind up in a situation where there is a
+ // default rule (`Option<K>:Send if K:Send) and the
+ // where-clause that both seem applicable. Just take
+ // the where-clause in that case.
+ true
+ }
(&ProjectionCandidate, &ParamCandidate(_)) => {
// FIXME(#20297) -- this gives where clauses precedent
// over projections. Really these are just two means
use util::nodemap::{FnvHashMap};
use arena::TypedArena;
-use std::borrow::BorrowFrom;
+use std::borrow::{BorrowFrom, Cow};
use std::cell::{Cell, RefCell};
use std::cmp::{self, Ordering};
use std::fmt::{self, Show};
use std::mem;
use std::ops;
use std::rc::Rc;
+use std::vec::CowVec;
use collections::enum_set::{EnumSet, CLike};
use std::collections::{HashMap, HashSet};
use syntax::abi;
}
match expr.node {
- ast::ExprPath(..) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
match resolve_expr(tcx, expr) {
def::DefVariant(tid, vid, _) => {
let variant_info = enum_variant_with_id(tcx, tid, vid);
vec
}
-/// Iterate over attributes of a definition.
-// (This should really be an iterator, but that would require csearch and
-// decoder to use iterators instead of higher-order functions.)
-pub fn each_attr<F>(tcx: &ctxt, did: DefId, mut f: F) -> bool where
- F: FnMut(&ast::Attribute) -> bool,
-{
+/// Get the attributes of a definition.
+pub fn get_attrs<'tcx>(tcx: &'tcx ctxt, did: DefId)
+ -> CowVec<'tcx, ast::Attribute> {
if is_local(did) {
let item = tcx.map.expect_item(did.node);
- item.attrs.iter().all(|attr| f(attr))
+ Cow::Borrowed(&item.attrs[])
} else {
- info!("getting foreign attrs");
- let mut cont = true;
- csearch::get_item_attrs(&tcx.sess.cstore, did, |attrs| {
- if cont {
- cont = attrs.iter().all(|attr| f(attr));
- }
- });
- info!("done");
- cont
+ Cow::Owned(csearch::get_item_attrs(&tcx.sess.cstore, did))
}
}
/// Determine whether an item is annotated with an attribute
pub fn has_attr(tcx: &ctxt, did: DefId, attr: &str) -> bool {
- let mut found = false;
- each_attr(tcx, did, |item| {
- if item.check_name(attr) {
- found = true;
- false
- } else {
- true
- }
- });
- found
+ get_attrs(tcx, did).iter().any(|item| item.check_name(attr))
}
/// Determine whether an item is annotated with `#[repr(packed)]`
pub fn lookup_repr_hints(tcx: &ctxt, did: DefId) -> Rc<Vec<attr::ReprAttr>> {
memoized(&tcx.repr_hint_cache, did, |did: DefId| {
Rc::new(if did.krate == LOCAL_CRATE {
- let mut acc = Vec::new();
- ty::each_attr(tcx, did, |meta| {
- acc.extend(attr::find_repr_attrs(tcx.sess.diagnostic(),
- meta).into_iter());
- true
- });
- acc
+ get_attrs(tcx, did).iter().flat_map(|meta| {
+ attr::find_repr_attrs(tcx.sess.diagnostic(), meta).into_iter()
+ }).collect()
} else {
csearch::get_repr_attrs(&tcx.sess.cstore, did)
})
self.mt.repr(tcx))
}
}
+
+// Debug pretty-printer for `ParameterEnvironment`, so it can appear in
+// `debug!`/tracing output like the other compiler data structures. Prints
+// only the three substantive fields (the selection cache is omitted).
+impl<'a, 'tcx> Repr<'tcx> for ParameterEnvironment<'a, 'tcx> {
+    fn repr(&self, tcx: &ctxt<'tcx>) -> String {
+        format!("ParameterEnvironment(\
+            free_substs={}, \
+            implicit_region_bound={}, \
+            caller_bounds={})",
+            self.free_substs.repr(tcx),
+            self.implicit_region_bound.repr(tcx),
+            self.caller_bounds.repr(tcx))
+    }
+ }
}
}
+// Fold every type embedded in a `ParameterEnvironment`. Note that the
+// selection cache is NOT folded: a brand-new empty cache is installed,
+// since cached selection results would not be valid for the folded types.
+impl<'a, 'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'a, 'tcx> where 'tcx: 'a {
+    fn fold_with<F:TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ParameterEnvironment<'a, 'tcx> {
+        ty::ParameterEnvironment {
+            tcx: self.tcx,
+            free_substs: self.free_substs.fold_with(folder),
+            implicit_region_bound: self.implicit_region_bound.fold_with(folder),
+            caller_bounds: self.caller_bounds.fold_with(folder),
+            selection_cache: traits::SelectionCache::new(),
+        }
+    }
+}
+
///////////////////////////////////////////////////////////////////////////
// "super" routines: these are the default implementations for TypeFolder.
//
use std::slice;
-#[derive(Clone)]
+#[derive(Clone, Show)]
pub struct SearchPaths {
paths: Vec<(PathKind, Path)>,
}
iter: slice::Iter<'a, (PathKind, Path)>,
}
-#[derive(Eq, PartialEq, Clone, Copy)]
+#[derive(Eq, PartialEq, Clone, Copy, Show)]
pub enum PathKind {
Native,
Crate,
0
};
- for t in tps[0..(tps.len() - num_defaults)].iter() {
+ for t in tps[..(tps.len() - num_defaults)].iter() {
strs.push(ty_to_string(cx, *t))
}
if input.len() >= buffer_remaining {
copy_memory(
self.buffer.slice_mut(self.buffer_idx, size),
- &input[0..buffer_remaining]);
+ &input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
assert!(self.buffer_idx == 64);
self.buffer_idx = 0;
- return &self.buffer[0..64];
+ return &self.buffer[..64];
}
fn position(&self) -> uint { self.buffer_idx }
SawExprIndex,
SawExprRange,
SawExprPath,
+ SawExprQPath,
SawExprAddrOf(ast::Mutability),
SawExprRet,
SawExprInlineAsm(&'a ast::InlineAsm),
ExprIndex(..) => SawExprIndex,
ExprRange(..) => SawExprRange,
ExprPath(..) => SawExprPath,
+ ExprQPath(..) => SawExprQPath,
ExprAddrOf(m, _) => SawExprAddrOf(m),
ExprBreak(id) => SawExprBreak(id.map(content)),
ExprAgain(id) => SawExprAgain(id.map(content)),
mod i686_unknown_linux_gnu;
mod mips_unknown_linux_gnu;
mod mipsel_unknown_linux_gnu;
+mod powerpc_unknown_linux_gnu;
mod x86_64_apple_darwin;
mod x86_64_apple_ios;
mod x86_64_pc_windows_gnu;
/// OS name to use for conditional compilation.
pub target_os: String,
/// Architecture to use for ABI considerations. Valid options: "x86", "x86_64", "arm",
- /// "aarch64", and "mips". "mips" includes "mipsel".
+ /// "aarch64", "mips", and "powerpc". "mips" includes "mipsel".
pub arch: String,
/// Optional settings with defaults.
pub options: TargetOptions,
i686_unknown_linux_gnu,
mips_unknown_linux_gnu,
mipsel_unknown_linux_gnu,
+ powerpc_unknown_linux_gnu,
arm_linux_androideabi,
arm_unknown_linux_gnueabi,
arm_unknown_linux_gnueabihf,
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use target::Target;
+
+// Target specification for 32-bit big-endian PowerPC Linux.
+// Starts from the generic Linux options and forces 32-bit code
+// generation via `-m32` on the link line.
+pub fn target() -> Target {
+ let mut base = super::linux_base::opts();
+ base.pre_link_args.push("-m32".to_string());
+
+ Target {
+ // NOTE(review): "f80:32:32" is an x86-style extended-float entry and
+ // PowerPC has no 80-bit float type — confirm this layout string is
+ // intended, or whether it was copied from a 32-bit x86 template.
+ data_layout: "E-S8-p:32:32-f64:32:64-i64:32:64-f80:32:32-n8:16:32".to_string(),
+ llvm_target: "powerpc-unknown-linux-gnu".to_string(),
+ target_endian: "big".to_string(),
+ target_pointer_width: "32".to_string(),
+ arch: "powerpc".to_string(),
+ target_os: "linux".to_string(),
+ options: base,
+ }
+}
list_metadata(&sess, &(*ifile), &mut stdout).unwrap();
}
Input::Str(_) => {
- early_error("can not list metadata for stdin");
+ early_error("cannot list metadata for stdin");
}
}
return;
pub fn LLVMInitializeMipsTargetMC();
pub fn LLVMInitializeMipsAsmPrinter();
pub fn LLVMInitializeMipsAsmParser();
+ pub fn LLVMInitializePowerPCTargetInfo();
+ pub fn LLVMInitializePowerPCTarget();
+ pub fn LLVMInitializePowerPCTargetMC();
+ pub fn LLVMInitializePowerPCAsmPrinter();
+ pub fn LLVMInitializePowerPCAsmParser();
pub fn LLVMRustAddPass(PM: PassManagerRef, Pass: *const c_char) -> bool;
pub fn LLVMRustCreateTargetMachine(Triple: *const c_char,
LLVMInitializeMipsAsmPrinter();
LLVMInitializeMipsAsmParser();
+ LLVMInitializePowerPCTargetInfo();
+ LLVMInitializePowerPCTarget();
+ LLVMInitializePowerPCTargetMC();
+ LLVMInitializePowerPCAsmPrinter();
+ LLVMInitializePowerPCAsmParser();
+
LLVMRustSetLLVMOptions(0 as c_int,
0 as *const _);
use syntax::ast::{Arm, BindByRef, BindByValue, BindingMode, Block, Crate, CrateNum};
use syntax::ast::{DefId, Expr, ExprAgain, ExprBreak, ExprField};
use syntax::ast::{ExprClosure, ExprForLoop, ExprLoop, ExprWhile, ExprMethodCall};
-use syntax::ast::{ExprPath, ExprStruct, FnDecl};
+use syntax::ast::{ExprPath, ExprQPath, ExprStruct, FnDecl};
use syntax::ast::{ForeignItemFn, ForeignItemStatic, Generics};
use syntax::ast::{Ident, ImplItem, Item, ItemConst, ItemEnum, ItemFn};
use syntax::ast::{ItemForeignMod, ItemImpl, ItemMac, ItemMod, ItemStatic};
// idx +- 1 to account for the
// colons on either side
&mpath[(idx + 1)..],
- &mpath[0..(idx - 1)]);
+ &mpath[..(idx - 1)]);
return Failed(Some((span, msg)));
},
None => {
TraitImplementation => "implement",
TraitDerivation => "derive",
TraitObject => "reference",
- TraitQPath => "extract an associated type from",
+ TraitQPath => "extract an associated item from",
};
let msg = format!("attempt to {} a nonexistent trait `{}`", usage_str, path_str);
}
}
- match result_def {
- None => {
- match self.resolve_path(ty.id, path, TypeNS, true) {
- Some(def) => {
- debug!("(resolving type) resolved `{:?}` to \
- type {:?}",
- token::get_ident(path.segments.last().unwrap() .identifier),
- def);
- result_def = Some(def);
- }
- None => {
- result_def = None;
- }
- }
- }
- Some(_) => {} // Continue.
+ if let None = result_def {
+ result_def = self.resolve_path(ty.id, path, TypeNS, true);
}
match result_def {
Some(def) => {
// Write the result into the def map.
debug!("(resolving type) writing resolution for `{}` \
- (id {})",
+ (id {}) = {:?}",
self.path_names_to_string(path),
- path_id);
+ path_id, def);
self.record_def(path_id, def);
}
None => {
TyQPath(ref qpath) => {
self.resolve_type(&*qpath.self_type);
self.resolve_trait_reference(ty.id, &*qpath.trait_ref, TraitQPath);
+ for ty in qpath.item_path.parameters.types().into_iter() {
+ self.resolve_type(&**ty);
+ }
+ for binding in qpath.item_path.parameters.bindings().into_iter() {
+ self.resolve_type(&*binding.ty);
+ }
}
TyPolyTraitRef(ref bounds) => {
// The interpretation of paths depends on whether the path has
// multiple elements in it or not.
- ExprPath(ref path) => {
+ ExprPath(_) | ExprQPath(_) => {
+ let mut path_from_qpath;
+ let path = match expr.node {
+ ExprPath(ref path) => path,
+ ExprQPath(ref qpath) => {
+ self.resolve_type(&*qpath.self_type);
+ self.resolve_trait_reference(expr.id, &*qpath.trait_ref, TraitQPath);
+ path_from_qpath = qpath.trait_ref.path.clone();
+ path_from_qpath.segments.push(qpath.item_path.clone());
+ &path_from_qpath
+ }
+ _ => unreachable!()
+ };
// This is a local path in the value namespace. Walk through
// scopes looking for it.
-
- let path_name = self.path_names_to_string(path);
-
match self.resolve_path(expr.id, path, ValueNS, true) {
// Check if struct variant
Some((DefVariant(_, _, true), _)) => {
+ let path_name = self.path_names_to_string(path);
self.resolve_error(expr.span,
format!("`{}` is a struct variant name, but \
this expression \
Some(def) => {
// Write the result into the def map.
debug!("(resolving expr) resolved `{}`",
- path_name);
+ self.path_names_to_string(path));
self.record_def(expr.id, def);
}
// (The pattern matching def_tys where the id is in self.structs
// matches on regular structs while excluding tuple- and enum-like
// structs, which wouldn't result in this error.)
+ let path_name = self.path_names_to_string(path);
match self.with_no_errors(|this|
this.resolve_path(expr.id, path, TypeNS, false)) {
Some((DefTy(struct_id, _), _))
fn is_versioned_bytecode_format(bc: &[u8]) -> bool {
let magic_id_byte_count = link::RLIB_BYTECODE_OBJECT_MAGIC.len();
return bc.len() > magic_id_byte_count &&
- &bc[0..magic_id_byte_count] == link::RLIB_BYTECODE_OBJECT_MAGIC;
+ &bc[..magic_id_byte_count] == link::RLIB_BYTECODE_OBJECT_MAGIC;
}
fn extract_bytecode_format_version(bc: &[u8]) -> u32 {
llvm::LLVMInitializeMipsAsmPrinter();
llvm::LLVMInitializeMipsAsmParser();
+ llvm::LLVMInitializePowerPCTargetInfo();
+ llvm::LLVMInitializePowerPCTarget();
+ llvm::LLVMInitializePowerPCTargetMC();
+ llvm::LLVMInitializePowerPCAsmPrinter();
+ llvm::LLVMInitializePowerPCAsmParser();
+
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
llvm_args.as_ptr());
});
if len <= 2 {
return;
}
- let sub_paths = &sub_paths[0..(len-2)];
+ let sub_paths = &sub_paths[..(len-2)];
for &(ref span, ref qualname) in sub_paths.iter() {
self.fmt.sub_mod_ref_str(path.span,
*span,
span: Span,
path: &ast::Path,
ref_kind: Option<recorder::Row>) {
- if generated_code(path.span) {
+ if generated_code(span) {
return
}
visit::walk_expr(self, ex);
},
ast::ExprPath(ref path) => {
- self.process_path(ex.id, ex.span, path, None);
+ self.process_path(ex.id, path.span, path, None);
visit::walk_path(self, path);
}
+ ast::ExprQPath(ref qpath) => {
+ let mut path = qpath.trait_ref.path.clone();
+ path.segments.push(qpath.item_path.clone());
+ self.process_path(ex.id, ex.span, &path, None);
+ visit::walk_qpath(self, ex.span, &**qpath);
+ }
ast::ExprStruct(ref path, ref fields, ref base) =>
self.process_struct_lit(ex, path, fields, base),
ast::ExprMethodCall(_, _, ref args) => self.process_method_call(ex, args),
"")
}
def::DefVariant(..) => {
- paths_to_process.push((id, p.span, p.clone(), Some(ref_kind)))
+ paths_to_process.push((id, p.clone(), Some(ref_kind)))
}
// FIXME(nrc) what are these doing here?
def::DefStatic(_, _) => {}
*def)
}
}
- for &(id, span, ref path, ref_kind) in paths_to_process.iter() {
- self.process_path(id, span, path, ref_kind);
+ for &(id, ref path, ref_kind) in paths_to_process.iter() {
+ self.process_path(id, path.span, path, ref_kind);
}
self.collecting = false;
self.collected_paths.clear();
let values = values.iter().map(|s| {
// Never take more than 1020 chars
if s.len() > 1020 {
- &s[0..1020]
+ &s[..1020]
} else {
&s[]
}
// Collect all of the matches that can match against anything.
enter_match(bcx, dm, m, col, val, |pats| {
if pat_is_binding_or_wild(dm, &*pats[col]) {
- let mut r = pats[0..col].to_vec();
+ let mut r = pats[..col].to_vec();
r.push_all(&pats[(col + 1)..]);
Some(r)
} else {
/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
fn is_discr_reassigned(bcx: Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
let (vid, field) = match discr.node {
- ast::ExprPath(..) => match bcx.def(discr.id) {
+ ast::ExprPath(_) | ast::ExprQPath(_) => match bcx.def(discr.id) {
def::DefLocal(vid) | def::DefUpvar(vid, _, _) => (vid, None),
_ => return false
},
// Default per-arch clobbers
// Basically what clang does
-#[cfg(any(target_arch = "arm",
- target_arch = "aarch64",
- target_arch = "mips",
- target_arch = "mipsel"))]
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
fn get_clobbers() -> String {
"".to_string()
}
let f = decl_rust_fn(ccx, fn_ty, name);
- csearch::get_item_attrs(&ccx.sess().cstore, did, |attrs| {
- set_llvm_fn_attrs(ccx, &attrs[], f)
- });
+ let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
+ set_llvm_fn_attrs(ccx, &attrs[], f);
ccx.externs().borrow_mut().insert(name.to_string(), f);
f
// don't do this then linker errors can be generated where the linker
// complains that one object files has a thread local version of the
// symbol and another one doesn't.
- ty::each_attr(ccx.tcx(), did, |attr| {
+ for attr in ty::get_attrs(ccx.tcx(), did).iter() {
if attr.check_name("thread_local") {
llvm::set_thread_local(c, true);
}
- true
- });
+ }
ccx.externs().borrow_mut().insert(name.to_string(), c);
return c;
}
let mut cx = cx;
for (i, &arg) in variant.args.iter().enumerate() {
- cx = (*f)(cx,
- adt::trans_field_ptr(cx, repr, av, variant.disr_val, i),
- arg.subst(tcx, substs));
+ let arg = monomorphize::apply_param_substs(tcx, substs, &arg);
+ cx = f(cx, adt::trans_field_ptr(cx, repr, av, variant.disr_val, i), arg);
}
return cx;
}
for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs.iter()) {
*small_vec_e = C_i32(self.ccx, ix as i32);
}
- self.inbounds_gep(base, &small_vec[0..ixs.len()])
+ self.inbounds_gep(base, &small_vec[..ixs.len()])
} else {
let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::<Vec<ValueRef>>();
self.count_insn("gepi");
use trans::cabi_x86_win64;
use trans::cabi_arm;
use trans::cabi_aarch64;
+use trans::cabi_powerpc;
use trans::cabi_mips;
use trans::type_::Type;
cabi_arm::compute_abi_info(ccx, atys, rty, ret_def, flavor)
},
"mips" => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
+ "powerpc" => cabi_powerpc::compute_abi_info(ccx, atys, rty, ret_def),
a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)
[]),
}
--- /dev/null
+// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use libc::c_uint;
+use llvm;
+use llvm::{Integer, Pointer, Float, Double, Struct, Array};
+use llvm::{StructRetAttribute, ZExtAttribute};
+use trans::cabi::{FnType, ArgType};
+use trans::context::CrateContext;
+use trans::type_::Type;
+
+use std::cmp;
+
+/// Rounds `off` up to the next multiple of the alignment `a`
+/// (assumes `a >= 1`).
+fn align_up_to(off: uint, a: uint) -> uint {
+ return (off + a - 1u) / a * a;
+}
+
+/// Rounds `off` up to the natural alignment of `ty`.
+fn align(off: uint, ty: Type) -> uint {
+ let a = ty_align(ty);
+ return align_up_to(off, a);
+}
+
+/// Returns the ABI alignment of an LLVM type, in bytes, for the 32-bit
+/// PowerPC C ABI: integers align to their size, pointers and floats to 4,
+/// doubles to 8, structs to the max alignment of their fields (1 if
+/// packed), arrays to the alignment of their element type.
+fn ty_align(ty: Type) -> uint {
+ match ty.kind() {
+ Integer => {
+ unsafe {
+ // Bit width rounded up to whole bytes.
+ ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
+ }
+ }
+ Pointer => 4,
+ Float => 4,
+ Double => 8,
+ Struct => {
+ if ty.is_packed() {
+ 1
+ } else {
+ let str_tys = ty.field_types();
+ str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
+ }
+ }
+ Array => {
+ let elt = ty.element_type();
+ ty_align(elt)
+ }
+ // Was "ty_size: unhandled type" — a copy-paste from ty_size that
+ // would misattribute the panic to the wrong function.
+ _ => panic!("ty_align: unhandled type")
+ }
+}
+
+/// Returns the ABI size of an LLVM type, in bytes. Struct sizes account
+/// for inter-field padding (unless packed) and are rounded up to the
+/// struct's own alignment; array size is element size times length.
+fn ty_size(ty: Type) -> uint {
+ match ty.kind() {
+ Integer => {
+ unsafe {
+ // Bit width rounded up to whole bytes.
+ ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
+ }
+ }
+ Pointer => 4,
+ Float => 4,
+ Double => 8,
+ Struct => {
+ if ty.is_packed() {
+ // Packed: fields are laid out back to back, no padding.
+ let str_tys = ty.field_types();
+ str_tys.iter().fold(0, |s, t| s + ty_size(*t))
+ } else {
+ // Pad each field to its alignment, then round the total
+ // up to the struct's alignment.
+ let str_tys = ty.field_types();
+ let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
+ align(size, ty)
+ }
+ }
+ Array => {
+ let len = ty.array_length();
+ let elt = ty.element_type();
+ let eltsz = ty_size(elt);
+ len * eltsz
+ }
+ _ => panic!("ty_size: unhandled type")
+ }
+}
+
+/// Classifies how a function's return value is passed back: scalar types
+/// are returned directly (with zero-extension for `i1`), aggregates are
+/// returned indirectly through a hidden sret pointer argument.
+fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
+ if is_reg_ty(ty) {
+ // LLVM represents bool as i1; zero-extend it to a full register.
+ let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
+ ArgType::direct(ty, None, None, attr)
+ } else {
+ ArgType::indirect(ty, Some(StructRetAttribute))
+ }
+}
+
+/// Classifies one argument and advances `offset`, the running byte offset
+/// of arguments in the (conceptual) parameter save area. Scalars are
+/// passed directly; aggregates are coerced to a struct of i32-sized
+/// chunks (see `struct_ty`), with optional padding to keep alignment.
+fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut uint) -> ArgType {
+ let orig_offset = *offset;
+ // Size in bits; alignment clamped to the 4..8 byte range.
+ let size = ty_size(ty) * 8;
+ let mut align = ty_align(ty);
+
+ align = cmp::min(cmp::max(align, 4), 8);
+ *offset = align_up_to(*offset, align);
+ // Advance by the size padded out to the alignment, converted to bytes.
+ *offset += align_up_to(size, align * 8) / 8;
+
+ if is_reg_ty(ty) {
+ // Zero-extend bool (i1) arguments to a full register.
+ let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
+ ArgType::direct(ty, None, None, attr)
+ } else {
+ // Aggregate: coerce to i32 chunks, padding from the offset the
+ // argument had before this call aligned it.
+ ArgType::direct(
+ ty,
+ Some(struct_ty(ccx, ty)),
+ padding_ty(ccx, align, orig_offset),
+ None
+ )
+ }
+}
+
+/// Whether `ty` is a scalar that can be passed directly in a register
+/// (integer, pointer, or floating-point value).
+fn is_reg_ty(ty: Type) -> bool {
+ match ty.kind() {
+ Integer | Pointer | Float | Double => true,
+ _ => false
+ }
+}
+
+/// Returns an i32 padding slot when `offset` is not already aligned to
+/// `align`, otherwise `None`.
+// NOTE(review): `(align - 1) & offset` tests misalignment bitwise, which
+// relies on `align` being a power of two — true for the clamped 4/8
+// values the sole caller passes, but worth confirming for struct
+// alignments derived from odd integer widths.
+fn padding_ty(ccx: &CrateContext, align: uint, offset: uint) -> Option<Type> {
+ if ((align - 1 ) & offset) > 0 {
+ Some(Type::i32(ccx))
+ } else {
+ None
+ }
+}
+
+/// Breaks a value of `size` bits into a list of integer types: one i32
+/// per full 32-bit chunk, plus one narrower integer for any remaining
+/// bits. Used to coerce aggregates into register-sized pieces.
+fn coerce_to_int(ccx: &CrateContext, size: uint) -> Vec<Type> {
+ let int_ty = Type::i32(ccx);
+ let mut args = Vec::new();
+
+ // Whole 32-bit chunks.
+ let mut n = size / 32;
+ while n > 0 {
+ args.push(int_ty);
+ n -= 1;
+ }
+
+ // Leftover bits become one custom-width integer (e.g. i16 for 48 bits).
+ let r = size % 32;
+ if r > 0 {
+ unsafe {
+ args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
+ }
+ }
+
+ args
+}
+
+/// Builds the coerced representation of an aggregate argument: a
+/// non-packed struct of integer chunks covering `ty`'s full bit size.
+fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
+ let size = ty_size(ty) * 8;
+ Type::struct_(ccx, coerce_to_int(ccx, size).as_slice(), false)
+}
+
+/// Computes the C ABI lowering of a function for 32-bit PowerPC:
+/// classifies the return type, then each argument in order while
+/// tracking the running stack offset.
+pub fn compute_abi_info(ccx: &CrateContext,
+ atys: &[Type],
+ rty: Type,
+ ret_def: bool) -> FnType {
+ // `ret_def == false` means the function returns nothing meaningful.
+ let ret_ty = if ret_def {
+ classify_ret_ty(ccx, rty)
+ } else {
+ ArgType::direct(Type::void(ccx), None, None, None)
+ };
+
+ let sret = ret_ty.is_indirect();
+ let mut arg_tys = Vec::new();
+ // An indirect return consumes the first 4-byte slot for the hidden
+ // sret pointer, so argument offsets start after it.
+ let mut offset = if sret { 4 } else { 0 };
+
+ for aty in atys.iter() {
+ let ty = classify_arg_ty(ccx, *aty, &mut offset);
+ arg_tys.push(ty);
+ };
+
+ return FnType {
+ arg_tys: arg_tys,
+ ret_ty: ret_ty,
+ };
+}
debug!("callee::trans(expr={})", expr.repr(bcx.tcx()));
// pick out special kinds of expressions that can be called:
- if let ast::ExprPath(_) = expr.node {
- return trans_def(bcx, bcx.def(expr.id), expr);
+ match expr.node {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
+ return trans_def(bcx, bcx.def(expr.id), expr);
+ }
+ _ => {}
}
// any other expressions are closures:
C_array(llunitty, &vs[])
}
}
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
let def = cx.tcx().def_map.borrow()[e.id];
match def {
def::DefFn(..) | def::DefStaticMethod(..) | def::DefMethod(..) => {
fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>,
type_: Ty<'tcx>) -> UniqueTypeId {
- // basic type -> {:name of the type:}
- // tuple -> {tuple_(:param-uid:)*}
- // struct -> {struct_:svh: / :node-id:_<(:param-uid:),*> }
- // enum -> {enum_:svh: / :node-id:_<(:param-uid:),*> }
- // enum variant -> {variant_:variant-name:_:enum-uid:}
- // reference (&) -> {& :pointee-uid:}
- // mut reference (&mut) -> {&mut :pointee-uid:}
- // ptr (*) -> {* :pointee-uid:}
- // mut ptr (*mut) -> {*mut :pointee-uid:}
- // unique ptr (~) -> {~ :pointee-uid:}
- // @-ptr (@) -> {@ :pointee-uid:}
- // sized vec ([T; x]) -> {[:size:] :element-uid:}
- // unsized vec ([T]) -> {[] :element-uid:}
- // trait (T) -> {trait_:svh: / :node-id:_<(:param-uid:),*> }
- // closure -> {<unsafe_> <once_> :store-sigil: |(:param-uid:),* <,_...>| -> \
- // :return-type-uid: : (:bounds:)*}
- // function -> {<unsafe_> <abi_> fn( (:param-uid:)* <,_...> ) -> \
- // :return-type-uid:}
- // unique vec box (~[]) -> {HEAP_VEC_BOX<:pointee-uid:>}
- // gc box -> {GC_BOX<:pointee-uid:>}
+ // basic type -> {:name of the type:}
+ // tuple -> {tuple_(:param-uid:)*}
+ // struct -> {struct_:svh: / :node-id:_<(:param-uid:),*> }
+ // enum -> {enum_:svh: / :node-id:_<(:param-uid:),*> }
+ // enum variant -> {variant_:variant-name:_:enum-uid:}
+ // reference (&) -> {& :pointee-uid:}
+ // mut reference (&mut) -> {&mut :pointee-uid:}
+ // ptr (*) -> {* :pointee-uid:}
+ // mut ptr (*mut) -> {*mut :pointee-uid:}
+ // unique ptr (~) -> {~ :pointee-uid:}
+ // @-ptr (@) -> {@ :pointee-uid:}
+ // sized vec ([T; x]) -> {[:size:] :element-uid:}
+ // unsized vec ([T]) -> {[] :element-uid:}
+ // trait (T) -> {trait_:svh: / :node-id:_<(:param-uid:),*> }
+ // closure -> {<unsafe_> <once_> :store-sigil:
+ // |(:param-uid:),* <,_...>| -> \
+ // :return-type-uid: : (:bounds:)*}
+ // function -> {<unsafe_> <abi_> fn( (:param-uid:)* <,_...> ) -> \
+ // :return-type-uid:}
+ // unique vec box (~[]) -> {HEAP_VEC_BOX<:pointee-uid:>}
+ // gc box -> {GC_BOX<:pointee-uid:>}
+ // projection (<T as U>::V) -> {<:ty-uid: as :trait-uid:> :: :name-uid: }
match self.type_to_unique_id.get(&type_).cloned() {
Some(unique_type_id) => return unique_type_id,
principal.substs,
&mut unique_type_id);
},
+ ty::ty_projection(ref projection) => {
+ unique_type_id.push_str("<");
+
+ let self_ty = projection.trait_ref.self_ty();
+ let self_type_id = self.get_unique_type_id_of_type(cx, self_ty);
+ let self_type_id = self.get_unique_type_id_as_string(self_type_id);
+ unique_type_id.push_str(&self_type_id[]);
+
+ unique_type_id.push_str(" as ");
+
+ from_def_id_and_substs(self,
+ cx,
+ projection.trait_ref.def_id,
+ projection.trait_ref.substs,
+ &mut unique_type_id);
+
+ unique_type_id.push_str(">::");
+ unique_type_id.push_str(token::get_name(projection.item_name).get());
+ },
ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
unique_type_id.push_str("unsafe ");
closure_ty,
&mut unique_type_id);
},
- _ => {
+ ty::ty_err |
+ ty::ty_infer(_) |
+ ty::ty_open(_) |
+ ty::ty_param(_) => {
cx.sess().bug(&format!("get_unique_type_id_of_type() - unexpected type: {}, {:?}",
&ppaux::ty_to_string(cx.tcx(), type_)[],
type_.sty)[])
let variable_access = IndirectVariable {
alloca: env_pointer,
- address_operations: &address_operations[0..address_op_count]
+ address_operations: &address_operations[..address_op_count]
};
declare_local(bcx,
ast::ExprLit(_) |
ast::ExprBreak(_) |
ast::ExprAgain(_) |
- ast::ExprPath(_) => {}
+ ast::ExprPath(_) |
+ ast::ExprQPath(_) => {}
ast::ExprCast(ref sub_exp, _) |
ast::ExprAddrOf(_, ref sub_exp) |
ty::ty_unboxed_closure(..) => {
output.push_str("closure");
}
+ ty::ty_projection(ref projection) => {
+ output.push_str("<");
+ let self_ty = projection.trait_ref.self_ty();
+ push_debuginfo_type_name(cx, self_ty, true, output);
+
+ output.push_str(" as ");
+
+ push_item_name(cx, projection.trait_ref.def_id, false, output);
+ push_type_params(cx, projection.trait_ref.substs, output);
+
+ output.push_str(">::");
+ output.push_str(token::get_name(projection.item_name).get());
+ }
ty::ty_err |
ty::ty_infer(_) |
ty::ty_open(_) |
- ty::ty_projection(..) |
ty::ty_param(_) => {
cx.sess().bug(&format!("debuginfo: Trying to create type name for \
unexpected type: {}", ppaux::ty_to_string(cx.tcx(), t))[]);
ast::ExprParen(ref e) => {
trans(bcx, &**e)
}
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
trans_def(bcx, expr, bcx.def(expr.id))
}
ast::ExprField(ref base, ident) => {
ast::ExprParen(ref e) => {
trans_into(bcx, &**e, dest)
}
- ast::ExprPath(_) => {
+ ast::ExprPath(_) | ast::ExprQPath(_) => {
trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
}
ast::ExprIf(ref cond, ref thn, ref els) => {
mod cabi_arm;
mod cabi_aarch64;
mod cabi_mips;
+mod cabi_powerpc;
mod foreign;
mod intrinsic;
mod debuginfo;
ast_map::NodeArg(..) |
ast_map::NodeBlock(..) |
ast_map::NodePat(..) |
+ ast_map::NodeViewItem(..) |
ast_map::NodeLocal(..) => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node)[])
debug!("qpath_to_ty: trait_ref={}", trait_ref.repr(this.tcx()));
+ // `<T as Trait>::U<V>` shouldn't parse right now.
+ assert!(qpath.item_path.parameters.is_empty());
+
return this.projected_ty(ast_ty.span,
trait_ref,
- qpath.item_name.name);
+ qpath.item_path.identifier.name);
}
// Parses the programmer's textual representation of a type into our
};
instantiate_path(pcx.fcx, path, ty::lookup_item_type(tcx, enum_def_id),
- def, pat.span, pat.id);
+ None, def, pat.span, pat.id);
let pat_ty = fcx.node_ty(pat.id);
demand::eqtype(fcx, pat.span, expected, pat_ty);
} else {
ctor_scheme
};
- instantiate_path(pcx.fcx, path, path_scheme, def, pat.span, pat.id);
+ instantiate_path(pcx.fcx, path, path_scheme, None, def, pat.span, pat.id);
let pat_ty = fcx.node_ty(pat.id);
demand::eqtype(fcx, pat.span, expected, pat_ty);
}
};
+ let field_type = pcx.fcx.normalize_associated_types_in(span, &field_type);
+
check_pat(pcx, &*field.pat, field_type);
}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::infer;
+use middle::traits;
+use middle::ty::{self};
+use middle::subst::{self, Subst, Substs, VecPerParamSpace};
+use util::ppaux::{self, Repr};
+
+use syntax::ast;
+use syntax::codemap::{Span};
+use syntax::parse::token;
+
+use super::assoc;
+
+/// Checks that a method from an impl conforms to the signature of
+/// the same method as declared in the trait.
+///
+/// # Parameters
+///
+/// - impl_m: type of the method we are checking
+/// - impl_m_span: span to use for reporting errors
+/// - impl_m_body_id: id of the method body
+/// - trait_m: the method in the trait
+/// - impl_trait_ref: the TraitRef corresponding to the trait implementation
+
+pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+ impl_m: &ty::Method<'tcx>,
+ impl_m_span: Span,
+ impl_m_body_id: ast::NodeId,
+ trait_m: &ty::Method<'tcx>,
+ impl_trait_ref: &ty::TraitRef<'tcx>) {
+ debug!("compare_impl_method(impl_trait_ref={})",
+ impl_trait_ref.repr(tcx));
+
+ debug!("compare_impl_method: impl_trait_ref (liberated) = {}",
+ impl_trait_ref.repr(tcx));
+
+ let infcx = infer::new_infer_ctxt(tcx);
+ let mut fulfillment_cx = traits::FulfillmentContext::new();
+
+ let trait_to_impl_substs = &impl_trait_ref.substs;
+
+ // Try to give more informative error messages about self typing
+ // mismatches. Note that any mismatch will also be detected
+ // below, where we construct a canonical function type that
+ // includes the self parameter as a normal parameter. It's just
+ // that the error messages you get out of this code are a bit more
+ // inscrutable, particularly for cases where one method has no
+ // self.
+ match (&trait_m.explicit_self, &impl_m.explicit_self) {
+ (&ty::StaticExplicitSelfCategory,
+ &ty::StaticExplicitSelfCategory) => {}
+ (&ty::StaticExplicitSelfCategory, _) => {
+ tcx.sess.span_err(
+ impl_m_span,
+ format!("method `{}` has a `{}` declaration in the impl, \
+ but not in the trait",
+ token::get_name(trait_m.name),
+ ppaux::explicit_self_category_to_str(
+ &impl_m.explicit_self)).as_slice());
+ return;
+ }
+ (_, &ty::StaticExplicitSelfCategory) => {
+ tcx.sess.span_err(
+ impl_m_span,
+ format!("method `{}` has a `{}` declaration in the trait, \
+ but not in the impl",
+ token::get_name(trait_m.name),
+ ppaux::explicit_self_category_to_str(
+ &trait_m.explicit_self)).as_slice());
+ return;
+ }
+ _ => {
+ // Let the type checker catch other errors below
+ }
+ }
+
+ let num_impl_m_type_params = impl_m.generics.types.len(subst::FnSpace);
+ let num_trait_m_type_params = trait_m.generics.types.len(subst::FnSpace);
+ if num_impl_m_type_params != num_trait_m_type_params {
+ span_err!(tcx.sess, impl_m_span, E0049,
+ "method `{}` has {} type parameter{} \
+ but its trait declaration has {} type parameter{}",
+ token::get_name(trait_m.name),
+ num_impl_m_type_params,
+ if num_impl_m_type_params == 1 {""} else {"s"},
+ num_trait_m_type_params,
+ if num_trait_m_type_params == 1 {""} else {"s"});
+ return;
+ }
+
+ if impl_m.fty.sig.0.inputs.len() != trait_m.fty.sig.0.inputs.len() {
+ span_err!(tcx.sess, impl_m_span, E0050,
+ "method `{}` has {} parameter{} \
+ but the declaration in trait `{}` has {}",
+ token::get_name(trait_m.name),
+ impl_m.fty.sig.0.inputs.len(),
+ if impl_m.fty.sig.0.inputs.len() == 1 {""} else {"s"},
+ ty::item_path_str(tcx, trait_m.def_id),
+ trait_m.fty.sig.0.inputs.len());
+ return;
+ }
+
+ // This code is best explained by example. Consider a trait:
+ //
+ // trait Trait<'t,T> {
+ // fn method<'a,M>(t: &'t T, m: &'a M) -> Self;
+ // }
+ //
+ // And an impl:
+ //
+ // impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
+ // fn method<'b,N>(t: &'j &'i U, m: &'b N) -> Foo;
+ // }
+ //
+ // We wish to decide if those two method types are compatible.
+ //
+ // We start out with trait_to_impl_substs, that maps the trait
+ // type parameters to impl type parameters. This is taken from the
+ // impl trait reference:
+ //
+ // trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
+ //
+ // We create a mapping `dummy_substs` that maps from the impl type
+ // parameters to fresh types and regions. For type parameters,
+ // this is the identity transform, but we could as well use any
+ // skolemized types. For regions, we convert from bound to free
+ // regions (Note: but only early-bound regions, i.e., those
+ // declared on the impl or used in type parameter bounds).
+ //
+ // impl_to_skol_substs = {'i => 'i0, U => U0, N => N0 }
+ //
+ // Now we can apply skol_substs to the type of the impl method
+ // to yield a new function type in terms of our fresh, skolemized
+ // types:
+ //
+ // <'b> fn(t: &'i0 U0, m: &'b) -> Foo
+ //
+ // We now want to extract and substitute the type of the *trait*
+ // method and compare it. To do so, we must create a compound
+ // substitution by combining trait_to_impl_substs and
+ // impl_to_skol_substs, and also adding a mapping for the method
+ // type parameters. We extend the mapping to also include
+ // the method parameters.
+ //
+ // trait_to_skol_substs = { T => &'i0 U0, Self => Foo, M => N0 }
+ //
+ // Applying this to the trait method type yields:
+ //
+ // <'a> fn(t: &'i0 U0, m: &'a) -> Foo
+ //
+ // This type is also the same but the name of the bound region ('a
+ // vs 'b). However, the normal subtyping rules on fn types handle
+ // this kind of equivalency just fine.
+ //
+ // We now use these substitutions to ensure that all declared bounds are
+ // satisfied by the implementation's method.
+ //
+ // We do this by creating a parameter environment which contains a
+ // substitution corresponding to impl_to_skol_substs. We then build
+ // trait_to_skol_substs and use it to convert the predicates contained
+ // in the trait_m.generics to the skolemized form.
+ //
+ // Finally we register each of these predicates as an obligation in
+ // a fresh FulfillmentCtxt, and invoke select_all_or_error.
+
+ // Create a parameter environment that represents the implementation's
+ // method.
+ let impl_param_env =
+ ty::ParameterEnvironment::for_item(tcx, impl_m.def_id.node);
+
+ // Create mapping from impl to skolemized.
+ let impl_to_skol_substs = &impl_param_env.free_substs;
+
+ // Create mapping from trait to skolemized.
+ let trait_to_skol_substs =
+ trait_to_impl_substs
+ .subst(tcx, impl_to_skol_substs)
+ .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
+ impl_to_skol_substs.regions().get_slice(subst::FnSpace).to_vec());
+ debug!("compare_impl_method: trait_to_skol_substs={}",
+ trait_to_skol_substs.repr(tcx));
+
+ // Check region bounds. FIXME(@jroesch) refactor this away when removing
+ // ParamBounds.
+ if !check_region_bounds_on_impl_method(tcx,
+ impl_m_span,
+ impl_m,
+ &trait_m.generics,
+ &impl_m.generics,
+ &trait_to_skol_substs,
+ impl_to_skol_substs) {
+ return;
+ }
+
+ // Create obligations for each predicate declared by the impl
+ // definition in the context of the trait's parameter
+ // environment. We can't just use `impl_env.caller_bounds`,
+ // however, because we want to replace all late-bound regions with
+ // region variables.
+ let impl_bounds =
+ impl_m.generics.to_bounds(tcx, impl_to_skol_substs);
+
+ let (impl_bounds, _) =
+ infcx.replace_late_bound_regions_with_fresh_var(
+ impl_m_span,
+ infer::HigherRankedType,
+ &ty::Binder(impl_bounds));
+ debug!("compare_impl_method: impl_bounds={}",
+ impl_bounds.repr(tcx));
+
+ // // Normalize the associated types in the impl_bounds.
+ // let traits::Normalized { value: impl_bounds, .. } =
+ // traits::normalize(&mut selcx, normalize_cause.clone(), &impl_bounds);
+
+ // Normalize the associated types in the trait_bounds.
+ let trait_bounds = trait_m.generics.to_bounds(tcx, &trait_to_skol_substs);
+ // let traits::Normalized { value: trait_bounds, .. } =
+ // traits::normalize(&mut selcx, normalize_cause, &trait_bounds);
+
+ // Obtain the predicate split predicate sets for each.
+ let trait_pred = trait_bounds.predicates.split();
+ let impl_pred = impl_bounds.predicates.split();
+
+ // This is the only tricky bit of the new way we check implementation methods
+ // We need to build a set of predicates where only the FnSpace bounds
+ // are from the trait and we assume all other bounds from the implementation
+ // to be previously satisfied.
+ //
+ // We then register the obligations from the impl_m and check to see
+ // if all constraints hold.
+ let hybrid_preds = VecPerParamSpace::new(
+ impl_pred.types,
+ impl_pred.selfs,
+ trait_pred.fns
+ );
+
+ // Construct trait parameter environment and then shift it into the skolemized viewpoint.
+ let mut trait_param_env = impl_param_env.clone();
+ // The key step here is to update the caller_bounds's predicates to be
+ // the new hybrid bounds we computed.
+ trait_param_env.caller_bounds.predicates = hybrid_preds;
+
+ debug!("compare_impl_method: trait_bounds={}",
+ trait_param_env.caller_bounds.repr(tcx));
+
+ let mut selcx = traits::SelectionContext::new(&infcx, &trait_param_env);
+
+ let normalize_cause =
+ traits::ObligationCause::misc(impl_m_span, impl_m_body_id);
+
+ for predicate in impl_pred.fns.into_iter() {
+ let traits::Normalized { value: predicate, .. } =
+ traits::normalize(&mut selcx, normalize_cause.clone(), &predicate);
+
+ let cause = traits::ObligationCause {
+ span: impl_m_span,
+ body_id: impl_m_body_id,
+ code: traits::ObligationCauseCode::CompareImplMethodObligation
+ };
+
+ fulfillment_cx.register_predicate_obligation(
+ &infcx,
+ traits::Obligation::new(cause, predicate));
+ }
+
+ // We now need to check that the signature of the impl method is
+ // compatible with that of the trait method. We do this by
+ // checking that `impl_fty <: trait_fty`.
+ //
+ // FIXME. Unfortunately, this doesn't quite work right now because
+ // associated type normalization is not integrated into subtype
+ // checks. For the comparison to be valid, we need to
+ // normalize the associated types in the impl/trait methods
+ // first. However, because function types bind regions, just
+ // calling `normalize_associated_types_in` would have no effect on
+ // any associated types appearing in the fn arguments or return
+ // type.
+
+ // Compute skolemized form of impl and trait method tys.
+ let impl_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(impl_m.fty.clone()));
+ let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs);
+ let trait_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(trait_m.fty.clone()));
+ let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
+
+ let err = infcx.try(|snapshot| {
+ let origin = infer::MethodCompatCheck(impl_m_span);
+
+ let (impl_sig, _) =
+ infcx.replace_late_bound_regions_with_fresh_var(impl_m_span,
+ infer::HigherRankedType,
+ &impl_m.fty.sig);
+ let impl_sig =
+ impl_sig.subst(tcx, impl_to_skol_substs);
+ let impl_sig =
+ assoc::normalize_associated_types_in(&infcx,
+ &impl_param_env,
+ &mut fulfillment_cx,
+ impl_m_span,
+ impl_m_body_id,
+ &impl_sig);
+ let impl_fty =
+ ty::mk_bare_fn(tcx,
+ None,
+ tcx.mk_bare_fn(ty::BareFnTy { unsafety: impl_m.fty.unsafety,
+ abi: impl_m.fty.abi,
+ sig: ty::Binder(impl_sig) }));
+ debug!("compare_impl_method: impl_fty={}",
+ impl_fty.repr(tcx));
+
+ let (trait_sig, skol_map) =
+ infcx.skolemize_late_bound_regions(&trait_m.fty.sig, snapshot);
+ let trait_sig =
+ trait_sig.subst(tcx, &trait_to_skol_substs);
+ let trait_sig =
+ assoc::normalize_associated_types_in(&infcx,
+ &impl_param_env,
+ &mut fulfillment_cx,
+ impl_m_span,
+ impl_m_body_id,
+ &trait_sig);
+ let trait_fty =
+ ty::mk_bare_fn(tcx,
+ None,
+ tcx.mk_bare_fn(ty::BareFnTy { unsafety: trait_m.fty.unsafety,
+ abi: trait_m.fty.abi,
+ sig: ty::Binder(trait_sig) }));
+
+ debug!("compare_impl_method: trait_fty={}",
+ trait_fty.repr(tcx));
+
+ try!(infer::mk_subty(&infcx, false, origin, impl_fty, trait_fty));
+
+ infcx.leak_check(&skol_map, snapshot)
+ });
+
+ match err {
+ Ok(()) => { }
+ Err(terr) => {
+ debug!("checking trait method for compatibility: impl ty {}, trait ty {}",
+ impl_fty.repr(tcx),
+ trait_fty.repr(tcx));
+ span_err!(tcx.sess, impl_m_span, E0053,
+ "method `{}` has an incompatible type for trait: {}",
+ token::get_name(trait_m.name),
+ ty::type_err_to_str(tcx, &terr));
+ return;
+ }
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ match fulfillment_cx.select_all_or_error(&infcx, &trait_param_env) {
+ Err(ref errors) => { traits::report_fulfillment_errors(&infcx, errors) }
+ Ok(_) => {}
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of lifetime
+ // parameters.
+ infcx.resolve_regions_and_report_errors(impl_m_body_id);
+
+ fn check_region_bounds_on_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
+ span: Span,
+ impl_m: &ty::Method<'tcx>,
+ trait_generics: &ty::Generics<'tcx>,
+ impl_generics: &ty::Generics<'tcx>,
+ trait_to_skol_substs: &Substs<'tcx>,
+ impl_to_skol_substs: &Substs<'tcx>)
+ -> bool
+ {
+
+ let trait_params = trait_generics.regions.get_slice(subst::FnSpace);
+ let impl_params = impl_generics.regions.get_slice(subst::FnSpace);
+
+ debug!("check_region_bounds_on_impl_method: \
+ trait_generics={} \
+ impl_generics={} \
+ trait_to_skol_substs={} \
+ impl_to_skol_substs={}",
+ trait_generics.repr(tcx),
+ impl_generics.repr(tcx),
+ trait_to_skol_substs.repr(tcx),
+ impl_to_skol_substs.repr(tcx));
+
+ // Must have same number of early-bound lifetime parameters.
+ // Unfortunately, if the user screws up the bounds, then this
+ // will change classification between early and late. E.g.,
+ // if in trait we have `<'a,'b:'a>`, and in impl we just have
+ // `<'a,'b>`, then we have 2 early-bound lifetime parameters
+ // in trait but 0 in the impl. But if we report "expected 2
+ // but found 0" it's confusing, because it looks like there
+ // are zero. Since I don't quite know how to phrase things at
+ // the moment, give a kind of vague error message.
+ if trait_params.len() != impl_params.len() {
+ tcx.sess.span_err(
+ span,
+ &format!("lifetime parameters or bounds on method `{}` do \
+ not match the trait declaration",
+ token::get_name(impl_m.name))[]);
+ return false;
+ }
+
+ return true;
+ }
+}
pub use self::LvaluePreference::*;
pub use self::Expectation::*;
+pub use self::compare_method::compare_impl_method;
use self::IsBinopAssignment::*;
use self::TupleArgumentsFlag::*;
use middle::lang_items::TypeIdLangItem;
use lint;
use util::common::{block_query, indenter, loop_query};
-use util::ppaux::{self, UserString, Repr};
+use util::ppaux::{self, Repr};
use util::nodemap::{DefIdMap, FnvHashMap, NodeMap};
use std::cell::{Cell, Ref, RefCell};
pub mod wf;
mod closure;
mod callee;
+mod compare_method;
-/// Fields that are part of a `FnCtxt` which are inherited by
/// closures defined within the function. For example:
///
/// fn foo() {
}
}
-/// Checks that a method from an impl conforms to the signature of
-/// the same method as declared in the trait.
-///
-/// # Parameters
-///
-/// - impl_generics: the generics declared on the impl itself (not the method!)
-/// - impl_m: type of the method we are checking
-/// - impl_m_span: span to use for reporting errors
-/// - impl_m_body_id: id of the method body
-/// - trait_m: the method in the trait
-/// - trait_to_impl_substs: the substitutions used on the type of the trait
-fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
- impl_m: &ty::Method<'tcx>,
- impl_m_span: Span,
- impl_m_body_id: ast::NodeId,
- trait_m: &ty::Method<'tcx>,
- impl_trait_ref: &ty::TraitRef<'tcx>) {
- debug!("compare_impl_method(impl_trait_ref={})",
- impl_trait_ref.repr(tcx));
-
- debug!("impl_trait_ref (liberated) = {}",
- impl_trait_ref.repr(tcx));
-
- let infcx = infer::new_infer_ctxt(tcx);
- let mut fulfillment_cx = traits::FulfillmentContext::new();
-
- let trait_to_impl_substs = &impl_trait_ref.substs;
-
- // Try to give more informative error messages about self typing
- // mismatches. Note that any mismatch will also be detected
- // below, where we construct a canonical function type that
- // includes the self parameter as a normal parameter. It's just
- // that the error messages you get out of this code are a bit more
- // inscrutable, particularly for cases where one method has no
- // self.
- match (&trait_m.explicit_self, &impl_m.explicit_self) {
- (&ty::StaticExplicitSelfCategory,
- &ty::StaticExplicitSelfCategory) => {}
- (&ty::StaticExplicitSelfCategory, _) => {
- tcx.sess.span_err(
- impl_m_span,
- &format!("method `{}` has a `{}` declaration in the impl, \
- but not in the trait",
- token::get_name(trait_m.name),
- ppaux::explicit_self_category_to_str(
- &impl_m.explicit_self))[]);
- return;
- }
- (_, &ty::StaticExplicitSelfCategory) => {
- tcx.sess.span_err(
- impl_m_span,
- &format!("method `{}` has a `{}` declaration in the trait, \
- but not in the impl",
- token::get_name(trait_m.name),
- ppaux::explicit_self_category_to_str(
- &trait_m.explicit_self))[]);
- return;
- }
- _ => {
- // Let the type checker catch other errors below
- }
- }
-
- let num_impl_m_type_params = impl_m.generics.types.len(subst::FnSpace);
- let num_trait_m_type_params = trait_m.generics.types.len(subst::FnSpace);
- if num_impl_m_type_params != num_trait_m_type_params {
- span_err!(tcx.sess, impl_m_span, E0049,
- "method `{}` has {} type parameter{} \
- but its trait declaration has {} type parameter{}",
- token::get_name(trait_m.name),
- num_impl_m_type_params,
- if num_impl_m_type_params == 1 {""} else {"s"},
- num_trait_m_type_params,
- if num_trait_m_type_params == 1 {""} else {"s"});
- return;
- }
-
- if impl_m.fty.sig.0.inputs.len() != trait_m.fty.sig.0.inputs.len() {
- span_err!(tcx.sess, impl_m_span, E0050,
- "method `{}` has {} parameter{} \
- but the declaration in trait `{}` has {}",
- token::get_name(trait_m.name),
- impl_m.fty.sig.0.inputs.len(),
- if impl_m.fty.sig.0.inputs.len() == 1 {""} else {"s"},
- ty::item_path_str(tcx, trait_m.def_id),
- trait_m.fty.sig.0.inputs.len());
- return;
- }
-
- // This code is best explained by example. Consider a trait:
- //
- // trait Trait<'t,T> {
- // fn method<'a,M>(t: &'t T, m: &'a M) -> Self;
- // }
- //
- // And an impl:
- //
- // impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
- // fn method<'b,N>(t: &'j &'i U, m: &'b N) -> Foo;
- // }
- //
- // We wish to decide if those two method types are compatible.
- //
- // We start out with trait_to_impl_substs, that maps the trait
- // type parameters to impl type parameters. This is taken from the
- // impl trait reference:
- //
- // trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
- //
- // We create a mapping `dummy_substs` that maps from the impl type
- // parameters to fresh types and regions. For type parameters,
- // this is the identity transform, but we could as well use any
- // skolemized types. For regions, we convert from bound to free
- // regions (Note: but only early-bound regions, i.e., those
- // declared on the impl or used in type parameter bounds).
- //
- // impl_to_skol_substs = {'i => 'i0, U => U0, N => N0 }
- //
- // Now we can apply skol_substs to the type of the impl method
- // to yield a new function type in terms of our fresh, skolemized
- // types:
- //
- // <'b> fn(t: &'i0 U0, m: &'b) -> Foo
- //
- // We now want to extract and substitute the type of the *trait*
- // method and compare it. To do so, we must create a compound
- // substitution by combining trait_to_impl_substs and
- // impl_to_skol_substs, and also adding a mapping for the method
- // type parameters. We extend the mapping to also include
- // the method parameters.
- //
- // trait_to_skol_substs = { T => &'i0 U0, Self => Foo, M => N0 }
- //
- // Applying this to the trait method type yields:
- //
- // <'a> fn(t: &'i0 U0, m: &'a) -> Foo
- //
- // This type is also the same but the name of the bound region ('a
- // vs 'b). However, the normal subtyping rules on fn types handle
- // this kind of equivalency just fine.
-
- // Create mapping from impl to skolemized.
- let impl_param_env = ty::construct_parameter_environment(tcx, &impl_m.generics, impl_m_body_id);
- let impl_to_skol_substs = &impl_param_env.free_substs;
-
- // Create mapping from trait to skolemized.
- let trait_to_skol_substs =
- trait_to_impl_substs
- .subst(tcx, impl_to_skol_substs)
- .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(),
- impl_to_skol_substs.regions().get_slice(subst::FnSpace).to_vec());
-
- // Check region bounds.
- if !check_region_bounds_on_impl_method(tcx,
- impl_m_span,
- impl_m,
- &trait_m.generics,
- &impl_m.generics,
- &trait_to_skol_substs,
- impl_to_skol_substs) {
- return;
- }
-
- // Check bounds. Note that the bounds from the impl may reference
- // late-bound regions declared on the impl, so liberate those.
- // This requires two artificial binding scopes -- one for the impl,
- // and one for the method.
- //
- // An example would be:
- //
- // trait Foo<T> { fn method<U:Bound<T>>() { ... } }
- //
- // impl<'a> Foo<&'a T> for &'a U {
- // fn method<U:Bound<&'a T>>() { ... }
- // }
- //
- // Here, the region parameter `'a` is late-bound, so in the bound
- // `Bound<&'a T>`, the lifetime `'a` will be late-bound with a
- // depth of 3 (it is nested within 3 binders: the impl, method,
- // and trait-ref itself). So when we do the liberation, we have
- // two introduce two `ty::Binder` scopes, one for the impl and one
- // the method.
- //
- // The only late-bounded regions that can possibly appear here are
- // from the impl, not the method. This is because region
- // parameters declared on the method which appear in a type bound
- // would be early bound. On the trait side, there can be no
- // late-bound lifetimes because trait definitions do not introduce
- // a late region binder.
- let trait_bounds =
- trait_m.generics.types.get_slice(subst::FnSpace).iter()
- .map(|trait_param_def| &trait_param_def.bounds);
- let impl_bounds =
- impl_m.generics.types.get_slice(subst::FnSpace).iter()
- .map(|impl_param_def| &impl_param_def.bounds);
- for (i, (trait_param_bounds, impl_param_bounds)) in
- trait_bounds.zip(impl_bounds).enumerate()
- {
- // Check that the impl does not require any builtin-bounds
- // that the trait does not guarantee:
- let extra_bounds =
- impl_param_bounds.builtin_bounds -
- trait_param_bounds.builtin_bounds;
- if !extra_bounds.is_empty() {
- span_err!(tcx.sess, impl_m_span, E0051,
- "in method `{}`, type parameter {} requires `{}`, \
- which is not required by the corresponding type parameter \
- in the trait declaration",
- token::get_name(trait_m.name),
- i,
- extra_bounds.user_string(tcx));
- return;
- }
-
- // Check that the trait bounds of the trait imply the bounds of its
- // implementation.
- //
- // FIXME(pcwalton): We could be laxer here regarding sub- and super-
- // traits, but I doubt that'll be wanted often, so meh.
- for impl_trait_bound in impl_param_bounds.trait_bounds.iter() {
- debug!("compare_impl_method(): impl-trait-bound subst");
- let impl_trait_bound =
- impl_trait_bound.subst(tcx, impl_to_skol_substs);
-
- // There may be late-bound regions from the impl in the
- // impl's bound, so "liberate" those. Note that the
- // trait_to_skol_substs is derived from the impl's
- // trait-ref, and the late-bound regions appearing there
- // have already been liberated, so the result should match
- // up.
-
- let found_match_in_trait =
- trait_param_bounds.trait_bounds.iter().any(|trait_bound| {
- debug!("compare_impl_method(): trait-bound subst");
- let trait_bound =
- trait_bound.subst(tcx, &trait_to_skol_substs);
- infer::mk_sub_poly_trait_refs(&infcx,
- true,
- infer::Misc(impl_m_span),
- trait_bound,
- impl_trait_bound.clone()).is_ok()
- });
-
- if !found_match_in_trait {
- span_err!(tcx.sess, impl_m_span, E0052,
- "in method `{}`, type parameter {} requires bound `{}`, which is not \
- required by the corresponding type parameter in the trait declaration",
- token::get_name(trait_m.name),
- i,
- impl_trait_bound.user_string(tcx));
- }
- }
- }
-
- // We now need to check that the signature of the impl method is
- // compatible with that of the trait method. We do this by
- // checking that `impl_fty <: trait_fty`.
- //
- // FIXME. Unfortunately, this doesn't quite work right now because
- // associated type normalization is not integrated into subtype
- // checks. For the comparison to be valid, we need to
- // normalize the associated types in the impl/trait methods
- // first. However, because function types bind regions, just
- // calling `normalize_associated_types_in` would have no effect on
- // any associated types appearing in the fn arguments or return
- // type.
-
-
- // Compute skolemized form of impl and trait method tys.
- let impl_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(impl_m.fty.clone()));
- let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs);
- let trait_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(trait_m.fty.clone()));
- let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
-
- let err = infcx.try(|snapshot| {
- let origin = infer::MethodCompatCheck(impl_m_span);
-
- let (impl_sig, _) =
- infcx.replace_late_bound_regions_with_fresh_var(impl_m_span,
- infer::HigherRankedType,
- &impl_m.fty.sig);
- let impl_sig =
- impl_sig.subst(tcx, impl_to_skol_substs);
- let impl_sig =
- assoc::normalize_associated_types_in(&infcx,
- &impl_param_env,
- &mut fulfillment_cx,
- impl_m_span,
- impl_m_body_id,
- &impl_sig);
- let impl_fty =
- ty::mk_bare_fn(tcx,
- None,
- tcx.mk_bare_fn(ty::BareFnTy { unsafety: impl_m.fty.unsafety,
- abi: impl_m.fty.abi,
- sig: ty::Binder(impl_sig) }));
- debug!("compare_impl_method: impl_fty={}",
- impl_fty.repr(tcx));
-
- let (trait_sig, skol_map) =
- infcx.skolemize_late_bound_regions(&trait_m.fty.sig, snapshot);
- let trait_sig =
- trait_sig.subst(tcx, &trait_to_skol_substs);
- let trait_sig =
- assoc::normalize_associated_types_in(&infcx,
- &impl_param_env,
- &mut fulfillment_cx,
- impl_m_span,
- impl_m_body_id,
- &trait_sig);
- let trait_fty =
- ty::mk_bare_fn(tcx,
- None,
- tcx.mk_bare_fn(ty::BareFnTy { unsafety: trait_m.fty.unsafety,
- abi: trait_m.fty.abi,
- sig: ty::Binder(trait_sig) }));
-
- debug!("compare_impl_method: trait_fty={}",
- trait_fty.repr(tcx));
-
- try!(infer::mk_subty(&infcx, false, origin, impl_fty, trait_fty));
-
- infcx.leak_check(&skol_map, snapshot)
- });
-
- match err {
- Ok(()) => { }
- Err(terr) => {
- debug!("checking trait method for compatibility: impl ty {}, trait ty {}",
- impl_fty.repr(tcx),
- trait_fty.repr(tcx));
- span_err!(tcx.sess, impl_m_span, E0053,
- "method `{}` has an incompatible type for trait: {}",
- token::get_name(trait_m.name),
- ty::type_err_to_str(tcx, &terr));
- return;
- }
- }
-
- // Run the fulfillment context to completion to accommodate any
- // associated type normalizations that may have occurred.
- match fulfillment_cx.select_all_or_error(&infcx, &impl_param_env) {
- Ok(()) => { }
- Err(errors) => {
- traits::report_fulfillment_errors(&infcx, &errors);
- }
- }
-
- // Finally, resolve all regions. This catches wily misuses of lifetime
- // parameters.
- infcx.resolve_regions_and_report_errors(impl_m_body_id);
-
- /// Check that region bounds on impl method are the same as those on the trait. In principle,
- /// it could be ok for there to be fewer region bounds on the impl method, but this leads to an
- /// annoying corner case that is painful to handle (described below), so for now we can just
- /// forbid it.
- ///
- /// Example (see `src/test/compile-fail/regions-bound-missing-bound-in-impl.rs`):
- ///
- /// ```
- /// trait Foo<'a> {
- /// fn method1<'b>();
- /// fn method2<'b:'a>();
- /// }
- ///
- /// impl<'a> Foo<'a> for ... {
- /// fn method1<'b:'a>() { .. case 1, definitely bad .. }
- /// fn method2<'b>() { .. case 2, could be ok .. }
- /// }
- /// ```
- ///
- /// The "definitely bad" case is case #1. Here, the impl adds an extra constraint not present
- /// in the trait.
- ///
- /// The "maybe bad" case is case #2. Here, the impl adds an extra constraint not present in the
- /// trait. We could in principle allow this, but it interacts in a complex way with early/late
- /// bound resolution of lifetimes. Basically the presence or absence of a lifetime bound
- /// affects whether the lifetime is early/late bound, and right now the code breaks if the
- /// trait has an early bound lifetime parameter and the method does not.
- fn check_region_bounds_on_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>,
- span: Span,
- impl_m: &ty::Method<'tcx>,
- trait_generics: &ty::Generics<'tcx>,
- impl_generics: &ty::Generics<'tcx>,
- trait_to_skol_substs: &Substs<'tcx>,
- impl_to_skol_substs: &Substs<'tcx>)
- -> bool
- {
-
- let trait_params = trait_generics.regions.get_slice(subst::FnSpace);
- let impl_params = impl_generics.regions.get_slice(subst::FnSpace);
-
- debug!("check_region_bounds_on_impl_method: \
- trait_generics={} \
- impl_generics={} \
- trait_to_skol_substs={} \
- impl_to_skol_substs={}",
- trait_generics.repr(tcx),
- impl_generics.repr(tcx),
- trait_to_skol_substs.repr(tcx),
- impl_to_skol_substs.repr(tcx));
-
- // Must have same number of early-bound lifetime parameters.
- // Unfortunately, if the user screws up the bounds, then this
- // will change classification between early and late. E.g.,
- // if in trait we have `<'a,'b:'a>`, and in impl we just have
- // `<'a,'b>`, then we have 2 early-bound lifetime parameters
- // in trait but 0 in the impl. But if we report "expected 2
- // but found 0" it's confusing, because it looks like there
- // are zero. Since I don't quite know how to phrase things at
- // the moment, give a kind of vague error message.
- if trait_params.len() != impl_params.len() {
- tcx.sess.span_err(
- span,
- &format!("lifetime parameters or bounds on method `{}` do \
- not match the trait declaration",
- token::get_name(impl_m.name))[]);
- return false;
- }
-
- // Each parameter `'a:'b+'c+'d` in trait should have the same
- // set of bounds in the impl, after subst.
- for (trait_param, impl_param) in
- trait_params.iter().zip(
- impl_params.iter())
- {
- let trait_bounds =
- trait_param.bounds.subst(tcx, trait_to_skol_substs);
- let impl_bounds =
- impl_param.bounds.subst(tcx, impl_to_skol_substs);
-
- debug!("check_region_bounds_on_impl_method: \
- trait_param={} \
- impl_param={} \
- trait_bounds={} \
- impl_bounds={}",
- trait_param.repr(tcx),
- impl_param.repr(tcx),
- trait_bounds.repr(tcx),
- impl_bounds.repr(tcx));
-
- // Collect the set of bounds present in trait but not in
- // impl.
- let missing: Vec<ty::Region> =
- trait_bounds.iter()
- .filter(|&b| !impl_bounds.contains(b))
- .map(|&b| b)
- .collect();
-
- // Collect set present in impl but not in trait.
- let extra: Vec<ty::Region> =
- impl_bounds.iter()
- .filter(|&b| !trait_bounds.contains(b))
- .map(|&b| b)
- .collect();
-
- debug!("missing={} extra={}",
- missing.repr(tcx), extra.repr(tcx));
-
- let err = if missing.len() != 0 || extra.len() != 0 {
- tcx.sess.span_err(
- span,
- &format!(
- "the lifetime parameter `{}` declared in the impl \
- has a distinct set of bounds \
- from its counterpart `{}` \
- declared in the trait",
- impl_param.name.user_string(tcx),
- trait_param.name.user_string(tcx))[]);
- true
- } else {
- false
- };
-
- if missing.len() != 0 {
- tcx.sess.span_note(
- span,
- &format!("the impl is missing the following bounds: `{}`",
- missing.user_string(tcx))[]);
- }
-
- if extra.len() != 0 {
- tcx.sess.span_note(
- span,
- &format!("the impl has the following extra bounds: `{}`",
- extra.user_string(tcx))[]);
- }
-
- if err {
- return false;
- }
- }
-
- return true;
- }
-}
-
fn check_cast(fcx: &FnCtxt,
cast_expr: &ast::Expr,
e: &ast::Expr,
obligations.map_move(|o| self.register_predicate(o));
}
+
+ // Only for fields! Returns <none> for methods.
+ // Indifferent to privacy flags
+ pub fn lookup_field_ty(&self,
+ span: Span,
+ class_id: ast::DefId,
+ items: &[ty::field_ty],
+ fieldname: ast::Name,
+ substs: &subst::Substs<'tcx>)
+ -> Option<Ty<'tcx>>
+ {
+ let o_field = items.iter().find(|f| f.name == fieldname);
+ o_field.map(|f| ty::lookup_field_type(self.tcx(), class_id, f.id, substs))
+ .map(|t| self.normalize_associated_types_in(span, &t))
+ }
+
+ pub fn lookup_tup_field_ty(&self,
+ span: Span,
+ class_id: ast::DefId,
+ items: &[ty::field_ty],
+ idx: uint,
+ substs: &subst::Substs<'tcx>)
+ -> Option<Ty<'tcx>>
+ {
+ let o_field = if idx < items.len() { Some(&items[idx]) } else { None };
+ o_field.map(|f| ty::lookup_field_type(self.tcx(), class_id, f.id, substs))
+ .map(|t| self.normalize_associated_types_in(span, &t))
+ }
}
impl<'a, 'tcx> RegionScope for FnCtxt<'a, 'tcx> {
TypeAndSubsts { substs: substs, ty: substd_ty }
}
-// Only for fields! Returns <none> for methods>
-// Indifferent to privacy flags
-pub fn lookup_field_ty<'tcx>(tcx: &ty::ctxt<'tcx>,
- class_id: ast::DefId,
- items: &[ty::field_ty],
- fieldname: ast::Name,
- substs: &subst::Substs<'tcx>)
- -> Option<Ty<'tcx>> {
-
- let o_field = items.iter().find(|f| f.name == fieldname);
- o_field.map(|f| ty::lookup_field_type(tcx, class_id, f.id, substs))
-}
-
-pub fn lookup_tup_field_ty<'tcx>(tcx: &ty::ctxt<'tcx>,
- class_id: ast::DefId,
- items: &[ty::field_ty],
- idx: uint,
- substs: &subst::Substs<'tcx>)
- -> Option<Ty<'tcx>> {
-
- let o_field = if idx < items.len() { Some(&items[idx]) } else { None };
- o_field.map(|f| ty::lookup_field_type(tcx, class_id, f.id, substs))
-}
-
// Controls whether the arguments are automatically referenced. This is useful
// for overloaded binary and unary operators.
#[derive(Copy, PartialEq)]
ty::ty_struct(base_id, substs) => {
debug!("struct named {}", ppaux::ty_to_string(tcx, base_t));
let fields = ty::lookup_struct_fields(tcx, base_id);
- lookup_field_ty(tcx, base_id, &fields[],
- field.node.name, &(*substs))
+ fcx.lookup_field_ty(expr.span, base_id, &fields[],
+ field.node.name, &(*substs))
}
_ => None
}
if tuple_like {
debug!("tuple struct named {}", ppaux::ty_to_string(tcx, base_t));
let fields = ty::lookup_struct_fields(tcx, base_id);
- lookup_tup_field_ty(tcx, base_id, &fields[],
- idx.node, &(*substs))
+ fcx.lookup_tup_field_ty(expr.span, base_id, &fields[],
+ idx.node, &(*substs))
} else {
None
}
};
fcx.write_ty(id, oprnd_t);
}
- ast::ExprPath(ref pth) => {
- let defn = lookup_def(fcx, pth.span, id);
+ ast::ExprPath(ref path) => {
+ let defn = lookup_def(fcx, path.span, id);
+ let pty = type_scheme_for_def(fcx, expr.span, defn);
+ instantiate_path(fcx, path, pty, None, defn, expr.span, expr.id);
+
+ // We always require that the type provided as the value for
+ // a type parameter outlives the moment of instantiation.
+ constrain_path_type_parameters(fcx, expr);
+ }
+ ast::ExprQPath(ref qpath) => {
+ // Require explicit type params for the trait.
+ let self_ty = fcx.to_ty(&*qpath.self_type);
+ astconv::instantiate_trait_ref(fcx, fcx, &*qpath.trait_ref, Some(self_ty), None);
+
+ let defn = lookup_def(fcx, expr.span, id);
let pty = type_scheme_for_def(fcx, expr.span, defn);
- instantiate_path(fcx, pth, pty, defn, expr.span, expr.id);
+ let mut path = qpath.trait_ref.path.clone();
+ path.segments.push(qpath.item_path.clone());
+ instantiate_path(fcx, &path, pty, Some(self_ty), defn, expr.span, expr.id);
// We always require that the type provided as the value for
// a type parameter outlives the moment of instantiation.
pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
path: &ast::Path,
type_scheme: TypeScheme<'tcx>,
+ opt_self_ty: Option<Ty<'tcx>>,
def: def::Def,
span: Span,
node_id: ast::NodeId) {
}
}
}
+ if let Some(self_ty) = opt_self_ty {
+ // `<T as Trait>::foo` shouldn't have resolved to a `Self`-less item.
+ assert_eq!(type_defs.len(subst::SelfSpace), 1);
+ substs.types.push(subst::SelfSpace, self_ty);
+ }
// Now we have to compare the types that the user *actually*
// provided against the types that were *expected*. If the user
pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Attribute> {
- let mut attrs = Vec::new();
- csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
- attrs.extend(v.into_iter().map(|a| {
- a.clean(cx)
- }));
- });
- attrs
+ let attrs = csearch::get_item_attrs(&tcx.sess.cstore, did);
+ attrs.into_iter().map(|a| a.clean(cx)).collect()
}
/// Record an external fully qualified name in the external_paths cache.
}
}
+impl<T, U> Clean<U> for ty::Binder<T> where T: Clean<U> {
+ fn clean(&self, cx: &DocContext) -> U {
+ self.0.clean(cx)
+ }
+}
+
impl<T: Clean<U>, U> Clean<Vec<U>> for syntax::owned_slice::OwnedSlice<T> {
fn clean(&self, cx: &DocContext) -> Vec<U> {
self.iter().map(|x| x.clean(cx)).collect()
}
}
-impl<'tcx> Clean<Vec<TyParamBound>> for ty::ExistentialBounds<'tcx> {
- fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
- let mut vec = vec![];
- self.region_bound.clean(cx).map(|b| vec.push(RegionBound(b)));
+impl<'tcx> Clean<(Vec<TyParamBound>, Vec<TypeBinding>)> for ty::ExistentialBounds<'tcx> {
+ fn clean(&self, cx: &DocContext) -> (Vec<TyParamBound>, Vec<TypeBinding>) {
+ let mut tp_bounds = vec![];
+ self.region_bound.clean(cx).map(|b| tp_bounds.push(RegionBound(b)));
for bb in self.builtin_bounds.iter() {
- vec.push(bb.clean(cx));
+ tp_bounds.push(bb.clean(cx));
}
- // FIXME(#20299) -- should do something with projection bounds
+ let mut bindings = vec![];
+ for &ty::Binder(ref pb) in self.projection_bounds.iter() {
+ bindings.push(TypeBinding {
+ name: pb.projection_ty.item_name.clean(cx),
+ ty: pb.ty.clean(cx)
+ });
+ }
- vec
+ (tp_bounds, bindings)
}
}
fn external_path_params(cx: &DocContext, trait_did: Option<ast::DefId>,
- substs: &subst::Substs) -> PathParameters {
+ bindings: Vec<TypeBinding>, substs: &subst::Substs) -> PathParameters {
use rustc::middle::ty::sty;
let lifetimes = substs.regions().get_slice(subst::TypeSpace)
.iter()
return PathParameters::AngleBracketed {
lifetimes: lifetimes,
types: types.clean(cx),
- bindings: vec![]
+ bindings: bindings
}
}
};
PathParameters::AngleBracketed {
lifetimes: lifetimes,
types: types.clean(cx),
- bindings: vec![] // FIXME(#20646)
+ bindings: bindings
}
}
}
// trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar
// from Fn<(A, B,), C> to Fn(A, B) -> C
fn external_path(cx: &DocContext, name: &str, trait_did: Option<ast::DefId>,
- substs: &subst::Substs) -> Path {
+ bindings: Vec<TypeBinding>, substs: &subst::Substs) -> Path {
Path {
global: false,
segments: vec![PathSegment {
name: name.to_string(),
- params: external_path_params(cx, trait_did, substs)
+ params: external_path_params(cx, trait_did, bindings, substs)
}],
}
}
let (did, path) = match *self {
ty::BoundSend =>
(tcx.lang_items.send_trait().unwrap(),
- external_path(cx, "Send", None, &empty)),
+ external_path(cx, "Send", None, vec![], &empty)),
ty::BoundSized =>
(tcx.lang_items.sized_trait().unwrap(),
- external_path(cx, "Sized", None, &empty)),
+ external_path(cx, "Sized", None, vec![], &empty)),
ty::BoundCopy =>
(tcx.lang_items.copy_trait().unwrap(),
- external_path(cx, "Copy", None, &empty)),
+ external_path(cx, "Copy", None, vec![], &empty)),
ty::BoundSync =>
(tcx.lang_items.sync_trait().unwrap(),
- external_path(cx, "Sync", None, &empty)),
+ external_path(cx, "Sync", None, vec![], &empty)),
};
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.into_iter().map(|i| i.to_string()).collect();
}
}
-impl<'tcx> Clean<TyParamBound> for ty::PolyTraitRef<'tcx> {
- fn clean(&self, cx: &DocContext) -> TyParamBound {
- self.0.clean(cx)
- }
-}
-
impl<'tcx> Clean<TyParamBound> for ty::TraitRef<'tcx> {
fn clean(&self, cx: &DocContext) -> TyParamBound {
let tcx = match cx.tcx_opt() {
let fqn = fqn.into_iter().map(|i| i.to_string())
.collect::<Vec<String>>();
let path = external_path(cx, fqn.last().unwrap().as_slice(),
- Some(self.def_id), self.substs);
+ Some(self.def_id), vec![], self.substs);
cx.external_paths.borrow_mut().as_mut().unwrap().insert(self.def_id,
(fqn, TypeTrait));
pub enum WherePredicate {
BoundPredicate { ty: Type, bounds: Vec<TyParamBound> },
RegionPredicate { lifetime: Lifetime, bounds: Vec<Lifetime>},
- // FIXME (#20041)
- EqPredicate
+ EqPredicate { lhs: Type, rhs: Type }
}
impl Clean<WherePredicate> for ast::WherePredicate {
}
ast::WherePredicate::EqPredicate(_) => {
- WherePredicate::EqPredicate
+ unimplemented!() // FIXME(#20041)
}
}
}
}
+impl<'a> Clean<WherePredicate> for ty::Predicate<'a> {
+ fn clean(&self, cx: &DocContext) -> WherePredicate {
+ use rustc::middle::ty::Predicate;
+
+ match *self {
+ Predicate::Trait(ref pred) => pred.clean(cx),
+ Predicate::Equate(ref pred) => pred.clean(cx),
+ Predicate::RegionOutlives(ref pred) => pred.clean(cx),
+ Predicate::TypeOutlives(ref pred) => pred.clean(cx),
+ Predicate::Projection(ref pred) => pred.clean(cx)
+ }
+ }
+}
+
+impl<'a> Clean<WherePredicate> for ty::TraitPredicate<'a> {
+ fn clean(&self, cx: &DocContext) -> WherePredicate {
+ WherePredicate::BoundPredicate {
+ ty: self.trait_ref.substs.self_ty().clean(cx).unwrap(),
+ bounds: vec![self.trait_ref.clean(cx)]
+ }
+ }
+}
+
+impl<'tcx> Clean<WherePredicate> for ty::EquatePredicate<'tcx> {
+ fn clean(&self, cx: &DocContext) -> WherePredicate {
+ let ty::EquatePredicate(ref lhs, ref rhs) = *self;
+ WherePredicate::EqPredicate {
+ lhs: lhs.clean(cx),
+ rhs: rhs.clean(cx)
+ }
+ }
+}
+
+impl Clean<WherePredicate> for ty::OutlivesPredicate<ty::Region, ty::Region> {
+ fn clean(&self, cx: &DocContext) -> WherePredicate {
+ let ty::OutlivesPredicate(ref a, ref b) = *self;
+ WherePredicate::RegionPredicate {
+ lifetime: a.clean(cx).unwrap(),
+ bounds: vec![b.clean(cx).unwrap()]
+ }
+ }
+}
+
+impl<'tcx> Clean<WherePredicate> for ty::OutlivesPredicate<ty::Ty<'tcx>, ty::Region> {
+ fn clean(&self, cx: &DocContext) -> WherePredicate {
+ let ty::OutlivesPredicate(ref ty, ref lt) = *self;
+
+ WherePredicate::BoundPredicate {
+ ty: ty.clean(cx),
+ bounds: vec![TyParamBound::RegionBound(lt.clean(cx).unwrap())]
+ }
+ }
+}
+
+impl<'tcx> Clean<WherePredicate> for ty::ProjectionPredicate<'tcx> {
+ fn clean(&self, cx: &DocContext) -> WherePredicate {
+ WherePredicate::EqPredicate {
+ lhs: self.projection_ty.clean(cx),
+ rhs: self.ty.clean(cx)
+ }
+ }
+}
+
+impl<'tcx> Clean<Type> for ty::ProjectionTy<'tcx> {
+ fn clean(&self, cx: &DocContext) -> Type {
+ let trait_ = match self.trait_ref.clean(cx) {
+ TyParamBound::TraitBound(t, _) => t.trait_,
+ TyParamBound::RegionBound(_) => panic!("cleaning a trait got a region??"),
+ };
+ Type::QPath {
+ name: self.item_name.clean(cx),
+ self_type: box self.trait_ref.self_ty().clean(cx),
+ trait_: box trait_
+ }
+ }
+}
+
// maybe use a Generic enum and use ~[Generic]?
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show)]
pub struct Generics {
impl<'a, 'tcx> Clean<Generics> for (&'a ty::Generics<'tcx>, subst::ParamSpace) {
fn clean(&self, cx: &DocContext) -> Generics {
- let (me, space) = *self;
+ use std::collections::HashSet;
+ use syntax::ast::TraitBoundModifier as TBM;
+ use self::WherePredicate as WP;
+
+ fn has_sized_bound(bounds: &[TyParamBound], cx: &DocContext) -> bool {
+ if let Some(tcx) = cx.tcx_opt() {
+ let sized_did = match tcx.lang_items.sized_trait() {
+ Some(did) => did,
+ None => return false
+ };
+ for bound in bounds.iter() {
+ if let TyParamBound::TraitBound(PolyTrait {
+ trait_: Type::ResolvedPath { did, .. }, ..
+ }, TBM::None) = *bound {
+ if did == sized_did {
+ return true
+ }
+ }
+ }
+ }
+ false
+ }
+
+ let (gens, space) = *self;
+ // Bounds in the type_params and lifetimes fields are repeated in the predicates
+ // field (see rustc_typeck::collect::ty_generics), so remove them.
+ let stripped_typarams = gens.types.get_slice(space).iter().map(|tp| {
+ let mut stp = tp.clone();
+ stp.bounds = ty::ParamBounds::empty();
+ stp.clean(cx)
+ }).collect::<Vec<_>>();
+ let stripped_lifetimes = gens.regions.get_slice(space).iter().map(|rp| {
+ let mut srp = rp.clone();
+ srp.bounds = Vec::new();
+ srp.clean(cx)
+ }).collect::<Vec<_>>();
+
+ let where_predicates = gens.predicates.get_slice(space).to_vec().clean(cx);
+ // Type parameters have a Sized bound by default unless removed with ?Sized.
+ // Scan through the predicates and mark any type parameter with a Sized
+ // bound, removing the bounds as we find them.
+ let mut sized_params = HashSet::new();
+ let mut where_predicates = where_predicates.into_iter().filter_map(|pred| {
+ if let WP::BoundPredicate { ty: Type::Generic(ref g), ref bounds } = pred {
+ if has_sized_bound(&**bounds, cx) {
+ sized_params.insert(g.clone());
+ return None
+ }
+ }
+ Some(pred)
+ }).collect::<Vec<_>>();
+ // Finally, run through the type parameters again and insert a ?Sized unbound for
+ // any we didn't find to be Sized.
+ for tp in stripped_typarams.iter() {
+ if !sized_params.contains(&tp.name) {
+ let mut sized_bound = ty::BuiltinBound::BoundSized.clean(cx);
+ if let TyParamBound::TraitBound(_, ref mut tbm) = sized_bound {
+ *tbm = TBM::Maybe
+ };
+ where_predicates.push(WP::BoundPredicate {
+ ty: Type::Generic(tp.name.clone()),
+ bounds: vec![sized_bound]
+ })
+ }
+ }
+
+ // It would be nice to collect all of the bounds on a type and recombine
+ // them if possible, to avoid e.g. `where T: Foo, T: Bar, T: Sized, T: 'a`
+ // and instead see `where T: Foo + Bar + Sized + 'a`
+
Generics {
- type_params: me.types.get_slice(space).to_vec().clean(cx),
- lifetimes: me.regions.get_slice(space).to_vec().clean(cx),
- where_predicates: vec![]
+ type_params: stripped_typarams,
+ lifetimes: stripped_lifetimes,
+ where_predicates: where_predicates
}
}
}
}
}
-#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show)]
-pub struct ClosureDecl {
- pub lifetimes: Vec<Lifetime>,
- pub decl: FnDecl,
- pub onceness: ast::Onceness,
- pub unsafety: ast::Unsafety,
- pub bounds: Vec<TyParamBound>,
-}
-
-impl Clean<ClosureDecl> for ast::ClosureTy {
- fn clean(&self, cx: &DocContext) -> ClosureDecl {
- ClosureDecl {
- lifetimes: self.lifetimes.clean(cx),
- decl: self.decl.clean(cx),
- onceness: self.onceness,
- unsafety: self.unsafety,
- bounds: self.bounds.clean(cx)
- }
- }
-}
-
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show)]
pub struct FnDecl {
pub inputs: Arguments,
Generic(String),
/// Primitives are just the fixed-size numeric types (plus int/uint/float), and char.
Primitive(PrimitiveType),
- Closure(Box<ClosureDecl>),
- Proc(Box<ClosureDecl>),
/// extern "ABI" fn
BareFunction(Box<BareFunctionDecl>),
Tuple(Vec<Type>),
_ => TypeEnum,
};
let path = external_path(cx, fqn.last().unwrap().to_string().as_slice(),
- None, substs);
+ None, vec![], substs);
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
ResolvedPath {
path: path,
let did = principal.def_id();
let fqn = csearch::get_item_path(cx.tcx(), did);
let fqn: Vec<_> = fqn.into_iter().map(|i| i.to_string()).collect();
+ let (typarams, bindings) = bounds.clean(cx);
let path = external_path(cx, fqn.last().unwrap().to_string().as_slice(),
- Some(did), principal.substs());
+ Some(did), bindings, principal.substs());
cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, TypeTrait));
ResolvedPath {
path: path,
- typarams: Some(bounds.clean(cx)),
+ typarams: Some(typarams),
did: did,
}
}
impl Clean<Type> for ast::QPath {
fn clean(&self, cx: &DocContext) -> Type {
Type::QPath {
- name: self.item_name.clean(cx),
+ name: self.item_path.identifier.clean(cx),
self_type: box self.self_type.clean(cx),
trait_: box self.trait_ref.clean(cx)
}
try!(write!(f, "{}", lifetime));
}
}
- &clean::WherePredicate::EqPredicate => {
- unimplemented!()
+ &clean::WherePredicate::EqPredicate { ref lhs, ref rhs } => {
+ try!(write!(f, "{} == {}", lhs, rhs));
}
}
}
match rel_root {
Some(root) => {
let mut root = String::from_str(root.as_slice());
- for seg in path.segments[0..amt].iter() {
+ for seg in path.segments[..amt].iter() {
if "super" == seg.name ||
"self" == seg.name {
try!(write!(w, "{}::", seg.name));
}
}
None => {
- for seg in path.segments[0..amt].iter() {
+ for seg in path.segments[..amt].iter() {
try!(write!(w, "{}::", seg.name));
}
}
}
clean::Infer => write!(f, "_"),
clean::Primitive(prim) => primitive_link(f, prim, prim.to_string()),
- clean::Closure(ref decl) => {
- write!(f, "{style}{lifetimes}|{args}|{bounds}{arrow}",
- style = UnsafetySpace(decl.unsafety),
- lifetimes = if decl.lifetimes.len() == 0 {
- "".to_string()
- } else {
- format!("for <{}>",
- CommaSep(decl.lifetimes.as_slice()))
- },
- args = decl.decl.inputs,
- arrow = decl.decl.output,
- bounds = {
- let mut ret = String::new();
- for bound in decl.bounds.iter() {
- match *bound {
- clean::RegionBound(..) => {}
- clean::TraitBound(ref t, modifier) => {
- if ret.len() == 0 {
- ret.push_str(": ");
- } else {
- ret.push_str(" + ");
- }
- if modifier == ast::TraitBoundModifier::Maybe {
- ret.push_str("?");
- }
- ret.push_str(format!("{}",
- *t).as_slice());
- }
- }
- }
- ret
- })
- }
- clean::Proc(ref decl) => {
- write!(f, "{style}{lifetimes}proc({args}){bounds}{arrow}",
- style = UnsafetySpace(decl.unsafety),
- lifetimes = if decl.lifetimes.len() == 0 {
- "".to_string()
- } else {
- format!("for <{}>",
- CommaSep(decl.lifetimes.as_slice()))
- },
- args = decl.decl.inputs,
- bounds = if decl.bounds.len() == 0 {
- "".to_string()
- } else {
- let m = decl.bounds
- .iter()
- .map(|s| s.to_string());
- format!(
- ": {}",
- m.collect::<Vec<String>>().connect(" + "))
- },
- arrow = decl.decl.output)
- }
clean::BareFunction(ref decl) => {
write!(f, "{}{}fn{}{}",
UnsafetySpace(decl.unsafety),
use libc;
use std::ascii::AsciiExt;
use std::ffi::CString;
-use std::cell::{RefCell, Cell};
+use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt;
use std::slice;
thread_local!(static USED_HEADER_MAP: RefCell<HashMap<String, uint>> = {
RefCell::new(HashMap::new())
});
-thread_local!(static TEST_IDX: Cell<uint> = Cell::new(0));
thread_local!(pub static PLAYGROUND_KRATE: RefCell<Option<Option<String>>> = {
RefCell::new(None)
if rendered { return }
PLAYGROUND_KRATE.with(|krate| {
let mut s = String::new();
- let id = krate.borrow().as_ref().map(|krate| {
- let idx = TEST_IDX.with(|slot| {
- let i = slot.get();
- slot.set(i + 1);
- i
- });
-
+ krate.borrow().as_ref().map(|krate| {
let test = origtext.lines().map(|l| {
stripped_filtered_line(l).unwrap_or(l)
}).collect::<Vec<&str>>().connect("\n");
let krate = krate.as_ref().map(|s| s.as_slice());
let test = test::maketest(test.as_slice(), krate, false, false);
- s.push_str(format!("<span id='rust-example-raw-{}' \
- class='rusttest'>{}</span>",
- idx, Escape(test.as_slice())).as_slice());
- format!("rust-example-rendered-{}", idx)
+ s.push_str(format!("<span class='rusttest'>{}</span>",
+ Escape(test.as_slice())).as_slice());
});
- let id = id.as_ref().map(|a| a.as_slice());
- s.push_str(highlight::highlight(text.as_slice(), None, id)
- .as_slice());
+ s.push_str(highlight::highlight(text.as_slice(),
+ None,
+ Some("rust-example-rendered"))
+ .as_slice());
let output = CString::from_vec(s.into_bytes());
hoedown_buffer_puts(ob, output.as_ptr());
})
/// previous state (if any).
pub fn reset_headers() {
USED_HEADER_MAP.with(|s| s.borrow_mut().clear());
- TEST_IDX.with(|s| s.set(0));
}
impl<'a> fmt::String for Markdown<'a> {
(function() {
if (window.playgroundUrl) {
$('pre.rust').hover(function() {
- if (!$(this).attr('id')) { return; }
- var id = '#' + $(this).attr('id').replace('rendered', 'raw');
var a = $('<a>').text('⇱').attr('class', 'test-arrow');
- var code = $(id).text();
+ var code = $(this).siblings(".rusttest").text();
a.attr('href', window.playgroundUrl + '?code=' +
encodeURIComponent(code));
a.attr('target', '_blank');
fn escape_char(writer: &mut fmt::Writer, v: char) -> fmt::Result {
let mut buf = [0; 4];
let n = v.encode_utf8(&mut buf).unwrap();
- let buf = unsafe { str::from_utf8_unchecked(&buf[0..n]) };
+ let buf = unsafe { str::from_utf8_unchecked(&buf[..n]) };
escape_str(writer, buf)
}
}
if n > 0 {
- wr.write_str(&BUF[0..n])
+ wr.write_str(&BUF[..n])
} else {
Ok(())
}
use cmp;
use fmt;
use io::{Reader, Writer, Stream, Buffer, DEFAULT_BUF_SIZE, IoResult};
-use iter::{IteratorExt, ExactSizeIterator};
+use iter::{IteratorExt, ExactSizeIterator, repeat};
use ops::Drop;
use option::Option;
use option::Option::{Some, None};
impl<R: Reader> BufferedReader<R> {
/// Creates a new `BufferedReader` with the specified buffer capacity
pub fn with_capacity(cap: uint, inner: R) -> BufferedReader<R> {
- // It's *much* faster to create an uninitialized buffer than it is to
- // fill everything in with 0. This buffer is entirely an implementation
- // detail and is never exposed, so we're safe to not initialize
- // everything up-front. This allows creation of BufferedReader instances
- // to be very cheap (large mallocs are not nearly as expensive as large
- // callocs).
- let mut buf = Vec::with_capacity(cap);
- unsafe { buf.set_len(cap); }
BufferedReader {
inner: inner,
- buf: buf,
+ // We can't use the same trick here as we do for BufferedWriter,
+ // since this memory is visible to the inner Reader.
+ buf: repeat(0).take(cap).collect(),
pos: 0,
cap: 0,
}
let nread = {
let available = try!(self.fill_buf());
let nread = cmp::min(available.len(), buf.len());
- slice::bytes::copy_memory(buf, &available[0..nread]);
+ slice::bytes::copy_memory(buf, &available[..nread]);
nread
};
self.pos += nread;
impl<W: Writer> BufferedWriter<W> {
/// Creates a new `BufferedWriter` with the specified buffer capacity
pub fn with_capacity(cap: uint, inner: W) -> BufferedWriter<W> {
- // See comments in BufferedReader for why this uses unsafe code.
+ // It's *much* faster to create an uninitialized buffer than it is to
+ // fill everything in with 0. This buffer is entirely an implementation
+ // detail and is never exposed, so we're safe to not initialize
+ // everything up-front. This allows creation of BufferedWriter instances
+ // to be very cheap (large mallocs are not nearly as expensive as large
+ // callocs).
let mut buf = Vec::with_capacity(cap);
unsafe { buf.set_len(cap); }
BufferedWriter {
fn flush_buf(&mut self) -> IoResult<()> {
if self.pos != 0 {
- let ret = self.inner.as_mut().unwrap().write(&self.buf[0..self.pos]);
+ let ret = self.inner.as_mut().unwrap().write(&self.buf[..self.pos]);
self.pos = 0;
ret
} else {
fn write(&mut self, buf: &[u8]) -> IoResult<()> {
match buf.iter().rposition(|&b| b == b'\n') {
Some(i) => {
- try!(self.inner.write(&buf[0..(i + 1)]));
+ try!(self.inner.write(&buf[..(i + 1)]));
try!(self.inner.flush());
try!(self.inner.write(&buf[(i + 1)..]));
Ok(())
Some(src) => {
let dst = buf.slice_from_mut(num_read);
let count = cmp::min(src.len(), dst.len());
- bytes::copy_memory(dst, &src[0..count]);
+ bytes::copy_memory(dst, &src[..count]);
count
},
None => 0,
let mut read_buf = [0; 1028];
let read_str = match check!(read_stream.read(&mut read_buf)) {
-1|0 => panic!("shouldn't happen"),
- n => str::from_utf8(&read_buf[0..n]).unwrap().to_string()
+ n => str::from_utf8(&read_buf[..n]).unwrap().to_string()
};
assert_eq!(read_str.as_slice(), message);
}
let write_len = min(buf.len(), self.len());
{
- let input = &self[0..write_len];
+ let input = &self[..write_len];
let output = buf.slice_to_mut(write_len);
slice::bytes::copy_memory(output, input);
}
Ok(())
} else {
- slice::bytes::copy_memory(dst, &src[0..dst_len]);
+ slice::bytes::copy_memory(dst, &src[..dst_len]);
self.pos += dst_len;
assert_eq!(buf, b);
assert_eq!(reader.read(&mut buf), Ok(3));
let b: &[_] = &[5, 6, 7];
- assert_eq!(&buf[0..3], b);
+ assert_eq!(&buf[..3], b);
assert!(reader.read(&mut buf).is_err());
let mut reader = MemReader::new(vec!(0, 1, 2, 3, 4, 5, 6, 7));
assert_eq!(reader.read_until(3).unwrap(), vec!(0, 1, 2, 3));
assert_eq!(buf.as_slice(), b);
assert_eq!(reader.read(&mut buf), Ok(3));
let b: &[_] = &[5, 6, 7];
- assert_eq!(&buf[0..3], b);
+ assert_eq!(&buf[..3], b);
assert!(reader.read(&mut buf).is_err());
let mut reader = &mut in_buf.as_slice();
assert_eq!(reader.read_until(3).unwrap(), vec!(0, 1, 2, 3));
assert_eq!(buf, b);
assert_eq!(reader.read(&mut buf), Ok(3));
let b: &[_] = &[5, 6, 7];
- assert_eq!(&buf[0..3], b);
+ assert_eq!(&buf[..3], b);
assert!(reader.read(&mut buf).is_err());
let mut reader = BufReader::new(in_buf.as_slice());
assert_eq!(reader.read_until(3).unwrap(), vec!(0, 1, 2, 3));
fn write_char(&mut self, c: char) -> IoResult<()> {
let mut buf = [0u8; 4];
let n = c.encode_utf8(buf.as_mut_slice()).unwrap_or(0);
- self.write(&buf[0..n])
+ self.write(&buf[..n])
}
/// Write the result of passing n through `int::to_str_bytes`.
};
match available.iter().position(|&b| b == byte) {
Some(i) => {
- res.push_all(&available[0..(i + 1)]);
+ res.push_all(&available[..(i + 1)]);
used = i + 1;
break
}
}
}
}
- match str::from_utf8(&buf[0..width]).ok() {
+ match str::from_utf8(&buf[..width]).ok() {
Some(s) => Ok(s.char_at(0)),
None => Err(standard_error(InvalidInput))
}
let mut tail = [0u16; 8];
let (tail_size, _) = read_groups(self, &mut tail, 8 - head_size);
- Some(ipv6_addr_from_head_tail(&head[0..head_size], &tail[0..tail_size]))
+ Some(ipv6_addr_from_head_tail(&head[..head_size], &tail[..tail_size]))
}
fn read_ipv6_addr(&mut self) -> Option<IpAddr> {
impl<R: Buffer> Buffer for LimitReader<R> {
fn fill_buf<'a>(&'a mut self) -> io::IoResult<&'a [u8]> {
let amt = try!(self.inner.fill_buf());
- let buf = &amt[0..cmp::min(amt.len(), self.limit)];
+ let buf = &amt[..cmp::min(amt.len(), self.limit)];
if buf.len() == 0 {
Err(io::standard_error(io::EndOfFile))
} else {
impl<R: Reader, W: Writer> Reader for TeeReader<R, W> {
fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
self.reader.read(buf).and_then(|len| {
- self.writer.write(&mut buf[0..len]).map(|()| len)
+ self.writer.write(&mut buf[..len]).map(|()| len)
})
}
}
Err(ref e) if e.kind == io::EndOfFile => return Ok(()),
Err(e) => return Err(e),
};
- try!(w.write(&buf[0..len]));
+ try!(w.write(&buf[..len]));
}
}
pub const ARCH: &'static str = "mipsel";
}
+#[cfg(target_arch = "powerpc")]
+mod arch_consts {
+ pub const ARCH: &'static str = "powerpc";
+}
+
#[cfg(test)]
mod tests {
use prelude::v1::*;
match name.rposition_elem(&dot) {
None | Some(0) => name,
Some(1) if name == b".." => name,
- Some(pos) => &name[0..pos]
+ Some(pos) => &name[..pos]
}
})
}
let extlen = extension.container_as_bytes().len();
match (name.rposition_elem(&dot), extlen) {
(None, 0) | (Some(0), 0) => None,
- (Some(idx), 0) => Some(name[0..idx].to_vec()),
+ (Some(idx), 0) => Some(name[..idx].to_vec()),
(idx, extlen) => {
let idx = match idx {
None | Some(0) => name.len(),
let mut v;
v = Vec::with_capacity(idx + extlen + 1);
- v.push_all(&name[0..idx]);
+ v.push_all(&name[..idx]);
v.push(dot);
v.push_all(extension.container_as_bytes());
Some(v)
}
Some(idx) => {
let mut v = Vec::with_capacity(idx + 1 + filename.len());
- v.push_all(&self.repr[0..(idx+1)]);
+ v.push_all(&self.repr[..(idx+1)]);
v.push_all(filename);
// FIXME: this is slow
self.repr = Path::normalize(v.as_slice());
match self.sepidx {
None if b".." == self.repr => self.repr.as_slice(),
None => dot_static,
- Some(0) => &self.repr[0..1],
+ Some(0) => &self.repr[..1],
Some(idx) if &self.repr[(idx+1)..] == b".." => self.repr.as_slice(),
- Some(idx) => &self.repr[0..idx]
+ Some(idx) => &self.repr[..idx]
}
}
}
Some((_,idxa,end)) if &self.repr[idxa..end] == ".." => {
let mut s = String::with_capacity(end + 1 + filename.len());
- s.push_str(&self.repr[0..end]);
+ s.push_str(&self.repr[..end]);
s.push(SEP);
s.push_str(filename);
self.update_normalized(&s[]);
}
Some((idxb,idxa,_)) if self.prefix == Some(DiskPrefix) && idxa == self.prefix_len() => {
let mut s = String::with_capacity(idxb + filename.len());
- s.push_str(&self.repr[0..idxb]);
+ s.push_str(&self.repr[..idxb]);
s.push_str(filename);
self.update_normalized(&s[]);
}
Some((idxb,_,_)) => {
let mut s = String::with_capacity(idxb + 1 + filename.len());
- s.push_str(&self.repr[0..idxb]);
+ s.push_str(&self.repr[..idxb]);
s.push(SEP);
s.push_str(filename);
self.update_normalized(&s[]);
Some((idxb,_,end)) if &self.repr[idxb..end] == "\\" => {
&self.repr[]
}
- Some((0,idxa,_)) => &self.repr[0..idxa],
+ Some((0,idxa,_)) => &self.repr[..idxa],
Some((idxb,idxa,_)) => {
match self.prefix {
Some(DiskPrefix) | Some(VerbatimDiskPrefix) if idxb == self.prefix_len() => {
- &self.repr[0..idxa]
+ &self.repr[..idxa]
}
- _ => &self.repr[0..idxb]
+ _ => &self.repr[..idxb]
}
}
})
if self.prefix.is_some() {
Some(Path::new(match self.prefix {
Some(DiskPrefix) if self.is_absolute() => {
- &self.repr[0..(self.prefix_len()+1)]
+ &self.repr[..(self.prefix_len()+1)]
}
Some(VerbatimDiskPrefix) => {
- &self.repr[0..(self.prefix_len()+1)]
+ &self.repr[..(self.prefix_len()+1)]
}
- _ => &self.repr[0..self.prefix_len()]
+ _ => &self.repr[..self.prefix_len()]
}))
} else if is_vol_relative(self) {
- Some(Path::new(&self.repr[0..1]))
+ Some(Path::new(&self.repr[..1]))
} else {
None
}
}
(None, None) => true,
(a, b) if a == b => {
- &s_repr[0..self.prefix_len()] == &o_repr[0..other.prefix_len()]
+ &s_repr[..self.prefix_len()] == &o_repr[..other.prefix_len()]
}
_ => false
}
match prefix.unwrap() {
DiskPrefix => {
let len = prefix_len(prefix) + is_abs as uint;
- let mut s = String::from_str(&s[0..len]);
+ let mut s = String::from_str(&s[..len]);
unsafe {
let v = s.as_mut_vec();
v[0] = (*v)[0].to_ascii_uppercase();
}
VerbatimDiskPrefix => {
let len = prefix_len(prefix) + is_abs as uint;
- let mut s = String::from_str(&s[0..len]);
+ let mut s = String::from_str(&s[..len]);
unsafe {
let v = s.as_mut_vec();
v[4] = (*v)[4].to_ascii_uppercase();
_ => {
let plen = prefix_len(prefix);
if s.len() > plen {
- Some(String::from_str(&s[0..plen]))
+ Some(String::from_str(&s[..plen]))
} else { None }
}
}
} else if is_abs && comps.is_empty() {
Some(repeat(SEP).take(1).collect())
} else {
- let prefix_ = &s[0..prefix_len(prefix)];
+ let prefix_ = &s[..prefix_len(prefix)];
let n = prefix_.len() +
if is_abs { comps.len() } else { comps.len() - 1} +
comps.iter().map(|v| v.len()).sum();
s.push(':');
}
Some(VerbatimDiskPrefix) => {
- s.push_str(&prefix_[0..4]);
+ s.push_str(&prefix_[..4]);
s.push(prefix_.as_bytes()[4].to_ascii_uppercase() as char);
s.push_str(&prefix_[5..]);
}
fn update_sepidx(&mut self) {
let s = if self.has_nonsemantic_trailing_slash() {
- &self.repr[0..(self.repr.len()-1)]
+ &self.repr[..(self.repr.len()-1)]
} else { &self.repr[] };
let sep_test: fn(char) -> bool = if !prefix_is_verbatim(self.prefix) {
is_sep
any(target_arch = "x86_64",
target_arch = "x86",
target_arch = "arm",
- target_arch = "aarch64")))]
+ target_arch = "aarch64",
+ target_arch = "powerpc")))]
fn getrandom(buf: &mut [u8]) -> libc::c_long {
extern "C" {
fn syscall(number: libc::c_long, ...) -> libc::c_long;
const NR_GETRANDOM: libc::c_long = 355;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
const NR_GETRANDOM: libc::c_long = 384;
+ #[cfg(target_arch = "powerpc")]
+ const NR_GETRANDOM: libc::c_long = 384;
unsafe {
syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), 0u)
any(target_arch = "x86_64",
target_arch = "x86",
target_arch = "arm",
- target_arch = "aarch64"))))]
+ target_arch = "aarch64",
+ target_arch = "powerpc"))))]
fn getrandom(_buf: &mut [u8]) -> libc::c_long { -1 }
fn getrandom_fill_bytes(v: &mut [u8]) {
any(target_arch = "x86_64",
target_arch = "x86",
target_arch = "arm",
- target_arch = "aarch64")))]
+ target_arch = "aarch64",
+ target_arch = "powerpc")))]
fn is_getrandom_available() -> bool {
use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering};
any(target_arch = "x86_64",
target_arch = "x86",
target_arch = "arm",
- target_arch = "aarch64"))))]
+ target_arch = "aarch64",
+ target_arch = "powerpc"))))]
fn is_getrandom_available() -> bool { false }
/// A random number generator that retrieves randomness straight from
#[cfg(any(target_arch = "mips", target_arch = "mipsel"))]
pub const unwinder_private_data_size: uint = 2;
+#[cfg(target_arch = "powerpc")]
+pub const unwinder_private_data_size: uint = 2;
+
#[repr(C)]
pub struct _Unwind_Exception {
pub exception_class: _Unwind_Exception_Class,
// MAX_CALLBACKS, so we're sure to clamp it as necessary.
let callbacks = {
let amt = CALLBACK_CNT.load(Ordering::SeqCst);
- &CALLBACKS[0..cmp::min(amt, MAX_CALLBACKS)]
+ &CALLBACKS[..cmp::min(amt, MAX_CALLBACKS)]
};
for cb in callbacks.iter() {
match cb.load(Ordering::SeqCst) {
impl<'a> fmt::Writer for BufWriter<'a> {
fn write_str(&mut self, bytes: &str) -> fmt::Result {
let left = self.buf.slice_from_mut(self.pos);
- let to_write = &bytes.as_bytes()[0..cmp::min(bytes.len(), left.len())];
+ let to_write = &bytes.as_bytes()[..cmp::min(bytes.len(), left.len())];
slice::bytes::copy_memory(left, to_write);
self.pos += to_write.len();
Ok(())
let mut msg = [0u8; 512];
let mut w = BufWriter { buf: &mut msg, pos: 0 };
let _ = write!(&mut w, "{}", args);
- let msg = str::from_utf8(&w.buf[0..w.pos]).unwrap_or("aborted");
+ let msg = str::from_utf8(&w.buf[..w.pos]).unwrap_or("aborted");
let msg = if msg.is_empty() {"aborted"} else {msg};
rterrln!("fatal runtime error: {}", msg);
unsafe { intrinsics::abort(); }
unsafe fn target_record_sp_limit(_: uint) {
}
+ // powerpc - FIXME(POWERPC): missing...
+ #[cfg(target_arch = "powerpc")]
+ unsafe fn target_record_sp_limit(_: uint) {
+ }
+
+
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
1024
}
+ // powepc - FIXME(POWERPC): missing...
+ #[cfg(target_arch = "powerpc")]
+ unsafe fn target_get_sp_limit() -> uint {
+ 1024
+ }
+
// iOS doesn't support segmented stacks yet. This function might
// be called by runtime though so it is unsafe to mark it as
// unreachable, let's return a fixed constant.
target_os = "android"))]
pub const FIONBIO: libc::c_ulong = 0x5421;
#[cfg(all(target_os = "linux",
- any(target_arch = "mips", target_arch = "mipsel")))]
+ any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc")))]
pub const FIONBIO: libc::c_ulong = 0x667e;
#[cfg(any(target_os = "macos",
target_os = "android"))]
pub const FIOCLEX: libc::c_ulong = 0x5451;
#[cfg(all(target_os = "linux",
- any(target_arch = "mips", target_arch = "mipsel")))]
+ any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc")))]
pub const FIOCLEX: libc::c_ulong = 0x6601;
#[cfg(any(target_os = "macos",
}
#[cfg(all(target_os = "linux",
- any(target_arch = "mips", target_arch = "mipsel")))]
+ any(target_arch = "mips",
+ target_arch = "mipsel",
+ target_arch = "powerpc")))]
mod signal {
use libc;
all(target_os = "linux", target_arch = "aarch64"),
all(target_os = "linux", target_arch = "mips"), // may not match
all(target_os = "linux", target_arch = "mipsel"), // may not match
+ all(target_os = "linux", target_arch = "powerpc"), // may not match
target_os = "android"))] // may not match
mod signal {
use libc;
#[cfg(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
- target_arch = "mipsel"))]
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8;
#[cfg(target_arch = "aarch64")]
const __SIZEOF_PTHREAD_MUTEX_T: uint = 48 - 8;
target_arch = "arm",
target_arch = "aarch64",
target_arch = "mips",
- target_arch = "mipsel"))]
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
const __SIZEOF_PTHREAD_COND_T: uint = 48 - 8;
#[cfg(any(target_arch = "x86_64",
#[cfg(any(target_arch = "x86",
target_arch = "arm",
target_arch = "mips",
- target_arch = "mipsel"))]
+ target_arch = "mipsel",
+ target_arch = "powerpc"))]
const __SIZEOF_PTHREAD_RWLOCK_T: uint = 32 - 8;
#[repr(C)]
#[inline]
pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = ffi::SleepConditionVariableCS(self.inner.get(),
- mutex::raw(mutex),
- libc::INFINITE);
+ let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+ mutex::raw(mutex),
+ libc::INFINITE,
+ 0);
debug_assert!(r != 0);
}
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- let r = ffi::SleepConditionVariableCS(self.inner.get(),
- mutex::raw(mutex),
- dur.num_milliseconds() as DWORD);
+ let r = ffi::SleepConditionVariableSRW(self.inner.get(),
+ mutex::raw(mutex),
+ dur.num_milliseconds() as DWORD,
+ 0);
if r == 0 {
const ERROR_TIMEOUT: DWORD = 0x5B4;
debug_assert_eq!(os::errno() as uint, ERROR_TIMEOUT as uint);
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use prelude::v1::*;
-
-use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
-use alloc::{self, heap};
-
-use libc::DWORD;
+use marker::Sync;
+use cell::UnsafeCell;
use sys::sync as ffi;
-const SPIN_COUNT: DWORD = 4000;
+pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
-pub struct Mutex { inner: AtomicUsize }
-
-pub const MUTEX_INIT: Mutex = Mutex { inner: ATOMIC_USIZE_INIT };
+pub const MUTEX_INIT: Mutex = Mutex {
+ inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
+};
unsafe impl Sync for Mutex {}
#[inline]
-pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
- m.get()
+pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
+ m.inner.get()
}
+// So you might be asking why we're using SRWLock instead of CriticalSection?
+//
+// 1. SRWLock is several times faster than CriticalSection according to benchmarks performed on both
+// Windows 8 and Windows 7.
+//
+// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix implementation
+// deadlocks so consistency is preferred. See #19962 for more details.
+//
+// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy is there there are
+// no guarantees of fairness.
+
impl Mutex {
#[inline]
- pub unsafe fn new() -> Mutex {
- Mutex { inner: AtomicUsize::new(init_lock() as uint) }
- }
+ pub unsafe fn new() -> Mutex { MUTEX_INIT }
#[inline]
pub unsafe fn lock(&self) {
- ffi::EnterCriticalSection(self.get())
+ ffi::AcquireSRWLockExclusive(self.inner.get())
}
#[inline]
pub unsafe fn try_lock(&self) -> bool {
- ffi::TryEnterCriticalSection(self.get()) != 0
+ ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
}
#[inline]
pub unsafe fn unlock(&self) {
- ffi::LeaveCriticalSection(self.get())
+ ffi::ReleaseSRWLockExclusive(self.inner.get())
}
+ #[inline]
pub unsafe fn destroy(&self) {
- let lock = self.inner.swap(0, Ordering::SeqCst);
- if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
- }
-
- unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
- match self.inner.load(Ordering::SeqCst) {
- 0 => {}
- n => return n as ffi::LPCRITICAL_SECTION
- }
- let lock = init_lock();
- match self.inner.compare_and_swap(0, lock as uint, Ordering::SeqCst) {
- 0 => return lock as ffi::LPCRITICAL_SECTION,
- _ => {}
- }
- free_lock(lock);
- return self.inner.load(Ordering::SeqCst) as ffi::LPCRITICAL_SECTION;
+ // ...
}
}
-
-unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
- let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
- as ffi::LPCRITICAL_SECTION;
- if block.is_null() { alloc::oom() }
- ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
- return block;
-}
-
-unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
- ffi::DeleteCriticalSection(h);
- heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8);
-}
pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
match v.iter().position(|c| *c == 0) {
// don't include the 0
- Some(i) => &v[0..i],
+ Some(i) => &v[..i],
None => v
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use libc::{BOOL, DWORD, c_void, LPVOID};
+use libc::{BOOL, DWORD, c_void, LPVOID, c_ulong};
use libc::types::os::arch::extra::BOOLEAN;
-pub type LPCRITICAL_SECTION = *mut c_void;
-pub type LPCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
-pub type LPSRWLOCK = *mut SRWLOCK;
-
-#[cfg(target_arch = "x86")]
-pub const CRITICAL_SECTION_SIZE: uint = 24;
-#[cfg(target_arch = "x86_64")]
-pub const CRITICAL_SECTION_SIZE: uint = 40;
+pub type PCONDITION_VARIABLE = *mut CONDITION_VARIABLE;
+pub type PSRWLOCK = *mut SRWLOCK;
+pub type ULONG = c_ulong;
#[repr(C)]
pub struct CONDITION_VARIABLE { pub ptr: LPVOID }
pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: 0 as *mut _ };
extern "system" {
- // critical sections
- pub fn InitializeCriticalSectionAndSpinCount(
- lpCriticalSection: LPCRITICAL_SECTION,
- dwSpinCount: DWORD) -> BOOL;
- pub fn DeleteCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
- pub fn EnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
- pub fn LeaveCriticalSection(lpCriticalSection: LPCRITICAL_SECTION);
- pub fn TryEnterCriticalSection(lpCriticalSection: LPCRITICAL_SECTION) -> BOOL;
-
// condition variables
- pub fn SleepConditionVariableCS(ConditionVariable: LPCONDITION_VARIABLE,
- CriticalSection: LPCRITICAL_SECTION,
- dwMilliseconds: DWORD) -> BOOL;
- pub fn WakeConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
- pub fn WakeAllConditionVariable(ConditionVariable: LPCONDITION_VARIABLE);
+ pub fn SleepConditionVariableSRW(ConditionVariable: PCONDITION_VARIABLE,
+ SRWLock: PSRWLOCK,
+ dwMilliseconds: DWORD,
+ Flags: ULONG) -> BOOL;
+ pub fn WakeConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
+ pub fn WakeAllConditionVariable(ConditionVariable: PCONDITION_VARIABLE);
// slim rwlocks
- pub fn AcquireSRWLockExclusive(SRWLock: LPSRWLOCK);
- pub fn AcquireSRWLockShared(SRWLock: LPSRWLOCK);
- pub fn ReleaseSRWLockExclusive(SRWLock: LPSRWLOCK);
- pub fn ReleaseSRWLockShared(SRWLock: LPSRWLOCK);
- pub fn TryAcquireSRWLockExclusive(SRWLock: LPSRWLOCK) -> BOOLEAN;
- pub fn TryAcquireSRWLockShared(SRWLock: LPSRWLOCK) -> BOOLEAN;
+ pub fn AcquireSRWLockExclusive(SRWLock: PSRWLOCK);
+ pub fn AcquireSRWLockShared(SRWLock: PSRWLOCK);
+ pub fn ReleaseSRWLockExclusive(SRWLock: PSRWLOCK);
+ pub fn ReleaseSRWLockShared(SRWLock: PSRWLOCK);
+ pub fn TryAcquireSRWLockExclusive(SRWLock: PSRWLOCK) -> BOOLEAN;
+ pub fn TryAcquireSRWLockShared(SRWLock: PSRWLOCK) -> BOOLEAN;
}
-
/// Variable reference, possibly containing `::` and/or
/// type parameters, e.g. foo::bar::<baz>
ExprPath(Path),
+ /// A "qualified path", e.g. `<Vec<T> as SomeTrait>::SomeType`
+ ExprQPath(P<QPath>),
ExprAddrOf(Mutability, P<Expr>),
ExprBreak(Option<Ident>),
///
/// <Vec<T> as SomeTrait>::SomeAssociatedItem
/// ^~~~~ ^~~~~~~~~ ^~~~~~~~~~~~~~~~~~
-/// self_type trait_name item_name
+/// self_type trait_name item_path
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
pub struct QPath {
pub self_type: P<Ty>,
pub trait_ref: P<TraitRef>,
- pub item_name: Ident, // FIXME(#20301) -- should use Name
+ pub item_path: PathSegment,
}
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
pub span: Span,
}
+impl ViewItem {
+ pub fn id(&self) -> NodeId {
+ match self.node {
+ ViewItemExternCrate(_, _, id) => id,
+ ViewItemUse(ref vp) => match vp.node {
+ ViewPathSimple(_, _, id) => id,
+ ViewPathGlob(_, id) => id,
+ ViewPathList(_, _, id) => id,
+ }
+ }
+ }
+}
+
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
pub enum ViewItem_ {
/// Ident: name used to refer to this crate in the code
#[derive(Copy, Show)]
pub enum Node<'ast> {
NodeItem(&'ast Item),
+ NodeViewItem(&'ast ViewItem),
NodeForeignItem(&'ast ForeignItem),
NodeTraitItem(&'ast TraitItem),
NodeImplItem(&'ast ImplItem),
/// All the node types, with a parent ID.
EntryItem(NodeId, &'ast Item),
+ EntryViewItem(NodeId, &'ast ViewItem),
EntryForeignItem(NodeId, &'ast ForeignItem),
EntryTraitItem(NodeId, &'ast TraitItem),
EntryImplItem(NodeId, &'ast ImplItem),
fn from_node(p: NodeId, node: Node<'ast>) -> MapEntry<'ast> {
match node {
NodeItem(n) => EntryItem(p, n),
+ NodeViewItem(n) => EntryViewItem(p, n),
NodeForeignItem(n) => EntryForeignItem(p, n),
NodeTraitItem(n) => EntryTraitItem(p, n),
NodeImplItem(n) => EntryImplItem(p, n),
fn parent(self) -> Option<NodeId> {
Some(match self {
EntryItem(id, _) => id,
+ EntryViewItem(id, _) => id,
EntryForeignItem(id, _) => id,
EntryTraitItem(id, _) => id,
EntryImplItem(id, _) => id,
fn to_node(self) -> Option<Node<'ast>> {
Some(match self {
EntryItem(_, n) => NodeItem(n),
+ EntryViewItem(_, n) => NodeViewItem(n),
EntryForeignItem(_, n) => NodeForeignItem(n),
EntryTraitItem(_, n) => NodeTraitItem(n),
EntryImplItem(_, n) => NodeImplItem(n),
}
}
+ pub fn expect_view_item(&self, id: NodeId) -> &'ast ViewItem {
+ match self.find(id) {
+ Some(NodeViewItem(view_item)) => view_item,
+ _ => panic!("expected view item, found {}", self.node_to_string(id))
+ }
+ }
+
pub fn expect_struct(&self, id: NodeId) -> &'ast StructDef {
match self.find(id) {
Some(NodeItem(i)) => {
NodesMatchingSuffix {
map: self,
item_name: parts.last().unwrap(),
- in_which: &parts[0..(parts.len() - 1)],
+ in_which: &parts[..(parts.len() - 1)],
idx: 0,
}
}
pub fn opt_span(&self, id: NodeId) -> Option<Span> {
let sp = match self.find(id) {
Some(NodeItem(item)) => item.span,
+ Some(NodeViewItem(item)) => item.span,
Some(NodeForeignItem(foreign_item)) => foreign_item.span,
Some(NodeTraitItem(trait_method)) => {
match *trait_method {
self.parent = parent;
}
+ fn visit_view_item(&mut self, item: &'ast ViewItem) {
+ self.insert(item.id(), NodeViewItem(item));
+ visit::walk_view_item(self, item);
+ }
+
fn visit_pat(&mut self, pat: &'ast Pat) {
self.insert(pat.id, match pat.node {
// Note: this is at least *potentially* a pattern...
fn print_node(&mut self, node: &Node) -> IoResult<()> {
match *node {
NodeItem(a) => self.print_item(&*a),
+ NodeViewItem(a) => self.print_view_item(&*a),
NodeForeignItem(a) => self.print_foreign_item(&*a),
NodeTraitItem(a) => self.print_trait_method(&*a),
NodeImplItem(a) => self.print_impl_item(&*a),
};
format!("{} {}{}", item_str, path_str, id_str)
}
+ Some(NodeViewItem(item)) => {
+ format!("view item {}{}", pprust::view_item_to_string(&*item), id_str)
+ }
Some(NodeForeignItem(item)) => {
let path_str = map.path_to_str_with_ident(id, item.ident);
format!("foreign item {}{}", path_str, id_str)
let begin = begin.to_uint();
let slice = &self.src[begin..];
match slice.find('\n') {
- Some(e) => &slice[0..e],
+ Some(e) => &slice[..e],
None => slice
}.to_string()
})
// to be miscolored. We assume this is rare enough that we don't
// have to worry about it.
if msg.ends_with("\n") {
- try!(t.write_str(&msg[0..(msg.len()-1)]));
+ try!(t.write_str(&msg[..(msg.len()-1)]));
try!(t.reset());
try!(t.write_str("\n"));
} else {
QPath {
self_type: fld.fold_ty(qpath.self_type),
trait_ref: qpath.trait_ref.map(|tr| fld.fold_trait_ref(tr)),
- item_name: fld.fold_ident(qpath.item_name),
+ item_path: PathSegment {
+ identifier: fld.fold_ident(qpath.item_path.identifier),
+ parameters: fld.fold_path_parameters(qpath.item_path.parameters),
+ }
}
})
}
e2.map(|x| folder.fold_expr(x)))
}
ExprPath(pth) => ExprPath(folder.fold_path(pth)),
+ ExprQPath(qpath) => ExprQPath(folder.fold_qpath(qpath)),
ExprBreak(opt_ident) => ExprBreak(opt_ident.map(|x| folder.fold_ident(x))),
ExprAgain(opt_ident) => ExprAgain(opt_ident.map(|x| folder.fold_ident(x))),
ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))),
use ast::{ExprBreak, ExprCall, ExprCast};
use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex};
use ast::{ExprLit, ExprLoop, ExprMac, ExprRange};
-use ast::{ExprMethodCall, ExprParen, ExprPath};
+use ast::{ExprMethodCall, ExprParen, ExprPath, ExprQPath};
use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary};
use ast::{ExprVec, ExprWhile, ExprWhileLet, ExprForLoop, Field, FnDecl};
use ast::{FnUnboxedClosureKind, FnMutUnboxedClosureKind};
TyQPath(P(QPath {
self_type: self_type,
trait_ref: P(trait_ref),
- item_name: item_name,
+ item_path: ast::PathSegment {
+ identifier: item_name,
+ parameters: ast::PathParameters::none()
+ }
}))
} else if self.check(&token::ModSep) ||
self.token.is_ident() ||
if !self.eat(&token::ModSep) {
segments.push(ast::PathSegment {
identifier: identifier,
- parameters: ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
- lifetimes: Vec::new(),
- types: OwnedSlice::empty(),
- bindings: OwnedSlice::empty(),
- })
+ parameters: ast::PathParameters::none()
});
return segments;
}
hi = self.last_span.hi;
}
_ => {
+ if self.eat_lt() {
+ // QUALIFIED PATH `<TYPE as TRAIT_REF>::item::<'a, T>`
+ let self_type = self.parse_ty_sum();
+ self.expect_keyword(keywords::As);
+ let trait_ref = self.parse_trait_ref();
+ self.expect(&token::Gt);
+ self.expect(&token::ModSep);
+ let item_name = self.parse_ident();
+ let parameters = if self.eat(&token::ModSep) {
+ self.expect_lt();
+ // Consumed `item::<`, go look for types
+ let (lifetimes, types, bindings) =
+ self.parse_generic_values_after_lt();
+ ast::AngleBracketedParameters(ast::AngleBracketedParameterData {
+ lifetimes: lifetimes,
+ types: OwnedSlice::from_vec(types),
+ bindings: OwnedSlice::from_vec(bindings),
+ })
+ } else {
+ ast::PathParameters::none()
+ };
+ let hi = self.span.hi;
+ return self.mk_expr(lo, hi, ExprQPath(P(QPath {
+ self_type: self_type,
+ trait_ref: P(trait_ref),
+ item_path: ast::PathSegment {
+ identifier: item_name,
+ parameters: parameters
+ }
+ })));
+ }
if self.eat_keyword(keywords::Move) {
return self.parse_lambda_expr(CaptureByValue);
}
try!(self.print_bounds("", &bounds[]));
}
ast::TyQPath(ref qpath) => {
- try!(word(&mut self.s, "<"));
- try!(self.print_type(&*qpath.self_type));
- try!(space(&mut self.s));
- try!(self.word_space("as"));
- try!(self.print_trait_ref(&*qpath.trait_ref));
- try!(word(&mut self.s, ">"));
- try!(word(&mut self.s, "::"));
- try!(self.print_ident(qpath.item_name));
+ try!(self.print_qpath(&**qpath, false))
}
ast::TyFixedLengthVec(ref ty, ref v) => {
try!(word(&mut self.s, "["));
}
}
ast::ExprPath(ref path) => try!(self.print_path(path, true)),
+ ast::ExprQPath(ref qpath) => try!(self.print_qpath(&**qpath, true)),
ast::ExprBreak(opt_ident) => {
try!(word(&mut self.s, "break"));
try!(space(&mut self.s));
Ok(())
}
+ fn print_qpath(&mut self,
+ qpath: &ast::QPath,
+ colons_before_params: bool)
+ -> IoResult<()>
+ {
+ try!(word(&mut self.s, "<"));
+ try!(self.print_type(&*qpath.self_type));
+ try!(space(&mut self.s));
+ try!(self.word_space("as"));
+ try!(self.print_trait_ref(&*qpath.trait_ref));
+ try!(word(&mut self.s, ">"));
+ try!(word(&mut self.s, "::"));
+ try!(self.print_ident(qpath.item_path.identifier));
+ self.print_path_parameters(&qpath.item_path.parameters, colons_before_params)
+ }
+
fn print_path_parameters(&mut self,
parameters: &ast::PathParameters,
colons_before_params: bool)
fn visit_path(&mut self, path: &'v Path, _id: ast::NodeId) {
walk_path(self, path)
}
+ fn visit_qpath(&mut self, qpath_span: Span, qpath: &'v QPath) {
+ walk_qpath(self, qpath_span, qpath)
+ }
fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) {
walk_path_segment(self, path_span, path_segment)
}
walk_ty_param_bounds_helper(visitor, bounds);
}
TyQPath(ref qpath) => {
- visitor.visit_ty(&*qpath.self_type);
- visitor.visit_trait_ref(&*qpath.trait_ref);
- visitor.visit_ident(typ.span, qpath.item_name);
+ visitor.visit_qpath(typ.span, &**qpath);
}
TyFixedLengthVec(ref ty, ref expression) => {
visitor.visit_ty(&**ty);
}
}
+pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V,
+ qpath_span: Span,
+ qpath: &'v QPath) {
+ visitor.visit_ty(&*qpath.self_type);
+ visitor.visit_trait_ref(&*qpath.trait_ref);
+ visitor.visit_path_segment(qpath_span, &qpath.item_path);
+}
+
pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V,
path_span: Span,
segment: &'v PathSegment) {
ExprPath(ref path) => {
visitor.visit_path(path, expression.id)
}
+ ExprQPath(ref qpath) => {
+ visitor.visit_qpath(expression.span, &**qpath)
+ }
ExprBreak(_) | ExprAgain(_) => {}
ExprRet(ref optional_expression) => {
walk_expr_opt(visitor, optional_expression)
--- /dev/null
+// Mark stack as non-executable
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack, "", %progbits
+#endif
+
+/* See i386/morestack.S for the lengthy, general explanation. */
+
+.global rust_stack_exhausted
+
+.hidden __morestack
+
+// __morestack is the out-of-line routine that LLVM's segmented-stack
+// prologue branches to when a function thinks it is out of stack.
+// This powerpc version does not actually grow the stack: reaching it is
+// treated as fatal, so it just calls rust_stack_exhausted, which reports
+// the condition and never returns (hence no epilogue or blr here).
+// NOTE(review): `bl rust_stack_exhausted` is a non-PLT call — presumably
+// fine for the static rt objects this is linked into; confirm for PIC.
+// FIXME(POWERPC): this might not be perfectly right but works for now
+__morestack:
+    .cfi_startproc
+    bl rust_stack_exhausted
+    // the above function ensures that it never returns
+    .cfi_endproc
+.end __morestack
--- /dev/null
+// Intentionally (almost) empty stub — presumably the powerpc counterpart
+// of the other per-arch rt assembly files (filename not visible in this
+// hunk; TODO confirm against the full patch).  Its only effect is the
+// GNU-stack note below, which stops the linker from marking the stack
+// executable just because an assembly object was linked in.
+// Mark stack as non-executable
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack, "", %progbits
+#endif
# If this file is modified, then llvm will be forcibly cleaned and then rebuilt.
# The actual contents of this file do not matter, but to trigger a change on the
# build bots then the contents should be changed so git updates the mtime.
-2015-01-05
+2015-01-13
copy_memory(buf.as_mut_slice(), alu);
let buf_len = buf.len();
copy_memory(buf.slice_mut(alu_len, buf_len),
- &alu[0..LINE_LEN]);
+ &alu[..LINE_LEN]);
let mut pos = 0;
let mut bytes;
for i in range(0u, chars_left) {
buf[i] = self.nextc();
}
- self.out.write(&buf[0..chars_left])
+ self.out.write(&buf[..chars_left])
}
}
impl Something for X {
fn yay<T: Str>(_:Option<X>, thing: &[T]) {
-//~^ ERROR in method `yay`, type parameter 0 requires bound `Str`, which is not required
-
+ //~^ ERROR the requirement `T : Str` appears on the impl method
}
}
}
impl A for E {
- fn b<F: Sync, G>(_x: F) -> F { panic!() } //~ ERROR type parameter 0 requires `Sync`
+ fn b<F: Sync, G>(_x: F) -> F { panic!() }
+ //~^ ERROR `F : core::marker::Sync` appears on the impl method
}
fn main() {}
x: &'a mut &'a isize
}
-pub trait Foo<'a> {
+pub trait Foo<'a, 't> {
fn no_bound<'b>(self, b: Inv<'b>);
fn has_bound<'b:'a>(self, b: Inv<'b>);
fn wrong_bound1<'b,'c,'d:'a+'b>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
- fn wrong_bound2<'b,'c,'d:'a+'b+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
+ fn okay_bound<'b,'c,'d:'a+'b+'c>(self, b: Inv<'b>, c: Inv<'c>, d: Inv<'d>);
+ fn another_bound<'x: 'a>(self, x: Inv<'x>);
}
-impl<'a> Foo<'a> for &'a isize {
+impl<'a, 't> Foo<'a, 't> for &'a isize {
fn no_bound<'b:'a>(self, b: Inv<'b>) {
//~^ ERROR lifetime parameters or bounds on method `no_bound` do not match
}
// cases.
}
- fn wrong_bound2<'b,'c,'e:'b+'c>(self, b: Inv<'b>, c: Inv<'c>, e: Inv<'e>) {
- //~^ ERROR distinct set of bounds from its counterpart
+ fn okay_bound<'b,'c,'e:'b+'c>(self, b: Inv<'b>, c: Inv<'c>, e: Inv<'e>) {
}
+
+ fn another_bound<'x: 't>(self, x: Inv<'x>) {}
}
fn main() { }
impl Foo for isize {
// invalid bound for T, was defined as Eq in trait
fn test_error1_fn<T: Ord>(&self) {}
- //~^ ERROR in method `test_error1_fn`, type parameter 0 requires bound `core::cmp::Ord`
+ //~^ ERROR the requirement `T : core::cmp::Ord` appears on the impl
// invalid bound for T, was defined as Eq + Ord in trait
fn test_error2_fn<T: Eq + B>(&self) {}
- //~^ ERROR in method `test_error2_fn`, type parameter 0 requires bound `B`
+ //~^ ERROR the requirement `T : B` appears on the impl
// invalid bound for T, was defined as Eq + Ord in trait
fn test_error3_fn<T: B + Eq>(&self) {}
- //~^ ERROR in method `test_error3_fn`, type parameter 0 requires bound `B`
+ //~^ ERROR the requirement `T : B` appears on the impl
// multiple bounds, same order as in trait
fn test3_fn<T: Ord + Eq>(&self) {}
// parameters in impls must be equal or more general than in the defining trait
fn test_error5_fn<T: B>(&self) {}
- //~^ ERROR in method `test_error5_fn`, type parameter 0 requires bound `B`
+ //~^ ERROR the requirement `T : B` appears on the impl
// bound `std::cmp::Eq` not enforced by this implementation, but this is OK
fn test6_fn<T: A>(&self) {}
fn test_error7_fn<T: A + Eq>(&self) {}
- //~^ ERROR in method `test_error7_fn`, type parameter 0 requires bound `core::cmp::Eq`
+ //~^ ERROR the requirement `T : core::cmp::Eq` appears on the impl
fn test_error8_fn<T: C>(&self) {}
- //~^ ERROR in method `test_error8_fn`, type parameter 0 requires bound `C`
+ //~^ ERROR the requirement `T : C` appears on the impl
}
impl Trait for usize {
fn method<G: Getter<usize>>() {}
-    //~^ ERROR in method `method`, type parameter 0 requires bound `Getter<usize>`
+    //~^ ERROR the requirement `G : Getter<usize>` appears on the impl method but not on the corresponding trait method
}
fn main() {}
-
impl<A, T: Iterator<A>> IteratorUtil<A> for T {
fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> {
- //~^ ERROR in method `zip`, type parameter 1 requires bound `Iterator<B>`
+ //~^ ERROR the requirement `U : Iterator<B>` appears on the impl method
ZipIterator{a: self, b: other}
}
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that a qualified-path expression `<Type as Trait>::item` still
+// enforces the trait's type-parameter arity: `IntoCow` here takes two
+// type parameters, so naming it bare in the qpath must be rejected.
+
+use std::borrow::IntoCow;
+
+fn main() {
+    <String as IntoCow>::into_cow("foo".to_string());
+    //~^ ERROR wrong number of type arguments: expected 2, found 0
+}
+
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check type checking of qualified-path (UFCS) calls: the qpath fixes the
+// self type and trait parameters up front, so trait selection must fail
+// for an unimplemented combination, and the argument types are checked
+// against the types the qpath pins down rather than inferred from the
+// arguments themselves.
+
+use std::ops::Add;
+
+fn main() {
+    <i32 as Add<u32>>::add(1, 2);
+    //~^ ERROR the trait `core::ops::Add<u32>` is not implemented for the type `i32`
+    <i32 as Add<i32>>::add(1u32, 2);
+    //~^ ERROR mismatched types
+    <i32 as Add<i32>>::add(1, 2u32);
+    //~^ ERROR mismatched types
+}
+
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-android: FIXME(#10381)
+// min-lldb-version: 310
+
+// compile-flags:-g
+
+// Debug info (-g) must be emitted successfully for a struct whose field
+// type mentions an associated type projection (`<I as Iterator>::Item`).
+struct Peekable<I> where I: Iterator {
+    _iter: I,
+    _next: Option<<I as Iterator>::Item>,
+}
+
+fn main() {
+    // Instantiate the struct so debuginfo for the concrete type
+    // (`Peekable<vec::IntoIter<i32>>`) is actually generated.
+    let mut iter = Vec::<i32>::new().into_iter();
+    let next = iter.next();
+    let _v = Peekable {
+        _iter: iter,
+        _next : next,
+    };
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test associated types appearing in struct-like enum variants.
+
+use self::VarValue::*;
+
+pub trait UnifyKey {
+    type Value;
+    fn to_index(&self) -> usize;
+}
+
+pub enum VarValue<K:UnifyKey> {
+    Redirect { to: K },
+    Root { value: K::Value, rank: usize },
+}
+
+// Follow `Redirect` links until a `Root` is reached.  The interesting part
+// for the type system: the variant field has declared type `K::Value`, and
+// the bound `K: UnifyKey<Value=Option<V>>` must normalize it to `Option<V>`
+// so the function can return `&'a Option<V>`.
+fn get<'a,K:UnifyKey<Value=Option<V>>,V>(table: &'a Vec<VarValue<K>>, key: &K) -> &'a Option<V> {
+    match table[key.to_index()] {
+        VarValue::Redirect { to: ref k } => get(table, k),
+        VarValue::Root { value: ref v, rank: _ } => v,
+    }
+}
+
+impl UnifyKey for usize {
+    type Value = Option<char>;
+    fn to_index(&self) -> usize { *self }
+}
+
+fn main() {
+    // Redirect chain: 0 -> 1 -> 3 -> 2, where 2 is the root holding 'x'.
+    let table = vec![/* 0 */ Redirect { to: 1 },
+                     /* 1 */ Redirect { to: 3 },
+                     /* 2 */ Root { value: Some('x'), rank: 0 },
+                     /* 3 */ Redirect { to: 2 }];
+    assert_eq!(get(&table, &0), &Some('x'));
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test associated types appearing in tuple-like enum variants.
+
+use self::VarValue::*;
+
+pub trait UnifyKey {
+    type Value;
+    fn to_index(&self) -> usize;
+}
+
+pub enum VarValue<K:UnifyKey> {
+    Redirect(K),
+    Root(K::Value, usize),
+}
+
+// Same shape as the struct-variant test: the tuple-variant field of type
+// `K::Value` must normalize to `Option<V>` under the
+// `K: UnifyKey<Value=Option<V>>` bound for the return type to check.
+fn get<'a,K:UnifyKey<Value=Option<V>>,V>(table: &'a Vec<VarValue<K>>, key: &K) -> &'a Option<V> {
+    match table[key.to_index()] {
+        VarValue::Redirect(ref k) => get(table, k),
+        VarValue::Root(ref v, _) => v,
+    }
+}
+
+impl UnifyKey for usize {
+    type Value = Option<char>;
+    fn to_index(&self) -> usize { *self }
+}
+
+fn main() {
+    // Redirect chain: 0 -> 1 -> 3 -> 2, where 2 is the root holding 'x'.
+    let table = vec![/* 0 */ Redirect(1),
+                     /* 1 */ Redirect(3),
+                     /* 2 */ Root(Some('x'), 0),
+                     /* 3 */ Redirect(2)];
+    assert_eq!(get(&table, &0), &Some('x'));
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we correctly normalize the type of a struct field
+// which has an associated type.
+
+pub trait UnifyKey {
+    type Value;
+}
+
+pub struct Node<K:UnifyKey> {
+    pub key: K,
+    pub value: K::Value,
+}
+
+// `node.value` has declared type `K::Value`; the bound
+// `K: UnifyKey<Value=Option<V>>` must normalize it to `Option<V>` so that
+// `.clone()` yields the declared return type.
+fn foo<K : UnifyKey<Value=Option<V>>,V : Clone>(node: &Node<K>) -> Option<V> {
+    node.value.clone()
+}
+
+impl UnifyKey for i32 {
+    type Value = Option<u32>;
+}
+
+impl UnifyKey for u32 {
+    type Value = Option<i32>;
+}
+
+// Two keys with mirrored Value types check that normalization happens
+// per instantiation rather than once for the generic definition.
+pub fn main() {
+    let node: Node<i32> = Node { key: 1, value: Some(22) };
+    assert_eq!(foo(&node), Some(22_u32));
+
+    let node: Node<u32> = Node { key: 1, value: Some(22) };
+    assert_eq!(foo(&node), Some(22_i32));
+}
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we correctly normalize the type of a struct field
+// which has an associated type.
+
+pub trait UnifyKey {
+    type Value;
+}
+
+// Tuple-struct version of the named-field normalization test.
+pub struct Node<K:UnifyKey>(K, K::Value);
+
+// `node.1` has declared type `K::Value`; the bound
+// `K: UnifyKey<Value=Option<V>>` must normalize it to `Option<V>`.
+fn foo<K : UnifyKey<Value=Option<V>>,V : Clone>(node: &Node<K>) -> Option<V> {
+    node.1.clone()
+}
+
+impl UnifyKey for i32 {
+    type Value = Option<u32>;
+}
+
+impl UnifyKey for u32 {
+    type Value = Option<i32>;
+}
+
+// Two keys with mirrored Value types check that normalization happens
+// per instantiation rather than once for the generic definition.
+pub fn main() {
+    let node: Node<i32> = Node(1, Some(22));
+    assert_eq!(foo(&node), Some(22_u32));
+
+    let node: Node<u32> = Node(1, Some(22));
+    assert_eq!(foo(&node), Some(22_i32));
+}
// ignore-windows FIXME #13259
#![feature(unboxed_closures)]
+#![feature(unsafe_destructor)]
use std::os;
use std::io::process::Command;
-use std::finally::Finally;
use std::str;
+use std::ops::{Drop, FnMut, FnOnce};
#[inline(never)]
fn foo() {
#[inline(never)]
fn double() {
- (|&mut:| {
- panic!("once");
- }).finally(|| {
- panic!("twice");
- })
+ struct Double;
+
+ impl Drop for Double {
+ fn drop(&mut self) { panic!("twice") }
+ }
+
+ let _d = Double;
+
+ panic!("once");
}
fn runtest(me: &str) {
#![feature(macro_rules)]
+use std::borrow::{Cow, IntoCow};
use std::collections::Bitv;
use std::default::Default;
use std::iter::FromIterator;
+use std::ops::Add;
use std::option::IntoIter as OptionIter;
use std::rand::Rand;
use std::rand::XorShiftRng as DummyRng;
fn odd(x: uint) -> bool { x % 2 == 1 }
fn dummy_rng() -> DummyRng { DummyRng::new_unseeded() }
+trait Size: Sized {
+ fn size() -> uint { std::mem::size_of::<Self>() }
+}
+impl<T> Size for T {}
+
macro_rules! tests {
($($expr:expr, $ty:ty, ($($test:expr),*);)+) => (pub fn main() {$({
const C: $ty = $expr;
// , (vec![b'f', b'o', b'o'], u8_as_i8);
// Trait static methods.
- // FIXME qualified path expressions aka UFCS i.e. <T as Trait>::method.
+ <bool as Size>::size, fn() -> uint, ();
Default::default, fn() -> int, ();
+ <int as Default>::default, fn() -> int, ();
Rand::rand, fn(&mut DummyRng) -> int, (&mut dummy_rng());
+ <int as Rand>::rand, fn(&mut DummyRng) -> int, (&mut dummy_rng());
Rand::rand::<DummyRng>, fn(&mut DummyRng) -> int, (&mut dummy_rng());
+ <int as Rand>::rand::<DummyRng>, fn(&mut DummyRng) -> int, (&mut dummy_rng());
// Trait non-static methods.
Clone::clone, fn(&int) -> int, (&5);
+ <int as Clone>::clone, fn(&int) -> int, (&5);
FromIterator::from_iter, fn(OptionIter<int>) -> Vec<int>, (Some(5).into_iter());
- FromIterator::from_iter::<OptionIter<int>>, fn(OptionIter<int>) -> Vec<int>
- , (Some(5).into_iter());
+ <Vec<_> as FromIterator<_>>::from_iter, fn(OptionIter<int>) -> Vec<int>,
+ (Some(5).into_iter());
+ <Vec<int> as FromIterator<_>>::from_iter, fn(OptionIter<int>) -> Vec<int>,
+ (Some(5).into_iter());
+ FromIterator::from_iter::<OptionIter<int>>, fn(OptionIter<int>) -> Vec<int>,
+ (Some(5).into_iter());
+ <Vec<int> as FromIterator<_>>::from_iter::<OptionIter<int>>, fn(OptionIter<int>) -> Vec<int>,
+ (Some(5).into_iter());
+ Add::add, fn(i32, i32) -> i32, (5, 6);
+ <i32 as Add<_>>::add, fn(i32, i32) -> i32, (5, 6);
+ <i32 as Add<i32>>::add, fn(i32, i32) -> i32, (5, 6);
+ <String as IntoCow<_, _>>::into_cow, fn(String) -> Cow<'static, String, str>,
+ ("foo".to_string());
+ <String as IntoCow<'static, _, _>>::into_cow, fn(String) -> Cow<'static, String, str>,
+ ("foo".to_string());
}
let ss: &&[int] = &s;
let sss: &&&[int] = &ss;
- println!("{:?}", &s[0..3]);
+ println!("{:?}", &s[..3]);
println!("{:?}", &ss[3..]);
println!("{:?}", &sss[2..4]);
}
let abc = [1i, 2, 3];
let tf = [true, false];
let x = [(), ()];
- let slice = &x[0..1];
+ let slice = &x[..1];
assert_repr_eq(&abc[], "[1i, 2i, 3i]".to_string());
assert_repr_eq(&tf[], "[true, false]".to_string());
let cmp: &[int] = &[3, 4, 5];
assert!(&x[2..] == cmp);
let cmp: &[int] = &[1, 2, 3];
- assert!(&x[0..3] == cmp);
+ assert!(&x[..3] == cmp);
let cmp: &[int] = &[2, 3, 4];
assert!(&x[1..4] == cmp);
let cmp: &[int] = &[3, 4, 5];
assert!(&x[2..] == cmp);
let cmp: &[int] = &[1, 2, 3];
- assert!(&x[0..3] == cmp);
+ assert!(&x[..3] == cmp);
let cmp: &[int] = &[2, 3, 4];
assert!(&x[1..4] == cmp);
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that we do not error out because of a (False) ambiguity
+// between the builtin rules for Sized and the where clause. Issue
+// #20959.
+
+// The clause `Option<K> : Sized` duplicates an obligation the builtin
+// Sized rules already discharge; trait selection must treat the two
+// candidates as equivalent instead of reporting an ambiguity.
+fn foo<K>(x: Option<K>)
+    where Option<K> : Sized
+{
+    // Moving `x` forces the `Sized` obligation to actually be checked.
+    let _y = x;
+}
+
+fn main() {
+    foo(Some(22));
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Bound-style (`T: Bound`) and where-clause (`where T: Bound`) forms of
+// the same predicate should be interchangeable between a trait method
+// declaration and its impl; methods a-d cover all four combinations.
+
+trait Bound {}
+
+trait Trait {
+    fn a<T>(&self, T) where T: Bound;
+    fn b<T>(&self, T) where T: Bound;
+    fn c<T: Bound>(&self, T);
+    fn d<T: Bound>(&self, T);
+}
+
+impl Trait for bool {
+    fn a<T: Bound>(&self, _: T) {}
+    //^~ This gets rejected but should be accepted
+    // NOTE(review): `//^~` is not a compiletest annotation (that would be
+    // `//~^ ERROR ...`); it reads as a plain comment recording a known
+    // limitation — confirm whether this test is expected to pass as-is.
+    fn b<T>(&self, _: T) where T: Bound {}
+    fn c<T: Bound>(&self, _: T) {}
+    fn d<T>(&self, _: T) where T: Bound {}
+}
+
+fn main() {}