/// buf.push(5i);
/// buf.push(3);
/// buf.push(4);
- /// assert_eq!(buf.iter().collect::<Vec<&int>>().as_slice(), &[&5, &3, &4]);
+ /// let b: &[_] = &[&5, &3, &4];
+ /// assert_eq!(buf.iter().collect::<Vec<&int>>().as_slice(), b);
/// ```
pub fn iter<'a>(&'a self) -> Items<'a, T> {
Items{index: 0, rindex: self.nelts, lo: self.lo, elts: self.elts.as_slice()}
/// for num in buf.mut_iter() {
/// *num = *num - 2;
/// }
- /// assert_eq!(buf.mut_iter().collect::<Vec<&mut int>>().as_slice(), &[&mut 3, &mut 1, &mut 2]);
+ /// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
+ /// assert_eq!(buf.mut_iter().collect::<Vec<&mut int>>().as_slice(), b);
/// ```
pub fn mut_iter<'a>(&'a mut self) -> MutItems<'a, T> {
let start_index = raw_index(self.lo, self.elts.len(), 0);
use std::iter::order;
// official Unicode test data
// from http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakTest.txt
- let test_same = [
+ let test_same: [(_, &[_]), .. 325] = [
("\u0020\u0020", &["\u0020", "\u0020"]), ("\u0020\u0308\u0020", &["\u0020\u0308",
"\u0020"]), ("\u0020\u000D", &["\u0020", "\u000D"]), ("\u0020\u0308\u000D",
&["\u0020\u0308", "\u000D"]), ("\u0020\u000A", &["\u0020", "\u000A"]),
("\u0646\u200D\u0020", &["\u0646\u200D", "\u0020"]),
];
- let test_diff = [
+ let test_diff: [(_, &[_], &[_]), .. 23] = [
("\u0020\u0903", &["\u0020\u0903"], &["\u0020", "\u0903"]), ("\u0020\u0308\u0903",
&["\u0020\u0308\u0903"], &["\u0020\u0308", "\u0903"]), ("\u000D\u0308\u0903",
&["\u000D", "\u0308\u0903"], &["\u000D", "\u0308", "\u0903"]), ("\u000A\u0308\u0903",
// test the indices iterators
let s = "a̐éö̲\r\n";
let gr_inds = s.grapheme_indices(true).collect::<Vec<(uint, &str)>>();
- assert_eq!(gr_inds.as_slice(), &[(0u, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")]);
+ let b: &[_] = &[(0u, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")];
+ assert_eq!(gr_inds.as_slice(), b);
let gr_inds = s.grapheme_indices(true).rev().collect::<Vec<(uint, &str)>>();
- assert_eq!(gr_inds.as_slice(), &[(11, "\r\n"), (6, "ö̲"), (3, "é"), (0u, "a̐")]);
+ let b: &[_] = &[(11, "\r\n"), (6, "ö̲"), (3, "é"), (0u, "a̐")];
+ assert_eq!(gr_inds.as_slice(), b);
let mut gr_inds = s.grapheme_indices(true);
let e1 = gr_inds.size_hint();
assert_eq!(e1, (1, Some(13)));
// make sure the reverse iterator does the right thing with "\n" at beginning of string
let s = "\n\r\n\r";
let gr = s.graphemes(true).rev().collect::<Vec<&str>>();
- assert_eq!(gr.as_slice(), &["\r", "\r\n", "\n"]);
+ let b: &[_] = &["\r", "\r\n", "\n"];
+ assert_eq!(gr.as_slice(), b);
}
#[test]
///
/// ```
/// let s = String::from_str("hello");
- /// assert_eq!(s.as_bytes(), &[104, 101, 108, 108, 111]);
+ /// let b: &[_] = &[104, 101, 108, 108, 111];
+ /// assert_eq!(s.as_bytes(), b);
/// ```
#[inline]
pub fn as_bytes<'a>(&'a self) -> &'a [u8] {
/// bytes[1] = 51;
/// bytes[4] = 48;
/// }
- /// assert_eq!(s.as_bytes(), &[104, 51, 108, 108, 48]);
+ /// let b: &[_] = &[104, 51, 108, 108, 48];
+ /// assert_eq!(s.as_bytes(), b);
/// assert_eq!(s.as_slice(), "h3ll0")
/// ```
#[inline]
/// assert_eq!(Greater.reverse(), Less);
///
///
- /// let mut data = &mut [2u, 10, 5, 8];
+ /// let mut data: &mut [_] = &mut [2u, 10, 5, 8];
///
/// // sort the array from largest to smallest.
/// data.sort_by(|a, b| a.cmp(b).reverse());
///
- /// assert_eq!(data, &mut [10u, 8, 5, 2]);
+ /// let b: &mut [_] = &mut [10u, 8, 5, 2];
+ /// assert!(data == b);
/// ```
#[inline]
#[experimental]
fn as_slice<'a>(&'a self) -> &'a [T] {
match *self {
Some(ref x) => slice::ref_slice(x),
- None => &[]
+ None => {
+ let result: &[_] = &[];
+ result
+ }
}
}
}
}
#[cfg(not(stage0))]
pub struct TraitObject {
- pub data: *(),
- pub vtable: *(),
+ pub data: *mut (),
+ pub vtable: *mut (),
}
/// This trait is meant to map equivalences between raw structs and their
// Structural equivalence: compares the two values by delegating to
// slice equality on `self.as_slice()` and `other.as_slice()`.
fn equiv(&self, other: &V) -> bool { self.as_slice() == other.as_slice() }
}
+#[unstable = "waiting for DST"]
+impl<'a,T:PartialEq> PartialEq for &'a mut [T] {
+ fn eq(&self, other: & &'a mut [T]) -> bool {
+ self.len() == other.len() &&
+ order::eq(self.iter(), other.iter())
+ }
+ fn ne(&self, other: & &'a mut [T]) -> bool {
+ self.len() != other.len() ||
+ order::ne(self.iter(), other.iter())
+ }
+}
+
+#[unstable = "waiting for DST"]
+impl<'a,T:Eq> Eq for &'a mut [T] {}
+
+#[unstable = "waiting for DST"]
+impl<'a,T:PartialEq, V: Slice<T>> Equiv<V> for &'a mut [T] {
+ #[inline]
+ fn equiv(&self, other: &V) -> bool { self.as_slice() == other.as_slice() }
+}
+
#[unstable = "waiting for DST"]
impl<'a,T:Ord> Ord for &'a [T] {
fn cmp(&self, other: & &'a [T]) -> Ordering {
#[test]
fn test_all() {
- let v: Box<&[int]> = box &[1i, 2, 3, 4, 5];
+ let v: Box<[int]> = box [1i, 2, 3, 4, 5];
assert!(v.iter().all(|&x| x < 10));
assert!(!v.iter().all(|&x| x % 2 == 0));
assert!(!v.iter().all(|&x| x > 100));
#[test]
fn test_any() {
- let v: Box<&[int]> = box &[1i, 2, 3, 4, 5];
+ let v: Box<[int]> = box [1i, 2, 3, 4, 5];
assert!(v.iter().any(|&x| x < 10));
assert!(v.iter().any(|&x| x % 2 == 0));
assert!(!v.iter().any(|&x| x > 100));
}
#[cfg(not(stage0))]
fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
- inner: *TyDesc) -> bool {
+ inner: *const TyDesc) -> bool {
self.align(align);
if ! self.inner.visit_evec_fixed(n, sz, align, inner) {
return false;
#[cfg(not(stage0))]
fn visit_evec_fixed(&mut self, n: uint, sz: uint, _align: uint,
- inner: *TyDesc) -> bool {
+ inner: *const TyDesc) -> bool {
let assumed_size = if sz == 0 { n } else { sz };
self.get::<()>(|this, b| {
this.write_vec_range(b, assumed_size, inner)
/// A wrapper for a nullable pointer. Don't use this except for interacting
/// with libc. Basically Option, but without the dependence on libstd.
// If/when libprim happens, this can be removed in favor of that
-pub enum Nullable<type T> {
+pub enum Nullable<T> {
Null,
NotNull(T)
}
let (left, right) = if cap <= buf.len() {
(buf.slice_to(cap), buf.slice_from(cap))
} else {
- (buf, &[])
+ let result: (_, &[_]) = (buf, &[]);
+ result
};
// Do the necessary writes
writer.write([1, 2, 3]).unwrap();
writer.write([4, 5, 6, 7]).unwrap();
assert_eq!(writer.tell(), Ok(8));
- assert_eq!(writer.get_ref(), &[0, 1, 2, 3, 4, 5, 6, 7]);
+ let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7];
+ assert_eq!(writer.get_ref(), b);
writer.seek(0, io::SeekSet).unwrap();
assert_eq!(writer.tell(), Ok(0));
writer.write([3, 4]).unwrap();
- assert_eq!(writer.get_ref(), &[3, 4, 2, 3, 4, 5, 6, 7]);
+ let b: &[_] = &[3, 4, 2, 3, 4, 5, 6, 7];
+ assert_eq!(writer.get_ref(), b);
writer.seek(1, io::SeekCur).unwrap();
writer.write([0, 1]).unwrap();
- assert_eq!(writer.get_ref(), &[3, 4, 2, 0, 1, 5, 6, 7]);
+ let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 7];
+ assert_eq!(writer.get_ref(), b);
writer.seek(-1, io::SeekEnd).unwrap();
writer.write([1, 2]).unwrap();
- assert_eq!(writer.get_ref(), &[3, 4, 2, 0, 1, 5, 6, 1, 2]);
+ let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2];
+ assert_eq!(writer.get_ref(), b);
writer.seek(1, io::SeekEnd).unwrap();
writer.write([1]).unwrap();
- assert_eq!(writer.get_ref(), &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1]);
+ let b: &[_] = &[3, 4, 2, 0, 1, 5, 6, 1, 2, 0, 1];
+ assert_eq!(writer.get_ref(), b);
}
#[test]
// implementations below. If pointer arithmetic is done through integers the
// optimizations start to break down.
extern "rust-intrinsic" {
- fn offset<type T>(dst: *const T, offset: int) -> *const T;
+ fn offset<T>(dst: *const T, offset: int) -> *const T;
}
#[no_mangle]
E0015,
E0016,
E0017,
- E0018,
E0019,
E0020,
E0021,
E0034,
E0035,
E0036,
- E0037,
E0038,
E0039,
E0040,
E0061,
E0062,
E0063,
- E0064,
- E0065,
E0066,
E0067,
E0068,
E0108,
E0109,
E0110,
- E0111,
- E0112,
E0113,
E0114,
E0115,
box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
- node: ast::ExprAddrOf(box(GC) ast::MutImmutable,
+ node: ast::ExprAddrOf(ast::MutImmutable,
box(GC) ast::Expr {
id: ast::DUMMY_NODE_ID,
- node: ast::ExprVec(cx.testfns.borrow().iter().map(|test| {
+ node: ast::ExprVec(cx.testfns.iter().map(|test| {
mk_test_desc_and_fn_rec(cx, test)
}).collect()),
span: DUMMY_SP,
declare_lint!(pub VARIANT_SIZE_DIFFERENCE, Allow,
"detects enums with widely varying variant sizes")
+declare_lint!(pub TRANSMUTE_FAT_PTR, Allow,
+ "detects transmutes of fat pointers")
+
/// Does nothing as a lint pass, but registers some `Lint`s
/// which are used by other parts of the compiler.
pub struct HardwiredLints;
}
fn emit_autoref(&mut self, ecx: &e::EncodeContext, autoref: &ty::AutoRef) {
+ use serialize::Encoder;
+
self.emit_enum("AutoRef", |this| {
match autoref {
&ty::AutoPtr(r, m, None) => {
}
fn emit_auto_deref_ref(&mut self, ecx: &e::EncodeContext, auto_deref_ref: &ty::AutoDerefRef) {
+ use serialize::Encoder;
+
self.emit_struct("AutoDerefRef", 2, |this| {
this.emit_struct_field("autoderefs", 0, |this| auto_deref_ref.autoderefs.encode(this));
this.emit_struct_field("autoref", 1, |this| {
}
fn emit_unsize_kind(&mut self, ecx: &e::EncodeContext, uk: &ty::UnsizeKind) {
+ use serialize::Encoder;
+
self.emit_enum("UnsizeKind", |this| {
match *uk {
ty::UnsizeLength(len) => {
_ if ty::adjust_is_object(adjustment) => {
let method_call = MethodCall::autoobject(id);
for &method in tcx.method_map.borrow().find(&method_call).iter() {
- rbml_w.tag(c::tag_table_method_map, |ebml_w| {
+ rbml_w.tag(c::tag_table_method_map, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
encode_method_callee(ecx, rbml_w, method_call.adjustment, method)
for &dr in tcx.vtable_map.borrow().find(&method_call).iter() {
rbml_w.tag(c::tag_table_vtable_map, |rbml_w| {
rbml_w.id(id);
- rbml_w.tag(c::tag_table_val, |ebml_w| {
+ rbml_w.tag(c::tag_table_val, |rbml_w| {
encode_vtable_res_with_key(ecx, rbml_w, method_call.adjustment, dr);
})
})
rbml_w.tag(c::tag_table_adjustments, |rbml_w| {
rbml_w.id(id);
rbml_w.tag(c::tag_table_val, |rbml_w| {
- rbml_w.emit_auto_adjustment(ecx, adj);
+ rbml_w.emit_auto_adjustment(ecx, adjustment);
})
})
}
// Mutable slices are allowed.
ExprVec(_) => {}
_ => span_err!(v.tcx.sess, e.span, E0017,
- "references in constants may only refer to immutable values");
+ "references in constants may only refer to immutable values")
}
},
}
}
- fn check_transmute(&self, span: Span, from: ty::t, to: ty::t) {
+ fn check_transmute(&self, span: Span, from: ty::t, to: ty::t, id: ast::NodeId) {
if type_size_is_affected_by_type_parameters(self.tcx, from) {
span_err!(self.tcx.sess, span, E0139,
"cannot transmute from a type that contains type parameters");
span: span,
from: from,
to: to,
+ id: id,
};
self.tcx.transmute_restrictions.borrow_mut().push(restriction);
}
if bare_fn_ty.abi == RustIntrinsic => {
let from = *bare_fn_ty.sig.inputs.get(0);
let to = bare_fn_ty.sig.output;
- self.check_transmute(expr.span, from, to);
+ self.check_transmute(expr.span, from, to, expr.id);
}
_ => {
self.tcx
}
pub fn deref_kind(tcx: &ty::ctxt, t: ty::t) -> deref_kind {
- debug!("deref_kind {}", ty_to_str(tcx, t));
+ debug!("deref_kind {}", ty_to_string(tcx, t));
match opt_deref_kind(t) {
Some(k) => k,
None => {
self.content.slice(start, limit)
}
- fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
+ pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] {
let (start, limit) = self.limits(space);
self.content.mut_slice(start, limit)
}
match *o {
lit(lit_expr) => {
let lit_ty = ty::node_id_to_type(bcx.tcx(), lit_expr.id);
- let (llval, _) = consts::const_expr(ccx, &*lit_expr, true);
+ let (llval, _, _) = consts::const_expr(ccx, &*lit_expr, true);
let lit_datum = immediate_rvalue(llval, lit_ty);
let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
return single_result(Result::new(bcx, lit_datum.val));
/// For structs, and struct-like parts of anything fancier.
pub struct Struct {
- // If the struct is DST, then we will not know its size. We must be careful
- // never to use such a struct when a fixed size is required (e.g., stack
- // allocation).
- pub size: Option<u64>,
- pub align: Option<u64>,
+ // If the struct is DST, then the size and alignment do not take into
+ // account the unsized fields of the struct.
+ pub size: u64,
+ pub align: u64,
+ pub sized: bool,
pub packed: bool,
pub fields: Vec<ty::t>
}
}).collect(), dtor);
}
_ => cx.sess().bug(format!("adt::represent_type called on non-ADT type: {}",
- ty_to_str(cx.tcx(), t)).as_slice())
+ ty_to_string(cx.tcx(), t)).as_slice())
}
}
impl Case {
fn is_zerolen(&self, cx: &CrateContext) -> bool {
- mk_struct(cx, self.tys.as_slice(), false).size.unwrap() == 0
+ mk_struct(cx, self.tys.as_slice(), false).size == 0
}
fn find_ptr(&self) -> Option<PointerField> {
// Box<T> could either be a thin or fat pointer depending on T
ty::ty_uniq(t) => match ty::get(t).sty {
- ty::ty_vec(_, None) | return Some(FatPointer(i, slice_elt_base)),
+ ty::ty_vec(_, None) => return Some(FatPointer(i, slice_elt_base)),
// Box<Trait> is a pair of pointers: the actual object and a vtable
ty::ty_trait(..) => return Some(FatPointer(i, trt_field_box)),
let lltys = tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect::<Vec<_>>();
let llty_rec = Type::struct_(cx, lltys.as_slice(), packed);
Struct {
- size: Some(machine::llsize_of_alloc(cx, llty_rec) /*bad*/as u64),
- align: Some(machine::llalign_of_min(cx, llty_rec) /*bad*/as u64),
+ size: machine::llsize_of_alloc(cx, llty_rec),
+ align: machine::llalign_of_min(cx, llty_rec),
+ sized: true,
packed: packed,
fields: Vec::from_slice(tys),
}
} else {
+ // Ignore any dynamically sized fields.
+ let lltys = tys.iter().filter(|&ty| ty::type_is_sized(cx.tcx(), *ty))
+ .map(|&ty| type_of::sizing_type_of(cx, ty)).collect::<Vec<_>>();
+ let llty_rec = Type::struct_(cx, lltys.as_slice(), packed);
Struct {
- size: None,
- align: None,
+ size: machine::llsize_of_alloc(cx, llty_rec),
+ align: machine::llalign_of_min(cx, llty_rec),
+ sized: false,
packed: packed,
fields: Vec::from_slice(tys),
- }
+ }
}
}
* unbounded recursion; see also the comments in `trans::type_of`.
*/
pub fn type_of(cx: &CrateContext, r: &Repr) -> Type {
- generic_type_of(cx, r, None, false)
+ generic_type_of(cx, r, None, false, false)
}
-pub fn sizing_type_of(cx: &CrateContext, r: &Repr) -> Type {
- generic_type_of(cx, r, None, true)
+// Pass dst=true if the type you are passing is a DST. Yes, we could figure
+// this out, but if you call this on an unsized type without realising it, you
+// are going to get the wrong type (it will not include the unsized parts of it).
+pub fn sizing_type_of(cx: &CrateContext, r: &Repr, dst: bool) -> Type {
+ generic_type_of(cx, r, None, true, dst)
}
pub fn incomplete_type_of(cx: &CrateContext, r: &Repr, name: &str) -> Type {
- generic_type_of(cx, r, Some(name), false)
+ generic_type_of(cx, r, Some(name), false, false)
}
pub fn finish_type_of(cx: &CrateContext, r: &Repr, llty: &mut Type) {
match *r {
CEnum(..) | General(..) | RawNullablePointer { .. } => { }
Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
- llty.set_struct_body(struct_llfields(cx, st, false).as_slice(),
+ llty.set_struct_body(struct_llfields(cx, st, false, false).as_slice(),
st.packed)
}
}
-fn generic_type_of(cx: &CrateContext, r: &Repr, name: Option<&str>, sizing: bool) -> Type {
+fn generic_type_of(cx: &CrateContext,
+ r: &Repr,
+ name: Option<&str>,
+ sizing: bool,
+ dst: bool) -> Type {
match *r {
CEnum(ity, _, _) => ll_inttype(cx, ity),
RawNullablePointer { nnty, .. } => type_of::sizing_type_of(cx, nnty),
Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => {
match name {
None => {
- Type::struct_(cx, struct_llfields(cx, st, sizing).as_slice(),
+ Type::struct_(cx, struct_llfields(cx, st, sizing, dst).as_slice(),
st.packed)
}
Some(name) => { assert_eq!(sizing, false); Type::named_struct(cx, name) }
// of the size.
//
// FIXME #10604: this breaks when vector types are present.
- let size = sts.iter().map(|st| st.size.unwrap()).max().unwrap();
- let most_aligned = sts.iter().max_by(|st| st.align.unwrap()).unwrap();
- let align = most_aligned.align.unwrap();
+ let size = sts.iter().map(|st| st.size).max().unwrap();
+ let most_aligned = sts.iter().max_by(|st| st.align).unwrap();
+ let align = most_aligned.align;
let discr_ty = ll_inttype(cx, ity);
let discr_size = machine::llsize_of_alloc(cx, discr_ty) as u64;
let align_units = (size + align - 1) / align - 1;
}
}
-fn struct_llfields(cx: &CrateContext, st: &Struct, sizing: bool) -> Vec<Type> {
+fn struct_llfields(cx: &CrateContext, st: &Struct, sizing: bool, dst: bool) -> Vec<Type> {
if sizing {
- st.fields.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
+ st.fields.iter().filter(|&ty| !dst || ty::type_is_sized(cx.tcx(), *ty))
+ .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
} else {
st.fields.iter().map(|&ty| type_of::type_of(cx, ty)).collect()
}
}
General(ity, ref cases, _) => {
let case = cases.get(discr as uint);
- let max_sz = cases.iter().map(|x| x.size.unwrap()).max().unwrap();
+ let max_sz = cases.iter().map(|x| x.size).max().unwrap();
let lldiscr = C_integral(ll_inttype(ccx, ity), discr as u64, true);
let contents = build_const_struct(ccx,
case,
(vec!(lldiscr)).append(vals).as_slice());
- C_struct(ccx, contents.append([padding(ccx, max_sz - case.size.unwrap())]).as_slice(),
+ C_struct(ccx, contents.append([padding(ccx, max_sz - case.size)]).as_slice(),
false)
}
Univariant(ref st, _dro) => {
for &ty in st.fields.iter() {
let llty = type_of::sizing_type_of(ccx, ty);
if !st.packed {
- let type_align = machine::llalign_of_min(ccx, llty) as u64;
+ let type_align = type_of::align_of(ccx, ty) as u64;
offset = roundup(offset, type_align);
}
offsets.push(offset);
offset += machine::llsize_of_alloc(ccx, val_ty(val)) as u64;
}
- assert!(offset <= st.size.unwrap());
- if offset != st.size.unwrap() {
- cfields.push(padding(ccx, st.size.unwrap() - offset));
+ assert!(st.sized && offset <= st.size);
+ if offset != st.size {
+ cfields.push(padding(ccx, st.size - offset));
}
cfields
use middle::trans::inline;
use middle::trans::intrinsic;
use middle::trans::machine;
-use middle::trans::machine::{llalign_of_min, llsize_of, llsize_of_real};
+use middle::trans::machine::{llsize_of, llsize_of_real};
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::tvec;
// Grab the TypeRef type of box_ptr_ty.
let box_ptr_ty = ty::mk_box(bcx.tcx(), t);
let llty = type_of(ccx, box_ptr_ty);
- let llalign = C_uint(ccx, llalign_of_min(ccx, llty) as uint);
+ let llalign = C_uint(ccx, type_of::align_of(ccx, box_ptr_ty) as uint);
// Allocate space:
let drop_glue = glue::get_drop_glue(ccx, t);
return cx;
}
+ let (data_ptr, info) = if ty::type_is_sized(cx.tcx(), t) {
+ (av, None)
+ } else {
+ let data = GEPi(cx, av, [0, abi::slice_elt_base]);
+ let info = GEPi(cx, av, [0, abi::slice_elt_len]);
+ (Load(cx, data), Some(Load(cx, info)))
+ };
+
let mut cx = cx;
match ty::get(t).sty {
ty::ty_struct(..) => {
let repr = adt::represent_type(cx.ccx(), t);
expr::with_field_tys(cx.tcx(), t, None, |discr, field_tys| {
for (i, field_ty) in field_tys.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &*repr, av, discr, i);
- cx = f(cx, llfld_a, field_ty.mt.ty);
+ let field_ty = field_ty.mt.ty;
+ let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);
+
+ let val = if ty::type_is_sized(cx.tcx(), field_ty) {
+ llfld_a
+ } else {
+ let boxed_ty = ty::mk_open(cx.tcx(), field_ty);
+ let scratch = datum::rvalue_scratch_datum(cx, boxed_ty, "__fat_ptr_iter");
+ Store(cx, llfld_a, GEPi(cx, scratch.val, [0, abi::slice_elt_base]));
+ Store(cx, info.unwrap(), GEPi(cx, scratch.val, [0, abi::slice_elt_len]));
+ scratch.val
+ };
+ cx = f(cx, val, field_ty);
}
})
}
let repr = adt::represent_type(cx.ccx(), t);
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id);
for (i, upvar) in upvars.iter().enumerate() {
- let llupvar = adt::trans_field_ptr(cx, &*repr, av, 0, i);
+ let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
cx = f(cx, llupvar, upvar.ty);
}
}
ty::ty_vec(_, Some(n)) => {
- let (base, len) = tvec::get_fixed_base_and_len(cx, av, n);
+ let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
let unit_ty = ty::sequence_element_type(cx.tcx(), t);
cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
}
ty::ty_tup(ref args) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, arg) in args.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &*repr, av, 0, i);
+ let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
cx = f(cx, llfld_a, *arg);
}
}
let variant_cx =
iter_variant(variant_cx,
&*repr,
- av,
+ data_ptr,
&**variant,
substs,
|x,y,z| f(x,y,z));
// FIXME(15064) Lang item methods may (in the reflect case) not have proper
// types, so doing an attribute lookup will fail.
let attributes = if is_lang_item {
- Vec::new()
+ llvm::AttrBuilder::new()
} else {
get_fn_llvm_attributes(bcx.ccx(), fn_ty)
};
if ty::type_is_structural(t) {
let llty = type_of::type_of(ccx, t);
let llsz = llsize_of(ccx, llty);
- let llalign = llalign_of_min(ccx, llty);
+ let llalign = type_of::align_of(ccx, t);
call_memcpy(bcx, dst, src, llsz, llalign as u32);
} else {
store_ty(bcx, Load(bcx, src), dst, t);
if cx.unreachable.get() { return; }
let _icx = push_ctxt("zero_mem");
let bcx = cx;
- let ccx = cx.ccx();
- let llty = type_of::type_of(ccx, t);
- memzero(&B(bcx), llptr, llty);
+ memzero(&B(bcx), llptr, t);
}
// Always use this function instead of storing a zero constant to the memory
// allocation for large data structures, and the generated code will be
// awful. (A telltale sign of this is large quantities of
// `mov [byte ptr foo],0` in the generated code.)
-fn memzero(b: &Builder, llptr: ValueRef, ty: Type) {
+fn memzero(b: &Builder, llptr: ValueRef, ty: ty::t) {
let _icx = push_ctxt("memzero");
let ccx = b.ccx;
+ let llty = type_of::type_of(ccx, ty);
+
let intrinsic_key = match ccx.sess().targ_cfg.arch {
X86 | Arm | Mips | Mipsel => "llvm.memset.p0i8.i32",
X86_64 => "llvm.memset.p0i8.i64"
let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
let llzeroval = C_u8(ccx, 0);
- let size = machine::llsize_of(ccx, ty);
- let align = C_i32(ccx, llalign_of_min(ccx, ty) as i32);
+ let size = machine::llsize_of(ccx, llty);
+ let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
let volatile = C_bool(ccx, false);
b.call(llintrinsicfn, [llptr, llzeroval, size, align, volatile], None);
}
Alloca(cx, ty, name)
}
-pub fn alloca_zeroed(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn alloca_zeroed(cx: &Block, ty: ty::t, name: &str) -> ValueRef {
+ let llty = type_of::type_of(cx.ccx(), ty);
if cx.unreachable.get() {
unsafe {
- return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
+ return llvm::LLVMGetUndef(llty.ptr_to().to_ref());
}
}
- let p = alloca_no_lifetime(cx, ty, name);
+ let p = alloca_no_lifetime(cx, llty, name);
let b = cx.fcx.ccx.builder();
b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
memzero(&b, p, ty);
for j in range(0, args.len()) {
let tuple_element_type = untupled_arg_types[j];
let tuple_element_datum =
- tuple_datum.get_element(tuple_element_type,
+ tuple_datum.get_element(bcx,
+ tuple_element_type,
|llval| GEPi(bcx, llval, [0, j]));
let tuple_element_datum = tuple_element_datum.to_expr_datum();
let tuple_element_datum =
let repr_ptr = &*repr;
for i in range(0, field_types.len()) {
let arg_datum = tuple_lvalue_datum.get_element(
+ bcx,
*field_types.get(i),
|srcval| {
adt::trans_field_ptr(bcx, repr_ptr, srcval, 0, i)
if simple && !ty::type_is_fat_ptr(tcx, ty) {
return true;
}
+ if !ty::type_is_sized(tcx, ty) {
+ return false;
+ }
match ty::get(ty).sty {
ty::ty_bot => true,
ty::ty_struct(..) | ty::ty_enum(..) | ty::ty_tup(..) |
(v, llunitty, inlineable.iter().fold(true, |a, &b| a && b))
}
-pub fn const_addr_of(cx: &CrateContext, cv: ValueRef) -> ValueRef {
+pub fn const_addr_of(cx: &CrateContext, cv: ValueRef, mutbl: ast::Mutability) -> ValueRef {
unsafe {
let gv = "const".with_c_str(|name| {
llvm::LLVMAddGlobal(cx.llmod, val_ty(cv).to_ref(), name)
}
_ => cx.sess().span_bug(e.span,
format!("unimplemented type in const unsize: {}",
- ty_to_str(cx.tcx(), ty)).as_slice())
+ ty_to_string(cx.tcx(), ty)).as_slice())
}
}
_ => {
_ => cx.sess().span_bug(base.span,
format!("index-expr base must be a vector \
or string type, found {}",
- ty_to_str(cx.tcx(), bt)).as_slice())
+ ty_to_string(cx.tcx(), bt)).as_slice())
},
ty::ty_rptr(_, mt) => match ty::get(mt.ty).sty {
ty::ty_vec(_, Some(u)) => {
_ => cx.sess().span_bug(base.span,
format!("index-expr base must be a vector \
or string type, found {}",
- ty_to_str(cx.tcx(), bt)).as_slice())
+ ty_to_string(cx.tcx(), bt)).as_slice())
},
_ => cx.sess().span_bug(base.span,
format!("index-expr base must be a vector \
or string type, found {}",
- ty_to_str(cx.tcx(), bt)).as_slice())
+ ty_to_string(cx.tcx(), bt)).as_slice())
};
let len = llvm::LLVMConstIntGetZExtValue(len) as u64;
let filename = C_str_slice(ccx, filename);
let line = C_int(ccx, loc.line as int);
let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
- let expr_file_line = consts::const_addr_of(ccx, expr_file_line_const);
+ let expr_file_line = consts::const_addr_of(ccx, expr_file_line_const, ast::MutImmutable);
let args = vec!(expr_file_line);
let did = langcall(bcx, Some(sp), "", FailFnLangItem);
let bcx = callee::trans_lang_call(bcx,
let filename = C_str_slice(ccx, filename);
let line = C_int(ccx, loc.line as int);
let file_line_const = C_struct(ccx, &[filename, line], false);
- let file_line = consts::const_addr_of(ccx, file_line_const);
+ let file_line = consts::const_addr_of(ccx, file_line_const, ast::MutImmutable);
let args = vec!(file_line, index, len);
let did = langcall(bcx, Some(sp), "", FailBoundsCheckFnLangItem);
let bcx = callee::trans_lang_call(bcx,
use llvm::ValueRef;
use middle::trans::base::*;
+use middle::trans::build::Load;
use middle::trans::common::*;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
* does not dominate the end of `scope`.
*/
- let llty = type_of::type_of(bcx.ccx(), ty);
let scratch = if zero {
- alloca_zeroed(bcx, llty, name)
+ alloca_zeroed(bcx, ty, name)
} else {
+ let llty = type_of::type_of(bcx.ccx(), ty);
alloca(bcx, llty, name)
};
}
_ => bcx.tcx().sess.bug(
format!("Unexpected unsized type in get_element: {}",
- bcx.ty_to_str(self.ty)).as_slice())
+ bcx.ty_to_string(self.ty)).as_slice())
};
Datum {
val: val,
## Recursive Types
Some kinds of types, such as structs and enums can be recursive. That means that
-the type definition of some type X refers to some other type which in turn (transitively)
-refers to X. This introduces cycles into the type referral graph. A naive algorithm doing
-an on-demand, depth-first traversal of this graph when describing types, can get trapped
-in an endless loop when it reaches such a cycle.
+the type definition of some type X refers to some other type which in turn
+(transitively) refers to X. This introduces cycles into the type referral graph.
+A naive algorithm doing an on-demand, depth-first traversal of this graph when
+describing types, can get trapped in an endless loop when it reaches such a
+cycle.
For example, the following simple type for a singly-linked list...
false);
}
+// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill-
+// defined concept. For the case of an actual trait pointer (i.e., Box<Trait>,
+// &Trait), trait_object_type should be the whole thing (e.g., Box<Trait>) and
+// trait_type should be the actual trait (e.g., Trait). Where the trait is part
+// of a DST struct, there is no trait_object_type and the results of this
+// function will be a little bit weird.
fn trait_pointer_metadata(cx: &CrateContext,
- trait_object_type: ty::t,
+ trait_type: ty::t,
+ trait_object_type: Option<ty::t>,
unique_type_id: UniqueTypeId)
-> DIType {
// The implementation provided here is a stub. It makes sure that the trait
// type is assigned the correct name, size, namespace, and source location.
// But it does not describe the trait's methods.
- let def_id = match ty::get(trait_object_type).sty {
+ let def_id = match ty::get(trait_type).sty {
ty::ty_trait(box ty::TyTrait { def_id, .. }) => def_id,
_ => {
- let pp_type_name = ppaux::ty_to_string(cx.tcx(), trait_object_type);
+ let pp_type_name = ppaux::ty_to_string(cx.tcx(), trait_type);
cx.sess().bug(format!("debuginfo: Unexpected trait-object type in \
trait_pointer_metadata(): {}",
pp_type_name.as_slice()).as_slice());
}
};
- let trait_pointer_type_name =
+ let trait_object_type = trait_object_type.unwrap_or(trait_type);
+ let trait_type_name =
compute_debuginfo_type_name(cx, trait_object_type, false);
let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id);
let trait_llvm_type = type_of::type_of(cx, trait_object_type);
composite_type_metadata(cx,
- trait_pointer_llvm_type,
- trait_pointer_type_name.as_slice(),
+ trait_llvm_type,
+ trait_type_name.as_slice(),
unique_type_id,
[],
containing_scope,
ty::ty_str => fixed_vec_metadata(cx, unique_type_id, ty::mk_i8(), 0, usage_site_span),
ty::ty_trait(..) => {
MetadataCreationResult::new(
- trait_pointer_metadata(cx, t, unique_type_id),
+ trait_pointer_metadata(cx, t, None, unique_type_id),
false)
}
ty::ty_uniq(ty) | ty::ty_ptr(ty::mt{ty, ..}) | ty::ty_rptr(_, ty::mt{ty, ..}) => {
}
ty::ty_trait(..) => {
MetadataCreationResult::new(
- trait_pointer_metadata(cx, ty, unique_type_id),
+ trait_pointer_metadata(cx, ty, Some(t), unique_type_id),
false)
}
_ => {
push_debuginfo_type_name(cx, inner_type, true, output);
},
- ty::ty_vec(ty::mt { ty: inner_type, .. }, optional_length) => {
+ ty::ty_vec(inner_type, optional_length) => {
output.push_char('[');
push_debuginfo_type_name(cx, inner_type, true, output);
}
ty::ty_err |
ty::ty_infer(_) |
+ ty::ty_open(_) |
ty::ty_param(_) => {
cx.sess().bug(format!("debuginfo: Trying to create type name for \
unexpected type: {}", ppaux::ty_to_string(cx.tcx(), t)).as_slice());
use util::common::indenter;
use util::ppaux::Repr;
use util::nodemap::NodeMap;
-use middle::trans::machine::{llalign_of_min, llsize_of, llsize_of_alloc};
+use middle::trans::machine::{llsize_of, llsize_of_alloc};
use middle::trans::type_::Type;
use syntax::ast;
};
if autoderefs > 0 {
+ let lval = unpack_datum!(bcx,
+ datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
+
datum = unpack_datum!(
- bcx, deref_multiple(bcx, expr, datum, autoderefs));
+ bcx, deref_multiple(bcx, expr, lval.to_expr_datum(), autoderefs));
}
match adj.autoref {
-> DatumBlock<'a, Expr> {
if !ty::type_is_sized(bcx.tcx(), datum.ty) {
debug!("Taking address of unsized type {}",
- bcx.ty_to_str(datum.ty));
+ bcx.ty_to_string(datum.ty));
ref_fat_ptr(bcx, expr, datum)
} else {
debug!("Taking address of sized type {}",
- bcx.ty_to_str(datum.ty));
+ bcx.ty_to_string(datum.ty));
auto_ref(bcx, datum, expr)
}
}
// Retrieve the information we are losing (making dynamic) in an unsizing
// adjustment.
+ // When making a dtor, we need to do different things depending on the
+ // ownership of the object. mk_ty is a function for turning unsized_type
+ // into a type to be destructed. If we want to end up with a Box pointer,
+ // then mk_ty should make a Box pointer (T -> Box<T>); if we want a
+ // borrowed reference then it should be T -> &T.
fn unsized_info<'a>(bcx: &'a Block<'a>,
kind: &ty::UnsizeKind,
id: ast::NodeId,
- sized_ty: ty::t) -> ValueRef {
+ unsized_ty: ty::t,
+ mk_ty: |ty::t| -> ty::t) -> ValueRef {
match kind {
&ty::UnsizeLength(len) => C_uint(bcx.ccx(), len),
- &ty::UnsizeStruct(box ref k, tp_index) => match ty::get(sized_ty).sty {
+ &ty::UnsizeStruct(box ref k, tp_index) => match ty::get(unsized_ty).sty {
ty::ty_struct(_, ref substs) => {
- let ty_substs = substs.types.get_vec(subst::TypeSpace);
- let sized_ty = ty_substs.get(tp_index);
- unsized_info(bcx, k, id, *sized_ty)
+ let ty_substs = substs.types.get_slice(subst::TypeSpace);
+ // The dtor for a field treats it like a value, so mk_ty
+ // should just be the identity function.
+ unsized_info(bcx, k, id, ty_substs[tp_index], |t| t)
}
_ => bcx.sess().bug(format!("UnsizeStruct with bad sty: {}",
- bcx.ty_to_str(sized_ty)).as_slice())
+ bcx.ty_to_string(unsized_ty)).as_slice())
},
&ty::UnsizeVtable(..) =>
PointerCast(bcx,
- meth::vtable_ptr(bcx, id, sized_ty),
+ meth::vtable_ptr(bcx, id, mk_ty(unsized_ty)),
Type::vtable_ptr(bcx.ccx()))
}
}
&ty::UnsizeVtable(..) =>
|_bcx, val| PointerCast(bcx, val, Type::i8p(bcx.ccx()))
};
- let info = |bcx, _val| unsized_info(bcx, k, expr.id, datum_ty);
+ let info = |bcx, _val| unsized_info(bcx,
+ k,
+ expr.id,
+ ty::deref_or_dont(datum_ty),
+ |t| ty::mk_rptr(tcx,
+ ty::ReStatic,
+ ty::mt{
+ ty: t,
+ mutbl: ast::MutImmutable
+ }));
into_fat_ptr(bcx, expr, datum, dest_ty, base, info)
}
let unboxed_ty = match ty::get(datum_ty).sty {
ty::ty_uniq(t) => t,
_ => bcx.sess().bug(format!("Expected ty_uniq, found {}",
- bcx.ty_to_str(datum_ty)).as_slice())
+ bcx.ty_to_string(datum_ty)).as_slice())
};
let result_ty = ty::mk_uniq(tcx, ty::unsize_ty(tcx, unboxed_ty, k, expr.span));
let lval = unpack_datum!(bcx,
datum.to_lvalue_datum(bcx, "unsize_unique_expr", expr.id));
- let scratch = rvalue_scratch_datum(bcx, result_ty, "__fat_ptr");
+ let scratch = rvalue_scratch_datum(bcx, result_ty, "__uniq_fat_ptr");
let llbox_ty = type_of::type_of(bcx.ccx(), datum_ty);
let base = PointerCast(bcx, get_dataptr(bcx, scratch.val), llbox_ty.ptr_to());
bcx = lval.store_to(bcx, base);
- let info = unsized_info(bcx, k, expr.id, unboxed_ty);
+ let info = unsized_info(bcx, k, expr.id, unboxed_ty, |t| ty::mk_uniq(tcx, t));
Store(bcx, info, get_len(bcx, scratch.val));
+ let scratch = unpack_datum!(bcx,
+ scratch.to_expr_datum().to_lvalue_datum(bcx,
+ "fresh_uniq_fat_ptr",
+ expr.id));
+
DatumBlock::new(bcx, scratch.to_expr_datum())
}
ast::ExprField(ref base, ident, _) => {
trans_rec_field(bcx, &**base, ident.node)
}
- ast::ExprIndex(base, idx) => {
- trans_index(bcx, expr.span, &**base, &**idx, MethodCall::expr(expr.id))
+ ast::ExprIndex(ref base, ref idx) => {
+ trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
}
ast::ExprBox(_, ref contents) => {
// Special case for `Box<T>` and `Gc<T>`
let contents_ty = expr_ty(bcx, &**contents);
match ty::get(box_ty).sty {
ty::ty_uniq(..) => {
- match contents.node {
- ast::ExprRepeat(..) | ast::ExprVec(..) => {
- // Special case for owned vectors.
- fcx.push_ast_cleanup_scope(contents.id);
- let datum = unpack_datum!(
- bcx, tvec::trans_uniq_vec(bcx, expr, &**contents));
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, contents.id);
- DatumBlock::new(bcx, datum)
- }
- _ => {
- trans_uniq_expr(bcx, box_ty, &**contents, contents_ty)
- }
+ let is_vec = match contents.node {
+ ast::ExprRepeat(..) | ast::ExprVec(..) => true,
+ ast::ExprLit(lit) => match lit.node {
+ ast::LitStr(..) => true,
+ _ => false
+ },
+ _ => false
+ };
+
+ if is_vec {
+ // Special case for owned vectors.
+ fcx.push_ast_cleanup_scope(contents.id);
+ let datum = unpack_datum!(
+ bcx, tvec::trans_uniq_vec(bcx, expr, &**contents));
+ bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, contents.id);
+ DatumBlock::new(bcx, datum)
+ } else {
+ trans_uniq_expr(bcx, box_ty, &**contents, contents_ty)
}
}
ty::ty_box(..) => {
if ty::type_is_sized(bcx.tcx(), d.ty) {
DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
} else {
- debug!("nrc: {}", bcx.ty_to_str(d.ty))
let scratch = rvalue_scratch_datum(bcx, ty::mk_open(bcx.tcx(), d.ty), "");
Store(bcx, d.val, get_dataptr(bcx, scratch.val));
let info = Load(bcx, get_len(bcx, base_datum.val));
}
fn trans_index<'a>(bcx: &'a Block<'a>,
- sp: codemap::Span,
+ index_expr: &ast::Expr,
base: &ast::Expr,
idx: &ast::Expr,
method_call: MethodCall)
let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
for &(i, t) in base.fields.iter() {
let datum = base_datum.get_element(
- t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
+ bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
+ assert!(ty::type_is_sized(bcx.tcx(), datum.ty));
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
bcx = datum.store_to(bcx, dest);
}
fcx.schedule_drop_mem(scope, dest, e_ty);
}
- for base in optbase.iter() {
- // FIXME #6573: is it sound to use the destination's repr on the base?
- // And, would it ever be reasonable to be here with discr != 0?
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
- for &(i, t) in base.fields.iter() {
- let datum = base_datum.get_element(
- bcx,
- t,
- |srcval| adt::trans_field_ptr(bcx, repr, srcval, discr, i));
- assert!(ty::type_is_sized(bcx.tcx(), datum.ty));
- let dest = adt::trans_field_ptr(bcx, repr, addr, discr, i);
- bcx = datum.store_to(bcx, dest);
- }
- }
-
adt::trans_set_discr(bcx, &*repr, addr, discr);
fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-> DatumBlock<'a, Expr> {
let _icx = push_ctxt("trans_uniq_expr");
let fcx = bcx.fcx;
+ assert!(ty::type_is_sized(bcx.tcx(), contents_ty));
let llty = type_of::type_of(bcx.ccx(), contents_ty);
let size = llsize_of(bcx.ccx(), llty);
- let align = C_uint(bcx.ccx(), llalign_of_min(bcx.ccx(), llty) as uint);
+ let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty) as uint);
let llty_ptr = llty.ptr_to();
let Result { bcx, val } = malloc_raw_dyn(bcx, llty_ptr, box_ty, size, align);
// Unique boxes do not allocate for zero-size types. The standard library
match ty::get(sub_datum.ty).sty {
ty::ty_open(_) => {
// Opened DST value, close to a fat pointer
- debug!("Closing fat pointer {}", bcx.ty_to_str(sub_datum.ty));
+ debug!("Closing fat pointer {}", bcx.ty_to_string(sub_datum.ty));
let scratch = rvalue_scratch_datum(bcx,
ty::close_type(bcx.tcx(), sub_datum.ty),
use llvm;
use middle::lang_items::{FreeFnLangItem, ExchangeFreeFnLangItem};
use middle::subst;
+use middle::subst::Subst;
use middle::trans::adt;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::common::*;
+use middle::trans::datum;
use middle::trans::expr;
use middle::trans::machine::*;
use middle::trans::reflect;
use middle::trans::tvec;
use middle::trans::type_::Type;
-use middle::trans::type_of::{type_of, sizing_type_of};
+use middle::trans::type_of::{type_of, sizing_type_of, align_of};
use middle::ty;
use util::ppaux::ty_to_short_str;
use util::ppaux;
Some(expr::Ignore)).bcx
}
-pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
- align: u64) -> &'a Block<'a> {
+// Emits a call to the `exchange_free` lang item to release the allocation
+// pointed to by `v`. The size and alignment are runtime `ValueRef`s so
+// that DSTs — whose layout is only known dynamically — can be freed too;
+// the statically-sized case goes through the `trans_exchange_free` wrapper.
+fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
+ align: ValueRef) -> &'a Block<'a> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx,
langcall(cx, None, "", ExchangeFreeFnLangItem),
- [PointerCast(cx, v, Type::i8p(ccx)), C_uint(ccx, size as uint), C_uint(ccx, align as uint)],
+ [PointerCast(cx, v, Type::i8p(ccx)), size, align],
Some(expr::Ignore)).bcx
}
+// Frees `v` on the exchange heap when its size and alignment are known at
+// compile time: wraps the constants as LLVM immediates and defers to
+// `trans_exchange_free_internal`.
+pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
+ align: u64) -> &'a Block<'a> {
+ trans_exchange_free_internal(cx,
+ v,
+ C_uint(cx.ccx(), size as uint),
+ C_uint(cx.ccx(), align as uint))
+}
+
pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
content_ty: ty::t) -> &'a Block<'a> {
+ assert!(ty::type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
// `Box<ZeroSizeType>` does not allocate.
if content_size != 0 {
- let content_align = llalign_of_min(bcx.ccx(), sizing_type);
+ let content_align = align_of(bcx.ccx(), content_ty);
trans_exchange_free(bcx, ptr, content_size, content_align)
} else {
bcx
pub fn get_drop_glue_type(ccx: &CrateContext, t: ty::t) -> ty::t {
let tcx = ccx.tcx();
+ // Even if there is no dtor for t, there might be one deeper down and we
+ // might need to pass in the vtable ptr.
+ if !ty::type_is_sized(tcx, t) {
+ return t
+ }
if !ty::type_needs_drop(tcx, t) {
return ty::mk_i8();
}
}
pub fn get_drop_glue(ccx: &CrateContext, t: ty::t) -> ValueRef {
+ debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
let t = get_drop_glue_type(ccx, t);
+ debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
match ccx.drop_glues.borrow().find(&t) {
Some(&glue) => return glue,
_ => { }
}
- let llfnty = Type::glue_fn(ccx, type_of(ccx, t).ptr_to());
+ let llty = if ty::type_is_sized(ccx.tcx(), t) {
+ type_of(ccx, t).ptr_to()
+ } else {
+ type_of(ccx, ty::mk_uniq(ccx.tcx(), t)).ptr_to()
+ };
+
+ let llfnty = Type::glue_fn(ccx, llty);
let glue = declare_generic_glue(ccx, t, llfnty, "drop");
ccx.drop_glues.borrow_mut().insert(t, glue);
substs: &subst::Substs)
-> &'a Block<'a> {
let repr = adt::represent_type(bcx.ccx(), t);
- let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, v0));
+ let struct_data = if ty::type_is_sized(bcx.tcx(), t) {
+ v0
+ } else {
+ let llval = GEPi(bcx, v0, [0, abi::slice_elt_base]);
+ Load(bcx, llval)
+ };
+ let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data));
with_cond(bcx, load_ty(bcx, drop_flag.val, ty::mk_bool()), |cx| {
trans_struct_drop(cx, t, v0, dtor_did, class_did, substs)
})
let dtor_addr = get_res_dtor(bcx.ccx(), dtor_did, t,
class_did, substs);
- // The second argument is the "self" argument for drop
+ // The first argument is the "self" argument for drop
let params = unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(dtor_addr));
ty.element_type().func_params()
};
- adt::fold_variants(bcx, &*repr, v0, |variant_cx, st, value| {
+ let fty = ty::lookup_item_type(bcx.tcx(), dtor_did).ty.subst(bcx.tcx(), substs);
+ let self_ty = match ty::get(fty).sty {
+ ty::ty_bare_fn(ref f) => {
+ assert!(f.sig.inputs.len() == 1);
+ f.sig.inputs[0]
+ }
+ _ => bcx.sess().bug(format!("Expected function type, found {}",
+ bcx.ty_to_string(fty)).as_slice())
+ };
+
+ let (struct_data, info) = if ty::type_is_sized(bcx.tcx(), t) {
+ (v0, None)
+ } else {
+ let data = GEPi(bcx, v0, [0, abi::slice_elt_base]);
+ let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
+ (Load(bcx, data), Some(Load(bcx, info)))
+ };
+
+ adt::fold_variants(bcx, &*repr, struct_data, |variant_cx, st, value| {
// Be sure to put all of the fields into a scope so we can use an invoke
// instruction to call the user destructor but still call the field
// destructors if the user destructor fails.
// Class dtors have no explicit args, so the params should
// just consist of the environment (self).
assert_eq!(params.len(), 1);
- let self_arg = PointerCast(variant_cx, value, *params.get(0));
+ let self_arg = if ty::type_is_fat_ptr(bcx.tcx(), self_ty) {
+ // The dtor expects a fat pointer, so make one, even if we have to fake it.
+ let boxed_ty = ty::mk_open(bcx.tcx(), t);
+ let scratch = datum::rvalue_scratch_datum(bcx, boxed_ty, "__fat_ptr_drop_self");
+ Store(bcx, value, GEPi(bcx, scratch.val, [0, abi::slice_elt_base]));
+ Store(bcx,
+ // If we just had a thin pointer, make a fat pointer by sticking
+ // null where we put the unsizing info. This works because t
+ // is a sized type, so we will only unpack the fat pointer, never
+ // use the fake info.
+ info.unwrap_or(C_null(Type::i8p(bcx.ccx()))),
+ GEPi(bcx, scratch.val, [0, abi::slice_elt_len]));
+ PointerCast(variant_cx, scratch.val, *params.get(0))
+ } else {
+ PointerCast(variant_cx, value, *params.get(0))
+ };
let args = vec!(self_arg);
// Add all the fields as a value which needs to be cleaned at the end of
// the order in which fields get dropped.
for (i, ty) in st.fields.iter().enumerate().rev() {
let llfld_a = adt::struct_field_ptr(variant_cx, &*st, value, i, false);
+
+ let val = if ty::type_is_sized(bcx.tcx(), *ty) {
+ llfld_a
+ } else {
+ let boxed_ty = ty::mk_open(bcx.tcx(), *ty);
+ let scratch = datum::rvalue_scratch_datum(bcx, boxed_ty, "__fat_ptr_drop_field");
+ Store(bcx, llfld_a, GEPi(bcx, scratch.val, [0, abi::slice_elt_base]));
+ Store(bcx, info.unwrap(), GEPi(bcx, scratch.val, [0, abi::slice_elt_len]));
+ scratch.val
+ };
variant_cx.fcx.schedule_drop_mem(cleanup::CustomScope(field_scope),
- llfld_a, *ty);
+ val, *ty);
}
let dtor_ty = ty::mk_ctor_fn(variant_cx.tcx(), ast::DUMMY_NODE_ID,
[get_drop_glue_type(bcx.ccx(), t)], ty::mk_nil());
- let (_, variant_cx) = invoke(variant_cx, dtor_addr, args, dtor_ty, None);
+ let (_, variant_cx) = invoke(variant_cx, dtor_addr, args, dtor_ty, None, false);
variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope);
variant_cx
})
}
+// Computes the dynamic size and alignment of a (possibly unsized) value of
+// type `t`, given `info` — the "extra" word of a fat pointer to it (the
+// vtable pointer for trait objects, the element count for slices).
+// Returns `(size, align)` as runtime `ValueRef`s.
+fn size_and_align_of_dst<'a>(bcx: &'a Block<'a>, t :ty::t, info: ValueRef) -> (ValueRef, ValueRef) {
+    debug!("calculate size of DST: {}; with lost info: {}",
+           bcx.ty_to_string(t), bcx.val_to_string(info));
+    if ty::type_is_sized(bcx.tcx(), t) {
+        // Sized types ignore `info`; their layout is statically known.
+        let sizing_type = sizing_type_of(bcx.ccx(), t);
+        let size = C_uint(bcx.ccx(), llsize_of_alloc(bcx.ccx(), sizing_type) as uint);
+        let align = C_uint(bcx.ccx(), align_of(bcx.ccx(), t) as uint);
+        return (size, align);
+    }
+    match ty::get(t).sty {
+        ty::ty_struct(id, ref substs) => {
+            let ccx = bcx.ccx();
+            // First get the size of all statically known fields.
+            // Don't use type_of::sizing_type_of because that expects t to be sized.
+            assert!(!ty::type_is_simd(bcx.tcx(), t));
+            let repr = adt::represent_type(ccx, t);
+            let sizing_type = adt::sizing_type_of(ccx, &*repr, true);
+            let sized_size = C_uint(ccx, llsize_of_alloc(ccx, sizing_type) as uint);
+            let sized_align = C_uint(ccx, llalign_of_min(ccx, sizing_type) as uint);
+
+            // Recurse to get the size of the dynamically sized field (must be
+            // the last field).
+            let fields = ty::struct_fields(bcx.tcx(), id, substs);
+            let last_field = fields[fields.len()-1];
+            let field_ty = last_field.mt.ty;
+            let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
+
+            // Return the sum of sizes and max of aligns. Note IntUGT: a
+            // struct's alignment is the *maximum* over its fields, so keep
+            // sized_align only when it is strictly greater.
+            let size = Add(bcx, sized_size, unsized_size);
+            let align = Select(bcx,
+                               ICmp(bcx, llvm::IntUGT, sized_align, unsized_align),
+                               sized_align,
+                               unsized_align);
+            (size, align)
+        }
+        ty::ty_trait(..) => {
+            // info points to the vtable and the second entry in the vtable is the
+            // dynamic size of the object (the third is its alignment).
+            let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to());
+            let size_ptr = GEPi(bcx, info, [1u]);
+            let align_ptr = GEPi(bcx, info, [2u]);
+            (Load(bcx, size_ptr), Load(bcx, align_ptr))
+        }
+        ty::ty_vec(unit_ty, None) => {
+            // The info in this case is the length of the vec, so the size is
+            // that times the unit size. The alignment is that of the element
+            // type, not a hard-coded constant.
+            let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
+            let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
+            let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
+            (Mul(bcx, info, C_uint(bcx.ccx(), unit_size as uint)),
+             C_uint(bcx.ccx(), unit_align as uint))
+        }
+        _ => bcx.sess().bug(format!("Unexpected unsized type, found {}",
+                                    bcx.ty_to_string(t)).as_slice())
+    }
+}
+
fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
ty::ty_trait(..) => {
let lluniquevalue = GEPi(bcx, v0, [0, abi::trt_field_box]);
// Only drop the value when it is non-null
- with_cond(bcx, IsNotNull(bcx, Load(bcx, lluniquevalue)), |bcx| {
+ let concrete_ptr = Load(bcx, lluniquevalue);
+ with_cond(bcx, IsNotNull(bcx, concrete_ptr), |bcx| {
let dtor_ptr = Load(bcx, GEPi(bcx, v0, [0, abi::trt_field_vtable]));
let dtor = Load(bcx, dtor_ptr);
Call(bcx,
bcx
})
}
+ ty::ty_struct(..) if !ty::type_is_sized(bcx.tcx(), content_ty) => {
+ let llval = GEPi(bcx, v0, [0, abi::slice_elt_base]);
+ let llbox = Load(bcx, llval);
+ let not_null = IsNotNull(bcx, llbox);
+ with_cond(bcx, not_null, |bcx| {
+ let bcx = drop_ty(bcx, v0, content_ty);
+ let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
+ let info = Load(bcx, info);
+ let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
+ trans_exchange_free_internal(bcx, llbox, llsize, llalign)
+ })
+ }
_ => {
- let llval = if ty::type_is_sized(bcx.tcx(), content_ty) {
- v0
- } else {
- // The Box is a fat pointer
- GEPi(bcx, v0, [0, abi::trt_field_box])
- };
+ assert!(ty::type_is_sized(bcx.tcx(), content_ty));
+ let llval = v0;
let llbox = Load(bcx, llval);
let not_null = IsNotNull(bcx, llbox);
with_cond(bcx, not_null, |bcx| {
let tcx = bcx.tcx();
match ty::ty_dtor(tcx, did) {
ty::TraitDtor(dtor, true) => {
- trans_struct_drop_flag(bcx, t, v0, dtor, did, substs)
+ // FIXME(16758) Since the struct is unsized, it is hard to
+ // find the drop flag (which is at the end of the struct).
+ // Lets just ignore the flag and pretend everything will be
+ // OK.
+ if ty::type_is_sized(bcx.tcx(), t) {
+ trans_struct_drop_flag(bcx, t, v0, dtor, did, substs)
+ } else {
+ // Give the user a heads up that we are doing something
+ // stupid and dangerous.
+ bcx.sess().warn(format!("Ignoring drop flag in destructor for {}\
+ because the struct is unsized. See issue\
+ #16758",
+ bcx.ty_to_string(t)).as_slice());
+ trans_struct_drop(bcx, t, v0, dtor, did, substs)
+ }
}
ty::TraitDtor(dtor, false) => {
trans_struct_drop(bcx, t, v0, dtor, did, substs)
trans_exchange_free(bcx, env, 0, 8)
})
}
+ ty::ty_trait(..) => {
+ // No need to do a null check here (as opposed to the Box<Trait> case
+ // above), because this happens for a trait field in an unsized
+ // struct. If anything is null, it is the whole struct and we won't
+ // get here.
+ let lluniquevalue = GEPi(bcx, v0, [0, abi::trt_field_box]);
+ let dtor_ptr = Load(bcx, GEPi(bcx, v0, [0, abi::trt_field_vtable]));
+ let dtor = Load(bcx, dtor_ptr);
+ Call(bcx,
+ dtor,
+ [PointerCast(bcx, Load(bcx, lluniquevalue), Type::i8p(bcx.ccx()))],
+ None);
+ bcx
+ }
+ ty::ty_vec(ty, None) => tvec::make_drop_glue_unboxed(bcx, v0, ty),
_ => {
+ assert!(ty::type_is_sized(bcx.tcx(), t));
if ty::type_needs_drop(bcx.tcx(), t) &&
ty::type_is_structural(t) {
iter_structural_ty(bcx, v0, t, drop_ty)
ccx,
t,
format!("glue_{}", name).as_slice());
- debug!("{} is for type {}", fn_nm, ppaux::ty_to_string(ccx.tcx(), t));
let llfn = decl_cdecl_fn(ccx, fn_nm.as_slice(), llfnty, ty::mk_nil());
note_unique_llvm_symbol(ccx, fn_nm);
return llfn;
"s"
}).as_slice());
}
+ if ty::type_is_fat_ptr(ccx.tcx(), transmute_restriction.to) ||
+ ty::type_is_fat_ptr(ccx.tcx(), transmute_restriction.from) {
+ ccx.sess()
+ .add_lint(::lint::builtin::TRANSMUTE_FAT_PTR,
+ transmute_restriction.id,
+ transmute_restriction.span,
+ format!("Transmuting fat pointer types; {} to {}.\
+ Beware of relying on the compiler's representation",
+ ty_to_string(ccx.tcx(), transmute_restriction.from),
+ ty_to_string(ccx.tcx(), transmute_restriction.to)));
+ }
}
ccx.sess().abort_if_errors();
}
}
(_, "min_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
- let lltp_ty = type_of::type_of(ccx, tp_ty);
- C_uint(ccx, machine::llalign_of_min(ccx, lltp_ty) as uint)
+ C_uint(ccx, type_of::align_of(ccx, tp_ty) as uint)
}
(_, "pref_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
tp_ty: ty::t, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
- let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
+ let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
let name = if allow_overlap {
dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
- let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
+ let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
"llvm.memset.p0i8.i32"
use middle::trans::expr::{SaveIn, Ignore};
use middle::trans::expr;
use middle::trans::glue;
+use middle::trans::machine;
use middle::trans::monomorphize;
use middle::trans::type_::Type;
use middle::trans::type_of::*;
use syntax::{ast, ast_map, visit};
use syntax::ast_util::PostExpansionMethod;
+// The first entries of every vtable are, in order: the drop_glue pointer,
+// the object's size, and its alignment. Actual trait methods start after
+// these three slots, so method lookup offsets by VTABLE_OFFSET.
+static VTABLE_OFFSET: uint = 3;
+
/**
The main "translation" pass for methods. Generates code
for non-monomorphized methods only. Other methods will
GEPi(bcx, llpair,
[0u, abi::trt_field_vtable]),
Type::vtable(ccx).ptr_to().ptr_to()));
- let mptr = Load(bcx, GEPi(bcx, llvtable, [0u, n_method + 1]));
+ let mptr = Load(bcx, GEPi(bcx, llvtable, [0u, n_method + VTABLE_OFFSET]));
let mptr = PointerCast(bcx, mptr, llcallee_ty.ptr_to());
return Callee {
}
});
+ let size_ty = sizing_type_of(ccx, self_ty);
+ let size = machine::llsize_of_alloc(ccx, size_ty);
+ let ll_size = C_uint(ccx, size as uint);
+ let align = align_of(ccx, self_ty);
+ let ll_align = C_uint(ccx, align as uint);
+
// Generate a destructor for the vtable.
let drop_glue = glue::get_drop_glue(ccx, self_ty);
- let vtable = make_vtable(ccx, drop_glue, methods);
+ let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
ccx.vtables.borrow_mut().insert(hash_id, vtable);
vtable
/// Helper function to declare and initialize the vtable.
pub fn make_vtable<I: Iterator<ValueRef>>(ccx: &CrateContext,
drop_glue: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
ptrs: I)
-> ValueRef {
let _icx = push_ctxt("meth::make_vtable");
- let components: Vec<_> = Some(drop_glue).move_iter().chain(ptrs).collect();
+ let head = vec![drop_glue, size, align];
+ let components: Vec<_> = head.move_iter().chain(ptrs).collect();
unsafe {
let tbl = C_struct(ccx, components.as_slice(), false);
pub fn c_size_and_align(&mut self, t: ty::t) -> Vec<ValueRef> {
let tr = type_of(self.bcx.ccx(), t);
let s = machine::llsize_of_real(self.bcx.ccx(), tr);
- let a = machine::llalign_of_min(self.bcx.ccx(), tr);
+ let a = align_of(self.bcx.ccx(), t);
return vec!(self.c_uint(s as uint),
self.c_uint(a as uint));
}
ty::MethodTraitItem(ref method) => (*method).clone(),
};
let mth_ty = ty::mk_bare_fn(tcx, method.fty.clone());
- debug!("Emit call visit method: visit_{}: {}", ty_name, ty_to_str(tcx, mth_ty));
+ debug!("Emit call visit method: visit_{}: {}", ty_name, ty_to_string(tcx, mth_ty));
let v = self.visitor_val;
debug!("passing {} args:", args.len());
let mut bcx = self.bcx;
// Unfortunately we can't do anything here because at runtime we
// pass around the value by pointer (*u8). But unsized pointers are
// fat and so we can't just cast them to *u8 and back. So we have
- // to work with the pointer directly (see ty_rptr/ty_uniq). See
- // ty_struct for where this causes issues.
+ // to work with the pointer directly (see ty_rptr/ty_uniq).
+ fail!("Can't reflect unsized type")
+ }
+ // FIXME(15049) Reflection for unsized structs.
+ ty::ty_struct(..) if !ty::type_is_sized(bcx.tcx(), t) => {
fail!("Can't reflect unsized type")
}
// because we cannot reflect unsized types (see note above). We
// just pretend the unsized field does not exist and print nothing.
// This is sub-optimal.
- let len = if ty::type_is_sized(tcx, t) {
- fields.len()
- } else {
- assert!(fields.len() > 0);
- fields.len() - 1
- };
+ let len = fields.len();
let extra = (vec!(
self.c_slice(
)).append(self.c_size_and_align(t).as_slice());
self.bracketed("class", extra.as_slice(), |this| {
for (i, field) in fields.iter().enumerate() {
- if ty::type_is_sized(tcx, field.mt.ty) {
- let extra = (vec!(
- this.c_uint(i),
- this.c_slice(token::get_ident(field.ident)),
- this.c_bool(named_fields)
- )).append(this.c_mt(&field.mt).as_slice());
- this.visit("class_field", extra.as_slice());
- }
+ let extra = (vec!(
+ this.c_uint(i),
+ this.c_slice(token::get_ident(field.ident)),
+ this.c_bool(named_fields)
+ )).append(this.c_mt(&field.mt).as_slice());
+ this.visit("class_field", extra.as_slice());
}
})
}
use middle::trans::base::*;
use middle::trans::base;
use middle::trans::build::*;
+use middle::trans::callee;
use middle::trans::cleanup;
use middle::trans::cleanup::CleanupMethods;
use middle::trans::common::*;
// Handle the &[...] case:
let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr);
- debug!(" vt={}, count={:?}", vt.to_str(ccx), count);
+ debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let llcount = C_uint(ccx, count);
let fixed_ty = ty::mk_vec(bcx.tcx(),
let llfixed = if count == 0 {
// Just create a zero-sized alloca to preserve
// the non-null invariant of the inner slice ptr
- base::arrayalloca(bcx, vt.llunit_ty, llcount)
+ let llfixed = base::arrayalloca(bcx, vt.llunit_ty, llcount);
+ BitCast(bcx, llfixed, llfixed_ty)
} else {
// Make a fixed-length backing array and allocate it on the stack.
let llfixed = base::arrayalloca(bcx, vt.llunit_ty, llcount);
content_expr: &ast::Expr)
-> DatumBlock<'a, Expr> {
/*!
- * ~[...] and "...".to_string() allocate boxes in the exchange heap and write
+ * Box<[...]> and "...".to_string() allocate boxes in the exchange heap and write
* the array elements into them.
*/
- debug!("trans_uniq_vec(vstore_expr={})", bcx.expr_to_string(uniq_expr));
+ debug!("trans_uniq_vec(uniq_expr={})", bcx.expr_to_string(uniq_expr));
let fcx = bcx.fcx;
let ccx = fcx.ccx;
+ // Handle "".to_string().
+ match content_expr.node {
+ ast::ExprLit(lit) => {
+ match lit.node {
+ ast::LitStr(ref s, _) => {
+ let llptrval = C_cstr(ccx, (*s).clone(), false);
+ let llptrval = PointerCast(bcx, llptrval, Type::i8p(ccx));
+ let llsizeval = C_uint(ccx, s.get().len());
+ let typ = ty::mk_uniq(bcx.tcx(), ty::mk_str(bcx.tcx()));
+ let lldestval = rvalue_scratch_datum(bcx,
+ typ,
+ "");
+ let alloc_fn = langcall(bcx,
+ Some(lit.span),
+ "",
+ StrDupUniqFnLangItem);
+ let bcx = callee::trans_lang_call(
+ bcx,
+ alloc_fn,
+ [ llptrval, llsizeval ],
+ Some(expr::SaveIn(lldestval.val))).bcx;
+ return DatumBlock::new(bcx, lldestval).to_expr_datumblock();
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr);
- debug!(" vt={}, count={:?}", vt.to_str(ccx), count);
+ debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let vec_ty = node_id_type(bcx, uniq_expr.id);
let unit_sz = nonzero_llsize_of(ccx, type_of::type_of(ccx, vt.unit_ty));
- let fill = Mul(bcx, C_uint(ccx, count), unit_sz);
- let alloc = if count < 4u {
- Mul(bcx, C_int(ccx, 4), unit_sz)
+ let llcount = if count < 4u {
+ C_int(ccx, 4)
} else {
- fill
+ C_uint(ccx, count)
};
+ let alloc = Mul(bcx, llcount, unit_sz);
let llty_ptr = type_of::type_of(ccx, vt.unit_ty).ptr_to();
let align = C_uint(ccx, 8);
let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx,
dataptr, cleanup::HeapExchange, vt.unit_ty);
debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
- bcx.val_to_str(dataptr), count);
+ bcx.val_to_string(dataptr), count);
let bcx = write_content(bcx, &vt, uniq_expr,
content_expr, SaveIn(dataptr));
use middle::trans::adt;
use middle::trans::common::*;
use middle::trans::foreign;
+use middle::trans::machine;
use middle::ty;
use util::ppaux;
use util::ppaux::Repr;
let llsizingty = match ty::get(t).sty {
_ if !ty::lltype_is_sized(cx.tcx(), t) => {
cx.sess().bug(format!("trying to take the sizing type of {}, an unsized type",
- ppaux::ty_to_str(cx.tcx(), t)).as_slice())
+ ppaux::ty_to_string(cx.tcx(), t)).as_slice())
}
ty::ty_nil | ty::ty_bot => Type::nil(cx),
ty::ty_tup(..) | ty::ty_enum(..) | ty::ty_unboxed_closure(..) => {
let repr = adt::represent_type(cx, t);
- adt::sizing_type_of(cx, &*repr)
+ adt::sizing_type_of(cx, &*repr, false)
}
ty::ty_struct(..) => {
Type::vector(&type_of(cx, et), n as u64)
} else {
let repr = adt::represent_type(cx, t);
- adt::sizing_type_of(cx, &*repr)
+ adt::sizing_type_of(cx, &*repr, false)
}
}
ty::ty_infer(..) | ty::ty_param(..) | ty::ty_err(..) => {
cx.sess().bug(format!("fictitious type {} in sizing_type_of()",
- ppaux::ty_to_str(cx.tcx(), t)).as_slice())
+ ppaux::ty_to_string(cx.tcx(), t)).as_slice())
}
ty::ty_vec(_, None) | ty::ty_trait(..) | ty::ty_str => fail!("unreachable")
};
// NB: If you update this, be sure to update `sizing_type_of()` as well.
pub fn type_of(cx: &CrateContext, t: ty::t) -> Type {
fn type_of_unsize_info(cx: &CrateContext, t: ty::t) -> Type {
+ // It is possible to end up here with a sized type. This happens with a
+ // struct which might be unsized, but is monomorphised to a sized type.
+ // In this case we'll fake a fat pointer with no unsize info (we use 0).
+ // However, its still a fat pointer, so we need some type use.
+ if ty::type_is_sized(cx.tcx(), t) {
+ return Type::i8p(cx);
+ }
+
match ty::get(ty::unsized_part_of_type(cx.tcx(), t)).sty {
ty::ty_str | ty::ty_vec(..) => Type::uint_from_ty(cx, ast::TyU),
ty::ty_trait(_) => Type::vtable_ptr(cx),
}
ty::ty_trait(..) => Type::opaque_trait(cx),
_ => cx.sess().bug(format!("ty_open with sized type: {}",
- ppaux::ty_to_str(cx.tcx(), t)).as_slice())
+ ppaux::ty_to_string(cx.tcx(), t)).as_slice())
},
ty::ty_infer(..) => cx.sess().bug("type_of with ty_infer"),
return llty;
}
+/// Returns the minimum ABI alignment (in bytes) of the type `t`, computed
+/// via its sizing type rather than its full translation.
+pub fn align_of(cx: &CrateContext, t: ty::t) -> u64 {
+ let llty = sizing_type_of(cx, t);
+ machine::llalign_of_min(cx, llty)
+}
+
// Want refinements! (Or case classes, I guess
pub enum named_ty {
a_struct,
AutoUnsafe(ast::Mutability),
}
-// Ugly little helper function. The bool in the returned tuple is true if there
-// is an 'unsize to trait object' adjustment at the bottom of the adjustment. If
-// that is surrounded by an AutoPtr, then we also return the region of the
-// AutoPtr (in the third argument). The second bool is true if the adjustment is
-// unique.
+// Ugly little helper function. The first bool in the returned tuple is true if
+// there is an 'unsize to trait object' adjustment at the bottom of the
+// adjustment. If that is surrounded by an AutoPtr, then we also return the
+// region of the AutoPtr (in the third argument). The second bool is true if the
+// adjustment is unique.
fn autoref_object_region(autoref: &AutoRef) -> (bool, bool, Option<Region>) {
- fn unsize_kind_region(k: &UnsizeKind) -> (bool, bool, Option<Region>) {
+ fn unsize_kind_is_object(k: &UnsizeKind) -> bool {
match k {
- &UnsizeVtable(..) => (true, false, None),
- &UnsizeStruct(box ref k, _) => unsize_kind_region(k),
- _ => (false, false, None)
+ &UnsizeVtable(..) => true,
+ &UnsizeStruct(box ref k, _) => unsize_kind_is_object(k),
+ _ => false
}
}
match autoref {
- &AutoUnsize(ref k) => unsize_kind_region(k),
- &AutoUnsizeUniq(ref k) => match k {
- &UnsizeVtable(..) => (true, true, None),
- _ => (false, false, None)
- },
+ &AutoUnsize(ref k) => (unsize_kind_is_object(k), false, None),
+ &AutoUnsizeUniq(ref k) => (unsize_kind_is_object(k), true, None),
&AutoPtr(adj_r, _, Some(box ref autoref)) => {
let (b, u, r) = autoref_object_region(autoref);
if r.is_some() || u {
pub from: t,
/// The type being transmuted to.
pub to: t,
+ /// NodeId of the transmute intrinsic.
+ pub id: ast::NodeId,
}
/// The data structure to keep track of all the information that typechecker
}
match get(ty).sty {
ty_nil | ty_bot | ty_bool | ty_char | ty_int(_) | ty_uint(_) | ty_float(_) |
- ty_str | ty_infer(_) | ty_param(_) | ty_unboxed_closure(_) | ty_err => {}
+ ty_str | ty_infer(_) | ty_param(_) | ty_unboxed_closure(_, _) | ty_err => {}
ty_box(ty) | ty_uniq(ty) | ty_vec(ty, _) | ty_open(ty) => maybe_walk_ty(ty, f),
ty_ptr(ref tm) | ty_rptr(_, ref tm) => {
maybe_walk_ty(tm.ty, f);
ty_str | ty_trait(..) | ty_vec(..) => ty,
ty_struct(_, ref substs) => {
// Exactly one of the type parameters must be unsized.
- for tp in substs.types.get_vec(subst::TypeSpace).iter() {
+ for tp in substs.types.get_slice(subst::TypeSpace).iter() {
if !type_is_sized(cx, *tp) {
return unsized_part_of_type(cx, *tp);
}
}
- fail!("Unsized struct type with no unsized type params?");
+ fail!("Unsized struct type with no unsized type params? {}", ty_to_string(cx, ty));
}
_ => {
assert!(type_is_sized(cx, ty),
}
}
+pub fn deref_or_dont(t: t) -> t {
+ match get(t).sty {
+ ty_box(ty) | ty_uniq(ty) => {
+ ty
+ },
+ ty_rptr(_, mt) | ty_ptr(mt) => mt.ty,
+ _ => t
+ }
+}
+
pub fn close_type(cx: &ctxt, t: t) -> t {
match get(t).sty {
ty_open(t) => mk_rptr(cx, ReStatic, mt {ty: t, mutbl:ast::MutImmutable}),
_ => cx.sess.bug(format!("Trying to close a non-open type {}",
- ty_to_str(cx, t)).as_slice())
+ ty_to_string(cx, t)).as_slice())
}
}
// This is exactly the same as the above, except it supports strings,
// which can't actually be indexed.
pub fn array_element_ty(t: t) -> Option<t> {
- match get(ty).sty {
+ match get(t).sty {
ty_vec(t, _) => Some(t),
- ty_str => Some(ty: mk_u8()),
+ ty_str => Some(mk_u8()),
_ => None
}
}
}
_ => cx.sess.span_bug(span,
format!("UnsizeLength with bad sty: {}",
- ty_to_str(cx, ty)).as_slice())
+ ty_to_string(cx, ty)).as_slice())
},
&UnsizeStruct(box ref k, tp_index) => match get(ty).sty {
ty_struct(did, ref substs) => {
- let ty_substs = substs.types.get_vec(subst::TypeSpace);
- let old_ty = ty_substs.get(tp_index);
- let new_ty = unsize_ty(cx, *old_ty, k, span);
+ let ty_substs = substs.types.get_slice(subst::TypeSpace);
+ let new_ty = unsize_ty(cx, ty_substs[tp_index], k, span);
let mut unsized_substs = substs.clone();
- *unsized_substs.types.get_mut_vec(subst::TypeSpace).get_mut(tp_index) = new_ty;
+ unsized_substs.types.get_mut_slice(subst::TypeSpace)[tp_index] = new_ty;
mk_struct(cx, did, unsized_substs)
}
_ => cx.sess.span_bug(span,
format!("UnsizeStruct with bad sty: {}",
- ty_to_str(cx, ty)).as_slice())
+ ty_to_string(cx, ty)).as_slice())
},
&UnsizeVtable(bounds, def_id, ref substs) => {
mk_trait(cx, def_id, substs.clone(), bounds)
ty_tup(_) |
ty_param(_) |
ty_infer(_) |
+ ty_open(_) |
ty_err => {}
}
})
}
}
+impl TypeFoldable for ty::UnsizeKind {
+ fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::UnsizeKind {
+ match *self {
+ ty::UnsizeLength(len) => ty::UnsizeLength(len),
+ ty::UnsizeStruct(box ref k, n) => ty::UnsizeStruct(box k.fold_with(folder), n),
+ ty::UnsizeVtable(bounds, def_id, ref substs) => {
+ ty::UnsizeVtable(bounds.fold_with(folder), def_id, substs.fold_with(folder))
+ }
+ }
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// "super" routines: these are the default implementations for TypeFolder.
//
match *autoref {
ty::AutoPtr(r, m, None) => ty::AutoPtr(this.fold_region(r), m, None),
ty::AutoPtr(r, m, Some(ref a)) => {
- ty::AutoPtr(this.fold_region(r), m, Some(box super_fold_autoref(this, a.clone())))
+ ty::AutoPtr(this.fold_region(r), m, Some(box super_fold_autoref(this, &**a)))
}
ty::AutoUnsafe(m) => ty::AutoUnsafe(m),
- ty::AutoUnsize(ref k) => ty::AutoUnsize(k.clone()),
- ty::AutoUnsizeUniq(ref k) => ty::AutoUnsizeUniq(k.clone()),
+ ty::AutoUnsize(ref k) => ty::AutoUnsize(k.fold_with(this)),
+ ty::AutoUnsizeUniq(ref k) => ty::AutoUnsizeUniq(k.fold_with(this)),
}
}
// [T, ..len] -> [T] or &[T] or &&[T]
fn auto_unsize_vec(&self, ty: ty::t, autoderefs: uint, len: uint) -> Option<MethodCallee> {
let tcx = self.tcx();
- debug!("auto_unsize_vec {}", ppaux::ty_to_str(tcx, ty));
+ debug!("auto_unsize_vec {}", ppaux::ty_to_string(tcx, ty));
// First try to borrow to an unsized vec.
let entry = self.search_for_some_kind_of_autorefd_method(
match self.fcx.mk_subty(false, infer::Misc(span),
rcvr_ty, transformed_self_ty) {
Ok(_) => {}
- Err(e) => {
+ Err(_) => {
self.bug(format!(
"{} was a subtype of {} but now is not?",
self.ty_to_string(rcvr_ty),
if ty::type_is_scalar(t_1) {
// Supply the type as a hint so as to influence integer
// literals and other things that might care.
- check_expr_with_hint(fcx, e, t_1)
+ check_expr_with_expectation(fcx, e, ExpectCastableToType(t_1))
} else {
check_expr(fcx, e)
}
}
check_expr_coercable_to_type(fcx, &**arg, formal_ty);
-
}
}
}
// 'else' branch.
let expected = match expected.only_has_type() {
ExpectHasType(ety) => {
- match infer::resolve_type(fcx.infcx(), ety, force_tvar) {
+ match infer::resolve_type(fcx.infcx(), Some(sp), ety, force_tvar) {
Ok(rty) if !ty::type_is_ty_var(rty) => ExpectHasType(rty),
_ => NoExpectation
}
}
- None => None
+ _ => NoExpectation
};
check_block_with_expected(fcx, then_blk, expected);
let then_ty = fcx.node_ty(then_blk.id);
type ExprCheckerWithTy = fn(&FnCtxt, &ast::Expr, ty::t);
- fn check_fn_for_vec_elements_expected(fcx: &FnCtxt,
- expected: Expectation)
- -> (ExprCheckerWithTy, ty::t) {
- let tcx = fcx.ccx.tcx;
- let (coerce, t) = match expected {
- // If we're given an expected type, we can try to coerce to it
- ExpectHasType(t) if ty::type_is_vec(t) => (true, ty::sequence_element_type(tcx, t)),
- // Otherwise we just leave the type to be resolved later
- _ => (false, fcx.infcx().next_ty_var())
- };
- if coerce {
- (check_expr_coercable_to_type, t)
- } else {
- (check_expr_has_type, t)
- }
- }
-
let tcx = fcx.ccx.tcx;
let id = expr.id;
match expr.node {
}
}
ast::ExprUnary(unop, ref oprnd) => {
- let expected = expected.only_has_type();
let expected_inner = expected.map(fcx, |sty| {
match unop {
ast::UnBox | ast::UnUniq => match *sty {
}
},
Some(ref e) => {
- //check_expr_has_type(fcx, e, ret_ty);
check_expr_coercable_to_type(fcx, &**e, ret_ty);
}
}
check_cast(fcx, &**e, &**t, id, expr.span);
}
ast::ExprVec(ref args) => {
- let uty = unpack_expected(
- fcx, expected,
- |sty| match *sty {
- ty::ty_vec(ty, _) => Some(ty),
- _ => None
- });
+ let uty = match expected {
+ ExpectHasType(uty) => {
+ match ty::get(uty).sty {
+ ty::ty_vec(ty, _) => Some(ty),
+ _ => None
+ }
+ }
+ _ => None
+ };
let typ = match uty {
Some(uty) => {
check_expr_has_type(fcx, &**count_expr, ty::mk_uint());
let count = ty::eval_repeat_count(fcx, &**count_expr);
- let uty = unpack_expected(
- fcx, expected,
- |sty| match *sty {
- ty::ty_vec(ty, _) => Some(ty),
- _ => None
- });
+ let uty = match expected {
+ ExpectHasType(uty) => {
+ match ty::get(uty).sty {
+ ty::ty_vec(ty, _) => Some(ty),
+ _ => None
+ }
+ }
+ _ => None
+ };
let (element_ty, t) = match uty {
Some(uty) => {
let mut err_field = false;
let elt_ts = elts.iter().enumerate().map(|(i, e)| {
- let opt_hint = match flds {
- Some(ref fs) if i < fs.len() => ExpectHasType(*fs.get(i)),
- _ => NoExpectation
- };
- let t = match opt_hint {
- ExpectHasType(ety) => {
+ let t = match flds {
+ Some(ref fs) if i < fs.len() => {
+ let ety = *fs.get(i);
check_expr_coercable_to_type(fcx, &**e, ety);
ety
}
_ => {
- check_expr_with_expectation(fcx, &**e, opt_hint);
+ check_expr_with_expectation(fcx, &**e, NoExpectation);
fcx.expr_ty(&**e)
}
};
}
match blk.expr {
None => if any_err {
- fcx.write_error(blk.id);
- }
- else if any_bot {
- fcx.write_bot(blk.id);
- }
- else {
- fcx.write_nil(blk.id);
- },
- Some(e) => {
- if any_bot && !warned {
- fcx.ccx
- .tcx
- .sess
- .add_lint(lint::builtin::UNREACHABLE_CODE,
- e.id,
- e.span,
- "unreachable expression".to_string());
- }
- let ety = match expected {
- ExpectHasType(ety) => {
- check_expr_coercable_to_type(fcx, &*e, ety);
- ety
+ fcx.write_error(blk.id);
}
- _ => {
- check_expr_with_expectation(fcx, &*e, expected);
- fcx.expr_ty(e)
+ else if any_bot {
+ fcx.write_bot(blk.id);
}
- };
+ else {
+ fcx.write_nil(blk.id);
+ },
+ Some(e) => {
+ if any_bot && !warned {
+ fcx.ccx
+ .tcx
+ .sess
+ .add_lint(lint::builtin::UNREACHABLE_CODE,
+ e.id,
+ e.span,
+ "unreachable expression".to_string());
+ }
+ let ety = match expected {
+ ExpectHasType(ety) => {
+ check_expr_coercable_to_type(fcx, &*e, ety);
+ ety
+ }
+ _ => {
+ check_expr_with_expectation(fcx, &*e, expected);
+ fcx.expr_ty(&*e)
+ }
+ };
- fcx.write_ty(blk.id, ety);
- if any_err {
- fcx.write_error(blk.id);
- } else if any_bot {
- fcx.write_bot(blk.id);
+ fcx.write_ty(blk.id, ety);
+ if any_err {
+ fcx.write_error(blk.id);
+ } else if any_bot {
+ fcx.write_bot(blk.id);
+ }
}
- }
};
});
}
}
AutoDerefRef(ref adj) => {
- assert!(!ty::adjust_is_object(adjustment));
for autoderef in range(0, adj.autoderefs) {
let method_call = MethodCall::autoderef(ex.id, autoderef);
match fcx.inh.method_map.borrow().find(&method_call) {
}
}
}
- _ => {
- assert!(!ty::adjust_is_object(adjustment));
- }
+ _ => {}
}
}
None => {}
}
&ty::UnsizeStruct(box ref k, tp_index) => match ty::get(src_ty).sty {
ty::ty_struct(_, ref substs) => {
- let ty_substs = substs.types.get_vec(subst::TypeSpace);
- let field_ty = *ty_substs.get(tp_index);
- let field_ty = structurally_resolved_type(fcx, sp, field_ty);
+ let ty_substs = substs.types.get_slice(subst::TypeSpace);
+ let field_ty = structurally_resolved_type(fcx, sp, ty_substs[tp_index]);
trait_cast_types_unsize(fcx, k, field_ty, sp)
}
_ => fail!("Failed to find a ty_struct to correspond with \
UnsizeStruct whilst walking adjustment. Found {}",
- ppaux::ty_to_str(fcx.tcx(), src_ty))
+ ppaux::ty_to_string(fcx.tcx(), src_ty))
},
_ => None
}
}
match autoref {
- &ty::AutoUnsize(ref k) => trait_cast_types_unsize(fcx, k, src_ty, sp),
- &ty::AutoUnsizeUniq(ref k) => match k {
- &ty::UnsizeVtable(bounds, def_id, ref substs) => {
- Some((src_ty, ty::mk_trait(fcx.tcx(), def_id, substs.clone(), bounds)))
- }
- _ => None
- },
+ &ty::AutoUnsize(ref k) |
+ &ty::AutoUnsizeUniq(ref k) => trait_cast_types_unsize(fcx, k, src_ty, sp),
&ty::AutoPtr(_, _, Some(box ref autoref)) => {
trait_cast_types_autoref(fcx, autoref, src_ty, sp)
}
}
}
- pub fn unpack_actual_value(&self, a: ty::t, f: |&ty::sty| -> CoerceResult)
- -> CoerceResult {
+ pub fn unpack_actual_value<T>(&self, a: ty::t, f: |&ty::sty| -> T)
+ -> T {
match resolve_type(self.get_ref().infcx, None,
a, try_resolve_tvar_shallow) {
Ok(t) => {
let sty_b = &ty::get(b).sty;
match (sty_a, sty_b) {
- (&ty::ty_uniq(t_a), &ty::ty_rptr(_, mt_b)) => Err(ty::terr_mismatch),
+ (&ty::ty_uniq(_), &ty::ty_rptr(..)) => Err(ty::terr_mismatch),
(&ty::ty_rptr(_, ty::mt{ty: t_a, ..}), &ty::ty_rptr(_, mt_b)) => {
self.unpack_actual_value(t_a, |sty_a| {
match self.unsize_ty(sty_a, mt_b.ty) {
if did_a == did_b => {
debug!("unsizing a struct");
// Try unsizing each type param in turn to see if we end up with ty_b.
- let ty_substs_a = substs_a.types.get_vec(subst::TypeSpace);
- let ty_substs_b = substs_b.types.get_vec(subst::TypeSpace);
+ let ty_substs_a = substs_a.types.get_slice(subst::TypeSpace);
+ let ty_substs_b = substs_b.types.get_slice(subst::TypeSpace);
assert!(ty_substs_a.len() == ty_substs_b.len());
let sub = Sub(self.get_ref().clone());
Some((new_tp, k)) => {
// Check that the whole types match.
let mut new_substs = substs_a.clone();
- *new_substs.types.get_mut_vec(subst::TypeSpace).get_mut(i) = new_tp;
+ new_substs.types.get_mut_slice(subst::TypeSpace)[i] = new_tp;
let ty = ty::mk_struct(tcx, did_a, new_substs);
if self.get_ref().infcx.try(|| sub.tys(ty, ty_b)).is_err() {
debug!("Unsized type parameter '{}', but still \
let r_a = self.get_ref().infcx.next_region_var(coercion);
let a_borrowed = match *sty_a {
- ty::ty_uniq(ty) => return Err(ty::terr_mismatch),
- ty::ty_rptr(_, ty::mt{ty, ..}) => match ty::get(ty).sty {
+ ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) => match ty::get(ty).sty {
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
buf.push_str(mt_to_string(cx, tm).as_slice());
buf
}
- ty_open(typ) => format!("opened<{}>", ty_to_str(cx, typ)),
+ ty_open(typ) => format!("opened<{}>", ty_to_string(cx, typ)),
ty_tup(ref elems) => {
let strs: Vec<String> = elems.iter().map(|elem| ty_to_string(cx, *elem)).collect();
format!("({})", strs.connect(","))
use std::rc::Rc;
use std::gc::{Gc, GC};
use std::cell::{Cell, RefCell};
-use std::strbuf::StrBuf;
pub trait Encoder<E> {
// Primitive types:
use ast::{ViewPath, ViewPathGlob, ViewPathList, ViewPathSimple};
use ast::{Visibility, WhereClause, WherePredicate};
use ast;
-use ast_util::{as_prec, ident_to_path, lit_is_str, operator_prec};
+use ast_util::{as_prec, ident_to_path, operator_prec};
use ast_util;
use attr;
use codemap::{Span, BytePos, Spanned, spanned, mk_sp};
ex = self.mk_unary(UnUniq, e);
}
token::IDENT(_, _) => {
- if self.is_keyword(keywords::Box) {
- self.bump();
+ if !self.is_keyword(keywords::Box) {
+ return self.parse_dot_or_call_expr();
+ }
- // Check for a place: `box(PLACE) EXPR`.
- if self.eat(&token::LPAREN) {
- // Support `box() EXPR` as the default.
- if !self.eat(&token::RPAREN) {
- let place = self.parse_expr();
- self.expect(&token::RPAREN);
- let subexpression = self.parse_prefix_expr();
- hi = subexpression.span.hi;
- ex = ExprBox(place, subexpression);
- return self.mk_expr(lo, hi, ex);
- }
+ self.bump();
+
+ // Check for a place: `box(PLACE) EXPR`.
+ if self.eat(&token::LPAREN) {
+ // Support `box() EXPR` as the default.
+ if !self.eat(&token::RPAREN) {
+ let place = self.parse_expr();
+ self.expect(&token::RPAREN);
+ let subexpression = self.parse_prefix_expr();
+ hi = subexpression.span.hi;
+ ex = ExprBox(place, subexpression);
+ return self.mk_expr(lo, hi, ex);
}
+ }
// Otherwise, we use the unique pointer default.
let subexpression = self.parse_prefix_expr();
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
pub tm_nsec: i32,
}
-impl Tm {
- pub fn tm_zone<'a>(&'a self) -> &'a str {
- self.tm_zone.as_slice()
- }
-}
-
pub fn empty_tm() -> Tm {
- // 64 is the max size of the timezone buffer allocated on windows
- // in rust_localtime. In glibc the max timezone size is supposedly 3.
- let mut zone = StrBuf::new();
- for _ in range(0, 64) {
- zone.push_char(' ')
- }
Tm {
tm_sec: 0_i32,
tm_min: 0_i32,
tm_yday: 0_i32,
tm_isdst: 0_i32,
tm_gmtoff: 0_i32,
- tm_zone: zone,
tm_nsec: 0_i32,
}
}
'Z' => {
if match_str(s, pos, "UTC") || match_str(s, pos, "GMT") {
tm.tm_gmtoff = 0_i32;
- tm.tm_zone = "UTC".into_strbuf();
Ok(pos + 3u)
} else {
// It's odd, but to maintain compatibility with c's
let (v, pos) = item;
if v == 0_i32 {
tm.tm_gmtoff = 0_i32;
- tm.tm_zone = "UTC".into_strbuf();
}
Ok(pos)
tm_yday: 0_i32,
tm_isdst: 0_i32,
tm_gmtoff: 0_i32,
- tm_zone: StrBuf::new(),
tm_nsec: 0_i32,
};
let mut pos = 0u;
tm_yday: tm.tm_yday,
tm_isdst: tm.tm_isdst,
tm_gmtoff: tm.tm_gmtoff,
- tm_zone: tm.tm_zone.clone(),
tm_nsec: tm.tm_nsec,
})
} else { result }
'w' => (tm.tm_wday as int).to_string(),
'Y' => (tm.tm_year as int + 1900).to_string(),
'y' => format!("{:02d}", (tm.tm_year as int + 1900) % 100),
- 'Z' => tm.tm_zone.as_slice().to_owned(),
'z' => {
let sign = if tm.tm_gmtoff > 0_i32 { '+' } else { '-' };
let mut m = num::abs(tm.tm_gmtoff) / 60_i32;
assert_eq!(utc.tm_yday, 43_i32);
assert_eq!(utc.tm_isdst, 0_i32);
assert_eq!(utc.tm_gmtoff, 0_i32);
- assert_eq!(utc.tm_zone(), "UTC");
assert_eq!(utc.tm_nsec, 54321_i32);
}
assert_eq!(local.tm_yday, 43_i32);
assert_eq!(local.tm_isdst, 0_i32);
assert_eq!(local.tm_gmtoff, -28800_i32);
-
- // FIXME (#2350): We should probably standardize on the timezone
- // abbreviation.
- let zone = local.tm_zone();
- assert!(zone == "PST" || zone == "Pacific Standard Time");
-
assert_eq!(local.tm_nsec, 54321_i32);
}
assert!(tm.tm_wday == 0_i32);
assert!(tm.tm_isdst == 0_i32);
assert!(tm.tm_gmtoff == 0_i32);
- assert!(tm.tm_zone() == "");
assert!(tm.tm_nsec == 0_i32);
}
Err(_) => ()
assert!(tm.tm_yday == 0_i32);
assert!(tm.tm_isdst == 0_i32);
assert!(tm.tm_gmtoff == 0_i32);
- assert!(tm.tm_zone() == "");
assert!(tm.tm_nsec == 12340000_i32);
}
}
assert!(test("6", "%w"));
assert!(test("2009", "%Y"));
assert!(test("09", "%y"));
- assert!(strptime("UTC", "%Z").unwrap().tm_zone() == "UTC");
- assert!(strptime("PST", "%Z").unwrap().tm_zone() == "");
- assert!(strptime("-0000", "%z").unwrap().tm_gmtoff == 0);
- assert!(strptime("-0800", "%z").unwrap().tm_gmtoff == 0);
+ assert!(strptime("-0000", "%z").unwrap().tm_gmtoff ==
+ 0);
+ assert!(strptime("-0800", "%z").unwrap().tm_gmtoff ==
+ 0);
assert!(test("%", "%%"));
// Test for #7256
///
/// ```rust
/// let gr1 = "a\u0310e\u0301o\u0308\u0332".graphemes(true).collect::<Vec<&str>>();
- /// assert_eq!(gr1.as_slice(), &["a\u0310", "e\u0301", "o\u0308\u0332"]);
+ /// let b: &[_] = &["a\u0310", "e\u0301", "o\u0308\u0332"];
+ /// assert_eq!(gr1.as_slice(), b);
/// let gr2 = "a\r\nb🇷🇺🇸🇹".graphemes(true).collect::<Vec<&str>>();
- /// assert_eq!(gr2.as_slice(), &["a", "\r\n", "b", "🇷🇺🇸🇹"]);
+ /// let b: &[_] = &["a", "\r\n", "b", "🇷🇺🇸🇹"];
+ /// assert_eq!(gr2.as_slice(), b);
/// ```
fn graphemes(&self, is_extended: bool) -> Graphemes<'a>;
///
/// ```rust
/// let gr_inds = "a̐éö̲\r\n".grapheme_indices(true).collect::<Vec<(uint, &str)>>();
- /// assert_eq!(gr_inds.as_slice(), &[(0u, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")]);
+ /// let b: &[_] = &[(0u, "a̐"), (3, "é"), (6, "ö̲"), (11, "\r\n")];
+ /// assert_eq!(gr_inds.as_slice(), b);
/// ```
fn grapheme_indices(&self, is_extended: bool) -> GraphemeIndices<'a>;
t("\0", "%00");
t("\n", "%0A");
- t(&[0u8, 10, 37], "%00%0A%25");
+ let a: &[_] = &[0u8, 10, 37];
+ t(a, "%00%0A%25");
}
#[test]
t("\0", "%00");
t("\n", "%0A");
- t(&[0u8, 10, 37], "%00%0A%25");
+ let a: &[_] = &[0u8, 10, 37];
+ t(a, "%00%0A%25");
}
#[test]
}
#endif
-typedef struct
-{
- size_t fill; // in bytes; if zero, heapified
- size_t alloc; // in bytes
- uint8_t *data;
-} rust_vec;
-
-typedef rust_vec rust_str_buf;
-
typedef struct {
int32_t tm_sec;
int32_t tm_min;
int32_t tm_yday;
int32_t tm_isdst;
int32_t tm_gmtoff;
- rust_str_buf tm_zone;
int32_t tm_nsec;
} rust_tm;
out_tm->tm_isdst = in_tm->tm_isdst;
}
-void tm_to_rust_tm(struct tm* in_tm, rust_tm* out_tm, int32_t gmtoff,
- const char *zone, int32_t nsec) {
+void tm_to_rust_tm(struct tm* in_tm,
+ rust_tm* out_tm,
+ int32_t gmtoff,
+ int32_t nsec) {
out_tm->tm_sec = in_tm->tm_sec;
out_tm->tm_min = in_tm->tm_min;
out_tm->tm_hour = in_tm->tm_hour;
out_tm->tm_isdst = in_tm->tm_isdst;
out_tm->tm_gmtoff = gmtoff;
out_tm->tm_nsec = nsec;
-
- if (zone != NULL) {
- size_t size = strlen(zone);
- assert(out_tm->tm_zone.alloc >= size);
- memcpy(out_tm->tm_zone.data, zone, size);
- out_tm->tm_zone.fill = size;
- }
}
#if defined(__WIN32__)
time_t s = sec;
GMTIME(&s, &tm);
- tm_to_rust_tm(&tm, timeptr, 0, "UTC", nsec);
+ tm_to_rust_tm(&tm, timeptr, 0, nsec);
}
void
time_t s = sec;
LOCALTIME(&s, &tm);
- const char* zone = NULL;
#if defined(__WIN32__)
int32_t gmtoff = -timezone;
- wchar_t wbuffer[64] = {0};
- char buffer[256] = {0};
- // strftime("%Z") can contain non-UTF-8 characters on non-English locale (issue #9418),
- // so time zone should be converted from UTF-16 string.
- // Since wcsftime depends on setlocale() result,
- // instead we convert it using MultiByteToWideChar.
- if (strftime(buffer, sizeof(buffer) / sizeof(char), "%Z", &tm) > 0) {
- // ANSI -> UTF-16
- MultiByteToWideChar(CP_ACP, 0, buffer, -1, wbuffer, sizeof(wbuffer) / sizeof(wchar_t));
- // UTF-16 -> UTF-8
- WideCharToMultiByte(CP_UTF8, 0, wbuffer, -1, buffer, sizeof(buffer), NULL, NULL);
- zone = buffer;
- }
#else
int32_t gmtoff = tm.tm_gmtoff;
- zone = tm.tm_zone;
#endif
- tm_to_rust_tm(&tm, timeptr, gmtoff, zone, nsec);
+ tm_to_rust_tm(&tm, timeptr, gmtoff, nsec);
}
int64_t
// except according to those terms.
static a: &'static str = "foo";
-static b: *const u8 = a as *const u8; //~ ERROR mismatched types: expected `*u8` but found `&'static str`
-static c: *const u8 = &a as *const u8; //~ ERROR mismatched types: expected `*u8` but found `&&'static str`
+static b: *const u8 = a as *const u8;
+//~^ ERROR mismatched types: expected `*const u8`, found `&'static str`
+static c: *const u8 = &a as *const u8;
+//~^ ERROR mismatched types: expected `*const u8`, found `&&'static str`
fn main() {
}
// Forbid assignment into a dynamically sized type.
-struct Fat<type T> {
+struct Fat<Sized? T> {
f1: int,
f2: &'static str,
ptr: T
// Forbid assignment into a dynamically sized type.
-struct Fat<type T> {
+struct Fat<Sized? T> {
f1: int,
f2: &'static str,
ptr: T
// Assignment.
let f5: &mut Fat<ToBar> = &mut Fat { f1: 5, f2: "some str", ptr: Bar1 {f :42} };
let z: Box<ToBar> = box Bar1 {f: 36};
- f5.ptr = Bar1 {f: 36}; //~ ERROR mismatched types: expected `ToBar` but found `Bar1`
+ f5.ptr = Bar1 {f: 36}; //~ ERROR mismatched types: expected `ToBar`, found `Bar1`
}
// Attempt to change the type as well as unsizing.
-struct Fat<type T> {
+struct Fat<Sized? T> {
ptr: T
}
let f1 = Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int, ..3]> = &f1;
let f3: &Fat<[uint]> = f2;
- //~^ ERROR mismatched types: expected `&Fat<[uint]>` but found `&Fat<[int, .. 3]>`
+ //~^ ERROR mismatched types: expected `&Fat<[uint]>`, found `&Fat<[int, .. 3]>`
// With a trait.
let f1 = Fat { ptr: Foo };
// Attempt to change the mutability as well as unsizing.
-struct Fat<type T> {
+struct Fat<Sized? T> {
ptr: T
}
// Attempt to extend the lifetime as well as unsizing.
-struct Fat<type T> {
+struct Fat<Sized? T> {
ptr: T
}
// Attempt to coerce from unsized to sized.
-struct Fat<type T> {
+struct Fat<Sized? T> {
ptr: T
}
// With a vec of ints.
let f1: &Fat<[int]> = &Fat { ptr: [1, 2, 3] };
let f2: &Fat<[int, ..3]> = f1;
- //~^ ERROR mismatched types: expected `&Fat<[int, .. 3]>` but found `&Fat<[int]>`
+ //~^ ERROR mismatched types: expected `&Fat<[int, .. 3]>`, found `&Fat<[int]>`
}
// because it would require stack allocation of an unsized temporary (*g in the
// test).
-struct Fat<type T> {
+struct Fat<Sized? T> {
ptr: T
}
// aux-build:issue_3907.rs
extern crate issue_3907;
-type Foo = issue_3907::Foo; //~ ERROR: reference to trait
+type Foo = issue_3907::Foo;
struct S {
name: int
}
+fn bar(_x: Foo) {} //~ ERROR variable `_x` has dynamically sized type `issue_3907::Foo`
+
fn main() {}
fn get_tw_map(tw: &TraitWrapper) -> &MyTrait {
match *tw {
- A(box ref map) => map, //~ ERROR mismatched types: expected `Box<MyTrait>` but found a box
+ A(box ref map) => map, //~ ERROR type `Box<MyTrait>` cannot be dereferenced
}
}
// except according to those terms.
trait I {}
-type K = I; //~ ERROR: reference to trait
+type K = I;
+
+fn foo(_x: K) {} //~ ERROR: variable `_x` has dynamically sized type `I`
fn main() {}
fn new_struct(r: A) -> Struct {
//~^ ERROR variable `r` has dynamically sized type `A`
- Struct { r: r }
+ Struct { r: r } //~ ERROR trying to initialise a dynamically sized struct
}
trait Curve {}
fn main() {
let a = A {v: box B{v: None} as Box<Foo+Send>};
- //~^ ERROR cannot pack type `Box<B>` as a trait bounded by Send because the type does not fulfil
+ //~^ ERROR cannot pack type `Box<B>`, which does not fulfill `Send`, as a trait bounded by Send
let v = Rc::new(RefCell::new(a));
let w = v.clone();
let b = &*v;
fn first((value, _): (int, f64)) -> int { value }
fn main() {
- let y = first ((1,2,3));
+ let y = first ((1,2.0,3));
//~^ ERROR expected a tuple with 2 elements, found one with 3 elements
}
as core::fmt::rt::Piece<'static>)] as
[core::fmt::rt::Piece<'static>, .. 1]);
let __args_vec =
- (&([] as &'static [core::fmt::Argument<'static>]) as
- &'static [core::fmt::Argument<'static>]);
+ (&([] as [core::fmt::Argument<'static>, .. 0]) as
+ &'static [core::fmt::Argument<'static>, .. 0]);
let __args =
(unsafe {
((::std::fmt::Arguments::new as
[core::fmt::rt::Piece<'static>, .. 1]),
(__args_vec
as
- &'static [core::fmt::Argument<'static>]))
+ &'static [core::fmt::Argument<'static>, .. 0]))
as core::fmt::Arguments<'static>)
} as core::fmt::Arguments<'static>);
N10 -> N11;
N11 -> N12;
N12 -> N13;
- N13 -> N7[label="exiting scope_0 expr break \'outer,\lexiting scope_1 stmt break \'outer ;,\lexiting scope_2 block { break \'outer ; \"unreachable\"; },\lexiting scope_3 expr if x == 1i { break \'outer ; \"unreachable\"; },\lexiting scope_4 stmt if x == 1i { break \'outer ; \"unreachable\"; },\lexiting scope_5 block {\l if x == 1i { break \'outer ; \"unreachable\"; }\l if y >= 2i { break ; \"unreachable\"; }\l y -= 3;\l}\l,\lexiting scope_6 expr \'inner:\l loop {\l if x == 1 { break \'outer ; \"unreachable\"; }\l if y >= 2 { break ; \"unreachable\"; }\l y -= 3;\l }\l,\lexiting scope_7 stmt \'inner:\l loop {\l if x == 1 { break \'outer ; \"unreachable\"; }\l if y >= 2 { break ; \"unreachable\"; }\l y -= 3;\l }\l,\lexiting scope_8 block {\l \'inner:\l loop {\l if x == 1 { break \'outer ; \"unreachable\"; }\l if y >= 2 { break ; \"unreachable\"; }\l y -= 3;\l }\l y -= 4;\l x -= 5;\l}\l"];
+ N13 -> N7[label="exiting scope_0 expr break \'outer,\lexiting scope_1 stmt break \'outer ;,\lexiting scope_2 block { break \'outer ; \"unreachable\"; },\lexiting scope_3 expr if x == 1i { break \'outer ; \"unreachable\"; },\lexiting scope_4 stmt if x == 1i { break \'outer ; \"unreachable\"; },\lexiting scope_5 block {\l if x == 1i { break \'outer ; \"unreachable\"; }\l if y >= 2i { break ; \"unreachable\"; }\l y -= 3i;\l}\l,\lexiting scope_6 expr \'inner:\l loop {\l if x == 1i { break \'outer ; \"unreachable\"; }\l if y >= 2i { break ; \"unreachable\"; }\l y -= 3i;\l }\l,\lexiting scope_7 stmt \'inner:\l loop {\l if x == 1i { break \'outer ; \"unreachable\"; }\l if y >= 2i { break ; \"unreachable\"; }\l y -= 3i;\l }\l,\lexiting scope_8 block {\l \'inner:\l loop {\l if x == 1i { break \'outer ; \"unreachable\"; }\l if y >= 2i { break ; \"unreachable\"; }\l y -= 3i;\l }\l y -= 4i;\l x -= 5i;\l}\l"];
N14 -> N15;
N15 -> N16;
N12 -> N17;
N27[label="expr y"];
N28[label="expr y -= 1i"];
N29[label="block {\l if x == 1i { continue \'outer ; \"unreachable\"; }\l if y >= 1i { break ; \"unreachable\"; }\l y -= 1i;\l}\l"];
- N30[label="expr 1"];
+ N30[label="expr 1i"];
N31[label="expr y"];
N32[label="expr y -= 1i"];
N33[label="expr 1i"];
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static mut DROP_RAN: bool = false;
+
+struct Foo;
+impl Drop for Foo {
+ fn drop(&mut self) {
+ unsafe { DROP_RAN = true; }
+ }
+}
+
+trait Trait {}
+impl Trait for Foo {}
+
+struct Fat<Sized? T> {
+ f: T
+}
+
+pub fn main() {
+ {
+ let _x: Box<Fat<Trait>> = box Fat { f: Foo };
+ }
+ unsafe {
+ assert!(DROP_RAN);
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static mut DROP_RAN: int = 0;
+
+struct Foo;
+impl Drop for Foo {
+ fn drop(&mut self) {
+ unsafe { DROP_RAN += 1; }
+ }
+}
+
+struct Fat<Sized? T> {
+ f: T
+}
+
+pub fn main() {
+ {
+ let _x: Box<Fat<[Foo]>> = box Fat { f: [Foo, Foo, Foo] };
+ }
+ unsafe {
+ assert!(DROP_RAN == 3);
+ }
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// FIXME(15049) Re-enable this test.
+// ignore-test
// Test that structs with unsized fields work with {:?} reflection.
extern crate debug;
-struct Fat<type T> {
+struct Fat<Sized? T> {
f1: int,
f2: &'static str,
ptr: T
// As dst-struct.rs, but the unsized field is the only field in the struct.
-struct Fat<type T> {
+struct Fat<Sized? T> {
ptr: T
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Fat<type T> {
+struct Fat<Sized? T> {
f1: int,
f2: &'static str,
ptr: T
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Fat<type T> {
+struct Fat<Sized? T> {
f1: int,
f2: &'static str,
ptr: T
fn main() {
// A fixed-size array allocated in a garbage-collected box
- let x = box(GC) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let x = box(GC) [1i, 2, 3, 4, 5, 6, 7, 8, 9, 10];
assert_eq!(x[0], 1);
assert_eq!(x[6], 7);
assert_eq!(x[9], 10);
let y = x;
- assert!(*y == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ assert!(*y == [1i, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
}
: "r"(&wrap(y, "in", &mut history)));
}
assert_eq!((x,y), (1,1));
- assert_eq!(history.as_slice(), &["out", "in"]);
+ let b: &[_] = &["out", "in"];
+ assert_eq!(history.as_slice(), b);
}
}
}
// except according to those terms.
fn main() {
- let mut x = &[1i, 2, 3, 4];
+ let mut x: &[_] = &[1i, 2, 3, 4];
let mut result = vec!();
loop {
assert_eq!(match_vecs_snoc::<uint>(&[], &[]), "both empty");
assert_eq!(match_vecs_snoc(&[1i, 2, 3], &[]), "one empty");
- assert_eq!(match_nested_vecs_cons(None, Ok(&[4u, 2u])), "None, Ok(at least two elements)");
+ assert_eq!(match_nested_vecs_cons(None, Ok::<&[_], ()>(&[4u, 2u])),
+ "None, Ok(at least two elements)");
assert_eq!(match_nested_vecs_cons::<uint>(None, Err(())), "None, Ok(less than one element)");
- assert_eq!(match_nested_vecs_cons::<bool>(Some(&[]), Ok(&[])), "Some(empty), Ok(empty)");
- assert_eq!(match_nested_vecs_cons(Some(&[1i]), Err(())), "Some(non-empty), any");
- assert_eq!(match_nested_vecs_cons(Some(&[(42i, ())]), Ok(&[(1i, ())])), "Some(non-empty), any");
+ assert_eq!(match_nested_vecs_cons::<bool>(Some::<&[_]>(&[]), Ok::<&[_], ()>(&[])),
+ "Some(empty), Ok(empty)");
+ assert_eq!(match_nested_vecs_cons(Some::<&[_]>(&[1i]), Err(())), "Some(non-empty), any");
+ assert_eq!(match_nested_vecs_cons(Some::<&[_]>(&[(42i, ())]), Ok::<&[_], ()>(&[(1i, ())])),
+ "Some(non-empty), any");
- assert_eq!(match_nested_vecs_snoc(None, Ok(&[4u, 2u])), "None, Ok(at least two elements)");
+ assert_eq!(match_nested_vecs_snoc(None, Ok::<&[_], ()>(&[4u, 2u])),
+ "None, Ok(at least two elements)");
assert_eq!(match_nested_vecs_snoc::<uint>(None, Err(())), "None, Ok(less than one element)");
- assert_eq!(match_nested_vecs_snoc::<bool>(Some(&[]), Ok(&[])), "Some(empty), Ok(empty)");
- assert_eq!(match_nested_vecs_snoc(Some(&[1i]), Err(())), "Some(non-empty), any");
- assert_eq!(match_nested_vecs_snoc(Some(&[(42i, ())]), Ok(&[(1i, ())])), "Some(non-empty), any");
+ assert_eq!(match_nested_vecs_snoc::<bool>(Some::<&[_]>(&[]), Ok::<&[_], ()>(&[])),
+ "Some(empty), Ok(empty)");
+ assert_eq!(match_nested_vecs_snoc(Some::<&[_]>(&[1i]), Err(())), "Some(non-empty), any");
+ assert_eq!(match_nested_vecs_snoc(Some::<&[_]>(&[(42i, ())]), Ok::<&[_], ()>(&[(1i, ())])),
+ "Some(non-empty), any");
}
}
}
unsafe {
- assert_eq!(&[1, 2, 3], ORDER.as_slice());
+ let expected: &[_] = &[1, 2, 3];
+ assert_eq!(expected, ORDER.as_slice());
}
}
}
fn e() {
- match &[1i, 2, 3] {
+ let x: &[int] = &[1i, 2, 3];
+ match x {
[1, 2] => (),
[..] => ()
}
// except according to those terms.
pub fn main() {
- assert_eq!((vec!(0, 1)).to_string(), "[0, 1]".to_string());
+ assert_eq!((vec!(0i, 1)).to_string(), "[0, 1]".to_string());
- let foo = vec!(3, 4);
+ let foo = vec!(3i, 4);
let bar: &[int] = &[4, 5];
assert_eq!(foo.to_string(), "[3, 4]".to_string());