--- /dev/null
+// Adapted from rustc run-pass test suite
+
+#![feature(no_core, arbitrary_self_types, box_syntax)]
+#![feature(rustc_attrs)]
+
+#![feature(start, lang_items)]
+#![no_core]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+// mini_core has no std, so provide a minimal assert_eq! replacement: on a
+// mismatch it calls mini_core's panic() with the stringified comparison plus
+// file/line location (mirroring the panic payload shape used elsewhere).
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(&(stringify!($l != $r), file!(), line!(), 0));
+ }
+ }
+}
+
+// Box-backed smart pointer used to exercise arbitrary self types.
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+// Let Ptr<T> unsize-coerce to Ptr<U> (e.g. Ptr<i32> -> Ptr<dyn Trait>) and
+// make it usable as a dynamically-dispatched method receiver.
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+// Deref wrapper that stores T inline (by value), not behind a pointer.
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+// Unlike Ptr, Wrapper coerces/dispatches whenever its *inner* type does,
+// which is what allows the nested Wrapper<Ptr<...>> receivers below.
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+// Object-safe trait whose methods take nested pointer/wrapper `self` types;
+// each method is later called through a `dyn Trait` receiver in main().
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers around `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ // Each method simply derefs through every receiver layer (one `*` per
+ // Ptr/Wrapper level) to copy out the underlying i32.
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
+// no_core entry point (#[start] instead of the usual main). Each case builds
+// a concrete receiver, unsize-coerces it to its `dyn Trait` form, and checks
+// that dynamic dispatch through the wrapped receiver returns the stored value.
+#[start]
+fn main(_: isize, _: *const *const u8) -> isize {
+ let pw = Ptr(box Wrapper(5)) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
+ let wp = Wrapper(Ptr(box 6)) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
+ let wpw = Wrapper(Ptr(box Wrapper(7))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+
+ // #[start] returns the process exit code; 0 = success.
+ 0
+}
impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
// *mut T -> *mut U
impl<T: ?Sized+Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+// Box<T> -> Box<U>
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
#[lang = "receiver"]
pub trait Receiver {}
}
}
+// i32 comparison impl for mini_core; provides the `!=` used by the new
+// test's assert_eq! macro. Both eq and ne are written out explicitly.
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
impl PartialEq for isize {
fn eq(&self, other: &isize) -> bool {
(*self) == (*other)
drop_in_place(to_drop);
}
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
#[lang = "owned_box"]
pub struct Box<T: ?Sized>(*mut T);
}
}
+impl<T> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // `*` on a Box is the compiler's built-in deref (Box is the
+ // `owned_box` lang item), so this does not recurse into Deref::deref.
+ &**self
+ }
+}
+
#[lang = "exchange_malloc"]
// Make it available to jited mini_core_hello_world
// FIXME remove next line when jit supports linking rlibs
lval.write_cvalue(fx, operand.unchecked_cast_to(dest_layout));
} else {
// fat-ptr -> thin-ptr
- let (ptr, _extra) = operand.load_value_pair(fx);
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
lval.write_cvalue(fx, CValue::ByVal(ptr, dest_layout))
}
} else if let ty::Adt(adt_def, _substs) = from_ty.sty {
Offset (_) bug; // Handled above
}
} else {
- let (lhs_ptr, lhs_extra) = lhs.load_value_pair(fx);
- let (rhs_ptr, rhs_extra) = rhs.load_value_pair(fx);
+ let (lhs_ptr, lhs_extra) = lhs.load_scalar_pair(fx);
+ let (rhs_ptr, rhs_extra) = rhs.load_scalar_pair(fx);
let res = match bin_op {
BinOp::Eq => {
let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
fx.bcx.ins().load(clif_ty, MemFlags::new(), addr, 0)
}
CValue::ByVal(value, _layout) => value,
- CValue::ByValPair(_, _, _layout) => bug!("Please use load_value_pair for ByValPair"),
+ CValue::ByValPair(_, _, _layout) => bug!("Please use load_scalar_pair for ByValPair"),
}
}
- pub fn load_value_pair<'a>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> (Value, Value)
+ /// Load a value pair with layout.abi of scalar pair
+ pub fn load_scalar_pair<'a>(self, fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> (Value, Value)
where
'tcx: 'a,
{
match self {
CValue::ByRef(addr, layout) => {
- assert_eq!(
- layout.size.bytes(),
- fx.tcx.data_layout.pointer_size.bytes() * 2
+ let (a, b) = match &layout.abi {
+ layout::Abi::ScalarPair(a, b) => (a.clone(), b.clone()),
+ _ => unreachable!(),
+ };
+ let clif_ty1 = crate::abi::scalar_to_clif_type(fx.tcx, a.clone());
+ let clif_ty2 = crate::abi::scalar_to_clif_type(fx.tcx, b);
+ let val1 = fx.bcx.ins().load(clif_ty1, MemFlags::new(), addr, 0);
+ let val2 = fx.bcx.ins().load(
+ clif_ty2,
+ MemFlags::new(),
+ addr,
+ a.value.size(&fx.tcx).bytes() as i32,
);
- let val1_offset = layout.fields.offset(0).bytes() as i32;
- let val2_offset = layout.fields.offset(1).bytes() as i32;
- let val1 = fx
- .bcx
- .ins()
- .load(fx.pointer_type, MemFlags::new(), addr, val1_offset);
- let val2 = fx
- .bcx
- .ins()
- .load(fx.pointer_type, MemFlags::new(), addr, val2_offset);
(val1, val2)
}
- CValue::ByVal(_, _layout) => bug!("Please use load_value for ByVal"),
+ CValue::ByVal(_, _layout) => bug!("Please use load_scalar for ByVal"),
CValue::ByValPair(val1, val2, _layout) => (val1, val2),
}
}
size_of_val, <T> (c ptr) {
let layout = fx.layout_of(T);
let size = if layout.is_unsized() {
- let (_ptr, info) = ptr.load_value_pair(fx);
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
size
} else {
min_align_of_val, <T> (c ptr) {
let layout = fx.layout_of(T);
let align = if layout.is_unsized() {
- let (_ptr, info) = ptr.load_value_pair(fx);
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
align
} else {
{
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e., &'a fmt::Debug+Send => &'a fmt::Debug
- src.load_value_pair(fx)
+ src.load_scalar_pair(fx)
} else {
let base = src.load_scalar(fx);
unsize_thin_ptr(fx, base, src_ty, dst_ty)
arg: CValue<'tcx>,
idx: usize,
) -> (Value, Value) {
- let arg = if arg.layout().ty.is_box() {
- // Cast `Box<T>` to `*mut T` so `load_value_pair` works
- arg.unchecked_cast_to(fx.layout_of(fx.tcx.mk_mut_ptr(arg.layout().ty.boxed_ty())))
- } else {
- arg
- };
-
- let (ptr, vtable) = arg.load_value_pair(fx);
+ let (ptr, vtable) = arg.load_scalar_pair(fx);
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
let func_ref = fx.bcx.ins().load(
pointer_ty(fx.tcx),
$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin
./target/out/mini_core_hello_world abc bcd
+echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+$RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin
+./target/out/arbitrary_self_types_pointers_and_wrappers
+
echo "[BUILD] sysroot"
time ./build_sysroot/build_sysroot.sh