-#![feature(no_core)]
+#![feature(no_core, unboxed_closures)]
#![no_core]
#![allow(dead_code)]
})(0u8, 42u16)
}
+// Zero-sized closure-like type used to exercise the "rust-call" ABI:
+// calls receive a 1-tuple `(&&[u16],)` that codegen must spread into
+// individual ABI-level arguments (see the spread_arg handling below).
+struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16], )> for IsNotEmpty {
+ type Output = bool;
+
+ #[inline]
+ extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16], )) -> bool {
+ // By-value call simply defers to the by-mut-ref implementation below.
+ self.call_mut(arg)
+ }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16], )> for IsNotEmpty {
+ #[inline]
+ // `_arg` instead of `arg`: the tuple is intentionally unused (only the
+ // calling convention is under test), so avoid an unused-variable warning —
+ // `#![allow(dead_code)]` does not cover `unused_variables`.
+ extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16], )) -> bool {
+ true
+ }
+}
+
+/// Trivial `char` equality helper; kept monomorphic so it lowers to a plain
+/// two-argument scalar comparison in the generated code.
fn eq_char(a: char, b: char) -> bool {
a == b
}
#[lang = "fn_once"]
#[rustc_paren_sugar]
-trait FnOnce<Args> {
+pub trait FnOnce<Args> {
type Output;
extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
#[lang = "fn_mut"]
#[rustc_paren_sugar]
-trait FnMut<Args> : FnOnce<Args> {
+pub trait FnMut<Args> : FnOnce<Args> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
}
)
}
Abi::System => bug!("system abi should be selected elsewhere"),
- // TODO: properly implement intrinsics
Abi::RustIntrinsic => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
_ => unimplemented!("unsupported abi {:?}", sig.abi),
};
offset: None,
}); // Dummy stack slot for debugging
+ // Distinguishes how a MIR argument local arrives at the ABI level:
+ // either as a single Cranelift EBB parameter, or — for the "rust-call"
+ // spread argument — as one EBB parameter per tuple element that must be
+ // reassembled into a tuple local.
+ enum ArgKind {
+ Normal(Value),
+ Spread(Vec<Value>),
+ }
+
let func_params = fx.mir.args_iter().map(|local| {
- let layout = fx.layout_of(fx.mir.local_decls[local].ty);
+ let arg_ty = fx.mir.local_decls[local].ty;
+
+ // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+ if Some(local) == fx.mir.spread_arg {
+ // This argument (e.g. the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual function arguments.
+
+ let tupled_arg_tys = match arg_ty.sty {
+ ty::TyTuple(ref tys) => tys,
+ _ => bug!("spread argument isn't a tuple?!")
+ };
+
+ let mut ebb_params = Vec::new();
+ for arg_ty in tupled_arg_tys.iter() {
+ let cton_type = fx.cton_type(arg_ty).unwrap_or(types::I64);
+ ebb_params.push(fx.bcx.append_ebb_param(start_ebb, cton_type));
+ }
+
+ (local, ArgKind::Spread(ebb_params), arg_ty)
+ } else {
+ let cton_type = fx.cton_type(arg_ty).unwrap_or(types::I64);
+ (local, ArgKind::Normal(fx.bcx.append_ebb_param(start_ebb, cton_type)), arg_ty)
+ }
+ }).collect::<Vec<(Local, ArgKind, Ty)>>();
+
+ let ret_layout = fx.layout_of(fx.return_type());
+ fx.local_map.insert(RETURN_PLACE, CPlace::Addr(ret_param, ret_layout));
+
+ for (local, arg_kind, ty) in func_params {
+ let layout = fx.layout_of(ty);
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: layout.size.bytes() as u32,
offset: None,
});
- let ty = fx.mir.local_decls[local].ty;
- let cton_type = fx.cton_type(ty).unwrap_or(types::I64);
- (local, fx.bcx.append_ebb_param(start_ebb, cton_type), ty, stack_slot)
- }).collect::<Vec<(Local, Value, Ty, StackSlot)>>();
-
- let ret_layout = fx.layout_of(fx.return_type());
- fx.local_map.insert(RETURN_PLACE, CPlace::Addr(ret_param, ret_layout));
- for (local, ebb_param, ty, stack_slot) in func_params {
let place = CPlace::from_stack_slot(fx, stack_slot, ty);
- if fx.cton_type(ty).is_some() {
- place.write_cvalue(fx, CValue::ByVal(ebb_param, place.layout()));
- } else {
- place.write_cvalue(fx, CValue::ByRef(ebb_param, place.layout()));
+
+ match arg_kind {
+ ArgKind::Normal(ebb_param) => {
+ if fx.cton_type(ty).is_some() {
+ place.write_cvalue(fx, CValue::ByVal(ebb_param, place.layout()));
+ } else {
+ place.write_cvalue(fx, CValue::ByRef(ebb_param, place.layout()));
+ }
+ }
+ ArgKind::Spread(ebb_params) => {
+ for (i, ebb_param) in ebb_params.into_iter().enumerate() {
+ let sub_place = place.place_field(fx, mir::Field::new(i));
+ if fx.cton_type(sub_place.layout().ty).is_some() {
+ sub_place.write_cvalue(fx, CValue::ByVal(ebb_param, sub_place.layout()));
+ } else {
+ sub_place.write_cvalue(fx, CValue::ByRef(ebb_param, sub_place.layout()));
+ }
+ }
+ }
}
fx.local_map.insert(local, place);
}
},
_ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
}
+ println!("{:?} {:?}", pack_arg.layout().ty, args.iter().map(|a|a.layout().ty).collect::<Vec<_>>());
args
} else {
args
let usize_layout = fx.layout_of(fx.tcx.types.usize);
let ret = return_place.unwrap();
match intrinsic {
- "copy" => {
+ "copy" | "copy_nonoverlapping" => {
/*let elem_ty = substs.type_at(0);
assert_eq!(args.len(), 3);
let src = args[0];
use std::fmt;
-use syntax::ast::{IntTy, UintTy};
+use syntax::ast::{IntTy, UintTy, FloatTy};
use rustc_target::spec::{HasTargetSpec, Target};
use cranelift_module::{Module, FuncId, DataId};
}
}
TypeVariants::TyChar => types::I32,
+ TypeVariants::TyFloat(size) => {
+ match size {
+ FloatTy::F32 => types::I32,
+ FloatTy::F64 => types::I64,
+ }
+ }
TypeVariants::TyFnPtr(_) => types::I64,
TypeVariants::TyRawPtr(TypeAndMut { ty, mutbl: _ }) | TypeVariants::TyRef(_, ty, _) => {
if ty.is_sized(tcx.at(DUMMY_SP), ParamEnv::reveal_all()) {
})
}
+// Computes the address and layout of `field` within an aggregate whose base
+// address is `base` and whose layout is `layout`. Shared by
+// `CValue::value_field` and `CPlace::place_field` so the offset arithmetic
+// lives in one place.
+fn codegen_field<'a, 'tcx: 'a>(
+ fx: &mut FunctionCx<'a, 'tcx>,
+ base: Value,
+ layout: TyLayout<'tcx>,
+ field: mir::Field
+) -> (Value, TyLayout<'tcx>) {
+ let field_offset = layout.fields.offset(field.index());
+ let field_ty = layout.field(&*fx, field.index());
+ if field_offset.bytes() > 0 {
+ // Non-zero offset: materialize it as an I64 constant and add it to the
+ // base pointer.
+ let field_offset = fx.bcx.ins().iconst(types::I64, field_offset.bytes() as i64);
+ (fx.bcx.ins().iadd(base, field_offset), field_ty)
+ } else {
+ // Offset 0: reuse the base pointer directly, skipping a redundant iadd.
+ (base, field_ty)
+ }
+}
+
/// A read-only value
#[derive(Debug, Copy, Clone)]
pub enum CValue<'tcx> {
}
+ /// Projects `field` out of this value, returning it as a new
+ /// `CValue::ByRef` at the field's offset. Only `ByRef` values are
+ /// supported; any other variant hits the `bug!` below.
pub fn value_field<'a>(self, fx: &mut FunctionCx<'a, 'tcx>, field: mir::Field) -> CValue<'tcx> where 'tcx: 'a {
- use rustc::ty::util::IntTypeExt;
-
let (base, layout) = match self {
CValue::ByRef(addr, layout) => (addr, layout),
+ // NOTE(review): the message says "place_field" but this is
+ // value_field — consider correcting the bug! text in a follow-up.
_ => bug!("place_field for {:?}", self),
};
- let field_offset = layout.fields.offset(field.index());
- let field_layout = if field.index() == 0 {
- fx.layout_of(if let ty::TyAdt(adt_def, _) = layout.ty.sty {
- adt_def.repr.discr_type().to_ty(fx.tcx)
- } else {
- // This can only be `0`, for now, so `u8` will suffice.
- fx.tcx.types.u8
- })
- } else {
- layout.field(&*fx, field.index())
- };
- if field_offset.bytes() > 0 {
- let field_offset = fx.bcx.ins().iconst(types::I64, field_offset.bytes() as i64);
- CValue::ByRef(fx.bcx.ins().iadd(base, field_offset), field_layout)
- } else {
- CValue::ByRef(base, field_layout)
- }
+
+ // Offset arithmetic is shared with CPlace::place_field via codegen_field.
+ let (field_ptr, field_layout) = codegen_field(fx, base, layout, field);
+ CValue::ByRef(field_ptr, field_layout)
}
pub fn const_val<'a>(fx: &mut FunctionCx<'a, 'tcx>, ty: Ty<'tcx>, const_val: i64) -> CValue<'tcx> where 'tcx: 'a {
+ /// Projects `field` of this place, returning a new `CPlace::Addr`
+ /// pointing at the field. `expect_addr` rules out non-address-based
+ /// places before the offset is computed.
pub fn place_field(self, fx: &mut FunctionCx<'a, 'tcx>, field: mir::Field) -> CPlace<'tcx> {
let base = self.expect_addr();
let layout = self.layout();
- let field_offset = layout.fields.offset(field.index());
- let field_ty = layout.field(&*fx, field.index());
- if field_offset.bytes() > 0 {
- let field_offset = fx.bcx.ins().iconst(types::I64, field_offset.bytes() as i64);
- CPlace::Addr(fx.bcx.ins().iadd(base, field_offset), field_ty)
- } else {
- CPlace::Addr(base, field_ty)
- }
+
+ // Offset arithmetic is shared with CValue::value_field via codegen_field.
+ let (field_ptr, field_layout) = codegen_field(fx, base, layout, field);
+ CPlace::Addr(field_ptr, field_layout)
}
pub fn unchecked_cast_to(self, layout: TyLayout<'tcx>) -> Self {