use back::{link, abi};
use driver::config;
use driver::config::{NoDebugInfo, FullDebugInfo};
-use driver::session::Session;
use driver::driver::{CrateAnalysis, CrateTranslation};
+use driver::session::Session;
+use lint;
+use llvm::{BasicBlockRef, ModuleRef, ValueRef, Vector, get_param};
use llvm;
-use llvm::{ModuleRef, ValueRef, BasicBlockRef};
-use llvm::{Vector};
use metadata::{csearch, encoder, loader};
-use lint;
use middle::astencode;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
-use middle::weak_lang_items;
use middle::subst;
+use middle::weak_lang_items;
use middle::subst::Subst;
use middle::trans::_match;
use middle::trans::adt;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use std::{i8, i16, i32, i64};
-use std::gc::Gc;
-use syntax::abi::{X86, X86_64, Arm, Mips, Mipsel, Rust, RustIntrinsic};
+use syntax::abi::{X86, X86_64, Arm, Mips, Mipsel, Rust, RustCall};
+use syntax::abi::{RustIntrinsic, Abi};
use syntax::ast_util::{local_def, is_local};
use syntax::attr::AttrMetaMethods;
use syntax::attr;
_ => {}
}
+ if ccx.tcx.sess.opts.cg.no_redzone {
+ unsafe {
+ llvm::LLVMAddFunctionAttribute(llfn,
+ llvm::FunctionIndex as c_uint,
+ llvm::NoRedZoneAttribute as uint64_t)
+ }
+ }
+
llvm::SetFunctionCallConv(llfn, cc);
// Function addresses in Rust are never significant, allowing functions to be merged.
llvm::SetUnnamedAddr(llfn, true);
}
pub fn decl_rust_fn(ccx: &CrateContext, fn_ty: ty::t, name: &str) -> ValueRef {
- let (inputs, output, has_env) = match ty::get(fn_ty).sty {
- ty::ty_bare_fn(ref f) => (f.sig.inputs.clone(), f.sig.output, false),
- ty::ty_closure(ref f) => (f.sig.inputs.clone(), f.sig.output, true),
+ let (inputs, output, abi, env) = match ty::get(fn_ty).sty {
+ ty::ty_bare_fn(ref f) => {
+ (f.sig.inputs.clone(), f.sig.output, f.abi, None)
+ }
+ ty::ty_closure(ref f) => {
+ (f.sig.inputs.clone(), f.sig.output, f.abi, Some(Type::i8p(ccx)))
+ }
+ ty::ty_unboxed_closure(closure_did) => {
+ let unboxed_closure_types = ccx.tcx
+ .unboxed_closure_types
+ .borrow();
+ let function_type = unboxed_closure_types.get(&closure_did);
+ let llenvironment_type = type_of(ccx, fn_ty).ptr_to();
+ (function_type.sig.inputs.clone(),
+ function_type.sig.output,
+ RustCall,
+ Some(llenvironment_type))
+ }
_ => fail!("expected closure or fn")
};
- let llfty = type_of_rust_fn(ccx, has_env, inputs.as_slice(), output);
+ let llfty = type_of_rust_fn(ccx, env, inputs.as_slice(), output, abi);
+ debug!("decl_rust_fn(input count={},type={})",
+ inputs.len(),
+ ccx.tn.type_to_string(llfty));
+
let llfn = decl_fn(ccx, name, llvm::CCallConv, llfty, output);
let attrs = get_fn_llvm_attributes(ccx, fn_ty);
- for &(idx, attr) in attrs.iter() {
- unsafe {
- llvm::LLVMAddFunctionAttribute(llfn, idx as c_uint, attr);
- }
- }
+ attrs.apply_llfn(llfn);
llfn
}
}
})
}
+ ty::ty_unboxed_closure(def_id) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id);
+ for (i, upvar) in upvars.iter().enumerate() {
+ let llupvar = adt::trans_field_ptr(cx, &*repr, av, 0, i);
+ cx = f(cx, llupvar, upvar.ty);
+ }
+ }
ty::ty_vec(_, Some(n)) => {
let unit_ty = ty::sequence_element_type(cx.tcx(), t);
let (base, len) = tvec::get_fixed_base_and_byte_len(cx, av, unit_ty, n);
ty::ty_bare_fn(ref fn_ty) => {
match fn_ty.abi.for_target(ccx.sess().targ_cfg.os,
ccx.sess().targ_cfg.arch) {
- Some(Rust) => {
+ Some(Rust) | Some(RustCall) => {
get_extern_rust_fn(ccx, t, name.as_slice(), did)
}
Some(RustIntrinsic) => {
llargs.as_slice(),
normal_bcx.llbb,
landing_pad,
- attributes.as_slice());
+ Some(attributes));
return (llresult, normal_bcx);
} else {
debug!("calling {} at {}", llfn, bcx.llbb);
None => debuginfo::clear_source_location(bcx.fcx)
};
- let llresult = Call(bcx, llfn, llargs.as_slice(), attributes.as_slice());
+ let llresult = Call(bcx, llfn, llargs.as_slice(), Some(attributes));
return (llresult, bcx);
}
}
next_cx
}
+pub fn call_lifetime_start(cx: &Block, ptr: ValueRef) {
+ if cx.sess().opts.optimize == config::No {
+ return;
+ }
+
+ let _icx = push_ctxt("lifetime_start");
+ let ccx = cx.ccx();
+
+ let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
+ let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
+ let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
+ Call(cx, lifetime_start, [llsize, ptr], None);
+}
+
+pub fn call_lifetime_end(cx: &Block, ptr: ValueRef) {
+ if cx.sess().opts.optimize == config::No {
+ return;
+ }
+
+ let _icx = push_ctxt("lifetime_end");
+ let ccx = cx.ccx();
+
+ let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
+ let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
+ let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
+ Call(cx, lifetime_end, [llsize, ptr], None);
+}
+
pub fn call_memcpy(cx: &Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
let _icx = push_ctxt("call_memcpy");
let ccx = cx.ccx();
let size = IntCast(cx, n_bytes, ccx.int_type);
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
- Call(cx, memcpy, [dst_ptr, src_ptr, size, align, volatile], []);
+ Call(cx, memcpy, [dst_ptr, src_ptr, size, align, volatile], None);
}
pub fn memcpy_ty(bcx: &Block, dst: ValueRef, src: ValueRef, t: ty::t) {
let size = machine::llsize_of(ccx, ty);
let align = C_i32(ccx, llalign_of_min(ccx, ty) as i32);
let volatile = C_bool(ccx, false);
- b.call(llintrinsicfn, [llptr, llzeroval, size, align, volatile], []);
+ b.call(llintrinsicfn, [llptr, llzeroval, size, align, volatile], None);
}
pub fn alloc_ty(bcx: &Block, t: ty::t, name: &str) -> ValueRef {
}
pub fn alloca(cx: &Block, ty: Type, name: &str) -> ValueRef {
- alloca_maybe_zeroed(cx, ty, name, false)
+ let p = alloca_no_lifetime(cx, ty, name);
+ call_lifetime_start(cx, p);
+ p
}
-pub fn alloca_maybe_zeroed(cx: &Block, ty: Type, name: &str, zero: bool) -> ValueRef {
+pub fn alloca_no_lifetime(cx: &Block, ty: Type, name: &str) -> ValueRef {
let _icx = push_ctxt("alloca");
if cx.unreachable.get() {
unsafe {
}
}
debuginfo::clear_source_location(cx.fcx);
- let p = Alloca(cx, ty, name);
- if zero {
- let b = cx.fcx.ccx.builder();
- b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
- memzero(&b, p, ty);
+ Alloca(cx, ty, name)
+}
+
+pub fn alloca_zeroed(cx: &Block, ty: Type, name: &str) -> ValueRef {
+ if cx.unreachable.get() {
+ unsafe {
+ return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
+ }
}
+ let p = alloca_no_lifetime(cx, ty, name);
+ let b = cx.fcx.ccx.builder();
+ b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
+ memzero(&b, p, ty);
p
}
}
}
debuginfo::clear_source_location(cx.fcx);
- return ArrayAlloca(cx, ty, v);
+ let p = ArrayAlloca(cx, ty, v);
+ call_lifetime_start(cx, p);
+ p
}
// Creates and returns space for, or returns the argument representing, the
// slot where the return value of the function must go.
pub fn make_return_pointer(fcx: &FunctionContext, output_type: ty::t)
-> ValueRef {
- unsafe {
- if type_of::return_uses_outptr(fcx.ccx, output_type) {
- llvm::LLVMGetParam(fcx.llfn, 0)
- } else {
- let lloutputtype = type_of::type_of(fcx.ccx, output_type);
- AllocaFcx(fcx, lloutputtype, "__make_return_pointer")
- }
+ if type_of::return_uses_outptr(fcx.ccx, output_type) {
+ get_param(fcx.llfn, 0)
+ } else {
+ let lloutputtype = type_of::type_of(fcx.ccx, output_type);
+ AllocaFcx(fcx, lloutputtype, "__make_return_pointer")
}
}
output_type: ty::t,
param_substs: &'a param_substs,
sp: Option<Span>,
- block_arena: &'a TypedArena<Block<'a>>)
+ block_arena: &'a TypedArena<Block<'a>>,
+ handle_items: HandleItemsFlag)
-> FunctionContext<'a> {
param_substs.validate();
block_arena: block_arena,
ccx: ccx,
debug_context: debug_context,
- scopes: RefCell::new(Vec::new())
+ scopes: RefCell::new(Vec::new()),
+ handle_items: handle_items,
};
if has_env {
- fcx.llenv = Some(unsafe {
- llvm::LLVMGetParam(fcx.llfn, fcx.env_arg_pos() as c_uint)
- });
+ fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint))
}
fcx
-> Vec<RvalueDatum> {
let _icx = push_ctxt("create_datums_for_fn_args");
- // Return an array wrapping the ValueRefs that we get from
- // llvm::LLVMGetParam for each argument into datums.
+ // Return an array wrapping the ValueRefs that we get from `get_param` for
+ // each argument into datums.
arg_tys.iter().enumerate().map(|(i, &arg_ty)| {
- let llarg = unsafe {
- llvm::LLVMGetParam(fcx.llfn, fcx.arg_pos(i) as c_uint)
- };
+ let llarg = get_param(fcx.llfn, fcx.arg_pos(i) as c_uint);
datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty))
}).collect()
}
+/// Creates rvalue datums for each of the incoming function arguments. For
+/// the `rust-call` ABI, the trailing formal arguments are gathered into a
+/// single tuple datum. These will later be stored into appropriate lvalue
+/// datums.
+fn create_datums_for_fn_args_under_call_abi<
+ 'a>(
+ mut bcx: &'a Block<'a>,
+ arg_scope: cleanup::CustomScopeIndex,
+ arg_tys: &[ty::t])
+ -> Vec<RvalueDatum> {
+ let mut result = Vec::new();
+ for (i, &arg_ty) in arg_tys.iter().enumerate() {
+ if i < arg_tys.len() - 1 {
+ // Regular argument.
+ let llarg = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(i) as c_uint);
+ result.push(datum::Datum::new(llarg, arg_ty, arg_kind(bcx.fcx,
+ arg_ty)));
+ continue
+ }
+
+ // This is the last argument. Tuple it.
+ match ty::get(arg_ty).sty {
+ ty::ty_tup(ref tupled_arg_tys) => {
+ let tuple_args_scope_id = cleanup::CustomScope(arg_scope);
+ let tuple =
+ unpack_datum!(bcx,
+ datum::lvalue_scratch_datum(bcx,
+ arg_ty,
+ "tupled_args",
+ false,
+ tuple_args_scope_id,
+ (),
+ |(),
+ mut bcx,
+ llval| {
+ for (j, &tupled_arg_ty) in
+ tupled_arg_tys.iter().enumerate() {
+ let llarg =
+ get_param(bcx.fcx.llfn,
+ bcx.fcx.arg_pos(i + j) as c_uint);
+ let lldest = GEPi(bcx, llval, [0, j]);
+ let datum = datum::Datum::new(
+ llarg,
+ tupled_arg_ty,
+ arg_kind(bcx.fcx, tupled_arg_ty));
+ bcx = datum.store_to(bcx, lldest);
+ }
+ bcx
+ }));
+ let tuple = unpack_datum!(bcx,
+ tuple.to_expr_datum()
+ .to_rvalue_datum(bcx,
+ "argtuple"));
+ result.push(tuple);
+ }
+ ty::ty_nil => {
+ let mode = datum::Rvalue::new(datum::ByValue);
+ result.push(datum::Datum::new(C_nil(bcx.ccx()),
+ ty::mk_nil(),
+ mode))
+ }
+ _ => {
+ bcx.tcx().sess.bug("last argument of a function with \
+ `rust-call` ABI isn't a tuple?!")
+ }
+ };
+
+ }
+
+ result
+}
+
fn copy_args_to_allocas<'a>(fcx: &FunctionContext<'a>,
arg_scope: cleanup::CustomScopeIndex,
bcx: &'a Block<'a>,
bcx
}
+fn copy_unboxed_closure_args_to_allocas<'a>(
+ mut bcx: &'a Block<'a>,
+ arg_scope: cleanup::CustomScopeIndex,
+ args: &[ast::Arg],
+ arg_datums: Vec<RvalueDatum>,
+ monomorphized_arg_types: &[ty::t])
+ -> &'a Block<'a> {
+ let _icx = push_ctxt("copy_unboxed_closure_args_to_allocas");
+ let arg_scope_id = cleanup::CustomScope(arg_scope);
+
+ assert_eq!(arg_datums.len(), 1);
+
+ let arg_datum = arg_datums.move_iter().next().unwrap();
+
+    // Untuple the single tupled argument (asserted above to be the only one).
+ let tuple_datum =
+ unpack_datum!(bcx,
+ arg_datum.to_lvalue_datum_in_scope(bcx,
+ "argtuple",
+ arg_scope_id));
+ let empty = Vec::new();
+ let untupled_arg_types = match ty::get(monomorphized_arg_types[0]).sty {
+ ty::ty_tup(ref types) => types.as_slice(),
+ ty::ty_nil => empty.as_slice(),
+ _ => {
+ bcx.tcx().sess.span_bug(args[0].pat.span,
+ "first arg to `rust-call` ABI function \
+ wasn't a tuple?!")
+ }
+ };
+ for j in range(0, args.len()) {
+ let tuple_element_type = untupled_arg_types[j];
+ let tuple_element_datum =
+ tuple_datum.get_element(tuple_element_type,
+ |llval| GEPi(bcx, llval, [0, j]));
+ let tuple_element_datum = tuple_element_datum.to_expr_datum();
+ let tuple_element_datum =
+ unpack_datum!(bcx,
+ tuple_element_datum.to_rvalue_datum(bcx,
+ "arg"));
+ bcx = _match::store_arg(bcx,
+ args[j].pat,
+ tuple_element_datum,
+ arg_scope_id);
+
+ if bcx.fcx.ccx.sess().opts.debuginfo == FullDebugInfo {
+ debuginfo::create_argument_metadata(bcx, &args[j]);
+ }
+ }
+
+ bcx
+}
+
// Ties up the llstaticallocas -> llloadenv -> lltop edges,
// and builds the return block.
pub fn finish_fn<'a>(fcx: &'a FunctionContext<'a>,
Ret(ret_cx, retval);
}
+#[deriving(Clone, Eq, PartialEq)]
+pub enum IsUnboxedClosureFlag {
+ NotUnboxedClosure,
+ IsUnboxedClosure,
+}
+
// trans_closure: Builds an LLVM function out of a source function.
// If the function closes over its environment a closure will be
// returned.
param_substs: ¶m_substs,
id: ast::NodeId,
_attributes: &[ast::Attribute],
+ arg_types: Vec<ty::t>,
output_type: ty::t,
- maybe_load_env: <'a> |&'a Block<'a>| -> &'a Block<'a>) {
+ abi: Abi,
+ has_env: bool,
+ is_unboxed_closure: IsUnboxedClosureFlag,
+ maybe_load_env: <'a> |&'a Block<'a>| -> &'a Block<'a>,
+ handle_items: HandleItemsFlag) {
ccx.stats.n_closures.set(ccx.stats.n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
debug!("trans_closure(..., param_substs={})",
param_substs.repr(ccx.tcx()));
- let has_env = match ty::get(ty::node_id_to_type(ccx.tcx(), id)).sty {
- ty::ty_closure(_) => true,
- _ => false
- };
-
let arena = TypedArena::new();
let fcx = new_fn_ctxt(ccx,
llfndecl,
output_type,
param_substs,
Some(body.span),
- &arena);
+ &arena,
+ handle_items);
let mut bcx = init_function(&fcx, false, output_type);
// cleanup scope for the incoming arguments
let block_ty = node_id_type(bcx, body.id);
// Set up arguments to the function.
- let arg_tys = ty::ty_fn_args(node_id_type(bcx, id));
- let arg_datums = create_datums_for_fn_args(&fcx, arg_tys.as_slice());
+ let monomorphized_arg_types =
+ arg_types.iter()
+ .map(|at| monomorphize_type(bcx, *at))
+ .collect::<Vec<_>>();
+ for monomorphized_arg_type in monomorphized_arg_types.iter() {
+ debug!("trans_closure: monomorphized_arg_type: {}",
+ ty_to_string(ccx.tcx(), *monomorphized_arg_type));
+ }
+ debug!("trans_closure: function lltype: {}",
+ bcx.fcx.ccx.tn.val_to_string(bcx.fcx.llfn));
+
+ let arg_datums = if abi != RustCall {
+ create_datums_for_fn_args(&fcx,
+ monomorphized_arg_types.as_slice())
+ } else {
+ create_datums_for_fn_args_under_call_abi(
+ bcx,
+ arg_scope,
+ monomorphized_arg_types.as_slice())
+ };
- bcx = copy_args_to_allocas(&fcx,
- arg_scope,
- bcx,
- decl.inputs.as_slice(),
- arg_datums);
+ bcx = match is_unboxed_closure {
+ NotUnboxedClosure => {
+ copy_args_to_allocas(&fcx,
+ arg_scope,
+ bcx,
+ decl.inputs.as_slice(),
+ arg_datums)
+ }
+ IsUnboxedClosure => {
+ copy_unboxed_closure_args_to_allocas(
+ bcx,
+ arg_scope,
+ decl.inputs.as_slice(),
+ arg_datums,
+ monomorphized_arg_types.as_slice())
+ }
+ };
bcx = maybe_load_env(bcx);
llfndecl: ValueRef,
param_substs: ¶m_substs,
id: ast::NodeId,
- attrs: &[ast::Attribute]) {
+ attrs: &[ast::Attribute],
+ handle_items: HandleItemsFlag) {
let _s = StatRecorder::new(ccx, ccx.tcx.map.path_to_string(id).to_string());
debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
let _icx = push_ctxt("trans_fn");
- let output_type = ty::ty_fn_ret(ty::node_id_to_type(ccx.tcx(), id));
- trans_closure(ccx, decl, body, llfndecl,
- param_substs, id, attrs, output_type, |bcx| bcx);
+ let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
+ let arg_types = ty::ty_fn_args(fn_ty);
+ let output_type = ty::ty_fn_ret(fn_ty);
+ let abi = ty::ty_fn_abi(fn_ty);
+ trans_closure(ccx,
+ decl,
+ body,
+ llfndecl,
+ param_substs,
+ id,
+ attrs,
+ arg_types,
+ output_type,
+ abi,
+ false,
+ NotUnboxedClosure,
+ |bcx| bcx,
+ handle_items);
}
pub fn trans_enum_variant(ccx: &CrateContext,
llfndecl);
}
+pub fn trans_named_tuple_constructor<'a>(mut bcx: &'a Block<'a>,
+ ctor_ty: ty::t,
+ disr: ty::Disr,
+ args: callee::CallArgs,
+ dest: expr::Dest) -> Result<'a> {
+
+ let ccx = bcx.fcx.ccx;
+ let tcx = &ccx.tcx;
+
+ let result_ty = match ty::get(ctor_ty).sty {
+ ty::ty_bare_fn(ref bft) => bft.sig.output,
+ _ => ccx.sess().bug(
+ format!("trans_enum_variant_constructor: \
+ unexpected ctor return type {}",
+ ctor_ty.repr(tcx)).as_slice())
+ };
+
+ // Get location to store the result. If the user does not care about
+ // the result, just make a stack slot
+ let llresult = match dest {
+ expr::SaveIn(d) => d,
+ expr::Ignore => {
+ if !type_is_zero_size(ccx, result_ty) {
+ alloc_ty(bcx, result_ty, "constructor_result")
+ } else {
+ C_undef(type_of::type_of(ccx, result_ty))
+ }
+ }
+ };
+
+ if !type_is_zero_size(ccx, result_ty) {
+ let repr = adt::represent_type(ccx, result_ty);
+
+ match args {
+ callee::ArgExprs(exprs) => {
+ let fields = exprs.iter().map(|x| *x).enumerate().collect::<Vec<_>>();
+ bcx = expr::trans_adt(bcx, &*repr, disr, fields.as_slice(),
+ None, expr::SaveIn(llresult));
+ }
+ _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
+ }
+ }
+
+ // If the caller doesn't care about the result
+ // drop the temporary we made
+ let bcx = match dest {
+ expr::SaveIn(_) => bcx,
+ expr::Ignore => glue::drop_ty(bcx, llresult, result_ty)
+ };
+
+ Result::new(bcx, llresult)
+}
+
pub fn trans_tuple_struct(ccx: &CrateContext,
_fields: &[ast::StructField],
ctor_id: ast::NodeId,
let arena = TypedArena::new();
let fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty,
- param_substs, None, &arena);
+ param_substs, None, &arena, TranslateItems);
let bcx = init_function(&fcx, false, result_ty);
let arg_tys = ty::ty_fn_args(ctor_ty);
if !type_is_zero_size(fcx.ccx, result_ty) {
let repr = adt::represent_type(ccx, result_ty);
- adt::trans_start_init(bcx, &*repr, fcx.llretptr.get().unwrap(), disr);
for (i, arg_datum) in arg_datums.move_iter().enumerate() {
let lldestptr = adt::trans_field_ptr(bcx,
&*repr,
i);
arg_datum.store_to(bcx, lldestptr);
}
+ adt::trans_set_discr(bcx, &*repr, fcx.llretptr.get().unwrap(), disr);
}
finish_fn(&fcx, bcx, result_ty);
}
-fn trans_enum_def(ccx: &CrateContext, enum_definition: &ast::EnumDef,
- sp: Span, id: ast::NodeId, vi: &[Rc<ty::VariantInfo>],
- i: &mut uint) {
- for variant in enum_definition.variants.iter() {
- let disr_val = vi[*i].disr_val;
- *i += 1;
-
- match variant.node.kind {
- ast::TupleVariantKind(ref args) if args.len() > 0 => {
- let llfn = get_item_val(ccx, variant.node.id);
- trans_enum_variant(ccx, id, &**variant, args.as_slice(),
- disr_val, ¶m_substs::empty(), llfn);
- }
- ast::TupleVariantKind(_) => {
- // Nothing to do.
- }
- ast::StructVariantKind(struct_def) => {
- trans_struct_def(ccx, struct_def);
- }
- }
- }
-
- enum_variant_size_lint(ccx, enum_definition, sp, id);
-}
-
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
let avar = adt::represent_type(ccx, ty::node_id_to_type(ccx.tcx(), id));
match *avar {
- adt::General(_, ref variants) => {
+ adt::General(_, ref variants, _) => {
for var in variants.iter() {
let mut size = 0;
for field in var.fields.iter().skip(1) {
let _icx = push_ctxt("trans_item");
match item.node {
ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
- if abi != Rust {
+ if abi != Rust {
let llfndecl = get_item_val(ccx, item.id);
foreign::trans_rust_fn_with_foreign_abi(
ccx, &**decl, &**body, item.attrs.as_slice(), llfndecl, item.id);
llfn,
¶m_substs::empty(),
item.id,
- item.attrs.as_slice());
+ item.attrs.as_slice(),
+ TranslateItems);
} else {
// Be sure to travel more than just one layer deep to catch nested
// items in blocks and such.
ast::ItemMod(ref m) => {
trans_mod(ccx, m);
}
- ast::ItemEnum(ref enum_definition, ref generics) => {
- if !generics.is_type_parameterized() {
- let vi = ty::enum_variants(ccx.tcx(), local_def(item.id));
- let mut i = 0;
- trans_enum_def(ccx, enum_definition, item.span, item.id, vi.as_slice(), &mut i);
- }
+ ast::ItemEnum(ref enum_definition, _) => {
+ enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
}
ast::ItemStatic(_, m, ref expr) => {
// Recurse on the expression to catch items in blocks
ast::ItemForeignMod(ref foreign_mod) => {
foreign::trans_foreign_mod(ccx, foreign_mod);
}
- ast::ItemStruct(struct_def, ref generics) => {
- if !generics.is_type_parameterized() {
- trans_struct_def(ccx, struct_def);
- }
- }
ast::ItemTrait(..) => {
// Inside of this trait definition, we won't be actually translating any
// functions, but the trait still needs to be walked. Otherwise default
}
}
-pub fn trans_struct_def(ccx: &CrateContext, struct_def: Gc<ast::StructDef>) {
- // If this is a tuple-like struct, translate the constructor.
- match struct_def.ctor_id {
- // We only need to translate a constructor if there are fields;
- // otherwise this is a unit-like struct.
- Some(ctor_id) if struct_def.fields.len() > 0 => {
- let llfndecl = get_item_val(ccx, ctor_id);
- trans_tuple_struct(ccx, struct_def.fields.as_slice(),
- ctor_id, ¶m_substs::empty(), llfndecl);
- }
- Some(_) | None => {}
- }
-}
-
// Translate a module. Doing this amounts to translating the items in the
// module; there ends up being no artifact (aside from linkage names) of
// separate modules in the compiled program. That's because modules exist
-> ValueRef {
match ty::get(node_type).sty {
ty::ty_bare_fn(ref f) => {
- assert!(f.abi == Rust);
+ assert!(f.abi == Rust || f.abi == RustCall);
}
_ => fail!("expected bare rust fn")
};
llfn
}
-pub fn get_fn_llvm_attributes(ccx: &CrateContext, fn_ty: ty::t) -> Vec<(uint, u64)> {
+pub fn get_fn_llvm_attributes(ccx: &CrateContext, fn_ty: ty::t)
+ -> llvm::AttrBuilder {
use middle::ty::{BrAnon, ReLateBound};
- let (fn_sig, has_env) = match ty::get(fn_ty).sty {
- ty::ty_closure(ref f) => (f.sig.clone(), true),
- ty::ty_bare_fn(ref f) => (f.sig.clone(), false),
+ let (fn_sig, abi, has_env) = match ty::get(fn_ty).sty {
+ ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
+ ty::ty_bare_fn(ref f) => (f.sig.clone(), f.abi, false),
+ ty::ty_unboxed_closure(closure_did) => {
+ let unboxed_closure_types = ccx.tcx
+ .unboxed_closure_types
+ .borrow();
+ let function_type = unboxed_closure_types.get(&closure_did);
+ (function_type.sig.clone(), RustCall, true)
+ }
_ => fail!("expected closure or function.")
};
// Since index 0 is the return value of the llvm func, we start
// at either 1 or 2 depending on whether there's an env slot or not
let mut first_arg_offset = if has_env { 2 } else { 1 };
- let mut attrs = Vec::new();
+ let mut attrs = llvm::AttrBuilder::new();
let ret_ty = fn_sig.output;
+ // These have an odd calling convention, so we skip them for now.
+ //
+ // FIXME(pcwalton): We don't have to skip them; just untuple the result.
+ if abi == RustCall {
+ return attrs;
+ }
+
// A function pointer is called without the declaration
// available, so we have to apply any attributes with ABI
// implications directly to the call instruction. Right now,
// the only attribute we need to worry about is `sret`.
if type_of::return_uses_outptr(ccx, ret_ty) {
- attrs.push((1, llvm::StructRetAttribute as u64));
+ let llret_sz = llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));
// The outptr can be noalias and nocapture because it's entirely
- // invisible to the program. We can also mark it as nonnull
- attrs.push((1, llvm::NoAliasAttribute as u64));
- attrs.push((1, llvm::NoCaptureAttribute as u64));
- attrs.push((1, llvm::NonNullAttribute as u64));
+ // invisible to the program. We also know it's nonnull as well
+ // as how many bytes we can dereference
+ attrs.arg(1, llvm::StructRetAttribute)
+ .arg(1, llvm::NoAliasAttribute)
+ .arg(1, llvm::NoCaptureAttribute)
+ .arg(1, llvm::DereferenceableAttribute(llret_sz));
// Add one more since there's an outptr
first_arg_offset += 1;
ty::ty_str | ty::ty_vec(..) | ty::ty_trait(..) => true, _ => false
} => {}
ty::ty_uniq(_) => {
- attrs.push((llvm::ReturnIndex as uint, llvm::NoAliasAttribute as u64));
+ attrs.ret(llvm::NoAliasAttribute);
}
_ => {}
}
- // We can also mark the return value as `nonnull` in certain cases
+ // We can also mark the return value as `dereferenceable` in certain cases
match ty::get(ret_ty).sty {
// These are not really pointers but pairs, (pointer, len)
ty::ty_uniq(it) |
ty::ty_rptr(_, ty::mt { ty: it, .. }) if match ty::get(it).sty {
ty::ty_str | ty::ty_vec(..) | ty::ty_trait(..) => true, _ => false
} => {}
- ty::ty_uniq(_) | ty::ty_rptr(_, _) => {
- attrs.push((llvm::ReturnIndex as uint, llvm::NonNullAttribute as u64));
+ ty::ty_uniq(inner) | ty::ty_rptr(_, ty::mt { ty: inner, .. }) => {
+ let llret_sz = llsize_of_real(ccx, type_of::type_of(ccx, inner));
+ attrs.ret(llvm::DereferenceableAttribute(llret_sz));
}
_ => {}
}
match ty::get(ret_ty).sty {
ty::ty_bool => {
- attrs.push((llvm::ReturnIndex as uint, llvm::ZExtAttribute as u64));
+ attrs.ret(llvm::ZExtAttribute);
}
_ => {}
}
match ty::get(t).sty {
// this needs to be first to prevent fat pointers from falling through
_ if !type_is_immediate(ccx, t) => {
+ let llarg_sz = llsize_of_real(ccx, type_of::type_of(ccx, t));
+
// For non-immediate arguments the callee gets its own copy of
// the value on the stack, so there are no aliases. It's also
// program-invisible so can't possibly capture
- attrs.push((idx, llvm::NoAliasAttribute as u64));
- attrs.push((idx, llvm::NoCaptureAttribute as u64));
- attrs.push((idx, llvm::NonNullAttribute as u64));
+ attrs.arg(idx, llvm::NoAliasAttribute)
+ .arg(idx, llvm::NoCaptureAttribute)
+ .arg(idx, llvm::DereferenceableAttribute(llarg_sz));
}
+
ty::ty_bool => {
- attrs.push((idx, llvm::ZExtAttribute as u64));
+ attrs.arg(idx, llvm::ZExtAttribute);
}
+
// `~` pointer parameters never alias because ownership is transferred
- ty::ty_uniq(_) => {
- attrs.push((idx, llvm::NoAliasAttribute as u64));
- attrs.push((idx, llvm::NonNullAttribute as u64));
+ ty::ty_uniq(inner) => {
+ let llsz = llsize_of_real(ccx, type_of::type_of(ccx, inner));
+
+ attrs.arg(idx, llvm::NoAliasAttribute)
+ .arg(idx, llvm::DereferenceableAttribute(llsz));
+ }
+
+ // The visit glue deals only with opaque pointers so we don't
+ // actually know the concrete type of Self thus we don't know how
+ // many bytes to mark as dereferenceable so instead we just mark
+ // it as nonnull which still holds true
+ ty::ty_rptr(b, ty::mt { ty: it, mutbl }) if match ty::get(it).sty {
+ ty::ty_param(_) => true, _ => false
+ } && mutbl == ast::MutMutable => {
+ attrs.arg(idx, llvm::NoAliasAttribute)
+ .arg(idx, llvm::NonNullAttribute);
+
+ match b {
+ ReLateBound(_, BrAnon(_)) => {
+ attrs.arg(idx, llvm::NoCaptureAttribute);
+ }
+ _ => {}
+ }
}
+
// `&mut` pointer parameters never alias other parameters, or mutable global data
- ty::ty_rptr(b, mt) if mt.mutbl == ast::MutMutable => {
- attrs.push((idx, llvm::NoAliasAttribute as u64));
- attrs.push((idx, llvm::NonNullAttribute as u64));
+ // `&` pointer parameters never alias either (for LLVM's purposes) as long as the
+ // interior is safe
+ ty::ty_rptr(b, mt) if mt.mutbl == ast::MutMutable ||
+ !ty::type_contents(ccx.tcx(), mt.ty).interior_unsafe() => {
+
+ let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
+ attrs.arg(idx, llvm::NoAliasAttribute)
+ .arg(idx, llvm::DereferenceableAttribute(llsz));
+
match b {
ReLateBound(_, BrAnon(_)) => {
- attrs.push((idx, llvm::NoCaptureAttribute as u64));
+ attrs.arg(idx, llvm::NoCaptureAttribute);
}
_ => {}
}
}
+
// When a reference in an argument has no named lifetime, it's impossible for that
// reference to escape this function (returned or stored beyond the call by a closure).
- ty::ty_rptr(ReLateBound(_, BrAnon(_)), _) => {
- attrs.push((idx, llvm::NoCaptureAttribute as u64));
- attrs.push((idx, llvm::NonNullAttribute as u64));
+ ty::ty_rptr(ReLateBound(_, BrAnon(_)), mt) => {
+ let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
+ attrs.arg(idx, llvm::NoCaptureAttribute)
+ .arg(idx, llvm::DereferenceableAttribute(llsz));
}
- // & pointer parameters are never null
- ty::ty_rptr(_, _) => {
- attrs.push((idx, llvm::NonNullAttribute as u64));
+
+ // & pointer parameters are also never null and we know exactly how
+ // many bytes we can dereference
+ ty::ty_rptr(_, mt) => {
+ let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
+ attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
}
_ => ()
}
vec!(
opaque_rust_main,
- llvm::LLVMGetParam(llfn, 0),
- llvm::LLVMGetParam(llfn, 1)
+ get_param(llfn, 0),
+ get_param(llfn, 1)
)
};
(start_fn, args)
} else {
debug!("using user-defined start fn");
let args = vec!(
- llvm::LLVMGetParam(llfn, 0 as c_uint),
- llvm::LLVMGetParam(llfn, 1 as c_uint)
+ get_param(llfn, 0 as c_uint),
+ get_param(llfn, 1 as c_uint)
);
(rust_main, args)