}
}
-pub fn with_insn_ctxt<F>(blk: F) where
- F: FnOnce(&[&'static str]),
+pub fn with_insn_ctxt<F>(blk: F)
+ where F: FnOnce(&[&'static str])
{
TASK_LOCAL_INSN_KEY.with(move |slot| {
slot.borrow().as_ref().map(move |s| blk(s));
}
pub struct _InsnCtxt {
- _cannot_construct_outside_of_this_module: ()
+ _cannot_construct_outside_of_this_module: (),
}
impl Drop for _InsnCtxt {
fn drop(&mut self) {
TASK_LOCAL_INSN_KEY.with(|slot| {
match slot.borrow_mut().as_mut() {
- Some(ctx) => { ctx.pop(); }
+ Some(ctx) => {
+ ctx.pop();
+ }
None => {}
}
})
None => {}
}
});
- _InsnCtxt { _cannot_construct_outside_of_this_module: () }
+ _InsnCtxt {
+ _cannot_construct_outside_of_this_module: (),
+ }
}
pub struct StatRecorder<'a, 'tcx: 'a> {
}
impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
- pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
- -> StatRecorder<'a, 'tcx> {
+ pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
let istart = ccx.stats().n_llvm_insns.get();
StatRecorder {
ccx: ccx,
fn drop(&mut self) {
if self.ccx.sess().trans_stats() {
let iend = self.ccx.stats().n_llvm_insns.get();
- self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
- iend - self.istart));
+ self.ccx
+ .stats()
+ .fn_stats
+ .borrow_mut()
+ .push((self.name.take().unwrap(), iend - self.istart));
self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
// Reset LLVM insn count to avoid compound costs.
self.ccx.stats().n_llvm_insns.set(self.istart);
}
}
-fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_ty: Ty<'tcx>,
- name: &str, did: DefId) -> ValueRef {
+fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ fn_ty: Ty<'tcx>,
+ name: &str,
+ did: DefId)
+ -> ValueRef {
match ccx.externs().borrow().get(name) {
Some(n) => return *n,
- None => ()
+ None => (),
}
let f = declare::declare_rust_fn(ccx, name, fn_ty);
pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_id: DefId,
fn_ty: Ty<'tcx>)
- -> Ty<'tcx>
-{
+ -> Ty<'tcx> {
let closure_kind = ccx.tcx().closure_kind(closure_id);
match closure_kind {
ty::FnClosureKind => {
ty::FnMutClosureKind => {
ccx.tcx().mk_mut_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
- ty::FnOnceClosureKind => fn_ty
+ ty::FnOnceClosureKind => fn_ty,
}
}
*ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
}
-pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: DefId,
- t: Ty<'tcx>) -> ValueRef {
+pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ did: DefId,
+ t: Ty<'tcx>)
+ -> ValueRef {
let name = csearch::get_symbol(&ccx.sess().cstore, did);
let ty = type_of(ccx, t);
match ccx.externs().borrow_mut().get(&name) {
Some(n) => return *n,
- None => ()
+ None => (),
}
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
// FIXME(nagisa): investigate whether it can be changed into define_global
return c;
}
-fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- info_ty: Ty<'tcx>, it: LangItem) -> DefId {
+fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
match bcx.tcx().lang_items.require(it) {
Ok(id) => id,
Err(s) => {
// Allocate space:
let r = callee::trans_lang_call(bcx,
- require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
- &[size, align],
- None,
- debug_loc);
+ require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
+ &[size, align],
+ None,
+ debug_loc);
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
-pub fn bin_op_to_icmp_predicate(ccx: &CrateContext, op: hir::BinOp_, signed: bool)
+pub fn bin_op_to_icmp_predicate(ccx: &CrateContext,
+ op: hir::BinOp_,
+ signed: bool)
-> llvm::IntPredicate {
match op {
hir::BiEq => llvm::IntEQ,
hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
op => {
- ccx.sess().bug(&format!("comparison_op_to_icmp_predicate: expected \
- comparison operator, found {:?}", op));
+ ccx.sess()
+ .bug(&format!("comparison_op_to_icmp_predicate: expected comparison operator, \
+ found {:?}",
+ op));
}
}
}
-pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: hir::BinOp_)
- -> llvm::RealPredicate {
+pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: hir::BinOp_) -> llvm::RealPredicate {
match op {
hir::BiEq => llvm::RealOEQ,
hir::BiNe => llvm::RealUNE,
hir::BiGt => llvm::RealOGT,
hir::BiGe => llvm::RealOGE,
op => {
- ccx.sess().bug(&format!("comparison_op_to_fcmp_predicate: expected \
- comparison operator, found {:?}", op));
+ ccx.sess()
+ .bug(&format!("comparison_op_to_fcmp_predicate: expected comparison operator, \
+ found {:?}",
+ op));
}
}
}
hir::BiLe => (llvm::IntULE, llvm::IntULT),
hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
- _ => unreachable!()
+ _ => unreachable!(),
};
let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true),
hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false),
// refinements would be nice
- _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator")
+ _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator"),
}
}
ty::TyBareFn(..) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
- ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
+ ICmp(bcx,
+ bin_op_to_icmp_predicate(bcx.ccx(), op, false),
+ lhs,
+ rhs,
+ debug_loc)
}
ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
- ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
+ ICmp(bcx,
+ bin_op_to_icmp_predicate(bcx.ccx(), op, false),
+ lhs,
+ rhs,
+ debug_loc)
}
ty::TyRawPtr(_) => {
let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR]));
let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR]));
let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA]));
compare_fat_ptrs(bcx,
- lhs_addr, lhs_extra,
- rhs_addr, rhs_extra,
- t, op, debug_loc)
+ lhs_addr,
+ lhs_extra,
+ rhs_addr,
+ rhs_extra,
+ t,
+ op,
+ debug_loc)
}
ty::TyInt(_) => {
- ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
+ ICmp(bcx,
+ bin_op_to_icmp_predicate(bcx.ccx(), op, true),
+ lhs,
+ rhs,
+ debug_loc)
}
ty::TyFloat(_) => {
- FCmp(bcx, bin_op_to_fcmp_predicate(bcx.ccx(), op), lhs, rhs, debug_loc)
+ FCmp(bcx,
+ bin_op_to_fcmp_predicate(bcx.ccx(), op),
+ lhs,
+ rhs,
+ debug_loc)
}
// Should never get here, because t is scalar.
- _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types")
+ _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types"),
}
}
av: ValueRef,
t: Ty<'tcx>,
mut f: F)
- -> Block<'blk, 'tcx> where
- F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+ -> Block<'blk, 'tcx>
+ where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
{
let _icx = push_ctxt("iter_structural_ty");
variant: ty::VariantDef<'tcx>,
substs: &Substs<'tcx>,
f: &mut F)
- -> Block<'blk, 'tcx> where
- F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
+ -> Block<'blk, 'tcx>
+ where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
{
let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
- cx = f(cx, adt::trans_field_ptr(cx, repr, av, variant.disr_val, i), arg);
+ cx = f(cx,
+ adt::trans_field_ptr(cx, repr, av, variant.disr_val, i),
+ arg);
}
return cx;
}
let mut cx = cx;
match t.sty {
- ty::TyStruct(..) => {
- let repr = adt::represent_type(cx.ccx(), t);
- let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
- for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);
-
- let val = if common::type_is_sized(cx.tcx(), field_ty) {
- llfld_a
- } else {
- let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
- Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
- Store(cx, info.unwrap(), expr::get_meta(cx, scratch.val));
- scratch.val
- };
- cx = f(cx, val, field_ty);
- }
- }
- ty::TyClosure(_, ref substs) => {
- let repr = adt::represent_type(cx.ccx(), t);
- for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
- let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
- cx = f(cx, llupvar, upvar_ty);
- }
- }
- ty::TyArray(_, n) => {
- let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
- let unit_ty = t.sequence_element_type(cx.tcx());
- cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
- }
- ty::TySlice(_) | ty::TyStr => {
- let unit_ty = t.sequence_element_type(cx.tcx());
- cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
- }
- ty::TyTuple(ref args) => {
- let repr = adt::represent_type(cx.ccx(), t);
- for (i, arg) in args.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
- cx = f(cx, llfld_a, *arg);
- }
- }
- ty::TyEnum(en, substs) => {
- let fcx = cx.fcx;
- let ccx = fcx.ccx;
-
- let repr = adt::represent_type(ccx, t);
- let n_variants = en.variants.len();
-
- // NB: we must hit the discriminant first so that structural
- // comparison know not to proceed when the discriminants differ.
-
- match adt::trans_switch(cx, &*repr, av) {
- (_match::Single, None) => {
- if n_variants != 0 {
- assert!(n_variants == 1);
- cx = iter_variant(cx, &*repr, av, &en.variants[0],
- substs, &mut f);
- }
- }
- (_match::Switch, Some(lldiscrim_a)) => {
- cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
-
- // Create a fall-through basic block for the "else" case of
- // the switch instruction we're about to generate. Note that
- // we do **not** use an Unreachable instruction here, even
- // though most of the time this basic block will never be hit.
- //
- // When an enum is dropped it's contents are currently
- // overwritten to DTOR_DONE, which means the discriminant
- // could have changed value to something not within the actual
- // range of the discriminant. Currently this function is only
- // used for drop glue so in this case we just return quickly
- // from the outer function, and any other use case will only
- // call this for an already-valid enum in which case the `ret
- // void` will never be hit.
- let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
- RetVoid(ret_void_cx, DebugLoc::None);
- let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb,
- n_variants);
- let next_cx = fcx.new_temp_block("enum-iter-next");
-
- for variant in &en.variants {
- let variant_cx =
- fcx.new_temp_block(
- &format!("enum-iter-variant-{}",
- &variant.disr_val.to_string())
- );
- let case_val = adt::trans_case(cx, &*repr, variant.disr_val);
- AddCase(llswitch, case_val, variant_cx.llbb);
- let variant_cx =
- iter_variant(variant_cx,
- &*repr,
- data_ptr,
- variant,
- substs,
- &mut f);
- Br(variant_cx, next_cx.llbb, DebugLoc::None);
- }
- cx = next_cx;
- }
- _ => ccx.sess().unimpl("value from adt::trans_switch \
- in iter_structural_ty")
- }
- }
- _ => {
- cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
- }
+ ty::TyStruct(..) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
+ for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
+ let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);
+
+ let val = if common::type_is_sized(cx.tcx(), field_ty) {
+ llfld_a
+ } else {
+ let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
+ Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
+ Store(cx, info.unwrap(), expr::get_meta(cx, scratch.val));
+ scratch.val
+ };
+ cx = f(cx, val, field_ty);
+ }
+ }
+ ty::TyClosure(_, ref substs) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
+ let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
+ cx = f(cx, llupvar, upvar_ty);
+ }
+ }
+ ty::TyArray(_, n) => {
+ let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
+ let unit_ty = t.sequence_element_type(cx.tcx());
+ cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
+ }
+ ty::TySlice(_) | ty::TyStr => {
+ let unit_ty = t.sequence_element_type(cx.tcx());
+ cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
+ }
+ ty::TyTuple(ref args) => {
+ let repr = adt::represent_type(cx.ccx(), t);
+ for (i, arg) in args.iter().enumerate() {
+ let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
+ cx = f(cx, llfld_a, *arg);
+ }
+ }
+ ty::TyEnum(en, substs) => {
+ let fcx = cx.fcx;
+ let ccx = fcx.ccx;
+
+ let repr = adt::represent_type(ccx, t);
+ let n_variants = en.variants.len();
+
+ // NB: we must hit the discriminant first so that structural
+ // comparison know not to proceed when the discriminants differ.
+
+ match adt::trans_switch(cx, &*repr, av) {
+ (_match::Single, None) => {
+ if n_variants != 0 {
+ assert!(n_variants == 1);
+ cx = iter_variant(cx, &*repr, av, &en.variants[0], substs, &mut f);
+ }
+ }
+ (_match::Switch, Some(lldiscrim_a)) => {
+ cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
+
+ // Create a fall-through basic block for the "else" case of
+ // the switch instruction we're about to generate. Note that
+ // we do **not** use an Unreachable instruction here, even
+ // though most of the time this basic block will never be hit.
+ //
+ // When an enum is dropped it's contents are currently
+ // overwritten to DTOR_DONE, which means the discriminant
+ // could have changed value to something not within the actual
+ // range of the discriminant. Currently this function is only
+ // used for drop glue so in this case we just return quickly
+ // from the outer function, and any other use case will only
+ // call this for an already-valid enum in which case the `ret
+ // void` will never be hit.
+ let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
+ RetVoid(ret_void_cx, DebugLoc::None);
+ let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
+ let next_cx = fcx.new_temp_block("enum-iter-next");
+
+ for variant in &en.variants {
+ let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}",
+ &variant.disr_val
+ .to_string()));
+ let case_val = adt::trans_case(cx, &*repr, variant.disr_val);
+ AddCase(llswitch, case_val, variant_cx.llbb);
+ let variant_cx = iter_variant(variant_cx,
+ &*repr,
+ data_ptr,
+ variant,
+ substs,
+ &mut f);
+ Br(variant_cx, next_cx.llbb, DebugLoc::None);
+ }
+ cx = next_cx;
+ }
+ _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"),
+ }
+ }
+ _ => {
+ cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
+ }
}
return cx;
}
// Note that we preserve binding levels here:
let substs = principal.0.substs.with_self_ty(source).erase_regions();
let substs = ccx.tcx().mk_substs(substs);
- let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
- substs: substs });
+ let trait_ref = ty::Binder(ty::TraitRef {
+ def_id: principal.def_id(),
+ substs: substs,
+ });
consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
Type::vtable_ptr(ccx))
}
_ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
- target))
+ target)),
}
}
(PointerCast(bcx, src, ptr_ty),
unsized_info(bcx.ccx(), a, b, None, bcx.fcx.param_substs))
}
- _ => bcx.sess().bug(
- &format!("unsize_thin_ptr: called on bad types"))
+ _ => bcx.sess().bug(&format!("unsize_thin_ptr: called on bad types")),
}
}
let src_repr = adt::represent_type(bcx.ccx(), src_ty);
let src_fields = match &*src_repr {
&adt::Repr::Univariant(ref s, _) => &s.fields,
- _ => bcx.sess().bug("struct has non-univariant repr")
+ _ => bcx.sess().bug("struct has non-univariant repr"),
};
let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
let dst_fields = match &*dst_repr {
&adt::Repr::Univariant(ref s, _) => &s.fields,
- _ => bcx.sess().bug("struct has non-univariant repr")
+ _ => bcx.sess().bug("struct has non-univariant repr"),
};
let iter = src_fields.iter().zip(dst_fields).enumerate();
for (i, (src_fty, dst_fty)) in iter {
- if type_is_zero_size(bcx.ccx(), dst_fty) { continue; }
+ if type_is_zero_size(bcx.ccx(), dst_fty) {
+ continue;
+ }
let src_f = adt::trans_field_ptr(bcx, &src_repr, src, 0, i);
let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, 0, i);
if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty);
} else {
- coerce_unsized_into(
- bcx,
- src_f, src_fty,
- dst_f, dst_fty
- );
+ coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
}
}
}
_ => bcx.sess().bug(&format!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
- dst_ty))
+ dst_ty)),
}
}
-pub fn cast_shift_expr_rhs(cx: Block,
- op: hir::BinOp_,
- lhs: ValueRef,
- rhs: ValueRef)
- -> ValueRef {
- cast_shift_rhs(op, lhs, rhs,
- |a,b| Trunc(cx, a, b),
- |a,b| ZExt(cx, a, b))
+pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+ cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
}
-pub fn cast_shift_const_rhs(op: hir::BinOp_,
- lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- cast_shift_rhs(op, lhs, rhs,
+pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+ cast_shift_rhs(op,
+ lhs,
+ rhs,
|a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
|a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
}
rhs: ValueRef,
trunc: F,
zext: G)
- -> ValueRef where
- F: FnOnce(ValueRef, Type) -> ValueRef,
- G: FnOnce(ValueRef, Type) -> ValueRef,
+ -> ValueRef
+ where F: FnOnce(ValueRef, Type) -> ValueRef,
+ G: FnOnce(ValueRef, Type) -> ValueRef
{
// Shifts may have any size int on the rhs
if rustc_front::util::is_shift_binop(op) {
let mut rhs_llty = val_ty(rhs);
let mut lhs_llty = val_ty(lhs);
- if rhs_llty.kind() == Vector { rhs_llty = rhs_llty.element_type() }
- if lhs_llty.kind() == Vector { lhs_llty = lhs_llty.element_type() }
+ if rhs_llty.kind() == Vector {
+ rhs_llty = rhs_llty.element_type()
+ }
+ if lhs_llty.kind() == Vector {
+ lhs_llty = lhs_llty.element_type()
+ }
let rhs_sz = rhs_llty.int_width();
let lhs_sz = lhs_llty.int_width();
if lhs_sz < rhs_sz {
}
pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- val_t: Ty<'tcx>) -> (Type, u64) {
+ val_t: Ty<'tcx>)
+ -> (Type, u64) {
match val_t.sty {
ty::TyInt(t) => {
let llty = Type::int_from_ty(cx.ccx(), t);
}
}
-pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
- cx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- divrem: hir::BinOp,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
+pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ call_info: NodeIdAndSpan,
+ divrem: hir::BinOp,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: Ty<'tcx>)
+ -> Block<'blk, 'tcx> {
let (zero_text, overflow_text) = if divrem.node == hir::BiDiv {
("attempted to divide by zero",
"attempted to divide with overflow")
}
ty::TyStruct(def, _) if def.is_simd() => {
let mut res = C_bool(cx.ccx(), false);
- for i in 0 .. rhs_t.simd_size(cx.tcx()) {
- res = Or(cx, res,
- IsNull(cx,
- ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), debug_loc);
+ for i in 0..rhs_t.simd_size(cx.tcx()) {
+ res = Or(cx,
+ res,
+ IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))),
+ debug_loc);
}
(res, false)
}
// integers, no action beyond checking for zero need be taken.
if is_signed {
let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
- let minus_one = ICmp(bcx, llvm::IntEQ, rhs,
- C_integral(llty, !0, false), debug_loc);
+ let minus_one = ICmp(bcx,
+ llvm::IntEQ,
+ rhs,
+ C_integral(llty, !0, false),
+ debug_loc);
with_cond(bcx, minus_one, |bcx| {
- let is_min = ICmp(bcx, llvm::IntEQ, lhs,
- C_integral(llty, min, true), debug_loc);
+ let is_min = ICmp(bcx,
+ llvm::IntEQ,
+ lhs,
+ C_integral(llty, min, true),
+ debug_loc);
with_cond(bcx, is_min, |bcx| {
- controlflow::trans_fail(bcx,
- call_info,
- InternedString::new(overflow_text))
+ controlflow::trans_fail(bcx, call_info, InternedString::new(overflow_text))
})
})
} else {
}
pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- did: DefId, t: Ty<'tcx>) -> ValueRef {
+ did: DefId,
+ t: Ty<'tcx>)
+ -> ValueRef {
let name = csearch::get_symbol(&ccx.sess().cstore, did);
match t.sty {
ty::TyBareFn(_, ref fn_ty) => {
debug!("arg: {}", bcx.val_to_string(llarg));
}
- let llresult = Call(bcx,
- llfn,
- &llargs[..],
- Some(attributes),
- debug_loc);
+ let llresult = Call(bcx, llfn, &llargs[..], Some(attributes), debug_loc);
return (llresult, bcx);
}
}
bcx.fcx.needs_invoke()
}
-pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- v: ValueRef, t: Ty<'tcx>) -> ValueRef {
+pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef {
let _icx = push_ctxt("load_if_immediate");
- if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
+ if type_is_immediate(cx.ccx(), t) {
+ return load_ty(cx, v, t);
+ }
return v;
}
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
-pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
return C_undef(type_of::type_of(cx.ccx(), t));
}
}
}
- let val = if t.is_bool() {
+ let val = if t.is_bool() {
LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
} else if t.is_char() {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
- } else if (t.is_region_ptr() || t.is_unique())
- && !common::type_is_fat_ptr(cx.tcx(), t) {
- LoadNonNull(cx, ptr)
+ } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(cx.tcx(), t) {
+ LoadNonNull(cx, ptr)
} else {
Load(cx, ptr)
};
}
debug!("store_ty: {} : {:?} <- {}",
- cx.val_to_string(dst), t,
+ cx.val_to_string(dst),
+ t,
cx.val_to_string(v));
if common::type_is_fat_ptr(cx.tcx(), t) {
- Store(cx, ExtractValue(cx, v, abi::FAT_PTR_ADDR), expr::get_dataptr(cx, dst));
- Store(cx, ExtractValue(cx, v, abi::FAT_PTR_EXTRA), expr::get_meta(cx, dst));
+ Store(cx,
+ ExtractValue(cx, v, abi::FAT_PTR_ADDR),
+ expr::get_dataptr(cx, dst));
+ Store(cx,
+ ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
+ expr::get_meta(cx, dst));
} else {
let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
unsafe {
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
src: ValueRef,
- _ty: Ty<'tcx>) -> (ValueRef, ValueRef)
-{
+ _ty: Ty<'tcx>)
+ -> (ValueRef, ValueRef) {
// FIXME: emit metadata
(Load(cx, expr::get_dataptr(cx, src)),
Load(cx, expr::get_meta(cx, src)))
}
}
-pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local)
- -> Block<'blk, 'tcx> {
+pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> {
debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
let _indenter = indenter();
let _icx = push_ctxt("init_local");
common::BlockS::new(llbb, is_lpad, None, fcx)
}
-pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- f: F)
- -> Block<'blk, 'tcx> where
- F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>,
+pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
+ where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
{
let _icx = push_ctxt("with_cond");
let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
- Call(cx, lifetime_start, &[C_u64(ccx, size), ptr], None, DebugLoc::None);
+ Call(cx,
+ lifetime_start,
+ &[C_u64(ccx, size), ptr],
+ None,
+ DebugLoc::None);
}
pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
- Call(cx, lifetime_end, &[C_u64(ccx, size), ptr], None, DebugLoc::None);
+ Call(cx,
+ lifetime_end,
+ &[C_u64(ccx, size), ptr],
+ None,
+ DebugLoc::None);
}
// Generates code for resumption of unwind at the end of a landing pad.
let size = IntCast(cx, n_bytes, ccx.int_type());
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
- Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None, DebugLoc::None);
+ Call(cx,
+ memcpy,
+ &[dst_ptr, src_ptr, size, align, volatile],
+ None,
+ DebugLoc::None);
}
-pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- dst: ValueRef, src: ValueRef,
- t: Ty<'tcx>) {
+pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
let _icx = push_ctxt("memcpy_ty");
let ccx = bcx.ccx();
}
pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
- if cx.unreachable.get() { return; }
+ if cx.unreachable.get() {
+ return;
+ }
let _icx = push_ctxt("drop_done_fill_mem");
let bcx = cx;
memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
}
pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
- if cx.unreachable.get() { return; }
+ if cx.unreachable.get() {
+ return;
+ }
let _icx = push_ctxt("init_zero_mem");
let bcx = cx;
memfill(&B(bcx), llptr, t, 0);
let size = machine::llsize_of(ccx, llty);
let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
let volatile = C_bool(ccx, false);
- b.call(llintrinsicfn, &[llptr, llzeroval, size, align, volatile], None);
+ b.call(llintrinsicfn,
+ &[llptr, llzeroval, size, align, volatile],
+ None);
}
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> ValueRef {
// Creates the alloca slot which holds the pointer to the slot for the final return value
pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
- output_type: Ty<'tcx>) -> ValueRef {
+ output_type: Ty<'tcx>)
+ -> ValueRef {
let lloutputtype = type_of::type_of(fcx.ccx, output_type);
// We create an alloca to hold a pointer of type `output_type`
impl FindNestedReturn {
fn new() -> FindNestedReturn {
- FindNestedReturn { found: false }
+ FindNestedReturn {
+ found: false,
+ }
}
}
hir::ExprRet(..) => {
self.found = true;
}
- _ => intravisit::walk_expr(self, e)
+ _ => intravisit::walk_expr(self, e),
}
}
}
hir::ItemFn(_, _, _, _, _, ref blk) => {
blk
}
- _ => tcx.sess.bug("unexpected item variant in has_nested_returns")
+ _ => tcx.sess.bug("unexpected item variant in has_nested_returns"),
}
}
Some(hir_map::NodeTraitItem(trait_item)) => {
match trait_item.node {
hir::MethodTraitItem(_, Some(ref body)) => body,
_ => {
- tcx.sess.bug("unexpected variant: trait item other than a \
- provided method in has_nested_returns")
+ tcx.sess.bug("unexpected variant: trait item other than a provided method in \
+ has_nested_returns")
}
}
}
match impl_item.node {
hir::ImplItemKind::Method(_, ref body) => body,
_ => {
- tcx.sess.bug("unexpected variant: non-method impl item in \
- has_nested_returns")
+ tcx.sess.bug("unexpected variant: non-method impl item in has_nested_returns")
}
}
}
Some(hir_map::NodeExpr(e)) => {
match e.node {
hir::ExprClosure(_, _, ref blk) => blk,
- _ => tcx.sess.bug("unexpected expr variant in has_nested_returns")
+ _ => tcx.sess.bug("unexpected expr variant in has_nested_returns"),
}
}
Some(hir_map::NodeVariant(..)) |
None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),
_ => tcx.sess.bug(&format!("unexpected variant in has_nested_returns: {}",
- tcx.map.path_to_string(id)))
+ tcx.map.path_to_string(id))),
};
(blk.id, Some(cfg::CFG::new(tcx, blk)))
} else {
ccx.tcx().map.path_to_string(id).to_string()
},
- id, param_substs);
+ id,
+ param_substs);
let uses_outptr = match output_type {
ty::FnConverging(output_type) => {
- let substd_output_type =
- monomorphize::apply_param_substs(ccx.tcx(), param_substs, &output_type);
+ let substd_output_type = monomorphize::apply_param_substs(ccx.tcx(),
+ param_substs,
+ &output_type);
type_of::return_uses_outptr(ccx, substd_output_type)
}
- ty::FnDiverging => false
+ ty::FnDiverging => false,
};
let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
let (blk_id, cfg) = build_cfg(ccx.tcx(), id);
let mir = ccx.mir_map().get(&id);
let mut fcx = FunctionContext {
- mir: mir,
- llfn: llfndecl,
- llenv: None,
- llretslotptr: Cell::new(None),
- param_env: ccx.tcx().empty_parameter_environment(),
- alloca_insert_pt: Cell::new(None),
- llreturn: Cell::new(None),
- needs_ret_allocas: nested_returns,
- personality: Cell::new(None),
- caller_expects_out_pointer: uses_outptr,
- lllocals: RefCell::new(NodeMap()),
- llupvars: RefCell::new(NodeMap()),
- lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
- id: id,
- param_substs: param_substs,
- span: sp,
- block_arena: block_arena,
- ccx: ccx,
- debug_context: debug_context,
- scopes: RefCell::new(Vec::new()),
- cfg: cfg
+ mir: mir,
+ llfn: llfndecl,
+ llenv: None,
+ llretslotptr: Cell::new(None),
+ param_env: ccx.tcx().empty_parameter_environment(),
+ alloca_insert_pt: Cell::new(None),
+ llreturn: Cell::new(None),
+ needs_ret_allocas: nested_returns,
+ personality: Cell::new(None),
+ caller_expects_out_pointer: uses_outptr,
+ lllocals: RefCell::new(NodeMap()),
+ llupvars: RefCell::new(NodeMap()),
+ lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
+ id: id,
+ param_substs: param_substs,
+ span: sp,
+ block_arena: block_arena,
+ ccx: ccx,
+ debug_context: debug_context,
+ scopes: RefCell::new(Vec::new()),
+ cfg: cfg,
};
if has_env {
// - new_fn_ctxt
// - trans_args
-pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>)
- -> datum::Rvalue {
+pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>) -> datum::Rvalue {
use trans::datum::{ByRef, ByValue};
datum::Rvalue {
let mut idx = fcx.arg_offset() as c_uint;
for (i, &arg_ty) in arg_tys.iter().enumerate() {
let arg_datum = if !has_tupled_arg || i < arg_tys.len() - 1 {
- if type_of::arg_is_indirect(bcx.ccx(), arg_ty)
- && bcx.sess().opts.debuginfo != FullDebugInfo {
+ if type_of::arg_is_indirect(bcx.ccx(), arg_ty) &&
+ bcx.sess().opts.debuginfo != FullDebugInfo {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
bcx.fcx.schedule_lifetime_end(arg_scope_id, llarg);
bcx.fcx.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);
- datum::Datum::new(llarg, arg_ty, datum::Lvalue::new("create_datum_for_fn_args"))
+ datum::Datum::new(llarg,
+ arg_ty,
+ datum::Lvalue::new("create_datum_for_fn_args"))
} else if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
let data = get_param(fcx.llfn, idx);
let extra = get_param(fcx.llfn, idx + 1);
let llarg = get_param(fcx.llfn, idx);
idx += 1;
let tmp = datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty));
- unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
- arg_scope_id, tmp,
- |tmp, bcx, dst| tmp.store_to(bcx, dst)))
+ unpack_datum!(bcx,
+ datum::lvalue_scratch_datum(bcx,
+ arg_ty,
+ "",
+ arg_scope_id,
+ tmp,
+ |tmp, bcx, dst| tmp.store_to(bcx, dst)))
}
} else {
// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
}))
}
_ => {
- bcx.tcx().sess.bug("last argument of a function with \
- `rust-call` ABI isn't a tuple?!")
+ bcx.tcx()
+ .sess
+ .bug("last argument of a function with `rust-call` ABI isn't a tuple?!")
}
}
};
}
raw_block(fcx, false, llreturn)
}
- None => last_bcx
+ None => last_bcx,
};
// This shouldn't need to recompute the return type,
Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location)
}
}
- }
+ },
}
}
let _icx = push_ctxt("trans_closure");
attributes::emit_uwtable(llfndecl, true);
- debug!("trans_closure(..., param_substs={:?})",
- param_substs);
+ debug!("trans_closure(..., param_substs={:?})", param_substs);
let has_env = match closure_env {
closure::ClosureEnv::Closure(..) => true,
}
// cleanup scope for the incoming arguments
- let fn_cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, fn_ast_id, body.span, true);
+ let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(ccx,
+ fn_ast_id,
+ body.span,
+ true);
let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
let block_ty = node_id_type(bcx, body.id);
// Set up arguments to the function.
- let monomorphized_arg_types =
- decl.inputs.iter()
- .map(|arg| node_id_type(bcx, arg.id))
- .collect::<Vec<_>>();
+ let monomorphized_arg_types = decl.inputs
+ .iter()
+ .map(|arg| node_id_type(bcx, arg.id))
+ .collect::<Vec<_>>();
for monomorphized_arg_type in &monomorphized_arg_types {
debug!("trans_closure: monomorphized_arg_type: {:?}",
monomorphized_arg_type);
let has_tupled_arg = match closure_env {
closure::ClosureEnv::NotClosure => abi == RustCall,
- _ => false
+ _ => false,
};
- bcx = create_datums_for_fn_args(bcx, &decl.inputs, &monomorphized_arg_types,
- has_tupled_arg, arg_scope);
+ bcx = create_datums_for_fn_args(bcx,
+ &decl.inputs,
+ &monomorphized_arg_types,
+ has_tupled_arg,
+ arg_scope);
bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));
}
}
- let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id,
- fn_cleanup_debug_loc.span);
+ let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id, fn_cleanup_debug_loc.span);
// Insert the mandatory first few basic blocks before lltop.
finish_fn(&fcx, bcx, output_type, ret_debug_loc);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
let output_type = sig.output;
let abi = fn_ty.fn_abi();
- trans_closure(ccx, decl, body, llfndecl, param_substs, id, attrs, output_type, abi,
+ trans_closure(ccx,
+ decl,
+ body,
+ llfndecl,
+ param_substs,
+ id,
+ attrs,
+ output_type,
+ abi,
closure::ClosureEnv::NotClosure);
}
llfndecl: ValueRef) {
let _icx = push_ctxt("trans_enum_variant");
- trans_enum_variant_or_tuple_like_struct(
- ccx,
- ctor_id,
- disr,
- param_substs,
- llfndecl);
+ trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, disr, param_substs, llfndecl);
}
pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
expr::SaveIn(llresult),
debug_loc);
}
- _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
+ _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor"),
}
} else {
// Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
bcx = expr::trans_into(bcx, expr, expr::Ignore);
}
}
- _ => ()
+ _ => (),
}
}
llfndecl: ValueRef) {
let _icx = push_ctxt("trans_tuple_struct");
- trans_enum_variant_or_tuple_like_struct(
- ccx,
- ctor_id,
- 0,
- param_substs,
- llfndecl);
+ trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, 0, param_substs, llfndecl);
}
fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
- fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty,
- param_substs, None, &arena);
+ fcx = new_fn_ctxt(ccx,
+ llfndecl,
+ ctor_id,
+ false,
+ result_ty,
+ param_substs,
+ None,
+ &arena);
let bcx = init_function(&fcx, false, result_ty);
assert!(!fcx.needs_ret_allocas);
let repr = adt::represent_type(ccx, result_ty.unwrap());
let mut llarg_idx = fcx.arg_offset() as c_uint;
for (i, arg_ty) in arg_tys.into_iter().enumerate() {
- let lldestptr = adt::trans_field_ptr(bcx,
- &*repr,
- dest,
- disr,
- i);
+ let lldestptr = adt::trans_field_ptr(bcx, &*repr, dest, disr, i);
if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
- Store(bcx, get_param(fcx.llfn, llarg_idx), expr::get_dataptr(bcx, lldestptr));
- Store(bcx, get_param(fcx.llfn, llarg_idx + 1), expr::get_meta(bcx, lldestptr));
+ Store(bcx,
+ get_param(fcx.llfn, llarg_idx),
+ expr::get_dataptr(bcx, lldestptr));
+ Store(bcx,
+ get_param(fcx.llfn, llarg_idx + 1),
+ expr::get_meta(bcx, lldestptr));
llarg_idx += 2;
} else {
let arg = get_param(fcx.llfn, llarg_idx);
if is_allow && !print_info {
// we're not interested in anything here
- return
+ return;
}
let ty = ccx.tcx().node_id_to_type(id);
match *avar {
adt::General(..) => {
for (i, var) in enum_def.variants.iter().enumerate() {
- ccx.tcx().sess.span_note(var.span,
- &*format!("variant data: {} bytes", sizes[i]));
+ ccx.tcx()
+ .sess
+ .span_note(var.span, &*format!("variant data: {} bytes", sizes[i]));
}
}
_ => {}
if !is_allow && largest > slargest * 3 && slargest > 0 {
// Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
// pass for the latter already ran.
- lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
- *lvlsrc.unwrap(), Some(sp),
- &format!("enum variant is more than three times larger \
- ({} bytes) than the next largest (ignoring padding)",
- largest));
+ lint::raw_emit_lint(&ccx.tcx().sess,
+ lint::builtin::VARIANT_SIZE_DIFFERENCES,
+ *lvlsrc.unwrap(),
+ Some(sp),
+ &format!("enum variant is more than three times larger ({} bytes) \
+ than the next largest (ignoring padding)",
+ largest));
ccx.sess().span_note(enum_def.variants[largest_index].span,
"this variant is the largest");
}
fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &hir::Item) {
- match attr::first_attr_value_str_by_name(&i.attrs,
- "link_section") {
+ match attr::first_attr_value_str_by_name(&i.attrs, "link_section") {
Some(sect) => {
if contains_null(&sect) {
- ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`",
- &sect));
+ ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", &sect));
}
unsafe {
let buf = CString::new(sect.as_bytes()).unwrap();
let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
match item.node {
- hir::ItemFn(ref decl, _, _, abi, ref generics, ref body) => {
- if !generics.is_type_parameterized() {
- let trans_everywhere = attr::requests_inline(&item.attrs);
- // Ignore `trans_everywhere` for cross-crate inlined items
- // (`from_external`). `trans_item` will be called once for each
- // compilation unit that references the item, so it will still get
- // translated everywhere it's needed.
- for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
- let llfn = get_item_val(ccx, item.id);
- let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
- if abi != Rust {
- foreign::trans_rust_fn_with_foreign_abi(ccx, &**decl, &**body, &item.attrs,
- llfn, empty_substs, item.id, None);
- } else {
- trans_fn(ccx, &**decl, &**body, llfn, empty_substs, item.id, &item.attrs);
- }
- set_global_section(ccx, llfn, item);
- update_linkage(ccx, llfn, Some(item.id),
- if is_origin { OriginalTranslation } else { InlinedCopy });
-
- if is_entry_fn(ccx.sess(), item.id) {
- create_entry_wrapper(ccx, item.span, llfn);
- // check for the #[rustc_error] annotation, which forces an
- // error in trans. This is used to write compile-fail tests
- // that actually test that compilation succeeds without
- // reporting an error.
- let item_def_id = ccx.tcx().map.local_def_id(item.id);
- if ccx.tcx().has_attr(item_def_id, "rustc_error") {
- ccx.tcx().sess.span_fatal(item.span, "compilation successful");
+ hir::ItemFn(ref decl, _, _, abi, ref generics, ref body) => {
+ if !generics.is_type_parameterized() {
+ let trans_everywhere = attr::requests_inline(&item.attrs);
+ // Ignore `trans_everywhere` for cross-crate inlined items
+ // (`from_external`). `trans_item` will be called once for each
+ // compilation unit that references the item, so it will still get
+ // translated everywhere it's needed.
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ let llfn = get_item_val(ccx, item.id);
+ let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
+ if abi != Rust {
+ foreign::trans_rust_fn_with_foreign_abi(ccx,
+ &**decl,
+ &**body,
+ &item.attrs,
+ llfn,
+ empty_substs,
+ item.id,
+ None);
+ } else {
+ trans_fn(ccx,
+ &**decl,
+ &**body,
+ llfn,
+ empty_substs,
+ item.id,
+ &item.attrs);
+ }
+ set_global_section(ccx, llfn, item);
+ update_linkage(ccx,
+ llfn,
+ Some(item.id),
+ if is_origin {
+ OriginalTranslation
+ } else {
+ InlinedCopy
+ });
+
+ if is_entry_fn(ccx.sess(), item.id) {
+ create_entry_wrapper(ccx, item.span, llfn);
+ // check for the #[rustc_error] annotation, which forces an
+ // error in trans. This is used to write compile-fail tests
+ // that actually test that compilation succeeds without
+ // reporting an error.
+ let item_def_id = ccx.tcx().map.local_def_id(item.id);
+ if ccx.tcx().has_attr(item_def_id, "rustc_error") {
+ ccx.tcx().sess.span_fatal(item.span, "compilation successful");
+ }
}
}
}
}
- }
- hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
- meth::trans_impl(ccx,
- item.name,
- &impl_items[..],
- generics,
- item.id);
- }
- hir::ItemMod(_) => {
- // modules have no equivalent at runtime, they just affect
- // the mangled names of things contained within
- }
- hir::ItemEnum(ref enum_definition, ref gens) => {
- if gens.ty_params.is_empty() {
- // sizes only make sense for non-generic types
-
- enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
- }
- }
- hir::ItemConst(..) => {
- }
- hir::ItemStatic(_, m, ref expr) => {
- let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) {
- Ok(g) => g,
- Err(err) => ccx.tcx().sess.span_fatal(expr.span, &err.description()),
- };
- set_global_section(ccx, g, item);
- update_linkage(ccx, g, Some(item.id), OriginalTranslation);
- },
- hir::ItemForeignMod(ref foreign_mod) => {
- foreign::trans_foreign_mod(ccx, foreign_mod);
- }
- hir::ItemTrait(..) => {
- }
- _ => {/* fall through */ }
+ hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
+ meth::trans_impl(ccx, item.name, &impl_items[..], generics, item.id);
+ }
+ hir::ItemMod(_) => {
+ // modules have no equivalent at runtime, they just affect
+ // the mangled names of things contained within
+ }
+ hir::ItemEnum(ref enum_definition, ref gens) => {
+ if gens.ty_params.is_empty() {
+ // sizes only make sense for non-generic types
+
+ enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
+ }
+ }
+ hir::ItemConst(..) => {}
+ hir::ItemStatic(_, m, ref expr) => {
+ let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) {
+ Ok(g) => g,
+ Err(err) => ccx.tcx().sess.span_fatal(expr.span, &err.description()),
+ };
+ set_global_section(ccx, g, item);
+ update_linkage(ccx, g, Some(item.id), OriginalTranslation);
+ }
+ hir::ItemForeignMod(ref foreign_mod) => {
+ foreign::trans_foreign_mod(ccx, foreign_mod);
+ }
+ hir::ItemTrait(..) => {}
+ _ => {
+ // fall through
+ }
}
}
sp: Span,
sym: String,
node_id: ast::NodeId,
- cc: llvm::CallConv,
- llfty: Type) -> ValueRef {
+ cc: llvm::CallConv,
+ llfty: Type)
+ -> ValueRef {
debug!("register_fn_llvmty id={} sym={}", node_id, sym);
let llfn = declare::define_fn(ccx, &sym[..], cc, llfty,
-> ValueRef {
if let ty::TyBareFn(_, ref f) = node_type.sty {
if f.abi != Rust && f.abi != RustCall {
- ccx.sess().span_bug(sp, &format!("only the `{}` or `{}` calling conventions are valid \
- for this function; `{}` was specified",
- Rust.name(), RustCall.name(), f.abi.name()));
+ ccx.sess().span_bug(sp,
+ &format!("only the `{}` or `{}` calling conventions are valid \
+ for this function; `{}` was specified",
+ Rust.name(),
+ RustCall.name(),
+ f.abi.name()));
}
} else {
ccx.sess().span_bug(sp, "expected bare rust function")
}
- let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(||{
+ let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(|| {
ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
});
finish_register_fn(ccx, sym, node_id);
pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
match *sess.entry_fn.borrow() {
Some((entry_id, _)) => node_id == entry_id,
- None => false
+ None => false,
}
}
/// Create the `main` function which will initialise the rust runtime and call users’ main
/// function.
-pub fn create_entry_wrapper(ccx: &CrateContext,
- sp: Span,
- main_llfn: ValueRef) {
+pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) {
let et = ccx.sess().entry_type.get().unwrap();
match et {
config::EntryMain => {
sp: Span,
rust_main: ValueRef,
use_start_lang_item: bool) {
- let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()],
- &ccx.int_type());
+ let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type());
- let llfn = declare::define_cfn(ccx, "main", llfty,
- ccx.tcx().mk_nil()).unwrap_or_else(||{
+ let llfn = declare::define_cfn(ccx, "main", llfty, ccx.tcx().mk_nil()).unwrap_or_else(|| {
ccx.sess().span_err(sp, "entry symbol `main` defined multiple times");
// FIXME: We should be smart and show a better diagnostic here.
ccx.sess().help("did you use #[no_mangle] on `fn main`? Use #[start] instead");
});
let llbb = unsafe {
- llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn,
- "top\0".as_ptr() as *const _)
+ llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _)
};
let bld = ccx.raw_builder();
unsafe {
let (start_fn, args) = if use_start_lang_item {
let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
Ok(id) => id,
- Err(s) => { ccx.sess().fatal(&s[..]); }
+ Err(s) => {
+ ccx.sess().fatal(&s[..]);
+ }
+ };
+ let start_fn = if let Some(start_node_id) = ccx.tcx()
+ .map
+ .as_local_node_id(start_def_id) {
+ get_item_val(ccx, start_node_id)
+ } else {
+ let start_fn_type = csearch::get_type(ccx.tcx(), start_def_id).ty;
+ trans_external_path(ccx, start_def_id, start_fn_type)
};
- let start_fn =
- if let Some(start_node_id) = ccx.tcx().map.as_local_node_id(start_def_id) {
- get_item_val(ccx, start_node_id)
- } else {
- let start_fn_type = csearch::get_type(ccx.tcx(),
- start_def_id).ty;
- trans_external_path(ccx, start_def_id, start_fn_type)
- };
let args = {
- let opaque_rust_main = llvm::LLVMBuildPointerCast(bld,
- rust_main, Type::i8p(ccx).to_ref(),
- "rust_main\0".as_ptr() as *const _);
-
- vec!(
- opaque_rust_main,
- get_param(llfn, 0),
- get_param(llfn, 1)
- )
+ let opaque_rust_main =
+ llvm::LLVMBuildPointerCast(bld,
+ rust_main,
+ Type::i8p(ccx).to_ref(),
+ "rust_main\0".as_ptr() as *const _);
+
+ vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)]
};
(start_fn, args)
} else {
debug!("using user-defined start fn");
- let args = vec!(
- get_param(llfn, 0 as c_uint),
- get_param(llfn, 1 as c_uint)
- );
+ let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)];
(rust_main, args)
};
}
}
-fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, id: ast::NodeId,
- ty: Ty<'tcx>, attrs: &[ast::Attribute]) -> String {
+fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ id: ast::NodeId,
+ ty: Ty<'tcx>,
+ attrs: &[ast::Attribute])
+ -> String {
match ccx.external_srcs().borrow().get(&id) {
Some(&did) => {
let sym = csearch::get_symbol(&ccx.sess().cstore, did);
// Create the global before evaluating the initializer;
// this is necessary to allow recursive statics.
let llty = type_of(ccx, ty);
- let g = declare::define_global(ccx, &sym[..],
- llty).unwrap_or_else(|| {
- ccx.sess().span_fatal(i.span, &format!("symbol `{}` is already defined",
- sym))
+ let g = declare::define_global(ccx, &sym[..], llty).unwrap_or_else(|| {
+ ccx.sess()
+ .span_fatal(i.span, &format!("symbol `{}` is already defined", sym))
});
ccx.item_symbols().borrow_mut().insert(i.id, sym);
llfn
}
- _ => ccx.sess().bug("get_item_val: weird result in table")
+ _ => ccx.sess().bug("get_item_val: weird result in table"),
};
v
}
_ => {
ccx.sess().span_bug(trait_item.span,
- "unexpected variant: trait item other than a provided \
- method in get_item_val()");
+ "unexpected variant: trait item other than a provided \
+ method in get_item_val()");
}
}
}
}
_ => {
ccx.sess().span_bug(impl_item.span,
- "unexpected variant: non-method impl item in \
- get_item_val()");
+ "unexpected variant: non-method impl item in \
+ get_item_val()");
}
}
}
let ty = ccx.tcx().node_id_to_type(id);
let parent = ccx.tcx().map.get_parent(id);
let enm = ccx.tcx().map.expect_item(parent);
- let sym = exported_name(ccx,
- id,
- ty,
- &enm.attrs);
+ let sym = exported_name(ccx, id, ty, &enm.attrs);
llfn = match enm.node {
hir::ItemEnum(_, _) => {
register_fn(ccx, (*v).span, sym, id, ty)
}
- _ => ccx.sess().bug("NodeVariant, shouldn't happen")
+ _ => ccx.sess().bug("NodeVariant, shouldn't happen"),
};
attributes::inline(llfn, attributes::InlineAttr::Hint);
llfn
hir_map::NodeStructCtor(struct_def) => {
// Only register the constructor if this is a tuple-like struct.
let ctor_id = if struct_def.is_struct() {
- ccx.sess().bug("attempt to register a constructor of \
- a non-tuple-like struct")
+ ccx.sess().bug("attempt to register a constructor of a non-tuple-like struct")
} else {
struct_def.id()
};
let parent = ccx.tcx().map.get_parent(id);
let struct_item = ccx.tcx().map.expect_item(parent);
let ty = ccx.tcx().node_id_to_type(ctor_id);
- let sym = exported_name(ccx,
- id,
- ty,
- &struct_item.attrs);
- let llfn = register_fn(ccx, struct_item.span,
- sym, ctor_id, ty);
+ let sym = exported_name(ccx, id, ty, &struct_item.attrs);
+ let llfn = register_fn(ccx, struct_item.span, sym, ctor_id, ty);
attributes::inline(llfn, attributes::InlineAttr::Hint);
llfn
}
ref variant => {
- ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}",
- variant))
+ ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}", variant))
}
};
val
}
-fn register_method(ccx: &CrateContext, id: ast::NodeId,
- attrs: &[ast::Attribute], span: Span) -> ValueRef {
+fn register_method(ccx: &CrateContext,
+ id: ast::NodeId,
+ attrs: &[ast::Attribute],
+ span: Span)
+ -> ValueRef {
let mty = ccx.tcx().node_id_to_type(id);
let sym = exported_name(ccx, id, mty, &attrs);
}
}
-pub fn write_metadata(cx: &SharedCrateContext, krate: &hir::Crate,
- reachable: &NodeSet) -> Vec<u8> {
+pub fn write_metadata(cx: &SharedCrateContext, krate: &hir::Crate, reachable: &NodeSet) -> Vec<u8> {
use flate;
- let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
- *ty != config::CrateTypeExecutable
- });
+ let any_library = cx.sess()
+ .crate_types
+ .borrow()
+ .iter()
+ .any(|ty| *ty != config::CrateTypeExecutable);
if !any_library {
- return Vec::new()
+ return Vec::new();
}
- let encode_inlined_item: encoder::EncodeInlinedItem =
- Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii));
+ let encode_inlined_item: encoder::EncodeInlinedItem = Box::new(|ecx, rbml_w, ii| {
+ astencode::encode_inlined_item(ecx, rbml_w, ii)
+ });
- let encode_parms = crate_ctxt_to_encode_parms(cx, encode_inlined_item,
- reachable);
+ let encode_parms = crate_ctxt_to_encode_parms(cx, encode_inlined_item, reachable);
let metadata = encoder::encode_metadata(encode_parms, krate);
let mut compressed = encoder::metadata_encoding_version.to_vec();
compressed.push_all(&flate::deflate_bytes(&metadata));
cx.link_meta().crate_hash);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
- llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(),
- buf.as_ptr())
+ llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
if !(linkage == llvm::ExternalLinkage as c_uint &&
llvm::LLVMIsDeclaration(val) != 0) &&
!(linkage == llvm::AvailableExternallyLinkage as c_uint) {
- continue
+ continue;
}
let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
- .to_bytes().to_vec();
+ .to_bytes()
+ .to_vec();
declared.insert(name);
}
}
// We only care about external definitions.
if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
llvm::LLVMIsDeclaration(val) == 0) {
- continue
+ continue;
}
let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
- .to_bytes().to_vec();
+ .to_bytes()
+ .to_vec();
if !declared.contains(&name) &&
!reachable.contains(str::from_utf8(&name).unwrap()) {
llvm::SetLinkage(val, llvm::InternalLinkage);
unsafe {
for ccx in cx.iter() {
let exported: Vec<_> = iter_globals(ccx.llmod())
- .filter(|&val| llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
- llvm::LLVMIsDeclaration(val) == 0)
- .collect();
+ .filter(|&val| {
+ llvm::LLVMGetLinkage(val) ==
+ llvm::ExternalLinkage as c_uint &&
+ llvm::LLVMIsDeclaration(val) == 0
+ })
+ .collect();
let i8p_ty = Type::i8p(&ccx);
for val in exported {
let mut imp_name = prefix.as_bytes().to_vec();
imp_name.extend(name.to_bytes());
let imp_name = CString::new(imp_name).unwrap();
- let imp = llvm::LLVMAddGlobal(ccx.llmod(), i8p_ty.to_ref(),
+ let imp = llvm::LLVMAddGlobal(ccx.llmod(),
+ i8p_ty.to_ref(),
imp_name.as_ptr() as *const _);
let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
llvm::LLVMSetInitializer(imp, init);
}
}
}
-