use trans::common::*;
use trans::consts;
use trans::datum::*;
+use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::expr::{self, Dest};
use trans::tvec;
use trans::type_of;
-use trans::debuginfo;
use middle::ty::{self, Ty};
use session::config::FullDebugInfo;
use util::common::indenter;
let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
- let slice_len = Sub(bcx, len, slice_len_offset);
+ let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
let slice_ty = ty::mk_slice(bcx.tcx(),
bcx.tcx().mk_region(ty::ReStatic),
ty::mt {ty: vt.unit_ty, mutbl: ast::MutImmutable});
elems.extend(range(0, before).map(|i| GEPi(bcx, base, &[i])));
elems.extend(range(0, after).rev().map(|i| {
InBoundsGEP(bcx, base, &[
- Sub(bcx, len, C_uint(bcx.ccx(), i + 1))
+ Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None)
])
}));
ExtractedBlock { vals: elems, bcx: bcx }
Infallible =>
panic!("attempted to panic in a non-panicking panic handler!"),
JumpToBasicBlock(basic_block) =>
- Br(bcx, basic_block),
+ Br(bcx, basic_block, DebugLoc::None),
Unreachable =>
build::Unreachable(bcx)
}
}
}
- with_cond(bcx, Not(bcx, val), |bcx| {
+ with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| {
// Guard does not match: remove all bindings from the lllocals table
for (_, &binding_info) in data.bindings_map.iter() {
call_lifetime_end(bcx, binding_info.llmatch);
}
_ => ()
}
- Br(bcx, data.bodycx.llbb);
+ Br(bcx, data.bodycx.llbb, DebugLoc::None);
}
}
}
if !exhaustive || i + 1 < len {
opt_cx = bcx.fcx.new_temp_block("match_case");
match kind {
- Single => Br(bcx, opt_cx.llbb),
+ Single => Br(bcx, opt_cx.llbb, DebugLoc::None),
Switch => {
match opt.trans(bcx) {
SingleResult(r) => {
compare_scalar_types(
bcx, test_val, vend,
t, ast::BiLe);
- Result::new(bcx, And(bcx, llge, llle))
+ Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
}
LowerBound(Result { bcx, val }) => {
compare_scalar_types(bcx, test_val, val, t, ast::BiGe)
if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
branch_chk = Some(JumpToBasicBlock(bcx.llbb));
}
- CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb);
+ CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, DebugLoc::None);
}
_ => ()
}
} else if kind == Compare || kind == CompareSliceLength {
- Br(bcx, else_cx.llbb);
+ Br(bcx, else_cx.llbb, DebugLoc::None);
}
let mut size = 0u;
// Compile the fall-through case, if any
if !exhaustive && kind != Single {
if kind == Compare || kind == CompareSliceLength {
- Br(bcx, else_cx.llbb);
+ Br(bcx, else_cx.llbb, DebugLoc::None);
}
match chk {
// If there is only one default arm left, move on to the next
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum;
+use trans::debuginfo::DebugLoc;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
variant_cx = f(variant_cx, case, variant_value);
- Br(variant_cx, bcx_next.llbb);
+ Br(variant_cx, bcx_next.llbb, DebugLoc::None);
}
bcx_next
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use trans::common::{CrateContext, ExternMap, FunctionContext};
-use trans::common::{NodeInfo, Result};
+use trans::common::Result;
use trans::common::{node_id_type, return_type_is_void};
use trans::common::{tydesc_info, type_is_immediate};
use trans::common::{type_is_zero_size, val_ty};
use trans::context::SharedCrateContext;
use trans::controlflow;
use trans::datum;
-use trans::debuginfo;
+use trans::debuginfo::{self, DebugLoc};
use trans::expr;
use trans::foreign;
use trans::glue;
&**variant,
substs,
&mut f);
- Br(variant_cx, next_cx.llbb);
+ Br(variant_cx, next_cx.llbb, DebugLoc::None);
}
cx = next_cx;
}
llfn: ValueRef,
llargs: &[ValueRef],
fn_ty: Ty<'tcx>,
- call_info: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> (ValueRef, Block<'blk, 'tcx>) {
let _icx = push_ctxt("invoke_");
if bcx.unreachable.get() {
let normal_bcx = bcx.fcx.new_temp_block("normal-return");
let landing_pad = bcx.fcx.get_landing_pad();
- match call_info {
- Some(info) => debuginfo::set_source_location(bcx.fcx, info.id, info.span),
- None => debuginfo::clear_source_location(bcx.fcx)
- };
-
let llresult = Invoke(bcx,
llfn,
&llargs[],
normal_bcx.llbb,
landing_pad,
- Some(attributes));
+ Some(attributes),
+ debug_loc);
return (llresult, normal_bcx);
} else {
debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
debug!("arg: {}", bcx.val_to_string(llarg));
}
- match call_info {
- Some(info) => debuginfo::set_source_location(bcx.fcx, info.id, info.span),
- None => debuginfo::clear_source_location(bcx.fcx)
- };
-
- let llresult = Call(bcx, llfn, &llargs[], Some(attributes));
+ let llresult = Call(bcx,
+ llfn,
+ &llargs[],
+ Some(attributes),
+ debug_loc);
return (llresult, bcx);
}
}
let fcx = bcx.fcx;
let next_cx = fcx.new_temp_block("next");
let cond_cx = fcx.new_temp_block("cond");
- CondBr(bcx, val, cond_cx.llbb, next_cx.llbb);
+ CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
let after_cx = f(cond_cx);
if !after_cx.terminated.get() {
- Br(after_cx, next_cx.llbb);
+ Br(after_cx, next_cx.llbb, DebugLoc::None);
}
next_cx
}
let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
- Call(cx, lifetime_start, &[llsize, ptr], None);
+ Call(cx, lifetime_start, &[llsize, ptr], None, DebugLoc::None);
}
pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
- Call(cx, lifetime_end, &[llsize, ptr], None);
+ Call(cx, lifetime_end, &[llsize, ptr], None, DebugLoc::None);
}
pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
let size = IntCast(cx, n_bytes, ccx.int_type());
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
- Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
+ Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None, DebugLoc::None);
}
pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// and builds the return block.
pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
last_bcx: Block<'blk, 'tcx>,
- retty: ty::FnOutput<'tcx>) {
+ retty: ty::FnOutput<'tcx>,
+ ret_debug_loc: DebugLoc) {
let _icx = push_ctxt("finish_fn");
let ret_cx = match fcx.llreturn.get() {
Some(llreturn) => {
if !last_bcx.terminated.get() {
- Br(last_bcx, llreturn);
+ Br(last_bcx, llreturn, DebugLoc::None);
}
raw_block(fcx, false, llreturn)
}
// This shouldn't need to recompute the return type,
// as new_fn_ctxt did it already.
let substd_retty = fcx.monomorphize(&retty);
- build_return_block(fcx, ret_cx, substd_retty);
+ build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc);
debuginfo::clear_source_location(fcx);
fcx.cleanup();
// Builds the return block for a function.
pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
ret_cx: Block<'blk, 'tcx>,
- retty: ty::FnOutput<'tcx>) {
+ retty: ty::FnOutput<'tcx>,
+ ret_debug_location: DebugLoc) {
if fcx.llretslotptr.get().is_none() ||
(!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
- return RetVoid(ret_cx);
+ return RetVoid(ret_cx, ret_debug_location);
}
let retslot = if fcx.needs_ret_allocas {
if let ty::FnConverging(retty) = retty {
store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty);
}
- RetVoid(ret_cx)
+ RetVoid(ret_cx, ret_debug_location)
} else {
- Ret(ret_cx, retval)
+ Ret(ret_cx, retval, ret_debug_location)
}
}
// Otherwise, copy the return value to the ret slot
ty::FnConverging(retty) => {
if fcx.caller_expects_out_pointer {
memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty);
- RetVoid(ret_cx)
+ RetVoid(ret_cx, ret_debug_location)
} else {
- Ret(ret_cx, load_ty(ret_cx, retslot, retty))
+ Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location)
}
}
ty::FnDiverging => {
if fcx.caller_expects_out_pointer {
- RetVoid(ret_cx)
+ RetVoid(ret_cx, ret_debug_location)
} else {
- Ret(ret_cx, C_undef(Type::nil(fcx.ccx)))
+ Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location)
}
}
}
match fcx.llreturn.get() {
Some(_) => {
- Br(bcx, fcx.return_exit_block());
+ Br(bcx, fcx.return_exit_block(), DebugLoc::None);
fcx.pop_custom_cleanup_scope(arg_scope);
}
None => {
}
}
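+ // Attribute the function's `ret` instruction to the same location as the
+ // function's cleanup code, rather than leaving it without a location.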
+ let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id,
+ fn_cleanup_debug_loc.span);
+
// Insert the mandatory first few basic blocks before lltop.
- finish_fn(&fcx, bcx, output_type);
+ finish_fn(&fcx, bcx, output_type, ret_debug_loc);
}
// trans_fn: creates an LLVM function corresponding to a source language
disr: ty::Disr,
args: callee::CallArgs,
dest: expr::Dest,
- call_info: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Result<'blk, 'tcx> {
let ccx = bcx.fcx.ccx;
&fields[],
None,
expr::SaveIn(llresult),
- call_info);
+ debug_loc);
}
_ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
}
let bcx = match dest {
expr::SaveIn(_) => bcx,
expr::Ignore => {
- glue::drop_ty(bcx, llresult, result_ty, call_info)
+ glue::drop_ty(bcx, llresult, result_ty, debug_loc)
}
};
adt::trans_set_discr(bcx, &*repr, dest, disr);
}
- finish_fn(&fcx, bcx, result_ty);
+ finish_fn(&fcx, bcx, result_ty, DebugLoc::None);
}
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
use trans::builder::Builder;
use trans::type_::Type;
+use trans::debuginfo::DebugLoc;
use libc::{c_uint, c_char};
// for (panic/break/return statements, call to diverging functions, etc), and
// further instructions to the block should simply be ignored.
-pub fn RetVoid(cx: Block) {
- if cx.unreachable.get() { return; }
+pub fn RetVoid(cx: Block, debug_loc: DebugLoc) {
+ if cx.unreachable.get() {
+ return;
+ }
check_not_terminated(cx);
terminate(cx, "RetVoid");
+ debug_loc.apply(cx.fcx);
B(cx).ret_void();
}
-pub fn Ret(cx: Block, v: ValueRef) {
- if cx.unreachable.get() { return; }
+pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) {
+ if cx.unreachable.get() {
+ return;
+ }
check_not_terminated(cx);
terminate(cx, "Ret");
+ debug_loc.apply(cx.fcx);
B(cx).ret(v);
}
-pub fn AggregateRet(cx: Block, ret_vals: &[ValueRef]) {
- if cx.unreachable.get() { return; }
+pub fn AggregateRet(cx: Block,
+ ret_vals: &[ValueRef],
+ debug_loc: DebugLoc) {
+ if cx.unreachable.get() {
+ return;
+ }
check_not_terminated(cx);
terminate(cx, "AggregateRet");
+ debug_loc.apply(cx.fcx);
B(cx).aggregate_ret(ret_vals);
}
-pub fn Br(cx: Block, dest: BasicBlockRef) {
- if cx.unreachable.get() { return; }
+pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) {
+ if cx.unreachable.get() {
+ return;
+ }
check_not_terminated(cx);
terminate(cx, "Br");
+ debug_loc.apply(cx.fcx);
B(cx).br(dest);
}
pub fn CondBr(cx: Block,
if_: ValueRef,
then: BasicBlockRef,
- else_: BasicBlockRef) {
- if cx.unreachable.get() { return; }
+ else_: BasicBlockRef,
+ debug_loc: DebugLoc) {
+ if cx.unreachable.get() {
+ return;
+ }
check_not_terminated(cx);
terminate(cx, "CondBr");
+ debug_loc.apply(cx.fcx);
B(cx).cond_br(if_, then, else_);
}
}
}
-pub fn IndirectBr(cx: Block, addr: ValueRef, num_dests: uint) {
- if cx.unreachable.get() { return; }
+pub fn IndirectBr(cx: Block,
+ addr: ValueRef,
+ num_dests: uint,
+ debug_loc: DebugLoc) {
+ if cx.unreachable.get() {
+ return;
+ }
check_not_terminated(cx);
terminate(cx, "IndirectBr");
+ debug_loc.apply(cx.fcx);
B(cx).indirect_br(addr, num_dests);
}
args: &[ValueRef],
then: BasicBlockRef,
catch: BasicBlockRef,
- attributes: Option<AttrBuilder>)
+ attributes: Option<AttrBuilder>,
+ debug_loc: DebugLoc)
-> ValueRef {
if cx.unreachable.get() {
return C_null(Type::i8(cx.ccx()));
debug!("Invoke({} with arguments ({}))",
cx.val_to_string(fn_),
args.iter().map(|a| cx.val_to_string(*a)).collect::<Vec<String>>().connect(", "));
+ debug_loc.apply(cx.fcx);
B(cx).invoke(fn_, args, then, catch, attributes)
}
}
/* Arithmetic */
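+// Every arithmetic helper below follows the same pattern: return an undef
+// value when the block is already unreachable, apply the debug location to
+// the function context, and only then emit the instruction.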
-pub fn Add(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn Add(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).add(lhs, rhs)
}
-pub fn NSWAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn NSWAdd(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nswadd(lhs, rhs)
}
-pub fn NUWAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn NUWAdd(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nuwadd(lhs, rhs)
}
-pub fn FAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn FAdd(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).fadd(lhs, rhs)
}
-pub fn Sub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn Sub(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).sub(lhs, rhs)
}
-pub fn NSWSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn NSWSub(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nswsub(lhs, rhs)
}
-pub fn NUWSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn NUWSub(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nuwsub(lhs, rhs)
}
-pub fn FSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn FSub(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).fsub(lhs, rhs)
}
-pub fn Mul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn Mul(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).mul(lhs, rhs)
}
-pub fn NSWMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn NSWMul(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nswmul(lhs, rhs)
}
-pub fn NUWMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn NUWMul(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nuwmul(lhs, rhs)
}
-pub fn FMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn FMul(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).fmul(lhs, rhs)
}
-pub fn UDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn UDiv(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).udiv(lhs, rhs)
}
-pub fn SDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn SDiv(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).sdiv(lhs, rhs)
}
-pub fn ExactSDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn ExactSDiv(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).exactsdiv(lhs, rhs)
}
-pub fn FDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn FDiv(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).fdiv(lhs, rhs)
}
-pub fn URem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn URem(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).urem(lhs, rhs)
}
-pub fn SRem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn SRem(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).srem(lhs, rhs)
}
-pub fn FRem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn FRem(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).frem(lhs, rhs)
}
-pub fn Shl(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn Shl(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).shl(lhs, rhs)
}
-pub fn LShr(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn LShr(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).lshr(lhs, rhs)
}
-pub fn AShr(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn AShr(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).ashr(lhs, rhs)
}
-pub fn And(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn And(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).and(lhs, rhs)
}
-pub fn Or(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn Or(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).or(lhs, rhs)
}
-pub fn Xor(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+pub fn Xor(cx: Block,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).xor(lhs, rhs)
}
-pub fn BinOp(cx: Block, op: Opcode, lhs: ValueRef, rhs: ValueRef)
+pub fn BinOp(cx: Block,
+ op: Opcode,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ debug_loc: DebugLoc)
-> ValueRef {
- if cx.unreachable.get() { return _Undef(lhs); }
+ if cx.unreachable.get() {
+ return _Undef(lhs);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).binop(op, lhs, rhs)
}
-pub fn Neg(cx: Block, v: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(v); }
+pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(v);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).neg(v)
}
-pub fn NSWNeg(cx: Block, v: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(v); }
+pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(v);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nswneg(v)
}
-pub fn NUWNeg(cx: Block, v: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(v); }
+pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(v);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).nuwneg(v)
}
-pub fn FNeg(cx: Block, v: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(v); }
+pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(v);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).fneg(v)
}
-pub fn Not(cx: Block, v: ValueRef) -> ValueRef {
- if cx.unreachable.get() { return _Undef(v); }
+pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
+ if cx.unreachable.get() {
+ return _Undef(v);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).not(v)
}
/* Memory */
-pub fn Malloc(cx: Block, ty: Type) -> ValueRef {
+pub fn Malloc(cx: Block, ty: Type, debug_loc: DebugLoc) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
+ debug_loc.apply(cx.fcx);
B(cx).malloc(ty)
}
}
-pub fn ArrayMalloc(cx: Block, ty: Type, val: ValueRef) -> ValueRef {
+pub fn ArrayMalloc(cx: Block,
+ ty: Type,
+ val: ValueRef,
+ debug_loc: DebugLoc) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
+ debug_loc.apply(cx.fcx);
B(cx).array_malloc(ty, val)
}
}
pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef {
let b = fcx.ccx.builder();
b.position_before(fcx.alloca_insert_pt.get().unwrap());
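+ // Allocas are emitted at the function's alloca insertion point; make sure
+ // they do not inherit whatever source location the builder currently has.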
+ DebugLoc::None.apply(fcx);
b.alloca(ty, name)
}
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
let b = cx.fcx.ccx.builder();
b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
+ DebugLoc::None.apply(cx.fcx);
b.array_alloca(ty, val)
}
}
B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
}
-pub fn Call(cx: Block, fn_: ValueRef, args: &[ValueRef],
- attributes: Option<AttrBuilder>) -> ValueRef {
- if cx.unreachable.get() { return _UndefReturn(cx, fn_); }
+pub fn Call(cx: Block,
+ fn_: ValueRef,
+ args: &[ValueRef],
+ attributes: Option<AttrBuilder>,
+ debug_loc: DebugLoc)
+ -> ValueRef {
+ if cx.unreachable.get() {
+ return _UndefReturn(cx, fn_);
+ }
+ debug_loc.apply(cx.fcx);
B(cx).call(fn_, args, attributes)
}
use trans::common::*;
use trans::consts;
use trans::datum::*;
+use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::expr;
use trans::glue;
use trans::inline;
ArgVals(&llargs[]),
dest).bcx;
- finish_fn(&fcx, bcx, sig.output);
+ finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty, llfn);
/// For non-lang items, `dest` is always Some, and hence the result is written into memory
/// somewhere. Nonetheless we return the actual return value of the function.
pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- call_info: Option<NodeInfo>,
+ call_info: Option<NodeIdAndSpan>,
callee_ty: Ty<'tcx>,
get_callee: F,
args: CallArgs<'a, 'tcx>,
disr,
args,
dest.unwrap(),
- call_info);
+ call_info.debug_loc());
}
};
llfn,
&llargs[],
callee_ty,
- call_info);
+ call_info.debug_loc());
bcx = b;
llresult = llret;
match (dest, opt_llretslot, ret_ty) {
(Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => {
// drop the value if it is not being saved.
- bcx = glue::drop_ty(bcx, llretslot, ret_ty, call_info);
+ bcx = glue::drop_ty(bcx,
+ llretslot,
+ ret_ty,
+ call_info.debug_loc());
call_lifetime_end(bcx, llretslot);
}
_ => {}
use trans::build;
use trans::callee;
use trans::common;
-use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
-use trans::debuginfo;
+use trans::common::{Block, FunctionContext, ExprId, NodeIdAndSpan};
+use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::glue;
use middle::region;
use trans::type_::Type;
// The debug location any drop calls generated for this scope will be
// associated with.
- debug_loc: Option<NodeInfo>,
+ debug_loc: DebugLoc,
cached_early_exits: Vec<CachedEarlyExit>,
cached_landing_pad: Option<BasicBlockRef>,
fn is_lifetime_end(&self) -> bool;
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
- debug_loc: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx>;
}
impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
/// Invoked when we start to trans the code contained within a new cleanup scope.
- fn push_ast_cleanup_scope(&self, debug_loc: NodeInfo) {
+ fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
debug!("push_ast_cleanup_scope({})",
self.ccx.tcx().map.node_to_string(debug_loc.id));
}
self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
- Some(debug_loc)));
+ debug_loc.debug_loc()));
}
fn push_loop_cleanup_scope(&self,
.borrow()
.last()
.map(|opt_scope| opt_scope.debug_loc)
- .unwrap_or(None);
+ .unwrap_or(DebugLoc::None);
self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
CustomScopeIndex { index: index }
}
fn push_custom_cleanup_scope_with_debug_loc(&self,
- debug_loc: NodeInfo)
+ debug_loc: NodeIdAndSpan)
-> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
- self.push_scope(CleanupScope::new(CustomScopeKind, Some(debug_loc)));
+ self.push_scope(CleanupScope::new(CustomScopeKind,
+ debug_loc.debug_loc()));
CustomScopeIndex { index: index }
}
scope.debug_loc);
}
}
- build::Br(bcx_out, prev_llbb);
+ build::Br(bcx_out, prev_llbb, DebugLoc::None);
prev_llbb = bcx_in.llbb;
} else {
debug!("no suitable cleanups in {}",
// Generate the cleanup block and branch to it.
let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit);
- build::Br(pad_bcx, cleanup_llbb);
+ build::Br(pad_bcx, cleanup_llbb, DebugLoc::None);
return pad_bcx.llbb;
}
impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
fn new(kind: CleanupScopeKind<'blk, 'tcx>,
- debug_loc: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> CleanupScope<'blk, 'tcx> {
CleanupScope {
kind: kind,
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
- debug_loc: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc)
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
- debug_loc: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
- apply_debug_loc(bcx.fcx, debug_loc);
+ debug_loc.apply(bcx.fcx);
match self.heap {
HeapExchange => {
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
- debug_loc: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
- apply_debug_loc(bcx.fcx, debug_loc);
+ debug_loc.apply(bcx.fcx);
match self.heap {
HeapExchange => {
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
- debug_loc: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
- apply_debug_loc(bcx.fcx, debug_loc);
+ debug_loc.apply(bcx.fcx);
base::call_lifetime_end(bcx, self.ptr);
bcx
}
!label.is_unwind() || c.clean_on_unwind()
}
-fn apply_debug_loc(fcx: &FunctionContext, debug_loc: Option<NodeInfo>) {
- match debug_loc {
- Some(ref src_loc) => {
- debuginfo::set_source_location(fcx, src_loc.id, src_loc.span);
- }
- None => {
- debuginfo::clear_source_location(fcx);
- }
- }
-}
-
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
pub trait CleanupMethods<'blk, 'tcx> {
- fn push_ast_cleanup_scope(&self, id: NodeInfo);
+ fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
exits: [Block<'blk, 'tcx>; EXIT_MAX]);
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
fn push_custom_cleanup_scope_with_debug_loc(&self,
- debug_loc: NodeInfo)
+ debug_loc: NodeIdAndSpan)
-> CustomScopeIndex;
fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- cleanup_scope: ast::NodeId)
- -> Block<'blk, 'tcx>;
+ bcx: Block<'blk, 'tcx>,
+ cleanup_scope: ast::NodeId)
+ -> Block<'blk, 'tcx>;
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId);
fn pop_custom_cleanup_scope(&self,
use trans::cleanup;
use trans::consts;
use trans::datum;
-use trans::debuginfo;
+use trans::debuginfo::{self, DebugLoc};
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
*/
#[derive(Copy)]
-pub struct NodeInfo {
+pub struct NodeIdAndSpan {
pub id: ast::NodeId,
pub span: Span,
}
-pub fn expr_info(expr: &ast::Expr) -> NodeInfo {
- NodeInfo { id: expr.id, span: expr.span }
+pub fn expr_info(expr: &ast::Expr) -> NodeIdAndSpan {
+ NodeIdAndSpan { id: expr.id, span: expr.span }
}
pub struct BuilderRef_res {
let mut reachable = false;
for bcx in in_cxs.iter() {
if !bcx.unreachable.get() {
- build::Br(*bcx, out.llbb);
+ build::Br(*bcx, out.llbb, DebugLoc::None);
reachable = true;
}
}
use trans::consts;
use trans::datum;
use trans::debuginfo;
+use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::expr;
use trans::meth;
use trans::type_::Type;
let then_bcx_out = trans_block(then_bcx_in, &*thn, dest);
trans::debuginfo::clear_source_location(bcx.fcx);
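+ // The conditional branch is attributed to the `if` condition expression.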
+ let cond_source_loc = cond.debug_loc();
+
let next_bcx;
match els {
Some(elexpr) => {
let else_bcx_out = expr::trans_into(else_bcx_in, &*elexpr, dest);
next_bcx = bcx.fcx.join_blocks(if_id,
&[then_bcx_out, else_bcx_out]);
- CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb);
+ CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
}
None => {
next_bcx = bcx.fcx.new_id_block("next-block", if_id);
- Br(then_bcx_out, next_bcx.llbb);
- CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb);
+ Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
+ CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
}
}
}
pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- loop_id: ast::NodeId,
+ loop_expr: &ast::Expr,
cond: &ast::Expr,
body: &ast::Block)
-> Block<'blk, 'tcx> {
// | body_bcx_out --+
// next_bcx_in
- let next_bcx_in = fcx.new_id_block("while_exit", loop_id);
+ let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
let body_bcx_in = fcx.new_id_block("while_body", body.id);
- fcx.push_loop_cleanup_scope(loop_id, [next_bcx_in, cond_bcx_in]);
+ fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
- Br(bcx, cond_bcx_in.llbb);
+ Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
// compile the block where we will handle loop cleanups
- let cleanup_llbb = fcx.normal_exit_block(loop_id, cleanup::EXIT_BREAK);
+ let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
// compile the condition
let Result {bcx: cond_bcx_out, val: cond_val} =
expr::trans(cond_bcx_in, cond).to_llbool();
- CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb);
+
+ CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
// loop body:
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
- Br(body_bcx_out, cond_bcx_in.llbb);
+ Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
- fcx.pop_loop_cleanup_scope(loop_id);
+ fcx.pop_loop_cleanup_scope(loop_expr.id);
return next_bcx_in;
}
/// Translates a `for` loop.
pub fn trans_for<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- loop_info: NodeInfo,
+ loop_info: NodeIdAndSpan,
pat: &ast::Pat,
head: &ast::Expr,
body: &ast::Block)
let body_bcx_in = bcx.fcx.new_id_block("for_body", body.id);
bcx.fcx.push_loop_cleanup_scope(loop_info.id,
[next_bcx_in, loopback_bcx_in]);
- Br(bcx, loopback_bcx_in.llbb);
+ Br(bcx, loopback_bcx_in.llbb, DebugLoc::None);
let cleanup_llbb = bcx.fcx.normal_exit_block(loop_info.id,
cleanup::EXIT_BREAK);
None);
let i1_type = Type::i1(loopback_bcx_out.ccx());
let llcondition = Trunc(loopback_bcx_out, lldiscriminant, i1_type);
- CondBr(loopback_bcx_out, llcondition, body_bcx_in.llbb, cleanup_llbb);
+ CondBr(loopback_bcx_out, llcondition, body_bcx_in.llbb, cleanup_llbb, DebugLoc::None);
// Now we're in the body. Unpack the `Option` value into the programmer-
// supplied pattern.
body_bcx_out.fcx
.pop_and_trans_custom_cleanup_scope(body_bcx_out,
option_cleanup_scope);
- Br(body_bcx_out, loopback_bcx_in.llbb);
+ Br(body_bcx_out, loopback_bcx_in.llbb, DebugLoc::None);
// Codegen cleanups and leave.
next_bcx_in.fcx.pop_loop_cleanup_scope(loop_info.id);
}
pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- loop_id: ast::NodeId,
+ loop_expr: &ast::Expr,
body: &ast::Block)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_loop");
// Links between body_bcx_in and next_bcx are created by
// break statements.
- let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_id);
+ let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
- fcx.push_loop_cleanup_scope(loop_id, [next_bcx_in, body_bcx_in]);
+ fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
- Br(bcx, body_bcx_in.llbb);
+ Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
- Br(body_bcx_out, body_bcx_in.llbb);
+ Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
- fcx.pop_loop_cleanup_scope(loop_id);
+ fcx.pop_loop_cleanup_scope(loop_expr.id);
return next_bcx_in;
}
pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr_id: ast::NodeId,
+ expr: &ast::Expr,
opt_label: Option<Ident>,
exit: uint)
-> Block<'blk, 'tcx> {
let loop_id = match opt_label {
None => fcx.top_loop_scope(),
Some(_) => {
- match bcx.tcx().def_map.borrow().get(&expr_id) {
+ match bcx.tcx().def_map.borrow().get(&expr.id) {
Some(&def::DefLabel(loop_id)) => loop_id,
ref r => {
bcx.tcx().sess.bug(&format!("{:?} in def-map for label",
// Generate appropriate cleanup code and branch
let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
- Br(bcx, cleanup_llbb);
+ Br(bcx, cleanup_llbb, expr.debug_loc());
Unreachable(bcx); // anything afterwards should be ignored
return bcx;
}
pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr_id: ast::NodeId,
+ expr: &ast::Expr,
label_opt: Option<Ident>)
-> Block<'blk, 'tcx> {
- return trans_break_cont(bcx, expr_id, label_opt, cleanup::EXIT_BREAK);
+ return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
}
pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr_id: ast::NodeId,
+ expr: &ast::Expr,
label_opt: Option<Ident>)
-> Block<'blk, 'tcx> {
- return trans_break_cont(bcx, expr_id, label_opt, cleanup::EXIT_LOOP);
+ return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
}
pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- e: Option<&ast::Expr>)
+ return_expr: &ast::Expr,
+ retval_expr: Option<&ast::Expr>)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_ret");
let fcx = bcx.fcx;
let mut bcx = bcx;
- let dest = match (fcx.llretslotptr.get(), e) {
- (Some(_), Some(e)) => {
- let ret_ty = expr_ty(bcx, &*e);
+ let dest = match (fcx.llretslotptr.get(), retval_expr) {
+ (Some(_), Some(retval_expr)) => {
+ let ret_ty = expr_ty(bcx, &*retval_expr);
expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot"))
}
_ => expr::Ignore,
};
- if let Some(x) = e {
+ if let Some(x) = retval_expr {
bcx = expr::trans_into(bcx, &*x, dest);
match dest {
expr::SaveIn(slot) if fcx.needs_ret_allocas => {
}
}
let cleanup_llbb = fcx.return_exit_block();
- Br(bcx, cleanup_llbb);
+ Br(bcx, cleanup_llbb, return_expr.debug_loc());
Unreachable(bcx);
return bcx;
}
use self::MemberDescriptionFactory::*;
use self::RecursiveTypeDescription::*;
use self::EnumDiscriminantInfo::*;
-use self::DebugLocation::*;
+use self::InternalDebugLocation::*;
use llvm;
use llvm::{ModuleRef, ContextRef, ValueRef};
use metadata::csearch;
use middle::subst::{self, Substs};
use trans::{self, adt, machine, type_of};
-use trans::common::*;
+use trans::common::{self, NodeIdAndSpan, CrateContext, FunctionContext, Block,
+ C_bytes, C_i32, C_i64, NormalizingUnboxedClosureTyper};
use trans::_match::{BindingInfo, TrByCopy, TrByMove, TrByRef};
use trans::monomorphize;
use trans::type_::Type;
pub struct CrateDebugContext<'tcx> {
llcontext: ContextRef,
builder: DIBuilderRef,
- current_debug_location: Cell<DebugLocation>,
+ current_debug_location: Cell<InternalDebugLocation>,
created_files: RefCell<FnvHashMap<String, DIFile>>,
created_enum_disr_types: RefCell<DefIdMap<DIType>>,
}
};
- let variable_type = node_id_type(bcx, node_id);
+ let variable_type = common::node_id_type(bcx, node_id);
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, span).fn_metadata;
// env_pointer is the alloca containing the pointer to the environment,
// so its type is **EnvironmentType. In order to find out the type of
// the environment we have to "dereference" two times.
- let llvm_env_data_type = val_ty(env_pointer).element_type().element_type();
+ let llvm_env_data_type = common::val_ty(env_pointer).element_type()
+ .element_type();
let byte_offset_of_var_in_env = machine::llelement_offset(cx,
llvm_env_data_type,
env_index);
node_id: ast::NodeId,
node_span: Span,
is_block: bool)
- -> NodeInfo {
+ -> NodeIdAndSpan {
// A debug location needs two things:
// (1) A span (of which only the beginning will actually be used)
// (2) An AST node-id which will be used to look up the lexical scope
}
}
- NodeInfo {
+ NodeIdAndSpan {
id: node_id,
span: cleanup_span
}
}
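+/// A source location that can be attached to emitted instructions: either a
+/// concrete AST node, whose span supplies the line and column, or `None`,
+/// which clears the builder's current location.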
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum DebugLoc {
+ At(ast::NodeId, Span),
+ None
+}
+
+impl DebugLoc {
+ pub fn apply(&self, fcx: &FunctionContext) {
+ match *self {
+ DebugLoc::At(node_id, span) => {
+ set_source_location(fcx, node_id, span);
+ }
+ DebugLoc::None => {
+ clear_source_location(fcx);
+ }
+ }
+ }
+}
+
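+/// Conversion into a `DebugLoc`, implemented for AST nodes that carry a
+/// `NodeId` and a `Span`.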
+pub trait ToDebugLoc {
+ fn debug_loc(&self) -> DebugLoc;
+}
+
+impl ToDebugLoc for ast::Expr {
+ fn debug_loc(&self) -> DebugLoc {
+ DebugLoc::At(self.id, self.span)
+ }
+}
+
+impl ToDebugLoc for NodeIdAndSpan {
+ fn debug_loc(&self) -> DebugLoc {
+ DebugLoc::At(self.id, self.span)
+ }
+}
+
+impl ToDebugLoc for Option<NodeIdAndSpan> {
+ fn debug_loc(&self) -> DebugLoc {
+ match *self {
+ Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span),
+ None => DebugLoc::None
+ }
+ }
+}
+
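+// Illustrative sketch (not part of this patch): with the conversions above,
+// a call site can thread a location straight from an AST node into a builder
+// function, or pass `DebugLoc::None` for compiler-synthesized code:
+//
+//     Br(bcx, target_llbb, expr.debug_loc());   // attributed to `expr`
+//     Br(bcx, target_llbb, DebugLoc::None);     // no source attribution
+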
/// Sets the current debug location at the beginning of the span.
///
/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id
let loc = span_start(cx, span);
let scope = scope_metadata(fcx, node_id, span);
- set_debug_location(cx, DebugLocation::new(scope,
- loc.line,
- loc.col.to_uint()));
+ set_debug_location(cx, InternalDebugLocation::new(scope,
+ loc.line,
+ loc.col.to_uint()));
} else {
set_debug_location(cx, UnknownLocation);
}
)
};
- set_debug_location(cx, DebugLocation::new(scope_metadata,
- loc.line,
- loc.col.to_uint()));
+ set_debug_location(cx, InternalDebugLocation::new(scope_metadata,
+ loc.line,
+ loc.col.to_uint()));
unsafe {
let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd(
DIB(cx),
}
#[derive(Copy, PartialEq)]
-enum DebugLocation {
+enum InternalDebugLocation {
KnownLocation { scope: DIScope, line: uint, col: uint },
UnknownLocation
}
-impl DebugLocation {
- fn new(scope: DIScope, line: uint, col: uint) -> DebugLocation {
+impl InternalDebugLocation {
+ fn new(scope: DIScope, line: uint, col: uint) -> InternalDebugLocation {
KnownLocation {
scope: scope,
line: line,
}
}
-fn set_debug_location(cx: &CrateContext, debug_location: DebugLocation) {
+fn set_debug_location(cx: &CrateContext, debug_location: InternalDebugLocation) {
if debug_location == debug_context(cx).current_debug_location.get() {
return;
}
use trans::cleanup::{self, CleanupMethods};
use trans::common::*;
use trans::datum::*;
-use trans::debuginfo;
+use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::glue;
use trans::machine;
use trans::meth;
let expected = Call(bcx,
expect,
&[bounds_check, C_bool(ccx, false)],
- None);
+ None,
+ index_expr.debug_loc());
bcx = with_cond(bcx, expected, |bcx| {
controlflow::trans_fail_bounds_check(bcx,
index_expr.span,
trans_into(bcx, &**e, Ignore)
}
ast::ExprBreak(label_opt) => {
- controlflow::trans_break(bcx, expr.id, label_opt)
+ controlflow::trans_break(bcx, expr, label_opt)
}
ast::ExprAgain(label_opt) => {
- controlflow::trans_cont(bcx, expr.id, label_opt)
+ controlflow::trans_cont(bcx, expr, label_opt)
}
ast::ExprRet(ref ex) => {
// Check to see if the return expression itself is reachable.
};
if reachable {
- controlflow::trans_ret(bcx, ex.as_ref().map(|e| &**e))
+ controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
} else {
// If it's not reachable, just translate the inner expression
// directly. This avoids having to manage a return slot when
}
}
ast::ExprWhile(ref cond, ref body, _) => {
- controlflow::trans_while(bcx, expr.id, &**cond, &**body)
+ controlflow::trans_while(bcx, expr, &**cond, &**body)
}
ast::ExprForLoop(ref pat, ref head, ref body, _) => {
controlflow::trans_for(bcx,
&**body)
}
ast::ExprLoop(ref body, _) => {
- controlflow::trans_loop(bcx, expr.id, &**body)
+ controlflow::trans_loop(bcx, expr, &**body)
}
ast::ExprAssign(ref dst, ref src) => {
let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
bcx = glue::drop_ty(bcx,
dst_datum.val,
dst_datum.ty,
- Some(NodeInfo { id: expr.id, span: expr.span }));
+ expr.debug_loc());
src_datum.store_to(bcx, dst_datum.val)
} else {
src_datum.store_to(bcx, dst_datum.val)
&numbered_fields[],
None,
dest,
- Some(NodeInfo { id: expr.id, span: expr.span }))
+ expr.debug_loc())
}
ast::ExprLit(ref lit) => {
match lit.node {
numbered_fields.as_slice(),
optbase,
dest,
- Some(NodeInfo { id: expr_id, span: expr_span }))
+ DebugLoc::At(expr_id, expr_span))
})
}
fields: &[(uint, &ast::Expr)],
optbase: Option<StructBaseInfo<'a, 'tcx>>,
dest: Dest,
- source_location: Option<NodeInfo>)
+ debug_location: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_adt");
let fcx = bcx.fcx;
let repr = adt::represent_type(bcx.ccx(), ty);
- match source_location {
- Some(src_loc) => debuginfo::set_source_location(bcx.fcx,
- src_loc.id,
- src_loc.span),
- None => {}
- };
+ debug_location.apply(bcx.fcx);
// If we don't care about the result, just make a
// temporary stack slot
}
}
- match source_location {
- Some(src_loc) => debuginfo::set_source_location(bcx.fcx,
- src_loc.id,
- src_loc.span),
- None => {}
- };
+ debug_location.apply(bcx.fcx);
if ty::type_is_simd(bcx.tcx(), ty) {
// This is the constructor of a SIMD type, such types are
match dest {
SaveIn(_) => bcx,
Ignore => {
- bcx = glue::drop_ty(bcx, addr, ty, source_location);
+ bcx = glue::drop_ty(bcx, addr, ty, debug_location);
base::call_lifetime_end(bcx, addr);
bcx
}
let un_ty = expr_ty(bcx, expr);
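+ // Instructions for a unary op are attributed to the whole unary expression.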
+ let debug_loc = expr.debug_loc();
+
match op {
ast::UnNot => {
let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- let llresult = Not(bcx, datum.to_llscalarish(bcx));
+ let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
}
ast::UnNeg => {
let val = datum.to_llscalarish(bcx);
let llneg = {
if ty::type_is_fp(un_ty) {
- FNeg(bcx, val)
+ FNeg(bcx, val, debug_loc)
} else {
- Neg(bcx, val)
+ Neg(bcx, val, debug_loc)
}
};
immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
let rhs = base::cast_shift_expr_rhs(bcx, op, lhs, rhs);
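+ // Instructions for a binary op are attributed to the whole binop expression.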
+ let binop_debug_loc = binop_expr.debug_loc();
+
let mut bcx = bcx;
let val = match op {
ast::BiAdd => {
- if is_float { FAdd(bcx, lhs, rhs) }
- else { Add(bcx, lhs, rhs) }
+ if is_float {
+ FAdd(bcx, lhs, rhs, binop_debug_loc)
+ } else {
+ Add(bcx, lhs, rhs, binop_debug_loc)
+ }
}
ast::BiSub => {
- if is_float { FSub(bcx, lhs, rhs) }
- else { Sub(bcx, lhs, rhs) }
+ if is_float {
+ FSub(bcx, lhs, rhs, binop_debug_loc)
+ } else {
+ Sub(bcx, lhs, rhs, binop_debug_loc)
+ }
}
ast::BiMul => {
- if is_float { FMul(bcx, lhs, rhs) }
- else { Mul(bcx, lhs, rhs) }
+ if is_float {
+ FMul(bcx, lhs, rhs, binop_debug_loc)
+ } else {
+ Mul(bcx, lhs, rhs, binop_debug_loc)
+ }
}
ast::BiDiv => {
if is_float {
- FDiv(bcx, lhs, rhs)
+ FDiv(bcx, lhs, rhs, binop_debug_loc)
} else {
// Only zero-check integers; fp /0 is NaN
bcx = base::fail_if_zero_or_overflows(bcx, binop_expr.span,
op, lhs, rhs, rhs_t);
if is_signed {
- SDiv(bcx, lhs, rhs)
+ SDiv(bcx, lhs, rhs, binop_debug_loc)
} else {
- UDiv(bcx, lhs, rhs)
+ UDiv(bcx, lhs, rhs, binop_debug_loc)
}
}
}
ast::BiRem => {
if is_float {
- FRem(bcx, lhs, rhs)
+ FRem(bcx, lhs, rhs, binop_debug_loc)
} else {
// Only zero-check integers; fp %0 is NaN
bcx = base::fail_if_zero_or_overflows(bcx, binop_expr.span,
op, lhs, rhs, rhs_t);
if is_signed {
- SRem(bcx, lhs, rhs)
+ SRem(bcx, lhs, rhs, binop_debug_loc)
} else {
- URem(bcx, lhs, rhs)
+ URem(bcx, lhs, rhs, binop_debug_loc)
}
}
}
- ast::BiBitOr => Or(bcx, lhs, rhs),
- ast::BiBitAnd => And(bcx, lhs, rhs),
- ast::BiBitXor => Xor(bcx, lhs, rhs),
- ast::BiShl => Shl(bcx, lhs, rhs),
+ ast::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
+ ast::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
+ ast::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
+ ast::BiShl => Shl(bcx, lhs, rhs, binop_debug_loc),
ast::BiShr => {
if is_signed {
- AShr(bcx, lhs, rhs)
- } else { LShr(bcx, lhs, rhs) }
+ AShr(bcx, lhs, rhs, binop_debug_loc)
+ } else {
+ LShr(bcx, lhs, rhs, binop_debug_loc)
+ }
}
ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => {
if ty::type_is_scalar(rhs_t) {
let before_rhs = fcx.new_id_block("before_rhs", b.id);
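+ // The short-circuiting branches are compiler-generated control flow and
+ // therefore carry no source location.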
match op {
- lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb),
- lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb)
+ lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
+ lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
}
let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
}
- Br(past_rhs, join.llbb);
+ Br(past_rhs, join.llbb, DebugLoc::None);
let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
&[past_lhs.llbb, past_rhs.llbb]);
use trans::consts;
use trans::common::*;
use trans::datum;
-use trans::debuginfo;
+use trans::debuginfo::DebugLoc;
use trans::expr;
use trans::machine::*;
use trans::tvec;
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
- source_location: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty(t={})", t.repr(bcx.tcx()));
v
};
- match source_location {
- Some(sl) => debuginfo::set_source_location(bcx.fcx, sl.id, sl.span),
- None => debuginfo::clear_source_location(bcx.fcx)
- };
-
- Call(bcx, glue, &[ptr], None);
+ Call(bcx, glue, &[ptr], None, debug_loc);
}
bcx
}
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
- source_location: Option<NodeInfo>)
+ debug_loc: DebugLoc)
-> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
store_ty(bcx, v, vp, t);
- drop_ty(bcx, vp, t, source_location)
+ drop_ty(bcx, vp, t, debug_loc)
}
pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef {
class_did,
&[get_drop_glue_type(bcx.ccx(), t)],
ty::mk_nil(bcx.tcx()));
- let (_, variant_cx) = invoke(variant_cx, dtor_addr, &args[], dtor_ty, None);
+ let (_, variant_cx) = invoke(variant_cx, dtor_addr, &args[], dtor_ty, DebugLoc::None);
variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope);
variant_cx
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// Return the sum of sizes and max of aligns.
- let size = Add(bcx, sized_size, unsized_size);
+ let size = Add(bcx, sized_size, unsized_size, DebugLoc::None);
let align = Select(bcx,
ICmp(bcx, llvm::IntULT, sized_align, unsized_align),
sized_align,
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
- (Mul(bcx, info, C_uint(bcx.ccx(), unit_size)), C_uint(bcx.ccx(), unit_align))
+ (Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None),
+ C_uint(bcx.ccx(), unit_align))
}
_ => bcx.sess().bug(&format!("Unexpected unsized type, found {}",
bcx.ty_to_string(t))[])
Call(bcx,
dtor,
&[PointerCast(bcx, lluniquevalue, Type::i8p(bcx.ccx()))],
- None);
+ None,
+ DebugLoc::None);
bcx
})
}
let llbox = Load(bcx, llval);
let not_null = IsNotNull(bcx, llbox);
with_cond(bcx, not_null, |bcx| {
- let bcx = drop_ty(bcx, v0, content_ty, None);
+ let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = GEPi(bcx, v0, &[0, abi::FAT_PTR_EXTRA]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
let llbox = Load(bcx, llval);
let not_null = IsNotNull(bcx, llbox);
with_cond(bcx, not_null, |bcx| {
- let bcx = drop_ty(bcx, llbox, content_ty, None);
+ let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty)
})
}
}
ty::NoDtor => {
// No dtor? Just the default case
- iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, None))
+ iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
}
}
}
- ty::ty_unboxed_closure(..) => iter_structural_ty(bcx,
- v0,
- t,
- |bb, vv, tt| drop_ty(bb, vv, tt, None)),
+ ty::ty_unboxed_closure(..) => {
+ iter_structural_ty(bcx,
+ v0,
+ t,
+ |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
+ }
ty::ty_trait(..) => {
// No need to do a null check here (as opposed to the Box<Trait> case
// above), because this happens for a trait field in an unsized
Call(bcx,
dtor,
&[PointerCast(bcx, Load(bcx, lluniquevalue), Type::i8p(bcx.ccx()))],
- None);
+ None,
+ DebugLoc::None);
bcx
},
ty::ty_vec(_, None) | ty::ty_str => {
},
_ => {
assert!(type_is_sized(bcx.tcx(), t));
- if type_needs_drop(bcx.tcx(), t) &&
- ty::type_is_structural(t) {
- iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, None))
+ if type_needs_drop(bcx.tcx(), t) && ty::type_is_structural(t) {
+ iter_structural_ty(bcx,
+ v0,
+ t,
+ |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None))
} else {
bcx
}
let llrawptr0 = get_param(llfn, fcx.arg_pos(0) as c_uint);
let bcx = helper(bcx, llrawptr0, t);
- finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())));
+ finish_fn(&fcx, bcx, ty::FnConverging(ty::mk_nil(ccx.tcx())), DebugLoc::None);
llfn
}
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
+use trans::debuginfo::DebugLoc;
use trans::expr;
use trans::glue;
use trans::type_of::*;
args: callee::CallArgs<'a, 'tcx>,
dest: expr::Dest,
substs: subst::Substs<'tcx>,
- call_info: NodeInfo)
- -> Result<'blk, 'tcx>
-{
+ call_info: NodeIdAndSpan)
+ -> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let tcx = bcx.tcx();
fcx.pop_custom_cleanup_scope(cleanup_scope);
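+ // Every instruction emitted for the intrinsic is attributed to the
+ // location of the call itself.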
+ let call_debug_location = DebugLoc::At(call_info.id, call_info.span);
+
// These are the only intrinsic functions that diverge.
if name.get() == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
- Call(bcx, llfn, &[], None);
+ Call(bcx, llfn, &[], None, call_debug_location);
Unreachable(bcx);
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
} else if name.get() == "unreachable" {
let simple = get_simple_intrinsic(ccx, &*foreign_item);
let llval = match (simple, name.get()) {
(Some(llfn), _) => {
- Call(bcx, llfn, llargs.as_slice(), None)
+ Call(bcx, llfn, llargs.as_slice(), None, call_debug_location)
}
(_, "breakpoint") => {
let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
- Call(bcx, llfn, &[], None)
+ Call(bcx, llfn, &[], None, call_debug_location)
}
(_, "size_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
}
(_, "copy_nonoverlapping_memory") => {
- copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
- llargs[0], llargs[1], llargs[2])
+ copy_intrinsic(bcx,
+ false,
+ false,
+ *substs.types.get(FnSpace, 0),
+ llargs[0],
+ llargs[1],
+ llargs[2],
+ call_debug_location)
}
(_, "copy_memory") => {
- copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
- llargs[0], llargs[1], llargs[2])
+ copy_intrinsic(bcx,
+ true,
+ false,
+ *substs.types.get(FnSpace, 0),
+ llargs[0],
+ llargs[1],
+ llargs[2],
+ call_debug_location)
}
(_, "set_memory") => {
- memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
- llargs[0], llargs[1], llargs[2])
+ memset_intrinsic(bcx,
+ false,
+ *substs.types.get(FnSpace, 0),
+ llargs[0],
+ llargs[1],
+ llargs[2],
+ call_debug_location)
}
(_, "volatile_copy_nonoverlapping_memory") => {
- copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
- llargs[0], llargs[1], llargs[2])
+ copy_intrinsic(bcx,
+ false,
+ true,
+ *substs.types.get(FnSpace, 0),
+ llargs[0],
+ llargs[1],
+ llargs[2],
+ call_debug_location)
}
(_, "volatile_copy_memory") => {
- copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
- llargs[0], llargs[1], llargs[2])
+ copy_intrinsic(bcx,
+ true,
+ true,
+ *substs.types.get(FnSpace, 0),
+ llargs[0],
+ llargs[1],
+ llargs[2],
+ call_debug_location)
}
(_, "volatile_set_memory") => {
- memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
- llargs[0], llargs[1], llargs[2])
+ memset_intrinsic(bcx,
+ true,
+ *substs.types.get(FnSpace, 0),
+ llargs[0],
+ llargs[1],
+ llargs[2],
+ call_debug_location)
}
(_, "volatile_load") => {
VolatileLoad(bcx, llargs[0])
C_nil(ccx)
},
- (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", llargs[0]),
- (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", llargs[0]),
- (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", llargs[0]),
- (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", llargs[0]),
- (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", llargs[0]),
- (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", llargs[0]),
- (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", llargs[0]),
- (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", llargs[0]),
+ (_, "ctlz8") => count_zeros_intrinsic(bcx,
+ "llvm.ctlz.i8",
+ llargs[0],
+ call_debug_location),
+ (_, "ctlz16") => count_zeros_intrinsic(bcx,
+ "llvm.ctlz.i16",
+ llargs[0],
+ call_debug_location),
+ (_, "ctlz32") => count_zeros_intrinsic(bcx,
+ "llvm.ctlz.i32",
+ llargs[0],
+ call_debug_location),
+ (_, "ctlz64") => count_zeros_intrinsic(bcx,
+ "llvm.ctlz.i64",
+ llargs[0],
+ call_debug_location),
+ (_, "cttz8") => count_zeros_intrinsic(bcx,
+ "llvm.cttz.i8",
+ llargs[0],
+ call_debug_location),
+ (_, "cttz16") => count_zeros_intrinsic(bcx,
+ "llvm.cttz.i16",
+ llargs[0],
+ call_debug_location),
+ (_, "cttz32") => count_zeros_intrinsic(bcx,
+ "llvm.cttz.i32",
+ llargs[0],
+ call_debug_location),
+ (_, "cttz64") => count_zeros_intrinsic(bcx,
+ "llvm.cttz.i64",
+ llargs[0],
+ call_debug_location),
(_, "i8_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.sadd.with.overflow.i8",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i16_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.sadd.with.overflow.i16",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i32_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.sadd.with.overflow.i32",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i64_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.sadd.with.overflow.i64",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u8_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.uadd.with.overflow.i8",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u16_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.uadd.with.overflow.i16",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u32_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.uadd.with.overflow.i32",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u64_add_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
- llargs[0], llargs[1]),
-
+ with_overflow_intrinsic(bcx,
+ "llvm.uadd.with.overflow.i64",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i8_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.ssub.with.overflow.i8",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i16_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.ssub.with.overflow.i16",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i32_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.ssub.with.overflow.i32",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i64_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
- llargs[0], llargs[1]),
-
+ with_overflow_intrinsic(bcx,
+ "llvm.ssub.with.overflow.i64",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u8_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.usub.with.overflow.i8",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u16_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.usub.with.overflow.i16",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u32_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.usub.with.overflow.i32",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u64_sub_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
- llargs[0], llargs[1]),
-
+ with_overflow_intrinsic(bcx,
+ "llvm.usub.with.overflow.i64",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i8_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.smul.with.overflow.i8",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i16_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.smul.with.overflow.i16",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i32_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.smul.with.overflow.i32",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "i64_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
- llargs[0], llargs[1]),
-
+ with_overflow_intrinsic(bcx,
+ "llvm.smul.with.overflow.i64",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u8_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.umul.with.overflow.i8",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u16_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.umul.with.overflow.i16",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u32_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
- llargs[0], llargs[1]),
+ with_overflow_intrinsic(bcx,
+ "llvm.umul.with.overflow.i32",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
(_, "u64_mul_with_overflow") =>
- with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
- llargs[0], llargs[1]),
-
+ with_overflow_intrinsic(bcx,
+ "llvm.umul.with.overflow.i64",
+ ret_ty,
+ llargs[0],
+ llargs[1],
+ call_debug_location),
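All twenty-four `*_with_overflow` arms follow the same pattern: select the matching
member of LLVM's `llvm.{s,u}{add,sub,mul}.with.overflow.iN` family and forward the
call site's location. For orientation, the LLVM side returns a pair, which
`with_overflow_intrinsic` below unpacks; the i8 declaration sketched here is
standard LLVM, not part of this diff:

// declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
// field 0 is the wrapped result; field 1 is the overflow flag, which
// with_overflow_intrinsic widens to a bool via ZExt.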
(_, "return_address") => {
if !fcx.caller_expects_out_pointer {
tcx.sess.span_err(call_info.span,
// If we made a temporary stack slot, let's clean it up
match dest {
expr::Ignore => {
- bcx = glue::drop_ty(bcx, llresult, ret_ty, Some(call_info));
+ bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
}
expr::SaveIn(_) => {}
}
}
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>,
- dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
+ allow_overlap: bool,
+ volatile: bool,
+ tp_ty: Ty<'tcx>,
+ dst: ValueRef,
+ src: ValueRef,
+ count: ValueRef,
+ call_debug_location: DebugLoc)
+ -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
let llfn = ccx.get_intrinsic(&name);
- Call(bcx, llfn, &[dst_ptr, src_ptr, Mul(bcx, size, count), align,
- C_bool(ccx, volatile)], None)
+ Call(bcx,
+ llfn,
+ &[dst_ptr,
+ src_ptr,
+ Mul(bcx, size, count, DebugLoc::None),
+ align,
+ C_bool(ccx, volatile)],
+ None,
+ call_debug_location)
}
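Note the split inside copy_intrinsic: the byte-count `Mul` is compiler-synthesized
arithmetic, so it carries `DebugLoc::None`, while the emitted call carries
`call_debug_location` so debuggers attribute it to the user's call site. A minimal
sketch of the builder wrapper this implies, assuming (since the definition is not
in this diff) that `Call` applies the location via a `DebugLoc::apply` method
before emitting:

fn Call(cx: Block,
        llfn: ValueRef,
        args: &[ValueRef],
        attributes: Option<AttrBuilder>,
        debug_loc: DebugLoc)
        -> ValueRef {
    // Assumed shape: point LLVM's current source location at the call
    // site (or clear it for DebugLoc::None), then emit as before.
    debug_loc.apply(cx.fcx);
    B(cx).call(llfn, args, attributes)
}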
-fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, volatile: bool, tp_ty: Ty<'tcx>,
- dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
+fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ volatile: bool,
+ tp_ty: Ty<'tcx>,
+ dst: ValueRef,
+ val: ValueRef,
+ count: ValueRef,
+ call_debug_location: DebugLoc)
+ -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
let llfn = ccx.get_intrinsic(&name);
- Call(bcx, llfn, &[dst_ptr, val, Mul(bcx, size, count), align,
- C_bool(ccx, volatile)], None)
+ Call(bcx,
+ llfn,
+ &[dst_ptr,
+ val,
+ Mul(bcx, size, count, DebugLoc::None),
+ align,
+ C_bool(ccx, volatile)],
+ None,
+ call_debug_location)
}
-fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
+fn count_zeros_intrinsic(bcx: Block,
+ name: &'static str,
+ val: ValueRef,
+ call_debug_location: DebugLoc)
+ -> ValueRef {
let y = C_bool(bcx.ccx(), false);
let llfn = bcx.ccx().get_intrinsic(&name);
- Call(bcx, llfn, &[val, y], None)
+ Call(bcx, llfn, &[val, y], None, call_debug_location)
}
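For context (standard LLVM behavior, not introduced by this diff): the second
argument `y` is the intrinsics' `is_zero_undef` flag. Pinning it to false makes
a zero input return the type's bit width instead of undef; the new
`call_debug_location` parameter only tags the emitted call instruction and
changes none of that.

// declare i8 @llvm.ctlz.i8(i8 %val, i1 %is_zero_undef)
// declare i8 @llvm.cttz.i8(i8 %val, i1 %is_zero_undef)
// with %is_zero_undef = false, ctlz/cttz of 0 is defined (8 for i8).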
-fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, name: &'static str,
- t: Ty<'tcx>, a: ValueRef, b: ValueRef) -> ValueRef {
+fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ name: &'static str,
+ t: Ty<'tcx>,
+ a: ValueRef,
+ b: ValueRef,
+ call_debug_location: DebugLoc)
+ -> ValueRef {
let llfn = bcx.ccx().get_intrinsic(&name);
// Convert `i1` to a `bool`, and write it to the out parameter
- let val = Call(bcx, llfn, &[a, b], None);
+ let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
let result = ExtractValue(bcx, val, 0);
let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
let ret = C_undef(type_of::type_of(bcx.ccx(), t));
use trans::cleanup;
use trans::common::*;
use trans::datum::*;
+use trans::debuginfo::DebugLoc;
use trans::expr::{SaveIn, Ignore};
use trans::expr;
use trans::glue;
ArgVals(llargs.as_slice()),
dest).bcx;
- finish_fn(&fcx, bcx, sig.output);
+ finish_fn(&fcx, bcx, sig.output, DebugLoc::None);
(llfn, method_bare_fn_ty)
}
use trans::common::*;
use trans::consts;
use trans::datum::*;
+use trans::debuginfo::DebugLoc;
use trans::expr::{Dest, Ignore, SaveIn};
use trans::expr;
use trans::glue;
let dataptr = get_dataptr(bcx, vptr);
let bcx = if type_needs_drop(tcx, unit_ty) {
let len = get_len(bcx, vptr);
- iter_vec_raw(bcx, dataptr, unit_ty, len, |bb, vv, tt| glue::drop_ty(bb, vv, tt, None))
+ iter_vec_raw(bcx,
+ dataptr,
+ unit_ty,
+ len,
+ |bb, vv, tt| glue::drop_ty(bb, vv, tt, DebugLoc::None))
} else {
bcx
};
let not_empty = ICmp(bcx, llvm::IntNE, len, C_uint(ccx, 0u));
with_cond(bcx, not_empty, |bcx| {
let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty));
- let size = Mul(bcx, C_uint(ccx, unit_size), len);
+ let size = Mul(bcx, C_uint(ccx, unit_size), len, DebugLoc::None);
glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
})
} else {
let cond_bcx = fcx.new_temp_block("expr_repeat: loop cond");
let body_bcx = fcx.new_temp_block("expr_repeat: body: set");
let inc_bcx = fcx.new_temp_block("expr_repeat: body: inc");
- Br(bcx, loop_bcx.llbb);
+ Br(bcx, loop_bcx.llbb, DebugLoc::None);
let loop_counter = {
// i = 0
let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
Store(loop_bcx, C_uint(bcx.ccx(), 0u), i);
- Br(loop_bcx, cond_bcx.llbb);
+ Br(loop_bcx, cond_bcx.llbb, DebugLoc::None);
i
};
let rhs = count;
let cond_val = ICmp(cond_bcx, llvm::IntULT, lhs, rhs);
- CondBr(cond_bcx, cond_val, body_bcx.llbb, next_bcx.llbb);
+ CondBr(cond_bcx, cond_val, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
}
{ // loop body
};
let body_bcx = f(body_bcx, lleltptr, vt.unit_ty);
- Br(body_bcx, inc_bcx.llbb);
+ Br(body_bcx, inc_bcx.llbb, DebugLoc::None);
}
{ // i += 1
let i = Load(inc_bcx, loop_counter);
- let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1u));
+ let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1u), DebugLoc::None);
Store(inc_bcx, plusone, loop_counter);
- Br(inc_bcx, cond_bcx.llbb);
+ Br(inc_bcx, cond_bcx.llbb, DebugLoc::None);
}
next_bcx
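Every branch in the expr_repeat hunk above is compiler-synthesized control flow
with no single source expression behind it, which is why each Br and CondBr gets
DebugLoc::None. The block graph being wired up, as a sketch:

// entry:  i = alloca int; store 0 -> i;     br -> cond_bcx
// cond:   cond_val = (load i) < count;      condbr -> body_bcx / next_bcx
// body:   write element at index (load i);  br -> inc_bcx
// inc:    store (load i) + 1 -> i;          br -> cond_bcx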
// Now perform the iteration.
let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
- Br(bcx, header_bcx.llbb);
+ Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr);
let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
let next_bcx = fcx.new_temp_block("iter_vec_next");
- CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb);
+ CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
let body_bcx = f(body_bcx, data_ptr, vt.unit_ty);
AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,
&[C_int(bcx.ccx(), 1i)]),
body_bcx.llbb);
- Br(body_bcx, header_bcx.llbb);
+ Br(body_bcx, header_bcx.llbb, DebugLoc::None);
next_bcx
}
}
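For readers tracking the new parameter through these hunks, here is a minimal
sketch of the shape trans::debuginfo::DebugLoc and ToDebugLoc plausibly have.
The variant and method names (At, apply, set_source_location,
clear_source_location) are assumptions inferred from the call sites above, not
definitions taken from this diff:

// Assumed sketch of the type being threaded through the builders.
#[derive(Copy, Clone, PartialEq)]
pub enum DebugLoc {
    At(ast::NodeId, codemap::Span), // a concrete source position
    None                            // compiler-synthesized code: no position
}

impl DebugLoc {
    pub fn apply(self, fcx: &FunctionContext) {
        match self {
            DebugLoc::At(node_id, span) => {
                debuginfo::set_source_location(fcx, node_id, span);
            }
            DebugLoc::None => {
                debuginfo::clear_source_location(fcx);
            }
        }
    }
}

// Anything carrying an id and a span can report its own location; this is
// presumably how call_debug_location is derived from the intrinsic's call_info.
pub trait ToDebugLoc {
    fn debug_loc(&self) -> DebugLoc;
}

impl ToDebugLoc for ast::Expr {
    fn debug_loc(&self) -> DebugLoc {
        DebugLoc::At(self.id, self.span)
    }
}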