// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, BasicBlock};
use llvm::debuginfo::DIScope;
use llvm_util;
use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs;
use rustc::session::config::DebugInfo;
use base;
use builder::Builder;
use common::{CodegenCx, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::Instance;
use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode};
use value::Value;
use interfaces::{BuilderMethods, CommonMethods};

use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;

use libc::c_uint;

use std::iter;

use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::IndexVec;

pub use self::constant::codegen_static_initializer;

use self::analyze::CleanupKind;
use self::place::PlaceRef;
use rustc::mir::traversal;

use self::operand::{OperandRef, OperandValue};

/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
    instance: Instance<'tcx>,

    mir: &'a mir::Mir<'tcx>,

    debug_context: FunctionDebugContext<'ll>,

    llfn: &'ll Value,

    cx: &'a CodegenCx<'ll, 'tcx>,

    fn_ty: FnType<'tcx, Ty<'tcx>>,

    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, V>>,
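
    // (Illustrative note: on most Unix targets the personality routine in
    // play here is `rust_eh_personality`; the slot caches the value produced
    // by the landing pad so that resume blocks can reload it.)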

    /// A `Block` for each MIR `BasicBlock`
    blocks: IndexVec<mir::BasicBlock, &'ll BasicBlock>,

    /// The funclet status of each basic block
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,

    /// When targeting MSVC, this stores the cleanup info for each funclet
    /// BB. This is initialized as we compute the funclets' head block in RPO.
    funclets: &'a IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>,

    /// This stores the landing-pad block for a given BB, computed lazily on GNU
    /// and eagerly on MSVC.
    landing_pads: IndexVec<mir::BasicBlock, Option<&'ll BasicBlock>>,

    /// Cached unreachable block
    unreachable_block: Option<&'ll BasicBlock>,

    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    ///     - we should not take its address using the `&` operator
    ///     - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
    locals: IndexVec<mir::Local, LocalRef<'tcx, V>>,
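
    // (For illustration, with hypothetical MIR: in `_1 = Add(move _2, move _3)`
    // the temporary `_1` can live as an `OperandRef` in a virtual register,
    // whereas a local whose address is taken, e.g. via `_4 = &_1`, is forced
    // into an alloca and handled as a `PlaceRef`.)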

    /// Debug information for MIR scopes.
    scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,

    /// If this function is being monomorphized, this contains the type substitutions used.
    param_substs: &'tcx Substs<'tcx>,
}

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
    pub fn monomorphize<T>(&self, value: &T) -> T
        where T: TypeFoldable<'tcx>
    {
        self.cx.tcx.subst_and_normalize_erasing_regions(
            self.param_substs,
            ty::ParamEnv::reveal_all(),
            value,
        )
    }
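
    // (Illustration: with `param_substs = [u32]`, a `T`-typed MIR local of a
    // generic `fn foo<T>` folds to `u32` here, with regions erased and
    // associated types normalized under `ParamEnv::reveal_all()`.)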

    pub fn set_debug_loc(
        &mut self,
        bx: &Builder<'_, 'll, '_>,
        source_info: mir::SourceInfo
    ) {
        let (scope, span) = self.debug_loc(source_info);
        debuginfo::set_source_location(&self.debug_context, bx, scope, span);
    }

    pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) {
        // Bail out if debug info emission is not enabled.
        match self.debug_context {
            FunctionDebugContext::DebugInfoDisabled |
            FunctionDebugContext::FunctionWithoutDebugInfo => {
                return (self.scopes[source_info.scope].scope_metadata, source_info.span);
            }
            FunctionDebugContext::RegularContext(_) => {}
        }

        // In order to have good line stepping behavior in the debugger, we overwrite debug
        // locations of macro expansions with that of the outermost expansion site
        // (unless the crate is being compiled with `-Z debug-macros`).
        if source_info.span.ctxt() == NO_EXPANSION ||
           self.cx.sess().opts.debugging_opts.debug_macros {
            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
            (scope, source_info.span)
        } else {
            // Walk up the macro expansion chain until we reach a non-expanded span.
            // We also stop at the function body level because no line stepping can occur
            // at the level above that.
            let mut span = source_info.span;
            while span.ctxt() != NO_EXPANSION && span.ctxt() != self.mir.span.ctxt() {
                if let Some(info) = span.ctxt().outer().expn_info() {
                    span = info.call_site;
                } else {
                    break;
                }
            }
            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo());
            // Use span of the outermost expansion site, while keeping the original lexical scope.
            (scope, span)
        }
    }
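
    // (Illustration: for a statement produced by `assert!`, the walk in
    // `debug_loc` above replaces the span inside the macro's own definition
    // with the span of the outermost `assert!(...)` call site, so
    // single-stepping stays in the user's code.)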

    // DILocations inherit source file name from the parent DIScope. Due to macro expansions
    // it may so happen that the current span belongs to a different file than the DIScope
    // corresponding to span's containing source scope. If so, we need to create a DIScope
    // "extension" into that file.
    fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos)
                              -> Option<&'ll DIScope> {
        let scope_metadata = self.scopes[scope_id].scope_metadata;
        if pos < self.scopes[scope_id].file_start_pos ||
           pos >= self.scopes[scope_id].file_end_pos {
            let cm = self.cx.sess().source_map();
            let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate;
            Some(debuginfo::extend_scope_to_file(self.cx,
                                                 scope_metadata.unwrap(),
                                                 &cm.lookup_char_pos(pos).file,
                                                 defining_crate))
        } else {
            scope_metadata
        }
    }
}

enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
    /// `*p` is the fat pointer that references the actual unsized place.
    /// Every time it is initialized, we have to reallocate the place
    /// and update the fat pointer. That's the reason why it is indirect.
    UnsizedPlace(PlaceRef<'tcx, V>),
    Operand(Option<OperandRef<'tcx, V>>),
}
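
// (Illustration: `UnsizedPlace` arises for unsized locals, e.g. a local of
// type `str` or `dyn FnOnce()` under `#![feature(unsized_locals)]`; the fat
// pointer behind `p` is reallocated on every initialization.)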

impl LocalRef<'tcx, &'ll Value> {
    fn new_operand(
        cx: &CodegenCx<'ll, 'tcx>,
        layout: TyLayout<'tcx>,
    ) -> LocalRef<'tcx, &'ll Value> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
            // we need something in the operand.
            LocalRef::Operand(Some(OperandRef::new_zst(cx, layout)))
        } else {
            LocalRef::Operand(None)
        }
    }
}

///////////////////////////////////////////////////////////////////////////

pub fn codegen_mir(
    cx: &'a CodegenCx<'ll, 'tcx>,
    llfn: &'ll Value,
    mir: &'a Mir<'tcx>,
    instance: Instance<'tcx>,
    sig: ty::FnSig<'tcx>,
) {
    let fn_ty = FnType::new(cx, sig, &[]);
    debug!("fn_ty: {:?}", fn_ty);
    let debug_context =
        debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir);
    let bx = Builder::new_block(cx, llfn, "start");

    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
        bx.set_personality_fn(cx.eh_personality());
    }

    let cleanup_kinds = analyze::cleanup_kinds(&mir);
    // Allocate a `Block` for every basic block, except
    // the start block, if nothing loops back to it.
    let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
    let block_bxs: IndexVec<mir::BasicBlock, &'ll BasicBlock> =
        mir.basic_blocks().indices().map(|bb| {
            if bb == mir::START_BLOCK && !reentrant_start_block {
                bx.llbb()
            } else {
                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
            }
        }).collect();

    // Compute debuginfo scopes from MIR scopes.
    let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context);
    let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs);

    let mut fx = FunctionCx {
        instance,
        mir,
        llfn,
        fn_ty,
        cx,
        personality_slot: None,
        blocks: block_bxs,
        unreachable_block: None,
        cleanup_kinds,
        landing_pads,
        funclets: &funclets,
        scopes,
        locals: IndexVec::new(),
        debug_context,
        param_substs: {
            assert!(!instance.substs.needs_infer());
            instance.substs
        },
    };

    let memory_locals = analyze::non_ssa_locals(&fx);

    // Allocate variable and temp allocas
    fx.locals = {
        let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals);

        let mut allocate_local = |local| {
            let decl = &mir.local_decls[local];
            let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
            assert!(!layout.ty.has_erasable_regions());

            if let Some(name) = decl.name {
                // User variable
                let debug_scope = fx.scopes[decl.visibility_scope];
                let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full;

                if !memory_locals.contains(local) && !dbg {
                    debug!("alloc: {:?} ({}) -> operand", local, name);
                    return LocalRef::new_operand(bx.cx(), layout);
                }

                debug!("alloc: {:?} ({}) -> place", local, name);
                if layout.is_unsized() {
                    let indirect_place =
                        PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str());
                    // FIXME: add an appropriate debuginfo
                    LocalRef::UnsizedPlace(indirect_place)
                } else {
                    let place = PlaceRef::alloca(&bx, layout, &name.as_str());
                    if dbg {
                        let (scope, span) = fx.debug_loc(mir::SourceInfo {
                            span: decl.source_info.span,
                            scope: decl.visibility_scope,
                        });
                        declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(),
                            VariableAccess::DirectVariable { alloca: place.llval },
                            VariableKind::LocalVariable, span);
                    }
                    LocalRef::Place(place)
                }
            } else {
                // Temporary or return place
                if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
                    debug!("alloc: {:?} (return place) -> place", local);
                    let llretptr = llvm::get_param(llfn, 0);
                    LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
                } else if memory_locals.contains(local) {
                    debug!("alloc: {:?} -> place", local);
                    if layout.is_unsized() {
                        let indirect_place =
                            PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local));
                        LocalRef::UnsizedPlace(indirect_place)
                    } else {
                        LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
                    }
                } else {
                    // If this is an immediate local, we do not create an
                    // alloca in advance. Instead we wait until we see the
                    // definition and update the operand there.
                    debug!("alloc: {:?} -> operand", local);
                    LocalRef::new_operand(bx.cx(), layout)
                }
            }
        };

        let retptr = allocate_local(mir::RETURN_PLACE);
        iter::once(retptr)
            .chain(args.into_iter())
            .chain(mir.vars_and_temps_iter().map(allocate_local))
            .collect()
    };

    // Branch to the START block, if it's not the entry block.
    if reentrant_start_block {
        bx.br(fx.blocks[mir::START_BLOCK]);
    }

    // Up until here, IR instructions for this function have explicitly not been annotated with
    // source code location, so we don't step into call setup code. From here on, source location
    // emitting should be enabled.
    debuginfo::start_emitting_source_locations(&fx.debug_context);

    let rpo = traversal::reverse_postorder(&mir);
    let mut visited = BitSet::new_empty(mir.basic_blocks().len());

    // Codegen the body of each block using reverse postorder
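    // (note for illustration: reverse postorder visits a block before any of
    // its successors, loop back edges aside, so definitions are generally
    // emitted before their uses)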
    for (bb, _) in rpo {
        visited.insert(bb.index());
        fx.codegen_block(bb);
    }

    // Remove blocks that haven't been visited, or have no
    // predecessors.
    for bb in mir.basic_blocks().indices() {
        // Unreachable block
        if !visited.contains(bb.index()) {
            debug!("codegen_mir: block {:?} was not visited", bb);
            unsafe {
                llvm::LLVMDeleteBasicBlock(fx.blocks[bb]);
            }
        }
    }
}

fn create_funclets(
    mir: &'a Mir<'tcx>,
    bx: &Builder<'a, 'll, 'tcx>,
    cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
    block_bxs: &IndexVec<mir::BasicBlock, &'ll BasicBlock>)
    -> (IndexVec<mir::BasicBlock, Option<&'ll BasicBlock>>,
        IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>)
{
    block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
        match *cleanup_kind {
            CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
            _ => return (None, None)
        }

        let cleanup;
        let ret_llbb;
        match mir[bb].terminator.as_ref().map(|t| &t.kind) {
            // This is a basic block that we're aborting the program for,
            // notably in an `extern` function. These basic blocks are inserted
            // so that we assert that `extern` functions do indeed not panic,
            // and if they do we abort the process.
            //
            // On MSVC these are tricky though (where we're doing funclets). If
            // we were to do a cleanuppad (like below) the normal functions like
            // `longjmp` would trigger the abort logic, terminating the
            // program. Instead we insert the equivalent of `catch(...)` for C++
            // which magically doesn't trigger when `longjmp` flies over this
            // block.
            //
            // Lots more discussion can be found on #48251 but this codegen is
            // modeled after clang's for:
            //
            //      try {
            //          foo();
            //      } catch (...) {
            //          bar();
            //      }
            Some(&mir::TerminatorKind::Abort) => {
                let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
                let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
                ret_llbb = cs_bx.llbb();

                let cs = cs_bx.catch_switch(None, None, 1);
                cs_bx.add_handler(cs, cp_bx.llbb());

                // The "null" here is actually an RTTI type descriptor for the
                // C++ personality function, but `catch (...)` has no type so
                // it's null. The 64 here is actually a bitfield which
                // represents that this is a catch-all block.
                let null = bx.cx().const_null(bx.cx().type_i8p());
                let sixty_four = bx.cx().const_i32(64);
                cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                cp_bx.br(llbb);
            }
            _ => {
                let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
                ret_llbb = cleanup_bx.llbb();
                cleanup = cleanup_bx.cleanup_pad(None, &[]);
                cleanup_bx.br(llbb);
            }
        };

        (Some(ret_llbb), Some(Funclet::new(cleanup)))
    }).unzip()
}

/// Produce, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs(
    bx: &Builder<'a, 'll, 'tcx>,
    fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>,
    scopes: &IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
    memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, &'ll Value>> {
    let mir = fx.mir;
    let tcx = bx.tcx();
    let mut idx = 0;
    let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize;
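
    // `fn_ty.ret.is_indirect()` means the return value is written through a
    // pointer passed as LLVM argument 0 (sret), so the real arguments start
    // at LLVM argument index 1; hence `llarg_idx` starts at 0 or 1 above.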

    // Get the argument scope, if it exists and if we need it.
    let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
    let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full {
        arg_scope.scope_metadata
    } else {
        None
    };

    mir.args_iter().enumerate().map(|(arg_index, local)| {
        let arg_decl = &mir.local_decls[local];

        let name = if let Some(name) = arg_decl.name {
            name.as_str().to_string()
        } else {
            format!("arg{}", arg_index)
        };

        if Some(local) == mir.spread_arg {
            // This argument (e.g. the last argument in the "rust-call" ABI)
            // is a tuple that was spread at the ABI level and now we have
            // to reconstruct it into a tuple local variable, from multiple
            // individual LLVM function arguments.
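            //
            // (Illustration: for a closure invoked through
            // `extern "rust-call" fn call(self, args: (i32, f64))`, the
            // `(i32, f64)` tuple arrives as two scalar LLVM arguments and is
            // reassembled into a single tuple alloca below.)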

            let arg_ty = fx.monomorphize(&arg_decl.ty);
            let tupled_arg_tys = match arg_ty.sty {
                ty::Tuple(ref tys) => tys,
                _ => bug!("spread argument isn't a tuple?!")
            };

            let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name);
            for i in 0..tupled_arg_tys.len() {
                let arg = &fx.fn_ty.args[idx];
                idx += 1;
                if arg.pad.is_some() {
                    llarg_idx += 1;
                }
                arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i));
            }

            // Now that we have one alloca that contains the aggregate value,
            // we can create one debuginfo entry for the argument.
            arg_scope.map(|scope| {
                let variable_access = VariableAccess::DirectVariable {
                    alloca: place.llval
                };
                declare_local(
                    bx,
                    &fx.debug_context,
                    arg_decl.name.unwrap_or(keywords::Invalid.name()),
                    arg_ty, scope,
                    variable_access,
                    VariableKind::ArgumentVariable(arg_index + 1),
                    DUMMY_SP
                );
            });

            return LocalRef::Place(place);
        }

        let arg = &fx.fn_ty.args[idx];
        idx += 1;
        if arg.pad.is_some() {
            llarg_idx += 1;
        }

        if arg_scope.is_none() && !memory_locals.contains(local) {
            // We don't have to cast or keep the argument in the alloca.
            // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
            // of putting everything in allocas just so we can use llvm.dbg.declare.
            let local = |op| LocalRef::Operand(Some(op));
            match arg.mode {
                PassMode::Ignore => {
                    return local(OperandRef::new_zst(bx.cx(), arg.layout));
                }
                PassMode::Direct(_) => {
                    let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
                    bx.set_value_name(llarg, &name);
                    llarg_idx += 1;
                    return local(
                        OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
                }
                PassMode::Pair(..) => {
                    let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
                    bx.set_value_name(a, &(name.clone() + ".0"));
                    llarg_idx += 1;

                    let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
                    bx.set_value_name(b, &(name + ".1"));
                    llarg_idx += 1;

                    return local(OperandRef {
                        val: OperandValue::Pair(a, b),
                        layout: arg.layout
                    });
                }
                _ => {}
            }
        }

        let place = if arg.is_sized_indirect() {
            // Don't copy an indirect argument to an alloca, the caller
            // already put it in a temporary alloca and gave it up.
            // FIXME: lifetimes
            let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
            bx.set_value_name(llarg, &name);
            llarg_idx += 1;
            PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
        } else if arg.is_unsized_indirect() {
            // As the storage for the indirect argument lives during
            // the whole function call, we just copy the fat pointer.
            let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
            llarg_idx += 1;
            let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
            llarg_idx += 1;
            let indirect_operand = OperandValue::Pair(llarg, llextra);
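            // (note: the "extra" half of the fat pointer is the slice length
            // or the trait-object vtable, depending on the unsized tail)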

            let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name);
            indirect_operand.store(&bx, tmp);
            tmp
        } else {
            let tmp = PlaceRef::alloca(bx, arg.layout, &name);
            arg.store_fn_arg(bx, &mut llarg_idx, tmp);
            tmp
        };

        arg_scope.map(|scope| {
            // Is this a regular argument?
            if arg_index > 0 || mir.upvar_decls.is_empty() {
                // The Rust ABI passes indirect variables using a pointer and a manual copy, so we
                // need to insert a deref here, but the C ABI uses a pointer and a copy using the
                // byval attribute, for which LLVM always does the deref itself,
                // so we must not add it.
                let variable_access = VariableAccess::DirectVariable {
                    alloca: place.llval
                };

                declare_local(
                    bx,
                    &fx.debug_context,
                    arg_decl.name.unwrap_or(keywords::Invalid.name()),
                    arg.layout.ty,
                    scope,
                    variable_access,
                    VariableKind::ArgumentVariable(arg_index + 1),
                    DUMMY_SP
                );
                return;
            }

            // Or is it the closure environment?
            let (closure_layout, env_ref) = match arg.layout.ty.sty {
                ty::RawPtr(ty::TypeAndMut { ty, .. }) |
                ty::Ref(_, ty, _) => (bx.cx().layout_of(ty), true),
                _ => (arg.layout, false)
            };
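
            // (Illustration: a `Fn`/`FnMut` closure receives its environment
            // as `&{closure}`/`&mut {closure}`, so we peel the reference to
            // reach the environment's own layout; an `FnOnce` closure takes
            // its environment by value, leaving `env_ref` false.)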

            let (def_id, upvar_substs) = match closure_layout.ty.sty {
                ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
                ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
                _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
            };
            let upvar_tys = upvar_substs.upvar_tys(def_id, tcx);

            // Store the pointer to closure data in an alloca for debuginfo
            // because that's what the llvm.dbg.declare intrinsic expects.
            //
            // FIXME(eddyb) this shouldn't be necessary but SROA seems to
            // mishandle DW_OP_plus not preceded by DW_OP_deref, i.e. it
            // doesn't actually strip the offset when splitting the closure
            // environment into its components so it ends up out of bounds.
            // (cuviper) It seems to be fine without the alloca on LLVM 6 and later.
            let env_alloca = !env_ref && llvm_util::get_major_version() < 6;
            let env_ptr = if env_alloca {
                let scratch = PlaceRef::alloca(bx,
                    bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
                    "__debuginfo_env_ptr");
                bx.store(place.llval, scratch.llval, scratch.align);
                scratch.llval
            } else {
                place.llval
            };

            for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
                let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();

                let ops = unsafe {
                    [llvm::LLVMRustDIBuilderCreateOpDeref(),
                     llvm::LLVMRustDIBuilderCreateOpPlusUconst(),
                     byte_offset_of_var_in_env as i64,
                     llvm::LLVMRustDIBuilderCreateOpDeref()]
                };
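
                // (Read as a DWARF expression this is roughly
                // `*(*env_ptr + byte_offset_of_var_in_env)`: deref the
                // environment pointer, add the capture's offset, then deref
                // again for a by-ref capture; ops that don't apply are
                // trimmed off below.)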

                // The environment and the capture can each be indirect.

                // FIXME(eddyb) see above why we sometimes have to keep
                // a pointer in an alloca for debuginfo atm.
                let mut ops = if env_ref || env_alloca { &ops[..] } else { &ops[1..] };

                let ty = if let (true, &ty::Ref(_, ty, _)) = (decl.by_ref, &ty.sty) {
                    ty
                } else {
                    ops = &ops[..ops.len() - 1];
                    ty
                };

                let variable_access = VariableAccess::IndirectVariable {
                    alloca: env_ptr,
                    address_operations: &ops
                };
                declare_local(
                    bx,
                    &fx.debug_context,
                    decl.debug_name,
                    ty,
                    scope,
                    variable_access,
                    VariableKind::LocalVariable,
                    DUMMY_SP
                );
            }
        });

        if arg.is_unsized_indirect() {
            LocalRef::UnsizedPlace(place)
        } else {
            LocalRef::Place(place)
        }
    }).collect()
}

mod analyze;
mod block;
pub mod constant;
pub mod place;
pub mod operand;