use libc::c_uint;
use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
use rustc::ty::layout::{TyLayout, HasTyCtxt};
use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs;
use rustc::session::config::DebugInfo;
use rustc_mir::monomorphize::Instance;
use rustc_target::abi::call::{FnType, PassMode};
use crate::base;
use crate::debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext};
use crate::traits::*;

use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;

use std::iter;

use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::IndexVec;

use self::analyze::CleanupKind;
use self::place::PlaceRef;
use rustc::mir::traversal;

use self::operand::{OperandRef, OperandValue};

/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,

    mir: &'a mir::Mir<'tcx>,

    debug_context: FunctionDebugContext<Bx::DIScope>,

    llfn: Bx::Value,

    cx: &'a Bx::CodegenCx,

    fn_ty: FnType<'tcx, Ty<'tcx>>,

    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
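    ///
    /// Roughly (an illustrative sketch, assuming GNU-style unwinding, not
    /// exact codegen output): the landing pad stores its `{ i8*, i32 }`
    /// result into this alloca, and the resume block later loads it back
    /// and hands it to LLVM's `resume` instruction.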
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,

    /// A `Block` for each MIR `BasicBlock`
    blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,

    /// The funclet status of each basic block
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,

    /// When targeting MSVC, this stores the cleanup info for each funclet
    /// BB. This is initialized as we compute the funclets' head block in RPO.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,

    /// This stores the landing-pad block for a given BB, computed lazily on GNU
    /// and eagerly on MSVC.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// Cached unreachable block
    unreachable_block: Option<Bx::BasicBlock>,

    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    ///     - we should not take its address using the `&` operator
    ///     - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
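    ///
    /// For example (illustrative only): in `let y = x + 1; return y;` the
    /// local `y` can stay an `OperandRef`, while in `let y = x + 1; let p = &y;`
    /// it needs an alloca-backed `PlaceRef` because its address is taken.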
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,

    /// Debug information for MIR scopes.
    scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<Bx::DIScope>>,

    /// If this function is being monomorphized, this contains the type substitutions used.
    param_substs: &'tcx Substs<'tcx>,
}

impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
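    /// Substitutes this function's type parameters into `value` and erases
    /// regions, with `ParamEnv::reveal_all()`. For example (illustrative):
    /// when codegenning the instance `f::<u32>` of `fn f<T>(..)`, a MIR type
    /// `Vec<T>` is monomorphized to `Vec<u32>`.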
    pub fn monomorphize<T>(&self, value: &T) -> T
        where T: TypeFoldable<'tcx>
    {
        self.cx.tcx().subst_and_normalize_erasing_regions(
            self.param_substs,
            ty::ParamEnv::reveal_all(),
            value,
        )
    }

    pub fn set_debug_loc(
        &mut self,
        bx: &mut Bx,
        source_info: mir::SourceInfo
    ) {
        let (scope, span) = self.debug_loc(source_info);
        bx.set_source_location(&self.debug_context, scope, span);
    }

    pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
        // Bail out if debug info emission is not enabled.
        match self.debug_context {
            FunctionDebugContext::DebugInfoDisabled |
            FunctionDebugContext::FunctionWithoutDebugInfo => {
                return (self.scopes[source_info.scope].scope_metadata, source_info.span);
            }
            FunctionDebugContext::RegularContext(_) => {}
        }

        // In order to have good line stepping behavior in the debugger, we
        // overwrite debug locations of macro expansions with those of the
        // outermost expansion site (unless the crate is being compiled with
        // `-Z debug-macros`).
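        // For example (illustrative): a span inside the code expanded from
        // `println!("{}", x)` is replaced with the span of the `println!`
        // call itself, so the debugger steps on the user's line instead of
        // through macro-generated code.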
        if source_info.span.ctxt() == NO_EXPANSION ||
           self.cx.sess().opts.debugging_opts.debug_macros {
            let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
            (scope, source_info.span)
        } else {
            // Walk up the macro expansion chain until we reach a non-expanded span.
            // We also stop at the function body level because no line stepping can occur
            // at the level above that.
            let mut span = source_info.span;
            while span.ctxt() != NO_EXPANSION && span.ctxt() != self.mir.span.ctxt() {
                if let Some(info) = span.ctxt().outer().expn_info() {
                    span = info.call_site;
                } else {
                    break;
                }
            }

            let scope = self.scope_metadata_for_loc(source_info.scope, span.lo());
            // Use the span of the outermost expansion site, while keeping the
            // original lexical scope.
            (scope, span)
        }
    }

    // DILocations inherit the source file name from the parent DIScope. Due to
    // macro expansions it may so happen that the current span belongs to a
    // different file than the DIScope corresponding to the span's containing
    // source scope. If so, we need to create a DIScope "extension" into that
    // file.
    fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos)
                              -> Option<Bx::DIScope> {
        let scope_metadata = self.scopes[scope_id].scope_metadata;
        if pos < self.scopes[scope_id].file_start_pos ||
           pos >= self.scopes[scope_id].file_end_pos {
            let sm = self.cx.sess().source_map();
            let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate;
            Some(self.cx.extend_scope_to_file(
                scope_metadata.unwrap(),
                &sm.lookup_char_pos(pos).file,
                defining_crate
            ))
        } else {
            scope_metadata
        }
    }
}

enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
    /// `*p` is the fat pointer that references the actual unsized place.
    /// Every time it is initialized, we have to reallocate the place
    /// and update the fat pointer. That's the reason why it is indirect.
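    /// (Illustrative example: an unsized local of type `str` or `dyn Trait`,
    /// as produced by unsized function arguments or the unsized-locals
    /// feature, is stored behind this double indirection.)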
    UnsizedPlace(PlaceRef<'tcx, V>),
    Operand(Option<OperandRef<'tcx, V>>),
}

impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
    fn new_operand<Cx: CodegenMethods<'tcx, Value = V>>(
        cx: &Cx,
        layout: TyLayout<'tcx>,
    ) -> LocalRef<'tcx, V> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
            // we need something in the operand.
            LocalRef::Operand(Some(OperandRef::new_zst(cx, layout)))
        } else {
            LocalRef::Operand(None)
        }
    }
}

///////////////////////////////////////////////////////////////////////////

pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    llfn: Bx::Value,
    mir: &'a Mir<'tcx>,
    instance: Instance<'tcx>,
    sig: ty::FnSig<'tcx>,
) {
    let fn_ty = cx.new_fn_type(sig, &[]);
    debug!("fn_ty: {:?}", fn_ty);
    let debug_context =
        cx.create_function_debug_context(instance, sig, llfn, mir);
    let mut bx = Bx::new_block(cx, llfn, "start");

    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
        bx.set_personality_fn(cx.eh_personality());
    }

    let cleanup_kinds = analyze::cleanup_kinds(&mir);
    // Allocate a `Block` for every basic block, except
    // the start block, if nothing loops back to it.
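    // (LLVM's entry block must not have predecessors, so if anything branches
    // back to START_BLOCK, e.g. a `loop` at the very top of the function
    // body, the start block needs a separate, non-entry LLVM block.)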
    let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
    let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> =
        mir.basic_blocks().indices().map(|bb| {
            if bb == mir::START_BLOCK && !reentrant_start_block {
                bx.llbb()
            } else {
                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
            }
        }).collect();

    // Compute debuginfo scopes from MIR scopes.
    let scopes = cx.create_mir_scopes(mir, &debug_context);
    let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs);

    let mut fx = FunctionCx {
        instance,
        mir,
        llfn,
        fn_ty,
        cx,
        personality_slot: None,
        blocks: block_bxs,
        unreachable_block: None,
        cleanup_kinds,
        landing_pads,
        funclets,
        scopes,
        locals: IndexVec::new(),
        debug_context,
        param_substs: {
            assert!(!instance.substs.needs_infer());
            instance.substs
        },
    };

    let memory_locals = analyze::non_ssa_locals(&fx);

    // Allocate variable and temp allocas
    fx.locals = {
        let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals);

        let mut allocate_local = |local| {
            let decl = &mir.local_decls[local];
            let layout = bx.layout_of(fx.monomorphize(&decl.ty));
            assert!(!layout.ty.has_erasable_regions());

            if let Some(name) = decl.name {
                // User variable
                let debug_scope = fx.scopes[decl.visibility_scope];
                let dbg = debug_scope.is_valid() &&
                    bx.sess().opts.debuginfo == DebugInfo::Full;

                if !memory_locals.contains(local) && !dbg {
                    debug!("alloc: {:?} ({}) -> operand", local, name);
                    return LocalRef::new_operand(bx.cx(), layout);
                }

                debug!("alloc: {:?} ({}) -> place", local, name);
                if layout.is_unsized() {
                    let indirect_place =
                        PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str());
                    // FIXME: add an appropriate debuginfo
                    LocalRef::UnsizedPlace(indirect_place)
                } else {
                    let place = PlaceRef::alloca(&mut bx, layout, &name.as_str());
                    if dbg {
                        let (scope, span) = fx.debug_loc(mir::SourceInfo {
                            span: decl.source_info.span,
                            scope: decl.visibility_scope,
                        });
                        bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(),
                            VariableAccess::DirectVariable { alloca: place.llval },
                            VariableKind::LocalVariable, span);
                    }
                    LocalRef::Place(place)
                }
            } else {
                // Temporary or return place
                if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
                    debug!("alloc: {:?} (return place) -> place", local);
                    let llretptr = fx.cx.get_param(llfn, 0);
                    LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi))
                } else if memory_locals.contains(local) {
                    debug!("alloc: {:?} -> place", local);
                    if layout.is_unsized() {
                        let indirect_place = PlaceRef::alloca_unsized_indirect(
                            &mut bx,
                            layout,
                            &format!("{:?}", local),
                        );
                        LocalRef::UnsizedPlace(indirect_place)
                    } else {
                        LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local)))
                    }
                } else {
                    // If this is an immediate local, we do not create an
                    // alloca in advance. Instead we wait until we see the
                    // definition and update the operand there.
                    debug!("alloc: {:?} -> operand", local);
                    LocalRef::new_operand(bx.cx(), layout)
                }
            }
        };

        let retptr = allocate_local(mir::RETURN_PLACE);
        iter::once(retptr)
            .chain(args.into_iter())
            .chain(mir.vars_and_temps_iter().map(allocate_local))
            .collect()
    };

    // Branch to the START block, if it's not the entry block.
    if reentrant_start_block {
        bx.br(fx.blocks[mir::START_BLOCK]);
    }

    // Up until here, IR instructions for this function have explicitly not been annotated with
    // source code location, so we don't step into call setup code. From here on, source location
    // emitting should be enabled.
    debuginfo::start_emitting_source_locations(&fx.debug_context);

    let rpo = traversal::reverse_postorder(&mir);
    let mut visited = BitSet::new_empty(mir.basic_blocks().len());

    // Codegen the body of each block using reverse postorder
    for (bb, _) in rpo {
        visited.insert(bb.index());
        fx.codegen_block(bb);
    }

    // Remove blocks that haven't been visited, or have no
    // predecessors.
    for bb in mir.basic_blocks().indices() {
        // Unreachable block
        if !visited.contains(bb.index()) {
            debug!("codegen_mir: block {:?} was not visited", bb);
            unsafe {
                bx.delete_basic_block(fx.blocks[bb]);
            }
        }
    }
}

fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    mir: &'a Mir<'tcx>,
    bx: &mut Bx,
    cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
    block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>)
    -> (IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
        IndexVec<mir::BasicBlock, Option<Bx::Funclet>>)
{
    block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
        match *cleanup_kind {
            CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
            _ => return (None, None)
        }

        let funclet;
        let ret_llbb;
        match mir[bb].terminator.as_ref().map(|t| &t.kind) {
            // This is a basic block that we're aborting the program for,
            // notably in an `extern` function. These basic blocks are inserted
            // so that we assert that `extern` functions do indeed not panic,
            // and if they do we abort the process.
            //
            // On MSVC these are tricky though (where we're doing funclets). If
            // we were to do a cleanuppad (like below) the normal functions like
            // `longjmp` would trigger the abort logic, terminating the
            // program. Instead we insert the equivalent of `catch(...)` for C++,
            // which magically doesn't trigger when `longjmp` flies over this
            // block.
            //
            // Lots more discussion can be found on #48251, but this codegen is
            // modeled after clang's for:
            //
            //      try {
            //          foo();
            //      } catch (...) {
            //          bar();
            //      }
            Some(&mir::TerminatorKind::Abort) => {
                let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
                let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
                ret_llbb = cs_bx.llbb();

                let cs = cs_bx.catch_switch(None, None, 1);
                cs_bx.add_handler(cs, cp_bx.llbb());

                // The "null" here is actually a RTTI type descriptor for the
                // C++ personality function, but `catch (...)` has no type so
                // it's null. The 64 here is actually a bitfield which
                // represents that this is a catch-all block.
                let null = bx.const_null(bx.type_i8p());
                let sixty_four = bx.const_i32(64);
                funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                cp_bx.br(llbb);
            }
            _ => {
                let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
                ret_llbb = cleanup_bx.llbb();
                funclet = cleanup_bx.cleanup_pad(None, &[]);
                cleanup_bx.br(llbb);
            }
        };

        (Some(ret_llbb), Some(funclet))
    }).unzip()
}

/// Produces, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    fx: &FunctionCx<'a, 'tcx, Bx>,
    scopes: &IndexVec<
        mir::SourceScope,
        debuginfo::MirDebugScope<Bx::DIScope>
    >,
    memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, Bx::Value>> {
    let mir = fx.mir;
    let tcx = fx.cx.tcx();
    let mut idx = 0;
    let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize;

    // Get the argument scope, if it exists and if we need it.
    let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
    let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full {
        arg_scope.scope_metadata
    } else {
        None
    };

    mir.args_iter().enumerate().map(|(arg_index, local)| {
        let arg_decl = &mir.local_decls[local];

        let name = if let Some(name) = arg_decl.name {
            name.as_str().to_string()
        } else {
            format!("arg{}", arg_index)
        };

        if Some(local) == mir.spread_arg {
            // This argument (e.g., the last argument in the "rust-call" ABI)
            // is a tuple that was spread at the ABI level and now we have
            // to reconstruct it into a tuple local variable, from multiple
            // individual LLVM function arguments.
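            //
            // For example (illustrative): calling a closure via
            // `Fn::call(f, (a, b))` passes `a` and `b` as separate LLVM
            // parameters, which are stored back into one tuple alloca here.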

            let arg_ty = fx.monomorphize(&arg_decl.ty);
            let tupled_arg_tys = match arg_ty.sty {
                ty::Tuple(ref tys) => tys,
                _ => bug!("spread argument isn't a tuple?!")
            };

            let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name);
            for i in 0..tupled_arg_tys.len() {
                let arg = &fx.fn_ty.args[idx];
                idx += 1;
                if arg.pad.is_some() {
                    llarg_idx += 1;
                }
                let pr_field = place.project_field(bx, i);
                bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
            }

            // Now that we have one alloca that contains the aggregate value,
            // we can create one debuginfo entry for the argument.
            arg_scope.map(|scope| {
                let variable_access = VariableAccess::DirectVariable {
                    alloca: place.llval
                };
                bx.declare_local(
                    &fx.debug_context,
                    arg_decl.name.unwrap_or(keywords::Invalid.name()),
                    arg_ty, scope,
                    variable_access,
                    VariableKind::ArgumentVariable(arg_index + 1),
                    DUMMY_SP
                );
            });

            return LocalRef::Place(place);
        }

        let arg = &fx.fn_ty.args[idx];
        idx += 1;
        if arg.pad.is_some() {
            llarg_idx += 1;
        }

        if arg_scope.is_none() && !memory_locals.contains(local) {
            // We don't have to cast or keep the argument in the alloca.
            // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
            // of putting everything in allocas just so we can use llvm.dbg.declare.
            let local = |op| LocalRef::Operand(Some(op));
            match arg.mode {
                PassMode::Ignore => {
                    return local(OperandRef::new_zst(bx.cx(), arg.layout));
                }
                PassMode::Direct(_) => {
                    let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                    bx.set_value_name(llarg, &name);
                    llarg_idx += 1;
                    return local(
                        OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
                }
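                // A scalar pair such as `&str` (illustrative example) arrives
                // as two LLVM parameters, data pointer and length, and is
                // rebuilt into a single operand pair here.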
                PassMode::Pair(..) => {
                    let a = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                    bx.set_value_name(a, &(name.clone() + ".0"));
                    llarg_idx += 1;

                    let b = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                    bx.set_value_name(b, &(name + ".1"));
                    llarg_idx += 1;

                    return local(OperandRef {
                        val: OperandValue::Pair(a, b),
                        layout: arg.layout
                    });
                }
                _ => {}
            }
        }

        let place = if arg.is_sized_indirect() {
            // Don't copy an indirect argument to an alloca, the caller
            // already put it in a temporary alloca and gave it up.
            // FIXME: lifetimes
            let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
            bx.set_value_name(llarg, &name);
            llarg_idx += 1;
            PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi)
        } else if arg.is_unsized_indirect() {
            // As the storage for the indirect argument lives during
            // the whole function call, we just copy the fat pointer.
            let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
            llarg_idx += 1;
            let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint);
            llarg_idx += 1;
            let indirect_operand = OperandValue::Pair(llarg, llextra);

            let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name);
            indirect_operand.store(bx, tmp);
            tmp
        } else {
            let tmp = PlaceRef::alloca(bx, arg.layout, &name);
            bx.store_fn_arg(arg, &mut llarg_idx, tmp);
            tmp
        };

        arg_scope.map(|scope| {
            // Is this a regular argument?
            if arg_index > 0 || mir.upvar_decls.is_empty() {
                // The Rust ABI passes indirect variables using a pointer and a manual copy, so we
                // need to insert a deref here, but the C ABI uses a pointer and a copy using the
                // byval attribute, for which LLVM always does the deref itself,
                // so we must not add it.
                let variable_access = VariableAccess::DirectVariable {
                    alloca: place.llval
                };

                bx.declare_local(
                    &fx.debug_context,
                    arg_decl.name.unwrap_or(keywords::Invalid.name()),
                    arg.layout.ty,
                    scope,
                    variable_access,
                    VariableKind::ArgumentVariable(arg_index + 1),
                    DUMMY_SP
                );
                return;
            }

            let pin_did = tcx.lang_items().pin_type();
            // Or is it the closure environment?
            let (closure_layout, env_ref) = match arg.layout.ty.sty {
                ty::RawPtr(ty::TypeAndMut { ty, .. }) |
                ty::Ref(_, ty, _) => (bx.layout_of(ty), true),
                ty::Adt(def, substs) if Some(def.did) == pin_did => {
                    match substs.type_at(0).sty {
                        ty::Ref(_, ty, _) => (bx.layout_of(ty), true),
                        _ => (arg.layout, false),
                    }
                }
                _ => (arg.layout, false)
            };

            let (def_id, upvar_substs) = match closure_layout.ty.sty {
                ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
                ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
                _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
            };
            let upvar_tys = upvar_substs.upvar_tys(def_id, tcx);

            for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
                let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();

                let ops = bx.debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env);

                // The environment and the capture can each be indirect.
                let mut ops = if env_ref { &ops[..] } else { &ops[1..] };
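
                // (Illustrative: a by-value capture in a `move` closure does
                // not need the trailing deref and drops it below, while a
                // by-ref capture keeps it and reports the referent type.)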
                let ty = if let (true, &ty::Ref(_, ty, _)) = (decl.by_ref, &ty.sty) {
                    ty
                } else {
                    ops = &ops[..ops.len() - 1];
                    ty
                };

                let variable_access = VariableAccess::IndirectVariable {
                    alloca: place.llval,
                    address_operations: &ops
                };
                bx.declare_local(
                    &fx.debug_context,
                    decl.debug_name,
                    ty,
                    scope,
                    variable_access,
                    VariableKind::LocalVariable,
                    DUMMY_SP
                );
            }
        });
        if arg.is_unsized_indirect() {
            LocalRef::UnsizedPlace(place)
        } else {
            LocalRef::Place(place)
        }
    }).collect()
}