use crate::base;
use crate::traits::*;
use rustc_middle::mir;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_target::abi::call::{FnAbi, PassMode};

use std::iter;

use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;

use self::analyze::CleanupKind;
use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
use self::place::PlaceRef;
use rustc_middle::mir::traversal;

use self::operand::{OperandRef, OperandValue};
/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,

    mir: mir::ReadOnlyBodyAndCache<'tcx, 'tcx>,

    debug_context: Option<FunctionDebugContext<Bx::DIScope>>,

    llfn: Bx::Function,

    cx: &'a Bx::CodegenCx,

    fn_abi: FnAbi<'tcx, Ty<'tcx>>,
    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is a value used by C++-style
    /// unwinding to filter exceptions by type; we don't care about its
    /// contents. This field holds an alloca into which the personality
    /// is stored and later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
    /// A `Block` for each MIR `BasicBlock`.
    blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,

    /// The funclet status of each basic block.
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,

    /// When targeting MSVC, this stores the cleanup info for each funclet
    /// BB. This is initialized as we compute the funclets' head block in RPO.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,

    /// This stores the landing-pad block for a given BB, computed lazily on GNU
    /// and eagerly on MSVC.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// Cached unreachable block.
    unreachable_block: Option<Bx::BasicBlock>,
    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    ///     - we should not take its address using the `&` operator
    ///     - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// where the exact layout of the type matters.
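    ///
    /// As an illustrative sketch (not code from this crate): in the first
    /// function below the temporary can stay an `OperandRef`, while taking a
    /// reference in the second forces an alloca-backed `PlaceRef`:
    ///
    /// ```ignore (illustrative)
    /// fn stays_operand(a: u32, b: u32) -> u32 {
    ///     let x = a + b; // immediate, never referenced indirectly
    ///     x
    /// }
    /// fn needs_alloca(a: u32, b: u32) -> u32 {
    ///     let x = a + b;
    ///     let r = &x; // address taken: `x` must live in memory
    ///     *r
    /// }
    /// ```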
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,

    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
    /// This is `None` if no variable debuginfo/names are needed.
    per_local_var_debug_info:
        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,

    /// Caller location propagated if this function has `#[track_caller]`.
    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
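    /// Substitutes this function's generic parameters into `value` and
    /// normalizes away projections, yielding a fully concrete copy.
    /// For example (illustrative): if `self.instance` is `foo::<u32>`, a MIR
    /// type `Vec<T>` monomorphizes to `Vec<u32>`.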
    pub fn monomorphize<T>(&self, value: &T) -> T
    where
        T: Copy + TypeFoldable<'tcx>,
    {
        debug!("monomorphize: self.instance={:?}", self.instance);
        if let Some(substs) = self.instance.substs_for_mir_body() {
            self.cx.tcx().subst_and_normalize_erasing_regions(
                substs,
                ty::ParamEnv::reveal_all(),
                value,
            )
        } else {
            self.cx.tcx().normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
        }
    }
}
enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
    /// `*p` is the fat pointer that references the actual unsized place.
    /// Every time it is initialized, we have to reallocate the place
    /// and update the fat pointer. That's the reason why it is indirect.
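    ///
    /// An illustrative sketch (not code from this crate), using the unstable
    /// `unsized_locals` feature:
    ///
    /// ```ignore (illustrative)
    /// #![feature(unsized_locals)]
    /// fn call_boxed(f: Box<dyn FnOnce()>) {
    ///     let g: dyn FnOnce() = *f; // `g` is an unsized local
    ///     g();
    /// }
    /// ```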
    UnsizedPlace(PlaceRef<'tcx, V>),
    Operand(Option<OperandRef<'tcx, V>>),
}
impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
    fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> LocalRef<'tcx, V> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
            // we need something in the operand.
            LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
        } else {
            LocalRef::Operand(None)
        }
    }
}
///////////////////////////////////////////////////////////////////////////
pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    assert!(!instance.substs.needs_infer());

    let llfn = cx.get_fn(instance);

    let mir = cx.tcx().instance_mir(instance.def);

    let fn_abi = FnAbi::of_instance(cx, instance, &[]);
    debug!("fn_abi: {:?}", fn_abi);

    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);

    let mut bx = Bx::new_block(cx, llfn, "start");

    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
        bx.set_personality_fn(cx.eh_personality());
    }
    let cleanup_kinds = analyze::cleanup_kinds(&mir);
    // Allocate a `Block` for every basic block, except
    // the start block, if nothing loops back to it.
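    // (For example, a `loop` at the top of the function body can make the
    // start block a loop header, which then needs its own non-entry block.)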
    let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
    let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> = mir
        .basic_blocks()
        .indices()
        .map(|bb| {
            if bb == mir::START_BLOCK && !reentrant_start_block {
                bx.llbb()
            } else {
                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
            }
        })
        .collect();
    let (landing_pads, funclets) = create_funclets(&mir, &mut bx, &cleanup_kinds, &block_bxs);
    let mir_body: &mir::Body<'_> = *mir;
    let mut fx = FunctionCx {
        instance,
        mir,
        llfn,
        fn_abi,
        cx,
        personality_slot: None,
        blocks: block_bxs,
        unreachable_block: None,
        cleanup_kinds,
        landing_pads,
        funclets,
        locals: IndexVec::new(),
        debug_context,
        per_local_var_debug_info: None,
        caller_location: None,
    };

    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info();
    let memory_locals = analyze::non_ssa_locals(&fx);

    // Allocate variable and temp allocas
    fx.locals = {
        let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);

        let mut allocate_local = |local| {
            let decl = &mir_body.local_decls[local];
            let layout = bx.layout_of(fx.monomorphize(&decl.ty));
            assert!(!layout.ty.has_erasable_regions());
            if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
                debug!("alloc: {:?} (return place) -> place", local);
                let llretptr = bx.get_param(0);
                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
            }

            if memory_locals.contains(local) {
                debug!("alloc: {:?} -> place", local);
                if layout.is_unsized() {
                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
                } else {
                    LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
                }
            } else {
                debug!("alloc: {:?} -> operand", local);
                LocalRef::new_operand(&mut bx, layout)
            }
        };
        let retptr = allocate_local(mir::RETURN_PLACE);
        iter::once(retptr)
            .chain(args.into_iter())
            .chain(mir_body.vars_and_temps_iter().map(allocate_local))
            .collect()
    };
    // Apply debuginfo to the newly allocated locals.
    fx.debug_introduce_locals(&mut bx);

    // Branch to the START block, if it's not the entry block.
    if reentrant_start_block {
        bx.br(fx.blocks[mir::START_BLOCK]);
    }
    let rpo = traversal::reverse_postorder(&mir_body);
    let mut visited = BitSet::new_empty(mir_body.basic_blocks().len());

    // Codegen the body of each block using reverse postorder
    for (bb, _) in rpo {
        visited.insert(bb.index());
        fx.codegen_block(bb);
    }

    // Remove blocks that haven't been visited, or have no
    // predecessors.
    for bb in mir_body.basic_blocks().indices() {
        // Unreachable block
        if !visited.contains(bb.index()) {
            debug!("codegen_mir: block {:?} was not visited", bb);
            unsafe {
                bx.delete_basic_block(fx.blocks[bb]);
            }
        }
    }
}
fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    mir: &'tcx mir::Body<'tcx>,
    bx: &mut Bx,
    cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
    block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>,
) -> (
    IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
    IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
) {
    block_bxs
        .iter_enumerated()
        .zip(cleanup_kinds)
        .map(|((bb, &llbb), cleanup_kind)| {
            match *cleanup_kind {
                CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
                _ => return (None, None),
            }

            let funclet;
            let ret_llbb;
            match mir[bb].terminator.as_ref().map(|t| &t.kind) {
                // This is a basic block that we're aborting the program for,
                // notably in an `extern` function. These basic blocks are inserted
                // so that we assert that `extern` functions do indeed not panic,
                // and if they do we abort the process.
                //
                // On MSVC these are tricky though (where we're doing funclets). If
                // we were to do a cleanuppad (like below) the normal functions like
                // `longjmp` would trigger the abort logic, terminating the
                // program. Instead we insert the equivalent of `catch(...)` for C++
                // which magically doesn't trigger when `longjmp` flies over this
                // frame.
                //
                // Lots more discussion can be found on #48251, but this codegen is
                // modeled after clang's for:
                //
                //      try {
                //          foo();
                //      } catch (...) {
                //          bar();
                //      }
                Some(&mir::TerminatorKind::Abort) => {
                    let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb));
                    let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb));
                    ret_llbb = cs_bx.llbb();

                    let cs = cs_bx.catch_switch(None, None, 1);
                    cs_bx.add_handler(cs, cp_bx.llbb());

                    // The "null" here is actually a RTTI type descriptor for the
                    // C++ personality function, but `catch (...)` has no type so
                    // it's null. The 64 here is actually a bitfield which
                    // represents that this is a catch-all block.
                    let null = bx.const_null(bx.type_i8p());
                    let sixty_four = bx.const_i32(64);
                    funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                    cp_bx.br(llbb);
                }
                _ => {
                    let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
                    ret_llbb = cleanup_bx.llbb();
                    funclet = cleanup_bx.cleanup_pad(None, &[]);
                    cleanup_bx.br(llbb);
                }
            }

            (Some(ret_llbb), Some(funclet))
        })
        .unzip()
}
/// Produces, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    fx: &mut FunctionCx<'a, 'tcx, Bx>,
    memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, Bx::Value>> {
    let mir = fx.mir;
    let mut idx = 0;
    let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;

    let args = mir
        .args_iter()
        .enumerate()
        .map(|(arg_index, local)| {
            let arg_decl = &mir.local_decls[local];
            if Some(local) == mir.spread_arg {
                // This argument (e.g., the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual LLVM function arguments.
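                //
                // (Illustrative: when a closure is invoked through
                // `Fn::call(&self, args: (A, B))`, the tuple is spread so the
                // backend sees `A` and `B` as two separate arguments, and here
                // we rebuild the `(A, B)` tuple local from them.)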
                let arg_ty = fx.monomorphize(&arg_decl.ty);
                let tupled_arg_tys = match arg_ty.kind {
                    ty::Tuple(ref tys) => tys,
                    _ => bug!("spread argument isn't a tuple?!"),
                };

                let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                for i in 0..tupled_arg_tys.len() {
                    let arg = &fx.fn_abi.args[idx];
                    idx += 1;
                    if arg.pad.is_some() {
                        llarg_idx += 1;
                    }
                    let pr_field = place.project_field(bx, i);
                    bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
                }

                return LocalRef::Place(place);
            }
            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
                let arg_ty = fx.monomorphize(&arg_decl.ty);

                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                bx.va_start(va_list.llval);

                return LocalRef::Place(va_list);
            }
            let arg = &fx.fn_abi.args[idx];
            idx += 1;
            if arg.pad.is_some() {
                llarg_idx += 1;
            }

            if !memory_locals.contains(local) {
                // We don't have to cast or keep the argument in the alloca.
                // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
                // of putting everything in allocas just so we can use llvm.dbg.declare.
                let local = |op| LocalRef::Operand(Some(op));
                match arg.mode {
                    PassMode::Ignore => {
                        return local(OperandRef::new_zst(bx, arg.layout));
                    }
                    PassMode::Direct(_) => {
                        let llarg = bx.get_param(llarg_idx);
                        llarg_idx += 1;
                        return local(OperandRef::from_immediate_or_packed_pair(
                            bx, llarg, arg.layout,
                        ));
                    }
                    PassMode::Pair(..) => {
                        let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
                        llarg_idx += 2;

                        return local(OperandRef {
                            val: OperandValue::Pair(a, b),
                            layout: arg.layout,
                        });
                    }
                    _ => {}
                }
            }
            if arg.is_sized_indirect() {
                // Don't copy an indirect argument to an alloca, the caller
                // already put it in a temporary alloca and gave it up.
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
            } else if arg.is_unsized_indirect() {
                // As the storage for the indirect argument lives during
                // the whole function call, we just copy the fat pointer.
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let llextra = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let indirect_operand = OperandValue::Pair(llarg, llextra);

                let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
                indirect_operand.store(bx, tmp);
                LocalRef::UnsizedPlace(tmp)
            } else {
                let tmp = PlaceRef::alloca(bx, arg.layout);
                bx.store_fn_arg(arg, &mut llarg_idx, tmp);
                LocalRef::Place(tmp)
            }
        })
        .collect::<Vec<_>>();
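
    // `#[track_caller]` functions receive the caller's `Location` as an extra,
    // trailing argument that is not visible in the MIR signature (e.g., an
    // illustrative `#[track_caller] fn get(&self)` is passed a hidden
    // `&'static Location<'static>` describing its call site).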
    if fx.instance.def.requires_caller_location(bx.tcx()) {
        assert_eq!(
            fx.fn_abi.args.len(),
            args.len() + 1,
            "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
        );

        let arg = fx.fn_abi.args.last().unwrap();
        match arg.mode {
            PassMode::Direct(_) => (),
            _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
        }

        fx.caller_location = Some(OperandRef {
            val: OperandValue::Immediate(bx.get_param(llarg_idx)),
            layout: arg.layout,
        });
    }

    args
}