use crate::traits::*;
use rustc_errors::ErrorReported;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_symbol_mangling::typeid_for_fnabi;
use rustc_target::abi::call::{FnAbi, PassMode};

use std::iter;

use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;

use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
use self::place::PlaceRef;
use rustc_middle::mir::traversal;

use self::operand::{OperandRef, OperandValue};
/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,

    mir: &'tcx mir::Body<'tcx>,

    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,

    llfn: Bx::Function,

    cx: &'a Bx::CodegenCx,

    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
    /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
    /// as-needed (e.g. RPO reaching it or another block branching to it).
    // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
    cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// The funclet status of each basic block.
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,

    /// When targeting MSVC, this stores the cleanup info for each funclet BB.
    /// This is initialized at the same time as the `landing_pads` entry for the
    /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,

    /// This stores the cached landing/cleanup pad block for a given BB.
    // FIXME(eddyb) rename this to `eh_pads`.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// Cached unreachable block.
    unreachable_block: Option<Bx::BasicBlock>,
    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    ///     - we should not take its address using the `&` operator
    ///     - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
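    ///
    /// An illustrative example (hypothetical bodies, not from this crate):
    ///
    /// ```ignore (illustrative)
    /// fn f(x: u32) -> u32 {
    ///     let y = x + 1; // `y` is immediate and never borrowed, so it can
    ///     y              // live as an `OperandRef` without an alloca.
    /// }
    /// fn g(x: u32) -> u32 {
    ///     let y = x + 1;
    ///     let r = &y; // `y`'s address is taken, so `y` must be a
    ///     *r          // `PlaceRef` backed by an alloca.
    /// }
    /// ```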
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,

    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
    /// This is `None` if no variable debuginfo/names are needed.
    per_local_var_debug_info:
        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,

    /// Caller location propagated if this function has `#[track_caller]`.
    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
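    /// Monomorphizes `value` for this instance: substitutes the instance's
    /// generic arguments and normalizes away projections, so codegen only
    /// ever sees concrete, region-erased types.
    ///
    /// A sketch of typical use (illustrative, not a doctest):
    ///
    /// ```ignore (illustrative)
    /// // While codegenning `fn foo<T>(x: T)` instantiated at `T = u64`,
    /// // a MIR type mentioning `T`, e.g. `Vec<T>`, becomes `Vec<u64>`:
    /// let concrete_ty = fx.monomorphize(decl.ty);
    /// ```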
    pub fn monomorphize<T>(&self, value: T) -> T
    where
        T: Copy + TypeFoldable<'tcx>,
    {
        debug!("monomorphize: self.instance={:?}", self.instance);
        self.instance.subst_mir_and_normalize_erasing_regions(
            self.cx.tcx(),
            ty::ParamEnv::reveal_all(),
            value,
        )
    }
}
enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
    /// `*p` is the fat pointer that references the actual unsized place.
    /// Every time it is initialized, we have to reallocate the place
    /// and update the fat pointer. That's the reason why it is indirect.
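    ///
    /// Illustrative sketch (e.g. with the unstable `unsized_locals`
    /// feature, or an unsized indirect argument), for a local `x: [u8]`:
    ///
    /// ```ignore (illustrative)
    /// // p  : thin pointer, an alloca holding one fat-pointer slot
    /// // *p : fat pointer (data ptr + len) to the actual `[u8]` storage
    /// ```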
    UnsizedPlace(PlaceRef<'tcx, V>),
    Operand(Option<OperandRef<'tcx, V>>),
}
impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
    fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> LocalRef<'tcx, V> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
            // we need something in the operand.
            LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
        } else {
            LocalRef::Operand(None)
        }
    }
}

///////////////////////////////////////////////////////////////////////////
#[instrument(level = "debug", skip(cx))]
pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    assert!(!instance.substs.needs_infer());

    let llfn = cx.get_fn(instance);

    let mir = cx.tcx().instance_mir(instance.def);

    let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
    debug!("fn_abi: {:?}", fn_abi);

    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);

    let start_llbb = Bx::append_block(cx, llfn, "start");
    let mut bx = Bx::build(cx, start_llbb);

    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
        bx.set_personality_fn(cx.eh_personality());
    }
    let cleanup_kinds = analyze::cleanup_kinds(&mir);
    let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
        .basic_blocks()
        .indices()
        .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
        .collect();
    let mut fx = FunctionCx {
        instance,
        mir,
        llfn,
        fn_abi,
        cx,
        personality_slot: None,
        cached_llbbs,
        unreachable_block: None,
        cleanup_kinds,
        landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
        funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
        locals: IndexVec::new(),
        debug_context,
        per_local_var_debug_info: None,
        caller_location: None,
    };

    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
    // Evaluate all required consts; codegen later assumes that CTFE will never fail.
    let mut all_consts_ok = true;
    for const_ in &mir.required_consts {
        if let Err(err) = fx.eval_mir_constant(const_) {
            all_consts_ok = false;
            match err {
                // errored or at least linted
                ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {}
                ErrorHandled::TooGeneric => {
                    span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
                }
            }
        }
    }
    if !all_consts_ok {
        // We leave the IR in some half-built state here, and rely on this code not even being
        // submitted to LLVM once an error was raised.
        return;
    }
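    // (Illustrative: a required const is any constant this body mentions,
    // e.g. an associated `const` whose evaluation panics. Such a const may
    // only fail once monomorphized, hence the per-instance check above.)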
    let memory_locals = analyze::non_ssa_locals(&fx);

    // Allocate variable and temp allocas
    fx.locals = {
        let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
        let mut allocate_local = |local| {
            let decl = &mir.local_decls[local];
            let layout = bx.layout_of(fx.monomorphize(decl.ty));
            assert!(!layout.ty.has_erasable_regions(cx.tcx()));

            if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
                debug!("alloc: {:?} (return place) -> place", local);
                let llretptr = bx.get_param(0);
                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
            }

            if memory_locals.contains(local) {
                debug!("alloc: {:?} -> place", local);
                if layout.is_unsized() {
                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
                } else {
                    LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
                }
            } else {
                debug!("alloc: {:?} -> operand", local);
                LocalRef::new_operand(&mut bx, layout)
            }
        };
        let retptr = allocate_local(mir::RETURN_PLACE);
        iter::once(retptr)
            .chain(args.into_iter())
            .chain(mir.vars_and_temps_iter().map(allocate_local))
            .collect()
    };
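    // (The resulting `IndexVec` mirrors MIR local numbering: `_0` is the
    // return place, `_1..` are the arguments, then all remaining vars/temps.)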
    // Apply debuginfo to the newly allocated locals.
    fx.debug_introduce_locals(&mut bx);

    // Codegen the body of each block using reverse postorder
    // FIXME(eddyb) reuse RPO iterator between `analysis` and this.
    for (bb, _) in traversal::reverse_postorder(&mir) {
        fx.codegen_block(bb);
    }
    // For backends that support CFI using type membership (i.e., testing whether a given pointer
    // is associated with a type identifier).
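    // (Illustrative: under `-Zsanitizer=cfi`, an indirect call first checks
    // that the callee's address is a member of the type-id recorded here.)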
    if cx.tcx().sess.is_sanitizer_cfi_enabled() {
        let typeid = typeid_for_fnabi(cx.tcx(), fn_abi);
        bx.type_metadata(llfn, typeid.clone());
    }
}
/// Produces, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    fx: &mut FunctionCx<'a, 'tcx, Bx>,
    memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, Bx::Value>> {
    let mir = fx.mir;
    let mut idx = 0;
    let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;

    let mut num_untupled = None;

    let args = mir
        .args_iter()
        .enumerate()
        .map(|(arg_index, local)| {
            let arg_decl = &mir.local_decls[local];

            if Some(local) == mir.spread_arg {
                // This argument (e.g., the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual LLVM function arguments.
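                //
                // Illustrative example: for a closure invoked through
                // `extern "rust-call" fn call_once(self, args: (u32, String))`,
                // the ABI passes `u32` and `String` as two separate arguments,
                // and we rebuild the `(u32, String)` tuple local from them here.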
                let arg_ty = fx.monomorphize(arg_decl.ty);
                let tupled_arg_tys = match arg_ty.kind() {
                    ty::Tuple(tys) => tys,
                    _ => bug!("spread argument isn't a tuple?!"),
                };

                let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                for i in 0..tupled_arg_tys.len() {
                    let arg = &fx.fn_abi.args[idx];
                    idx += 1;
                    if arg.pad.is_some() {
                        llarg_idx += 1;
                    }
                    let pr_field = place.project_field(bx, i);
                    bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
                }
                assert_eq!(
                    None,
                    num_untupled.replace(tupled_arg_tys.len()),
                    "Replaced existing num_untupled"
                );

                return LocalRef::Place(place);
            }
            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
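                // (Illustrative: for `unsafe extern "C" fn f(n: i32, ...)`, this
                // final MIR-only argument backs the `...` va-list; it has no ABI
                // slot of its own, so we allocate it and initialize it with
                // `va_start` instead of reading an incoming parameter.)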
                let arg_ty = fx.monomorphize(arg_decl.ty);

                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                bx.va_start(va_list.llval);

                return LocalRef::Place(va_list);
            }
            let arg = &fx.fn_abi.args[idx];
            idx += 1;
            if arg.pad.is_some() {
                llarg_idx += 1;
            }

            if !memory_locals.contains(local) {
                // We don't have to cast or keep the argument in the alloca.
                // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
                // of putting everything in allocas just so we can use llvm.dbg.declare.
                let local = |op| LocalRef::Operand(Some(op));
                match arg.mode {
                    PassMode::Ignore => {
                        return local(OperandRef::new_zst(bx, arg.layout));
                    }
                    PassMode::Direct(_) => {
                        let llarg = bx.get_param(llarg_idx);
                        llarg_idx += 1;
                        return local(OperandRef::from_immediate_or_packed_pair(
                            bx, llarg, arg.layout,
                        ));
                    }
                    PassMode::Pair(..) => {
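                        // (e.g. a `&str` argument: the ABI passes the data
                        // pointer and the length as two separate parameters,
                        // rebuilt here as an immediate pair.)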
                        let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
                        llarg_idx += 2;

                        return local(OperandRef {
                            val: OperandValue::Pair(a, b),
                            layout: arg.layout,
                        });
                    }
                    _ => {}
                }
            }
            if arg.is_sized_indirect() {
                // Don't copy an indirect argument to an alloca, the caller
                // already put it in a temporary alloca and gave it up.
                // FIXME: lifetimes
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
            } else if arg.is_unsized_indirect() {
                // As the storage for the indirect argument lives during
                // the whole function call, we just copy the fat pointer.
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let llextra = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let indirect_operand = OperandValue::Pair(llarg, llextra);

                let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
                indirect_operand.store(bx, tmp);
                LocalRef::UnsizedPlace(tmp)
            } else {
                let tmp = PlaceRef::alloca(bx, arg.layout);
                bx.store_fn_arg(arg, &mut llarg_idx, tmp);
                LocalRef::Place(tmp)
            }
        })
        .collect::<Vec<_>>();
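    // (Illustrative: a `#[track_caller]` function is lowered with one extra
    // trailing ABI argument, a `&'static core::panic::Location<'static>`
    // describing the call site, which we capture below as `caller_location`.)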
    if fx.instance.def.requires_caller_location(bx.tcx()) {
        let mir_args = if let Some(num_untupled) = num_untupled {
            // Subtract off the tupled argument that gets 'expanded'
            args.len() - 1 + num_untupled
        } else {
            args.len()
        };
        assert_eq!(
            fx.fn_abi.args.len(),
            mir_args + 1,
            "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR",
            fx.instance
        );

        let arg = fx.fn_abi.args.last().unwrap();
        match arg.mode {
            PassMode::Direct(_) => (),
            _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
        }

        fx.caller_location = Some(OperandRef {
            val: OperandValue::Immediate(bx.get_param(llarg_idx)),
            layout: arg.layout,
        });
    }

    args
}

mod analyze;
mod block;
pub mod constant;
pub mod coverageinfo;
pub mod debuginfo;
mod intrinsic;
pub mod operand;
pub mod place;
mod rvalue;
mod statement;