// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Translate the completed AST to the LLVM IR.
//!
//! Some functions here, such as trans_block and trans_expr, return a value --
//! the result of the translation to LLVM -- while others, such as trans_fn,
//! trans_impl, and trans_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about trans:
//!
//!   * There's no way to find out the Ty type of a ValueRef. Doing so
//!     would be "trying to get the eggs out of an omelette" (credit:
//!     pcwalton). You can, instead, find out its TypeRef by calling val_ty,
//!     but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
//!     int) and rec(x=int, y=int, z=int) will have the same TypeRef.
#![allow(non_camel_case_types)]

pub use self::ValueOrigin::*;

use super::CrateTranslation;
use super::ModuleTranslation;

use back::link::mangle_exported_name;
use back::{link, abi};
use lint;
use llvm;
use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
use metadata::{csearch, encoder, loader};
use middle::astencode;
use middle::cfg;
use middle::def_id::DefId;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::weak_lang_items;
use middle::pat_util::simple_name;
use middle::subst::Substs;
use middle::ty::{self, Ty, HasTypeFlags};
use rustc::front::map as hir_map;
use session::config::{self, NoDebugInfo, FullDebugInfo};
use session::Session;
use trans::_match;
use trans::adt;
use trans::attributes;
use trans::build::*;
use trans::builder::{Builder, noname};
use trans::callee;
use trans::cleanup::{self, CleanupMethods, DropHint};
use trans::closure;
use trans::common::{self, Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use trans::common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
use trans::common::{Result, NodeIdAndSpan, VariantInfo};
use trans::common::{node_id_type, return_type_is_void};
use trans::common::{type_is_immediate, type_is_zero_size, val_ty};
use trans::context::SharedCrateContext;
use trans::controlflow;
use trans::datum;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::declare;
use trans::expr;
use trans::foreign;
use trans::glue;
use trans::machine;
use trans::machine::{llsize_of, llsize_of_real};
use trans::meth;
use trans::monomorphize;
use trans::tvec;
use trans::type_::Type;
use trans::type_of;
use trans::type_of::*;
use trans::value::Value;
use util::common::indenter;
use util::sha2::Sha256;
use util::nodemap::{NodeMap, NodeSet};

use arena::TypedArena;
use libc::c_uint;
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
use std::collections::{HashMap, HashSet};
use std::{i8, i16, i32, i64};
use syntax::abi::{Rust, RustCall, RustIntrinsic, PlatformIntrinsic, Abi};
use syntax::ast;
use syntax::attr;
use syntax::attr::AttrMetaMethods;
use syntax::codemap::Span;
use syntax::parse::token::InternedString;

use rustc_front::visit::Visitor;
use rustc_front::visit;
use rustc_front::hir;
thread_local! {
    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
        RefCell::new(None)
    }
}

pub fn with_insn_ctxt<F>(blk: F) where
    F: FnOnce(&[&'static str]),
{
    TASK_LOCAL_INSN_KEY.with(move |slot| {
        slot.borrow().as_ref().map(move |s| blk(s));
    })
}

pub fn init_insn_ctxt() {
    TASK_LOCAL_INSN_KEY.with(|slot| {
        *slot.borrow_mut() = Some(Vec::new());
    });
}

pub struct _InsnCtxt {
    _cannot_construct_outside_of_this_module: ()
}

impl Drop for _InsnCtxt {
    fn drop(&mut self) {
        TASK_LOCAL_INSN_KEY.with(|slot| {
            match slot.borrow_mut().as_mut() {
                Some(ctx) => { ctx.pop(); }
                None => {}
            }
        })
    }
}

pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
    debug!("new InsnCtxt: {}", s);
    TASK_LOCAL_INSN_KEY.with(|slot| {
        match slot.borrow_mut().as_mut() {
            Some(ctx) => ctx.push(s),
            None => {}
        }
    });
    _InsnCtxt { _cannot_construct_outside_of_this_module: () }
}
pub struct StatRecorder<'a, 'tcx: 'a> {
    ccx: &'a CrateContext<'a, 'tcx>,
    name: Option<String>,
    istart: usize,
}

impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
               -> StatRecorder<'a, 'tcx> {
        let istart = ccx.stats().n_llvm_insns.get();
        StatRecorder {
            ccx: ccx,
            name: Some(name),
            istart: istart,
        }
    }
}

impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
    fn drop(&mut self) {
        if self.ccx.sess().trans_stats() {
            let iend = self.ccx.stats().n_llvm_insns.get();
            self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
                                                         iend - self.istart));
            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
            // Reset LLVM insn count to avoid compound costs.
            self.ccx.stats().n_llvm_insns.set(self.istart);
        }
    }
}
fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_ty: Ty<'tcx>,
                                name: &str, did: DefId) -> ValueRef {
    match ccx.externs().borrow().get(name) {
        Some(n) => return *n,
        None => (),
    }

    let f = declare::declare_rust_fn(ccx, name, fn_ty);

    let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
    attributes::from_fn_attrs(ccx, &attrs[..], f);

    ccx.externs().borrow_mut().insert(name.to_string(), f);
    f
}
pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                       closure_id: DefId,
                                       fn_ty: Ty<'tcx>)
                                       -> Ty<'tcx> {
    let closure_kind = ccx.tcx().closure_kind(closure_id);
    match closure_kind {
        ty::FnClosureKind => {
            ccx.tcx().mk_imm_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
        }
        ty::FnMutClosureKind => {
            ccx.tcx().mk_mut_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty)
        }
        ty::FnOnceClosureKind => fn_ty
    }
}

pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind {
    *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
}
pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: DefId,
                                  t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    let ty = type_of(ccx, t);
    match ccx.externs().borrow_mut().get(&name) {
        Some(n) => return *n,
        None => (),
    }
    // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
    // FIXME(nagisa): investigate whether it can be changed into define_global
    let c = declare::declare_global(ccx, &name[..], ty);
    // Thread-local statics in some other crate need to *always* be linked
    // against in a thread-local fashion, so we need to be sure to apply the
    // thread-local attribute locally if it was present remotely. If we
    // don't do this then linker errors can be generated where the linker
    // complains that one object file has a thread-local version of the
    // symbol and another one doesn't.
    for attr in ccx.tcx().get_attrs(did).iter() {
        if attr.check_name("thread_local") {
            llvm::set_thread_local(c, true);
        }
    }
    if ccx.use_dll_storage_attrs() {
        llvm::SetDLLStorageClass(c, llvm::DLLImportStorageClass);
    }
    ccx.externs().borrow_mut().insert(name.to_string(), c);
    c
}
fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                info_ty: Ty<'tcx>, it: LangItem) -> DefId {
    match bcx.tcx().lang_items.require(it) {
        Ok(id) => id,
        Err(s) => {
            bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s));
        }
    }
}

// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.
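// For example (illustrative, not from the original source): boxing an unsized
// value such as a trait object or a slice means the size and alignment handed
// to the allocator are not compile-time constants; they are computed at
// runtime and passed to the exchange-malloc lang item below.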
pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef,
                                  debug_loc: DebugLoc)
                                  -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_exchange");

    // Allocate space:
    let r = callee::trans_lang_call(bcx,
                                    require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
                                    &[size, align],
                                    None,
                                    debug_loc);

    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
pub fn bin_op_to_icmp_predicate(ccx: &CrateContext, op: hir::BinOp_, signed: bool)
                                -> llvm::IntPredicate {
    match op {
        hir::BiEq => llvm::IntEQ,
        hir::BiNe => llvm::IntNE,
        hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
        hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
        hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
        hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
        op => {
            ccx.sess().bug(&format!("comparison_op_to_icmp_predicate: expected \
                                     comparison operator, found {:?}", op));
        }
    }
}

pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: hir::BinOp_)
                                -> llvm::RealPredicate {
    match op {
        hir::BiEq => llvm::RealOEQ,
        hir::BiNe => llvm::RealUNE,
        hir::BiLt => llvm::RealOLT,
        hir::BiLe => llvm::RealOLE,
        hir::BiGt => llvm::RealOGT,
        hir::BiGe => llvm::RealOGE,
        op => {
            ccx.sess().bug(&format!("comparison_op_to_fcmp_predicate: expected \
                                     comparison operator, found {:?}", op));
        }
    }
}
pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        lhs: ValueRef,
                                        rhs: ValueRef,
                                        t: Ty<'tcx>,
                                        op: hir::BinOp_,
                                        debug_loc: DebugLoc)
                                        -> ValueRef {
    match t.sty {
        ty::TyTuple(ref tys) if tys.is_empty() => {
            // We don't need to do actual comparisons for nil.
            // () == () holds but () < () does not.
            match op {
                hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true),
                hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false),
                // refinements would be nice
                _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator")
            }
        }
        ty::TyBareFn(..) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
        }
        ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
        }
        ty::TyInt(_) => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
        }
        ty::TyFloat(_) => {
            FCmp(bcx, bin_op_to_fcmp_predicate(bcx.ccx(), op), lhs, rhs, debug_loc)
        }
        // Should never get here, because t is scalar.
        _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types")
    }
}
pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      t: Ty<'tcx>,
                                      ret_ty: Type,
                                      op: hir::BinOp_,
                                      debug_loc: DebugLoc)
                                      -> ValueRef {
    let signed = match t.sty {
        ty::TyFloat(_) => {
            let cmp = bin_op_to_fcmp_predicate(bcx.ccx(), op);
            return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty);
        },
        ty::TyUint(_) => false,
        ty::TyInt(_) => true,
        _ => bcx.sess().bug("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(bcx.ccx(), op, signed);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
}
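// For instance (an illustrative case, not from the original source): comparing
// two `<4 x i32>` vectors produces an LLVM `<4 x i1>` mask; the SExt above
// widens it back to `<4 x i32>`, so each lane ends up as 0 (false) or -1 /
// all-ones (true), the conventional SIMD mask encoding.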
// Iterates through the elements of a structural type.
pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                         av: ValueRef,
                                         t: Ty<'tcx>,
                                         mut f: F)
                                         -> Block<'blk, 'tcx> where
    F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("iter_structural_ty");

    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                   repr: &adt::Repr<'tcx>,
                                   av: ValueRef,
                                   variant: ty::VariantDef<'tcx>,
                                   substs: &Substs<'tcx>,
                                   f: &mut F)
                                   -> Block<'blk, 'tcx> where
        F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
    {
        let _icx = push_ctxt("iter_variant");
        let tcx = cx.tcx();
        let mut cx = cx;

        for (i, field) in variant.fields.iter().enumerate() {
            let arg = monomorphize::field_ty(tcx, substs, field);
            cx = f(cx, adt::trans_field_ptr(cx, repr, av, variant.disr_val, i), arg);
        }
        return cx;
    }

    let (data_ptr, info) = if common::type_is_sized(cx.tcx(), t) {
        (av, None)
    } else {
        let data = expr::get_dataptr(cx, av);
        let info = expr::get_meta(cx, av);
        (Load(cx, data), Some(Load(cx, info)))
    };

    let mut cx = cx;
    match t.sty {
        ty::TyStruct(..) => {
            let repr = adt::represent_type(cx.ccx(), t);
            let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
            for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);

                let val = if common::type_is_sized(cx.tcx(), field_ty) {
                    llfld_a
                } else {
                    let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
                    Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
                    Store(cx, info.unwrap(), expr::get_meta(cx, scratch.val));
                    scratch.val
                };
                cx = f(cx, val, field_ty);
            }
        }
        ty::TyClosure(_, ref substs) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
                let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
                cx = f(cx, llupvar, upvar_ty);
            }
        }
        ty::TyArray(_, n) => {
            let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
            let unit_ty = t.sequence_element_type(cx.tcx());
            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
        }
        ty::TySlice(_) | ty::TyStr => {
            let unit_ty = t.sequence_element_type(cx.tcx());
            cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
        }
        ty::TyTuple(ref args) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, arg) in args.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
                cx = f(cx, llfld_a, *arg);
            }
        }
        ty::TyEnum(en, substs) => {
            let fcx = cx.fcx;
            let ccx = fcx.ccx;

            let repr = adt::represent_type(ccx, t);
            let n_variants = en.variants.len();

            // NB: we must hit the discriminant first so that structural
            // comparisons know not to proceed when the discriminants differ.

            match adt::trans_switch(cx, &*repr, av) {
                (_match::Single, None) => {
                    if n_variants != 0 {
                        assert!(n_variants == 1);
                        cx = iter_variant(cx, &*repr, av, &en.variants[0],
                                          substs, &mut f);
                    }
                }
                (_match::Switch, Some(lldiscrim_a)) => {
                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);

                    // Create a fall-through basic block for the "else" case of
                    // the switch instruction we're about to generate. Note that
                    // we do **not** use an Unreachable instruction here, even
                    // though most of the time this basic block will never be hit.
                    //
                    // When an enum is dropped, its contents are currently
                    // overwritten with DTOR_DONE, which means the discriminant
                    // could have changed value to something outside the actual
                    // range of the discriminant. Currently this function is only
                    // used for drop glue, so in this case we just return quickly
                    // from the outer function, and any other use case will only
                    // call this for an already-valid enum in which case the `ret
                    // void` will never be hit.
                    let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
                    RetVoid(ret_void_cx, DebugLoc::None);
                    let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb,
                                          n_variants);
                    let next_cx = fcx.new_temp_block("enum-iter-next");

                    for variant in &en.variants {
                        let variant_cx = fcx.new_temp_block(
                            &format!("enum-iter-variant-{}",
                                     &variant.disr_val.to_string()));
                        match adt::trans_case(cx, &*repr, variant.disr_val) {
                            _match::SingleResult(r) => {
                                AddCase(llswitch, r.val, variant_cx.llbb)
                            }
                            _ => ccx.sess().unimpl("value from adt::trans_case \
                                                    in iter_structural_ty")
                        }
                        let variant_cx = iter_variant(variant_cx,
                                                      &*repr,
                                                      data_ptr,
                                                      variant,
                                                      substs,
                                                      &mut f);
                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
                    }
                    cx = next_cx;
                }
                _ => ccx.sess().unimpl("value from adt::trans_switch \
                                        in iter_structural_ty")
            }
        }
        _ => {
            cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
        }
    }
    return cx;
}
pub fn cast_shift_expr_rhs(cx: Block,
                           op: hir::BinOp_,
                           lhs: ValueRef,
                           rhs: ValueRef)
                           -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a, b| Trunc(cx, a, b),
                   |a, b| ZExt(cx, a, b))
}

pub fn cast_shift_const_rhs(op: hir::BinOp_,
                            lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
}

fn cast_shift_rhs<F, G>(op: hir::BinOp_,
                        lhs: ValueRef,
                        rhs: ValueRef,
                        trunc: F,
                        zext: G)
                        -> ValueRef where
    F: FnOnce(ValueRef, Type) -> ValueRef,
    G: FnOnce(ValueRef, Type) -> ValueRef,
{
    // Shifts may have any size int on the rhs
    if rustc_front::util::is_shift_binop(op) {
        let mut rhs_llty = val_ty(rhs);
        let mut lhs_llty = val_ty(lhs);
        if rhs_llty.kind() == Vector { rhs_llty = rhs_llty.element_type() }
        if lhs_llty.kind() == Vector { lhs_llty = lhs_llty.element_type() }
        let rhs_sz = rhs_llty.int_width();
        let lhs_sz = lhs_llty.int_width();
        if lhs_sz < rhs_sz {
            trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If shifting by negative values stops being
            // undefined behavior, then this is wrong.
            zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}
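// For example (illustrative), for `cast_shift_rhs` above: in `x << n` with
// `x: u64` and `n: u8`, LLVM's `shl` requires both operands to have the same
// width, so the `i8` shift amount is zero-extended to `i64`; conversely, a
// `u64` amount shifting a `u8` value is truncated down to `i8`.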
pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                              val_t: Ty<'tcx>) -> (Type, u64) {
    match val_t.sty {
        ty::TyInt(t) => {
            let llty = Type::int_from_ty(cx.ccx(), t);
            let min = match t {
                ast::TyIs if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
                ast::TyIs => i64::MIN as u64,
                ast::TyI8 => i8::MIN as u64,
                ast::TyI16 => i16::MIN as u64,
                ast::TyI32 => i32::MIN as u64,
                ast::TyI64 => i64::MIN as u64,
            };
            (llty, min)
        }
        _ => unreachable!(),
    }
}
pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                             call_info: NodeIdAndSpan,
                                             divrem: hir::BinOp,
                                             lhs: ValueRef,
                                             rhs: ValueRef,
                                             rhs_t: Ty<'tcx>)
                                             -> Block<'blk, 'tcx> {
    let (zero_text, overflow_text) = if divrem.node == hir::BiDiv {
        ("attempted to divide by zero",
         "attempted to divide with overflow")
    } else {
        ("attempted remainder with a divisor of zero",
         "attempted remainder with overflow")
    };

    let debug_loc = call_info.debug_loc();

    let (is_zero, is_signed) = match rhs_t.sty {
        ty::TyInt(t) => {
            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
        }
        ty::TyUint(t) => {
            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
        }
        ty::TyStruct(def, _) if def.is_simd() => {
            let mut res = C_bool(cx.ccx(), false);
            for i in 0..rhs_t.simd_size(cx.tcx()) {
                res = Or(cx, res,
                         IsNull(cx,
                                ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), debug_loc);
            }
            (res, false)
        }
        _ => {
            cx.sess().bug(&format!("fail-if-zero on unexpected type: {}", rhs_t));
        }
    };

    let bcx = with_cond(cx, is_zero, |bcx| {
        controlflow::trans_fail(bcx, call_info, InternedString::new(zero_text))
    });

    // To quote LLVM's documentation for the sdiv instruction:
    //
    //      Division by zero leads to undefined behavior. Overflow also leads
    //      to undefined behavior; this is a rare case, but can occur, for
    //      example, by doing a 32-bit division of -2147483648 by -1.
    //
    // In order to avoid undefined behavior, we perform runtime checks for
    // signed division/remainder which would trigger overflow. For unsigned
    // integers, no action beyond checking for zero need be taken.
    if is_signed {
        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
        let minus_one = ICmp(bcx, llvm::IntEQ, rhs,
                             C_integral(llty, !0, false), debug_loc);
        with_cond(bcx, minus_one, |bcx| {
            let is_min = ICmp(bcx, llvm::IntEQ, lhs,
                              C_integral(llty, min, true), debug_loc);
            with_cond(bcx, is_min, |bcx| {
                controlflow::trans_fail(bcx,
                                        call_info,
                                        InternedString::new(overflow_text))
            })
        })
    } else {
        bcx
    }
}
pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                     did: DefId, t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    match t.sty {
        ty::TyBareFn(_, ref fn_ty) => {
            match ccx.sess().target.target.adjust_abi(fn_ty.abi) {
                Rust | RustCall => {
                    get_extern_rust_fn(ccx, t, &name[..], did)
                }
                RustIntrinsic | PlatformIntrinsic => {
                    ccx.sess().bug("unexpected intrinsic in trans_external_path")
                }
                _ => {
                    let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
                    foreign::register_foreign_item_fn(ccx, fn_ty.abi, t, &name, &attrs)
                }
            }
        }
        _ => {
            get_extern_const(ccx, did, t)
        }
    }
}
pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          llfn: ValueRef,
                          llargs: &[ValueRef],
                          fn_ty: Ty<'tcx>,
                          debug_loc: DebugLoc)
                          -> (ValueRef, Block<'blk, 'tcx>) {
    let _icx = push_ctxt("invoke_");
    if bcx.unreachable.get() {
        return (C_null(Type::i8(bcx.ccx())), bcx);
    }

    let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty);

    match bcx.opt_node_id {
        None => {
            debug!("invoke at ???");
        }
        Some(id) => {
            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
        }
    }

    if need_invoke(bcx) {
        debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }
        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
        let landing_pad = bcx.fcx.get_landing_pad();

        let llresult = Invoke(bcx,
                              llfn,
                              &llargs[..],
                              normal_bcx.llbb,
                              landing_pad,
                              Some(attributes),
                              debug_loc);
        return (llresult, normal_bcx);
    } else {
        debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }

        let llresult = Call(bcx,
                            llfn,
                            &llargs[..],
                            Some(attributes),
                            debug_loc);
        return (llresult, bcx);
    }
}
/// Returns whether this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.target.options.is_like_msvc && sess.target.target.arch == "x86"
}

pub fn need_invoke(bcx: Block) -> bool {
    // FIXME(#25869) currently SEH-based unwinding is pretty buggy in LLVM and
    // is being overhauled as this is being written. Until upstream LLVM's
    // implementation is more solid and we start binding it, we need to skip
    // invokes for any target which wants SEH-based unwinding.
    if bcx.sess().no_landing_pads() || wants_msvc_seh(bcx.sess()) {
        return false;
    }

    // Avoid using invoke if we are already inside a landing pad.
    if bcx.is_lpad {
        return false;
    }

    bcx.fcx.needs_invoke()
}
pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                     v: ValueRef, t: Ty<'tcx>) -> ValueRef {
    let _icx = push_ctxt("load_if_immediate");
    if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
    return v;
}

/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
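/// For example (illustrative): `bool` is stored in memory as an `i8`, while
/// its SSA value is an `i1`, so loading a `bool` involves truncating back to
/// `i1` (see `to_arg_ty` below).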
pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                           ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
        return C_undef(type_of::type_of(cx.ccx(), t));
    }

    let ptr = to_arg_ty_ptr(cx, ptr, t);
    let align = type_of::align_of(cx.ccx(), t);

    if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
        let load = Load(cx, ptr);
        unsafe {
            llvm::LLVMSetAlignment(load, align);
        }
        return load;
    }

    unsafe {
        let global = llvm::LLVMIsAGlobalVariable(ptr);
        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
            let val = llvm::LLVMGetInitializer(global);
            if !val.is_null() {
                return to_arg_ty(cx, val, t);
            }
        }
    }

    let val = if t.is_bool() {
        LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
    } else if t.is_char() {
        // a char is a Unicode codepoint, and so takes values from 0
        // to 0x10FFFF inclusive only.
        LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
    } else if (t.is_region_ptr() || t.is_unique())
        && !common::type_is_fat_ptr(cx.tcx(), t) {
        LoadNonNull(cx, ptr)
    } else {
        Load(cx, ptr)
    };

    unsafe {
        llvm::LLVMSetAlignment(val, align);
    }

    to_arg_ty(cx, val, t)
}
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() {
        return;
    }

    if common::type_is_fat_ptr(cx.tcx(), t) {
        Store(cx, ExtractValue(cx, v, abi::FAT_PTR_ADDR), expr::get_dataptr(cx, dst));
        Store(cx, ExtractValue(cx, v, abi::FAT_PTR_EXTRA), expr::get_meta(cx, dst));
    } else {
        let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
        unsafe {
            llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t));
        }
    }
}

pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
    if ty.is_bool() {
        ZExt(bcx, val, Type::i8(bcx.ccx()))
    } else {
        val
    }
}

pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
    if ty.is_bool() {
        Trunc(bcx, val, Type::i1(bcx.ccx()))
    } else {
        val
    }
}

pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef {
    if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() {
        // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
        // for this leads to bad optimizations, so its arg type is an appropriately sized integer
        // and we have to convert it.
        BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to())
    } else {
        ptr
    }
}
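// For instance (illustrative), for `to_arg_ty_ptr` above: a struct of two
// `u8` fields is immediate, and its LLVM type `{ i8, i8 }` is an aggregate,
// so its argument type would be an appropriately sized integer such as `i16`,
// and the pointer is bitcast from `{ i8, i8 }*` to `i16*` before loading.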
pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local)
                              -> Block<'blk, 'tcx> {
    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
    let _indenter = indenter();
    let _icx = push_ctxt("init_local");
    _match::store_local(bcx, local)
}

pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
                             is_lpad: bool,
                             llbb: BasicBlockRef)
                             -> Block<'blk, 'tcx> {
    common::BlockS::new(llbb, is_lpad, None, fcx)
}

pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                                val: ValueRef,
                                f: F)
                                -> Block<'blk, 'tcx> where
    F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("with_cond");

    if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
        return bcx;
    }

    let fcx = bcx.fcx;
    let next_cx = fcx.new_temp_block("next");
    let cond_cx = fcx.new_temp_block("cond");
    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
    let after_cx = f(cond_cx);
    if !after_cx.terminated.get() {
        Br(after_cx, next_cx.llbb, DebugLoc::None);
    }
    next_cx
}
pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_start");
    let ccx = cx.ccx();

    let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
    if size == 0 {
        return;
    }

    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
    Call(cx, lifetime_start, &[C_u64(ccx, size), ptr], None, DebugLoc::None);
}

pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_end");
    let ccx = cx.ccx();

    let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
    if size == 0 {
        return;
    }

    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
    Call(cx, lifetime_end, &[C_u64(ccx, size), ptr], None, DebugLoc::None);
}

// Generates code for resumption of unwind at the end of a landing pad.
pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) {
    if !bcx.sess().target.target.options.custom_unwind_resume {
        Resume(bcx, lpval);
    } else {
        let exc_ptr = ExtractValue(bcx, lpval, 0);
        let llunwresume = bcx.fcx.eh_unwind_resume();
        Call(bcx, llunwresume, &[exc_ptr], None, DebugLoc::None);
        Unreachable(bcx);
    }
}
pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
    let _icx = push_ctxt("call_memcpy");
    let ccx = cx.ccx();
    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
    let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
    let memcpy = ccx.get_intrinsic(&key);
    let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
    let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
    let size = IntCast(cx, n_bytes, ccx.int_type());
    let align = C_i32(ccx, align as i32);
    let volatile = C_bool(ccx, false);
    Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None, DebugLoc::None);
}

pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             dst: ValueRef, src: ValueRef,
                             t: Ty<'tcx>) {
    let _icx = push_ctxt("memcpy_ty");
    let ccx = bcx.ccx();

    if type_is_zero_size(ccx, t) {
        return;
    }

    if t.is_structural() {
        let llty = type_of::type_of(ccx, t);
        let llsz = llsize_of(ccx, llty);
        let llalign = type_of::align_of(ccx, t);
        call_memcpy(bcx, dst, src, llsz, llalign as u32);
    } else {
        store_ty(bcx, load_ty(bcx, src, t), dst, t);
    }
}
pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("drop_done_fill_mem");
    let bcx = cx;
    memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
}

pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("init_zero_mem");
    let bcx = cx;
    memfill(&B(bcx), llptr, t, 0);
}

// Always use this function instead of storing a constant byte to the memory
// in question. e.g. if you store a zero constant, LLVM will drown in vreg
// allocation for large data structures, and the generated code will be
// awful. (A telltale sign of this is large quantities of
// `mov [byte ptr foo], 0` in the generated code.)
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
    let _icx = push_ctxt("memfill");
    let ccx = b.ccx;

    let llty = type_of::type_of(ccx, ty);
    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
    let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);

    let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
    let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
    let llzeroval = C_u8(ccx, byte);
    let size = machine::llsize_of(ccx, llty);
    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
    let volatile = C_bool(ccx, false);
    b.call(llintrinsicfn, &[llptr, llzeroval, size, align, volatile], None);
}
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloc_ty");
    let ccx = bcx.ccx();
    let ty = type_of::type_of(ccx, t);
    assert!(!t.has_param_types());
    alloca(bcx, ty, name)
}

pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloca");
    if cx.unreachable.get() {
        unsafe {
            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
        }
    }
    debuginfo::clear_source_location(cx.fcx);
    Alloca(cx, ty, name)
}

pub fn set_value_name(val: ValueRef, name: &str) {
    unsafe {
        let name = CString::new(name).unwrap();
        llvm::LLVMSetValueName(val, name.as_ptr());
    }
}
// Creates the alloca slot which holds the pointer to the slot for the final return value
pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                          output_type: Ty<'tcx>) -> ValueRef {
    let lloutputtype = type_of::type_of(fcx.ccx, output_type);

    // We create an alloca to hold a pointer of type `output_type`
    // which will hold the pointer to the right alloca which has the
    // final ret value
    if fcx.needs_ret_allocas {
        // Let's create the stack slot
        let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr");

        // and if we're using an out pointer, then store that in our newly made slot
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            let outptr = get_param(fcx.llfn, 0);

            let b = fcx.ccx.builder();
            b.position_before(fcx.alloca_insert_pt.get().unwrap());
            b.store(outptr, slot);
        }

        slot
    } else {
        // But if there are no nested returns, we skip the indirection and have a single
        // retslot
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            get_param(fcx.llfn, 0)
        } else {
            AllocaFcx(fcx, lloutputtype, "sret_slot")
        }
    }
}
struct FindNestedReturn {
    found: bool,
}

impl FindNestedReturn {
    fn new() -> FindNestedReturn {
        FindNestedReturn { found: false }
    }
}

impl<'v> Visitor<'v> for FindNestedReturn {
    fn visit_expr(&mut self, e: &hir::Expr) {
        match e.node {
            hir::ExprRet(..) => {
                self.found = true;
            }
            _ => visit::walk_expr(self, e)
        }
    }
}
fn build_cfg(tcx: &ty::ctxt, id: ast::NodeId) -> (ast::NodeId, Option<cfg::CFG>) {
    let blk = match tcx.map.find(id) {
        Some(hir_map::NodeItem(i)) => {
            match i.node {
                hir::ItemFn(_, _, _, _, _, ref blk) => {
                    blk
                }
                _ => tcx.sess.bug("unexpected item variant in has_nested_returns")
            }
        }
        Some(hir_map::NodeTraitItem(trait_item)) => {
            match trait_item.node {
                hir::MethodTraitItem(_, Some(ref body)) => body,
                _ => {
                    tcx.sess.bug("unexpected variant: trait item other than a \
                                  provided method in has_nested_returns")
                }
            }
        }
        Some(hir_map::NodeImplItem(impl_item)) => {
            match impl_item.node {
                hir::MethodImplItem(_, ref body) => body,
                _ => {
                    tcx.sess.bug("unexpected variant: non-method impl item in \
                                  has_nested_returns")
                }
            }
        }
        Some(hir_map::NodeExpr(e)) => {
            match e.node {
                hir::ExprClosure(_, _, ref blk) => blk,
                _ => tcx.sess.bug("unexpected expr variant in has_nested_returns")
            }
        }
        Some(hir_map::NodeVariant(..)) |
        Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),

        // glue, shims, etc
        None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),

        _ => tcx.sess.bug(&format!("unexpected variant in has_nested_returns: {}",
                                   tcx.map.path_to_string(id)))
    };

    (blk.id, Some(cfg::CFG::new(tcx, blk)))
}
// Checks for the presence of "nested returns" in a function.
// Nested returns are when the inner expression of a return expression
// (the 'expr' in 'return expr') contains a return expression. Only cases
// where the outer return is actually reachable are considered. Implicit
// returns from the end of blocks are considered as well.
//
// This check is needed to handle the case where the inner expression is
// part of a larger expression that may have already partially-filled the
// return slot alloca. This can cause errors related to clean-up due to
// the clobbering of the existing value in the return slot.
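//
// For example (an illustrative case, not from the original source):
//
//     fn f(x: bool) -> i32 {
//         return if x { return 1 } else { 2 };
//     }
//
// The operand of the outer `return` itself contains a `return`, so the
// return slot may be written more than once.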
fn has_nested_returns(tcx: &ty::ctxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
    for index in cfg.graph.depth_traverse(cfg.entry) {
        let n = cfg.graph.node_data(index);
        match tcx.map.find(n.id()) {
            Some(hir_map::NodeExpr(ex)) => {
                if let hir::ExprRet(Some(ref ret_expr)) = ex.node {
                    let mut visitor = FindNestedReturn::new();
                    visit::walk_expr(&mut visitor, &**ret_expr);
                    if visitor.found {
                        return true;
                    }
                }
            }
            Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => {
                let mut visitor = FindNestedReturn::new();
                walk_list!(&mut visitor, visit_expr, &blk.expr);
                if visitor.found {
                    return true;
                }
            }
            _ => {}
        }
    }

    return false;
}
// NB: must keep 4 fns in sync:
//
//  - type_of_fn
//  - create_datums_for_fn_args.
//  - new_fn_ctxt
//  - trans_closure
//
// Be warned! You must call `init_function` before doing anything with the
// returned function context.
pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
                             llfndecl: ValueRef,
                             id: ast::NodeId,
                             has_env: bool,
                             output_type: ty::FnOutput<'tcx>,
                             param_substs: &'tcx Substs<'tcx>,
                             sp: Option<Span>,
                             block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
                             -> FunctionContext<'a, 'tcx> {
    common::validate_substs(param_substs);

    debug!("new_fn_ctxt(path={}, id={}, param_substs={:?})",
           if id == !0 {
               "".to_string()
           } else {
               ccx.tcx().map.path_to_string(id).to_string()
           },
           id, param_substs);

    let uses_outptr = match output_type {
        ty::FnConverging(output_type) => {
            let substd_output_type =
                monomorphize::apply_param_substs(ccx.tcx(), param_substs, &output_type);
            type_of::return_uses_outptr(ccx, substd_output_type)
        }
        ty::FnDiverging => false
    };
    let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
    let (blk_id, cfg) = build_cfg(ccx.tcx(), id);
    let nested_returns = if let Some(ref cfg) = cfg {
        has_nested_returns(ccx.tcx(), cfg, blk_id)
    } else {
        false
    };

    let mut fcx = FunctionContext {
        llfn: llfndecl,
        llenv: None,
        llretslotptr: Cell::new(None),
        param_env: ccx.tcx().empty_parameter_environment(),
        alloca_insert_pt: Cell::new(None),
        llreturn: Cell::new(None),
        needs_ret_allocas: nested_returns,
        personality: Cell::new(None),
        caller_expects_out_pointer: uses_outptr,
        lllocals: RefCell::new(NodeMap()),
        llupvars: RefCell::new(NodeMap()),
        lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
        id: id,
        param_substs: param_substs,
        span: sp,
        block_arena: block_arena,
        ccx: ccx,
        debug_context: debug_context,
        scopes: RefCell::new(Vec::new()),
        cfg: cfg,
    };

    if has_env {
        fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint))
    }

    fcx
}
/// Performs setup on a newly created function, creating the entry scope block
/// and allocating space for the return pointer.
pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
                               skip_retptr: bool,
                               output: ty::FnOutput<'tcx>)
                               -> Block<'a, 'tcx> {
    let entry_bcx = fcx.new_temp_block("entry-block");

    // Use a dummy instruction as the insertion point for all allocas.
    // This is later removed in FunctionContext::cleanup.
    fcx.alloca_insert_pt.set(Some(unsafe {
        Load(entry_bcx, C_null(Type::i8p(fcx.ccx)));
        llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
    }));

    if let ty::FnConverging(output_type) = output {
        // This shouldn't need to recompute the return type,
        // as new_fn_ctxt did it already.
        let substd_output_type = fcx.monomorphize(&output_type);
        if !return_type_is_void(fcx.ccx, substd_output_type) {
            // If the function returns nil/bot, there is no real return
            // value, so do not set `llretslotptr`.
            if !skip_retptr || fcx.caller_expects_out_pointer {
                // Otherwise, we normally allocate the llretslotptr, unless we
                // have been instructed to skip it for immediate return
                // values.
                fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type)));
            }
        }
    }

    // Create the drop-flag hints for every unfragmented path in the function.
    let tcx = fcx.ccx.tcx();
    let fn_did = tcx.map.local_def_id(fcx.id);
    let mut hints = fcx.lldropflag_hints.borrow_mut();
    let fragment_infos = tcx.fragment_infos.borrow();

    // Intern table for drop-flag hint datums.
    let mut seen = HashMap::new();

    if let Some(fragment_infos) = fragment_infos.get(&fn_did) {
        for &info in fragment_infos {

            let make_datum = |id| {
                let init_val = C_u8(fcx.ccx, adt::DTOR_NEEDED_HINT);
                let llname = &format!("dropflag_hint_{}", id);
                debug!("adding hint {}", llname);
                let ty = tcx.types.u8;
                let ptr = alloc_ty(entry_bcx, ty, llname);
                Store(entry_bcx, init_val, ptr);
                let flag = datum::Lvalue::new_dropflag_hint("base::init_function");
                datum::Datum::new(ptr, ty, flag)
            };

            let (var, datum) = match info {
                ty::FragmentInfo::Moved { var, .. } |
                ty::FragmentInfo::Assigned { var, .. } => {
                    let datum = seen.get(&var).cloned().unwrap_or_else(|| {
                        let datum = make_datum(var);
                        seen.insert(var, datum.clone());
                        datum
                    });
                    (var, datum)
                }
            };

            match info {
                ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
                    debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
                    hints.insert(expr_id, DropHint::new(var, datum));
                }
                ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
                    debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
                    hints.insert(expr_id, DropHint::new(var, datum));
                }
            }
        }
    }

    entry_bcx
}
// NB: must keep 4 fns in sync:
//
//  - type_of_fn
//  - create_datums_for_fn_args.
//  - new_fn_ctxt
//  - trans_closure
pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>)
                          -> datum::Rvalue {
    use trans::datum::{ByRef, ByValue};

    datum::Rvalue {
        mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue }
    }
}
// create_datums_for_fn_args: creates lvalue datums for each of the
// incoming function arguments.
pub fn create_datums_for_fn_args<'a, 'tcx>(mut bcx: Block<'a, 'tcx>,
                                           args: &[hir::Arg],
                                           arg_tys: &[Ty<'tcx>],
                                           has_tupled_arg: bool,
                                           arg_scope: cleanup::CustomScopeIndex)
                                           -> Block<'a, 'tcx> {
    let _icx = push_ctxt("create_datums_for_fn_args");
    let fcx = bcx.fcx;
    let arg_scope_id = cleanup::CustomScope(arg_scope);

    // Wrap the ValueRefs that we get from `get_param` for each argument into
    // lvalue datums.
    //
    // For certain mode/type combinations, the raw llarg values are passed
    // by value. However, within the fn body itself, we want to always
    // have all locals and arguments be by-ref so that we can cancel the
    // cleanup and for better interaction with LLVM's debug info. So, if
    // the argument would be passed by value, we store it into an alloca.
    // This alloca should be optimized away by LLVM's mem-to-reg pass in
    // the event it's not truly needed.
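    //
    // For instance (illustrative): a by-value `i32` argument arrives as an
    // SSA parameter; we emit an `alloca i32` plus a store of the parameter
    // into it, and LLVM's mem2reg pass later promotes the alloca back to an
    // SSA value whenever its address doesn't otherwise escape.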
    let mut idx = fcx.arg_offset() as c_uint;
    for (i, &arg_ty) in arg_tys.iter().enumerate() {
        let arg_datum = if !has_tupled_arg || i < arg_tys.len() - 1 {
            if type_of::arg_is_indirect(bcx.ccx(), arg_ty)
                && bcx.sess().opts.debuginfo != FullDebugInfo {
                // Don't copy an indirect argument to an alloca, the caller
                // already put it in a temporary alloca and gave it up, unless
                // we emit extra-debug-info, which requires local allocas :(.
                let llarg = get_param(fcx.llfn, idx);
                idx += 1;
                bcx.fcx.schedule_lifetime_end(arg_scope_id, llarg);
                bcx.fcx.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);

                datum::Datum::new(llarg, arg_ty, datum::Lvalue::new("create_datum_for_fn_args"))
            } else if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                let data = get_param(fcx.llfn, idx);
                let extra = get_param(fcx.llfn, idx + 1);
                idx += 2;
                unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
                                                               arg_scope_id, (data, extra),
                                                               |(data, extra), bcx, dst| {
                    Store(bcx, data, expr::get_dataptr(bcx, dst));
                    Store(bcx, extra, expr::get_meta(bcx, dst));
                    bcx
                }))
            } else {
                let llarg = get_param(fcx.llfn, idx);
                idx += 1;
                let tmp = datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty));
                unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
                                                               arg_scope_id, tmp,
                                                               |tmp, bcx, dst| tmp.store_to(bcx, dst)))
            }
        } else {
            // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
            match arg_ty.sty {
                ty::TyTuple(ref tupled_arg_tys) => {
                    unpack_datum!(bcx,
                                  datum::lvalue_scratch_datum(bcx,
                                                              arg_ty,
                                                              "tupled_args",
                                                              arg_scope_id,
                                                              (),
                                                              |(), mut bcx, llval| {
                        for (j, &tupled_arg_ty) in
                                tupled_arg_tys.iter().enumerate() {
                            let lldest = StructGEP(bcx, llval, j);
                            if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
                                let data = get_param(bcx.fcx.llfn, idx);
                                let extra = get_param(bcx.fcx.llfn, idx + 1);
                                Store(bcx, data, expr::get_dataptr(bcx, lldest));
                                Store(bcx, extra, expr::get_meta(bcx, lldest));
                                idx += 2;
                            } else {
                                let datum = datum::Datum::new(
                                    get_param(bcx.fcx.llfn, idx),
                                    tupled_arg_ty,
                                    arg_kind(bcx.fcx, tupled_arg_ty));
                                idx += 1;
                                bcx = datum.store_to(bcx, lldest);
                            }
                        }
                        bcx
                    }))
                }
                _ => {
                    bcx.tcx().sess.bug("last argument of a function with \
                                        `rust-call` ABI isn't a tuple?!")
                }
            }
        };

        let pat = &*args[i].pat;
        bcx = if let Some(name) = simple_name(pat) {
            // Generate nicer LLVM for the common case of a simple fn argument
            // pattern like `x: T`.
            set_value_name(arg_datum.val, &bcx.name(name));
            bcx.fcx.lllocals.borrow_mut().insert(pat.id, arg_datum);
            bcx
        } else {
            // General path. Copy out the values that are used in the
            // pattern.
            _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
        };
        debuginfo::create_argument_metadata(bcx, &args[i]);
    }

    bcx
}
// Ties up the llstaticallocas -> llloadenv -> lltop edges,
// and builds the return block.
pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
                             last_bcx: Block<'blk, 'tcx>,
                             retty: ty::FnOutput<'tcx>,
                             ret_debug_loc: DebugLoc) {
    let _icx = push_ctxt("finish_fn");

    let ret_cx = match fcx.llreturn.get() {
        Some(llreturn) => {
            if !last_bcx.terminated.get() {
                Br(last_bcx, llreturn, DebugLoc::None);
            }
            raw_block(fcx, false, llreturn)
        }
        None => last_bcx,
    };

    // This shouldn't need to recompute the return type,
    // as new_fn_ctxt did it already.
    let substd_retty = fcx.monomorphize(&retty);
    build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc);

    debuginfo::clear_source_location(fcx);
    fcx.cleanup();
}
// Builds the return block for a function.
pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
                                      ret_cx: Block<'blk, 'tcx>,
                                      retty: ty::FnOutput<'tcx>,
                                      ret_debug_location: DebugLoc) {
    if fcx.llretslotptr.get().is_none() ||
       (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
        return RetVoid(ret_cx, ret_debug_location);
    }

    let retslot = if fcx.needs_ret_allocas {
        Load(ret_cx, fcx.llretslotptr.get().unwrap())
    } else {
        fcx.llretslotptr.get().unwrap()
    };
    let retptr = Value(retslot);
    match retptr.get_dominating_store(ret_cx) {
        // If there's only a single store to the ret slot, we can directly return
        // the value that was stored and omit the store and the alloca
        Some(s) => {
            let retval = s.get_operand(0).unwrap().get();
            s.erase_from_parent();

            if retptr.has_no_uses() {
                retptr.erase_from_parent();
            }

            let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) {
                Trunc(ret_cx, retval, Type::i1(fcx.ccx))
            } else {
                retval
            };

            if fcx.caller_expects_out_pointer {
                if let ty::FnConverging(retty) = retty {
                    store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty);
                }
                RetVoid(ret_cx, ret_debug_location)
            } else {
                Ret(ret_cx, retval, ret_debug_location)
            }
        }

        // Otherwise, copy the return value to the ret slot
        None => match retty {
            ty::FnConverging(retty) => {
                if fcx.caller_expects_out_pointer {
                    memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty);
                    RetVoid(ret_cx, ret_debug_location)
                } else {
                    Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location)
                }
            }
            ty::FnDiverging => {
                if fcx.caller_expects_out_pointer {
                    RetVoid(ret_cx, ret_debug_location)
                } else {
                    Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location)
                }
            }
        }
    }
}
/// Builds an LLVM function out of a source function.
///
/// If the function closes over its environment a closure will be returned.
pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                   decl: &hir::FnDecl,
                                   body: &hir::Block,
                                   llfndecl: ValueRef,
                                   param_substs: &'tcx Substs<'tcx>,
                                   fn_ast_id: ast::NodeId,
                                   _attributes: &[ast::Attribute],
                                   output_type: ty::FnOutput<'tcx>,
                                   abi: Abi,
                                   closure_env: closure::ClosureEnv<'b>) {
    ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);

    let _icx = push_ctxt("trans_closure");
    attributes::emit_uwtable(llfndecl, true);

    debug!("trans_closure(..., param_substs={:?})",
           param_substs);

    let has_env = match closure_env {
        closure::ClosureEnv::Closure(..) => true,
        closure::ClosureEnv::NotClosure => false,
    };

    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = new_fn_ctxt(ccx,
                      llfndecl,
                      fn_ast_id,
                      has_env,
                      output_type,
                      param_substs,
                      Some(body.span),
                      &arena);
    let mut bcx = init_function(&fcx, false, output_type);

    // cleanup scope for the incoming arguments
    let fn_cleanup_debug_loc =
        debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, fn_ast_id, body.span, true);
    let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);

    let block_ty = node_id_type(bcx, body.id);

    // Set up arguments to the function.
    let monomorphized_arg_types =
        decl.inputs.iter()
                   .map(|arg| node_id_type(bcx, arg.id))
                   .collect::<Vec<_>>();
    for monomorphized_arg_type in &monomorphized_arg_types {
        debug!("trans_closure: monomorphized_arg_type: {:?}",
               monomorphized_arg_type);
    }
    debug!("trans_closure: function lltype: {}",
           bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));

    let has_tupled_arg = match closure_env {
        closure::ClosureEnv::NotClosure => abi == RustCall,
        _ => false,
    };

    bcx = create_datums_for_fn_args(bcx, &decl.inputs, &monomorphized_arg_types,
                                    has_tupled_arg, arg_scope);

    bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));

    // Up until here, IR instructions for this function have explicitly not been annotated with
    // source code location, so we don't step into call setup code. From here on, source location
    // emitting should be enabled.
    debuginfo::start_emitting_source_locations(&fcx);

    let dest = match fcx.llretslotptr.get() {
        Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")),
        None => {
            assert!(type_is_zero_size(bcx.ccx(), block_ty));
            expr::Ignore
        }
    };

    // This call to trans_block is the place where we bridge between
    // translation calls that don't have a return value (trans_crate,
    // trans_mod, trans_item, et cetera) and those that do
    // (trans_block, trans_expr, et cetera).
    bcx = controlflow::trans_block(bcx, body, dest);

    match dest {
        expr::SaveIn(slot) if fcx.needs_ret_allocas => {
            Store(bcx, slot, fcx.llretslotptr.get().unwrap());
        }
        _ => {}
    }

    match fcx.llreturn.get() {
        Some(_) => {
            Br(bcx, fcx.return_exit_block(), DebugLoc::None);
            fcx.pop_custom_cleanup_scope(arg_scope);
        }
        None => {
            // Microoptimization writ large: avoid creating a separate
            // llreturn basic block
            bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
        }
    };

    // Put return block after all other blocks.
    // This somewhat improves single-stepping experience in the debugger.
    unsafe {
        let llreturn = fcx.llreturn.get();
        if let Some(llreturn) = llreturn {
            llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
        }
    }

    let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id,
                                     fn_cleanup_debug_loc.span);

    // Insert the mandatory first few basic blocks before lltop.
    finish_fn(&fcx, bcx, output_type, ret_debug_loc);
}
/// Creates an LLVM function corresponding to a source language function.
pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                          decl: &hir::FnDecl,
                          body: &hir::Block,
                          llfndecl: ValueRef,
                          param_substs: &'tcx Substs<'tcx>,
                          id: ast::NodeId,
                          attrs: &[ast::Attribute]) {
    let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
    debug!("trans_fn(param_substs={:?})", param_substs);
    let _icx = push_ctxt("trans_fn");
    let fn_ty = ccx.tcx().node_id_to_type(id);
    let output_type = ccx.tcx().erase_late_bound_regions(&fn_ty.fn_ret());
    let abi = fn_ty.fn_abi();
    trans_closure(ccx, decl, body, llfndecl, param_substs, id, attrs, output_type, abi,
                  closure::ClosureEnv::NotClosure);
}
pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                    ctor_id: ast::NodeId,
                                    disr: ty::Disr,
                                    param_substs: &'tcx Substs<'tcx>,
                                    llfndecl: ValueRef) {
    let _icx = push_ctxt("trans_enum_variant");

    trans_enum_variant_or_tuple_like_struct(
        ccx,
        ctor_id,
        disr,
        param_substs,
        llfndecl);
}
pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                                 ctor_ty: Ty<'tcx>,
                                                 disr: ty::Disr,
                                                 args: callee::CallArgs,
                                                 dest: expr::Dest,
                                                 debug_loc: DebugLoc)
                                                 -> Result<'blk, 'tcx> {
    let ccx = bcx.fcx.ccx;

    let result_ty = match ctor_ty.sty {
        ty::TyBareFn(_, ref bft) => {
            bcx.tcx().erase_late_bound_regions(&bft.sig.output()).unwrap()
        }
        _ => ccx.sess().bug(
            &format!("trans_enum_variant_constructor: \
                      unexpected ctor return type {}",
                     ctor_ty))
    };

    // Get location to store the result. If the user does not care about
    // the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, result_ty) {
                let llresult = alloc_ty(bcx, result_ty, "constructor_result");
                call_lifetime_start(bcx, llresult);
                llresult
            } else {
                C_undef(type_of::type_of(ccx, result_ty).ptr_to())
            }
        }
    };

    if !type_is_zero_size(ccx, result_ty) {
        match args {
            callee::ArgExprs(exprs) => {
                let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
                bcx = expr::trans_adt(bcx,
                                      result_ty,
                                      disr,
                                      &fields[..],
                                      None,
                                      expr::SaveIn(llresult),
                                      debug_loc);
            }
            _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
        }
    } else {
        // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
        // contents, there could be side-effects we need from them.
        match args {
            callee::ArgExprs(exprs) => {
                for expr in exprs {
                    bcx = expr::trans_into(bcx, expr, expr::Ignore);
                }
            }
            _ => ()
        }
    }

    // If the caller doesn't care about the result,
    // drop the temporary we made
    let bcx = match dest {
        expr::SaveIn(_) => bcx,
        expr::Ignore => {
            let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
            if !type_is_zero_size(ccx, result_ty) {
                call_lifetime_end(bcx, llresult);
            }
            bcx
        }
    };

    Result::new(bcx, llresult)
}
pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                    ctor_id: ast::NodeId,
                                    param_substs: &'tcx Substs<'tcx>,
                                    llfndecl: ValueRef) {
    let _icx = push_ctxt("trans_tuple_struct");

    trans_enum_variant_or_tuple_like_struct(
        ccx,
        ctor_id,
        0,
        param_substs,
        llfndecl);
}
fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                                     ctor_id: ast::NodeId,
                                                     disr: ty::Disr,
                                                     param_substs: &'tcx Substs<'tcx>,
                                                     llfndecl: ValueRef) {
    let ctor_ty = ccx.tcx().node_id_to_type(ctor_id);
    let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);

    let result_ty = match ctor_ty.sty {
        ty::TyBareFn(_, ref bft) => {
            ccx.tcx().erase_late_bound_regions(&bft.sig.output())
        }
        _ => ccx.sess().bug(
            &format!("trans_enum_variant_or_tuple_like_struct: \
                      unexpected ctor return type {}",
                     ctor_ty))
    };

    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty,
                      param_substs, None, &arena);
    let bcx = init_function(&fcx, false, result_ty);

    assert!(!fcx.needs_ret_allocas);

    let arg_tys = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_args());

    if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
        let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
        let repr = adt::represent_type(ccx, result_ty.unwrap());
        let mut llarg_idx = fcx.arg_offset() as c_uint;
        for (i, arg_ty) in arg_tys.into_iter().enumerate() {
            let lldestptr = adt::trans_field_ptr(bcx,
                                                 &*repr,
                                                 dest,
                                                 disr,
                                                 i);
            if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                Store(bcx, get_param(fcx.llfn, llarg_idx), expr::get_dataptr(bcx, lldestptr));
                Store(bcx, get_param(fcx.llfn, llarg_idx + 1), expr::get_meta(bcx, lldestptr));
                llarg_idx += 2;
            } else {
                let arg = get_param(fcx.llfn, llarg_idx);
                llarg_idx += 1;

                if arg_is_indirect(ccx, arg_ty) {
                    memcpy_ty(bcx, lldestptr, arg, arg_ty);
                } else {
                    store_ty(bcx, arg, lldestptr, arg_ty);
                }
            }
        }
        adt::trans_set_discr(bcx, &*repr, dest, disr);
    }

    finish_fn(&fcx, bcx, result_ty, DebugLoc::None);
}
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &hir::EnumDef, sp: Span, id: ast::NodeId) {
    let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully

    let print_info = ccx.sess().print_enum_sizes();

    let levels = ccx.tcx().node_lint_levels.borrow();
    let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
    let lvlsrc = levels.get(&(id, lint_id));
    let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);

    if is_allow && !print_info {
        // we're not interested in anything here
        return;
    }

    let ty = ccx.tcx().node_id_to_type(id);
    let avar = adt::represent_type(ccx, ty);
    match *avar {
        adt::General(_, ref variants, _) => {
            for var in variants {
                let mut size = 0;
                for field in var.fields.iter().skip(1) {
                    // skip the discriminant
                    size += llsize_of_real(ccx, sizing_type_of(ccx, *field));
                }
                sizes.push(size);
            }
        },
        _ => { /* its size is either constant or unimportant */ }
    }

    let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0),
        |(l, s, li), (idx, &size)|
            if size > l {
                (size, l, idx)
            } else if size > s {
                (l, size, li)
            } else {
                (l, s, li)
            }
    );

    if print_info {
        let llty = type_of::sizing_type_of(ccx, ty);

        let sess = &ccx.tcx().sess;
        sess.span_note(sp, &*format!("total size: {} bytes", llsize_of_real(ccx, llty)));
        match *avar {
            adt::General(..) => {
                for (i, var) in enum_def.variants.iter().enumerate() {
                    ccx.tcx().sess.span_note(var.span,
                                             &*format!("variant data: {} bytes", sizes[i]));
                }
            }
            _ => {}
        }
    }

    // we only warn if the largest variant is at least thrice as large as
    // the second-largest.
    if !is_allow && largest > slargest * 3 && slargest > 0 {
        // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
        // pass for the latter already ran.
        lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
                            *lvlsrc.unwrap(), Some(sp),
                            &format!("enum variant is more than three times larger \
                                      ({} bytes) than the next largest (ignoring padding)",
                                     largest));

        ccx.sess().span_note(enum_def.variants[largest_index].span,
                             "this variant is the largest");
    }
}
pub struct TransItemVisitor<'a, 'tcx: 'a> {
    pub ccx: &'a CrateContext<'a, 'tcx>,
}

impl<'a, 'tcx, 'v> Visitor<'v> for TransItemVisitor<'a, 'tcx> {
    fn visit_item(&mut self, i: &hir::Item) {
        trans_item(self.ccx, i);
    }
}
pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
    // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
    // applicable to variable declarations and may not really make sense for
    // Rust code in the first place but whitelist them anyway and trust that
    // the user knows what s/he's doing. Who knows, unanticipated use cases
    // may pop up in the future.
    //
    // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
    // and don't have to be, LLVM treats them as no-ops.
    match name {
        "appending" => Some(llvm::AppendingLinkage),
        "available_externally" => Some(llvm::AvailableExternallyLinkage),
        "common" => Some(llvm::CommonLinkage),
        "extern_weak" => Some(llvm::ExternalWeakLinkage),
        "external" => Some(llvm::ExternalLinkage),
        "internal" => Some(llvm::InternalLinkage),
        "linkonce" => Some(llvm::LinkOnceAnyLinkage),
        "linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
        "private" => Some(llvm::PrivateLinkage),
        "weak" => Some(llvm::WeakAnyLinkage),
        "weak_odr" => Some(llvm::WeakODRLinkage),
        _ => None,
    }
}
/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
#[derive(Copy, Clone)]
pub enum ValueOrigin {
    /// The LLVM `Value` is in this context because the corresponding item was
    /// assigned to the current compilation unit.
    OriginalTranslation,
    /// The `Value`'s corresponding item was assigned to some other compilation
    /// unit, but the `Value` was translated in this context anyway because the
    /// item is marked `#[inline]`.
    InlinedCopy,
}
/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
/// If the `llval` is the direct translation of a specific Rust item, `id`
/// should be set to the `NodeId` of that item. (This mapping should be
/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
/// `None`.) `llval_origin` indicates whether `llval` is the translation of an
/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
/// assigned to a different compilation unit.
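/// For example (illustrative): with more than one codegen unit, an
/// `#[inline]` function whose item was assigned to a different unit is still
/// translated locally, but as an `InlinedCopy` it receives
/// `AvailableExternallyLinkage` below, so the duplicated body is discarded at
/// link time.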
2003 pub fn update_linkage(ccx: &CrateContext,
2005 id: Option<ast::NodeId>,
2006 llval_origin: ValueOrigin) {
2007 match llval_origin {
2009 // `llval` is a translation of an item defined in a separate
2010 // compilation unit. This only makes sense if there are at least
2011 // two compilation units.
2012 assert!(ccx.sess().opts.cg.codegen_units > 1);
2013 // `llval` is a copy of something defined elsewhere, so use
2014 // `AvailableExternallyLinkage` to avoid duplicating code in the
2016 llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
2019 OriginalTranslation => {},
2022 if let Some(id) = id {
2023 let item = ccx.tcx().map.get(id);
2024 if let hir_map::NodeItem(i) = item {
2025 if let Some(name) = attr::first_attr_value_str_by_name(&i.attrs, "linkage") {
2026 if let Some(linkage) = llvm_linkage_by_name(&name) {
2027 llvm::SetLinkage(llval, linkage);
2029 ccx.sess().span_fatal(i.span, "invalid linkage specified");
2037 Some(id) if ccx.reachable().contains(&id) => {
2038 llvm::SetLinkage(llval, llvm::ExternalLinkage);
2041 // `id` does not refer to an item in `ccx.reachable`.
2042 if ccx.sess().opts.cg.codegen_units > 1 {
2043 llvm::SetLinkage(llval, llvm::ExternalLinkage);
2045 llvm::SetLinkage(llval, llvm::InternalLinkage);
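
// Typical call site (see `trans_item` below): once a definition has been
// emitted for an item, e.g.
//
//     update_linkage(ccx, llfn, Some(item.id), OriginalTranslation);
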
fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &hir::Item) {
    match attr::first_attr_value_str_by_name(&i.attrs, "link_section") {
        Some(sect) => {
            if contains_null(&sect) {
                ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`",
                                          &sect));
            }
            unsafe {
                let buf = CString::new(sect.as_bytes()).unwrap();
                llvm::LLVMSetSection(llval, buf.as_ptr());
            }
        },
        None => ()
    }
}
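
// Illustrative only: this honours user code like
//
//     #[link_section = ".init_array"]
//     static INIT: extern "C" fn() = some_init_fn;
//
// where the section name must not contain interior NUL bytes.
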
pub fn trans_item(ccx: &CrateContext, item: &hir::Item) {
    let _icx = push_ctxt("trans_item");

    let from_external = ccx.external_srcs().borrow().contains_key(&item.id);

    match item.node {
        hir::ItemFn(ref decl, _, _, abi, ref generics, ref body) => {
            if !generics.is_type_parameterized() {
                let trans_everywhere = attr::requests_inline(&item.attrs);
                // Ignore `trans_everywhere` for cross-crate inlined items
                // (`from_external`). `trans_item` will be called once for each
                // compilation unit that references the item, so it will still get
                // translated everywhere it's needed.
                for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
                    let llfn = get_item_val(ccx, item.id);
                    let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
                    if abi != Rust {
                        foreign::trans_rust_fn_with_foreign_abi(ccx, &**decl, &**body, &item.attrs,
                                                                llfn, empty_substs, item.id, None);
                    } else {
                        trans_fn(ccx, &**decl, &**body, llfn, empty_substs, item.id, &item.attrs);
                    }
                    set_global_section(ccx, llfn, item);
                    update_linkage(ccx, llfn, Some(item.id),
                                   if is_origin { OriginalTranslation } else { InlinedCopy });

                    if is_entry_fn(ccx.sess(), item.id) {
                        create_entry_wrapper(ccx, item.span, llfn);
                        // Check for the #[rustc_error] annotation, which forces an
                        // error in trans. This is used to write compile-fail tests
                        // that actually test that compilation succeeds without
                        // reporting an error.
                        let item_def_id = ccx.tcx().map.local_def_id(item.id);
                        if ccx.tcx().has_attr(item_def_id, "rustc_error") {
                            ccx.tcx().sess.span_fatal(item.span, "compilation successful");
                        }
                    }
                }
            }

            // Be sure to travel more than just one layer deep to catch nested
            // items in blocks and such.
            let mut v = TransItemVisitor { ccx: ccx };
            v.visit_block(&**body);
        }
        hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
            meth::trans_impl(ccx,
                             item.name,
                             &impl_items[..],
                             generics,
                             item.id);
        }
        hir::ItemMod(ref m) => {
            trans_mod(&ccx.rotate(), m);
        }
        hir::ItemEnum(ref enum_definition, ref gens) => {
            if gens.ty_params.is_empty() {
                // Sizes only make sense for non-generic types.
                enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
            }
        }
        hir::ItemConst(_, ref expr) => {
            // Recurse on the expression to catch items in blocks.
            let mut v = TransItemVisitor { ccx: ccx };
            v.visit_expr(&**expr);
        }
        hir::ItemStatic(_, m, ref expr) => {
            // Recurse on the expression to catch items in blocks.
            let mut v = TransItemVisitor { ccx: ccx };
            v.visit_expr(&**expr);

            let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) {
                Ok(g) => g,
                Err(err) => ccx.tcx().sess.span_fatal(expr.span, &err.description()),
            };
            set_global_section(ccx, g, item);
            update_linkage(ccx, g, Some(item.id), OriginalTranslation);
        }
        hir::ItemForeignMod(ref foreign_mod) => {
            foreign::trans_foreign_mod(ccx, foreign_mod);
        }
        hir::ItemTrait(..) => {
            // Inside of this trait definition, we won't be actually translating any
            // functions, but the trait still needs to be walked. Otherwise default
            // methods with items will not get translated and will cause ICEs when
            // metadata time comes around.
            let mut v = TransItemVisitor { ccx: ccx };
            visit::walk_item(&mut v, item);
        }
        _ => { /* fall through */ }
    }
}

// Translate a module. Doing this amounts to translating the items in the
// module; there ends up being no artifact (aside from linkage names) of
// separate modules in the compiled program. That's because modules exist
// only as a convenience for humans working with the code, to organize names
// and control visibility.
pub fn trans_mod(ccx: &CrateContext, m: &hir::Mod) {
    let _icx = push_ctxt("trans_mod");
    for item in &m.items {
        trans_item(ccx, &**item);
    }
}

// Only use this for foreign function ABIs and glue; use `register_fn` for
// Rust functions.
pub fn register_fn_llvmty(ccx: &CrateContext,
                          sp: Span,
                          sym: String,
                          node_id: ast::NodeId,
                          cc: llvm::CallConv,
                          llfty: Type) -> ValueRef {
    debug!("register_fn_llvmty id={} sym={}", node_id, sym);

    let llfn = declare::define_fn(ccx, &sym[..], cc, llfty,
                                  ty::FnConverging(ccx.tcx().mk_nil())).unwrap_or_else(|| {
        ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
    });
    finish_register_fn(ccx, sym, node_id);
    llfn
}

fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId) {
    ccx.item_symbols().borrow_mut().insert(node_id, sym);
}

fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                         sp: Span,
                         sym: String,
                         node_id: ast::NodeId,
                         node_type: Ty<'tcx>)
                         -> ValueRef {
    if let ty::TyBareFn(_, ref f) = node_type.sty {
        if f.abi != Rust && f.abi != RustCall {
            ccx.sess().span_bug(sp, &format!("only the `{}` or `{}` calling conventions are valid \
                                              for this function; `{}` was specified",
                                             Rust.name(), RustCall.name(), f.abi.name()));
        }
    } else {
        ccx.sess().span_bug(sp, "expected bare rust function")
    }

    let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(|| {
        ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
    });
    finish_register_fn(ccx, sym, node_id);
    llfn
}

pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
    match *sess.entry_fn.borrow() {
        Some((entry_id, _)) => node_id == entry_id,
        None => false,
    }
}

/// Create the `main` function which will initialise the rust runtime and call
/// users’ main function.
pub fn create_entry_wrapper(ccx: &CrateContext,
                            sp: Span,
                            main_llfn: ValueRef) {
    let et = ccx.sess().entry_type.get().unwrap();
    match et {
        config::EntryMain => {
            create_entry_fn(ccx, sp, main_llfn, true);
        }
        config::EntryStart => create_entry_fn(ccx, sp, main_llfn, false),
        config::EntryNone => {} // Do nothing.
    }

    fn create_entry_fn(ccx: &CrateContext,
                       sp: Span,
                       rust_main: ValueRef,
                       use_start_lang_item: bool) {
        // The wrapper takes `argc` and `argv` and returns the process exit code.
        let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()],
                               &ccx.int_type());

        let llfn = declare::define_cfn(ccx, "main", llfty,
                                       ccx.tcx().mk_nil()).unwrap_or_else(|| {
            ccx.sess().span_err(sp, "entry symbol `main` defined multiple times");
            // FIXME: We should be smart and show a better diagnostic here.
            ccx.sess().help("did you use #[no_mangle] on `fn main`? Use #[start] instead");
            ccx.sess().abort_if_errors();
            panic!();
        });

        let llbb = unsafe {
            llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn,
                                                "top\0".as_ptr() as *const _)
        };
        let bld = ccx.raw_builder();
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bld, llbb);

            debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx);

            let (start_fn, args) = if use_start_lang_item {
                let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
                    Ok(id) => id,
                    Err(s) => { ccx.sess().fatal(&s[..]); }
                };
                let start_fn = if let Some(start_node_id) = ccx.tcx().map.as_local_node_id(start_def_id) {
                    get_item_val(ccx, start_node_id)
                } else {
                    let start_fn_type = csearch::get_type(ccx.tcx(), start_def_id).ty;
                    trans_external_path(ccx, start_def_id, start_fn_type)
                };

                let opaque_rust_main = llvm::LLVMBuildPointerCast(bld,
                    rust_main, Type::i8p(ccx).to_ref(),
                    "rust_main\0".as_ptr() as *const _);

                (start_fn, vec![opaque_rust_main,
                                get_param(llfn, 0),
                                get_param(llfn, 1)])
            } else {
                debug!("using user-defined start fn");
                (rust_main, vec![get_param(llfn, 0 as c_uint),
                                 get_param(llfn, 1 as c_uint)])
            };

            let result = llvm::LLVMBuildCall(bld,
                                             start_fn,
                                             args.as_ptr(),
                                             args.len() as c_uint,
                                             noname());

            llvm::LLVMBuildRet(bld, result);
        }
    }
}
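
// Illustrative only: for `EntryMain`, the emitted wrapper corresponds roughly
// to the following C, where `start` is the `#[lang = "start"]` item:
//
//     int main(int argc, char **argv) {
//         return start((void *)rust_main, argc, argv);
//     }
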
fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, id: ast::NodeId,
                           ty: Ty<'tcx>, attrs: &[ast::Attribute]) -> String {
    match ccx.external_srcs().borrow().get(&id) {
        Some(&did) => {
            let sym = csearch::get_symbol(&ccx.sess().cstore, did);
            debug!("found item {} in other crate...", sym);
            return sym;
        }
        None => {}
    }

    match attr::find_export_name_attr(ccx.sess().diagnostic(), attrs) {
        // Use provided name
        Some(name) => name.to_string(),
        _ => {
            let path = ccx.tcx().map.def_path_from_id(id);
            if attr::contains_name(attrs, "no_mangle") {
                // Don't mangle
                path.last().unwrap().data.to_string()
            } else {
                match weak_lang_items::link_name(attrs) {
                    Some(name) => name.to_string(),
                    None => {
                        // Usual name mangling
                        mangle_exported_name(ccx, path, ty, id)
                    }
                }
            }
        }
    }
}
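
// Illustrative only: the attribute handling above means that, e.g.
//
//     #[export_name = "my_symbol"] fn f() {}   // exported as `my_symbol`
//     #[no_mangle] fn g() {}                   // exported as `g`
//
// while everything else goes through the usual name mangling.
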
fn contains_null(s: &str) -> bool {
    s.bytes().any(|b| b == 0)
}

pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
    debug!("get_item_val(id=`{}`)", id);

    match ccx.item_vals().borrow().get(&id).cloned() {
        Some(v) => return v,
        None => {}
    }

    let item = ccx.tcx().map.get(id);
    debug!("get_item_val: id={} item={:?}", id, item);
    let val = match item {
        hir_map::NodeItem(i) => {
            let ty = ccx.tcx().node_id_to_type(i.id);
            let sym = || exported_name(ccx, id, ty, &i.attrs);

            let v = match i.node {
                hir::ItemStatic(..) => {
                    // If this static came from an external crate, then
                    // we need to get the symbol from csearch instead of
                    // using the current crate's name/version
                    // information in the hash of the symbol
                    let sym = sym();
                    debug!("making {}", sym);

                    // Create the global before evaluating the initializer;
                    // this is necessary to allow recursive statics.
                    let llty = type_of(ccx, ty);
                    let g = declare::define_global(ccx, &sym[..],
                                                   llty).unwrap_or_else(|| {
                        ccx.sess().span_fatal(i.span, &format!("symbol `{}` is already defined",
                                                               sym))
                    });

                    ccx.item_symbols().borrow_mut().insert(i.id, sym);
                    g
                }

                hir::ItemFn(_, _, _, abi, _, _) => {
                    let sym = sym();
                    let llfn = if abi == Rust {
                        register_fn(ccx, i.span, sym, i.id, ty)
                    } else {
                        foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id)
                    };
                    attributes::from_fn_attrs(ccx, &i.attrs, llfn);
                    llfn
                }

                _ => ccx.sess().bug("get_item_val: weird result in table")
            };

            v
        }

        hir_map::NodeTraitItem(trait_item) => {
            debug!("get_item_val(): processing a NodeTraitItem");
            match trait_item.node {
                hir::MethodTraitItem(_, Some(_)) => {
                    register_method(ccx, id, &trait_item.attrs, trait_item.span)
                }
                _ => {
                    ccx.sess().span_bug(trait_item.span,
                                        "unexpected variant: trait item other than a provided \
                                         method in get_item_val()");
                }
            }
        }

        hir_map::NodeImplItem(impl_item) => {
            match impl_item.node {
                hir::MethodImplItem(..) => {
                    register_method(ccx, id, &impl_item.attrs, impl_item.span)
                }
                _ => {
                    ccx.sess().span_bug(impl_item.span,
                                        "unexpected variant: non-method impl item in \
                                         get_item_val()");
                }
            }
        }

        hir_map::NodeForeignItem(ni) => {
            match ni.node {
                hir::ForeignItemFn(..) => {
                    let abi = ccx.tcx().map.get_foreign_abi(id);
                    let ty = ccx.tcx().node_id_to_type(ni.id);
                    let name = foreign::link_name(&*ni);
                    foreign::register_foreign_item_fn(ccx, abi, ty, &name, &ni.attrs)
                }
                hir::ForeignItemStatic(..) => {
                    foreign::register_static(ccx, &*ni)
                }
            }
        }

        hir_map::NodeVariant(ref v) => {
            let llfn;
            let fields = if v.node.data.is_struct() {
                ccx.sess().bug("struct variant kind unexpected in get_item_val")
            } else {
                v.node.data.fields()
            };
            assert!(!fields.is_empty());
            let ty = ccx.tcx().node_id_to_type(id);
            let parent = ccx.tcx().map.get_parent(id);
            let enm = ccx.tcx().map.expect_item(parent);
            let sym = exported_name(ccx,
                                    id,
                                    ty,
                                    &enm.attrs);

            llfn = match enm.node {
                hir::ItemEnum(_, _) => {
                    register_fn(ccx, (*v).span, sym, id, ty)
                }
                _ => ccx.sess().bug("NodeVariant, shouldn't happen")
            };
            attributes::inline(llfn, attributes::InlineAttr::Hint);
            llfn
        }

        hir_map::NodeStructCtor(struct_def) => {
            // Only register the constructor if this is a tuple-like struct.
            let ctor_id = if struct_def.is_struct() {
                ccx.sess().bug("attempt to register a constructor of \
                                a non-tuple-like struct")
            } else {
                struct_def.id()
            };
            let parent = ccx.tcx().map.get_parent(id);
            let struct_item = ccx.tcx().map.expect_item(parent);
            let ty = ccx.tcx().node_id_to_type(ctor_id);
            let sym = exported_name(ccx,
                                    id,
                                    ty,
                                    &struct_item.attrs);
            let llfn = register_fn(ccx, struct_item.span,
                                   sym, ctor_id, ty);
            attributes::inline(llfn, attributes::InlineAttr::Hint);
            llfn
        }

        ref variant => {
            ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}",
                                    variant))
        }
    };

    // All LLVM globals and functions are initially created as external-linkage
    // declarations. If `trans_item`/`trans_fn` later turns the declaration
    // into a definition, it adjusts the linkage then (using `update_linkage`).
    //
    // The exception is foreign items, which have their linkage set inside the
    // call to `foreign::register_*` above. We don't touch the linkage after
    // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
    // other item translation functions do).

    ccx.item_vals().borrow_mut().insert(id, val);
    val
}

fn register_method(ccx: &CrateContext, id: ast::NodeId,
                   attrs: &[ast::Attribute], span: Span) -> ValueRef {
    let mty = ccx.tcx().node_id_to_type(id);

    let sym = exported_name(ccx, id, mty, &attrs);

    if let ty::TyBareFn(_, ref f) = mty.sty {
        let llfn = if f.abi == Rust || f.abi == RustCall {
            register_fn(ccx, span, sym, id, mty)
        } else {
            foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id)
        };
        attributes::from_fn_attrs(ccx, &attrs, llfn);
        llfn
    } else {
        ccx.sess().span_bug(span, "expected bare rust function");
    }
}

pub fn crate_ctxt_to_encode_parms<'a, 'tcx>(cx: &'a SharedCrateContext<'a, 'tcx>,
                                            ie: encoder::EncodeInlinedItem<'a>,
                                            reachable: &'a NodeSet)
                                            -> encoder::EncodeParams<'a, 'tcx> {
    encoder::EncodeParams {
        diag: cx.sess().diagnostic(),
        tcx: cx.tcx(),
        reexports: cx.export_map(),
        item_symbols: cx.item_symbols(),
        link_meta: cx.link_meta(),
        cstore: &cx.sess().cstore,
        encode_inlined_item: ie,
        reachable: reachable,
    }
}

pub fn write_metadata(cx: &SharedCrateContext, krate: &hir::Crate,
                      reachable: &NodeSet) -> Vec<u8> {
    use flate;

    // Metadata is only needed when producing a library of some kind; plain
    // executables get none.
    let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
        *ty != config::CrateTypeExecutable
    });
    if !any_library {
        return Vec::new();
    }

    let encode_inlined_item: encoder::EncodeInlinedItem =
        Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii));

    let encode_parms = crate_ctxt_to_encode_parms(cx, encode_inlined_item,
                                                  reachable);
    let metadata = encoder::encode_metadata(encode_parms, krate);
    let mut compressed = encoder::metadata_encoding_version.to_vec();
    compressed.push_all(&flate::deflate_bytes(&metadata));
    let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
    let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
    let name = format!("rust_metadata_{}_{}",
                       cx.link_meta().crate_name,
                       cx.link_meta().crate_hash);
    let buf = CString::new(name).unwrap();
    let llglobal = unsafe {
        llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(),
                            buf.as_ptr())
    };
    unsafe {
        llvm::LLVMSetInitializer(llglobal, llconst);
        let name = loader::meta_section_name(&cx.sess().target.target);
        let name = CString::new(name).unwrap();
        llvm::LLVMSetSection(llglobal, name.as_ptr())
    }
    metadata
}
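
// Illustrative only: the metadata module ends up holding a single global of
// roughly the form
//
//     @rust_metadata_<crate_name>_<crate_hash> =
//         global { [N x i8] } { ... }, section "<metadata section>"
//
// where the byte blob is the encoding-version header followed by the
// deflate-compressed metadata.
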
/// Find any symbols that are defined in one compilation unit, but not declared
/// in any other compilation unit. Give these symbols internal linkage.
fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<&str>) {
    unsafe {
        let mut declared = HashSet::new();

        // Collect all external declarations in all compilation units.
        for ccx in cx.iter() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                let linkage = llvm::LLVMGetLinkage(val);
                // We only care about external declarations (not definitions)
                // and available_externally definitions.
                if !(linkage == llvm::ExternalLinkage as c_uint &&
                     llvm::LLVMIsDeclaration(val) != 0) &&
                   !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
                    continue;
                }

                let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
                               .to_bytes().to_vec();
                declared.insert(name);
            }
        }

        // Examine each external definition. If the definition is not used in
        // any other compilation unit, and is not reachable from other crates,
        // then give it internal linkage.
        for ccx in cx.iter() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                // We only care about external definitions.
                if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
                     llvm::LLVMIsDeclaration(val) == 0) {
                    continue;
                }

                let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
                               .to_bytes().to_vec();
                if !declared.contains(&name) &&
                   !reachable.contains(str::from_utf8(&name).unwrap()) {
                    llvm::SetLinkage(val, llvm::InternalLinkage);
                    llvm::SetDLLStorageClass(val, llvm::DefaultStorageClass);
                }
            }
        }
    }
}

// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using the MSVC linker. We do this only for data, as the linker can fix
// up code references on its own.
// See #26591, #27438
fn create_imps(cx: &SharedCrateContext) {
    // The x86 ABI seems to require that leading underscores are added to symbol
    // names, so we need an extra underscore on 32-bit. There's also a leading
    // '\x01' here which disables LLVM's symbol mangling (e.g. no extra
    // underscores added in front).
    let prefix = if cx.sess().target.target.target_pointer_width == "32" {
        "\x01__imp__"
    } else {
        "\x01__imp_"
    };
    unsafe {
        for ccx in cx.iter() {
            let exported: Vec<_> = iter_globals(ccx.llmod())
                                       .filter(|&val| llvm::LLVMGetLinkage(val) ==
                                                      llvm::ExternalLinkage as c_uint &&
                                                      llvm::LLVMIsDeclaration(val) == 0)
                                       .collect();

            let i8p_ty = Type::i8p(&ccx);
            for val in exported {
                let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
                let mut imp_name = prefix.as_bytes().to_vec();
                imp_name.extend(name.to_bytes());
                let imp_name = CString::new(imp_name).unwrap();
                let imp = llvm::LLVMAddGlobal(ccx.llmod(), i8p_ty.to_ref(),
                                              imp_name.as_ptr() as *const _);
                let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
                llvm::LLVMSetInitializer(imp, init);
                llvm::SetLinkage(imp, llvm::ExternalLinkage);
            }
        }
    }
}
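
// Illustrative only: for a public `static FOO: u32`, the synthesized global
// corresponds roughly to the following LLVM IR (on a 32-bit target; 64-bit
// targets get a single underscore in the prefix):
//
//     @"\01__imp__FOO" = global i8* bitcast (i32* @FOO to i8*)
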
// Iterator over the values (globals or functions) of an LLVM module,
// advanced by one of the `LLVMGetNext*` stepper functions.
struct ValueIter {
    cur: ValueRef,
    step: unsafe extern "C" fn(ValueRef) -> ValueRef,
}

impl Iterator for ValueIter {
    type Item = ValueRef;

    fn next(&mut self) -> Option<ValueRef> {
        let old = self.cur;
        if !old.is_null() {
            self.cur = unsafe { (self.step)(old) };
            Some(old)
        } else {
            None
        }
    }
}

fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
    unsafe {
        ValueIter {
            cur: llvm::LLVMGetFirstGlobal(llmod),
            step: llvm::LLVMGetNextGlobal,
        }
    }
}

fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter {
    unsafe {
        ValueIter {
            cur: llvm::LLVMGetFirstFunction(llmod),
            step: llvm::LLVMGetNextFunction,
        }
    }
}

/// The context provided lists a set of reachable ids as calculated by
/// middle::reachable, but this contains far more ids and symbols than we're
/// actually exposing from the object file. This function will filter the set in
/// the context to the set of ids which correspond to symbols that are exposed
/// from the object file being generated.
///
/// This list is later used by linkers to determine the set of symbols needed to
/// be exposed from a dynamic library and it's also encoded into the metadata.
pub fn filter_reachable_ids(ccx: &SharedCrateContext) -> NodeSet {
    ccx.reachable().iter().map(|x| *x).filter(|id| {
        // First, only worry about nodes which have a symbol name.
        ccx.item_symbols().borrow().contains_key(id)
    }).filter(|&id| {
        // Next, we want to ignore some FFI functions that are not exposed from
        // this crate. Reachable FFI functions can be lumped into two
        // categories:
        //
        // 1. Those that are included statically via a static library
        // 2. Those included otherwise (e.g. dynamically or via a framework)
        //
        // Although our LLVM module is not literally emitting code for the
        // statically included symbols, it's an export of our library which
        // needs to be passed on to the linker and encoded in the metadata.
        //
        // As a result, if this id is an FFI item (foreign item) then we only
        // let it through if it's included statically.
        match ccx.tcx().map.get(id) {
            hir_map::NodeForeignItem(..) => {
                ccx.sess().cstore.is_statically_included_foreign_item(id)
            }
            _ => true,
        }
    }).collect()
}

pub fn trans_crate(tcx: &ty::ctxt, analysis: ty::CrateAnalysis) -> CrateTranslation {
    let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis;
    let krate = tcx.map.krate();

    let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
        v
    } else {
        tcx.sess.opts.debug_assertions
    };

    let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
        v
    } else {
        tcx.sess.opts.debug_assertions
    };

    // Before we touch LLVM, make sure that multithreading is enabled.
    unsafe {
        use std::sync::Once;
        static INIT: Once = Once::new();
        static mut POISONED: bool = false;
        INIT.call_once(|| {
            if llvm::LLVMStartMultithreaded() != 1 {
                // Use an extra bool to make sure that all future usage of LLVM
                // cannot proceed despite the Once not running more than once.
                POISONED = true;
            }

            ::back::write::configure_llvm(&tcx.sess);
        });

        if POISONED {
            tcx.sess.bug("couldn't enable multi-threaded LLVM");
        }
    }

    let link_meta = link::build_link_meta(&tcx.sess, krate, name);

    let codegen_units = tcx.sess.opts.cg.codegen_units;
    let shared_ccx = SharedCrateContext::new(&link_meta.crate_name,
                                             codegen_units,
                                             tcx,
                                             export_map,
                                             Sha256::new(),
                                             link_meta.clone(),
                                             reachable,
                                             check_overflow,
                                             check_dropflag);

    {
        let ccx = shared_ccx.get_ccx(0);

        // First, verify intrinsics.
        intrinsic::check_intrinsics(&ccx);

        // Next, translate the module.
        {
            let _icx = push_ctxt("text");
            trans_mod(&ccx, &krate.module);
        }
    }

    for ccx in shared_ccx.iter() {
        if ccx.sess().opts.debuginfo != NoDebugInfo {
            debuginfo::finalize(&ccx);
        }
        for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
            unsafe {
                let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
                llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
                llvm::LLVMDeleteGlobal(old_g);
            }
        }
    }

    let reachable_symbol_ids = filter_reachable_ids(&shared_ccx);

    // Translate the metadata.
    let metadata = write_metadata(&shared_ccx, krate, &reachable_symbol_ids);

    if shared_ccx.sess().trans_stats() {
        let stats = shared_ccx.stats();
        println!("--- trans stats ---");
        println!("n_glues_created: {}", stats.n_glues_created.get());
        println!("n_null_glues: {}", stats.n_null_glues.get());
        println!("n_real_glues: {}", stats.n_real_glues.get());

        println!("n_fns: {}", stats.n_fns.get());
        println!("n_monos: {}", stats.n_monos.get());
        println!("n_inlines: {}", stats.n_inlines.get());
        println!("n_closures: {}", stats.n_closures.get());
        println!("fn stats:");
        stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
            insns_b.cmp(&insns_a)
        });
        for tuple in stats.fn_stats.borrow().iter() {
            match *tuple {
                (ref name, insns) => {
                    println!("{} insns, {}", insns, *name);
                }
            }
        }
    }
    if shared_ccx.sess().count_llvm_insns() {
        for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
            println!("{:7} {}", *v, *k);
        }
    }

    let modules = shared_ccx.iter()
        .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
        .collect();

    let sess = shared_ccx.sess();
    let mut reachable_symbols = reachable_symbol_ids.iter().map(|id| {
        shared_ccx.item_symbols().borrow()[id].to_string()
    }).collect::<Vec<_>>();
    if sess.entry_fn.borrow().is_some() {
        reachable_symbols.push("main".to_string());
    }

    // For the purposes of LTO, we add to the reachable set all of the upstream
    // reachable extern fns. These functions are all part of the public ABI of
    // the final product, so LTO needs to preserve them.
    if sess.lto() {
        sess.cstore.iter_crate_data(|cnum, _| {
            let syms = csearch::get_reachable_ids(&sess.cstore, cnum);
            reachable_symbols.extend(syms.into_iter().filter(|did| {
                csearch::is_extern_fn(&sess.cstore, *did, shared_ccx.tcx())
            }).map(|did| {
                csearch::get_symbol(&sess.cstore, did)
            }));
        });
    }

    if codegen_units > 1 {
        internalize_symbols(&shared_ccx,
                            &reachable_symbols.iter().map(|x| &x[..]).collect());
    }

    if sess.target.target.options.is_like_msvc &&
       sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
        create_imps(&shared_ccx);
    }

    let metadata_module = ModuleTranslation {
        llcx: shared_ccx.metadata_llcx(),
        llmod: shared_ccx.metadata_llmod(),
    };
    let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");

    CrateTranslation {
        modules: modules,
        metadata_module: metadata_module,
        link: link_meta,
        metadata: metadata,
        reachable: reachable_symbols,
        no_builtins: no_builtins,
    }
}