1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![allow(non_camel_case_types, non_snake_case)]
13 //! Code that is useful in various trans modules.
15 pub use self::ExprOrMethodCall::*;
19 use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
20 use llvm::{True, False, Bool, OperandBundleDef};
23 use middle::def_id::DefId;
25 use middle::lang_items::LangItem;
26 use middle::subst::{self, Substs};
33 use trans::debuginfo::{self, DebugLoc};
36 use trans::monomorphize;
37 use trans::type_::Type;
40 use middle::ty::{self, Ty};
41 use middle::ty::fold::{TypeFolder, TypeFoldable};
43 use rustc::mir::repr::Mir;
44 use util::nodemap::{FnvHashMap, NodeMap};
46 use arena::TypedArena;
47 use libc::{c_uint, c_char};
48 use std::ffi::CString;
49 use std::cell::{Cell, RefCell};
52 use syntax::codemap::{DUMMY_SP, Span};
53 use syntax::parse::token::InternedString;
54 use syntax::parse::token;
56 pub use trans::context::CrateContext;
58 /// Is the type's representation size known at compile time?
// NOTE(review): checked in an empty parameter environment at DUMMY_SP, so
// this is only meaningful for fully-monomorphized types.
59 pub fn type_is_sized<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
60 ty.is_sized(&tcx.empty_parameter_environment(), DUMMY_SP)
// A "fat" pointer is a pointer or reference whose pointee type is unsized
// (e.g. slices, str, trait objects): it is represented as two words
// (data pointer + metadata) instead of one.
63 pub fn type_is_fat_ptr<'tcx>(cx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool {
65 ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
66 ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
// Fat iff the pointee is not `Sized`.
68 !type_is_sized(cx, ty)
// True if `ty` is a single-field ("newtype") struct whose sole field is
// itself immediate once its type is monomorphized with `substs`.
76 fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
78 ty::TyStruct(def, substs) => {
79 let fields = &def.struct_variant().fields;
// Exactly one field, and that field must itself be immediate.
80 fields.len() == 1 && {
81 type_is_immediate(ccx, monomorphize::field_ty(ccx.tcx(), substs, &fields[0]))
// Whether values of type `ty` are handled "by value" in an LLVM register
// (immediate) rather than indirectly through memory.
88 pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
89 use trans::machine::llsize_of_alloc;
90 use trans::type_of::sizing_type_of;
// Scalars, unique pointers, region pointers and immediate newtypes are
// the simple candidates...
93 let simple = ty.is_scalar() ||
94 ty.is_unique() || ty.is_region_ptr() ||
95 type_is_newtype_immediate(ccx, ty) ||
// ...but fat pointers are two words, so they are excluded here.
97 if simple && !type_is_fat_ptr(tcx, ty) {
100 if !type_is_sized(tcx, ty) {
// Aggregates count as immediate only when they fit within a single
// native-integer-sized allocation.
104 ty::TyStruct(..) | ty::TyEnum(..) | ty::TyTuple(..) | ty::TyArray(_, _) |
105 ty::TyClosure(..) => {
106 let llty = sizing_type_of(ccx, ty);
107 llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
109 _ => type_is_zero_size(ccx, ty)
113 /// Identify types which have size zero at runtime.
// Uses the cheaper "sizing" LLVM type, which has the right size and
// alignment but not necessarily the exact field layout.
114 pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
115 use trans::machine::llsize_of_alloc;
116 use trans::type_of::sizing_type_of;
117 let llty = sizing_type_of(ccx, ty);
118 llsize_of_alloc(ccx, llty) == 0
121 /// Identifies types which we declare to be equivalent to `void` in C for the purpose of function
122 /// return types. These are `()`, bot, uninhabited enums and all other zero-sized types.
123 pub fn return_type_is_void<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
// `()`, uninhabited ("empty") types, or any other zero-sized type.
124 ty.is_nil() || ty.is_empty(ccx.tcx()) || type_is_zero_size(ccx, ty)
127 /// Generates a unique symbol based off the name given. This is used to create
128 /// unique symbols for things like closures.
129 pub fn gensym_name(name: &str) -> ast::Name {
// The first gensym supplies a crate-unique number for this name.
130 let num = token::gensym(name).0;
131 // use one colon which will get translated to a period by the mangler, and
132 // we're guaranteed that `num` is globally unique for this crate.
133 token::gensym(&format!("{}:{}", name, num))
137 * A note on nomenclature of linking: "extern", "foreign", and "upcall".
139 * An "extern" is an LLVM symbol we wind up emitting an undefined external
140 * reference to. This means "we don't have the thing in this compilation unit,
141 * please make sure you link it in at runtime". This could be a reference to
142 * C code found in a C library, or rust code found in a rust crate.
144 * Most "externs" are implicitly declared (automatically) as a result of a
145 * user declaring an extern _module_ dependency; this causes the rust driver
146 * to locate an extern crate, scan its compilation metadata, and emit extern
147 * declarations for any symbols used by the declaring crate.
149 * A "foreign" is an extern that references C (or other non-rust ABI) code.
150 * There is no metadata to scan for extern references so in these cases either
151 * a header-digester like bindgen, or manual function prototypes, have to
152 * serve as declarators. So these are usually given explicitly as prototype
153 * declarations, in rust code, with ABI attributes on them noting which ABI to
156 * An "upcall" is a foreign call generated by the compiler (not corresponding
157 * to any user-written call in the code) into the runtime library, to perform
158 * some helper task such as bringing a task to life, allocating memory, etc.
164 #[derive(Copy, Clone)]
// Pairs an AST NodeId with its source span (field declarations are elided
// from this view; see the constructor below for the field names).
165 pub struct NodeIdAndSpan {
// Convenience constructor capturing an expression's id and span.
170 pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan {
171 NodeIdAndSpan { id: expr.id, span: expr.span }
174 /// The concrete version of ty::FieldDef. The name is the field index if
175 /// the field is numeric.
176 pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
178 /// The concrete version of ty::VariantDef
// Holds the monomorphized (name, type) pairs for one variant's fields.
179 pub struct VariantInfo<'tcx> {
181 pub fields: Vec<Field<'tcx>>
184 impl<'tcx> VariantInfo<'tcx> {
// Build a VariantInfo from a struct, enum or tuple type; `opt_def`
// selects the enum variant when one is known.
185 pub fn from_ty(tcx: &ty::ctxt<'tcx>,
187 opt_def: Option<Def>)
191 ty::TyStruct(adt, substs) | ty::TyEnum(adt, substs) => {
192 let variant = match opt_def {
// Without a def this must be a struct, which has exactly one variant.
193 None => adt.struct_variant(),
194 Some(def) => adt.variant_of_def(def)
198 discr: Disr::from(variant.disr_val),
// Monomorphize each field's type with the concrete substs.
199 fields: variant.fields.iter().map(|f| {
200 Field(f.name, monomorphize::field_ty(tcx, substs, f))
// Tuples act like a single variant whose field names are the indices.
205 ty::TyTuple(ref v) => {
208 fields: v.iter().enumerate().map(|(i, &t)| {
209 Field(token::intern(&i.to_string()), t)
// Any other type cannot yield field information: compiler bug.
215 tcx.sess.bug(&format!(
216 "cannot get field types from the type {:?}",
222 /// Return the variant corresponding to a given node (e.g. expr)
223 pub fn of_node(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
224 let node_def = tcx.def_map.borrow().get(&id).map(|v| v.full_def());
225 Self::from_ty(tcx, ty, node_def)
// Position of the field called `name`; panics if no such field exists.
228 pub fn field_index(&self, name: ast::Name) -> usize {
229 self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| {
230 panic!("unknown field `{}`", name)
// RAII wrapper for an LLVM BuilderRef: disposes the builder when dropped.
235 pub struct BuilderRef_res {
239 impl Drop for BuilderRef_res {
// Release the underlying LLVM builder; `b` is owned by this wrapper.
242 llvm::LLVMDisposeBuilder(self.b);
// Free-function constructor (pre-1.0 style, named after the type).
247 pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res {
// Map from symbol name to its LLVM ValueRef for external references.
253 pub type ExternMap = FnvHashMap<String, ValueRef>;
// Sanity check: substitutions reaching trans must contain no unresolved
// inference variables.
255 pub fn validate_substs(substs: &Substs) {
256 assert!(!substs.types.needs_infer());
// Shorthand aliases for the two datum kinds used throughout trans.
259 // work around bizarre resolve errors
260 type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
261 pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
263 #[derive(Clone, Debug)]
// A single entry in the drop-flag-hint table.
264 struct HintEntry<'tcx> {
265 // The datum for the dropflag-hint itself; note that many
266 // source-level Lvalues will be associated with the same
267 // dropflag-hint datum.
268 datum: cleanup::DropHintDatum<'tcx>,
// Per-function table of drop-flag hints, keyed by expression NodeId.
271 pub struct DropFlagHintsMap<'tcx> {
272 // Maps NodeId for expressions that read/write unfragmented state
273 // to that state's drop-flag "hint." (A stack-local hint
274 // indicates either that (1.) it is certain that no-drop is
275 // needed, or (2.) inline drop-flag must be consulted.)
276 node_map: NodeMap<HintEntry<'tcx>>,
279 impl<'tcx> DropFlagHintsMap<'tcx> {
// Construct an empty hint table.
280 pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
// Whether a hint has been recorded for `id`.
281 pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
// Record the hint datum for `id` (overwrites any previous entry).
282 pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
283 self.node_map.insert(id, HintEntry { datum: datum });
// Copy out the hint datum for `id`, if one was recorded.
285 pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
286 self.node_map.get(&id).map(|t|t.datum)
290 // Function context. Every LLVM function we create will have one of
292 pub struct FunctionContext<'a, 'tcx: 'a> {
293 // The MIR for this function. At present, this is optional because
294 // we only have MIR available for things that are local to the
296 pub mir: Option<&'a Mir<'tcx>>,
298 // The ValueRef returned from a call to llvm::LLVMAddFunction; the
299 // address of the first instruction in the sequence of
300 // instructions for this function that will go in the .text
301 // section of the executable we're generating.
304 // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
305 pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
307 // The environment argument in a closure.
308 pub llenv: Option<ValueRef>,
310 // A pointer to where to store the return value. If the return type is
311 // immediate, this points to an alloca in the function. Otherwise, it's a
312 // pointer to the hidden first parameter of the function. After function
313 // construction, this should always be Some.
314 pub llretslotptr: Cell<Option<ValueRef>>,
316 // These pub elements: "hoisted basic blocks" containing
317 // administrative activities that have to happen in only one place in
318 // the function, due to LLVM's quirks.
319 // A marker for the place where we want to insert the function's static
320 // allocas, so that LLVM will coalesce them into a single alloca call.
321 pub alloca_insert_pt: Cell<Option<ValueRef>>,
322 pub llreturn: Cell<Option<BasicBlockRef>>,
324 // If the function has any nested return's, including something like:
325 // fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
326 // we use a separate alloca for each return
327 pub needs_ret_allocas: bool,
329 // When working with landingpad-based exceptions this value is alloca'd and
330 // later loaded when using the resume instruction. This ends up being
331 // critical to chaining landing pads and reusing already-translated
334 // Note that for cleanuppad-based exceptions this is not used.
335 pub landingpad_alloca: Cell<Option<ValueRef>>,
337 // True if the caller expects this fn to use the out pointer to
338 // return. Either way, your code should write into the slot llretslotptr
339 // points to, but if this value is false, that slot will be a local alloca.
340 pub caller_expects_out_pointer: bool,
342 // Maps the DefId's for local variables to the allocas created for
343 // them in llallocas.
344 pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
346 // Same as above, but for closure upvars
347 pub llupvars: RefCell<NodeMap<ValueRef>>,
349 // Carries info about drop-flags for local bindings (longer term,
350 // paths) for the code being compiled.
351 pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
353 // The NodeId of the function, or -1 if it doesn't correspond to
354 // a user-defined function.
357 // If this function is being monomorphized, this contains the type
358 // substitutions used.
359 pub param_substs: &'tcx Substs<'tcx>,
361 // The source span and nesting context where this function comes from, for
362 // error reporting and symbol generation.
363 pub span: Option<Span>,
365 // The arena that blocks are allocated from.
366 pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
368 // This function's enclosing crate context.
369 pub ccx: &'a CrateContext<'a, 'tcx>,
371 // Used and maintained by the debuginfo module.
372 pub debug_context: debuginfo::FunctionDebugContext,
// Cleanup scopes currently in force, innermost last.
375 pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
// Control-flow graph for this function, when one was computed.
377 pub cfg: Option<cfg::CFG>,
380 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
// Access the MIR; only valid when `self.mir` is Some.
381 pub fn mir(&self) -> &'a Mir<'tcx> {
// Index of the first "real" argument, past the out-pointer and the
// closure environment (when present).
385 pub fn arg_offset(&self) -> usize {
386 self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 }
// Index of the closure-environment argument: shifted by one when the
// hidden out-pointer occupies slot 0.
389 pub fn env_arg_pos(&self) -> usize {
390 if self.caller_expects_out_pointer {
// Tear down per-function state; erases the alloca insertion marker.
397 pub fn cleanup(&self) {
399 llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
// Lazily create (then cache) the function's shared return block.
405 pub fn get_llreturn(&self) -> BasicBlockRef {
406 if self.llreturn.get().is_none() {
408 self.llreturn.set(Some(unsafe {
409 llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
410 "return\0".as_ptr() as *const _)
414 self.llreturn.get().unwrap()
// Obtain the return-value slot: a fresh alloca when nested returns
// require separate slots, otherwise the cached llretslotptr.
417 pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>,
418 output: ty::FnOutput<'tcx>,
419 name: &str) -> ValueRef {
420 if self.needs_ret_allocas {
421 base::alloca(bcx, match output {
422 ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type),
423 ty::FnDiverging => Type::void(bcx.ccx())
426 self.llretslotptr.get().unwrap()
// Append a new basic block to this function and wrap it in a BlockS.
430 pub fn new_block(&'a self,
432 opt_node_id: Option<ast::NodeId>)
435 let name = CString::new(name).unwrap();
436 let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
439 BlockS::new(llbb, opt_node_id, self)
// Convenience: new block associated with an AST node.
443 pub fn new_id_block(&'a self,
445 node_id: ast::NodeId)
447 self.new_block(name, Some(node_id))
// Convenience: new block with no associated AST node.
450 pub fn new_temp_block(&'a self,
453 self.new_block(name, None)
// Merge several predecessor blocks into a fresh "join" block; the join
// block is marked unreachable if no predecessor can reach it.
456 pub fn join_blocks(&'a self,
458 in_cxs: &[Block<'a, 'tcx>])
460 let out = self.new_id_block("join", id);
461 let mut reachable = false;
463 if !bcx.unreachable.get() {
464 build::Br(*bcx, out.llbb, DebugLoc::None);
469 build::Unreachable(out);
// Apply this function's parameter substitutions to `value`.
474 pub fn monomorphize<T>(&self, value: &T) -> T
475 where T : TypeFoldable<'tcx>
477 monomorphize::apply_param_substs(self.ccx.tcx(),
482 /// This is the same as `common::type_needs_drop`, except that it
483 /// may use or update caches within this `FunctionContext`.
484 pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
485 self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env)
488 pub fn eh_personality(&self) -> ValueRef {
489 // The exception handling personality function.
491 // If our compilation unit has the `eh_personality` lang item somewhere
492 // within it, then we just need to translate that. Otherwise, we're
493 // building an rlib which will depend on some upstream implementation of
494 // this function, so we just codegen a generic reference to it. We don't
495 // specify any of the types for the function, we just make it a symbol
496 // that LLVM can later use.
498 // Note that MSVC is a little special here in that we don't use the
499 // `eh_personality` lang item at all. Currently LLVM has support for
500 // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
501 // *name of the personality function* to decide what kind of unwind side
502 // tables/landing pads to emit. It looks like Dwarf is used by default,
503 // injecting a dependency on the `_Unwind_Resume` symbol for resuming
504 // an "exception", but for MSVC we want to force SEH. This means that we
505 // can't actually have the personality function be our standard
506 // `rust_eh_personality` function, but rather we wired it up to the
507 // CRT's custom personality function, which forces LLVM to consider
508 // landing pads as "landing pads for SEH".
509 let target = &self.ccx.sess().target.target;
510 match self.ccx.tcx().lang_items.eh_personality() {
511 Some(def_id) if !base::wants_msvc_seh(self.ccx.sess()) => {
512 callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
513 self.param_substs).val
// No usable lang item: declare (once, cached) an external symbol
// whose name selects the unwind mechanism.
516 let mut personality = self.ccx.eh_personality().borrow_mut();
518 Some(llpersonality) => llpersonality,
520 let name = if !base::wants_msvc_seh(self.ccx.sess()) {
521 "rust_eh_personality"
522 } else if target.arch == "x86" {
525 "__C_specific_handler"
527 let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
528 let f = declare::declare_cfn(self.ccx, name, fty,
529 self.ccx.tcx().types.i32);
530 *personality = Some(f);
538 // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
539 // otherwise declares it as an external function.
540 pub fn eh_unwind_resume(&self) -> ValueRef {
541 use trans::attributes;
542 assert!(self.ccx.sess().target.target.options.custom_unwind_resume);
543 match self.ccx.tcx().lang_items.eh_unwind_resume() {
545 callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
546 self.param_substs).val
// No lang item: declare (once, cached) the external diverging symbol.
549 let mut unwresume = self.ccx.eh_unwind_resume().borrow_mut();
553 let fty = Type::func(&[Type::i8p(self.ccx)], &Type::void(self.ccx));
554 let llfn = declare::declare_fn(self.ccx,
555 "rust_eh_unwind_resume",
557 fty, ty::FnDiverging);
558 attributes::unwind(llfn, true);
559 *unwresume = Some(llfn);
568 // Basic block context. We create a block context for each basic block
569 // (single-entry, single-exit sequence of instructions) we generate from Rust
570 // code. Each basic block we generate is attached to a function, typically
571 // with many basic blocks per function. All the basic blocks attached to a
572 // function are organized as a directed graph.
573 pub struct BlockS<'blk, 'tcx: 'blk> {
574 // The BasicBlockRef returned from a call to
575 // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
576 // block to the function pointed to by llfn. We insert
577 // instructions into that block by way of this block context.
578 // The block pointing to this one in the function's digraph.
579 pub llbb: BasicBlockRef,
// Whether a terminator instruction has been emitted for this block.
580 pub terminated: Cell<bool>,
// Whether this block has been determined to be unreachable.
581 pub unreachable: Cell<bool>,
583 // If this block part of a landing pad, then this is `Some` indicating what
584 // kind of landing pad its in, otherwise this is none.
585 pub lpad: RefCell<Option<LandingPad>>,
587 // AST node-id associated with this block, if any. Used for
588 // debugging purposes only.
589 pub opt_node_id: Option<ast::NodeId>,
591 // The function context for the function to which this block is
593 pub fcx: &'blk FunctionContext<'blk, 'tcx>,
// Blocks are passed by shared reference; the BlockS values themselves are
// allocated from the FunctionContext's block arena.
596 pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
598 impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
// Allocate a fresh BlockS in the function's arena, wrapping an LLVM
// basic block.
599 pub fn new(llbb: BasicBlockRef,
600 opt_node_id: Option<ast::NodeId>,
601 fcx: &'blk FunctionContext<'blk, 'tcx>)
602 -> Block<'blk, 'tcx> {
603 fcx.block_arena.alloc(BlockS {
605 terminated: Cell::new(false),
606 unreachable: Cell::new(false),
607 lpad: RefCell::new(None),
608 opt_node_id: opt_node_id,
// Accessors that delegate to the enclosing function/crate context.
613 pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
616 pub fn tcx(&self) -> &'blk ty::ctxt<'tcx> {
619 pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
// MIR for the enclosing function; panics if none is available.
621 pub fn mir(&self) -> &'blk Mir<'tcx> {
// Render an interned name as an owned String.
625 pub fn name(&self, name: ast::Name) -> String {
// Pretty-print an AST node for diagnostics.
629 pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
630 self.tcx().map.node_to_string(id).to_string()
// Resolve a node id to its definition; compiler bug if unresolved.
633 pub fn def(&self, nid: ast::NodeId) -> Def {
634 match self.tcx().def_map.borrow().get(&nid) {
635 Some(v) => v.full_def(),
637 self.tcx().sess.bug(&format!(
638 "no def associated with node id {}", nid));
// Debug helpers: stringify LLVM values and types.
643 pub fn val_to_string(&self, val: ValueRef) -> String {
644 self.ccx().tn().val_to_string(val)
647 pub fn llty_str(&self, ty: Type) -> String {
648 self.ccx().tn().type_to_string(ty)
// Identify this block by its address, for debug output.
651 pub fn to_str(&self) -> String {
652 format!("[block {:p}]", self)
// Apply the enclosing function's parameter substitutions to `value`.
655 pub fn monomorphize<T>(&self, value: &T) -> T
656 where T : TypeFoldable<'tcx>
658 monomorphize::apply_param_substs(self.tcx(),
659 self.fcx.param_substs,
664 /// A structure representing an active landing pad for the duration of a basic
667 /// Each `Block` may contain an instance of this, indicating whether the block
668 /// is part of a landing pad or not. This is used to make decision about whether
669 /// to emit `invoke` instructions (e.g. in a landing pad we don't continue to
670 /// use `invoke`) and also about various function call metadata.
672 /// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
673 /// just a bunch of `None` instances (not too interesting), but for MSVC
674 /// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
675 /// When inside of a landing pad, each function call in LLVM IR needs to be
676 /// annotated with which landing pad it's a part of. This is accomplished via
677 /// the `OperandBundleDef` value created for MSVC landing pads.
678 pub struct LandingPad {
679 cleanuppad: Option<ValueRef>,
680 operand: Option<OperandBundleDef>,
// A GNU-style landing pad carries no extra data.
684 pub fn gnu() -> LandingPad {
685 LandingPad { cleanuppad: None, operand: None }
// An MSVC-style pad records its cleanuppad and the "funclet" operand
// bundle that every call inside the pad must carry.
688 pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
690 cleanuppad: Some(cleanuppad),
691 operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
// The operand bundle to attach to calls, when one exists (MSVC only).
695 pub fn bundle(&self) -> Option<&OperandBundleDef> {
696 self.operand.as_ref()
// Manual Clone: the operand bundle is rebuilt from the cleanuppad rather
// than copied — presumably OperandBundleDef itself is not Clone (owns an
// LLVM handle); verify against the llvm bindings.
700 impl Clone for LandingPad {
701 fn clone(&self) -> LandingPad {
703 cleanuppad: self.cleanuppad,
704 operand: self.cleanuppad.map(|p| {
705 OperandBundleDef::new("funclet", &[p])
// A (block, value) pair: the block in which translation continues plus the
// LLVM value produced.
711 pub struct Result<'blk, 'tcx: 'blk> {
712 pub bcx: Block<'blk, 'tcx>,
716 impl<'b, 'tcx> Result<'b, 'tcx> {
717 pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
// The LLVM type of a value.
725 pub fn val_ty(v: ValueRef) -> Type {
727 Type::from_ref(llvm::LLVMTypeOf(v))
731 // LLVM constant constructors.
// All-zero constant of type `t`.
732 pub fn C_null(t: Type) -> ValueRef {
734 llvm::LLVMConstNull(t.to_ref())
// Undefined value of type `t`.
738 pub fn C_undef(t: Type) -> ValueRef {
740 llvm::LLVMGetUndef(t.to_ref())
// Integer constant; `sign_extend` controls widening of `u` into `t`.
744 pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
746 llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool)
// Floating-point constant parsed from its textual representation.
750 pub fn C_floating(s: &str, t: Type) -> ValueRef {
752 let s = CString::new(s).unwrap();
753 llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
// Floating-point constant from an f64.
757 pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
759 llvm::LLVMConstReal(t.to_ref(), f)
// The unit value `()`: an empty, non-packed struct constant.
763 pub fn C_nil(ccx: &CrateContext) -> ValueRef {
764 C_struct(ccx, &[], false)
// Booleans are i1 in LLVM.
767 pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef {
768 C_integral(Type::i1(ccx), val as u64, false)
// Fixed-width integer constants (signed variants sign-extend).
771 pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef {
772 C_integral(Type::i32(ccx), i as u64, true)
775 pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef {
776 C_integral(Type::i32(ccx), i as u64, false)
779 pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
780 C_integral(Type::i64(ccx), i, false)
// Constant of the target's native `int` type; asserts the value fits.
783 pub fn C_int<I: AsI64>(ccx: &CrateContext, i: I) -> ValueRef {
786 let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
789 // make sure it doesn't overflow
790 assert!(v < (1<<(bit_size-1)) && v >= -(1<<(bit_size-1)));
793 C_integral(ccx.int_type(), v as u64, true)
// Constant of the target's native `uint` type; asserts the value fits.
796 pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
799 let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
802 // make sure it doesn't overflow
803 assert!(v < (1<<bit_size));
806 C_integral(ccx.int_type(), v, false)
// Widening conversions used by C_int/C_uint to accept several integer types.
809 pub trait AsI64 { fn as_i64(self) -> i64; }
810 pub trait AsU64 { fn as_u64(self) -> u64; }
812 // FIXME: remove the intptr conversions, because they
813 // are host-architecture-dependent
814 impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }}
815 impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }}
816 impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 }}
818 impl AsU64 for u64 { fn as_u64(self) -> u64 { self as u64 }}
819 impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 }}
820 impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 }}
// i8 constant from an unsigned byte.
822 pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef {
823 C_integral(Type::i8(ccx), i as u64, false)
827 // This is a 'c-like' raw string, which differs from
828 // our boxed-and-length-annotated strings.
829 pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
// Deduplicate: return the cached global if this string was emitted before.
831 match cx.const_cstr_cache().borrow().get(&s) {
832 Some(&llval) => return llval,
// LLVM's flag means "don't null-terminate", hence the negation.
836 let sc = llvm::LLVMConstStringInContext(cx.llcx(),
837 s.as_ptr() as *const c_char,
839 !null_terminated as Bool);
// Emit as an internal, constant global with a gensym'd "str…" name.
841 let gsym = token::gensym("str");
842 let sym = format!("str{}", gsym.0);
843 let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{
844 cx.sess().bug(&format!("symbol `{}` is already defined", sym));
846 llvm::LLVMSetInitializer(g, sc);
847 llvm::LLVMSetGlobalConstant(g, True);
848 llvm::SetLinkage(g, llvm::InternalLinkage);
850 cx.const_cstr_cache().borrow_mut().insert(s, g);
855 // NB: Do not use `do_spill_noroot` to make this into a constant string, or
856 // you will be kicked off fast isel. See issue #4352 for an example of this.
// Builds a `&str`-shaped constant: (ptr, len) packed into the named
// "str_slice" struct type.
857 pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
859 let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx));
860 C_named_struct(cx.tn().find_type("str_slice").unwrap(), &[cs, C_uint(cx, len)])
// Anonymous struct constant in the crate's LLVM context.
863 pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
864 C_struct_in_context(cx.llcx(), elts, packed)
// Anonymous struct constant in an explicit LLVM context.
867 pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
869 llvm::LLVMConstStructInContext(llcx,
870 elts.as_ptr(), elts.len() as c_uint,
// Constant of a pre-declared named struct type `t`.
875 pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
877 llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
// Constant array with element type `ty`.
881 pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
883 return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
// Constant SIMD vector; element type is inferred from `elts`.
887 pub fn C_vector(elts: &[ValueRef]) -> ValueRef {
889 return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
// Constant byte string (no null terminator appended — final flag is True,
// i.e. "don't null-terminate").
893 pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
894 C_bytes_in_context(cx.llcx(), bytes)
897 pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
899 let ptr = bytes.as_ptr() as *const c_char;
900 return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
// Extract the element at index path `us` from a constant aggregate.
904 pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint])
907 let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
909 debug!("const_get_elt(v={}, us={:?}, r={})",
910 cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
// Signed value of a constant integer (undefined if `v` is not one —
// see the checked `const_to_opt_int` below).
916 pub fn const_to_int(v: ValueRef) -> i64 {
918 llvm::LLVMConstIntGetSExtValue(v)
// Unsigned value of a constant integer.
922 pub fn const_to_uint(v: ValueRef) -> u64 {
924 llvm::LLVMConstIntGetZExtValue(v)
// Whether `v` is an LLVM ConstantInt.
928 fn is_const_integral(v: ValueRef) -> bool {
930 !llvm::LLVMIsAConstantInt(v).is_null()
// Checked variants: None when `v` is not a constant integer.
934 pub fn const_to_opt_int(v: ValueRef) -> Option<i64> {
936 if is_const_integral(v) {
937 Some(llvm::LLVMConstIntGetSExtValue(v))
944 pub fn const_to_opt_uint(v: ValueRef) -> Option<u64> {
946 if is_const_integral(v) {
947 Some(llvm::LLVMConstIntGetZExtValue(v))
// Predicates on LLVM values.
954 pub fn is_undef(val: ValueRef) -> bool {
956 llvm::LLVMIsUndef(val) != False
960 #[allow(dead_code)] // potentially useful
961 pub fn is_null(val: ValueRef) -> bool {
963 llvm::LLVMIsNull(val) != False
// Apply the enclosing function's substitutions to a type.
967 pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
968 bcx.fcx.monomorphize(&t)
// Monomorphized type of an arbitrary AST node.
971 pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
973 let t = tcx.node_id_to_type(id);
974 monomorphize_type(bcx, t)
// Monomorphized type of an expression (unadjusted).
977 pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
978 node_id_type(bcx, ex.id)
// Monomorphized type of an expression after autoderef/coercion adjustments.
981 pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
982 monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
985 /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
986 /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
987 /// guarantee to us that all nested obligations *could be* resolved if we wanted to.
988 pub fn fulfill_obligation<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
990 trait_ref: ty::PolyTraitRef<'tcx>)
991 -> traits::Vtable<'tcx, ()>
995 // Remove any references to regions; this helps improve caching.
996 let trait_ref = tcx.erase_regions(&trait_ref);
998 // First check the cache.
999 match ccx.trait_cache().borrow().get(&trait_ref) {
1001 info!("Cache hit: {:?}", trait_ref);
1002 return (*vtable).clone();
1007 debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}",
1008 trait_ref, trait_ref.def_id());
1011 // Do the initial selection for the obligation. This yields the
1012 // shallow result we are looking for -- that is, what specific impl.
1013 let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
1014 let mut selcx = traits::SelectionContext::new(&infcx);
1017 traits::Obligation::new(traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID),
1018 trait_ref.to_poly_trait_predicate());
1019 let selection = match selcx.select(&obligation) {
1020 Ok(Some(selection)) => selection,
1022 // Ambiguity can happen when monomorphizing during trans
1023 // expands to some humongo type that never occurred
1024 // statically -- this humongo type can then overflow,
1025 // leading to an ambiguous result. So report this as an
1026 // overflow bug, since I believe this is the only case
1027 // where ambiguity can result.
1028 debug!("Encountered ambiguity selecting `{:?}` during trans, \
1029 presuming due to overflow",
1031 ccx.sess().span_fatal(
1033 "reached the recursion limit during monomorphization (selection ambiguity)");
// Any other selection error at this stage is a compiler bug.
1038 &format!("Encountered error `{:?}` selecting `{:?}` during trans",
1044 // Currently, we use a fulfillment context to completely resolve
1045 // all nested obligations. This is because they can inform the
1046 // inference of the impl's type parameters.
1047 let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
1048 let vtable = selection.map(|predicate| {
1049 fulfill_cx.register_predicate_obligation(&infcx, predicate);
1051 let vtable = infer::drain_fulfillment_cx_or_panic(
1052 span, &infcx, &mut fulfill_cx, &vtable
// Record the fully-resolved vtable for future lookups.
1055 info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
1057 ccx.trait_cache().borrow_mut().insert(trait_ref, vtable.clone());
1062 /// Normalizes the predicates and checks whether they hold. If this
1063 /// returns false, then either normalize encountered an error or one
1064 /// of the predicates did not hold. Used when creating vtables to
1065 /// check for unsatisfiable methods.
1066 pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1067 predicates: Vec<ty::Predicate<'tcx>>)
1070 debug!("normalize_and_test_predicates(predicates={:?})",
1073 let tcx = ccx.tcx();
1074 let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables);
1075 let mut selcx = traits::SelectionContext::new(&infcx);
1076 let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut();
1077 let cause = traits::ObligationCause::dummy();
// Normalize first; register the obligations produced by normalization...
1078 let traits::Normalized { value: predicates, obligations } =
1079 traits::normalize(&mut selcx, cause.clone(), &predicates);
1080 for obligation in obligations {
1081 fulfill_cx.register_predicate_obligation(&infcx, obligation);
// ...then register the normalized predicates themselves.
1083 for predicate in predicates {
1084 let obligation = traits::Obligation::new(cause.clone(), predicate);
1085 fulfill_cx.register_predicate_obligation(&infcx, obligation);
// Everything holds iff the fulfillment context drains without error.
1088 infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok()
1091 // Key used to lookup values supplied for type parameters in an expr.
1092 #[derive(Copy, Clone, PartialEq, Debug)]
1093 pub enum ExprOrMethodCall {
1094 // Type parameters for a path like `None::<int>`
1095 ExprId(ast::NodeId),
1097 // Type parameters for a method call like `a.foo::<int>()`
1098 MethodCallKey(ty::MethodCall)
// Look up and monomorphize the type substitutions recorded for an
// expression or method call.
1101 pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1102 node: ExprOrMethodCall,
1103 param_substs: &subst::Substs<'tcx>)
1104 -> subst::Substs<'tcx> {
1105 let tcx = ccx.tcx();
1107 let substs = match node {
1109 tcx.node_id_item_substs(id).substs
1111 MethodCallKey(method_call) => {
1112 tcx.tables.borrow().method_map[&method_call].substs.clone()
// Inference variables must not survive to trans: compiler bug.
1116 if substs.types.needs_infer() {
1117 tcx.sess.bug(&format!("type parameters for node {:?} include inference types: {:?}",
// Substitute the enclosing function's params and erase regions.
1121 monomorphize::apply_param_substs(tcx,
1123 &substs.erase_regions())
// Require a lang item, aborting compilation with a contextual message
// (at `span` when one is available) if it is missing.
1126 pub fn langcall(bcx: Block,
1131 match bcx.tcx().lang_items.require(li) {
1134 let msg = format!("{} {}", msg, s);
1136 Some(span) => bcx.tcx().sess.span_fatal(span, &msg[..]),
1137 None => bcx.tcx().sess.fatal(&msg[..]),
1143 /// Return the VariantDef corresponding to an inlined variant node
1144 pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1145 inlined_vid: ast::NodeId)
1146 -> ty::VariantDef<'tcx>
1149 let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid);
1150 debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty,
// The ADT is recovered from the constructor fn's return type.
1152 let adt_def = match ctor_ty.sty {
1153 ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig {
1154 output: ty::FnConverging(ty), ..
1157 }.ty_adt_def().unwrap();
1158 let inlined_vid_def_id = ccx.tcx().map.local_def_id(inlined_vid);
// Match either directly by DefId or via the external-items map for
// cross-crate inlined nodes.
1159 adt_def.variants.iter().find(|v| {
1160 inlined_vid_def_id == v.did ||
1161 ccx.external().borrow().get(&v.did) == Some(&Some(inlined_vid))
1162 }).unwrap_or_else(|| {
1163 ccx.sess().bug(&format!("no variant for {:?}::{}", adt_def, inlined_vid))
1167 // To avoid UB from LLVM, these two functions mask RHS with an
1168 // appropriate mask unconditionally (i.e. the fallback behavior for
1169 // all shifts). For 32- and 64-bit types, this matches the semantics
1170 // of Java. (See related discussion on #1877 and #10183.)
// Left shift with the RHS masked to the type's bit-width.
1172 pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1175 binop_debug_loc: DebugLoc) -> ValueRef {
1176 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
1177 // #1877, #10183: Ensure that input is always valid
1178 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
1179 build::Shl(bcx, lhs, rhs, binop_debug_loc)
// Right shift with the RHS masked; arithmetic for signed LHS,
// logical for unsigned.
1182 pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1186 binop_debug_loc: DebugLoc) -> ValueRef {
1187 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
1188 // #1877, #10183: Ensure that input is always valid
1189 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
1190 let is_signed = lhs_t.is_signed();
1192 build::AShr(bcx, lhs, rhs, binop_debug_loc)
1194 build::LShr(bcx, lhs, rhs, binop_debug_loc)
// AND the shift amount with (bit-width - 1) so it can never exceed the
// width of the value being shifted.
1198 fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1200 debug_loc: DebugLoc) -> ValueRef {
1201 let rhs_llty = val_ty(rhs);
1202 build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
// Mask constant for shifts on `llty`, built at type `mask_llty`;
// `invert` yields the bitwise complement. Recurses element-wise for
// vector types.
1205 pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1208 invert: bool) -> ValueRef {
1209 let kind = llty.kind();
1211 TypeKind::Integer => {
1212 // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
1213 let val = llty.int_width() - 1;
1215 C_integral(mask_llty, !val, true)
1217 C_integral(mask_llty, val, false)
1220 TypeKind::Vector => {
// Build the scalar mask, then splat it across the vector lanes.
1221 let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
1222 build::VectorSplat(bcx, mask_llty.vector_length(), mask)
1224 _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
// LLVM value for a static item: translated locally when the DefId is in
// this crate, otherwise declared as an external constant.
1228 pub fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1232 if let Some(node_id) = ccx.tcx().map.as_local_node_id(did) {
1233 base::get_item_val(ccx, node_id)
1235 base::get_extern_const(ccx, did, ty)