1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![allow(non_camel_case_types, non_snake_case)]
13 //! Code that is useful in various trans modules.
17 use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
18 use llvm::{True, False, Bool, OperandBundleDef};
19 use rustc::hir::def::Def;
20 use rustc::hir::def_id::DefId;
21 use rustc::infer::TransNormalize;
22 use rustc::util::common::MemoizationMap;
23 use middle::lang_items::LangItem;
24 use rustc::ty::subst::Substs;
25 use abi::{Abi, FnType};
32 use debuginfo::{self, DebugLoc};
39 use rustc::ty::{self, Ty, TyCtxt};
40 use rustc::ty::layout::Layout;
41 use rustc::traits::{self, SelectionContext, Reveal};
42 use rustc::ty::fold::TypeFoldable;
45 use arena::TypedArena;
46 use libc::{c_uint, c_char};
48 use std::ffi::CString;
49 use std::cell::{Cell, RefCell};
52 use syntax::parse::token::InternedString;
53 use syntax::parse::token;
54 use syntax_pos::{DUMMY_SP, Span};
56 pub use context::{CrateContext, SharedCrateContext};
58 /// Is the type's representation size known at compile time?
59 pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
60 ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP)
// True when `ty` is a pointer/reference whose pointee is unsized (i.e. is
// represented as a fat pointer: data pointer plus metadata).
// NOTE(review): this listing is elided — the match header, at least one
// further arm, and the closing braces are missing from view.
63 pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
65 ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
66 ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
// Fat iff the pointee is not Sized.
68 !type_is_sized(tcx, ty)
// Whether a value of `ty` is passed around as an LLVM immediate (scalar,
// thin pointer, or word-sized aggregate) rather than through memory.
// NOTE(review): several interior lines (the rest of the `simple` condition,
// the match header, and closing braces) are elided from this listing.
76 pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
77 use machine::llsize_of_alloc;
78 use type_of::sizing_type_of;
81 let simple = ty.is_scalar() ||
82 ty.is_unique() || ty.is_region_ptr() ||
// Simple thin pointers/scalars are immediate; fat pointers are not.
84 if simple && !type_is_fat_ptr(tcx, ty) {
87 if !type_is_sized(tcx, ty) {
91 ty::TyAdt(..) | ty::TyTuple(..) | ty::TyArray(..) | ty::TyClosure(..) => {
92 let llty = sizing_type_of(ccx, ty);
// Aggregates count as immediate only when they fit in a machine word.
93 llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
95 _ => type_is_zero_size(ccx, ty)
99 /// Returns Some([a, b]) if the type has a pair of fields with types a and b.
// NOTE(review): match header, early-return `None` paths, and closing braces
// are elided from this listing.
100 pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
101 -> Option<[Ty<'tcx>; 2]> {
103 ty::TyAdt(adt, substs) => {
// Single-variant ADTs only; enums with multiple variants are rejected
// before reaching this assert.
104 assert_eq!(adt.variants.len(), 1);
105 let fields = &adt.variants[0].fields;
106 if fields.len() != 2 {
// Substitute generics into each field's declared type.
109 Some([monomorphize::field_ty(ccx.tcx(), substs, &fields[0]),
110 monomorphize::field_ty(ccx.tcx(), substs, &fields[1])])
// Closures are handled like tuples of their upvar types.
112 ty::TyClosure(_, ty::ClosureSubsts { upvar_tys: tys, .. }) |
113 ty::TyTuple(tys) => {
117 Some([tys[0], tys[1]])
123 /// Returns true if the type is represented as a pair of immediates.
// NOTE(review): fall-through arms and closing braces are elided from
// this listing.
124 pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
126 match *ccx.layout_of(ty) {
// Fat pointers are always (data, metadata) immediate pairs.
127 Layout::FatPointer { .. } => true,
128 Layout::Univariant { ref variant, .. } => {
129 // There must be only 2 fields.
130 if variant.offsets.len() != 2 {
// Both halves must themselves be immediates.
134 match type_pair_fields(ccx, ty) {
136 type_is_immediate(ccx, a) && type_is_immediate(ccx, b)
145 /// Identify types which have size zero at runtime.
146 pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
147 use machine::llsize_of_alloc;
148 use type_of::sizing_type_of;
149 let llty = sizing_type_of(ccx, ty);
150 llsize_of_alloc(ccx, llty) == 0
154 * A note on nomenclature of linking: "extern", "foreign", and "upcall".
156 * An "extern" is an LLVM symbol we wind up emitting an undefined external
157 * reference to. This means "we don't have the thing in this compilation unit,
158 * please make sure you link it in at runtime". This could be a reference to
159 * C code found in a C library, or rust code found in a rust crate.
161 * Most "externs" are implicitly declared (automatically) as a result of a
162 * user declaring an extern _module_ dependency; this causes the rust driver
163 * to locate an extern crate, scan its compilation metadata, and emit extern
164 * declarations for any symbols used by the declaring crate.
166 * A "foreign" is an extern that references C (or other non-rust ABI) code.
167 * There is no metadata to scan for extern references so in these cases either
168 * a header-digester like bindgen, or manual function prototypes, have to
169 * serve as declarators. So these are usually given explicitly as prototype
170 * declarations, in rust code, with ABI attributes on them noting which ABI to
173 * An "upcall" is a foreign call generated by the compiler (not corresponding
174 * to any user-written call in the code) into the runtime library, to perform
175 * some helper task such as bringing a task to life, allocating memory, etc.
181 /// The concrete version of ty::FieldDef. The name is the field index if
182 /// the field is numeric.
183 pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
185 /// The concrete version of ty::VariantDef
// NOTE(review): the other field(s) of this struct (a `discr` is assigned in
// `from_ty` below) are elided from this listing.
186 pub struct VariantInfo<'tcx> {
// Field name/type pairs, in declaration order (see `from_ty`).
188 pub fields: Vec<Field<'tcx>>
191 impl<'a, 'tcx> VariantInfo<'tcx> {
// Builds a VariantInfo for an ADT variant (selected via `opt_def`, or the
// struct variant when `None`) or for a tuple type; `bug!`s on anything else.
// NOTE(review): some parameter lines, struct-literal braces and closers are
// elided from this listing.
192 pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>,
194 opt_def: Option<Def>)
198 ty::TyAdt(adt, substs) => {
199 let variant = match opt_def {
200 None => adt.struct_variant(),
201 Some(def) => adt.variant_of_def(def)
205 discr: Disr::from(variant.disr_val),
// Substitute generics into each field's declared type.
206 fields: variant.fields.iter().map(|f| {
207 Field(f.name, monomorphize::field_ty(tcx, substs, f))
212 ty::TyTuple(ref v) => {
// Tuple fields are named by their index ("0", "1", ...).
215 fields: v.iter().enumerate().map(|(i, &t)| {
216 Field(token::intern(&i.to_string()), t)
222 bug!("cannot get field types from the type {:?}", ty);
// RAII wrapper around an LLVM BuilderRef that disposes it on drop.
// NOTE(review): the struct field (`b`, used below), the `fn drop` header and
// the constructor body are elided from this listing.
228 pub struct BuilderRef_res {
232 impl Drop for BuilderRef_res {
235 llvm::LLVMDisposeBuilder(self.b);
// Free-function constructor sharing the type's name (allowed by the
// file-level `allow(non_snake_case)`).
240 pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res {
// Sanity check: substitutions reaching trans must be fully inferred
// (no remaining inference variables).
246 pub fn validate_substs(substs: &Substs) {
247 assert!(!substs.needs_infer());
250 // Function context. Every LLVM function we create will have one of
// NOTE(review): several field declarations (e.g. the llfn described below
// and the `FnType` described before line "288") are elided from this listing.
252 pub struct FunctionContext<'a, 'tcx: 'a> {
253 // The MIR for this function. At present, this is optional because
254 // we only have MIR available for things that are local to the
256 pub mir: Option<CachedMir<'a, 'tcx>>,
258 // The ValueRef returned from a call to llvm::LLVMAddFunction; the
259 // address of the first instruction in the sequence of
260 // instructions for this function that will go in the .text
261 // section of the executable we're generating.
264 // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
265 pub param_env: ty::ParameterEnvironment<'tcx>,
267 // A pointer to where to store the return value. If the return type is
268 // immediate, this points to an alloca in the function. Otherwise, it's a
269 // pointer to the hidden first parameter of the function. After function
270 // construction, this should always be Some.
271 pub llretslotptr: Cell<Option<ValueRef>>,
273 // These pub elements: "hoisted basic blocks" containing
274 // administrative activities that have to happen in only one place in
275 // the function, due to LLVM's quirks.
276 // A marker for the place where we want to insert the function's static
277 // allocas, so that LLVM will coalesce them into a single alloca call.
278 pub alloca_insert_pt: Cell<Option<ValueRef>>,
280 // When working with landingpad-based exceptions this value is alloca'd and
281 // later loaded when using the resume instruction. This ends up being
282 // critical to chaining landing pads and reusing already-translated
285 // Note that for cleanuppad-based exceptions this is not used.
286 pub landingpad_alloca: Cell<Option<ValueRef>>,
288 // Describes the return/argument LLVM types and their ABI handling.
291 // If this function is being monomorphized, this contains the type
292 // substitutions used.
293 pub param_substs: &'tcx Substs<'tcx>,
295 // The source span and nesting context where this function comes from, for
296 // error reporting and symbol generation.
297 pub span: Option<Span>,
299 // The arena that blocks are allocated from.
300 pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
302 // The arena that landing pads are allocated from.
303 pub lpad_arena: TypedArena<LandingPad>,
305 // This function's enclosing crate context.
306 pub ccx: &'a CrateContext<'a, 'tcx>,
308 // Used and maintained by the debuginfo module.
309 pub debug_context: debuginfo::FunctionDebugContext,
// Stack of active cleanup scopes; managed by the `cleanup` module.
312 pub scopes: RefCell<Vec<cleanup::CleanupScope<'tcx>>>,
// NOTE(review): many interior lines (method bodies, braces, arguments) are
// elided from this listing; comments below describe only what is visible.
315 impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
// Returns this function's MIR; panics if none was attached.
316 pub fn mir(&self) -> CachedMir<'a, 'tcx> {
317 self.mir.clone().expect("fcx.mir was empty")
// Per-function teardown: erases the static-alloca insertion marker
// instruction from the emitted IR.
320 pub fn cleanup(&self) {
322 llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt
// Appends a fresh named basic block to this function and wraps it in a
// BlockS allocated from `block_arena`.
328 pub fn new_block(&'a self,
332 let name = CString::new(name).unwrap();
333 let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
336 BlockS::new(llbb, self)
// Applies this function's `param_substs` to `value` (monomorphization).
340 pub fn monomorphize<T>(&self, value: &T) -> T
341 where T: TransNormalize<'tcx>
343 monomorphize::apply_param_substs(self.ccx.shared(),
348 /// This is the same as `common::type_needs_drop`, except that it
349 /// may use or update caches within this `FunctionContext`.
350 pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
351 self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env)
354 pub fn eh_personality(&self) -> ValueRef {
355 // The exception handling personality function.
357 // If our compilation unit has the `eh_personality` lang item somewhere
358 // within it, then we just need to translate that. Otherwise, we're
359 // building an rlib which will depend on some upstream implementation of
360 // this function, so we just codegen a generic reference to it. We don't
361 // specify any of the types for the function, we just make it a symbol
362 // that LLVM can later use.
364 // Note that MSVC is a little special here in that we don't use the
365 // `eh_personality` lang item at all. Currently LLVM has support for
366 // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
367 // *name of the personality function* to decide what kind of unwind side
368 // tables/landing pads to emit. It looks like Dwarf is used by default,
369 // injecting a dependency on the `_Unwind_Resume` symbol for resuming
370 // an "exception", but for MSVC we want to force SEH. This means that we
371 // can't actually have the personality function be our standard
372 // `rust_eh_personality` function, but rather we wired it up to the
373 // CRT's custom personality function, which forces LLVM to consider
374 // landing pads as "landing pads for SEH".
377 match tcx.lang_items.eh_personality() {
378 Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
379 Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx)
// The declared personality function is cached in the CrateContext.
382 if let Some(llpersonality) = ccx.eh_personality().get() {
385 let name = if base::wants_msvc_seh(ccx.sess()) {
388 "rust_eh_personality"
// Declare the personality as a variadic C function and cache it.
390 let fty = Type::variadic_func(&[], &Type::i32(ccx));
391 let f = declare::declare_cfn(ccx, name, fty);
392 ccx.eh_personality().set(Some(f));
398 // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
399 // otherwise declares it as an external function.
400 pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
404 assert!(ccx.sess().target.target.options.custom_unwind_resume);
405 if let Some(def_id) = tcx.lang_items.eh_unwind_resume() {
406 return Callee::def(ccx, def_id, Substs::empty(tcx));
// No lang item: synthesize the signature `unsafe fn(*mut u8) -> !` and
// declare `rust_eh_unwind_resume` as an external symbol, caching it.
409 let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
410 unsafety: hir::Unsafety::Unsafe,
412 sig: ty::Binder(ty::FnSig {
413 inputs: vec![tcx.mk_mut_ptr(tcx.types.u8)],
414 output: tcx.types.never,
419 let unwresume = ccx.eh_unwind_resume();
420 if let Some(llfn) = unwresume.get() {
421 return Callee::ptr(llfn, ty);
423 let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
424 attributes::unwind(llfn, true);
425 unwresume.set(Some(llfn));
426 Callee::ptr(llfn, ty)
430 // Basic block context. We create a block context for each basic block
431 // (single-entry, single-exit sequence of instructions) we generate from Rust
432 // code. Each basic block we generate is attached to a function, typically
433 // with many basic blocks per function. All the basic blocks attached to a
434 // function are organized as a directed graph.
435 pub struct BlockS<'blk, 'tcx: 'blk> {
436 // The BasicBlockRef returned from a call to
437 // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
438 // block to the function pointed to by llfn. We insert
439 // instructions into that block by way of this block context.
440 // The block pointing to this one in the function's digraph.
441 pub llbb: BasicBlockRef,
// Whether a terminator has been emitted for this block (starts false).
442 pub terminated: Cell<bool>,
// Whether this block has been marked unreachable (starts false).
443 pub unreachable: Cell<bool>,
445 // If this block is part of a landing pad, then this is `Some` indicating what
446 // kind of landing pad it's in, otherwise this is none.
447 pub lpad: Cell<Option<&'blk LandingPad>>,
449 // The function context for the function to which this block is
451 pub fcx: &'blk FunctionContext<'blk, 'tcx>,
// Blocks are arena-allocated, so they are always handled by reference.
454 pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
// NOTE(review): many one-line accessor bodies and closing braces are elided
// from this listing.
456 impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
// Arena-allocates a fresh block wrapper for `llbb` within `fcx`,
// with terminated/unreachable/lpad all cleared.
457 pub fn new(llbb: BasicBlockRef,
458 fcx: &'blk FunctionContext<'blk, 'tcx>)
459 -> Block<'blk, 'tcx> {
460 fcx.block_arena.alloc(BlockS {
462 terminated: Cell::new(false),
463 unreachable: Cell::new(false),
464 lpad: Cell::new(None),
// Convenience accessors delegating to the enclosing FunctionContext.
469 pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
472 pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
475 pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
478 pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
480 pub fn lpad(&self) -> Option<&'blk LandingPad> {
484 pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
485 // FIXME: use an IVar?
// Moves `lpad` into the function's landing-pad arena and stores the
// resulting reference.
489 pub fn set_lpad(&self, lpad: Option<LandingPad>) {
490 self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
493 pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
497 pub fn name(&self, name: ast::Name) -> String {
501 pub fn node_id_to_string(&self, id: ast::NodeId) -> String {
502 self.tcx().map.node_to_string(id).to_string()
// Debug representation: just the block's address.
505 pub fn to_str(&self) -> String {
506 format!("[block {:p}]", self)
// Applies the enclosing function's parameter substitutions to `value`.
509 pub fn monomorphize<T>(&self, value: &T) -> T
510 where T: TransNormalize<'tcx>
512 monomorphize::apply_param_substs(self.fcx.ccx.shared(),
513 self.fcx.param_substs,
// Creates a builder positioned at the end of this block.
517 pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> {
518 BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx()))
// Owns an LLVM instruction builder and disposes it on drop (RAII).
// NOTE(review): the `fn drop` header, `unsafe` wrappers and braces are
// elided from this listing.
522 pub struct OwnedBuilder<'blk, 'tcx: 'blk> {
523 builder: Builder<'blk, 'tcx>
526 impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> {
527 pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self {
528 // Create a fresh builder from the crate context.
529 let llbuilder = unsafe {
530 llvm::LLVMCreateBuilderInContext(ccx.llcx())
534 llbuilder: llbuilder,
541 impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> {
544 llvm::LLVMDisposeBuilder(self.builder.llbuilder);
// Pairs a basic block with an owned builder positioned at the block's end.
// NOTE(review): several accessor bodies and braces are elided from this
// listing.
549 pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
550 bcx: Block<'blk, 'tcx>,
551 owned_builder: OwnedBuilder<'blk, 'tcx>,
554 impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
555 pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self {
556 // Set the builder's position to this block's end.
557 owned_builder.builder.position_at_end(bcx.llbb);
560 owned_builder: owned_builder,
// Runs `f` on the raw block, then re-positions the builder at the end of
// the block in case `f` moved it.
564 pub fn with_block<F, R>(&self, f: F) -> R
565 where F: FnOnce(Block<'blk, 'tcx>) -> R
567 let result = f(self.bcx);
568 self.position_at_end(self.bcx.llbb);
// Maps the wrapped block through `f`, rebuilding the pair (which also
// re-positions the builder on the new block).
572 pub fn map_block<F>(self, f: F) -> Self
573 where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
575 let BlockAndBuilder { bcx, owned_builder } = self;
577 BlockAndBuilder::new(bcx, owned_builder)
// Temporarily positions the builder at the start of the block for `f`,
// restoring the end position afterwards.
580 pub fn at_start<F, R>(&self, f: F) -> R
581 where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
583 self.position_at_start(self.bcx.llbb);
585 self.position_at_end(self.bcx.llbb);
589 // Methods delegated to bcx
591 pub fn is_unreachable(&self) -> bool {
592 self.bcx.unreachable.get()
595 pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
598 pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> {
601 pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> {
604 pub fn sess(&self) -> &'blk Session {
608 pub fn llbb(&self) -> BasicBlockRef {
612 pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
616 pub fn monomorphize<T>(&self, value: &T) -> T
617 where T: TransNormalize<'tcx>
619 self.bcx.monomorphize(value)
622 pub fn set_lpad(&self, lpad: Option<LandingPad>) {
623 self.bcx.set_lpad(lpad)
626 pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
627 // FIXME: use an IVar?
628 self.bcx.set_lpad_ref(lpad);
631 pub fn lpad(&self) -> Option<&'blk LandingPad> {
// Builder methods are exposed directly on BlockAndBuilder via Deref.
636 impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> {
637 type Target = Builder<'blk, 'tcx>;
638 fn deref(&self) -> &Self::Target {
639 &self.owned_builder.builder
643 /// A structure representing an active landing pad for the duration of a basic
646 /// Each `Block` may contain an instance of this, indicating whether the block
647 /// is part of a landing pad or not. This is used to make decisions about whether
648 /// to emit `invoke` instructions (e.g. in a landing pad we don't continue to
649 /// use `invoke`) and also about various function call metadata.
651 /// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
652 /// just a bunch of `None` instances (not too interesting), but for MSVC
653 /// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
654 /// When inside of a landing pad, each function call in LLVM IR needs to be
655 /// annotated with which landing pad it's a part of. This is accomplished via
656 /// the `OperandBundleDef` value created for MSVC landing pads.
657 pub struct LandingPad {
658 cleanuppad: Option<ValueRef>,
659 operand: Option<OperandBundleDef>,
// GNU-style landing pad: carries no extra data.
663 pub fn gnu() -> LandingPad {
664 LandingPad { cleanuppad: None, operand: None }
// MSVC-style landing pad: remembers the cleanuppad value and a "funclet"
// operand bundle referencing it.
667 pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
669 cleanuppad: Some(cleanuppad),
670 operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])),
674 pub fn bundle(&self) -> Option<&OperandBundleDef> {
675 self.operand.as_ref()
678 pub fn cleanuppad(&self) -> Option<ValueRef> {
// Manual Clone: the operand bundle is rebuilt from the stored cleanuppad
// rather than copied — presumably OperandBundleDef itself is not Clone.
683 impl Clone for LandingPad {
684 fn clone(&self) -> LandingPad {
686 cleanuppad: self.cleanuppad,
687 operand: self.cleanuppad.map(|p| {
688 OperandBundleDef::new("funclet", &[p])
// A (block, value) pair returned by translation helpers.
// NOTE(review): the `val` field and constructor body are elided from this
// listing.
694 pub struct Result<'blk, 'tcx: 'blk> {
695 pub bcx: Block<'blk, 'tcx>,
699 impl<'b, 'tcx> Result<'b, 'tcx> {
700 pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
708 pub fn val_ty(v: ValueRef) -> Type {
710 Type::from_ref(llvm::LLVMTypeOf(v))
714 // LLVM constant constructors.
715 pub fn C_null(t: Type) -> ValueRef {
717 llvm::LLVMConstNull(t.to_ref())
721 pub fn C_undef(t: Type) -> ValueRef {
723 llvm::LLVMGetUndef(t.to_ref())
727 pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef {
729 llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool)
733 pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
735 llvm::LLVMConstReal(t.to_ref(), f)
739 pub fn C_nil(ccx: &CrateContext) -> ValueRef {
740 C_struct(ccx, &[], false)
743 pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef {
744 C_integral(Type::i1(ccx), val as u64, false)
747 pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef {
748 C_integral(Type::i32(ccx), i as u64, true)
751 pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef {
752 C_integral(Type::i32(ccx), i as u64, false)
755 pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
756 C_integral(Type::i64(ccx), i, false)
// Builds a constant of the target's `int_type` from any value convertible
// to u64, asserting the value fits in that type's bit width.
// NOTE(review): the binding of `v` (presumably `i.as_u64()`) and the guard
// around the overflow assert are elided from this listing.
759 pub fn C_uint<I: AsU64>(ccx: &CrateContext, i: I) -> ValueRef {
762 let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type());
765 // make sure it doesn't overflow
766 assert!(v < (1<<bit_size));
769 C_integral(ccx.int_type(), v, false)
/// Widening conversion to `i64` for the signed types `C_int`-style helpers
/// accept.
pub trait AsI64 {
    fn as_i64(self) -> i64;
}

/// Widening conversion to `u64` for the unsigned types `C_uint`-style
/// helpers accept.
pub trait AsU64 {
    fn as_u64(self) -> u64;
}

// FIXME: remove the intptr conversions, because they
// are host-architecture-dependent
impl AsI64 for i64 {
    fn as_i64(self) -> i64 {
        self
    }
}
impl AsI64 for i32 {
    fn as_i64(self) -> i64 {
        self as i64
    }
}
impl AsI64 for isize {
    fn as_i64(self) -> i64 {
        self as i64
    }
}

impl AsU64 for u64 {
    fn as_u64(self) -> u64 {
        self
    }
}
impl AsU64 for u32 {
    fn as_u64(self) -> u64 {
        self as u64
    }
}
impl AsU64 for usize {
    fn as_u64(self) -> u64 {
        self as u64
    }
}
785 pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef {
786 C_integral(Type::i8(ccx), i as u64, false)
790 // This is a 'c-like' raw string, which differs from
791 // our boxed-and-length-annotated strings.
// NOTE(review): the `unsafe` wrapper, the length argument to
// LLVMConstStringInContext, early return of the cached value, and the final
// return of `g` are elided from this listing.
792 pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
// Fast path: each interned string is emitted as a global at most once.
794 if let Some(&llval) = cx.const_cstr_cache().borrow().get(&s) {
798 let sc = llvm::LLVMConstStringInContext(cx.llcx(),
799 s.as_ptr() as *const c_char,
// LLVM's flag means "don't null terminate", hence the negation.
801 !null_terminated as Bool);
// Define a fresh internal, constant global holding the string bytes.
802 let sym = cx.generate_local_symbol_name("str");
803 let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{
804 bug!("symbol `{}` is already defined", sym);
806 llvm::LLVMSetInitializer(g, sc);
807 llvm::LLVMSetGlobalConstant(g, True);
808 llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
// Remember the global for subsequent requests of the same string.
810 cx.const_cstr_cache().borrow_mut().insert(s, g);
815 // NB: Do not use `do_spill_noroot` to make this into a constant string, or
816 // you will be kicked off fast isel. See issue #4352 for an example of this.
// Builds a `str_slice` (pointer, length) constant for an interned string.
// NOTE(review): the binding of `len` (presumably `s.len()`) and the closing
// brace are elided from this listing.
817 pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
819 let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx));
820 C_named_struct(cx.tn().find_type("str_slice").unwrap(), &[cs, C_uint(cx, len)])
823 pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
824 C_struct_in_context(cx.llcx(), elts, packed)
827 pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
829 llvm::LLVMConstStructInContext(llcx,
830 elts.as_ptr(), elts.len() as c_uint,
835 pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
837 llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
841 pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
843 return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
847 pub fn C_vector(elts: &[ValueRef]) -> ValueRef {
849 return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
853 pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
854 C_bytes_in_context(cx.llcx(), bytes)
857 pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
859 let ptr = bytes.as_ptr() as *const c_char;
860 return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
// Extracts the constant element at index path `us` from the aggregate
// constant `v`.
// NOTE(review): the return type line, `unsafe` wrapper, final return of `r`,
// and closing braces are elided from this listing.
864 pub fn const_get_elt(v: ValueRef, us: &[c_uint])
867 let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
869 debug!("const_get_elt(v={:?}, us={:?}, r={:?})",
870 Value(v), us, Value(r));
876 pub fn const_to_uint(v: ValueRef) -> u64 {
878 llvm::LLVMConstIntGetZExtValue(v)
882 fn is_const_integral(v: ValueRef) -> bool {
884 !llvm::LLVMIsAConstantInt(v).is_null()
888 pub fn const_to_opt_int(v: ValueRef) -> Option<i64> {
890 if is_const_integral(v) {
891 Some(llvm::LLVMConstIntGetSExtValue(v))
898 pub fn const_to_opt_uint(v: ValueRef) -> Option<u64> {
900 if is_const_integral(v) {
901 Some(llvm::LLVMConstIntGetZExtValue(v))
908 pub fn is_undef(val: ValueRef) -> bool {
910 llvm::LLVMIsUndef(val) != False
914 #[allow(dead_code)] // potentially useful
915 pub fn is_null(val: ValueRef) -> bool {
917 llvm::LLVMIsNull(val) != False
921 /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
922 /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
923 /// guarantee to us that all nested obligations *could be* resolved if we wanted to.
// NOTE(review): some parameter lines (a span is used below), closing braces
// and a few expression continuations are elided from this listing.
924 pub fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
926 trait_ref: ty::PolyTraitRef<'tcx>)
927 -> traits::Vtable<'tcx, ()>
931 // Remove any references to regions; this helps improve caching.
932 let trait_ref = tcx.erase_regions(&trait_ref);
// Selection results are memoized per trait-ref in the shared context.
934 scx.trait_cache().memoize(trait_ref, || {
935 debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
936 trait_ref, trait_ref.def_id());
938 // Do the initial selection for the obligation. This yields the
939 // shallow result we are looking for -- that is, what specific impl.
940 tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| {
941 let mut selcx = SelectionContext::new(&infcx);
943 let obligation_cause = traits::ObligationCause::misc(span,
945 let obligation = traits::Obligation::new(obligation_cause,
946 trait_ref.to_poly_trait_predicate());
948 let selection = match selcx.select(&obligation) {
949 Ok(Some(selection)) => selection,
951 // Ambiguity can happen when monomorphizing during trans
952 // expands to some humongo type that never occurred
953 // statically -- this humongo type can then overflow,
954 // leading to an ambiguous result. So report this as an
955 // overflow bug, since I believe this is the only case
956 // where ambiguity can result.
957 debug!("Encountered ambiguity selecting `{:?}` during trans, \
958 presuming due to overflow",
960 tcx.sess.span_fatal(span,
961 "reached the recursion limit during monomorphization \
962 (selection ambiguity)");
// Any other selection error at this stage is a compiler bug.
965 span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
970 debug!("fulfill_obligation: selection={:?}", selection);
972 // Currently, we use a fulfillment context to completely resolve
973 // all nested obligations. This is because they can inform the
974 // inference of the impl's type parameters.
975 let mut fulfill_cx = traits::FulfillmentContext::new();
976 let vtable = selection.map(|predicate| {
977 debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
978 fulfill_cx.register_predicate_obligation(&infcx, predicate);
// Resolve all registered obligations, panicking (span-attributed) on failure.
980 let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
982 info!("Cache miss: {:?} => {:?}", trait_ref, vtable);
// Looks up the definition of lang item `li`, reporting a fatal error (at
// `span` when one is available) if the lang item is missing.
// NOTE(review): the remaining parameters (a span, `msg`, and `li` are used
// below), the Ok arm, and closing braces are elided from this listing.
988 pub fn langcall(tcx: TyCtxt,
993 match tcx.lang_items.require(li) {
// On failure, combine the caller's message with the lang-item error text.
996 let msg = format!("{} {}", msg, s);
998 Some(span) => tcx.sess.span_fatal(span, &msg[..]),
999 None => tcx.sess.fatal(&msg[..]),
1005 // To avoid UB from LLVM, these two functions mask RHS with an
1006 // appropriate mask unconditionally (i.e. the fallback behavior for
1007 // all shifts). For 32- and 64-bit types, this matches the semantics
1008 // of Java. (See related discussion on #1877 and #10183.)
// Emits a left shift, first casting the RHS to the LHS's width and masking
// it so the shift amount is always in range.
// NOTE(review): the `lhs`/`rhs` parameter lines and closing brace are
// elided from this listing.
1010 pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1013 binop_debug_loc: DebugLoc) -> ValueRef {
1014 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
1015 // #1877, #10183: Ensure that input is always valid
1016 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
1017 build::Shl(bcx, lhs, rhs, binop_debug_loc)
// Right-shift counterpart: arithmetic shift (AShr) for signed LHS types,
// logical shift (LShr) otherwise, with the same RHS cast-and-mask treatment.
// NOTE(review): parameter lines (`lhs_t`, `lhs`, `rhs`), the `if is_signed`
// line and closing braces are elided from this listing.
1020 pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1024 binop_debug_loc: DebugLoc) -> ValueRef {
1025 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
1026 // #1877, #10183: Ensure that input is always valid
1027 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
1028 let is_signed = lhs_t.is_signed();
1030 build::AShr(bcx, lhs, rhs, binop_debug_loc)
1032 build::LShr(bcx, lhs, rhs, binop_debug_loc)
// ANDs `rhs` with the (width - 1) mask for its own type, clamping shift
// amounts into the valid range.
// NOTE(review): the `rhs` parameter line and closing brace are elided from
// this listing.
1036 fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1038 debug_loc: DebugLoc) -> ValueRef {
1039 let rhs_llty = val_ty(rhs);
1040 build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
// Builds the shift mask for `llty`: (bit-width - 1) for integers (or its
// complement when `invert`), recursively splatted across lanes for vectors.
// NOTE(review): the `llty`/`mask_llty` parameter lines, the `if invert`
// line, and this function's closing braces run past or are elided from this
// listing.
1043 pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1046 invert: bool) -> ValueRef {
1047 let kind = llty.kind();
1049 TypeKind::Integer => {
1050 // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
1051 let val = llty.int_width() - 1;
// Inverted mask is sign-extended; plain mask is not.
1053 C_integral(mask_llty, !val, true)
1055 C_integral(mask_llty, val, false)
1058 TypeKind::Vector => {
// Build the scalar mask for the element type, then splat it.
1059 let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
1060 build::VectorSplat(bcx, mask_llty.vector_length(), mask)
1062 _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),