use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::weak_lang_items;
use middle::pat_util::simple_name;
-use middle::subst::Substs;
+use middle::subst::{self, Substs};
+use middle::traits;
use middle::ty::{self, Ty, TypeFoldable};
+use middle::ty::adjustment::CustomCoerceUnsized;
use rustc::dep_graph::DepNode;
use rustc::front::map as hir_map;
use rustc::util::common::time;
-use rustc_mir::mir_map::MirMap;
+use rustc::mir::mir_map::MirMap;
use session::config::{self, NoDebugInfo, FullDebugInfo};
use session::Session;
use trans::_match;
use trans::cleanup::{self, CleanupMethods, DropHint};
use trans::closure;
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
+use trans::collector::{self, TransItem, TransItemState, TransItemCollectionMode};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use trans::common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
use trans::common::{Result, NodeIdAndSpan, VariantInfo};
-use trans::common::{node_id_type, return_type_is_void};
+use trans::common::{node_id_type, return_type_is_void, fulfill_obligation};
use trans::common::{type_is_immediate, type_is_zero_size, val_ty};
use trans::common;
use trans::consts;
use std::collections::{HashMap, HashSet};
use std::str;
use std::{i8, i16, i32, i64};
-use syntax::abi::{Rust, RustCall, RustIntrinsic, PlatformIntrinsic, Abi};
-use syntax::codemap::Span;
+use syntax::abi::Abi;
+use syntax::codemap::{Span, DUMMY_SP};
use syntax::parse::token::InternedString;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
}
}
+/// Resolves which `CoerceUnsized` impl applies when coercing `source_ty` to
+/// `target_ty`, and returns that impl's custom coerce-unsized kind.
+///
+/// Builds a `Binder<TraitRef>` for the `coerce_unsized` lang-item trait with
+/// erased substs (source/target placed in the two param spaces — TODO confirm
+/// which space is self vs. type params), fulfills the obligation at
+/// `DUMMY_SP`, and on a `VtableImpl` result looks up the impl's kind via
+/// `custom_coerce_unsized_kind`. Any other vtable form is a compiler bug
+/// (reported via `sess().bug`).
+pub fn custom_coerce_unsize_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
+ source_ty: Ty<'tcx>,
+ target_ty: Ty<'tcx>)
+ -> CustomCoerceUnsized {
+ let trait_substs = Substs::erased(subst::VecPerParamSpace::new(vec![target_ty],
+ vec![source_ty],
+ Vec::new()));
+ let trait_ref = ty::Binder(ty::TraitRef {
+ // `unwrap` assumes the coerce_unsized lang item is always defined here.
+ def_id: ccx.tcx().lang_items.coerce_unsized_trait().unwrap(),
+ substs: ccx.tcx().mk_substs(trait_substs)
+ });
+
+ match fulfill_obligation(ccx, DUMMY_SP, trait_ref) {
+ traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
+ ccx.tcx().custom_coerce_unsized_kind(impl_def_id)
+ }
+ vtable => {
+ // Selection produced something other than a concrete impl: ICE.
+ ccx.sess().bug(&format!("invalid CoerceUnsized vtable: {:?}",
+ vtable));
+ }
+ }
+}
+
// Casts the shift RHS to the LHS's width via `cast_shift_rhs`: truncate when
// the RHS is wider, zero-extend when it is narrower.
pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
}
ty::TyInt(t) => {
let llty = Type::int_from_ty(cx.ccx(), t);
let min = match t {
- ast::TyIs if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
- ast::TyIs => i64::MIN as u64,
- ast::TyI8 => i8::MIN as u64,
- ast::TyI16 => i16::MIN as u64,
- ast::TyI32 => i32::MIN as u64,
- ast::TyI64 => i64::MIN as u64,
+ ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
+ ast::IntTy::Is => i64::MIN as u64,
+ ast::IntTy::I8 => i8::MIN as u64,
+ ast::IntTy::I16 => i16::MIN as u64,
+ ast::IntTy::I32 => i32::MIN as u64,
+ ast::IntTy::I64 => i64::MIN as u64,
};
(llty, min)
}
match t.sty {
ty::TyBareFn(_, ref fn_ty) => {
match ccx.sess().target.target.adjust_abi(fn_ty.abi) {
- Rust | RustCall => {
+ Abi::Rust | Abi::RustCall => {
get_extern_rust_fn(ccx, t, &name[..], did)
}
- RustIntrinsic | PlatformIntrinsic => {
+ Abi::RustIntrinsic | Abi::PlatformIntrinsic => {
ccx.sess().bug("unexpected intrinsic in trans_external_path")
}
_ => {
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
-    sess.target.target.options.is_like_msvc && sess.target.target.arch == "x86"
+    // SEH-style unwinding is now selected for every MSVC-like target,
+    // no longer restricted to 32-bit x86.
+    sess.target.target.options.is_like_msvc
}
pub fn avoid_invoke(bcx: Block) -> bool {
-    // FIXME(#25869) currently SEH-based unwinding is pretty buggy in LLVM and
-    // is being overhauled as this is being written. Until that
-    // time such that upstream LLVM's implementation is more solid
-    // and we start binding it we need to skip invokes for any
-    // target which wants SEH-based unwinding.
-    if bcx.sess().no_landing_pads() || wants_msvc_seh(bcx.sess()) {
-        true
-    } else if bcx.is_lpad {
-        // Avoid using invoke if we are already inside a landing pad.
-        true
-    } else {
-        false
-    }
+    // Skip `invoke` when landing pads are disabled, or when this block is
+    // already inside a landing pad (`lpad()` is Some). Note the old special
+    // case for MSVC SEH targets is dropped here.
+    bcx.sess().no_landing_pads() || bcx.lpad().is_some()
}
pub fn need_invoke(bcx: Block) -> bool {
}
// Wraps a raw LLVM basic block in a `Block`; the landing-pad flag parameter
// is removed since `BlockS::new` no longer takes one.
pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
-                             is_lpad: bool,
llbb: BasicBlockRef)
-> Block<'blk, 'tcx> {
-    common::BlockS::new(llbb, is_lpad, None, fcx)
+    common::BlockS::new(llbb, None, fcx)
}
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
let volatile = C_bool(ccx, false);
b.call(llintrinsicfn,
&[llptr, llzeroval, size, align, volatile],
- None);
+ None, None);
}
/// In general, when we create an scratch value in an alloca, the
// Block, which we do not have for `alloca_insert_pt`).
core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
let ptr = b.pointercast(p, Type::i8p(ccx));
- b.call(lifetime_start, &[C_u64(ccx, size), ptr], None);
+ b.call(lifetime_start, &[C_u64(ccx, size), ptr], None, None);
});
memfill(&b, p, ty, adt::DTOR_DONE);
p
false
};
- let mir = ccx.mir_map().get(&id);
+ let mir = ccx.mir_map().map.get(&id);
let mut fcx = FunctionContext {
mir: mir,
alloca_insert_pt: Cell::new(None),
llreturn: Cell::new(None),
needs_ret_allocas: nested_returns,
- personality: Cell::new(None),
+ landingpad_alloca: Cell::new(None),
caller_expects_out_pointer: uses_outptr,
lllocals: RefCell::new(NodeMap()),
llupvars: RefCell::new(NodeMap()),
param_substs: param_substs,
span: sp,
block_arena: block_arena,
+ lpad_arena: TypedArena::new(),
ccx: ccx,
debug_context: debug_context,
scopes: RefCell::new(Vec::new()),
if !last_bcx.terminated.get() {
Br(last_bcx, llreturn, DebugLoc::None);
}
- raw_block(fcx, false, llreturn)
+ raw_block(fcx, llreturn)
}
None => last_bcx,
};
closure_env: closure::ClosureEnv<'b>) {
ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
+ record_translation_item_as_generated(ccx, fn_ast_id, param_substs);
+
let _icx = push_ctxt("trans_closure");
attributes::emit_uwtable(llfndecl, true);
let mut bcx = init_function(&fcx, false, output_type);
if attributes.iter().any(|item| item.check_name("rustc_mir")) {
- mir::trans_mir(bcx);
+ mir::trans_mir(bcx.build());
fcx.cleanup();
return;
}
bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
let has_tupled_arg = match closure_env {
- closure::ClosureEnv::NotClosure => abi == RustCall,
+ closure::ClosureEnv::NotClosure => abi == Abi::RustCall,
_ => false,
};
// Insert the mandatory first few basic blocks before lltop.
finish_fn(&fcx, bcx, output_type, ret_debug_loc);
+
+    // Records that the function identified by `node_id` (instantiated with
+    // `param_substs`) was actually translated, for comparison against the
+    // collector's predictions. No-op unless the collector's debug-information
+    // gathering is enabled.
+    fn record_translation_item_as_generated<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ node_id: ast::NodeId,
+ param_substs: &'tcx Substs<'tcx>) {
+ if !collector::collecting_debug_information(ccx) {
+ return;
+ }
+
+        // Closures carry their own DefId in their type; otherwise prefer the
+        // external source the item was inlined from, falling back to the
+        // local DefId of `node_id`.
+ let def_id = match ccx.tcx().node_id_to_type(node_id).sty {
+ ty::TyClosure(def_id, _) => def_id,
+ _ => ccx.external_srcs()
+ .borrow()
+ .get(&node_id)
+ .map(|did| *did)
+ .unwrap_or_else(|| ccx.tcx().map.local_def_id(node_id)),
+ };
+
+        // Regions are erased so the key matches what the collector produced.
+ ccx.record_translation_item_as_generated(TransItem::Fn{
+ def_id: def_id,
+ substs: ccx.tcx().mk_substs(ccx.tcx().erase_regions(param_substs)),
+ });
+ }
}
/// Creates an LLVM function corresponding to a source language function.
for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
let llfn = get_item_val(ccx, item.id);
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
- if abi != Rust {
+ if abi != Abi::Rust {
foreign::trans_rust_fn_with_foreign_abi(ccx,
&**decl,
&**body,
node_type: Ty<'tcx>)
-> ValueRef {
if let ty::TyBareFn(_, ref f) = node_type.sty {
- if f.abi != Rust && f.abi != RustCall {
+ if f.abi != Abi::Rust && f.abi != Abi::RustCall {
ccx.sess().span_bug(sp,
&format!("only the `{}` or `{}` calling conventions are valid \
for this function; `{}` was specified",
- Rust.name(),
- RustCall.name(),
+ Abi::Rust.name(),
+ Abi::RustCall.name(),
f.abi.name()));
}
} else {
(rust_main, args)
};
- let result = llvm::LLVMBuildCall(bld,
- start_fn,
- args.as_ptr(),
- args.len() as c_uint,
- noname());
+ let result = llvm::LLVMRustBuildCall(bld,
+ start_fn,
+ args.as_ptr(),
+ args.len() as c_uint,
+ 0 as *mut _,
+ noname());
llvm::LLVMBuildRet(bld, result);
}
hir::ItemFn(_, _, _, abi, _, _) => {
let sym = sym();
- let llfn = if abi == Rust {
+ let llfn = if abi == Abi::Rust {
register_fn(ccx, i.span, sym, i.id, ty)
} else {
foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id)
let sym = exported_name(ccx, id, mty, &attrs);
if let ty::TyBareFn(_, ref f) = mty.sty {
- let llfn = if f.abi == Rust || f.abi == RustCall {
+ let llfn = if f.abi == Abi::Rust || f.abi == Abi::RustCall {
register_fn(ccx, span, sym, id, mty)
} else {
foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id)
// First, verify intrinsics.
intrinsic::check_intrinsics(&ccx);
+ collect_translation_items(&ccx);
+
// Next, translate all items. See `TransModVisitor` for
// details on why we walk in this particular way.
{
intravisit::walk_mod(&mut TransItemsWithinModVisitor { ccx: &ccx }, &krate.module);
krate.visit_all_items(&mut TransModVisitor { ccx: &ccx });
}
+
+ collector::print_collection_results(&ccx);
}
for ccx in shared_ccx.iter() {
for cnum in sess.cstore.crates() {
let syms = sess.cstore.reachable_ids(cnum);
reachable_symbols.extend(syms.into_iter().filter(|did| {
- sess.cstore.is_extern_fn(shared_ccx.tcx(), *did) ||
- sess.cstore.is_static(*did)
+ sess.cstore.is_extern_item(shared_ccx.tcx(), *did)
}).map(|did| {
sess.cstore.item_symbol(did)
}));
}
}
}
+
+/// Runs the translation-item collector over the crate.
+///
+/// The collection mode comes from `-Z print-trans-items=<mode>`: "eager"
+/// selects `Eager`; any other value warns and falls back to `Lazy` (as does
+/// an absent flag). When the flag is present, the collected items are printed
+/// (sorted, one `TRANS_ITEM` line each) and recorded in the context's
+/// translation-item map as `PredictedButNotGenerated` so later translation
+/// can mark which predictions were actually generated.
+fn collect_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>) {
+ let time_passes = ccx.sess().time_passes();
+
+ let collection_mode = match ccx.sess().opts.debugging_opts.print_trans_items {
+ Some(ref s) => {
+            // Mode string is matched case-insensitively and trimmed.
+ let mode_string = s.to_lowercase();
+ let mode_string = mode_string.trim();
+ if mode_string == "eager" {
+ TransItemCollectionMode::Eager
+ } else {
+ if mode_string != "lazy" {
+ let message = format!("Unknown codegen-item collection mode '{}'. \
+ Falling back to 'lazy' mode.",
+ mode_string);
+ ccx.sess().warn(&message);
+ }
+
+ TransItemCollectionMode::Lazy
+ }
+ }
+ None => TransItemCollectionMode::Lazy
+ };
+
+    // Timed as its own compiler pass.
+ let items = time(time_passes, "translation item collection", || {
+ collector::collect_crate_translation_items(&ccx, collection_mode)
+ });
+
+ if ccx.sess().opts.debugging_opts.print_trans_items.is_some() {
+        // Sort for deterministic output (useful for test assertions).
+ let mut item_keys: Vec<_> = items.iter()
+ .map(|i| i.to_string(ccx))
+ .collect();
+ item_keys.sort();
+
+ for item in item_keys {
+ println!("TRANS_ITEM {}", item);
+ }
+
+ let mut ccx_map = ccx.translation_items().borrow_mut();
+
+ for cgi in items {
+ ccx_map.insert(cgi, TransItemState::PredictedButNotGenerated);
+ }
+ }
+}