}
fn get_symbol_hash(ccx: &CrateContext, t: ty::t) -> String {
- match ccx.type_hashcodes.borrow().find(&t) {
+ match ccx.type_hashcodes().borrow().find(&t) {
Some(h) => return h.to_string(),
None => {}
}
- let mut symbol_hasher = ccx.symbol_hasher.borrow_mut();
- let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, &ccx.link_meta);
- ccx.type_hashcodes.borrow_mut().insert(t, hash.clone());
+ let mut symbol_hasher = ccx.symbol_hasher().borrow_mut();
+ let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, ccx.link_meta());
+ ccx.type_hashcodes().borrow_mut().insert(t, hash.clone());
hash
}
}
ast::PatIdent(..) | ast::PatEnum(..) | ast::PatStruct(..) => {
// This is either an enum variant or a variable binding.
- let opt_def = ccx.tcx.def_map.borrow().find_copy(&cur.id);
+ let opt_def = ccx.tcx().def_map.borrow().find_copy(&cur.id);
match opt_def {
Some(def::DefVariant(enum_id, var_id, _)) => {
let variant = ty::enum_variant_with_id(ccx.tcx(), enum_id, var_id);
/// Decides how to represent a given type.
pub fn represent_type(cx: &CrateContext, t: ty::t) -> Rc<Repr> {
debug!("Representing: {}", ty_to_string(cx.tcx(), t));
- match cx.adt_reprs.borrow().find(&t) {
+ match cx.adt_reprs().borrow().find(&t) {
Some(repr) => return repr.clone(),
None => {}
}
let repr = Rc::new(represent_type_uncached(cx, t));
debug!("Represented as: {:?}", repr)
- cx.adt_reprs.borrow_mut().insert(t, repr.clone());
+ cx.adt_reprs().borrow_mut().insert(t, repr.clone());
repr
}
attempts = choose_shortest;
},
attr::ReprPacked => {
- cx.tcx.sess.bug("range_to_inttype: found ReprPacked on an enum");
+ cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
}
}
for &ity in attempts.iter() {
} else {
0
};
- let istart = ccx.stats.n_llvm_insns.get();
+ let istart = ccx.stats().n_llvm_insns.get();
StatRecorder {
ccx: ccx,
name: Some(name),
if self.ccx.sess().trans_stats() {
let end = time::precise_time_ns();
let elapsed = ((end - self.start) / 1_000_000) as uint;
- let iend = self.ccx.stats.n_llvm_insns.get();
- self.ccx.stats.fn_stats.borrow_mut().push((self.name.take().unwrap(),
+ let iend = self.ccx.stats().n_llvm_insns.get();
+ self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
elapsed,
iend - self.istart));
- self.ccx.stats.n_fns.set(self.ccx.stats.n_fns.get() + 1);
+ self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
// Reset LLVM insn count to avoid compound costs.
- self.ccx.stats.n_llvm_insns.set(self.istart);
+ self.ccx.stats().n_llvm_insns.set(self.istart);
}
}
}
let llfn: ValueRef = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMGetOrInsertFunction(ccx.llmod, buf, ty.to_ref())
+ llvm::LLVMGetOrInsertFunction(ccx.llmod(), buf, ty.to_ref())
}
});
_ => {}
}
- if ccx.tcx.sess.opts.cg.no_redzone {
+ if ccx.tcx().sess.opts.cg.no_redzone {
unsafe {
llvm::LLVMAddFunctionAttribute(llfn,
llvm::FunctionIndex as c_uint,
}
fn get_extern_rust_fn(ccx: &CrateContext, fn_ty: ty::t, name: &str, did: ast::DefId) -> ValueRef {
- match ccx.externs.borrow().find_equiv(&name) {
+ match ccx.externs().borrow().find_equiv(&name) {
Some(n) => return *n,
None => ()
}
set_llvm_fn_attrs(attrs.as_slice(), f)
});
- ccx.externs.borrow_mut().insert(name.to_string(), f);
+ ccx.externs().borrow_mut().insert(name.to_string(), f);
f
}
let unboxed_closure_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_id);
match unboxed_closure.kind {
ty::FnUnboxedClosureKind => {
- ty::mk_imm_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnMutUnboxedClosureKind => {
- ty::mk_mut_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnOnceUnboxedClosureKind => unboxed_closure_type,
}
pub fn kind_for_unboxed_closure(ccx: &CrateContext, closure_id: ast::DefId)
-> ty::UnboxedClosureKind {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
unboxed_closures.get(&closure_id).kind
}
(f.sig.inputs.clone(), f.sig.output, f.abi, Some(Type::i8p(ccx)))
}
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_did);
let function_type = unboxed_closure.closure_type.clone();
let self_type = self_type_for_unboxed_closure(ccx, closure_did);
let llfty = type_of_rust_fn(ccx, env, inputs.as_slice(), output, abi);
debug!("decl_rust_fn(input count={},type={})",
inputs.len(),
- ccx.tn.type_to_string(llfty));
+ ccx.tn().type_to_string(llfty));
let llfn = decl_fn(ccx, name, llvm::CCallConv, llfty, output);
let attrs = get_fn_llvm_attributes(ccx, fn_ty);
// Type descriptor and type glue stuff
pub fn get_tydesc(ccx: &CrateContext, t: ty::t) -> Rc<tydesc_info> {
- match ccx.tydescs.borrow().find(&t) {
+ match ccx.tydescs().borrow().find(&t) {
Some(inf) => return inf.clone(),
_ => { }
}
- ccx.stats.n_static_tydescs.set(ccx.stats.n_static_tydescs.get() + 1u);
+ ccx.stats().n_static_tydescs.set(ccx.stats().n_static_tydescs.get() + 1u);
let inf = Rc::new(glue::declare_tydesc(ccx, t));
- ccx.tydescs.borrow_mut().insert(t, inf.clone());
+ ccx.tydescs().borrow_mut().insert(t, inf.clone());
inf
}
// Double-check that we never ask LLVM to declare the same symbol twice. It
// silently mangles such symbols, breaking our linkage model.
pub fn note_unique_llvm_symbol(ccx: &CrateContext, sym: String) {
- if ccx.all_llvm_symbols.borrow().contains(&sym) {
+ if ccx.all_llvm_symbols().borrow().contains(&sym) {
ccx.sess().bug(format!("duplicate LLVM symbol: {}", sym).as_slice());
}
- ccx.all_llvm_symbols.borrow_mut().insert(sym);
+ ccx.all_llvm_symbols().borrow_mut().insert(sym);
}
let dtor_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[glue::get_drop_glue_type(ccx, t)], ty::mk_nil());
get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name.as_slice(),
llvm::CCallConv,
llty,
}
_ => {
let llty = type_of(ccx, t);
- get_extern_const(&mut *ccx.externs.borrow_mut(),
- ccx.llmod,
+ get_extern_const(&mut *ccx.externs().borrow_mut(),
+ ccx.llmod(),
name.as_slice(),
llty)
}
let memcpy = ccx.get_intrinsic(&key);
let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
- let size = IntCast(cx, n_bytes, ccx.int_type);
+ let size = IntCast(cx, n_bytes, ccx.int_type());
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
Call(cx, memcpy, [dst_ptr, src_ptr, size, align, volatile], None);
if id == -1 {
"".to_string()
} else {
- ccx.tcx.map.path_to_string(id).to_string()
+ ccx.tcx().map.path_to_string(id).to_string()
},
id, param_substs.repr(ccx.tcx()));
is_unboxed_closure: IsUnboxedClosureFlag,
maybe_load_env: <'a>|&'a Block<'a>, ScopeId|
-> &'a Block<'a>) {
- ccx.stats.n_closures.set(ccx.stats.n_closures.get() + 1);
+ ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
set_uwtable(llfndecl);
ty_to_string(ccx.tcx(), *monomorphized_arg_type));
}
debug!("trans_closure: function lltype: {}",
- bcx.fcx.ccx.tn.val_to_string(bcx.fcx.llfn));
+ bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
let arg_datums = if abi != RustCall {
create_datums_for_fn_args(&fcx,
                          param_substs: &param_substs,
id: ast::NodeId,
attrs: &[ast::Attribute]) {
- let _s = StatRecorder::new(ccx, ccx.tcx.map.path_to_string(id).to_string());
+ let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
let _icx = push_ctxt("trans_fn");
let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
dest: expr::Dest) -> Result<'a> {
let ccx = bcx.fcx.ccx;
- let tcx = &ccx.tcx;
+ let tcx = ccx.tcx();
let result_ty = match ty::get(ctor_ty).sty {
ty::ty_bare_fn(ref bft) => bft.sig.output,
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
- let levels = ccx.tcx.node_lint_levels.borrow();
+ let levels = ccx.tcx().node_lint_levels.borrow();
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCE);
let lvlsrc = match levels.find(&(id, lint_id)) {
None | Some(&(lint::Allow, _)) => return,
static");
}
- let v = ccx.const_values.borrow().get_copy(&item.id);
+ let v = ccx.const_values().borrow().get_copy(&item.id);
unsafe {
if !(llvm::LLVMConstIntGetZExtValue(v) != 0) {
ccx.sess().span_fatal(expr.span, "static assertion failed");
fn finish_register_fn(ccx: &CrateContext, sp: Span, sym: String, node_id: ast::NodeId,
llfn: ValueRef) {
- ccx.item_symbols.borrow_mut().insert(node_id, sym);
+ ccx.item_symbols().borrow_mut().insert(node_id, sym);
- if !ccx.reachable.contains(&node_id) {
+ if !ccx.reachable().contains(&node_id) {
llvm::SetLinkage(llfn, llvm::InternalLinkage);
}
// otherwise it would continue to be exhausted (bad), and both it and the
// eh_personality functions need to be externally linkable.
let def = ast_util::local_def(node_id);
- if ccx.tcx.lang_items.stack_exhausted() == Some(def) {
+ if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
unset_split_stack(llfn);
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
- if ccx.tcx.lang_items.eh_personality() == Some(def) {
+ if ccx.tcx().lang_items.eh_personality() == Some(def) {
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
ty::ty_bare_fn(ref f) => (f.sig.clone(), f.abi, false),
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let ref function_type = unboxed_closures.get(&closure_did)
.closure_type;
fn create_entry_fn(ccx: &CrateContext,
rust_main: ValueRef,
use_start_lang_item: bool) {
- let llfty = Type::func([ccx.int_type, Type::i8p(ccx).ptr_to()],
- &ccx.int_type);
+ let llfty = Type::func([ccx.int_type(), Type::i8p(ccx).ptr_to()],
+ &ccx.int_type());
let llfn = decl_cdecl_fn(ccx, "main", llfty, ty::mk_nil());
let llbb = "top".with_c_str(|buf| {
unsafe {
- llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, buf)
}
});
- let bld = ccx.builder.b;
+ let bld = ccx.raw_builder();
unsafe {
llvm::LLVMPositionBuilderAtEnd(bld, llbb);
let (start_fn, args) = if use_start_lang_item {
- let start_def_id = match ccx.tcx.lang_items.require(StartFnLangItem) {
+ let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
Ok(id) => id,
Err(s) => { ccx.sess().fatal(s.as_slice()); }
};
// Use provided name
Some(name) => name.get().to_string(),
- _ => ccx.tcx.map.with_path(id, |mut path| {
+ _ => ccx.tcx().map.with_path(id, |mut path| {
if attr::contains_name(attrs, "no_mangle") {
// Don't mangle
path.last().unwrap().to_string()
pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
debug!("get_item_val(id=`{:?}`)", id);
- match ccx.item_vals.borrow().find_copy(&id) {
+ match ccx.item_vals().borrow().find_copy(&id) {
Some(v) => return v,
None => {}
}
let mut foreign = false;
- let item = ccx.tcx.map.get(id);
+ let item = ccx.tcx().map.get(id);
let val = match item {
ast_map::NodeItem(i) => {
let ty = ty::node_id_to_type(ccx.tcx(), i.id);
// information in the hash of the symbol
debug!("making {}", sym);
let (sym, is_local) = {
- match ccx.external_srcs.borrow().find(&i.id) {
+ match ccx.external_srcs().borrow().find(&i.id) {
Some(&did) => {
debug!("but found in other crate...");
(csearch::get_symbol(&ccx.sess().cstore,
// We need the translated value here, because for enums the
// LLVM type is not fully determined by the Rust type.
let (v, inlineable, _) = consts::const_expr(ccx, &**expr, is_local);
- ccx.const_values.borrow_mut().insert(id, v);
+ ccx.const_values().borrow_mut().insert(id, v);
let mut inlineable = inlineable;
unsafe {
let llty = llvm::LLVMTypeOf(v);
let g = sym.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty, buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty, buf)
});
- if !ccx.reachable.contains(&id) {
+ if !ccx.reachable().contains(&id) {
llvm::SetLinkage(g, llvm::InternalLinkage);
}
if !inlineable {
debug!("{} not inlined", sym);
- ccx.non_inlineable_statics.borrow_mut()
+ ccx.non_inlineable_statics().borrow_mut()
.insert(id);
}
- ccx.item_symbols.borrow_mut().insert(i.id, sym);
+ ccx.item_symbols().borrow_mut().insert(i.id, sym);
g
}
}
match ni.node {
ast::ForeignItemFn(..) => {
- let abi = ccx.tcx.map.get_foreign_abi(id);
+ let abi = ccx.tcx().map.get_foreign_abi(id);
let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
let name = foreign::link_name(&*ni);
foreign::register_foreign_item_fn(ccx, abi, ty,
};
assert!(args.len() != 0u);
let ty = ty::node_id_to_type(ccx.tcx(), id);
- let parent = ccx.tcx.map.get_parent(id);
- let enm = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let enm = ccx.tcx().map.expect_item(parent);
let sym = exported_name(ccx,
id,
ty,
}
Some(ctor_id) => ctor_id,
};
- let parent = ccx.tcx.map.get_parent(id);
- let struct_item = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let struct_item = ccx.tcx().map.expect_item(parent);
let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
let sym = exported_name(ccx,
id,
// foreign items (extern fns and extern statics) don't have internal
// linkage b/c that doesn't quite make sense. Otherwise items can
// have internal linkage if they're not reachable.
- if !foreign && !ccx.reachable.contains(&id) {
+ if !foreign && !ccx.reachable().contains(&id) {
llvm::SetLinkage(val, llvm::InternalLinkage);
}
- ccx.item_vals.borrow_mut().insert(id, val);
+ ccx.item_vals().borrow_mut().insert(id, val);
val
}
pub fn p2i(ccx: &CrateContext, v: ValueRef) -> ValueRef {
unsafe {
- return llvm::LLVMConstPtrToInt(v, ccx.int_type.to_ref());
+ return llvm::LLVMConstPtrToInt(v, ccx.int_type().to_ref());
}
}
encoder::EncodeParams {
diag: cx.sess().diagnostic(),
tcx: cx.tcx(),
- reexports2: &cx.exp_map2,
- item_symbols: &cx.item_symbols,
- non_inlineable_statics: &cx.non_inlineable_statics,
- link_meta: &cx.link_meta,
+ reexports2: cx.exp_map2(),
+ item_symbols: cx.item_symbols(),
+ non_inlineable_statics: cx.non_inlineable_statics(),
+ link_meta: cx.link_meta(),
cstore: &cx.sess().cstore,
encode_inlined_item: ie,
- reachable: &cx.reachable,
+ reachable: cx.reachable(),
}
}
let llmeta = C_bytes(cx, compressed.as_slice());
let llconst = C_struct(cx, [llmeta], false);
let name = format!("rust_metadata_{}_{}",
- cx.link_meta.crate_name,
- cx.link_meta.crate_hash);
+ cx.link_meta().crate_name,
+ cx.link_meta().crate_hash);
let llglobal = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(cx.metadata_llmod, val_ty(llconst).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf)
}
});
unsafe {
let metadata = write_metadata(&ccx, &krate);
if ccx.sess().trans_stats() {
println!("--- trans stats ---");
- println!("n_static_tydescs: {}", ccx.stats.n_static_tydescs.get());
- println!("n_glues_created: {}", ccx.stats.n_glues_created.get());
- println!("n_null_glues: {}", ccx.stats.n_null_glues.get());
- println!("n_real_glues: {}", ccx.stats.n_real_glues.get());
-
- println!("n_fns: {}", ccx.stats.n_fns.get());
- println!("n_monos: {}", ccx.stats.n_monos.get());
- println!("n_inlines: {}", ccx.stats.n_inlines.get());
- println!("n_closures: {}", ccx.stats.n_closures.get());
+ println!("n_static_tydescs: {}", ccx.stats().n_static_tydescs.get());
+ println!("n_glues_created: {}", ccx.stats().n_glues_created.get());
+ println!("n_null_glues: {}", ccx.stats().n_null_glues.get());
+ println!("n_real_glues: {}", ccx.stats().n_real_glues.get());
+
+ println!("n_fns: {}", ccx.stats().n_fns.get());
+ println!("n_monos: {}", ccx.stats().n_monos.get());
+ println!("n_inlines: {}", ccx.stats().n_inlines.get());
+ println!("n_closures: {}", ccx.stats().n_closures.get());
println!("fn stats:");
- ccx.stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
+ ccx.stats().fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
insns_b.cmp(&insns_a)
});
- for tuple in ccx.stats.fn_stats.borrow().iter() {
+ for tuple in ccx.stats().fn_stats.borrow().iter() {
match *tuple {
(ref name, ms, insns) => {
println!("{} insns, {} ms, {}", insns, ms, *name);
}
}
if ccx.sess().count_llvm_insns() {
- for (k, v) in ccx.stats.llvm_insns.borrow().iter() {
+ for (k, v) in ccx.stats().llvm_insns.borrow().iter() {
println!("{:7u} {}", *v, *k);
}
}
- let llcx = ccx.llcx;
- let link_meta = ccx.link_meta.clone();
- let llmod = ccx.llmod;
+ let llcx = ccx.llcx();
+ let link_meta = ccx.link_meta().clone();
+ let llmod = ccx.llmod();
- let mut reachable: Vec<String> = ccx.reachable.iter().filter_map(|id| {
- ccx.item_symbols.borrow().find(id).map(|s| s.to_string())
+ let mut reachable: Vec<String> = ccx.reachable().iter().filter_map(|id| {
+ ccx.item_symbols().borrow().find(id).map(|s| s.to_string())
}).collect();
// For the purposes of LTO, we add to the reachable set all of the upstream
// referenced from rt/rust_try.ll
reachable.push("rust_eh_personality_catch".to_string());
- let metadata_module = ccx.metadata_llmod;
- let formats = ccx.tcx.dependency_formats.borrow().clone();
+ let metadata_module = ccx.metadata_llmod();
+ let formats = ccx.tcx().dependency_formats.borrow().clone();
let no_builtins = attr::contains_name(krate.attrs.as_slice(), "no_builtins");
- (ccx.tcx, CrateTranslation {
+ (ccx.take_tcx(), CrateTranslation {
context: llcx,
module: llmod,
link: link_meta,
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
return llvm::LLVMGetUndef(eltty.to_ref());
}
unsafe {
let ccx = cx.fcx.ccx;
if cx.unreachable.get() {
- return llvm::LLVMGetUndef(ccx.int_type.to_ref());
+ return llvm::LLVMGetUndef(ccx.int_type().to_ref());
}
B(cx).atomic_load(pointer_val, order)
}
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
unsafe {
llvm::LLVMGetUndef(eltty.to_ref())
let retty = if ty.kind() == llvm::Integer {
ty.return_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
B(cx).count_insn("ret_undef");
llvm::LLVMGetUndef(retty.to_ref())
pub fn PtrDiff(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
- if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type.to_ref()); }
+ if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); }
B(cx).ptrdiff(lhs, rhs)
}
}
impl<'a> Builder<'a> {
pub fn new(ccx: &'a CrateContext) -> Builder<'a> {
Builder {
- llbuilder: ccx.builder.b,
+ llbuilder: ccx.raw_builder(),
ccx: ccx,
}
}
pub fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
- self.ccx.stats.n_llvm_insns.set(self.ccx
- .stats
+ self.ccx.stats().n_llvm_insns.set(self.ccx
+ .stats()
.n_llvm_insns
.get() + 1);
}
if self.ccx.sess().count_llvm_insns() {
base::with_insn_ctxt(|v| {
- let mut h = self.ccx.stats.llvm_insns.borrow_mut();
+ let mut h = self.ccx.stats().llvm_insns.borrow_mut();
// Build version of path with cycles removed.
self.count_insn("invoke");
debug!("Invoke {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let v = [min, max];
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.ccx.llcx,
+ llvm::LLVMMDNodeInContext(self.ccx.llcx(),
v.as_ptr(), v.len() as c_uint));
}
pub fn store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store");
unsafe {
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store.volatile");
unsafe {
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
self.count_insn("store.atomic");
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
else { llvm::False };
let argtys = inputs.iter().map(|v| {
- debug!("Asm Input Type: {:?}", self.ccx.tn.val_to_string(*v));
+ debug!("Asm Input Type: {:?}", self.ccx.tn().val_to_string(*v));
val_ty(*v)
}).collect::<Vec<_>>();
- debug!("Asm Output Type: {:?}", self.ccx.tn.type_to_string(output));
+ debug!("Asm Output Type: {:?}", self.ccx.tn().type_to_string(output));
let fty = Type::func(argtys.as_slice(), &output);
unsafe {
let v = llvm::LLVMInlineAsm(
self.count_insn("call");
debug!("Call {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let r = size % 32;
if r > 0 {
unsafe {
- args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx, r as c_uint)));
+ args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
}
}
*/
debug!("push_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
// FIXME(#2202) -- currently closure bodies have a parent
// region, which messes up the assertion below, since there
// this new AST scope had better be its immediate child.
let top_scope = self.top_ast_scope();
if top_scope.is_some() {
- assert_eq!(self.ccx.tcx.region_maps.opt_encl_scope(id), top_scope);
+ assert_eq!(self.ccx.tcx().region_maps.opt_encl_scope(id), top_scope);
}
self.push_scope(CleanupScope::new(AstScopeKind(id)));
id: ast::NodeId,
exits: [&'a Block<'a>, ..EXIT_MAX]) {
debug!("push_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
assert_eq!(Some(id), self.top_ast_scope());
self.push_scope(CleanupScope::new(LoopScopeKind(id, exits)));
*/
debug!("pop_and_trans_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
*/
debug!("pop_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
debug!("schedule_lifetime_end({:?}, val={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val));
+ self.ccx.tn().val_to_string(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
debug!("schedule_drop_mem({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
true);
debug!("schedule_drop_immediate({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_free_value({:?}, val={}, heap={:?})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
self.ccx.sess().bug(
format!("no cleanup scope {} found",
- self.ccx.tcx.map.node_to_string(cleanup_scope)).as_slice());
+ self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
}
fn schedule_clean_in_custom_scope(&self,
let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
None => {
- let mut personality = self.ccx.eh_personality.borrow_mut();
+ let mut personality = self.ccx.eh_personality().borrow_mut();
match *personality {
Some(llpersonality) => llpersonality,
None => {
pub fn get_or_create_declaration_if_unboxed_closure(ccx: &CrateContext,
closure_id: ast::DefId)
-> Option<ValueRef> {
- if !ccx.tcx.unboxed_closures.borrow().contains_key(&closure_id) {
+ if !ccx.tcx().unboxed_closures.borrow().contains_key(&closure_id) {
// Not an unboxed closure.
return None
}
- match ccx.unboxed_closure_vals.borrow().find(&closure_id) {
+ match ccx.unboxed_closure_vals().borrow().find(&closure_id) {
Some(llfn) => {
debug!("get_or_create_declaration_if_unboxed_closure(): found \
closure");
None => {}
}
- let function_type = ty::mk_unboxed_closure(&ccx.tcx,
+ let function_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let symbol = ccx.tcx.map.with_path(closure_id.node, |path| {
+ let symbol = ccx.tcx().map.with_path(closure_id.node, |path| {
mangle_internal_name_by_path_and_seq(path, "unboxed_closure")
});
debug!("get_or_create_declaration_if_unboxed_closure(): inserting new \
closure {} (type {})",
closure_id,
- ccx.tn.type_to_string(val_ty(llfn)));
- ccx.unboxed_closure_vals.borrow_mut().insert(closure_id, llfn);
+ ccx.tn().type_to_string(val_ty(llfn)));
+ ccx.unboxed_closure_vals().borrow_mut().insert(closure_id, llfn);
Some(llfn)
}
}
};
- match ccx.closure_bare_wrapper_cache.borrow().find(&fn_ptr) {
+ match ccx.closure_bare_wrapper_cache().borrow().find(&fn_ptr) {
Some(&llval) => return llval,
None => {}
}
decl_rust_fn(ccx, closure_ty, name.as_slice())
};
- ccx.closure_bare_wrapper_cache.borrow_mut().insert(fn_ptr, llfn);
+ ccx.closure_bare_wrapper_cache().borrow_mut().insert(fn_ptr, llfn);
// This is only used by statics inlined from a different crate.
if !is_local {
ty::ty_struct(..) | ty::ty_enum(..) | ty::ty_tup(..) |
ty::ty_unboxed_closure(..) => {
let llty = sizing_type_of(ccx, ty);
- llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type)
+ llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
}
_ => type_is_zero_size(ccx, ty)
}
self.llreturn.set(Some(unsafe {
"return".with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx, self.llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, buf)
})
}))
}
-> &'a Block<'a> {
unsafe {
let llbb = name.with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx,
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
buf)
});
pub fn ccx(&self) -> &'a CrateContext { self.fcx.ccx }
pub fn tcx(&self) -> &'a ty::ctxt {
- &self.fcx.ccx.tcx
+ self.fcx.ccx.tcx()
}
pub fn sess(&self) -> &'a Session { self.fcx.ccx.sess() }
}
pub fn val_to_string(&self, val: ValueRef) -> String {
- self.ccx().tn.val_to_string(val)
+ self.ccx().tn().val_to_string(val)
}
pub fn llty_str(&self, ty: Type) -> String {
- self.ccx().tn.type_to_string(ty)
+ self.ccx().tn().type_to_string(ty)
}
pub fn ty_to_string(&self, t: ty::t) -> String {
}
pub fn C_int(ccx: &CrateContext, i: int) -> ValueRef {
- C_integral(ccx.int_type, i as u64, true)
+ C_integral(ccx.int_type(), i as u64, true)
}
pub fn C_uint(ccx: &CrateContext, i: uint) -> ValueRef {
- C_integral(ccx.int_type, i as u64, false)
+ C_integral(ccx.int_type(), i as u64, false)
}
pub fn C_u8(ccx: &CrateContext, i: uint) -> ValueRef {
// our boxed-and-length-annotated strings.
pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
unsafe {
- match cx.const_cstr_cache.borrow().find(&s) {
+ match cx.const_cstr_cache().borrow().find(&s) {
Some(&llval) => return llval,
None => ()
}
- let sc = llvm::LLVMConstStringInContext(cx.llcx,
+ let sc = llvm::LLVMConstStringInContext(cx.llcx(),
s.get().as_ptr() as *const c_char,
s.get().len() as c_uint,
!null_terminated as Bool);
let gsym = token::gensym("str");
let g = format!("str{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(sc).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(sc).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::SetLinkage(g, llvm::InternalLinkage);
- cx.const_cstr_cache.borrow_mut().insert(s, g);
+ cx.const_cstr_cache().borrow_mut().insert(s, g);
g
}
}
let len = s.get().len();
let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s, false),
Type::i8p(cx).to_ref());
- C_named_struct(cx.tn.find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
+ C_named_struct(cx.tn().find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
}
}
let gsym = token::gensym("binary");
let g = format!("binary{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(lldata).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(lldata).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, lldata);
llvm::LLVMSetGlobalConstant(g, True);
pub fn C_struct(ccx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
unsafe {
- llvm::LLVMConstStructInContext(ccx.llcx,
+ llvm::LLVMConstStructInContext(ccx.llcx(),
elts.as_ptr(), elts.len() as c_uint,
packed as Bool)
}
pub fn C_bytes(ccx: &CrateContext, bytes: &[u8]) -> ValueRef {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
- return llvm::LLVMConstStringInContext(ccx.llcx, ptr, bytes.len() as c_uint, True);
+ return llvm::LLVMConstStringInContext(ccx.llcx(), ptr, bytes.len() as c_uint, True);
}
}
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={}, us={:?}, r={})",
- cx.tn.val_to_string(v), us, cx.tn.val_to_string(r));
+ cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
return r;
}
pub fn const_ptrcast(cx: &CrateContext, a: ValueRef, t: Type) -> ValueRef {
unsafe {
let b = llvm::LLVMConstPointerCast(a, t.ptr_to().to_ref());
- assert!(cx.const_globals.borrow_mut().insert(b as int, a));
+ assert!(cx.const_globals().borrow_mut().insert(b as int, a));
b
}
}
pub fn const_addr_of(cx: &CrateContext, cv: ValueRef, mutbl: ast::Mutability) -> ValueRef {
unsafe {
let gv = "const".with_c_str(|name| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(cv).to_ref(), name)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(cv).to_ref(), name)
});
llvm::LLVMSetInitializer(gv, cv);
llvm::LLVMSetGlobalConstant(gv,
}
fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef {
- let v = match cx.const_globals.borrow().find(&(v as int)) {
+ let v = match cx.const_globals().borrow().find(&(v as int)) {
Some(&v) => v,
None => v
};
pub fn get_const_val(cx: &CrateContext,
mut def_id: ast::DefId) -> (ValueRef, bool) {
- let contains_key = cx.const_values.borrow().contains_key(&def_id.node);
+ let contains_key = cx.const_values().borrow().contains_key(&def_id.node);
if !ast_util::is_local(def_id) || !contains_key {
if !ast_util::is_local(def_id) {
def_id = inline::maybe_instantiate_inline(cx, def_id);
}
- match cx.tcx.map.expect_item(def_id.node).node {
+ match cx.tcx().map.expect_item(def_id.node).node {
ast::ItemStatic(_, ast::MutImmutable, _) => {
trans_const(cx, ast::MutImmutable, def_id.node);
}
}
}
- (cx.const_values.borrow().get_copy(&def_id.node),
- !cx.non_inlineable_statics.borrow().contains(&def_id.node))
+ (cx.const_values().borrow().get_copy(&def_id.node),
+ !cx.non_inlineable_statics().borrow().contains(&def_id.node))
}
pub fn const_expr(cx: &CrateContext, e: &ast::Expr, is_local: bool) -> (ValueRef, bool, ty::t) {
let mut inlineable = inlineable;
let ety = ty::expr_ty(cx.tcx(), e);
let mut ety_adjusted = ty::expr_ty_adjusted(cx.tcx(), e);
- let opt_adj = cx.tcx.adjustments.borrow().find_copy(&e.id);
+ let opt_adj = cx.tcx().adjustments.borrow().find_copy(&e.id);
match opt_adj {
None => { }
Some(adj) => {
(expr::cast_enum, expr::cast_integral) => {
let repr = adt::represent_type(cx, basety);
let discr = adt::const_get_discrim(cx, &*repr, v);
- let iv = C_integral(cx.int_type, discr, false);
+ let iv = C_integral(cx.int_type(), discr, false);
let ety_cast = expr::cast_type_kind(cx.tcx(), ety);
match ety_cast {
expr::cast_integral => {
let g = base::get_item_val(ccx, id);
// At this point, get_item_val has already translated the
// constant's initializer to determine its LLVM type.
- let v = ccx.const_values.borrow().get_copy(&id);
+ let v = ccx.const_values().borrow().get_copy(&id);
llvm::LLVMSetInitializer(g, v);
if m != ast::MutMutable {
llvm::LLVMSetGlobalConstant(g, True);
use driver::config::NoDebugInfo;
use driver::session::Session;
use llvm;
-use llvm::{ContextRef, ModuleRef, ValueRef};
+use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
use llvm::{TargetData};
use llvm::mk_target_data;
use metadata::common::LinkMeta;
}
pub struct CrateContext {
- pub llmod: ModuleRef,
- pub llcx: ContextRef,
- pub metadata_llmod: ModuleRef,
- pub td: TargetData,
- pub tn: TypeNames,
- pub externs: RefCell<ExternMap>,
- pub item_vals: RefCell<NodeMap<ValueRef>>,
- pub exp_map2: resolve::ExportMap2,
- pub reachable: NodeSet,
- pub item_symbols: RefCell<NodeMap<String>>,
- pub link_meta: LinkMeta,
- pub drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
- pub tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
+ // NOTE(review): all fields are now private; access from outside this
+ // module goes through the accessor methods added on CrateContext below.
+ llmod: ModuleRef,
+ llcx: ContextRef,
+ metadata_llmod: ModuleRef,
+ td: TargetData,
+ tn: TypeNames,
+ externs: RefCell<ExternMap>,
+ item_vals: RefCell<NodeMap<ValueRef>>,
+ exp_map2: resolve::ExportMap2,
+ reachable: NodeSet,
+ item_symbols: RefCell<NodeMap<String>>,
+ link_meta: LinkMeta,
+ drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
+ tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
/// Set when running emit_tydescs to enforce that no more tydescs are
/// created.
- pub finished_tydescs: Cell<bool>,
+ finished_tydescs: Cell<bool>,
/// Track mapping of external ids to local items imported for inlining
- pub external: RefCell<DefIdMap<Option<ast::NodeId>>>,
+ external: RefCell<DefIdMap<Option<ast::NodeId>>>,
/// Backwards version of the `external` map (inlined items to where they
/// came from)
- pub external_srcs: RefCell<NodeMap<ast::DefId>>,
+ external_srcs: RefCell<NodeMap<ast::DefId>>,
/// A set of static items which cannot be inlined into other crates. This
/// will prevent in IIItem() structures from being encoded into the metadata
/// that is generated
- pub non_inlineable_statics: RefCell<NodeSet>,
+ non_inlineable_statics: RefCell<NodeSet>,
/// Cache instances of monomorphized functions
- pub monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
- pub monomorphizing: RefCell<DefIdMap<uint>>,
+ monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
+ monomorphizing: RefCell<DefIdMap<uint>>,
/// Cache generated vtables
- pub vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
+ vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
/// Cache of constant strings,
- pub const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
+ const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is an int, cast from a ValueRef holding a *T,
/// when we ptrcast, and we have to ptrcast during translation
/// of a [T] const because we form a slice, a [*T,int] pair, not
/// a pointer to an LLVM array type.
- pub const_globals: RefCell<HashMap<int, ValueRef>>,
+ const_globals: RefCell<HashMap<int, ValueRef>>,
/// Cache of emitted const values
- pub const_values: RefCell<NodeMap<ValueRef>>,
+ const_values: RefCell<NodeMap<ValueRef>>,
/// Cache of external const values
- pub extern_const_values: RefCell<DefIdMap<ValueRef>>,
+ extern_const_values: RefCell<DefIdMap<ValueRef>>,
- pub impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
+ impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
/// Cache of closure wrappers for bare fn's.
- pub closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
-
- pub lltypes: RefCell<HashMap<ty::t, Type>>,
- pub llsizingtypes: RefCell<HashMap<ty::t, Type>>,
- pub adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
- pub symbol_hasher: RefCell<Sha256>,
- pub type_hashcodes: RefCell<HashMap<ty::t, String>>,
- pub all_llvm_symbols: RefCell<HashSet<String>>,
- pub tcx: ty::ctxt,
- pub stats: Stats,
- pub int_type: Type,
- pub opaque_vec_type: Type,
- pub builder: BuilderRef_res,
+ closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
+
+ lltypes: RefCell<HashMap<ty::t, Type>>,
+ llsizingtypes: RefCell<HashMap<ty::t, Type>>,
+ adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
+ symbol_hasher: RefCell<Sha256>,
+ type_hashcodes: RefCell<HashMap<ty::t, String>>,
+ all_llvm_symbols: RefCell<HashSet<String>>,
+ tcx: ty::ctxt,
+ stats: Stats,
+ int_type: Type,
+ opaque_vec_type: Type,
+ builder: BuilderRef_res,
/// Holds the LLVM values for closure IDs.
- pub unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
+ unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
- pub dbg_cx: Option<debuginfo::CrateDebugContext>,
+ dbg_cx: Option<debuginfo::CrateDebugContext>,
- pub eh_personality: RefCell<Option<ValueRef>>,
+ eh_personality: RefCell<Option<ValueRef>>,
intrinsics: RefCell<HashMap<&'static str, ValueRef>>,
}
ccx.opaque_vec_type = Type::opaque_vec(&ccx);
let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
- str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type], false);
- ccx.tn.associate_type("str_slice", &str_slice_ty);
+ str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type()], false);
+ ccx.tn().associate_type("str_slice", &str_slice_ty);
- ccx.tn.associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+ ccx.tn().associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
if ccx.sess().count_llvm_insns() {
base::init_insn_ctxt()
&self.tcx
}
+ /// Consumes the CrateContext, yielding ownership of its `ty::ctxt`.
+ pub fn take_tcx(self) -> ty::ctxt {
+ self.tcx
+ }
+
pub fn sess<'a>(&'a self) -> &'a Session {
&self.tcx.sess
}
Builder::new(self)
}
+ /// Returns the raw LLVM `BuilderRef` held by this context's
+ /// `BuilderRef_res`, for FFI calls that need the bare handle
+ /// (e.g. `LLVMSetCurrentDebugLocation`).
+ pub fn raw_builder<'a>(&'a self) -> BuilderRef {
+ self.builder.b
+ }
+
pub fn tydesc_type(&self) -> Type {
self.tn.find_type("tydesc").unwrap()
}
let ref cfg = self.sess().targ_cfg;
cfg.os != abi::OsiOS || cfg.arch != abi::Arm
}
+
+
+ // Accessors for the now-private CrateContext fields. `Cell`/`RefCell`
+ // fields are exposed as a reference to the cell itself so callers keep
+ // the usual borrow()/borrow_mut()/get()/set() discipline; plain fields
+ // are returned by value (LLVM handles, Types) or by shared reference.
+ pub fn llmod(&self) -> ModuleRef {
+ self.llmod
+ }
+
+ pub fn llcx(&self) -> ContextRef {
+ self.llcx
+ }
+
+ pub fn metadata_llmod(&self) -> ModuleRef {
+ self.metadata_llmod
+ }
+
+ pub fn td<'a>(&'a self) -> &'a TargetData {
+ &self.td
+ }
+
+ pub fn tn<'a>(&'a self) -> &'a TypeNames {
+ &self.tn
+ }
+
+ pub fn externs<'a>(&'a self) -> &'a RefCell<ExternMap> {
+ &self.externs
+ }
+
+ pub fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.item_vals
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.link_meta
+ }
+
+ pub fn drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, ValueRef>> {
+ &self.drop_glues
+ }
+
+ pub fn tydescs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<tydesc_info>>> {
+ &self.tydescs
+ }
+
+ pub fn finished_tydescs<'a>(&'a self) -> &'a Cell<bool> {
+ &self.finished_tydescs
+ }
+
+ pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
+ &self.external
+ }
+
+ pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<ast::DefId>> {
+ &self.external_srcs
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.non_inlineable_statics
+ }
+
+ pub fn monomorphized<'a>(&'a self) -> &'a RefCell<HashMap<MonoId, ValueRef>> {
+ &self.monomorphized
+ }
+
+ pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<uint>> {
+ &self.monomorphizing
+ }
+
+ pub fn vtables<'a>(&'a self) -> &'a RefCell<HashMap<(ty::t, MonoId), ValueRef>> {
+ &self.vtables
+ }
+
+ pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<HashMap<InternedString, ValueRef>> {
+ &self.const_cstr_cache
+ }
+
+ pub fn const_globals<'a>(&'a self) -> &'a RefCell<HashMap<int, ValueRef>> {
+ &self.const_globals
+ }
+
+ pub fn const_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.const_values
+ }
+
+ pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.extern_const_values
+ }
+
+ pub fn impl_method_cache<'a>(&'a self)
+ -> &'a RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>> {
+ &self.impl_method_cache
+ }
+
+ pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<HashMap<ValueRef, ValueRef>> {
+ &self.closure_bare_wrapper_cache
+ }
+
+ pub fn lltypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.lltypes
+ }
+
+ pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.llsizingtypes
+ }
+
+ pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<adt::Repr>>> {
+ &self.adt_reprs
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.symbol_hasher
+ }
+
+ pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.type_hashcodes
+ }
+
+ pub fn all_llvm_symbols<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.all_llvm_symbols
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.stats
+ }
+
+ pub fn int_type(&self) -> Type {
+ self.int_type
+ }
+
+ pub fn opaque_vec_type(&self) -> Type {
+ self.opaque_vec_type
+ }
+
+ pub fn unboxed_closure_vals<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.unboxed_closure_vals
+ }
+
+ pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext> {
+ &self.dbg_cx
+ }
+
+ pub fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
+ &self.eh_personality
+ }
+
+ // Deliberately private: the intrinsics cache is populated lazily by
+ // declare_intrinsic() within this crate.
+ fn intrinsics<'a>(&'a self) -> &'a RefCell<HashMap<&'static str, ValueRef>> {
+ &self.intrinsics
+ }
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {
($name:expr fn() -> $ret:expr) => (
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name, Type::func([], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
);
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name,
Type::func([$($arg),*], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
let f = base::decl_cdecl_fn(ccx, stringify!($cname),
Type::func([$($arg),*], &$ret),
ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
#[allow(dead_code)] // useful for debugging
+ /// Debug rendering of this datum as `Datum(<llvm value>, <type>, <kind>)`.
pub fn to_string(&self, ccx: &CrateContext) -> String {
format!("Datum({}, {}, {:?})",
- ccx.tn.val_to_string(self.val),
+ ccx.tn().val_to_string(self.val),
ty_to_string(ccx.tcx(), self.ty),
self.kind)
}
// First, find out the 'real' def_id of the type. Items inlined from
// other crates have to be mapped back to their source.
let source_def_id = if def_id.krate == ast::LOCAL_CRATE {
- match cx.external_srcs.borrow().find_copy(&def_id.node) {
+ match cx.external_srcs().borrow().find_copy(&def_id.node) {
Some(source_def_id) => {
// The given def_id identifies the inlined copy of a
// type definition, let's take the source of the copy.
// Get the crate hash as first part of the identifier.
let crate_hash = if source_def_id.krate == ast::LOCAL_CRATE {
- cx.link_meta.crate_hash.clone()
+ cx.link_meta().crate_hash.clone()
} else {
cx.sess().cstore.get_crate_hash(source_def_id.krate)
};
/// Create any deferred debug metadata nodes
pub fn finalize(cx: &CrateContext) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
if cx.sess().targ_cfg.os == abi::OsMacos ||
cx.sess().targ_cfg.os == abi::OsiOS {
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 2));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 2));
} else {
// FIXME(#13611) this is a kludge fix because the Linux bots have
// gdb 7.4 which doesn't understand dwarf4, we should
// do something more graceful here.
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 3));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 3));
}
// Prevent bitcode readers from deleting the debug info.
"Debug Info Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s,
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s,
llvm::LLVMRustDebugMetadataVersion));
};
}
pub fn create_global_var_metadata(cx: &CrateContext,
node_id: ast::NodeId,
global: ValueRef) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
// crate should already contain debuginfo for it. More importantly, the
// global might not even exist in un-inlined form anywhere which would lead
// to a linker errors.
- if cx.external_srcs.borrow().contains_key(&node_id) {
+ if cx.external_srcs().borrow().contains_key(&node_id) {
return;
}
- let var_item = cx.tcx.map.get(node_id);
+ let var_item = cx.tcx().map.get(node_id);
let (ident, span) = match var_item {
ast_map::NodeItem(item) => {
}
let cx = bcx.ccx();
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, path1| {
let var_ident = path1.node;
let cx = bcx.ccx();
- let ast_item = cx.tcx.map.find(node_id);
+ let ast_item = cx.tcx().map.find(node_id);
let variable_ident = match ast_item {
None => {
let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
let aops = unsafe {
- [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type.to_ref())]
+ [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type().to_ref())]
};
// Regardless of the actual type (`T`) we're always passed the stack slot (alloca)
// for the binding. For ByRef bindings that's a `T*` but for ByMove bindings we
let fcx = bcx.fcx;
let cx = fcx.ccx;
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path1| {
let empty_generics = ast_util::empty_generics();
- let fnitem = cx.tcx.map.get(fn_ast_id);
+ let fnitem = cx.tcx().map.get(fn_ast_id);
let (ident, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
ast_map::NodeItem(ref item) => {
// externally visible or by being inlined into something externally visible).
// It might better to use the `exported_items` set from `driver::CrateAnalysis`
// in the future, but (atm) this set is not available in the translation pass.
- !cx.reachable.contains(&node_id)
+ !cx.reachable().contains(&node_id)
}
#[allow(non_snake_case)]
});
+/// Builds a fallback C string from the crate name recorded in the link
+/// metadata.
fn fallback_path(cx: &CrateContext) -> CString {
- cx.link_meta.crate_name.as_slice().to_c_str()
+ cx.link_meta().crate_name.as_slice().to_c_str()
}
}
match scope_map.borrow().find_copy(&node_id) {
Some(scope_metadata) => scope_metadata,
None => {
- let node = fcx.ccx.tcx.map.get(node_id);
+ let node = fcx.ccx.tcx().map.get(node_id);
fcx.ccx.sess().span_bug(span,
format!("debuginfo: Could not find scope info for node {:?}",
def_id: ast::DefId)
-> token::InternedString {
let name = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.get_path_elem(def_id.node).name()
+ cx.tcx().map.get_path_elem(def_id.node).name()
} else {
- csearch::get_item_path(&cx.tcx, def_id).last().unwrap().name()
+ csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
token::get_name(name)
content_llvm_type: Type)
-> bool {
member_llvm_types.len() == 5 &&
- member_llvm_types[0] == cx.int_type &&
+ member_llvm_types[0] == cx.int_type() &&
member_llvm_types[1] == Type::generic_glue_fn(cx).ptr_to() &&
member_llvm_types[2] == Type::i8(cx).ptr_to() &&
member_llvm_types[3] == Type::i8(cx).ptr_to() &&
-> bool {
member_llvm_types.len() == 2 &&
member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
- member_llvm_types[1] == cx.int_type
+ member_llvm_types[1] == cx.int_type()
}
}
};
unsafe {
- llvm::LLVMSetCurrentDebugLocation(cx.builder.b, metadata_node);
+ llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
}
debug_context(cx).current_debug_location.set(debug_location);
#[inline]
+/// Shorthand for unwrapping `cx.dbg_cx()`; fails (via `get_ref` on a `None`)
+/// if debuginfo was not initialized for this crate context.
fn debug_context<'a>(cx: &'a CrateContext) -> &'a CrateDebugContext {
- let debug_context: &'a CrateDebugContext = cx.dbg_cx.get_ref();
+ let debug_context: &'a CrateDebugContext = cx.dbg_cx().get_ref();
debug_context
}
#[inline]
#[allow(non_snake_case)]
+/// Returns the LLVM `DIBuilderRef` for the crate's debug context; fails
+/// (via `get_ref` on a `None`) if debuginfo was not initialized.
fn DIB(cx: &CrateContext) -> DIBuilderRef {
- cx.dbg_cx.get_ref().builder
+ cx.dbg_cx().get_ref().builder
}
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
}
+/// Reports a compiler bug (via `span_bug`) if the type table contains no
+/// entry for `node_id` — a sanity check before emitting debuginfo.
fn assert_type_for_node_id(cx: &CrateContext, node_id: ast::NodeId, error_span: Span) {
- if !cx.tcx.node_types.borrow().contains_key(&(node_id as uint)) {
+ if !cx.tcx().node_types.borrow().contains_key(&(node_id as uint)) {
cx.sess().span_bug(error_span, "debuginfo: Could not find type for node id!");
}
}
-> (DIScope, Span) {
let containing_scope = namespace_for_item(cx, def_id).scope;
let definition_span = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.span(def_id.node)
+ cx.tcx().map.span(def_id.node)
} else {
// For external items there is no span information
codemap::DUMMY_SP
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
scope_metadata: DIScope,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
}
+/// Returns the crate name from the link metadata, used as the root
+/// namespace component (see `namespace_for_item`).
fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str {
- cx.link_meta.crate_name.as_slice()
+ cx.link_meta().crate_name.as_slice()
}
fn namespace_for_item(cx: &CrateContext, def_id: ast::DefId) -> Rc<NamespaceTreeNode> {
impl Dest {
+ /// Debug rendering of the destination: `SaveIn(<llvm value>)` or
+ /// `Ignore`.
pub fn to_string(&self, ccx: &CrateContext) -> String {
match *self {
- SaveIn(v) => format!("SaveIn({})", ccx.tn.val_to_string(v)),
+ SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
Ignore => "Ignore".to_string()
}
}
let mut bcx = bcx;
// Check for overloaded index.
- let method_ty = ccx.tcx
+ let method_ty = ccx.tcx()
.method_map
.borrow()
.find(&method_call)
let ix_size = machine::llbitsize_of_real(bcx.ccx(),
val_ty(ix_val));
let int_size = machine::llbitsize_of_real(bcx.ccx(),
- ccx.int_type);
+ ccx.int_type());
let ix_val = {
if ix_size < int_size {
if ty::type_is_signed(expr_ty(bcx, idx)) {
- SExt(bcx, ix_val, ccx.int_type)
- } else { ZExt(bcx, ix_val, ccx.int_type) }
+ SExt(bcx, ix_val, ccx.int_type())
+ } else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
- Trunc(bcx, ix_val, ccx.int_type)
+ Trunc(bcx, ix_val, ccx.int_type())
} else {
ix_val
}
let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
PointerCast(bcx, val, pty)
} else {
- match bcx.ccx().extern_const_values.borrow().find(&did) {
+ match bcx.ccx().extern_const_values().borrow().find(&did) {
None => {} // Continue.
Some(llval) => {
return *llval;
&bcx.ccx().sess().cstore,
did);
let llval = symbol.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(bcx.ccx().llmod,
+ llvm::LLVMAddGlobal(bcx.ccx().llmod(),
llty.to_ref(),
buf)
});
- bcx.ccx().extern_const_values.borrow_mut()
+ bcx.ccx().extern_const_values().borrow_mut()
.insert(did, llval);
llval
}
// Otherwise, we should be in the RvalueDpsExpr path.
assert!(
op == ast::UnDeref ||
- !ccx.tcx.method_map.borrow().contains_key(&method_call));
+ !ccx.tcx().method_map.borrow().contains_key(&method_call));
let un_ty = expr_ty(bcx, expr);
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx.method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
+ assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
match op {
ast::BiAnd => {
let mut bcx = bcx;
// Check for overloaded deref.
- let method_ty = ccx.tcx.method_map.borrow()
+ let method_ty = ccx.tcx().method_map.borrow()
.find(&method_call).map(|method| method.ty);
let datum = match method_ty {
Some(method_ty) => {
};
unsafe {
let g1 = ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty2.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty2.to_ref(), buf)
});
llvm::SetLinkage(g1, linkage);
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(ident.get());
let g2 = real_name.with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
}
None => unsafe {
ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
})
}
}
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let llfn = base::get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name,
cc,
llfn_ty,
llfn={}, \
llretptr={})",
callee_ty.repr(tcx),
- ccx.tn.val_to_string(llfn),
- ccx.tn.val_to_string(llretptr));
+ ccx.tn().val_to_string(llfn),
+ ccx.tn().val_to_string(llretptr));
let (fn_abi, fn_sig) = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref fn_ty) => (fn_ty.abi, fn_ty.sig.clone()),
debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
i,
- ccx.tn.val_to_string(llarg_rust),
+ ccx.tn().val_to_string(llarg_rust),
rust_indirect,
- ccx.tn.type_to_string(arg_tys[i].ty));
+ ccx.tn().type_to_string(arg_tys[i].ty));
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
}
debug!("llarg_rust={} (after indirection)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Check whether we need to do any casting
match arg_tys[i].cast {
}
debug!("llarg_rust={} (after casting)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_tys[i].is_indirect();
};
debug!("argument {}, llarg_foreign={}",
- i, ccx.tn.val_to_string(llarg_foreign));
+ i, ccx.tn().val_to_string(llarg_foreign));
// fill padding with undef value
match arg_tys[i].pad {
None => fn_type.ret_ty.ty
};
- debug!("llretptr={}", ccx.tn.val_to_string(llretptr));
- debug!("llforeign_retval={}", ccx.tn.val_to_string(llforeign_retval));
- debug!("llrust_ret_ty={}", ccx.tn.type_to_string(llrust_ret_ty));
- debug!("llforeign_ret_ty={}", ccx.tn.type_to_string(llforeign_ret_ty));
+ debug!("llretptr={}", ccx.tn().val_to_string(llretptr));
+ debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval));
+ debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty));
+ debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty));
if llrust_ret_ty == llforeign_ret_ty {
base::store_ty(bcx, llforeign_retval, llretptr, fn_sig.output)
_ => {}
}
- ccx.item_symbols.borrow_mut().insert(foreign_item.id,
+ ccx.item_symbols().borrow_mut().insert(foreign_item.id,
lname.get().to_string());
}
}
let llfn = base::decl_fn(ccx, name, cconv, llfn_ty, ty::mk_nil());
add_argument_attributes(&tys, llfn);
debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
- ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
add_argument_attributes(&tys, llfn);
debug!("register_rust_fn_with_foreign_abi(node_id={:?}, llfn_ty={}, llfn={})",
- node_id, ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let t = ty::node_id_to_type(tcx, id).subst(
ccx.tcx(), ¶m_substs.substs);
- let ps = ccx.tcx.map.with_path(id, |path| {
+ let ps = ccx.tcx().map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
link::mangle(path.chain(abi.move_iter()), hash)
});
_ => {
ccx.sess().bug(format!("build_rust_fn: extern fn {} has ty {}, \
expected a bare fn ty",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
t.repr(tcx)).as_slice());
}
};
debug!("build_rust_fn: path={} id={} t={}",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
id, t.repr(tcx));
let llfn = base::decl_internal_rust_fn(ccx, t, ps.as_slice());
let tcx = ccx.tcx();
debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={})",
- ccx.tn.val_to_string(llrustfn),
- ccx.tn.val_to_string(llwrapfn),
+ ccx.tn().val_to_string(llrustfn),
+ ccx.tn().val_to_string(llwrapfn),
t.repr(ccx.tcx()));
// Avoid all the Rust generation stuff and just generate raw
let the_block =
"the block".with_c_str(
- |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llwrapfn, s));
+ |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn, s));
let builder = ccx.builder();
builder.position_at_end(the_block);
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={}",
- ccx.tn.val_to_string(llforeign_outptr));
+ ccx.tn().val_to_string(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={} (casted)",
- ccx.tn.val_to_string(llrust_retptr));
+ ccx.tn().val_to_string(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
allocad={}, \
llrust_ret_ty={}, \
return_ty={}",
- ccx.tn.val_to_string(slot),
- ccx.tn.type_to_string(llrust_ret_ty),
+ ccx.tn().val_to_string(slot),
+ ccx.tn().type_to_string(llrust_ret_ty),
tys.fn_sig.output.repr(tcx));
llrust_args.push(slot);
return_alloca = Some(slot);
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
debug!("llforeign_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llforeign_arg));
+ i, ccx.tn().val_to_string(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
};
debug!("llrust_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llrust_arg));
+ i, ccx.tn().val_to_string(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
- debug!("calling llrustfn = {}, t = {}", ccx.tn.val_to_string(llrustfn), t.repr(ccx.tcx()));
+ debug!("calling llrustfn = {}, t = {}",
+ ccx.tn().val_to_string(llrustfn), t.repr(ccx.tcx()));
let attributes = base::get_fn_llvm_attributes(ccx, t);
let llrust_ret_val = builder.call(llrustfn, llrust_args.as_slice(), Some(attributes));
fn_ty={} -> {}, \
ret_def={}",
ty.repr(ccx.tcx()),
- ccx.tn.types_to_str(llsig.llarg_tys.as_slice()),
- ccx.tn.type_to_string(llsig.llret_ty),
- ccx.tn.types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
- ccx.tn.type_to_string(fn_ty.ret_ty.ty),
+ ccx.tn().types_to_str(llsig.llarg_tys.as_slice()),
+ ccx.tn().type_to_string(llsig.llret_ty),
+ ccx.tn().types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
+ ccx.tn().type_to_string(fn_ty.ret_ty.ty),
ret_def);
ForeignTypes {
debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
let t = get_drop_glue_type(ccx, t);
debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
- match ccx.drop_glues.borrow().find(&t) {
+ match ccx.drop_glues().borrow().find(&t) {
Some(&glue) => return glue,
_ => { }
}
let llfnty = Type::glue_fn(ccx, llty);
let glue = declare_generic_glue(ccx, t, llfnty, "drop");
- ccx.drop_glues.borrow_mut().insert(t, glue);
+ ccx.drop_glues().borrow_mut().insert(t, glue);
make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
pub fn declare_tydesc(ccx: &CrateContext, t: ty::t) -> tydesc_info {
// If emit_tydescs already ran, then we shouldn't be creating any new
// tydescs.
- assert!(!ccx.finished_tydescs.get());
+ assert!(!ccx.finished_tydescs().get());
let llty = type_of(ccx, t);
debug!("+++ declare_tydesc {} {}", ppaux::ty_to_string(ccx.tcx(), t), name);
let gvar = name.as_slice().with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(ccx.llmod, ccx.tydesc_type().to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), ccx.tydesc_type().to_ref(), buf)
}
});
note_unique_llvm_symbol(ccx, name);
let bcx = init_function(&fcx, false, ty::mk_nil());
llvm::SetLinkage(llfn, llvm::InternalLinkage);
- ccx.stats.n_glues_created.set(ccx.stats.n_glues_created.get() + 1u);
+ ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1u);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
pub fn emit_tydescs(ccx: &CrateContext) {
let _icx = push_ctxt("emit_tydescs");
// As of this point, allow no more tydescs to be created.
- ccx.finished_tydescs.set(true);
+ ccx.finished_tydescs().set(true);
let glue_fn_ty = Type::generic_glue_fn(ccx).ptr_to();
- for (_, ti) in ccx.tydescs.borrow().iter() {
+ for (_, ti) in ccx.tydescs().borrow().iter() {
// Each of the glue functions needs to be cast to a generic type
// before being put into the tydesc because we only have a singleton
// tydesc type. Then we'll recast each function to its real type when
let drop_glue = unsafe {
llvm::LLVMConstPointerCast(get_drop_glue(ccx, ti.ty), glue_fn_ty.to_ref())
};
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() + 1);
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() + 1);
let visit_glue =
match ti.visit_glue.get() {
None => {
- ccx.stats.n_null_glues.set(ccx.stats.n_null_glues.get() +
+ ccx.stats().n_null_glues.set(ccx.stats().n_null_glues.get() +
1u);
C_null(glue_fn_ty)
}
Some(v) => {
unsafe {
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() +
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() +
1);
llvm::LLVMConstPointerCast(v, glue_fn_ty.to_ref())
}
pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
-> ast::DefId {
let _icx = push_ctxt("maybe_instantiate_inline");
- match ccx.external.borrow().find(&fn_id) {
+ match ccx.external().borrow().find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline({}): already inline as node id {}",
|a,b,c,d| astencode::decode_inlined_item(a, b, c, d));
return match csearch_result {
csearch::not_found => {
- ccx.external.borrow_mut().insert(fn_id, None);
+ ccx.external().borrow_mut().insert(fn_id, None);
fn_id
}
csearch::found(ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
trans_item(ccx, &*item);
// We're bringing an external global into this crate, but we don't
local_def(item.id)
}
csearch::found(ast::IIForeign(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
local_def(item.id)
}
csearch::found_parent(parent_id, ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(parent_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, parent_id);
+ ccx.external().borrow_mut().insert(parent_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
let mut my_id = 0;
match item.node {
let vs_there = ty::enum_variants(ccx.tcx(), parent_id);
for (here, there) in vs_here.iter().zip(vs_there.iter()) {
if there.id == fn_id { my_id = here.id.node; }
- ccx.external.borrow_mut().insert(there.id, Some(here.id.node));
+ ccx.external().borrow_mut().insert(there.id, Some(here.id.node));
}
}
ast::ItemStruct(ref struct_def, _) => {
match struct_def.ctor_id {
None => {}
Some(ctor_id) => {
- ccx.external.borrow_mut().insert(fn_id, Some(ctor_id));
+ ccx.external().borrow_mut().insert(fn_id, Some(ctor_id));
my_id = ctor_id;
}
}
match impl_item {
ast::ProvidedInlinedTraitItem(mth) |
ast::RequiredInlinedTraitItem(mth) => {
- ccx.external.borrow_mut().insert(fn_id, Some(mth.id));
- ccx.external_srcs.borrow_mut().insert(mth.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(mth.id));
+ ccx.external_srcs().borrow_mut().insert(mth.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
}
}
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
- for transmute_restriction in ccx.tcx
+ for transmute_restriction in ccx.tcx()
.transmute_restrictions
.borrow()
.iter() {
let hash = ty::hash_crate_independent(
ccx.tcx(),
*substs.types.get(FnSpace, 0),
- &ccx.link_meta.crate_hash);
+ &ccx.link_meta().crate_hash);
// NB: This needs to be kept in lockstep with the TypeId struct in
// the intrinsic module
C_named_struct(llret_ty, [C_u64(ccx, hash)])
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
+ let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
let name = if allow_overlap {
if int_size == 32 {
"llvm.memmove.p0i8.p0i8.i32"
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
+ let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
"llvm.memset.p0i8.i32"
} else {
"llvm.memset.p0i8.i64"
impl LlvmRepr for Type {
    /// Pretty-prints this LLVM type via the crate's type-name table.
    fn llrepr(&self, ccx: &CrateContext) -> String {
        // Resolved leftover patch markers: use the `tn()` accessor,
        // matching the accessor migration applied throughout this file.
        ccx.tn().type_to_string(*self)
    }
}
impl LlvmRepr for ValueRef {
    /// Pretty-prints this LLVM value via the crate's type-name table.
    fn llrepr(&self, ccx: &CrateContext) -> String {
        // Resolved leftover patch markers: use the `tn()` accessor,
        // matching the accessor migration applied throughout this file.
        ccx.tn().val_to_string(*self)
    }
}
// Returns the number of bytes clobbered by a Store to this type.
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMStoreSizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABISizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// below.
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- let nbits = llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64;
+ let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64;
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
/// Returns the "real" size of the type in bits: the number of bits
/// actually needed to represent a value, before rounding up to a whole
/// number of bytes or to ABI alignment.
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
    unsafe {
        llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64
    }
}
// space to be consumed.
pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
if llbitsize_of_real(cx, ty) == 0 {
- unsafe { llvm::LLVMConstInt(cx.int_type.to_ref(), 1, False) }
+ unsafe { llvm::LLVMConstInt(cx.int_type().to_ref(), 1, False) }
} else {
llsize_of(cx, ty)
}
// allocations inside a stack frame, which LLVM has a free hand in.
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// and similar ABI-mandated things.
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABIAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
pub fn llalign_of(cx: &CrateContext, ty: Type) -> ValueRef {
unsafe {
return llvm::LLVMConstIntCast(
- llvm::LLVMAlignOf(ty.to_ref()), cx.int_type.to_ref(), False);
+ llvm::LLVMAlignOf(ty.to_ref()), cx.int_type().to_ref(), False);
}
}
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: uint) -> u64 {
unsafe {
- return llvm::LLVMOffsetOfElement(cx.td.lltd, struct_ty.to_ref(), element as u32) as u64;
+ return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(), element as u32) as u64;
}
}
let vtable_key = MethodCall::expr(expr_id);
let vtbls = resolve_vtables_in_fn_ctxt(
bcx.fcx,
- ccx.tcx.vtable_map.borrow().get(&vtable_key));
+ ccx.tcx().vtable_map.borrow().get(&vtable_key));
match *vtbls.get_self().unwrap().get(0) {
typeck::vtable_static(impl_did, ref rcvr_substs, ref rcvr_origins) => {
fn method_with_name(ccx: &CrateContext, impl_id: ast::DefId, name: ast::Name)
-> ast::DefId {
- match ccx.impl_method_cache.borrow().find_copy(&(impl_id, name)) {
+ match ccx.impl_method_cache().borrow().find_copy(&(impl_id, name)) {
Some(m) => return m,
None => {}
}
- let impl_items = ccx.tcx.impl_items.borrow();
+ let impl_items = ccx.tcx().impl_items.borrow();
let impl_items =
impl_items.find(&impl_id)
.expect("could not find impl while translating");
.find(|&did| {
match *did {
ty::MethodTraitItemId(did) => {
- ty::impl_or_trait_item(&ccx.tcx,
+ ty::impl_or_trait_item(ccx.tcx(),
did).ident()
.name ==
name
}).expect("could not find method while \
translating");
- ccx.impl_method_cache.borrow_mut().insert((impl_id, name),
+ ccx.impl_method_cache().borrow_mut().insert((impl_id, name),
meth_did.def_id());
meth_did.def_id()
}
// Check the cache.
let hash_id = (self_ty, monomorphize::make_vtable_id(ccx, origins.get(0)));
- match ccx.vtables.borrow().find(&hash_id) {
+ match ccx.vtables().borrow().find(&hash_id) {
Some(&val) => { return val }
None => { }
}
let drop_glue = glue::get_drop_glue(ccx, self_ty);
let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
- ccx.vtables.borrow_mut().insert(hash_id, vtable);
+ ccx.vtables().borrow_mut().insert(hash_id, vtable);
vtable
}
let tbl = C_struct(ccx, components.as_slice(), false);
let sym = token::gensym("vtable");
let vt_gvar = format!("vtable{}", sym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, val_ty(tbl).to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), val_ty(tbl).to_ref(), buf)
});
llvm::LLVMSetInitializer(vt_gvar, tbl);
llvm::LLVMSetGlobalConstant(vt_gvar, llvm::True);
self_ty: ty::t) -> ValueRef {
let ccx = bcx.ccx();
let origins = {
- let vtable_map = ccx.tcx.vtable_map.borrow();
+ let vtable_map = ccx.tcx().vtable_map.borrow();
// This trait cast might be because of implicit coercion
- let adjs = ccx.tcx.adjustments.borrow();
+ let adjs = ccx.tcx().adjustments.borrow();
let adjust = adjs.find(&id);
let method_call = if adjust.is_some() && ty::adjust_is_object(adjust.unwrap()) {
MethodCall::autoobject(id)
params: real_substs.types.clone()
};
- match ccx.monomorphized.borrow().find(&hash_id) {
+ match ccx.monomorphized().borrow().find(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ty::item_path_str(ccx.tcx(), fn_id));
let map_node = session::expect(
ccx.sess(),
- ccx.tcx.map.find(fn_id.node),
+ ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
match map_node {
ast_map::NodeForeignItem(_) => {
- if ccx.tcx.map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
+ if ccx.tcx().map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), true);
}
debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx()));
let mono_ty = llitem_ty.subst(ccx.tcx(), real_substs);
- ccx.stats.n_monos.set(ccx.stats.n_monos.get() + 1);
+ ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
- let mut monomorphizing = ccx.monomorphizing.borrow_mut();
+ let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.find(&fn_id) {
Some(&d) => d, None => 0
};
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
- ccx.sess().span_fatal(ccx.tcx.map.span(fn_id.node),
+ ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
mono_ty.hash(&mut state);
hash = format!("h{}", state.result());
- ccx.tcx.map.with_path(fn_id.node, |path| {
+ ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, hash.as_slice())
})
};
decl_internal_rust_fn(ccx, mono_ty, s.as_slice())
};
- ccx.monomorphized.borrow_mut().insert(hash_id.take().unwrap(), lldecl);
+ ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
}
}
ast_map::NodeVariant(v) => {
- let parent = ccx.tcx.map.get_parent(fn_id.node);
+ let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ty::enum_variants(ccx.tcx(), local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
}
};
- ccx.monomorphizing.borrow_mut().insert(fn_id, depth);
+ ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id));
(lldecl, true)
let sym = mangle_internal_name_by_path_and_seq(
ast_map::Values([].iter()).chain(None), "get_disr");
- let fn_ty = ty::mk_ctor_fn(&ccx.tcx, ast::DUMMY_NODE_ID,
+ let fn_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[opaqueptrty], ty::mk_u64());
let llfdecl = decl_internal_rust_fn(ccx,
fn_ty,
format!("VecTypes {{unit_ty={}, llunit_ty={}, \
llunit_size={}, llunit_alloc_size={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
- ccx.tn.type_to_string(self.llunit_ty),
- ccx.tn.val_to_string(self.llunit_size),
+ ccx.tn().type_to_string(self.llunit_ty),
+ ccx.tn().val_to_string(self.llunit_size),
self.llunit_alloc_size)
}
}
let loop_counter = {
// i = 0
- let i = alloca(loop_bcx, bcx.ccx().int_type, "__i");
+ let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
Store(loop_bcx, C_uint(bcx.ccx(), 0), i);
Br(loop_bcx, cond_bcx.llbb);
}
/// The LLVM `void` type for this crate's LLVM context.
pub fn void(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
}
pub fn nil(ccx: &CrateContext) -> Type {
}
/// The LLVM `metadata` type for this crate's LLVM context.
pub fn metadata(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx()))
}
/// The LLVM `i1` (single-bit integer) type for this crate's context.
pub fn i1(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
}
/// The LLVM `i8` type for this crate's context.
pub fn i8(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
}
/// The LLVM `i16` type for this crate's context.
pub fn i16(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
}
/// The LLVM `i32` type for this crate's context.
pub fn i32(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
}
/// The LLVM `i64` type for this crate's context.
pub fn i64(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
}
/// The LLVM single-precision `float` type for this crate's context.
pub fn f32(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
}
/// The LLVM double-precision `double` type for this crate's context.
pub fn f64(ccx: &CrateContext) -> Type {
    ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
}
pub fn bool(ccx: &CrateContext) -> Type {
}
pub fn int(ccx: &CrateContext) -> Type {
- match ccx.tcx.sess.targ_cfg.arch {
+ match ccx.tcx().sess.targ_cfg.arch {
X86 | Arm | Mips | Mipsel => Type::i32(ccx),
X86_64 => Type::i64(ccx)
}
pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
match t {
- ast::TyI => ccx.int_type,
+ ast::TyI => ccx.int_type(),
ast::TyI8 => Type::i8(ccx),
ast::TyI16 => Type::i16(ccx),
ast::TyI32 => Type::i32(ccx),
pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
match t {
- ast::TyU => ccx.int_type,
+ ast::TyU => ccx.int_type(),
ast::TyU8 => Type::i8(ccx),
ast::TyU16 => Type::i16(ccx),
ast::TyU32 => Type::i32(ccx),
pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
let els : &[TypeRef] = unsafe { mem::transmute(els) };
- ty!(llvm::LLVMStructTypeInContext(ccx.llcx, els.as_ptr(),
+ ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
els.len() as c_uint,
packed as Bool))
}
/// Creates a new named LLVM struct type; its body is left opaque until
/// filled in later (used to break cycles in recursive types).
pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
    ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx(), s)))
}
pub fn empty_struct(ccx: &CrateContext) -> Type {
}
pub fn generic_glue_fn(cx: &CrateContext) -> Type {
- match cx.tn.find_type("glue_fn") {
+ match cx.tn().find_type("glue_fn") {
Some(ty) => return ty,
None => ()
}
let ty = Type::glue_fn(cx, Type::i8p(cx));
- cx.tn.associate_type("glue_fn", &ty);
+ cx.tn().associate_type("glue_fn", &ty);
ty
}
// The box pointed to by @T.
pub fn at_box(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, [
- ccx.int_type, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
+ ccx.int_type(), Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
Type::i8p(ccx), Type::i8p(ccx), ty
], false)
}
// recursive types. For example, enum types rely on this behavior.
pub fn sizing_type_of(cx: &CrateContext, t: ty::t) -> Type {
- match cx.llsizingtypes.borrow().find_copy(&t) {
+ match cx.llsizingtypes().borrow().find_copy(&t) {
Some(t) => return t,
None => ()
}
ty::ty_vec(_, None) | ty::ty_trait(..) | ty::ty_str => fail!("unreachable")
};
- cx.llsizingtypes.borrow_mut().insert(t, llsizingty);
+ cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
llsizingty
}
}
// Check the cache.
- match cx.lltypes.borrow().find(&t) {
+ match cx.lltypes().borrow().find(&t) {
Some(&llty) => return llty,
None => ()
}
t,
t_norm.repr(cx.tcx()),
t_norm,
- cx.tn.type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.tn().type_to_string(llty));
+ cx.lltypes().borrow_mut().insert(t, llty);
return llty;
}
ty::ty_str => {
// This means we get a nicer name in the output (str is always
// unsized).
- cx.tn.find_type("str_slice").unwrap()
+ cx.tn().find_type("str_slice").unwrap()
}
ty::ty_trait(..) => Type::opaque_trait(cx),
_ if !ty::type_is_sized(cx.tcx(), ty) => {
debug!("--> mapped t={} {:?} to llty={}",
t.repr(cx.tcx()),
t,
- cx.tn.type_to_string(llty));
+ cx.tn().type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.lltypes().borrow_mut().insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {