let ret_ty = sig.output();
let mut ret = arg_of(ret_ty, true);
- if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
+ if !type_is_fat_ptr(ccx, ret_ty) {
// The `noalias` attribute on the return value is useful to a
// function ptr caller.
if let ty::TyBox(_) = ret_ty.sty {
for ty in inputs.iter().chain(extra_args.iter()) {
let mut arg = arg_of(ty, false);
- if type_is_fat_ptr(ccx.tcx(), ty) {
+ if type_is_fat_ptr(ccx, ty) {
let original_tys = arg.original_ty.field_types();
let sizing_tys = arg.ty.field_types();
assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));
};
// Fat pointers are returned by-value.
if !self.ret.is_ignore() {
- if !type_is_fat_ptr(ccx.tcx(), sig.output()) {
+ if !type_is_fat_ptr(ccx, sig.output()) {
fixup(&mut self.ret);
}
}
sizing: bool, dst: bool) -> Vec<Type> {
let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
if sizing {
- fields.filter(|ty| !dst || type_is_sized(cx.tcx(), *ty))
+ fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty))
.map(|ty| type_of::sizing_type_of(cx, ty)).collect()
} else {
fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
// * First field - Always aligned properly
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
- if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || type_is_sized(bcx.tcx(), fty) {
+ if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
+ bcx.ccx().shared().type_is_sized(fty) {
return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
}
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
- assert!(common::type_is_sized(bcx.tcx(), a));
+ assert!(bcx.ccx().shared().type_is_sized(a));
let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
(bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx(), a, b, None))
}
(&ty::TyRef(..), &ty::TyRef(..)) |
(&ty::TyRef(..), &ty::TyRawPtr(..)) |
(&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
- let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) {
+ let (base, info) = if common::type_is_fat_ptr(bcx.ccx(), src_ty) {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False)
- } else if (t.is_region_ptr() || t.is_unique()) &&
- !common::type_is_fat_ptr(ccx.tcx(), t) {
+ } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(ccx, t) {
b.load_nonnull(ptr)
} else {
b.load(ptr)
pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
- if common::type_is_fat_ptr(cx.tcx(), t) {
+ if common::type_is_fat_ptr(cx.ccx(), t) {
let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR);
let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
store_fat_ptr(cx, lladdr, llextra, dst, t);
let llsz = llsize_of(ccx, llty);
let llalign = type_of::align_of(ccx, t);
call_memcpy(bcx, dst, src, llsz, llalign as u32);
- } else if common::type_is_fat_ptr(bcx.tcx(), t) {
+ } else if common::type_is_fat_ptr(bcx.ccx(), t) {
let (data, extra) = load_fat_ptr(bcx, src, t);
store_fat_ptr(bcx, data, extra, dst, t);
} else {
let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i);
let arg = &fcx.fn_ty.args[arg_idx];
arg_idx += 1;
- if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
+ if common::type_is_fat_ptr(bcx.ccx(), arg_ty) {
let meta = &fcx.fn_ty.args[arg_idx];
arg_idx += 1;
arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr));
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
- if !self.type_needs_drop(ty) { return CleanupScope::noop(); }
+ if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
let drop = DropValue {
val: val,
ty: ty,
pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
- if !self.type_needs_drop(ty) { return CleanupScope::noop(); }
+ if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
let drop = DropValue {
val: val,
use syntax_pos::DUMMY_SP;
use base::custom_coerce_unsize_info;
use context::SharedCrateContext;
-use common::{fulfill_obligation, type_is_sized};
+use common::fulfill_obligation;
use glue::{self, DropGlueKind};
use monomorphize::{self, Instance};
use util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
TransItem::Static(node_id) => {
let def_id = scx.tcx().map.local_def_id(node_id);
let ty = scx.tcx().item_type(def_id);
- let ty = glue::get_drop_glue_type(scx.tcx(), ty);
+ let ty = glue::get_drop_glue_type(scx, ty);
neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
recursion_depth_reset = None;
self.param_substs,
&ty);
assert!(ty.is_normalized_for_trans());
- let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
+ let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
let operand_ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&mt.ty);
- let ty = glue::get_drop_glue_type(tcx, operand_ty);
+ let ty = glue::get_drop_glue_type(self.scx, operand_ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
} else {
bug!("Has the drop_in_place() intrinsic's signature changed?")
let field_type = monomorphize::apply_param_substs(scx,
substs,
&field_type);
- let field_type = glue::get_drop_glue_type(scx.tcx(), field_type);
+ let field_type = glue::get_drop_glue_type(scx, field_type);
- if glue::type_needs_drop(scx.tcx(), field_type) {
+ if scx.type_needs_drop(field_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type)));
}
}
}
ty::TyClosure(def_id, substs) => {
for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) {
- let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty);
- if glue::type_needs_drop(scx.tcx(), upvar_ty) {
+ let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty);
+ if scx.type_needs_drop(upvar_ty) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty)));
}
}
ty::TyBox(inner_type) |
ty::TySlice(inner_type) |
ty::TyArray(inner_type, _) => {
- let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type);
- if glue::type_needs_drop(scx.tcx(), inner_type) {
+ let inner_type = glue::get_drop_glue_type(scx, inner_type);
+ if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyTuple(args) => {
for arg in args {
- let arg = glue::get_drop_glue_type(scx.tcx(), arg);
- if glue::type_needs_drop(scx.tcx(), arg) {
+ let arg = glue::get_drop_glue_type(scx, arg);
+ if scx.type_needs_drop(arg) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(arg)));
}
}
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
let (inner_source, inner_target) = (a, b);
- if !type_is_sized(scx.tcx(), inner_source) {
+ if !scx.type_is_sized(inner_source) {
(inner_source, inner_target)
} else {
scx.tcx().struct_lockstep_tails(inner_source, inner_target)
output.extend(methods);
}
// Also add the destructor
- let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty);
+ let dg_type = glue::get_drop_glue_type(scx, impl_ty);
output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type)));
}
}
def_id_to_string(self.scx.tcx(), def_id));
let ty = self.scx.tcx().item_type(def_id);
- let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
+ let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
}
use syntax::ast;
use syntax::symbol::{Symbol, InternedString};
-use syntax_pos::{DUMMY_SP, Span};
+use syntax_pos::Span;
pub use context::{CrateContext, SharedCrateContext};
-/// Is the type's representation size known at compile time?
-pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
- ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP)
-}
-
-pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
+pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyBox(ty) => {
- !type_is_sized(tcx, ty)
+ !ccx.shared().type_is_sized(ty)
}
_ => {
false
use machine::llsize_of_alloc;
use type_of::sizing_type_of;
- let tcx = ccx.tcx();
let simple = ty.is_scalar() ||
ty.is_unique() || ty.is_region_ptr() ||
ty.is_simd();
- if simple && !type_is_fat_ptr(tcx, ty) {
+ if simple && !type_is_fat_ptr(ccx, ty) {
return true;
}
- if !type_is_sized(tcx, ty) {
+ if !ccx.shared().type_is_sized(ty) {
return false;
}
match ty.sty {
// section of the executable we're generating.
pub llfn: ValueRef,
- // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
- param_env: ty::ParameterEnvironment<'tcx>,
-
// A pointer to where to store the return value. If the return type is
// immediate, this points to an alloca in the function. Otherwise, it's a
// pointer to the hidden first parameter of the function. After function
let mut fcx = FunctionContext {
llfn: llfndecl,
llretslotptr: None,
- param_env: ccx.tcx().empty_parameter_environment(),
alloca_insert_pt: None,
fn_ty: fn_ty,
param_substs: param_substs,
value)
}
- /// This is the same as `common::type_needs_drop`, except that it
- /// may use or update caches within this `FunctionContext`.
- pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
- self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env)
- }
-
pub fn eh_personality(&self) -> ValueRef {
// The exception handling personality function.
//
use std::str;
use syntax::ast;
use syntax::symbol::InternedString;
+use syntax_pos::DUMMY_SP;
use abi::FnType;
pub struct Stats {
exported_symbols: NodeSet,
link_meta: LinkMeta,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ param_env: ty::ParameterEnvironment<'tcx>,
stats: Stats,
check_overflow: bool,
export_map: export_map,
exported_symbols: exported_symbols,
link_meta: link_meta,
+ param_env: tcx.empty_parameter_environment(),
tcx: tcx,
stats: Stats {
n_glues_created: Cell::new(0),
}
}
+ /// Returns true if dropping a value of type `ty` requires running drop
+ /// glue, judged against this context's cached empty parameter environment
+ /// (`self.param_env`), avoiding a fresh environment per query.
+ pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+ self.tcx.type_needs_drop_given_env(ty, &self.param_env)
+ }
+
+ /// Is the representation size of `ty` known at compile time? Uses the
+ /// cached empty parameter environment held by this context rather than
+ /// constructing one on every call.
+ pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_sized(self.tcx, &self.param_env, DUMMY_SP)
+ }
+
pub fn metadata_llmod(&self) -> ModuleRef {
self.metadata_llmod
}
use middle::lang_items::ExchangeFreeFnLangItem;
use rustc::ty::subst::{Substs};
use rustc::traits;
-use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::{self, AdtKind, Ty, TypeFoldable};
use adt;
use base::*;
use callee::Callee;
pub fn trans_exchange_free_ty<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx>
) {
- assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
+ assert!(bcx.ccx().shared().type_is_sized(content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
}
}
-pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
- tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment())
-}
-
-pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
+pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
assert!(t.is_normalized_for_trans());
- let t = tcx.erase_regions(&t);
+ let t = scx.tcx().erase_regions(&t);
// Even if there is no dtor for t, there might be one deeper down and we
// might need to pass in the vtable ptr.
- if !type_is_sized(tcx, t) {
+ if !scx.type_is_sized(t) {
return t;
}
// returned `tcx.types.i8` does not appear unsound. The impact on
// code quality is unknown at this time.)
- if !type_needs_drop(tcx, t) {
- return tcx.types.i8;
+ if !scx.type_needs_drop(t) {
+ return scx.tcx().types.i8;
}
match t.sty {
- ty::TyBox(typ) if !type_needs_drop(tcx, typ)
- && type_is_sized(tcx, typ) => {
- tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
+ ty::TyBox(typ) if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) => {
+ scx.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| {
let layout = t.layout(&infcx).unwrap();
- if layout.size(&tcx.data_layout).bytes() == 0 {
+ if layout.size(&scx.tcx().data_layout).bytes() == 0 {
// `Box<ZeroSizeType>` does not allocate.
- tcx.types.i8
+ scx.tcx().types.i8
} else {
t
}
) {
// NB: v is an *alias* of type t here, not a direct value.
debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
- if bcx.fcx().type_needs_drop(t) {
+ if bcx.ccx().shared().type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
DropGlueKind::TyContents(t)
DropGlueKind::Ty(t)
};
let glue = get_drop_glue_core(ccx, g);
- let glue_type = get_drop_glue_type(ccx.tcx(), t);
+ let glue_type = get_drop_glue_type(ccx.shared(), t);
let ptr = if glue_type != t {
bcx.pointercast(v, type_of(ccx, glue_type).ptr_to())
} else {
}
fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef {
- let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t));
+ let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t));
match ccx.drop_glues().borrow().get(&g) {
Some(&(glue, _)) => glue,
None => {
}
pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) {
- let tcx = ccx.tcx();
- assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty()));
+ assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
let fcx = FunctionContext::new(ccx, llfn, fn_ty, None, false);
};
let (sized_args, unsized_args);
- let args: &[ValueRef] = if type_is_sized(tcx, t) {
+ let args: &[ValueRef] = if bcx.ccx().shared().type_is_sized(t) {
sized_args = [v0];
&sized_args
} else {
-> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, Value(info));
- if type_is_sized(bcx.tcx(), t) {
+ if bcx.ccx().shared().type_is_sized(t) {
let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t);
// special. It may move to library and have Drop impl. As
// a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor);
- if !type_is_sized(bcx.tcx(), content_ty) {
+ if !bcx.ccx().shared().type_is_sized(content_ty) {
let llval = get_dataptr(&bcx, v0);
let llbox = bcx.load(llval);
drop_ty(&bcx, v0, content_ty);
bcx
}
_ => {
- if bcx.fcx().type_needs_drop(t) {
+ if bcx.ccx().shared().type_needs_drop(t) {
drop_structural_ty(bcx, v0, t)
} else {
bcx
}
}
- let value = if type_is_sized(cx.tcx(), t) {
+ let value = if cx.ccx().shared().type_is_sized(t) {
adt::MaybeSizedValue::sized(av)
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i);
- let val = if type_is_sized(cx.tcx(), field_ty) {
+ let val = if cx.ccx().shared().type_is_sized(field_ty) {
llfld_a
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
}
"size_of_val" => {
let tp_ty = substs.type_at(0);
- if !type_is_sized(tcx, tp_ty) {
+ if !bcx.ccx().shared().type_is_sized(tp_ty) {
let (llsize, _) =
glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
llsize
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
- if !type_is_sized(tcx, tp_ty) {
+ if !bcx.ccx().shared().type_is_sized(tp_ty) {
let (_, llalign) =
glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
llalign
"needs_drop" => {
let tp_ty = substs.type_at(0);
- C_bool(ccx, bcx.fcx().type_needs_drop(tp_ty))
+ C_bool(ccx, bcx.ccx().shared().type_needs_drop(tp_ty))
}
"offset" => {
let ptr = llargs[0];
},
"volatile_store" => {
let tp_ty = substs.type_at(0);
- if type_is_fat_ptr(bcx.tcx(), tp_ty) {
+ if type_is_fat_ptr(bcx.ccx(), tp_ty) {
bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0]));
bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
} else {
// This assumes the type is "simple", i.e. no
// destructors, and the contents are SIMD
// etc.
- assert!(!bcx.fcx().type_needs_drop(arg_type));
+ assert!(!bcx.ccx().shared().type_needs_drop(arg_type));
let arg = adt::MaybeSizedValue::sized(llarg);
(0..contents.len())
.map(|i| {
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc::mir::traversal;
use common::{self, BlockAndBuilder};
-use glue;
use super::rvalue;
pub fn lvalue_locals<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mir: &mir::Mir<'tcx>) -> BitVector {
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
assert!(common::type_is_immediate(bcx.ccx(), ty) ||
- common::type_is_fat_ptr(bcx.tcx(), ty));
+ common::type_is_fat_ptr(bcx.ccx(), ty));
} else if common::type_is_imm_pair(bcx.ccx(), ty) {
// We allow pairs and uses of any of their 2 fields.
} else {
let ty = self.bcx.fcx().monomorphize(&ty.to_ty(self.bcx.tcx()));
// Only need the lvalue if we're actually dropping it.
- if glue::type_needs_drop(self.bcx.tcx(), ty) {
+ if self.bcx.ccx().shared().type_needs_drop(ty) {
self.mark_as_lvalue(index);
}
}
let ty = bcx.fcx().monomorphize(&ty);
// Double check for necessity to drop
- if !glue::type_needs_drop(bcx.tcx(), ty) {
+ if !bcx.ccx().shared().type_needs_drop(ty) {
funclet_br(self, bcx, target);
return;
}
let lvalue = self.trans_lvalue(&bcx, location);
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
- let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
- let is_sized = common::type_is_sized(bcx.tcx(), ty);
+ let drop_ty = glue::get_drop_glue_type(bcx.ccx().shared(), ty);
+ let is_sized = bcx.ccx().shared().type_is_sized(ty);
let llvalue = if is_sized {
if drop_ty != ty {
bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
};
// Double check for necessity to drop
- if !glue::type_needs_drop(bcx.tcx(), ty) {
+ if !bcx.ccx().shared().type_needs_drop(ty) {
funclet_br(self, bcx, target);
return;
}
};
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
- let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
- let is_sized = common::type_is_sized(bcx.tcx(), ty);
+ let drop_ty = glue::get_drop_glue_type(bcx.ccx().shared(), ty);
+ let is_sized = bcx.ccx().shared().type_is_sized(ty);
let llvalue = if is_sized {
if drop_ty != ty {
bcx.pointercast(llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
callee: &mut CalleeData) {
if let Pair(a, b) = op.val {
// Treat the values in a fat pointer separately.
- if common::type_is_fat_ptr(bcx.tcx(), op.ty) {
+ if common::type_is_fat_ptr(bcx.ccx(), op.ty) {
let (ptr, meta) = (a, b);
if *next_idx == 0 {
if let Virtual(idx) = *callee {
let base = adt::MaybeSizedValue::sized(llval);
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n);
- let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
+ let val = if common::type_is_fat_ptr(bcx.ccx(), ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty);
Pair(lldata, llextra)
} else {
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use {abi, adt, base, Disr, machine};
use callee::Callee;
-use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized};
+use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
use common::{const_to_opt_int, const_to_opt_uint};
.projection_ty(tcx, &projection.elem);
let base = tr_base.to_const(span);
let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
- let is_sized = common::type_is_sized(tcx, projected_ty);
+ let is_sized = self.ccx.shared().type_is_sized(projected_ty);
let (projected, llextra) = match projection.elem {
mir::ProjectionElem::Deref => {
mir::CastKind::Unsize => {
// unsize targets other than to a fat pointer currently
// can't be in constants.
- assert!(common::type_is_fat_ptr(tcx, cast_ty));
+ assert!(common::type_is_fat_ptr(self.ccx, cast_ty));
let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
.expect("consts: unsizing got non-pointer type").ty;
- let (base, old_info) = if !common::type_is_sized(tcx, pointee_ty) {
+ let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) {
// Normally, the source is a thin pointer and we are
// adding extra info to make a fat pointer. The exception
// is when we are upcasting an existing object fat pointer
mir::CastKind::Misc => { // Casts from a fat-ptr.
let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty);
let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty);
- if common::type_is_fat_ptr(tcx, operand.ty) {
+ if common::type_is_fat_ptr(self.ccx, operand.ty) {
let (data_ptr, meta_ptr) = operand.get_fat_ptr();
- if common::type_is_fat_ptr(tcx, cast_ty) {
+ if common::type_is_fat_ptr(self.ccx, cast_ty) {
let ll_cft = ll_cast_ty.field_types();
let ll_fft = ll_from_ty.field_types();
let data_cast = consts::ptrcast(data_ptr, ll_cft[0]);
let base = match tr_lvalue.base {
Base::Value(llval) => {
// FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
- let align = if type_is_sized(self.ccx.tcx(), ty) {
+ let align = if self.ccx.shared().type_is_sized(ty) {
type_of::align_of(self.ccx, ty)
} else {
self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign
Base::Static(llval) => llval
};
- let ptr = if common::type_is_sized(tcx, ty) {
+ let ptr = if self.ccx.shared().type_is_sized(ty) {
base
} else {
C_struct(self.ccx, &[base, tr_lvalue.llextra], false)
LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
};
let discr = discr as u64;
- let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
+ let is_sized = self.ccx().shared().type_is_sized(projected_ty.to_ty(tcx));
let base = if is_sized {
adt::MaybeSizedValue::sized(tr_base.llval)
} else {
let dst = bcx.struct_gep(lltemp, i);
let arg = &fcx.fn_ty.args[idx];
idx += 1;
- if common::type_is_fat_ptr(tcx, tupled_arg_ty) {
+ if common::type_is_fat_ptr(bcx.ccx(), tupled_arg_ty) {
// We pass fat pointers as two words, but inside the tuple
// they are the two sub-fields of a single aggregate field.
let meta = &fcx.fn_ty.args[idx];
}
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
- let val = if common::type_is_fat_ptr(tcx, arg_ty) {
+ let val = if common::type_is_fat_ptr(bcx.ccx(), arg_ty) {
let meta = &fcx.fn_ty.args[idx];
idx += 1;
assert_eq!((meta.cast, meta.pad), (None, None));
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
- if common::type_is_fat_ptr(tcx, arg_ty) {
+ if common::type_is_fat_ptr(bcx.ccx(), arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
{
debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
- let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
+ let val = if common::type_is_fat_ptr(bcx.ccx(), ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx(), ty) {
mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
let cast_ty = bcx.fcx().monomorphize(&cast_ty);
- if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
+ if common::type_is_fat_ptr(bcx.ccx(), cast_ty) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
mir::CastKind::Unsize => {
// unsize targets other than to a fat pointer currently
// can't be operands.
- assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty));
+ assert!(common::type_is_fat_ptr(bcx.ccx(), cast_ty));
match operand.val {
OperandValue::Pair(lldata, llextra) => {
}
}
}
- mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => {
+ mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx(), operand.ty) => {
let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
- if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
+ if common::type_is_fat_ptr(bcx.ccx(), cast_ty) {
let ll_cft = ll_cast_ty.field_types();
let ll_fft = ll_from_ty.field_types();
let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
// Note: lvalues are indirect, so storing the `llval` into the
// destination effectively creates a reference.
- let operand = if common::type_is_sized(bcx.tcx(), ty) {
+ let operand = if bcx.ccx().shared().type_is_sized(ty) {
OperandRef {
val: OperandValue::Immediate(tr_lvalue.llval),
ty: ref_ty,
mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
let lhs = self.trans_operand(&bcx, lhs);
let rhs = self.trans_operand(&bcx, rhs);
- let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) {
+ let llresult = if common::type_is_fat_ptr(bcx.ccx(), lhs.ty) {
match (lhs.val, rhs.val) {
(OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra)) => {
linkage: llvm::Linkage,
symbol_name: &str) {
let tcx = ccx.tcx();
- assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty()));
+ assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty()));
let t = dg.ty();
let sig = tcx.mk_fn_sig(iter::once(tcx.mk_mut_ptr(tcx.types.i8)), tcx.mk_nil(), false);
let _recursion_lock = cx.enter_type_of(t);
let llsizingty = match t.sty {
- _ if !type_is_sized(cx.tcx(), t) => {
+ _ if !cx.shared().type_is_sized(t) => {
Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false)
}
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
- if type_is_sized(cx.tcx(), ty) {
+ if cx.shared().type_is_sized(ty) {
Type::i8p(cx)
} else {
Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false)
// FIXME(eddyb) Temporary sanity check for ty::layout.
let layout = cx.layout_of(t);
- if !type_is_sized(cx.tcx(), t) {
+ if !cx.shared().type_is_sized(t) {
if !layout.is_unsized() {
bug!("layout should be unsized for type `{}` / {:#?}",
t, layout);
match ty.sty {
ty::TyBox(t) |
ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) |
- ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !type_is_sized(ccx.tcx(), t) => {
+ ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !ccx.shared().type_is_sized(t) => {
in_memory_type_of(ccx, t).ptr_to()
}
_ => bug!("expected fat ptr ty but got {:?}", ty)
/// is too large for it to be placed in SSA value (by our rules).
/// For the raw type without far pointer indirection, see `in_memory_type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
- let ty = if !type_is_sized(cx.tcx(), ty) {
+ let ty = if !cx.shared().type_is_sized(ty) {
cx.tcx().mk_imm_ptr(ty)
} else {
ty
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
- if !type_is_sized(cx.tcx(), ty) {
+ if !cx.shared().type_is_sized(ty) {
if let ty::TyStr = ty.sty {
// This means we get a nicer name in the output (str is always
// unsized).