layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
let llptrty = type_of::sizing_type_of(bcx.ccx,
- monomorphize::field_ty(bcx.ccx.tcx(), substs,
+ monomorphize::field_ty(bcx.tcx(), substs,
&def.variants[nndiscr as usize].fields[0]));
bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
}
}
// Returns true exactly when the target architecture is "arm" or "aarch64".
// NOTE(review): the name suggests these targets initialize an enum
// discriminant via a memset rather than a discrete store — the caller is not
// visible here, so confirm that intent at the use site.
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
- bcx.ccx.sess().target.target.arch == "arm" || bcx.ccx.sess().target.target.arch == "aarch64"
+ bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
}
// Intentionally a no-op in this view: despite the name, the visible body
// performs no runtime check on `discr`.
// NOTE(review): presumably this should (debug-)assert `min <= discr <= max`,
// accounting for wrap-around ranges where `min > max` — confirm whether the
// empty body is deliberate (e.g. checks compiled out) before relying on it.
fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
}
layout::General { discr: d, ref variants, .. } => {
let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false);
- fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false));
+ fields.insert(0, d.to_ty(&bcx.tcx(), false));
struct_field_ptr(bcx, &variants[discr.0 as usize],
&fields,
val, ix + 1, true)
// Default per-arch clobbers
// Basically what clang does
- let arch_clobbers = match &bcx.ccx.sess().target.target.arch[..] {
+ let arch_clobbers = match &bcx.sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
_ => Vec::new()
};
assert_eq!(def_a, def_b);
let src_fields = def_a.variants[0].fields.iter().map(|f| {
- monomorphize::field_ty(bcx.ccx.tcx(), substs_a, f)
+ monomorphize::field_ty(bcx.tcx(), substs_a, f)
});
let dst_fields = def_b.variants[0].fields.iter().map(|f| {
- monomorphize::field_ty(bcx.ccx.tcx(), substs_b, f)
+ monomorphize::field_ty(bcx.tcx(), substs_b, f)
});
let src = adt::MaybeSizedValue::sized(src);
use type_::Type;
use value::Value;
use libc::{c_uint, c_char};
-use rustc::ty::{Ty, TypeFoldable};
+use rustc::ty::{Ty, TyCtxt, TypeFoldable};
+use rustc::session::Session;
use type_of;
use std::borrow::Cow;
builder
}
+ /// Convenience accessor: returns the compiler `Session`, delegating to
+ /// `self.ccx.sess()`. Added so call sites can write `bcx.sess()` instead of
+ /// `bcx.ccx.sess()`.
+ pub fn sess(&self) -> &Session {
+ self.ccx.sess()
+ }
+
+ /// Convenience accessor: returns the type context (`TyCtxt`), delegating to
+ /// `self.ccx.tcx()`. Added so call sites can write `bcx.tcx()` instead of
+ /// `bcx.ccx.tcx()`.
+ pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
+ self.ccx.tcx()
+ }
+
pub fn llfn(&self) -> ValueRef {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
let llpersonality = bcx.ccx.eh_personality();
bcx.set_personality_fn(llpersonality);
- if base::wants_msvc_seh(bcx.ccx.sess()) {
+ if base::wants_msvc_seh(bcx.sess()) {
let pad = bcx.cleanup_pad(None, &[]);
let funclet = Some(Funclet::new(pad));
self.trans(funclet.as_ref(), &bcx);
// Insert cleanup instructions into the cleanup block
self.trans(None, &bcx);
- if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
+ if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(llretval);
} else {
let exc_ptr = bcx.extract_value(llretval, 0);
fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
CleanupScope {
cleanup: Some(drop_val),
- landing_pad: if !bcx.ccx.sess().no_landing_pads() {
+ landing_pad: if !bcx.sess().no_landing_pads() {
Some(drop_val.get_landing_pad(bcx))
} else {
None
};
let dbg_loc = if function_debug_context.source_locations_enabled.get() {
- debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span));
+ debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span));
let loc = span_start(builder.ccx, span);
InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
} else {
ptr: MaybeSizedValue,
content_ty: Ty<'tcx>
) {
- let def_id = langcall(bcx.ccx.tcx(), None, "", BoxFreeFnLangItem);
- let substs = bcx.ccx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
+ let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
+ let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
let callee = Callee::def(bcx.ccx, def_id, substs);
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
}
ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
let shallow_drop = def.is_union();
- let tcx = bcx.ccx.tcx();
+ let tcx = bcx.tcx();
let def = t.ty_adt_def().unwrap();
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let last_field = def.struct_variant().fields.last().unwrap();
- let field_ty = monomorphize::field_ty(bcx.ccx.tcx(), substs, last_field);
+ let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
// FIXME (#26403, #27023): We should be adding padding
(bcx.load(size_ptr), bcx.load(align_ptr))
}
ty::TySlice(_) | ty::TyStr => {
- let unit_ty = t.sequence_element_type(bcx.ccx.tcx());
+ let unit_ty = t.sequence_element_type(bcx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) {
- let tcx = cx.ccx.tcx();
+ let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
let mut cx = cx;
match t.sty {
ty::TyClosure(def_id, substs) => {
- for (i, upvar_ty) in substs.upvar_tys(def_id, cx.ccx.tcx()).enumerate() {
+ for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty);
}
ty::TyArray(_, n) => {
let base = get_dataptr(&cx, ptr.value);
let len = C_uint(cx.ccx, n);
- let unit_ty = t.sequence_element_type(cx.ccx.tcx());
+ let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
}
ty::TySlice(_) | ty::TyStr => {
- let unit_ty = t.sequence_element_type(cx.ccx.tcx());
+ let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta,
|bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
- let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.ccx.tcx(), t, None);
+ let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i);
let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
}
}
(adt::BranchKind::Switch, Some(lldiscrim_a)) => {
- let tcx = cx.ccx.tcx();
+ let tcx = cx.tcx();
drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize);
// Create a fall-through basic block for the "else" case of
}
cx = next_cx;
}
- _ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
+ _ => cx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"),
}
}
},
_ => {
- cx.ccx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
+ cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t))
}
}
return cx;
local_ptr: ValueRef,
dest: ValueRef,
) {
- if bcx.ccx.sess().no_landing_pads() {
+ if bcx.sess().no_landing_pads() {
bcx.call(func, &[data], None);
bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
} else if wants_msvc_seh(bcx.sess()) {
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
- bcx.ccx.sess(), span,
+ bcx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ",
$msg),
name, $($fmt)*));
- let tcx = bcx.ccx.tcx();
+ let tcx = bcx.tcx();
let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig());
let arg_tys = sig.inputs();
let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
Lifetime::End.call(&bcx, ps);
- if !bcx.ccx.sess().target.target.options.custom_unwind_resume {
+ if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(lp);
} else {
let exc_ptr = bcx.extract_value(lp, 0);
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr);
- let ty = discr_lvalue.ty.to_ty(bcx.ccx.tcx());
+ let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
let mut bb_hist = FxHashMap();
LocalRef::Lvalue(tr_lvalue) => {
OperandRef {
val: Ref(tr_lvalue.llval),
- ty: tr_lvalue.ty.to_ty(bcx.ccx.tcx())
+ ty: tr_lvalue.ty.to_ty(bcx.tcx())
}
}
};
}
mir::TerminatorKind::Drop { ref location, target, unwind } => {
- let ty = location.ty(&self.mir, bcx.ccx.tcx()).to_ty(bcx.ccx.tcx());
+ let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx());
let ty = self.monomorphize(&ty);
// Double check for necessity to drop
self.set_debug_loc(&bcx, terminator.source_info);
// Get the location information.
- let loc = bcx.ccx.sess().codemap().lookup_char_pos(span.lo);
+ let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
let filename = Symbol::intern(&loc.file.name).as_str();
let filename = C_str_slice(bcx.ccx, filename);
let line = C_u32(bcx.ccx, loc.line as u32);
if const_cond == Some(!expected) {
if let Some(err) = const_err {
let err = ConstEvalErr{ span: span, kind: err };
- let mut diag = bcx.ccx.tcx().sess.struct_span_warn(
+ let mut diag = bcx.tcx().sess.struct_span_warn(
span, "this expression will panic at run-time");
- note_const_eval_err(bcx.ccx.tcx(), &err, span, "expression", &mut diag);
+ note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
diag.emit();
}
}
// Obtain the panic entry point.
- let def_id = common::langcall(bcx.ccx.tcx(), Some(span), "", lang_item);
+ let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx, def_id,
bcx.ccx.empty_substs_for_def_id(def_id));
let llfn = callee.reify(bcx.ccx);
_ => bug!("{} is not callable", callee.ty)
};
- let sig = bcx.ccx.tcx().erase_late_bound_regions_and_normalize(sig);
+ let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig);
// Handle intrinsics old trans wants Expr's for, ourselves.
let intrinsic = match (&callee.ty.sty, &callee.data) {
(&ty::TyFnDef(def_id, ..), &Intrinsic) => {
- Some(bcx.ccx.tcx().item_name(def_id).as_str())
+ Some(bcx.tcx().item_name(def_id).as_str())
}
_ => None
};
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
- let op_ty = op_arg.ty(&self.mir, bcx.ccx.tcx());
+ let op_ty = op_arg.ty(&self.mir, bcx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args);
let imm_op = |x| OperandRef {
val: Immediate(x),
// We won't be checking the type again.
- ty: bcx.ccx.tcx().types.err
+ ty: bcx.tcx().types.err
};
self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
let mut val = self.trans_operand(bcx, src);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
- let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.ccx.tcx()));
+ let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx, llouttype);
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx, def_id, substs);
let ty = match f.ty.sty {
- ty::TyFnDef(.., f) => bcx.ccx.tcx().mk_fn_ptr(f),
+ ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f),
_ => f.ty
};
val = OperandRef {
let llindex = C_uint(bcx.ccx, from);
let llbase = project_index(llindex);
- let base_ty = tr_base.ty.to_ty(bcx.ccx.tcx());
+ let base_ty = tr_base.ty.to_ty(bcx.tcx());
match base_ty.sty {
ty::TyArray(..) => {
// must cast the lvalue pointer type to the new
// User variable
let source_info = decl.source_info.unwrap();
let debug_scope = mircx.scopes[source_info.scope];
- let dbg = debug_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo;
+ let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
if !lvalue_locals.contains(local.index()) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
- let tcx = bcx.ccx.tcx();
+ let tcx = bcx.tcx();
let mut idx = 0;
let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
- let arg_scope = if arg_scope.is_valid() && bcx.ccx.sess().opts.debuginfo == FullDebugInfo {
+ let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo {
Some(arg_scope.scope_metadata)
} else {
None
let arg = &mircx.fn_ty.args[idx];
idx += 1;
- let llval = if arg.is_indirect() && bcx.ccx.sess().opts.debuginfo != FullDebugInfo {
+ let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up, unless
// we emit extra-debug-info, which requires local allocas :(.
// for most lvalues, to consume them we just load them
// out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
- let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
+ let ty = tr_lvalue.ty.to_ty(bcx.tcx());
self.trans_load(bcx, tr_lvalue.llval, ty)
}
mir::Rvalue::Repeat(ref elem, ref count) => {
let tr_elem = self.trans_operand(&bcx, elem);
- let size = count.value.as_u64(bcx.ccx.tcx().sess.target.uint_type);
+ let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
match *kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
let disr = Disr::from(adt_def.variants[variant_index].disr_val);
- let dest_ty = dest.ty.to_ty(bcx.ccx.tcx());
+ let dest_ty = dest.ty.to_ty(bcx.tcx());
adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr));
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
},
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
- let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.ccx.tcx()));
+ let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
let translation = if let Layout::Univariant { ref variant, .. } = *layout {
Some(&variant.memory_index)
} else {
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
- (lvalue.llval, lvalue.ty.to_ty(bcx.ccx.tcx()))
+ (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
}).collect();
let input_vals = inputs.iter().map(|input| {
mir::Rvalue::Ref(_, bk, ref lvalue) => {
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
- let ty = tr_lvalue.ty.to_ty(bcx.ccx.tcx());
- let ref_ty = bcx.ccx.tcx().mk_ref(
- bcx.ccx.tcx().mk_region(ty::ReErased),
+ let ty = tr_lvalue.ty.to_ty(bcx.tcx());
+ let ref_ty = bcx.tcx().mk_ref(
+ bcx.tcx().mk_region(ty::ReErased),
ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
);
let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
let operand = OperandRef {
val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)),
- ty: bcx.ccx.tcx().types.usize,
+ ty: bcx.tcx().types.usize,
};
(bcx, operand)
}
};
let operand = OperandRef {
val: OperandValue::Immediate(llresult),
- ty: op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty),
+ ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
};
(bcx, operand)
}
let result = self.trans_scalar_checked_binop(&bcx, op,
lhs.immediate(), rhs.immediate(),
lhs.ty);
- let val_ty = op.ty(bcx.ccx.tcx(), lhs.ty, rhs.ty);
- let operand_ty = bcx.ccx.tcx().intern_tup(&[val_ty, bcx.ccx.tcx().types.bool]);
+ let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
+ let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]);
let operand = OperandRef {
val: result,
ty: operand_ty
let align = type_of::align_of(bcx.ccx, content_ty);
let llalign = C_uint(bcx.ccx, align);
let llty_ptr = llty.ptr_to();
- let box_ty = bcx.ccx.tcx().mk_box(content_ty);
+ let box_ty = bcx.tcx().mk_box(content_ty);
// Allocate space:
- let def_id = match bcx.ccx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
+ let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
- bcx.ccx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
+ bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
}
};
- let r = Callee::def(bcx.ccx, def_id, bcx.ccx.tcx().intern_substs(&[]))
+ let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[]))
.reify(bcx.ccx);
let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
// will only succeed if both operands are constant.
// This is necessary to determine when an overflow Assert
// will always panic at runtime, and produce a warning.
- if let Some((val, of)) = const_scalar_checked_binop(bcx.ccx.tcx(), op, lhs, rhs, input_ty) {
+ if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
return OperandValue::Pair(val, C_bool(bcx.ccx, of));
}
use syntax::ast::UintTy::*;
use rustc::ty::{TyInt, TyUint};
- let tcx = bcx.ccx.tcx();
+ let tcx = bcx.tcx();
let new_sty = match ty.sty {
TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {