fn layout_of(self, ty: Self::Ty) -> Self::TyLayout;
}
/// Layout-related operations on a type representation `Self`, usable
/// with any layout context `C` whose `LayoutOf::Ty` is `Self`.
/// NOTE(review): the diff tightens `C: LayoutOf` to `C: LayoutOf<Ty = Self>`,
/// tying the context's type to this trait's implementor so `C::TyLayout`
/// values describe the same type representation.
-pub trait TyLayoutMethods<'a, C: LayoutOf>: Sized {
+pub trait TyLayoutMethods<'a, C: LayoutOf<Ty = Self>>: Sized {
// Layout of the given variant of `this`.
fn for_variant(this: TyLayout<'a, Self>, cx: C, variant_index: usize) -> TyLayout<'a, Self>;
// Layout of field `i` of `this`, in the context's layout type.
fn field(this: TyLayout<'a, Self>, cx: C, i: usize) -> C::TyLayout;
}
impl<'a, Ty> TyLayout<'a, Ty> {
// Delegates to `Ty::for_variant`; the `LayoutOf<Ty = Ty>` bound keeps
// the context and this layout on the same type representation.
pub fn for_variant<C>(self, cx: C, variant_index: usize) -> Self
- where Ty: TyLayoutMethods<'a, C>, C: LayoutOf {
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::for_variant(self, cx, variant_index)
}
// Delegates to `Ty::field`, returning the context's layout for field `i`.
pub fn field<C>(self, cx: C, i: usize) -> C::TyLayout
- where Ty: TyLayoutMethods<'a, C>, C: LayoutOf {
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> {
Ty::field(self, cx, i)
}
}
pub options: TargetOptions,
}
+/// Access to the target specification without depending on a compiler
+/// session; implemented for `&Target` below and, elsewhere in this
+/// patch, for `&CodegenCx`.
+pub trait HasTargetSpec: Copy {
+ fn target_spec(&self) -> &Target;
+}
+
+/// A plain `&Target` trivially provides its own spec.
+impl<'a> HasTargetSpec for &'a Target {
+ fn target_spec(&self) -> &Target {
+ self
+ }
+}
+
/// Optional aspects of a target specification.
///
/// This has an implementation of `Default`, see each field for what the default is. In general,
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
+use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
+use rustc_target::spec::HasTargetSpec;
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, LayoutOf, Size, TyLayout};
+use rustc::ty::layout;
use libc::c_uint;
use std::cmp;
}
}
-pub trait LayoutExt<'tcx> {
+pub trait LayoutExt<'a, Ty>: Sized {
fn is_aggregate(&self) -> bool;
- fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg>;
+ fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
+ where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy;
}
-impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
+impl<'a, Ty> LayoutExt<'a, Ty> for TyLayout<'a, Ty> {
fn is_aggregate(&self) -> bool {
match self.abi {
layout::Abi::Uninhabited |
}
}
- fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg> {
+ fn homogeneous_aggregate<C>(&self, cx: C) -> Option<Reg>
+ where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self> + Copy
+ {
match self.abi {
layout::Abi::Uninhabited => None,
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Debug)]
-pub struct ArgType<'tcx> {
- pub layout: TyLayout<'tcx>,
+pub struct ArgType<'tcx, Ty = ty::Ty<'tcx>> {
+// `Ty` defaults to rustc's `ty::Ty<'tcx>`, so existing `ArgType<'tcx>`
+// mentions keep compiling unchanged (backward-compatible generalization).
+ pub layout: TyLayout<'tcx, Ty>,
/// Dummy argument, which is emitted before the real argument.
pub pad: Option<Reg>,
/// How the value is passed — e.g. `PassMode::Ignore`, `Direct(..)`,
/// `Indirect(..)` (variants seen in use elsewhere in this patch).
pub mode: PassMode,
}
-impl<'a, 'tcx> ArgType<'tcx> {
- fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
+impl<'a, 'tcx, Ty> ArgType<'tcx, Ty> {
+ fn new(layout: TyLayout<'tcx, Ty>) -> Self {
ArgType {
layout,
pad: None,
pub fn is_ignore(&self) -> bool {
self.mode == PassMode::Ignore
}
+}
+impl<'a, 'tcx> ArgType<'tcx> {
/// Get the LLVM type for a place of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Debug)]
-pub struct FnType<'tcx> {
+pub struct FnType<'tcx, Ty = ty::Ty<'tcx>> {
/// The LLVM types of each argument.
- pub args: Vec<ArgType<'tcx>>,
+ pub args: Vec<ArgType<'tcx, Ty>>,
/// LLVM return type.
- pub ret: ArgType<'tcx>,
+ pub ret: ArgType<'tcx, Ty>,
pub variadic: bool,
pub fn new(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
- extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
+ extra_args: &[Ty<'tcx>]) -> Self {
let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
fn_ty.adjust_for_abi(cx, sig.abi);
fn_ty
pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
- extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
+ extra_args: &[Ty<'tcx>]) -> Self {
let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
// Don't pass the vtable, it's not an argument of the virtual fn.
{
pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
sig: ty::FnSig<'tcx>,
- extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
+ extra_args: &[Ty<'tcx>]) -> Self {
debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);
use self::Abi::*;
// Handle safe Rust thin and fat pointers.
let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
scalar: &layout::Scalar,
- layout: TyLayout<'tcx>,
+ layout: TyLayout<'tcx, Ty<'tcx>>,
offset: Size,
is_return: bool| {
// Booleans are always an i1 that needs to be zero-extended.
return;
}
- match &cx.sess().target.target.arch[..] {
+ if let Err(msg) = self.adjust_for_cabi(cx, abi) {
+ cx.sess().fatal(&msg);
+ }
+ }
+}
+
+impl<'a, Ty> FnType<'a, Ty> {
+ fn adjust_for_cabi<C>(&mut self, cx: C, abi: Abi) -> Result<(), String>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
+ {
+ match &cx.target_spec().arch[..] {
"x86" => {
let flavor = if abi == Abi::Fastcall {
cabi_x86::Flavor::Fastcall
},
"x86_64" => if abi == Abi::SysV64 {
cabi_x86_64::compute_abi_info(cx, self);
- } else if abi == Abi::Win64 || cx.sess().target.target.options.is_like_windows {
+ } else if abi == Abi::Win64 || cx.target_spec().options.is_like_windows {
cabi_x86_win64::compute_abi_info(self);
} else {
cabi_x86_64::compute_abi_info(cx, self);
"s390x" => cabi_s390x::compute_abi_info(cx, self),
"asmjs" => cabi_asmjs::compute_abi_info(cx, self),
"wasm32" => {
- if cx.sess().opts.target_triple.triple().contains("emscripten") {
+ if cx.target_spec().llvm_target.contains("emscripten") {
cabi_asmjs::compute_abi_info(cx, self)
} else {
- cabi_wasm32::compute_abi_info(cx, self)
+ cabi_wasm32::compute_abi_info(self)
}
}
"msp430" => cabi_msp430::compute_abi_info(self),
"nvptx" => cabi_nvptx::compute_abi_info(self),
"nvptx64" => cabi_nvptx64::compute_abi_info(self),
"hexagon" => cabi_hexagon::compute_abi_info(self),
- a => cx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
+ a => return Err(format!("unrecognized arch \"{}\" in target specification", a))
}
if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
attrs.set(ArgAttribute::StructRet);
}
+
+ Ok(())
}
+}
+impl<'a, 'tcx> FnType<'tcx> {
pub fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
let mut llargument_tys = Vec::new();
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CodegenCx;
+use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
-fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
- -> Option<Uniform> {
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ -> Option<Uniform>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
let size = arg.layout.size;
})
}
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
return;
ret.make_indirect();
}
-fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(32);
return;
arg.make_indirect();
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CodegenCx;
+use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
+use rustc_target::spec::HasTargetSpec;
use llvm::CallConv;
-fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
- -> Option<Uniform> {
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ -> Option<Uniform>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
let size = arg.layout.size;
})
}
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, vfp: bool) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, vfp: bool)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
return;
ret.make_indirect();
}
-fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) {
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, vfp: bool)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(32);
return;
});
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
+{
// If this is a target with a hard-float ABI, and the function is not explicitly
// `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
- let vfp = cx.sess().target.target.llvm_target.ends_with("hf")
+ let vfp = cx.target_spec().llvm_target.ends_with("hf")
&& fty.cconv != CallConv::ArmAapcsCallConv
&& !fty.variadic;
// except according to those terms.
use abi::{FnType, ArgType, LayoutExt, Uniform};
-use context::CodegenCx;
+use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
// See the https://github.com/kripken/emscripten-fastcomp-clang repository.
// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if ret.layout.is_aggregate() {
if let Some(unit) = ret.layout.homogeneous_aggregate(cx) {
let size = ret.layout.size;
}
}
-fn classify_arg_ty(arg: &mut ArgType) {
+fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() {
arg.make_indirect_byval();
}
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
use abi::{FnType, ArgType, LayoutExt};
-fn classify_ret_ty(ret: &mut ArgType) {
+fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
}
}
-fn classify_arg_ty(arg: &mut ArgType) {
+fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
}
}
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CodegenCx;
-use rustc::ty::layout::Size;
+use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- ret: &mut ArgType<'tcx>,
- offset: &mut Size) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect();
- *offset += cx.tcx.data_layout.pointer_size;
+ *offset += cx.data_layout().pointer_size;
}
}
-fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
- let dl = &cx.tcx.data_layout;
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
+ let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
*offset = offset.abi_align(align) + size.abi_align(align);
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
let mut offset = Size::from_bytes(0);
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
// except according to those terms.
use abi::{ArgAttribute, ArgType, CastTarget, FnType, LayoutExt, PassMode, Reg, RegKind, Uniform};
-use context::CodegenCx;
-use rustc::ty::layout::{self, Size};
+use rustc_target::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
-fn extend_integer_width_mips(arg: &mut ArgType, bits: u64) {
+fn extend_integer_width_mips<Ty>(arg: &mut ArgType<Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
- if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
- if let layout::Int(i, signed) = scalar.value {
+ if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
+ if let abi::Int(i, signed) = scalar.value {
if !signed && i.size().bits() == 32 {
if let PassMode::Direct(ref mut attrs) = arg.mode {
attrs.set(ArgAttribute::SExt);
arg.extend_integer_width_to(bits);
}
-fn float_reg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &ArgType<'tcx>, i: usize) -> Option<Reg> {
+fn float_reg<'a, Ty, C>(cx: C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
match ret.layout.field(cx, i).abi {
- layout::Abi::Scalar(ref scalar) => match scalar.value {
- layout::F32 => Some(Reg::f32()),
- layout::F64 => Some(Reg::f64()),
+ abi::Abi::Scalar(ref scalar) => match scalar.value {
+ abi::F32 => Some(Reg::f32()),
+ abi::F64 => Some(Reg::f64()),
_ => None
},
_ => None
}
}
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
extend_integer_width_mips(ret, 64);
return;
// use of float registers to structures (not unions) containing exactly one or two
// float fields.
- if let layout::FieldPlacement::Arbitrary { .. } = ret.layout.fields {
+ if let abi::FieldPlacement::Arbitrary { .. } = ret.layout.fields {
if ret.layout.fields.count() == 1 {
if let Some(reg) = float_reg(cx, ret, 0) {
ret.cast_to(reg);
}
}
-fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !arg.layout.is_aggregate() {
extend_integer_width_mips(arg, 64);
return;
}
- let dl = &cx.tcx.data_layout;
+ let dl = cx.data_layout();
let size = arg.layout.size;
let mut prefix = [None; 8];
let mut prefix_index = 0;
match arg.layout.fields {
- layout::FieldPlacement::Array { .. } => {
+ abi::FieldPlacement::Array { .. } => {
// Arrays are passed indirectly
arg.make_indirect();
return;
}
- layout::FieldPlacement::Union(_) => {
+ abi::FieldPlacement::Union(_) => {
// Unions are always treated as a series of 64-bit integer chunks
},
- layout::FieldPlacement::Arbitrary { .. } => {
+ abi::FieldPlacement::Arbitrary { .. } => {
// Structures are split up into a series of 64-bit integer chunks, but any aligned
// doubles not part of another aggregate are passed as floats.
let mut last_offset = Size::from_bytes(0);
let offset = arg.layout.fields.offset(i);
// We only care about aligned doubles
- if let layout::Abi::Scalar(ref scalar) = field.abi {
- if let layout::F64 = scalar.value {
+ if let abi::Abi::Scalar(ref scalar) = field.abi {
+ if let abi::F64 = scalar.value {
if offset.is_abi_aligned(dl.f64_align) {
// Insert enough integers to cover [last_offset, offset)
assert!(last_offset.is_abi_aligned(dl.f64_align));
});
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
// returned by reference. To pass a structure or union by reference, the caller
// places its address in the appropriate location: either in a register or on
// the stack, according to its position in the argument list. (..)"
-fn classify_ret_ty(ret: &mut ArgType) {
+fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
}
}
-fn classify_arg_ty(arg: &mut ArgType) {
+fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
}
}
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
use abi::{ArgType, FnType, LayoutExt};
-fn classify_ret_ty(ret: &mut ArgType) {
+fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
}
}
-fn classify_arg_ty(arg: &mut ArgType) {
+fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
}
}
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
use abi::{ArgType, FnType, LayoutExt};
-fn classify_ret_ty(ret: &mut ArgType) {
+fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
}
}
-fn classify_arg_ty(arg: &mut ArgType) {
+fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
}
}
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CodegenCx;
-use rustc::ty::layout::Size;
+use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- ret: &mut ArgType<'tcx>,
- offset: &mut Size) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect();
- *offset += cx.tcx.data_layout.pointer_size;
+ *offset += cx.data_layout().pointer_size;
}
}
-fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
- let dl = &cx.tcx.data_layout;
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
+ let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
*offset = offset.abi_align(align) + size.abi_align(align);
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
let mut offset = Size::from_bytes(0);
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
// need to be fixed when PowerPC vector support is added.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CodegenCx;
-use rustc::ty::layout;
+
+use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
#[derive(Debug, Clone, Copy, PartialEq)]
enum ABI {
}
use self::ABI::*;
-fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- arg: &mut ArgType<'tcx>,
- abi: ABI)
- -> Option<Uniform> {
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
+ -> Option<Uniform>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// ELFv1 only passes one-member aggregates transparently.
// ELFv2 passes up to eight uniquely addressable members.
})
}
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, abi: ABI) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>, abi: ABI)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
ret.make_indirect();
}
-fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) {
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>, abi: ABI)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
if size.bits() <= 64 {
(Reg { kind: RegKind::Integer, size }, size)
} else {
- let align = layout::Align::from_bits(64, 64).unwrap();
+ let align = Align::from_bits(64, 64).unwrap();
(Reg::i64(), size.abi_align(align))
}
},
});
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
- let abi = match cx.sess().target.target.target_endian.as_str() {
- "big" => ELFv1,
- "little" => ELFv2,
- _ => unimplemented!(),
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
+ let abi = match cx.data_layout().endian {
+ Endian::Big => ELFv1,
+ Endian::Little => ELFv2,
};
if !fty.ret.is_ignore() {
// for a pre-z13 machine or using -mno-vx.
use abi::{FnType, ArgType, LayoutExt, Reg};
-use context::CodegenCx;
-use rustc::ty::layout::{self, TyLayout};
+use rustc_target::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
-fn classify_ret_ty(ret: &mut ArgType) {
+fn classify_ret_ty<'a, Ty, C>(ret: &mut ArgType<Ty>)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
ret.extend_integer_width_to(64);
} else {
}
}
-fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- layout: TyLayout<'tcx>) -> bool {
+fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
+ where Ty: TyLayoutMethods<'a, C>,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
match layout.abi {
- layout::Abi::Scalar(ref scalar) => {
+ abi::Abi::Scalar(ref scalar) => {
match scalar.value {
- layout::F32 | layout::F64 => true,
+ abi::F32 | abi::F64 => true,
_ => false
}
}
- layout::Abi::Aggregate { .. } => {
+ abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0))
} else {
}
}
-fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
arg.extend_integer_width_to(64);
return;
}
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
}
// except according to those terms.
use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CodegenCx;
-use rustc::ty::layout::Size;
+use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- ret: &mut ArgType<'tcx>,
- offset: &mut Size) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<Ty>, offset: &mut Size)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect();
- *offset += cx.tcx.data_layout.pointer_size;
+ *offset += cx.data_layout().pointer_size;
}
}
-fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
- let dl = &cx.tcx.data_layout;
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<Ty>, offset: &mut Size)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
+ let dl = cx.data_layout();
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
*offset = offset.abi_align(align) + size.abi_align(align);
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<Ty>)
+ where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
+{
let mut offset = Size::from_bytes(0);
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
// FIXME: This needs an audit for correctness and completeness.
use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CodegenCx;
+use rustc_target::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
-fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
- -> Option<Uniform> {
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ -> Option<Uniform>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
// Ensure we have at most eight uniquely addressable members.
if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
})
}
-fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, Ty, C>(cx: C, ret: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
return;
ret.make_indirect();
}
-fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, Ty, C>(cx: C, arg: &mut ArgType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !arg.layout.is_aggregate() {
arg.extend_integer_width_to(64);
return;
});
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
}
// except according to those terms.
use abi::{FnType, ArgType};
-use context::CodegenCx;
-fn classify_ret_ty<'a, 'tcx>(_cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
// Return values are widened to at least 32 bits; the context argument
// was unused (`_cx`), so the generic version drops it entirely.
+fn classify_ret_ty<Ty>(ret: &mut ArgType<Ty>) {
ret.extend_integer_width_to(32);
}
-fn classify_arg_ty(arg: &mut ArgType) {
// Arguments, like returns, are widened to at least 32 bits.
+fn classify_arg_ty<Ty>(arg: &mut ArgType<Ty>) {
arg.extend_integer_width_to(32);
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
if !fty.ret.is_ignore() {
- classify_ret_ty(cx, &mut fty.ret);
+ classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
// except according to those terms.
use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind};
-use common::CodegenCx;
-
-use rustc::ty::layout::{self, TyLayout};
+use rustc_target::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
+use rustc_target::spec::HasTargetSpec;
#[derive(PartialEq)]
pub enum Flavor {
Fastcall
}
-fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- layout: TyLayout<'tcx>) -> bool {
+fn is_single_fp_element<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>) -> bool
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
match layout.abi {
- layout::Abi::Scalar(ref scalar) => {
+ abi::Abi::Scalar(ref scalar) => {
match scalar.value {
- layout::F32 | layout::F64 => true,
+ abi::F32 | abi::F64 => true,
_ => false
}
}
- layout::Abi::Aggregate { .. } => {
+ abi::Abi::Aggregate { .. } => {
if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
is_single_fp_element(cx, layout.field(cx, 0))
} else {
}
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- fty: &mut FnType<'tcx>,
- flavor: Flavor) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
+{
if !fty.ret.is_ignore() {
if fty.ret.layout.is_aggregate() {
// Returning a structure. Most often, this will use
// Some links:
// http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
- let t = &cx.sess().target.target;
+ let t = cx.target_spec();
if t.options.abi_return_struct_as_int {
// According to Clang, everyone but MSVC returns single-element
// float aggregates directly in a floating-point register.
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind};
-use context::CodegenCx;
-
-use rustc::ty::layout::{self, TyLayout, Size};
+use rustc_target::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
/// Classification of "eightbyte" components.
// NB: the order of the variants is from general to specific,
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
-fn classify_arg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &ArgType<'tcx>)
- -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory> {
- fn classify<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
- layout: TyLayout<'tcx>,
- cls: &mut [Option<Class>],
- off: Size)
- -> Result<(), Memory> {
+fn classify_arg<'a, Ty, C>(cx: C, arg: &ArgType<'a, Ty>)
+ -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
+ fn classify<'a, Ty, C>(cx: C, layout: TyLayout<'a, Ty>,
+ cls: &mut [Option<Class>], off: Size) -> Result<(), Memory>
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+ {
if !off.is_abi_aligned(layout.align) {
if !layout.is_zst() {
return Err(Memory);
}
let mut c = match layout.abi {
- layout::Abi::Uninhabited => return Ok(()),
+ abi::Abi::Uninhabited => return Ok(()),
- layout::Abi::Scalar(ref scalar) => {
+ abi::Abi::Scalar(ref scalar) => {
match scalar.value {
- layout::Int(..) |
- layout::Pointer => Class::Int,
- layout::F32 |
- layout::F64 => Class::Sse
+ abi::Int(..) |
+ abi::Pointer => Class::Int,
+ abi::F32 |
+ abi::F64 => Class::Sse
}
}
- layout::Abi::Vector { .. } => Class::Sse,
+ abi::Abi::Vector { .. } => Class::Sse,
- layout::Abi::ScalarPair(..) |
- layout::Abi::Aggregate { .. } => {
+ abi::Abi::ScalarPair(..) |
+ abi::Abi::Aggregate { .. } => {
match layout.variants {
- layout::Variants::Single { .. } => {
+ abi::Variants::Single { .. } => {
for i in 0..layout.fields.count() {
let field_off = off + layout.fields.offset(i);
classify(cx, layout.field(cx, i), cls, field_off)?;
}
return Ok(());
}
- layout::Variants::Tagged { .. } |
- layout::Variants::NicheFilling { .. } => return Err(Memory),
+ abi::Variants::Tagged { .. } |
+ abi::Variants::NicheFilling { .. } => return Err(Memory),
}
}
target
}
-pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, Ty, C>(cx: C, fty: &mut FnType<'a, Ty>)
+ where Ty: TyLayoutMethods<'a, C> + Copy,
+ C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
+{
let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
let mut sse_regs = 8; // XMM0-7
- let mut x86_64_ty = |arg: &mut ArgType<'tcx>, is_arg: bool| {
+ let mut x86_64_ty = |arg: &mut ArgType<'a, Ty>, is_arg: bool| {
let mut cls_or_mem = classify_arg(cx, arg);
let mut needed_int = 0;
use abi::{ArgType, FnType, Reg};
-use rustc::ty::layout;
+use rustc_target::abi;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
-pub fn compute_abi_info(fty: &mut FnType) {
- let fixup = |a: &mut ArgType| {
+pub fn compute_abi_info<Ty>(fty: &mut FnType<Ty>) {
+ let fixup = |a: &mut ArgType<Ty>| {
match a.layout.abi {
- layout::Abi::Uninhabited => {}
- layout::Abi::ScalarPair(..) |
- layout::Abi::Aggregate { .. } => {
+ abi::Abi::Uninhabited => {}
+ abi::Abi::ScalarPair(..) |
+ abi::Abi::Aggregate { .. } => {
match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
16 => a.cast_to(Reg::i16()),
_ => a.make_indirect()
}
}
- layout::Abi::Vector { .. } => {
+ abi::Abi::Vector { .. } => {
// FIXME(eddyb) there should be a size cap here
// (probably what clang calls "illegal vectors").
}
- layout::Abi::Scalar(_) => {
+ abi::Abi::Scalar(_) => {
if a.layout.size.bytes() > 8 {
a.make_indirect();
} else {
use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
+use rustc_target::spec::{HasTargetSpec, Target};
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
}
}
+/// Exposes the session's target spec through `CodegenCx`, so the
+/// target-generic ABI code (bounded by `HasTargetSpec`) can run
+/// against the LLVM codegen context.
+impl<'a, 'tcx> HasTargetSpec for &'a CodegenCx<'a, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target.target
+ }
+}
+
impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'a, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
self.tcx