let typ = self.infcx.tcx.tables().node_id_to_type(expr.id);
match typ.sty {
ty::TyFnDef(.., ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => {
- let from = bare_fn_ty.sig.0.inputs[0];
- let to = bare_fn_ty.sig.0.output;
+ let from = bare_fn_ty.sig.skip_binder().inputs()[0];
+ let to = bare_fn_ty.sig.skip_binder().output();
self.check_transmute(expr.span, from, to, expr.id);
}
_ => {
// The `Self` type is erased, so it should not appear in list of
// arguments or return type apart from the receiver.
let ref sig = self.item_type(method.def_id).fn_sig();
- for &input_ty in &sig.0.inputs[1..] {
+ for input_ty in &sig.skip_binder().inputs()[1..] {
if self.contains_illegal_self_type_reference(trait_def_id, input_ty) {
return Some(MethodViolationCode::ReferencesSelf);
}
}
- if self.contains_illegal_self_type_reference(trait_def_id, sig.0.output) {
+ if self.contains_illegal_self_type_reference(trait_def_id, sig.output().skip_binder()) {
return Some(MethodViolationCode::ReferencesSelf);
}
ty::TyFnDef(.., &ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: _,
- output: _,
- variadic: false
- })
+ ref sig,
}) |
ty::TyFnPtr(&ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: _,
- output: _,
- variadic: false
- })
- }) => {
+ ref sig
+ }) if !sig.variadic() => {
candidates.vec.push(FnPointerCandidate);
}
-> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)>
{
let arguments_tuple = match tuple_arguments {
- TupleArgumentsFlag::No => sig.0.inputs[0],
- TupleArgumentsFlag::Yes => self.intern_tup(&sig.0.inputs[..]),
+ TupleArgumentsFlag::No => sig.skip_binder().inputs()[0],
+ TupleArgumentsFlag::Yes =>
+ self.intern_tup(sig.skip_binder().inputs()),
};
let trait_ref = ty::TraitRef {
def_id: fn_trait_def_id,
substs: self.mk_substs_trait(self_ty, &[arguments_tuple]),
};
- ty::Binder((trait_ref, sig.0.output))
+ ty::Binder((trait_ref, sig.skip_binder().output()))
}
}
Some(TupleSimplifiedType(tys.len()))
}
ty::TyFnDef(.., ref f) | ty::TyFnPtr(ref f) => {
- Some(FunctionSimplifiedType(f.sig.0.inputs.len()))
+ Some(FunctionSimplifiedType(f.sig.skip_binder().inputs().len()))
}
ty::TyProjection(_) | ty::TyParam(_) => {
if can_simplify_params {
fn add_fn_sig(&mut self, fn_sig: &ty::PolyFnSig) {
+ // Accumulate flags from the signature's inputs and output into a
+ // fresh computation, then merge it as coming from under the binder.
let mut computation = FlagComputation::new();
- computation.add_tys(&fn_sig.0.inputs);
- computation.add_ty(fn_sig.0.output);
+ computation.add_tys(fn_sig.skip_binder().inputs());
+ computation.add_ty(fn_sig.skip_binder().output());
self.add_bound_computation(&computation);
}
-> RelateResult<'tcx, ty::FnSig<'tcx>>
where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
{
- if a.variadic != b.variadic {
+ if a.variadic() != b.variadic() {
return Err(TypeError::VariadicMismatch(
- expected_found(relation, &a.variadic, &b.variadic)));
+ expected_found(relation, &a.variadic(), &b.variadic())));
}
- let inputs = relate_arg_vecs(relation,
- &a.inputs,
- &b.inputs)?;
- let output = relation.relate(&a.output, &b.output)?;
+ if a.inputs().len() != b.inputs().len() {
+ return Err(TypeError::ArgCount);
+ }
- Ok(ty::FnSig {inputs: inputs,
- output: output,
- variadic: a.variadic})
- }
-}
+ let inputs = a.inputs().iter().zip(b.inputs()).map(|(&a, &b)| {
+ relation.relate_with_variance(ty::Contravariant, &a, &b)
+ }).collect::<Result<Vec<_>, _>>()?;
+ let output = relation.relate(&a.output(), &b.output())?;
-fn relate_arg_vecs<'a, 'gcx, 'tcx, R>(relation: &mut R,
- a_args: &[Ty<'tcx>],
- b_args: &[Ty<'tcx>])
- -> RelateResult<'tcx, Vec<Ty<'tcx>>>
- where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a
-{
- if a_args.len() != b_args.len() {
- return Err(TypeError::ArgCount);
+ Ok(ty::FnSig::new(inputs, output, a.variadic()))
}
-
- a_args.iter().zip(b_args)
- .map(|(a, b)| relation.relate_with_variance(ty::Contravariant, a, b))
- .collect()
}
impl<'tcx> Relate<'tcx> for ast::Unsafety {
impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
type Lifted = ty::FnSig<'tcx>;
fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<Self::Lifted> {
- tcx.lift(&self.inputs[..]).and_then(|inputs| {
- tcx.lift(&self.output).map(|output| {
- ty::FnSig {
- inputs: inputs,
- output: output,
- variadic: self.variadic
- }
+ tcx.lift(self.inputs()).and_then(|inputs| {
+ tcx.lift(&self.output()).map(|output| {
+ ty::FnSig::new(inputs, output, self.variadic())
})
})
}
impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
- ty::FnSig { inputs: self.inputs.fold_with(folder),
- output: self.output.fold_with(folder),
- variadic: self.variadic }
+ ty::FnSig::new(self.inputs().to_owned().fold_with(folder),
+ self.output().fold_with(folder),
+ self.variadic())
}
fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
- self.inputs.visit_with(visitor) || self.output.visit_with(visitor)
+ self.inputs().to_owned().visit_with(visitor) || self.output().visit_with(visitor)
}
}
/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns)
#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
pub struct FnSig<'tcx> {
- pub inputs: Vec<Ty<'tcx>>,
- pub output: Ty<'tcx>,
- pub variadic: bool
+ // Fields are private from this change on; all consumers must go
+ // through the accessor methods below, so the representation can
+ // evolve without touching every use site.
+ inputs: Vec<Ty<'tcx>>,
+ output: Ty<'tcx>,
+ variadic: bool
+}
+
+impl<'tcx> FnSig<'tcx> {
+ /// Constructs a signature from its parts; replaces the struct-literal
+ /// construction that the private fields no longer allow.
+ pub fn new(inputs: Vec<Ty<'tcx>>, output: Ty<'tcx>, variadic: bool) -> Self {
+ FnSig { inputs: inputs, output: output, variadic: variadic }
+ }
+
+ /// Borrows the argument types as a slice.
+ pub fn inputs(&self) -> &[Ty<'tcx>] {
+ &self.inputs
+ }
+
+ /// Returns the return type (returned by value; `Ty` is an interned
+ /// reference and cheap to copy).
+ pub fn output(&self) -> Ty<'tcx> {
+ self.output
+ }
+
+ /// Whether this is a variadic signature (per the doc comment above,
+ /// only true for foreign fns).
+ pub fn variadic(&self) -> bool {
+ self.variadic
+ }
}
pub type PolyFnSig<'tcx> = Binder<FnSig<'tcx>>;
impl<'tcx> PolyFnSig<'tcx> {
- pub fn inputs(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
- self.map_bound_ref(|fn_sig| fn_sig.inputs.clone())
+ pub fn inputs<'a>(&'a self) -> Binder<&[Ty<'tcx>]> {
+ Binder(self.0.inputs())
}
pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> {
self.map_bound_ref(|fn_sig| fn_sig.inputs[index])
// Type accessors for substructures of types
pub fn fn_args(&self) -> ty::Binder<Vec<Ty<'tcx>>> {
- self.fn_sig().inputs()
+ ty::Binder(self.fn_sig().inputs().skip_binder().iter().cloned().collect::<Vec<_>>())
}
pub fn fn_ret(&self) -> Binder<Ty<'tcx>> {
self.hash(f.unsafety);
self.hash(f.abi);
self.hash(f.sig.variadic());
- self.hash(f.sig.inputs().skip_binder().len());
+ self.hash(f.sig.skip_binder().inputs().len());
}
TyDynamic(ref data, ..) => {
if let Some(p) = data.principal() {
}
fn push_sig_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, sig: &ty::PolyFnSig<'tcx>) {
+ // Push order unchanged by this refactor: output first, then the
+ // inputs in reverse, so popping yields inputs in order before output.
- stack.push(sig.0.output);
- stack.extend(sig.0.inputs.iter().cloned().rev());
+ stack.push(sig.skip_binder().output());
+ stack.extend(sig.skip_binder().inputs().iter().cloned().rev());
}
impl<'tcx> fmt::Display for ty::FnSig<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "fn")?;
+ // Delegates to the shared `fn_sig` helper; only switches from
+ // direct field access to the new accessors.
- fn_sig(f, &self.inputs, self.variadic, self.output)
+ fn_sig(f, self.inputs(), self.variadic(), self.output())
}
}
impl<'tcx> fmt::Debug for ty::FnSig<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ // Same output format as before; fields are read via accessors now.
- write!(f, "({:?}; variadic: {})->{:?}", self.inputs, self.variadic, self.output)
+ write!(f, "({:?}; variadic: {})->{:?}", self.inputs(), self.variadic(), self.output())
}
}
let typ = cx.tcx.tables().node_id_to_type(expr.id);
match typ.sty {
ty::TyFnDef(.., ref bare_fn) if bare_fn.abi == RustIntrinsic => {
- let from = bare_fn.sig.0.inputs[0];
- let to = bare_fn.sig.0.output;
+ let from = bare_fn.sig.skip_binder().inputs()[0];
+ let to = bare_fn.sig.skip_binder().output();
return Some((&from.sty, &to.sty));
}
_ => (),
}
let sig = cx.erase_late_bound_regions(&bare_fn.sig);
- if !sig.output.is_nil() {
- let r = self.check_type_for_ffi(cache, sig.output);
+ if !sig.output().is_nil() {
+ let r = self.check_type_for_ffi(cache, sig.output());
match r {
FfiSafe => {}
_ => {
}
}
}
- for arg in sig.inputs {
+ for arg in sig.inputs() {
let r = self.check_type_for_ffi(cache, arg);
match r {
FfiSafe => {}
let sig = self.cx.tcx.item_type(def_id).fn_sig();
let sig = self.cx.tcx.erase_late_bound_regions(&sig);
- for (&input_ty, input_hir) in sig.inputs.iter().zip(&decl.inputs) {
- self.check_type_for_ffi_and_report_errors(input_hir.ty.span, &input_ty);
+ for (input_ty, input_hir) in sig.inputs().iter().zip(&decl.inputs) {
+ self.check_type_for_ffi_and_report_errors(input_hir.ty.span, input_ty);
}
if let hir::Return(ref ret_hir) = decl.output {
- let ret_ty = sig.output;
+ let ret_ty = sig.output();
if !ret_ty.is_nil() {
self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty);
}
let diverges = match ty.sty {
ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => {
// FIXME(canndrew): This is_never should probably be an is_uninhabited
- f.sig.0.output.is_never()
+ f.sig.skip_binder().output().is_never()
}
_ => false
};
span_bug!(expr.span, "method call has late-bound regions")
});
- assert_eq!(sig.inputs.len(), 2);
+ assert_eq!(sig.inputs().len(), 2);
let tupled_args = Expr {
- ty: sig.inputs[1],
+ ty: sig.inputs()[1],
temp_lifetime: temp_lifetime,
span: expr.span,
kind: ExprKind::Tuple {
.iter()
.enumerate()
.map(|(index, arg)| {
- (fn_sig.inputs[index], Some(&*arg.pat))
+ (fn_sig.inputs()[index], Some(&*arg.pat))
});
let body = self.tcx.map.expr(body_id);
let arguments = implicit_argument.into_iter().chain(explicit_arguments);
self.cx(MirSource::Fn(id)).build(|cx| {
- build::construct_fn(cx, id, arguments, abi, fn_sig.output, body)
+ build::construct_fn(cx, id, arguments, abi, fn_sig.output(), body)
});
intravisit::walk_fn(self, fk, decl, body_id, span, id);
match *destination {
Some((ref dest, _)) => {
let dest_ty = dest.ty(mir, tcx).to_ty(tcx);
- if let Err(terr) = self.sub_types(sig.output, dest_ty) {
+ if let Err(terr) = self.sub_types(sig.output(), dest_ty) {
span_mirbug!(self, term,
"call dest mismatch ({:?} <- {:?}): {:?}",
- dest_ty, sig.output, terr);
+ dest_ty, sig.output(), terr);
}
},
None => {
// FIXME(canndrew): This is_never should probably be an is_uninhabited
- if !sig.output.is_never() {
+ if !sig.output().is_never() {
span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig);
}
},
args: &[Operand<'tcx>])
{
debug!("check_call_inputs({:?}, {:?})", sig, args);
- if args.len() < sig.inputs.len() ||
- (args.len() > sig.inputs.len() && !sig.variadic) {
+ if args.len() < sig.inputs().len() ||
+ (args.len() > sig.inputs().len() && !sig.variadic()) {
span_mirbug!(self, term, "call to {:?} with wrong # of args", sig);
}
- for (n, (fn_arg, op_arg)) in sig.inputs.iter().zip(args).enumerate() {
+ for (n, (fn_arg, op_arg)) in sig.inputs().iter().zip(args).enumerate() {
let op_arg_ty = op_arg.ty(mir, self.tcx());
if let Err(terr) = self.sub_types(op_arg_ty, fn_arg) {
span_mirbug!(self, term, "bad arg #{:?} ({:?} <- {:?}): {:?}",
// box_free takes a Box as a pointer. Allow for that.
- if sig.inputs.len() != 1 {
+ if sig.inputs().len() != 1 {
span_mirbug!(self, term, "box_free should take 1 argument");
return;
}
- let pointee_ty = match sig.inputs[0].sty {
+ let pointee_ty = match sig.inputs()[0].sty {
ty::TyRawPtr(mt) => mt.ty,
_ => {
span_mirbug!(self, term, "box_free should take a raw ptr");
Cdecl => llvm::CCallConv,
};
- let mut inputs = &sig.inputs[..];
+ let mut inputs = sig.inputs();
let extra_args = if abi == RustCall {
- assert!(!sig.variadic && extra_args.is_empty());
+ assert!(!sig.variadic() && extra_args.is_empty());
- match inputs[inputs.len() - 1].sty {
+ match sig.inputs().last().unwrap().sty {
ty::TyTuple(ref tupled_arguments) => {
- inputs = &inputs[..inputs.len() - 1];
+ inputs = &sig.inputs()[0..sig.inputs().len() - 1];
&tupled_arguments[..]
}
_ => {
}
}
} else {
- assert!(sig.variadic || extra_args.is_empty());
+ assert!(sig.variadic() || extra_args.is_empty());
extra_args
};
}
};
- let ret_ty = sig.output;
+ let ret_ty = sig.output();
let mut ret = arg_of(ret_ty, true);
if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
FnType {
args: args,
ret: ret,
- variadic: sig.variadic,
+ variadic: sig.variadic(),
cconv: cconv
}
}
};
// Fat pointers are returned by-value.
if !self.ret.is_ignore() {
- if !type_is_fat_ptr(ccx.tcx(), sig.output) {
+ if !type_is_fat_ptr(ccx.tcx(), sig.output()) {
fixup(&mut self.ret);
}
}
let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
let mut arg_idx = 0;
- for (i, arg_ty) in sig.inputs.into_iter().enumerate() {
- let lldestptr = adt::trans_field_ptr(bcx, sig.output, dest_val, Disr::from(disr), i);
+ for (i, arg_ty) in sig.inputs().into_iter().enumerate() {
+ let lldestptr = adt::trans_field_ptr(bcx, sig.output(), dest_val, Disr::from(disr), i);
let arg = &fcx.fn_ty.args[arg_idx];
arg_idx += 1;
let b = &bcx.build();
arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
}
}
- adt::trans_set_discr(bcx, sig.output, dest, disr);
+ adt::trans_set_discr(bcx, sig.output(), dest, disr);
}
fcx.finish(bcx, DebugLoc::None);
// Make a version with the type of by-ref closure.
let ty::ClosureTy { unsafety, abi, mut sig } = tcx.closure_type(def_id, substs);
- sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet
+ sig.0 = ty::FnSig::new({
+ let mut inputs = sig.0.inputs().to_owned();
+ inputs.insert(0, ref_closure_ty); // sig has no self type as of yet
+ inputs
+ },
+ sig.0.output(),
+ sig.0.variadic()
+ );
let llref_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: unsafety,
abi: abi,
// Make a version of the closure type with the same arguments, but
// with argument #0 being by value.
assert_eq!(abi, Abi::RustCall);
- sig.0.inputs[0] = closure_ty;
+ sig.0 = ty::FnSig::new(
+ {
+ let mut inputs = sig.0.inputs().to_owned();
+ inputs[0] = closure_ty;
+ inputs
+ },
+ sig.0.output(),
+ sig.0.variadic()
+ );
let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
let fn_ty = FnType::new(ccx, abi, &sig, &[]);
}
};
let sig = tcx.erase_late_bound_regions_and_normalize(sig);
- let tuple_input_ty = tcx.intern_tup(&sig.inputs[..]);
- let sig = ty::FnSig {
- inputs: vec![bare_fn_ty_maybe_ref,
- tuple_input_ty],
- output: sig.output,
- variadic: false
- };
+ let tuple_input_ty = tcx.intern_tup(sig.inputs());
+ let sig = ty::FnSig::new(
+ vec![bare_fn_ty_maybe_ref, tuple_input_ty],
+ sig.output(),
+ false
+ );
let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
let tuple_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::C,
- sig: ty::Binder(ty::FnSig {
- inputs: vec![tcx.mk_mut_ptr(tcx.types.u8)],
- output: tcx.types.never,
- variadic: false
- }),
+ sig: ty::Binder(ty::FnSig::new(
+ vec![tcx.mk_mut_ptr(tcx.types.u8)],
+ tcx.types.never,
+ false
+ )),
}));
let unwresume = ccx.eh_unwind_resume();
ty::ClosureKind::FnOnce => ty,
};
- let sig = sig.map_bound(|sig| ty::FnSig {
- inputs: iter::once(env_ty).chain(sig.inputs).collect(),
- ..sig
- });
+ let sig = sig.map_bound(|sig| ty::FnSig::new(
+ iter::once(env_ty).chain(sig.inputs().into_iter().cloned()).collect(),
+ sig.output(),
+ sig.variadic()
+ ));
Cow::Owned(ty::BareFnTy { unsafety: unsafety, abi: abi, sig: sig })
}
_ => bug!("unexpected type {:?} to ty_fn_sig", ty)
{
let signature = cx.tcx().erase_late_bound_regions(signature);
- let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs.len() + 1);
+ let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs().len() + 1);
// return type
- signature_metadata.push(match signature.output.sty {
+ signature_metadata.push(match signature.output().sty {
ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
- _ => type_metadata(cx, signature.output, span)
+ _ => type_metadata(cx, signature.output(), span)
});
// regular arguments
- for &argument_type in &signature.inputs {
+ for &argument_type in signature.inputs() {
signature_metadata.push(type_metadata(cx, argument_type, span));
}
return create_DIArray(DIB(cx), &[]);
}
- let mut signature = Vec::with_capacity(sig.inputs.len() + 1);
+ let mut signature = Vec::with_capacity(sig.inputs().len() + 1);
// Return type -- llvm::DIBuilder wants this at index 0
- signature.push(match sig.output.sty {
+ signature.push(match sig.output().sty {
ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(),
- _ => type_metadata(cx, sig.output, syntax_pos::DUMMY_SP)
+ _ => type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)
});
let inputs = if abi == Abi::RustCall {
- &sig.inputs[..sig.inputs.len()-1]
+ &sig.inputs()[..sig.inputs().len() - 1]
} else {
- &sig.inputs[..]
+ sig.inputs()
};
// Arguments types
signature.push(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP));
}
- if abi == Abi::RustCall && !sig.inputs.is_empty() {
- if let ty::TyTuple(args) = sig.inputs[sig.inputs.len() - 1].sty {
+ if abi == Abi::RustCall && !sig.inputs().is_empty() {
+ if let ty::TyTuple(args) = sig.inputs()[sig.inputs().len() - 1].sty {
for &argument_type in args {
signature.push(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP));
}
output.push_str("fn(");
let sig = cx.tcx().erase_late_bound_regions_and_normalize(sig);
- if !sig.inputs.is_empty() {
- for ¶meter_type in &sig.inputs {
+ if !sig.inputs().is_empty() {
+ for ¶meter_type in sig.inputs() {
push_debuginfo_type_name(cx, parameter_type, true, output);
output.push_str(", ");
}
output.pop();
}
- if sig.variadic {
- if !sig.inputs.is_empty() {
+ if sig.variadic() {
+ if !sig.inputs().is_empty() {
output.push_str(", ...");
} else {
output.push_str("...");
output.push(')');
- if !sig.output.is_nil() {
+ if !sig.output().is_nil() {
output.push_str(" -> ");
- push_debuginfo_type_name(cx, sig.output, true, output);
+ push_debuginfo_type_name(cx, sig.output(), true, output);
}
},
ty::TyClosure(..) => {
let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx));
// FIXME(canndrew): This is_never should really be an is_uninhabited
- if sig.output.is_never() {
+ if sig.output().is_never() {
llvm::Attribute::NoReturn.apply_llfn(Function, llfn);
}
};
let sig = tcx.erase_late_bound_regions_and_normalize(&fty.sig);
- let arg_tys = sig.inputs;
- let ret_ty = sig.output;
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
let name = &*tcx.item_name(def_id).as_str();
let span = match call_debug_location {
// again to find them and extract the arguments
intr.inputs.iter()
.zip(llargs)
- .zip(&arg_tys)
+ .zip(arg_tys)
.flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
.collect()
};
trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
- let sig = ty::FnSig {
- inputs: inputs,
- output: output,
- variadic: false,
- };
+ let sig = ty::FnSig::new(inputs, output, false);
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: vec![i8p],
- output: tcx.mk_nil(),
- variadic: false,
- }),
+ sig: ty::Binder(ty::FnSig::new(vec![i8p], tcx.mk_nil(), false)),
}));
let output = tcx.types.i32;
let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
let tcx = bcx.tcx();
let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig());
- let arg_tys = sig.inputs;
+ let arg_tys = sig.inputs();
// every intrinsic takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input");
return;
}
- let extra_args = &args[sig.inputs.len()..];
+ let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(&self.mir, bcx.tcx());
bcx.monomorphize(&op_ty)
// Make a fake operand for store_return
let op = OperandRef {
val: Ref(dst),
- ty: sig.output,
+ ty: sig.output(),
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
}
debug_loc.apply_to_bcx(ret_bcx);
let op = OperandRef {
val: Immediate(invokeret),
- ty: sig.output,
+ ty: sig.output(),
};
self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
});
if let Some((_, target)) = *destination {
let op = OperandRef {
val: Immediate(llret),
- ty: sig.output,
+ ty: sig.output(),
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
funclet_br(self, bcx, target);
assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty()));
let t = dg.ty();
- let sig = ty::FnSig {
- inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)],
- output: tcx.mk_nil(),
- variadic: false,
- };
+ let sig = ty::FnSig::new(vec![tcx.mk_mut_ptr(tcx.types.i8)], tcx.mk_nil(), false);
// Create a FnType for fn(*mut i8) and substitute the real type in
// later - that prevents FnType from splitting fat pointers up.
output.push_str("fn(");
- let ty::FnSig {
- inputs: sig_inputs,
- output: sig_output,
- variadic: sig_variadic
- } = self.tcx.erase_late_bound_regions_and_normalize(sig);
+ let sig = self.tcx.erase_late_bound_regions_and_normalize(sig);
- if !sig_inputs.is_empty() {
- for ¶meter_type in &sig_inputs {
+ if !sig.inputs().is_empty() {
+ for ¶meter_type in sig.inputs() {
self.push_type_name(parameter_type, output);
output.push_str(", ");
}
output.pop();
}
- if sig_variadic {
- if !sig_inputs.is_empty() {
+ if sig.variadic() {
+ if !sig.inputs().is_empty() {
output.push_str(", ...");
} else {
output.push_str("...");
output.push(')');
- if !sig_output.is_nil() {
+ if !sig.output().is_nil() {
output.push_str(" -> ");
- self.push_type_name(sig_output, output);
+ self.push_type_name(sig.output(), output);
}
},
ty::TyClosure(def_id, ref closure_substs) => {
// checking for here would be considered early bound
// anyway.)
let inputs = bare_fn_ty.sig.inputs();
- let late_bound_in_args = tcx.collect_constrained_late_bound_regions(&inputs);
+ let late_bound_in_args = tcx.collect_constrained_late_bound_regions(
+ &inputs.map_bound(|i| i.to_owned()));
let output = bare_fn_ty.sig.output();
let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output);
for br in late_bound_in_ret.difference(&late_bound_in_args) {
self.tcx().mk_bare_fn(ty::BareFnTy {
unsafety: unsafety,
abi: abi,
- sig: ty::Binder(ty::FnSig {
- inputs: input_tys,
- output: output_ty,
- variadic: decl.variadic
- }),
+ sig: ty::Binder(ty::FnSig::new(input_tys, output_ty, decl.variadic)),
})
}
let expected_arg_ty = expected_sig.as_ref().and_then(|e| {
// no guarantee that the correct number of expected args
// were supplied
- if i < e.inputs.len() {
- Some(e.inputs[i])
+ if i < e.inputs().len() {
+ Some(e.inputs()[i])
} else {
None
}
self.ty_of_arg(&rb, a, expected_arg_ty)
}).collect();
- let expected_ret_ty = expected_sig.map(|e| e.output);
+ let expected_ret_ty = expected_sig.map(|e| e.output());
let is_infer = match decl.output {
hir::Return(ref output) if output.node == hir::TyInfer => true,
ty::ClosureTy {
unsafety: unsafety,
abi: abi,
- sig: ty::Binder(ty::FnSig {inputs: input_tys,
- output: output_ty,
- variadic: decl.variadic}),
+ sig: ty::Binder(ty::FnSig::new(input_tys, output_ty, decl.variadic)),
}
}
// This is the "default" function signature, used in case of error.
// In that case, we check each argument against "error" in order to
// set up all the node type bindings.
- error_fn_sig = ty::Binder(ty::FnSig {
- inputs: self.err_args(arg_exprs.len()),
- output: self.tcx.types.err,
- variadic: false,
- });
+ error_fn_sig = ty::Binder(ty::FnSig::new(
+ self.err_args(arg_exprs.len()),
+ self.tcx.types.err,
+ false,
+ ));
(&error_fn_sig, None)
}
let expected_arg_tys =
self.expected_types_for_fn_args(call_expr.span,
expected,
- fn_sig.output,
- &fn_sig.inputs);
+ fn_sig.output(),
+ fn_sig.inputs());
self.check_argument_types(call_expr.span,
- &fn_sig.inputs,
+ fn_sig.inputs(),
&expected_arg_tys[..],
arg_exprs,
- fn_sig.variadic,
+ fn_sig.variadic(),
TupleArgumentsFlag::DontTupleArguments,
def_span);
- fn_sig.output
+ fn_sig.output()
}
fn confirm_deferred_closure_call(&self,
let expected_arg_tys = self.expected_types_for_fn_args(call_expr.span,
expected,
- fn_sig.output.clone(),
- &fn_sig.inputs);
+ fn_sig.output().clone(),
+ fn_sig.inputs());
self.check_argument_types(call_expr.span,
- &fn_sig.inputs,
+ fn_sig.inputs(),
&expected_arg_tys,
arg_exprs,
- fn_sig.variadic,
+ fn_sig.variadic(),
TupleArgumentsFlag::TupleArguments,
None);
- fn_sig.output
+ fn_sig.output()
}
fn confirm_overloaded_call(&self,
debug!("attempt_resolution: method_callee={:?}", method_callee);
- for (&method_arg_ty, &self_arg_ty) in
- method_sig.inputs[1..].iter().zip(&self.fn_sig.inputs) {
- fcx.demand_eqtype(self.call_expr.span, self_arg_ty, method_arg_ty);
+ for (method_arg_ty, self_arg_ty) in
+ method_sig.inputs().into_iter().skip(1).zip(self.fn_sig.inputs()) {
+ fcx.demand_eqtype(self.call_expr.span, &self_arg_ty, &method_arg_ty);
}
- fcx.demand_eqtype(self.call_expr.span, method_sig.output, self.fn_sig.output);
+ fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output());
fcx.write_overloaded_call_method_map(self.call_expr, method_callee);
}
// Tuple up the arguments and insert the resulting function type into
// the `closures` table.
- fn_ty.sig.0.inputs = vec![self.tcx.intern_tup(&fn_ty.sig.0.inputs[..])];
+ fn_ty.sig.0 = ty::FnSig::new(vec![self.tcx.intern_tup(fn_ty.sig.skip_binder().inputs())],
+ fn_ty.sig.skip_binder().output(),
+ fn_ty.sig.variadic()
+ );
debug!("closure for {:?} --> sig={:?} opt_kind={:?}",
expr_def_id,
debug!("deduce_sig_from_projection: ret_param_ty {:?}",
ret_param_ty);
- let fn_sig = ty::FnSig {
- inputs: input_tys,
- output: ret_param_ty,
- variadic: false,
- };
+ let fn_sig = ty::FnSig::new(input_tys, ret_param_ty, false);
debug!("deduce_sig_from_projection: fn_sig {:?}", fn_sig);
Some(fn_sig)
_ => bug!("{:?} is not a MethodTraitItem", trait_m),
};
- let impl_iter = impl_sig.inputs.iter();
- let trait_iter = trait_sig.inputs.iter();
+ let impl_iter = impl_sig.inputs().iter();
+ let trait_iter = trait_sig.inputs().iter();
impl_iter.zip(trait_iter)
.zip(impl_m_iter)
.zip(trait_m_iter)
})
.next()
.unwrap_or_else(|| {
- if infcx.sub_types(false, &cause, impl_sig.output, trait_sig.output)
+ if infcx.sub_types(false, &cause, impl_sig.output(),
+ trait_sig.output())
.is_err() {
(impl_m_output.span(), Some(trait_m_output.span()))
} else {
};
let impl_m_fty = m_fty(impl_m);
let trait_m_fty = m_fty(trait_m);
- if impl_m_fty.sig.0.inputs.len() != trait_m_fty.sig.0.inputs.len() {
- let trait_number_args = trait_m_fty.sig.0.inputs.len();
- let impl_number_args = impl_m_fty.sig.0.inputs.len();
+ let trait_number_args = trait_m_fty.sig.inputs().skip_binder().len();
+ let impl_number_args = impl_m_fty.sig.inputs().skip_binder().len();
+ if trait_number_args != impl_number_args {
let trait_m_node_id = tcx.map.as_local_node_id(trait_m.def_id);
let trait_span = if let Some(trait_id) = trait_m_node_id {
match tcx.map.expect_trait_item(trait_id).node {
let fty = tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: abi,
- sig: ty::Binder(FnSig {
- inputs: inputs,
- output: output,
- variadic: false,
- }),
+ sig: ty::Binder(FnSig::new(inputs, output, false)),
}));
let i_n_tps = tcx.item_generics(def_id).types.len();
if i_n_tps != n_tps {
let fn_ty = tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
- sig: ty::Binder(FnSig {
- inputs: vec![mut_u8],
- output: tcx.mk_nil(),
- variadic: false,
- }),
+ sig: ty::Binder(FnSig::new(vec![mut_u8], tcx.mk_nil(), false)),
});
(0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32)
}
let sig = tcx.item_type(def_id).fn_sig();
let sig = tcx.no_late_bound_regions(sig).unwrap();
- if intr.inputs.len() != sig.inputs.len() {
+ if intr.inputs.len() != sig.inputs().len() {
span_err!(tcx.sess, it.span, E0444,
"platform-specific intrinsic has invalid number of \
arguments: found {}, expected {}",
- sig.inputs.len(), intr.inputs.len());
+ sig.inputs().len(), intr.inputs.len());
return
}
- let input_pairs = intr.inputs.iter().zip(&sig.inputs);
+ let input_pairs = intr.inputs.iter().zip(sig.inputs());
for (i, (expected_arg, arg)) in input_pairs.enumerate() {
match_intrinsic_type_to_type(ccx, &format!("argument {}", i + 1), it.span,
&mut structural_to_nomimal, expected_arg, arg);
}
match_intrinsic_type_to_type(ccx, "return value", it.span,
&mut structural_to_nomimal,
- &intr.output, sig.output);
+ &intr.output, sig.output());
return
}
None => {
infer::FnCall,
&fty.sig).0;
let fn_sig = self.instantiate_type_scheme(span, trait_ref.substs, &fn_sig);
- let transformed_self_ty = fn_sig.inputs[0];
+ let transformed_self_ty = fn_sig.inputs()[0];
let method_ty = tcx.mk_fn_def(def_id, trait_ref.substs,
tcx.mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(fn_sig),
// Create the function context. This is either derived from scratch or,
// in the case of function expressions, based on the outer context.
- let mut fcx = FnCtxt::new(inherited, fn_sig.output, body.id);
+ let mut fcx = FnCtxt::new(inherited, fn_sig.output(), body.id);
*fcx.ps.borrow_mut() = UnsafetyState::function(unsafety, unsafety_id);
fcx.require_type_is_sized(fcx.ret_ty, decl.output.span(), traits::ReturnType);
fcx.ret_ty = fcx.instantiate_anon_types(&fcx.ret_ty);
- fn_sig.output = fcx.ret_ty;
+ fn_sig = ty::FnSig::new(fn_sig.inputs().to_owned(), fcx.ret_ty, fn_sig.variadic());
{
let mut visit = GatherLocalsVisitor { fcx: &fcx, };
// Add formal parameters.
- for (arg_ty, input) in fn_sig.inputs.iter().zip(&decl.inputs) {
+ for (arg_ty, input) in fn_sig.inputs().iter().zip(&decl.inputs) {
// The type of the argument must be well-formed.
//
// NB -- this is now checked in wfcheck, but that
ty::TyFnDef(def_id, .., ref fty) => {
// HACK(eddyb) ignore self in the definition (see above).
let expected_arg_tys = self.expected_types_for_fn_args(sp, expected,
- fty.sig.0.output,
- &fty.sig.0.inputs[1..]);
-
- self.check_argument_types(sp, &fty.sig.0.inputs[1..], &expected_arg_tys[..],
- args_no_rcvr, fty.sig.0.variadic, tuple_arguments,
+ fty.sig.0.output(),
+ &fty.sig.0.inputs()[1..]);
+ self.check_argument_types(sp, &fty.sig.0.inputs()[1..], &expected_arg_tys[..],
+ args_no_rcvr, fty.sig.0.variadic(), tuple_arguments,
self.tcx.map.span_if_local(def_id));
- fty.sig.0.output
+ fty.sig.0.output()
}
_ => {
span_bug!(callee_expr.span, "method without bare fn type");
//
// FIXME(#27579) return types should not be implied bounds
let fn_sig_tys: Vec<_> =
- fn_sig.inputs.iter()
- .cloned()
- .chain(Some(fn_sig.output))
- .collect();
+ fn_sig.inputs().iter().cloned().chain(Some(fn_sig.output())).collect();
let old_body_id = self.set_body_id(body_id.node_id());
self.relate_free_regions(&fn_sig_tys[..], body_id.node_id(), span);
let fn_sig = method.ty.fn_sig();
let fn_sig = // late-bound regions should have been instantiated
self.tcx.no_late_bound_regions(fn_sig).unwrap();
- let self_ty = fn_sig.inputs[0];
+ let self_ty = fn_sig.inputs()[0];
let (m, r) = match self_ty.sty {
ty::TyRef(r, ref m) => (m.mutbl, r),
_ => {
self.type_must_outlive(infer::CallRcvr(deref_expr.span),
self_ty, r_deref_expr);
self.type_must_outlive(infer::CallReturn(deref_expr.span),
- fn_sig.output, r_deref_expr);
- fn_sig.output
+ fn_sig.output(), r_deref_expr);
+ fn_sig.output()
}
None => derefd_ty
};
let fty = fcx.instantiate_type_scheme(span, free_substs, &fty);
let sig = fcx.tcx.liberate_late_bound_regions(free_id_outlive, &fty.sig);
- for &input_ty in &sig.inputs {
- fcx.register_wf_obligation(input_ty, span, self.code.clone());
+ for input_ty in sig.inputs() {
+ fcx.register_wf_obligation(&input_ty, span, self.code.clone());
}
- implied_bounds.extend(sig.inputs);
+ implied_bounds.extend(sig.inputs());
- fcx.register_wf_obligation(sig.output, span, self.code.clone());
+ fcx.register_wf_obligation(sig.output(), span, self.code.clone());
// FIXME(#25759) return types should not be implied bounds
- implied_bounds.push(sig.output);
+ implied_bounds.push(sig.output());
self.check_where_clauses(fcx, span, predicates);
}
debug!("check_method_receiver: sig={:?}", sig);
- let self_arg_ty = sig.inputs[0];
+ let self_arg_ty = sig.inputs()[0];
let rcvr_ty = match ExplicitSelf::determine(self_ty, self_arg_ty) {
ExplicitSelf::ByValue => self_ty,
ExplicitSelf::ByReference(region, mutbl) => {
tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: abi::Abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: inputs,
- output: ty,
- variadic: false
- })
+ sig: ty::Binder(ty::FnSig::new(inputs, ty, false))
}))
}
};
ccx.tcx.mk_fn_def(def_id, substs, ccx.tcx.mk_bare_fn(ty::BareFnTy {
abi: abi,
unsafety: hir::Unsafety::Unsafe,
- sig: ty::Binder(ty::FnSig {inputs: input_tys,
- output: output,
- variadic: decl.variadic}),
+ sig: ty::Binder(ty::FnSig::new(input_tys, output, decl.variadic)),
}))
}
tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: Vec::new(),
- output: tcx.mk_nil(),
- variadic: false
- })
+ sig: ty::Binder(ty::FnSig::new(Vec::new(), tcx.mk_nil(), false))
}));
require_same_types(
tcx.mk_bare_fn(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
- sig: ty::Binder(ty::FnSig {
- inputs: vec![
+ sig: ty::Binder(ty::FnSig::new(
+ vec![
tcx.types.isize,
tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))
],
- output: tcx.types.isize,
- variadic: false,
- }),
+ tcx.types.isize,
+ false,
+ )),
}));
require_same_types(
sig: &ty::PolyFnSig<'tcx>,
variance: VarianceTermPtr<'a>) {
let contra = self.contravariant(variance);
- for &input in &sig.0.inputs {
+ for &input in sig.0.inputs() {
self.add_constraints_from_ty(generics, input, contra);
}
- self.add_constraints_from_ty(generics, sig.0.output, variance);
+ self.add_constraints_from_ty(generics, sig.0.output(), variance);
}
/// Adds constraints appropriate for a region appearing in a
cx.tcx.sess.cstore.fn_arg_names(did).into_iter()
}.peekable();
FnDecl {
- output: Return(sig.0.output.clean(cx)),
+ output: Return(sig.skip_binder().output().clean(cx)),
attrs: Attributes::default(),
- variadic: sig.0.variadic,
+ variadic: sig.skip_binder().variadic(),
inputs: Arguments {
- values: sig.0.inputs.iter().map(|t| {
+ values: sig.skip_binder().inputs().iter().map(|t| {
Argument {
type_: t.clean(cx),
id: ast::CRATE_NODE_ID,