let assoc_bindings: Vec<_> =
data.bindings.iter()
.map(|b| ConvertedBinding { item_name: b.name,
- ty: ast_ty_to_ty(this, rscope, &*b.ty),
+ ty: ast_ty_to_ty(this, rscope, &b.ty),
span: b.span })
.collect();
err.span_suggestion(full_span, "try adding parentheses (per RFC 438):",
format!("&{}({} +{})",
mutbl_str,
- pprust::ty_to_string(&*mut_ty.ty),
+ pprust::ty_to_string(&mut_ty.ty),
pprust::bounds_to_string(bounds)));
}
(&hir::TyRptr(Some(ref lt), ref mut_ty), Some(full_span)) => {
format!("&{} {}({} +{})",
pprust::lifetime_to_string(lt),
mutbl_str,
- pprust::ty_to_string(&*mut_ty.ty),
+ pprust::ty_to_string(&mut_ty.ty),
pprust::bounds_to_string(bounds)));
}
let typ = match ast_ty.node {
hir::TyVec(ref ty) => {
- tcx.mk_slice(ast_ty_to_ty(this, rscope, &**ty))
+ tcx.mk_slice(ast_ty_to_ty(this, rscope, &ty))
}
hir::TyObjectSum(ref ty, ref bounds) => {
- match ast_ty_to_trait_ref(this, rscope, &**ty, bounds) {
+ match ast_ty_to_trait_ref(this, rscope, &ty, bounds) {
Ok((trait_ref, projection_bounds)) => {
trait_ref_to_object_type(this,
rscope,
}
hir::TyPtr(ref mt) => {
tcx.mk_ptr(ty::TypeAndMut {
- ty: ast_ty_to_ty(this, rscope, &*mt.ty),
+ ty: ast_ty_to_ty(this, rscope, &mt.ty),
mutbl: mt.mutbl
})
}
&ObjectLifetimeDefaultRscope::new(
rscope,
ty::ObjectLifetimeDefault::Specific(r));
- let t = ast_ty_to_ty(this, rscope1, &*mt.ty);
+ let t = ast_ty_to_ty(this, rscope1, &mt.ty);
tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl})
}
hir::TyTup(ref fields) => {
let flds = fields.iter()
- .map(|t| ast_ty_to_ty(this, rscope, &**t))
+ .map(|t| ast_ty_to_ty(this, rscope, &t))
.collect();
tcx.mk_tup(flds)
}
hir::TyBareFn(ref bf) => {
require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);
- let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl);
+ let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &bf.decl);
tcx.mk_fn(None, tcx.mk_bare_fn(bare_fn))
}
hir::TyPolyTraitRef(ref bounds) => {
Ok(r) => {
match r {
ConstVal::Int(i) =>
- tcx.mk_array(ast_ty_to_ty(this, rscope, &**ty),
+ tcx.mk_array(ast_ty_to_ty(this, rscope, &ty),
i as usize),
ConstVal::Uint(i) =>
- tcx.mk_array(ast_ty_to_ty(this, rscope, &**ty),
+ tcx.mk_array(ast_ty_to_ty(this, rscope, &ty),
i as usize),
_ => {
span_err!(tcx.sess, ast_ty.span, E0249,
match a.ty.node {
hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
hir::TyInfer => this.ty_infer(None, None, None, a.ty.span),
- _ => ast_ty_to_ty(this, rscope, &*a.ty),
+ _ => ast_ty_to_ty(this, rscope, &a.ty),
}
}
let arg_tys: Vec<Ty> =
arg_params.iter().map(|a| ty_of_arg(this, &rb, a, None)).collect();
let arg_pats: Vec<String> =
- arg_params.iter().map(|a| pprust::pat_to_string(&*a.pat)).collect();
+ arg_params.iter().map(|a| pprust::pat_to_string(&a.pat)).collect();
// Second, if there was exactly one lifetime (either a substitution or a
// reference) in the arguments, then any anonymous regions in the output
Some(ty::ExplicitSelfCategory::ByReference(region, mutability)))
}
hir::SelfExplicit(ref ast_type, _) => {
- let explicit_type = ast_ty_to_ty(this, rscope, &**ast_type);
+ let explicit_type = ast_ty_to_ty(this, rscope, &ast_type);
// We wish to (for now) categorize an explicit self
// declaration like `self: SomeType` into either `self`,
_ if is_infer =>
ty::FnConverging(this.ty_infer(None, None, None, decl.output.span())),
hir::Return(ref output) =>
- ty::FnConverging(ast_ty_to_ty(this, &rb, &**output)),
+ ty::FnConverging(ast_ty_to_ty(this, &rb, &output)),
hir::DefaultReturn(..) => unreachable!(),
hir::NoReturn(..) => ty::FnDiverging
};
fcx.write_ty(pat.id, expected);
}
hir::PatLit(ref lt) => {
- check_expr(fcx, &**lt);
- let expr_ty = fcx.expr_ty(&**lt);
+ check_expr(fcx, &lt);
+ let expr_ty = fcx.expr_ty(&lt);
// Byte string patterns behave the same way as array patterns
// They can denote both statically and dynamically sized byte arrays
}
if let Some(ref p) = *sub {
- check_pat(pcx, &**p, expected);
+ check_pat(pcx, &p, expected);
}
}
}
fcx.write_ty(pat.id, pat_ty);
demand::eqtype(fcx, pat.span, expected, pat_ty);
for (element_pat, element_ty) in elements.iter().zip(element_tys) {
- check_pat(pcx, &**element_pat, element_ty);
+ check_pat(pcx, &element_pat, element_ty);
}
}
hir::PatBox(ref inner) => {
let inner_ty = fcx.infcx().next_ty_var();
let uniq_ty = tcx.mk_box(inner_ty);
- if check_dereferencable(pcx, pat.span, expected, &**inner) {
+ if check_dereferencable(pcx, pat.span, expected, &inner) {
// Here, `demand::subtype` is good enough, but I don't
// think any errors can be introduced by using
// `demand::eqtype`.
demand::eqtype(fcx, pat.span, expected, uniq_ty);
fcx.write_ty(pat.id, uniq_ty);
- check_pat(pcx, &**inner, inner_ty);
+ check_pat(pcx, &inner, inner_ty);
} else {
fcx.write_error(pat.id);
- check_pat(pcx, &**inner, tcx.types.err);
+ check_pat(pcx, &inner, tcx.types.err);
}
}
hir::PatRegion(ref inner, mutbl) => {
let expected = fcx.infcx().shallow_resolve(expected);
- if check_dereferencable(pcx, pat.span, expected, &**inner) {
+ if check_dereferencable(pcx, pat.span, expected, &inner) {
// `demand::subtype` would be good enough, but using
// `eqtype` turns out to be equally general. See (*)
// below for details.
};
fcx.write_ty(pat.id, rptr_ty);
- check_pat(pcx, &**inner, inner_ty);
+ check_pat(pcx, &inner, inner_ty);
} else {
fcx.write_error(pat.id);
- check_pat(pcx, &**inner, tcx.types.err);
+ check_pat(pcx, &inner, tcx.types.err);
}
}
hir::PatVec(ref before, ref slice, ref after) => {
demand::eqtype(fcx, pat.span, expected, pat_ty);
for elt in before {
- check_pat(pcx, &**elt, inner_ty);
+ check_pat(pcx, &elt, inner_ty);
}
if let Some(ref slice) = *slice {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
ty: tcx.mk_slice(inner_ty),
mutbl: mutbl
});
- check_pat(pcx, &**slice, slice_ty);
+ check_pat(pcx, &slice, slice_ty);
}
for elt in after {
- check_pat(pcx, &**elt, inner_ty);
+ check_pat(pcx, &elt, inner_ty);
}
}
}
for arm in arms {
let mut pcx = pat_ctxt {
fcx: fcx,
- map: pat_id_map(&tcx.def_map, &*arm.pats[0]),
+ map: pat_id_map(&tcx.def_map, &arm.pats[0]),
};
for p in &arm.pats {
- check_pat(&mut pcx, &**p, discrim_ty);
+ check_pat(&mut pcx, &p, discrim_ty);
}
}
// arm for inconsistent arms or to the whole match when a `()` type
// is required).
Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => {
- check_expr_coercable_to_type(fcx, &*arm.body, ety);
+ check_expr_coercable_to_type(fcx, &arm.body, ety);
ety
}
_ => {
- check_expr_with_expectation(fcx, &*arm.body, expected);
+ check_expr_with_expectation(fcx, &arm.body, expected);
fcx.node_ty(arm.body.id)
}
};
if let Some(ref e) = arm.guard {
- check_expr_has_type(fcx, &**e, tcx.types.bool);
+ check_expr_has_type(fcx, &e, tcx.types.bool);
}
if result_ty.references_error() || bty.references_error() {
if let Some(subpats) = subpats {
for pat in subpats {
- check_pat(pcx, &**pat, tcx.types.err);
+ check_pat(pcx, &pat, tcx.types.err);
}
}
fcx.write_error(pat.id);
if let Some(subpats) = subpats {
for pat in subpats {
- check_pat(pcx, &**pat, tcx.types.err);
+ check_pat(pcx, &pat, tcx.types.err);
}
}
};
if let Some(subpats) = subpats {
if subpats.len() == arg_tys.len() {
for (subpat, arg_ty) in subpats.iter().zip(arg_tys) {
- check_pat(pcx, &**subpat, arg_ty);
+ check_pat(pcx, &subpat, arg_ty);
}
} else if arg_tys.is_empty() {
span_err!(tcx.sess, pat.span, E0024,
subpats.len(), if subpats.len() == 1 {""} else {"s"}, kind_name);
for pat in subpats {
- check_pat(pcx, &**pat, tcx.types.err);
+ check_pat(pcx, &pat, tcx.types.err);
}
} else {
span_err!(tcx.sess, pat.span, E0023,
arg_tys.len(), if arg_tys.len() == 1 {""} else {"s"});
for pat in subpats {
- check_pat(pcx, &**pat, tcx.types.err);
+ check_pat(pcx, &pat, tcx.types.err);
}
}
}
}
};
- check_pat(pcx, &*field.pat, field_ty);
+ check_pat(pcx, &field.pat, field_ty);
}
// Report an error if not all the fields were specified.
match method::lookup_in_trait_adjusted(fcx,
call_expr.span,
- Some(&*callee_expr),
+ Some(&callee_expr),
method_name,
trait_def_id,
autoderefs,
call_expr.span,
expected,
fn_sig.output.clone(),
- &*fn_sig.inputs);
+ &fn_sig.inputs);
check_argument_types(fcx,
call_expr.span,
- &*fn_sig.inputs,
- &*expected_arg_tys,
+ &fn_sig.inputs,
+ &expected_arg_tys,
arg_exprs,
fn_sig.variadic,
TupleArgumentsFlag::TupleArguments);
&fn_sig,
decl,
expr.id,
- &*body,
+ &body,
fcx.inh);
// Tuple up the arguments and insert the resulting function type into
probe::WhereClausePick(ref poly_trait_ref) => {
// Where clauses can have bound regions in them. We need to instantiate
// those to convert from a poly-trait-ref to a trait-ref.
- self.replace_late_bound_regions_with_fresh_var(&*poly_trait_ref).substs.clone()
+ self.replace_late_bound_regions_with_fresh_var(&poly_trait_ref).substs.clone()
}
}
}
NoPreference,
|ty, _| {
match ty.sty {
- ty::TyTrait(ref data) => Some(closure(self, ty, &**data)),
+ ty::TyTrait(ref data) => Some(closure(self, ty, &data)),
_ => None,
}
});
hir::ExprField(ref expr, _) |
hir::ExprTupField(ref expr, _) |
hir::ExprIndex(ref expr, _) |
- hir::ExprUnary(hir::UnDeref, ref expr) => exprs.push(&**expr),
+ hir::ExprUnary(hir::UnDeref, ref expr) => exprs.push(&expr),
_ => break,
}
}
unsize: None
}))), false)
};
- let index_expr_ty = self.fcx.expr_ty(&**index_expr);
+ let index_expr_ty = self.fcx.expr_ty(&index_expr);
let result = check::try_index_step(
self.fcx,
ty::MethodCall::expr(expr.id),
expr,
- &**base_expr,
+ &base_expr,
adjusted_base_ty,
autoderefs,
unsize,
if let Some((input_ty, return_ty)) = result {
demand::suptype(self.fcx, index_expr.span, input_ty, index_expr_ty);
- let expr_ty = self.fcx.expr_ty(&*expr);
+ let expr_ty = self.fcx.expr_ty(&expr);
demand::suptype(self.fcx, expr.span, expr_ty, return_ty);
}
}
self.fcx,
expr.span,
Some(method_call),
- Some(&**base_expr),
- self.fcx.expr_ty(&**base_expr),
+ Some(&base_expr),
+ self.fcx.expr_ty(&base_expr),
PreferMutLvalue);
}
}
for (i, trait_did) in candidates.iter().enumerate() {
err.fileline_help(span,
- &*format!("candidate #{}: use `{}`",
+ &format!("candidate #{}: use `{}`",
i + 1,
fcx.tcx().item_path_str(*trait_did)));
}
for (i, trait_info) in candidates.iter().enumerate() {
err.fileline_help(span,
- &*format!("candidate #{}: `{}`",
+ &format!("candidate #{}: `{}`",
i + 1,
fcx.tcx().item_path_str(trait_info.def_id)));
}
fn visit_ty(&mut self, t: &'tcx hir::Ty) {
match t.node {
hir::TyFixedLengthVec(_, ref expr) => {
- check_const_in_type(self.ccx, &**expr, self.ccx.tcx.types.usize);
+ check_const_in_type(self.ccx, &expr, self.ccx.tcx.types.usize);
}
_ => {}
}
// Add explicitly-declared locals.
fn visit_local(&mut self, local: &'tcx hir::Local) {
let o_ty = match local.ty {
- Some(ref ty) => Some(self.fcx.to_ty(&**ty)),
+ Some(ref ty) => Some(self.fcx.to_ty(&ty)),
None => None
};
self.assign(local.span, local.id, o_ty);
fn visit_ty(&mut self, t: &'tcx hir::Ty) {
match t.node {
hir::TyFixedLengthVec(ref ty, ref count_expr) => {
- self.visit_ty(&**ty);
- check_expr_with_hint(self.fcx, &**count_expr, self.fcx.tcx().types.usize);
+ self.visit_ty(&ty);
+ check_expr_with_hint(self.fcx, &count_expr, self.fcx.tcx().types.usize);
}
hir::TyBareFn(ref function_declaration) => {
intravisit::walk_fn_decl_nopat(self, &function_declaration.decl);
// Create type variables for each argument.
pat_util::pat_bindings(
&tcx.def_map,
- &*input.pat,
+ &input.pat,
|_bm, pat_id, sp, _path| {
let var_ty = visit.assign(sp, pat_id, None);
fcx.require_type_is_sized(var_ty, sp,
// Check the pattern.
let pcx = pat_ctxt {
fcx: &fcx,
- map: pat_id_map(&tcx.def_map, &*input.pat),
+ map: pat_id_map(&tcx.def_map, &input.pat),
};
- _match::check_pat(&pcx, &*input.pat, *arg_ty);
+ _match::check_pat(&pcx, &input.pat, *arg_ty);
}
visit.visit_block(body);
match it.node {
// Consts can play a role in type-checking, so they are included here.
hir::ItemStatic(_, _, ref e) |
- hir::ItemConst(_, ref e) => check_const(ccx, it.span, &**e, it.id),
+ hir::ItemConst(_, ref e) => check_const(ccx, it.span, &e, it.id),
hir::ItemEnum(ref enum_definition, _) => {
check_enum_variants(ccx,
it.span,
hir::ItemFn(ref decl, _, _, _, _, ref body) => {
let fn_pty = ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(it.id));
let param_env = ParameterEnvironment::for_item(ccx.tcx, it.id);
- check_bare_fn(ccx, &**decl, &**body, it.id, it.span, fn_pty.ty, param_env);
+ check_bare_fn(ccx, &decl, &body, it.id, it.span, fn_pty.ty, param_env);
}
hir::ItemImpl(_, _, _, _, _, ref impl_items) => {
debug!("ItemImpl {} with id {}", it.name, it.id);
for impl_item in impl_items {
match impl_item.node {
hir::ImplItemKind::Const(_, ref expr) => {
- check_const(ccx, impl_item.span, &*expr, impl_item.id)
+ check_const(ccx, impl_item.span, &expr, impl_item.id)
}
hir::ImplItemKind::Method(ref sig, ref body) => {
check_method_body(ccx, &impl_pty.generics, sig, body,
for trait_item in trait_items {
match trait_item.node {
hir::ConstTraitItem(_, Some(ref expr)) => {
- check_const(ccx, trait_item.span, &*expr, trait_item.id)
+ check_const(ccx, trait_item.span, &expr, trait_item.id)
}
hir::MethodTraitItem(ref sig, Some(ref body)) => {
check_trait_fn_not_const(ccx, trait_item.span, sig.constness);
}) {
if let Some(ref istring) = attr.value_str() {
let parser = Parser::new(&istring);
- let types = &*generics.ty_params;
+ let types = &generics.ty_params;
for token in parser {
match token {
Piece::String(_) => (), // Normal string, no need to check it
&impl_const,
impl_item.span,
trait_const,
- &*impl_trait_ref);
+ &impl_trait_ref);
} else {
span_err!(tcx.sess, impl_item.span, E0323,
"item `{}` is an associated const, \
(PreferMutLvalue, Some(trait_did)) => {
method::lookup_in_trait_adjusted(fcx,
expr.span,
- Some(&*base_expr),
+ Some(&base_expr),
token::intern("index_mut"),
trait_did,
autoderefs,
(None, Some(trait_did)) => {
method::lookup_in_trait_adjusted(fcx,
expr.span,
- Some(&*base_expr),
+ Some(&base_expr),
token::intern("index"),
trait_did,
autoderefs,
} else {
expected_arg_tys = match expected_arg_tys.get(0) {
Some(&ty) => match ty.sty {
- ty::TyTuple(ref tys) => &**tys,
+ ty::TyTuple(ref tys) => &tys,
_ => &[]
},
None => &[]
});
check_expr_with_unifier(fcx,
- &**arg,
+ &arg,
expected.unwrap_or(ExpectHasType(formal_ty)),
NoPreference, || {
// 2. Coerce to the most detailed type that could be coerced
// to, which is `expected_ty` if `rvalue_hint` returns an
// `ExprHasType(expected_ty)`, or the `formal_ty` otherwise.
let coerce_ty = expected.and_then(|e| e.only_has_type(fcx));
- demand::coerce(fcx, arg.span, coerce_ty.unwrap_or(formal_ty), &**arg);
+ demand::coerce(fcx, arg.span, coerce_ty.unwrap_or(formal_ty), &arg);
// 3. Relate the expected type and the formal one,
// if the expected type was used for the coercion.
// arguments which we skipped above.
if variadic {
for arg in args.iter().skip(expected_arg_count) {
- check_expr(fcx, &**arg);
+ check_expr(fcx, &arg);
// There are a few types which get autopromoted when passed via varargs
// in C but we just error out instead and require explicit casts.
let arg_ty = structurally_resolved_type(fcx, arg.span,
- fcx.expr_ty(&**arg));
+ fcx.expr_ty(&arg));
match arg_ty.sty {
ty::TyFloat(ast::FloatTy::F32) => {
fcx.type_error_message(arg.span,
tps: &[P<hir::Ty>],
expected: Expectation<'tcx>,
lvalue_pref: LvaluePreference) {
- let rcvr = &*args[0];
- check_expr_with_lvalue_pref(fcx, &*rcvr, lvalue_pref);
+ let rcvr = &args[0];
+ check_expr_with_lvalue_pref(fcx, &rcvr, lvalue_pref);
// no need to check for bot/err -- callee does that
let expr_t = structurally_resolved_type(fcx,
expr.span,
- fcx.expr_ty(&*rcvr));
+ fcx.expr_ty(&rcvr));
- let tps = tps.iter().map(|ast_ty| fcx.to_ty(&**ast_ty)).collect::<Vec<_>>();
+ let tps = tps.iter().map(|ast_ty| fcx.to_ty(&ast_ty)).collect::<Vec<_>>();
let fn_ty = match method::lookup(fcx,
method_name.span,
method_name.node,
let branches_ty = match opt_else_expr {
Some(ref else_expr) => {
- check_expr_with_expectation(fcx, &**else_expr, expected);
- let else_ty = fcx.expr_ty(&**else_expr);
+ check_expr_with_expectation(fcx, &else_expr, expected);
+ let else_ty = fcx.expr_ty(&else_expr);
infer::common_supertype(fcx.infcx(),
TypeOrigin::IfExpression(sp),
true,
// Make sure to give a type to the field even if there's
// an error, so we can continue typechecking
- check_expr_coercable_to_type(fcx, &*field.expr, expected_field_type);
+ check_expr_coercable_to_type(fcx, &field.expr, expected_field_type);
}
// Make sure the programmer specified all the fields.
// otherwise we might ICE
fcx.write_error(id);
for field in fields {
- check_expr(fcx, &*field.expr);
+ check_expr(fcx, &field.expr);
}
match *base_expr {
- Some(ref base) => check_expr(fcx, &**base),
+ Some(ref base) => check_expr(fcx, &base),
None => {}
}
}
}
});
check_expr_with_expectation(fcx, subexpr, expected_inner);
- let referent_ty = fcx.expr_ty(&**subexpr);
+ let referent_ty = fcx.expr_ty(&subexpr);
fcx.write_ty(id, tcx.mk_box(referent_ty));
}
hir::ExprLit(ref lit) => {
- let typ = check_lit(fcx, &**lit, expected);
+ let typ = check_lit(fcx, &lit, expected);
fcx.write_ty(id, typ);
}
hir::ExprBinary(op, ref lhs, ref rhs) => {
_ => NoPreference
};
check_expr_with_expectation_and_lvalue_pref(
- fcx, &**oprnd, expected_inner, lvalue_pref);
- let mut oprnd_t = fcx.expr_ty(&**oprnd);
+ fcx, &oprnd, expected_inner, lvalue_pref);
+ let mut oprnd_t = fcx.expr_ty(&oprnd);
if !oprnd_t.references_error() {
match unop {
Some(mt) => mt.ty,
None => match try_overloaded_deref(fcx, expr.span,
Some(MethodCall::expr(expr.id)),
- Some(&**oprnd), oprnd_t, lvalue_pref) {
+ Some(&oprnd), oprnd_t, lvalue_pref) {
Some(mt) => mt.ty,
None => {
fcx.type_error_message(expr.span, |actual| {
if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) {
oprnd_t = op::check_user_unop(fcx, "!", "not",
tcx.lang_items.not_trait(),
- expr, &**oprnd, oprnd_t, unop);
+ expr, &oprnd, oprnd_t, unop);
}
}
hir::UnNeg => {
if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
oprnd_t = op::check_user_unop(fcx, "-", "neg",
tcx.lang_items.neg_trait(),
- expr, &**oprnd, oprnd_t, unop);
+ expr, &oprnd, oprnd_t, unop);
}
}
}
let hint = expected.only_has_type(fcx).map_or(NoExpectation, |ty| {
match ty.sty {
ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => {
- if fcx.tcx().expr_is_lval(&**oprnd) {
+ if fcx.tcx().expr_is_lval(&oprnd) {
// Lvalues may legitimately have unsized types.
// For example, dereferences of a fat pointer and
// the last field of a struct can be unsized.
});
let lvalue_pref = LvaluePreference::from_mutbl(mutbl);
check_expr_with_expectation_and_lvalue_pref(fcx,
- &**oprnd,
+ &oprnd,
hint,
lvalue_pref);
- let tm = ty::TypeAndMut { ty: fcx.expr_ty(&**oprnd), mutbl: mutbl };
+ let tm = ty::TypeAndMut { ty: fcx.expr_ty(&oprnd), mutbl: mutbl };
let oprnd_t = if tm.ty.references_error() {
tcx.types.err
} else {
}
hir::ExprInlineAsm(ref ia) => {
for &(_, ref input) in &ia.inputs {
- check_expr(fcx, &**input);
+ check_expr(fcx, &input);
}
for out in &ia.outputs {
- check_expr(fcx, &*out.expr);
+ check_expr(fcx, &out.expr);
}
fcx.write_nil(id);
}
not `()`");
},
Some(ref e) => {
- check_expr_coercable_to_type(fcx, &**e, result_type);
+ check_expr_coercable_to_type(fcx, &e, result_type);
}
}
}
ty::FnDiverging => {
if let Some(ref e) = *expr_opt {
- check_expr(fcx, &**e);
+ check_expr(fcx, &e);
}
span_err!(tcx.sess, expr.span, E0166,
"`return` in a function declared as diverging");
fcx.write_ty(id, fcx.infcx().next_diverging_ty_var());
}
hir::ExprAssign(ref lhs, ref rhs) => {
- check_expr_with_lvalue_pref(fcx, &**lhs, PreferMutLvalue);
+ check_expr_with_lvalue_pref(fcx, &lhs, PreferMutLvalue);
let tcx = fcx.tcx();
- if !tcx.expr_is_lval(&**lhs) {
+ if !tcx.expr_is_lval(&lhs) {
span_err!(tcx.sess, expr.span, E0070,
"invalid left-hand side expression");
}
- let lhs_ty = fcx.expr_ty(&**lhs);
- check_expr_coercable_to_type(fcx, &**rhs, lhs_ty);
- let rhs_ty = fcx.expr_ty(&**rhs);
+ let lhs_ty = fcx.expr_ty(&lhs);
+ check_expr_coercable_to_type(fcx, &rhs, lhs_ty);
+ let rhs_ty = fcx.expr_ty(&rhs);
- fcx.require_expr_have_sized_type(&**lhs, traits::AssignmentLhsSized);
+ fcx.require_expr_have_sized_type(&lhs, traits::AssignmentLhsSized);
if lhs_ty.references_error() || rhs_ty.references_error() {
fcx.write_error(id);
}
}
hir::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => {
- check_then_else(fcx, &**cond, &**then_blk, opt_else_expr.as_ref().map(|e| &**e),
+ check_then_else(fcx, &cond, &then_blk, opt_else_expr.as_ref().map(|e| &**e),
id, expr.span, expected);
}
hir::ExprWhile(ref cond, ref body, _) => {
- check_expr_has_type(fcx, &**cond, tcx.types.bool);
- check_block_no_value(fcx, &**body);
- let cond_ty = fcx.expr_ty(&**cond);
+ check_expr_has_type(fcx, &cond, tcx.types.bool);
+ check_block_no_value(fcx, &body);
+ let cond_ty = fcx.expr_ty(&cond);
let body_ty = fcx.node_ty(body.id);
if cond_ty.references_error() || body_ty.references_error() {
fcx.write_error(id);
}
}
hir::ExprLoop(ref body, _) => {
- check_block_no_value(fcx, &**body);
- if !may_break(tcx, expr.id, &**body) {
+ check_block_no_value(fcx, &body);
+ if !may_break(tcx, expr.id, &body) {
fcx.write_ty(id, fcx.infcx().next_diverging_ty_var());
} else {
fcx.write_nil(id);
}
}
hir::ExprMatch(ref discrim, ref arms, match_src) => {
- _match::check_match(fcx, expr, &**discrim, arms, expected, match_src);
+ _match::check_match(fcx, expr, &discrim, arms, expected, match_src);
}
hir::ExprClosure(capture, ref decl, ref body) => {
- closure::check_expr_closure(fcx, expr, capture, &**decl, &**body, expected);
+ closure::check_expr_closure(fcx, expr, capture, &decl, &body, expected);
}
hir::ExprBlock(ref b) => {
- check_block_with_expected(fcx, &**b, expected);
+ check_block_with_expected(fcx, &b, expected);
fcx.write_ty(id, fcx.node_ty(b.id));
}
hir::ExprCall(ref callee, ref args) => {
- callee::check_call(fcx, expr, &**callee, &args[..], expected);
+ callee::check_call(fcx, expr, &callee, &args[..], expected);
// we must check that return type of called functions is WF:
let ret_ty = fcx.expr_ty(expr);
}
hir::ExprMethodCall(name, ref tps, ref args) => {
check_method_call(fcx, expr, name, &args[..], &tps[..], expected, lvalue_pref);
- let arg_tys = args.iter().map(|a| fcx.expr_ty(&**a));
+ let arg_tys = args.iter().map(|a| fcx.expr_ty(&a));
let args_err = arg_tys.fold(false, |rest_err, a| rest_err || a.references_error());
if args_err {
fcx.write_error(id);
}
hir::ExprCast(ref e, ref t) => {
if let hir::TyFixedLengthVec(_, ref count_expr) = t.node {
- check_expr_with_hint(fcx, &**count_expr, tcx.types.usize);
+ check_expr_with_hint(fcx, &count_expr, tcx.types.usize);
}
// Find the type of `e`. Supply hints based on the type we are casting to,
}
}
hir::ExprType(ref e, ref t) => {
- let typ = fcx.to_ty(&**t);
- check_expr_eq_type(fcx, &**e, typ);
+ let typ = fcx.to_ty(&t);
+ check_expr_eq_type(fcx, &e, typ);
fcx.write_ty(id, typ);
}
hir::ExprVec(ref args) => {
let typ = match uty {
Some(uty) => {
for e in args {
- check_expr_coercable_to_type(fcx, &**e, uty);
+ check_expr_coercable_to_type(fcx, &e, uty);
}
uty
}
None => {
let t: Ty = fcx.infcx().next_ty_var();
for e in args {
- check_expr_has_type(fcx, &**e, t);
+ check_expr_has_type(fcx, &e, t);
}
t
}
fcx.write_ty(id, typ);
}
hir::ExprRepeat(ref element, ref count_expr) => {
- check_expr_has_type(fcx, &**count_expr, tcx.types.usize);
- let count = fcx.tcx().eval_repeat_count(&**count_expr);
+ check_expr_has_type(fcx, &count_expr, tcx.types.usize);
+ let count = fcx.tcx().eval_repeat_count(&count_expr);
let uty = match expected {
ExpectHasType(uty) => {
let (element_ty, t) = match uty {
Some(uty) => {
- check_expr_coercable_to_type(fcx, &**element, uty);
+ check_expr_coercable_to_type(fcx, &element, uty);
(uty, uty)
}
None => {
let t: Ty = fcx.infcx().next_ty_var();
- check_expr_has_type(fcx, &**element, t);
- (fcx.expr_ty(&**element), t)
+ check_expr_has_type(fcx, &element, t);
+ (fcx.expr_ty(&element), t)
}
};
let t = match flds {
Some(ref fs) if i < fs.len() => {
let ety = fs[i];
- check_expr_coercable_to_type(fcx, &**e, ety);
+ check_expr_coercable_to_type(fcx, &e, ety);
ety
}
_ => {
- check_expr_with_expectation(fcx, &**e, NoExpectation);
- fcx.expr_ty(&**e)
+ check_expr_with_expectation(fcx, &e, NoExpectation);
+ fcx.expr_ty(&e)
}
};
err_field = err_field || t.references_error();
fcx.require_expr_have_sized_type(expr, traits::StructInitializerSized);
}
hir::ExprField(ref base, ref field) => {
- check_field(fcx, expr, lvalue_pref, &**base, field);
+ check_field(fcx, expr, lvalue_pref, &base, field);
}
hir::ExprTupField(ref base, idx) => {
- check_tup_field(fcx, expr, lvalue_pref, &**base, idx);
+ check_tup_field(fcx, expr, lvalue_pref, &base, idx);
}
hir::ExprIndex(ref base, ref idx) => {
- check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
- check_expr(fcx, &**idx);
+ check_expr_with_lvalue_pref(fcx, &base, lvalue_pref);
+ check_expr(fcx, &idx);
- let base_t = fcx.expr_ty(&**base);
- let idx_t = fcx.expr_ty(&**idx);
+ let base_t = fcx.expr_ty(&base);
+ let idx_t = fcx.expr_ty(&idx);
if base_t.references_error() {
fcx.write_ty(id, base_t);
fcx.write_ty(id, element_ty);
}
None => {
- check_expr_has_type(fcx, &**idx, fcx.tcx().types.err);
+ check_expr_has_type(fcx, &idx, fcx.tcx().types.err);
fcx.type_error_message(
expr.span,
|actual| {
}
hir::ExprRange(ref start, ref end) => {
let t_start = start.as_ref().map(|e| {
- check_expr(fcx, &**e);
- fcx.expr_ty(&**e)
+ check_expr(fcx, &e);
+ fcx.expr_ty(&e)
});
let t_end = end.as_ref().map(|e| {
- check_expr(fcx, &**e);
- fcx.expr_ty(&**e)
+ check_expr(fcx, &e);
+ fcx.expr_ty(&e)
});
let idx_type = match (t_start, t_end) {
fcx.write_ty(local.id, t);
if let Some(ref init) = local.init {
- check_decl_initializer(fcx, local, &**init);
- let init_ty = fcx.expr_ty(&**init);
+ check_decl_initializer(fcx, local, &init);
+ let init_ty = fcx.expr_ty(&init);
if init_ty.references_error() {
fcx.write_ty(local.id, init_ty);
}
let pcx = pat_ctxt {
fcx: fcx,
- map: pat_id_map(&tcx.def_map, &*local.pat),
+ map: pat_id_map(&tcx.def_map, &local.pat),
};
- _match::check_pat(&pcx, &*local.pat, t);
+ _match::check_pat(&pcx, &local.pat, t);
let pat_ty = fcx.node_ty(local.pat.id);
if pat_ty.references_error() {
fcx.write_ty(local.id, pat_ty);
node_id = id;
match decl.node {
hir::DeclLocal(ref l) => {
- check_decl_local(fcx, &**l);
+ check_decl_local(fcx, &l);
let l_t = fcx.node_ty(l.id);
saw_bot = saw_bot || fcx.infcx().type_var_diverges(l_t);
saw_err = saw_err || l_t.references_error();
hir::StmtExpr(ref expr, id) => {
node_id = id;
// Check with expected type of ()
- check_expr_has_type(fcx, &**expr, fcx.tcx().mk_nil());
- let expr_ty = fcx.expr_ty(&**expr);
+ check_expr_has_type(fcx, &expr, fcx.tcx().mk_nil());
+ let expr_ty = fcx.expr_ty(&expr);
saw_bot = saw_bot || fcx.infcx().type_var_diverges(expr_ty);
saw_err = saw_err || expr_ty.references_error();
}
hir::StmtSemi(ref expr, id) => {
node_id = id;
- check_expr(fcx, &**expr);
- let expr_ty = fcx.expr_ty(&**expr);
+ check_expr(fcx, &expr);
+ let expr_ty = fcx.expr_ty(&expr);
saw_bot |= fcx.infcx().type_var_diverges(expr_ty);
saw_err |= expr_ty.references_error();
}
}
let ety = match expected {
ExpectHasType(ety) => {
- check_expr_coercable_to_type(fcx, &**e, ety);
+ check_expr_coercable_to_type(fcx, &e, ety);
ety
}
_ => {
- check_expr_with_expectation(fcx, &**e, expected);
- fcx.expr_ty(&**e)
+ check_expr_with_expectation(fcx, &e, expected);
+ fcx.expr_ty(&e)
}
};
let type_count = type_defs.len(space);
assert_eq!(substs.types.len(space), 0);
for (i, typ) in data.types.iter().enumerate() {
- let t = fcx.to_ty(&**typ);
+ let t = fcx.to_ty(&typ);
if i < type_count {
substs.types.push(space, t);
} else if i == type_count {
}
let input_tys: Vec<Ty> =
- data.inputs.iter().map(|ty| fcx.to_ty(&**ty)).collect();
+ data.inputs.iter().map(|ty| fcx.to_ty(&ty)).collect();
let tuple_ty = fcx.tcx().mk_tup(input_tys);
}
let output_ty: Option<Ty> =
- data.output.as_ref().map(|ty| fcx.to_ty(&**ty));
+ data.output.as_ref().map(|ty| fcx.to_ty(&ty));
let output_ty =
output_ty.unwrap_or(fcx.tcx().mk_nil());
pub fn may_break(cx: &ty::ctxt, id: ast::NodeId, b: &hir::Block) -> bool {
// First: is there an unlabeled break immediately
// inside the loop?
- (loop_query(&*b, |e| {
+ (loop_query(&b, |e| {
match *e {
hir::ExprBreak(None) => true,
_ => false
fn visit_arm(rcx: &mut Rcx, arm: &hir::Arm) {
// see above
for p in &arm.pats {
- constrain_bindings_in_pat(&**p, rcx);
+ constrain_bindings_in_pat(&p, rcx);
}
intravisit::walk_arm(rcx, arm);
fn visit_local(rcx: &mut Rcx, l: &hir::Local) {
// see above
- constrain_bindings_in_pat(&*l.pat, rcx);
+ constrain_bindings_in_pat(&l.pat, rcx);
link_local(rcx, l);
intravisit::walk_local(rcx, l);
}
hir::ExprCall(ref callee, ref args) => {
if has_method_map {
- constrain_call(rcx, expr, Some(&**callee),
+ constrain_call(rcx, expr, Some(&callee),
args.iter().map(|e| &**e), false);
} else {
- constrain_callee(rcx, callee.id, expr, &**callee);
+ constrain_callee(rcx, callee.id, expr, &callee);
constrain_call(rcx, expr, None,
args.iter().map(|e| &**e), false);
}
}
hir::ExprMethodCall(_, _, ref args) => {
- constrain_call(rcx, expr, Some(&*args[0]),
+ constrain_call(rcx, expr, Some(&args[0]),
args[1..].iter().map(|e| &**e), false);
intravisit::walk_expr(rcx, expr);
hir::ExprAssignOp(_, ref lhs, ref rhs) => {
if has_method_map {
- constrain_call(rcx, expr, Some(&**lhs),
+ constrain_call(rcx, expr, Some(&lhs),
Some(&**rhs).into_iter(), false);
}
}
hir::ExprIndex(ref lhs, ref rhs) if has_method_map => {
- constrain_call(rcx, expr, Some(&**lhs),
+ constrain_call(rcx, expr, Some(&lhs),
Some(&**rhs).into_iter(), true);
intravisit::walk_expr(rcx, expr);
// overloaded op. Note that we (sadly) currently use an
// implicit "by ref" sort of passing style here. This
// should be converted to an adjustment!
- constrain_call(rcx, expr, Some(&**lhs),
+ constrain_call(rcx, expr, Some(&lhs),
Some(&**rhs).into_iter(), implicitly_ref_args);
intravisit::walk_expr(rcx, expr);
hir::ExprBinary(_, ref lhs, ref rhs) => {
// If you do `x OP y`, then the types of `x` and `y` must
// outlive the operation you are performing.
- let lhs_ty = rcx.resolve_expr_type_adjusted(&**lhs);
- let rhs_ty = rcx.resolve_expr_type_adjusted(&**rhs);
+ let lhs_ty = rcx.resolve_expr_type_adjusted(&lhs);
+ let rhs_ty = rcx.resolve_expr_type_adjusted(&rhs);
for &ty in &[lhs_ty, rhs_ty] {
type_must_outlive(rcx,
infer::Operand(expr.span),
let implicitly_ref_args = !hir_util::is_by_value_unop(op);
// As above.
- constrain_call(rcx, expr, Some(&**lhs),
+ constrain_call(rcx, expr, Some(&lhs),
None::<hir::Expr>.iter(), implicitly_ref_args);
intravisit::walk_expr(rcx, expr);
let method_call = MethodCall::expr(expr.id);
let base_ty = match rcx.fcx.inh.tables.borrow().method_map.get(&method_call) {
Some(method) => {
- constrain_call(rcx, expr, Some(&**base),
+ constrain_call(rcx, expr, Some(&base),
None::<hir::Expr>.iter(), true);
let fn_ret = // late-bound regions in overloaded method calls are instantiated
rcx.tcx().no_late_bound_regions(&method.ty.fn_ret()).unwrap();
hir::ExprIndex(ref vec_expr, _) => {
// For a[b], the lifetime of a must enclose the deref
- let vec_type = rcx.resolve_expr_type_adjusted(&**vec_expr);
+ let vec_type = rcx.resolve_expr_type_adjusted(&vec_expr);
constrain_index(rcx, expr, vec_type);
intravisit::walk_expr(rcx, expr);
// Determine if we are casting `source` to a trait
// instance. If so, we have to be sure that the type of
// the source obeys the trait's region bound.
- constrain_cast(rcx, expr, &**source);
+ constrain_cast(rcx, expr, &source);
intravisit::walk_expr(rcx, expr);
}
hir::ExprAddrOf(m, ref base) => {
- link_addr_of(rcx, expr, m, &**base);
+ link_addr_of(rcx, expr, m, &base);
// Require that when you write a `&expr` expression, the
// resulting pointer has a lifetime that encompasses the
}
hir::ExprMatch(ref discr, ref arms, _) => {
- link_match(rcx, &**discr, &arms[..]);
+ link_match(rcx, &discr, &arms[..]);
intravisit::walk_expr(rcx, expr);
}
hir::ExprClosure(_, _, ref body) => {
- check_expr_fn_block(rcx, expr, &**body);
+ check_expr_fn_block(rcx, expr, &body);
}
hir::ExprLoop(ref body, _) => {
hir::ExprWhile(ref cond, ref body, _) => {
let repeating_scope = rcx.set_repeating_scope(cond.id);
- rcx.visit_expr(&**cond);
+ rcx.visit_expr(&cond);
rcx.set_repeating_scope(body.id);
- rcx.visit_block(&**body);
+ rcx.visit_block(&body);
rcx.set_repeating_scope(repeating_scope);
}
rcx, infer::CallRcvr(r.span),
r.id, callee_region);
if implicitly_ref_args {
- link_by_ref(rcx, &*r, callee_scope);
+ link_by_ref(rcx, &r, callee_scope);
}
}
}
};
let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx());
let discr_cmt = ignore_err!(mc.cat_expr(init_expr));
- link_pattern(rcx, mc, discr_cmt, &*local.pat);
+ link_pattern(rcx, mc, discr_cmt, &local.pat);
}
/// Computes the guarantors for any ref bindings in a match and
debug!("discr_cmt={:?}", discr_cmt);
for arm in arms {
for root_pat in &arm.pats {
- link_pattern(rcx, mc, discr_cmt.clone(), &**root_pat);
+ link_pattern(rcx, mc, discr_cmt.clone(), &root_pat);
}
}
}
arg_ty,
arg_cmt,
arg);
- link_pattern(rcx, mc, arg_cmt, &*arg.pat);
+ link_pattern(rcx, mc, arg_cmt, &arg.pat);
}
}
// `[_, ..slice, _]` pattern
hir::PatVec(_, Some(ref slice_pat), _) => {
- match mc.cat_slice_pattern(sub_cmt, &**slice_pat) {
+ match mc.cat_slice_pattern(sub_cmt, &slice_pat) {
Ok((slice_cmt, slice_mutbl, slice_r)) => {
link_region(rcx, sub_pat.span, &slice_r,
ty::BorrowKind::from_mutbl(slice_mutbl),
fn visit_expr(&mut self, expr: &hir::Expr) {
match expr.node {
hir::ExprClosure(cc, _, ref body) => {
- self.check_closure(expr, cc, &**body);
+ self.check_closure(expr, cc, &body);
}
_ => { }
wbcx.visit_block(blk);
for arg in &decl.inputs {
wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.id);
- wbcx.visit_pat(&*arg.pat);
+ wbcx.visit_pat(&arg.pat);
// Privacy needs the type for the whole pattern, not just each binding
- if !pat_util::pat_is_binding(&fcx.tcx().def_map.borrow(), &*arg.pat) {
+ if !pat_util::pat_is_binding(&fcx.tcx().def_map.borrow(), &arg.pat) {
wbcx.visit_node_id(ResolvingPattern(arg.pat.span),
arg.pat.id);
}
fn visit_ty(&mut self, t: &hir::Ty) {
match t.node {
hir::TyFixedLengthVec(ref ty, ref count_expr) => {
- self.visit_ty(&**ty);
+ self.visit_ty(&ty);
write_ty_to_tcx(self.tcx(), count_expr.id, self.tcx().types.usize);
}
hir::TyBareFn(ref function_declaration) => {
_ => tcx.sess.bug(&format!("get_trait_def({:?}): not an item", trait_id))
};
- trait_def_of_item(self, &*item)
+ trait_def_of_item(self, &item)
} else {
tcx.lookup_trait_def(trait_id)
}
v: &hir::StructField,
ty_f: ty::FieldDefMaster<'tcx>)
{
- let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &*v.node.ty);
+ let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &v.node.ty);
ty_f.fulfill_ty(tt);
write_ty_to_tcx(ccx.tcx, v.node.id, tt);
debug!("convert: impl_bounds={:?}", ty_predicates);
- let selfty = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, &**selfty);
+ let selfty = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, &selfty);
write_ty_to_tcx(tcx, it.id, selfty);
tcx.register_item_type(def_id,
if let hir::ImplItemKind::Const(ref ty, _) = impl_item.node {
let ty = ccx.icx(&ty_predicates)
- .to_ty(&ExplicitRscope, &*ty);
+ .to_ty(&ExplicitRscope, &ty);
tcx.register_item_type(ccx.tcx.map.local_def_id(impl_item.id),
TypeScheme {
generics: ty_generics.clone(),
if let Some(node_id) = ccx.tcx.map.as_local_node_id(def_id) {
match ccx.tcx.map.find(node_id) {
Some(hir_map::NodeItem(item)) => {
- type_scheme_of_item(ccx, &*item)
+ type_scheme_of_item(ccx, &item)
}
Some(hir_map::NodeForeignItem(foreign_item)) => {
let abi = ccx.tcx.map.get_foreign_abi(node_id);
- type_scheme_of_foreign_item(ccx, &*foreign_item, abi)
+ type_scheme_of_foreign_item(ccx, &foreign_item, abi)
}
x => {
ccx.tcx.sess.bug(&format!("unexpected sort of node \
let tcx = ccx.tcx;
match it.node {
hir::ItemStatic(ref t, _, _) | hir::ItemConst(ref t, _) => {
- let ty = ccx.icx(&()).to_ty(&ExplicitRscope, &**t);
+ let ty = ccx.icx(&()).to_ty(&ExplicitRscope, &t);
ty::TypeScheme { ty: ty, generics: ty::Generics::empty() }
}
hir::ItemFn(ref decl, unsafety, _, abi, ref generics, _) => {
let ty_generics = ty_generics_for_fn(ccx, generics, &ty::Generics::empty());
- let tofd = astconv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &**decl);
+ let tofd = astconv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl);
let ty = tcx.mk_fn(Some(ccx.tcx.map.local_def_id(it.id)), tcx.mk_bare_fn(tofd));
ty::TypeScheme { ty: ty, generics: ty_generics }
}
hir::ItemTy(ref t, ref generics) => {
let ty_generics = ty_generics_for_type_or_impl(ccx, generics);
- let ty = ccx.icx(generics).to_ty(&ExplicitRscope, &**t);
+ let ty = ccx.icx(generics).to_ty(&ExplicitRscope, &t);
ty::TypeScheme { ty: ty, generics: ty_generics }
}
hir::ItemEnum(ref ei, ref generics) => {
&hir::WherePredicate::BoundPredicate(ref bound_pred) => {
let ty = ast_ty_to_ty(&ccx.icx(&(base_predicates, ast_generics)),
&ExplicitRscope,
- &*bound_pred.bounded_ty);
+ &bound_pred.bounded_ty);
for bound in bound_pred.bounds.iter() {
match bound {
-> ty::TypeScheme<'tcx>
{
for i in &decl.inputs {
- match (*i).pat.node {
+ match i.pat.node {
hir::PatIdent(_, _, _) => (),
hir::PatWild => (),
_ => {
- span_err!(ccx.tcx.sess, (*i).pat.span, E0130,
+ span_err!(ccx.tcx.sess, i.pat.span, E0130,
"patterns aren't allowed in foreign function declarations");
}
}
let output = match decl.output {
hir::Return(ref ty) =>
- ty::FnConverging(ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &**ty)),
+ ty::FnConverging(ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &ty)),
hir::DefaultReturn(..) =>
ty::FnConverging(ccx.tcx.mk_nil()),
hir::NoReturn(..) =>