1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Translation of Expressions
13 //! The expr module handles translation of expressions. The most general
14 //! translation routine is `trans()`, which will translate an expression
15 //! into a datum. `trans_into()` is also available, which will translate
16 //! an expression and write the result directly into memory, sometimes
17 //! avoiding the need for a temporary stack slot. Finally,
18 //! `trans_to_lvalue()` is available if you'd like to ensure that the
19 //! result has cleanup scheduled.
21 //! Internally, each of these functions dispatches to various other
22 //! expression functions depending on the kind of expression. We divide
23 //! up expressions into:
25 //! - **Datum expressions:** Those that most naturally yield values.
26 //! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
27 //! - **DPS expressions:** Those that most naturally write into a location
28 //! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
29 //! - **Statement expressions:** Those that do not generate a meaningful
30 //! result. Examples would be `while { ... }` or `return 44`.
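//!
//! For illustration, a hypothetical snippet containing one expression of
//! each kind (`Point` is just an assumed example type, not part of this
//! module):
//!
//! ```ignore
//! let a = 22;                      // datum expression: naturally yields a value
//! let p = Point { x: 3, y: 4 };    // DPS expression: written into `p`'s memory
//! while a < 100 { return 44; }     // statement expression: no meaningful result
//! ```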
32 //! Public entry points:
34 //! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
35 //! storing the result into `dest`. This is the preferred form, if you can manage it.
38 //! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
39 //! `Datum` with the result. You can then store the datum, inspect
40 //! the value, etc. This may introduce temporaries if the datum is a structural type.
43 //! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
44 //! expression and ensures that the result has a cleanup associated with it,
45 //! creating a temporary stack slot if necessary.
47 //! - `trans_local_var -> Datum`: looks up a local variable or upvar.
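//!
//! A rough sketch of how the first two entry points are used (not a verbatim
//! call site; `bcx`, `expr`, and `dest` are assumed to already be in scope):
//!
//! ```ignore
//! // Preferred: write the result of `expr` directly into `dest`.
//! bcx = trans_into(bcx, expr, dest);
//!
//! // Alternative: obtain a datum first, then store it explicitly.
//! let DatumBlock { bcx, datum } = trans(bcx, expr);
//! bcx = datum.store_to_dest(bcx, dest, expr.id);
//! ```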
49 #![allow(non_camel_case_types)]
51 pub use self::Dest::*;
52 use self::lazy_binop_ty::*;
55 use llvm::{self, ValueRef, TypeKind};
56 use middle::check_const;
58 use middle::lang_items::CoerceUnsizedTraitLangItem;
59 use middle::subst::{Substs, VecPerParamSpace};
61 use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
64 use trans::cleanup::{self, CleanupMethods, DropHintMethods};
67 use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
74 use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
75 use middle::ty::adjustment::{AdjustUnsafeFnPointer, CustomCoerceUnsized};
76 use middle::ty::{self, Ty};
77 use middle::ty::MethodCall;
78 use middle::ty::cast::{CastKind, CastTy};
79 use util::common::indenter;
80 use trans::machine::{llsize_of, llsize_of_alloc};
81 use trans::type_::Type;
86 use syntax::{ast, ast_util, codemap};
87 use syntax::parse::token::InternedString;
89 use syntax::parse::token;
94 // These are passed around by the code generating functions to track the
95 // destination of a computation's value.
97 #[derive(Copy, Clone, PartialEq)]
104 pub fn to_string(&self, ccx: &CrateContext) -> String {
106 SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
107 Ignore => "Ignore".to_string()
112 /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
113 /// better optimized LLVM code.
114 pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
117 -> Block<'blk, 'tcx> {
120 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
122 if adjustment_required(bcx, expr) {
123 // use trans, which may be less efficient but
124 // which will perform the adjustments:
125 let datum = unpack_datum!(bcx, trans(bcx, expr));
126 return datum.store_to_dest(bcx, dest, expr.id);
129 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
130 if !qualif.intersects(
131 check_const::ConstQualif::NOT_CONST |
132 check_const::ConstQualif::NEEDS_DROP
134 if !qualif.intersects(check_const::ConstQualif::PREFER_IN_PLACE) {
135 if let SaveIn(lldest) = dest {
136 let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
137 bcx.fcx.param_substs);
138 // Cast pointer to destination, because constants
139 // have different types.
140 let lldest = PointerCast(bcx, lldest, val_ty(global));
141 memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
144 // Even if we don't have a value to emit, and the expression
145 // doesn't have any side-effects, we still have to translate the
146 // body of any closures.
147 // FIXME: Find a better way of handling this case.
149 // The only way we're going to see a `const` at this point is if
150 // it prefers in-place instantiation, likely because it contains
151 // `[x; N]` somewhere within.
153 hir::ExprPath(..) => {
154 match bcx.def(expr.id) {
155 def::DefConst(did) => {
156 let const_expr = consts::get_const_expr(bcx.ccx(), did, expr);
157 // Temporarily get cleanup scopes out of the way,
158 // as they require sub-expressions to be contained
159 // inside the current AST scope.
160 // These should record no cleanups anyways, `const`
161 // can't have destructors.
162 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
164 // Lock emitted debug locations to the location of
165 // the constant reference expression.
166 debuginfo::with_source_location_override(bcx.fcx,
169 bcx = trans_into(bcx, const_expr, dest)
171 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
173 assert!(scopes.is_empty());
184 debug!("trans_into() expr={:?}", expr);
186 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
190 bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
192 let kind = expr_kind(bcx.tcx(), expr);
194 ExprKind::Lvalue | ExprKind::RvalueDatum => {
195 trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
197 ExprKind::RvalueDps => {
198 trans_rvalue_dps_unadjusted(bcx, expr, dest)
200 ExprKind::RvalueStmt => {
201 trans_rvalue_stmt_unadjusted(bcx, expr)
205 bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
208 /// Translates an expression, returning a datum (and new block) encapsulating the result. When
209 /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the stack.
211 pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
213 -> DatumBlock<'blk, 'tcx, Expr> {
214 debug!("trans(expr={:?})", expr);
218 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
219 let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS);
220 let global = if !qualif.intersects(
221 check_const::ConstQualif::NOT_CONST |
222 check_const::ConstQualif::NEEDS_DROP
224 let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
225 bcx.fcx.param_substs);
227 if qualif.intersects(check_const::ConstQualif::HAS_STATIC_BORROWS) {
228 // Is borrowed as 'static, must return lvalue.
230 // Cast pointer to global, because constants have different types.
231 let const_ty = expr_ty_adjusted(bcx, expr);
232 let llty = type_of::type_of(bcx.ccx(), const_ty);
233 let global = PointerCast(bcx, global, llty.ptr_to());
234 let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
235 return DatumBlock::new(bcx, datum.to_expr_datum());
238 // Otherwise, keep around and perform adjustments, if needed.
239 let const_ty = if adjusted_global {
240 expr_ty_adjusted(bcx, expr)
245 // This could use a better heuristic.
246 Some(if type_is_immediate(bcx.ccx(), const_ty) {
247 // Cast pointer to global, because constants have different types.
248 let llty = type_of::type_of(bcx.ccx(), const_ty);
249 let global = PointerCast(bcx, global, llty.ptr_to());
250 // Maybe just get the value directly, instead of loading it?
251 immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
253 let scratch = alloc_ty(bcx, const_ty, "const");
254 call_lifetime_start(bcx, scratch);
255 let lldest = if !const_ty.is_structural() {
256 // Cast pointer to slot, because constants have different types.
257 PointerCast(bcx, scratch, val_ty(global))
259 // In this case, memcpy_ty calls llvm.memcpy after casting both
260 // source and destination to i8*, so we don't need any casts.
263 memcpy_ty(bcx, lldest, global, const_ty);
264 Datum::new(scratch, const_ty, Rvalue::new(ByRef))
270 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
274 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
275 let datum = match global {
276 Some(rvalue) => rvalue.to_expr_datum(),
277 None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
279 let datum = if adjusted_global {
280 datum // trans::consts already performed adjustments.
282 unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
284 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
285 return DatumBlock::new(bcx, datum);
288 pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
289 StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
292 pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
293 StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
296 pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
297 Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
298 Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
301 /// Retrieve the information we are losing (making dynamic) in an unsizing
304 /// The `old_info` argument is a bit funny. It is intended for use
305 /// in an upcast, where the new vtable for an object will be derived
306 /// from the old one.
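///
/// As a rough, source-level illustration of what the "lost" information is
/// (types only; the values are produced by the match below, and the bindings
/// here are purely hypothetical):
///
/// ```ignore
/// let a: [u8; 4] = [0; 4];
/// let s: &[u8] = &a;                    // [u8; 4] -> [u8]: info is the length 4
/// let d: &std::fmt::Display = &42u32;   // u32 -> Display: info is a vtable pointer
/// ```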
307 pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
310 old_info: Option<ValueRef>,
311 param_substs: &'tcx Substs<'tcx>)
313 let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
314 match (&source.sty, &target.sty) {
315 (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
316 (&ty::TyTrait(_), &ty::TyTrait(_)) => {
317 // For now, upcasts are limited to changes in marker
318 // traits, and hence never require an actual change
319 // to the vtable.
320 old_info.expect("unsized_info: missing old info for trait upcast")
322 (_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
323 // Note that we preserve binding levels here:
324 let substs = principal.0.substs.with_self_ty(source).erase_regions();
325 let substs = ccx.tcx().mk_substs(substs);
326 let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
328 consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
329 Type::vtable_ptr(ccx))
331 _ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
337 fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
338 expr: &hir::Expr) -> bool {
339 let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
340 None => { return false; }
344 // Don't skip a conversion from Box<T> to &T, etc.
345 if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
350 AdjustReifyFnPointer => {
351 // FIXME(#19925) once fn item types are
352 // zero-sized, we'll need to return true here
355 AdjustUnsafeFnPointer => {
356 // purely a type-level thing
359 AdjustDerefRef(ref adj) => {
360 // We are a bit paranoid about adjustments and thus might have a re-
361 // borrow here which merely derefs and then refs again (it might have
362 // a different region or mutability, but we don't care here).
363 !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
368 /// Helper for `trans` that applies adjustments from `expr` to `datum`, which should be the unadjusted
369 /// translation of `expr`.
370 fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
372 datum: Datum<'tcx, Expr>)
373 -> DatumBlock<'blk, 'tcx, Expr>
376 let mut datum = datum;
377 let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
379 return DatumBlock::new(bcx, datum);
383 debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
385 datum.to_string(bcx.ccx()),
388 AdjustReifyFnPointer => {
389 // FIXME(#19925) once fn item types are
390 // zero-sized, we'll need to do something here
392 AdjustUnsafeFnPointer => {
393 // purely a type-level thing
395 AdjustDerefRef(ref adj) => {
396 let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
397 // We are a bit paranoid about adjustments and thus might have a re-
398 // borrow here which merely derefs and then refs again (it might have
399 // a different region or mutability, but we don't care here).
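// A hypothetical instance of such a reborrow (with `x: &mut i32` in scope):
//
//     let y: &i32 = &*x;   // one autoderef plus an autoref, same value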
401 // Don't skip a conversion from Box<T> to &T, etc.
403 if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
404 // Don't skip an overloaded deref.
416 if adj.autoderefs > skip_reborrows {
418 let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
419 datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
420 lval.to_expr_datum(),
421 adj.autoderefs - skip_reborrows));
424 // (You might think there is a more elegant way to do this than a
425 // skip_reborrows bool, but then you remember that the borrow checker exists).
426 if skip_reborrows == 0 && adj.autoref.is_some() {
427 datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
430 if let Some(target) = adj.unsize {
431 // We do not arrange cleanup ourselves; if we already are an
432 // L-value, then cleanup will have already been scheduled (and
433 // the `datum.to_rvalue_datum` call below will emit code to zero
434 // the drop flag when moving out of the L-value). If we are an
435 // R-value, then we do not need to schedule cleanup.
436 let source_datum = unpack_datum!(bcx,
437 datum.to_rvalue_datum(bcx, "__coerce_source"));
439 let target = bcx.monomorphize(&target);
441 let scratch = alloc_ty(bcx, target, "__coerce_target");
442 call_lifetime_start(bcx, scratch);
443 let target_datum = Datum::new(scratch, target,
445 bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
446 datum = Datum::new(scratch, target,
447 RvalueExpr(Rvalue::new(ByRef)));
451 debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
452 DatumBlock::new(bcx, datum)
455 fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
457 source: Datum<'tcx, Rvalue>,
458 target: Datum<'tcx, Rvalue>)
459 -> Block<'blk, 'tcx> {
461 debug!("coerce_unsized({} -> {})",
462 source.to_string(bcx.ccx()),
463 target.to_string(bcx.ccx()));
465 match (&source.ty.sty, &target.ty.sty) {
466 (&ty::TyBox(a), &ty::TyBox(b)) |
467 (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
468 &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
469 (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
470 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
471 (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
472 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
473 let (inner_source, inner_target) = (a, b);
475 let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
476 // Normally, the source is a thin pointer and we are
477 // adding extra info to make a fat pointer. The exception
478 // is when we are upcasting an existing object fat pointer
479 // to use a different vtable. In that case, we want to
480 // load out the original data pointer so we can repackage it.
482 (Load(bcx, get_dataptr(bcx, source.val)),
483 Some(Load(bcx, get_meta(bcx, source.val))))
485 let val = if source.kind.is_by_ref() {
486 load_ty(bcx, source.val, source.ty)
493 let info = unsized_info(bcx.ccx(), inner_source, inner_target,
494 old_info, bcx.fcx.param_substs);
496 // Compute the base pointer. This doesn't change the pointer value,
497 // but merely its type.
498 let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
499 let base = PointerCast(bcx, base, ptr_ty);
501 Store(bcx, base, get_dataptr(bcx, target.val));
502 Store(bcx, info, get_meta(bcx, target.val));
505 // This can be extended to enums and tuples in the future.
506 // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
507 (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
508 assert_eq!(def_id_a, def_id_b);
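// For instance, a coercion like `Rc<[i32; 4]>` to `Rc<[i32]>` takes this
// path: every field is copied as-is except the one field (identified by
// `coerce_index` below) that itself needs the unsizing coercion.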
510 // The target is already by-ref because it's to be written to.
511 let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
512 assert!(target.kind.is_by_ref());
514 let trait_substs = Substs::erased(VecPerParamSpace::new(vec![target.ty],
517 let trait_ref = ty::Binder(ty::TraitRef {
518 def_id: langcall(bcx, Some(span), "coercion",
519 CoerceUnsizedTraitLangItem),
520 substs: bcx.tcx().mk_substs(trait_substs)
523 let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) {
524 traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
525 bcx.tcx().custom_coerce_unsized_kind(impl_def_id)
528 bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {:?}",
533 let repr_source = adt::represent_type(bcx.ccx(), source.ty);
534 let src_fields = match &*repr_source {
535 &adt::Repr::Univariant(ref s, _) => &s.fields,
536 _ => bcx.sess().span_bug(span,
537 &format!("Non univariant struct? (repr_source: {:?})",
540 let repr_target = adt::represent_type(bcx.ccx(), target.ty);
541 let target_fields = match &*repr_target {
542 &adt::Repr::Univariant(ref s, _) => &s.fields,
543 _ => bcx.sess().span_bug(span,
544 &format!("Non univariant struct? (repr_target: {:?})",
548 let coerce_index = match kind {
549 CustomCoerceUnsized::Struct(i) => i
551 assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
553 let iter = src_fields.iter().zip(target_fields).enumerate();
554 for (i, (src_ty, target_ty)) in iter {
555 let ll_source = adt::trans_field_ptr(bcx, &repr_source, source.val, 0, i);
556 let ll_target = adt::trans_field_ptr(bcx, &repr_target, target.val, 0, i);
558 // If this is the field we need to coerce, recurse on it.
559 if i == coerce_index {
560 coerce_unsized(bcx, span,
561 Datum::new(ll_source, src_ty,
563 Datum::new(ll_target, target_ty,
564 Rvalue::new(ByRef)));
566 // Otherwise, simply copy the data from the source.
567 assert!(src_ty.is_phantom_data() || src_ty == target_ty);
568 memcpy_ty(bcx, ll_target, ll_source, src_ty);
572 _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
579 /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
580 /// that the expr represents.
582 /// If this expression is an rvalue, this implies introducing a temporary. In other words,
583 /// something like `x().f` is translated into roughly the equivalent of
585 /// { tmp = x(); tmp.f }
586 pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
589 -> DatumBlock<'blk, 'tcx, Lvalue> {
591 let datum = unpack_datum!(bcx, trans(bcx, expr));
592 return datum.to_lvalue_datum(bcx, name, expr.id);
595 /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this directly.
597 fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
599 -> DatumBlock<'blk, 'tcx, Expr> {
602 debug!("trans_unadjusted(expr={:?})", expr);
603 let _indenter = indenter();
605 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
607 return match expr_kind(bcx.tcx(), expr) {
608 ExprKind::Lvalue | ExprKind::RvalueDatum => {
609 let datum = unpack_datum!(bcx, {
610 trans_datum_unadjusted(bcx, expr)
613 DatumBlock {bcx: bcx, datum: datum}
616 ExprKind::RvalueStmt => {
617 bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
618 nil(bcx, expr_ty(bcx, expr))
621 ExprKind::RvalueDps => {
622 let ty = expr_ty(bcx, expr);
623 if type_is_zero_size(bcx.ccx(), ty) {
624 bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
627 let scratch = rvalue_scratch_datum(bcx, ty, "");
628 bcx = trans_rvalue_dps_unadjusted(
629 bcx, expr, SaveIn(scratch.val));
631 // Note: this is not obviously a good idea. It causes
632 // immediate values to be loaded immediately after a
633 // return from a call or other similar expression,
634 // which in turn leads to alloca's having shorter
635 // lifetimes and hence larger stack frames. However,
636 // in turn it can lead to more register pressure.
637 // Still, in practice it seems to increase
638 // performance, since we have fewer problems with
640 let scratch = unpack_datum!(
641 bcx, scratch.to_appropriate_datum(bcx));
643 DatumBlock::new(bcx, scratch.to_expr_datum())
648 fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
649 -> DatumBlock<'blk, 'tcx, Expr> {
650 let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
651 let datum = immediate_rvalue(llval, ty);
652 DatumBlock::new(bcx, datum.to_expr_datum())
656 fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
658 -> DatumBlock<'blk, 'tcx, Expr> {
661 let _icx = push_ctxt("trans_datum_unadjusted");
664 hir::ExprPath(..) => {
665 trans_def(bcx, expr, bcx.def(expr.id))
667 hir::ExprField(ref base, name) => {
668 trans_rec_field(bcx, &**base, name.node)
670 hir::ExprTupField(ref base, idx) => {
671 trans_rec_tup_field(bcx, &**base, idx.node)
673 hir::ExprIndex(ref base, ref idx) => {
674 trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
676 hir::ExprBox(_, ref contents) => {
677 // Special case for `Box<T>`
678 let box_ty = expr_ty(bcx, expr);
679 let contents_ty = expr_ty(bcx, &**contents);
682 trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty)
684 _ => bcx.sess().span_bug(expr.span,
685 "expected unique box")
689 hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit),
690 hir::ExprBinary(op, ref lhs, ref rhs) => {
691 trans_binary(bcx, expr, op, &**lhs, &**rhs)
693 hir::ExprUnary(op, ref x) => {
694 trans_unary(bcx, expr, op, &**x)
696 hir::ExprAddrOf(_, ref x) => {
698 hir::ExprRepeat(..) | hir::ExprVec(..) => {
699 // Special case for slices.
700 let cleanup_debug_loc =
701 debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
705 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
706 let datum = unpack_datum!(
707 bcx, tvec::trans_slice_vec(bcx, expr, &**x));
708 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
709 DatumBlock::new(bcx, datum)
712 trans_addr_of(bcx, expr, &**x)
716 hir::ExprCast(ref val, _) => {
717 // Datum output mode means this is a scalar cast:
718 trans_imm_cast(bcx, &**val, expr.id)
721 bcx.tcx().sess.span_bug(
723 &format!("trans_rvalue_datum_unadjusted reached \
724 fall-through case: {:?}",
730 fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
733 -> DatumBlock<'blk, 'tcx, Expr> where
734 F: FnOnce(&'blk ty::ctxt<'tcx>, &VariantInfo<'tcx>) -> usize,
737 let _icx = push_ctxt("trans_rec_field");
739 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
740 let bare_ty = base_datum.ty;
741 let repr = adt::represent_type(bcx.ccx(), bare_ty);
742 let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);
744 let ix = get_idx(bcx.tcx(), &vinfo);
745 let d = base_datum.get_element(
748 |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, vinfo.discr, ix));
750 if type_is_sized(bcx.tcx(), d.ty) {
751 DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
753 let scratch = rvalue_scratch_datum(bcx, d.ty, "");
754 Store(bcx, d.val, get_dataptr(bcx, scratch.val));
755 let info = Load(bcx, get_meta(bcx, base_datum.val));
756 Store(bcx, info, get_meta(bcx, scratch.val));
758 // Always generate an lvalue datum, because this pointer doesn't own
759 // the data and cleanup is scheduled elsewhere.
760 DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
764 /// Translates `base.field`.
765 fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
768 -> DatumBlock<'blk, 'tcx, Expr> {
769 trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
772 /// Translates `base.<idx>`.
773 fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
776 -> DatumBlock<'blk, 'tcx, Expr> {
777 trans_field(bcx, base, |_, _| idx)
780 fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
781 index_expr: &hir::Expr,
784 method_call: MethodCall)
785 -> DatumBlock<'blk, 'tcx, Expr> {
786 //! Translates `base[idx]`.
788 let _icx = push_ctxt("trans_index");
792 let index_expr_debug_loc = index_expr.debug_loc();
794 // Check for overloaded index.
795 let method_ty = ccx.tcx()
800 .map(|method| method.ty);
801 let elt_datum = match method_ty {
803 let method_ty = monomorphize_type(bcx, method_ty);
805 let base_datum = unpack_datum!(bcx, trans(bcx, base));
807 // Translate index expression.
808 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
810 let ref_ty = // invoked methods have LB regions instantiated:
811 bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
812 let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
814 bcx.tcx().sess.span_bug(index_expr.span,
815 "index method didn't return a \
816 dereferenceable type?!")
818 Some(elt_tm) => elt_tm.ty,
821 // Overloaded. Evaluate `trans_overloaded_op`, which will
822 // invoke the user's index() method, which basically yields
823 // a `&T` pointer. We can then proceed down the normal
824 // path (below) to dereference that `&T`.
825 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
827 trans_overloaded_op(bcx,
831 Some((ix_datum, idx.id)),
832 Some(SaveIn(scratch.val)),
834 let datum = scratch.to_expr_datum();
835 let lval = Lvalue::new("expr::trans_index overload");
836 if type_is_sized(bcx.tcx(), elt_ty) {
837 Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
839 Datum::new(datum.val, elt_ty, LvalueExpr(lval))
843 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
847 // Translate index expression and cast to a suitable LLVM integer.
848 // Rust is less strict than LLVM in this regard.
849 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
850 let ix_val = ix_datum.to_llscalarish(bcx);
851 let ix_size = machine::llbitsize_of_real(bcx.ccx(),
853 let int_size = machine::llbitsize_of_real(bcx.ccx(),
856 if ix_size < int_size {
857 if expr_ty(bcx, idx).is_signed() {
858 SExt(bcx, ix_val, ccx.int_type())
859 } else { ZExt(bcx, ix_val, ccx.int_type()) }
860 } else if ix_size > int_size {
861 Trunc(bcx, ix_val, ccx.int_type())
867 let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
869 let (base, len) = base_datum.get_vec_base_and_len(bcx);
871 debug!("trans_index: base {}", bcx.val_to_string(base));
872 debug!("trans_index: len {}", bcx.val_to_string(len));
874 let bounds_check = ICmp(bcx,
878 index_expr_debug_loc);
879 let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
880 let expected = Call(bcx,
882 &[bounds_check, C_bool(ccx, false)],
884 index_expr_debug_loc);
885 bcx = with_cond(bcx, expected, |bcx| {
886 controlflow::trans_fail_bounds_check(bcx,
887 expr_info(index_expr),
891 let elt = InBoundsGEP(bcx, base, &[ix_val]);
892 let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
893 let lval = Lvalue::new("expr::trans_index fallback");
894 Datum::new(elt, unit_ty, LvalueExpr(lval))
898 DatumBlock::new(bcx, elt_datum)
901 fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
902 ref_expr: &hir::Expr,
904 -> DatumBlock<'blk, 'tcx, Expr> {
905 //! Translates a reference to a path.
907 let _icx = push_ctxt("trans_def_lvalue");
909 def::DefFn(..) | def::DefMethod(..) |
910 def::DefStruct(_) | def::DefVariant(..) => {
911 let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
912 bcx.fcx.param_substs);
913 DatumBlock::new(bcx, datum.to_expr_datum())
915 def::DefStatic(did, _) => {
916 // There are two things that may happen here:
917 // 1) If the static item is defined in this crate, it will be
918 // translated using `get_item_val`, and we return a pointer to the result.
920 // 2) If the static item is defined in another crate then we add
921 // (or reuse) a declaration of an external global, and return a pointer to that.
923 let const_ty = expr_ty(bcx, ref_expr);
925 // For external constants, we don't inline.
926 let val = if did.is_local() {
929 // The LLVM global has the type of its initializer,
930 // which may not be equal to the enum's type for
932 let val = base::get_item_val(bcx.ccx(), did.node);
933 let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
934 PointerCast(bcx, val, pty)
937 base::get_extern_const(bcx.ccx(), did, const_ty)
939 let lval = Lvalue::new("expr::trans_def");
940 DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
942 def::DefConst(_) => {
943 bcx.sess().span_bug(ref_expr.span,
944 "constant expression should not reach expr::trans_def")
947 DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
952 fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
954 -> Block<'blk, 'tcx> {
956 let _icx = push_ctxt("trans_rvalue_stmt");
958 if bcx.unreachable.get() {
962 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
965 hir::ExprBreak(label_opt) => {
966 controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node))
968 hir::ExprAgain(label_opt) => {
969 controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node))
971 hir::ExprRet(ref ex) => {
972 // Check to see if the return expression itself is reachable.
973 // This can occur when the inner expression contains a return
974 let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
975 cfg.node_is_reachable(expr.id)
981 controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
983 // If it's not reachable, just translate the inner expression
984 // directly. This avoids having to manage a return slot when
985 // it won't actually be used anyway.
986 if let &Some(ref x) = ex {
987 bcx = trans_into(bcx, &**x, Ignore);
989 // Mark the end of the block as unreachable. Once we get to
990 // a return expression, there's no more we should be doing
996 hir::ExprWhile(ref cond, ref body, _) => {
997 controlflow::trans_while(bcx, expr, &**cond, &**body)
999 hir::ExprLoop(ref body, _) => {
1000 controlflow::trans_loop(bcx, expr, &**body)
1002 hir::ExprAssign(ref dst, ref src) => {
1003 let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
1004 let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));
1006 if bcx.fcx.type_needs_drop(dst_datum.ty) {
1007 // If there are destructors involved, make sure we
1008 // are copying from an rvalue, since that cannot possibly
1009 // alias an lvalue. We are concerned about code like `a = a.b;`,
1017 // where e.g. a : Option<Foo> and a.b :
1018 // Option<Foo>. In that case, freeing `a` before the
1019 // assignment may also free `a.b`!
1021 // We could avoid this intermediary with some analysis
1022 // to determine whether `dst` may possibly own `src`.
1023 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
1024 let src_datum = unpack_datum!(
1025 bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
1026 let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
1027 let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
1029 // 1. Drop the data at the destination, passing the
1030 // drop-hint in case the lvalue has already been
1031 // dropped or moved.
1032 bcx = glue::drop_ty_core(bcx,
1039 // 2. We are overwriting the destination; ensure that
1040 // its drop-hint (if any) says "initialized."
1041 if let Some(hint_val) = opt_hint_val {
1042 let hint_llval = hint_val.value();
1043 let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
1044 Store(bcx, drop_needed, hint_llval);
1046 src_datum.store_to(bcx, dst_datum.val)
1048 src_datum.store_to(bcx, dst_datum.val)
1051 hir::ExprAssignOp(op, ref dst, ref src) => {
1052 let has_method_map = bcx.tcx()
1056 .contains_key(&MethodCall::expr(expr.id));
1059 let dst = unpack_datum!(bcx, trans(bcx, &**dst));
1060 let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
1061 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), dst,
1062 Some((src_datum, src.id)), None, false).bcx
1064 trans_assign_op(bcx, expr, op, &**dst, &**src)
1067 hir::ExprInlineAsm(ref a) => {
1068 asm::trans_inline_asm(bcx, a)
1071 bcx.tcx().sess.span_bug(
1073 &format!("trans_rvalue_stmt_unadjusted reached \
1074 fall-through case: {:?}",
1080 fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1083 -> Block<'blk, 'tcx> {
1084 let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
1086 let tcx = bcx.tcx();
1088 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
1091 hir::ExprPath(..) => {
1092 trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
1094 hir::ExprIf(ref cond, ref thn, ref els) => {
1095 controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest)
1097 hir::ExprMatch(ref discr, ref arms, _) => {
1098 _match::trans_match(bcx, expr, &**discr, &arms[..], dest)
1100 hir::ExprBlock(ref blk) => {
1101 controlflow::trans_block(bcx, &**blk, dest)
1103 hir::ExprStruct(_, ref fields, ref base) => {
1106 base.as_ref().map(|e| &**e),
1109 node_id_type(bcx, expr.id),
1112 hir::ExprRange(ref start, ref end) => {
1113 // FIXME it is just not right that we are synthesising ast nodes in
1115 fn make_field(field_name: &str, expr: P<hir::Expr>) -> hir::Field {
1117 name: codemap::dummy_spanned(token::str_to_ident(field_name).name),
1119 span: codemap::DUMMY_SP,
1123 // A range just desugars into a struct.
1124 // Note that the type of the start and end may not be the same, but
1125 // they should only differ in their lifetime, which should not matter in trans.
1127 let (did, fields, ty_params) = match (start, end) {
1128 (&Some(ref start), &Some(ref end)) => {
1130 let fields = vec![make_field("start", start.clone()),
1131 make_field("end", end.clone())];
1132 (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
1134 (&Some(ref start), &None) => {
1135 // Desugar to RangeFrom
1136 let fields = vec![make_field("start", start.clone())];
1137 (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
1139 (&None, &Some(ref end)) => {
1140 // Desugar to RangeTo
1141 let fields = vec![make_field("end", end.clone())];
1142 (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
1145 // Desugar to RangeFull
1146 (tcx.lang_items.range_full_struct(), vec![], vec![])
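// To summarize the desugaring performed above (surface syntax on the
// left, the lang-item struct constructed here on the right):
//
//     a..b  ->  Range     { start: a, end: b }
//     a..   ->  RangeFrom { start: a }
//     ..b   ->  RangeTo   { end: b }
//     ..    ->  RangeFull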
1150 if let Some(did) = did {
1151 let substs = Substs::new_type(ty_params, vec![]);
1157 tcx.mk_struct(tcx.lookup_adt_def(did),
1158 tcx.mk_substs(substs)),
1161 tcx.sess.span_bug(expr.span,
1162 "No lang item for ranges (how did we get this far?)")
1165 hir::ExprTup(ref args) => {
1166 let numbered_fields: Vec<(usize, &hir::Expr)> =
1167 args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
1171 &numbered_fields[..],
1176 hir::ExprLit(ref lit) => {
1178 ast::LitStr(ref s, _) => {
1179 tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
1184 .span_bug(expr.span,
1185 "trans_rvalue_dps_unadjusted shouldn't be \
1186 translating this type of literal")
1190 hir::ExprVec(..) | hir::ExprRepeat(..) => {
1191 tvec::trans_fixed_vstore(bcx, expr, dest)
1193 hir::ExprClosure(_, ref decl, ref body) => {
1194 let dest = match dest {
1195 SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
1196 Ignore => closure::Dest::Ignore(bcx.ccx())
1198 let substs = match expr_ty(bcx, expr).sty {
1199 ty::TyClosure(_, ref substs) => substs,
1201 bcx.tcx().sess.span_bug(
1203 &format!("closure expr without closure type: {:?}", t)),
1205 closure::trans_closure_expr(dest, decl, body, expr.id, substs).unwrap_or(bcx)
1207 hir::ExprCall(ref f, ref args) => {
1208 if bcx.tcx().is_method_call(expr.id) {
1209 trans_overloaded_call(bcx,
1215 callee::trans_call(bcx,
1218 callee::ArgExprs(&args[..]),
1222 hir::ExprMethodCall(_, _, ref args) => {
1223 callee::trans_method_call(bcx,
1226 callee::ArgExprs(&args[..]),
1229 hir::ExprBinary(op, ref lhs, ref rhs) => {
1230 // if not overloaded, would be RvalueDatumExpr
1231 let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
1232 let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
1233 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
1234 Some((rhs_datum, rhs.id)), Some(dest),
1235 !rustc_front::util::is_by_value_binop(op.node)).bcx
1237 hir::ExprUnary(op, ref subexpr) => {
1238 // if not overloaded, would be RvalueDatumExpr
1239 let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
1240 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
1241 arg, None, Some(dest), !rustc_front::util::is_by_value_unop(op)).bcx
1243 hir::ExprIndex(ref base, ref idx) => {
1244 // if not overloaded, would be RvalueDatumExpr
1245 let base = unpack_datum!(bcx, trans(bcx, &**base));
1246 let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
1247 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
1248 Some((idx_datum, idx.id)), Some(dest), true).bcx
1250 hir::ExprCast(..) => {
1251 // Trait casts used to come this way, now they should be coercions.
1252 bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
1254 hir::ExprAssignOp(op, _, _) => {
1255 bcx.tcx().sess.span_bug(
1257 &format!("augmented assignment `{}=` should always be a rvalue_stmt",
1258 rustc_front::util::binop_to_string(op.node)))
1261 bcx.tcx().sess.span_bug(
1263 &format!("trans_rvalue_dps_unadjusted reached fall-through \
1270 fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1271 ref_expr: &hir::Expr,
1274 -> Block<'blk, 'tcx> {
1275 let _icx = push_ctxt("trans_def_dps_unadjusted");
1277 let lldest = match dest {
1278 SaveIn(lldest) => lldest,
1279 Ignore => { return bcx; }
1283 def::DefVariant(tid, vid, _) => {
1284 let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
1285 if let ty::VariantKind::Tuple = variant.kind() {
1287 let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
1288 ExprId(ref_expr.id),
1289 bcx.fcx.param_substs).val;
1290 Store(bcx, llfn, lldest);
1294 let ty = expr_ty(bcx, ref_expr);
1295 let repr = adt::represent_type(bcx.ccx(), ty);
1296 adt::trans_set_discr(bcx, &*repr, lldest, variant.disr_val);
1300 def::DefStruct(_) => {
1301 let ty = expr_ty(bcx, ref_expr);
1303 ty::TyStruct(def, _) if def.has_dtor() => {
1304 let repr = adt::represent_type(bcx.ccx(), ty);
1305 adt::trans_set_discr(bcx, &*repr, lldest, 0);
1312 bcx.tcx().sess.span_bug(ref_expr.span, &format!(
1313 "Non-DPS def {:?} referened by {}",
1314 def, bcx.node_id_to_string(ref_expr.id)));
1319 pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1320 ref_expr: &hir::Expr,
1322 param_substs: &'tcx Substs<'tcx>)
1323 -> Datum<'tcx, Rvalue> {
1324 let _icx = push_ctxt("trans_def_datum_unadjusted");
1327 def::DefFn(did, _) |
1328 def::DefStruct(did) | def::DefVariant(_, did, _) => {
1329 callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
1331 def::DefMethod(method_did) => {
1332 match ccx.tcx().impl_or_trait_item(method_did).container() {
1333 ty::ImplContainer(_) => {
1334 callee::trans_fn_ref(ccx, method_did,
1335 ExprId(ref_expr.id),
1338 ty::TraitContainer(trait_did) => {
1339 meth::trans_static_method_callee(ccx, method_did,
1340 trait_did, ref_expr.id,
1346 ccx.tcx().sess.span_bug(ref_expr.span, &format!(
1347 "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
1354 /// Translates a reference to a local variable or argument. This always results in an lvalue datum.
1355 pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1357 -> Datum<'tcx, Lvalue> {
1358 let _icx = push_ctxt("trans_local_var");
1361 def::DefUpvar(nid, _, _) => {
1362 // Can't move upvars, so this is never a ZeroMemLastUse.
1363 let local_ty = node_id_type(bcx, nid);
1364 let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
1365 bcx, nid, HintKind::ZeroAndMaintain);
1366 match bcx.fcx.llupvars.borrow().get(&nid) {
1367 Some(&val) => Datum::new(val, local_ty, lval),
1369 bcx.sess().bug(&format!(
1370 "trans_local_var: no llval for upvar {} found",
1375 def::DefLocal(nid) => {
1376 let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
1379 bcx.sess().bug(&format!(
1380 "trans_local_var: no datum for local/arg {} found",
1384 debug!("take_local(nid={}, v={}, ty={})",
1385 nid, bcx.val_to_string(datum.val), datum.ty);
1389 bcx.sess().unimpl(&format!(
1390 "unsupported def type in trans_local_var: {:?}",
1396 fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1397 fields: &[hir::Field],
1398 base: Option<&hir::Expr>,
1399 expr_span: codemap::Span,
1400 expr_id: ast::NodeId,
1402 dest: Dest) -> Block<'blk, 'tcx> {
1403 let _icx = push_ctxt("trans_rec");
1405 let tcx = bcx.tcx();
1406 let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
1408 let mut need_base = vec![true; vinfo.fields.len()];
1410 let numbered_fields = fields.iter().map(|field| {
1411 let pos = vinfo.field_index(field.name.node);
1412 need_base[pos] = false;
1414 }).collect::<Vec<_>>();
1416 let optbase = match base {
1417 Some(base_expr) => {
1418 let mut leftovers = Vec::new();
1419 for (i, b) in need_base.iter().enumerate() {
1421 leftovers.push((i, vinfo.fields[i].1));
1424 Some(StructBaseInfo {expr: base_expr,
1425 fields: leftovers })
1428 if need_base.iter().any(|b| *b) {
1429 tcx.sess.span_bug(expr_span, "missing fields and no base expr")
1441 DebugLoc::At(expr_id, expr_span))
1444 /// Information that `trans_adt` needs in order to fill in the fields
1445 /// of a struct copied from a base struct (e.g., from an expression
1446 /// like `Foo { a: b, ..base }`).
1448 /// Note that `fields` may be empty; the base expression must always be
1449 /// evaluated for side-effects.
1450 pub struct StructBaseInfo<'a, 'tcx> {
1451 /// The base expression; will be evaluated after all explicit fields.
1452 expr: &'a hir::Expr,
1453 /// The indices of fields to copy paired with their types.
1454 fields: Vec<(usize, Ty<'tcx>)>
1457 /// Constructs an ADT instance:
1459 /// - `fields` should be a list of field indices paired with the
1460 /// expression to store into that field. The initializers will be
1461 /// evaluated in the order specified by `fields`.
1463 /// - `optbase` contains information on the base struct (if any) from
1464 /// which remaining fields are copied; see comments on `StructBaseInfo`.
1465 pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1468 fields: &[(usize, &hir::Expr)],
1469 optbase: Option<StructBaseInfo<'a, 'tcx>>,
1471 debug_location: DebugLoc)
1472 -> Block<'blk, 'tcx> {
1473 let _icx = push_ctxt("trans_adt");
1475 let repr = adt::represent_type(bcx.ccx(), ty);
1477 debug_location.apply(bcx.fcx);
1479 // If we don't care about the result, just make a
1480 // temporary stack slot
1481 let addr = match dest {
1484 let llresult = alloc_ty(bcx, ty, "temp");
1485 call_lifetime_start(bcx, llresult);
1490 // This scope holds intermediates that must be cleaned should
1491 // panic occur before the ADT as a whole is ready.
1492 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1495 // Issue 23112: The original logic appeared vulnerable to the same
1496 // order-of-eval bug. But SIMD values are tuple-structs;
1497 // i.e. functional record update (FRU) syntax is unavailable.
1499 // To be safe, double-check that we did not get here via FRU.
1500 assert!(optbase.is_none());
1502 // This is the constructor of a SIMD type, such types are
1503 // always primitive machine types and so do not have a
1504 // destructor or require any clean-up.
1505 let llty = type_of::type_of(bcx.ccx(), ty);
1507 // Keep the vector in a register, running through the fields and
1508 // `insertelement`ing them directly into that register
1509 // (i.e. avoid GEPi and `store`s to an alloca).
1510 let mut vec_val = C_undef(llty);
1512 for &(i, ref e) in fields {
1513 let block_datum = trans(bcx, &**e);
1514 bcx = block_datum.bcx;
1515 let position = C_uint(bcx.ccx(), i);
1516 let value = block_datum.datum.to_llscalarish(bcx);
1517 vec_val = InsertElement(bcx, vec_val, value, position);
1519 Store(bcx, vec_val, addr);
1520 } else if let Some(base) = optbase {
1521 // Issue 23112: If there is a base, then order-of-eval requires
1522 // that the field expressions be evaluated before the base expression.
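// For example, in `Foo { x: f(), ..g() }` the call `f()` must run
// before `g()` (`Foo`, `f`, and `g` are hypothetical names here).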
1524 // First, trans field expressions to temporary scratch values.
1525 let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
1526 let datum = unpack_datum!(bcx, trans(bcx, &**e));
1530 debug_location.apply(bcx.fcx);
1532 // Second, trans the base to the dest.
1533 assert_eq!(discr, 0);
1535 match expr_kind(bcx.tcx(), &*base.expr) {
1536 ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
1537 bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
1539 ExprKind::RvalueStmt => {
1540 bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
1543 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
1544 for &(i, t) in &base.fields {
1545 let datum = base_datum.get_element(
1546 bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
1547 assert!(type_is_sized(bcx.tcx(), datum.ty));
1548 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1549 bcx = datum.store_to(bcx, dest);
1554 // Finally, move scratch field values into actual field locations
1555 for (i, datum) in scratch_vals {
1556 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1557 bcx = datum.store_to(bcx, dest);
1560 // No base means we can write all fields directly in place.
1561 for &(i, ref e) in fields {
1562 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1563 let e_ty = expr_ty_adjusted(bcx, &**e);
1564 bcx = trans_into(bcx, &**e, SaveIn(dest));
1565 let scope = cleanup::CustomScope(custom_cleanup_scope);
1566 fcx.schedule_lifetime_end(scope, dest);
1567 // FIXME: nonzeroing move should generalize to fields
1568 fcx.schedule_drop_mem(scope, dest, e_ty, None);
1572 adt::trans_set_discr(bcx, &*repr, addr, discr);
1574 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1576 // If we don't care about the result, drop the temporary we made
1580 bcx = glue::drop_ty(bcx, addr, ty, debug_location);
1581 base::call_lifetime_end(bcx, addr);
1588 fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1591 -> DatumBlock<'blk, 'tcx, Expr> {
1592 // must not be a string constant; that is an RvalueDpsExpr
1593 let _icx = push_ctxt("trans_immediate_lit");
1594 let ty = expr_ty(bcx, expr);
1595 let v = consts::const_lit(bcx.ccx(), expr, lit);
1596 immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
1599 fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1602 sub_expr: &hir::Expr)
1603 -> DatumBlock<'blk, 'tcx, Expr> {
1604 let ccx = bcx.ccx();
1606 let _icx = push_ctxt("trans_unary_datum");
1608 let method_call = MethodCall::expr(expr.id);
1610 // The only overloaded operator that is translated to a datum
1611 // is an overloaded deref, since it always yields a `&T`.
1612 // Otherwise, we should be in the RvalueDpsExpr path.
1613 assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
1615 let un_ty = expr_ty(bcx, expr);
1617 let debug_loc = expr.debug_loc();
1621 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1622 let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
1623 immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
1626 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1627 let val = datum.to_llscalarish(bcx);
1628 let (bcx, llneg) = {
1630 let result = FNeg(bcx, val, debug_loc);
1633 let is_signed = un_ty.is_signed();
1634 let result = Neg(bcx, val, debug_loc);
1635 let bcx = if bcx.ccx().check_overflow() && is_signed {
1636 let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
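// For example, for i32 the minimum value is -2147483648; its negation
// (+2147483648) does not fit in an i32, so negating the minimum signed
// value must panic when overflow checks are enabled.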
1637 let is_min = ICmp(bcx, llvm::IntEQ, val,
1638 C_integral(llty, min, true), debug_loc);
1639 with_cond(bcx, is_min, |bcx| {
1640 let msg = InternedString::new(
1641 "attempted to negate with overflow");
1642 controlflow::trans_fail(bcx, expr_info(expr), msg)
1650 immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
1653 trans_uniq_expr(bcx, expr, un_ty, sub_expr, expr_ty(bcx, sub_expr))
1656 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1657 deref_once(bcx, expr, datum, method_call)
1662 fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1663 box_expr: &hir::Expr,
1665 contents: &hir::Expr,
1666 contents_ty: Ty<'tcx>)
1667 -> DatumBlock<'blk, 'tcx, Expr> {
1668 let _icx = push_ctxt("trans_uniq_expr");
1670 assert!(type_is_sized(bcx.tcx(), contents_ty));
1671 let llty = type_of::type_of(bcx.ccx(), contents_ty);
1672 let size = llsize_of(bcx.ccx(), llty);
1673 let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
1674 let llty_ptr = llty.ptr_to();
1675 let Result { bcx, val } = malloc_raw_dyn(bcx,
1680 box_expr.debug_loc());
1681 // Unique boxes do not allocate for zero-size types. The standard library
1682 // may assume that `free` is never called on the pointer returned for
1683 // `Box<ZeroSizeType>`.
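// (For example, `Box::new(())` allocates nothing, so in the zero-size
// branch below there is no pointer to schedule for freeing.)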
1684 let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
1685 trans_into(bcx, contents, SaveIn(val))
1687 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1688 fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
1689 val, cleanup::HeapExchange, contents_ty);
1690 let bcx = trans_into(bcx, contents, SaveIn(val));
1691 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1694 immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
1697 fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1698 lval: Datum<'tcx, Lvalue>)
1699 -> DatumBlock<'blk, 'tcx, Expr> {
1700 let dest_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), lval.ty);
1701 let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
1702 memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
1704 DatumBlock::new(bcx, scratch.to_expr_datum())
1707 fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1709 subexpr: &hir::Expr)
1710 -> DatumBlock<'blk, 'tcx, Expr> {
1711 let _icx = push_ctxt("trans_addr_of");
1713 let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
1714 if !type_is_sized(bcx.tcx(), sub_datum.ty) {
1715 // DST lvalue, close to a fat pointer
1716 ref_fat_ptr(bcx, sub_datum)
1718 // Sized value, ref to a thin pointer
1719 let ty = expr_ty(bcx, expr);
1720 immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
1724 fn trans_fat_ptr_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1725 binop_expr: &hir::Expr,
1728 lhs: Datum<'tcx, Rvalue>,
1729 rhs: Datum<'tcx, Rvalue>)
1730 -> DatumBlock<'blk, 'tcx, Expr>
1732 let debug_loc = binop_expr.debug_loc();
1734 let lhs_addr = Load(bcx, GEPi(bcx, lhs.val, &[0, abi::FAT_PTR_ADDR]));
1735 let lhs_extra = Load(bcx, GEPi(bcx, lhs.val, &[0, abi::FAT_PTR_EXTRA]));
1737 let rhs_addr = Load(bcx, GEPi(bcx, rhs.val, &[0, abi::FAT_PTR_ADDR]));
1738 let rhs_extra = Load(bcx, GEPi(bcx, rhs.val, &[0, abi::FAT_PTR_EXTRA]));
1740 let val = match op.node {
1742 let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
1743 let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
1744 And(bcx, addr_eq, extra_eq, debug_loc)
1747 let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
1748 let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
1749 Or(bcx, addr_eq, extra_eq, debug_loc)
1751 hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
1752 // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
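// For instance, for `<=` this is: (a.0 < b.0) || (a.0 == b.0 && a.1 <= b.1).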
1753 let (op, strict_op) = match op.node {
1754 hir::BiLt => (llvm::IntULT, llvm::IntULT),
1755 hir::BiLe => (llvm::IntULE, llvm::IntULT),
1756 hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
1757 hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
1761 let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
1762 let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
1763 let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);
1765 let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
1766 Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
1769 bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1773 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1776 fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1777 binop_expr: &hir::Expr,
1780 lhs: Datum<'tcx, Rvalue>,
1781 rhs: Datum<'tcx, Rvalue>)
1782 -> DatumBlock<'blk, 'tcx, Expr>
1784 let _icx = push_ctxt("trans_scalar_binop");
1786 let tcx = bcx.tcx();
1788 assert!(!lhs_t.is_simd());
1789 let is_float = lhs_t.is_fp();
1790 let is_signed = lhs_t.is_signed();
1791 let info = expr_info(binop_expr);
1793 let binop_debug_loc = binop_expr.debug_loc();
1796 let lhs = lhs.to_llscalarish(bcx);
1797 let rhs = rhs.to_llscalarish(bcx);
1798 let val = match op.node {
1801 FAdd(bcx, lhs, rhs, binop_debug_loc)
1803 let (newbcx, res) = with_overflow_check(
1804 bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
1811 FSub(bcx, lhs, rhs, binop_debug_loc)
1813 let (newbcx, res) = with_overflow_check(
1814 bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
1821 FMul(bcx, lhs, rhs, binop_debug_loc)
1823 let (newbcx, res) = with_overflow_check(
1824 bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
1831 FDiv(bcx, lhs, rhs, binop_debug_loc)
1833 // Only zero-check integers; fp /0 is NaN
1834 bcx = base::fail_if_zero_or_overflows(bcx,
1835 expr_info(binop_expr),
1841 SDiv(bcx, lhs, rhs, binop_debug_loc)
1843 UDiv(bcx, lhs, rhs, binop_debug_loc)
1849 // LLVM currently always lowers `frem` instructions to the appropriate
1850 // library calls typically found in libm. Notably f64 gets wired up
1851 // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
1852 // us, 32-bit MSVC does not actually have a `fmodf` symbol; it's
1853 // instead just an inline function in a header that widens to an
1854 // f64, uses `fmod`, and then truncates back down to an f32.
1856 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
1857 // still unconditionally lower frem instructions over 32-bit floats
1858 // to a call to `fmodf`. To work around this we special case MSVC
1859 // 32-bit float rem instructions and instead do the call out to
1860 // `fmod` ourselves.
1862 // Note that this is currently duplicated with src/libcore/ops.rs
1863 // which does the same thing, and it would be nice to perhaps unify
1864 // these two implementations one day! Also note that we call `fmod`
1865 // for both 32 and 64-bit floats because if we emit any FRem
1866 // instruction at all then LLVM is capable of optimizing it into a
1867 // 32-bit FRem (which we're trying to avoid).
1868 let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
1869 tcx.sess.target.target.arch == "x86";
1871 let f64t = Type::f64(bcx.ccx());
1872 let fty = Type::func(&[f64t, f64t], &f64t);
1873 let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
1875 if lhs_t == tcx.types.f32 {
1876 let lhs = FPExt(bcx, lhs, f64t);
1877 let rhs = FPExt(bcx, rhs, f64t);
1878 let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
1879 FPTrunc(bcx, res, Type::f32(bcx.ccx()))
1881 Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc)
1884 FRem(bcx, lhs, rhs, binop_debug_loc)
1887 // Only zero-check integers; fp %0 is NaN
1888 bcx = base::fail_if_zero_or_overflows(bcx,
1889 expr_info(binop_expr),
1890 op, lhs, rhs, lhs_t);
1892 SRem(bcx, lhs, rhs, binop_debug_loc)
1894 URem(bcx, lhs, rhs, binop_debug_loc)
1898 hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
1899 hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
1900 hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
1902 let (newbcx, res) = with_overflow_check(
1903 bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
1908 let (newbcx, res) = with_overflow_check(
1909 bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
1913 hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
1914 base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
1917 bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1921 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1924 // refinement types would obviate the need for this
1925 enum lazy_binop_ty {
1930 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1931 binop_expr: &hir::Expr,
1935 -> DatumBlock<'blk, 'tcx, Expr> {
1936 let _icx = push_ctxt("trans_lazy_binop");
1937 let binop_ty = expr_ty(bcx, binop_expr);
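// Rough shape of the blocks built below for `a && b` (for `||` the two
// targets of the conditional branch are swapped):
//
//     past_lhs:    lhs = <translate a>
//                  br lhs ? before_rhs : join
//     before_rhs:  rhs = <translate b>      ; ends in block `past_rhs`
//                  br join
//     join:        result = phi [lhs, past_lhs], [rhs, past_rhs]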
1940 let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1941 let lhs = lhs.to_llscalarish(past_lhs);
1943 if past_lhs.unreachable.get() {
1944 return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1947 let join = fcx.new_id_block("join", binop_expr.id);
1948 let before_rhs = fcx.new_id_block("before_rhs", b.id);
1951 lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1952 lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1955 let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1956 let rhs = rhs.to_llscalarish(past_rhs);
1958 if past_rhs.unreachable.get() {
1959 return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1962 Br(past_rhs, join.llbb, DebugLoc::None);
1963 let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1964 &[past_lhs.llbb, past_rhs.llbb]);
1966 return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
1969 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1974 -> DatumBlock<'blk, 'tcx, Expr> {
1975 let _icx = push_ctxt("trans_binary");
1976 let ccx = bcx.ccx();
1978 // if overloaded, would be RvalueDpsExpr
1979 assert!(!ccx.tcx().is_method_call(expr.id));
1983 trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1986 trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
1990 let binop_ty = expr_ty(bcx, expr);
1992 let lhs = unpack_datum!(bcx, trans(bcx, lhs));
1993 let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
1994 debug!("trans_binary (expr {}): lhs={}",
1995 expr.id, lhs.to_string(ccx));
1996 let rhs = unpack_datum!(bcx, trans(bcx, rhs));
1997 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
1998 debug!("trans_binary (expr {}): rhs={}",
1999 expr.id, rhs.to_string(ccx));
2001 if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
2002 assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
2003 "built-in binary operators on fat pointers are homogeneous");
2004 trans_fat_ptr_binop(bcx, expr, binop_ty, op, lhs, rhs)
2006 assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
2007 "built-in binary operators on fat pointers are homogeneous");
2008 trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
2014 fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2016 method_call: MethodCall,
2017 lhs: Datum<'tcx, Expr>,
2018 rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
2021 -> Result<'blk, 'tcx> {
2022 callee::trans_call_inner(bcx,
2024 |bcx, arg_cleanup_scope| {
2025 meth::trans_method_callee(bcx,
2030 callee::ArgOverloadedOp(lhs, rhs, autoref),
2034 fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
2036 callee: &'a hir::Expr,
2037 args: &'a [P<hir::Expr>],
2039 -> Block<'blk, 'tcx> {
2040 debug!("trans_overloaded_call {}", expr.id);
2041 let method_call = MethodCall::expr(expr.id);
2042 let mut all_args = vec!(callee);
2043 all_args.extend(args.iter().map(|e| &**e));
2045 callee::trans_call_inner(bcx,
2047 |bcx, arg_cleanup_scope| {
2048 meth::trans_method_callee(
2054 callee::ArgOverloadedCall(all_args),
2059 pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
2064 if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
2068 match (t_in.builtin_deref(true, ty::NoPreference),
2069 t_out.builtin_deref(true, ty::NoPreference)) {
2070 (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
2074 // This condition isn't redundant with the check for CoercionCast:
2075 // different types can be substituted into the same type, and
2076 // == equality can be overconservative if there are regions.
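// A hedged example: `&x as *const T` (same pointee type on both sides) only
// reinterprets the pointer, so the match above reports it as a no-op and
// trans_imm_cast can hand the datum back unchanged.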
2082 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2085 -> DatumBlock<'blk, 'tcx, Expr>
2087 use middle::ty::cast::CastTy::*;
2088 use middle::ty::cast::IntTy::*;
2090 fn int_cast(bcx: Block,
2097 let _icx = push_ctxt("int_cast");
2098 let srcsz = llsrctype.int_width();
2099 let dstsz = lldsttype.int_width();
2100 return if dstsz == srcsz {
2101 BitCast(bcx, llsrc, lldsttype)
2102 } else if srcsz > dstsz {
2103 TruncOrBitCast(bcx, llsrc, lldsttype)
2105 SExtOrBitCast(bcx, llsrc, lldsttype)
2107 ZExtOrBitCast(bcx, llsrc, lldsttype)
2111 fn float_cast(bcx: Block,
2117 let _icx = push_ctxt("float_cast");
2118 let srcsz = llsrctype.float_width();
2119 let dstsz = lldsttype.float_width();
2120 return if dstsz > srcsz {
2121 FPExt(bcx, llsrc, lldsttype)
2122 } else if srcsz > dstsz {
2123 FPTrunc(bcx, llsrc, lldsttype)
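// These two helpers mirror the source-level semantics: e.g. `255u8 as u32`
// zero-extends, `-1i8 as i32` sign-extends, `0x1234i32 as i8` truncates to
// 0x34, and float casts either extend (f32 -> f64) or truncate (f64 -> f32).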
2127 let _icx = push_ctxt("trans_cast");
2129 let ccx = bcx.ccx();
2131 let t_in = expr_ty_adjusted(bcx, expr);
2132 let t_out = node_id_type(bcx, id);
2134 debug!("trans_cast({:?} as {:?})", t_in, t_out);
2135 let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
2136 let ll_t_out = type_of::arg_type_of(ccx, t_out);
2137 // Convert the value to be cast into a ValueRef, either by-ref or
2138 // by-value as appropriate given its type:
2139 let mut datum = unpack_datum!(bcx, trans(bcx, expr));
2141 let datum_ty = monomorphize_type(bcx, datum.ty);
2143 if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
2145 return DatumBlock::new(bcx, datum);
2148 if type_is_fat_ptr(bcx.tcx(), t_in) {
2149 assert!(datum.kind.is_by_ref());
2150 if type_is_fat_ptr(bcx.tcx(), t_out) {
2151 return DatumBlock::new(bcx, Datum::new(
2152 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
2155 )).to_expr_datumblock();
2157 // Return the address
2158 return immediate_rvalue_bcx(bcx,
2160 Load(bcx, get_dataptr(bcx, datum.val)),
2162 t_out).to_expr_datumblock();
2166 let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
2167 let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
2169 let (llexpr, signed) = if let Int(CEnum) = r_t_in {
2170 let repr = adt::represent_type(ccx, t_in);
2171 let datum = unpack_datum!(
2172 bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
2173 let llexpr_ptr = datum.to_llref();
2174 let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
2175 ll_t_in = val_ty(discr);
2176 (discr, adt::is_discr_signed(&*repr))
2178 (datum.to_llscalarish(bcx), t_in.is_signed())
2181 let newval = match (r_t_in, r_t_out) {
2182 (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
2183 PointerCast(bcx, llexpr, ll_t_out)
2185 (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
2186 (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
2188 (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
2189 (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
2190 (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
2191 (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
2192 (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
2193 (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
2195 _ => ccx.sess().span_bug(expr.span,
2196 &format!("translating unsupported cast: \
2202 return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
2205 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2210 -> Block<'blk, 'tcx> {
2211 let _icx = push_ctxt("trans_assign_op");
2214 debug!("trans_assign_op(expr={:?})", expr);
2216 // User-defined operator methods cannot be used with `+=` etc right now
2217 assert!(!bcx.tcx().is_method_call(expr.id));
2219 // Evaluate LHS (destination), which should be an lvalue
2220 let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
2221 assert!(!bcx.fcx.type_needs_drop(dst.ty));
2222 let lhs = load_ty(bcx, dst.val, dst.ty);
2223 let lhs = immediate_rvalue(lhs, dst.ty);
2225 // Evaluate RHS - FIXME(#28160) this sucks
2226 let rhs = unpack_datum!(bcx, trans(bcx, &*src));
2227 let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
2229 // Perform computation and store the result
2230 let result_datum = unpack_datum!(
2231 bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
2232 return result_datum.store_to(bcx, dst.val);
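// Summing up: `x += y` is compiled here as "load `x`, evaluate `y`, apply the
// scalar binop, store the result back into the `x` lvalue"; overloaded
// operators never reach this path (see the assert above).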
2235 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2236 datum: Datum<'tcx, Expr>,
2238 -> DatumBlock<'blk, 'tcx, Expr> {
2241 // Ensure cleanup of `datum` if not already scheduled and obtain
2242 // a "by ref" pointer.
2243 let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2245 // Compute final type. Note that we are loose with the region and
2246 // mutability, since those things don't matter in trans.
2247 let referent_ty = lv_datum.ty;
2248 let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2251 let llref = lv_datum.to_llref();
2253 // Construct the resulting datum, using what was the "by ref"
2254 // ValueRef of type `referent_ty` to be the "by value" ValueRef
2255 // of type `&referent_ty`.
2256 // Pointers to DST types are non-immediate, and therefore still use ByRef.
2257 let kind = if type_is_sized(bcx.tcx(), referent_ty) { ByValue } else { ByRef };
2258 DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(kind))))
2261 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2263 datum: Datum<'tcx, Expr>,
2265 -> DatumBlock<'blk, 'tcx, Expr> {
2267 let mut datum = datum;
2269 let method_call = MethodCall::autoderef(expr.id, i as u32);
2270 datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2272 DatumBlock { bcx: bcx, datum: datum }
2275 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2277 datum: Datum<'tcx, Expr>,
2278 method_call: MethodCall)
2279 -> DatumBlock<'blk, 'tcx, Expr> {
2280 let ccx = bcx.ccx();
2282 debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
2284 datum.to_string(ccx),
2289 // Check for overloaded deref.
2290 let method_ty = ccx.tcx()
2294 .get(&method_call).map(|method| method.ty);
2296 let datum = match method_ty {
2297 Some(method_ty) => {
2298 let method_ty = monomorphize_type(bcx, method_ty);
2300 // Overloaded. Evaluate `trans_overloaded_op`, which will
2301 // invoke the user's deref() method, which basically
2302 // converts from the `Smaht<T>` pointer that we have into
2303 // a `&T` pointer. We can then proceed down the normal
2304 // path (below) to dereference that `&T`.
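// As a hedged illustration with a user type `MyRc<T>` that implements Deref:
// `*rc` is handled as `*Deref::deref(&rc)`, that is, the method call yields
// a `&T` and the ordinary `&T` case below finishes the dereference.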
2305 let datum = if method_call.autoderef == 0 {
2308 // Always perform an AutoPtr when applying an overloaded auto-deref
2309 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
2312 let ref_ty = // invoked methods have their LB regions instantiated
2313 ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
2314 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2316 unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
2317 datum, None, Some(SaveIn(scratch.val)),
2319 scratch.to_expr_datum()
2322 // Not overloaded. We already have a pointer we know how to deref.
2327 let r = match datum.ty.sty {
2328 ty::TyBox(content_ty) => {
2329 // Make sure we have an lvalue datum here to get the
2330 // proper cleanups scheduled
2331 let datum = unpack_datum!(
2332 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2334 if type_is_sized(bcx.tcx(), content_ty) {
2335 let ptr = load_ty(bcx, datum.val, datum.ty);
2336 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
2338 // A fat pointer and a DST lvalue have the same representation
2339 // just different types. Since there is no temporary for `*e`
2340 // here (because it is unsized), we cannot emulate the sized
2341 // object code path for running drop glue and free. Instead,
2342 // we schedule cleanup for `e`, turning it into an lvalue.
2344 let lval = Lvalue::new("expr::deref_once ty_uniq");
2345 let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
2346 DatumBlock::new(bcx, datum)
2350 ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
2351 ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
2352 let lval = Lvalue::new("expr::deref_once ptr");
2353 if type_is_sized(bcx.tcx(), content_ty) {
2354 let ptr = datum.to_llscalarish(bcx);
2356 // Always generate an lvalue datum, even if datum.mode is
2357 // an rvalue. This is because datum.mode is only an
2358 // rvalue for non-owning pointers like &T or *T, in which
2359 // case cleanup *is* scheduled elsewhere, by the true
2360 // owner (or, in the case of *T, by the user).
2361 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
2363 // A fat pointer and a DST lvalue have the same representation
2364 // just different types.
2365 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
2370 bcx.tcx().sess.span_bug(
2372 &format!("deref invoked on expr of invalid type {:?}",
2377 debug!("deref_once(expr={}, method_call={:?}, result={})",
2378 expr.id, method_call, r.datum.to_string(ccx));
2393 fn codegen_strategy(&self) -> OverflowCodegen {
2394 use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
2396 OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2397 OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2398 OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
2400 OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2401 OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
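// (There is no `*.with.overflow.*` LLVM intrinsic for shifts; their only
// failure mode is an out-of-range shift amount, so checking the RHS up
// front is sufficient.)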
2406 enum OverflowCodegen {
2407 ViaIntrinsic(OverflowOpViaIntrinsic),
2408 ViaInputCheck(OverflowOpViaInputCheck),
2411 enum OverflowOpViaInputCheck { Shl, Shr, }
2414 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
2416 impl OverflowOpViaIntrinsic {
2417 fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2418 let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2419 bcx.ccx().get_intrinsic(&name)
2421 fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
2422 use syntax::ast::IntTy::*;
2423 use syntax::ast::UintTy::*;
2424 use middle::ty::{TyInt, TyUint};
2426 let new_sty = match ty.sty {
2427 TyInt(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] {
2428 "32" => TyInt(TyI32),
2429 "64" => TyInt(TyI64),
2430 _ => panic!("unsupported target word size")
2432 TyUint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] {
2433 "32" => TyUint(TyU32),
2434 "64" => TyUint(TyU64),
2435 _ => panic!("unsupported target word size")
2437 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
2438 _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
2443 OverflowOpViaIntrinsic::Add => match new_sty {
2444 TyInt(TyI8) => "llvm.sadd.with.overflow.i8",
2445 TyInt(TyI16) => "llvm.sadd.with.overflow.i16",
2446 TyInt(TyI32) => "llvm.sadd.with.overflow.i32",
2447 TyInt(TyI64) => "llvm.sadd.with.overflow.i64",
2449 TyUint(TyU8) => "llvm.uadd.with.overflow.i8",
2450 TyUint(TyU16) => "llvm.uadd.with.overflow.i16",
2451 TyUint(TyU32) => "llvm.uadd.with.overflow.i32",
2452 TyUint(TyU64) => "llvm.uadd.with.overflow.i64",
2454 _ => unreachable!(),
2456 OverflowOpViaIntrinsic::Sub => match new_sty {
2457 TyInt(TyI8) => "llvm.ssub.with.overflow.i8",
2458 TyInt(TyI16) => "llvm.ssub.with.overflow.i16",
2459 TyInt(TyI32) => "llvm.ssub.with.overflow.i32",
2460 TyInt(TyI64) => "llvm.ssub.with.overflow.i64",
2462 TyUint(TyU8) => "llvm.usub.with.overflow.i8",
2463 TyUint(TyU16) => "llvm.usub.with.overflow.i16",
2464 TyUint(TyU32) => "llvm.usub.with.overflow.i32",
2465 TyUint(TyU64) => "llvm.usub.with.overflow.i64",
2467 _ => unreachable!(),
2469 OverflowOpViaIntrinsic::Mul => match new_sty {
2470 TyInt(TyI8) => "llvm.smul.with.overflow.i8",
2471 TyInt(TyI16) => "llvm.smul.with.overflow.i16",
2472 TyInt(TyI32) => "llvm.smul.with.overflow.i32",
2473 TyInt(TyI64) => "llvm.smul.with.overflow.i64",
2475 TyUint(TyU8) => "llvm.umul.with.overflow.i8",
2476 TyUint(TyU16) => "llvm.umul.with.overflow.i16",
2477 TyUint(TyU32) => "llvm.umul.with.overflow.i32",
2478 TyUint(TyU64) => "llvm.umul.with.overflow.i64",
2480 _ => unreachable!(),
2485 fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2486 info: NodeIdAndSpan,
2487 lhs_t: Ty<'tcx>, lhs: ValueRef,
2489 binop_debug_loc: DebugLoc)
2490 -> (Block<'blk, 'tcx>, ValueRef) {
2491 let llfn = self.to_intrinsic(bcx, lhs_t);
2493 let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
2494 let result = ExtractValue(bcx, val, 0); // iN operation result
2495 let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
2497 let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
2500 let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2501 Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2502 None, binop_debug_loc);
2505 base::with_cond(bcx, cond, |bcx|
2506 controlflow::trans_fail(bcx, info,
2507 InternedString::new("arithmetic operation overflowed")));
2513 impl OverflowOpViaInputCheck {
2514 fn build_with_input_check<'blk, 'tcx>(&self,
2515 bcx: Block<'blk, 'tcx>,
2516 info: NodeIdAndSpan,
2520 binop_debug_loc: DebugLoc)
2521 -> (Block<'blk, 'tcx>, ValueRef)
2523 let lhs_llty = val_ty(lhs);
2524 let rhs_llty = val_ty(rhs);
2526 // Panic if any bits are set outside of bits that we always mask in.
2529 // Note that the mask's value is derived from the LHS type
2530 // (since that is where the 32/64 distinction is relevant) but
2531 // the mask's type must match the RHS type (since they will
2532 // both be fed into an And-binop).
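// Worked example (hedged, exact widths depend on the expression): for
// `x << n` with a 64-bit `x` and a 32-bit RHS, the mask value is 63 and the
// inverted mask !63 is built at the RHS's 32-bit type; any bit surviving
// `rhs & !63` means the shift amount is out of range and we must panic.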
2533 let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
2535 let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2536 let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
2537 let result = match *self {
2538 OverflowOpViaInputCheck::Shl =>
2539 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2540 OverflowOpViaInputCheck::Shr =>
2541 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2544 base::with_cond(bcx, cond, |bcx|
2545 controlflow::trans_fail(bcx, info,
2546 InternedString::new("shift operation overflowed")));
2552 fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2555 invert: bool) -> ValueRef {
2556 let kind = llty.kind();
2558 TypeKind::Integer => {
2559 // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
2560 let val = llty.int_width() - 1;
2562 C_integral(mask_llty, !val, true)
2564 C_integral(mask_llty, val, false)
2567 TypeKind::Vector => {
2568 let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
2569 VectorSplat(bcx, mask_llty.vector_length(), mask)
2571 _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
2575 // Check if an integer value is nonzero, or if any element of a vector is.
2576 fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2578 binop_debug_loc: DebugLoc) -> ValueRef {
2579 let llty = val_ty(value);
2580 let kind = llty.kind();
2582 TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
2583 TypeKind::Vector => {
2584 // Check if any elements of the vector are nonzero by treating
2585 // it as a wide integer and checking if the integer is nonzero.
2586 let width = llty.vector_length() as u64 * llty.element_type().int_width();
2587 let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
2588 build_nonzero_check(bcx, int_value, binop_debug_loc)
2590 _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
2594 // To avoid UB from LLVM, these two functions mask RHS with an
2595 // appropriate mask unconditionally (i.e. the fallback behavior for
2596 // all shifts). For 32- and 64-bit types, this matches the semantics
2597 // of Java. (See related discussion on #1877 and #10183.)
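// For example, in the unchecked path `1u32 << 37` is emitted as
// `1 << (37 & 31)`, i.e. `1 << 5`, matching what Java (and most hardware)
// does for 32-bit shift counts.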
2599 fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2602 binop_debug_loc: DebugLoc) -> ValueRef {
2603 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
2604 // #1877, #10183: Ensure that input is always valid
2605 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
2606 Shl(bcx, lhs, rhs, binop_debug_loc)
2609 fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2613 binop_debug_loc: DebugLoc) -> ValueRef {
2614 let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
2615 // #1877, #10183: Ensure that input is always valid
2616 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
2617 let is_signed = lhs_t.is_signed();
2619 AShr(bcx, lhs, rhs, binop_debug_loc)
2621 LShr(bcx, lhs, rhs, binop_debug_loc)
2625 fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2627 debug_loc: DebugLoc) -> ValueRef {
2628 let rhs_llty = val_ty(rhs);
2629 And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
2632 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2633 lhs_t: Ty<'tcx>, lhs: ValueRef,
2635 binop_debug_loc: DebugLoc)
2636 -> (Block<'blk, 'tcx>, ValueRef) {
2637 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2638 if bcx.ccx().check_overflow() {
2640 match oop.codegen_strategy() {
2641 OverflowCodegen::ViaIntrinsic(oop) =>
2642 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2643 OverflowCodegen::ViaInputCheck(oop) =>
2644 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2647 let res = match oop {
2648 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2649 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2650 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2653 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2655 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2661 /// We categorize expressions into three kinds. The distinction between
2662 /// lvalue/rvalue is fundamental to the language. The distinction between the
2663 /// two kinds of rvalues is an artifact of trans which reflects how we will
2664 /// generate code for that kind of expression. See the module docs at the top of this file for more information.
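/// Lvalues are expressions that name a memory location: a local variable,
/// `*ptr`, `s.field`, or `v[i]`, for instance.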
2666 #[derive(Copy, Clone)]
2674 fn expr_kind(tcx: &ty::ctxt, expr: &hir::Expr) -> ExprKind {
2675 if tcx.is_method_call(expr.id) {
2676 // Overloaded operations are generally calls, and hence they are
2677 // generated via DPS, but there are a few exceptions:
2678 return match expr.node {
2679 // `a += b` has a unit result.
2680 hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
2682 // the deref method invoked for `*a` always yields an `&T`
2683 hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
2685 // the index method invoked for `a[i]` always yields an `&T`
2686 hir::ExprIndex(..) => ExprKind::Lvalue,
2688 // in the general case, result could be any type, use DPS
2689 _ => ExprKind::RvalueDps
2694 hir::ExprPath(..) => {
2695 match tcx.resolve_expr(expr) {
2696 def::DefStruct(_) | def::DefVariant(..) => {
2697 if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
2699 ExprKind::RvalueDatum
2705 // Special case: a unit-like struct's constructor must be called without `()` at the
2706 // end (like `UnitStruct`), which means this is an ExprPath to a DefFn. But in the case
2707 // of unit structs this should not be interpreted as a function pointer but as a
2708 // call to the constructor.
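// E.g. given `struct UnitStruct;`, the path expression `UnitStruct` resolves
// to its constructor (a DefFn), yet it must build the unit-struct value
// rather than evaluate to a function pointer.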
2709 def::DefFn(_, true) => ExprKind::RvalueDps,
2711 // Fn pointers are just scalar values.
2712 def::DefFn(..) | def::DefMethod(..) => ExprKind::RvalueDatum,
2714 // Note: there is actually a good case to be made that
2715 // DefArgs, particularly those of immediate type, ought to
2716 // be considered rvalues.
2717 def::DefStatic(..) |
2719 def::DefLocal(..) => ExprKind::Lvalue,
2722 def::DefAssociatedConst(..) => ExprKind::RvalueDatum,
2727 &format!("uncategorized def for expr {}: {:?}",
2734 hir::ExprUnary(hir::UnDeref, _) |
2735 hir::ExprField(..) |
2736 hir::ExprTupField(..) |
2737 hir::ExprIndex(..) => {
2742 hir::ExprMethodCall(..) |
2743 hir::ExprStruct(..) |
2744 hir::ExprRange(..) |
2747 hir::ExprMatch(..) |
2748 hir::ExprClosure(..) |
2749 hir::ExprBlock(..) |
2750 hir::ExprRepeat(..) |
2751 hir::ExprVec(..) => {
2755 hir::ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {
2759 hir::ExprBreak(..) |
2760 hir::ExprAgain(..) |
2762 hir::ExprWhile(..) |
2764 hir::ExprAssign(..) |
2765 hir::ExprInlineAsm(..) |
2766 hir::ExprAssignOp(..) => {
2767 ExprKind::RvalueStmt
2770 hir::ExprLit(_) | // Note: LitStr is carved out above
2771 hir::ExprUnary(..) |
2772 hir::ExprBox(None, _) |
2773 hir::ExprAddrOf(..) |
2774 hir::ExprBinary(..) |
2775 hir::ExprCast(..) => {
2776 ExprKind::RvalueDatum
2779 hir::ExprBox(Some(ref place), _) => {
2780 // Special case `Box<T>` for now:
2781 let def_id = match tcx.def_map.borrow().get(&place.id) {
2782 Some(def) => def.def_id(),
2783 None => panic!("no def for place"),
2785 if tcx.lang_items.exchange_heap() == Some(def_id) {
2786 ExprKind::RvalueDatum