1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Translation of Expressions
13 //! The expr module handles translation of expressions. The most general
14 //! translation routine is `trans()`, which will translate an expression
15 //! into a datum. `trans_into()` is also available, which will translate
16 //! an expression and write the result directly into memory, sometimes
17 //! avoiding the need for a temporary stack slot. Finally,
18 //! `trans_to_lvalue()` is available if you'd like to ensure that the
19 //! result has cleanup scheduled.
21 //! Internally, each of these functions dispatches to various other
22 //! expression functions depending on the kind of expression. We divide
23 //! up expressions into:
25 //! - **Datum expressions:** Those that most naturally yield values.
26 //! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
27 //! - **DPS expressions:** Those that most naturally write into a location
28 //! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
29 //! - **Statement expressions:** Those that do not generate a meaningful
30 //! result. Examples would be `while { ... }` or `return 44`.
32 //! Public entry points:
34 //! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
35 //! storing the result into `dest`. This is the preferred form, if you
38 //! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
39 //! `Datum` with the result. You can then store the datum, inspect
40 //! the value, etc. This may introduce temporaries if the datum is a
43 //! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
44 //! expression and ensures that the result has a cleanup associated with it,
45 //! creating a temporary stack slot if necessary.
47 //! - `trans_local_var -> Datum`: looks up a local variable or upvar.
49 #![allow(non_camel_case_types)]
51 pub use self::Dest::*;
52 use self::lazy_binop_ty::*;
55 use llvm::{self, ValueRef, TypeKind};
56 use middle::const_qualif::ConstQualif;
58 use middle::subst::Substs;
59 use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
62 use trans::cleanup::{self, CleanupMethods, DropHintMethods};
65 use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
73 use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
74 use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
75 use middle::ty::adjustment::CustomCoerceUnsized;
76 use middle::ty::{self, Ty};
77 use middle::ty::MethodCall;
78 use middle::ty::cast::{CastKind, CastTy};
79 use util::common::indenter;
80 use trans::machine::{llsize_of, llsize_of_alloc};
81 use trans::type_::Type;
86 use syntax::{ast, codemap};
87 use syntax::parse::token::InternedString;
93 // These are passed around by the code generating functions to track the
94 // destination of a computation's value.
// NOTE(review): the type this derive attaches to is elided from this chunk;
// the match arms below render two variants, `SaveIn(ValueRef)` and `Ignore`,
// which appear to be the `Dest` enum re-exported at the top of the file —
// confirm against the full source.
96 #[derive(Copy, Clone, PartialEq)]
// Debug-rendering helper: prints the destination, using the context's LLVM
// value printer for the pointer carried by `SaveIn`.
103 pub fn to_string(&self, ccx: &CrateContext) -> String {
105 SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
106 Ignore => "Ignore".to_string()
111 /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
112 /// better optimized LLVM code.
113 pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
116 -> Block<'blk, 'tcx> {
119 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
// If the type tables record adjustments we can't shortcut, fall back to
// the general `trans` path, which applies them.
121 if adjustment_required(bcx, expr) {
122 // use trans, which may be less efficient but
123 // which will perform the adjustments:
124 let datum = unpack_datum!(bcx, trans(bcx, expr));
125 return datum.store_to_dest(bcx, dest, expr.id);
// Fast path: if const-qualification says this expression is a constant
// needing no drop, try to emit it as a pre-evaluated LLVM global and
// memcpy it straight into the destination.
128 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
129 if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
130 if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
131 if let SaveIn(lldest) = dest {
132 match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
133 bcx.fcx.param_substs,
134 consts::TrueConst::No) {
136 // Cast pointer to destination, because constants
137 // have different types.
138 let lldest = PointerCast(bcx, lldest, val_ty(global));
139 memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
142 Err(consts::ConstEvalFailure::Runtime(_)) => {
143 // in case const evaluation errors, translate normally
144 // debug assertions catch the same errors
147 Err(consts::ConstEvalFailure::Compiletime(_)) => {
153 // If we see a const here, that's because it evaluates to a type with zero size. We
154 // should be able to just discard it, since const expressions are guaranteed not to
155 // have side effects. This seems to be reached through tuple struct constructors being
156 // passed zero-size constants.
157 if let hir::ExprPath(..) = expr.node {
158 match bcx.def(expr.id) {
159 Def::Const(_) | Def::AssociatedConst(_) => {
160 assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
167 // Even if we don't have a value to emit, and the expression
168 // doesn't have any side-effects, we still have to translate the
169 // body of any closures.
170 // FIXME: Find a better way of handling this case.
172 // The only way we're going to see a `const` at this point is if
173 // it prefers in-place instantiation, likely because it contains
174 // `[x; N]` somewhere within.
176 hir::ExprPath(..) => {
177 match bcx.def(expr.id) {
178 Def::Const(did) | Def::AssociatedConst(did) => {
179 let empty_substs = bcx.tcx().mk_substs(Substs::trans_empty());
180 let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
182 // Temporarily get cleanup scopes out of the way,
183 // as they require sub-expressions to be contained
184 // inside the current AST scope.
185 // These should record no cleanups anyways, `const`
186 // can't have destructors.
187 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
189 // Lock emitted debug locations to the location of
190 // the constant reference expression.
191 debuginfo::with_source_location_override(bcx.fcx,
// Recursively translate the resolved const's defining expression
// into the same destination.
194 bcx = trans_into(bcx, const_expr, dest)
196 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
198 assert!(scopes.is_empty());
// General case: dispatch on the expression's kind inside a fresh AST
// cleanup scope, popping (and emitting) the scope afterwards.
209 debug!("trans_into() expr={:?}", expr);
211 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
215 bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
217 let kind = expr_kind(bcx.tcx(), expr);
219 ExprKind::Lvalue | ExprKind::RvalueDatum => {
220 trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
222 ExprKind::RvalueDps => {
223 trans_rvalue_dps_unadjusted(bcx, expr, dest)
225 ExprKind::RvalueStmt => {
226 trans_rvalue_stmt_unadjusted(bcx, expr)
230 bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
233 /// Translates an expression, returning a datum (and new block) encapsulating the result. When
234 /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
236 pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
238 -> DatumBlock<'blk, 'tcx, Expr> {
239 debug!("trans(expr={:?})", expr);
// First try constant evaluation: if this succeeds, `global` below holds a
// ready-made datum and `trans_unadjusted` is skipped entirely.
243 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
244 let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
245 let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
246 match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
247 bcx.fcx.param_substs,
248 consts::TrueConst::No) {
250 if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
251 // Is borrowed as 'static, must return lvalue.
253 // Cast pointer to global, because constants have different types.
254 let const_ty = expr_ty_adjusted(bcx, expr);
255 let llty = type_of::type_of(bcx.ccx(), const_ty);
256 let global = PointerCast(bcx, global, llty.ptr_to());
257 let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
258 return DatumBlock::new(bcx, datum.to_expr_datum());
261 // Otherwise, keep around and perform adjustments, if needed.
262 let const_ty = if adjusted_global {
263 expr_ty_adjusted(bcx, expr)
268 // This could use a better heuristic.
269 Some(if type_is_immediate(bcx.ccx(), const_ty) {
270 // Cast pointer to global, because constants have different types.
271 let llty = type_of::type_of(bcx.ccx(), const_ty);
272 let global = PointerCast(bcx, global, llty.ptr_to());
273 // Maybe just get the value directly, instead of loading it?
274 immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
// Non-immediate constant: copy it into a fresh stack slot and hand
// back a by-ref rvalue datum over that slot.
276 let scratch = alloc_ty(bcx, const_ty, "const");
277 call_lifetime_start(bcx, scratch);
278 let lldest = if !const_ty.is_structural() {
279 // Cast pointer to slot, because constants have different types.
280 PointerCast(bcx, scratch, val_ty(global))
282 // In this case, memcpy_ty calls llvm.memcpy after casting both
283 // source and destination to i8*, so we don't need any casts.
286 memcpy_ty(bcx, lldest, global, const_ty);
287 Datum::new(scratch, const_ty, Rvalue::new(ByRef))
290 Err(consts::ConstEvalFailure::Runtime(_)) => {
291 // in case const evaluation errors, translate normally
292 // debug assertions catch the same errors
296 Err(consts::ConstEvalFailure::Compiletime(_)) => {
297 // generate a dummy llvm value
298 let const_ty = expr_ty(bcx, expr);
299 let llty = type_of::type_of(bcx.ccx(), const_ty);
300 let dummy = C_undef(llty.ptr_to());
301 Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
// Normal translation path, wrapped in an AST cleanup scope; adjustments
// are applied here only when consts didn't already account for them.
308 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
312 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
313 let datum = match global {
314 Some(rvalue) => rvalue.to_expr_datum(),
315 None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
317 let datum = if adjusted_global {
318 datum // trans::consts already performed adjustments.
320 unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
322 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
323 return DatumBlock::new(bcx, datum);
/// Returns a pointer to the extra-data ("meta") field of the fat pointer
/// stored at `fat_ptr` (e.g. a slice length or vtable pointer — the field
/// at index `abi::FAT_PTR_EXTRA`).
326 pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
327 StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
/// Returns a pointer to the data-pointer field of the fat pointer stored at
/// `fat_ptr` (the field at index `abi::FAT_PTR_ADDR`).
330 pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
331 StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
/// Copies a fat pointer field-by-field: loads the data pointer and the
/// extra-data word from `src_ptr` and stores them into `dst_ptr`.
334 pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
335 Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
336 Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
/// Reports whether `expr` carries type adjustments (recorded in the typeck
/// tables) that the fast path in `trans_into` cannot skip, forcing the
/// general `trans` + `apply_adjustments` route.
339 fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
340 expr: &hir::Expr) -> bool {
341 let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
// No recorded adjustment: nothing to apply.
342 None => { return false; }
346 // Don't skip a conversion from Box<T> to &T, etc.
347 if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
352 AdjustReifyFnPointer => {
353 // FIXME(#19925) once fn item types are
354 // zero-sized, we'll need to return true here
357 AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
358 // purely a type-level thing
361 AdjustDerefRef(ref adj) => {
362 // We are a bit paranoid about adjustments and thus might have a re-
363 // borrow here which merely derefs and then refs again (it might have
364 // a different region or mutability, but we don't care here).
// A pure reborrow (one autoderef immediately re-referenced, no
// unsizing) requires no codegen; anything else does.
365 !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
370 /// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
371 /// translation of `expr`.
372 fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
374 datum: Datum<'tcx, Expr>)
375 -> DatumBlock<'blk, 'tcx, Expr>
378 let mut datum = datum;
379 let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
// No adjustment recorded: return the datum unchanged.
381 return DatumBlock::new(bcx, datum);
385 debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
387 datum.to_string(bcx.ccx()),
390 AdjustReifyFnPointer => {
391 // FIXME(#19925) once fn item types are
392 // zero-sized, we'll need to do something here
394 AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
395 // purely a type-level thing
397 AdjustDerefRef(ref adj) => {
398 let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
399 // We are a bit paranoid about adjustments and thus might have a re-
400 // borrow here which merely derefs and then refs again (it might have
401 // a different region or mutability, but we don't care here).
403 // Don't skip a conversion from Box<T> to &T, etc.
405 if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
406 // Don't skip an overloaded deref.
// Apply any autoderefs beyond the skipped reborrow: force the datum
// to an lvalue first so the derefs have a stable home.
418 if adj.autoderefs > skip_reborrows {
420 let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
421 datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
422 lval.to_expr_datum(),
423 adj.autoderefs - skip_reborrows));
426 // (You might think there is a more elegant way to do this than a
427 // skip_reborrows bool, but then you remember that the borrow checker exists).
428 if skip_reborrows == 0 && adj.autoref.is_some() {
429 datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
// Finally, handle an unsizing coercion (e.g. `[T; N]` -> `[T]`) by
// materializing the target in a fresh scratch slot.
432 if let Some(target) = adj.unsize {
433 // We do not arrange cleanup ourselves; if we already are an
434 // L-value, then cleanup will have already been scheduled (and
435 // the `datum.to_rvalue_datum` call below will emit code to zero
436 // the drop flag when moving out of the L-value). If we are an
437 // R-value, then we do not need to schedule cleanup.
438 let source_datum = unpack_datum!(bcx,
439 datum.to_rvalue_datum(bcx, "__coerce_source"));
441 let target = bcx.monomorphize(&target);
443 let scratch = alloc_ty(bcx, target, "__coerce_target");
444 call_lifetime_start(bcx, scratch);
445 let target_datum = Datum::new(scratch, target,
447 bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
448 datum = Datum::new(scratch, target,
449 RvalueExpr(Rvalue::new(ByRef)));
453 debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
454 DatumBlock::new(bcx, datum)
/// Emits the code for an unsizing coercion, writing the fat-pointer (or
/// struct containing one) for `source` into the by-ref `target` slot.
/// Pointer-like pairs get a data pointer + metadata store; structs are
/// copied field-by-field, recursing on the single field that coerces.
457 fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
459 source: Datum<'tcx, Rvalue>,
460 target: Datum<'tcx, Rvalue>)
461 -> Block<'blk, 'tcx> {
463 debug!("coerce_unsized({} -> {})",
464 source.to_string(bcx.ccx()),
465 target.to_string(bcx.ccx()));
// Pointer-to-pointer coercions: Box/&/*const/*mut in any of the listed
// combinations; `a`/`b` are the respective pointees.
467 match (&source.ty.sty, &target.ty.sty) {
468 (&ty::TyBox(a), &ty::TyBox(b)) |
469 (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
470 &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
471 (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
472 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
473 (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
474 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
475 let (inner_source, inner_target) = (a, b);
477 let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
478 // Normally, the source is a thin pointer and we are
479 // adding extra info to make a fat pointer. The exception
480 // is when we are upcasting an existing object fat pointer
481 // to use a different vtable. In that case, we want to
482 // load out the original data pointer so we can repackage
484 (Load(bcx, get_dataptr(bcx, source.val)),
485 Some(Load(bcx, get_meta(bcx, source.val))))
487 let val = if source.kind.is_by_ref() {
488 load_ty(bcx, source.val, source.ty)
// Compute the metadata word (length, vtable, ...) for the new
// fat pointer, reusing `old_info` when upcasting.
495 let info = unsized_info(bcx.ccx(), inner_source, inner_target,
496 old_info, bcx.fcx.param_substs);
498 // Compute the base pointer. This doesn't change the pointer value,
499 // but merely its type.
500 let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
501 let base = PointerCast(bcx, base, ptr_ty);
503 Store(bcx, base, get_dataptr(bcx, target.val));
504 Store(bcx, info, get_meta(bcx, target.val));
507 // This can be extended to enums and tuples in the future.
508 // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
509 (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
510 assert_eq!(def_id_a, def_id_b);
512 // The target is already by-ref because it's to be written to.
513 let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
514 assert!(target.kind.is_by_ref());
516 let kind = custom_coerce_unsize_info(bcx.ccx(), source.ty, target.ty);
518 let repr_source = adt::represent_type(bcx.ccx(), source.ty);
519 let src_fields = match &*repr_source {
520 &adt::Repr::Univariant(ref s, _) => &s.fields,
521 _ => bcx.sess().span_bug(span,
522 &format!("Non univariant struct? (repr_source: {:?})",
525 let repr_target = adt::represent_type(bcx.ccx(), target.ty);
526 let target_fields = match &*repr_target {
527 &adt::Repr::Univariant(ref s, _) => &s.fields,
528 _ => bcx.sess().span_bug(span,
529 &format!("Non univariant struct? (repr_target: {:?})",
// `coerce_index` is the one field whose type actually changes.
533 let coerce_index = match kind {
534 CustomCoerceUnsized::Struct(i) => i
536 assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
538 let source_val = adt::MaybeSizedValue::sized(source.val);
539 let target_val = adt::MaybeSizedValue::sized(target.val);
541 let iter = src_fields.iter().zip(target_fields).enumerate();
542 for (i, (src_ty, target_ty)) in iter {
543 let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
544 let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);
546 // If this is the field we need to coerce, recurse on it.
547 if i == coerce_index {
548 coerce_unsized(bcx, span,
549 Datum::new(ll_source, src_ty,
551 Datum::new(ll_target, target_ty,
552 Rvalue::new(ByRef)));
554 // Otherwise, simply copy the data from the source.
555 assert!(src_ty.is_phantom_data() || src_ty == target_ty);
556 memcpy_ty(bcx, ll_target, ll_source, src_ty);
560 _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
567 /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
568 /// that the expr represents.
570 /// If this expression is an rvalue, this implies introducing a temporary. In other words,
571 /// something like `x().f` is translated into roughly the equivalent of
573 /// { tmp = x(); tmp.f }
574 pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
577 -> DatumBlock<'blk, 'tcx, Lvalue> {
// Translate normally, then force the resulting datum into an lvalue,
// which schedules cleanup (and spills to a stack slot) as needed.
579 let datum = unpack_datum!(bcx, trans(bcx, expr));
580 return datum.to_lvalue_datum(bcx, name, expr.id);
583 /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
585 fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
587 -> DatumBlock<'blk, 'tcx, Expr> {
590 debug!("trans_unadjusted(expr={:?})", expr);
591 let _indenter = indenter();
593 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
// Dispatch on the expression's kind: datum-producing expressions are
// translated directly, statement/DPS expressions are funneled into a
// datum representation after the fact.
595 return match expr_kind(bcx.tcx(), expr) {
596 ExprKind::Lvalue | ExprKind::RvalueDatum => {
597 let datum = unpack_datum!(bcx, {
598 trans_datum_unadjusted(bcx, expr)
601 DatumBlock {bcx: bcx, datum: datum}
604 ExprKind::RvalueStmt => {
// Statement expressions produce no meaningful value; synthesize
// an undef "nil" datum of the expression's type.
605 bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
606 nil(bcx, expr_ty(bcx, expr))
609 ExprKind::RvalueDps => {
610 let ty = expr_ty(bcx, expr);
611 if type_is_zero_size(bcx.ccx(), ty) {
612 bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
// Non-zero-sized DPS result: evaluate into a scratch slot,
// then convert that slot into an appropriate datum.
615 let scratch = rvalue_scratch_datum(bcx, ty, "");
616 bcx = trans_rvalue_dps_unadjusted(
617 bcx, expr, SaveIn(scratch.val));
619 // Note: this is not obviously a good idea. It causes
620 // immediate values to be loaded immediately after a
621 // return from a call or other similar expression,
622 // which in turn leads to alloca's having shorter
623 // lifetimes and hence larger stack frames. However,
624 // in turn it can lead to more register pressure.
625 // Still, in practice it seems to increase
626 // performance, since we have fewer problems with
628 let scratch = unpack_datum!(
629 bcx, scratch.to_appropriate_datum(bcx));
631 DatumBlock::new(bcx, scratch.to_expr_datum())
/// Produces an undef immediate rvalue datum of type `ty`; used as the
/// placeholder result for statement-like expressions that yield no value.
636 fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
637 -> DatumBlock<'blk, 'tcx, Expr> {
638 let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
639 let datum = immediate_rvalue(llval, ty);
640 DatumBlock::new(bcx, datum.to_expr_datum())
/// Translates datum-mode (lvalue / rvalue-datum) expressions by dispatching
/// on the HIR node; each arm forwards to the dedicated helper for that
/// expression form. Unlisted forms hit the span_bug fall-through.
644 fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
646 -> DatumBlock<'blk, 'tcx, Expr> {
649 let _icx = push_ctxt("trans_datum_unadjusted");
// Type ascription is transparent: translate the inner expression.
652 hir::ExprType(ref e, _) => {
655 hir::ExprPath(..) => {
656 trans_def(bcx, expr, bcx.def(expr.id))
658 hir::ExprField(ref base, name) => {
659 trans_rec_field(bcx, &base, name.node)
661 hir::ExprTupField(ref base, idx) => {
662 trans_rec_tup_field(bcx, &base, idx.node)
664 hir::ExprIndex(ref base, ref idx) => {
665 trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
667 hir::ExprBox(ref contents) => {
668 // Special case for `Box<T>`
669 let box_ty = expr_ty(bcx, expr);
670 let contents_ty = expr_ty(bcx, &contents);
673 trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
675 _ => bcx.sess().span_bug(expr.span,
676 "expected unique box")
680 hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
681 hir::ExprBinary(op, ref lhs, ref rhs) => {
682 trans_binary(bcx, expr, op, &lhs, &rhs)
684 hir::ExprUnary(op, ref x) => {
685 trans_unary(bcx, expr, op, &x)
687 hir::ExprAddrOf(_, ref x) => {
689 hir::ExprRepeat(..) | hir::ExprVec(..) => {
690 // Special case for slices.
691 let cleanup_debug_loc =
692 debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
696 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
697 let datum = unpack_datum!(
698 bcx, tvec::trans_slice_vec(bcx, expr, &x));
699 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
700 DatumBlock::new(bcx, datum)
703 trans_addr_of(bcx, expr, &x)
707 hir::ExprCast(ref val, _) => {
708 // Datum output mode means this is a scalar cast:
709 trans_imm_cast(bcx, &val, expr.id)
712 bcx.tcx().sess.span_bug(
714 &format!("trans_rvalue_datum_unadjusted reached \
715 fall-through case: {:?}",
/// Shared implementation behind `base.field` and `base.<idx>`: translates
/// `base` to an lvalue and produces a datum for the field selected by the
/// `get_idx` callback (which maps variant info to a field index).
721 fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
724 -> DatumBlock<'blk, 'tcx, Expr> where
725 F: FnOnce(&'blk ty::ctxt<'tcx>, &VariantInfo<'tcx>) -> usize,
728 let _icx = push_ctxt("trans_rec_field");
730 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
731 let bare_ty = base_datum.ty;
732 let repr = adt::represent_type(bcx.ccx(), bare_ty);
733 let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);
735 let ix = get_idx(bcx.tcx(), &vinfo);
736 let d = base_datum.get_element(
740 adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
743 if type_is_sized(bcx.tcx(), d.ty) {
744 DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
// Unsized field: build a fat pointer in a scratch slot, reusing the
// metadata word from the base fat pointer.
746 let scratch = rvalue_scratch_datum(bcx, d.ty, "");
747 Store(bcx, d.val, get_dataptr(bcx, scratch.val));
748 let info = Load(bcx, get_meta(bcx, base_datum.val));
749 Store(bcx, info, get_meta(bcx, scratch.val));
751 // Always generate an lvalue datum, because this pointer doesn't own
752 // the data and cleanup is scheduled elsewhere.
753 DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
757 /// Translates `base.field`.
758 fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
761 -> DatumBlock<'blk, 'tcx, Expr> {
// Named field: resolve the field's index through the variant info.
762 trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
765 /// Translates `base.<idx>`.
766 fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
769 -> DatumBlock<'blk, 'tcx, Expr> {
// Tuple field: the numeric index is used directly.
770 trans_field(bcx, base, |_, _| idx)
773 fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
774 index_expr: &hir::Expr,
777 method_call: MethodCall)
778 -> DatumBlock<'blk, 'tcx, Expr> {
779 //! Translates `base[idx]`.
781 let _icx = push_ctxt("trans_index");
785 let index_expr_debug_loc = index_expr.debug_loc();
787 // Check for overloaded index.
788 let method_ty = ccx.tcx()
793 .map(|method| method.ty);
794 let elt_datum = match method_ty {
796 let method_ty = monomorphize_type(bcx, method_ty);
798 let base_datum = unpack_datum!(bcx, trans(bcx, base));
800 // Translate index expression.
801 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
803 let ref_ty = // invoked methods have LB regions instantiated:
804 bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
805 let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
807 bcx.tcx().sess.span_bug(index_expr.span,
808 "index method didn't return a \
809 dereferenceable type?!")
811 Some(elt_tm) => elt_tm.ty,
814 // Overloaded. Evaluate `trans_overloaded_op`, which will
815 // invoke the user's index() method, which basically yields
816 // a `&T` pointer. We can then proceed down the normal
817 // path (below) to dereference that `&T`.
818 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
820 trans_overloaded_op(bcx,
824 Some((ix_datum, idx.id)),
825 Some(SaveIn(scratch.val)),
827 let datum = scratch.to_expr_datum();
828 let lval = Lvalue::new("expr::trans_index overload");
829 if type_is_sized(bcx.tcx(), elt_ty) {
830 Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
832 Datum::new(datum.val, elt_ty, LvalueExpr(lval))
// Built-in indexing path: translate the base as an lvalue and
// index with an explicit bounds check.
836 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
840 // Translate index expression and cast to a suitable LLVM integer.
841 // Rust is less strict than LLVM in this regard.
842 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
843 let ix_val = ix_datum.to_llscalarish(bcx);
844 let ix_size = machine::llbitsize_of_real(bcx.ccx(),
846 let int_size = machine::llbitsize_of_real(bcx.ccx(),
// Widen (sign- or zero-extending per the index's signedness) or
// truncate the index to the target's int width.
849 if ix_size < int_size {
850 if expr_ty(bcx, idx).is_signed() {
851 SExt(bcx, ix_val, ccx.int_type())
852 } else { ZExt(bcx, ix_val, ccx.int_type()) }
853 } else if ix_size > int_size {
854 Trunc(bcx, ix_val, ccx.int_type())
860 let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
862 let (base, len) = base_datum.get_vec_base_and_len(bcx);
864 debug!("trans_index: base {}", bcx.val_to_string(base));
865 debug!("trans_index: len {}", bcx.val_to_string(len));
// Bounds check: branch to the failure handler when out of range;
// `llvm.expect.i1` marks the failing branch as unlikely.
867 let bounds_check = ICmp(bcx,
871 index_expr_debug_loc);
872 let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
873 let expected = Call(bcx,
875 &[bounds_check, C_bool(ccx, false)],
877 index_expr_debug_loc);
878 bcx = with_cond(bcx, expected, |bcx| {
879 controlflow::trans_fail_bounds_check(bcx,
880 expr_info(index_expr),
884 let elt = InBoundsGEP(bcx, base, &[ix_val]);
885 let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
886 let lval = Lvalue::new("expr::trans_index fallback");
887 Datum::new(elt, unit_ty, LvalueExpr(lval))
891 DatumBlock::new(bcx, elt_datum)
894 fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
895 ref_expr: &hir::Expr,
897 -> DatumBlock<'blk, 'tcx, Expr> {
898 //! Translates a reference to a path.
900 let _icx = push_ctxt("trans_def_lvalue");
// Fn items, methods and struct/variant constructors become fn-pointer
// datums; statics become lvalues over the global; consts should have
// been handled earlier (see trans/trans_into) and are a bug here.
902 Def::Fn(..) | Def::Method(..) |
903 Def::Struct(..) | Def::Variant(..) => {
904 let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
905 bcx.fcx.param_substs);
906 DatumBlock::new(bcx, datum.to_expr_datum())
908 Def::Static(did, _) => {
909 let const_ty = expr_ty(bcx, ref_expr);
910 let val = get_static_val(bcx.ccx(), did, const_ty);
911 let lval = Lvalue::new("expr::trans_def");
912 DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval)))
914 Def::Const(_) | Def::AssociatedConst(_) => {
915 bcx.sess().span_bug(ref_expr.span,
916 "constant expression should not reach expr::trans_def")
// Everything else is resolved as a local variable or upvar.
919 DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
/// Translates statement-like expressions (`while`, `loop`, `return`,
/// assignment, inline asm, ...) that produce no meaningful value; returns
/// only the resulting block.
924 fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
926 -> Block<'blk, 'tcx> {
928 let _icx = push_ctxt("trans_rvalue_stmt");
// Don't emit anything into a block already marked unreachable.
930 if bcx.unreachable.get() {
934 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
937 hir::ExprBreak(label_opt) => {
938 controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name))
940 hir::ExprType(ref e, _) => {
941 trans_into(bcx, &e, Ignore)
943 hir::ExprAgain(label_opt) => {
944 controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name))
946 hir::ExprRet(ref ex) => {
947 // Check to see if the return expression itself is reachable.
948 // This can occur when the inner expression contains a return
949 let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
950 cfg.node_is_reachable(expr.id)
956 controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
958 // If it's not reachable, just translate the inner expression
959 // directly. This avoids having to manage a return slot when
960 // it won't actually be used anyway.
961 if let &Some(ref x) = ex {
962 bcx = trans_into(bcx, &x, Ignore);
964 // Mark the end of the block as unreachable. Once we get to
965 // a return expression, there's no more we should be doing
971 hir::ExprWhile(ref cond, ref body, _) => {
972 controlflow::trans_while(bcx, expr, &cond, &body)
974 hir::ExprLoop(ref body, _) => {
975 controlflow::trans_loop(bcx, expr, &body)
977 hir::ExprAssign(ref dst, ref src) => {
978 let src_datum = unpack_datum!(bcx, trans(bcx, &src));
979 let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));
981 if bcx.fcx.type_needs_drop(dst_datum.ty) {
982 // If there are destructors involved, make sure we
983 // are copying from an rvalue, since that cannot possible
984 // alias an lvalue. We are concerned about code like:
992 // where e.g. a : Option<Foo> and a.b :
993 // Option<Foo>. In that case, freeing `a` before the
994 // assignment may also free `a.b`!
996 // We could avoid this intermediary with some analysis
997 // to determine whether `dst` may possibly own `src`.
998 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
999 let src_datum = unpack_datum!(
1000 bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
1001 let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
1002 let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
1004 // 1. Drop the data at the destination, passing the
1005 // drop-hint in case the lvalue has already been
1006 // dropped or moved.
1007 bcx = glue::drop_ty_core(bcx,
1014 // 2. We are overwriting the destination; ensure that
1015 // its drop-hint (if any) says "initialized."
1016 if let Some(hint_val) = opt_hint_val {
1017 let hint_llval = hint_val.value();
1018 let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
1019 Store(bcx, drop_needed, hint_llval);
1021 src_datum.store_to(bcx, dst_datum.val)
// No destructors: a plain store into the destination suffices.
1023 src_datum.store_to(bcx, dst_datum.val)
1026 hir::ExprAssignOp(op, ref dst, ref src) => {
// `a op= b` with an overloaded operator dispatches through the
// method map; otherwise it is translated as a built-in op.
1027 let has_method_map = bcx.tcx()
1031 .contains_key(&MethodCall::expr(expr.id));
1034 let dst = unpack_datum!(bcx, trans(bcx, &dst));
1035 let src_datum = unpack_datum!(bcx, trans(bcx, &src));
1036 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), dst,
1037 Some((src_datum, src.id)), None, false).bcx
1039 trans_assign_op(bcx, expr, op, &dst, &src)
1042 hir::ExprInlineAsm(ref a) => {
1043 asm::trans_inline_asm(bcx, a)
1046 bcx.tcx().sess.span_bug(
1048 &format!("trans_rvalue_stmt_unadjusted reached \
1049 fall-through case: {:?}",
// Translates an rvalue expression in destination-passing style (DPS):
// the result is written directly into `dest` (either a memory slot via
// `SaveIn`, or discarded via `Ignore`) instead of being returned as a datum.
// Dispatches on the HIR expression kind; overloaded binary/unary/index
// operators and overloaded calls are routed through trans_overloaded_op /
// trans_overloaded_call. Reaching the fall-through arm is a compiler bug.
// NOTE(review): this listing is sampled — original line numbers jump, so
// some arms/braces are not visible here.
1055 fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1058                                            -> Block<'blk, 'tcx> {
1059     let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
1062     debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
1065         hir::ExprType(ref e, _) => {
// Type ascription is transparent for translation: just translate the inner expr.
1066             trans_into(bcx, &e, dest)
1068         hir::ExprPath(..) => {
1069             trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
1071         hir::ExprIf(ref cond, ref thn, ref els) => {
1072             controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
1074         hir::ExprMatch(ref discr, ref arms, _) => {
1075             _match::trans_match(bcx, expr, &discr, &arms[..], dest)
1077         hir::ExprBlock(ref blk) => {
1078             controlflow::trans_block(bcx, &blk, dest)
1080         hir::ExprStruct(_, ref fields, ref base) => {
1083                          base.as_ref().map(|e| &**e),
1086                          node_id_type(bcx, expr.id),
1089         hir::ExprTup(ref args) => {
// Tuples reuse the struct machinery: fields are simply numbered 0..n.
1090             let numbered_fields: Vec<(usize, &hir::Expr)> =
1091                 args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
1095                       &numbered_fields[..],
1100         hir::ExprLit(ref lit) => {
// Only string literals are DPS; all other literals are immediate rvalues
// and should have been handled elsewhere.
1102                 ast::LitKind::Str(ref s, _) => {
1103                     tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
1108                         .span_bug(expr.span,
1109                                   "trans_rvalue_dps_unadjusted shouldn't be \
1110                                    translating this type of literal")
1114         hir::ExprVec(..) | hir::ExprRepeat(..) => {
1115             tvec::trans_fixed_vstore(bcx, expr, dest)
1117         hir::ExprClosure(_, ref decl, ref body) => {
1118             let dest = match dest {
1119                 SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
1120                 Ignore => closure::Dest::Ignore(bcx.ccx())
1123             // NB. To get the id of the closure, we don't use
1124             // `local_def_id(id)`, but rather we extract the closure
1125             // def-id from the expr's type. This is because this may
1126             // be an inlined expression from another crate, and we
1127             // want to get the ORIGINAL closure def-id, since that is
1128             // the key we need to find the closure-kind and
1129             // closure-type etc.
1130             let (def_id, substs) = match expr_ty(bcx, expr).sty {
1131                 ty::TyClosure(def_id, ref substs) => (def_id, substs),
1133                     bcx.tcx().sess.span_bug(
1135                         &format!("closure expr without closure type: {:?}", t)),
1138             closure::trans_closure_expr(dest,
1144                                         &expr.attrs).unwrap_or(bcx)
1146         hir::ExprCall(ref f, ref args) => {
1147             if bcx.tcx().is_method_call(expr.id) {
// `is_method_call` here means the call goes through an overloaded Fn* trait.
1148                 trans_overloaded_call(bcx,
1154                 callee::trans_call(bcx,
1157                                    callee::ArgExprs(&args[..]),
1161         hir::ExprMethodCall(_, _, ref args) => {
1162             callee::trans_method_call(bcx,
1165                                       callee::ArgExprs(&args[..]),
1168         hir::ExprBinary(op, ref lhs, ref rhs) => {
1169             // if not overloaded, would be RvalueDatumExpr
1170             let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
1171             let rhs_datum = unpack_datum!(bcx, trans(bcx, &rhs));
1172             trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
1173                                 Some((rhs_datum, rhs.id)), Some(dest),
1174                                 !rustc_front::util::is_by_value_binop(op.node)).bcx
1176         hir::ExprUnary(op, ref subexpr) => {
1177             // if not overloaded, would be RvalueDatumExpr
1178             let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
1179             trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
1180                                 arg, None, Some(dest), !rustc_front::util::is_by_value_unop(op)).bcx
1182         hir::ExprIndex(ref base, ref idx) => {
1183             // if not overloaded, would be RvalueDatumExpr
1184             let base = unpack_datum!(bcx, trans(bcx, &base));
1185             let idx_datum = unpack_datum!(bcx, trans(bcx, &idx));
1186             trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
1187                                 Some((idx_datum, idx.id)), Some(dest), true).bcx
1189         hir::ExprCast(..) => {
1190             // Trait casts used to come this way, now they should be coercions.
1191             bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
1193         hir::ExprAssignOp(op, _, _) => {
1194             bcx.tcx().sess.span_bug(
1196                 &format!("augmented assignment `{}=` should always be a rvalue_stmt",
1197                          rustc_front::util::binop_to_string(op.node)))
1200             bcx.tcx().sess.span_bug(
1202                 &format!("trans_rvalue_dps_unadjusted reached fall-through \
// Translates a path expression that resolves to a definition (enum variant
// or unit/dtor-struct constructor) in DPS style, writing the result to `dest`.
// An `Ignore` destination is a no-op since these constructions have no
// side effects. Non-DPS defs reaching this function are a compiler bug.
1209 fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1210                                         ref_expr: &hir::Expr,
1213                                         -> Block<'blk, 'tcx> {
1214     let _icx = push_ctxt("trans_def_dps_unadjusted");
1216     let lldest = match dest {
1217         SaveIn(lldest) => lldest,
1218         Ignore => { return bcx; }
1222         Def::Variant(tid, vid) => {
1223             let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
1224             if let ty::VariantKind::Tuple = variant.kind() {
// A tuple variant used as a value is its constructor function: store a fn ptr.
1226                 let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
1227                                                 ExprId(ref_expr.id),
1228                                                 bcx.fcx.param_substs).val;
1229                 Store(bcx, llfn, lldest);
// Otherwise (unit-like variant): just write the discriminant into place.
1233                 let ty = expr_ty(bcx, ref_expr);
1234                 let repr = adt::represent_type(bcx.ccx(), ty);
1235                 adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
1239         Def::Struct(..) => {
1240             let ty = expr_ty(bcx, ref_expr);
1242                 ty::TyStruct(def, _) if def.has_dtor() => {
// Structs with a destructor carry a drop flag encoded via the discriminant.
1243                     let repr = adt::represent_type(bcx.ccx(), ty);
1244                     adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
// NOTE(review): "referened" below is a typo ("referenced") in a panic
// message; fixing it would change a runtime string, so it is only flagged here.
1251             bcx.tcx().sess.span_bug(ref_expr.span, &format!(
1252                 "Non-DPS def {:?} referened by {}",
1253                 def, bcx.node_id_to_string(ref_expr.id)));
// Translates a reference to a function-like definition (fn item, tuple-struct
// or tuple-variant constructor, inherent or trait method) into an rvalue
// datum holding the function pointer. Trait methods go through
// trans_static_method_callee to resolve the concrete impl.
1258 pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1259                                          ref_expr: &hir::Expr,
1261                                          param_substs: &'tcx Substs<'tcx>)
1262                                          -> Datum<'tcx, Rvalue> {
1263     let _icx = push_ctxt("trans_def_datum_unadjusted");
1267         Def::Struct(did) | Def::Variant(_, did) => {
1268             callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
1270         Def::Method(method_did) => {
1271             match ccx.tcx().impl_or_trait_item(method_did).container() {
1272                 ty::ImplContainer(_) => {
1273                     callee::trans_fn_ref(ccx, method_did,
1274                                          ExprId(ref_expr.id),
1277                 ty::TraitContainer(trait_did) => {
1278                     meth::trans_static_method_callee(ccx, method_did,
1279                                                      trait_did, ref_expr.id,
1285             ccx.tcx().sess.span_bug(ref_expr.span, &format!(
1286                 "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
///
/// Upvars are looked up in `fcx.llupvars`, locals/args in `fcx.lllocals`;
/// a missing entry in either map is a compiler bug. Any other def kind
/// reaching this function is unimplemented.
1294 pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1296                                    -> Datum<'tcx, Lvalue> {
1297     let _icx = push_ctxt("trans_local_var");
1300         Def::Upvar(_, nid, _, _) => {
1301             // Can't move upvars, so this is never a ZeroMemLastUse.
1302             let local_ty = node_id_type(bcx, nid);
1303             let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
1304                                              bcx, nid, HintKind::ZeroAndMaintain);
1305             match bcx.fcx.llupvars.borrow().get(&nid) {
1306                 Some(&val) => Datum::new(val, local_ty, lval),
1308                     bcx.sess().bug(&format!(
1309                         "trans_local_var: no llval for upvar {} found",
1314         Def::Local(_, nid) => {
1315             let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
1318                     bcx.sess().bug(&format!(
1319                         "trans_local_var: no datum for local/arg {} found",
1323             debug!("take_local(nid={}, v={}, ty={})",
1324                    nid, bcx.val_to_string(datum.val), datum.ty);
1328             bcx.sess().unimpl(&format!(
1329                 "unsupported def type in trans_local_var: {:?}",
// Translates a struct literal `S { f: e, .., ..base }` by mapping named
// fields to their indices (via VariantInfo), computing which fields must be
// filled in from the functional-record-update base (if any), and delegating
// the actual construction to trans_adt. Missing fields without a base
// expression indicate a type-checking bug.
1335 fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1336                             fields: &[hir::Field],
1337                             base: Option<&hir::Expr>,
1338                             expr_span: codemap::Span,
1339                             expr_id: ast::NodeId,
1341                             dest: Dest) -> Block<'blk, 'tcx> {
1342     let _icx = push_ctxt("trans_rec");
1344     let tcx = bcx.tcx();
1345     let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
// need_base[i] is true until field i is explicitly initialized below.
1347     let mut need_base = vec![true; vinfo.fields.len()];
1349     let numbered_fields = fields.iter().map(|field| {
1350         let pos = vinfo.field_index(field.name.node);
1351         need_base[pos] = false;
1353     }).collect::<Vec<_>>();
1355     let optbase = match base {
1356         Some(base_expr) => {
// Collect the (index, type) pairs the base expression must supply.
1357             let mut leftovers = Vec::new();
1358             for (i, b) in need_base.iter().enumerate() {
1360                     leftovers.push((i, vinfo.fields[i].1));
1363             Some(StructBaseInfo {expr: base_expr,
1364                                  fields: leftovers })
1367             if need_base.iter().any(|b| *b) {
1368                 tcx.sess.span_bug(expr_span, "missing fields and no base expr")
1380               DebugLoc::At(expr_id, expr_span))
/// Information that `trans_adt` needs in order to fill in the fields
/// of a struct copied from a base struct (e.g., from an expression
/// like `Foo { a: b, ..base }`.
/// Note that `fields` may be empty; the base expression must always be
/// evaluated for side-effects.
1389 pub struct StructBaseInfo<'a, 'tcx> {
1390     /// The base expression; will be evaluated after all explicit fields.
1391     expr: &'a hir::Expr,
1392     /// The indices of fields to copy paired with their types.
1393     fields: Vec<(usize, Ty<'tcx>)>
/// Constructs an ADT instance:
///
/// - `fields` should be a list of field indices paired with the
/// expression to store into that field. The initializers will be
/// evaluated in the order specified by `fields`.
/// - `optbase` contains information on the base struct (if any) from
/// which remaining fields are copied; see comments on `StructBaseInfo`.
///
/// Three construction strategies are used below: SIMD types are built in
/// a register via `insertelement`; functional record update (FRU) evaluates
/// explicit fields to scratch values BEFORE the base (issue 23112
/// order-of-eval); otherwise fields are written directly into `dest`.
/// A custom cleanup scope covers partially-initialized state in case an
/// initializer panics.
1404 pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1407                                  fields: &[(usize, &hir::Expr)],
1408                                  optbase: Option<StructBaseInfo<'a, 'tcx>>,
1410                                  debug_location: DebugLoc)
1411                                  -> Block<'blk, 'tcx> {
1412     let _icx = push_ctxt("trans_adt");
1414     let repr = adt::represent_type(bcx.ccx(), ty);
1416     debug_location.apply(bcx.fcx);
1418     // If we don't care about the result, just make a
1419     // temporary stack slot
1420     let addr = match dest {
1423             let llresult = alloc_ty(bcx, ty, "temp");
1424             call_lifetime_start(bcx, llresult);
1429     debug!("trans_adt");
1431     // This scope holds intermediates that must be cleaned should
1432     // panic occur before the ADT as a whole is ready.
1433     let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1436         // Issue 23112: The original logic appeared vulnerable to same
1437         // order-of-eval bug. But, SIMD values are tuple-structs;
1438         // i.e. functional record update (FRU) syntax is unavailable.
1440         // To be safe, double-check that we did not get here via FRU.
1441         assert!(optbase.is_none());
1443         // This is the constructor of a SIMD type, such types are
1444         // always primitive machine types and so do not have a
1445         // destructor or require any clean-up.
1446         let llty = type_of::type_of(bcx.ccx(), ty);
1448         // keep a vector as a register, and running through the field
1449         // `insertelement`ing them directly into that register
1450         // (i.e. avoid GEPi and `store`s to an alloca) .
1451         let mut vec_val = C_undef(llty);
1453         for &(i, ref e) in fields {
1454             let block_datum = trans(bcx, &e);
1455             bcx = block_datum.bcx;
1456             let position = C_uint(bcx.ccx(), i);
1457             let value = block_datum.datum.to_llscalarish(bcx);
1458             vec_val = InsertElement(bcx, vec_val, value, position);
1460         Store(bcx, vec_val, addr);
1461     } else if let Some(base) = optbase {
1462         // Issue 23112: If there is a base, then order-of-eval
1463         // requires field expressions eval'ed before base expression.
1465         // First, trans field expressions to temporary scratch values.
1466         let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
1467             let datum = unpack_datum!(bcx, trans(bcx, &e));
1471         debug_location.apply(bcx.fcx);
1473         // Second, trans the base to the dest.
1474         assert_eq!(discr, Disr(0));
1476         let addr = adt::MaybeSizedValue::sized(addr);
1477         match expr_kind(bcx.tcx(), &base.expr) {
// If the whole type needs no drop, the base can be written straight into
// dest; otherwise copy it field-by-field from an lvalue so cleanups stay correct.
1478             ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
1479                 bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
1481             ExprKind::RvalueStmt => {
1482                 bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
1485                 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
1486                 for &(i, t) in &base.fields {
1487                     let datum = base_datum.get_element(
1488                             bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
1489                     assert!(type_is_sized(bcx.tcx(), datum.ty));
1490                     let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
1491                     bcx = datum.store_to(bcx, dest);
1496         // Finally, move scratch field values into actual field locations
1497         for (i, datum) in scratch_vals {
1498             let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
1499             bcx = datum.store_to(bcx, dest);
1502         // No base means we can write all fields directly in place.
1503         let addr = adt::MaybeSizedValue::sized(addr);
1504         for &(i, ref e) in fields {
1505             let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
1506             let e_ty = expr_ty_adjusted(bcx, &e);
1507             bcx = trans_into(bcx, &e, SaveIn(dest));
1508             let scope = cleanup::CustomScope(custom_cleanup_scope);
1509             fcx.schedule_lifetime_end(scope, dest);
1510             // FIXME: nonzeroing move should generalize to fields
1511             fcx.schedule_drop_mem(scope, dest, e_ty, None);
1515         adt::trans_set_discr(bcx, &repr, addr, discr);
1517     fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1519     // If we don't care about the result drop the temporary we made
1523         bcx = glue::drop_ty(bcx, addr, ty, debug_location);
1524         base::call_lifetime_end(bcx, addr);
// Translates a non-string literal into an immediate rvalue datum, using
// the constant folder to produce the LLVM value.
1531 fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1534                                    -> DatumBlock<'blk, 'tcx, Expr> {
1535     // must not be a string constant, that is a RvalueDpsExpr
1536     let _icx = push_ctxt("trans_immediate_lit");
1537     let ty = expr_ty(bcx, expr);
1538     let v = consts::const_lit(bcx.ccx(), expr, lit);
1539     immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
// Translates a non-overloaded unary operation (`!`, `-`, `*`) to a datum.
// Signed integer negation gets a runtime overflow check (negating the
// minimum value panics) when overflow checks are enabled. Overloaded
// deref is the only overloaded operator handled here, via deref_once.
1542 fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1545                            sub_expr: &hir::Expr)
1546                            -> DatumBlock<'blk, 'tcx, Expr> {
1547     let ccx = bcx.ccx();
1549     let _icx = push_ctxt("trans_unary_datum");
1551     let method_call = MethodCall::expr(expr.id);
1553     // The only overloaded operator that is translated to a datum
1554     // is an overloaded deref, since it is always yields a `&T`.
1555     // Otherwise, we should be in the RvalueDpsExpr path.
1556     assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
1558     let un_ty = expr_ty(bcx, expr);
1560     let debug_loc = expr.debug_loc();
1564             let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1565             let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
1566             immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
1569             let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1570             let val = datum.to_llscalarish(bcx);
1571             let (bcx, llneg) = {
1573                     let result = FNeg(bcx, val, debug_loc);
1576                     let is_signed = un_ty.is_signed();
1577                     let result = Neg(bcx, val, debug_loc);
1578                     let bcx = if bcx.ccx().check_overflow() && is_signed {
// Negating the minimum signed value overflows; trap it explicitly.
1579                         let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
1580                         let is_min = ICmp(bcx, llvm::IntEQ, val,
1581                                           C_integral(llty, min, true), debug_loc);
1582                         with_cond(bcx, is_min, |bcx| {
1583                             let msg = InternedString::new(
1584                                 "attempted to negate with overflow");
1585                             controlflow::trans_fail(bcx, expr_info(expr), msg)
1593             immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
1596             let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1597             deref_once(bcx, expr, datum, method_call)
// Translates `box contents`: allocates on the exchange heap, translates the
// contents directly into the allocation, and schedules a temporary cleanup
// that frees the box if translating the contents panics. Zero-sized types
// skip the allocation/free entirely.
1602 fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1603                                box_expr: &hir::Expr,
1605                                contents: &hir::Expr,
1606                                contents_ty: Ty<'tcx>)
1607                                -> DatumBlock<'blk, 'tcx, Expr> {
1608     let _icx = push_ctxt("trans_uniq_expr");
1610     assert!(type_is_sized(bcx.tcx(), contents_ty));
1611     let llty = type_of::type_of(bcx.ccx(), contents_ty);
1612     let size = llsize_of(bcx.ccx(), llty);
1613     let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
1614     let llty_ptr = llty.ptr_to();
1615     let Result { bcx, val } = malloc_raw_dyn(bcx,
1620                                              box_expr.debug_loc());
1621     // Unique boxes do not allocate for zero-size types. The standard library
1622     // may assume that `free` is never called on the pointer returned for
1623     // `Box<ZeroSizeType>`.
1624     let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
1625         trans_into(bcx, contents, SaveIn(val))
// Non-ZST: free the allocation on unwind until the box is fully initialized.
1627         let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1628         fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
1629                                 val, cleanup::HeapExchange, contents_ty);
1630         let bcx = trans_into(bcx, contents, SaveIn(val));
1631         fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1634     immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
// Translates `&subexpr` / `&mut subexpr`: forces the operand into an lvalue
// (scheduling cleanup as needed) and yields its address. Unsized operands
// produce an lvalue (fat-pointer) datum; sized ones an immediate thin pointer.
1637 fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1639                              subexpr: &hir::Expr)
1640                              -> DatumBlock<'blk, 'tcx, Expr> {
1641     let _icx = push_ctxt("trans_addr_of");
1643     let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
1644     let ty = expr_ty(bcx, expr);
1645     if !type_is_sized(bcx.tcx(), sub_datum.ty) {
1646         // Always generate an lvalue datum, because this pointer doesn't own
1647         // the data and cleanup is scheduled elsewhere.
1648         DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
1650         // Sized value, ref to a thin pointer
1651         immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
// Translates a non-overloaded binary operation on scalar operands into the
// matching LLVM instruction. Integer add/sub/mul/shl/shr are routed through
// with_overflow_check; integer div/rem get a divide-by-zero/overflow check;
// float % on 32-bit MSVC is special-cased to call `fmod` directly (see the
// long comment below). Comparisons delegate to compare_scalar_types.
1655 fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1656                                   binop_expr: &hir::Expr,
1659                                   lhs: Datum<'tcx, Rvalue>,
1660                                   rhs: Datum<'tcx, Rvalue>)
1661                                   -> DatumBlock<'blk, 'tcx, Expr>
1663     let _icx = push_ctxt("trans_scalar_binop");
1665     let tcx = bcx.tcx();
1667     assert!(!lhs_t.is_simd());
1668     let is_float = lhs_t.is_fp();
1669     let is_signed = lhs_t.is_signed();
1670     let info = expr_info(binop_expr);
1672     let binop_debug_loc = binop_expr.debug_loc();
1675     let lhs = lhs.to_llscalarish(bcx);
1676     let rhs = rhs.to_llscalarish(bcx);
1677     let val = match op.node {
1680                 FAdd(bcx, lhs, rhs, binop_debug_loc)
1682                 let (newbcx, res) = with_overflow_check(
1683                     bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
1690                 FSub(bcx, lhs, rhs, binop_debug_loc)
1692                 let (newbcx, res) = with_overflow_check(
1693                     bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
1700                 FMul(bcx, lhs, rhs, binop_debug_loc)
1702                 let (newbcx, res) = with_overflow_check(
1703                     bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
1710                 FDiv(bcx, lhs, rhs, binop_debug_loc)
1712                 // Only zero-check integers; fp /0 is NaN
1713                 bcx = base::fail_if_zero_or_overflows(bcx,
1714                                                       expr_info(binop_expr),
1720                     SDiv(bcx, lhs, rhs, binop_debug_loc)
1722                     UDiv(bcx, lhs, rhs, binop_debug_loc)
1728                 // LLVM currently always lowers the `frem` instructions appropriate
1729                 // library calls typically found in libm. Notably f64 gets wired up
1730                 // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for
1731                 // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's
1732                 // instead just an inline function in a header that goes up to a
1733                 // f64, uses `fmod`, and then comes back down to a f32.
1735                 // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will
1736                 // still unconditionally lower frem instructions over 32-bit floats
1737                 // to a call to `fmodf`. To work around this we special case MSVC
1738                 // 32-bit float rem instructions and instead do the call out to
1739                 // `fmod` ourselves.
1741                 // Note that this is currently duplicated with src/libcore/ops.rs
1742                 // which does the same thing, and it would be nice to perhaps unify
1743                 // these two implementations on day! Also note that we call `fmod`
1744                 // for both 32 and 64-bit floats because if we emit any FRem
1745                 // instruction at all then LLVM is capable of optimizing it into a
1746                 // 32-bit FRem (which we're trying to avoid).
1747                 let use_fmod = tcx.sess.target.target.options.is_like_msvc &&
1748                                tcx.sess.target.target.arch == "x86";
1750                     let f64t = Type::f64(bcx.ccx());
1751                     let fty = Type::func(&[f64t, f64t], &f64t);
1752                     let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty,
1754                     if lhs_t == tcx.types.f32 {
// f32 operands are widened to f64 for the `fmod` call, then truncated back.
1755                         let lhs = FPExt(bcx, lhs, f64t);
1756                         let rhs = FPExt(bcx, rhs, f64t);
1757                         let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
1758                         FPTrunc(bcx, res, Type::f32(bcx.ccx()))
1760                         Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc)
1763                     FRem(bcx, lhs, rhs, binop_debug_loc)
1766                 // Only zero-check integers; fp %0 is NaN
1767                 bcx = base::fail_if_zero_or_overflows(bcx,
1768                                                       expr_info(binop_expr),
1769                                                       op, lhs, rhs, lhs_t);
1771                     SRem(bcx, lhs, rhs, binop_debug_loc)
1773                     URem(bcx, lhs, rhs, binop_debug_loc)
1777         hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
1778         hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
1779         hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
1781             let (newbcx, res) = with_overflow_check(
1782                 bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
1787             let (newbcx, res) = with_overflow_check(
1788                 bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
1792         hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
1793             base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
1796             bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1800     immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
// refinement types would obviate the need for this
// Distinguishes `&&` from `||` for trans_lazy_binop's branch wiring.
1804 enum lazy_binop_ty {
// Translates short-circuiting `&&`/`||`: evaluates the LHS, conditionally
// branches to a block that evaluates the RHS, and joins the two values with
// a phi node. Unreachable paths after either operand short-circuit the
// block construction entirely.
1809 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1810                                 binop_expr: &hir::Expr,
1814                                 -> DatumBlock<'blk, 'tcx, Expr> {
1815     let _icx = push_ctxt("trans_lazy_binop");
1816     let binop_ty = expr_ty(bcx, binop_expr);
1819     let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1820     let lhs = lhs.to_llscalarish(past_lhs);
1822     if past_lhs.unreachable.get() {
1823         return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1826     let join = fcx.new_id_block("join", binop_expr.id);
1827     let before_rhs = fcx.new_id_block("before_rhs", b.id);
// `&&` evaluates the RHS when LHS is true; `||` when LHS is false.
1830         lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1831         lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1834     let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1835     let rhs = rhs.to_llscalarish(past_rhs);
1837     if past_rhs.unreachable.get() {
// RHS never falls through: the join block only receives the LHS value.
1838         return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1841     Br(past_rhs, join.llbb, DebugLoc::None);
1842     let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1843                   &[past_lhs.llbb, past_rhs.llbb]);
1845     return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
// Entry point for non-overloaded binary expressions: lazy `&&`/`||` go to
// trans_lazy_binop; everything else evaluates both operands to rvalues and
// goes to trans_scalar_binop, with a special comparison path for fat
// pointers (which must be compared component-wise).
1848 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1853                             -> DatumBlock<'blk, 'tcx, Expr> {
1854     let _icx = push_ctxt("trans_binary");
1855     let ccx = bcx.ccx();
1857     // if overloaded, would be RvalueDpsExpr
1858     assert!(!ccx.tcx().is_method_call(expr.id));
1862             trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1865             trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
1869             let binop_ty = expr_ty(bcx, expr);
1871             let lhs = unpack_datum!(bcx, trans(bcx, lhs));
1872             let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
1873             debug!("trans_binary (expr {}): lhs={}",
1874                    expr.id, lhs.to_string(ccx));
1875             let rhs = unpack_datum!(bcx, trans(bcx, rhs));
1876             let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
1877             debug!("trans_binary (expr {}): rhs={}",
1878                    expr.id, rhs.to_string(ccx));
1880             if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
// Built-in fat-pointer binops are comparisons only, so the result is bool.
1881                 assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
1882                         "built-in binary operators on fat pointers are homogeneous");
1883                 assert_eq!(binop_ty, bcx.tcx().types.bool);
1884                 let val = base::compare_scalar_types(
1891                 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1893                 assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
1894                         "built-in binary operators on fat pointers are homogeneous");
1895                 trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
// Translates an overloaded operator (binary, unary, index, deref, or
// compound assignment) as a method call via trans_call_inner, passing the
// lhs (receiver) and optional rhs as ArgOverloadedOp.
1901 fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1903                                    method_call: MethodCall,
1904                                    lhs: Datum<'tcx, Expr>,
1905                                    rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
1908                                    -> Result<'blk, 'tcx> {
1909     callee::trans_call_inner(bcx,
1911                              |bcx, arg_cleanup_scope| {
1912                                  meth::trans_method_callee(bcx,
1917                              callee::ArgOverloadedOp(lhs, rhs, autoref),
// Translates a call expression whose callee uses an overloaded Fn/FnMut/
// FnOnce trait: the callee expression becomes the receiver and the original
// arguments follow it in `all_args`.
1921 fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1923                                          callee: &'a hir::Expr,
1924                                          args: &'a [P<hir::Expr>],
1926                                          -> Block<'blk, 'tcx> {
1927     debug!("trans_overloaded_call {}", expr.id);
1928     let method_call = MethodCall::expr(expr.id);
1929     let mut all_args = vec!(callee);
1930     all_args.extend(args.iter().map(|e| &**e));
1932     callee::trans_call_inner(bcx,
1934                              |bcx, arg_cleanup_scope| {
1935                                  meth::trans_method_callee(
1941                              callee::ArgOverloadedCall(all_args),
// Returns whether a cast from `t_in` to `t_out` requires no code: either
// type-check recorded it as a coercion cast, or both sides deref to the
// same pointee type (e.g. `*const T as *mut T`).
1946 pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
1951     if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
1955     match (t_in.builtin_deref(true, ty::NoPreference),
1956            t_out.builtin_deref(true, ty::NoPreference)) {
1957         (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
1961             // This condition isn't redundant with the check for CoercionCast:
1962             // different types can be substituted into the same type, and
1963             // == equality can be overconservative if there are regions.
// Translates an `as` cast producing an immediate value. Handles the no-op
// case, fat-pointer source casts (fat→fat is a pointer cast; fat→thin loads
// the data pointer), then picks the LLVM cast instruction from the
// (input, output) CastTy pair. C-like enum sources are first lowered to
// their discriminant value.
1969 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1972                               -> DatumBlock<'blk, 'tcx, Expr>
1974     use middle::ty::cast::CastTy::*;
1975     use middle::ty::cast::IntTy::*;
// Helper: integer→integer cast choosing bitcast/trunc/sext/zext by widths
// and signedness.
1977     fn int_cast(bcx: Block,
1984         let _icx = push_ctxt("int_cast");
1985         let srcsz = llsrctype.int_width();
1986         let dstsz = lldsttype.int_width();
1987         return if dstsz == srcsz {
1988             BitCast(bcx, llsrc, lldsttype)
1989         } else if srcsz > dstsz {
1990             TruncOrBitCast(bcx, llsrc, lldsttype)
1992             SExtOrBitCast(bcx, llsrc, lldsttype)
1994             ZExtOrBitCast(bcx, llsrc, lldsttype)
// Helper: float→float cast choosing fpext/fptrunc by widths.
1998     fn float_cast(bcx: Block,
2004         let _icx = push_ctxt("float_cast");
2005         let srcsz = llsrctype.float_width();
2006         let dstsz = lldsttype.float_width();
2007         return if dstsz > srcsz {
2008             FPExt(bcx, llsrc, lldsttype)
2009         } else if srcsz > dstsz {
2010             FPTrunc(bcx, llsrc, lldsttype)
2014     let _icx = push_ctxt("trans_cast");
2016     let ccx = bcx.ccx();
2018     let t_in = expr_ty_adjusted(bcx, expr);
2019     let t_out = node_id_type(bcx, id);
2021     debug!("trans_cast({:?} as {:?})", t_in, t_out);
2022     let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
2023     let ll_t_out = type_of::arg_type_of(ccx, t_out);
2024     // Convert the value to be cast into a ValueRef, either by-ref or
2025     // by-value as appropriate given its type:
2026     let mut datum = unpack_datum!(bcx, trans(bcx, expr));
2028     let datum_ty = monomorphize_type(bcx, datum.ty);
2030     if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
2032         return DatumBlock::new(bcx, datum);
2035     if type_is_fat_ptr(bcx.tcx(), t_in) {
2036         assert!(datum.kind.is_by_ref());
2037         if type_is_fat_ptr(bcx.tcx(), t_out) {
2038             return DatumBlock::new(bcx, Datum::new(
2039                 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
2042             )).to_expr_datumblock();
2044             // Return the address
2045             return immediate_rvalue_bcx(bcx,
2047                                         Load(bcx, get_dataptr(bcx, datum.val)),
2049                                         t_out).to_expr_datumblock();
2053     let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
2054     let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
2056     let (llexpr, signed) = if let Int(CEnum) = r_t_in {
// C-like enum: read the discriminant and cast from that integer instead.
2057         let repr = adt::represent_type(ccx, t_in);
2058         let datum = unpack_datum!(
2059             bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
2060         let llexpr_ptr = datum.to_llref();
2061         let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
2062                                          Some(Type::i64(ccx)), true);
2063         ll_t_in = val_ty(discr);
2064         (discr, adt::is_discr_signed(&repr))
2066         (datum.to_llscalarish(bcx), t_in.is_signed())
2069     let newval = match (r_t_in, r_t_out) {
2070         (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
2071             PointerCast(bcx, llexpr, ll_t_out)
2073         (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
2074         (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
2076         (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
2077         (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
2078         (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
2079         (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
2080         (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
2081         (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
2083         _ => ccx.sess().span_bug(expr.span,
2084                                  &format!("translating unsupported cast: \
2090     return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
// Translates a non-overloaded compound assignment (`+=`, `-=`, ...):
// evaluates the destination as an lvalue, loads its current value, evaluates
// the RHS, performs the scalar binop, and stores the result back in place.
// Drop-needing destination types are asserted away (scalars only).
2093 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2098                                -> Block<'blk, 'tcx> {
2099     let _icx = push_ctxt("trans_assign_op");
2102     debug!("trans_assign_op(expr={:?})", expr);
2104     // User-defined operator methods cannot be used with `+=` etc right now
2105     assert!(!bcx.tcx().is_method_call(expr.id));
2107     // Evaluate LHS (destination), which should be an lvalue
2108     let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
2109     assert!(!bcx.fcx.type_needs_drop(dst.ty));
2110     let lhs = load_ty(bcx, dst.val, dst.ty);
2111     let lhs = immediate_rvalue(lhs, dst.ty);
2113     // Evaluate RHS - FIXME(#28160) this sucks
2114     let rhs = unpack_datum!(bcx, trans(bcx, &src));
2115     let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
2117     // Perform computation and store the result
2118     let result_datum = unpack_datum!(
2119         bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
2120     return result_datum.store_to(bcx, dst.val);
// Takes a reference to a datum, forcing it into an lvalue first so cleanup
// is scheduled, and returns a pointer datum to it. The resulting region/
// mutability are deliberately loose (ReStatic, immutable) since trans does
// not care about them.
2123 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2124                         datum: Datum<'tcx, Expr>,
2126                         -> DatumBlock<'blk, 'tcx, Expr> {
2129     // Ensure cleanup of `datum` if not already scheduled and obtain
2130     // a "by ref" pointer.
2131     let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2133     // Compute final type. Note that we are loose with the region and
2134     // mutability, since those things don't matter in trans.
2135     let referent_ty = lv_datum.ty;
2136     let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2138     // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
2139     // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers
2140     // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of
2141     // indirection and for thin pointers, this has no ill effects.
2142     let kind = if type_is_sized(bcx.tcx(), referent_ty) {
2143         RvalueExpr(Rvalue::new(ByValue))
2145         LvalueExpr(lv_datum.kind)
2149     let llref = lv_datum.to_llref();
2150     DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
// Applies `deref_once` repeatedly for autoderef adjustments, using a
// distinct MethodCall key per level so each overloaded deref resolves to
// its own method instance.
2153 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2155                               datum: Datum<'tcx, Expr>,
2157                               -> DatumBlock<'blk, 'tcx, Expr> {
2159     let mut datum = datum;
2161         let method_call = MethodCall::autoderef(expr.id, i as u32);
2162         datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2164     DatumBlock { bcx: bcx, datum: datum }
// Performs a single dereference of `datum`. If the tcx recorded an
// overloaded deref for `method_call`, the user's Deref impl is invoked
// (yielding a `&T` that is then dereferenced by the normal path below);
// otherwise Box, raw pointer, and reference types are dereferenced
// directly. Always yields an lvalue datum, since the pointee is owned /
// cleaned up elsewhere.
2167 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2169                           datum: Datum<'tcx, Expr>,
2170                           method_call: MethodCall)
2171                           -> DatumBlock<'blk, 'tcx, Expr> {
2172     let ccx = bcx.ccx();
2174     debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
2176            datum.to_string(ccx),
2181     // Check for overloaded deref.
2182     let method_ty = ccx.tcx()
2186                        .get(&method_call).map(|method| method.ty);
2188     let datum = match method_ty {
2189         Some(method_ty) => {
2190             let method_ty = monomorphize_type(bcx, method_ty);
2192             // Overloaded. Evaluate `trans_overloaded_op`, which will
2193             // invoke the user's deref() method, which basically
2194             // converts from the `Smaht<T>` pointer that we have into
2195             // a `&T` pointer. We can then proceed down the normal
2196             // path (below) to dereference that `&T`.
2197             let datum = if method_call.autoderef == 0 {
2200                 // Always perform an AutoPtr when applying an overloaded auto-deref
2201                 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
2204             let ref_ty = // invoked methods have their LB regions instantiated
2205                 ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
2206             let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2208             unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
2209                                                     datum, None, Some(SaveIn(scratch.val)),
2211             scratch.to_expr_datum()
2214             // Not overloaded. We already have a pointer we know how to deref.
2219     let r = match datum.ty.sty {
2220         ty::TyBox(content_ty) => {
2221             // Make sure we have an lvalue datum here to get the
2222             // proper cleanups scheduled
2223             let datum = unpack_datum!(
2224                 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2226             if type_is_sized(bcx.tcx(), content_ty) {
2227                 let ptr = load_ty(bcx, datum.val, datum.ty);
2228                 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
2230                 // A fat pointer and a DST lvalue have the same representation
2231                 // just different types. Since there is no temporary for `*e`
2232                 // here (because it is unsized), we cannot emulate the sized
2233                 // object code path for running drop glue and free. Instead,
2234                 // we schedule cleanup for `e`, turning it into an lvalue.
2236                 let lval = Lvalue::new("expr::deref_once ty_uniq");
2237                 let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
2238                 DatumBlock::new(bcx, datum)
2242         ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
2243         ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
2244             let lval = Lvalue::new("expr::deref_once ptr");
2245             if type_is_sized(bcx.tcx(), content_ty) {
2246                 let ptr = datum.to_llscalarish(bcx);
2248                 // Always generate an lvalue datum, even if datum.mode is
2249                 // an rvalue. This is because datum.mode is only an
2250                 // rvalue for non-owning pointers like &T or *T, in which
2251                 // case cleanup *is* scheduled elsewhere, by the true
2252                 // owner (or, in the case of *T, by the user).
2253                 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
2255                 // A fat pointer and a DST lvalue have the same representation
2256                 // just different types.
2257                 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
2262             bcx.tcx().sess.span_bug(
2264                 &format!("deref invoked on expr of invalid type {:?}",
2269     debug!("deref_once(expr={}, method_call={:?}, result={})",
2270            expr.id, method_call, r.datum.to_string(ccx));
// Chooses how overflow checking is code-generated for this op:
// Add/Sub/Mul lower to LLVM's `*.with.overflow` intrinsics, while
// Shl/Shr are checked by validating the shift-amount *input* instead
// (see OverflowOpViaInputCheck::build_with_input_check).
// NOTE(review): this extraction is missing interior lines (e.g. the
// `match` opener and closing braces) -- confirm against the original
// source before relying on the exact text here.
2285 fn codegen_strategy(&self) -> OverflowCodegen {
2286 use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
2288 OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2289 OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2290 OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
// Shifts have no LLVM with-overflow intrinsic; they use an input check.
2292 OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2293 OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
// The two lowering strategies for a checked arithmetic operation;
// produced by OverflowOp::codegen_strategy and consumed by
// with_overflow_check.
// NOTE(review): the closing brace line appears to be missing from this
// extraction.
2298 enum OverflowCodegen {
// Lower via an LLVM `*.with.overflow` intrinsic (add/sub/mul).
2299 ViaIntrinsic(OverflowOpViaIntrinsic),
// Lower by checking the shift-amount operand before shifting (shl/shr).
2300 ViaInputCheck(OverflowOpViaInputCheck),
// Shift operations whose overflow is detected by checking the RHS
// (shift amount) for bits outside the always-valid range, rather than
// via an intrinsic. See build_with_input_check below.
2303 enum OverflowOpViaInputCheck { Shl, Shr, }
// Operations lowered through LLVM's `llvm.{s,u}{add,sub,mul}.with.overflow.iN`
// intrinsics, which return the result together with an i1 overflow flag.
2306 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
// Lowering of checked add/sub/mul via LLVM overflow intrinsics.
// NOTE(review): this extraction is missing interior lines (several
// match arms, parameter lines, and closing braces) -- verify against
// the original file before editing.
2308 impl OverflowOpViaIntrinsic {
// Looks up the LLVM intrinsic ValueRef matching this op and the LHS type.
2309 fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2310 let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2311 bcx.ccx().get_intrinsic(&name)
// Maps (op, integer type) to the concrete intrinsic name. `isize`/`usize`
// are first rewritten to the target's pointer-width fixed integer type.
2313 fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
2314 use syntax::ast::IntTy::*;
2315 use syntax::ast::UintTy::*;
2316 use middle::ty::{TyInt, TyUint};
// Normalize pointer-sized ints to I32/I64 (resp. U32/U64) based on the
// target's pointer width; other int types pass through unchanged.
2318 let new_sty = match ty.sty {
2319 TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
// NOTE(review): the "32"/"64" arms for the signed case appear to be
// missing from this extraction (cf. the unsigned arm below).
2322 _ => panic!("unsupported target word size")
2324 TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
2325 "32" => TyUint(U32),
2326 "64" => TyUint(U64),
2327 _ => panic!("unsupported target word size")
2329 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
2330 _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
// Signed ops use the `s*` intrinsics, unsigned the `u*` ones; the iN
// suffix matches the (normalized) integer width.
2335 OverflowOpViaIntrinsic::Add => match new_sty {
2336 TyInt(I8) => "llvm.sadd.with.overflow.i8",
2337 TyInt(I16) => "llvm.sadd.with.overflow.i16",
2338 TyInt(I32) => "llvm.sadd.with.overflow.i32",
2339 TyInt(I64) => "llvm.sadd.with.overflow.i64",
2341 TyUint(U8) => "llvm.uadd.with.overflow.i8",
2342 TyUint(U16) => "llvm.uadd.with.overflow.i16",
2343 TyUint(U32) => "llvm.uadd.with.overflow.i32",
2344 TyUint(U64) => "llvm.uadd.with.overflow.i64",
2346 _ => unreachable!(),
2348 OverflowOpViaIntrinsic::Sub => match new_sty {
2349 TyInt(I8) => "llvm.ssub.with.overflow.i8",
2350 TyInt(I16) => "llvm.ssub.with.overflow.i16",
2351 TyInt(I32) => "llvm.ssub.with.overflow.i32",
2352 TyInt(I64) => "llvm.ssub.with.overflow.i64",
2354 TyUint(U8) => "llvm.usub.with.overflow.i8",
2355 TyUint(U16) => "llvm.usub.with.overflow.i16",
2356 TyUint(U32) => "llvm.usub.with.overflow.i32",
2357 TyUint(U64) => "llvm.usub.with.overflow.i64",
2359 _ => unreachable!(),
2361 OverflowOpViaIntrinsic::Mul => match new_sty {
2362 TyInt(I8) => "llvm.smul.with.overflow.i8",
2363 TyInt(I16) => "llvm.smul.with.overflow.i16",
2364 TyInt(I32) => "llvm.smul.with.overflow.i32",
2365 TyInt(I64) => "llvm.smul.with.overflow.i64",
2367 TyUint(U8) => "llvm.umul.with.overflow.i8",
2368 TyUint(U16) => "llvm.umul.with.overflow.i16",
2369 TyUint(U32) => "llvm.umul.with.overflow.i32",
2370 TyUint(U64) => "llvm.umul.with.overflow.i64",
2372 _ => unreachable!(),
// Emits the intrinsic call, extracts (result, overflow-flag), hints the
// branch predictor that overflow is unexpected, and conditionally
// panics with "arithmetic operation overflowed".
2377 fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2378 info: NodeIdAndSpan,
2379 lhs_t: Ty<'tcx>, lhs: ValueRef,
2381 binop_debug_loc: DebugLoc)
2382 -> (Block<'blk, 'tcx>, ValueRef) {
2383 let llfn = self.to_intrinsic(bcx, lhs_t);
// The intrinsic returns an aggregate: field 0 = iN result, field 1 = i1 flag.
2385 let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
2386 let result = ExtractValue(bcx, val, 0); // iN operation result
2387 let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
2389 let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
// Tell LLVM the overflow branch is cold (expected value of cond is 0).
2392 let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2393 Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2394 None, binop_debug_loc);
// Panic path taken only when the overflow flag was set.
2397 base::with_cond(bcx, cond, |bcx|
2398 controlflow::trans_fail(bcx, info,
2399 InternedString::new("arithmetic operation overflowed")));
// Lowering of checked shifts: the shift itself cannot be checked by an
// intrinsic, so overflow is defined as "shift amount has bits set
// outside the always-valid range" and detected before shifting.
// NOTE(review): this extraction is missing interior lines (the rhs/lhs
// parameter lines and closing braces) -- verify against the original.
2405 impl OverflowOpViaInputCheck {
2406 fn build_with_input_check<'blk, 'tcx>(&self,
2407 bcx: Block<'blk, 'tcx>,
2408 info: NodeIdAndSpan,
2412 binop_debug_loc: DebugLoc)
2413 -> (Block<'blk, 'tcx>, ValueRef)
2415 let lhs_llty = val_ty(lhs);
2416 let rhs_llty = val_ty(rhs);
2418 // Panic if any bits are set outside of bits that we always
2421 // Note that the mask's value is derived from the LHS type
2422 // (since that is where the 32/64 distinction is relevant) but
2423 // the mask's type must match the RHS type (since they will
2424 // both be fed into an and-binop)
2425 let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
// Any nonzero bit after masking means the shift amount is out of range.
2427 let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2428 let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
// The shift itself is emitted unchecked; only the amount was validated.
2429 let result = match *self {
2430 OverflowOpViaInputCheck::Shl =>
2431 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2432 OverflowOpViaInputCheck::Shr =>
2433 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
// Panic path taken only when out-of-range bits were detected.
2436 base::with_cond(bcx, cond, |bcx|
2437 controlflow::trans_fail(bcx, info,
2438 InternedString::new("shift operation overflowed")));
2444 // Check if an integer or vector contains a nonzero element.
// Returns an i1 ValueRef that is true iff `value` is nonzero. Vectors
// are handled by bitcasting to one wide integer and recursing.
// NOTE(review): this extraction is missing interior lines (the `value`
// parameter line, the `match` opener, and closing braces).
2445 fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2447 binop_debug_loc: DebugLoc) -> ValueRef {
2448 let llty = val_ty(value);
2449 let kind = llty.kind();
// Scalar integer: a single != 0 comparison suffices.
2451 TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
2452 TypeKind::Vector => {
2453 // Check if any elements of the vector are nonzero by treating
2454 // it as a wide integer and checking if the integer is nonzero.
2455 let width = llty.vector_length() as u64 * llty.element_type().int_width();
2456 let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
2457 build_nonzero_check(bcx, int_value, binop_debug_loc)
// Only integer and vector LLVM types are valid inputs here.
2459 _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
// Entry point for emitting a possibly-checked arithmetic binop.
// When overflow checks are enabled (ccx.check_overflow()), dispatches to
// the strategy chosen by codegen_strategy(); otherwise emits the plain
// (unchecked) operation. Returns the (possibly new) block and the result.
// NOTE(review): this extraction is missing interior lines (the rhs
// parameter, the Shl/Shr match-arm heads, and closing braces) -- verify
// against the original source.
2463 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2464 lhs_t: Ty<'tcx>, lhs: ValueRef,
2466 binop_debug_loc: DebugLoc)
2467 -> (Block<'blk, 'tcx>, ValueRef) {
// Dead block: emit no code, just hand back an undef of the operand type.
2468 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2469 if bcx.ccx().check_overflow() {
2471 match oop.codegen_strategy() {
2472 OverflowCodegen::ViaIntrinsic(oop) =>
2473 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2474 OverflowCodegen::ViaInputCheck(oop) =>
2475 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
// Checks disabled: plain ops; shifts still go through the unchecked
// shift helpers.
2478 let res = match oop {
2479 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2480 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2481 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2484 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2486 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2492 /// We categorize expressions into three kinds. The distinction between
2493 /// lvalue/rvalue is fundamental to the language. The distinction between the
2494 /// two kinds of rvalues is an artifact of trans which reflects how we will
2495 /// generate code for that kind of expression. See trans/expr.rs for more
2497 #[derive(Copy, Clone)]
2505 fn expr_kind(tcx: &ty::ctxt, expr: &hir::Expr) -> ExprKind {
2506 if tcx.is_method_call(expr.id) {
2507 // Overloaded operations are generally calls, and hence they are
2508 // generated via DPS, but there are a few exceptions:
2509 return match expr.node {
2510 // `a += b` has a unit result.
2511 hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
2513 // the deref method invoked for `*a` always yields an `&T`
2514 hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
2516 // the index method invoked for `a[i]` always yields an `&T`
2517 hir::ExprIndex(..) => ExprKind::Lvalue,
2519 // in the general case, result could be any type, use DPS
2520 _ => ExprKind::RvalueDps
2525 hir::ExprPath(..) => {
2526 match tcx.resolve_expr(expr) {
2527 Def::Struct(..) | Def::Variant(..) => {
2528 if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
2530 ExprKind::RvalueDatum
2536 // Fn pointers are just scalar values.
2537 Def::Fn(..) | Def::Method(..) => ExprKind::RvalueDatum,
2539 // Note: there is actually a good case to be made that
2540 // DefArg's, particularly those of immediate type, ought to
2541 // considered rvalues.
2544 Def::Local(..) => ExprKind::Lvalue,
2547 Def::AssociatedConst(..) => ExprKind::RvalueDatum,
2552 &format!("uncategorized def for expr {}: {:?}",
2559 hir::ExprType(ref expr, _) => {
2560 expr_kind(tcx, expr)
2563 hir::ExprUnary(hir::UnDeref, _) |
2564 hir::ExprField(..) |
2565 hir::ExprTupField(..) |
2566 hir::ExprIndex(..) => {
2571 hir::ExprMethodCall(..) |
2572 hir::ExprStruct(..) |
2575 hir::ExprMatch(..) |
2576 hir::ExprClosure(..) |
2577 hir::ExprBlock(..) |
2578 hir::ExprRepeat(..) |
2579 hir::ExprVec(..) => {
2583 hir::ExprLit(ref lit) if lit.node.is_str() => {
2587 hir::ExprBreak(..) |
2588 hir::ExprAgain(..) |
2590 hir::ExprWhile(..) |
2592 hir::ExprAssign(..) |
2593 hir::ExprInlineAsm(..) |
2594 hir::ExprAssignOp(..) => {
2595 ExprKind::RvalueStmt
2598 hir::ExprLit(_) | // Note: LitStr is carved out above
2599 hir::ExprUnary(..) |
2601 hir::ExprAddrOf(..) |
2602 hir::ExprBinary(..) |
2603 hir::ExprCast(..) => {
2604 ExprKind::RvalueDatum