1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Translation of Expressions
13 //! The expr module handles translation of expressions. The most general
14 //! translation routine is `trans()`, which will translate an expression
15 //! into a datum. `trans_into()` is also available, which will translate
16 //! an expression and write the result directly into memory, sometimes
17 //! avoiding the need for a temporary stack slot. Finally,
18 //! `trans_to_lvalue()` is available if you'd like to ensure that the
19 //! result has cleanup scheduled.
21 //! Internally, each of these functions dispatches to various other
22 //! expression functions depending on the kind of expression. We divide
23 //! up expressions into:
25 //! - **Datum expressions:** Those that most naturally yield values.
26 //! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
27 //! - **DPS expressions:** Those that most naturally write into a location
28 //! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
29 //! - **Statement expressions:** Those that do not generate a meaningful
30 //! result. Examples would be `while { ... }` or `return 44`.
32 //! Public entry points:
34 //! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
35 //! storing the result into `dest`. This is the preferred form, if you
38 //! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
39 //! `Datum` with the result. You can then store the datum, inspect
40 //! the value, etc. This may introduce temporaries if the datum is a
43 //! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
44 //! expression and ensures that the result has a cleanup associated with it,
45 //! creating a temporary stack slot if necessary.
47 //! - `trans_local_var -> Datum`: looks up a local variable or upvar.
49 #![allow(non_camel_case_types)]
51 pub use self::Dest::*;
52 use self::lazy_binop_ty::*;
55 use llvm::{self, ValueRef, TypeKind};
56 use middle::check_const;
58 use middle::lang_items::CoerceUnsizedTraitLangItem;
59 use middle::subst::{Substs, VecPerParamSpace};
61 use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
64 use trans::cleanup::{self, CleanupMethods};
67 use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
71 use trans::monomorphize;
74 use middle::cast::{CastKind, CastTy};
75 use middle::ty::{AdjustDerefRef, AdjustReifyFnPointer, AdjustUnsafeFnPointer};
76 use middle::ty::{self, Ty};
77 use middle::ty::MethodCall;
78 use util::common::indenter;
79 use trans::machine::{llsize_of, llsize_of_alloc};
80 use trans::type_::Type;
82 use syntax::{ast, ast_util, codemap};
83 use syntax::parse::token::InternedString;
85 use syntax::parse::token;
90 // These are passed around by the code generating functions to track the
91 // destination of a computation's value.
// `Dest` tells the code-generating functions where a computed value should
// go: stored into a specific LLVM value (`SaveIn`) or discarded (`Ignore`).
// NOTE(review): this listing is elided — the enum body, the `impl` header,
// and the surrounding `match` are not visible here.
93 #[derive(Copy, Clone, PartialEq)]
// Debug helper: renders the destination for use in trace/debug output.
100 pub fn to_string(&self, ccx: &CrateContext) -> String {
102 SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
103 Ignore => "Ignore".to_string()
108 /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
109 /// better optimized LLVM code.
//
// Strategy visible in this listing:
// 1. If typeck recorded adjustments for `expr`, delegate to `trans()` +
//    `store_to_dest`, which performs them.
// 2. If the expression is const-qualified, try to emit it as an LLVM global
//    and memcpy it into `dest` (unless it prefers in-place instantiation).
// 3. Otherwise push an AST cleanup scope, dispatch on the expression kind,
//    and pop the scope before returning.
110 pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
113 -> Block<'blk, 'tcx> {
116 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
// Adjusted expressions take the general (slower) path so the
// adjustments get applied.
118 if bcx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
119 // use trans, which may be less efficient but
120 // which will perform the adjustments:
121 let datum = unpack_datum!(bcx, trans(bcx, expr));
122 return datum.store_to_dest(bcx, dest, expr.id);
// Const fast path: a qualif without NOT_CONST/NEEDS_DROP can be emitted
// as a global instead of translated inline.
125 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
126 if !qualif.intersects(
127 check_const::ConstQualif::NOT_CONST |
128 check_const::ConstQualif::NEEDS_DROP
130 if !qualif.intersects(check_const::ConstQualif::PREFER_IN_PLACE) {
131 if let SaveIn(lldest) = dest {
132 let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
133 bcx.fcx.param_substs);
134 // Cast pointer to destination, because constants
135 // have different types.
136 let lldest = PointerCast(bcx, lldest, val_ty(global));
137 memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
140 // Even if we don't have a value to emit, and the expression
141 // doesn't have any side-effects, we still have to translate the
142 // body of any closures.
143 // FIXME: Find a better way of handling this case.
145 // The only way we're going to see a `const` at this point is if
146 // it prefers in-place instantiation, likely because it contains
147 // `[x; N]` somewhere within.
149 ast::ExprPath(..) => {
150 match bcx.def(expr.id) {
151 def::DefConst(did) => {
152 let const_expr = consts::get_const_expr(bcx.ccx(), did, expr);
153 // Temporarily get cleanup scopes out of the way,
154 // as they require sub-expressions to be contained
155 // inside the current AST scope.
156 // These should record no cleanups anyways, `const`
157 // can't have destructors.
158 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
160 // Lock emitted debug locations to the location of
161 // the constant reference expression.
162 debuginfo::with_source_location_override(bcx.fcx,
// Recurse: translate the const's defining expression into `dest`.
165 bcx = trans_into(bcx, const_expr, dest)
// Restore the scopes swapped out above, and check that the
// recursive translation really recorded no cleanups.
167 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
169 assert!(scopes.is_empty());
// General path: scope the translation and dispatch on expression kind.
180 debug!("trans_into() expr={:?}", expr);
182 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
186 bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
188 let kind = expr_kind(bcx.tcx(), expr);
190 ExprKind::Lvalue | ExprKind::RvalueDatum => {
191 trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
193 ExprKind::RvalueDps => {
194 trans_rvalue_dps_unadjusted(bcx, expr, dest)
196 ExprKind::RvalueStmt => {
197 trans_rvalue_stmt_unadjusted(bcx, expr)
// Pop the scope pushed above, emitting any scheduled cleanups.
201 bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
204 /// Translates an expression, returning a datum (and new block) encapsulating the result. When
205 /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
// (the rest of this doc comment is elided in this listing)
207 pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
209 -> DatumBlock<'blk, 'tcx, Expr> {
210 debug!("trans(expr={:?})", expr);
// Const fast path: a qualif without NOT_CONST/NEEDS_DROP may be emitted
// as an LLVM global rather than translated inline.
214 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
215 let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS);
216 let global = if !qualif.intersects(
217 check_const::ConstQualif::NOT_CONST |
218 check_const::ConstQualif::NEEDS_DROP
220 let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
221 bcx.fcx.param_substs);
223 if qualif.intersects(check_const::ConstQualif::HAS_STATIC_BORROWS) {
224 // Is borrowed as 'static, must return lvalue.
226 // Cast pointer to global, because constants have different types.
227 let const_ty = expr_ty_adjusted(bcx, expr);
228 let llty = type_of::type_of(bcx.ccx(), const_ty);
229 let global = PointerCast(bcx, global, llty.ptr_to());
230 let datum = Datum::new(global, const_ty, Lvalue);
231 return DatumBlock::new(bcx, datum.to_expr_datum());
234 // Otherwise, keep around and perform adjustments, if needed.
235 let const_ty = if adjusted_global {
236 expr_ty_adjusted(bcx, expr)
241 // This could use a better heuristic.
242 Some(if type_is_immediate(bcx.ccx(), const_ty) {
243 // Cast pointer to global, because constants have different types.
244 let llty = type_of::type_of(bcx.ccx(), const_ty);
245 let global = PointerCast(bcx, global, llty.ptr_to());
246 // Maybe just get the value directly, instead of loading it?
247 immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
// Non-immediate constant: copy the global into a fresh stack slot
// and hand back a by-ref rvalue datum over that slot.
249 let llty = type_of::type_of(bcx.ccx(), const_ty);
250 // HACK(eddyb) get around issues with lifetime intrinsics.
251 let scratch = alloca_no_lifetime(bcx, llty, "const");
252 let lldest = if !const_ty.is_structural() {
253 // Cast pointer to slot, because constants have different types.
254 PointerCast(bcx, scratch, val_ty(global))
256 // In this case, memcpy_ty calls llvm.memcpy after casting both
257 // source and destination to i8*, so we don't need any casts.
260 memcpy_ty(bcx, lldest, global, const_ty);
261 Datum::new(scratch, const_ty, Rvalue::new(ByRef))
// General path: translate under a cleanup scope, then apply
// adjustments unless trans::consts already did (`adjusted_global`).
267 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
271 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
272 let datum = match global {
273 Some(rvalue) => rvalue.to_expr_datum(),
274 None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
276 let datum = if adjusted_global {
277 datum // trans::consts already performed adjustments.
279 unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
281 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
282 return DatumBlock::new(bcx, datum);
// Returns a pointer to the `FAT_PTR_EXTRA` word of a fat pointer — the
// extra-data slot (named "len" here; holds the length for slices).
285 pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
286 GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_EXTRA])
// Returns a pointer to the `FAT_PTR_ADDR` word of a fat pointer — the
// slot holding the data pointer itself.
289 pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
290 GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR])
// Copies a fat pointer from `src_ptr` to `dst_ptr` word by word:
// first the data pointer, then the extra-data ("len") word.
293 pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
294 Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
295 Store(bcx, Load(bcx, get_len(bcx, src_ptr)), get_len(bcx, dst_ptr));
298 /// Retrieve the information we are losing (making dynamic) in an unsizing
301 /// The `old_info` argument is a bit funny. It is intended for use
302 /// in an upcast, where the new vtable for an object will be derived
303 /// from the old one.
///
/// Visible cases: array -> slice yields the length constant; trait ->
/// trait (upcast) reuses `old_info`; concrete -> trait builds a vtable.
304 pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
307 old_info: Option<ValueRef>,
308 param_substs: &'tcx Substs<'tcx>)
// Walk both types down to the differing tails before matching.
310 let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
311 match (&source.sty, &target.sty) {
312 (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
313 (&ty::TyTrait(_), &ty::TyTrait(_)) => {
314 // For now, upcasts are limited to changes in marker
315 // traits, and hence never require an actual
316 // change to the vtable.
317 old_info.expect("unsized_info: missing old info for trait upcast")
319 (_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => {
320 // Note that we preserve binding levels here:
321 let substs = principal.0.substs.with_self_ty(source).erase_regions();
322 let substs = ccx.tcx().mk_substs(substs);
323 let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
// Emit (or reuse) the vtable and cast it to the generic vtable
// pointer type.
325 consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
326 Type::vtable_ptr(ccx))
// Any other source/target pairing is a compiler bug.
328 _ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}",
334 /// Helper for trans that applies adjustments from `expr` to `datum`, which should be the unadjusted
335 /// translation of `expr`.
///
/// Handles the adjustment kinds visible here: fn-item reification, unsafe
/// fn-pointer conversion, and autoderef/autoref (possibly followed by an
/// unsizing coercion).
336 fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
338 datum: Datum<'tcx, Expr>)
339 -> DatumBlock<'blk, 'tcx, Expr>
342 let mut datum = datum;
// No recorded adjustment: return the datum unchanged.
343 let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
345 return DatumBlock::new(bcx, datum);
349 debug!("unadjusted datum for expr {:?}: {} adjustment={:?}",
351 datum.to_string(bcx.ccx()),
354 AdjustReifyFnPointer => {
355 // FIXME(#19925) once fn item types are
356 // zero-sized, we'll need to do something here
358 AdjustUnsafeFnPointer => {
359 // purely a type-level thing
361 AdjustDerefRef(ref adj) => {
// Detect a plain reborrow (&* pattern) so we can elide it.
362 let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
363 // We are a bit paranoid about adjustments and thus might have a re-
364 // borrow here which merely derefs and then refs again (it might have
365 // a different region or mutability, but we don't care here).
367 // Don't skip a conversion from Box<T> to &T, etc.
369 if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
370 // Don't skip an overloaded deref.
// Apply the remaining (non-elided) autoderefs.
382 if adj.autoderefs > skip_reborrows {
384 let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
385 datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
386 lval.to_expr_datum(),
387 adj.autoderefs - skip_reborrows));
390 // (You might think there is a more elegant way to do this than a
391 // skip_reborrows bool, but then you remember that the borrow checker exists).
392 if skip_reborrows == 0 && adj.autoref.is_some() {
// Unsized values get a fat-pointer borrow; sized ones a thin one.
393 if !type_is_sized(bcx.tcx(), datum.ty) {
395 let lval = unpack_datum!(bcx,
396 datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id));
397 datum = unpack_datum!(bcx, ref_fat_ptr(bcx, lval));
399 datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
// Finally, perform an unsizing coercion into a scratch slot if
// the adjustment requests one.
403 if let Some(target) = adj.unsize {
404 // We do not arrange cleanup ourselves; if we already are an
405 // L-value, then cleanup will have already been scheduled (and
406 // the `datum.to_rvalue_datum` call below will emit code to zero
407 // the drop flag when moving out of the L-value). If we are an
408 // R-value, then we do not need to schedule cleanup.
409 let source_datum = unpack_datum!(bcx,
410 datum.to_rvalue_datum(bcx, "__coerce_source"));
412 let target = bcx.monomorphize(&target);
413 let llty = type_of::type_of(bcx.ccx(), target);
415 // HACK(eddyb) get around issues with lifetime intrinsics.
416 let scratch = alloca_no_lifetime(bcx, llty, "__coerce_target");
417 let target_datum = Datum::new(scratch, target,
419 bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
420 datum = Datum::new(scratch, target,
421 RvalueExpr(Rvalue::new(ByRef)));
425 debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
426 DatumBlock::new(bcx, datum)
// Performs an unsizing coercion from `source` into the (by-ref) `target`
// slot. Two cases are visible here:
// - pointer-to-pointer (Box/&/* combinations): build a fat pointer from
//   the thin/fat source plus the info computed by `unsized_info`;
// - struct-to-struct (`CoerceUnsized` impls): copy every field, recursing
//   on the single field that actually needs coercing.
429 fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
431 source: Datum<'tcx, Rvalue>,
432 target: Datum<'tcx, Rvalue>)
433 -> Block<'blk, 'tcx> {
435 debug!("coerce_unsized({} -> {})",
436 source.to_string(bcx.ccx()),
437 target.to_string(bcx.ccx()));
439 match (&source.ty.sty, &target.ty.sty) {
440 (&ty::TyBox(a), &ty::TyBox(b)) |
441 (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
442 &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
443 (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
444 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
445 (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
446 &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
447 let (inner_source, inner_target) = (a, b);
// If the source pointee is already unsized, the source is itself a
// fat pointer: pull out its data pointer and existing info.
449 let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
450 // Normally, the source is a thin pointer and we are
451 // adding extra info to make a fat pointer. The exception
452 // is when we are upcasting an existing object fat pointer
453 // to use a different vtable. In that case, we want to
454 // load out the original data pointer so we can repackage
456 (Load(bcx, get_dataptr(bcx, source.val)),
457 Some(Load(bcx, get_len(bcx, source.val))))
459 let val = if source.kind.is_by_ref() {
460 load_ty(bcx, source.val, source.ty)
// Compute the dynamic info (length or vtable) for the coercion.
467 let info = unsized_info(bcx.ccx(), inner_source, inner_target,
468 old_info, bcx.fcx.param_substs);
470 // Compute the base pointer. This doesn't change the pointer value,
471 // but merely its type.
472 let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
473 let base = PointerCast(bcx, base, ptr_ty);
// Assemble the fat pointer into the target slot.
475 Store(bcx, base, get_dataptr(bcx, target.val));
476 Store(bcx, info, get_len(bcx, target.val));
479 // This can be extended to enums and tuples in the future.
480 // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
481 (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
482 assert_eq!(def_id_a, def_id_b);
484 // The target is already by-ref because it's to be written to.
485 let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
486 assert!(target.kind.is_by_ref());
// Build the `CoerceUnsized` trait reference for this coercion and
// resolve it to find which field must be coerced.
488 let trait_substs = Substs::erased(VecPerParamSpace::new(vec![target.ty],
491 let trait_ref = ty::Binder(ty::TraitRef {
492 def_id: langcall(bcx, Some(span), "coercion",
493 CoerceUnsizedTraitLangItem),
494 substs: bcx.tcx().mk_substs(trait_substs)
497 let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) {
498 traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
499 bcx.tcx().custom_coerce_unsized_kind(impl_def_id)
502 bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {:?}",
// Both reprs must be univariant structs; anything else is a bug.
507 let repr_source = adt::represent_type(bcx.ccx(), source.ty);
508 let src_fields = match &*repr_source {
509 &adt::Repr::Univariant(ref s, _) => &s.fields,
510 _ => bcx.sess().span_bug(span,
511 &format!("Non univariant struct? (repr_source: {:?})",
514 let repr_target = adt::represent_type(bcx.ccx(), target.ty);
515 let target_fields = match &*repr_target {
516 &adt::Repr::Univariant(ref s, _) => &s.fields,
517 _ => bcx.sess().span_bug(span,
518 &format!("Non univariant struct? (repr_target: {:?})",
522 let coerce_index = match kind {
523 ty::CustomCoerceUnsized::Struct(i) => i
525 assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
// Field-by-field: recurse on the coerced field, memcpy the rest.
527 let iter = src_fields.iter().zip(target_fields).enumerate();
528 for (i, (src_ty, target_ty)) in iter {
529 let ll_source = adt::trans_field_ptr(bcx, &repr_source, source.val, 0, i);
530 let ll_target = adt::trans_field_ptr(bcx, &repr_target, target.val, 0, i);
532 // If this is the field we need to coerce, recurse on it.
533 if i == coerce_index {
534 coerce_unsized(bcx, span,
535 Datum::new(ll_source, src_ty,
537 Datum::new(ll_target, target_ty,
538 Rvalue::new(ByRef)));
540 // Otherwise, simply copy the data from the source.
541 assert_eq!(src_ty, target_ty);
542 memcpy_ty(bcx, ll_target, ll_source, src_ty);
546 _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}",
553 /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
554 /// that the expr represents.
556 /// If this expression is an rvalue, this implies introducing a temporary. In other words,
557 /// something like `x().f` is translated into roughly the equivalent of
559 /// { tmp = x(); tmp.f }
// Thin wrapper: translate normally, then force the result into lvalue form
// (scheduling cleanup / creating a temporary as needed).
560 pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
563 -> DatumBlock<'blk, 'tcx, Lvalue> {
565 let datum = unpack_datum!(bcx, trans(bcx, expr));
566 return datum.to_lvalue_datum(bcx, name, expr.id);
569 /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
///
/// Dispatches on the expression kind: datum/lvalue expressions translate
/// directly; statement expressions yield a unit datum; DPS expressions are
/// translated into a scratch slot (or `Ignore`d if zero-sized).
571 fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
573 -> DatumBlock<'blk, 'tcx, Expr> {
576 debug!("trans_unadjusted(expr={:?})", expr);
577 let _indenter = indenter();
579 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
581 return match expr_kind(bcx.tcx(), expr) {
582 ExprKind::Lvalue | ExprKind::RvalueDatum => {
583 let datum = unpack_datum!(bcx, {
584 trans_datum_unadjusted(bcx, expr)
587 DatumBlock {bcx: bcx, datum: datum}
590 ExprKind::RvalueStmt => {
591 bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
592 nil(bcx, expr_ty(bcx, expr))
595 ExprKind::RvalueDps => {
596 let ty = expr_ty(bcx, expr);
// Zero-sized results need no backing memory at all.
597 if type_is_zero_size(bcx.ccx(), ty) {
598 bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
601 let scratch = rvalue_scratch_datum(bcx, ty, "");
602 bcx = trans_rvalue_dps_unadjusted(
603 bcx, expr, SaveIn(scratch.val));
605 // Note: this is not obviously a good idea. It causes
606 // immediate values to be loaded immediately after a
607 // return from a call or other similar expression,
608 // which in turn leads to alloca's having shorter
609 // lifetimes and hence larger stack frames. However,
610 // in turn it can lead to more register pressure.
611 // Still, in practice it seems to increase
612 // performance, since we have fewer problems with
614 let scratch = unpack_datum!(
615 bcx, scratch.to_appropriate_datum(bcx));
617 DatumBlock::new(bcx, scratch.to_expr_datum())
// Produces a placeholder datum of type `ty` (an undef immediate rvalue),
// used for statement expressions that yield no meaningful value.
622 fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
623 -> DatumBlock<'blk, 'tcx, Expr> {
624 let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
625 let datum = immediate_rvalue(llval, ty);
626 DatumBlock::new(bcx, datum.to_expr_datum())
// Translates datum/lvalue expressions (paths, field access, indexing,
// literals, unary/binary ops, casts, `box`, address-of) into a datum.
// Dispatch is on the AST node kind; the catch-all is a compiler bug.
630 fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
632 -> DatumBlock<'blk, 'tcx, Expr> {
635 let _icx = push_ctxt("trans_datum_unadjusted");
// Parentheses are transparent: recurse on the inner expression.
638 ast::ExprParen(ref e) => {
641 ast::ExprPath(..) => {
642 trans_def(bcx, expr, bcx.def(expr.id))
644 ast::ExprField(ref base, ident) => {
645 trans_rec_field(bcx, &**base, ident.node.name)
647 ast::ExprTupField(ref base, idx) => {
648 trans_rec_tup_field(bcx, &**base, idx.node)
650 ast::ExprIndex(ref base, ref idx) => {
651 trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
653 ast::ExprBox(_, ref contents) => {
654 // Special case for `Box<T>`
655 let box_ty = expr_ty(bcx, expr);
656 let contents_ty = expr_ty(bcx, &**contents);
659 trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty)
661 _ => bcx.sess().span_bug(expr.span,
662 "expected unique box")
666 ast::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit),
667 ast::ExprBinary(op, ref lhs, ref rhs) => {
668 trans_binary(bcx, expr, op, &**lhs, &**rhs)
670 ast::ExprUnary(op, ref x) => {
671 trans_unary(bcx, expr, op, &**x)
673 ast::ExprAddrOf(_, ref x) => {
// `&[...]` / `&[x; N]` borrow of an array/repeat expression is
// handled specially as a slice, under its own cleanup scope.
675 ast::ExprRepeat(..) | ast::ExprVec(..) => {
676 // Special case for slices.
677 let cleanup_debug_loc =
678 debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
682 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
683 let datum = unpack_datum!(
684 bcx, tvec::trans_slice_vec(bcx, expr, &**x));
685 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
686 DatumBlock::new(bcx, datum)
689 trans_addr_of(bcx, expr, &**x)
693 ast::ExprCast(ref val, _) => {
694 // Datum output mode means this is a scalar cast:
695 trans_imm_cast(bcx, &**val, expr.id)
698 bcx.tcx().sess.span_bug(
700 &format!("trans_rvalue_datum_unadjusted reached \
701 fall-through case: {:?}",
// Shared implementation of field access (`base.field` / `base.0`):
// `get_idx` maps the field name or position to a field index. Sized fields
// yield an element datum directly; unsized fields are repackaged as a fat
// pointer in a scratch slot (data pointer + info copied from the base).
707 fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
710 -> DatumBlock<'blk, 'tcx, Expr> where
711 F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::Field<'tcx>]) -> usize,
714 let _icx = push_ctxt("trans_rec_field");
716 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
717 let bare_ty = base_datum.ty;
718 let repr = adt::represent_type(bcx.ccx(), bare_ty);
719 with_field_tys(bcx.tcx(), bare_ty, None, move |discr, field_tys| {
720 let ix = get_idx(bcx.tcx(), field_tys);
721 let d = base_datum.get_element(
724 |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, ix));
726 if type_is_sized(bcx.tcx(), d.ty) {
727 DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
// Unsized field: build a fat pointer (ptr + info from the base).
729 let scratch = rvalue_scratch_datum(bcx, d.ty, "");
730 Store(bcx, d.val, get_dataptr(bcx, scratch.val));
731 let info = Load(bcx, get_len(bcx, base_datum.val));
732 Store(bcx, info, get_len(bcx, scratch.val));
734 // Always generate an lvalue datum, because this pointer doesn't own
735 // the data and cleanup is scheduled elsewhere.
736 DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr))
742 /// Translates `base.field`.
// Resolves the named field to an index, then defers to `trans_field`.
743 fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
746 -> DatumBlock<'blk, 'tcx, Expr> {
747 trans_field(bcx, base, |tcx, field_tys| tcx.field_idx_strict(field, field_tys))
750 /// Translates `base.<idx>`.
// Tuple field access: the index is already numeric, so pass it through.
751 fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
754 -> DatumBlock<'blk, 'tcx, Expr> {
755 trans_field(bcx, base, |_, _| idx)
// Translates `base[idx]`. Two paths:
// - overloaded `Index`: call the user's index method (which yields a `&T`)
//   into a scratch slot, then treat the result as an lvalue;
// - built-in indexing: widen/truncate the index to the machine int width,
//   emit a bounds check (hinted cold via `llvm.expect.i1`), then GEP into
//   the element.
758 fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
759 index_expr: &ast::Expr,
762 method_call: MethodCall)
763 -> DatumBlock<'blk, 'tcx, Expr> {
764 //! Translates `base[idx]`.
766 let _icx = push_ctxt("trans_index");
770 let index_expr_debug_loc = index_expr.debug_loc();
772 // Check for overloaded index.
773 let method_ty = ccx.tcx()
778 .map(|method| method.ty);
779 let elt_datum = match method_ty {
781 let method_ty = monomorphize_type(bcx, method_ty);
783 let base_datum = unpack_datum!(bcx, trans(bcx, base));
785 // Translate index expression.
786 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
788 let ref_ty = // invoked methods have LB regions instantiated:
789 bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
790 let elt_ty = match ref_ty.builtin_deref(true) {
792 bcx.tcx().sess.span_bug(index_expr.span,
793 "index method didn't return a \
794 dereferenceable type?!")
796 Some(elt_tm) => elt_tm.ty,
799 // Overloaded. Evaluate `trans_overloaded_op`, which will
800 // invoke the user's index() method, which basically yields
801 // a `&T` pointer. We can then proceed down the normal
802 // path (below) to dereference that `&T`.
803 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
805 trans_overloaded_op(bcx,
809 Some((ix_datum, idx.id)),
810 Some(SaveIn(scratch.val)),
812 let datum = scratch.to_expr_datum();
813 if type_is_sized(bcx.tcx(), elt_ty) {
814 Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr)
816 Datum::new(datum.val, elt_ty, LvalueExpr)
// Built-in indexing path starts here.
820 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
824 // Translate index expression and cast to a suitable LLVM integer.
825 // Rust is less strict than LLVM in this regard.
826 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
827 let ix_val = ix_datum.to_llscalarish(bcx);
828 let ix_size = machine::llbitsize_of_real(bcx.ccx(),
830 let int_size = machine::llbitsize_of_real(bcx.ccx(),
// Sign- or zero-extend narrow indices; truncate wide ones.
833 if ix_size < int_size {
834 if expr_ty(bcx, idx).is_signed() {
835 SExt(bcx, ix_val, ccx.int_type())
836 } else { ZExt(bcx, ix_val, ccx.int_type()) }
837 } else if ix_size > int_size {
838 Trunc(bcx, ix_val, ccx.int_type())
844 let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
846 let (base, len) = base_datum.get_vec_base_and_len(bcx);
848 debug!("trans_index: base {}", bcx.val_to_string(base));
849 debug!("trans_index: len {}", bcx.val_to_string(len));
// Bounds check: compare index against length, tell LLVM the failure
// branch is unlikely, and emit the panic call under a conditional.
851 let bounds_check = ICmp(bcx,
855 index_expr_debug_loc);
856 let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
857 let expected = Call(bcx,
859 &[bounds_check, C_bool(ccx, false)],
861 index_expr_debug_loc);
862 bcx = with_cond(bcx, expected, |bcx| {
863 controlflow::trans_fail_bounds_check(bcx,
864 expr_info(index_expr),
// In-bounds GEP to the element, cast to the element pointer type.
868 let elt = InBoundsGEP(bcx, base, &[ix_val]);
869 let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
870 Datum::new(elt, unit_ty, LvalueExpr)
874 DatumBlock::new(bcx, elt_datum)
// Translates a path reference to a definition: fn-like items become fn
// datums, statics become lvalue datums over the (possibly external) global,
// consts are a bug here (handled earlier), and everything else is treated
// as a local variable.
877 fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
878 ref_expr: &ast::Expr,
880 -> DatumBlock<'blk, 'tcx, Expr> {
881 //! Translates a reference to a path.
883 let _icx = push_ctxt("trans_def_lvalue");
885 def::DefFn(..) | def::DefMethod(..) |
886 def::DefStruct(_) | def::DefVariant(..) => {
887 let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
888 bcx.fcx.param_substs);
889 DatumBlock::new(bcx, datum.to_expr_datum())
891 def::DefStatic(did, _) => {
892 // There are two things that may happen here:
893 // 1) If the static item is defined in this crate, it will be
894 // translated using `get_item_val`, and we return a pointer to
896 // 2) If the static item is defined in another crate then we add
897 // (or reuse) a declaration of an external global, and return a
899 let const_ty = expr_ty(bcx, ref_expr);
901 // For external constants, we don't inline.
902 let val = if did.krate == ast::LOCAL_CRATE {
905 // The LLVM global has the type of its initializer,
906 // which may not be equal to the enum's type for
908 let val = base::get_item_val(bcx.ccx(), did.node);
909 let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
910 PointerCast(bcx, val, pty)
// Cross-crate static: declare/reuse an external global.
913 base::get_extern_const(bcx.ccx(), did, const_ty)
915 DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
917 def::DefConst(_) => {
// Consts should have been handled by the const fast paths upstream.
918 bcx.sess().span_bug(ref_expr.span,
919 "constant expression should not reach expr::trans_def")
// Fallback: locals and upvars.
922 DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
// Translates statement-kind expressions (break/continue/return, while,
// loop, assignment, inline asm) that produce no meaningful value; returns
// the (possibly new) block. Bails out immediately if the block is already
// unreachable.
927 fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
929 -> Block<'blk, 'tcx> {
931 let _icx = push_ctxt("trans_rvalue_stmt");
933 if bcx.unreachable.get() {
937 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
940 ast::ExprParen(ref e) => {
941 trans_into(bcx, &**e, Ignore)
943 ast::ExprBreak(label_opt) => {
944 controlflow::trans_break(bcx, expr, label_opt)
946 ast::ExprAgain(label_opt) => {
947 controlflow::trans_cont(bcx, expr, label_opt)
949 ast::ExprRet(ref ex) => {
950 // Check to see if the return expression itself is reachable.
951 // This can occur when the inner expression contains a return
952 let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
953 cfg.node_is_reachable(expr.id)
959 controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
961 // If it's not reachable, just translate the inner expression
962 // directly. This avoids having to manage a return slot when
963 // it won't actually be used anyway.
964 if let &Some(ref x) = ex {
965 bcx = trans_into(bcx, &**x, Ignore);
967 // Mark the end of the block as unreachable. Once we get to
968 // a return expression, there's no more we should be doing
974 ast::ExprWhile(ref cond, ref body, _) => {
975 controlflow::trans_while(bcx, expr, &**cond, &**body)
977 ast::ExprLoop(ref body, _) => {
978 controlflow::trans_loop(bcx, expr, &**body)
980 ast::ExprAssign(ref dst, ref src) => {
981 let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
982 let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));
984 if bcx.fcx.type_needs_drop(dst_datum.ty) {
985 // If there are destructors involved, make sure we
986 // are copying from an rvalue, since that cannot possibly
987 // alias an lvalue. We are concerned about code like:
995 // where e.g. a : Option<Foo> and a.b :
996 // Option<Foo>. In that case, freeing `a` before the
997 // assignment may also free `a.b`!
999 // We could avoid this intermediary with some analysis
1000 // to determine whether `dst` may possibly own `src`.
1001 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
1002 let src_datum = unpack_datum!(
1003 bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
// Drop the old value in `dst` before storing the new one.
1004 bcx = glue::drop_ty(bcx,
1008 src_datum.store_to(bcx, dst_datum.val)
// No destructors: store directly, no intermediate drop needed.
1010 src_datum.store_to(bcx, dst_datum.val)
1013 ast::ExprAssignOp(op, ref dst, ref src) => {
1014 trans_assign_op(bcx, expr, op, &**dst, &**src)
1016 ast::ExprInlineAsm(ref a) => {
1017 asm::trans_inline_asm(bcx, a)
// Any other expression kind reaching here is a compiler bug.
1020 bcx.tcx().sess.span_bug(
1022 &format!("trans_rvalue_stmt_unadjusted reached \
1023 fall-through case: {:?}",
1029 fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1032 -> Block<'blk, 'tcx> {
1033 let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
1035 let tcx = bcx.tcx();
1037 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
1040 ast::ExprParen(ref e) => {
1041 trans_into(bcx, &**e, dest)
1043 ast::ExprPath(..) => {
1044 trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
1046 ast::ExprIf(ref cond, ref thn, ref els) => {
1047 controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest)
1049 ast::ExprMatch(ref discr, ref arms, _) => {
1050 _match::trans_match(bcx, expr, &**discr, &arms[..], dest)
1052 ast::ExprBlock(ref blk) => {
1053 controlflow::trans_block(bcx, &**blk, dest)
1055 ast::ExprStruct(_, ref fields, ref base) => {
1058 base.as_ref().map(|e| &**e),
1061 node_id_type(bcx, expr.id),
1064 ast::ExprRange(ref start, ref end) => {
1065 // FIXME it is just not right that we are synthesising ast nodes in
1067 fn make_field(field_name: &str, expr: P<ast::Expr>) -> ast::Field {
1069 ident: codemap::dummy_spanned(token::str_to_ident(field_name)),
1071 span: codemap::DUMMY_SP,
1075 // A range just desugars into a struct.
1076 // Note that the type of the start and end may not be the same, but
1077 // they should only differ in their lifetime, which should not matter
1079 let (did, fields, ty_params) = match (start, end) {
1080 (&Some(ref start), &Some(ref end)) => {
1082 let fields = vec![make_field("start", start.clone()),
1083 make_field("end", end.clone())];
1084 (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
1086 (&Some(ref start), &None) => {
1087 // Desugar to RangeFrom
1088 let fields = vec![make_field("start", start.clone())];
1089 (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
1091 (&None, &Some(ref end)) => {
1092 // Desugar to RangeTo
1093 let fields = vec![make_field("end", end.clone())];
1094 (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
1097 // Desugar to RangeFull
1098 (tcx.lang_items.range_full_struct(), vec![], vec![])
1102 if let Some(did) = did {
1103 let substs = Substs::new_type(ty_params, vec![]);
1109 tcx.mk_struct(did, tcx.mk_substs(substs)),
1112 tcx.sess.span_bug(expr.span,
1113 "No lang item for ranges (how did we get this far?)")
1116 ast::ExprTup(ref args) => {
1117 let numbered_fields: Vec<(usize, &ast::Expr)> =
1118 args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
1122 &numbered_fields[..],
1127 ast::ExprLit(ref lit) => {
1129 ast::LitStr(ref s, _) => {
1130 tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
1135 .span_bug(expr.span,
1136 "trans_rvalue_dps_unadjusted shouldn't be \
1137 translating this type of literal")
1141 ast::ExprVec(..) | ast::ExprRepeat(..) => {
1142 tvec::trans_fixed_vstore(bcx, expr, dest)
1144 ast::ExprClosure(_, ref decl, ref body) => {
1145 let dest = match dest {
1146 SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
1147 Ignore => closure::Dest::Ignore(bcx.ccx())
1149 let substs = match expr_ty(bcx, expr).sty {
1150 ty::TyClosure(_, ref substs) => substs,
1152 bcx.tcx().sess.span_bug(
1154 &format!("closure expr without closure type: {:?}", t)),
1156 closure::trans_closure_expr(dest, decl, body, expr.id, substs).unwrap_or(bcx)
1158 ast::ExprCall(ref f, ref args) => {
1159 if bcx.tcx().is_method_call(expr.id) {
1160 trans_overloaded_call(bcx,
1166 callee::trans_call(bcx,
1169 callee::ArgExprs(&args[..]),
1173 ast::ExprMethodCall(_, _, ref args) => {
1174 callee::trans_method_call(bcx,
1177 callee::ArgExprs(&args[..]),
1180 ast::ExprBinary(op, ref lhs, ref rhs) => {
1181 // if not overloaded, would be RvalueDatumExpr
1182 let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
1183 let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
1184 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
1185 Some((rhs_datum, rhs.id)), Some(dest),
1186 !ast_util::is_by_value_binop(op.node)).bcx
1188 ast::ExprUnary(op, ref subexpr) => {
1189 // if not overloaded, would be RvalueDatumExpr
1190 let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
1191 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
1192 arg, None, Some(dest), !ast_util::is_by_value_unop(op)).bcx
1194 ast::ExprIndex(ref base, ref idx) => {
1195 // if not overloaded, would be RvalueDatumExpr
1196 let base = unpack_datum!(bcx, trans(bcx, &**base));
1197 let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
1198 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
1199 Some((idx_datum, idx.id)), Some(dest), true).bcx
1201 ast::ExprCast(..) => {
1202 // Trait casts used to come this way, now they should be coercions.
1203 bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
1205 ast::ExprAssignOp(op, ref dst, ref src) => {
1206 trans_assign_op(bcx, expr, op, &**dst, &**src)
1209 bcx.tcx().sess.span_bug(
1211 &format!("trans_rvalue_dps_unadjusted reached fall-through \
// Translates a bare path reference (`ref_expr`) to a definition in DPS
// style. Handles nullary/unit enum variants and unit structs, which are
// "constructed" simply by writing their discriminant; tuple-like variants
// with arguments are represented by storing the constructor fn pointer.
// Anything else is a compiler bug at this point.
// NOTE(review): partial listing — intervening source lines are elided.
1218 fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1219 ref_expr: &ast::Expr,
1222 -> Block<'blk, 'tcx> {
1223 let _icx = push_ctxt("trans_def_dps_unadjusted");
// With no destination there is nothing to materialize: bail out early.
1225 let lldest = match dest {
1226 SaveIn(lldest) => lldest,
1227 Ignore => { return bcx; }
1231 def::DefVariant(tid, vid, _) => {
1232 let variant_info = bcx.tcx().enum_variant_with_id(tid, vid);
1233 if !variant_info.args.is_empty() {
// Tuple-like variant with arguments: the path denotes the
// constructor function, so store its fn pointer.
1235 let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
1236 ExprId(ref_expr.id),
1237 bcx.fcx.param_substs).val;
1238 Store(bcx, llfn, lldest);
// Nullary variant: just set the discriminant in place.
1242 let ty = expr_ty(bcx, ref_expr);
1243 let repr = adt::represent_type(bcx.ccx(), ty);
1244 adt::trans_set_discr(bcx, &*repr, lldest,
1245 variant_info.disr_val);
1249 def::DefStruct(_) => {
1250 let ty = expr_ty(bcx, ref_expr);
// Unit struct with a destructor: its representation carries a
// drop flag/discriminant that must be initialized (discr 0).
1252 ty::TyStruct(did, _) if bcx.tcx().has_dtor(did) => {
1253 let repr = adt::represent_type(bcx.ccx(), ty);
1254 adt::trans_set_discr(bcx, &*repr, lldest, 0);
// NOTE(review): "referened" in the message below is a typo in a runtime
// string; left untouched here since only comments may change.
1261 bcx.tcx().sess.span_bug(ref_expr.span, &format!(
1262 "Non-DPS def {:?} referened by {}",
1263 def, bcx.node_id_to_string(ref_expr.id)));
// Translates a reference to a fn-like definition (free function, tuple
// struct/variant constructor, or method) into an rvalue datum holding the
// function pointer. Trait methods referenced through a trait go through
// `trans_static_method_callee` to resolve the concrete impl.
// NOTE(review): partial listing — intervening source lines are elided.
1268 pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1269 ref_expr: &ast::Expr,
1271 param_substs: &'tcx Substs<'tcx>)
1272 -> Datum<'tcx, Rvalue> {
1273 let _icx = push_ctxt("trans_def_datum_unadjusted");
// All of these resolve directly to a concrete fn item.
1276 def::DefFn(did, _) |
1277 def::DefStruct(did) | def::DefVariant(_, did, _) |
1278 def::DefMethod(did, def::FromImpl(_)) => {
1279 callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
// A method named through its trait: needs impl selection first.
1281 def::DefMethod(impl_did, def::FromTrait(trait_did)) => {
1282 meth::trans_static_method_callee(ccx, impl_did,
1283 trait_did, ref_expr.id,
// Any other def kind reaching here is a compiler bug.
1287 ccx.tcx().sess.span_bug(ref_expr.span, &format!(
1288 "trans_def_fn_unadjusted invoked on: {:?} for {:?}",
1295 /// Translates a reference to a local variable or argument. This always results in an lvalue datum.
1296 pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1298 -> Datum<'tcx, Lvalue> {
1299 let _icx = push_ctxt("trans_local_var");
// Captured variables live in the function context's upvar table.
1302 def::DefUpvar(nid, _) => {
1303 // Can't move upvars, so this is never a ZeroMemLastUse.
1304 let local_ty = node_id_type(bcx, nid);
1305 match bcx.fcx.llupvars.borrow().get(&nid) {
1306 Some(&val) => Datum::new(val, local_ty, Lvalue),
// Missing upvar slot is an internal invariant violation.
1308 bcx.sess().bug(&format!(
1309 "trans_local_var: no llval for upvar {} found",
// Ordinary locals and arguments come from the lllocals table.
1314 def::DefLocal(nid) => {
1315 let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
1318 bcx.sess().bug(&format!(
1319 "trans_local_var: no datum for local/arg {} found",
1323 debug!("take_local(nid={}, v={}, ty={})",
1324 nid, bcx.val_to_string(datum.val), datum.ty);
// Any other def kind is not a "local variable" — unimplemented.
1328 bcx.sess().unimpl(&format!(
1329 "unsupported def type in trans_local_var: {:?}",
1335 /// Helper for enumerating the field types of structs, enums, or records. The optional node ID here
1336 /// is the node ID of the path identifying the enum variant in use. If none, this cannot possibly be
1337 /// an enum variant (so, if it is and `node_id_opt` is none, this function panics).
1338 pub fn with_field_tys<'tcx, R, F>(tcx: &ty::ctxt<'tcx>,
1340 node_id_opt: Option<ast::NodeId>,
1343 F: FnOnce(ty::Disr, &[ty::Field<'tcx>]) -> R,
// Structs: fields come straight off the type, normalized for
// associated types after monomorphization.
1346 ty::TyStruct(did, substs) => {
1347 let fields = tcx.struct_fields(did, substs);
1348 let fields = monomorphize::normalize_associated_type(tcx, &fields);
// Tuples: synthesize positional field names ("0", "1", ...).
1352 ty::TyTuple(ref v) => {
1353 let fields: Vec<_> = v.iter().enumerate().map(|(i, &f)| {
1355 name: token::intern(&i.to_string()),
1356 mt: ty::TypeAndMut {
1358 mutbl: ast::MutImmutable
// Enums: the caller must have supplied the node ID of the path so we
// can resolve which *variant* is in play; the enum ID alone is not
// enough to enumerate fields.
1365 ty::TyEnum(_, substs) => {
1366 // We want the *variant* ID here, not the enum ID.
1369 tcx.sess.bug(&format!(
1370 "cannot get field types from the enum type {:?} \
1375 let def = tcx.def_map.borrow().get(&node_id).unwrap().full_def();
1377 def::DefVariant(enum_id, variant_id, _) => {
1378 let variant_info = tcx.enum_variant_with_id(enum_id, variant_id);
1379 let fields = tcx.struct_fields(variant_id, substs);
1380 let fields = monomorphize::normalize_associated_type(tcx, &fields);
1381 op(variant_info.disr_val, &fields[..])
1384 tcx.sess.bug("resolve didn't map this expr to a \
// Any other type has no fields to enumerate — compiler bug.
1393 tcx.sess.bug(&format!(
1394 "cannot get field types from the type {:?}",
// Translates a struct literal `S { f: e, ..base }` into `dest`. Matches
// each written field against the type's field list, collects the indices
// still needing values ("leftovers") from the optional base expression,
// and (in elided code) hands everything to `trans_adt`.
// NOTE(review): partial listing — intervening source lines are elided.
1400 fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1401 fields: &[ast::Field],
1402 base: Option<&ast::Expr>,
1403 expr_span: codemap::Span,
1404 expr_id: ast::NodeId,
1406 dest: Dest) -> Block<'blk, 'tcx> {
1407 let _icx = push_ctxt("trans_rec");
1409 let tcx = bcx.tcx();
1410 with_field_tys(tcx, ty, Some(expr_id), |discr, field_tys| {
// need_base[i] is true until field i is explicitly initialized.
1411 let mut need_base = vec![true; field_tys.len()];
// Pair each written field with its positional index in the type.
1413 let numbered_fields = fields.iter().map(|field| {
1415 field_tys.iter().position(|field_ty|
1416 field_ty.name == field.ident.node.name);
1417 let result = match opt_pos {
1419 need_base[i] = false;
// Typeck should have rejected unknown field names already.
1423 tcx.sess.span_bug(field.span,
1424 "Couldn't find field in struct type")
1428 }).collect::<Vec<_>>();
1429 let optbase = match base {
1430 Some(base_expr) => {
// Fields not written explicitly are copied from the base.
1431 let mut leftovers = Vec::new();
1432 for (i, b) in need_base.iter().enumerate() {
1434 leftovers.push((i, field_tys[i].mt.ty));
1437 Some(StructBaseInfo {expr: base_expr,
1438 fields: leftovers })
// No base: every field must have been written explicitly.
1441 if need_base.iter().any(|b| *b) {
1442 tcx.sess.span_bug(expr_span, "missing fields and no base expr")
1454 DebugLoc::At(expr_id, expr_span))
1458 /// Information that `trans_adt` needs in order to fill in the fields
1459 /// of a struct copied from a base struct (e.g., from an expression
1460 /// like `Foo { a: b, ..base }`).
1462 /// Note that `fields` may be empty; the base expression must always be
1463 /// evaluated for side-effects.
1464 pub struct StructBaseInfo<'a, 'tcx> {
1465 /// The base expression; will be evaluated after all explicit fields.
1466 expr: &'a ast::Expr,
1467 /// The indices of fields to copy paired with their types.
1468 fields: Vec<(usize, Ty<'tcx>)>
1471 /// Constructs an ADT instance:
1473 /// - `fields` should be a list of field indices paired with the
1474 /// expression to store into that field. The initializers will be
1475 /// evaluated in the order specified by `fields`.
1477 /// - `optbase` contains information on the base struct (if any) from
1478 /// which remaining fields are copied; see comments on `StructBaseInfo`.
///
/// Three construction strategies are visible below: a SIMD fast path
/// (build the vector in a register via `insertelement`), a functional-
/// record-update path (fields to scratch first, then base, then fields —
/// see issue 23112 for the ordering rationale), and a direct in-place
/// path when there is no base.
/// NOTE(review): partial listing — intervening source lines are elided.
1479 pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1482 fields: &[(usize, &ast::Expr)],
1483 optbase: Option<StructBaseInfo<'a, 'tcx>>,
1485 debug_location: DebugLoc)
1486 -> Block<'blk, 'tcx> {
1487 let _icx = push_ctxt("trans_adt");
1489 let repr = adt::represent_type(bcx.ccx(), ty);
1491 debug_location.apply(bcx.fcx);
1493 // If we don't care about the result, just make a
1494 // temporary stack slot
1495 let addr = match dest {
1497 Ignore => alloc_ty(bcx, ty, "temp"),
1500 // This scope holds intermediates that must be cleaned should
1501 // panic occur before the ADT as a whole is ready.
1502 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1504 if ty.is_simd(bcx.tcx()) {
1505 // Issue 23112: The original logic appeared vulnerable to same
1506 // order-of-eval bug. But, SIMD values are tuple-structs;
1507 // i.e. functional record update (FRU) syntax is unavailable.
1509 // To be safe, double-check that we did not get here via FRU.
1510 assert!(optbase.is_none());
1512 // This is the constructor of a SIMD type, such types are
1513 // always primitive machine types and so do not have a
1514 // destructor or require any clean-up.
1515 let llty = type_of::type_of(bcx.ccx(), ty);
1517 // keep a vector as a register, and running through the field
1518 // `insertelement`ing them directly into that register
1519 // (i.e. avoid GEPi and `store`s to an alloca) .
1520 let mut vec_val = C_undef(llty);
1522 for &(i, ref e) in fields {
1523 let block_datum = trans(bcx, &**e);
1524 bcx = block_datum.bcx;
1525 let position = C_uint(bcx.ccx(), i);
1526 let value = block_datum.datum.to_llscalarish(bcx);
1527 vec_val = InsertElement(bcx, vec_val, value, position);
// Single store of the fully-built vector into the destination.
1529 Store(bcx, vec_val, addr);
1530 } else if let Some(base) = optbase {
1531 // Issue 23112: If there is a base, then order-of-eval
1532 // requires field expressions eval'ed before base expression.
1534 // First, trans field expressions to temporary scratch values.
1535 let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
1536 let datum = unpack_datum!(bcx, trans(bcx, &**e));
1540 debug_location.apply(bcx.fcx);
1542 // Second, trans the base to the dest.
// FRU with a base only applies to structs, whose discriminant is 0.
1543 assert_eq!(discr, 0);
1545 match expr_kind(bcx.tcx(), &*base.expr) {
// If the base needs no drop, it can be written whole into the
// destination and the explicit fields overwritten afterwards.
1546 ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
1547 bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
1549 ExprKind::RvalueStmt => {
1550 bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
// Otherwise copy only the leftover fields out of the base lvalue.
1553 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
1554 for &(i, t) in &base.fields {
1555 let datum = base_datum.get_element(
1556 bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
1557 assert!(type_is_sized(bcx.tcx(), datum.ty));
1558 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1559 bcx = datum.store_to(bcx, dest);
1564 // Finally, move scratch field values into actual field locations
1565 for (i, datum) in scratch_vals {
1566 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1567 bcx = datum.store_to(bcx, dest);
1570 // No base means we can write all fields directly in place.
1571 for &(i, ref e) in fields {
1572 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1573 let e_ty = expr_ty_adjusted(bcx, &**e);
1574 bcx = trans_into(bcx, &**e, SaveIn(dest));
// Schedule cleanup for this field in case a later field panics.
1575 let scope = cleanup::CustomScope(custom_cleanup_scope);
1576 fcx.schedule_lifetime_end(scope, dest);
1577 fcx.schedule_drop_mem(scope, dest, e_ty);
1581 adt::trans_set_discr(bcx, &*repr, addr, discr);
// The value is complete; partial-initialization cleanups no longer apply.
1583 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1585 // If we don't care about the result drop the temporary we made
1589 bcx = glue::drop_ty(bcx, addr, ty, debug_location);
1590 base::call_lifetime_end(bcx, addr);
// Translates a non-string literal into an immediate rvalue datum by
// emitting the corresponding LLVM constant.
1597 fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1600 -> DatumBlock<'blk, 'tcx, Expr> {
1601 // must not be a string constant, that is a RvalueDpsExpr
1602 let _icx = push_ctxt("trans_immediate_lit");
1603 let ty = expr_ty(bcx, expr);
1604 let v = consts::const_lit(bcx.ccx(), expr, lit);
1605 immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
// Translates a unary operator expression (`!`, `-`, `box`, `*`) into a
// datum. Overloaded operators other than deref never reach this path.
// NOTE(review): partial listing — intervening source lines are elided
// (the `match op` arms' headers are among the missing lines).
1608 fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1611 sub_expr: &ast::Expr)
1612 -> DatumBlock<'blk, 'tcx, Expr> {
1613 let ccx = bcx.ccx();
1615 let _icx = push_ctxt("trans_unary_datum");
1617 let method_call = MethodCall::expr(expr.id);
1619 // The only overloaded operator that is translated to a datum
1620 // is an overloaded deref, since it always yields a `&T`.
1621 // Otherwise, we should be in the RvalueDpsExpr path.
1622 assert!(op == ast::UnDeref || !ccx.tcx().is_method_call(expr.id));
1624 let un_ty = expr_ty(bcx, expr);
1626 let debug_loc = expr.debug_loc();
// `!` — bitwise/logical not on the scalar value.
1630 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1631 let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
1632 immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
// `-` — float negation, or integer negation with an overflow check
// on `MIN` for signed types when overflow checks are enabled.
1635 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1636 let val = datum.to_llscalarish(bcx);
1637 let (bcx, llneg) = {
1639 let result = FNeg(bcx, val, debug_loc);
1642 let is_signed = un_ty.is_signed();
1643 let result = Neg(bcx, val, debug_loc);
1644 let bcx = if bcx.ccx().check_overflow() && is_signed {
// Negating the minimum signed value overflows; trap on it.
1645 let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
1646 let is_min = ICmp(bcx, llvm::IntEQ, val,
1647 C_integral(llty, min, true), debug_loc);
1648 with_cond(bcx, is_min, |bcx| {
1649 let msg = InternedString::new(
1650 "attempted to negate with overflow");
1651 controlflow::trans_fail(bcx, expr_info(expr), msg)
1659 immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
// `box e` — heap allocation.
1662 trans_uniq_expr(bcx, expr, un_ty, sub_expr, expr_ty(bcx, sub_expr))
// `*e` — (possibly overloaded) dereference.
1665 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1666 deref_once(bcx, expr, datum, method_call)
// Translates `box contents`: allocates heap space for `contents_ty`,
// translates the contents into it, and yields the box pointer as an
// rvalue datum. A cleanup to free the allocation is kept live only while
// the contents are being translated, in case that translation panics.
1671 fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1672 box_expr: &ast::Expr,
1674 contents: &ast::Expr,
1675 contents_ty: Ty<'tcx>)
1676 -> DatumBlock<'blk, 'tcx, Expr> {
1677 let _icx = push_ctxt("trans_uniq_expr");
// Boxing of unsized values is not handled here.
1679 assert!(type_is_sized(bcx.tcx(), contents_ty));
1680 let llty = type_of::type_of(bcx.ccx(), contents_ty);
1681 let size = llsize_of(bcx.ccx(), llty);
1682 let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
1683 let llty_ptr = llty.ptr_to();
1684 let Result { bcx, val } = malloc_raw_dyn(bcx,
1689 box_expr.debug_loc());
1690 // Unique boxes do not allocate for zero-size types. The standard library
1691 // may assume that `free` is never called on the pointer returned for
1692 // `Box<ZeroSizeType>`.
1693 let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
// ZST: nothing was allocated, so no free-cleanup is scheduled.
1694 trans_into(bcx, contents, SaveIn(val))
// Non-ZST: guard the fresh allocation with a temporary cleanup that
// frees it if translating the contents unwinds.
1696 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1697 fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
1698 val, cleanup::HeapExchange, contents_ty);
1699 let bcx = trans_into(bcx, contents, SaveIn(val));
1700 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1703 immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
// Takes the address of an unsized (DST) lvalue: copies its fat-pointer
// representation into a scratch slot typed as `&'static T` (region and
// mutability are irrelevant in trans) and returns that as the datum.
1706 fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1707 lval: Datum<'tcx, Lvalue>)
1708 -> DatumBlock<'blk, 'tcx, Expr> {
1709 let dest_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), lval.ty);
1710 let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
1711 memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
1713 DatumBlock::new(bcx, scratch.to_expr_datum())
// Translates `&subexpr`: forces the subexpression into an lvalue (which
// schedules cleanup for temporaries) and yields its address — a thin
// pointer for sized types, a fat pointer (via `ref_fat_ptr`) for DSTs.
1716 fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1718 subexpr: &ast::Expr)
1719 -> DatumBlock<'blk, 'tcx, Expr> {
1720 let _icx = push_ctxt("trans_addr_of");
1722 let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
1723 if !type_is_sized(bcx.tcx(), sub_datum.ty) {
1724 // DST lvalue, close to a fat pointer
1725 ref_fat_ptr(bcx, sub_datum)
1727 // Sized value, ref to a thin pointer
1728 let ty = expr_ty(bcx, expr);
1729 immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
1733 // Important to get types for both lhs and rhs, because one might be _|_
1734 // and the other not.
// Translates a non-lazy, non-overloaded binary operator on already-
// evaluated scalar (or SIMD) operands. Arithmetic ops get overflow
// checks where enabled; div/rem get zero checks for integers; the
// result is returned as an immediate rvalue datum.
// NOTE(review): partial listing — intervening source lines (including
// several match-arm headers) are elided.
1735 fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1736 binop_expr: &ast::Expr,
1743 -> DatumBlock<'blk, 'tcx, Expr> {
1744 let _icx = push_ctxt("trans_eager_binop");
1746 let tcx = bcx.tcx();
1747 let is_simd = lhs_t.is_simd(tcx);
// For SIMD operands, classify by the element type.
1748 let intype = if is_simd {
1749 lhs_t.simd_type(tcx)
1753 let is_float = intype.is_fp();
1754 let is_signed = intype.is_signed();
1755 let info = expr_info(binop_expr);
1757 let binop_debug_loc = binop_expr.debug_loc();
1760 let val = match op.node {
// Add: float / plain / overflow-checked variants.
1763 FAdd(bcx, lhs, rhs, binop_debug_loc)
1765 Add(bcx, lhs, rhs, binop_debug_loc)
1767 let (newbcx, res) = with_overflow_check(
1768 bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
// Sub: same three variants.
1775 FSub(bcx, lhs, rhs, binop_debug_loc)
1777 Sub(bcx, lhs, rhs, binop_debug_loc)
1779 let (newbcx, res) = with_overflow_check(
1780 bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
// Mul: same three variants.
1787 FMul(bcx, lhs, rhs, binop_debug_loc)
1789 Mul(bcx, lhs, rhs, binop_debug_loc)
1791 let (newbcx, res) = with_overflow_check(
1792 bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
// Div: floats divide directly; integers are zero/overflow-checked
// first, then signed vs unsigned division.
1799 FDiv(bcx, lhs, rhs, binop_debug_loc)
1801 // Only zero-check integers; fp /0 is NaN
1802 bcx = base::fail_if_zero_or_overflows(bcx,
1803 expr_info(binop_expr),
1809 SDiv(bcx, lhs, rhs, binop_debug_loc)
1811 UDiv(bcx, lhs, rhs, binop_debug_loc)
// Rem: analogous to Div.
1817 FRem(bcx, lhs, rhs, binop_debug_loc)
1819 // Only zero-check integers; fp %0 is NaN
1820 bcx = base::fail_if_zero_or_overflows(bcx,
1821 expr_info(binop_expr),
1822 op, lhs, rhs, rhs_t);
1824 SRem(bcx, lhs, rhs, binop_debug_loc)
1826 URem(bcx, lhs, rhs, binop_debug_loc)
1830 ast::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
1831 ast::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
1832 ast::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
// Shifts are always overflow-checked (shift amount >= bit width).
1834 let (newbcx, res) = with_overflow_check(
1835 bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
1840 let (newbcx, res) = with_overflow_check(
1841 bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
// Comparisons: SIMD and scalar take different comparison helpers.
1845 ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => {
1847 base::compare_simd_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
1849 base::compare_scalar_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
// Lazy ops (&&, ||) are handled in trans_lazy_binop, not here.
1853 bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1857 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1860 // refinement types would obviate the need for this
// Discriminates `&&` vs `||` for trans_lazy_binop. The variant list
// (presumably `lazy_and` / `lazy_or`, per the uses below) is elided
// from this listing.
1861 enum lazy_binop_ty {
// Translates short-circuiting `&&`/`||`: evaluates the LHS, branches to
// either the RHS block or the join block depending on the operator, and
// merges the two incoming values with a phi at the join.
// NOTE(review): partial listing — intervening source lines are elided.
1866 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1867 binop_expr: &ast::Expr,
1871 -> DatumBlock<'blk, 'tcx, Expr> {
1872 let _icx = push_ctxt("trans_lazy_binop");
1873 let binop_ty = expr_ty(bcx, binop_expr);
1876 let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1877 let lhs = lhs.to_llscalarish(past_lhs);
// If the LHS diverged there is nothing more to emit.
1879 if past_lhs.unreachable.get() {
1880 return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1883 let join = fcx.new_id_block("join", binop_expr.id);
1884 let before_rhs = fcx.new_id_block("before_rhs", b.id);
// `&&` evaluates the RHS when the LHS is true; `||` when it is false.
1887 lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1888 lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1891 let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1892 let rhs = rhs.to_llscalarish(past_rhs);
// RHS diverged: only the short-circuit edge reaches the join, so the
// result is just the LHS value.
1894 if past_rhs.unreachable.get() {
1895 return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1898 Br(past_rhs, join.llbb, DebugLoc::None);
1899 let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1900 &[past_lhs.llbb, past_rhs.llbb]);
1902 return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
// Translates a non-overloaded binary expression to a datum: `&&`/`||`
// go through the lazy path, everything else evaluates both operands to
// scalars and defers to `trans_eager_binop`.
// NOTE(review): partial listing — intervening source lines are elided.
1905 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1910 -> DatumBlock<'blk, 'tcx, Expr> {
1911 let _icx = push_ctxt("trans_binary");
1912 let ccx = bcx.ccx();
1914 // if overloaded, would be RvalueDpsExpr
1915 assert!(!ccx.tcx().is_method_call(expr.id));
1919 trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1922 trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
// Eager path: both operands are fully evaluated first.
1926 let lhs_datum = unpack_datum!(bcx, trans(bcx, lhs));
1927 let rhs_datum = unpack_datum!(bcx, trans(bcx, rhs));
1928 let binop_ty = expr_ty(bcx, expr);
1930 debug!("trans_binary (expr {}): lhs_datum={}",
1932 lhs_datum.to_string(ccx));
1933 let lhs_ty = lhs_datum.ty;
1934 let lhs = lhs_datum.to_llscalarish(bcx);
1936 debug!("trans_binary (expr {}): rhs_datum={}",
1938 rhs_datum.to_string(ccx));
1939 let rhs_ty = rhs_datum.ty;
1940 let rhs = rhs_datum.to_llscalarish(bcx);
1941 trans_eager_binop(bcx, expr, binop_ty, op,
1942 lhs_ty, lhs, rhs_ty, rhs)
// Translates an overloaded operator (binary, unary, or index) as a call
// to its trait method, with the operands packaged as `ArgOverloadedOp`.
// `autoref` indicates whether the arguments should be auto-referenced
// (per call sites: by-ref binops and indexing).
// NOTE(review): partial listing — intervening source lines are elided.
1947 fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1949 method_call: MethodCall,
1950 lhs: Datum<'tcx, Expr>,
1951 rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>,
1954 -> Result<'blk, 'tcx> {
1955 callee::trans_call_inner(bcx,
1957 |bcx, arg_cleanup_scope| {
1958 meth::trans_method_callee(bcx,
1963 callee::ArgOverloadedOp(lhs, rhs, autoref),
// Translates `callee(args...)` where the call goes through one of the
// `Fn` traits: the callee itself becomes the first argument and the
// whole thing is emitted as a method call on it.
// NOTE(review): partial listing — intervening source lines are elided.
1967 fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1969 callee: &'a ast::Expr,
1970 args: &'a [P<ast::Expr>],
1972 -> Block<'blk, 'tcx> {
1973 debug!("trans_overloaded_call {}", expr.id);
1974 let method_call = MethodCall::expr(expr.id);
// The receiver (the callable value) is prepended to the argument list.
1975 let mut all_args = vec!(callee);
1976 all_args.extend(args.iter().map(|e| &**e));
1978 callee::trans_call_inner(bcx,
1980 |bcx, arg_cleanup_scope| {
1981 meth::trans_method_callee(
1987 callee::ArgOverloadedCall(all_args),
// Decides whether a cast can be skipped entirely at trans time: coercion
// casts are always no-ops, and (per the visible pointer arm) certain
// pointer-to-pointer casts are compared via their pointee types. The
// rest of the decision logic is elided from this listing.
1992 pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
1997 if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
2001 match (t_in.builtin_deref(true), t_out.builtin_deref(true)) {
2002 (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
2006 // This condition isn't redundant with the check for CoercionCast:
2007 // different types can be substituted into the same type, and
2008 // == equality can be overconservative if there are regions.
// Translates `expr as T` for casts that produce an immediate value.
// Handles: no-op casts, fat-pointer adjustments, C-enum-to-int, and all
// scalar conversions (int<->int, float<->float, int<->float, ptr<->int,
// ptr<->ptr) via the helpers below.
// NOTE(review): partial listing — intervening source lines are elided.
2014 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2017 -> DatumBlock<'blk, 'tcx, Expr>
2019 use middle::cast::CastTy::*;
2020 use middle::cast::IntTy::*;
// Integer-to-integer cast: same width is a bitcast, narrowing truncates,
// widening sign- or zero-extends depending on source signedness.
2022 fn int_cast(bcx: Block,
2029 let _icx = push_ctxt("int_cast");
2030 let srcsz = llsrctype.int_width();
2031 let dstsz = lldsttype.int_width();
2032 return if dstsz == srcsz {
2033 BitCast(bcx, llsrc, lldsttype)
2034 } else if srcsz > dstsz {
2035 TruncOrBitCast(bcx, llsrc, lldsttype)
2037 SExtOrBitCast(bcx, llsrc, lldsttype)
2039 ZExtOrBitCast(bcx, llsrc, lldsttype)
// Float-to-float cast: extend when widening, truncate when narrowing
// (the equal-width branch is elided).
2043 fn float_cast(bcx: Block,
2049 let _icx = push_ctxt("float_cast");
2050 let srcsz = llsrctype.float_width();
2051 let dstsz = lldsttype.float_width();
2052 return if dstsz > srcsz {
2053 FPExt(bcx, llsrc, lldsttype)
2054 } else if srcsz > dstsz {
2055 FPTrunc(bcx, llsrc, lldsttype)
2059 let _icx = push_ctxt("trans_cast");
2061 let ccx = bcx.ccx();
2063 let t_in = expr_ty_adjusted(bcx, expr);
2064 let t_out = node_id_type(bcx, id);
2066 debug!("trans_cast({:?} as {:?})", t_in, t_out);
2067 let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
2068 let ll_t_out = type_of::arg_type_of(ccx, t_out);
2069 // Convert the value to be cast into a ValueRef, either by-ref or
2070 // by-value as appropriate given its type:
2071 let mut datum = unpack_datum!(bcx, trans(bcx, expr));
2073 let datum_ty = monomorphize_type(bcx, datum.ty);
// No-op cast: reuse the operand datum unchanged.
2075 if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
2077 return DatumBlock::new(bcx, datum);
// Fat-pointer input: either re-point the fat pointer (fat -> fat) or
// extract just the data pointer (fat -> thin).
2080 if type_is_fat_ptr(bcx.tcx(), t_in) {
2081 assert!(datum.kind.is_by_ref());
2082 if type_is_fat_ptr(bcx.tcx(), t_out) {
2083 return DatumBlock::new(bcx, Datum::new(
2084 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
2087 )).to_expr_datumblock();
2089 // Return the address
2090 return immediate_rvalue_bcx(bcx,
2092 Load(bcx, get_dataptr(bcx, datum.val)),
2094 t_out).to_expr_datumblock();
2098 let r_t_in = CastTy::from_ty(bcx.tcx(), t_in).expect("bad input type for cast");
2099 let r_t_out = CastTy::from_ty(bcx.tcx(), t_out).expect("bad output type for cast");
// C-like enums cast via their discriminant value; its signedness comes
// from the enum representation rather than the surface type.
2101 let (llexpr, signed) = if let Int(CEnum) = r_t_in {
2102 let repr = adt::represent_type(ccx, t_in);
2103 let datum = unpack_datum!(
2104 bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
2105 let llexpr_ptr = datum.to_llref();
2106 let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
2107 ll_t_in = val_ty(discr);
2108 (discr, adt::is_discr_signed(&*repr))
2110 (datum.to_llscalarish(bcx), t_in.is_signed())
// Scalar conversion dispatch on (input class, output class).
2113 let newval = match (r_t_in, r_t_out) {
2114 (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
2115 PointerCast(bcx, llexpr, ll_t_out)
2117 (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
2118 (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
2120 (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
2121 (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
2122 (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
2123 (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
2124 (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
2125 (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
2127 _ => ccx.sess().span_bug(expr.span,
2128 &format!("translating unsupported cast: \
2134 return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
// Translates a compound assignment `dst op= src` (non-overloaded only):
// loads the current value of `dst`, computes `dst op src` eagerly, and
// stores the result back into the destination lvalue.
2137 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2142 -> Block<'blk, 'tcx> {
2143 let _icx = push_ctxt("trans_assign_op");
2146 debug!("trans_assign_op(expr={:?})", expr);
2148 // User-defined operator methods cannot be used with `+=` etc right now
2149 assert!(!bcx.tcx().is_method_call(expr.id));
2151 // Evaluate LHS (destination), which should be an lvalue
2152 let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"))
2153 assert!(!bcx.fcx.type_needs_drop(dst_datum.ty));
2154 let dst_ty = dst_datum.ty;
2155 let dst = load_ty(bcx, dst_datum.val, dst_datum.ty);
// Evaluate the RHS after the destination (LHS-then-RHS order).
2158 let rhs_datum = unpack_datum!(bcx, trans(bcx, &*src));
2159 let rhs_ty = rhs_datum.ty;
2160 let rhs = rhs_datum.to_llscalarish(bcx);
2162 // Perform computation and store the result
2163 let result_datum = unpack_datum!(
2164 bcx, trans_eager_binop(bcx, expr, dst_datum.ty, op,
2165 dst_ty, dst, rhs_ty, rhs));
2166 return result_datum.store_to(bcx, dst_datum.val);
// Produces `&datum`: ensures the datum is an lvalue (scheduling cleanup
// for any temporary) and wraps its address in a reference-typed datum.
2169 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2170 datum: Datum<'tcx, Expr>,
2172 -> DatumBlock<'blk, 'tcx, Expr> {
2175 // Ensure cleanup of `datum` if not already scheduled and obtain
2176 // a "by ref" pointer.
2177 let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2179 // Compute final type. Note that we are loose with the region and
2180 // mutability, since those things don't matter in trans.
2181 let referent_ty = lv_datum.ty;
2182 let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2185 let llref = lv_datum.to_llref();
2187 // Construct the resulting datum, using what was the "by ref"
2188 // ValueRef of type `referent_ty` to be the "by value" ValueRef
2189 // of type `&referent_ty`.
2190 // Pointers to DST types are non-immediate, and therefore still use ByRef.
2191 let kind = if type_is_sized(bcx.tcx(), referent_ty) { ByValue } else { ByRef };
2192 DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(kind))))
// Applies `deref_once` repeatedly for an autoderef adjustment chain,
// using a distinct `MethodCall::autoderef(expr.id, i)` key per level so
// each step can resolve its own (possibly overloaded) deref method.
2195 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2197 datum: Datum<'tcx, Expr>,
2199 -> DatumBlock<'blk, 'tcx, Expr> {
2201 let mut datum = datum;
2203 let method_call = MethodCall::autoderef(expr.id, i as u32);
2204 datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2206 DatumBlock { bcx: bcx, datum: datum }
// Performs a single dereference of `datum`. Overloaded derefs call the
// user's `deref()` method to obtain a `&T` and then fall into the
// built-in pointer path; built-in derefs handle `Box`, `&T` and `*T`,
// with separate sized and DST (fat-pointer) treatments.
// NOTE(review): partial listing — intervening source lines are elided.
2209 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2211 datum: Datum<'tcx, Expr>,
2212 method_call: MethodCall)
2213 -> DatumBlock<'blk, 'tcx, Expr> {
2214 let ccx = bcx.ccx();
2216 debug!("deref_once(expr={:?}, datum={}, method_call={:?})",
2218 datum.to_string(ccx),
2223 // Check for overloaded deref.
2224 let method_ty = ccx.tcx()
2228 .get(&method_call).map(|method| method.ty);
2230 let datum = match method_ty {
2231 Some(method_ty) => {
2232 let method_ty = monomorphize_type(bcx, method_ty);
2234 // Overloaded. Evaluate `trans_overloaded_op`, which will
2235 // invoke the user's deref() method, which basically
2236 // converts from the `Smaht<T>` pointer that we have into
2237 // a `&T` pointer. We can then proceed down the normal
2238 // path (below) to dereference that `&T`.
2239 let datum = if method_call.autoderef == 0 {
2242 // Always perform an AutoPtr when applying an overloaded auto-deref
2243 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
2246 let ref_ty = // invoked methods have their LB regions instantiated
2247 ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap();
2248 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2250 unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
2251 datum, None, Some(SaveIn(scratch.val)),
2253 scratch.to_expr_datum()
2256 // Not overloaded. We already have a pointer we know how to deref.
2261 let r = match datum.ty.sty {
2262 ty::TyBox(content_ty) => {
2263 // Make sure we have an lvalue datum here to get the
2264 // proper cleanups scheduled
2265 let datum = unpack_datum!(
2266 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2268 if type_is_sized(bcx.tcx(), content_ty) {
2269 let ptr = load_ty(bcx, datum.val, datum.ty);
2270 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
2272 // A fat pointer and a DST lvalue have the same representation
2273 // just different types. Since there is no temporary for `*e`
2274 // here (because it is unsized), we cannot emulate the sized
2275 // object code path for running drop glue and free. Instead,
2276 // we schedule cleanup for `e`, turning it into an lvalue.
2278 let datum = Datum::new(datum.val, content_ty, LvalueExpr);
2279 DatumBlock::new(bcx, datum)
2283 ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
2284 ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
2285 if type_is_sized(bcx.tcx(), content_ty) {
2286 let ptr = datum.to_llscalarish(bcx);
2288 // Always generate an lvalue datum, even if datum.mode is
2289 // an rvalue. This is because datum.mode is only an
2290 // rvalue for non-owning pointers like &T or *T, in which
2291 // case cleanup *is* scheduled elsewhere, by the true
2292 // owner (or, in the case of *T, by the user).
2293 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
2295 // A fat pointer and a DST lvalue have the same representation
2296 // just different types.
2297 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr))
// Dereferencing any other type is a compiler bug at this stage.
2302 bcx.tcx().sess.span_bug(
2304 &format!("deref invoked on expr of illegal type {:?}",
2309 debug!("deref_once(expr={}, method_call={:?}, result={})",
2310 expr.id, method_call, r.datum.to_string(ccx));
// Chooses how overflow checking is code-generated for this operator:
// add/sub/mul are lowered through LLVM `*.with.overflow` intrinsics,
// while shl/shr are handled by checking the shift-amount input instead.
// NOTE(review): this extract is missing interior lines here (e.g. the
// `match` header and closing braces); the leading numbers are residual
// line numbers from the original file.
2325 fn codegen_strategy(&self) -> OverflowCodegen {
2326 use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
// Arithmetic ops: use the checked-arithmetic intrinsics.
2328 OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2329 OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2330 OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
// Shifts: no intrinsic; the RHS (shift amount) is range-checked up front.
2332 OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2333 OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
// The two strategies for emitting an overflow check; `codegen_strategy`
// above decides which operator uses which.
// NOTE(review): the enum's closing brace is missing from this extract.
2338 enum OverflowCodegen {
// Lower through an LLVM `*.with.overflow` intrinsic (add/sub/mul).
2339 ViaIntrinsic(OverflowOpViaIntrinsic),
// Validate the input (the shift amount) before emitting the op (shl/shr).
2340 ViaInputCheck(OverflowOpViaInputCheck),
/// Overflow-checked shift operators. Shifts are not lowered through an
/// LLVM `*.with.overflow` intrinsic; instead the right-hand operand (the
/// shift amount) is range-checked before the shift itself is emitted
/// (see `build_with_input_check`).
enum OverflowOpViaInputCheck {
    /// Left shift (`<<`).
    Shl,
    /// Right shift (`>>`).
    Shr,
}
/// Overflow-checked arithmetic operators that are lowered through the
/// LLVM `llvm.{s,u}{add,sub,mul}.with.overflow.*` intrinsics; the exact
/// intrinsic name is chosen by `to_intrinsic_name` from the operand type.
enum OverflowOpViaIntrinsic {
    /// Checked addition.
    Add,
    /// Checked subtraction.
    Sub,
    /// Checked multiplication.
    Mul,
}
// NOTE(review): this extract drops interior lines throughout this impl
// (match headers, closing braces, the `rhs` parameter line); the leading
// numbers are residual line numbers from the original file. Comments
// below describe only what the visible code shows.
2348 impl OverflowOpViaIntrinsic {
// Looks up the LLVM overflow intrinsic (as a function value) that
// implements this checked operation for `lhs_ty`.
2349 fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2350 let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2351 bcx.ccx().get_intrinsic(&name)
// Maps (operation, integer type) to the name of the corresponding LLVM
// `*.with.overflow` intrinsic: signed types use the `s*` variants,
// unsigned types the `u*` variants.
2353 fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
2354 use syntax::ast::IntTy::*;
2355 use syntax::ast::UintTy::*;
2356 use middle::ty::{TyInt, TyUint};
// Normalize isize/usize to the concrete 32- or 64-bit type of the
// compilation target, so they share the fixed-width intrinsic names.
2358 let new_sty = match ty.sty {
2359 TyInt(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] {
2360 "32" => TyInt(TyI32),
2361 "64" => TyInt(TyI64),
2362 _ => panic!("unsupported target word size")
2364 TyUint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] {
2365 "32" => TyUint(TyU32),
2366 "64" => TyUint(TyU64),
2367 _ => panic!("unsupported target word size")
// Fixed-width integer types pass through unchanged.
2369 ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
// Overflow intrinsics only exist for integral types.
2370 _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
// Addition: llvm.sadd.* for signed, llvm.uadd.* for unsigned.
2375 OverflowOpViaIntrinsic::Add => match new_sty {
2376 TyInt(TyI8) => "llvm.sadd.with.overflow.i8",
2377 TyInt(TyI16) => "llvm.sadd.with.overflow.i16",
2378 TyInt(TyI32) => "llvm.sadd.with.overflow.i32",
2379 TyInt(TyI64) => "llvm.sadd.with.overflow.i64",
2381 TyUint(TyU8) => "llvm.uadd.with.overflow.i8",
2382 TyUint(TyU16) => "llvm.uadd.with.overflow.i16",
2383 TyUint(TyU32) => "llvm.uadd.with.overflow.i32",
2384 TyUint(TyU64) => "llvm.uadd.with.overflow.i64",
// isize/usize were normalized away above, so no other forms remain.
2386 _ => unreachable!(),
// Subtraction: llvm.ssub.* / llvm.usub.*.
2388 OverflowOpViaIntrinsic::Sub => match new_sty {
2389 TyInt(TyI8) => "llvm.ssub.with.overflow.i8",
2390 TyInt(TyI16) => "llvm.ssub.with.overflow.i16",
2391 TyInt(TyI32) => "llvm.ssub.with.overflow.i32",
2392 TyInt(TyI64) => "llvm.ssub.with.overflow.i64",
2394 TyUint(TyU8) => "llvm.usub.with.overflow.i8",
2395 TyUint(TyU16) => "llvm.usub.with.overflow.i16",
2396 TyUint(TyU32) => "llvm.usub.with.overflow.i32",
2397 TyUint(TyU64) => "llvm.usub.with.overflow.i64",
2399 _ => unreachable!(),
// Multiplication: llvm.smul.* / llvm.umul.*.
2401 OverflowOpViaIntrinsic::Mul => match new_sty {
2402 TyInt(TyI8) => "llvm.smul.with.overflow.i8",
2403 TyInt(TyI16) => "llvm.smul.with.overflow.i16",
2404 TyInt(TyI32) => "llvm.smul.with.overflow.i32",
2405 TyInt(TyI64) => "llvm.smul.with.overflow.i64",
2407 TyUint(TyU8) => "llvm.umul.with.overflow.i8",
2408 TyUint(TyU16) => "llvm.umul.with.overflow.i16",
2409 TyUint(TyU32) => "llvm.umul.with.overflow.i32",
2410 TyUint(TyU64) => "llvm.umul.with.overflow.i64",
2412 _ => unreachable!(),
// Emits the checked operation: calls the intrinsic (which yields a pair
// of {iN result, i1 overflow-flag}), and on the overflow path raises the
// "arithmetic operation overflowed" panic. Returns the (possibly new)
// block and the iN result value.
2417 fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2418 info: NodeIdAndSpan,
2419 lhs_t: Ty<'tcx>, lhs: ValueRef,
2421 binop_debug_loc: DebugLoc)
2422 -> (Block<'blk, 'tcx>, ValueRef) {
2423 let llfn = self.to_intrinsic(bcx, lhs_t);
2425 let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
2426 let result = ExtractValue(bcx, val, 0); // iN operation result
2427 let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
// cond is true iff the intrinsic's overflow bit equals 1.
2429 let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
// Hint the optimizer that the expected value of cond is 0 (no overflow).
2432 let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2433 Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2434 None, binop_debug_loc);
// Conditionally branch into a failure block that triggers the panic.
2437 base::with_cond(bcx, cond, |bcx|
2438 controlflow::trans_fail(bcx, info,
2439 InternedString::new("arithmetic operation overflowed")));
// NOTE(review): interior lines are missing from this extract (e.g. the
// `lhs`/`rhs` parameter lines and closing braces); the leading numbers
// are residual line numbers from the original file.
2445 impl OverflowOpViaInputCheck {
// Emits an overflow-checked shift by validating the shift amount first:
// if any RHS bits outside the always-valid range are set, the
// "shift operation overflowed" panic is raised; otherwise the masked,
// unchecked shift result is returned.
2446 fn build_with_input_check<'blk, 'tcx>(&self,
2447 bcx: Block<'blk, 'tcx>,
2448 info: NodeIdAndSpan,
2452 binop_debug_loc: DebugLoc)
2453 -> (Block<'blk, 'tcx>, ValueRef)
2455 let lhs_llty = val_ty(lhs);
2456 let rhs_llty = val_ty(rhs);
2458 // Panic if any bits are set outside of bits that we always
2461 // Note that the mask's value is derived from the LHS type
2462 // (since that is where the 32/64 distinction is relevant) but
2463 // the mask's type must match the RHS type (since they will
2464 // both be fed into a and-binop)
2465 let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
// Any nonzero bit here means the shift amount is out of range.
2467 let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2468 let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
// Emit the shift itself via the unchecked (but RHS-masked) builders.
2469 let result = match *self {
2470 OverflowOpViaInputCheck::Shl =>
2471 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2472 OverflowOpViaInputCheck::Shr =>
2473 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
// On an out-of-range shift amount, branch to the panic block.
2476 base::with_cond(bcx, cond, |bcx|
2477 controlflow::trans_fail(bcx, info,
2478 InternedString::new("shift operation overflowed")));
// Builds the constant mask of valid shift-amount bits for `llty`
// (bit-width minus one, e.g. 7 for i8, 31 for i32), typed as `mask_llty`.
// With `invert` set, the bitwise complement is returned instead. Vectors
// are handled by building the element mask and splatting it.
// NOTE(review): the `llty`/`mask_llty` parameter lines and the `match`
// header are missing from this extract; the leading numbers are residual
// line numbers from the original file.
2484 fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2487 invert: bool) -> ValueRef {
2488 let kind = llty.kind();
2490 TypeKind::Integer => {
2491 // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
2492 let val = llty.int_width() - 1;
// Inverted mask selects exactly the always-invalid high bits.
2494 C_integral(mask_llty, !val, true)
2496 C_integral(mask_llty, val, false)
2499 TypeKind::Vector => {
// Recurse on the element type, then broadcast to the full vector.
2500 let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
2501 VectorSplat(bcx, mask_llty.vector_length(), mask)
2503 _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
2507 // Check if an integer or vector contains a nonzero element.
// Returns an i1 that is true iff `value` is nonzero. Integers compare
// against zero directly; vectors are bitcast to one wide integer so a
// single comparison covers every lane.
// NOTE(review): the `value` parameter line and the `match` header are
// missing from this extract; the leading numbers are residual line
// numbers from the original file.
2508 fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2510 binop_debug_loc: DebugLoc) -> ValueRef {
2511 let llty = val_ty(value);
2512 let kind = llty.kind();
2514 TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
2515 TypeKind::Vector => {
2516 // Check if any elements of the vector are nonzero by treating
2517 // it as a wide integer and checking if the integer is nonzero.
2518 let width = llty.vector_length() as u64 * llty.element_type().int_width();
2519 let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
2520 build_nonzero_check(bcx, int_value, binop_debug_loc)
2522 _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
2526 // To avoid UB from LLVM, these two functions mask RHS with an
2527 // appropriate mask unconditionally (i.e. the fallback behavior for
2528 // all shifts). For 32- and 64-bit types, this matches the semantics
2529 // of Java. (See related discussion on #1877 and #10183.)
// Emits a left shift whose RHS is first cast to the LHS's width and then
// masked into the valid range, so LLVM never sees an over-wide shift.
// NOTE(review): the `lhs`/`rhs` parameter lines are missing from this
// extract; the leading numbers are residual original-file line numbers.
2531 fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2534 binop_debug_loc: DebugLoc) -> ValueRef {
2535 let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShl, lhs, rhs);
2536 // #1877, #10183: Ensure that input is always valid
2537 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
2538 Shl(bcx, lhs, rhs, binop_debug_loc)
// Emits a right shift with the RHS cast and masked like the left-shift
// case; the shift flavor depends on signedness: arithmetic (AShr) for
// signed operands, logical (LShr) for unsigned. For SIMD operands the
// element type's signedness is what is consulted.
// NOTE(review): some parameter lines and the `else` branch of the
// `intype` selection are missing from this extract; the leading numbers
// are residual original-file line numbers.
2541 fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2545 binop_debug_loc: DebugLoc) -> ValueRef {
2546 let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShr, lhs, rhs);
2547 // #1877, #10183: Ensure that input is always valid
2548 let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
2549 let tcx = bcx.tcx();
2550 let is_simd = lhs_t.is_simd(tcx);
// For vectors, signedness comes from the element type.
2551 let intype = if is_simd {
2552 lhs_t.simd_type(tcx)
2556 let is_signed = intype.is_signed();
2558 AShr(bcx, lhs, rhs, binop_debug_loc)
2560 LShr(bcx, lhs, rhs, binop_debug_loc)
// ANDs a shift amount with the valid-bits mask for its own type
// (width - 1), clamping it into range before the shift is emitted.
// NOTE(review): the `rhs` parameter line is missing from this extract;
// the leading numbers are residual original-file line numbers.
2564 fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2566 debug_loc: DebugLoc) -> ValueRef {
2567 let rhs_llty = val_ty(rhs);
2568 And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
// Entry point for emitting a possibly-overflow-checked binary op.
// If the block is already unreachable, returns undef without emitting
// anything. When overflow checks are enabled, dispatches to the
// strategy chosen by `codegen_strategy`; otherwise emits the plain
// (unchecked) operation — shifts still go through the RHS-masking
// builders so the emitted IR stays well-defined.
// NOTE(review): interior lines (the `rhs` parameter, `else` branch
// header, shift match arms' patterns, closing braces) are missing from
// this extract; the leading numbers are residual original-file line
// numbers.
2571 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2572 lhs_t: Ty<'tcx>, lhs: ValueRef,
2574 binop_debug_loc: DebugLoc)
2575 -> (Block<'blk, 'tcx>, ValueRef) {
2576 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2577 if bcx.ccx().check_overflow() {
2579 match oop.codegen_strategy() {
2580 OverflowCodegen::ViaIntrinsic(oop) =>
2581 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2582 OverflowCodegen::ViaInputCheck(oop) =>
2583 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
// Checks disabled: emit the raw arithmetic / masked shifts.
2586 let res = match oop {
2587 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2588 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2589 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2592 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2594 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
2600 /// We categorize expressions into three kinds. The distinction between
2601 /// lvalue/rvalue is fundamental to the language. The distinction between the
2602 /// two kinds of rvalues is an artifact of trans which reflects how we will
2603 /// generate code for that kind of expression. See trans/expr.rs for more
2605 #[derive(Copy, Clone)]
2613 fn expr_kind(tcx: &ty::ctxt, expr: &ast::Expr) -> ExprKind {
2614 if tcx.is_method_call(expr.id) {
2615 // Overloaded operations are generally calls, and hence they are
2616 // generated via DPS, but there are a few exceptions:
2617 return match expr.node {
2618 // `a += b` has a unit result.
2619 ast::ExprAssignOp(..) => ExprKind::RvalueStmt,
2621 // the deref method invoked for `*a` always yields an `&T`
2622 ast::ExprUnary(ast::UnDeref, _) => ExprKind::Lvalue,
2624 // the index method invoked for `a[i]` always yields an `&T`
2625 ast::ExprIndex(..) => ExprKind::Lvalue,
2627 // in the general case, result could be any type, use DPS
2628 _ => ExprKind::RvalueDps
2633 ast::ExprPath(..) => {
2634 match tcx.resolve_expr(expr) {
2635 def::DefStruct(_) | def::DefVariant(..) => {
2636 if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty {
2638 ExprKind::RvalueDatum
2644 // Special case: A unit like struct's constructor must be called without () at the
2645 // end (like `UnitStruct`) which means this is an ExprPath to a DefFn. But in case
2646 // of unit structs this is should not be interpreted as function pointer but as
2647 // call to the constructor.
2648 def::DefFn(_, true) => ExprKind::RvalueDps,
2650 // Fn pointers are just scalar values.
2651 def::DefFn(..) | def::DefMethod(..) => ExprKind::RvalueDatum,
2653 // Note: there is actually a good case to be made that
2654 // DefArg's, particularly those of immediate type, ought to
2655 // considered rvalues.
2656 def::DefStatic(..) |
2658 def::DefLocal(..) => ExprKind::Lvalue,
2661 def::DefAssociatedConst(..) => ExprKind::RvalueDatum,
2666 &format!("uncategorized def for expr {}: {:?}",
2673 ast::ExprUnary(ast::UnDeref, _) |
2674 ast::ExprField(..) |
2675 ast::ExprTupField(..) |
2676 ast::ExprIndex(..) => {
2681 ast::ExprMethodCall(..) |
2682 ast::ExprStruct(..) |
2683 ast::ExprRange(..) |
2686 ast::ExprMatch(..) |
2687 ast::ExprClosure(..) |
2688 ast::ExprBlock(..) |
2689 ast::ExprRepeat(..) |
2690 ast::ExprVec(..) => {
2694 ast::ExprIfLet(..) => {
2695 tcx.sess.span_bug(expr.span, "non-desugared ExprIfLet");
2697 ast::ExprWhileLet(..) => {
2698 tcx.sess.span_bug(expr.span, "non-desugared ExprWhileLet");
2701 ast::ExprForLoop(..) => {
2702 tcx.sess.span_bug(expr.span, "non-desugared ExprForLoop");
2705 ast::ExprLit(ref lit) if ast_util::lit_is_str(&**lit) => {
2709 ast::ExprBreak(..) |
2710 ast::ExprAgain(..) |
2712 ast::ExprWhile(..) |
2714 ast::ExprAssign(..) |
2715 ast::ExprInlineAsm(..) |
2716 ast::ExprAssignOp(..) => {
2717 ExprKind::RvalueStmt
2720 ast::ExprLit(_) | // Note: LitStr is carved out above
2721 ast::ExprUnary(..) |
2722 ast::ExprBox(None, _) |
2723 ast::ExprAddrOf(..) |
2724 ast::ExprBinary(..) |
2725 ast::ExprCast(..) => {
2726 ExprKind::RvalueDatum
2729 ast::ExprBox(Some(ref place), _) => {
2730 // Special case `Box<T>` for now:
2731 let def_id = match tcx.def_map.borrow().get(&place.id) {
2732 Some(def) => def.def_id(),
2733 None => panic!("no def for place"),
2735 if tcx.lang_items.exchange_heap() == Some(def_id) {
2736 ExprKind::RvalueDatum
2742 ast::ExprParen(ref e) => expr_kind(tcx, &**e),
2744 ast::ExprMac(..) => {
2747 "macro expression remains after expansion");