1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Translation of Expressions
13 //! The expr module handles translation of expressions. The most general
14 //! translation routine is `trans()`, which will translate an expression
15 //! into a datum. `trans_into()` is also available, which will translate
16 //! an expression and write the result directly into memory, sometimes
17 //! avoiding the need for a temporary stack slot. Finally,
18 //! `trans_to_lvalue()` is available if you'd like to ensure that the
19 //! result has cleanup scheduled.
21 //! Internally, each of these functions dispatches to various other
22 //! expression functions depending on the kind of expression. We divide
23 //! up expressions into:
25 //! - **Datum expressions:** Those that most naturally yield values.
26 //! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
27 //! - **DPS expressions:** Those that most naturally write into a location
28 //! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
29 //! - **Statement expressions:** Those that do not generate a meaningful
30 //! result. Examples would be `while { ... }` or `return 44`.
32 //! Public entry points:
34 //! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
35 //! storing the result into `dest`. This is the preferred form, if you
38 //! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
39 //! `Datum` with the result. You can then store the datum, inspect
40 //! the value, etc. This may introduce temporaries if the datum is a
43 //! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
44 //! expression and ensures that the result has a cleanup associated with it,
45 //! creating a temporary stack slot if necessary.
47 //! - `trans_local_var -> Datum`: looks up a local variable or upvar.
49 #![allow(non_camel_case_types)]
51 pub use self::Dest::*;
52 use self::lazy_binop_ty::*;
55 use llvm::{self, ValueRef};
56 use middle::check_const;
58 use middle::lang_items::CoerceUnsizedTraitLangItem;
59 use middle::mem_categorization::Typer;
60 use middle::subst::{Substs, VecPerParamSpace};
62 use trans::{_match, adt, asm, base, callee, closure, consts, controlflow};
65 use trans::cleanup::{self, CleanupMethods};
68 use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
72 use trans::monomorphize;
75 use middle::cast::{CastKind, CastTy};
76 use middle::ty::{struct_fields, tup_fields};
77 use middle::ty::{AdjustDerefRef, AdjustReifyFnPointer, AdjustUnsafeFnPointer};
78 use middle::ty::{self, Ty};
79 use middle::ty::MethodCall;
80 use util::common::indenter;
81 use util::ppaux::Repr;
82 use trans::machine::{llsize_of, llsize_of_alloc};
83 use trans::type_::Type;
85 use syntax::{ast, ast_util, codemap};
86 use syntax::parse::token::InternedString;
88 use syntax::parse::token;
89 use std::iter::repeat;
94 // These are passed around by the code generating functions to track the
95 // destination of a computation's value.
// NOTE(review): lines are missing from this dump here — the `Dest` enum body
// (presumably `SaveIn(ValueRef)` / `Ignore` variants) and the `impl` header
// for the method below are not visible. Confirm against the original file.
97 #[derive(Copy, Clone, PartialEq)]
// Debug rendering of a `Dest`: either the LLVM value saved into, or "Ignore".
104 pub fn to_string(&self, ccx: &CrateContext) -> String {
106 SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
107 Ignore => "Ignore".to_string()
112 /// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
113 /// better optimized LLVM code.
// NOTE(review): this dump is missing interior lines (the `expr`/`dest`
// parameters, several closing braces and match arms) — code below is kept
// byte-identical; only comments were added.
114 pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
117 -> Block<'blk, 'tcx> {
120 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
// If the expression carries adjustments (autoderef/autoref/unsize), fall back
// to the general `trans` path, which knows how to apply them.
122 if bcx.tcx().adjustments.borrow().contains_key(&expr.id) {
123 // use trans, which may be less efficient but
124 // which will perform the adjustments:
125 let datum = unpack_datum!(bcx, trans(bcx, expr));
126 return datum.store_to_dest(bcx, dest, expr.id);
// Constant-qualification fast path: a fully-const expression can be emitted
// as an LLVM global and memcpy'd straight into `dest`.
129 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
130 if !qualif.intersects(
131 check_const::ConstQualif::NOT_CONST |
132 check_const::ConstQualif::NEEDS_DROP
134 if !qualif.intersects(check_const::ConstQualif::PREFER_IN_PLACE) {
135 if let SaveIn(lldest) = dest {
136 let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
137 bcx.fcx.param_substs);
138 // Cast pointer to destination, because constants
139 // have different types.
140 let lldest = PointerCast(bcx, lldest, val_ty(global));
141 memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
143 // Don't do anything in the Ignore case, consts don't need drop.
146 // The only way we're going to see a `const` at this point is if
147 // it prefers in-place instantiation, likely because it contains
148 // `[x; N]` somewhere within.
150 ast::ExprPath(..) => {
151 match bcx.def(expr.id) {
152 def::DefConst(did) => {
153 let const_expr = consts::get_const_expr(bcx.ccx(), did, expr);
154 // Temporarily get cleanup scopes out of the way,
155 // as they require sub-expressions to be contained
156 // inside the current AST scope.
157 // These should record no cleanups anyways, `const`
158 // can't have destructors.
159 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
161 // Lock emitted debug locations to the location of
162 // the constant reference expression.
163 debuginfo::with_source_location_override(bcx.fcx,
166 bcx = trans_into(bcx, const_expr, dest)
// Restore the cleanup scopes that were swapped out above; the recursive
// translation of the const body must not have registered any.
168 let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
170 assert!(scopes.is_empty());
181 debug!("trans_into() expr={}", expr.repr(bcx.tcx()));
// General path: open an AST cleanup scope, dispatch on the expression's
// categorization, then pop the scope (running any scheduled cleanups).
183 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
187 bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
189 let kind = ty::expr_kind(bcx.tcx(), expr);
191 ty::LvalueExpr | ty::RvalueDatumExpr => {
192 trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
194 ty::RvalueDpsExpr => {
195 trans_rvalue_dps_unadjusted(bcx, expr, dest)
197 ty::RvalueStmtExpr => {
198 trans_rvalue_stmt_unadjusted(bcx, expr)
202 bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
205 /// Translates an expression, returning a datum (and new block) encapsulating the result. When
206 /// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
// NOTE(review): interior lines are missing from this dump (parameter list,
// several else-branches and closing braces). Code is kept byte-identical.
208 pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
210 -> DatumBlock<'blk, 'tcx, Expr> {
211 debug!("trans(expr={})", bcx.expr_to_string(expr));
// Constant fast path, mirroring trans_into(): if the expression qualifies as
// a constant, materialize it as an LLVM global instead of translating it.
215 let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
216 let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS);
217 let global = if !qualif.intersects(
218 check_const::ConstQualif::NOT_CONST |
219 check_const::ConstQualif::NEEDS_DROP
221 let global = consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
222 bcx.fcx.param_substs);
224 if qualif.intersects(check_const::ConstQualif::HAS_STATIC_BORROWS) {
225 // Is borrowed as 'static, must return lvalue.
227 // Cast pointer to global, because constants have different types.
228 let const_ty = expr_ty_adjusted(bcx, expr);
229 let llty = type_of::type_of(bcx.ccx(), const_ty);
230 let global = PointerCast(bcx, global, llty.ptr_to());
231 let datum = Datum::new(global, const_ty, Lvalue);
232 return DatumBlock::new(bcx, datum.to_expr_datum());
235 // Otherwise, keep around and perform adjustments, if needed.
236 let const_ty = if adjusted_global {
237 expr_ty_adjusted(bcx, expr)
242 // This could use a better heuristic.
// Immediate constants are loaded directly from the global; non-immediate
// ones are memcpy'd into a fresh stack slot so they behave like rvalues.
243 Some(if type_is_immediate(bcx.ccx(), const_ty) {
244 // Cast pointer to global, because constants have different types.
245 let llty = type_of::type_of(bcx.ccx(), const_ty);
246 let global = PointerCast(bcx, global, llty.ptr_to());
247 // Maybe just get the value directly, instead of loading it?
248 immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
250 let llty = type_of::type_of(bcx.ccx(), const_ty);
251 // HACK(eddyb) get around issues with lifetime intrinsics.
252 let scratch = alloca_no_lifetime(bcx, llty, "const");
253 let lldest = if !ty::type_is_structural(const_ty) {
254 // Cast pointer to slot, because constants have different types.
255 PointerCast(bcx, scratch, val_ty(global))
257 // In this case, memcpy_ty calls llvm.memcpy after casting both
258 // source and destination to i8*, so we don't need any casts.
261 memcpy_ty(bcx, lldest, global, const_ty);
262 Datum::new(scratch, const_ty, Rvalue::new(ByRef))
// General path: translate unadjusted (or reuse the const rvalue), then apply
// any adjustments, all inside an AST cleanup scope.
268 let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
272 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
273 let datum = match global {
274 Some(rvalue) => rvalue.to_expr_datum(),
275 None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
277 let datum = if adjusted_global {
278 datum // trans::consts already performed adjustments.
280 unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
282 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
283 return DatumBlock::new(bcx, datum);
// Returns a pointer to the "extra" word of a fat pointer (FAT_PTR_EXTRA),
// i.e. the slice length or the trait-object vtable pointer.
// NOTE(review): the closing brace is missing from this dump.
286 pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
287 GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_EXTRA])
// Returns a pointer to the data-pointer word of a fat pointer (FAT_PTR_ADDR).
// NOTE(review): the closing brace is missing from this dump.
290 pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
291 GEPi(bcx, fat_ptr, &[0, abi::FAT_PTR_ADDR])
// Copies both words of a fat pointer (data pointer and extra/len word)
// from `src_ptr` to `dst_ptr` with paired load/store instructions.
294 pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
295 Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
296 Store(bcx, Load(bcx, get_len(bcx, src_ptr)), get_len(bcx, dst_ptr));
299 /// Retrieve the information we are losing (making dynamic) in an unsizing
302 /// The `old_info` argument is a bit funny. It is intended for use
303 /// in an upcast, where the new vtable for an object will be derived
304 /// from the old one.
// NOTE(review): the `source`/`target` type parameters and the return type
// line are missing from this dump.
305 pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
308 old_info: Option<ValueRef>,
309 param_substs: &'tcx Substs<'tcx>)
// Peel matching struct layers off both types until the differing tails are
// exposed, then compute the "extra" word for the resulting fat pointer.
311 let (source, target) = ty::struct_lockstep_tails(ccx.tcx(), source, target);
312 match (&source.sty, &target.sty) {
// [T; N] -> [T]: the extra word is the compile-time length N.
313 (&ty::ty_vec(_, Some(len)), &ty::ty_vec(_, None)) => C_uint(ccx, len),
314 (&ty::ty_trait(_), &ty::ty_trait(_)) => {
315 // For now, upcasts are limited to changes in marker
316 // traits, and hence never actually require an actual
317 // change to the vtable.
318 old_info.expect("unsized_info: missing old info for trait upcast")
// T -> dyn Trait: the extra word is a freshly-built vtable pointer.
320 (_, &ty::ty_trait(box ty::TyTrait { ref principal, .. })) => {
321 // Note that we preserve binding levels here:
322 let substs = principal.0.substs.with_self_ty(source).erase_regions();
323 let substs = ccx.tcx().mk_substs(substs);
324 let trait_ref = ty::Binder(ty::TraitRef { def_id: principal.def_id(),
326 consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs),
327 Type::vtable_ptr(ccx))
329 _ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {} -> {}",
330 source.repr(ccx.tcx()),
331 target.repr(ccx.tcx())))
335 /// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted
336 /// translation of `expr`.
// NOTE(review): parameter lines and several closing braces are missing from
// this dump; code below is byte-identical to what is visible.
337 fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
339 datum: Datum<'tcx, Expr>)
340 -> DatumBlock<'blk, 'tcx, Expr>
343 let mut datum = datum;
// No recorded adjustment for this expression: return the datum untouched.
344 let adjustment = match bcx.tcx().adjustments.borrow().get(&expr.id).cloned() {
346 return DatumBlock::new(bcx, datum);
350 debug!("unadjusted datum for expr {}: {} adjustment={:?}",
351 expr.repr(bcx.tcx()),
352 datum.to_string(bcx.ccx()),
355 AdjustReifyFnPointer => {
356 // FIXME(#19925) once fn item types are
357 // zero-sized, we'll need to do something here
359 AdjustUnsafeFnPointer => {
360 // purely a type-level thing
362 AdjustDerefRef(ref adj) => {
// Detect the "reborrow" pattern (one deref immediately re-ref'd) so the
// deref+ref pair can be skipped as a no-op.
363 let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
364 // We are a bit paranoid about adjustments and thus might have a re-
365 // borrow here which merely derefs and then refs again (it might have
366 // a different region or mutability, but we don't care here).
368 // Don't skip a conversion from Box<T> to &T, etc.
370 let method_call = MethodCall::autoderef(expr.id, 0);
371 if bcx.tcx().method_map.borrow().contains_key(&method_call) {
372 // Don't skip an overloaded deref.
// Apply the remaining (non-skipped) autoderefs.
384 if adj.autoderefs > skip_reborrows {
386 let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
387 datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
388 lval.to_expr_datum(),
389 adj.autoderefs - skip_reborrows));
392 // (You might think there is a more elegant way to do this than a
393 // skip_reborrows bool, but then you remember that the borrow checker exists).
394 if skip_reborrows == 0 && adj.autoref.is_some() {
// Unsized values must be ref'd as fat pointers; sized ones take a thin ref.
395 if !type_is_sized(bcx.tcx(), datum.ty) {
397 let lval = unpack_datum!(bcx,
398 datum.to_lvalue_datum(bcx, "ref_fat_ptr", expr.id));
399 datum = unpack_datum!(bcx, ref_fat_ptr(bcx, lval));
401 datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
// Finally, perform an unsizing coercion into a scratch slot if requested.
405 if let Some(target) = adj.unsize {
406 // We do not arrange cleanup ourselves; if we already are an
407 // L-value, then cleanup will have already been scheduled (and
408 // the `datum.to_rvalue_datum` call below will emit code to zero
409 // the drop flag when moving out of the L-value). If we are an
410 // R-value, then we do not need to schedule cleanup.
411 let source_datum = unpack_datum!(bcx,
412 datum.to_rvalue_datum(bcx, "__coerce_source"));
414 let target = bcx.monomorphize(&target);
415 let llty = type_of::type_of(bcx.ccx(), target);
417 // HACK(eddyb) get around issues with lifetime intrinsics.
418 let scratch = alloca_no_lifetime(bcx, llty, "__coerce_target");
419 let target_datum = Datum::new(scratch, target,
421 bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
422 datum = Datum::new(scratch, target,
423 RvalueExpr(Rvalue::new(ByRef)));
427 debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
428 DatumBlock::new(bcx, datum)
// Emits the code for an unsizing coercion (e.g. `&[T; N]` -> `&[T]`,
// `Box<T>` -> `Box<dyn Trait>`, or a struct containing such a pointer),
// writing the fat pointer / coerced struct into `target` (which is by-ref).
// NOTE(review): `span` parameter line and several closing braces are missing
// from this dump; code is kept byte-identical.
431 fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
433 source: Datum<'tcx, Rvalue>,
434 target: Datum<'tcx, Rvalue>)
435 -> Block<'blk, 'tcx> {
437 debug!("coerce_unsized({} -> {})",
438 source.to_string(bcx.ccx()),
439 target.to_string(bcx.ccx()));
441 match (&source.ty.sty, &target.ty.sty) {
// Pointer-to-pointer coercions: Box, &, &->*, *->*.
442 (&ty::ty_uniq(a), &ty::ty_uniq(b)) |
443 (&ty::ty_rptr(_, ty::mt { ty: a, .. }), &ty::ty_rptr(_, ty::mt { ty: b, .. })) |
444 (&ty::ty_rptr(_, ty::mt { ty: a, .. }), &ty::ty_ptr(ty::mt { ty: b, .. })) |
445 (&ty::ty_ptr(ty::mt { ty: a, .. }), &ty::ty_ptr(ty::mt { ty: b, .. })) => {
446 let (inner_source, inner_target) = (a, b);
448 let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
449 // Normally, the source is a thin pointer and we are
450 // adding extra info to make a fat pointer. The exception
451 // is when we are upcasting an existing object fat pointer
452 // to use a different vtable. In that case, we want to
453 // load out the original data pointer so we can repackage
455 (Load(bcx, get_dataptr(bcx, source.val)),
456 Some(Load(bcx, get_len(bcx, source.val))))
458 let val = if source.kind.is_by_ref() {
459 load_ty(bcx, source.val, source.ty)
// Compute the extra word (length or vtable) for the new fat pointer.
466 let info = unsized_info(bcx.ccx(), inner_source, inner_target,
467 old_info, bcx.fcx.param_substs);
469 // Compute the base pointer. This doesn't change the pointer value,
470 // but merely its type.
471 let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
472 let base = PointerCast(bcx, base, ptr_ty);
474 Store(bcx, base, get_dataptr(bcx, target.val));
475 Store(bcx, info, get_len(bcx, target.val));
478 // This can be extended to enums and tuples in the future.
479 // (&ty::ty_enum(def_id_a, _), &ty::ty_enum(def_id_b, _)) |
// Struct-to-struct coercion (CoerceUnsized): exactly one field is coerced
// recursively; all other fields are memcpy'd unchanged.
480 (&ty::ty_struct(def_id_a, _), &ty::ty_struct(def_id_b, _)) => {
481 assert_eq!(def_id_a, def_id_b);
483 // The target is already by-ref because it's to be written to.
484 let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
485 assert!(target.kind.is_by_ref());
// Build the CoerceUnsized trait reference and resolve which field to coerce
// from the selected impl.
487 let trait_substs = Substs::erased(VecPerParamSpace::new(vec![target.ty],
490 let trait_ref = ty::Binder(ty::TraitRef {
491 def_id: langcall(bcx, Some(span), "coercion",
492 CoerceUnsizedTraitLangItem),
493 substs: bcx.tcx().mk_substs(trait_substs)
496 let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) {
497 traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
498 ty::custom_coerce_unsized_kind(bcx.tcx(), impl_def_id)
501 bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {}",
502 vtable.repr(bcx.tcx())));
506 let repr_source = adt::represent_type(bcx.ccx(), source.ty);
507 let src_fields = match &*repr_source {
508 &adt::Repr::Univariant(ref s, _) => &s.fields,
509 _ => bcx.sess().span_bug(span,
510 &format!("Non univariant struct? (repr_source: {:?})",
513 let repr_target = adt::represent_type(bcx.ccx(), target.ty);
514 let target_fields = match &*repr_target {
515 &adt::Repr::Univariant(ref s, _) => &s.fields,
516 _ => bcx.sess().span_bug(span,
517 &format!("Non univariant struct? (repr_target: {:?})",
521 let coerce_index = match kind {
522 ty::CustomCoerceUnsized::Struct(i) => i
524 assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
526 let iter = src_fields.iter().zip(target_fields.iter()).enumerate();
527 for (i, (src_ty, target_ty)) in iter {
528 let ll_source = adt::trans_field_ptr(bcx, &repr_source, source.val, 0, i);
529 let ll_target = adt::trans_field_ptr(bcx, &repr_target, target.val, 0, i);
531 // If this is the field we need to coerce, recurse on it.
532 if i == coerce_index {
533 coerce_unsized(bcx, span,
534 Datum::new(ll_source, src_ty,
536 Datum::new(ll_target, target_ty,
537 Rvalue::new(ByRef)));
539 // Otherwise, simply copy the data from the source.
540 assert_eq!(src_ty, target_ty);
541 memcpy_ty(bcx, ll_target, ll_source, src_ty);
545 _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {} -> {}",
546 source.ty.repr(bcx.tcx()),
547 target.ty.repr(bcx.tcx())))
552 /// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
553 /// that the expr represents.
555 /// If this expression is an rvalue, this implies introducing a temporary. In other words,
556 /// something like `x().f` is translated into roughly the equivalent of
558 /// { tmp = x(); tmp.f }
// NOTE(review): the `expr`/`name` parameter lines and the closing brace are
// missing from this dump.
559 pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
562 -> DatumBlock<'blk, 'tcx, Lvalue> {
// Translate normally, then force the result into lvalue form (which
// schedules cleanup / spills to a stack slot as needed).
564 let datum = unpack_datum!(bcx, trans(bcx, expr));
565 return datum.to_lvalue_datum(bcx, name, expr.id);
568 /// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
// NOTE(review): parameter lines and several closing braces are missing from
// this dump; code below is byte-identical to what is visible.
570 fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
572 -> DatumBlock<'blk, 'tcx, Expr> {
575 debug!("trans_unadjusted(expr={})", bcx.expr_to_string(expr));
576 let _indenter = indenter();
578 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
// Dispatch on how the expression most naturally produces its value.
580 return match ty::expr_kind(bcx.tcx(), expr) {
581 ty::LvalueExpr | ty::RvalueDatumExpr => {
582 let datum = unpack_datum!(bcx, {
583 trans_datum_unadjusted(bcx, expr)
586 DatumBlock {bcx: bcx, datum: datum}
// Statement expressions yield no meaningful value; return an undef "nil".
589 ty::RvalueStmtExpr => {
590 bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
591 nil(bcx, expr_ty(bcx, expr))
// DPS expressions get a scratch slot to write into (or Ignore if zero-sized).
594 ty::RvalueDpsExpr => {
595 let ty = expr_ty(bcx, expr);
596 if type_is_zero_size(bcx.ccx(), ty) {
597 bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
600 let scratch = rvalue_scratch_datum(bcx, ty, "");
601 bcx = trans_rvalue_dps_unadjusted(
602 bcx, expr, SaveIn(scratch.val));
604 // Note: this is not obviously a good idea. It causes
605 // immediate values to be loaded immediately after a
606 // return from a call or other similar expression,
607 // which in turn leads to alloca's having shorter
608 // lifetimes and hence larger stack frames. However,
609 // in turn it can lead to more register pressure.
610 // Still, in practice it seems to increase
611 // performance, since we have fewer problems with
613 let scratch = unpack_datum!(
614 bcx, scratch.to_appropriate_datum(bcx));
616 DatumBlock::new(bcx, scratch.to_expr_datum())
// Nested helper: builds an undef immediate rvalue of the given type, used
// as the "value" of statement expressions.
621 fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
622 -> DatumBlock<'blk, 'tcx, Expr> {
623 let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
624 let datum = immediate_rvalue(llval, ty);
625 DatumBlock::new(bcx, datum.to_expr_datum())
// Translates expressions that naturally yield a datum (lvalues and
// rvalue-datum expressions), dispatching on the AST node kind.
// NOTE(review): parameter lines, the `match expr.node` header and several
// match-arm bodies/braces are missing from this dump.
629 fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
631 -> DatumBlock<'blk, 'tcx, Expr> {
634 let _icx = push_ctxt("trans_datum_unadjusted");
637 ast::ExprParen(ref e) => {
640 ast::ExprPath(..) => {
641 trans_def(bcx, expr, bcx.def(expr.id))
643 ast::ExprField(ref base, ident) => {
644 trans_rec_field(bcx, &**base, ident.node.name)
646 ast::ExprTupField(ref base, idx) => {
647 trans_rec_tup_field(bcx, &**base, idx.node)
649 ast::ExprIndex(ref base, ref idx) => {
650 trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
652 ast::ExprBox(_, ref contents) => {
653 // Special case for `Box<T>`
654 let box_ty = expr_ty(bcx, expr);
655 let contents_ty = expr_ty(bcx, &**contents);
658 trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty)
660 _ => bcx.sess().span_bug(expr.span,
661 "expected unique box")
665 ast::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit),
666 ast::ExprBinary(op, ref lhs, ref rhs) => {
667 trans_binary(bcx, expr, op, &**lhs, &**rhs)
669 ast::ExprUnary(op, ref x) => {
670 trans_unary(bcx, expr, op, &**x)
672 ast::ExprAddrOf(_, ref x) => {
// Vec/repeat under `&`: translate as a slice inside its own cleanup scope.
674 ast::ExprRepeat(..) | ast::ExprVec(..) => {
675 // Special case for slices.
676 let cleanup_debug_loc =
677 debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
681 fcx.push_ast_cleanup_scope(cleanup_debug_loc);
682 let datum = unpack_datum!(
683 bcx, tvec::trans_slice_vec(bcx, expr, &**x));
684 bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
685 DatumBlock::new(bcx, datum)
688 trans_addr_of(bcx, expr, &**x)
692 ast::ExprCast(ref val, _) => {
693 // Datum output mode means this is a scalar cast:
694 trans_imm_cast(bcx, &**val, expr.id)
697 bcx.tcx().sess.span_bug(
699 &format!("trans_rvalue_datum_unadjusted reached \
700 fall-through case: {:?}",
// Shared implementation behind named-field and tuple-field access:
// `get_idx` maps the field list to a concrete field index.
// NOTE(review): the `base` parameter line and some closing braces are
// missing from this dump.
706 fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
709 -> DatumBlock<'blk, 'tcx, Expr> where
710 F: FnOnce(&'blk ty::ctxt<'tcx>, &[ty::field<'tcx>]) -> usize,
713 let _icx = push_ctxt("trans_rec_field");
715 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
716 let bare_ty = base_datum.ty;
717 let repr = adt::represent_type(bcx.ccx(), bare_ty);
718 with_field_tys(bcx.tcx(), bare_ty, None, move |discr, field_tys| {
719 let ix = get_idx(bcx.tcx(), field_tys);
720 let d = base_datum.get_element(
723 |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, ix));
// Sized fields are returned directly; unsized fields are repackaged as a
// fat pointer carrying the base datum's extra (len/vtable) word.
725 if type_is_sized(bcx.tcx(), d.ty) {
726 DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
728 let scratch = rvalue_scratch_datum(bcx, d.ty, "");
729 Store(bcx, d.val, get_dataptr(bcx, scratch.val));
730 let info = Load(bcx, get_len(bcx, base_datum.val));
731 Store(bcx, info, get_len(bcx, scratch.val));
733 DatumBlock::new(bcx, scratch.to_expr_datum())
740 /// Translates `base.field`.
// NOTE(review): parameter lines and the closing brace are missing from this
// dump. Delegates to `trans_field`, resolving the field name to its index.
741 fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
744 -> DatumBlock<'blk, 'tcx, Expr> {
745 trans_field(bcx, base, |tcx, field_tys| ty::field_idx_strict(tcx, field, field_tys))
748 /// Translates `base.<idx>`.
// NOTE(review): parameter lines and the closing brace are missing from this
// dump. Delegates to `trans_field` with the literal tuple index.
749 fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
752 -> DatumBlock<'blk, 'tcx, Expr> {
753 trans_field(bcx, base, |_, _| idx)
// Translates `base[idx]`, handling both the built-in (slice/array) path and
// an overloaded `Index` impl resolved through the method map.
// NOTE(review): several parameter lines, match headers and closing braces
// are missing from this dump; code below is byte-identical.
756 fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
757 index_expr: &ast::Expr,
760 method_call: MethodCall)
761 -> DatumBlock<'blk, 'tcx, Expr> {
762 //! Translates `base[idx]`.
764 let _icx = push_ctxt("trans_index");
768 let index_expr_debug_loc = index_expr.debug_loc();
770 // Check for overloaded index.
771 let method_ty = ccx.tcx()
775 .map(|method| method.ty);
776 let elt_datum = match method_ty {
// Overloaded case: invoke the user's index() method; it yields a reference
// that is then dereferenced to the element.
778 let method_ty = monomorphize_type(bcx, method_ty);
780 let base_datum = unpack_datum!(bcx, trans(bcx, base));
782 // Translate index expression.
783 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
785 let ref_ty = // invoked methods have LB regions instantiated:
786 ty::no_late_bound_regions(
787 bcx.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
788 let elt_ty = match ty::deref(ref_ty, true) {
790 bcx.tcx().sess.span_bug(index_expr.span,
791 "index method didn't return a \
792 dereferenceable type?!")
794 Some(elt_tm) => elt_tm.ty,
797 // Overloaded. Evaluate `trans_overloaded_op`, which will
798 // invoke the user's index() method, which basically yields
799 // a `&T` pointer. We can then proceed down the normal
800 // path (below) to dereference that `&T`.
801 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
803 trans_overloaded_op(bcx,
807 vec![(ix_datum, idx.id)],
808 Some(SaveIn(scratch.val)),
810 let datum = scratch.to_expr_datum();
811 if type_is_sized(bcx.tcx(), elt_ty) {
812 Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr)
814 Datum::new(datum.val, elt_ty, LvalueExpr)
// Built-in case: lvalue the base, normalize the index width, bounds-check,
// then GEP to the element.
818 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
822 // Translate index expression and cast to a suitable LLVM integer.
823 // Rust is less strict than LLVM in this regard.
824 let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
825 let ix_val = ix_datum.to_llscalarish(bcx);
826 let ix_size = machine::llbitsize_of_real(bcx.ccx(),
828 let int_size = machine::llbitsize_of_real(bcx.ccx(),
831 if ix_size < int_size {
832 if ty::type_is_signed(expr_ty(bcx, idx)) {
833 SExt(bcx, ix_val, ccx.int_type())
834 } else { ZExt(bcx, ix_val, ccx.int_type()) }
835 } else if ix_size > int_size {
836 Trunc(bcx, ix_val, ccx.int_type())
842 let unit_ty = ty::sequence_element_type(bcx.tcx(), base_datum.ty);
844 let (base, len) = base_datum.get_vec_base_and_len(bcx);
846 debug!("trans_index: base {}", bcx.val_to_string(base));
847 debug!("trans_index: len {}", bcx.val_to_string(len));
// Bounds check, marked unlikely via llvm.expect so the failure branch is
// laid out cold; out-of-bounds jumps to the bounds-check failure path.
849 let bounds_check = ICmp(bcx,
853 index_expr_debug_loc);
854 let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
855 let expected = Call(bcx,
857 &[bounds_check, C_bool(ccx, false)],
859 index_expr_debug_loc);
860 bcx = with_cond(bcx, expected, |bcx| {
861 controlflow::trans_fail_bounds_check(bcx,
862 expr_info(index_expr),
866 let elt = InBoundsGEP(bcx, base, &[ix_val]);
867 let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
868 Datum::new(elt, unit_ty, LvalueExpr)
872 DatumBlock::new(bcx, elt_datum)
// Translates a path reference to a definition: fn items/methods/struct and
// variant constructors, statics, and (as the fall-through) local variables.
// NOTE(review): the `def` parameter line, the `match def` header and some
// closing braces are missing from this dump.
875 fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
876 ref_expr: &ast::Expr,
878 -> DatumBlock<'blk, 'tcx, Expr> {
879 //! Translates a reference to a path.
881 let _icx = push_ctxt("trans_def_lvalue");
883 def::DefFn(..) | def::DefMethod(..) |
884 def::DefStruct(_) | def::DefVariant(..) => {
885 let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def,
886 bcx.fcx.param_substs);
887 DatumBlock::new(bcx, datum.to_expr_datum())
889 def::DefStatic(did, _) => {
890 // There are two things that may happen here:
891 // 1) If the static item is defined in this crate, it will be
892 // translated using `get_item_val`, and we return a pointer to
894 // 2) If the static item is defined in another crate then we add
895 // (or reuse) a declaration of an external global, and return a
897 let const_ty = expr_ty(bcx, ref_expr);
899 // For external constants, we don't inline.
900 let val = if did.krate == ast::LOCAL_CRATE {
903 // The LLVM global has the type of its initializer,
904 // which may not be equal to the enum's type for
906 let val = base::get_item_val(bcx.ccx(), did.node);
907 let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
908 PointerCast(bcx, val, pty)
911 base::get_extern_const(bcx.ccx(), did, const_ty)
913 DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
// Consts are handled earlier (inlined via trans_into); reaching here is a bug.
915 def::DefConst(_) => {
916 bcx.sess().span_bug(ref_expr.span,
917 "constant expression should not reach expr::trans_def")
920 DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum())
// Translates statement-like expressions (loops, break/continue, return,
// assignment, inline asm) that produce no meaningful value.
// NOTE(review): the `expr` parameter line, the `match expr.node` header and
// several closing braces are missing from this dump.
925 fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
927 -> Block<'blk, 'tcx> {
929 let _icx = push_ctxt("trans_rvalue_stmt");
// Nothing to emit if control flow can't reach this point.
931 if bcx.unreachable.get() {
935 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
938 ast::ExprParen(ref e) => {
939 trans_into(bcx, &**e, Ignore)
941 ast::ExprBreak(label_opt) => {
942 controlflow::trans_break(bcx, expr, label_opt)
944 ast::ExprAgain(label_opt) => {
945 controlflow::trans_cont(bcx, expr, label_opt)
947 ast::ExprRet(ref ex) => {
948 // Check to see if the return expression itself is reachable.
949 // This can occur when the inner expression contains a return
950 let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
951 cfg.node_is_reachable(expr.id)
957 controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
959 // If it's not reachable, just translate the inner expression
960 // directly. This avoids having to manage a return slot when
961 // it won't actually be used anyway.
962 if let &Some(ref x) = ex {
963 bcx = trans_into(bcx, &**x, Ignore);
965 // Mark the end of the block as unreachable. Once we get to
966 // a return expression, there's no more we should be doing
972 ast::ExprWhile(ref cond, ref body, _) => {
973 controlflow::trans_while(bcx, expr, &**cond, &**body)
975 ast::ExprLoop(ref body, _) => {
976 controlflow::trans_loop(bcx, expr, &**body)
978 ast::ExprAssign(ref dst, ref src) => {
979 let src_datum = unpack_datum!(bcx, trans(bcx, &**src));
980 let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign"));
// If the destination type has a destructor, drop the old value first and
// force the source into an rvalue so it can't alias the destination.
982 if bcx.fcx.type_needs_drop(dst_datum.ty) {
983 // If there are destructors involved, make sure we
984 // are copying from an rvalue, since that cannot possible
985 // alias an lvalue. We are concerned about code like:
993 // where e.g. a : Option<Foo> and a.b :
994 // Option<Foo>. In that case, freeing `a` before the
995 // assignment may also free `a.b`!
997 // We could avoid this intermediary with some analysis
998 // to determine whether `dst` may possibly own `src`.
999 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
1000 let src_datum = unpack_datum!(
1001 bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
1002 bcx = glue::drop_ty(bcx,
1006 src_datum.store_to(bcx, dst_datum.val)
1008 src_datum.store_to(bcx, dst_datum.val)
1011 ast::ExprAssignOp(op, ref dst, ref src) => {
1012 trans_assign_op(bcx, expr, op, &**dst, &**src)
1014 ast::ExprInlineAsm(ref a) => {
1015 asm::trans_inline_asm(bcx, a)
1018 bcx.tcx().sess.span_bug(
1020 &format!("trans_rvalue_stmt_unadjusted reached \
1021 fall-through case: {:?}",
// Translates an rvalue expression in destination-passing style (DPS):
// the result is written directly into `dest` (either `SaveIn(ptr)` or
// `Ignore`) instead of being returned as a datum. Dispatches on the
// expression kind and forwards to the specialized routine for each
// construct; reaching the fall-through arm is a compiler bug.
1027 fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1030 -> Block<'blk, 'tcx> {
1031 let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
1033 let tcx = bcx.tcx();
1035 debuginfo::set_source_location(bcx.fcx, expr.id, expr.span);
// Parens are transparent: just translate the inner expression.
1038 ast::ExprParen(ref e) => {
1039 trans_into(bcx, &**e, dest)
1041 ast::ExprPath(..) => {
1042 trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest)
1044 ast::ExprIf(ref cond, ref thn, ref els) => {
1045 controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest)
1047 ast::ExprMatch(ref discr, ref arms, _) => {
1048 _match::trans_match(bcx, expr, &**discr, &arms[..], dest)
1050 ast::ExprBlock(ref blk) => {
1051 controlflow::trans_block(bcx, &**blk, dest)
1053 ast::ExprStruct(_, ref fields, ref base) => {
1056 base.as_ref().map(|e| &**e),
1059 node_id_type(bcx, expr.id),
// Range literals (`a..b`, `a..`, `..b`, `..`) have no dedicated trans
// path: they are desugared here into the corresponding Range* lang-item
// struct literal and translated via the ordinary ADT machinery.
1062 ast::ExprRange(ref start, ref end) => {
1063 // FIXME it is just not right that we are synthesising ast nodes in
// Builds a synthetic `field_name: expr` AST field for the desugaring.
1065 fn make_field(field_name: &str, expr: P<ast::Expr>) -> ast::Field {
1067 ident: codemap::dummy_spanned(token::str_to_ident(field_name)),
1069 span: codemap::DUMMY_SP,
1073 // A range just desugars into a struct.
1074 // Note that the type of the start and end may not be the same, but
1075 // they should only differ in their lifetime, which should not matter
// Select lang-item struct, fields, and type params by which bounds are
// present: Range, RangeFrom, RangeTo, or RangeFull.
1077 let (did, fields, ty_params) = match (start, end) {
1078 (&Some(ref start), &Some(ref end)) => {
1080 let fields = vec![make_field("start", start.clone()),
1081 make_field("end", end.clone())];
1082 (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
1084 (&Some(ref start), &None) => {
1085 // Desugar to RangeFrom
1086 let fields = vec![make_field("start", start.clone())];
1087 (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
1089 (&None, &Some(ref end)) => {
1090 // Desugar to RangeTo
1091 let fields = vec![make_field("end", end.clone())];
1092 (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
1095 // Desugar to RangeFull
1096 (tcx.lang_items.range_full_struct(), vec![], vec![])
1100 if let Some(did) = did {
1101 let substs = Substs::new_type(ty_params, vec![]);
1107 ty::mk_struct(tcx, did, tcx.mk_substs(substs)),
// Missing lang item: typeck should have rejected the program earlier.
1110 tcx.sess.span_bug(expr.span,
1111 "No lang item for ranges (how did we get this far?)")
// Tuples translate as an ADT with positionally-numbered fields.
1114 ast::ExprTup(ref args) => {
1115 let numbered_fields: Vec<(usize, &ast::Expr)> =
1116 args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
1120 &numbered_fields[..],
// Only string literals are DPS; other literals are datum expressions.
1125 ast::ExprLit(ref lit) => {
1127 ast::LitStr(ref s, _) => {
1128 tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
1133 .span_bug(expr.span,
1134 "trans_rvalue_dps_unadjusted shouldn't be \
1135 translating this type of literal")
1139 ast::ExprVec(..) | ast::ExprRepeat(..) => {
1140 tvec::trans_fixed_vstore(bcx, expr, dest)
1142 ast::ExprClosure(_, ref decl, ref body) => {
1143 let dest = match dest {
1144 SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
1145 Ignore => closure::Dest::Ignore(bcx.ccx())
1147 closure::trans_closure_expr(dest, &**decl, &**body, expr.id, bcx.fcx.param_substs)
// Calls: overloaded `Fn*` calls go through the method-call path;
// ordinary calls go through `callee::trans_call`.
1150 ast::ExprCall(ref f, ref args) => {
1151 if bcx.tcx().is_method_call(expr.id) {
1152 trans_overloaded_call(bcx,
1158 callee::trans_call(bcx,
1161 callee::ArgExprs(&args[..]),
1165 ast::ExprMethodCall(_, _, ref args) => {
1166 callee::trans_method_call(bcx,
1169 callee::ArgExprs(&args[..]),
// Overloaded binary/unary/index operators reach DPS only when they are
// method calls; operands are evaluated to datums and passed through
// `trans_overloaded_op`.
1172 ast::ExprBinary(op, ref lhs, ref rhs) => {
1173 // if not overloaded, would be RvalueDatumExpr
1174 let lhs = unpack_datum!(bcx, trans(bcx, &**lhs));
1175 let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs));
1176 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs,
1177 vec![(rhs_datum, rhs.id)], Some(dest),
1178 !ast_util::is_by_value_binop(op.node)).bcx
1180 ast::ExprUnary(op, ref subexpr) => {
1181 // if not overloaded, would be RvalueDatumExpr
1182 let arg = unpack_datum!(bcx, trans(bcx, &**subexpr));
1183 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id),
1184 arg, Vec::new(), Some(dest), !ast_util::is_by_value_unop(op)).bcx
1186 ast::ExprIndex(ref base, ref idx) => {
1187 // if not overloaded, would be RvalueDatumExpr
1188 let base = unpack_datum!(bcx, trans(bcx, &**base));
1189 let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx));
1190 trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), base,
1191 vec![(idx_datum, idx.id)], Some(dest), true).bcx
1193 ast::ExprCast(..) => {
1194 // Trait casts used to come this way, now they should be coercions.
1195 bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)")
1197 ast::ExprAssignOp(op, ref dst, ref src) => {
1198 trans_assign_op(bcx, expr, op, &**dst, &**src)
// Any other kind reaching this function indicates a bug in the
// expr-kind classification.
1201 bcx.tcx().sess.span_bug(
1203 &format!("trans_rvalue_dps_unadjusted reached fall-through \
// Translates a path expression that resolved to a definition requiring
// DPS treatment (enum variant constructors and unit-like structs),
// writing the result into `dest`. Other def kinds reaching here are a bug.
1210 fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1211 ref_expr: &ast::Expr,
1214 -> Block<'blk, 'tcx> {
1215 let _icx = push_ctxt("trans_def_dps_unadjusted");
// If the result is ignored there is nothing to materialize.
1217 let lldest = match dest {
1218 SaveIn(lldest) => lldest,
1219 Ignore => { return bcx; }
1223 def::DefVariant(tid, vid, _) => {
1224 let variant_info = ty::enum_variant_with_id(bcx.tcx(), tid, vid);
// Variant with arguments: the path denotes the constructor
// function, so store the fn pointer into the destination.
1225 if !variant_info.args.is_empty() {
1227 let llfn = callee::trans_fn_ref(bcx.ccx(), vid,
1228 ExprId(ref_expr.id),
1229 bcx.fcx.param_substs).val;
1230 Store(bcx, llfn, lldest);
// Nullary variant: construct the value in place by setting its
// discriminant.
1234 let ty = expr_ty(bcx, ref_expr);
1235 let repr = adt::represent_type(bcx.ccx(), ty);
1236 adt::trans_set_discr(bcx, &*repr, lldest,
1237 variant_info.disr_val);
1241 def::DefStruct(_) => {
1242 let ty = expr_ty(bcx, ref_expr);
// Unit-like struct with a dtor: still need to initialize the
// drop-flag/discriminant in the destination.
1244 ty::ty_struct(did, _) if ty::has_dtor(bcx.tcx(), did) => {
1245 let repr = adt::represent_type(bcx.ccx(), ty);
1246 adt::trans_set_discr(bcx, &*repr, lldest, 0);
1253 bcx.tcx().sess.span_bug(ref_expr.span, &format!(
1254 "Non-DPS def {:?} referened by {}",
1255 def, bcx.node_id_to_string(ref_expr.id)));
// Translates a path that resolved to a function-like definition (fn,
// tuple-struct/variant constructor, or method) into an rvalue datum
// holding the function pointer. Non-fn defs reaching here are a bug.
1260 pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1261 ref_expr: &ast::Expr,
1263 param_substs: &'tcx Substs<'tcx>)
1264 -> Datum<'tcx, Rvalue> {
1265 let _icx = push_ctxt("trans_def_datum_unadjusted");
// Plain fns, constructors, and inherent-impl methods share one path.
1268 def::DefFn(did, _) |
1269 def::DefStruct(did) | def::DefVariant(_, did, _) |
1270 def::DefMethod(did, def::FromImpl(_)) => {
1271 callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs)
// Trait methods referenced via UFCS-like paths need static-method
// dispatch through the trait.
1273 def::DefMethod(impl_did, def::FromTrait(trait_did)) => {
1274 meth::trans_static_method_callee(ccx, impl_did,
1275 trait_did, ref_expr.id,
1279 ccx.tcx().sess.span_bug(ref_expr.span, &format!(
1280 "trans_def_fn_unadjusted invoked on: {:?} for {}",
1282 ref_expr.repr(ccx.tcx())));
1287 /// Translates a reference to a local variable or argument. This always results in an lvalue datum.
1288 pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1290 -> Datum<'tcx, Lvalue> {
1291 let _icx = push_ctxt("trans_local_var");
// Upvars (captured variables) live in the function context's
// `llupvars` table rather than `lllocals`.
1294 def::DefUpvar(nid, _) => {
1295 // Can't move upvars, so this is never a ZeroMemLastUse.
1296 let local_ty = node_id_type(bcx, nid);
1297 match bcx.fcx.llupvars.borrow().get(&nid) {
1298 Some(&val) => Datum::new(val, local_ty, Lvalue),
// Missing entry means the capture analysis and trans disagree —
// an internal compiler error, not a user error.
1300 bcx.sess().bug(&format!(
1301 "trans_local_var: no llval for upvar {} found",
// Ordinary locals and arguments are looked up in `lllocals`.
1306 def::DefLocal(nid) => {
1307 let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
1310 bcx.sess().bug(&format!(
1311 "trans_local_var: no datum for local/arg {} found",
1315 debug!("take_local(nid={}, v={}, ty={})",
1316 nid, bcx.val_to_string(datum.val), bcx.ty_to_string(datum.ty));
// Any other def kind is not a "local variable" in trans's sense.
1320 bcx.sess().unimpl(&format!(
1321 "unsupported def type in trans_local_var: {:?}",
1327 /// Helper for enumerating the field types of structs, enums, or records. The optional node ID here
1328 /// is the node ID of the path identifying the enum variant in use. If none, this cannot possibly
1329 /// an enum variant (so, if it is and `node_id_opt` is none, this function panics).
1330 pub fn with_field_tys<'tcx, R, F>(tcx: &ty::ctxt<'tcx>,
1332 node_id_opt: Option<ast::NodeId>,
1335 F: FnOnce(ty::Disr, &[ty::field<'tcx>]) -> R,
// Structs: discriminant is implicitly 0 (single variant); normalize
// associated types in the field list before handing it to `op`.
1338 ty::ty_struct(did, substs) => {
1339 let fields = struct_fields(tcx, did, substs);
1340 let fields = monomorphize::normalize_associated_type(tcx, &fields);
// Tuples: synthesize positional fields.
1344 ty::ty_tup(ref v) => {
1345 op(0, &tup_fields(&v[..]))
1348 ty::ty_enum(_, substs) => {
1349 // We want the *variant* ID here, not the enum ID.
// Without a node id we cannot tell which variant is meant.
1352 tcx.sess.bug(&format!(
1353 "cannot get field types from the enum type {} \
// Resolve the path node to its variant def and enumerate that
// variant's fields with the variant's discriminant value.
1358 let def = tcx.def_map.borrow().get(&node_id).unwrap().full_def();
1360 def::DefVariant(enum_id, variant_id, _) => {
1361 let variant_info = ty::enum_variant_with_id(tcx, enum_id, variant_id);
1362 let fields = struct_fields(tcx, variant_id, substs);
1363 let fields = monomorphize::normalize_associated_type(tcx, &fields);
1364 op(variant_info.disr_val, &fields[..])
1367 tcx.sess.bug("resolve didn't map this expr to a \
// All other types have no fields to enumerate.
1376 tcx.sess.bug(&format!(
1377 "cannot get field types from the type {}",
// Translates a struct literal `S { f: e, ..base }` into `dest`.
// Resolves each named field to its positional index, determines which
// fields must come from the optional base expression (functional record
// update), and delegates the actual construction to `trans_adt`.
1383 fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1384 fields: &[ast::Field],
1385 base: Option<&ast::Expr>,
1386 expr_span: codemap::Span,
1387 expr_id: ast::NodeId,
1389 dest: Dest) -> Block<'blk, 'tcx> {
1390 let _icx = push_ctxt("trans_rec");
1392 let tcx = bcx.tcx();
1393 with_field_tys(tcx, ty, Some(expr_id), |discr, field_tys| {
// `need_base[i]` == true means field i was not given explicitly and
// must be copied from the base expression.
1394 let mut need_base: Vec<bool> = repeat(true).take(field_tys.len()).collect();
// Map each explicit field from its name to its positional index.
1396 let numbered_fields = fields.iter().map(|field| {
1398 field_tys.iter().position(|field_ty|
1399 field_ty.name == field.ident.node.name);
1400 let result = match opt_pos {
1402 need_base[i] = false;
// Typeck guarantees the field exists; a miss here is an ICE.
1406 tcx.sess.span_bug(field.span,
1407 "Couldn't find field in struct type")
1411 }).collect::<Vec<_>>();
1412 let optbase = match base {
1413 Some(base_expr) => {
// Collect the (index, type) of every field still needed from base.
1414 let mut leftovers = Vec::new();
1415 for (i, b) in need_base.iter().enumerate() {
1417 leftovers.push((i, field_tys[i].mt.ty));
1420 Some(StructBaseInfo {expr: base_expr,
1421 fields: leftovers })
// No base: every field must have been provided explicitly.
1424 if need_base.iter().any(|b| *b) {
1425 tcx.sess.span_bug(expr_span, "missing fields and no base expr")
1437 DebugLoc::At(expr_id, expr_span))
1441 /// Information that `trans_adt` needs in order to fill in the fields
1442 /// of a struct copied from a base struct (e.g., from an expression
1443 /// like `Foo { a: b, ..base }`).
1445 /// Note that `fields` may be empty; the base expression must always be
1446 /// evaluated for side-effects.
1447 pub struct StructBaseInfo<'a, 'tcx> {
1448 /// The base expression; will be evaluated after all explicit fields.
1449 expr: &'a ast::Expr,
1450 /// The indices of fields to copy paired with their types.
1451 fields: Vec<(usize, Ty<'tcx>)>
1454 /// Constructs an ADT instance:
1456 /// - `fields` should be a list of field indices paired with the
1457 /// expression to store into that field. The initializers will be
1458 /// evaluated in the order specified by `fields`.
1460 /// - `optbase` contains information on the base struct (if any) from
1461 /// which remaining fields are copied; see comments on `StructBaseInfo`.
1462 pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1465 fields: &[(usize, &ast::Expr)],
1466 optbase: Option<StructBaseInfo<'a, 'tcx>>,
1468 debug_location: DebugLoc)
1469 -> Block<'blk, 'tcx> {
1470 let _icx = push_ctxt("trans_adt");
1472 let repr = adt::represent_type(bcx.ccx(), ty);
1474 debug_location.apply(bcx.fcx);
1476 // If we don't care about the result, just make a
1477 // temporary stack slot
1478 let addr = match dest {
1480 Ignore => alloc_ty(bcx, ty, "temp"),
1483 // This scope holds intermediates that must be cleaned should
1484 // panic occur before the ADT as a whole is ready.
1485 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
// Path 1: SIMD constructors. Fields are built into a vector register
// via `insertelement` and stored once at the end.
1487 if ty::type_is_simd(bcx.tcx(), ty) {
1488 // Issue 23112: The original logic appeared vulnerable to same
1489 // order-of-eval bug. But, SIMD values are tuple-structs;
1490 // i.e. functional record update (FRU) syntax is unavailable.
1492 // To be safe, double-check that we did not get here via FRU.
1493 assert!(optbase.is_none());
1495 // This is the constructor of a SIMD type, such types are
1496 // always primitive machine types and so do not have a
1497 // destructor or require any clean-up.
1498 let llty = type_of::type_of(bcx.ccx(), ty);
1500 // keep a vector as a register, and running through the field
1501 // `insertelement`ing them directly into that register
1502 // (i.e. avoid GEPi and `store`s to an alloca) .
1503 let mut vec_val = C_undef(llty);
1505 for &(i, ref e) in fields {
1506 let block_datum = trans(bcx, &**e);
1507 bcx = block_datum.bcx;
1508 let position = C_uint(bcx.ccx(), i);
1509 let value = block_datum.datum.to_llscalarish(bcx);
1510 vec_val = InsertElement(bcx, vec_val, value, position);
1512 Store(bcx, vec_val, addr);
// Path 2: functional record update (`..base`). Explicit field
// expressions must be evaluated BEFORE the base expression, so they
// are first translated into scratch values, then the base is
// written to `addr`, and finally the scratch values overwrite their
// target fields.
1513 } else if let Some(base) = optbase {
1514 // Issue 23112: If there is a base, then order-of-eval
1515 // requires field expressions eval'ed before base expression.
1517 // First, trans field expressions to temporary scratch values.
1518 let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
1519 let datum = unpack_datum!(bcx, trans(bcx, &**e));
1523 debug_location.apply(bcx.fcx);
1525 // Second, trans the base to the dest.
1526 assert_eq!(discr, 0);
1528 match ty::expr_kind(bcx.tcx(), &*base.expr) {
// A droppable type must go through the field-by-field copy path
// below so drop obligations stay correct.
1529 ty::RvalueDpsExpr | ty::RvalueDatumExpr if !bcx.fcx.type_needs_drop(ty) => {
1530 bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
1532 ty::RvalueStmtExpr => bcx.tcx().sess.bug("unexpected expr kind for struct base expr"),
// General case: take the base as an lvalue and copy only the
// leftover fields element by element.
1534 let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base"));
1535 for &(i, t) in &base.fields {
1536 let datum = base_datum.get_element(
1537 bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i));
1538 assert!(type_is_sized(bcx.tcx(), datum.ty));
1539 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1540 bcx = datum.store_to(bcx, dest);
1545 // Finally, move scratch field values into actual field locations
1546 for (i, datum) in scratch_vals.into_iter() {
1547 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1548 bcx = datum.store_to(bcx, dest);
// Path 3: no base, no SIMD — evaluate every field directly into its
// slot, scheduling cleanup in case a later field initializer panics.
1551 // No base means we can write all fields directly in place.
1552 for &(i, ref e) in fields {
1553 let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
1554 let e_ty = expr_ty_adjusted(bcx, &**e);
1555 bcx = trans_into(bcx, &**e, SaveIn(dest));
1556 let scope = cleanup::CustomScope(custom_cleanup_scope);
1557 fcx.schedule_lifetime_end(scope, dest);
1558 fcx.schedule_drop_mem(scope, dest, e_ty);
1562 adt::trans_set_discr(bcx, &*repr, addr, discr);
// The value is fully initialized: partial-init cleanups are no
// longer needed.
1564 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1566 // If we don't care about the result drop the temporary we made
1570 bcx = glue::drop_ty(bcx, addr, ty, debug_location);
1571 base::call_lifetime_end(bcx, addr);
// Translates an immediate (non-string) literal into an rvalue datum by
// emitting the corresponding LLVM constant.
1578 fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1581 -> DatumBlock<'blk, 'tcx, Expr> {
1582 // must not be a string constant, that is a RvalueDpsExpr
1583 let _icx = push_ctxt("trans_immediate_lit");
1584 let ty = expr_ty(bcx, expr);
1585 let v = consts::const_lit(bcx.ccx(), expr, lit);
1586 immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
// Translates a non-overloaded unary expression (`!`, `-`, `box`, `*`)
// to a datum. The only overloaded operator handled here is deref.
1589 fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1592 sub_expr: &ast::Expr)
1593 -> DatumBlock<'blk, 'tcx, Expr> {
1594 let ccx = bcx.ccx();
1596 let _icx = push_ctxt("trans_unary_datum");
1598 let method_call = MethodCall::expr(expr.id);
1600 // The only overloaded operator that is translated to a datum
1601 // is an overloaded deref, since it is always yields a `&T`.
1602 // Otherwise, we should be in the RvalueDpsExpr path.
1604 op == ast::UnDeref ||
1605 !ccx.tcx().method_map.borrow().contains_key(&method_call));
1607 let un_ty = expr_ty(bcx, expr);
1609 let debug_loc = expr.debug_loc();
// Logical/bitwise not: evaluate the operand and emit `Not`.
1613 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1614 let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
1615 immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
// Negation: float negation is plain FNeg; signed integer negation
// additionally guards against negating the type's minimum value
// when overflow checks are enabled.
1618 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1619 let val = datum.to_llscalarish(bcx);
1620 let (bcx, llneg) = {
1621 if ty::type_is_fp(un_ty) {
1622 let result = FNeg(bcx, val, debug_loc);
1625 let is_signed = ty::type_is_signed(un_ty);
1626 let result = Neg(bcx, val, debug_loc);
1627 let bcx = if bcx.ccx().check_overflow() && is_signed {
1628 let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
1629 let is_min = ICmp(bcx, llvm::IntEQ, val,
1630 C_integral(llty, min, true), debug_loc);
1631 with_cond(bcx, is_min, |bcx| {
1632 let msg = InternedString::new(
1633 "attempted to negate with overflow");
1634 controlflow::trans_fail(bcx, expr_info(expr), msg)
1642 immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
// `box e`: allocate and fill a unique box.
1645 trans_uniq_expr(bcx, expr, un_ty, sub_expr, expr_ty(bcx, sub_expr))
// `*e`: single dereference (may be overloaded).
1648 let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
1649 deref_once(bcx, expr, datum, method_call)
// Translates `box contents`: allocates heap storage for `contents_ty`,
// translates the contents into it, and yields the box pointer as an
// immediate rvalue. A cleanup is scheduled around the contents
// translation so the allocation is freed if it panics.
1654 fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1655 box_expr: &ast::Expr,
1657 contents: &ast::Expr,
1658 contents_ty: Ty<'tcx>)
1659 -> DatumBlock<'blk, 'tcx, Expr> {
1660 let _icx = push_ctxt("trans_uniq_expr");
1662 assert!(type_is_sized(bcx.tcx(), contents_ty));
1663 let llty = type_of::type_of(bcx.ccx(), contents_ty);
1664 let size = llsize_of(bcx.ccx(), llty);
1665 let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
1666 let llty_ptr = llty.ptr_to();
1667 let Result { bcx, val } = malloc_raw_dyn(bcx,
1672 box_expr.debug_loc());
1673 // Unique boxes do not allocate for zero-size types. The standard library
1674 // may assume that `free` is never called on the pointer returned for
1675 // `Box<ZeroSizeType>`.
1676 let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
1677 trans_into(bcx, contents, SaveIn(val))
// Non-zero-sized: schedule a free of the raw allocation that is
// cancelled (scope popped) once the contents are fully initialized.
1679 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
1680 fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
1681 val, cleanup::HeapExchange, contents_ty);
1682 let bcx = trans_into(bcx, contents, SaveIn(val));
1683 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
1686 immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
// Takes the address of an unsized lvalue by copying its fat-pointer
// representation into a scratch slot typed `&'static T` (region and
// mutability are irrelevant in trans).
1689 fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1690 lval: Datum<'tcx, Lvalue>)
1691 -> DatumBlock<'blk, 'tcx, Expr> {
1692 let dest_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), lval.ty);
1693 let scratch = rvalue_scratch_datum(bcx, dest_ty, "__fat_ptr");
1694 memcpy_ty(bcx, scratch.val, lval.val, scratch.ty);
1696 DatumBlock::new(bcx, scratch.to_expr_datum())
// Translates `&subexpr`: forces the operand into an lvalue, then yields
// either its thin address (sized) or a fat pointer (unsized / DST).
1699 fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1701 subexpr: &ast::Expr)
1702 -> DatumBlock<'blk, 'tcx, Expr> {
1703 let _icx = push_ctxt("trans_addr_of");
1705 let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
1706 if !type_is_sized(bcx.tcx(), sub_datum.ty) {
1707 // DST lvalue, close to a fat pointer
1708 ref_fat_ptr(bcx, sub_datum)
1710 // Sized value, ref to a thin pointer
1711 let ty = expr_ty(bcx, expr);
1712 immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
1716 // Important to get types for both lhs and rhs, because one might be _|_
1717 // and the other not.
// Translates a non-lazy, non-overloaded binary operator on already
// evaluated scalar/SIMD operands. Integer add/sub/mul/shl/shr go
// through `with_overflow_check` (panics on overflow when checks are
// enabled); div/rem emit a zero-divisor check first.
1718 fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1719 binop_expr: &ast::Expr,
1726 -> DatumBlock<'blk, 'tcx, Expr> {
1727 let _icx = push_ctxt("trans_eager_binop");
1729 let tcx = bcx.tcx();
1730 let is_simd = ty::type_is_simd(tcx, lhs_t);
// For SIMD, classify by the element type, not the vector type.
1731 let intype = if is_simd {
1732 ty::simd_type(tcx, lhs_t)
1736 let is_float = ty::type_is_fp(intype);
1737 let is_signed = ty::type_is_signed(intype);
1738 let info = expr_info(binop_expr);
1740 let binop_debug_loc = binop_expr.debug_loc();
1743 let val = match op.node {
1746 FAdd(bcx, lhs, rhs, binop_debug_loc)
1748 Add(bcx, lhs, rhs, binop_debug_loc)
1750 let (newbcx, res) = with_overflow_check(
1751 bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
1758 FSub(bcx, lhs, rhs, binop_debug_loc)
1760 Sub(bcx, lhs, rhs, binop_debug_loc)
1762 let (newbcx, res) = with_overflow_check(
1763 bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
1770 FMul(bcx, lhs, rhs, binop_debug_loc)
1772 Mul(bcx, lhs, rhs, binop_debug_loc)
1774 let (newbcx, res) = with_overflow_check(
1775 bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
1782 FDiv(bcx, lhs, rhs, binop_debug_loc)
1784 // Only zero-check integers; fp /0 is NaN
1785 bcx = base::fail_if_zero_or_overflows(bcx,
1786 expr_info(binop_expr),
// Signed vs unsigned integer division use different LLVM ops.
1792 SDiv(bcx, lhs, rhs, binop_debug_loc)
1794 UDiv(bcx, lhs, rhs, binop_debug_loc)
1800 FRem(bcx, lhs, rhs, binop_debug_loc)
1802 // Only zero-check integers; fp %0 is NaN
1803 bcx = base::fail_if_zero_or_overflows(bcx,
1804 expr_info(binop_expr),
1805 op, lhs, rhs, rhs_t);
1807 SRem(bcx, lhs, rhs, binop_debug_loc)
1809 URem(bcx, lhs, rhs, binop_debug_loc)
// Bitwise ops cannot overflow; emit them directly.
1813 ast::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
1814 ast::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
1815 ast::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
// Shifts overflow when the shift amount >= bit width; checked here.
1817 let (newbcx, res) = with_overflow_check(
1818 bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
1823 let (newbcx, res) = with_overflow_check(
1824 bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
// Comparisons: SIMD and scalar types have separate comparison paths.
1828 ast::BiEq | ast::BiNe | ast::BiLt | ast::BiGe | ast::BiLe | ast::BiGt => {
1830 base::compare_simd_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
1832 base::compare_scalar_types(bcx, lhs, rhs, intype, op.node, binop_debug_loc)
// `&&`/`||` are handled by trans_lazy_binop, not here.
1836 bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop");
1840 immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
1843 // refinement types would obviate the need for this
// Distinguishes `&&` (lazy_and) from `||` (lazy_or) in trans_lazy_binop,
// which selects the branch targets of the short-circuit CondBr by it.
1844 enum lazy_binop_ty {
// Translates short-circuiting `&&`/`||`: evaluates the LHS, then
// conditionally branches to a `before_rhs` block (RHS needed) or
// straight to `join` (result already determined), merging the two
// values with a phi node in `join`.
1849 fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1850 binop_expr: &ast::Expr,
1854 -> DatumBlock<'blk, 'tcx, Expr> {
1855 let _icx = push_ctxt("trans_lazy_binop");
1856 let binop_ty = expr_ty(bcx, binop_expr);
1859 let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
1860 let lhs = lhs.to_llscalarish(past_lhs);
// If the LHS diverged there is nothing more to emit.
1862 if past_lhs.unreachable.get() {
1863 return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
1866 let join = fcx.new_id_block("join", binop_expr.id);
1867 let before_rhs = fcx.new_id_block("before_rhs", b.id);
// `&&`: take RHS when LHS is true; `||`: take RHS when LHS is false.
1870 lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
1871 lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
1874 let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
1875 let rhs = rhs.to_llscalarish(past_rhs);
// If the RHS diverges, only the short-circuit edge reaches `join`,
// so the result is just the LHS value.
1877 if past_rhs.unreachable.get() {
1878 return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
1881 Br(past_rhs, join.llbb, DebugLoc::None);
// Merge the value from whichever predecessor reached `join`.
1882 let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
1883 &[past_lhs.llbb, past_rhs.llbb]);
1885 return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
// Entry point for non-overloaded binary expressions: routes `&&`/`||`
// to the lazy path and everything else to `trans_eager_binop` after
// evaluating both operands to scalars.
1888 fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1893 -> DatumBlock<'blk, 'tcx, Expr> {
1894 let _icx = push_ctxt("trans_binary");
1895 let ccx = bcx.ccx();
1897 // if overloaded, would be RvalueDpsExpr
1898 assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
1902 trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
1905 trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
// Eager case: both operands are fully evaluated before the op.
1909 let lhs_datum = unpack_datum!(bcx, trans(bcx, lhs));
1910 let rhs_datum = unpack_datum!(bcx, trans(bcx, rhs));
1911 let binop_ty = expr_ty(bcx, expr);
1913 debug!("trans_binary (expr {}): lhs_datum={}",
1915 lhs_datum.to_string(ccx));
1916 let lhs_ty = lhs_datum.ty;
1917 let lhs = lhs_datum.to_llscalarish(bcx);
1919 debug!("trans_binary (expr {}): rhs_datum={}",
1921 rhs_datum.to_string(ccx));
1922 let rhs_ty = rhs_datum.ty;
1923 let rhs = rhs_datum.to_llscalarish(bcx);
1924 trans_eager_binop(bcx, expr, binop_ty, op,
1925 lhs_ty, lhs, rhs_ty, rhs)
// Translates an overloaded operator (binary, unary, or index) as a
// method call: looks up the method's type in the method map and invokes
// it via the generic call machinery with `ArgOverloadedOp` arguments.
1930 fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1932 method_call: MethodCall,
1933 lhs: Datum<'tcx, Expr>,
1934 rhs: Vec<(Datum<'tcx, Expr>, ast::NodeId)>,
1937 -> Result<'blk, 'tcx> {
// Typeck recorded the operator method here; absence is an ICE.
1938 let method_ty = bcx.tcx().method_map.borrow().get(&method_call).unwrap().ty;
1939 callee::trans_call_inner(bcx,
1941 monomorphize_type(bcx, method_ty),
1942 |bcx, arg_cleanup_scope| {
1943 meth::trans_method_callee(bcx,
1948 callee::ArgOverloadedOp(lhs, rhs, autoref),
// Translates an overloaded call expression `callee(args…)` — i.e. a
// call that dispatches through the Fn* traits. The callee expression
// itself becomes the first argument (the receiver) of the method call.
1952 fn trans_overloaded_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1954 callee: &'a ast::Expr,
1955 args: &'a [P<ast::Expr>],
1957 -> Block<'blk, 'tcx> {
1958 let method_call = MethodCall::expr(expr.id);
1959 let method_type = bcx.tcx()
// Prepend the callee as the receiver argument.
1965 let mut all_args = vec!(callee);
1966 all_args.extend(args.iter().map(|e| &**e));
1968 callee::trans_call_inner(bcx,
1970 monomorphize_type(bcx,
1972 |bcx, arg_cleanup_scope| {
1973 meth::trans_method_callee(
1979 callee::ArgOverloadedCall(all_args),
// Returns whether a cast from `t_in` to `t_out` requires no codegen:
// either typeck classified it as a coercion cast, or both sides deref
// to compatible pointees (same representation, different regions).
1984 pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>,
1989 if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
1993 match (ty::deref(t_in, true), ty::deref(t_out, true)) {
1994 (Some(ty::mt{ ty: t_in, .. }), Some(ty::mt{ ty: t_out, .. })) => {
1998 // This condition isn't redundant with the check for CoercionCast:
1999 // different types can be substituted into the same type, and
2000 // == equality can be overconservative if there are regions.
// Translates a scalar `as` cast to an immediate rvalue. Handles the
// no-op case, fat-pointer casts, C-like enum sources (cast the
// discriminant), and the full matrix of int/float/pointer conversions
// via the appropriate LLVM cast instruction.
2006 fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2009 -> DatumBlock<'blk, 'tcx, Expr>
2011 use middle::cast::CastTy::*;
2012 use middle::cast::IntTy::*;
// Int-to-int cast helper: bitcast when widths match, trunc when
// narrowing, sext/zext (by source signedness) when widening.
2014 fn int_cast(bcx: Block,
2021 let _icx = push_ctxt("int_cast");
2022 let srcsz = llsrctype.int_width();
2023 let dstsz = lldsttype.int_width();
2024 return if dstsz == srcsz {
2025 BitCast(bcx, llsrc, lldsttype)
2026 } else if srcsz > dstsz {
2027 TruncOrBitCast(bcx, llsrc, lldsttype)
2029 SExtOrBitCast(bcx, llsrc, lldsttype)
2031 ZExtOrBitCast(bcx, llsrc, lldsttype)
// Float-to-float cast helper: fpext when widening, fptrunc when
// narrowing, identity otherwise.
2035 fn float_cast(bcx: Block,
2041 let _icx = push_ctxt("float_cast");
2042 let srcsz = llsrctype.float_width();
2043 let dstsz = lldsttype.float_width();
2044 return if dstsz > srcsz {
2045 FPExt(bcx, llsrc, lldsttype)
2046 } else if srcsz > dstsz {
2047 FPTrunc(bcx, llsrc, lldsttype)
2051 let _icx = push_ctxt("trans_cast"); let mut bcx = bcx; let ccx =
2054 let t_in = expr_ty_adjusted(bcx, expr);
2055 let t_out = node_id_type(bcx, id);
2057 debug!("trans_cast({} as {})", t_in.repr(bcx.tcx()), t_out.repr(bcx.tcx()));
2058 let mut ll_t_in = type_of::arg_type_of(ccx, t_in);
2059 let ll_t_out = type_of::arg_type_of(ccx, t_out);
2060 // Convert the value to be cast into a ValueRef, either by-ref or
2061 // by-value as appropriate given its type:
2062 let mut datum = unpack_datum!(bcx, trans(bcx, expr));
2064 let datum_ty = monomorphize_type(bcx, datum.ty);
// No-op casts (coercions, region-only differences) emit nothing.
2066 if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
2068 return DatumBlock::new(bcx, datum);
// Fat-pointer source: either cast fat-to-fat in place, or extract the
// data pointer when the destination is thin.
2071 if type_is_fat_ptr(bcx.tcx(), t_in) {
2072 assert!(datum.kind.is_by_ref());
2073 if type_is_fat_ptr(bcx.tcx(), t_out) {
2074 return DatumBlock::new(bcx, Datum::new(
2075 PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
2078 )).to_expr_datumblock();
2080 // Return the address
2081 return immediate_rvalue_bcx(bcx,
2083 Load(bcx, get_dataptr(bcx, datum.val)),
2085 t_out).to_expr_datumblock();
2089 let r_t_in = CastTy::recognize(bcx.tcx(), t_in).expect("bad input type for cast");
2090 let r_t_out = CastTy::recognize(bcx.tcx(), t_out).expect("bad output type for cast");
// C-like enum source: load the discriminant and cast that; its
// signedness comes from the enum's ADT representation.
2092 let (llexpr, signed) = if let Int(CEnum) = r_t_in {
2093 let repr = adt::represent_type(ccx, t_in);
2094 let datum = unpack_datum!(
2095 bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
2096 let llexpr_ptr = datum.to_llref();
2097 let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx)));
2098 ll_t_in = val_ty(discr);
2099 (discr, adt::is_discr_signed(&*repr))
2101 (datum.to_llscalarish(bcx), ty::type_is_signed(t_in))
// Dispatch on the (source, destination) cast-type pair to the
// matching LLVM conversion instruction.
2104 let newval = match (r_t_in, r_t_out) {
2105 (Ptr(_), Ptr(_)) | (FPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => PointerCast(bcx, llexpr, ll_t_out),
2106 (Ptr(_), Int(_)) | (FPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
2107 (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
2109 (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
2110 (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
2111 (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
2112 (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
2113 (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
2114 (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
2116 _ => ccx.sess().span_bug(expr.span,
2117 &format!("translating unsupported cast: \
2119 t_in.repr(bcx.tcx()),
2120 t_out.repr(bcx.tcx()))
2123 return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
// Translates a compound assignment `dst op= src` for built-in
// operators: load the current value of the lvalue `dst`, compute
// `dst op src` eagerly, and store the result back into `dst`.
2126 fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2131 -> Block<'blk, 'tcx> {
2132 let _icx = push_ctxt("trans_assign_op");
2135 debug!("trans_assign_op(expr={})", bcx.expr_to_string(expr));
2137 // User-defined operator methods cannot be used with `+=` etc right now
2138 assert!(!bcx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
2140 // Evaluate LHS (destination), which should be an lvalue
2141 let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
// Scalar-only: the destination type must not need drop glue.
2142 assert!(!bcx.fcx.type_needs_drop(dst_datum.ty));
2143 let dst_ty = dst_datum.ty;
2144 let dst = load_ty(bcx, dst_datum.val, dst_datum.ty);
// Evaluate RHS after the LHS lvalue.
2147 let rhs_datum = unpack_datum!(bcx, trans(bcx, &*src));
2148 let rhs_ty = rhs_datum.ty;
2149 let rhs = rhs_datum.to_llscalarish(bcx);
2151 // Perform computation and store the result
2152 let result_datum = unpack_datum!(
2153 bcx, trans_eager_binop(bcx, expr, dst_datum.ty, op,
2154 dst_ty, dst, rhs_ty, rhs));
2155 return result_datum.store_to(bcx, dst_datum.val);
// Applies an automatic reference (autoref) to a datum: ensures it has
// an addressable lvalue form (scheduling cleanup if needed) and yields
// a by-value datum holding a pointer to it, typed `&'static T` since
// region/mutability are irrelevant in trans.
2158 fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2159 datum: Datum<'tcx, Expr>,
2161 -> DatumBlock<'blk, 'tcx, Expr> {
2164 // Ensure cleanup of `datum` if not already scheduled and obtain
2165 // a "by ref" pointer.
2166 let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
2168 // Compute final type. Note that we are loose with the region and
2169 // mutability, since those things don't matter in trans.
2170 let referent_ty = lv_datum.ty;
2171 let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), referent_ty);
2174 let llref = lv_datum.to_llref();
2176 // Construct the resulting datum, using what was the "by ref"
2177 // ValueRef of type `referent_ty` to be the "by value" ValueRef
2178 // of type `&referent_ty`.
2179 DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(ByValue))))
// Applies a chain of autoderef adjustments by calling `deref_once` per
// level, using the per-level `MethodCall::autoderef` key so each level
// can independently be an overloaded deref.
2182 fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2184 datum: Datum<'tcx, Expr>,
2186 -> DatumBlock<'blk, 'tcx, Expr> {
2188 let mut datum = datum;
2190 let method_call = MethodCall::autoderef(expr.id, i as u32);
2191 datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
2193 DatumBlock { bcx: bcx, datum: datum }
// Performs a single dereference of `datum`. If the method map records
// an overloaded deref for `method_call`, the user's `deref()` is
// invoked to obtain a `&T` first; then the built-in pointer deref is
// applied, dispatching on the pointer's type (Box, raw ptr, &ref) and
// on whether the pointee is sized or a DST.
2196 fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2198 datum: Datum<'tcx, Expr>,
2199 method_call: MethodCall)
2200 -> DatumBlock<'blk, 'tcx, Expr> {
2201 let ccx = bcx.ccx();
2203 debug!("deref_once(expr={}, datum={}, method_call={:?})",
2204 expr.repr(bcx.tcx()),
2205 datum.to_string(ccx),
2210 // Check for overloaded deref.
2211 let method_ty = ccx.tcx().method_map.borrow()
2212 .get(&method_call).map(|method| method.ty);
2213 let datum = match method_ty {
2214 Some(method_ty) => {
2215 let method_ty = monomorphize_type(bcx, method_ty);
2217 // Overloaded. Evaluate `trans_overloaded_op`, which will
2218 // invoke the user's deref() method, which basically
2219 // converts from the `Smaht<T>` pointer that we have into
2220 // a `&T` pointer. We can then proceed down the normal
2221 // path (below) to dereference that `&T`.
2222 let datum = if method_call.autoderef == 0 {
2225 // Always perform an AutoPtr when applying an overloaded auto-deref
2226 unpack_datum!(bcx, auto_ref(bcx, datum, expr))
// The method's return type (`&T`) determines the scratch slot
// the overloaded call writes into.
2229 let ref_ty = // invoked methods have their LB regions instantiated
2230 ty::no_late_bound_regions(
2231 ccx.tcx(), &ty::ty_fn_ret(method_ty)).unwrap().unwrap();
2232 let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
2234 unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
2235 datum, Vec::new(), Some(SaveIn(scratch.val)),
2237 scratch.to_expr_datum()
2240 // Not overloaded. We already have a pointer we know how to deref.
2245 let r = match datum.ty.sty {
2246 ty::ty_uniq(content_ty) => {
2247 if type_is_sized(bcx.tcx(), content_ty) {
2248 deref_owned_pointer(bcx, expr, datum, content_ty)
2250 // A fat pointer and a DST lvalue have the same representation
2251 // just different types. Since there is no temporary for `*e`
2252 // here (because it is unsized), we cannot emulate the sized
2253 // object code path for running drop glue and free. Instead,
2254 // we schedule cleanup for `e`, turning it into an lvalue.
2255 let datum = unpack_datum!(
2256 bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
2258 let datum = Datum::new(datum.val, content_ty, LvalueExpr);
2259 DatumBlock::new(bcx, datum)
2263 ty::ty_ptr(ty::mt { ty: content_ty, .. }) |
2264 ty::ty_rptr(_, ty::mt { ty: content_ty, .. }) => {
2265 if type_is_sized(bcx.tcx(), content_ty) {
2266 let ptr = datum.to_llscalarish(bcx);
2268 // Always generate an lvalue datum, even if datum.mode is
2269 // an rvalue. This is because datum.mode is only an
2270 // rvalue for non-owning pointers like &T or *T, in which
2271 // case cleanup *is* scheduled elsewhere, by the true
2272 // owner (or, in the case of *T, by the user).
2273 DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr))
2275 // A fat pointer and a DST lvalue have the same representation
2276 // just different types.
2277 DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr))
// Non-pointer types cannot be dereferenced; typeck should have
// rejected this, so reaching here is an ICE.
2282 bcx.tcx().sess.span_bug(
2284 &format!("deref invoked on expr of illegal type {}",
2285 datum.ty.repr(bcx.tcx())));
2289 debug!("deref_once(expr={}, method_call={:?}, result={})",
2290 expr.id, method_call, r.datum.to_string(ccx));
2294 /// We microoptimize derefs of owned pointers a bit here. Basically, the idea is to make the
2295 /// deref of an rvalue result in an rvalue. This helps to avoid intermediate stack slots in the
2296 /// resulting LLVM. The idea here is that, if the `Box<T>` pointer is an rvalue, then we can
2297 /// schedule a *shallow* free of the `Box<T>` pointer, and then return a ByRef rvalue into the
2298 /// pointer. Because the free is shallow, it is legit to return an rvalue, because we know that
2299 /// the contents are not yet scheduled to be freed. The language rules ensure that the contents
2300 /// will be used (or moved) before the free occurs.
2301 fn deref_owned_pointer<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2303                                    datum: Datum<'tcx, Expr>,
2304                                    content_ty: Ty<'tcx>)
2305                                    -> DatumBlock<'blk, 'tcx, Expr> {
        // Step 1: for rvalue inputs, schedule the *shallow* free of the Box
        // pointer itself (cleanup::HeapExchange) in the temporary scope of
        // `expr`. Zero-sized contents get no free scheduled, since there is
        // no allocation backing them.
2307         RvalueExpr(Rvalue { mode: ByRef }) => {
2308             let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
            // ByRef: `datum.val` is the *address of* the Box pointer, so load
            // the pointer value first — that is what must be freed.
2309             let ptr = Load(bcx, datum.val);
2310             if !type_is_zero_size(bcx.ccx(), content_ty) {
2311                 bcx.fcx.schedule_free_value(scope, ptr, cleanup::HeapExchange, content_ty);
2314         RvalueExpr(Rvalue { mode: ByValue }) => {
2315             let scope = cleanup::temporary_scope(bcx.tcx(), expr.id);
2316             if !type_is_zero_size(bcx.ccx(), content_ty) {
            // ByValue: `datum.val` already *is* the Box pointer.
2317                 bcx.fcx.schedule_free_value(scope, datum.val, cleanup::HeapExchange,
    // Step 2: compute the deref'd pointer and the kind of the resulting datum.
2324     // If we had an rvalue in, we produce an rvalue out.
2325     let (llptr, kind) = match datum.kind {
2327             (Load(bcx, datum.val), LvalueExpr)
2329         RvalueExpr(Rvalue { mode: ByRef }) => {
2330             (Load(bcx, datum.val), RvalueExpr(Rvalue::new(ByRef)))
2332         RvalueExpr(Rvalue { mode: ByValue }) => {
            // The pointer is the value itself; the result is a ByRef rvalue
            // into the contents, which (per the doc comment above) are not
            // covered by the shallow free just scheduled.
2333             (datum.val, RvalueExpr(Rvalue::new(ByRef)))
2337     let datum = Datum { ty: content_ty, val: llptr, kind: kind };
2338     DatumBlock { bcx: bcx, datum: datum }
    /// Chooses how an overflow-checked binary op is lowered: add/sub/mul go
    /// through the LLVM `*.with.overflow` intrinsics, while shifts are
    /// checked by validating the RHS shift amount before shifting (there is
    /// no LLVM intrinsic for shift overflow).
2352     fn codegen_strategy(&self) -> OverflowCodegen {
2353         use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
2355             OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
2356             OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
2357             OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
2359             OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
2360             OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
/// Dispatch token produced by `codegen_strategy`: which of the two lowering
/// paths an overflow-checked operation takes.
2365 enum OverflowCodegen {
2366     ViaIntrinsic(OverflowOpViaIntrinsic),
2367     ViaInputCheck(OverflowOpViaInputCheck),
/// Shift ops, overflow-checked by validating the RHS shift amount.
2370 enum OverflowOpViaInputCheck { Shl, Shr, }
/// Ops lowered to the LLVM `llvm.*.with.overflow.*` intrinsics.
2373 enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
2375 impl OverflowOpViaIntrinsic {
    /// Looks up the LLVM overflow-intrinsic function value matching this op
    /// and the LHS operand type.
2376     fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
2377         let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
2378         bcx.ccx().get_intrinsic(&name)
    /// Maps `(op, integer type)` to the name of the corresponding
    /// `llvm.{s,u}{add,sub,mul}.with.overflow.iN` intrinsic. Signed types
    /// select the `s*` family, unsigned the `u*` family. Panics if `ty` is
    /// not an integer type.
2380     fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str {
2381         use syntax::ast::IntTy::*;
2382         use syntax::ast::UintTy::*;
2383         use middle::ty::{ty_int, ty_uint};
        // Normalize pointer-sized ints (isize/usize) to the concrete type for
        // the target's pointer width first, since the LLVM intrinsics are
        // keyed by exact bit width.
2385         let new_sty = match ty.sty {
2386             ty_int(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] {
2387                 "32" => ty_int(TyI32),
2388                 "64" => ty_int(TyI64),
2389                 _ => panic!("unsupported target word size")
2391             ty_uint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] {
2392                 "32" => ty_uint(TyU32),
2393                 "64" => ty_uint(TyU64),
2394                 _ => panic!("unsupported target word size")
2396             ref t @ ty_uint(_) | ref t @ ty_int(_) => t.clone(),
2397             _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type",
2402             OverflowOpViaIntrinsic::Add => match new_sty {
2403                 ty_int(TyI8) => "llvm.sadd.with.overflow.i8",
2404                 ty_int(TyI16) => "llvm.sadd.with.overflow.i16",
2405                 ty_int(TyI32) => "llvm.sadd.with.overflow.i32",
2406                 ty_int(TyI64) => "llvm.sadd.with.overflow.i64",
2408                 ty_uint(TyU8) => "llvm.uadd.with.overflow.i8",
2409                 ty_uint(TyU16) => "llvm.uadd.with.overflow.i16",
2410                 ty_uint(TyU32) => "llvm.uadd.with.overflow.i32",
2411                 ty_uint(TyU64) => "llvm.uadd.with.overflow.i64",
                // isize/usize were normalized away above, so only the fixed
                // widths can reach here.
2413                 _ => unreachable!(),
2415             OverflowOpViaIntrinsic::Sub => match new_sty {
2416                 ty_int(TyI8) => "llvm.ssub.with.overflow.i8",
2417                 ty_int(TyI16) => "llvm.ssub.with.overflow.i16",
2418                 ty_int(TyI32) => "llvm.ssub.with.overflow.i32",
2419                 ty_int(TyI64) => "llvm.ssub.with.overflow.i64",
2421                 ty_uint(TyU8) => "llvm.usub.with.overflow.i8",
2422                 ty_uint(TyU16) => "llvm.usub.with.overflow.i16",
2423                 ty_uint(TyU32) => "llvm.usub.with.overflow.i32",
2424                 ty_uint(TyU64) => "llvm.usub.with.overflow.i64",
2426                 _ => unreachable!(),
2428             OverflowOpViaIntrinsic::Mul => match new_sty {
2429                 ty_int(TyI8) => "llvm.smul.with.overflow.i8",
2430                 ty_int(TyI16) => "llvm.smul.with.overflow.i16",
2431                 ty_int(TyI32) => "llvm.smul.with.overflow.i32",
2432                 ty_int(TyI64) => "llvm.smul.with.overflow.i64",
2434                 ty_uint(TyU8) => "llvm.umul.with.overflow.i8",
2435                 ty_uint(TyU16) => "llvm.umul.with.overflow.i16",
2436                 ty_uint(TyU32) => "llvm.umul.with.overflow.i32",
2437                 ty_uint(TyU64) => "llvm.umul.with.overflow.i64",
2439                 _ => unreachable!(),
    /// Emits the overflow-intrinsic call for this op. The intrinsic yields a
    /// `{iN, i1}` pair (wrapped result, did-it-overflow flag); on overflow we
    /// branch into a panic ("arithmetic operation overflowed"), otherwise the
    /// raw result value flows out. Returns the (possibly new) block and the
    /// result value.
2444     fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
2445                                         info: NodeIdAndSpan,
2446                                         lhs_t: Ty<'tcx>, lhs: ValueRef,
2448                                         binop_debug_loc: DebugLoc)
2449                                         -> (Block<'blk, 'tcx>, ValueRef) {
2450         let llfn = self.to_intrinsic(bcx, lhs_t);
2452         let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc);
2453         let result = ExtractValue(bcx, val, 0); // iN operation result
2454         let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
        // cond == true iff the overflow flag is set.
2456         let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
        // Tell LLVM (via llvm.expect.i1) that cond is expected to be false,
        // so the non-overflowing path is laid out as the hot path.
2459         let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
2460         Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
2461              None, binop_debug_loc);
        // Conditionally branch to the panic machinery when cond holds.
2464         base::with_cond(bcx, cond, |bcx|
2465             controlflow::trans_fail(bcx, info,
2466                 InternedString::new("arithmetic operation overflowed")));
2472 impl OverflowOpViaInputCheck {
    /// Overflow check for shift ops: rather than an intrinsic, panic
    /// ("shift operation overflowed") if any bit of the RHS lies outside the
    /// range of valid shift amounts for the LHS width, then emit the masked
    /// shift itself. Returns the (possibly new) block and the shift result.
2473     fn build_with_input_check<'blk, 'tcx>(&self,
2474                                           bcx: Block<'blk, 'tcx>,
2475                                           info: NodeIdAndSpan,
2479                                           binop_debug_loc: DebugLoc)
2480                                           -> (Block<'blk, 'tcx>, ValueRef)
2482         let lhs_llty = val_ty(lhs);
2483         let rhs_llty = val_ty(rhs);
2485         // Panic if any bits are set outside of bits that we always
2488         // Note that the mask's value is derived from the LHS type
2489         // (since that is where the 32/64 distinction is relevant) but
2490         // the mask's type must match the RHS type (since they will
2491         // both be fed into a and-binop)
2492         let invert_mask = !shift_mask_val(lhs_llty);
2493         let invert_mask = C_integral(rhs_llty, invert_mask, true);
        // Nonzero `outer_bits` means the shift amount is out of range.
2495         let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
2496         let cond = ICmp(bcx, llvm::IntNE, outer_bits,
2497                         C_integral(rhs_llty, 0, false), binop_debug_loc);
2498         let result = match *self {
2499             OverflowOpViaInputCheck::Shl =>
2500                 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2501             OverflowOpViaInputCheck::Shr =>
2502                 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
        // Branch into the panic machinery when the out-of-range check fired.
2505         base::with_cond(bcx, cond, |bcx|
2506             controlflow::trans_fail(bcx, info,
2507                 InternedString::new("shift operation overflowed")));
/// Returns the mask of valid shift amounts for an integer of LLVM type
/// `llty`: `bit_width - 1`, i.e. all low bits set for power-of-two widths.
2513 fn shift_mask_val(llty: Type) -> u64 {
2514     // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
2515     llty.int_width() - 1
2518 // To avoid UB from LLVM, these two functions mask RHS with an
2519 // appropriate mask unconditionally (i.e. the fallback behavior for
2520 // all shifts). For 32- and 64-bit types, this matches the semantics
2521 // of Java. (See related discussion on #1877 and #10183.)
/// Left shift with the RHS unconditionally masked into the valid range, so
/// the emitted LLVM `shl` never sees an out-of-range shift amount (which
/// would be undefined behavior at the LLVM level — see comment above).
2523 fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2526                                       binop_debug_loc: DebugLoc) -> ValueRef {
2527     let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShl, lhs, rhs);
2528     // #1877, #10183: Ensure that input is always valid
2529     let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
2530     Shl(bcx, lhs, rhs, binop_debug_loc)
/// Right shift with the RHS unconditionally masked into the valid range.
/// Emits an arithmetic shift (`AShr`, sign-extending) for signed LHS types
/// and a logical shift (`LShr`, zero-filling) for unsigned ones.
2533 fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2537                                       binop_debug_loc: DebugLoc) -> ValueRef {
2538     let rhs = base::cast_shift_expr_rhs(bcx, ast::BinOp_::BiShr, lhs, rhs);
2539     // #1877, #10183: Ensure that input is always valid
2540     let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc);
2541     let is_signed = ty::type_is_signed(lhs_t);
2543         AShr(bcx, lhs, rhs, binop_debug_loc)
2545         LShr(bcx, lhs, rhs, binop_debug_loc)
/// Masks `rhs` down to the valid shift range for its own LLVM type
/// (`rhs & (bit_width - 1)`), the unconditional fallback masking used by
/// both unchecked shift builders.
2549 fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
2551                               debug_loc: DebugLoc) -> ValueRef {
2552     let rhs_llty = val_ty(rhs);
2553     let mask = shift_mask_val(rhs_llty);
2554     And(bcx, rhs, C_integral(rhs_llty, mask, false), debug_loc)
2557 fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
2558 lhs_t: Ty<'tcx>, lhs: ValueRef,
2560 binop_debug_loc: DebugLoc)
2561 -> (Block<'blk, 'tcx>, ValueRef) {
2562 if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
2563 if bcx.ccx().check_overflow() {
2565 match oop.codegen_strategy() {
2566 OverflowCodegen::ViaIntrinsic(oop) =>
2567 oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2568 OverflowCodegen::ViaInputCheck(oop) =>
2569 oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
2572 let res = match oop {
2573 OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
2574 OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
2575 OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
2578 build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
2580 build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),