+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Compilation of match statements
-//!
-//! I will endeavor to explain the code as best I can. I have only a loose
-//! understanding of some parts of it.
-//!
-//! ## Matching
-//!
-//! The basic state of the code is maintained in an array `m` of `Match`
-//! objects. Each `Match` describes some list of patterns, all of which must
-//! match against the current list of values. If those patterns match, then
-//! the arm listed in the match is the correct arm. A given arm may have
-//! multiple corresponding match entries, one for each alternative that
-//! remains. As we proceed, these sets of matches are adjusted by the various
-//! `enter_XXX()` functions, each of which adjusts the set of options given
-//! some information about the value which has been matched.
-//!
-//! So, initially, there is one value and N matches, each of which has one
-//! constituent pattern. N here is usually the number of arms but may be
-//! greater, if some arms have multiple alternatives. For example, here:
-//!
-//!     enum Foo { A, B(isize), C(usize, usize) }
-//!     match foo {
-//!         A => ...,
-//!         B(x) => ...,
-//!         C(1, 2) => ...,
-//!         C(..) => ...
-//!     }
-//!
-//! The value would be `foo`. There would be four matches, each of which
-//! contains one pattern. We could collect the various options and then
-//! compile the code for the case where `foo` is an `A`, a `B`, and a `C`.
-//! When we generate the code for `C`, we would (1) drop the two matches that
-//! do not match a `C` and (2) expand the other two into two patterns each.
-//! In the first case, the two patterns would be `1` and `2`, and in the
-//! second case the `..` pattern would be expanded into `_` and `_`. The two
-//! values are of course the arguments to `C`.
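-//!
-//! As a rough, hand-written sketch (illustrative only; this is not the code
-//! we actually generate), the resulting decision tree behaves like:
-//!
-//!     enum Foo { A, B(isize), C(usize, usize) }
-//!     fn simulate(foo: Foo) -> usize {
-//!         match foo {
-//!             Foo::A => 0,          // switch on the discriminant first
-//!             Foo::B(_x) => 1,
-//!             Foo::C(a, b) => {
-//!                 // the two `C` matches were expanded into their fields
-//!                 if a == 1 && b == 2 { 2 } else { 3 }
-//!             }
-//!         }
-//!     }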
-//!
-//! Here is a quick guide to the various functions:
-//!
-//! - `compile_submatch()`: The main workhorse. It takes a list of values and
-//!   a list of matches and finds the various possibilities that could occur.
-//!
-//! - `enter_XXX()`: modifies the list of matches based on some information
-//!   about the value that has been matched. For example,
-//!   `enter_rec_or_struct()` adjusts the values given that a record or struct
-//!   has been matched. This is an infallible pattern, so *all* of the matches
-//!   must be either wildcards or record/struct patterns. `enter_opt()`
-//!   handles the fallible cases, and it is correspondingly more complex.
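-//!
-//!   For instance, specializing a column on a struct pattern replaces each
-//!   row's single pattern with one pattern per field. A sketch of the
-//!   transformation, for hypothetical rows matching a two-field struct:
-//!
-//!       // before:  [ Point { x: 0, y: _ } ]   and   [ _ ]
-//!       // after:   [ 0, _ ]                   and   [ _, _ ]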
-//!
-//! ## Bindings
-//!
-//! We store information about the bound variables for each arm as part of the
-//! per-arm `ArmData` struct. There is a mapping from identifiers to
-//! `BindingInfo` structs. These structs contain the mode/id/type of the
-//! binding, but they also contain an LLVM value which points at an alloca
-//! called `llmatch`. For by-value bindings that are Copy, we also create
-//! an extra alloca that we copy the matched value into, so that any
-//! changes we make to our copy are not reflected in the original and
-//! vice versa. We don't do this for moves, since the original value
-//! can't be used afterwards, which lets us skip the extra alloca.
-//!
-//! The `llmatch` binding always stores a pointer into the value being matched
-//! which points at the data for the binding. If the value being matched has
-//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence
-//! `llmatch` has type `T**`). So, if you have a pattern like:
-//!
-//!     let a: C = ...;
-//!     let b: D = ...;
-//!     match (a, b) { (ref c, d) => { ... } }
-//!
-//! For `c` and `d`, we would generate allocas of type `C*` and `D*`
-//! respectively. These are the `llmatch` allocas. As we match, when we come
-//! up against an identifier, we store the current pointer into the
-//! corresponding alloca.
-//!
-//! Once a pattern is completely matched, and assuming that there is no
-//! guard expression, we will branch to a block that leads to the body
-//! itself. For any
-//! by-value bindings, this block will first load the ptr from `llmatch` (the
-//! one of type `D*`) and then load a second time to get the actual value (the
-//! one of type `D`). For by ref bindings, the value of the local variable is
-//! simply the first alloca.
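-//!
-//! A minimal runnable sketch of that double indirection (plain Rust standing
-//! in for the generated IR; the names are hypothetical):
-//!
-//!     let d_val: i32 = 7;                      // the matched data (type D)
-//!     let p: *const i32 = &d_val;              // D*: pointer into the value
-//!     let llmatch_d: *const *const i32 = &p;   // the alloca slot: type D**
-//!     let loaded = unsafe { **llmatch_d };     // by-value: load twice
-//!     assert_eq!(loaded, 7);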
-//!
-//! So, for the example above, we would generate a setup kind of like this:
-//!
-//!        +-------+
-//!        | Entry |
-//!        +-------+
-//!            |
-//!    +--------------------------------------------+
-//!    | llmatch_c = (addr of first half of tuple)  |
-//!    | llmatch_d = (addr of second half of tuple) |
-//!    +--------------------------------------------+
-//!            |
-//!    +--------------------------------------+
-//!    | *llbinding_d = **llmatch_d           |
-//!    +--------------------------------------+
-//!
-//! If there is a guard, the situation is slightly different, because we must
-//! execute the guard code. Moreover, we need to do so once for each of the
-//! alternatives that lead to the arm, because if the guard fails, they may
-//! have different points from which to continue the search. Therefore, in that
-//! case, we generate code that looks more like:
-//!
-//!        +-------+
-//!        | Entry |
-//!        +-------+
-//!            |
-//!    +--------------------------------------------+
-//!    | llmatch_c = (addr of first half of tuple)  |
-//!    | llmatch_d = (addr of second half of tuple) |
-//!    +--------------------------------------------+
-//!            |
-//!    +-------------------------------------------------+
-//!    | *llbinding_d = **llmatch_d                      |
-//!    | check condition                                 |
-//!    | if false { goto next case }                     |
-//!    | if true { goto body }                           |
-//!    +-------------------------------------------------+
-//!
-//! The handling for the cleanups is a bit... sensitive. Basically, the body
-//! is the one that invokes `add_clean()` for each binding. During the guard
-//! evaluation, we add temporary cleanups and revoke them after the guard is
-//! evaluated (it could fail, after all). Note that guards and moves are
-//! just plain incompatible.
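-//!
-//! For example, a by-move binding in a guarded arm was rejected outright
-//! (error E0008 in this era of the compiler):
-//!
-//!     match Some(String::from("hi")) {
-//!         // error[E0008]: cannot bind by-move into a pattern guard
-//!         Some(s) if s.len() > 0 => drop(s),
-//!         _ => {}
-//!     }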
-//!
-//! Some relevant helper functions that manage bindings:
-//! - `create_bindings_map()`
-//! - `insert_lllocals()`
-//!
-//!
-//! ## Notes on vector pattern matching.
-//!
-//! Vector pattern matching is surprisingly tricky. The problem is that
-//! the structure of the vector isn't fully known, and slice matches
-//! can be done on subparts of it.
-//!
-//! The way that vector pattern matches are dealt with, then, is as
-//! follows. First, we make the actual condition associated with a
-//! vector pattern simply a vector length comparison. So the pattern
-//! [1, .. x] gets the condition "vec len >= 1", and the pattern
-//! [.. x] gets the condition "vec len >= 0". The problem here is that
-//! having the condition "vec len >= 1" hold clearly does not mean that
-//! only a pattern that has exactly that condition will match. This
-//! means that it may well be the case that a condition holds, but none
-//! of the patterns associated with that condition match; to deal with this,
-//! when doing vector length matches, we have match failures proceed to
-//! the next condition to check.
-//!
-//! There are a couple more subtleties to deal with. While the "actual"
-//! condition associated with vector length tests is simply a test on
-//! the vector length, the actual vec_len Opt entry contains more
-//! information used to restrict which matches are associated with it.
-//! To ensure that all matches in a submatch are matching against the
-//! same values from inside the vector, they are split up by how many
-//! elements they match at the front and at the back of the vector. In
-//! order to make sure that arms are properly checked in order, even
-//! with the overmatching conditions, each vec_len Opt entry is
-//! associated with a range of matches.
-//! Consider the following:
-//!
-//!     match &[1, 2, 3] {
-//!         [1, 1, .. _] => 0,
-//!         [1, 2, 2, .. _] => 1,
-//!         [1, 2, 3, .. _] => 2,
-//!         [1, 2, .. _] => 3,
-//!         _ => 4
-//!     }
-//!
-//! The proper arm to match is arm 2, but arms 0 and 3 both have the
-//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the
-//! wrong branch would be taken. Instead, vec_len Opts are associated
-//! with a contiguous range of matches that have the same "shape".
-//! This is sort of ugly and requires a bunch of special handling of
-//! vec_len options.
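-//!
-//! A hand-written sketch of that lowering for the example above (illustrative
-//! only): each vec_len condition becomes a length test, and a failed
-//! sub-match falls through to the next condition rather than jumping straight
-//! to the default arm.
-//!
-//!     fn classify(v: &[i32]) -> usize {
-//!         if v.len() >= 2 && v[0] == 1 && v[1] == 1 { return 0; }
-//!         if v.len() >= 3 && v[0] == 1 && v[1] == 2 && v[2] == 2 { return 1; }
-//!         if v.len() >= 3 && v[0] == 1 && v[1] == 2 && v[2] == 3 { return 2; }
-//!         if v.len() >= 2 && v[0] == 1 && v[1] == 2 { return 3; }
-//!         4
-//!     }
-//!     assert_eq!(classify(&[1, 2, 3]), 2);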
-
-pub use self::BranchKind::*;
-pub use self::OptResult::*;
-pub use self::TransBindingMode::*;
-use self::Opt::*;
-use self::FailureHandler::*;
-
-use llvm::{ValueRef, BasicBlockRef};
-use rustc_const_eval::check_match::{self, Constructor, StaticInliner};
-use rustc_const_eval::{compare_lit_exprs, eval_const_expr, fatal_const_eval_err};
-use rustc::hir::def::{Def, DefMap};
-use rustc::hir::def_id::DefId;
-use middle::expr_use_visitor as euv;
-use middle::lang_items::StrEqFnLangItem;
-use middle::mem_categorization as mc;
-use middle::mem_categorization::Categorization;
-use rustc::hir::pat_util::*;
-use rustc::ty::subst::Substs;
-use adt;
-use base::*;
-use build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast};
-use build::{Not, Store, Sub, add_comment};
-use build;
-use callee::{Callee, ArgVals};
-use cleanup::{self, CleanupMethods, DropHintMethods};
-use common::*;
-use consts;
-use datum::*;
-use debuginfo::{self, DebugLoc, ToDebugLoc};
-use expr::{self, Dest};
-use monomorphize;
-use tvec;
-use type_of;
-use Disr;
-use value::Value;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::traits::Reveal;
-use session::config::NoDebugInfo;
-use util::common::indenter;
-use util::nodemap::FnvHashMap;
-use util::ppaux;
-
-use std;
-use std::cell::RefCell;
-use std::cmp::Ordering;
-use std::fmt;
-use std::rc::Rc;
-use rustc::hir::{self, PatKind};
-use syntax::ast::{self, DUMMY_NODE_ID, NodeId};
-use syntax_pos::Span;
-use rustc::hir::fold::Folder;
-use syntax::ptr::P;
-
-#[derive(Copy, Clone, Debug)]
-struct ConstantExpr<'a>(&'a hir::Expr);
-
-impl<'a> ConstantExpr<'a> {
- fn eq<'b, 'tcx>(self, other: ConstantExpr<'a>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool {
- match compare_lit_exprs(tcx, self.0.span, self.0, other.0) {
- Ok(result) => result == Ordering::Equal,
- Err(_) => bug!("compare_lit_exprs: type mismatch"),
- }
- }
-}
-
-// An option identifying a branch (either a literal, an enum variant or a range)
-#[derive(Debug)]
-enum Opt<'a, 'tcx> {
- ConstantValue(ConstantExpr<'a>, DebugLoc),
- ConstantRange(ConstantExpr<'a>, ConstantExpr<'a>, DebugLoc),
- Variant(Disr, Rc<adt::Repr<'tcx>>, DefId, DebugLoc),
- SliceLengthEqual(usize, DebugLoc),
- SliceLengthGreaterOrEqual(/* prefix length */ usize,
- /* suffix length */ usize,
- DebugLoc),
-}
-
-impl<'a, 'b, 'tcx> Opt<'a, 'tcx> {
- fn eq(&self, other: &Opt<'a, 'tcx>, tcx: TyCtxt<'b, 'tcx, 'tcx>) -> bool {
- match (self, other) {
- (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx),
- (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => {
- a1.eq(b1, tcx) && a2.eq(b2, tcx)
- }
- (&Variant(a_disr, ref a_repr, a_def, _),
- &Variant(b_disr, ref b_repr, b_def, _)) => {
- a_disr == b_disr && *a_repr == *b_repr && a_def == b_def
- }
- (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b,
- (&SliceLengthGreaterOrEqual(a1, a2, _),
- &SliceLengthGreaterOrEqual(b1, b2, _)) => {
- a1 == b1 && a2 == b2
- }
- _ => false
- }
- }
-
- fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
- use consts::TrueConst::Yes;
- let _icx = push_ctxt("match::trans_opt");
- let ccx = bcx.ccx();
- match *self {
- ConstantValue(ConstantExpr(lit_expr), _) => {
- let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id);
- let expr = consts::const_expr(ccx, &lit_expr, bcx.fcx.param_substs, None, Yes);
- let llval = match expr {
- Ok((llval, _)) => llval,
- Err(err) => {
- fatal_const_eval_err(bcx.tcx(), err.as_inner(), lit_expr.span, "pattern");
- }
- };
- let lit_datum = immediate_rvalue(llval, lit_ty);
- let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
- SingleResult(Result::new(bcx, lit_datum.val))
- }
- ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => {
- let l1 = match consts::const_expr(ccx, &l1, bcx.fcx.param_substs, None, Yes) {
- Ok((l1, _)) => l1,
- Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l1.span, "pattern"),
- };
- let l2 = match consts::const_expr(ccx, &l2, bcx.fcx.param_substs, None, Yes) {
- Ok((l2, _)) => l2,
- Err(err) => fatal_const_eval_err(bcx.tcx(), err.as_inner(), l2.span, "pattern"),
- };
- RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
- }
- Variant(disr_val, ref repr, _, _) => {
- SingleResult(Result::new(bcx, adt::trans_case(bcx, &repr, disr_val)))
- }
- SliceLengthEqual(length, _) => {
- SingleResult(Result::new(bcx, C_uint(ccx, length)))
- }
- SliceLengthGreaterOrEqual(prefix, suffix, _) => {
- LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix)))
- }
- }
- }
-
- fn debug_loc(&self) -> DebugLoc {
- match *self {
- ConstantValue(_,debug_loc) |
- ConstantRange(_, _, debug_loc) |
- Variant(_, _, _, debug_loc) |
- SliceLengthEqual(_, debug_loc) |
- SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc
- }
- }
-}
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum BranchKind {
- NoBranch,
- Single,
- Switch,
- Compare,
- CompareSliceLength
-}
-
-pub enum OptResult<'blk, 'tcx: 'blk> {
- SingleResult(Result<'blk, 'tcx>),
- RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
- LowerBound(Result<'blk, 'tcx>)
-}
-
-#[derive(Clone, Copy, PartialEq)]
-pub enum TransBindingMode {
- /// By-value binding for a copy type: copies from matched data
- /// into a fresh LLVM alloca.
- TrByCopy(/* llbinding */ ValueRef),
-
- /// By-value binding for a non-copy type where we copy into a
- /// fresh LLVM alloca; this most accurately reflects the language
- /// semantics (e.g. it properly handles overwrites of the matched
- /// input), but potentially injects an unwanted copy.
- TrByMoveIntoCopy(/* llbinding */ ValueRef),
-
- /// Binding a non-copy type by reference under the hood; this is
- /// a codegen optimization to avoid unnecessary memory traffic.
- TrByMoveRef,
-
- /// By-ref binding exposed in the original source input.
- TrByRef,
-}
-
-impl TransBindingMode {
- /// If the binding is made by creating a fresh copy, returns the alloca
- /// that it will copy into; otherwise None.
- fn alloca_if_copy(&self) -> Option<ValueRef> {
- match *self {
- TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding),
- TrByMoveRef | TrByRef => None,
- }
- }
-}
-
-/// Information about a pattern binding:
-/// - `llmatch` is a pointer to a stack slot. The stack slot contains a
-/// pointer into the value being matched. Hence, llmatch has type `T**`
-/// where `T` is the value being matched.
-/// - `trmode` is the trans binding mode
-/// - `id` is the node id of the binding
-/// - `ty` is the Rust type of the binding
-#[derive(Clone, Copy)]
-pub struct BindingInfo<'tcx> {
- pub llmatch: ValueRef,
- pub trmode: TransBindingMode,
- pub id: ast::NodeId,
- pub span: Span,
- pub ty: Ty<'tcx>,
-}
-
-type BindingsMap<'tcx> = FnvHashMap<ast::Name, BindingInfo<'tcx>>;
-
-struct ArmData<'p, 'blk, 'tcx: 'blk> {
- bodycx: Block<'blk, 'tcx>,
- arm: &'p hir::Arm,
- bindings_map: BindingsMap<'tcx>
-}
-
-/// Info about Match.
-/// If all `pats` are matched then arm `data` will be executed.
-/// As we proceed, `bound_ptrs` is filled with pointers to the values to be
-/// bound; these pointers are stored in the `llmatch` variables just before
-/// executing the `data` arm.
-struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> {
- pats: Vec<&'p hir::Pat>,
- data: &'a ArmData<'p, 'blk, 'tcx>,
- bound_ptrs: Vec<(ast::Name, ValueRef)>,
- // Thread along renamings done by the check_match::StaticInliner, so we can
- // map back to original NodeIds
- pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>>
-}
-
-impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- if ppaux::verbose() {
- // for many programs, this just takes too long to serialize
- write!(f, "{:?}", self.pats)
- } else {
- write!(f, "{} pats", self.pats.len())
- }
- }
-}
-
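-/// Returns true if any pattern in column `col` is a nested binding like
-/// `x @ Some(_)`, which `expand_nested_bindings` must flatten before the
-/// column can be specialized.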
-fn has_nested_bindings(m: &[Match], col: usize) -> bool {
- for br in m {
- if let PatKind::Binding(_, _, Some(..)) = br.pats[col].node {
- return true
- }
- }
- false
-}
-
-// As noted in `fn match_datum`, we should eventually pass around a
-// `Datum<Lvalue>` for the `val`; but until we get to that point, this
-// `MatchInput` struct will serve -- it has everything `Datum<Lvalue>`
-// does except for the type field.
-#[derive(Copy, Clone)]
-pub struct MatchInput { val: ValueRef, lval: Lvalue }
-
-impl<'tcx> Datum<'tcx, Lvalue> {
- pub fn match_input(&self) -> MatchInput {
- MatchInput {
- val: self.val,
- lval: self.kind,
- }
- }
-}
-
-impl fmt::Debug for MatchInput {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- fmt::Debug::fmt(&Value(self.val), f)
- }
-}
-
-impl MatchInput {
- fn from_val(val: ValueRef) -> MatchInput {
- MatchInput {
- val: val,
- lval: Lvalue::new("MatchInput::from_val"),
- }
- }
-
- fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
- Datum::new(self.val, ty, self.lval)
- }
-}
-
-fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize,
- val: MatchInput)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
- debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})",
- bcx.to_str(), m, col, val);
- let _indenter = indenter();
-
- m.iter().map(|br| {
- let mut bound_ptrs = br.bound_ptrs.clone();
- let mut pat = br.pats[col];
- loop {
- pat = match pat.node {
- PatKind::Binding(_, ref path, Some(ref inner)) => {
- bound_ptrs.push((path.node, val.val));
- &inner
- },
- _ => break
- }
- }
-
- let mut pats = br.pats.clone();
- pats[col] = pat;
- Match {
- pats: pats,
- data: &br.data,
- bound_ptrs: bound_ptrs,
- pat_renaming_map: br.pat_renaming_map,
- }
- }).collect()
-}
-
-fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize,
- val: MatchInput,
- mut e: F)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> where
- F: FnMut(&[(&'p hir::Pat, Option<Ty<'tcx>>)])
- -> Option<Vec<(&'p hir::Pat, Option<Ty<'tcx>>)>>,
-{
- debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})",
- bcx.to_str(), m, col, val);
- let _indenter = indenter();
-
- m.iter().filter_map(|br| {
- let pats : Vec<_> = br.pats.iter().map(|p| (*p, None)).collect();
- e(&pats).map(|pats| {
- let this = br.pats[col];
- let mut bound_ptrs = br.bound_ptrs.clone();
- match this.node {
- PatKind::Binding(_, ref path, None) => {
- bound_ptrs.push((path.node, val.val));
- }
- PatKind::Vec(ref before, Some(ref slice), ref after) => {
- if let PatKind::Binding(_, ref path, None) = slice.node {
- let subslice_val = bind_subslice_pat(
- bcx, this.id, val,
- before.len(), after.len());
- bound_ptrs.push((path.node, subslice_val));
- }
- }
- _ => {}
- }
- Match {
- pats: pats.into_iter().map(|p| p.0).collect(),
- data: br.data,
- bound_ptrs: bound_ptrs,
- pat_renaming_map: br.pat_renaming_map,
- }
- })
- }).collect()
-}
-
-fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize,
- val: MatchInput)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
- debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})",
- bcx.to_str(), m, col, val);
- let _indenter = indenter();
-
- // Collect all of the matches that can match against anything.
- enter_match(bcx, m, col, val, |pats| {
- match pats[col].0.node {
- PatKind::Binding(..) | PatKind::Wild => {
- let mut r = pats[..col].to_vec();
- r.extend_from_slice(&pats[col + 1..]);
- Some(r)
- }
- _ => None
- }
- })
-}
-
-// <pcwalton> nmatsakis: what does enter_opt do?
-// <pcwalton> in trans/match
-// <pcwalton> trans/match.rs is like stumbling around in a dark cave
-// <nmatsakis> pcwalton: the enter family of functions adjust the set of
-// patterns as needed
-// <nmatsakis> yeah, at some point I kind of achieved some level of
-// understanding
-// <nmatsakis> anyhow, they adjust the patterns given that something of that
-// kind has been found
-// <nmatsakis> pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I
-// said
-// <nmatsakis> enter_match() kind of embodies the generic code
-// <nmatsakis> it is provided with a function that tests each pattern to see
-// if it might possibly apply and so forth
-// <nmatsakis> so, if you have a pattern like {a: _, b: _, _} and one like _
-// <nmatsakis> then _ would be expanded to (_, _)
-// <nmatsakis> one spot for each of the sub-patterns
-// <nmatsakis> enter_opt() is one of the more complex; it covers the fallible
-// cases
-// <nmatsakis> enter_rec_or_struct() or enter_tuple() are simpler, since they
-// are infallible patterns
-// <nmatsakis> so all patterns must either be records (resp. tuples) or
-// wildcards
-
-/// The above is now somewhat outdated: enter_match() now takes a function
-/// that operates on the complete row of patterns rather than just the first.
-/// Also, most of the enter_() family functions have been unified with
-/// the check_match specialization step.
-fn enter_opt<'a, 'p, 'blk, 'tcx>(
- bcx: Block<'blk, 'tcx>,
- _: ast::NodeId,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- opt: &Opt,
- col: usize,
- variant_size: usize,
- val: MatchInput)
- -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
- debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})",
- bcx.to_str(), m, *opt, col, val);
- let _indenter = indenter();
-
- let ctor = match opt {
- &ConstantValue(ConstantExpr(expr), _) => Constructor::ConstantValue(
- eval_const_expr(bcx.tcx(), &expr)
- ),
- &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => Constructor::ConstantRange(
- eval_const_expr(bcx.tcx(), &lo),
- eval_const_expr(bcx.tcx(), &hi)
- ),
- &SliceLengthEqual(n, _) =>
- Constructor::Slice(n),
- &SliceLengthGreaterOrEqual(before, after, _) =>
- Constructor::SliceWithSubslice(before, after),
- &Variant(_, _, def_id, _) =>
- Constructor::Variant(def_id)
- };
-
- let param_env = bcx.tcx().empty_parameter_environment();
- let mcx = check_match::MatchCheckCtxt {
- tcx: bcx.tcx(),
- param_env: param_env,
- };
- enter_match(bcx, m, col, val, |pats|
- check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size)
- )
-}
-
-// Returns the options in one column of matches. An option is something that
-// needs to be conditionally matched at runtime; for example, the discriminant
-// on a set of enum variants or a literal.
-fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- col: usize)
- -> Vec<Opt<'p, 'tcx>> {
- let tcx = bcx.tcx();
-
- let mut found: Vec<Opt> = vec![];
- for br in m {
- let cur = br.pats[col];
- let debug_loc = match br.pat_renaming_map {
- Some(pat_renaming_map) => {
- match pat_renaming_map.get(&(cur.id, cur.span)) {
- Some(&id) => DebugLoc::At(id, cur.span),
- None => DebugLoc::At(cur.id, cur.span),
- }
- }
- None => DebugLoc::None
- };
-
- let opt = match cur.node {
- PatKind::Lit(ref l) => {
- ConstantValue(ConstantExpr(&l), debug_loc)
- }
- PatKind::Path(..) | PatKind::TupleStruct(..) | PatKind::Struct(..) => {
- match tcx.expect_def(cur.id) {
- Def::Variant(enum_id, var_id) => {
- let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id);
- Variant(Disr::from(variant.disr_val),
- adt::represent_node(bcx, cur.id),
- var_id,
- debug_loc)
- }
- _ => continue
- }
- }
- PatKind::Range(ref l1, ref l2) => {
- ConstantRange(ConstantExpr(&l1), ConstantExpr(&l2), debug_loc)
- }
- PatKind::Vec(ref before, None, ref after) => {
- SliceLengthEqual(before.len() + after.len(), debug_loc)
- }
- PatKind::Vec(ref before, Some(_), ref after) => {
- SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc)
- }
- _ => continue
- };
-
- if !found.iter().any(|x| x.eq(&opt, tcx)) {
- found.push(opt);
- }
- }
- found
-}
-
-struct ExtractedBlock<'blk, 'tcx: 'blk> {
- vals: Vec<ValueRef>,
- bcx: Block<'blk, 'tcx>,
-}
-
-fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- repr: &adt::Repr<'tcx>,
- disr_val: Disr,
- val: MatchInput)
- -> ExtractedBlock<'blk, 'tcx> {
- let _icx = push_ctxt("match::extract_variant_args");
- // Assume enums are always sized for now.
- let val = adt::MaybeSizedValue::sized(val.val);
- let args = (0..adt::num_args(repr, disr_val)).map(|i| {
- adt::trans_field_ptr(bcx, repr, val, disr_val, i)
- }).collect();
-
- ExtractedBlock { vals: args, bcx: bcx }
-}
-
-/// Helper for converting the ValueRef that we pass around in the match code,
-/// which is always an lvalue, into a Datum. Eventually we should just pass
-/// around a Datum and be done with it.
-fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> {
- val.to_datum(left_ty)
-}
-
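-/// Builds the value bound by a subslice pattern (the `x` in `[1, .. x]` from
-/// the module docs): `offset_left`/`offset_right` give the number of elements
-/// matched at the front/back of the vector, and the result is either a
-/// pointer to a shorter fixed-size array or a freshly allocated fat pointer
-/// to a slice.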
-fn bind_subslice_pat(bcx: Block,
- pat_id: ast::NodeId,
- val: MatchInput,
- offset_left: usize,
- offset_right: usize) -> ValueRef {
- let _icx = push_ctxt("match::bind_subslice_pat");
- let vec_ty = node_id_type(bcx, pat_id);
- let vec_ty_contents = match vec_ty.sty {
- ty::TyBox(ty) => ty,
- ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty,
- _ => vec_ty
- };
- let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx());
- let vec_datum = match_datum(val, vec_ty);
- let (base, len) = vec_datum.get_vec_base_and_len(bcx);
-
- let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]);
- let diff = offset_left + offset_right;
- if let ty::TyArray(ty, n) = vec_ty_contents.sty {
- let array_ty = bcx.tcx().mk_array(ty, n-diff);
- let llty_array = type_of::type_of(bcx.ccx(), array_ty);
- return PointerCast(bcx, slice_begin, llty_array.ptr_to());
- }
-
- let slice_len_offset = C_uint(bcx.ccx(), diff);
- let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None);
- let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased),
- bcx.tcx().mk_slice(unit_ty));
- let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
- Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val));
- Store(bcx, slice_len, expr::get_meta(bcx, scratch.val));
- scratch.val
-}
-
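-/// Extracts pointers to the elements tested by a vector pattern: the
-/// `before` leading elements plus the `after` trailing elements, the latter
-/// addressed relative to the runtime length (e.g. `before == 2, after == 1`
-/// yields pointers to `v[0]`, `v[1]`, and `v[len - 1]`).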
-fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- left_ty: Ty<'tcx>,
- before: usize,
- after: usize,
- val: MatchInput)
- -> ExtractedBlock<'blk, 'tcx> {
- let _icx = push_ctxt("match::extract_vec_elems");
- let vec_datum = match_datum(val, left_ty);
- let (base, len) = vec_datum.get_vec_base_and_len(bcx);
- let mut elems = vec![];
- elems.extend((0..before).map(|i| GEPi(bcx, base, &[i])));
- elems.extend((0..after).rev().map(|i| {
- InBoundsGEP(bcx, base, &[
- Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None)
- ])
- }));
- ExtractedBlock { vals: elems, bcx: bcx }
-}
-
-// Macro for deciding whether any of the remaining matches fit a given kind of
-// pattern. Note that, because the match is well-typed, either ALL of the
-// matches should fit that sort of pattern or NONE (however, some of the
-// matches may be wildcards like _ or identifiers).
-macro_rules! any_pat {
- ($m:expr, $col:expr, $pattern:pat) => (
- ($m).iter().any(|br| {
- match br.pats[$col].node {
- $pattern => true,
- _ => false
- }
- })
- )
-}
-
-fn any_uniq_pat(m: &[Match], col: usize) -> bool {
- any_pat!(m, col, PatKind::Box(_))
-}
-
-fn any_region_pat(m: &[Match], col: usize) -> bool {
- any_pat!(m, col, PatKind::Ref(..))
-}
-
-fn any_irrefutable_adt_pat(tcx: TyCtxt, m: &[Match], col: usize) -> bool {
- m.iter().any(|br| {
- let pat = br.pats[col];
- match pat.node {
- PatKind::Tuple(..) => true,
- PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Path(..) => {
- match tcx.expect_def(pat.id) {
- Def::Struct(..) | Def::TyAlias(..) | Def::AssociatedTy(..) => true,
- _ => false,
- }
- }
- _ => false
- }
- })
-}
-
-/// What to do when the pattern match fails.
-enum FailureHandler {
- Infallible,
- JumpToBasicBlock(BasicBlockRef),
- Unreachable
-}
-
-impl FailureHandler {
- fn is_fallible(&self) -> bool {
- match *self {
- Infallible => false,
- _ => true
- }
- }
-
- fn is_infallible(&self) -> bool {
- !self.is_fallible()
- }
-
- fn handle_fail(&self, bcx: Block) {
- match *self {
- Infallible =>
- bug!("attempted to panic in a non-panicking panic handler!"),
- JumpToBasicBlock(basic_block) =>
- Br(bcx, basic_block, DebugLoc::None),
- Unreachable =>
- build::Unreachable(bcx)
- }
- }
-}
-
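-// Picks the column of the match matrix to specialize on next. Columns that
-// consist entirely of irrefutable patterns score usize::MAX so they are
-// peeled off first; otherwise the column with the most refutable patterns
-// wins. Columns containing only wildcards are never considered.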
-fn pick_column_to_specialize(def_map: &RefCell<DefMap>, m: &[Match]) -> Option<usize> {
- fn pat_score(def_map: &RefCell<DefMap>, pat: &hir::Pat) -> usize {
- match pat.node {
- PatKind::Binding(_, _, Some(ref inner)) => pat_score(def_map, &inner),
- _ if pat_is_refutable(&def_map.borrow(), pat) => 1,
- _ => 0
- }
- }
-
- let column_score = |m: &[Match], col: usize| -> usize {
- let total_score = m.iter()
- .map(|row| row.pats[col])
- .map(|pat| pat_score(def_map, pat))
- .sum();
-
- // Irrefutable columns always go first; they'd only be duplicated in the branches.
- if total_score == 0 {
- std::usize::MAX
- } else {
- total_score
- }
- };
-
- let column_contains_any_nonwild_patterns = |&col: &usize| -> bool {
- m.iter().any(|row| match row.pats[col].node {
- PatKind::Wild => false,
- _ => true
- })
- };
-
- (0..m[0].pats.len())
- .filter(column_contains_any_nonwild_patterns)
- .map(|col| (col, column_score(m, col)))
- .max_by_key(|&(_, score)| score)
- .map(|(col, _)| col)
-}
-
-// Compiles an equality comparison between two values of type `rhs_t`.
-fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: Ty<'tcx>,
- debug_loc: DebugLoc)
- -> Result<'blk, 'tcx> {
- fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- lhs_data: ValueRef,
- lhs_len: ValueRef,
- rhs_data: ValueRef,
- rhs_len: ValueRef,
- rhs_t: Ty<'tcx>,
- debug_loc: DebugLoc)
- -> Result<'blk, 'tcx> {
- let did = langcall(bcx.tcx(),
- None,
- &format!("comparison of `{}`", rhs_t),
- StrEqFnLangItem);
- let args = [lhs_data, lhs_len, rhs_data, rhs_len];
- Callee::def(bcx.ccx(), did, Substs::empty(bcx.tcx()))
- .call(bcx, debug_loc, ArgVals(&args), None)
- }
-
- let _icx = push_ctxt("compare_values");
- if rhs_t.is_scalar() {
- let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc);
- return Result::new(cx, cmp);
- }
-
- match rhs_t.sty {
- ty::TyRef(_, mt) => match mt.ty.sty {
- ty::TyStr => {
- let lhs_data = Load(cx, expr::get_dataptr(cx, lhs));
- let lhs_len = Load(cx, expr::get_meta(cx, lhs));
- let rhs_data = Load(cx, expr::get_dataptr(cx, rhs));
- let rhs_len = Load(cx, expr::get_meta(cx, rhs));
- compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
- }
- ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty {
- ty::TyUint(ast::UintTy::U8) => {
- // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item,
- // which calls memcmp().
- let pat_len = val_ty(rhs).element_type().array_length();
- let ty_str_slice = cx.tcx().mk_static_str();
-
- let rhs_data = GEPi(cx, rhs, &[0, 0]);
- let rhs_len = C_uint(cx.ccx(), pat_len);
-
- let lhs_data;
- let lhs_len;
- if val_ty(lhs) == val_ty(rhs) {
- // Both the discriminant and the pattern are thin pointers
- lhs_data = GEPi(cx, lhs, &[0, 0]);
- lhs_len = C_uint(cx.ccx(), pat_len);
- } else {
- // The discriminant is a fat pointer
- let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to();
- let lhs_str = PointerCast(cx, lhs, llty_str_slice);
- lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str));
- lhs_len = Load(cx, expr::get_meta(cx, lhs_str));
- }
-
- compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc)
- },
- _ => bug!("only byte strings supported in compare_values"),
- },
- _ => bug!("only string and byte strings supported in compare_values"),
- },
- _ => bug!("only scalars, byte strings, and strings supported in compare_values"),
- }
-}
-
-/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map
-fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- bindings_map: &BindingsMap<'tcx>,
- cs: Option<cleanup::ScopeId>)
- -> Block<'blk, 'tcx> {
- for (&name, &binding_info) in bindings_map {
- let (llval, aliases_other_state) = match binding_info.trmode {
- // By-value binding into a fresh copy: load the pointer into the
- // matched value from `llmatch`, then copy the pointed-to data into
- // our own alloca
- TrByCopy(llbinding) |
- TrByMoveIntoCopy(llbinding) => {
- let llval = Load(bcx, binding_info.llmatch);
- let lvalue = match binding_info.trmode {
- TrByCopy(..) =>
- Lvalue::new("_match::insert_lllocals"),
- TrByMoveIntoCopy(..) => {
- // match_input moves from the input into a
- // separate stack slot.
- //
- // E.g. consider moving the value `D(A)` out
- // of the tuple `(D(A), D(B))` and into the
- // local variable `x` via the pattern `(x,_)`,
- // leaving the remainder of the tuple `(_,
- // D(B))` still to be dropped in the future.
- //
- // Thus, here we must zero the place that we
- // are moving *from*, because we do not yet
- // track drop flags for a fragmented parent
- // match input expression.
- //
- // Longer term we will be able to map the move
- // into `(x, _)` up to the parent path that
- // owns the whole tuple, and mark the
- // corresponding stack-local drop-flag
- // tracking the first component of the tuple.
- let hint_kind = HintKind::ZeroAndMaintain;
- Lvalue::new_with_hint("_match::insert_lllocals (match_input)",
- bcx, binding_info.id, hint_kind)
- }
- _ => bug!(),
- };
- let datum = Datum::new(llval, binding_info.ty, lvalue);
- call_lifetime_start(bcx, llbinding);
- bcx = datum.store_to(bcx, llbinding);
- if let Some(cs) = cs {
- bcx.fcx.schedule_lifetime_end(cs, llbinding);
- }
-
- (llbinding, false)
- },
-
- // By value move bindings: load from the ptr into the matched value
- TrByMoveRef => (Load(bcx, binding_info.llmatch), true),
-
- // By ref binding: use the ptr into the matched value
- TrByRef => (binding_info.llmatch, true),
- };
-
-
- // A local that aliases some other state must be zeroed, since
- // the other state (e.g. some parent data that we matched
- // into) will still have its subcomponents (such as this
- // local) destructed at the end of the parent's scope. Longer
- // term, we will properly map such parents to the set of
- // unique drop flags for its fragments.
- let hint_kind = if aliases_other_state {
- HintKind::ZeroAndMaintain
- } else {
- HintKind::DontZeroJustUse
- };
- let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)",
- bcx,
- binding_info.id,
- hint_kind);
- let datum = Datum::new(llval, binding_info.ty, lvalue);
- if let Some(cs) = cs {
- let opt_datum = lvalue.dropflag_hint(bcx);
- bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch);
- bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum);
- }
-
- debug!("binding {} to {:?}", binding_info.id, Value(llval));
- bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum);
- debuginfo::create_match_binding_metadata(bcx, name, binding_info);
- }
- bcx
-}
-
-fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- guard_expr: &hir::Expr,
- data: &ArmData<'p, 'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[MatchInput],
- chk: &FailureHandler,
- has_genuine_default: bool)
- -> Block<'blk, 'tcx> {
- debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})",
- bcx.to_str(), guard_expr, m, vals);
- let _indenter = indenter();
-
- let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
-
- let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr));
- let val = val.to_llbool(bcx);
-
- for (_, &binding_info) in &data.bindings_map {
- if let Some(llbinding) = binding_info.trmode.alloca_if_copy() {
- call_lifetime_end(bcx, llbinding)
- }
- }
-
- for (_, &binding_info) in &data.bindings_map {
- bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id);
- }
-
- with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| {
- for (_, &binding_info) in &data.bindings_map {
- call_lifetime_end(bcx, binding_info.llmatch);
- }
- match chk {
- // If the default arm is the only one left, move on to the next
- // condition explicitly rather than (possibly) falling back to
- // the default arm.
- &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => {
- chk.handle_fail(bcx);
- }
- _ => {
- compile_submatch(bcx, m, vals, chk, has_genuine_default);
- }
- };
- bcx
- })
-}
-
-fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[MatchInput],
- chk: &FailureHandler,
- has_genuine_default: bool) {
- debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])",
- bcx.to_str(), m, vals);
- let _indenter = indenter();
- let _icx = push_ctxt("match::compile_submatch");
- let mut bcx = bcx;
- if m.is_empty() {
- if chk.is_fallible() {
- chk.handle_fail(bcx);
- }
- return;
- }
-
- let tcx = bcx.tcx();
- let def_map = &tcx.def_map;
- match pick_column_to_specialize(def_map, m) {
- Some(col) => {
- let val = vals[col];
- if has_nested_bindings(m, col) {
- let expanded = expand_nested_bindings(bcx, m, col, val);
- compile_submatch_continue(bcx,
- &expanded[..],
- vals,
- chk,
- col,
- val,
- has_genuine_default)
- } else {
- compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default)
- }
- }
- None => {
- let data = &m[0].data;
- for &(ref name, ref value_ptr) in &m[0].bound_ptrs {
- let binfo = *data.bindings_map.get(name).unwrap();
- call_lifetime_start(bcx, binfo.llmatch);
- if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) {
- expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch);
- }
- else {
- Store(bcx, *value_ptr, binfo.llmatch);
- }
- }
- match data.arm.guard {
- Some(ref guard_expr) => {
- bcx = compile_guard(bcx,
- &guard_expr,
- m[0].data,
- &m[1..m.len()],
- vals,
- chk,
- has_genuine_default);
- }
- _ => ()
- }
- Br(bcx, data.bodycx.llbb, DebugLoc::None);
- }
- }
-}
-
-fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- m: &[Match<'a, 'p, 'blk, 'tcx>],
- vals: &[MatchInput],
- chk: &FailureHandler,
- col: usize,
- val: MatchInput,
- has_genuine_default: bool) {
- let fcx = bcx.fcx;
- let tcx = bcx.tcx();
-
- let mut vals_left = vals[0..col].to_vec();
- vals_left.extend_from_slice(&vals[col + 1..]);
- let ccx = bcx.fcx.ccx;
-
- // Find a real id (we're adding placeholder wildcard patterns, but
- // each column is guaranteed to have at least one real pattern)
- let pat_id = m.iter().map(|br| br.pats[col].id)
- .find(|&id| id != DUMMY_NODE_ID)
- .unwrap_or(DUMMY_NODE_ID);
-
- let left_ty = if pat_id == DUMMY_NODE_ID {
- tcx.mk_nil()
- } else {
- node_id_type(bcx, pat_id)
- };
-
- let mcx = check_match::MatchCheckCtxt {
- tcx: bcx.tcx(),
- param_env: bcx.tcx().empty_parameter_environment(),
- };
- let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) {
- let repr = adt::represent_type(bcx.ccx(), left_ty);
- let arg_count = adt::num_args(&repr, Disr(0));
- let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) {
- (arg_count, val.val)
- } else {
- // For an unsized ADT (i.e. DST struct), we need to treat
- // the last field specially: instead of simply passing a
- // ValueRef pointing to that field, as with all the others,
- // we skip it and instead construct a 'fat ptr' below.
- (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val)))
- };
- let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
- // By definition, these are all sized
- adt::trans_field_ptr(bcx, &repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix)
- ).collect();
-
- match left_ty.sty {
- ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => {
- // The last field is technically unsized but
- // since we can only ever match that field behind
- // a reference we construct a fat ptr here.
- let unsized_ty = def.struct_variant().fields.last().map(|field| {
- monomorphize::field_ty(bcx.tcx(), substs, field)
- }).unwrap();
- let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr");
-
- let meta = Load(bcx, expr::get_meta(bcx, val.val));
- let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta);
-
- let data = adt::trans_field_ptr(bcx, &repr, struct_val, Disr(0), arg_count);
- Store(bcx, data, expr::get_dataptr(bcx, scratch));
- Store(bcx, meta, expr::get_meta(bcx, scratch));
- field_vals.push(scratch);
- }
- _ => {}
- }
- Some(field_vals)
- } else if any_uniq_pat(m, col) || any_region_pat(m, col) {
- let ptr = if type_is_fat_ptr(bcx.tcx(), left_ty) {
- val.val
- } else {
- Load(bcx, val.val)
- };
- Some(vec!(ptr))
- } else {
- match left_ty.sty {
- ty::TyArray(_, n) => {
- let args = extract_vec_elems(bcx, left_ty, n, 0, val);
- Some(args.vals)
- }
- _ => None
- }
- };
- match adt_vals {
- Some(field_vals) => {
- let pats = enter_match(bcx, m, col, val, |pats|
- check_match::specialize(&mcx, pats,
- &Constructor::Single, col,
- field_vals.len())
- );
- let mut vals: Vec<_> = field_vals.into_iter()
- .map(|v|MatchInput::from_val(v))
- .collect();
- vals.extend_from_slice(&vals_left);
- compile_submatch(bcx, &pats, &vals, chk, has_genuine_default);
- return;
- }
- _ => ()
- }
-
- // Decide what kind of branch we need
- let opts = get_branches(bcx, m, col);
- debug!("options={:?}", opts);
- let mut kind = NoBranch;
- let mut test_val = val.val;
- debug!("test_val={:?}", Value(test_val));
- if !opts.is_empty() {
- match opts[0] {
- ConstantValue(..) | ConstantRange(..) => {
- test_val = load_if_immediate(bcx, val.val, left_ty);
- kind = if left_ty.is_integral() {
- Switch
- } else {
- Compare
- };
- }
- Variant(_, ref repr, _, _) => {
- let (the_kind, val_opt) = adt::trans_switch(bcx, &repr,
- val.val, true);
- kind = the_kind;
- if let Some(tval) = val_opt { test_val = tval; }
- }
- SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) => {
- let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty);
- test_val = len;
- kind = Switch;
- }
- }
- }
- for o in &opts {
- match *o {
- ConstantRange(..) => { kind = Compare; break },
- SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break },
- _ => ()
- }
- }
- let else_cx = match kind {
- NoBranch | Single => bcx,
- _ => bcx.fcx.new_temp_block("match_else")
- };
- let sw = if kind == Switch {
- build::Switch(bcx, test_val, else_cx.llbb, opts.len())
- } else {
- C_int(ccx, 0) // Placeholder for when not using a switch
- };
-
- let defaults = enter_default(else_cx, m, col, val);
- let exhaustive = chk.is_infallible() && defaults.is_empty();
- let len = opts.len();
-
- if exhaustive && kind == Switch {
- build::Unreachable(else_cx);
- }
-
- // Compile subtrees for each option
- for (i, opt) in opts.iter().enumerate() {
- // In some cases of range and vector pattern matching, we need to
- // override the failure case so that instead of failing, it proceeds
- // to try more matching. branch_chk, then, is the proper failure case
- // for the current conditional branch.
- let mut branch_chk = None;
- let mut opt_cx = else_cx;
- let debug_loc = opt.debug_loc();
-
- if kind == Switch || !exhaustive || i + 1 < len {
- opt_cx = bcx.fcx.new_temp_block("match_case");
- match kind {
- Single => Br(bcx, opt_cx.llbb, debug_loc),
- Switch => {
- match opt.trans(bcx) {
- SingleResult(r) => {
- AddCase(sw, r.val, opt_cx.llbb);
- bcx = r.bcx;
- }
- _ => {
- bug!(
- "in compile_submatch, expected \
- opt.trans() to return a SingleResult")
- }
- }
- }
- Compare | CompareSliceLength => {
- let t = if kind == Compare {
- left_ty
- } else {
- tcx.types.usize // vector length
- };
- let Result { bcx: after_cx, val: matches } = {
- match opt.trans(bcx) {
- SingleResult(Result { bcx, val }) => {
- compare_values(bcx, test_val, val, t, debug_loc)
- }
- RangeResult(Result { val: vbegin, .. },
- Result { bcx, val: vend }) => {
- let llge = compare_scalar_types(bcx, test_val, vbegin,
- t, hir::BiGe, debug_loc);
- let llle = compare_scalar_types(bcx, test_val, vend,
- t, hir::BiLe, debug_loc);
- Result::new(bcx, And(bcx, llge, llle, DebugLoc::None))
- }
- LowerBound(Result { bcx, val }) => {
- Result::new(bcx, compare_scalar_types(bcx, test_val,
- val, t, hir::BiGe,
- debug_loc))
- }
- }
- };
- bcx = fcx.new_temp_block("compare_next");
-
- // If none of the sub-cases match, and the current condition
- // is guarded or has multiple patterns, move on to the next
- // condition, if there is any, rather than falling back to
- // the default.
- let guarded = m[i].data.arm.guard.is_some();
- let multi_pats = m[i].pats.len() > 1;
- if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
- branch_chk = Some(JumpToBasicBlock(bcx.llbb));
- }
- CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc);
- }
- _ => ()
- }
- } else if kind == Compare || kind == CompareSliceLength {
- Br(bcx, else_cx.llbb, debug_loc);
- }
-
- let mut size = 0;
- let mut unpacked = Vec::new();
- match *opt {
- Variant(disr_val, ref repr, _, _) => {
- let ExtractedBlock {vals: argvals, bcx: new_bcx} =
- extract_variant_args(opt_cx, &repr, disr_val, val);
- size = argvals.len();
- unpacked = argvals;
- opt_cx = new_bcx;
- }
- SliceLengthEqual(len, _) => {
- let args = extract_vec_elems(opt_cx, left_ty, len, 0, val);
- size = args.vals.len();
- unpacked = args.vals.clone();
- opt_cx = args.bcx;
- }
- SliceLengthGreaterOrEqual(before, after, _) => {
- let args = extract_vec_elems(opt_cx, left_ty, before, after, val);
- size = args.vals.len();
- unpacked = args.vals.clone();
- opt_cx = args.bcx;
- }
- ConstantValue(..) | ConstantRange(..) => ()
- }
- let opt_ms = enter_opt(opt_cx, pat_id, m, opt, col, size, val);
- let mut opt_vals: Vec<_> = unpacked.into_iter()
- .map(|v|MatchInput::from_val(v))
- .collect();
- opt_vals.extend_from_slice(&vals_left[..]);
- compile_submatch(opt_cx,
- &opt_ms[..],
- &opt_vals[..],
- branch_chk.as_ref().unwrap_or(chk),
- has_genuine_default);
- }
-
- // Compile the fall-through case, if any
- if !exhaustive && kind != Single {
- if kind == Compare || kind == CompareSliceLength {
- Br(bcx, else_cx.llbb, DebugLoc::None);
- }
- match chk {
- // If there is only one default arm left, move on to the next
- // condition explicitly rather than (eventually) falling back to
- // the last default arm.
- &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => {
- chk.handle_fail(else_cx);
- }
- _ => {
- compile_submatch(else_cx,
- &defaults[..],
- &vals_left[..],
- chk,
- has_genuine_default);
- }
- }
- }
-}
-
-pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- match_expr: &hir::Expr,
- discr_expr: &hir::Expr,
- arms: &[hir::Arm],
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::trans_match");
- trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
-}
-
-/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
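-/// (for example, in `match x { y => { x = 5; } }` the discriminant `x` is
-/// reassigned in the arm body). If it is, by-value bindings must copy the
-/// matched value into a fresh alloca rather than alias the original.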
-fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool {
- let (vid, field) = match discr.node {
- hir::ExprPath(..) => match bcx.tcx().expect_def(discr.id) {
- Def::Local(_, vid) | Def::Upvar(_, vid, _, _) => (vid, None),
- _ => return false
- },
- hir::ExprField(ref base, field) => {
- let vid = match bcx.tcx().expect_def_or_none(base.id) {
- Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
- _ => return false
- };
- (vid, Some(mc::NamedField(field.node)))
- },
- hir::ExprTupField(ref base, field) => {
- let vid = match bcx.tcx().expect_def_or_none(base.id) {
- Some(Def::Local(_, vid)) | Some(Def::Upvar(_, vid, _, _)) => vid,
- _ => return false
- };
- (vid, Some(mc::PositionalField(field.node)))
- },
- _ => return false
- };
-
- let mut rc = ReassignmentChecker {
- node: vid,
- field: field,
- reassigned: false
- };
- bcx.tcx().normalizing_infer_ctxt(Reveal::All).enter(|infcx| {
- let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx);
- visitor.walk_expr(body);
- });
- rc.reassigned
-}
-
-struct ReassignmentChecker {
- node: ast::NodeId,
- field: Option<mc::FieldName>,
- reassigned: bool
-}
-
-// Determine if the expression we're matching on is reassigned to within
-// the body of the match's arm.
-// We only care about the `mutate` callback, since this check only matters
-// for cases where the matched value is moved.
-impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker {
- fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {}
- fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {}
- fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {}
- fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region,
- _: ty::BorrowKind, _: euv::LoanCause) {}
- fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {}
-
- fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) {
- let cmt_id = |cmt: &mc::cmt| match cmt.cat {
- Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, ..}, ..}) |
- Categorization::Local(vid) => Some(vid),
- Categorization::Interior(ref base_cmt, mc::InteriorField(_)) => Some(base_cmt.id),
- _ => None
- };
- match cmt.cat {
- Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) |
- Categorization::Local(vid) => self.reassigned |= self.node == vid,
- ref cat => {
- let mut cat = cat;
- while let &Categorization::Interior(ref base_cmt, mc::InteriorField(field)) = cat {
- if let Some(vid) = cmt_id(base_cmt) {
- if self.node == vid && (self.field.is_none() || self.field == Some(field)) {
- self.reassigned = true;
- return;
- }
- }
- cat = &base_cmt.cat;
- }
- }
- }
- }
-}
-
-fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat,
- discr: &hir::Expr, body: &hir::Expr)
- -> BindingsMap<'tcx> {
- // Create the bindings map, which is a mapping from each binding name
- // to an alloca() that will be the value for that local variable.
- // Note that we use the names because each binding will have many ids
- // from the various alternatives.
- let ccx = bcx.ccx();
- let reassigned = is_discr_reassigned(bcx, discr, body);
- let mut bindings_map = FnvHashMap();
- pat_bindings(&pat, |bm, p_id, span, path1| {
- let name = path1.node;
- let variable_ty = node_id_type(bcx, p_id);
- let llvariable_ty = type_of::type_of(ccx, variable_ty);
- let tcx = bcx.tcx();
- let param_env = tcx.empty_parameter_environment();
-
- let llmatch;
- let trmode;
- let moves_by_default = variable_ty.moves_by_default(tcx, ¶m_env, span);
- match bm {
- hir::BindByValue(_) if !moves_by_default || reassigned =>
- {
- llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch");
- let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name));
- trmode = if moves_by_default {
- TrByMoveIntoCopy(llcopy)
- } else {
- TrByCopy(llcopy)
- };
- }
- hir::BindByValue(_) => {
- // in this case, the final type of the variable will be T,
- // but during matching we need to store a *T as explained
- // above
- llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name));
- trmode = TrByMoveRef;
- }
- hir::BindByRef(_) => {
- llmatch = alloca(bcx, llvariable_ty, &bcx.name(name));
- trmode = TrByRef;
- }
- };
- bindings_map.insert(name, BindingInfo {
- llmatch: llmatch,
- trmode: trmode,
- id: p_id,
- span: span,
- ty: variable_ty
- });
- });
- return bindings_map;
-}
-
-fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
- match_id: ast::NodeId,
- discr_expr: &hir::Expr,
- arms: &[hir::Arm],
- dest: Dest) -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::trans_match_inner");
- let fcx = scope_cx.fcx;
- let mut bcx = scope_cx;
- let tcx = bcx.tcx();
-
- let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr,
- "match"));
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let t = node_id_type(bcx, discr_expr.id);
- let chk = if t.is_uninhabited(tcx) {
- Unreachable
- } else {
- Infallible
- };
-
- let arm_datas: Vec<ArmData> = arms.iter().map(|arm| ArmData {
- bodycx: fcx.new_id_block("case_body", arm.body.id),
- arm: arm,
- bindings_map: create_bindings_map(bcx, &arm.pats[0], discr_expr, &arm.body)
- }).collect();
-
- let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo {
- Some(FnvHashMap())
- } else {
- None
- };
-
- let arm_pats: Vec<Vec<P<hir::Pat>>> = {
- let mut static_inliner = StaticInliner::new(scope_cx.tcx(),
- pat_renaming_map.as_mut());
- arm_datas.iter().map(|arm_data| {
- arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect()
- }).collect()
- };
-
- let mut matches = Vec::new();
- for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) {
- matches.extend(pats.iter().map(|p| Match {
- pats: vec![&p],
- data: arm_data,
- bound_ptrs: Vec::new(),
- pat_renaming_map: pat_renaming_map.as_ref()
- }));
- }
-
- // `compile_submatch` works on one column of arm patterns at a time and
- // then peels that column off. So as we progress, it may become
- // impossible to tell whether we have a genuine default arm, i.e.
- // `_ => foo`, or not. Sometimes it is important to know this in order
- // to decide whether to move on to the next condition or fall back to
- // the default arm.
- let has_default = arms.last().map_or(false, |arm| {
- arm.pats.len() == 1
- && arm.pats.last().unwrap().node == PatKind::Wild
- });
-
- compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default);
-
- let mut arm_cxs = Vec::new();
- for arm_data in &arm_datas {
- let mut bcx = arm_data.bodycx;
-
- // insert bindings into the lllocals map and add cleanups
- let cs = fcx.push_custom_cleanup_scope();
- bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs)));
- bcx = expr::trans_into(bcx, &arm_data.arm.body, dest);
- bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs);
- arm_cxs.push(bcx);
- }
-
- bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]);
- return bcx;
-}
-
-/// Generates code for a local variable declaration like `let <pat>;` or `let <pat> =
-/// <opt_init_expr>`.
-pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- local: &hir::Local)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("match::store_local");
- let mut bcx = bcx;
- let tcx = bcx.tcx();
- let pat = &local.pat;
-
- fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- pat: &hir::Pat)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("create_dummy_locals");
- // create dummy memory for the variables if we have no
- // value to store into them immediately
- let tcx = bcx.tcx();
- pat_bindings(pat, |_, p_id, _, path1| {
- let scope = cleanup::var_scope(tcx, p_id);
- bcx = mk_binding_alloca(
- bcx, p_id, path1.node, scope, (),
- "_match::store_local::create_dummy_locals",
- |(), bcx, Datum { val: llval, ty, kind }| {
- // Dummy-locals start out uninitialized, so set their
- // drop-flag hints (if any) to "moved."
- if let Some(hint) = kind.dropflag_hint(bcx) {
- let moved_hint = adt::DTOR_MOVED_HINT;
- debug!("store moved_hint={} for hint={:?}, uninitialized dummy",
- moved_hint, hint);
- Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value());
- }
-
- if kind.drop_flag_info.must_zero() {
- // if no drop-flag hint, or the hint requires
- // we maintain the embedded drop-flag, then
- // mark embedded drop-flag(s) as moved
- // (i.e. "already dropped").
- drop_done_fill_mem(bcx, llval, ty);
- }
- bcx
- });
- });
- bcx
- }
-
- match local.init {
- Some(ref init_expr) => {
- // Optimize the "let x = expr" case. This just writes
- // the result of evaluating `expr` directly into the alloca
- // for `x`. Often the general path results in similar or the
- // same code post-optimization, but not always. In particular,
- // in unsafe code, you can have expressions like
- //
- // let x = intrinsics::uninit();
- //
- // In such cases, the more general path is unsafe, because
- // it assumes it is matching against a valid value.
- if let Some(name) = simple_name(pat) {
- let var_scope = cleanup::var_scope(tcx, local.id);
- return mk_binding_alloca(
- bcx, pat.id, name, var_scope, (),
- "_match::store_local",
- |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &init_expr,
- expr::SaveIn(v)));
- }
-
- // General path.
- let init_datum =
- unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &init_expr, "let"));
- if bcx.sess().asm_comments() {
- add_comment(bcx, "creating zeroable ref llval");
- }
- let var_scope = cleanup::var_scope(tcx, local.id);
- bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope)
- }
- None => {
- create_dummy_locals(bcx, pat)
- }
- }
-}
-
-fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
- p_id: ast::NodeId,
- name: ast::Name,
- cleanup_scope: cleanup::ScopeId,
- arg: A,
- caller_name: &'static str,
- populate: F)
- -> Block<'blk, 'tcx> where
- F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>,
-{
- let var_ty = node_id_type(bcx, p_id);
-
- // Allocate memory on stack for the binding.
- let llval = alloc_ty(bcx, var_ty, &bcx.name(name));
- let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse);
- let datum = Datum::new(llval, var_ty, lvalue);
-
- debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}",
- cleanup_scope, Value(llval), var_ty);
-
- // Subtle: be sure that we *populate* the memory *before*
- // we schedule the cleanup.
- call_lifetime_start(bcx, llval);
- let bcx = populate(arg, bcx, datum);
- bcx.fcx.schedule_lifetime_end(cleanup_scope, llval);
- bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx));
-
- // Now that memory is initialized and has cleanup scheduled,
- // insert datum into the local variable map.
- bcx.fcx.lllocals.borrow_mut().insert(p_id, datum);
- bcx
-}
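The "populate before scheduling the cleanup" ordering can be sketched outside the compiler as a small analogy (a minimal sketch, not trans code; `Armed` and `bind` are made-up names):

```rust
// Storage first, population second, and only then is a destructor
// (cleanup) armed for the slot.
struct Armed<T>(T);

impl<T> Drop for Armed<T> {
    fn drop(&mut self) {
        // Cleanup only ever sees fully populated values.
    }
}

fn bind<T>(populate: impl FnOnce() -> T) -> Armed<T> {
    let value = populate(); // populate the memory...
    Armed(value)            // ...before the drop (cleanup) is scheduled
}
```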
-
-/// A simple version of the pattern matching code that only handles
-/// irrefutable patterns. This is used in let/argument patterns,
-/// not in match statements. Unifying this code with the code above
-/// sounds nice, but in practice it produces very inefficient code,
-/// since the match code is so much more general. In most cases,
-/// LLVM is able to optimize the code, but it causes longer compile
-/// times and makes the generated code nigh impossible to read.
-///
-/// # Arguments
-/// - bcx: starting basic block context
-/// - pat: the irrefutable pattern being matched.
-/// - val: the value being matched -- must be an lvalue (by ref, with cleanup)
-pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- pat: &hir::Pat,
- val: MatchInput,
- cleanup_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx> {
- debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})",
- bcx.to_str(), pat, val);
-
- if bcx.sess().asm_comments() {
- add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})",
- pat));
- }
-
- let _indenter = indenter();
-
- let _icx = push_ctxt("match::bind_irrefutable_pat");
- let mut bcx = bcx;
- let tcx = bcx.tcx();
- let ccx = bcx.ccx();
- match pat.node {
- PatKind::Binding(pat_binding_mode, ref path1, ref inner) => {
- // Allocate the stack slot where the value of this
- // binding will live and place it into the appropriate
- // map.
- bcx = mk_binding_alloca(bcx, pat.id, path1.node, cleanup_scope, (),
- "_match::bind_irrefutable_pat",
- |(), bcx, Datum { val: llval, ty, kind: _ }| {
- match pat_binding_mode {
- hir::BindByValue(_) => {
- // By value binding: move the value that `val`
- // points at into the binding's stack slot.
- let d = val.to_datum(ty);
- d.store_to(bcx, llval)
- }
-
- hir::BindByRef(_) => {
- // By ref binding: the value of the variable is the
- // pointer `val` itself, or the fat pointer referenced by `val`.
- if type_is_fat_ptr(bcx.tcx(), ty) {
- expr::copy_fat_ptr(bcx, val.val, llval);
- }
- else {
- Store(bcx, val.val, llval);
- }
-
- bcx
- }
- }
- });
-
- if let Some(ref inner_pat) = *inner {
- bcx = bind_irrefutable_pat(bcx, &inner_pat, val, cleanup_scope);
- }
- }
- PatKind::TupleStruct(_, ref sub_pats, ddpos) => {
- match bcx.tcx().expect_def(pat.id) {
- Def::Variant(enum_id, var_id) => {
- let repr = adt::represent_node(bcx, pat.id);
- let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id);
- let args = extract_variant_args(bcx,
- &repr,
- Disr::from(vinfo.disr_val),
- val);
- for (i, subpat) in sub_pats.iter()
- .enumerate_and_adjust(vinfo.fields.len(), ddpos) {
- bcx = bind_irrefutable_pat(
- bcx,
- subpat,
- MatchInput::from_val(args.vals[i]),
- cleanup_scope);
- }
- }
- Def::Struct(..) => {
- let expected_len = match *ccx.tcx().pat_ty(&pat) {
- ty::TyS{sty: ty::TyStruct(adt_def, _), ..} => {
- adt_def.struct_variant().fields.len()
- }
- ref ty => {
- span_bug!(pat.span, "tuple struct pattern unexpected type {:?}", ty);
- }
- };
-
- let repr = adt::represent_node(bcx, pat.id);
- let val = adt::MaybeSizedValue::sized(val.val);
- for (i, elem) in sub_pats.iter().enumerate_and_adjust(expected_len, ddpos) {
- let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i);
- bcx = bind_irrefutable_pat(
- bcx,
- &elem,
- MatchInput::from_val(fldptr),
- cleanup_scope);
- }
- }
- _ => {
- // Nothing to do here.
- }
- }
- }
- PatKind::Struct(_, ref fields, _) => {
- let tcx = bcx.tcx();
- let pat_ty = node_id_type(bcx, pat.id);
- let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
- let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id);
-
- let val = if type_is_sized(tcx, pat_ty) {
- adt::MaybeSizedValue::sized(val.val)
- } else {
- let data = Load(bcx, expr::get_dataptr(bcx, val.val));
- let meta = Load(bcx, expr::get_meta(bcx, val.val));
- adt::MaybeSizedValue::unsized_(data, meta)
- };
-
- for f in fields {
- let name = f.node.name;
- let field_idx = pat_v.field_index(name);
- let mut fldptr = adt::trans_field_ptr(
- bcx,
- &pat_repr,
- val,
- pat_v.discr,
- field_idx);
-
- let fty = pat_v.fields[field_idx].1;
- // If it's not sized, then construct a fat pointer instead of
- // a regular one
- if !type_is_sized(tcx, fty) {
- let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr");
- debug!("Creating fat pointer {:?}", Value(scratch));
- Store(bcx, fldptr, expr::get_dataptr(bcx, scratch));
- Store(bcx, val.meta, expr::get_meta(bcx, scratch));
- fldptr = scratch;
- }
- bcx = bind_irrefutable_pat(bcx,
- &f.node.pat,
- MatchInput::from_val(fldptr),
- cleanup_scope);
- }
- }
- PatKind::Tuple(ref elems, ddpos) => {
- match tcx.node_id_to_type(pat.id).sty {
- ty::TyTuple(ref tys) => {
- let repr = adt::represent_node(bcx, pat.id);
- let val = adt::MaybeSizedValue::sized(val.val);
- for (i, elem) in elems.iter().enumerate_and_adjust(tys.len(), ddpos) {
- let fldptr = adt::trans_field_ptr(bcx, &repr, val, Disr(0), i);
- bcx = bind_irrefutable_pat(
- bcx,
- &elem,
- MatchInput::from_val(fldptr),
- cleanup_scope);
- }
- }
- ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty),
- }
- }
- PatKind::Box(ref inner) => {
- let pat_ty = node_id_type(bcx, inner.id);
- // Pass along DSTs as fat pointers.
- let val = if type_is_fat_ptr(tcx, pat_ty) {
- // We need to check for this, as the pattern could be binding
- // a fat pointer by-value.
- if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node {
- val.val
- } else {
- Load(bcx, val.val)
- }
- } else if type_is_sized(tcx, pat_ty) {
- Load(bcx, val.val)
- } else {
- val.val
- };
- bcx = bind_irrefutable_pat(
- bcx, &inner, MatchInput::from_val(val), cleanup_scope);
- }
- PatKind::Ref(ref inner, _) => {
- let pat_ty = node_id_type(bcx, inner.id);
- // Pass along DSTs as fat pointers.
- let val = if type_is_fat_ptr(tcx, pat_ty) {
- // We need to check for this, as the pattern could be binding
- // a fat pointer by-value.
- if let PatKind::Binding(hir::BindByRef(..),_,_) = inner.node {
- val.val
- } else {
- Load(bcx, val.val)
- }
- } else if type_is_sized(tcx, pat_ty) {
- Load(bcx, val.val)
- } else {
- val.val
- };
- bcx = bind_irrefutable_pat(
- bcx,
- &inner,
- MatchInput::from_val(val),
- cleanup_scope);
- }
- PatKind::Vec(ref before, ref slice, ref after) => {
- let pat_ty = node_id_type(bcx, pat.id);
- let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val);
- match slice {
- &Some(_) => {
- extracted.vals.insert(
- before.len(),
- bind_subslice_pat(bcx, pat.id, val, before.len(), after.len())
- );
- }
- &None => ()
- }
- bcx = before
- .iter()
- .chain(slice.iter())
- .chain(after.iter())
- .zip(extracted.vals)
- .fold(bcx, |bcx, (inner, elem)| {
- bind_irrefutable_pat(
- bcx,
- &inner,
- MatchInput::from_val(elem),
- cleanup_scope)
- });
- }
- PatKind::Path(..) | PatKind::Wild |
- PatKind::Lit(..) | PatKind::Range(..) => ()
- }
- return bcx;
-}
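For reference, the pattern kinds matched above correspond to ordinary user-level bindings like these (illustrative code only):

```rust
struct Point { x: i32, y: i32 }

fn main() {
    let p = Point { x: 1, y: 2 };
    let Point { x, y } = p;  // PatKind::Struct
    let (a, b) = (x, y);     // PatKind::Tuple
    let ref r = a;           // PatKind::Binding with hir::BindByRef
    let _ = (b, *r);         // `_` is PatKind::Wild: nothing to bind
}
```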
use syntax::ast;
use syntax::attr;
use syntax::attr::IntType;
-use _match;
use abi::FAT_PTR_ADDR;
-use base::InitAlloca;
use build::*;
-use cleanup;
-use cleanup::CleanupMethods;
use common::*;
-use datum;
use debuginfo::DebugLoc;
use glue;
use machine;
use type_of;
use value::Value;
+#[derive(Copy, Clone, PartialEq)]
+pub enum BranchKind {
+ Switch,
+ Single
+}
+
type Hint = attr::ReprAttr;
// Representation of the context surrounding an unsized type. I want
}
}
-/// Convenience for `represent_type`. There should probably be more of
-/// these, for places in trans where the `Ty` isn't directly
-/// available.
-pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- node: ast::NodeId) -> Rc<Repr<'tcx>> {
- represent_type(bcx.ccx(), node_id_type(bcx, node))
-}
-
/// Decides how to represent a given type.
pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>)
repr
}
-const fn repeat_u8_as_u32(val: u8) -> u32 {
- (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
-}
-
-const fn repeat_u8_as_u64(val: u8) -> u64 {
- (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64
-}
-
-/// `DTOR_NEEDED_HINT` is a stack-local hint that just means
-/// "we do not know whether the destructor has run or not; check the
-/// drop-flag embedded in the value itself."
-pub const DTOR_NEEDED_HINT: u8 = 0x3d;
-
-/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has
-/// definitely been moved; you do not need to run its destructor."
-///
-/// (However, for now, such values may still end up being explicitly
-/// zeroed by the generated code; this is the distinction between
-/// `datum::DropFlagInfo::ZeroAndMaintain` versus
-/// `datum::DropFlagInfo::DontZeroJustUse`.)
-pub const DTOR_MOVED_HINT: u8 = 0x2d;
-
-pub const DTOR_NEEDED: u8 = 0xd4;
-#[allow(dead_code)]
-pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED);
-
-pub const DTOR_DONE: u8 = 0x1d;
-#[allow(dead_code)]
-pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE);
-
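As a sanity check on the removed splat helpers, here is what they computed for the `DTOR_DONE` byte (a standalone snippet, not compiler code):

```rust
const fn repeat_u8_as_u32(val: u8) -> u32 {
    (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32
}

fn main() {
    // DTOR_DONE (0x1d) splatted across a 32-bit fill word.
    assert_eq!(repeat_u8_as_u32(0x1d), 0x1d1d_1d1d);
    // The 64-bit variant doubles the 32-bit pattern.
    let wide = (repeat_u8_as_u32(0x1d) as u64) << 32 | repeat_u8_as_u32(0x1d) as u64;
    assert_eq!(wide, 0x1d1d_1d1d_1d1d_1d1d);
}
```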
fn dtor_to_init_u8(dtor: bool) -> u8 {
- if dtor { DTOR_NEEDED } else { 0 }
+ if dtor { 1 } else { 0 }
}
pub trait GetDtorType<'tcx> { fn dtor_type(self) -> Ty<'tcx>; }
fn dtor_type(self) -> Ty<'tcx> { self.types.u8 }
}
-fn dtor_active(flag: u8) -> bool {
- flag != 0
-}
-
fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>) -> Repr<'tcx> {
match t.sty {
/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
-///
-/// This should ideally be less tightly tied to `_match`.
pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
r: &Repr<'tcx>,
scrutinee: ValueRef,
range_assert: bool)
- -> (_match::BranchKind, Option<ValueRef>) {
+ -> (BranchKind, Option<ValueRef>) {
match *r {
CEnum(..) | General(..) |
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
- (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None,
- range_assert)))
+ (BranchKind::Switch, Some(trans_get_discr(bcx, r, scrutinee, None, range_assert)))
}
Univariant(..) => {
// N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
- (_match::Single, None)
+ (BranchKind::Single, None)
}
}
}
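Roughly, the two `BranchKind`s map to scrutinee representations like these (user-level illustration; the mapping follows the match arms above):

```rust
#[allow(dead_code)]
enum Color { Red, Green, Blue } // CEnum: BranchKind::Switch, the
                                // discriminant is loaded and switched on
#[allow(dead_code)]
enum Only { Variant(u32) }      // Univariant (<= 1 variants):
                                // BranchKind::Single, no discriminant

fn main() {}
```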
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
val);
}
- General(ity, ref cases, dtor) => {
- if dtor_active(dtor) {
- let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr,
- cases[discr.0 as usize].fields.len() - 2);
- Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr);
- }
+ General(ity, _, _) => {
Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true),
StructGEP(bcx, val, 0));
}
- Univariant(ref st, dtor) => {
+ Univariant(_, _) => {
assert_eq!(discr, Disr(0));
- if dtor_active(dtor) {
- Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED),
- StructGEP(bcx, val, st.fields.len() - 1));
- }
}
RawNullablePointer { nndiscr, nnty, ..} => {
if discr != nndiscr {
}
}
-/// The number of fields in a given case; for use when obtaining this
-/// information from the type or definition is less convenient.
-pub fn num_args(r: &Repr, discr: Disr) -> usize {
- match *r {
- CEnum(..) => 0,
- Univariant(ref st, dtor) => {
- assert_eq!(discr, Disr(0));
- st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 })
- }
- General(_, ref cases, dtor) => {
- cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 })
- }
- RawNullablePointer { nndiscr, ref nullfields, .. } => {
- if discr == nndiscr { 1 } else { nullfields.len() }
- }
- StructWrappedNullablePointer { ref nonnull, nndiscr,
- ref nullfields, .. } => {
- if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
- }
- }
-}
-
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
bcx.pointercast(byte_ptr, ll_fty.ptr_to())
}
-pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- r: &Repr<'tcx>,
- value: ValueRef,
- mut f: F)
- -> Block<'blk, 'tcx> where
- F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>,
-{
- let fcx = bcx.fcx;
- match *r {
- Univariant(ref st, _) => {
- f(bcx, st, value)
- }
- General(ity, ref cases, _) => {
- let ccx = bcx.ccx();
-
- // See the comments in trans/base.rs for more information (inside
- // iter_structural_ty), but the gist here is that if the enum's
- // discriminant is *not* in the range that we're expecting (in which
- // case we'll take the fall-through branch on the switch
- // instruction) then we can't just optimize this to an Unreachable
- // block.
- //
- // Currently we still have filling drop, so this means that the drop
- // glue for enums may be called when the enum has been paved over
- // with the "I've been dropped" value. In this case the default
- // branch of the switch instruction will actually be taken at
- // runtime, so the basic block isn't actually unreachable, so we
- // need to make it do something with defined behavior. In this case
- // we just return early from the function.
- //
- // Note that this is also why the `trans_get_discr` below has
- // `false` to indicate that loading the discriminant should
- // not have a range assert.
- let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void");
- RetVoid(ret_void_cx, DebugLoc::None);
-
- let discr_val = trans_get_discr(bcx, r, value, None, false);
- let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len());
- let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
-
- for (discr, case) in cases.iter().enumerate() {
- let mut variant_cx = fcx.new_temp_block(
- &format!("enum-variant-iter-{}", &discr.to_string())
- );
- let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
- AddCase(llswitch, rhs_val, variant_cx.llbb);
-
- let fields = case.fields.iter().map(|&ty|
- type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
- let real_ty = Type::struct_(ccx, &fields[..], case.packed);
- let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
-
- variant_cx = f(variant_cx, case, variant_value);
- Br(variant_cx, bcx_next.llbb, DebugLoc::None);
- }
-
- bcx_next
- }
- _ => bug!()
- }
-}
-
-/// Access the struct drop flag, if present.
-pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- r: &Repr<'tcx>,
- val: ValueRef)
- -> datum::DatumBlock<'blk, 'tcx, datum::Expr>
-{
- let tcx = bcx.tcx();
- let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type());
- match *r {
- Univariant(ref st, dtor) if dtor_active(dtor) => {
- let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1);
- datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
- }
- General(_, _, dtor) if dtor_active(dtor) => {
- let fcx = bcx.fcx;
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
- let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
- bcx, tcx.dtor_type(), "drop_flag",
- InitAlloca::Uninit("drop flag itself has no dtor"),
- cleanup::CustomScope(custom_cleanup_scope), |bcx, _| {
- debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
- tcx.dtor_type());
- bcx
- }
- ));
- bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
- let ptr = struct_field_ptr(&variant_cx.build(), st,
- MaybeSizedValue::sized(value),
- (st.fields.len() - 1), false);
- datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
- .store_to(variant_cx, scratch.val)
- });
- let expr_datum = scratch.to_expr_datum();
- fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
- datum::DatumBlock::new(bcx, expr_datum)
- }
- _ => bug!("tried to get drop flag of non-droppable type")
- }
-}
-
/// Construct a constant value, suitable for initializing a
/// GlobalVariable, given a case and constant values for its fields.
/// Note that this may have a different LLVM type (and different
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
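A quick worked check of `roundup`, which rounds `x` up to the next multiple of the alignment `a`:

```rust
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }

fn main() {
    assert_eq!(roundup(10, 4), 12); // (10 + 3) / 4 * 4
    assert_eq!(roundup(12, 4), 12); // already a multiple of 4
    assert_eq!(roundup(0, 8), 0);
}
```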
-/// Get the discriminant of a constant value.
-pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr {
- match *r {
- CEnum(ity, _, _) => {
- match ity {
- attr::SignedInt(..) => Disr(const_to_int(val) as u64),
- attr::UnsignedInt(..) => Disr(const_to_uint(val)),
- }
- }
- General(ity, _, _) => {
- match ity {
- attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64),
- attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0])))
- }
- }
- Univariant(..) => Disr(0),
- RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
- bug!("const discrim access of non c-like enum")
- }
- }
-}
-
/// Extract a field of a constant value, as appropriate for its
/// representation.
///
use base;
use build::*;
use common::*;
-use datum::{Datum, Lvalue};
use type_of;
use type_::Type;
-use rustc::hir as ast;
+use rustc::hir;
+use rustc::ty::Ty;
+
use std::ffi::CString;
use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ia: &ast::InlineAsm,
- outputs: Vec<Datum<'tcx, Lvalue>>,
+ ia: &hir::InlineAsm,
+ outputs: Vec<(ValueRef, Ty<'tcx>)>,
mut inputs: Vec<ValueRef>) {
let mut ext_constraints = vec![];
let mut output_types = vec![];
// Prepare the output operands
let mut indirect_outputs = vec![];
- for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() {
+ for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
- Some(base::load_ty(bcx, out_datum.val, out_datum.ty))
+ Some(base::load_ty(bcx, val, ty))
} else {
None
};
if out.is_indirect {
indirect_outputs.push(val.unwrap());
} else {
- output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
+ output_types.push(type_of::type_of(bcx.ccx(), ty));
}
}
if !indirect_outputs.is_empty() {
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
- for (i, (_, datum)) in outputs.enumerate() {
+ for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
- Store(bcx, v, datum.val);
+ Store(bcx, v, val);
}
// Store expn_id in a metadata node so we can map LLVM errors
use assert_module_sources;
use back::link;
use back::linker::LinkerInfo;
-use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
+use llvm::{Linkage, ValueRef, Vector, get_param};
use llvm;
-use rustc::cfg;
use rustc::hir::def_id::DefId;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
-use rustc::hir::pat_util::simple_name;
use rustc::ty::subst::Substs;
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc::hir::map as hir_map;
use rustc::util::common::time;
use rustc::mir::mir_map::MirMap;
-use rustc_data_structures::graph::OUTGOING;
+use session::config::{self, NoDebugInfo};
use rustc_incremental::IncrementalHashesMap;
-use session::config::{self, NoDebugInfo, FullDebugInfo};
use session::Session;
-use _match;
use abi::{self, Abi, FnType};
use adt;
use attributes;
use build::*;
use builder::{Builder, noname};
-use callee::{Callee, CallArgs, ArgExprs, ArgVals};
-use cleanup::{self, CleanupMethods, DropHint};
-use closure;
-use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
+use callee::{Callee};
+use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint};
use collector::{self, TransItemCollectionMode};
use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
-use common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
-use common::{Result, NodeIdAndSpan, VariantInfo};
-use common::{node_id_type, fulfill_obligation};
-use common::{type_is_immediate, type_is_zero_size, val_ty};
+use common::{CrateContext, Field, FunctionContext};
+use common::{Result, VariantInfo};
+use common::{fulfill_obligation};
+use common::{type_is_zero_size, val_ty};
use common;
use consts;
use context::{SharedCrateContext, CrateContextList};
-use controlflow;
-use datum;
-use debuginfo::{self, DebugLoc, ToDebugLoc};
+use debuginfo::{self, DebugLoc};
use declare;
-use expr;
-use glue;
use inline;
use machine;
use machine::{llalign_of_min, llsize_of};
use type_of;
use value::Value;
use Disr;
-use util::common::indenter;
use util::sha2::Sha256;
-use util::nodemap::{NodeMap, NodeSet, FnvHashSet};
+use util::nodemap::{NodeSet, FnvHashSet};
use arena::TypedArena;
use libc::c_uint;
use std::ptr;
use std::rc::Rc;
use std::str;
-use std::{i8, i16, i32, i64};
+use std::i32;
use syntax_pos::{Span, DUMMY_SP};
-use syntax::parse::token::InternedString;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
-use rustc::hir::intravisit::{self, Visitor};
use rustc::hir;
use syntax::ast;
}
}
-pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind {
- *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
+pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+ StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
+}
+
+pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+ StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}
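The two helpers index the two words of a fat pointer: the data pointer at `FAT_PTR_ADDR` and the extra word (length or vtable) at `FAT_PTR_EXTRA`. The same pair is observable in plain Rust (illustrative only, for a slice where the extra word is the length):

```rust
fn main() {
    let bytes: &[u8] = &[1, 2, 3];
    // The same two words the StructGEPs select: data and metadata.
    let data: *const u8 = bytes.as_ptr();
    let meta: usize = bytes.len();
    assert_eq!(meta, 3);
    assert!(!data.is_null());
}
```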
fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
// Allocate space:
let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
let r = Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx()))
- .call(bcx, debug_loc, ArgVals(&[size, align]), None);
+ .call(bcx, debug_loc, &[size, align], None);
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
let value = if common::type_is_sized(cx.tcx(), t) {
adt::MaybeSizedValue::sized(av)
} else {
- let data = Load(cx, expr::get_dataptr(cx, av));
- let info = Load(cx, expr::get_meta(cx, av));
+ let data = Load(cx, get_dataptr(cx, av));
+ let info = Load(cx, get_meta(cx, av));
adt::MaybeSizedValue::unsized_(data, info)
};
let val = if common::type_is_sized(cx.tcx(), field_ty) {
llfld_a
} else {
- let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
- Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
- Store(cx, value.meta, expr::get_meta(cx, scratch.val));
- scratch.val
+ let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter");
+ Store(cx, llfld_a, get_dataptr(cx, scratch));
+ Store(cx, value.meta, get_meta(cx, scratch));
+ scratch
};
cx = f(cx, val, field_ty);
}
}
}
ty::TyArray(_, n) => {
- let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n);
+ let base = get_dataptr(cx, value.value);
+ let len = C_uint(cx.ccx(), n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
}
// comparison know not to proceed when the discriminants differ.
match adt::trans_switch(cx, &repr, av, false) {
- (_match::Single, None) => {
+ (adt::BranchKind::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
&en.variants[0], substs, &mut f);
}
}
- (_match::Switch, Some(lldiscrim_a)) => {
+ (adt::BranchKind::Switch, Some(lldiscrim_a)) => {
cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
// Create a fall-through basic block for the "else" case of
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
- let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
+ let ret_void_cx = fcx.new_block("enum-iter-ret-void");
RetVoid(ret_void_cx, DebugLoc::None);
let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
- let next_cx = fcx.new_temp_block("enum-iter-next");
+ let next_cx = fcx.new_block("enum-iter-next");
for variant in &en.variants {
- let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}",
+ let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}",
&variant.disr_val
.to_string()));
let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
}
}
-pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- val_t: Ty<'tcx>)
- -> (Type, u64) {
- match val_t.sty {
- ty::TyInt(t) => {
- let llty = Type::int_from_ty(cx.ccx(), t);
- let min = match t {
- ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
- ast::IntTy::Is => i64::MIN as u64,
- ast::IntTy::I8 => i8::MIN as u64,
- ast::IntTy::I16 => i16::MIN as u64,
- ast::IntTy::I32 => i32::MIN as u64,
- ast::IntTy::I64 => i64::MIN as u64,
- };
- (llty, min)
- }
- _ => bug!(),
- }
-}
-
-pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- divrem: hir::BinOp,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- use rustc_const_math::{ConstMathErr, Op};
-
- let (zero_err, overflow_err) = if divrem.node == hir::BiDiv {
- (ConstMathErr::DivisionByZero, ConstMathErr::Overflow(Op::Div))
- } else {
- (ConstMathErr::RemainderByZero, ConstMathErr::Overflow(Op::Rem))
- };
- let debug_loc = call_info.debug_loc();
-
- let (is_zero, is_signed) = match rhs_t.sty {
- ty::TyInt(t) => {
- let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
- (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
- }
- ty::TyUint(t) => {
- let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
- (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
- }
- ty::TyStruct(def, _) if def.is_simd() => {
- let mut res = C_bool(cx.ccx(), false);
- for i in 0..rhs_t.simd_size(cx.tcx()) {
- res = Or(cx,
- res,
- IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))),
- debug_loc);
- }
- (res, false)
- }
- _ => {
- bug!("fail-if-zero on unexpected type: {}", rhs_t);
- }
- };
- let bcx = with_cond(cx, is_zero, |bcx| {
- controlflow::trans_fail(bcx, call_info, InternedString::new(zero_err.description()))
- });
-
- // To quote LLVM's documentation for the sdiv instruction:
- //
- // Division by zero leads to undefined behavior. Overflow also leads
- // to undefined behavior; this is a rare case, but can occur, for
- // example, by doing a 32-bit division of -2147483648 by -1.
- //
- // In order to avoid undefined behavior, we perform runtime checks for
- // signed division/remainder which would trigger overflow. For unsigned
- // integers, no action beyond checking for zero need be taken.
- if is_signed {
- let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
- let minus_one = ICmp(bcx,
- llvm::IntEQ,
- rhs,
- C_integral(llty, !0, false),
- debug_loc);
- with_cond(bcx, minus_one, |bcx| {
- let is_min = ICmp(bcx,
- llvm::IntEQ,
- lhs,
- C_integral(llty, min, true),
- debug_loc);
- with_cond(bcx, is_min, |bcx| {
- controlflow::trans_fail(bcx, call_info,
- InternedString::new(overflow_err.description()))
- })
- })
- } else {
- bcx
- }
-}
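The hazards the removed check guarded against are easy to reproduce in plain Rust, independent of trans (a standalone check):

```rust
fn main() {
    use std::i32;
    // The sdiv overflow case: -2147483648 / -1 does not fit in i32.
    assert_eq!(i32::MIN.checked_div(-1), None);
    assert_eq!(i32::MIN.checked_rem(-1), None);
    // Division by zero is the other guarded case.
    assert_eq!(1i32.checked_div(0), None);
}
```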
-
pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llfn: ValueRef,
llargs: &[ValueRef],
return (C_null(Type::i8(bcx.ccx())), bcx);
}
- match bcx.opt_node_id {
- None => {
- debug!("invoke at ???");
- }
- Some(id) => {
- debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
- }
- }
-
if need_invoke(bcx) {
debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb);
for &llarg in llargs {
debug!("arg: {:?}", Value(llarg));
}
- let normal_bcx = bcx.fcx.new_temp_block("normal-return");
+ let normal_bcx = bcx.fcx.new_block("normal-return");
let landing_pad = bcx.fcx.get_landing_pad();
let llresult = Invoke(bcx,
}
}
-pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef {
- let _icx = push_ctxt("load_if_immediate");
- if type_is_immediate(cx.ccx(), t) {
- return load_ty(cx, v, t);
- }
- return v;
-}
-
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
if common::type_is_fat_ptr(cx.tcx(), t) {
Store(cx,
ExtractValue(cx, v, abi::FAT_PTR_ADDR),
- expr::get_dataptr(cx, dst));
+ get_dataptr(cx, dst));
Store(cx,
ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
- expr::get_meta(cx, dst));
+ get_meta(cx, dst));
} else {
Store(cx, from_immediate(cx, v), dst);
}
dst: ValueRef,
_ty: Ty<'tcx>) {
// FIXME: emit metadata
- Store(cx, data, expr::get_dataptr(cx, dst));
- Store(cx, extra, expr::get_meta(cx, dst));
+ Store(cx, data, get_dataptr(cx, dst));
+ Store(cx, extra, get_meta(cx, dst));
}
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
_ty: Ty<'tcx>)
-> (ValueRef, ValueRef) {
// FIXME: emit metadata
- (Load(cx, expr::get_dataptr(cx, src)),
- Load(cx, expr::get_meta(cx, src)))
+ (Load(cx, get_dataptr(cx, src)),
+ Load(cx, get_meta(cx, src)))
}
pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
}
}
-pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> {
- debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
- let _indenter = indenter();
- let _icx = push_ctxt("init_local");
- _match::store_local(bcx, local)
-}
-
-pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
- llbb: BasicBlockRef)
- -> Block<'blk, 'tcx> {
- common::BlockS::new(llbb, None, fcx)
-}
-
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
{
}
let fcx = bcx.fcx;
- let next_cx = fcx.new_temp_block("next");
- let cond_cx = fcx.new_temp_block("cond");
+ let next_cx = fcx.new_block("next");
+ let cond_cx = fcx.new_block("cond");
CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
let after_cx = f(cond_cx);
if !after_cx.terminated.get() {
} else {
let exc_ptr = ExtractValue(bcx, lpval, 0);
bcx.fcx.eh_unwind_resume()
- .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None);
+ .call(bcx, DebugLoc::None, &[exc_ptr], None);
}
}
}
}
-pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
- if cx.unreachable.get() {
- return;
- }
- let _icx = push_ctxt("drop_done_fill_mem");
- let bcx = cx;
- memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
-}
-
pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
if cx.unreachable.get() {
return;
b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}
-
-/// In general, when we create a scratch value in an alloca, the
-/// creator may not know if the block (that initializes the scratch
-/// with the desired value) actually dominates the cleanup associated
-/// with the scratch value.
-///
-/// To deal with this, when we do an alloca (at the *start* of the
-/// whole function body), we can optionally also set the associated
-/// dropped-flag state of the alloca to "dropped."
-#[derive(Copy, Clone, Debug)]
-pub enum InitAlloca {
- /// Indicates that the state should have its associated drop flag
- /// set to "dropped" at the point of allocation.
- Dropped,
- /// Indicates the value of the associated drop flag is irrelevant.
- /// The embedded string literal is a programmer provided argument
- /// for why. This is a safeguard forcing compiler devs to
- /// document; it might be a good idea to also emit this as a
- /// comment with the alloca itself when emitting the LLVM .ll output.
- Uninit(&'static str),
-}
-
-
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
+ ty: Ty<'tcx>,
name: &str) -> ValueRef {
- // pnkfelix: I do not know why alloc_ty meets the assumptions for
- // passing Uninit, but it was never needed (even back when we had
- // the original boolean `zero` flag on `lvalue_scratch_datum`).
- alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name)
-}
-
-/// This variant of `fn alloc_ty` does not necessarily assume that the
-/// alloca should be created with no initial value. Instead the caller
-/// controls that assumption via the `init` flag.
-///
-/// Note that if the alloca *is* initialized via `init`, then we will
-/// also inject an `llvm.lifetime.start` before that initialization
-/// occurs, and thus callers should not call_lifetime_start
-/// themselves. But if `init` says "uninitialized", then callers are
-/// in charge of choosing where to call_lifetime_start and
-/// subsequently populate the alloca.
-///
-/// (See related discussion on PR #30823.)
-pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
- init: InitAlloca,
- name: &str) -> ValueRef {
- let _icx = push_ctxt("alloc_ty");
- let ccx = bcx.ccx();
- let ty = type_of::type_of(ccx, t);
- assert!(!t.has_param_types());
- match init {
- InitAlloca::Dropped => alloca_dropped(bcx, t, name),
- InitAlloca::Uninit(_) => alloca(bcx, ty, name),
- }
-}
-
-pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
- let _icx = push_ctxt("alloca_dropped");
- let llty = type_of::type_of(cx.ccx(), ty);
- if cx.unreachable.get() {
- unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); }
- }
- let p = alloca(cx, llty, name);
- let b = cx.fcx.ccx.builder();
- b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
-
- // This is just like `call_lifetime_start` (but the latter expects
- // a Block, which we do not have for `alloca_insert_pt`).
- core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
- let ptr = b.pointercast(p, Type::i8p(ccx));
- b.call(lifetime_start, &[C_u64(ccx, size), ptr], None);
- });
- memfill(&b, p, ty, adt::DTOR_DONE);
- p
+ assert!(!ty.has_param_types());
+ alloca(bcx, type_of::type_of(bcx.ccx(), ty), name)
}
pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
Alloca(cx, ty, name)
}
-pub fn set_value_name(val: ValueRef, name: &str) {
- unsafe {
- let name = CString::new(name).unwrap();
- llvm::LLVMSetValueName(val, name.as_ptr());
- }
-}
-
-struct FindNestedReturn {
- found: bool,
-}
-
-impl FindNestedReturn {
- fn new() -> FindNestedReturn {
- FindNestedReturn {
- found: false,
- }
- }
-}
-
-impl<'v> Visitor<'v> for FindNestedReturn {
- fn visit_expr(&mut self, e: &hir::Expr) {
- match e.node {
- hir::ExprRet(..) => {
- self.found = true;
- }
- _ => intravisit::walk_expr(self, e),
- }
- }
-}
-
-fn build_cfg<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- id: ast::NodeId)
- -> (ast::NodeId, Option<cfg::CFG>) {
- let blk = match tcx.map.find(id) {
- Some(hir_map::NodeItem(i)) => {
- match i.node {
- hir::ItemFn(_, _, _, _, _, ref blk) => {
- blk
- }
- _ => bug!("unexpected item variant in has_nested_returns"),
- }
- }
- Some(hir_map::NodeTraitItem(trait_item)) => {
- match trait_item.node {
- hir::MethodTraitItem(_, Some(ref body)) => body,
- _ => {
- bug!("unexpected variant: trait item other than a provided method in \
- has_nested_returns")
- }
- }
- }
- Some(hir_map::NodeImplItem(impl_item)) => {
- match impl_item.node {
- hir::ImplItemKind::Method(_, ref body) => body,
- _ => {
- bug!("unexpected variant: non-method impl item in has_nested_returns")
- }
- }
- }
- Some(hir_map::NodeExpr(e)) => {
- match e.node {
- hir::ExprClosure(_, _, ref blk, _) => blk,
- _ => bug!("unexpected expr variant in has_nested_returns"),
- }
- }
- Some(hir_map::NodeVariant(..)) |
- Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),
-
- // glue, shims, etc
- None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),
-
- _ => bug!("unexpected variant in has_nested_returns: {}",
- tcx.node_path_str(id)),
- };
-
- (blk.id, Some(cfg::CFG::new(tcx, blk)))
-}
-
-// Checks for the presence of "nested returns" in a function.
-// Nested returns are when the inner expression of a return expression
-// (the 'expr' in 'return expr') contains a return expression. Only cases
-// where the outer return is actually reachable are considered. Implicit
-// returns from the end of blocks are considered as well.
-//
-// This check is needed to handle the case where the inner expression is
-// part of a larger expression that may have already partially-filled the
-// return slot alloca. This can cause errors related to clean-up due to
-// the clobbering of the existing value in the return slot.
-fn has_nested_returns(tcx: TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
- for index in cfg.graph.depth_traverse(cfg.entry, OUTGOING) {
- let n = cfg.graph.node_data(index);
- match tcx.map.find(n.id()) {
- Some(hir_map::NodeExpr(ex)) => {
- if let hir::ExprRet(Some(ref ret_expr)) = ex.node {
- let mut visitor = FindNestedReturn::new();
- intravisit::walk_expr(&mut visitor, &ret_expr);
- if visitor.found {
- return true;
- }
- }
- }
- Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => {
- let mut visitor = FindNestedReturn::new();
- walk_list!(&mut visitor, visit_expr, &blk.expr);
- if visitor.found {
- return true;
- }
- }
- _ => {}
- }
- }
-
- return false;
-}
-
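An example of the nested-return shape the removed analysis looked for (valid Rust; the inner return fires while the outer return's expression is still being evaluated):

```rust
fn f(cond: bool) -> i32 {
    // The inner `return 1` sits inside the outer return's expression.
    return match cond {
        true => return 1,
        false => 2,
    };
}

fn main() {
    assert_eq!(f(true), 1);
    assert_eq!(f(false), 2);
}
```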
impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
/// Create a function context for the given function.
/// Beware that you must call `fcx.init` or `fcx.bind_args`
pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
llfndecl: ValueRef,
fn_ty: FnType,
- definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi, ast::NodeId)>,
+ definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>,
block_arena: &'blk TypedArena<common::BlockS<'blk, 'tcx>>)
-> FunctionContext<'blk, 'tcx> {
- let (param_substs, def_id, inlined_id) = match definition {
- Some((instance, _, _, inlined_id)) => {
+ let (param_substs, def_id) = match definition {
+ Some((instance, _, _)) => {
common::validate_substs(instance.substs);
- (instance.substs, Some(instance.def), Some(inlined_id))
+ (instance.substs, Some(instance.def))
}
- None => (Substs::empty(ccx.tcx()), None, None)
+ None => (Substs::empty(ccx.tcx()), None)
};
let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id));
debug!("FunctionContext::new({})",
definition.map_or(String::new(), |d| d.0.to_string()));
- let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id));
- let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg {
- has_nested_returns(ccx.tcx(), cfg, blk_id)
- } else {
- false
- };
-
let no_debug = if let Some(id) = local_id {
ccx.tcx().map.attrs(id)
.iter().any(|item| item.check_name("no_debug"))
false
};
- let mir = def_id.and_then(|id| ccx.get_mir(id));
-
- let debug_context = if let (false, Some(definition)) = (no_debug, definition) {
- let (instance, sig, abi, _) = definition;
+ let debug_context = if let (false, Some((instance, sig, abi))) = (no_debug, definition) {
debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl)
} else {
debuginfo::empty_function_debug_context(ccx)
};
FunctionContext {
- needs_ret_allocas: nested_returns && mir.is_none(),
- mir: mir,
+ mir: def_id.and_then(|id| ccx.get_mir(id)),
llfn: llfndecl,
llretslotptr: Cell::new(None),
param_env: ccx.tcx().empty_parameter_environment(),
alloca_insert_pt: Cell::new(None),
- llreturn: Cell::new(None),
landingpad_alloca: Cell::new(None),
- lllocals: RefCell::new(NodeMap()),
- llupvars: RefCell::new(NodeMap()),
- lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
fn_ty: fn_ty,
param_substs: param_substs,
- span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)),
+ span: None,
block_arena: block_arena,
lpad_arena: TypedArena::new(),
ccx: ccx,
debug_context: debug_context,
scopes: RefCell::new(Vec::new()),
- cfg: cfg.and_then(|(_, cfg)| cfg)
}
}
/// Performs setup on a newly created function, creating the entry
/// scope block and allocating space for the return pointer.
- pub fn init(&'blk self, skip_retptr: bool, fn_did: Option<DefId>)
- -> Block<'blk, 'tcx> {
- let entry_bcx = self.new_temp_block("entry-block");
+ pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> {
+ let entry_bcx = self.new_block("entry-block");
// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in FunctionContext::cleanup.
// which will hold the pointer to the right alloca which has the
// final ret value
let llty = self.fn_ty.ret.memory_ty(self.ccx);
- let slot = if self.needs_ret_allocas {
- // Let's create the stack slot
- let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr");
-
- // and if we're using an out pointer, then store that in our newly made slot
- if self.fn_ty.ret.is_indirect() {
- let outptr = get_param(self.llfn, 0);
-
- let b = self.ccx.builder();
- b.position_before(self.alloca_insert_pt.get().unwrap());
- b.store(outptr, slot);
- }
-
- slot
+ // Nested returns are gone, so there is always a single retslot:
+ // either the caller-provided out pointer or a fresh alloca.
+ let slot = if self.fn_ty.ret.is_indirect() {
+ get_param(self.llfn, 0)
} else {
- // But if there are no nested returns, we skip the indirection
- // and have a single retslot
- if self.fn_ty.ret.is_indirect() {
- get_param(self.llfn, 0)
- } else {
- AllocaFcx(self, llty, "sret_slot")
- }
+ AllocaFcx(self, llty, "sret_slot")
};
self.llretslotptr.set(Some(slot));
}
- // Create the drop-flag hints for every unfragmented path in the function.
- let tcx = self.ccx.tcx();
- let tables = tcx.tables.borrow();
- let mut hints = self.lldropflag_hints.borrow_mut();
- let fragment_infos = tcx.fragment_infos.borrow();
-
- // Intern table for drop-flag hint datums.
- let mut seen = HashMap::new();
-
- let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did));
- if let Some(fragment_infos) = fragment_infos {
- for &info in fragment_infos {
-
- let make_datum = |id| {
- let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT);
- let llname = &format!("dropflag_hint_{}", id);
- debug!("adding hint {}", llname);
- let ty = tcx.types.u8;
- let ptr = alloc_ty(entry_bcx, ty, llname);
- Store(entry_bcx, init_val, ptr);
- let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init");
- datum::Datum::new(ptr, ty, flag)
- };
-
- let (var, datum) = match info {
- ty::FragmentInfo::Moved { var, .. } |
- ty::FragmentInfo::Assigned { var, .. } => {
- let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| {
- let ty = tables.node_types[&var];
- if self.type_needs_drop(ty) {
- let datum = make_datum(var);
- seen.insert(var, Some(datum.clone()));
- Some(datum)
- } else {
- // No drop call needed, so we don't need a dropflag hint
- None
- }
- });
- if let Some(datum) = opt_datum {
- (var, datum)
- } else {
- continue
- }
- }
- };
- match info {
- ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
- debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
- hints.insert(expr_id, DropHint::new(var, datum));
- }
- ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
- debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
- hints.insert(expr_id, DropHint::new(var, datum));
- }
- }
- }
- }
-
entry_bcx
}
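Whether `fn_ty.ret.is_indirect()` holds depends on the ABI classification of the return type; as intuition only (the exact cutoff is target-dependent):

```rust
fn small() -> u64 { 0 }              // returned in a register: direct
fn large() -> [u64; 16] { [0; 16] }  // returned via a hidden out pointer
                                     // ("sret"), which becomes the retslot

fn main() {
    let _ = (small(), large());
}
```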
- /// Creates lvalue datums for each of the incoming function arguments,
- /// matches all argument patterns against them to produce bindings,
- /// and returns the entry block (see FunctionContext::init).
- fn bind_args(&'blk self,
- args: &[hir::Arg],
- abi: Abi,
- id: ast::NodeId,
- closure_env: closure::ClosureEnv,
- arg_scope: cleanup::CustomScopeIndex)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("FunctionContext::bind_args");
- let fn_did = self.ccx.tcx().map.local_def_id(id);
- let mut bcx = self.init(false, Some(fn_did));
- let arg_scope_id = cleanup::CustomScope(arg_scope);
-
- let mut idx = 0;
- let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize;
-
- let has_tupled_arg = match closure_env {
- closure::ClosureEnv::NotClosure => abi == Abi::RustCall,
- closure::ClosureEnv::Closure(..) => {
- closure_env.load(bcx, arg_scope_id);
- let env_arg = &self.fn_ty.args[idx];
- idx += 1;
- if env_arg.pad.is_some() {
- llarg_idx += 1;
- }
- if !env_arg.is_ignore() {
- llarg_idx += 1;
- }
- false
- }
- };
- let tupled_arg_id = if has_tupled_arg {
- args[args.len() - 1].id
- } else {
- ast::DUMMY_NODE_ID
- };
-
- // Wrap the ValueRefs that we get from `get_param` for each
- // argument into datums.
- //
- // For certain mode/type combinations, the raw llarg values are passed
- // by value. However, within the fn body itself, we want to always
- // have all locals and arguments be by-ref so that we can cancel the
- // cleanup and for better interaction with LLVM's debug info. So, if
- // the argument would be passed by value, we store it into an alloca.
- // This alloca should be optimized away by LLVM's mem-to-reg pass in
- // the event it's not truly needed.
- let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor");
- for hir_arg in args {
- let arg_ty = node_id_type(bcx, hir_arg.id);
- let arg_datum = if hir_arg.id != tupled_arg_id {
- let arg = &self.fn_ty.args[idx];
- idx += 1;
- if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
- // Don't copy an indirect argument to an alloca, the caller
- // already put it in a temporary alloca and gave it up, unless
- // we emit extra-debug-info, which requires local allocas :(.
- let llarg = get_param(self.llfn, llarg_idx as c_uint);
- llarg_idx += 1;
- self.schedule_lifetime_end(arg_scope_id, llarg);
- self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);
-
- datum::Datum::new(llarg,
- arg_ty,
- datum::Lvalue::new("FunctionContext::bind_args"))
- } else {
- unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
- uninit_reason,
- arg_scope_id, |bcx, dst| {
- debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty);
- let b = &bcx.build();
- if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
- let meta = &self.fn_ty.args[idx];
- idx += 1;
- arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
- meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
- } else {
- arg.store_fn_arg(b, &mut llarg_idx, dst);
- }
- bcx
- }))
- }
- } else {
- // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
- let tupled_arg_tys = match arg_ty.sty {
- ty::TyTuple(ref tys) => tys,
- _ => bug!("last argument of `rust-call` fn isn't a tuple?!")
- };
-
- unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx,
- arg_ty,
- "tupled_args",
- uninit_reason,
- arg_scope_id,
- |bcx, llval| {
- debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty);
- for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
- let dst = StructGEP(bcx, llval, j);
- let arg = &self.fn_ty.args[idx];
- idx += 1;
- let b = &bcx.build();
- if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
- let meta = &self.fn_ty.args[idx];
- idx += 1;
- arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
- meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
- } else {
- arg.store_fn_arg(b, &mut llarg_idx, dst);
- }
- }
- bcx
- }))
- };
-
- let pat = &hir_arg.pat;
- bcx = if let Some(name) = simple_name(pat) {
- // Generate nicer LLVM for the common case of a fn argument
- // pattern like `x: T`.
- set_value_name(arg_datum.val, &bcx.name(name));
- self.lllocals.borrow_mut().insert(pat.id, arg_datum);
- bcx
- } else {
- // General path. Copy out the values that are used in the
- // pattern.
- _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
- };
- debuginfo::create_argument_metadata(bcx, hir_arg);
- }
-
- bcx
- }
-
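In user syntax, the argument shapes the removed `bind_args` distinguished look like this (ordinary Rust; `takes_patterns` is a made-up name):

```rust
fn takes_patterns(x: i32, (a, b): (i32, i32)) -> i32 {
    // `x` is a simple name and keeps the incoming value's slot directly;
    // `(a, b)` is a real pattern and went through bind_irrefutable_pat,
    // exactly like a `let`.
    x + a + b
}

fn main() {
    assert_eq!(takes_patterns(1, (2, 3)), 6);
}
```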
/// Ties up the llstaticallocas -> llloadenv -> lltop edges,
/// and builds the return block.
- pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>,
+ pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>,
ret_debug_loc: DebugLoc) {
let _icx = push_ctxt("FunctionContext::finish");
- let ret_cx = match self.llreturn.get() {
- Some(llreturn) => {
- if !last_bcx.terminated.get() {
- Br(last_bcx, llreturn, DebugLoc::None);
- }
- raw_block(self, llreturn)
- }
- None => last_bcx,
- };
-
self.build_return_block(ret_cx, ret_debug_loc);
DebugLoc::None.apply(self);
ret_debug_location: DebugLoc) {
if self.llretslotptr.get().is_none() ||
ret_cx.unreachable.get() ||
- (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) {
+ self.fn_ty.ret.is_indirect() {
return RetVoid(ret_cx, ret_debug_location);
}
- let retslot = if self.needs_ret_allocas {
- Load(ret_cx, self.llretslotptr.get().unwrap())
- } else {
- self.llretslotptr.get().unwrap()
- };
+ let retslot = self.llretslotptr.get().unwrap();
let retptr = Value(retslot);
let llty = self.fn_ty.ret.original_ty;
match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) {
///
/// If the function closes over its environment a closure will be returned.
pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- decl: &hir::FnDecl,
- body: &hir::Block,
llfndecl: ValueRef,
instance: Instance<'tcx>,
- inlined_id: ast::NodeId,
sig: &ty::FnSig<'tcx>,
- abi: Abi,
- closure_env: closure::ClosureEnv) {
+ abi: Abi) {
ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
fcx = FunctionContext::new(ccx,
llfndecl,
fn_ty,
- Some((instance, sig, abi, inlined_id)),
+ Some((instance, sig, abi)),
&arena);
- if fcx.mir.is_some() {
- return mir::trans_mir(&fcx);
- } else {
- span_bug!(body.span, "attempted translation of `{}` w/o MIR", instance);
- }
-
- debuginfo::fill_scope_map_for_function(&fcx, decl, body, inlined_id);
-
- // cleanup scope for the incoming arguments
- let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(
- ccx, inlined_id, body.span, true);
- let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
-
- // Set up arguments to the function.
- debug!("trans_closure: function: {:?}", Value(fcx.llfn));
- let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope);
-
- // Up until here, IR instructions for this function have explicitly not been annotated with
- // source code location, so we don't step into call setup code. From here on, source location
- // emitting should be enabled.
- debuginfo::start_emitting_source_locations(&fcx);
-
- let dest = if fcx.fn_ty.ret.is_ignore() {
- expr::Ignore
- } else {
- expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot"))
- };
-
- // This call to trans_block is the place where we bridge between
- // translation calls that don't have a return value (trans_crate,
- // trans_mod, trans_item, et cetera) and those that do
- // (trans_block, trans_expr, et cetera).
- let mut bcx = controlflow::trans_block(bcx, body, dest);
-
- match dest {
- expr::SaveIn(slot) if fcx.needs_ret_allocas => {
- Store(bcx, slot, fcx.llretslotptr.get().unwrap());
- }
- _ => {}
+ if fcx.mir.is_none() {
+ bug!("attempted translation of `{}` w/o MIR", instance);
}
- match fcx.llreturn.get() {
- Some(_) => {
- Br(bcx, fcx.return_exit_block(), DebugLoc::None);
- fcx.pop_custom_cleanup_scope(arg_scope);
- }
- None => {
- // Microoptimization writ large: avoid creating a separate
- // llreturn basic block
- bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
- }
- };
-
- // Put return block after all other blocks.
- // This somewhat improves single-stepping experience in debugger.
- unsafe {
- let llreturn = fcx.llreturn.get();
- if let Some(llreturn) = llreturn {
- llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
- }
- }
-
- // Insert the mandatory first few basic blocks before lltop.
- fcx.finish(bcx, fn_cleanup_debug_loc.debug_loc());
+ mir::trans_mir(&fcx);
}
pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
- let local_instance = inline::maybe_inline_instance(ccx, instance);
-
- let fn_node_id = ccx.tcx().map.as_local_node_id(local_instance.def).unwrap();
-
- let _s = StatRecorder::new(ccx, ccx.tcx().node_path_str(fn_node_id));
+ let _s = StatRecorder::new(ccx, ccx.tcx().item_path_str(instance.def));
debug!("trans_instance(instance={:?})", instance);
let _icx = push_ctxt("trans_instance");
- let item = ccx.tcx().map.find(fn_node_id).unwrap();
-
let fn_ty = ccx.tcx().lookup_item_type(instance.def).ty;
let fn_ty = ccx.tcx().erase_regions(&fn_ty);
let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &fn_ty);
let sig = ccx.tcx().normalize_associated_type(&sig);
let abi = fn_ty.fn_abi();
+ let local_instance = inline::maybe_inline_instance(ccx, instance);
let lldecl = match ccx.instances().borrow().get(&local_instance) {
Some(&val) => val,
None => bug!("Instance `{:?}` not already declared", instance)
};
- match item {
- hir_map::NodeItem(&hir::Item {
- node: hir::ItemFn(ref decl, _, _, _, _, ref body), ..
- }) |
- hir_map::NodeTraitItem(&hir::TraitItem {
- node: hir::MethodTraitItem(
- hir::MethodSig { ref decl, .. }, Some(ref body)), ..
- }) |
- hir_map::NodeImplItem(&hir::ImplItem {
- node: hir::ImplItemKind::Method(
- hir::MethodSig { ref decl, .. }, ref body), ..
- }) => {
- trans_closure(ccx, decl, body, lldecl, instance,
- fn_node_id, &sig, abi, closure::ClosureEnv::NotClosure);
- }
- _ => bug!("Instance is a {:?}?", item)
- }
-}
-
-pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- ctor_ty: Ty<'tcx>,
- disr: Disr,
- args: CallArgs,
- dest: expr::Dest,
- debug_loc: DebugLoc)
- -> Result<'blk, 'tcx> {
-
- let ccx = bcx.fcx.ccx;
-
- let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
- let sig = ccx.tcx().normalize_associated_type(&sig);
- let result_ty = sig.output;
-
- // Get location to store the result. If the user does not care about
- // the result, just make a stack slot
- let llresult = match dest {
- expr::SaveIn(d) => d,
- expr::Ignore => {
- if !type_is_zero_size(ccx, result_ty) {
- let llresult = alloc_ty(bcx, result_ty, "constructor_result");
- call_lifetime_start(bcx, llresult);
- llresult
- } else {
- C_undef(type_of::type_of(ccx, result_ty).ptr_to())
- }
- }
- };
-
- if !type_is_zero_size(ccx, result_ty) {
- match args {
- ArgExprs(exprs) => {
- let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
- bcx = expr::trans_adt(bcx,
- result_ty,
- disr,
- &fields[..],
- None,
- expr::SaveIn(llresult),
- debug_loc);
- }
- _ => bug!("expected expr as arguments for variant/struct tuple constructor"),
- }
- } else {
- // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
- // contents, there could be side-effects we need from them.
- match args {
- ArgExprs(exprs) => {
- for expr in exprs {
- bcx = expr::trans_into(bcx, expr, expr::Ignore);
- }
- }
- _ => (),
- }
- }
-
- // If the caller doesn't care about the result
- // drop the temporary we made
- let bcx = match dest {
- expr::SaveIn(_) => bcx,
- expr::Ignore => {
- let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
- if !type_is_zero_size(ccx, result_ty) {
- call_lifetime_end(bcx, llresult);
- }
- bcx
- }
- };
-
- Result::new(bcx, llresult)
+ trans_closure(ccx, lldecl, instance, &sig, abi);
}
pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena);
- let bcx = fcx.init(false, None);
-
- assert!(!fcx.needs_ret_allocas);
+ let bcx = fcx.init(false);
if !fcx.fn_ty.ret.is_ignore() {
- let dest = fcx.get_ret_slot(bcx, "eret_slot");
+ let dest = fcx.llretslotptr.get().unwrap();
let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
let repr = adt::represent_type(ccx, sig.output);
let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
let meta = &fcx.fn_ty.args[arg_idx];
arg_idx += 1;
- arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr));
- meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr));
+ arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr));
+ meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr));
} else {
arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
}
return;
}
- let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx).val;
+ let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx);
let et = ccx.sess().entry_type.get().unwrap();
match et {
Err(s) => ccx.sess().fatal(&s)
};
let empty_substs = Substs::empty(ccx.tcx());
- let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val;
+ let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx);
let args = {
let opaque_rust_main =
llvm::LLVMBuildPointerCast(bld,
tcx.sess.opts.debug_assertions
};
- let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
- v
- } else {
- tcx.sess.opts.debug_assertions
- };
-
let link_meta = link::build_link_meta(incremental_hashes_map, name);
let shared_ccx = SharedCrateContext::new(tcx,
Sha256::new(),
link_meta.clone(),
reachable,
- check_overflow,
- check_dropflag);
+ check_overflow);
// Translate the metadata.
let metadata = time(tcx.sess.time_passes(), "write metadata", || {
write_metadata(&shared_ccx, shared_ccx.reachable())
//! closure.
pub use self::CalleeData::*;
-pub use self::CallArgs::*;
use arena::TypedArena;
use back::symbol_names;
-use llvm::{self, ValueRef, get_params};
+use llvm::{ValueRef, get_params};
use middle::cstore::LOCAL_CRATE;
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::traits;
use rustc::hir::map as hir_map;
use abi::{Abi, FnType};
-use adt;
use attributes;
use base;
use base::*;
use build::*;
-use cleanup;
-use cleanup::CleanupMethods;
use closure;
-use common::{self, Block, Result, CrateContext, FunctionContext, C_undef};
+use common::{self, Block, Result, CrateContext, FunctionContext};
use consts;
-use datum::*;
use debuginfo::DebugLoc;
use declare;
-use expr;
-use glue;
use inline;
-use intrinsic;
-use machine::llalign_of_min;
use meth;
use monomorphize::{self, Instance};
use trans_item::TransItem;
-use type_::Type;
use type_of;
use value::Value;
use Disr;
use syntax_pos::DUMMY_SP;
use errors;
-use syntax::ptr::P;
#[derive(Debug)]
pub enum CalleeData {
impl<'tcx> Callee<'tcx> {
/// Function pointer.
- pub fn ptr(datum: Datum<'tcx, Rvalue>) -> Callee<'tcx> {
+ pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> {
Callee {
- data: Fn(datum.val),
- ty: datum.ty
+ data: Fn(llfn),
+ ty: ty
}
}
abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic
} => Intrinsic,
- _ => return Callee::ptr(get_fn(ccx, def_id, substs))
+ _ => {
+ let (llfn, ty) = get_fn(ccx, def_id, substs);
+ return Callee::ptr(llfn, ty);
+ }
};
Callee {
// That is because default methods have the same ID as the
// trait method used to look up the impl method that ended
// up here, so calling Callee::def would infinitely recurse.
- Callee::ptr(get_fn(ccx, mth.method.def_id, mth.substs))
+ let (llfn, ty) = get_fn(ccx, mth.method.def_id, mth.substs);
+ Callee::ptr(llfn, ty)
}
traits::VtableClosure(vtable_closure) => {
// The substitutions should have no type parameters remaining
_ => bug!("expected fn item type, found {}",
method_ty)
};
- Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
+ Callee::ptr(llfn, fn_ptr_ty)
}
traits::VtableFnPointer(vtable_fn_pointer) => {
let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
_ => bug!("expected fn item type, found {}",
method_ty)
};
- Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
+ Callee::ptr(llfn, fn_ptr_ty)
}
traits::VtableObject(ref data) => {
Callee {
/// function.
pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
- args: CallArgs<'a, 'tcx>,
- dest: Option<expr::Dest>)
+ args: &[ValueRef],
+ dest: Option<ValueRef>)
-> Result<'blk, 'tcx> {
trans_call_inner(bcx, debug_loc, self, args, dest)
}
/// Turn the callee into a function pointer.
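+ /// A `Fn` callee already is one; `Virtual` callees are reified through
+ /// `meth::trans_object_shim`, and named tuple constructors through the
+ /// function returned by `get_fn`.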
- pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>)
- -> Datum<'tcx, Rvalue> {
- let fn_ptr_ty = match self.ty.sty {
- ty::TyFnDef(_, _, f) => ccx.tcx().mk_fn_ptr(f),
- _ => self.ty
- };
+ pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
match self.data {
- Fn(llfn) => {
- immediate_rvalue(llfn, fn_ptr_ty)
- }
+ Fn(llfn) => llfn,
Virtual(idx) => {
- let llfn = meth::trans_object_shim(ccx, self.ty, idx);
- immediate_rvalue(llfn, fn_ptr_ty)
+ meth::trans_object_shim(ccx, self.ty, idx)
}
NamedTupleConstructor(_) => match self.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
- return get_fn(ccx, def_id, substs);
+ return get_fn(ccx, def_id, substs).0;
}
_ => bug!("expected fn item type, found {}", self.ty)
},
let llfnpointer = match bare_fn_ty.sty {
ty::TyFnDef(def_id, substs, _) => {
// Function definitions have to be turned into a pointer.
- let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
+ let llfn = Callee::def(ccx, def_id, substs).reify(ccx);
if !is_by_ref {
// A by-value fn item is ignored, so the shim has
// the same signature as the original function.
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false, None);
+ let mut bcx = fcx.init(false);
let llargs = get_params(fcx.llfn);
}
});
- assert!(!fcx.needs_ret_allocas);
-
- let dest = fcx.llretslotptr.get().map(|_|
- expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
- );
+ let dest = fcx.llretslotptr.get();
let callee = Callee {
data: Fn(llfnpointer),
ty: bare_fn_ty
};
- bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx;
+ bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx;
fcx.finish(bcx, DebugLoc::None);
fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>)
- -> Datum<'tcx, Rvalue> {
+ -> (ValueRef, Ty<'tcx>) {
let tcx = ccx.tcx();
debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs);
_ => bug!("expected fn item type, found {}", fn_ty)
};
assert_eq!(type_of::type_of(ccx, fn_ptr_ty), common::val_ty(val));
- return immediate_rvalue(val, fn_ptr_ty);
+ return (val, fn_ptr_ty);
}
// Find the actual function pointer.
let instance = Instance::mono(ccx.shared(), def_id);
if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
- return immediate_rvalue(llfn, fn_ptr_ty);
+ return (llfn, fn_ptr_ty);
}
let local_id = ccx.tcx().map.as_local_node_id(def_id);
ccx.instances().borrow_mut().insert(instance, llfn);
- immediate_rvalue(llfn, fn_ptr_ty)
+ (llfn, fn_ptr_ty)
}
// ______________________________________________________________________
// Translating calls
-fn trans_call_inner<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
callee: Callee<'tcx>,
- args: CallArgs<'a, 'tcx>,
- dest: Option<expr::Dest>)
+ args: &[ValueRef],
+ opt_llretslot: Option<ValueRef>)
-> Result<'blk, 'tcx> {
- // Introduce a temporary cleanup scope that will contain cleanups
- // for the arguments while they are being evaluated. The purpose
let fcx = bcx.fcx;
let ccx = fcx.ccx;
- let abi = callee.ty.fn_abi();
- let sig = callee.ty.fn_sig();
- let output = bcx.tcx().erase_late_bound_regions(&sig.output());
- let output = bcx.tcx().normalize_associated_type(&output);
-
- let extra_args = match args {
- ArgExprs(args) if abi != Abi::RustCall => {
- args[sig.0.inputs.len()..].iter().map(|expr| {
- common::expr_ty_adjusted(bcx, expr)
- }).collect()
- }
- _ => vec![]
- };
- let fn_ty = callee.direct_fn_type(ccx, &extra_args);
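+ // Arguments are already translated LLVM values at this point, so there
+ // are no extra argument types to compute for `direct_fn_type`.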
+ let fn_ret = callee.ty.fn_ret();
+ let fn_ty = callee.direct_fn_type(ccx, &[]);
let mut callee = match callee.data {
- Intrinsic => {
- assert!(abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic);
- assert!(dest.is_some());
-
- return intrinsic::trans_intrinsic_call(bcx, callee.ty, &fn_ty,
- args, dest.unwrap(),
- debug_loc);
- }
- NamedTupleConstructor(disr) => {
- assert!(dest.is_some());
-
- return base::trans_named_tuple_constructor(bcx,
- callee.ty,
- disr,
- args,
- dest.unwrap(),
- debug_loc);
+ NamedTupleConstructor(_) | Intrinsic => {
+ bug!("{:?} calls should not go through Callee::call", callee);
}
f => f
};
- // Generate a location to store the result. If the user does
- // not care about the result, just make a stack slot.
- let opt_llretslot = dest.and_then(|dest| match dest {
- expr::SaveIn(dst) => Some(dst),
- expr::Ignore => {
- let needs_drop = || bcx.fcx.type_needs_drop(output);
- if fn_ty.ret.is_indirect() || fn_ty.ret.cast.is_some() || needs_drop() {
- // Push the out-pointer if we use an out-pointer for this
- // return type, otherwise push "undef".
- if fn_ty.ret.is_ignore() {
- Some(C_undef(fn_ty.ret.original_ty.ptr_to()))
- } else {
- let llresult = alloca(bcx, fn_ty.ret.original_ty, "__llret");
- call_lifetime_start(bcx, llresult);
- Some(llresult)
- }
- } else {
- None
- }
- }
- });
-
// If there is no destination, return must be direct, with no cast.
if opt_llretslot.is_none() {
assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
llargs.push(llretslot);
}
- let arg_cleanup_scope = fcx.push_custom_cleanup_scope();
- bcx = trans_args(bcx, abi, &fn_ty, &mut callee, args, &mut llargs,
- cleanup::CustomScope(arg_cleanup_scope));
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
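+ // A virtual call receives `self` as a fat pointer split into args[0]
+ // (the data pointer) and args[1] (the vtable pointer): pass the data
+ // pointer through, load the method from the vtable and cast it to the
+ // concrete fn type. All other callees take their arguments unchanged.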
+ match callee {
+ Virtual(idx) => {
+ llargs.push(args[0]);
+
+ let fn_ptr = meth::get_virtual_method(bcx, args[1], idx);
+ let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
+ callee = Fn(PointerCast(bcx, fn_ptr, llty));
+ llargs.extend_from_slice(&args[2..]);
+ }
+ _ => llargs.extend_from_slice(args)
+ }
let llfn = match callee {
Fn(f) => f,
_ => bug!("expected fn pointer callee, found {:?}", callee)
};
- let (llret, mut bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
+ let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
if !bcx.unreachable.get() {
fn_ty.apply_attrs_callsite(llret);
}
}
- fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope);
-
- // If the caller doesn't care about the result of this fn call,
- // drop the temporary slot we made.
- match (dest, opt_llretslot) {
- (Some(expr::Ignore), Some(llretslot)) => {
- // drop the value if it is not being saved.
- bcx = glue::drop_ty(bcx, llretslot, output, debug_loc);
- call_lifetime_end(bcx, llretslot);
- }
- _ => {}
- }
-
- // FIXME(canndrew): This is_never should really be an is_uninhabited
- if output.is_never() {
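+ // A diverging callee never returns, so everything after the call is dead.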
+ if fn_ret.0.is_never() {
Unreachable(bcx);
}
Result::new(bcx, llret)
}
-
-pub enum CallArgs<'a, 'tcx> {
- /// Supply value of arguments as a list of expressions that must be
- /// translated. This is used in the common case of `foo(bar, qux)`.
- ArgExprs(&'a [P<hir::Expr>]),
-
- /// Supply value of arguments as a list of LLVM value refs; frequently
- /// used with lang items and so forth, when the argument is an internal
- /// value.
- ArgVals(&'a [ValueRef]),
-
- /// For overloaded operators: `(lhs, Option(rhs))`.
- /// `lhs` is the left-hand-side and `rhs` is the datum
- /// of the right-hand-side argument (if any).
- ArgOverloadedOp(Datum<'tcx, Expr>, Option<Datum<'tcx, Expr>>),
-
- /// Supply value of arguments as a list of expressions that must be
- /// translated, for overloaded call operators.
- ArgOverloadedCall(Vec<&'a hir::Expr>),
-}
-
-fn trans_args_under_call_abi<'blk, 'tcx>(
- mut bcx: Block<'blk, 'tcx>,
- arg_exprs: &[P<hir::Expr>],
- callee: &mut CalleeData,
- fn_ty: &FnType,
- llargs: &mut Vec<ValueRef>,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx>
-{
- let mut arg_idx = 0;
-
- // Translate the `self` argument first.
- let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
-
- // Now untuple the rest of the arguments.
- let tuple_expr = &arg_exprs[1];
- let tuple_type = common::node_id_type(bcx, tuple_expr.id);
-
- match tuple_type.sty {
- ty::TyTuple(ref field_types) => {
- let tuple_datum = unpack_datum!(bcx,
- expr::trans(bcx, &tuple_expr));
- let tuple_lvalue_datum =
- unpack_datum!(bcx,
- tuple_datum.to_lvalue_datum(bcx,
- "args",
- tuple_expr.id));
- let repr = adt::represent_type(bcx.ccx(), tuple_type);
- let repr_ptr = &repr;
- for (i, field_type) in field_types.iter().enumerate() {
- let arg_datum = tuple_lvalue_datum.get_element(
- bcx,
- field_type,
- |srcval| {
- adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i)
- }).to_expr_datum();
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- _ => {
- span_bug!(tuple_expr.span,
- "argument to `.call()` wasn't a tuple?!")
- }
- };
-
- bcx
-}
-
-pub fn trans_args<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- abi: Abi,
- fn_ty: &FnType,
- callee: &mut CalleeData,
- args: CallArgs<'a, 'tcx>,
- llargs: &mut Vec<ValueRef>,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Block<'blk, 'tcx> {
- debug!("trans_args(abi={})", abi);
-
- let _icx = push_ctxt("trans_args");
-
- let mut bcx = bcx;
- let mut arg_idx = 0;
-
- // First we figure out the caller's view of the types of the arguments.
- // This will be needed if this is a generic call, because the callee has
- // to cast her view of the arguments to the caller's view.
- match args {
- ArgExprs(arg_exprs) => {
- if abi == Abi::RustCall {
- // This is only used for direct calls to the `call`,
- // `call_mut` or `call_once` functions.
- return trans_args_under_call_abi(bcx,
- arg_exprs, callee, fn_ty,
- llargs,
- arg_cleanup_scope)
- }
-
- for arg_expr in arg_exprs {
- let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &arg_expr));
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- ArgOverloadedCall(arg_exprs) => {
- for expr in arg_exprs {
- let arg_datum =
- unpack_datum!(bcx, expr::trans(bcx, expr));
- bcx = trans_arg_datum(bcx,
- arg_datum,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- ArgOverloadedOp(lhs, rhs) => {
- bcx = trans_arg_datum(bcx, lhs,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
-
- if let Some(rhs) = rhs {
- bcx = trans_arg_datum(bcx, rhs,
- callee, fn_ty, &mut arg_idx,
- arg_cleanup_scope,
- llargs);
- }
- }
- ArgVals(vs) => {
- match *callee {
- Virtual(idx) => {
- llargs.push(vs[0]);
-
- let fn_ptr = meth::get_virtual_method(bcx, vs[1], idx);
- let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
- *callee = Fn(PointerCast(bcx, fn_ptr, llty));
- llargs.extend_from_slice(&vs[2..]);
- }
- _ => llargs.extend_from_slice(vs)
- }
- }
- }
-
- bcx
-}
-
-fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- arg_datum: Datum<'tcx, Expr>,
- callee: &mut CalleeData,
- fn_ty: &FnType,
- next_idx: &mut usize,
- arg_cleanup_scope: cleanup::ScopeId,
- llargs: &mut Vec<ValueRef>)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_arg_datum");
- let mut bcx = bcx;
-
- debug!("trans_arg_datum({:?})", arg_datum);
-
- let arg = &fn_ty.args[*next_idx];
- *next_idx += 1;
-
- // Fill padding with undef value, where applicable.
- if let Some(ty) = arg.pad {
- llargs.push(C_undef(ty));
- }
-
- // Determine whether we want a by-ref datum even if not appropriate.
- let want_by_ref = arg.is_indirect() || arg.cast.is_some();
-
- let fat_ptr = common::type_is_fat_ptr(bcx.tcx(), arg_datum.ty);
- let (by_ref, val) = if fat_ptr && !bcx.fcx.type_needs_drop(arg_datum.ty) {
- (true, arg_datum.val)
- } else {
- // Make this an rvalue, since we are going to be
- // passing ownership.
- let arg_datum = unpack_datum!(
- bcx, arg_datum.to_rvalue_datum(bcx, "arg"));
-
- // Now that arg_datum is owned, get it into the appropriate
- // mode (ref vs value).
- let arg_datum = unpack_datum!(bcx, if want_by_ref {
- arg_datum.to_ref_datum(bcx)
- } else {
- arg_datum.to_appropriate_datum(bcx)
- });
-
- // Technically, ownership of val passes to the callee.
- // However, we must cleanup should we panic before the
- // callee is actually invoked.
- (arg_datum.kind.is_by_ref(),
- arg_datum.add_clean(bcx.fcx, arg_cleanup_scope))
- };
-
- if arg.is_ignore() {
- return bcx;
- }
-
- debug!("--- trans_arg_datum passing {:?}", Value(val));
-
- if fat_ptr {
- // Fat pointers should be passed without any transformations.
- assert!(!arg.is_indirect() && arg.cast.is_none());
- llargs.push(Load(bcx, expr::get_dataptr(bcx, val)));
-
- let info_arg = &fn_ty.args[*next_idx];
- *next_idx += 1;
- assert!(!info_arg.is_indirect() && info_arg.cast.is_none());
- let info = Load(bcx, expr::get_meta(bcx, val));
-
- if let Virtual(idx) = *callee {
- // We have to grab the fn pointer from the vtable when
- // handling the first argument, ensure that here.
- assert_eq!(*next_idx, 2);
- assert!(info_arg.is_ignore());
- let fn_ptr = meth::get_virtual_method(bcx, info, idx);
- let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
- *callee = Fn(PointerCast(bcx, fn_ptr, llty));
- } else {
- assert!(!info_arg.is_ignore());
- llargs.push(info);
- }
- return bcx;
- }
-
- let mut val = val;
- if by_ref && !arg.is_indirect() {
- // Have to load the argument, maybe while casting it.
- if arg.original_ty == Type::i1(bcx.ccx()) {
- // We store bools as i8 so we need to truncate to i1.
- val = LoadRangeAssert(bcx, val, 0, 2, llvm::False);
- val = Trunc(bcx, val, arg.original_ty);
- } else if let Some(ty) = arg.cast {
- val = Load(bcx, PointerCast(bcx, val, ty.ptr_to()));
- if !bcx.unreachable.get() {
- let llalign = llalign_of_min(bcx.ccx(), arg.ty);
- unsafe {
- llvm::LLVMSetAlignment(val, llalign);
- }
- }
- } else {
- val = Load(bcx, val);
- }
- }
-
- llargs.push(val);
- bcx
-}
//! code for `expr` itself is responsible for freeing any other byproducts
//! that may be in play.
-pub use self::ScopeId::*;
-pub use self::CleanupScopeKind::*;
pub use self::EarlyExitLabel::*;
-pub use self::Heap::*;
use llvm::{BasicBlockRef, ValueRef};
use base;
use build;
use common;
-use common::{Block, FunctionContext, NodeIdAndSpan, LandingPad};
-use datum::{Datum, Lvalue};
-use debuginfo::{DebugLoc, ToDebugLoc};
+use common::{Block, FunctionContext, LandingPad};
+use debuginfo::DebugLoc;
use glue;
-use middle::region;
use type_::Type;
use value::Value;
-use rustc::ty::{Ty, TyCtxt};
-
-use std::fmt;
-use syntax::ast;
-
-pub struct CleanupScope<'blk, 'tcx: 'blk> {
- // The id of this cleanup scope. If the id is None,
- // this is a *temporary scope* that is pushed during trans to
- // cleanup miscellaneous garbage that trans may generate whose
- // lifetime is a subset of some expression. See module doc for
- // more details.
- kind: CleanupScopeKind<'blk, 'tcx>,
+use rustc::ty::Ty;
+pub struct CleanupScope<'tcx> {
// Cleanups to run upon scope exit.
- cleanups: Vec<CleanupObj<'tcx>>,
+ cleanups: Vec<DropValue<'tcx>>,
// The debug location any drop calls generated for this scope will be
// associated with.
index: usize
}
-pub const EXIT_BREAK: usize = 0;
-pub const EXIT_LOOP: usize = 1;
-pub const EXIT_MAX: usize = 2;
-
-pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
- CustomScopeKind,
- AstScopeKind(ast::NodeId),
- LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX])
-}
-
-impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- CustomScopeKind => write!(f, "CustomScopeKind"),
- AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid),
- LoopScopeKind(nid, ref blks) => {
- write!(f, "LoopScopeKind({}, [", nid)?;
- for blk in blks {
- write!(f, "{:p}, ", blk)?;
- }
- write!(f, "])")
- }
- }
- }
-}
-
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EarlyExitLabel {
UnwindExit(UnwindKind),
- ReturnExit,
- LoopExit(ast::NodeId, usize)
}
#[derive(Copy, Clone, Debug)]
last_cleanup: usize,
}
-pub trait Cleanup<'tcx> {
- fn must_unwind(&self) -> bool;
- fn is_lifetime_end(&self) -> bool;
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx>;
-}
-
-pub type CleanupObj<'tcx> = Box<Cleanup<'tcx>+'tcx>;
-
-#[derive(Copy, Clone, Debug)]
-pub enum ScopeId {
- AstScope(ast::NodeId),
- CustomScope(CustomScopeIndex)
-}
-
-#[derive(Copy, Clone, Debug)]
-pub struct DropHint<K>(pub ast::NodeId, pub K);
-
-pub type DropHintDatum<'tcx> = DropHint<Datum<'tcx, Lvalue>>;
-pub type DropHintValue = DropHint<ValueRef>;
-
-impl<K> DropHint<K> {
- pub fn new(id: ast::NodeId, k: K) -> DropHint<K> { DropHint(id, k) }
-}
-
-impl DropHint<ValueRef> {
- pub fn value(&self) -> ValueRef { self.1 }
-}
-
-pub trait DropHintMethods {
- type ValueKind;
- fn to_value(&self) -> Self::ValueKind;
-}
-impl<'tcx> DropHintMethods for DropHintDatum<'tcx> {
- type ValueKind = DropHintValue;
- fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) }
-}
-
-impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
- /// Invoked when we start to trans the code contained within a new cleanup scope.
- fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) {
- debug!("push_ast_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(debug_loc.id));
-
- // FIXME(#2202) -- currently closure bodies have a parent
- // region, which messes up the assertion below, since there
- // are no cleanup scopes on the stack at the start of
- // trans'ing a closure body. I think though that this should
- // eventually be fixed by closure bodies not having a parent
- // region, though that's a touch unclear, and it might also be
- // better just to narrow this assertion more (i.e., by
- // excluding id's that correspond to closure bodies only). For
- // now we just say that if there is already an AST scope on the stack,
- // this new AST scope had better be its immediate child.
- let top_scope = self.top_ast_scope();
- let region_maps = &self.ccx.tcx().region_maps;
- if top_scope.is_some() {
- assert!((region_maps
- .opt_encl_scope(region_maps.node_extent(debug_loc.id))
- .map(|s|s.node_id(region_maps)) == top_scope)
- ||
- (region_maps
- .opt_encl_scope(region_maps.lookup_code_extent(
- region::CodeExtentData::DestructionScope(debug_loc.id)))
- .map(|s|s.node_id(region_maps)) == top_scope));
- }
-
- self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
- debug_loc.debug_loc()));
- }
-
- fn push_loop_cleanup_scope(&self,
- id: ast::NodeId,
- exits: [Block<'blk, 'tcx>; EXIT_MAX]) {
- debug!("push_loop_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(id));
- assert_eq!(Some(id), self.top_ast_scope());
-
- // Just copy the debuginfo source location from the enclosing scope
- let debug_loc = self.scopes
- .borrow()
- .last()
- .unwrap()
- .debug_loc;
-
- self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc));
- }
-
- fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
+impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
+ pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex {
let index = self.scopes_len();
debug!("push_custom_cleanup_scope(): {}", index);
.map(|opt_scope| opt_scope.debug_loc)
.unwrap_or(DebugLoc::None);
- self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc));
- CustomScopeIndex { index: index }
- }
-
- fn push_custom_cleanup_scope_with_debug_loc(&self,
- debug_loc: NodeIdAndSpan)
- -> CustomScopeIndex {
- let index = self.scopes_len();
- debug!("push_custom_cleanup_scope(): {}", index);
-
- self.push_scope(CleanupScope::new(CustomScopeKind,
- debug_loc.debug_loc()));
+ self.push_scope(CleanupScope::new(debug_loc));
CustomScopeIndex { index: index }
}
- /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup
- /// stack, and generates the code to do its cleanups for normal exit.
- fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- cleanup_scope: ast::NodeId)
- -> Block<'blk, 'tcx> {
- debug!("pop_and_trans_ast_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(cleanup_scope));
-
- assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
-
- let scope = self.pop_scope();
- self.trans_scope_cleanups(bcx, &scope)
- }
-
- /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the
- /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by
- /// branching to a block generated by `normal_exit_block`.
- fn pop_loop_cleanup_scope(&self,
- cleanup_scope: ast::NodeId) {
- debug!("pop_loop_cleanup_scope({})",
- self.ccx.tcx().map.node_to_string(cleanup_scope));
-
- assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
-
- let _ = self.pop_scope();
- }
-
/// Removes the top cleanup scope from the stack without executing its cleanups. The top
/// cleanup scope must be the temporary scope `custom_scope`.
- fn pop_custom_cleanup_scope(&self,
- custom_scope: CustomScopeIndex) {
+ pub fn pop_custom_cleanup_scope(&self,
+ custom_scope: CustomScopeIndex) {
debug!("pop_custom_cleanup_scope({})", custom_scope.index);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
let _ = self.pop_scope();
/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
- fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- custom_scope: CustomScopeIndex)
- -> Block<'blk, 'tcx> {
+ pub fn pop_and_trans_custom_cleanup_scope(&self,
+ bcx: Block<'blk, 'tcx>,
+ custom_scope: CustomScopeIndex)
+ -> Block<'blk, 'tcx> {
debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
assert!(self.is_valid_to_pop_custom_scope(custom_scope));
self.trans_scope_cleanups(bcx, &scope)
}
- /// Returns the id of the top-most loop scope
- fn top_loop_scope(&self) -> ast::NodeId {
- for scope in self.scopes.borrow().iter().rev() {
- if let LoopScopeKind(id, _) = scope.kind {
- return id;
- }
- }
- bug!("no loop scope found");
- }
-
- /// Returns a block to branch to which will perform all pending cleanups and
- /// then break/continue (depending on `exit`) out of the loop with id
- /// `cleanup_scope`
- fn normal_exit_block(&'blk self,
- cleanup_scope: ast::NodeId,
- exit: usize) -> BasicBlockRef {
- self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
- }
-
- /// Returns a block to branch to which will perform all pending cleanups and
- /// then return from this function
- fn return_exit_block(&'blk self) -> BasicBlockRef {
- self.trans_cleanups_to_exit_scope(ReturnExit)
- }
-
- fn schedule_lifetime_end(&self,
- cleanup_scope: ScopeId,
- val: ValueRef) {
- let drop = box LifetimeEnd {
- ptr: val,
- };
-
- debug!("schedule_lifetime_end({:?}, val={:?})",
- cleanup_scope, Value(val));
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of
/// `ty`.
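+ ///
+ /// A typical sequence looks like this (a sketch; `llval` and `ty` stand
+ /// for any scheduled value and its type, cf. `trans_fn_once_adapter_shim`):
+ ///
+ ///     let scope = fcx.push_custom_cleanup_scope();
+ ///     fcx.schedule_drop_mem(scope, llval, ty);
+ ///     // ... translate code that may unwind ...
+ ///     bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, scope);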
- fn schedule_drop_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>) {
+ pub fn schedule_drop_mem(&self,
+ cleanup_scope: CustomScopeIndex,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
- let drop_hint = drop_hint.map(|hint|hint.to_value());
- let drop = box DropValue {
+ let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
- fill_on_drop: false,
skip_dtor: false,
- drop_hint: drop_hint,
};
- debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+ debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
- drop.fill_on_drop,
drop.skip_dtor);
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
- /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty`
- fn schedule_drop_and_fill_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>) {
- if !self.type_needs_drop(ty) { return; }
-
- let drop_hint = drop_hint.map(|datum|datum.to_value());
- let drop = box DropValue {
- is_immediate: false,
- val: val,
- ty: ty,
- fill_on_drop: true,
- skip_dtor: false,
- drop_hint: drop_hint,
- };
-
- debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?},
- fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
- cleanup_scope,
- Value(val),
- ty,
- drop.fill_on_drop,
- drop.skip_dtor,
- drop_hint.is_some());
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ self.schedule_clean(cleanup_scope, drop);
}
/// Issue #23611: Schedules a (deep) drop of the contents of
/// `ty`. The scheduled code handles extracting the discriminant
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
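+ ///
+ /// For example (a sketch): given `enum E { A(String), B }` with a custom
+ /// `Drop` impl, scheduling this for an `E::A` value drops the inner
+ /// `String` without running `E`'s own `Drop::drop`.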
- fn schedule_drop_adt_contents(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>) {
+ pub fn schedule_drop_adt_contents(&self,
+ cleanup_scope: CustomScopeIndex,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.type_needs_drop(ty) { return; }
- let drop = box DropValue {
+ let drop = DropValue {
is_immediate: false,
val: val,
ty: ty,
- fill_on_drop: false,
skip_dtor: true,
- drop_hint: None,
};
- debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+ debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
- drop.fill_on_drop,
drop.skip_dtor);
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ self.schedule_clean(cleanup_scope, drop);
}
/// Schedules a (deep) drop of `val`, which is an instance of `ty`
- fn schedule_drop_immediate(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>) {
+ pub fn schedule_drop_immediate(&self,
+ cleanup_scope: CustomScopeIndex,
+ val: ValueRef,
+ ty: Ty<'tcx>) {
if !self.type_needs_drop(ty) { return; }
- let drop = Box::new(DropValue {
+ let drop = DropValue {
is_immediate: true,
val: val,
ty: ty,
- fill_on_drop: false,
skip_dtor: false,
- drop_hint: None,
- });
+ };
- debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
+ debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}",
cleanup_scope,
Value(val),
ty,
- drop.fill_on_drop,
drop.skip_dtor);
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
- /// Schedules a call to `free(val)`. Note that this is a shallow operation.
- fn schedule_free_value(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- heap: Heap,
- content_ty: Ty<'tcx>) {
- let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
-
- debug!("schedule_free_value({:?}, val={:?}, heap={:?})",
- cleanup_scope, Value(val), heap);
-
- self.schedule_clean(cleanup_scope, drop as CleanupObj);
- }
-
- fn schedule_clean(&self,
- cleanup_scope: ScopeId,
- cleanup: CleanupObj<'tcx>) {
- match cleanup_scope {
- AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup),
- CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup),
- }
- }
-
- /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not
- /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary
- /// scope.
- fn schedule_clean_in_ast_scope(&self,
- cleanup_scope: ast::NodeId,
- cleanup: CleanupObj<'tcx>) {
- debug!("schedule_clean_in_ast_scope(cleanup_scope={})",
- cleanup_scope);
-
- for scope in self.scopes.borrow_mut().iter_mut().rev() {
- if scope.kind.is_ast_with_id(cleanup_scope) {
- scope.cleanups.push(cleanup);
- scope.cached_landing_pad = None;
- return;
- } else {
- // will be adding a cleanup to some enclosing scope
- scope.clear_cached_exits();
- }
- }
-
- bug!("no cleanup scope {} found",
- self.ccx.tcx().map.node_to_string(cleanup_scope));
+ self.schedule_clean(cleanup_scope, drop);
}
/// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope.
- fn schedule_clean_in_custom_scope(&self,
- custom_scope: CustomScopeIndex,
- cleanup: CleanupObj<'tcx>) {
+ fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) {
debug!("schedule_clean_in_custom_scope(custom_scope={})",
custom_scope.index);
}
/// Returns true if there are pending cleanups that should execute on panic.
- fn needs_invoke(&self) -> bool {
+ pub fn needs_invoke(&self) -> bool {
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
/// Returns a basic block to branch to in the event of a panic. This block
/// will run the panic cleanups and eventually resume the exception that
/// caused the landing pad to be run.
- fn get_landing_pad(&'blk self) -> BasicBlockRef {
+ pub fn get_landing_pad(&'blk self) -> BasicBlockRef {
let _icx = base::push_ctxt("get_landing_pad");
debug!("get_landing_pad");
return llbb;
}
-}
-
-impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
- /// Returns the id of the current top-most AST scope, if any.
- fn top_ast_scope(&self) -> Option<ast::NodeId> {
- for scope in self.scopes.borrow().iter().rev() {
- match scope.kind {
- CustomScopeKind | LoopScopeKind(..) => {}
- AstScopeKind(i) => {
- return Some(i);
- }
- }
- }
- None
- }
-
- fn top_nonempty_cleanup_scope(&self) -> Option<usize> {
- self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty())
- }
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
self.is_valid_custom_scope(custom_scope) &&
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
let scopes = self.scopes.borrow();
- custom_scope.index < scopes.len() &&
- (*scopes)[custom_scope.index].kind.is_temp()
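+ // With scope kinds gone, every scope is custom, so any index
+ // into the scope stack is valid.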
+ custom_scope.index < scopes.len()
}
/// Generates the cleanups for `scope` into `bcx`
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
bcx: Block<'blk, 'tcx>,
- scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> {
+ scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> {
let mut bcx = bcx;
if !bcx.unreachable.get() {
self.scopes.borrow().len()
}
- fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
+ fn push_scope(&self, scope: CleanupScope<'tcx>) {
self.scopes.borrow_mut().push(scope)
}
- fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
+ fn pop_scope(&self) -> CleanupScope<'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);
self.scopes.borrow_mut().pop().unwrap()
}
- fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R {
+ fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R {
f(self.scopes.borrow().last().unwrap())
}
UnwindExit(val) => {
// Generate a block that will resume unwinding to the
// calling function
- let bcx = self.new_block("resume", None);
+ let bcx = self.new_block("resume");
match val {
UnwindKind::LandingPad => {
let addr = self.landingpad_alloca.get()
prev_llbb = bcx.llbb;
break;
}
-
- ReturnExit => {
- prev_llbb = self.get_llreturn();
- break
- }
-
- LoopExit(id, _) => {
- bug!("cannot exit from scope {}, not in scope", id);
- }
}
}
skip = last_cleanup;
break;
}
-
- // If we are searching for a loop exit,
- // and this scope is that loop, then stop popping and set
- // `prev_llbb` to the appropriate exit block from the loop.
- let scope = popped_scopes.last().unwrap();
- match label {
- UnwindExit(..) | ReturnExit => { }
- LoopExit(id, exit) => {
- if let Some(exit) = scope.kind.early_exit_block(id, exit) {
- prev_llbb = exit;
- break
- }
- }
- }
}
debug!("trans_cleanups_to_exit_scope: popped {} scopes",
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
- let bcx_in = self.new_block(&name[..], None);
+ let bcx_in = self.new_block(&name[..]);
let exit_label = label.start(bcx_in);
let mut bcx_out = bcx_in;
let len = scope.cleanups.len();
Some(llbb) => return llbb,
None => {
let name = last_scope.block_name("unwind");
- pad_bcx = self.new_block(&name[..], None);
+ pad_bcx = self.new_block(&name[..]);
last_scope.cached_landing_pad = Some(pad_bcx.llbb);
}
}
}
}
-impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
- fn new(kind: CleanupScopeKind<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> CleanupScope<'blk, 'tcx> {
+impl<'tcx> CleanupScope<'tcx> {
+ fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> {
CleanupScope {
- kind: kind,
debug_loc: debug_loc,
cleanups: vec!(),
cached_early_exits: vec!(),
}
}
- fn clear_cached_exits(&mut self) {
- self.cached_early_exits = vec!();
- self.cached_landing_pad = None;
- }
-
fn cached_early_exit(&self,
label: EarlyExitLabel)
-> Option<(BasicBlockRef, usize)> {
/// True if this scope has cleanups that need unwinding
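+ /// (with `DropValue` now the only cleanup kind, that means any
+ /// non-empty scope).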
fn needs_invoke(&self) -> bool {
-
self.cached_landing_pad.is_some() ||
- self.cleanups.iter().any(|c| c.must_unwind())
+ !self.cleanups.is_empty()
}
/// Returns a suitable name to use for the basic block that handles this cleanup scope
fn block_name(&self, prefix: &str) -> String {
- match self.kind {
- CustomScopeKind => format!("{}_custom_", prefix),
- AstScopeKind(id) => format!("{}_ast_{}_", prefix, id),
- LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id),
- }
- }
-
- /// Manipulate cleanup scope for call arguments. Conceptually, each
- /// argument to a call is an lvalue, and performing the call moves each
- /// of the arguments into a new rvalue (which gets cleaned up by the
- /// callee). As an optimization, instead of actually performing all of
- /// those moves, trans just manipulates the cleanup scope to obtain the
- /// same effect.
- pub fn drop_non_lifetime_clean(&mut self) {
- self.cleanups.retain(|c| c.is_lifetime_end());
- self.clear_cached_exits();
- }
-}
-
-impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
- fn is_temp(&self) -> bool {
- match *self {
- CustomScopeKind => true,
- LoopScopeKind(..) | AstScopeKind(..) => false,
- }
- }
-
- fn is_ast_with_id(&self, id: ast::NodeId) -> bool {
- match *self {
- CustomScopeKind | LoopScopeKind(..) => false,
- AstScopeKind(i) => i == id
- }
- }
-
- fn is_loop_with_id(&self, id: ast::NodeId) -> bool {
- match *self {
- CustomScopeKind | AstScopeKind(..) => false,
- LoopScopeKind(i, _) => i == id
- }
- }
-
- /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None`
- fn early_exit_block(&self,
- id: ast::NodeId,
- exit: usize) -> Option<BasicBlockRef> {
- match *self {
- LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb),
- _ => None,
- }
+ format!("{}_custom_", prefix)
}
}
bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
*self
}
- label => label,
}
}
}
is_immediate: bool,
val: ValueRef,
ty: Ty<'tcx>,
- fill_on_drop: bool,
skip_dtor: bool,
- drop_hint: Option<DropHintValue>,
}
-impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> {
- fn must_unwind(&self) -> bool {
- true
- }
-
- fn is_lifetime_end(&self) -> bool {
- false
- }
-
+impl<'tcx> DropValue<'tcx> {
fn trans<'blk>(&self,
bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc)
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
} else {
- glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint)
+ glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
};
- if self.fill_on_drop {
- base::drop_done_fill_mem(bcx, self.val, self.ty);
- }
bcx
}
}
-
-#[derive(Copy, Clone, Debug)]
-pub enum Heap {
- HeapExchange
-}
-
-#[derive(Copy, Clone)]
-pub struct FreeValue<'tcx> {
- ptr: ValueRef,
- heap: Heap,
- content_ty: Ty<'tcx>
-}
-
-impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> {
- fn must_unwind(&self) -> bool {
- true
- }
-
- fn is_lifetime_end(&self) -> bool {
- false
- }
-
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx> {
- match self.heap {
- HeapExchange => {
- glue::trans_exchange_free_ty(bcx,
- self.ptr,
- self.content_ty,
- debug_loc)
- }
- }
- }
-}
-
-#[derive(Copy, Clone)]
-pub struct LifetimeEnd {
- ptr: ValueRef,
-}
-
-impl<'tcx> Cleanup<'tcx> for LifetimeEnd {
- fn must_unwind(&self) -> bool {
- false
- }
-
- fn is_lifetime_end(&self) -> bool {
- true
- }
-
- fn trans<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- debug_loc: DebugLoc)
- -> Block<'blk, 'tcx> {
- debug_loc.apply(bcx.fcx);
- base::call_lifetime_end(bcx, self.ptr);
- bcx
- }
-}
-
-pub fn temporary_scope(tcx: TyCtxt,
- id: ast::NodeId)
- -> ScopeId {
- match tcx.region_maps.temporary_scope(id) {
- Some(scope) => {
- let r = AstScope(scope.node_id(&tcx.region_maps));
- debug!("temporary_scope({}) = {:?}", id, r);
- r
- }
- None => {
- bug!("no temporary scope available for expr {}", id)
- }
- }
-}
-
-pub fn var_scope(tcx: TyCtxt,
- id: ast::NodeId)
- -> ScopeId {
- let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps));
- debug!("var_scope({}) = {:?}", id, r);
- r
-}
-
-///////////////////////////////////////////////////////////////////////////
-// These traits just exist to put the methods into this file.
-
-pub trait CleanupMethods<'blk, 'tcx> {
- fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan);
- fn push_loop_cleanup_scope(&self,
- id: ast::NodeId,
- exits: [Block<'blk, 'tcx>; EXIT_MAX]);
- fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
- fn push_custom_cleanup_scope_with_debug_loc(&self,
- debug_loc: NodeIdAndSpan)
- -> CustomScopeIndex;
- fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- cleanup_scope: ast::NodeId)
- -> Block<'blk, 'tcx>;
- fn pop_loop_cleanup_scope(&self,
- cleanup_scope: ast::NodeId);
- fn pop_custom_cleanup_scope(&self,
- custom_scope: CustomScopeIndex);
- fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: Block<'blk, 'tcx>,
- custom_scope: CustomScopeIndex)
- -> Block<'blk, 'tcx>;
- fn top_loop_scope(&self) -> ast::NodeId;
- fn normal_exit_block(&'blk self,
- cleanup_scope: ast::NodeId,
- exit: usize) -> BasicBlockRef;
- fn return_exit_block(&'blk self) -> BasicBlockRef;
- fn schedule_lifetime_end(&self,
- cleanup_scope: ScopeId,
- val: ValueRef);
- fn schedule_drop_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>);
- fn schedule_drop_and_fill_mem(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>,
- drop_hint: Option<DropHintDatum<'tcx>>);
- fn schedule_drop_adt_contents(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>);
- fn schedule_drop_immediate(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>);
- fn schedule_free_value(&self,
- cleanup_scope: ScopeId,
- val: ValueRef,
- heap: Heap,
- content_ty: Ty<'tcx>);
- fn schedule_clean(&self,
- cleanup_scope: ScopeId,
- cleanup: CleanupObj<'tcx>);
- fn schedule_clean_in_ast_scope(&self,
- cleanup_scope: ast::NodeId,
- cleanup: CleanupObj<'tcx>);
- fn schedule_clean_in_custom_scope(&self,
- custom_scope: CustomScopeIndex,
- cleanup: CleanupObj<'tcx>);
- fn needs_invoke(&self) -> bool;
- fn get_landing_pad(&'blk self) -> BasicBlockRef;
-}
-
-trait CleanupHelperMethods<'blk, 'tcx> {
- fn top_ast_scope(&self) -> Option<ast::NodeId>;
- fn top_nonempty_cleanup_scope(&self) -> Option<usize>;
- fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
- fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
- fn trans_scope_cleanups(&self,
- bcx: Block<'blk, 'tcx>,
- scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
- fn trans_cleanups_to_exit_scope(&'blk self,
- label: EarlyExitLabel)
- -> BasicBlockRef;
- fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
- fn scopes_len(&self) -> usize;
- fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
- fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
- fn top_scope<R, F>(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R;
-}
use arena::TypedArena;
use back::symbol_names;
-use llvm::{self, ValueRef, get_param, get_params};
+use llvm::{self, ValueRef, get_params};
use rustc::hir::def_id::DefId;
use abi::{Abi, FnType};
-use adt;
use attributes;
use base::*;
-use build::*;
-use callee::{self, ArgVals, Callee};
-use cleanup::{CleanupMethods, CustomScope, ScopeId};
+use callee::{self, Callee};
use common::*;
-use datum::{ByRef, Datum, lvalue_scratch_datum};
-use datum::{rvalue_scratch_datum, Rvalue};
-use debuginfo::{self, DebugLoc};
+use debuginfo::DebugLoc;
use declare;
-use expr;
use monomorphize::{Instance};
use value::Value;
-use Disr;
use rustc::ty::{self, Ty, TyCtxt};
-use session::config::FullDebugInfo;
-
-use syntax::ast;
use rustc::hir;
-use libc::c_uint;
-
-fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- closure_def_id: DefId,
- arg_scope_id: ScopeId,
- id: ast::NodeId) {
- let _icx = push_ctxt("closure::load_closure_environment");
- let kind = kind_for_closure(bcx.ccx(), closure_def_id);
-
- let env_arg = &bcx.fcx.fn_ty.args[0];
- let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize;
-
- // Special case for small by-value selfs.
- let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() {
- let closure_ty = node_id_type(bcx, id);
- let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val;
- env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv);
- llenv
- } else {
- get_param(bcx.fcx.llfn, env_idx as c_uint)
- };
-
- // Store the pointer to closure data in an alloca for debug info because that's what the
- // llvm.dbg.declare intrinsic expects
- let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo {
- let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr");
- Store(bcx, llenv, alloc);
- Some(alloc)
- } else {
- None
- };
-
- bcx.tcx().with_freevars(id, |fv| {
- for (i, freevar) in fv.iter().enumerate() {
- let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
- closure_expr_id: id };
- let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
- let mut upvar_ptr = StructGEP(bcx, llenv, i);
- let captured_by_ref = match upvar_capture {
- ty::UpvarCapture::ByValue => false,
- ty::UpvarCapture::ByRef(..) => {
- upvar_ptr = Load(bcx, upvar_ptr);
- true
- }
- };
- let node_id = freevar.def.var_id();
- bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
-
- if kind == ty::ClosureKind::FnOnce && !captured_by_ref {
- let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
- bcx.fcx.schedule_drop_mem(arg_scope_id,
- upvar_ptr,
- node_id_type(bcx, node_id),
- hint)
- }
-
- if let Some(env_pointer_alloca) = env_pointer_alloca {
- debuginfo::create_captured_var_metadata(
- bcx,
- node_id,
- env_pointer_alloca,
- i,
- captured_by_ref,
- freevar.span);
- }
- }
- })
-}
-
-pub enum ClosureEnv {
- NotClosure,
- Closure(DefId, ast::NodeId),
-}
-
-impl ClosureEnv {
- pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) {
- if let ClosureEnv::Closure(def_id, id) = self {
- load_closure_environment(bcx, def_id, arg_scope, id);
- }
- }
-}
-
fn get_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
closure_id: DefId,
fn_ty: Ty<'tcx>)
pub fn trans_closure_body_via_mir<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_def_id: DefId,
closure_substs: ty::ClosureSubsts<'tcx>) {
- use syntax::ast::DUMMY_NODE_ID;
- use syntax_pos::DUMMY_SP;
- use syntax::ptr::P;
-
- trans_closure_expr(Dest::Ignore(ccx),
- &hir::FnDecl {
- inputs: P::new(),
- output: hir::Return(P(hir::Ty {
- id: DUMMY_NODE_ID,
- span: DUMMY_SP,
- node: hir::Ty_::TyNever,
- })),
- variadic: false
- },
- &hir::Block {
- stmts: P::new(),
- expr: None,
- id: DUMMY_NODE_ID,
- rules: hir::DefaultBlock,
- span: DUMMY_SP
- },
- DUMMY_NODE_ID,
- closure_def_id,
- closure_substs);
-}
-
-pub enum Dest<'a, 'tcx: 'a> {
- SaveIn(Block<'a, 'tcx>, ValueRef),
- Ignore(&'a CrateContext<'a, 'tcx>)
-}
-
-pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
- decl: &hir::FnDecl,
- body: &hir::Block,
- id: ast::NodeId,
- closure_def_id: DefId, // (*)
- closure_substs: ty::ClosureSubsts<'tcx>)
- -> Option<Block<'a, 'tcx>>
-{
- // (*) Note that in the case of inlined functions, the `closure_def_id` will be the
- // defid of the closure in its original crate, whereas `id` will be the id of the local
- // inlined copy.
- debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})",
- id, closure_def_id, closure_substs);
+ debug!("trans_closure_body_via_mir(closure_def_id={:?}, closure_substs={:?})",
+ closure_def_id, closure_substs);
- let ccx = match dest {
- Dest::SaveIn(bcx, _) => bcx.ccx(),
- Dest::Ignore(ccx) => ccx
- };
let tcx = ccx.tcx();
- let _icx = push_ctxt("closure::trans_closure_expr");
+ let _icx = push_ctxt("closure::trans_closure_body_via_mir");
};
trans_closure(ccx,
- decl,
- body,
llfn,
Instance::new(closure_def_id, param_substs),
- id,
&sig,
- Abi::RustCall,
- ClosureEnv::Closure(closure_def_id, id));
+ Abi::RustCall);
ccx.instances().borrow_mut().insert(instance, llfn);
}
-
- // Don't hoist this to the top of the function. It's perfectly legitimate
- // to have a zero-size closure (in which case dest will be `Ignore`) and
- // we must still generate the closure body.
- let (mut bcx, dest_addr) = match dest {
- Dest::SaveIn(bcx, p) => (bcx, p),
- Dest::Ignore(_) => {
- debug!("trans_closure_expr() ignoring result");
- return None;
- }
- };
-
- let repr = adt::represent_type(ccx, node_id_type(bcx, id));
-
- // Create the closure.
- tcx.with_freevars(id, |fv| {
- for (i, freevar) in fv.iter().enumerate() {
- let datum = expr::trans_var(bcx, freevar.def);
- let upvar_slot_dest = adt::trans_field_ptr(
- bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i);
- let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
- closure_expr_id: id };
- match tcx.upvar_capture(upvar_id).unwrap() {
- ty::UpvarCapture::ByValue => {
- bcx = datum.store_to(bcx, upvar_slot_dest);
- }
- ty::UpvarCapture::ByRef(..) => {
- Store(bcx, datum.to_llref(), upvar_slot_dest);
- }
- }
- }
- });
- adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0));
-
- Some(bcx)
}
pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
if !ccx.sess().target.target.options.allows_weak_linkage &&
!ccx.sess().opts.single_codegen_unit() {
- if let Some(node_id) = ccx.tcx().map.as_local_node_id(closure_def_id) {
- // If the closure is defined in the local crate, we can always just
- // translate it.
- let (decl, body) = match ccx.tcx().map.expect_expr(node_id).node {
- hir::ExprClosure(_, ref decl, ref body, _) => (decl, body),
- _ => { unreachable!() }
- };
-
- trans_closure_expr(Dest::Ignore(ccx),
- decl,
- body,
- node_id,
- closure_def_id,
- substs);
- } else {
- trans_closure_body_via_mir(ccx, closure_def_id, substs);
- }
+ trans_closure_body_via_mir(ccx, closure_def_id, substs);
}
// If the closure is a Fn closure, but a FnOnce is needed (etc),
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false, None);
+ let mut bcx = fcx.init(false);
// the first argument (`self`) will be the (by value) closure env.
- let self_scope = fcx.push_custom_cleanup_scope();
- let self_scope_id = CustomScope(self_scope);
let mut llargs = get_params(fcx.llfn);
let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let env_arg = &fcx.fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
- Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef))
- .add_clean(&fcx, self_scope_id)
+ llargs[self_idx]
} else {
- unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self",
- InitAlloca::Dropped,
- self_scope_id, |bcx, llval| {
- let mut llarg_idx = self_idx;
- env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval);
- bcx.fcx.schedule_lifetime_end(self_scope_id, llval);
- bcx
- })).val
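+ // The env was passed by value: spill it to a stack slot so that the
+ // by-ref closure body can take it by reference.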
+ let scratch = alloc_ty(bcx, closure_ty, "self");
+ let mut llarg_idx = self_idx;
+ env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch);
+ scratch
};
debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
llargs[self_idx] = llenv;
}
- let dest =
- fcx.llretslotptr.get().map(
- |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
+ let dest = fcx.llretslotptr.get();
let callee = Callee {
data: callee::Fn(llreffn),
ty: llref_fn_ty
};
- bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx;
+
+ // Call the by-ref closure body with `self` in a cleanup scope,
+ // so that `self` is dropped when the body returns or unwinds.
+ let self_scope = fcx.push_custom_cleanup_scope();
+ fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
+
+ bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;
fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
use llvm;
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
-use rustc::cfg;
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use callee::Callee;
use cleanup;
use consts;
-use datum;
use debuginfo::{self, DebugLoc};
use declare;
use machine;
use rustc::traits::{self, SelectionContext, Reveal};
use rustc::ty::fold::TypeFoldable;
use rustc::hir;
-use util::nodemap::NodeMap;
use arena::TypedArena;
use libc::{c_uint, c_char};
pub span: Span,
}
-pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan {
- NodeIdAndSpan { id: expr.id, span: expr.span }
-}
-
/// The concrete version of ty::FieldDef. The name is the field index if
/// the field is numeric.
pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
}
}
}
-
- /// Return the variant corresponding to a given node (e.g. expr)
- pub fn of_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self {
- Self::from_ty(tcx, ty, Some(tcx.expect_def(id)))
- }
-
- pub fn field_index(&self, name: ast::Name) -> usize {
- self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| {
- bug!("unknown field `{}`", name)
- })
- }
}
pub struct BuilderRef_res {
assert!(!substs.types.needs_infer());
}
-// work around bizarre resolve errors
-type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
-pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
-
-#[derive(Clone, Debug)]
-struct HintEntry<'tcx> {
- // The datum for the dropflag-hint itself; note that many
- // source-level Lvalues will be associated with the same
- // dropflag-hint datum.
- datum: cleanup::DropHintDatum<'tcx>,
-}
-
-pub struct DropFlagHintsMap<'tcx> {
- // Maps NodeId for expressions that read/write unfragmented state
- // to that state's drop-flag "hint." (A stack-local hint
- // indicates either that (1.) it is certain that no-drop is
- // needed, or (2.) inline drop-flag must be consulted.)
- node_map: NodeMap<HintEntry<'tcx>>,
-}
-
-impl<'tcx> DropFlagHintsMap<'tcx> {
- pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } }
- pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) }
- pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) {
- self.node_map.insert(id, HintEntry { datum: datum });
- }
- pub fn hint_datum(&self, id: ast::NodeId) -> Option<cleanup::DropHintDatum<'tcx>> {
- self.node_map.get(&id).map(|t|t.datum)
- }
-}
-
// Function context. Every LLVM function we create will have one of
// these.
pub struct FunctionContext<'a, 'tcx: 'a> {
// A marker for the place where we want to insert the function's static
// allocas, so that LLVM will coalesce them into a single alloca call.
pub alloca_insert_pt: Cell<Option<ValueRef>>,
- pub llreturn: Cell<Option<BasicBlockRef>>,
-
- // If the function has any nested return's, including something like:
- // fn foo() -> Option<Foo> { Some(Foo { x: return None }) }, then
- // we use a separate alloca for each return
- pub needs_ret_allocas: bool,
// When working with landingpad-based exceptions this value is alloca'd and
// later loaded when using the resume instruction. This ends up being
// Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>,
- // Maps the DefId's for local variables to the allocas created for
- // them in llallocas.
- pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
-
- // Same as above, but for closure upvars
- pub llupvars: RefCell<NodeMap<ValueRef>>,
-
- // Carries info about drop-flags for local bindings (longer term,
- // paths) for the code being compiled.
- pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
-
// Describes the return/argument LLVM types and their ABI handling.
pub fn_ty: FnType,
pub debug_context: debuginfo::FunctionDebugContext,
// Cleanup scopes.
- pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
-
- pub cfg: Option<cfg::CFG>,
+ pub scopes: RefCell<Vec<cleanup::CleanupScope<'tcx>>>,
}
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
}
}
- pub fn get_llreturn(&self) -> BasicBlockRef {
- if self.llreturn.get().is_none() {
-
- self.llreturn.set(Some(unsafe {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn,
- "return\0".as_ptr() as *const _)
- }))
- }
-
- self.llreturn.get().unwrap()
- }
-
- pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
- if self.needs_ret_allocas {
- base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
- } else {
- self.llretslotptr.get().unwrap()
- }
- }
-
pub fn new_block(&'a self,
- name: &str,
- opt_node_id: Option<ast::NodeId>)
+ name: &str)
-> Block<'a, 'tcx> {
unsafe {
let name = CString::new(name).unwrap();
let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
name.as_ptr());
- BlockS::new(llbb, opt_node_id, self)
- }
- }
-
- pub fn new_id_block(&'a self,
- name: &str,
- node_id: ast::NodeId)
- -> Block<'a, 'tcx> {
- self.new_block(name, Some(node_id))
- }
-
- pub fn new_temp_block(&'a self,
- name: &str)
- -> Block<'a, 'tcx> {
- self.new_block(name, None)
- }
-
- pub fn join_blocks(&'a self,
- id: ast::NodeId,
- in_cxs: &[Block<'a, 'tcx>])
- -> Block<'a, 'tcx> {
- let out = self.new_id_block("join", id);
- let mut reachable = false;
- for bcx in in_cxs {
- if !bcx.unreachable.get() {
- build::Br(*bcx, out.llbb, DebugLoc::None);
- reachable = true;
- }
- }
- if !reachable {
- build::Unreachable(out);
+ BlockS::new(llbb, self)
}
- return out;
}
pub fn monomorphize<T>(&self, value: &T) -> T
let tcx = ccx.tcx();
match tcx.lang_items.eh_personality() {
Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
- Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx).val
+ Callee::def(ccx, def_id, Substs::empty(tcx)).reify(ccx)
}
_ => {
if let Some(llpersonality) = ccx.eh_personality().get() {
let unwresume = ccx.eh_unwind_resume();
if let Some(llfn) = unwresume.get() {
- return Callee::ptr(datum::immediate_rvalue(llfn, ty));
+ return Callee::ptr(llfn, ty);
}
let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
- Callee::ptr(datum::immediate_rvalue(llfn, ty))
+ Callee::ptr(llfn, ty)
}
}
// kind of landing pad it's in; otherwise this is None.
pub lpad: Cell<Option<&'blk LandingPad>>,
- // AST node-id associated with this block, if any. Used for
- // debugging purposes only.
- pub opt_node_id: Option<ast::NodeId>,
-
// The function context for the function to which this block is
// attached.
pub fcx: &'blk FunctionContext<'blk, 'tcx>,
impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
pub fn new(llbb: BasicBlockRef,
- opt_node_id: Option<ast::NodeId>,
fcx: &'blk FunctionContext<'blk, 'tcx>)
-> Block<'blk, 'tcx> {
fcx.block_arena.alloc(BlockS {
terminated: Cell::new(false),
unreachable: Cell::new(false),
lpad: Cell::new(None),
- opt_node_id: opt_node_id,
fcx: fcx
})
}
}
}
-pub fn C_floating(s: &str, t: Type) -> ValueRef {
- unsafe {
- let s = CString::new(s).unwrap();
- llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr())
- }
-}
-
pub fn C_floating_f64(f: f64, t: Type) -> ValueRef {
unsafe {
llvm::LLVMConstReal(t.to_ref(), f)
}
}
-pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
- bcx.fcx.monomorphize(&t)
-}
-
-pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> {
- let tcx = bcx.tcx();
- let t = tcx.node_id_to_type(id);
- monomorphize_type(bcx, t)
-}
-
-pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
- node_id_type(bcx, ex.id)
-}
-
-pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> {
- monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex))
-}
-
/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
/// guarantee to us that all nested obligations *could be* resolved if we wanted to.
use llvm;
use llvm::{SetUnnamedAddr};
-use llvm::{InternalLinkage, ValueRef, Bool, True};
-use middle::const_qualif::ConstQualif;
-use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, lookup_const_by_id, ErrKind};
-use rustc_const_eval::{eval_length, report_const_eval_err, note_const_eval_err};
-use rustc::hir::def::Def;
+use llvm::{InternalLinkage, ValueRef, True};
+use rustc_const_eval::ConstEvalErr;
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
-use {abi, adt, closure, debuginfo, expr, machine};
+use {debuginfo, machine};
use base::{self, push_ctxt};
-use callee::Callee;
use trans_item::TransItem;
-use common::{type_is_sized, C_nil, const_get_elt};
-use common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty};
-use common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint};
-use common::{type_is_fat_ptr, Field, C_vector, C_array, C_null};
-use datum::{Datum, Lvalue};
+use common::{CrateContext, val_ty};
use declare;
-use monomorphize::{self, Instance};
+use monomorphize::Instance;
use type_::Type;
use type_of;
-use value::Value;
-use Disr;
-use rustc::ty::subst::Substs;
-use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer};
-use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::cast::{CastTy,IntTy};
-use util::nodemap::NodeMap;
-use rustc_const_math::{ConstInt, ConstUsize, ConstIsize};
+use rustc::ty;
use rustc::hir;
use std::ffi::{CStr, CString};
-use libc::c_uint;
-use syntax::ast::{self, LitKind};
+use syntax::ast;
use syntax::attr::{self, AttrMetaMethods};
use syntax::parse::token;
-use syntax::ptr::P;
-use syntax_pos::Span;
-
-pub type FnArgMap<'a> = Option<&'a NodeMap<ValueRef>>;
-
-pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit)
- -> ValueRef {
- let _icx = push_ctxt("trans_lit");
- debug!("const_lit: {:?}", lit);
- match lit.node {
- LitKind::Byte(b) => C_integral(Type::uint_from_ty(cx, ast::UintTy::U8), b as u64, false),
- LitKind::Char(i) => C_integral(Type::char(cx), i as u64, false),
- LitKind::Int(i, ast::LitIntType::Signed(t)) => {
- C_integral(Type::int_from_ty(cx, t), i, true)
- }
- LitKind::Int(u, ast::LitIntType::Unsigned(t)) => {
- C_integral(Type::uint_from_ty(cx, t), u, false)
- }
- LitKind::Int(i, ast::LitIntType::Unsuffixed) => {
- let lit_int_ty = cx.tcx().node_id_to_type(e.id);
- match lit_int_ty.sty {
- ty::TyInt(t) => {
- C_integral(Type::int_from_ty(cx, t), i as u64, true)
- }
- ty::TyUint(t) => {
- C_integral(Type::uint_from_ty(cx, t), i as u64, false)
- }
- _ => span_bug!(lit.span,
- "integer literal has type {:?} (expected int \
- or usize)",
- lit_int_ty)
- }
- }
- LitKind::Float(ref fs, t) => {
- C_floating(&fs, Type::float_from_ty(cx, t))
- }
- LitKind::FloatUnsuffixed(ref fs) => {
- let lit_float_ty = cx.tcx().node_id_to_type(e.id);
- match lit_float_ty.sty {
- ty::TyFloat(t) => {
- C_floating(&fs, Type::float_from_ty(cx, t))
- }
- _ => {
- span_bug!(lit.span,
- "floating point literal doesn't have the right type");
- }
- }
- }
- LitKind::Bool(b) => C_bool(cx, b),
- LitKind::Str(ref s, _) => C_str_slice(cx, (*s).clone()),
- LitKind::ByteStr(ref data) => {
- addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str")
- }
- }
-}
pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef {
unsafe {
gv
}
-/// Deref a constant pointer
-pub fn load_const(cx: &CrateContext, v: ValueRef, t: Ty) -> ValueRef {
- let v = match cx.const_unsized().borrow().get(&v) {
- Some(&v) => v,
- None => v
- };
- let d = unsafe { llvm::LLVMGetInitializer(v) };
- if !d.is_null() && t.is_bool() {
- unsafe { llvm::LLVMConstTrunc(d, Type::i1(cx).to_ref()) }
- } else {
- d
- }
-}
-
-fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- v: ValueRef,
- ty: Ty<'tcx>)
- -> (ValueRef, Ty<'tcx>) {
- match ty.builtin_deref(true, ty::NoPreference) {
- Some(mt) => {
- if type_is_sized(cx.tcx(), mt.ty) {
- (load_const(cx, v, mt.ty), mt.ty)
- } else {
- // Derefing a fat pointer does not change the representation,
- // just the type to the unsized contents.
- (v, mt.ty)
- }
- }
- None => {
- bug!("unexpected dereferenceable type {:?}", ty)
- }
- }
-}
-
-fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- substs: &'tcx Substs<'tcx>,
- arg_vals: &[ValueRef],
- param_substs: &'tcx Substs<'tcx>,
- trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
- let fn_like = lookup_const_fn_by_id(ccx.tcx(), def_id);
- let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call");
-
- let body = match fn_like.body().expr {
- Some(ref expr) => expr,
- None => return Ok(C_nil(ccx))
- };
-
- let args = &fn_like.decl().inputs;
- assert_eq!(args.len(), arg_vals.len());
-
- let arg_ids = args.iter().map(|arg| arg.pat.id);
- let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
-
- let substs = ccx.tcx().erase_regions(&substs);
- let substs = monomorphize::apply_param_substs(ccx.tcx(),
- param_substs,
- &substs);
-
- const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
-}
-
-pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- ref_expr: &hir::Expr,
- param_substs: &'tcx Substs<'tcx>)
- -> &'tcx hir::Expr {
- let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs;
- let substs = ccx.tcx().erase_regions(&substs);
- let substs = monomorphize::apply_param_substs(ccx.tcx(),
- param_substs,
- &substs);
- match lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) {
- Some((ref expr, _ty)) => expr,
- None => {
- span_bug!(ref_expr.span, "constant item not found")
- }
- }
-}
-
-pub enum ConstEvalFailure {
- /// In case the const evaluator failed on something that would panic
- /// at run-time, as defined in RFC 1229.
- Runtime(ConstEvalErr),
- /// In case we found a true constant.
- Compiletime(ConstEvalErr),
-}
-
-impl ConstEvalFailure {
- fn into_inner(self) -> ConstEvalErr {
- match self {
- Runtime(e) => e,
- Compiletime(e) => e,
- }
- }
-
- pub fn as_inner(&self) -> &ConstEvalErr {
- match self {
- &Runtime(ref e) => e,
- &Compiletime(ref e) => e,
- }
- }
-}
-
-#[derive(Copy, Clone, Debug, Eq, PartialEq)]
-pub enum TrueConst {
- Yes, No
-}
-
-use self::ConstEvalFailure::*;
-
-fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- def_id: DefId,
- ref_expr: &hir::Expr,
- param_substs: &'tcx Substs<'tcx>)
- -> Result<ValueRef, ConstEvalFailure> {
- let expr = get_const_expr(ccx, def_id, ref_expr, param_substs);
- let empty_substs = Substs::empty(ccx.tcx());
- match get_const_expr_as_global(ccx, expr, ConstQualif::empty(), empty_substs, TrueConst::Yes) {
- Err(Runtime(err)) => {
- report_const_eval_err(ccx.tcx(), &err, expr.span, "expression").emit();
- Err(Compiletime(err))
- },
- other => other,
- }
-}
-
-pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- expr: &hir::Expr,
- qualif: ConstQualif,
- param_substs: &'tcx Substs<'tcx>,
- trueconst: TrueConst)
- -> Result<ValueRef, ConstEvalFailure> {
- debug!("get_const_expr_as_global: {:?}", expr.id);
- // Special-case constants to cache a common global for all uses.
- if let hir::ExprPath(..) = expr.node {
- // `def` must be its own statement and cannot be in the `match`
- // otherwise the `def_map` will be borrowed for the entire match instead
- // of just to get the `def` value
- match ccx.tcx().expect_def(expr.id) {
- Def::Const(def_id) | Def::AssociatedConst(def_id) => {
- if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) {
- debug!("get_const_expr_as_global ({:?}): found const {:?}",
- expr.id, def_id);
- return get_const_val(ccx, def_id, expr, param_substs);
- }
- },
- _ => {},
- }
- }
-
- let key = (expr.id, param_substs);
- if let Some(&val) = ccx.const_values().borrow().get(&key) {
- return Ok(val);
- }
- let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs,
- &ccx.tcx().expr_ty(expr));
- let val = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) {
- // Avoid autorefs as they would create global instead of stack
- // references, even when only the latter are correct.
- const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst)?
- } else {
- const_expr(ccx, expr, param_substs, None, trueconst)?.0
- };
-
- // boolean SSA values are i1, but they have to be stored in i8 slots,
- // otherwise some LLVM optimization passes don't work as expected
- let val = unsafe {
- if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() {
- llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref())
- } else {
- val
- }
- };
-
- let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const");
- ccx.const_values().borrow_mut().insert(key, lvalue);
- Ok(lvalue)
-}
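-
-// A call-site sketch (illustrative only; `bcx`, `expr`, and `qualif` are
-// assumed to be in scope, mirroring how expression translation used this
-// entry point for const-qualified expressions):
-//
-//     let global = get_const_expr_as_global(bcx.ccx(), expr, qualif,
-//                                           bcx.fcx.param_substs,
-//                                           TrueConst::No)?;
-//     // `global` is the address of a cached LLVM global holding the
-//     // fully-evaluated constant.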
-
-pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- e: &hir::Expr,
- param_substs: &'tcx Substs<'tcx>,
- fn_args: FnArgMap,
- trueconst: TrueConst)
- -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> {
- let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs,
- &cx.tcx().expr_ty(e));
- let llconst = const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst)?;
- let mut llconst = llconst;
- let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs,
- &cx.tcx().expr_ty_adjusted(e));
- let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned();
- match opt_adj {
- Some(AdjustNeverToAny(..)) => span_bug!(e.span, "const expression of type ! encountered"),
- Some(AdjustReifyFnPointer) => {
- match ety.sty {
- ty::TyFnDef(def_id, substs, _) => {
- llconst = Callee::def(cx, def_id, substs).reify(cx).val;
- }
- _ => {
- bug!("{} cannot be reified to a fn ptr", ety)
- }
- }
- }
- Some(AdjustUnsafeFnPointer) | Some(AdjustMutToConstPointer) => {
- // purely a type-level thing
- }
- Some(AdjustDerefRef(adj)) => {
- let mut ty = ety;
- // Save the last autoderef in case we can avoid it.
- if adj.autoderefs > 0 {
- for _ in 0..adj.autoderefs-1 {
- let (dv, dt) = const_deref(cx, llconst, ty);
- llconst = dv;
- ty = dt;
- }
- }
-
- if adj.autoref.is_some() {
- if adj.autoderefs == 0 {
- // Don't copy data to do a deref+ref
- // (i.e., skip the last auto-deref).
- llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref");
- ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReErased), ty);
- }
- } else if adj.autoderefs > 0 {
- let (dv, dt) = const_deref(cx, llconst, ty);
- llconst = dv;
-
- // If we derefed a fat pointer then we will have an
- // open type here. So we need to update the type with
- // the one returned from const_deref.
- ety_adjusted = dt;
- }
-
- if let Some(target) = adj.unsize {
- let target = monomorphize::apply_param_substs(cx.tcx(),
- param_substs,
- &target);
-
- let pointee_ty = ty.builtin_deref(true, ty::NoPreference)
- .expect("consts: unsizing got non-pointer type").ty;
- let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) {
- // Normally, the source is a thin pointer and we are
- // adding extra info to make a fat pointer. The exception
- // is when we are upcasting an existing object fat pointer
- // to use a different vtable. In that case, we want to
- // load out the original data pointer so we can repackage
- // it.
- (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]),
- Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32])))
- } else {
- (llconst, None)
- };
-
- let unsized_ty = target.builtin_deref(true, ty::NoPreference)
- .expect("consts: unsizing got non-pointer target type").ty;
- let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to();
- let base = ptrcast(base, ptr_ty);
- let info = base::unsized_info(cx, pointee_ty, unsized_ty, old_info);
-
- if old_info.is_none() {
- let prev_const = cx.const_unsized().borrow_mut()
- .insert(base, llconst);
- assert!(prev_const.is_none() || prev_const == Some(llconst));
- }
- assert_eq!(abi::FAT_PTR_ADDR, 0);
- assert_eq!(abi::FAT_PTR_EXTRA, 1);
- llconst = C_struct(cx, &[base, info], false);
- }
- }
- None => {}
- };
-
- let llty = type_of::sizing_type_of(cx, ety_adjusted);
- let csize = machine::llsize_of_alloc(cx, val_ty(llconst));
- let tsize = machine::llsize_of_alloc(cx, llty);
- if csize != tsize {
- cx.sess().abort_if_errors();
- unsafe {
- // FIXME these values could use some context
- llvm::LLVMDumpValue(llconst);
- llvm::LLVMDumpValue(C_undef(llty));
- }
- bug!("const {:?} of type {:?} has size {} instead of {}",
- e, ety_adjusted,
- csize, tsize);
- }
- Ok((llconst, ety_adjusted))
-}
-
-fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
- te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
- // The only kind of unary expression that we check for validity
- // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`).
- if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node {
-
- // An unfortunate special case: we parse e.g. -128 as a
- // negation of the literal 128, which means if we're expecting
- // a i8 (or if it was already suffixed, e.g. `-128_i8`), then
- // 128 will have already overflowed to -128, and so then the
- // constant evaluator thinks we're trying to negate -128.
- //
- // Catch this up front by looking for ExprLit directly,
- // and just accepting it.
- if let hir::ExprLit(_) = inner_e.node { return Ok(()); }
- let cval = match to_const_int(te, t, cx.tcx()) {
- Some(v) => v,
- None => return Ok(()),
- };
- const_err(cx, e.span, (-cval).map_err(ErrKind::Math), trueconst)?;
- }
- Ok(())
-}
-
-pub fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
- match t.sty {
- ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
- ast::IntTy::I8 => {
- assert_eq!(input as i8 as i64, input);
- Some(ConstInt::I8(input as i8))
- },
- ast::IntTy::I16 => {
- assert_eq!(input as i16 as i64, input);
- Some(ConstInt::I16(input as i16))
- },
- ast::IntTy::I32 => {
- assert_eq!(input as i32 as i64, input);
- Some(ConstInt::I32(input as i32))
- },
- ast::IntTy::I64 => {
- Some(ConstInt::I64(input))
- },
- ast::IntTy::Is => {
- ConstIsize::new(input, tcx.sess.target.int_type)
- .ok().map(ConstInt::Isize)
- },
- }),
- ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
- ast::UintTy::U8 => {
- assert_eq!(input as u8 as u64, input);
- Some(ConstInt::U8(input as u8))
- },
- ast::UintTy::U16 => {
- assert_eq!(input as u16 as u64, input);
- Some(ConstInt::U16(input as u16))
- },
- ast::UintTy::U32 => {
- assert_eq!(input as u32 as u64, input);
- Some(ConstInt::U32(input as u32))
- },
- ast::UintTy::U64 => {
- Some(ConstInt::U64(input))
- },
- ast::UintTy::Us => {
- ConstUsize::new(input, tcx.sess.target.uint_type)
- .ok().map(ConstInt::Usize)
- },
- }),
- _ => None,
- }
-}
-
-pub fn const_err<T>(cx: &CrateContext,
- span: Span,
- result: Result<T, ErrKind>,
- trueconst: TrueConst)
- -> Result<T, ConstEvalFailure> {
- match (result, trueconst) {
- (Ok(x), _) => Ok(x),
- (Err(err), TrueConst::Yes) => {
- let err = ConstEvalErr{ span: span, kind: err };
- report_const_eval_err(cx.tcx(), &err, span, "expression").emit();
- Err(Compiletime(err))
- },
- (Err(err), TrueConst::No) => {
- let err = ConstEvalErr{ span: span, kind: err };
- let mut diag = cx.tcx().sess.struct_span_warn(
- span, "this expression will panic at run-time");
- note_const_eval_err(cx.tcx(), &err, span, "expression", &mut diag);
- diag.emit();
- Err(Runtime(err))
- },
- }
-}
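-
-// For orientation (an illustrative sketch, not compiler output):
-//
-//     const X: i32 = 1 / 0;     // evaluated with TrueConst::Yes: a hard
-//                               // error, returned as Err(Compiletime(..))
-//     let _ = [1, 2, 3][9];     // promoted with TrueConst::No: warns
-//                               // "this expression will panic at run-time"
-//                               // and returns Err(Runtime(..))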
-
-fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty,
- te1: ValueRef, te2: ValueRef,
- trueconst: TrueConst) -> Result<(), ConstEvalFailure> {
- let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { bug!() };
- let (lhs, rhs) = match (to_const_int(te1, t, cx.tcx()), to_const_int(te2, t, cx.tcx())) {
- (Some(v1), Some(v2)) => (v1, v2),
- _ => return Ok(()),
- };
- let result = match b.node {
- hir::BiAdd => lhs + rhs,
- hir::BiSub => lhs - rhs,
- hir::BiMul => lhs * rhs,
- hir::BiDiv => lhs / rhs,
- hir::BiRem => lhs % rhs,
- hir::BiShl => lhs << rhs,
- hir::BiShr => lhs >> rhs,
- _ => return Ok(()),
- };
- const_err(cx, e.span, result.map_err(ErrKind::Math), trueconst)?;
- Ok(())
-}
-
-fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- e: &hir::Expr,
- ety: Ty<'tcx>,
- param_substs: &'tcx Substs<'tcx>,
- fn_args: FnArgMap,
- trueconst: TrueConst)
- -> Result<ValueRef, ConstEvalFailure>
-{
- debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})",
- e,
- ety,
- param_substs);
-
- let map_list = |exprs: &[P<hir::Expr>]| -> Result<Vec<ValueRef>, ConstEvalFailure> {
- exprs.iter()
- .map(|e| const_expr(cx, &e, param_substs, fn_args, trueconst).map(|(l, _)| l))
- .collect::<Vec<Result<ValueRef, ConstEvalFailure>>>()
- .into_iter()
- .collect()
- // this dance is necessary to eagerly run const_expr so all errors are reported
- };
- let _icx = push_ctxt("const_expr");
- Ok(match e.node {
- hir::ExprLit(ref lit) => const_lit(cx, e, &lit),
- hir::ExprBinary(b, ref e1, ref e2) => {
- /* Neither type is bottom, and we expect them to be unified
- * already, so the following is safe. */
- let (te1, ty) = const_expr(cx, &e1, param_substs, fn_args, trueconst)?;
- debug!("const_expr_unadjusted: te1={:?}, ty={:?}",
- Value(te1), ty);
- assert!(!ty.is_simd());
- let is_float = ty.is_fp();
- let signed = ty.is_signed();
-
- let (te2, ty2) = const_expr(cx, &e2, param_substs, fn_args, trueconst)?;
- debug!("const_expr_unadjusted: te2={:?}, ty={:?}",
- Value(te2), ty2);
-
- check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)?;
-
- unsafe { match b.node {
- hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2),
- hir::BiAdd => llvm::LLVMConstAdd(te1, te2),
-
- hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2),
- hir::BiSub => llvm::LLVMConstSub(te1, te2),
-
- hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2),
- hir::BiMul => llvm::LLVMConstMul(te1, te2),
-
- hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2),
- hir::BiDiv if signed => llvm::LLVMConstSDiv(te1, te2),
- hir::BiDiv => llvm::LLVMConstUDiv(te1, te2),
-
- hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2),
- hir::BiRem if signed => llvm::LLVMConstSRem(te1, te2),
- hir::BiRem => llvm::LLVMConstURem(te1, te2),
-
- hir::BiAnd => llvm::LLVMConstAnd(te1, te2),
- hir::BiOr => llvm::LLVMConstOr(te1, te2),
- hir::BiBitXor => llvm::LLVMConstXor(te1, te2),
- hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2),
- hir::BiBitOr => llvm::LLVMConstOr(te1, te2),
- hir::BiShl => {
- let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
- llvm::LLVMConstShl(te1, te2)
- },
- hir::BiShr => {
- let te2 = base::cast_shift_const_rhs(b.node, te1, te2);
- if signed { llvm::LLVMConstAShr(te1, te2) }
- else { llvm::LLVMConstLShr(te1, te2) }
- },
- hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => {
- if is_float {
- let cmp = base::bin_op_to_fcmp_predicate(b.node);
- llvm::LLVMConstFCmp(cmp, te1, te2)
- } else {
- let cmp = base::bin_op_to_icmp_predicate(b.node, signed);
- llvm::LLVMConstICmp(cmp, te1, te2)
- }
- },
- } } // unsafe { match b.node {
- },
- hir::ExprUnary(u, ref inner_e) => {
- let (te, ty) = const_expr(cx, &inner_e, param_substs, fn_args, trueconst)?;
-
- check_unary_expr_validity(cx, e, ty, te, trueconst)?;
-
- let is_float = ty.is_fp();
- unsafe { match u {
- hir::UnDeref => const_deref(cx, te, ty).0,
- hir::UnNot => llvm::LLVMConstNot(te),
- hir::UnNeg if is_float => llvm::LLVMConstFNeg(te),
- hir::UnNeg => llvm::LLVMConstNeg(te),
- } }
- },
- hir::ExprField(ref base, field) => {
- let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- let brepr = adt::represent_type(cx, bt);
- let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
- let ix = vinfo.field_index(field.node);
- adt::const_get_field(&brepr, bv, vinfo.discr, ix)
- },
- hir::ExprTupField(ref base, idx) => {
- let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- let brepr = adt::represent_type(cx, bt);
- let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None);
- adt::const_get_field(&brepr, bv, vinfo.discr, idx.node)
- },
- hir::ExprIndex(ref base, ref index) => {
- let (bv, bt) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- let iv = const_expr(cx, &index, param_substs, fn_args, TrueConst::Yes)?.0;
- let iv = if let Some(iv) = const_to_opt_uint(iv) {
- iv
- } else {
- span_bug!(index.span, "index is not an integer-constant expression");
- };
- let (arr, len) = match bt.sty {
- ty::TyArray(_, u) => (bv, C_uint(cx, u)),
- ty::TySlice(..) | ty::TyStr => {
- let e1 = const_get_elt(bv, &[0]);
- (load_const(cx, e1, bt), const_get_elt(bv, &[1]))
- },
- ty::TyRef(_, mt) => match mt.ty.sty {
- ty::TyArray(_, u) => {
- (load_const(cx, bv, mt.ty), C_uint(cx, u))
- },
- _ => span_bug!(base.span,
- "index-expr base must be a vector \
- or string type, found {:?}",
- bt),
- },
- _ => span_bug!(base.span,
- "index-expr base must be a vector \
- or string type, found {:?}",
- bt),
- };
-
- let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as u64 };
- let len = match bt.sty {
- ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty {
- ty::TyStr => {
- assert!(len > 0);
- len - 1
- },
- _ => len,
- },
- _ => len,
- };
- if iv >= len {
- // FIXME #3170: report this earlier on in the const-eval
- // pass. Reporting here is a bit late.
- const_err(cx, e.span, Err(ErrKind::IndexOutOfBounds {
- len: len,
- index: iv
- }), trueconst)?;
- C_undef(val_ty(arr).element_type())
- } else {
- const_get_elt(arr, &[iv as c_uint])
- }
- },
- hir::ExprCast(ref base, _) => {
- let t_cast = ety;
- let llty = type_of::type_of(cx, t_cast);
- let (v, t_expr) = const_expr(cx, &base, param_substs, fn_args, trueconst)?;
- debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast);
- if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) {
- return Ok(v);
- }
- if type_is_fat_ptr(cx.tcx(), t_expr) {
- // Fat pointer casts.
- let t_cast_inner =
- t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty;
- let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
- let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]),
- ptr_ty);
- if type_is_fat_ptr(cx.tcx(), t_cast) {
- let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]);
- return Ok(C_struct(cx, &[addr, info], false))
- } else {
- return Ok(addr);
- }
- }
- unsafe { match (
- CastTy::from_ty(t_expr).expect("bad input type for cast"),
- CastTy::from_ty(t_cast).expect("bad output type for cast"),
- ) {
- (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
- let repr = adt::represent_type(cx, t_expr);
- let discr = adt::const_get_discrim(&repr, v);
- let iv = C_integral(cx.int_type(), discr.0, false);
- let s = adt::is_discr_signed(&repr) as Bool;
- llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
- },
- (CastTy::Int(_), CastTy::Int(_)) => {
- let s = t_expr.is_signed() as Bool;
- llvm::LLVMConstIntCast(v, llty.to_ref(), s)
- },
- (CastTy::Int(_), CastTy::Float) => {
- if t_expr.is_signed() {
- llvm::LLVMConstSIToFP(v, llty.to_ref())
- } else {
- llvm::LLVMConstUIToFP(v, llty.to_ref())
- }
- },
- (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()),
- (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()),
- (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()),
- (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_))
- | (CastTy::RPtr(_), CastTy::Ptr(_)) => {
- ptrcast(v, llty)
- },
- (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion?
- (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()),
- (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => {
- llvm::LLVMConstPtrToInt(v, llty.to_ref())
- },
- _ => {
- span_bug!(e.span, "bad combination of types for cast")
- },
- } } // unsafe { match ( ... ) {
- },
- hir::ExprAddrOf(hir::MutImmutable, ref sub) => {
- // If this is the address of some static, then we need to return
- // the actual address of the static itself (short circuit the rest
- // of const eval).
- let mut cur = sub;
- loop {
- match cur.node {
- hir::ExprBlock(ref blk) => {
- if let Some(ref sub) = blk.expr {
- cur = sub;
- } else {
- break;
- }
- },
- _ => break,
- }
- }
- if let Some(Def::Static(def_id, _)) = cx.tcx().expect_def_or_none(cur.id) {
- get_static(cx, def_id).val
- } else {
- // If this isn't the address of a static, then keep going through
- // normal constant evaluation.
- let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
- addr_of(cx, v, type_of::align_of(cx, ty), "ref")
- }
- },
- hir::ExprAddrOf(hir::MutMutable, ref sub) => {
- let (v, ty) = const_expr(cx, &sub, param_substs, fn_args, trueconst)?;
- addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice")
- },
- hir::ExprTup(ref es) => {
- let repr = adt::represent_type(cx, ety);
- let vals = map_list(&es[..])?;
- adt::trans_const(cx, &repr, Disr(0), &vals[..])
- },
- hir::ExprStruct(_, ref fs, ref base_opt) => {
- let repr = adt::represent_type(cx, ety);
-
- let base_val = match *base_opt {
- Some(ref base) => Some(const_expr(
- cx,
- &base,
- param_substs,
- fn_args,
- trueconst,
- )?),
- None => None
- };
-
- let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id);
- let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| {
- match (fs.iter().find(|f| f_name == f.name.node), base_val) {
- (Some(ref f), _) => {
- const_expr(cx, &f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l)
- },
- (_, Some((bv, _))) => Ok(adt::const_get_field(&repr, bv, discr, ix)),
- (_, None) => span_bug!(e.span, "missing struct field"),
- }
- })
- .collect::<Vec<Result<_, ConstEvalFailure>>>()
- .into_iter()
- .collect::<Result<Vec<_>,ConstEvalFailure>>();
- let cs = cs?;
- if ety.is_simd() {
- C_vector(&cs[..])
- } else {
- adt::trans_const(cx, &repr, discr, &cs[..])
- }
- },
- hir::ExprVec(ref es) => {
- let unit_ty = ety.sequence_element_type(cx.tcx());
- let llunitty = type_of::type_of(cx, unit_ty);
- let vs = es.iter()
- .map(|e| const_expr(
- cx,
- &e,
- param_substs,
- fn_args,
- trueconst,
- ).map(|(l, _)| l))
- .collect::<Vec<Result<_, ConstEvalFailure>>>()
- .into_iter()
- .collect::<Result<Vec<_>, ConstEvalFailure>>();
- let vs = vs?;
- // If the vector contains enums, an LLVM array won't work.
- if vs.iter().any(|vi| val_ty(*vi) != llunitty) {
- C_struct(cx, &vs[..], false)
- } else {
- C_array(llunitty, &vs[..])
- }
- },
- hir::ExprRepeat(ref elem, ref count) => {
- let unit_ty = ety.sequence_element_type(cx.tcx());
- let llunitty = type_of::type_of(cx, unit_ty);
- let n = eval_length(cx.tcx(), count, "repeat count").unwrap();
- let unit_val = const_expr(cx, &elem, param_substs, fn_args, trueconst)?.0;
- let vs = vec![unit_val; n];
- if val_ty(unit_val) != llunitty {
- C_struct(cx, &vs[..], false)
- } else {
- C_array(llunitty, &vs[..])
- }
- },
- hir::ExprPath(..) => {
- match cx.tcx().expect_def(e.id) {
- Def::Local(_, id) => {
- if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) {
- val
- } else {
- span_bug!(e.span, "const fn argument not found")
- }
- }
- Def::Fn(..) | Def::Method(..) => C_nil(cx),
- Def::Const(def_id) | Def::AssociatedConst(def_id) => {
- load_const(cx, get_const_val(cx, def_id, e, param_substs)?,
- ety)
- }
- Def::Variant(enum_did, variant_did) => {
- let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
- match vinfo.kind {
- ty::VariantKind::Unit => {
- let repr = adt::represent_type(cx, ety);
- adt::trans_const(cx, &repr, Disr::from(vinfo.disr_val), &[])
- }
- ty::VariantKind::Tuple => C_nil(cx),
- ty::VariantKind::Struct => {
- span_bug!(e.span, "path-expr refers to a dict variant!")
- }
- }
- }
- // Unit struct or ctor.
- Def::Struct(..) => C_null(type_of::type_of(cx, ety)),
- _ => {
- span_bug!(e.span, "expected a const, fn, struct, \
- or variant def")
- }
- }
- },
- hir::ExprCall(ref callee, ref args) => {
- let mut callee = &**callee;
- loop {
- callee = match callee.node {
- hir::ExprBlock(ref block) => match block.expr {
- Some(ref tail) => &tail,
- None => break,
- },
- _ => break,
- };
- }
- let arg_vals = map_list(args)?;
- match cx.tcx().expect_def(callee.id) {
- Def::Fn(did) | Def::Method(did) => {
- const_fn_call(
- cx,
- did,
- cx.tcx().node_id_item_substs(callee.id).substs,
- &arg_vals,
- param_substs,
- trueconst,
- )?
- }
- Def::Struct(..) => {
- if ety.is_simd() {
- C_vector(&arg_vals[..])
- } else {
- let repr = adt::represent_type(cx, ety);
- adt::trans_const(cx, &repr, Disr(0), &arg_vals[..])
- }
- }
- Def::Variant(enum_did, variant_did) => {
- let repr = adt::represent_type(cx, ety);
- let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did);
- adt::trans_const(cx,
- &repr,
- Disr::from(vinfo.disr_val),
- &arg_vals[..])
- }
- _ => span_bug!(e.span, "expected a struct, variant, or const fn def"),
- }
- },
- hir::ExprMethodCall(_, _, ref args) => {
- let arg_vals = map_list(args)?;
- let method_call = ty::MethodCall::expr(e.id);
- let method = cx.tcx().tables.borrow().method_map[&method_call];
- const_fn_call(cx, method.def_id, method.substs,
- &arg_vals, param_substs, trueconst)?
- },
- hir::ExprType(ref e, _) => const_expr(cx, &e, param_substs, fn_args, trueconst)?.0,
- hir::ExprBlock(ref block) => {
- match block.expr {
- Some(ref expr) => const_expr(
- cx,
- &expr,
- param_substs,
- fn_args,
- trueconst,
- )?.0,
- None => C_nil(cx),
- }
- },
- hir::ExprClosure(_, ref decl, ref body, _) => {
- match ety.sty {
- ty::TyClosure(def_id, substs) => {
- closure::trans_closure_expr(closure::Dest::Ignore(cx),
- decl,
- body,
- e.id,
- def_id,
- substs);
- }
- _ =>
- span_bug!(
- e.span,
- "bad type for closure expr: {:?}", ety)
- }
- C_null(type_of::type_of(cx, ety))
- },
- _ => span_bug!(e.span,
- "bad constant expression type in consts::const_expr"),
- })
-}
-
-pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
- -> Datum<'tcx, Lvalue> {
- let ty = ccx.tcx().lookup_item_type(def_id).ty;
-
+pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
let instance = Instance::mono(ccx.shared(), def_id);
if let Some(&g) = ccx.instances().borrow().get(&instance) {
- return Datum::new(g, ty, Lvalue::new("static"));
+ return g;
}
+ let ty = ccx.tcx().lookup_item_type(def_id).ty;
let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) {
let llty = type_of::type_of(ccx, ty);
ccx.instances().borrow_mut().insert(instance, g);
ccx.statics().borrow_mut().insert(g, def_id);
- Datum::new(g, ty, Lvalue::new("static"))
+ g
}
pub fn trans_static(ccx: &CrateContext,
m: hir::Mutability,
- expr: &hir::Expr,
id: ast::NodeId,
attrs: &[ast::Attribute])
-> Result<ValueRef, ConstEvalErr> {
unsafe {
let _icx = push_ctxt("trans_static");
let def_id = ccx.tcx().map.local_def_id(id);
- let datum = get_static(ccx, def_id);
+ let g = get_static(ccx, def_id);
- let use_mir = true;
-
- let v = if use_mir {
- ::mir::trans_static_initializer(ccx, def_id)
- } else {
- let empty_substs = Substs::empty(ccx.tcx());
- const_expr(ccx, expr, empty_substs, None, TrueConst::Yes)
- .map(|(v, _)| v)
- }.map_err(|e| e.into_inner())?;
+ let v = ::mir::trans_static_initializer(ccx, def_id)?;
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
v
};
- let llty = type_of::type_of(ccx, datum.ty);
+ let ty = ccx.tcx().lookup_item_type(def_id).ty;
+ let llty = type_of::type_of(ccx, ty);
let g = if val_llty == llty {
- datum.val
+ g
} else {
// If we created the global with the wrong type,
// correct the type.
let empty_string = CString::new("").unwrap();
- let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val));
+ let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g));
let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
- llvm::LLVMSetValueName(datum.val, empty_string.as_ptr());
+ llvm::LLVMSetValueName(g, empty_string.as_ptr());
let new_g = llvm::LLVMRustGetOrInsertGlobal(
ccx.llmod(), name_string.as_ptr(), val_llty.to_ref());
// To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it
// with the new global later. (See base::trans_crate.)
- ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g));
+ ccx.statics_to_rauw().borrow_mut().push((g, new_g));
new_g
};
- llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty));
+ llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty));
llvm::LLVMSetInitializer(g, v);
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if m != hir::MutMutable {
- let tcontents = datum.ty.type_contents(ccx.tcx());
+ let tcontents = ty.type_contents(ccx.tcx());
if !tcontents.interior_unsafe() {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
tcx: TyCtxt<'a, 'tcx, 'tcx>,
stats: Stats,
check_overflow: bool,
- check_drop_flag_for_sanity: bool,
mir_map: &'a MirMap<'tcx>,
mir_cache: RefCell<DepTrackingMap<MirCache<'tcx>>>,
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet,
- check_overflow: bool,
- check_drop_flag_for_sanity: bool)
+ check_overflow: bool)
-> SharedCrateContext<'b, 'tcx> {
let (metadata_llcx, metadata_llmod) = unsafe {
create_context_and_module(&tcx.sess, "metadata")
fn_stats: RefCell::new(Vec::new()),
},
check_overflow: check_overflow,
- check_drop_flag_for_sanity: check_drop_flag_for_sanity,
use_dll_storage_attrs: use_dll_storage_attrs,
translation_items: RefCell::new(FnvHashSet()),
trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())),
self.shared.check_overflow
}
- pub fn check_drop_flag_for_sanity(&self) -> bool {
- // This controls whether we emit a conditional llvm.debugtrap
- // guarded on whether the dropflag is one of its (two) valid
- // values.
- self.shared.check_drop_flag_for_sanity
- }
-
pub fn use_dll_storage_attrs(&self) -> bool {
self.shared.use_dll_storage_attrs()
}
+++ /dev/null
-use llvm::ValueRef;
-use rustc::hir::def::Def;
-use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
-use rustc::ty::subst::Substs;
-use base::*;
-use basic_block::BasicBlock;
-use build::*;
-use callee::{Callee, ArgVals};
-use cleanup::CleanupMethods;
-use cleanup;
-use common::*;
-use consts;
-use debuginfo;
-use debuginfo::{DebugLoc, ToDebugLoc};
-use expr;
-use machine;
-
-use rustc::hir;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use syntax::parse::token;
-
-pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
- s: &hir::Stmt)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_stmt");
- let fcx = cx.fcx;
- debug!("trans_stmt({:?})", s);
-
- if cx.unreachable.get() {
- return cx;
- }
-
- if cx.sess().asm_comments() {
- add_span_comment(cx, s.span, &format!("{:?}", s));
- }
-
- let mut bcx = cx;
-
- let id = s.node.id();
- let cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
- match s.node {
- hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => {
- bcx = trans_stmt_semi(bcx, &e);
- }
- hir::StmtDecl(ref d, _) => {
- match d.node {
- hir::DeclLocal(ref local) => {
- bcx = init_local(bcx, &local);
- debuginfo::create_local_var_metadata(bcx, &local);
- }
- // Inner items are visited by `trans_item`/`trans_meth`.
- hir::DeclItem(_) => {},
- }
- }
- }
-
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, s.node.id());
-
- return bcx;
-}
-
-pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_stmt_semi");
-
- if cx.unreachable.get() {
- return cx;
- }
-
- let ty = expr_ty(cx, e);
- if cx.fcx.type_needs_drop(ty) {
- expr::trans_to_lvalue(cx, e, "stmt").bcx
- } else {
- expr::trans_into(cx, e, expr::Ignore)
- }
-}
-
-pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- b: &hir::Block,
- mut dest: expr::Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_block");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
- let mut bcx = bcx;
-
- let cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
- for s in &b.stmts {
- bcx = trans_stmt(bcx, s);
- }
-
- if dest != expr::Ignore {
- let block_ty = node_id_type(bcx, b.id);
-
- if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) {
- dest = expr::Ignore;
- } else if b.expr.is_some() {
- // If the block has an expression, but that expression isn't reachable,
- // don't save into the given destination; ignore it.
- if let Some(ref cfg) = bcx.fcx.cfg {
- if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) {
- dest = expr::Ignore;
- }
- }
- }
- }
-
- match b.expr {
- Some(ref e) => {
- if !bcx.unreachable.get() {
- bcx = expr::trans_into(bcx, &e, dest);
- }
- }
- None => {
- assert!(dest == expr::Ignore || bcx.unreachable.get());
- }
- }
-
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id);
-
- return bcx;
-}
-
-pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- if_id: ast::NodeId,
- cond: &hir::Expr,
- thn: &hir::Block,
- els: Option<&hir::Expr>,
- dest: expr::Dest)
- -> Block<'blk, 'tcx> {
- debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})",
- bcx.to_str(), if_id, cond, thn.id, dest);
- let _icx = push_ctxt("trans_if");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let mut bcx = bcx;
-
- let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool());
-
- // Drop branches that are known to be impossible
- if let Some(cv) = const_to_opt_uint(cond_val) {
- if cv == 1 {
- // if true { .. } [else { .. }]
- bcx = trans_block(bcx, &thn, dest);
- DebugLoc::None.apply(bcx.fcx);
- } else {
- if let Some(elexpr) = els {
- bcx = expr::trans_into(bcx, &elexpr, dest);
- DebugLoc::None.apply(bcx.fcx);
- }
- }
-
- return bcx;
- }
-
- let name = format!("then-block-{}-", thn.id);
- let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id);
- let then_bcx_out = trans_block(then_bcx_in, &thn, dest);
- DebugLoc::None.apply(bcx.fcx);
-
- let cond_source_loc = cond.debug_loc();
-
- let next_bcx;
- match els {
- Some(elexpr) => {
- let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id);
- let else_bcx_out = expr::trans_into(else_bcx_in, &elexpr, dest);
- next_bcx = bcx.fcx.join_blocks(if_id,
- &[then_bcx_out, else_bcx_out]);
- CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc);
- }
-
- None => {
- next_bcx = bcx.fcx.new_id_block("next-block", if_id);
- Br(then_bcx_out, next_bcx.llbb, DebugLoc::None);
- CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc);
- }
- }
-
- // Clear the source location because it is still set to whatever has been translated
- // right before.
- DebugLoc::None.apply(next_bcx.fcx);
-
- next_bcx
-}
-
-pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- loop_expr: &hir::Expr,
- cond: &hir::Expr,
- body: &hir::Block)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_while");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
-
- //                bcx
- //                 |
- //           cond_bcx_in  <--------+
- //             |                   |
- //           cond_bcx_out          |
- //             |      |            |
- //             |    body_bcx_in    |
- //      cleanup_blk    |           |
- //          |    body_bcx_out  ----+
- //     next_bcx_in
-
- let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id);
- let cond_bcx_in = fcx.new_id_block("while_cond", cond.id);
- let body_bcx_in = fcx.new_id_block("while_body", body.id);
-
- fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]);
-
- Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc());
-
- // compile the block where we will handle loop cleanups
- let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK);
-
- // compile the condition
- let Result {bcx: cond_bcx_out, val: cond_val} =
- expr::trans(cond_bcx_in, cond).to_llbool();
-
- CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc());
-
- // loop body:
- let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
- Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None);
-
- fcx.pop_loop_cleanup_scope(loop_expr.id);
- return next_bcx_in;
-}
-
-pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- loop_expr: &hir::Expr,
- body: &hir::Block)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_loop");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
-
- // bcx
- // |
- // body_bcx_in
- // |
- // body_bcx_out
- //
- // next_bcx
- //
- // Links between body_bcx_in and next_bcx are created by
- // break statements.
-
- let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id);
- let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id);
-
- fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]);
-
- Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc());
- let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
- Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None);
-
- fcx.pop_loop_cleanup_scope(loop_expr.id);
-
- // If there are no predecessors for the next block, we just translated an endless loop and the
- // next block is unreachable
- if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() {
- Unreachable(next_bcx_in);
- }
-
- return next_bcx_in;
-}
-
-pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- opt_label: Option<ast::Name>,
- exit: usize)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_break_cont");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
-
- // Locate loop that we will break to
- let loop_id = match opt_label {
- None => fcx.top_loop_scope(),
- Some(_) => {
- match bcx.tcx().expect_def(expr.id) {
- Def::Label(loop_id) => loop_id,
- r => {
- bug!("{:?} in def-map for label", r)
- }
- }
- }
- };
-
- // Generate appropriate cleanup code and branch
- let cleanup_llbb = fcx.normal_exit_block(loop_id, exit);
- Br(bcx, cleanup_llbb, expr.debug_loc());
- Unreachable(bcx); // anything afterwards should be ignored
- return bcx;
-}
-
-pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- label_opt: Option<ast::Name>)
- -> Block<'blk, 'tcx> {
- return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK);
-}
-
-pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- label_opt: Option<ast::Name>)
- -> Block<'blk, 'tcx> {
- return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP);
-}
-
-pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- return_expr: &hir::Expr,
- retval_expr: Option<&hir::Expr>)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_ret");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
- let mut bcx = bcx;
- if let Some(x) = retval_expr {
- let dest = if fcx.llretslotptr.get().is_some() {
- expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
- } else {
- expr::Ignore
- };
- bcx = expr::trans_into(bcx, &x, dest);
- match dest {
- expr::SaveIn(slot) if fcx.needs_ret_allocas => {
- Store(bcx, slot, fcx.llretslotptr.get().unwrap());
- }
- _ => {}
- }
- }
- let cleanup_llbb = fcx.return_exit_block();
- Br(bcx, cleanup_llbb, return_expr.debug_loc());
- Unreachable(bcx);
- return bcx;
-}
-
-pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- fail_str: InternedString)
- -> Block<'blk, 'tcx> {
- let ccx = bcx.ccx();
- let _icx = push_ctxt("trans_fail_value");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let v_str = C_str_slice(ccx, fail_str);
- let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
- let filename = token::intern_and_get_ident(&loc.file.name);
- let filename = C_str_slice(ccx, filename);
- let line = C_u32(ccx, loc.line as u32);
- let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false);
- let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const));
- let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
- let args = vec!(expr_file_line);
- let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicFnLangItem);
- Callee::def(ccx, did, Substs::empty(ccx.tcx()))
- .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
-}
-
-pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- call_info: NodeIdAndSpan,
- index: ValueRef,
- len: ValueRef)
- -> Block<'blk, 'tcx> {
- let ccx = bcx.ccx();
- let _icx = push_ctxt("trans_fail_bounds_check");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- // Extract the file/line from the span
- let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo);
- let filename = token::intern_and_get_ident(&loc.file.name);
-
- // Invoke the lang item
- let filename = C_str_slice(ccx, filename);
- let line = C_u32(ccx, loc.line as u32);
- let file_line_const = C_struct(ccx, &[filename, line], false);
- let align = machine::llalign_of_min(ccx, val_ty(file_line_const));
- let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
- let args = vec!(file_line, index, len);
- let did = langcall(bcx.tcx(), Some(call_info.span), "", PanicBoundsCheckFnLangItem);
- Callee::def(ccx, did, Substs::empty(ccx.tcx()))
- .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
-}
+++ /dev/null
-//! ## The Datum module
-//!
-//! A `Datum` encapsulates the result of evaluating a Rust expression. It
-//! contains a `ValueRef` indicating the result, a `Ty` describing
-//! the Rust type, but also a *kind*. The kind indicates whether the datum
-//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of
-//! rvalues -- whether or not the value is "by ref" or "by value".
-//!
-//! The datum API is designed to try and help you avoid memory errors like
-//! forgetting to arrange cleanup or duplicating a value. The type of the
-//! datum incorporates the kind, and thus reflects whether it has cleanup
-//! scheduled:
-//!
-//! - `Datum<Lvalue>` -- by ref, cleanup scheduled
-//! - `Datum<Rvalue>` -- by value or by ref, no cleanup scheduled
-//! - `Datum<Expr>` -- either `Datum<Lvalue>` or `Datum<Rvalue>`
-//!
-//! Rvalue and expr datums are noncopyable, and most of the methods on
-//! datums consume the datum itself (with some notable exceptions). This
-//! reflects the fact that datums may represent affine values which ought
-//! to be consumed exactly once, and if you were to try to (for example)
-//! store an affine value multiple times, you would be duplicating it,
-//! which would certainly be a bug.
-//!
-//! Some of the datum methods, however, are designed to work only on
-//! copyable values such as ints or pointers. Those methods may borrow the
-//! datum (`&self`) rather than consume it, but they always include
-//! assertions on the type of the value represented to check that this
-//! makes sense. An example is `shallow_copy()`, which duplicates
-//! a datum value.
-//!
-//! Translating an expression always yields a `Datum<Expr>` result, but
-//! the methods `to_[lr]value_datum()` can be used to coerce a
-//! `Datum<Expr>` into a `Datum<Lvalue>` or `Datum<Rvalue>` as
-//! needed. Coercing to an lvalue is fairly common, and generally occurs
-//! whenever it is necessary to inspect a value and pull out its
-//! subcomponents (for example, a match, or indexing expression). Coercing
-//! to an rvalue is more unusual; it occurs when moving values from place
-//! to place, such as in an assignment expression or parameter passing.
-//!
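-//! As a minimal sketch (the method names follow the real API, but the
-//! snippet itself is illustrative, not lifted from the compiler):
-//!
-//!     // Translate an expression, then force the result into memory so
-//!     // that a field can be projected out of it:
-//!     let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
-//!     let datum = unpack_datum!(
-//!         bcx, datum.to_lvalue_datum(bcx, "field", expr.id));
-//!     // datum.val is now a pointer to the value, with cleanup scheduled.
-//!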
-//! ### Lvalues in detail
-//!
-//! An lvalue datum is one for which cleanup has been scheduled. Lvalue
-//! datums are always located in memory, and thus the `ValueRef` for an
-//! LLVM value is always a pointer to the actual Rust value. This means
-//! that if the Datum has a Rust type of `int`, then the LLVM type of the
-//! `ValueRef` will be `int*` (pointer to int).
-//!
-//! Because lvalues already have cleanups scheduled, the memory must be
-//! zeroed to prevent the cleanup from taking place (presuming that the
-//! Rust type needs drop in the first place, otherwise it doesn't
-//! matter). The Datum code automatically performs this zeroing when the
-//! value is stored to a new location, for example.
-//!
-//! Lvalues usually result from evaluating lvalue expressions. For
-//! example, evaluating a local variable `x` yields an lvalue, as does a
-//! reference to a field like `x.f` or an index `x[i]`.
-//!
-//! Lvalue datums can also arise by *converting* an rvalue into an lvalue.
-//! This is done with the `to_lvalue_datum` method defined on
-//! `Datum<Expr>`. Basically this method just schedules cleanup if the
-//! datum is an rvalue, possibly storing the value into a stack slot first
-//! if needed. Converting rvalues into lvalues occurs in constructs like
-//! `&foo()` or `match foo() { ref x => ... }`, where the user is
-//! implicitly requesting a temporary.
-//!
-//! ### Rvalues in detail
-//!
-//! Rvalues datums are values with no cleanup scheduled. One must be
-//! careful with rvalue datums to ensure that cleanup is properly
-//! arranged, usually by converting to an lvalue datum or by invoking the
-//! `add_clean` method.
-//!
-//! ### Scratch datums
-//!
-//! Sometimes you need some temporary scratch space. The functions
-//! `[lr]value_scratch_datum()` can be used to get temporary stack
-//! space. As their name suggests, they yield lvalues and rvalues
-//! respectively. That is, the slot from `lvalue_scratch_datum` will have
-//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not.
-
-pub use self::Expr::*;
-pub use self::RvalueMode::*;
-
-use llvm::ValueRef;
-use adt;
-use base::*;
-use build::{Load, Store};
-use common::*;
-use cleanup;
-use cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
-use expr;
-use tvec;
-use value::Value;
-use rustc::ty::Ty;
-
-use std::fmt;
-use syntax::ast;
-use syntax_pos::DUMMY_SP;
-
-/// A `Datum` encapsulates the result of evaluating an expression. It
-/// describes where the value is stored, what Rust type the value has,
-/// whether it is addressed by reference, and so forth. Please refer to
-/// the section on datums in `README.md` for more details.
-#[derive(Clone, Copy)]
-pub struct Datum<'tcx, K> {
- /// The llvm value. This is either a pointer to the Rust value or
- /// the value itself, depending on `kind` below.
- pub val: ValueRef,
-
- /// The rust type of the value.
- pub ty: Ty<'tcx>,
-
- /// Indicates whether this is by-ref or by-value.
- pub kind: K,
-}
-
-impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "Datum({:?}, {:?}, {:?})",
- Value(self.val), self.ty, self.kind)
- }
-}
-
-pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
- pub bcx: Block<'blk, 'tcx>,
- pub datum: Datum<'tcx, K>,
-}
-
-#[derive(Debug)]
-pub enum Expr {
- /// a fresh value that was produced and which has no cleanup yet
- /// because it has not yet "landed" into its permanent home
- RvalueExpr(Rvalue),
-
- /// `val` is a pointer into memory for which a cleanup is scheduled
- /// (and thus has type *T). If you move out of an Lvalue, you must
- /// zero out the memory (FIXME #5016).
- LvalueExpr(Lvalue),
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum DropFlagInfo {
- DontZeroJustUse(ast::NodeId),
- ZeroAndMaintain(ast::NodeId),
- None,
-}
-
-impl DropFlagInfo {
- pub fn must_zero(&self) -> bool {
- match *self {
- DropFlagInfo::DontZeroJustUse(..) => false,
- DropFlagInfo::ZeroAndMaintain(..) => true,
- DropFlagInfo::None => true,
- }
- }
-
- pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
- -> Option<DropHintDatum<'tcx>> {
- let id = match *self {
- DropFlagInfo::None => return None,
- DropFlagInfo::DontZeroJustUse(id) |
- DropFlagInfo::ZeroAndMaintain(id) => id,
- };
-
- let hints = bcx.fcx.lldropflag_hints.borrow();
- let retval = hints.hint_datum(id);
- assert!(retval.is_some(), "An id (={}) means there must be a hint", id);
- retval
- }
-}
-
-// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients
-// may not realize that subparts of an Lvalue can have a subset of
-// drop-flags associated with them, while this as written will just
-// memcpy the drop_flag_info. But, it is an easier way to get `_match`
-// off the ground to just let this be `Copy` for now.
-#[derive(Copy, Clone, Debug)]
-pub struct Lvalue {
- pub source: &'static str,
- pub drop_flag_info: DropFlagInfo
-}
-
-#[derive(Debug)]
-pub struct Rvalue {
- pub mode: RvalueMode
-}
-
-/// Classifies what action we should take when a value is moved away
-/// with respect to its drop-flag.
-///
-/// Long term there will be no need for this classification: all flags
-/// (which will be stored on the stack frame) will have the same
-/// interpretation and maintenance code associated with them.
-#[derive(Copy, Clone, Debug)]
-pub enum HintKind {
- /// When the value is moved, set the drop-flag to "dropped"
- /// (i.e. "zero the flag", even when the specific representation
- /// is not literally 0) and when it is reinitialized, set the
- /// drop-flag back to "initialized".
- ZeroAndMaintain,
-
- /// When the value is moved, do not set the drop-flag to "dropped".
- /// However, continue to read the drop-flag in deciding whether to
- /// drop. (In essence, the path/fragment in question will never
- /// need to be dropped at the points where it is moved away by
- /// this code, but we are defending against the scenario where
- /// some *other* code could move away, or drop, the value and thus
- /// zero the flag; that is why we still read from it.)
- DontZeroJustUse,
-}
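-
-// An illustrative (hypothetical) contrast between the two kinds:
-//
-//     let b = Box::new(0);
-//     if cond { consume(b); }   // conditional move: the flag is written
-//                               // ("zeroed") at the move and read again
-//                               // at scope end -- ZeroAndMaintain
-//
-// whereas a fragment whose moves never require a drop at the move points
-// only needs its flag read defensively, never written -- DontZeroJustUse.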
-
-impl Lvalue { // Constructors for various Lvalues.
- pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue {
- debug!("Lvalue at {} no drop flag info", source);
- Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
- }
-
- pub fn new_dropflag_hint(source: &'static str) -> Lvalue {
- debug!("Lvalue at {} is drop flag hint", source);
- Lvalue { source: source, drop_flag_info: DropFlagInfo::None }
- }
-
- pub fn new_with_hint<'blk, 'tcx>(source: &'static str,
- bcx: Block<'blk, 'tcx>,
- id: ast::NodeId,
- k: HintKind) -> Lvalue {
- let (opt_id, info) = {
- let hint_available = Lvalue::has_dropflag_hint(bcx, id) &&
- bcx.tcx().sess.nonzeroing_move_hints();
- let info = match k {
- HintKind::ZeroAndMaintain if hint_available =>
- DropFlagInfo::ZeroAndMaintain(id),
- HintKind::DontZeroJustUse if hint_available =>
- DropFlagInfo::DontZeroJustUse(id),
- _ =>
- DropFlagInfo::None,
- };
- (Some(id), info)
- };
- debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info);
- Lvalue { source: source, drop_flag_info: info }
- }
-} // end Lvalue constructor methods.
-
-impl Lvalue {
- fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- id: ast::NodeId) -> bool {
- let hints = bcx.fcx.lldropflag_hints.borrow();
- hints.has_hint(id)
- }
- pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>)
- -> Option<DropHintDatum<'tcx>> {
- self.drop_flag_info.hint_datum(bcx)
- }
-}
-
-impl Rvalue {
- pub fn new(m: RvalueMode) -> Rvalue {
- Rvalue { mode: m }
- }
-}
-
-// Make Datum<Rvalue> linear for more type safety: this empty Drop impl
-// prevents Rvalue (and hence Datum<'tcx, Rvalue>) from being Copy.
-impl Drop for Rvalue {
- fn drop(&mut self) { }
-}
-
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
-pub enum RvalueMode {
- /// `val` is a pointer to the actual value (and thus has type *T)
- ByRef,
-
- /// `val` is the actual value (*only used for immediates* like ints, ptrs)
- ByValue,
-}
-
-pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> {
- return Datum::new(val, ty, Rvalue::new(ByValue));
-}
-
-pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- return DatumBlock::new(bcx, immediate_rvalue(val, ty))
-}
-
-/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
-/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should
-/// initialize the memory.
-///
-/// The flag `zero` indicates how the temporary space itself should be
-/// initialized at the outset of the function; the only time that
-/// `InitAlloca::Uninit` is a valid value for `zero` is when the
-/// caller can prove that either (1.) the code injected by `populate`
-/// onto `bcx` always dominates the end of `scope`, or (2.) the data
-/// being allocated has no associated destructor.
-pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- ty: Ty<'tcx>,
- name: &str,
- zero: InitAlloca,
- scope: cleanup::ScopeId,
- populate: F)
- -> DatumBlock<'blk, 'tcx, Lvalue> where
- F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
-{
- // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed.
- // (See discussion at Issue 30530.)
- let scratch = alloc_ty_init(bcx, ty, zero, name);
- debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}",
- scope, Value(scratch), ty);
-
- // Subtle. Populate the scratch memory *before* scheduling cleanup.
- let bcx = populate(bcx, scratch);
- bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
-
- DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
-}
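The ordering subtlety above, reduced to a standalone sketch: if the drop were scheduled before `populate` ran, an unwind out of `populate` would drop uninitialized memory. The `Scope` here is a hypothetical stand-in for the real cleanup machinery:

    struct Scope { cleanups: Vec<&'static str> }

    fn lvalue_scratch<F: FnOnce(&mut [u8])>(scope: &mut Scope, populate: F) -> Vec<u8> {
        let mut mem = vec![0u8; 16];          // the alloca
        populate(&mut mem);                   // 1. initialize the memory first
        scope.cleanups.push("drop scratch");  // 2. only then schedule its drop
        mem
    }

    fn main() {
        let mut scope = Scope { cleanups: vec![] };
        let mem = lvalue_scratch(&mut scope, |m| m[0] = 1);
        assert_eq!((mem[0], scope.cleanups.len()), (1, 1));
    }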
-
-/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to
-/// it. The memory is not zeroed, and no cleanup is scheduled: you must arrange any cleanups
-/// etc. yourself!
-pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ty: Ty<'tcx>,
- name: &str)
- -> Datum<'tcx, Rvalue> {
- let scratch = alloc_ty(bcx, ty, name);
- call_lifetime_start(bcx, scratch);
- Datum::new(scratch, ty, Rvalue::new(ByRef))
-}
-
-/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending
-/// on whether the type is immediate or not.
-pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- ty: Ty<'tcx>) -> RvalueMode {
- if type_is_immediate(ccx, ty) {
- ByValue
- } else {
- ByRef
- }
-}
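A standalone sketch of that decision, assuming "immediate" means a word-sized scalar; the real `type_is_immediate` also considers zero-sized types, pairs, and the target's word size:

    #[derive(Debug, PartialEq)]
    enum RvalueMode { ByRef, ByValue }

    fn appropriate_rvalue_mode(size_bytes: usize, is_scalar: bool) -> RvalueMode {
        // Assumption: scalars up to one 64-bit word travel in registers.
        if is_scalar && size_bytes <= 8 { RvalueMode::ByValue } else { RvalueMode::ByRef }
    }

    fn main() {
        assert_eq!(appropriate_rvalue_mode(8, true), RvalueMode::ByValue);  // u64
        assert_eq!(appropriate_rvalue_mode(24, false), RvalueMode::ByRef);  // big struct
    }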
-
-fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
- fcx: &FunctionContext<'a, 'tcx>,
- scope: cleanup::ScopeId,
- val: ValueRef,
- ty: Ty<'tcx>) {
- debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}",
- scope, Value(val), ty);
- match mode {
- ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
- ByRef => {
- fcx.schedule_lifetime_end(scope, val);
- fcx.schedule_drop_mem(scope, val, ty, None);
- }
- }
-}
-
-pub trait KindOps {
-
- /// Take appropriate action after the value in `datum` has been
- /// stored to a new location.
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> Block<'blk, 'tcx>;
-
- /// True if this mode is a reference mode, meaning that the datum's
- /// val field is a pointer to the actual value
- fn is_by_ref(&self) -> bool;
-
- /// Converts to an Expr kind
- fn to_expr_kind(self) -> Expr;
-
-}
-
-impl KindOps for Rvalue {
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- _val: ValueRef,
- _ty: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- // No cleanup is scheduled for an rvalue, so we don't have
- // to do anything after a move to cancel or duplicate it.
- if self.is_by_ref() {
- call_lifetime_end(bcx, _val);
- }
- bcx
- }
-
- fn is_by_ref(&self) -> bool {
- self.mode == ByRef
- }
-
- fn to_expr_kind(self) -> Expr {
- RvalueExpr(self)
- }
-}
-
-impl KindOps for Lvalue {
- /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel
- /// cleanup. If an @T lvalue is copied, we must increment the reference count.
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("<Lvalue as KindOps>::post_store");
- if bcx.fcx.type_needs_drop(ty) {
- // cancel cleanup of affine values:
- // 1. if it has drop-hint, mark as moved; then code
- // aware of drop-hint won't bother calling the
- // drop-glue itself.
- if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) {
- let moved_hint_byte = adt::DTOR_MOVED_HINT;
- let hint_llval = hint_datum.to_value().value();
- Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval);
- }
- // 2. if the drop info says it's necessary, drop-fill the memory.
- if self.drop_flag_info.must_zero() {
- let () = drop_done_fill_mem(bcx, val, ty);
- }
- bcx
- } else {
- // FIXME (#5016) would be nice to assert this, but we have
- // to allow for e.g. DontZeroJustUse flags, for now.
- //
- // (The dropflag hint construction should be taking
- // !type_needs_drop into account; earlier analysis phases
- // may not have all the info they need to include such
- // information properly, I think; in particular the
- // fragments analysis works on a non-monomorphized view of
- // the code.)
- //
- // assert_eq!(self.drop_flag_info, DropFlagInfo::None);
- bcx
- }
- }
-
- fn is_by_ref(&self) -> bool {
- true
- }
-
- fn to_expr_kind(self) -> Expr {
- LvalueExpr(self)
- }
-}
-
-impl KindOps for Expr {
- fn post_store<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- val: ValueRef,
- ty: Ty<'tcx>)
- -> Block<'blk, 'tcx> {
- match *self {
- LvalueExpr(ref l) => l.post_store(bcx, val, ty),
- RvalueExpr(ref r) => r.post_store(bcx, val, ty),
- }
- }
-
- fn is_by_ref(&self) -> bool {
- match *self {
- LvalueExpr(ref l) => l.is_by_ref(),
- RvalueExpr(ref r) => r.is_by_ref()
- }
- }
-
- fn to_expr_kind(self) -> Expr {
- self
- }
-}
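The contrast between the `post_store` implementations can be modeled standalone, with strings standing in for the LLVM intrinsic call and the drop-fill write:

    trait KindOps { fn post_store(&self) -> &'static str; }

    struct Rvalue { by_ref: bool }
    struct Lvalue { must_zero: bool }

    impl KindOps for Rvalue {
        fn post_store(&self) -> &'static str {
            // No cleanup was scheduled, so a move needs no bookkeeping
            // beyond ending the lifetime of a by-ref temporary.
            if self.by_ref { "llvm.lifetime.end" } else { "nothing" }
        }
    }

    impl KindOps for Lvalue {
        fn post_store(&self) -> &'static str {
            // A scheduled cleanup must be cancelled: drop-fill the source.
            if self.must_zero { "drop_done_fill_mem" } else { "nothing" }
        }
    }

    fn main() {
        assert_eq!(Rvalue { by_ref: true }.post_store(), "llvm.lifetime.end");
        assert_eq!(Lvalue { must_zero: true }.post_store(), "drop_done_fill_mem");
    }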
-
-impl<'tcx> Datum<'tcx, Rvalue> {
- /// Schedules a cleanup for this datum in the given scope. That means that this datum is no
- /// longer an rvalue datum; hence, this function consumes the datum and returns the contained
- /// ValueRef.
- pub fn add_clean<'a>(self,
- fcx: &FunctionContext<'a, 'tcx>,
- scope: cleanup::ScopeId)
- -> ValueRef {
- add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty);
- self.val
- }
-
- /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not
- /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`.
- pub fn to_lvalue_datum_in_scope<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- name: &str,
- scope: cleanup::ScopeId)
- -> DatumBlock<'blk, 'tcx, Lvalue> {
- let fcx = bcx.fcx;
-
- match self.kind.mode {
- ByRef => {
- add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty);
- DatumBlock::new(bcx, Datum::new(
- self.val,
- self.ty,
- Lvalue::new("datum::to_lvalue_datum_in_scope")))
- }
-
- ByValue => {
- lvalue_scratch_datum(
- bcx, self.ty, name, InitAlloca::Dropped, scope,
- |bcx, llval| {
- debug!("populate call for Datum::to_lvalue_datum_in_scope \
- self.ty={:?}", self.ty);
- // do not call_lifetime_start here; the
- // `InitAlloca::Dropped` will start the scratch
- // value's lifetime at the opening of the function body.
- let bcx = self.store_to(bcx, llval);
- bcx.fcx.schedule_lifetime_end(scope, llval);
- bcx
- })
- }
- }
- }
-
- pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- let mut bcx = bcx;
- match self.kind.mode {
- ByRef => DatumBlock::new(bcx, self),
- ByValue => {
- let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref");
- bcx = self.store_to(bcx, scratch.val);
- DatumBlock::new(bcx, scratch)
- }
- }
- }
-
- pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- match self.appropriate_rvalue_mode(bcx.ccx()) {
- ByRef => {
- self.to_ref_datum(bcx)
- }
- ByValue => {
- match self.kind.mode {
- ByValue => DatumBlock::new(bcx, self),
- ByRef => {
- let llval = load_ty(bcx, self.val, self.ty);
- call_lifetime_end(bcx, self.val);
- DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue)))
- }
- }
- }
- }
- }
-}
-
-/// Methods suitable for "expr" datums that could be either lvalues or
-/// rvalues. These include coercions into lvalues/rvalues but also a number
-/// of more general operations. (Some of those operations could be moved to
-/// the more general `impl<K> Datum<K>`, but it's convenient to have them
-/// here since we can `match self.kind` rather than having to implement
-/// generic methods in `KindOps`.)
-impl<'tcx> Datum<'tcx, Expr> {
- fn match_kind<R, F, G>(self, if_lvalue: F, if_rvalue: G) -> R where
- F: FnOnce(Datum<'tcx, Lvalue>) -> R,
- G: FnOnce(Datum<'tcx, Rvalue>) -> R,
- {
- let Datum { val, ty, kind } = self;
- match kind {
- LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)),
- RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)),
- }
- }
-
- /// Asserts that this datum *is* an lvalue and returns it.
- #[allow(dead_code)] // potentially useful
- pub fn assert_lvalue(self) -> Datum<'tcx, Lvalue> {
- self.match_kind(
- |d| d,
- |_| bug!("assert_lvalue given rvalue"))
- }
-
- pub fn store_to_dest<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- dest: expr::Dest,
- expr_id: ast::NodeId)
- -> Block<'blk, 'tcx> {
- match dest {
- expr::Ignore => {
- self.add_clean_if_rvalue(bcx, expr_id);
- bcx
- }
- expr::SaveIn(addr) => {
- self.store_to(bcx, addr)
- }
- }
- }
-
- /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value
- /// that may need drop.
- pub fn add_clean_if_rvalue<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- expr_id: ast::NodeId) {
- self.match_kind(
- |_| { /* Nothing to do, cleanup already arranged */ },
- |r| {
- let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
- r.add_clean(bcx.fcx, scope);
- })
- }
-
- pub fn to_lvalue_datum<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- name: &str,
- expr_id: ast::NodeId)
- -> DatumBlock<'blk, 'tcx, Lvalue> {
- debug!("to_lvalue_datum self: {:?}", self);
-
- self.match_kind(
- |l| DatumBlock::new(bcx, l),
- |r| {
- let scope = cleanup::temporary_scope(bcx.tcx(), expr_id);
- r.to_lvalue_datum_in_scope(bcx, name, scope)
- })
- }
-
- /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled).
- pub fn to_rvalue_datum<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- name: &'static str)
- -> DatumBlock<'blk, 'tcx, Rvalue> {
- self.match_kind(
- |l| {
- let mut bcx = bcx;
- match l.appropriate_rvalue_mode(bcx.ccx()) {
- ByRef => {
- let scratch = rvalue_scratch_datum(bcx, l.ty, name);
- bcx = l.store_to(bcx, scratch.val);
- DatumBlock::new(bcx, scratch)
- }
- ByValue => {
- let v = load_ty(bcx, l.val, l.ty);
- bcx = l.kind.post_store(bcx, l.val, l.ty);
- DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue)))
- }
- }
- },
- |r| DatumBlock::new(bcx, r))
- }
-
-}
-
-/// Methods suitable only for lvalues. These include the various
-/// operations to extract components out of compound data structures,
-/// such as extracting the field from a struct or a particular element
-/// from an array.
-impl<'tcx> Datum<'tcx, Lvalue> {
- /// Converts a datum into a by-ref value. The datum type must be one which is always passed by
- /// reference.
- pub fn to_llref(self) -> ValueRef {
- self.val
- }
-
- // Extracts a component of a compound data structure (e.g., a field from a
- // struct). Note that if self is an opened, unsized type then the returned
- // datum may also be unsized _without the size information_. It is the
- // callers responsibility to package the result in some way to make a valid
- // datum in that case (e.g., by making a fat pointer or opened pair).
- pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
- gep: F)
- -> Datum<'tcx, Lvalue> where
- F: FnOnce(adt::MaybeSizedValue) -> ValueRef,
- {
- let val = if type_is_sized(bcx.tcx(), self.ty) {
- let val = adt::MaybeSizedValue::sized(self.val);
- gep(val)
- } else {
- let val = adt::MaybeSizedValue::unsized_(
- Load(bcx, expr::get_dataptr(bcx, self.val)),
- Load(bcx, expr::get_meta(bcx, self.val)));
- gep(val)
- };
- Datum {
- val: val,
- kind: Lvalue::new("Datum::get_element"),
- ty: ty,
- }
- }
-
- pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>)
- -> (ValueRef, ValueRef) {
- //! Converts a vector into the slice pair.
-
- tvec::get_base_and_len(bcx, self.val, self.ty)
- }
-}
-
-/// Generic methods applicable to any sort of datum.
-impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> {
- pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> {
- Datum { val: val, ty: ty, kind: kind }
- }
-
- pub fn to_expr_datum(self) -> Datum<'tcx, Expr> {
- let Datum { val, ty, kind } = self;
- Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
- }
-
- /// Moves or copies this value into a new home, as appropriate depending on the type of the
- /// datum. This method consumes the datum, since it would be incorrect to go on using the datum
- /// if the value represented is affine (and hence the value is moved).
- pub fn store_to<'blk>(self,
- bcx: Block<'blk, 'tcx>,
- dst: ValueRef)
- -> Block<'blk, 'tcx> {
- self.shallow_copy_raw(bcx, dst);
-
- self.kind.post_store(bcx, self.val, self.ty)
- }
-
- /// Helper function that performs a shallow copy of this value into `dst`, which should be a
- /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized
- /// memory (either newly allocated, zeroed, or dropped).
- ///
- /// This function is private to datums because it leaves memory in an unstable state, where the
- /// source value has been copied but not zeroed. Public methods are `store_to` (if you no
- /// longer need the source value) or `shallow_copy` (if you wish the source value to remain
- /// valid).
- fn shallow_copy_raw<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- dst: ValueRef)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("copy_to_no_check");
-
- if type_is_zero_size(bcx.ccx(), self.ty) {
- return bcx;
- }
-
- if self.kind.is_by_ref() {
- memcpy_ty(bcx, dst, self.val, self.ty);
- } else {
- store_ty(bcx, self.val, dst, self.ty);
- }
-
- return bcx;
- }
-
- /// Copies the value into a new location. This function always preserves the existing datum as
- /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine
- /// values (since they must never be duplicated).
- pub fn shallow_copy<'blk>(&self,
- bcx: Block<'blk, 'tcx>,
- dst: ValueRef)
- -> Block<'blk, 'tcx> {
- assert!(!self.ty.moves_by_default(bcx.tcx(),
- &bcx.tcx().empty_parameter_environment(), DUMMY_SP));
- self.shallow_copy_raw(bcx, dst)
- }
-
- /// See the `appropriate_rvalue_mode()` function
- pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
- -> RvalueMode {
- appropriate_rvalue_mode(ccx, self.ty)
- }
-
- /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of
- /// responsibility to cleanup the value). For this to work, the value must be something
- /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is
- /// naturally passed around by value, and not by reference.
- pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
- assert!(!bcx.fcx.type_needs_drop(self.ty));
- assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue);
- if self.kind.is_by_ref() {
- load_ty(bcx, self.val, self.ty)
- } else {
- self.val
- }
- }
-
- pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef {
- assert!(self.ty.is_bool());
- self.to_llscalarish(bcx)
- }
-}
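A miniature version of the move/copy contract above, with a `Vec<u8>` standing in for the LLVM value and a plain boolean for `moves_by_default`:

    struct Datum { val: Vec<u8>, moves_by_default: bool }

    impl Datum {
        // store_to consumes self: an affine value is *moved* to its new home.
        fn store_to(self, dst: &mut Vec<u8>) { *dst = self.val; }

        // shallow_copy keeps self valid, so it must refuse affine values.
        fn shallow_copy(&self, dst: &mut Vec<u8>) {
            assert!(!self.moves_by_default, "cannot duplicate an affine value");
            *dst = self.val.clone();
        }
    }

    fn main() {
        let d = Datum { val: vec![1, 2], moves_by_default: false };
        let (mut a, mut b) = (vec![], vec![]);
        d.shallow_copy(&mut a); // d remains usable afterwards
        d.store_to(&mut b);     // d is consumed here
        assert_eq!(a, b);
    }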
-
-impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
- pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>)
- -> DatumBlock<'blk, 'tcx, K> {
- DatumBlock { bcx: bcx, datum: datum }
- }
-}
-
-impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> {
- pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
- DatumBlock::new(self.bcx, self.datum.to_expr_datum())
- }
-}
-
-impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
- pub fn store_to_dest(self,
- dest: expr::Dest,
- expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
- let DatumBlock { bcx, datum } = self;
- datum.store_to_dest(bcx, dest, expr_id)
- }
-
- pub fn to_llbool(self) -> Result<'blk, 'tcx> {
- let DatumBlock { datum, bcx } = self;
- Result::new(bcx, datum.to_llbool(bcx))
- }
-}
use llvm;
use llvm::debuginfo::{DIScope, DISubprogram};
use common::{CrateContext, FunctionContext};
-use rustc::hir::pat_util;
use rustc::mir::repr::{Mir, VisibilityScope};
-use rustc::util::nodemap::NodeMap;
use libc::c_uint;
use std::ptr;
-use syntax_pos::{Span, Pos};
-use syntax::{ast, codemap};
+use syntax_pos::Pos;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use rustc::hir::{self, PatKind};
-
-// This procedure builds the *scope map* for a given function, which maps any
-// given ast::NodeId in the function's AST to the correct DIScope metadata instance.
-//
-// This builder procedure walks the AST in execution order and keeps track of
-// what belongs to which scope, creating DIScope DIEs along the way, and
-// introducing *artificial* lexical scope descriptors where necessary. These
-// artificial scopes allow GDB to correctly handle name shadowing.
-pub fn create_scope_map(cx: &CrateContext,
- args: &[hir::Arg],
- fn_entry_block: &hir::Block,
- fn_metadata: DISubprogram,
- fn_ast_id: ast::NodeId)
- -> NodeMap<DIScope> {
- let mut scope_map = NodeMap();
- let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None });
- scope_map.insert(fn_ast_id, fn_metadata);
-
- // Push argument identifiers onto the stack so arguments integrate nicely
- // with variable shadowing.
- for arg in args {
- pat_util::pat_bindings(&arg.pat, |_, node_id, _, path1| {
- scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata,
- name: Some(path1.node) });
- scope_map.insert(node_id, fn_metadata);
- })
- }
-
- // Clang creates a separate scope for function bodies, so let's do this too.
- with_new_scope(cx,
- fn_entry_block.span,
- &mut scope_stack,
- &mut scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, fn_entry_block, scope_stack, scope_map);
- });
-
- return scope_map;
-}
/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
loc.col.to_usize() as c_uint)
};
}
-
-// local helper functions for walking the AST.
-fn with_new_scope<F>(cx: &CrateContext,
- scope_span: Span,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>,
- inner_walk: F) where
- F: FnOnce(&CrateContext, &mut Vec<ScopeStackEntry>, &mut NodeMap<DIScope>),
-{
- // Create a new lexical scope and push it onto the stack
- let loc = span_start(cx, scope_span);
- let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path);
- let parent_scope = scope_stack.last().unwrap().scope_metadata;
-
- let scope_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope,
- file_metadata,
- loc.line as c_uint,
- loc.col.to_usize() as c_uint)
- };
-
- scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None });
-
- inner_walk(cx, scope_stack, scope_map);
-
- // pop artificial scopes
- while scope_stack.last().unwrap().name.is_some() {
- scope_stack.pop();
- }
-
- if scope_stack.last().unwrap().scope_metadata != scope_metadata {
- span_bug!(scope_span, "debuginfo: Inconsistency in scope management.");
- }
-
- scope_stack.pop();
-}
-
-struct ScopeStackEntry {
- scope_metadata: DIScope,
- name: Option<ast::Name>
-}
-
-fn walk_block(cx: &CrateContext,
- block: &hir::Block,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
- scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata);
-
- // The interesting things here are statements and the concluding expression.
- for statement in &block.stmts {
- scope_map.insert(statement.node.id(),
- scope_stack.last().unwrap().scope_metadata);
-
- match statement.node {
- hir::StmtDecl(ref decl, _) =>
- walk_decl(cx, &decl, scope_stack, scope_map),
- hir::StmtExpr(ref exp, _) |
- hir::StmtSemi(ref exp, _) =>
- walk_expr(cx, &exp, scope_stack, scope_map),
- }
- }
-
- if let Some(ref exp) = block.expr {
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
-}
-
-fn walk_decl(cx: &CrateContext,
- decl: &hir::Decl,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
- match *decl {
- codemap::Spanned { node: hir::DeclLocal(ref local), .. } => {
- scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata);
-
- walk_pattern(cx, &local.pat, scope_stack, scope_map);
-
- if let Some(ref exp) = local.init {
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
- }
- _ => ()
- }
-}
-
-fn walk_pattern(cx: &CrateContext,
- pat: &hir::Pat,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
- // Unfortunately, we cannot just use pat_util::pat_bindings() or
- // ast_util::walk_pat() here because we have to visit *all* nodes in
- // order to put them into the scope map. The above functions don't do that.
- match pat.node {
- PatKind::Binding(_, ref path1, ref sub_pat_opt) => {
- // LLVM does not properly generate 'DW_AT_start_scope' fields
- // for variable DIEs. For this reason we have to introduce
- // an artificial scope at bindings whenever a variable with
- // the same name is declared in *any* parent scope.
- //
- // Otherwise the following error occurs:
- //
- // let x = 10;
- //
- // do_something(); // 'gdb print x' correctly prints 10
- //
- // {
- // do_something(); // 'gdb print x' prints 0, because it
- // // already reads the uninitialized 'x'
- // // from the next line...
- // let x = 100;
- // do_something(); // 'gdb print x' correctly prints 100
- // }
-
- // Is there already a binding with that name?
- // N.B.: this comparison must be UNhygienic... because
- // gdb knows nothing about the context, so any two
- // variables with the same name will cause the problem.
- let name = path1.node;
- let need_new_scope = scope_stack
- .iter()
- .any(|entry| entry.name == Some(name));
-
- if need_new_scope {
- // Create a new lexical scope and push it onto the stack
- let loc = span_start(cx, pat.span);
- let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path);
- let parent_scope = scope_stack.last().unwrap().scope_metadata;
-
- let scope_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateLexicalBlock(
- DIB(cx),
- parent_scope,
- file_metadata,
- loc.line as c_uint,
- loc.col.to_usize() as c_uint)
- };
-
- scope_stack.push(ScopeStackEntry {
- scope_metadata: scope_metadata,
- name: Some(name)
- });
-
- } else {
- // Push a new entry anyway so the name can be found
- let prev_metadata = scope_stack.last().unwrap().scope_metadata;
- scope_stack.push(ScopeStackEntry {
- scope_metadata: prev_metadata,
- name: Some(name)
- });
- }
-
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- if let Some(ref sub_pat) = *sub_pat_opt {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
-
- PatKind::Wild => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- }
-
- PatKind::TupleStruct(_, ref sub_pats, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for p in sub_pats {
- walk_pattern(cx, &p, scope_stack, scope_map);
- }
- }
-
- PatKind::Path(..) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- }
-
- PatKind::Struct(_, ref field_pats, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for &codemap::Spanned {
- node: hir::FieldPat { pat: ref sub_pat, .. },
- ..
- } in field_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
-
- PatKind::Tuple(ref sub_pats, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for sub_pat in sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
-
- PatKind::Box(ref sub_pat) | PatKind::Ref(ref sub_pat, _) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
-
- PatKind::Lit(ref exp) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
-
- PatKind::Range(ref exp1, ref exp2) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
- walk_expr(cx, &exp1, scope_stack, scope_map);
- walk_expr(cx, &exp2, scope_stack, scope_map);
- }
-
- PatKind::Vec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => {
- scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata);
-
- for sub_pat in front_sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
-
- if let Some(ref sub_pat) = *middle_sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
-
- for sub_pat in back_sub_pats {
- walk_pattern(cx, &sub_pat, scope_stack, scope_map);
- }
- }
- }
-}
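The shadowing check reduces to an (intentionally unhygienic) name scan over the scope stack. A runnable sketch, with scope ids as plain integers instead of `DIScope` metadata:

    struct Entry { scope: u32, name: Option<&'static str> }

    fn scope_for_binding(stack: &mut Vec<Entry>, name: &'static str, next: &mut u32) -> u32 {
        // Unhygienic on purpose: gdb only ever sees the raw name.
        let shadows = stack.iter().any(|e| e.name == Some(name));
        let scope = if shadows {
            *next += 1;                  // open an artificial lexical block
            *next
        } else {
            stack.last().unwrap().scope  // reuse the enclosing scope
        };
        stack.push(Entry { scope, name: Some(name) });
        scope
    }

    fn main() {
        let mut stack = vec![Entry { scope: 0, name: None }];
        let mut next = 0;
        let first = scope_for_binding(&mut stack, "x", &mut next);
        let second = scope_for_binding(&mut stack, "x", &mut next);
        assert_ne!(first, second); // the shadowing `x` got its own scope
    }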
-
-fn walk_expr(cx: &CrateContext,
- exp: &hir::Expr,
- scope_stack: &mut Vec<ScopeStackEntry> ,
- scope_map: &mut NodeMap<DIScope>) {
-
- scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata);
-
- match exp.node {
- hir::ExprLit(_) |
- hir::ExprBreak(_) |
- hir::ExprAgain(_) |
- hir::ExprPath(..) => {}
-
- hir::ExprCast(ref sub_exp, _) |
- hir::ExprType(ref sub_exp, _) |
- hir::ExprAddrOf(_, ref sub_exp) |
- hir::ExprField(ref sub_exp, _) |
- hir::ExprTupField(ref sub_exp, _) =>
- walk_expr(cx, &sub_exp, scope_stack, scope_map),
-
- hir::ExprBox(ref sub_expr) => {
- walk_expr(cx, &sub_expr, scope_stack, scope_map);
- }
-
- hir::ExprRet(ref exp_opt) => match *exp_opt {
- Some(ref sub_exp) => walk_expr(cx, &sub_exp, scope_stack, scope_map),
- None => ()
- },
-
- hir::ExprUnary(_, ref sub_exp) => {
- walk_expr(cx, &sub_exp, scope_stack, scope_map);
- }
-
- hir::ExprAssignOp(_, ref lhs, ref rhs) |
- hir::ExprIndex(ref lhs, ref rhs) |
- hir::ExprBinary(_, ref lhs, ref rhs) => {
- walk_expr(cx, &lhs, scope_stack, scope_map);
- walk_expr(cx, &rhs, scope_stack, scope_map);
- }
-
- hir::ExprVec(ref init_expressions) |
- hir::ExprTup(ref init_expressions) => {
- for ie in init_expressions {
- walk_expr(cx, &ie, scope_stack, scope_map);
- }
- }
-
- hir::ExprAssign(ref sub_exp1, ref sub_exp2) |
- hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => {
- walk_expr(cx, &sub_exp1, scope_stack, scope_map);
- walk_expr(cx, &sub_exp2, scope_stack, scope_map);
- }
-
- hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => {
- walk_expr(cx, &cond_exp, scope_stack, scope_map);
-
- with_new_scope(cx,
- then_block.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, &then_block, scope_stack, scope_map);
- });
-
- match *opt_else_exp {
- Some(ref else_exp) =>
- walk_expr(cx, &else_exp, scope_stack, scope_map),
- _ => ()
- }
- }
-
- hir::ExprWhile(ref cond_exp, ref loop_body, _) => {
- walk_expr(cx, &cond_exp, scope_stack, scope_map);
-
- with_new_scope(cx,
- loop_body.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, &loop_body, scope_stack, scope_map);
- })
- }
-
- hir::ExprLoop(ref block, _) |
- hir::ExprBlock(ref block) => {
- with_new_scope(cx,
- block.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- walk_block(cx, &block, scope_stack, scope_map);
- })
- }
-
- hir::ExprClosure(_, ref decl, ref block, _) => {
- with_new_scope(cx,
- block.span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- for &hir::Arg { pat: ref pattern, .. } in &decl.inputs {
- walk_pattern(cx, &pattern, scope_stack, scope_map);
- }
-
- walk_block(cx, &block, scope_stack, scope_map);
- })
- }
-
- hir::ExprCall(ref fn_exp, ref args) => {
- walk_expr(cx, &fn_exp, scope_stack, scope_map);
-
- for arg_exp in args {
- walk_expr(cx, &arg_exp, scope_stack, scope_map);
- }
- }
-
- hir::ExprMethodCall(_, _, ref args) => {
- for arg_exp in args {
- walk_expr(cx, &arg_exp, scope_stack, scope_map);
- }
- }
-
- hir::ExprMatch(ref discriminant_exp, ref arms, _) => {
- walk_expr(cx, &discriminant_exp, scope_stack, scope_map);
-
- // For each arm we have to first walk the patterns, as these might
- // introduce new artificial scopes. The arms' patterns all must
- // contain the same binding names, but we still walk every pattern
- // so that each pattern node ends up in the scope map.
-
- for arm_ref in arms {
- let arm_span = arm_ref.pats[0].span;
-
- with_new_scope(cx,
- arm_span,
- scope_stack,
- scope_map,
- |cx, scope_stack, scope_map| {
- for pat in &arm_ref.pats {
- walk_pattern(cx, &pat, scope_stack, scope_map);
- }
-
- if let Some(ref guard_exp) = arm_ref.guard {
- walk_expr(cx, &guard_exp, scope_stack, scope_map)
- }
-
- walk_expr(cx, &arm_ref.body, scope_stack, scope_map);
- })
- }
- }
-
- hir::ExprStruct(_, ref fields, ref base_exp) => {
- for &hir::Field { expr: ref exp, .. } in fields {
- walk_expr(cx, &exp, scope_stack, scope_map);
- }
-
- match *base_exp {
- Some(ref exp) => walk_expr(cx, &exp, scope_stack, scope_map),
- None => ()
- }
- }
-
- hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
- for output in outputs {
- walk_expr(cx, output, scope_stack, scope_map);
- }
-
- for input in inputs {
- walk_expr(cx, input, scope_stack, scope_map);
- }
- }
- }
-}
use self::EnumDiscriminantInfo::*;
use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of,
- get_namespace_and_span_for_item, create_DIArray,
- fn_should_be_ignored, is_node_local_to_unit};
+ get_namespace_and_span_for_item, create_DIArray, is_node_local_to_unit};
use super::namespace::mangled_name_of_item;
use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name};
-use super::{declare_local, VariableKind, VariableAccess, CrateDebugContext};
+use super::CrateDebugContext;
use context::SharedCrateContext;
use session::Session;
use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType};
use rustc::hir::def_id::DefId;
-use rustc::hir::pat_util;
use rustc::ty::subst::Substs;
-use rustc::hir::map as hir_map;
-use rustc::hir::{self, PatKind};
+use rustc::hir;
use {type_of, adt, machine, monomorphize};
-use common::{self, CrateContext, FunctionContext, Block};
-use _match::{BindingInfo, TransBindingMode};
+use common::{CrateContext, FunctionContext};
use type_::Type;
use rustc::ty::{self, Ty};
-use session::config::{self, FullDebugInfo};
+use session::config;
use util::nodemap::FnvHashMap;
use util::common::path2cstr;
ptr::null_mut());
}
}
-
-/// Creates debug information for the given local variable.
-///
-/// This function assumes that there's a datum for each pattern component of the
-/// local in `bcx.fcx.lllocals`.
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let locals = bcx.fcx.lllocals.borrow();
- pat_util::pat_bindings(&local.pat, |_, node_id, span, var_name| {
- let datum = match locals.get(&node_id) {
- Some(datum) => datum,
- None => {
- span_bug!(span,
- "no entry in lllocals table for {}",
- node_id);
- }
- };
-
- if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
- span_bug!(span, "debuginfo::create_local_var_metadata() - \
- Referenced variable location is not an alloca!");
- }
-
- let scope_metadata = scope_metadata(bcx.fcx, node_id, span);
-
- declare_local(bcx,
- var_name.node,
- datum.ty,
- scope_metadata,
- VariableAccess::DirectVariable { alloca: datum.val },
- VariableKind::LocalVariable,
- span);
- })
-}
-
-/// Creates debug information for a variable captured in a closure.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- node_id: ast::NodeId,
- env_pointer: ValueRef,
- env_index: usize,
- captured_by_ref: bool,
- span: Span) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let cx = bcx.ccx();
-
- let ast_item = cx.tcx().map.find(node_id);
-
- let variable_name = match ast_item {
- None => {
- span_bug!(span, "debuginfo::create_captured_var_metadata: node not found");
- }
- Some(hir_map::NodeLocal(pat)) => {
- match pat.node {
- PatKind::Binding(_, ref path1, _) => {
- path1.node
- }
- _ => {
- span_bug!(span,
- "debuginfo::create_captured_var_metadata() - \
- Captured var-id refers to unexpected \
- hir_map variant: {:?}",
- ast_item);
- }
- }
- }
- _ => {
- span_bug!(span,
- "debuginfo::create_captured_var_metadata() - \
- Captured var-id refers to unexpected \
- hir_map variant: {:?}",
- ast_item);
- }
- };
-
- let variable_type = common::node_id_type(bcx, node_id);
- let scope_metadata = bcx.fcx.debug_context.get_ref(span).fn_metadata;
-
- // env_pointer is the alloca containing the pointer to the environment,
- // so its type is **EnvironmentType. In order to find out the type of
- // the environment we have to "dereference" it twice.
- let llvm_env_data_type = common::val_ty(env_pointer).element_type()
- .element_type();
- let byte_offset_of_var_in_env = machine::llelement_offset(cx,
- llvm_env_data_type,
- env_index);
-
- let address_operations = unsafe {
- [llvm::LLVMRustDIBuilderCreateOpDeref(),
- llvm::LLVMRustDIBuilderCreateOpPlus(),
- byte_offset_of_var_in_env as i64,
- llvm::LLVMRustDIBuilderCreateOpDeref()]
- };
-
- let address_op_count = if captured_by_ref {
- address_operations.len()
- } else {
- address_operations.len() - 1
- };
-
- let variable_access = VariableAccess::IndirectVariable {
- alloca: env_pointer,
- address_operations: &address_operations[..address_op_count]
- };
-
- declare_local(bcx,
- variable_name,
- variable_type,
- scope_metadata,
- variable_access,
- VariableKind::CapturedVariable,
- span);
-}
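A sketch of the operation-count choice above, building the DWARF expression incrementally rather than slicing a fixed array as the real code does; the op names here are only labels:

    fn address_ops(byte_offset: i64, captured_by_ref: bool) -> Vec<String> {
        let mut ops = vec![
            "DW_OP_deref".to_string(),              // env alloca -> env data
            format!("DW_OP_plus {}", byte_offset),  // step to the capture's field
        ];
        if captured_by_ref {
            ops.push("DW_OP_deref".to_string());    // the field is a &T: one more hop
        }
        ops
    }

    fn main() {
        assert_eq!(address_ops(16, false).len(), 2); // by-value capture
        assert_eq!(address_ops(16, true).len(), 3);  // by-ref capture
    }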
-
-/// Creates debug information for a local variable introduced in the head of a
-/// match-statement arm.
-///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- variable_name: ast::Name,
- binding: BindingInfo<'tcx>) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
- let aops = unsafe {
- [llvm::LLVMRustDIBuilderCreateOpDeref()]
- };
- // Regardless of the actual type (`T`) we're always passed the stack slot
- // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove
- // bindings we actually have `T**`. So to get the actual variable we need to
- // dereference once more. For ByCopy we just use the stack slot we created
- // for the binding.
- let var_access = match binding.trmode {
- TransBindingMode::TrByCopy(llbinding) |
- TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable {
- alloca: llbinding
- },
- TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable {
- alloca: binding.llmatch,
- address_operations: &aops
- },
- TransBindingMode::TrByRef => VariableAccess::DirectVariable {
- alloca: binding.llmatch
- }
- };
-
- declare_local(bcx,
- variable_name,
- binding.ty,
- scope_metadata,
- var_access,
- VariableKind::LocalVariable,
- binding.span);
-}
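The indirection table from the comment above, as a hypothetical standalone mapping; the mode names are illustrative strings, not the real `TransBindingMode` variants:

    enum Access { Direct, Deref } // how the debugger reaches the T itself

    fn access_for(trmode: &str) -> Access {
        match trmode {
            "by_move_ref" => Access::Deref, // slot holds T**: one extra hop
            _ => Access::Direct,            // by_copy / by_move_into_copy / by_ref
        }
    }

    fn main() {
        assert!(matches!(access_for("by_move_ref"), Access::Deref));
        assert!(matches!(access_for("by_ref"), Access::Direct));
    }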
-
-/// Creates debug information for the given function argument.
-///
-/// This function assumes that there's a datum for each pattern component of the
-/// argument in `bcx.fcx.lllocals`.
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) {
- if bcx.unreachable.get() ||
- fn_should_be_ignored(bcx.fcx) ||
- bcx.sess().opts.debuginfo != FullDebugInfo {
- return;
- }
-
- let scope_metadata = bcx
- .fcx
- .debug_context
- .get_ref(arg.pat.span)
- .fn_metadata;
- let locals = bcx.fcx.lllocals.borrow();
-
- pat_util::pat_bindings(&arg.pat, |_, node_id, span, var_name| {
- let datum = match locals.get(&node_id) {
- Some(v) => v,
- None => {
- span_bug!(span, "no entry in lllocals table for {}", node_id);
- }
- };
-
- if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() {
- span_bug!(span, "debuginfo::create_argument_metadata() - \
- Referenced variable location is not an alloca!");
- }
-
- let argument_index = {
- let counter = &bcx
- .fcx
- .debug_context
- .get_ref(span)
- .argument_counter;
- let argument_index = counter.get();
- counter.set(argument_index + 1);
- argument_index
- };
-
- declare_local(bcx,
- var_name.node,
- datum.ty,
- scope_metadata,
- VariableAccess::DirectVariable { alloca: datum.val },
- VariableKind::ArgumentVariable(argument_index),
- span);
- })
-}
pub use self::create_scope_map::create_mir_scopes;
pub use self::source_loc::start_emitting_source_locations;
-pub use self::source_loc::get_cleanup_debug_loc_for_ast_node;
-pub use self::source_loc::with_source_location_override;
-pub use self::metadata::create_match_binding_metadata;
-pub use self::metadata::create_argument_metadata;
-pub use self::metadata::create_captured_var_metadata;
pub use self::metadata::create_global_var_metadata;
-pub use self::metadata::create_local_var_metadata;
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
pub struct FunctionDebugContextData {
scope_map: RefCell<NodeMap<DIScope>>,
fn_metadata: DISubprogram,
- argument_counter: Cell<usize>,
source_locations_enabled: Cell<bool>,
source_location_override: Cell<bool>,
}
let fn_debug_context = box FunctionDebugContextData {
scope_map: RefCell::new(NodeMap()),
fn_metadata: fn_metadata,
- argument_counter: Cell::new(1),
source_locations_enabled: Cell::new(false),
source_location_override: Cell::new(false),
};
}
}
-/// Computes the scope map for a function given its declaration and body.
-pub fn fill_scope_map_for_function<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
- fn_decl: &hir::FnDecl,
- top_level_block: &hir::Block,
- fn_ast_id: ast::NodeId) {
- match fcx.debug_context {
- FunctionDebugContext::RegularContext(box ref data) => {
- let scope_map = create_scope_map::create_scope_map(fcx.ccx,
- &fn_decl.inputs,
- top_level_block,
- data.fn_metadata,
- fn_ast_id);
- *data.scope_map.borrow_mut() = scope_map;
- }
- FunctionDebugContext::DebugInfoDisabled |
- FunctionDebugContext::FunctionWithoutDebugInfo => {}
- }
-}
-
pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
variable_name: ast::Name,
variable_type: Ty<'tcx>,
use llvm;
use llvm::debuginfo::DIScope;
use builder::Builder;
-use common::{NodeIdAndSpan, CrateContext, FunctionContext};
+use common::{CrateContext, FunctionContext};
use libc::c_uint;
use std::ptr;
-use syntax_pos::{self, Span, Pos};
-use syntax::ast;
-
-pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
- node_id: ast::NodeId,
- node_span: Span,
- is_block: bool)
- -> NodeIdAndSpan {
- // A debug location needs two things:
- // (1) A span (of which only the beginning will actually be used)
- // (2) An AST node-id which will be used to look up the lexical scope
- // for the location in the functions scope-map
- //
- // This function will calculate the debug location for compiler-generated
- // cleanup calls that are executed when control-flow leaves the
- // scope identified by `node_id`.
- //
- // For everything but block-like things we can simply take the id and span of
- // the given expression, meaning that from a debugger's view cleanup code is
- // executed at the same source location as the statement/expr itself.
- //
- // Blocks are a special case. Here we want the cleanup to be linked to the
- // closing curly brace of the block. The *scope* the cleanup is executed in
- // is up for debate: it could either still be *within* the block being
- // cleaned up, meaning that locals from the block are still visible in the
- // debugger.
- // Or it could be in the scope that the block is contained in, so any locals
- // from within the block are already considered out-of-scope and thus not
- // accessible in the debugger anymore.
- //
- // The current implementation opts for the second option: cleanup of a block
- // already happens in the parent scope of the block. The main reason for
- // this decision is that scoping becomes control-flow dependent when variable
- // shadowing is involved and it's impossible to decide statically which
- // scope is actually left when the cleanup code is executed.
- // In practice it shouldn't make much of a difference.
-
- let mut cleanup_span = node_span;
-
- if is_block {
- // Not all blocks actually have curly braces (e.g. simple closure
- // bodies), in which case we also just want to return the span of the
- // whole expression.
- let code_snippet = cx.sess().codemap().span_to_snippet(node_span);
- if let Ok(code_snippet) = code_snippet {
- let bytes = code_snippet.as_bytes();
-
- if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" {
- cleanup_span = Span {
- lo: node_span.hi - syntax_pos::BytePos(1),
- hi: node_span.hi,
- expn_id: node_span.expn_id
- };
- }
- }
- }
-
- NodeIdAndSpan {
- id: node_id,
- span: cleanup_span
- }
-}
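The closing-brace adjustment in isolation, with spans modeled as plain byte ranges (the real `Span` also carries an expansion id):

    fn cleanup_span(lo: u32, hi: u32, snippet: &str) -> (u32, u32) {
        if snippet.ends_with('}') {
            (hi - 1, hi) // point at the closing brace only
        } else {
            (lo, hi)     // brace-less body (e.g. simple closure): whole span
        }
    }

    fn main() {
        assert_eq!(cleanup_span(10, 20, "{ x + 1 }"), (19, 20));
        assert_eq!(cleanup_span(10, 15, "x + 1"), (10, 15));
    }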
-
+use syntax_pos::Pos;
/// Sets the current debug location at the beginning of the span.
///
set_debug_location(fcx.ccx, builder, dbg_loc);
}
-/// This function makes sure that all debug locations emitted while executing
-/// `wrapped_function` are set to the given `debug_loc`.
-pub fn with_source_location_override<F, R>(fcx: &FunctionContext,
- debug_loc: DebugLoc,
- wrapped_function: F) -> R
- where F: FnOnce() -> R
-{
- match fcx.debug_context {
- FunctionDebugContext::DebugInfoDisabled => {
- wrapped_function()
- }
- FunctionDebugContext::FunctionWithoutDebugInfo => {
- set_debug_location(fcx.ccx, None, UnknownLocation);
- wrapped_function()
- }
- FunctionDebugContext::RegularContext(box ref function_debug_context) => {
- if function_debug_context.source_location_override.get() {
- wrapped_function()
- } else {
- debug_loc.apply(fcx);
- function_debug_context.source_location_override.set(true);
- let result = wrapped_function();
- function_debug_context.source_location_override.set(false);
- result
- }
- }
- }
-}
-
/// Enables emitting source locations for the given functions.
///
/// Since we don't want source locations to be emitted for the function prelude,
// Utility Functions.
-use super::{FunctionDebugContext, CrateDebugContext};
+use super::CrateDebugContext;
use super::namespace::item_namespace;
use rustc::hir::def_id::DefId;
use llvm;
use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
use machine;
-use common::{CrateContext, FunctionContext};
+use common::CrateContext;
use type_::Type;
use syntax_pos::{self, Span};
cx.dbg_cx().as_ref().unwrap().builder
}
-pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
- match fcx.debug_context {
- FunctionDebugContext::RegularContext(_) => false,
- _ => true
- }
-}
-
pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId)
-> (DIScope, Span) {
let containing_scope = item_namespace(cx, DefId {
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Translation of Expressions
-//!
-//! The expr module handles translation of expressions. The most general
-//! translation routine is `trans()`, which will translate an expression
-//! into a datum. `trans_into()` is also available, which will translate
-//! an expression and write the result directly into memory, sometimes
-//! avoiding the need for a temporary stack slot. Finally,
-//! `trans_to_lvalue()` is available if you'd like to ensure that the
-//! result has cleanup scheduled.
-//!
-//! Internally, each of these functions dispatches to various other
-//! expression functions depending on the kind of expression. We divide
-//! up expressions into:
-//!
-//! - **Datum expressions:** Those that most naturally yield values.
-//! Examples would be `22`, `box x`, or `a + b` (when not overloaded).
-//! - **DPS expressions:** Those that most naturally write into a location
-//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`.
-//! - **Statement expressions:** Those that do not generate a meaningful
-//! result. Examples would be `while { ... }` or `return 44`.
-//!
-//! Public entry points:
-//!
-//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression,
-//! storing the result into `dest`. This is the preferred form, if you
-//! can manage it.
-//!
-//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding
-//! `Datum` with the result. You can then store the datum, inspect
-//! the value, etc. This may introduce temporaries if the datum is a
-//! structural type.
-//!
-//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an
-//! expression and ensures that the result has a cleanup associated with it,
-//! creating a temporary stack slot if necessary.
-//!
-//! - `trans_var -> Datum`: looks up a local variable, upvar or static.
-
-#![allow(non_camel_case_types)]
-
-pub use self::Dest::*;
-use self::lazy_binop_ty::*;
-
-use llvm::{self, ValueRef, TypeKind};
-use middle::const_qualif::ConstQualif;
-use rustc::hir::def::Def;
-use rustc::ty::subst::Substs;
-use {_match, abi, adt, asm, base, closure, consts, controlflow};
-use base::*;
-use build::*;
-use callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
-use cleanup::{self, CleanupMethods, DropHintMethods};
-use common::*;
-use datum::*;
-use debuginfo::{self, DebugLoc, ToDebugLoc};
-use glue;
-use machine;
-use tvec;
-use type_of;
-use value::Value;
-use Disr;
-use rustc::ty::adjustment::{AdjustNeverToAny, AdjustDerefRef, AdjustReifyFnPointer};
-use rustc::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
-use rustc::ty::adjustment::CustomCoerceUnsized;
-use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::MethodCall;
-use rustc::ty::cast::{CastKind, CastTy};
-use util::common::indenter;
-use machine::{llsize_of, llsize_of_alloc};
-use type_::Type;
-
-use rustc::hir;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-use syntax_pos;
-use std::fmt;
-use std::mem;
-
-// Destinations
-
-// These are passed around by the code generating functions to track the
-// destination of a computation's value.
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum Dest {
- SaveIn(ValueRef),
- Ignore,
-}
-
-impl fmt::Debug for Dest {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)),
- Ignore => f.write_str("Ignore")
- }
- }
-}
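A minimal standalone model of `Dest`: a computation either materializes into caller-provided storage or is evaluated only for its effects:

    enum Dest<'a> { SaveIn(&'a mut i32), Ignore }

    fn trans_add(dest: Dest<'_>, a: i32, b: i32) {
        match dest {
            Dest::SaveIn(slot) => *slot = a + b, // write straight into the home
            Dest::Ignore => { let _ = a + b; }   // evaluate for effect only
        }
    }

    fn main() {
        let mut out = 0;
        trans_add(Dest::SaveIn(&mut out), 2, 3);
        assert_eq!(out, 5);
        trans_add(Dest::Ignore, 2, 3); // result discarded
    }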
-
-/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate
-/// better optimized LLVM code.
-pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let mut bcx = bcx;
-
- expr.debug_loc().apply(bcx.fcx);
-
- if adjustment_required(bcx, expr) {
- // use trans, which may be less efficient but
- // which will perform the adjustments:
- let datum = unpack_datum!(bcx, trans(bcx, expr));
- return datum.store_to_dest(bcx, dest, expr.id);
- }
-
- let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
- if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
- if !qualif.intersects(ConstQualif::PREFER_IN_PLACE) {
- if let SaveIn(lldest) = dest {
- match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
- bcx.fcx.param_substs,
- consts::TrueConst::No) {
- Ok(global) => {
- // Cast pointer to destination, because constants
- // have different types.
- let lldest = PointerCast(bcx, lldest, val_ty(global));
- memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr));
- return bcx;
- },
- Err(consts::ConstEvalFailure::Runtime(_)) => {
- // in case const evaluation errors, translate normally
- // debug assertions catch the same errors
- // see RFC 1229
- },
- Err(consts::ConstEvalFailure::Compiletime(_)) => {
- return bcx;
- },
- }
- }
-
- // If we see a const here, that's because it evaluates to a type with zero size. We
- // should be able to just discard it, since const expressions are guaranteed not to
- // have side effects. This seems to be reached through tuple struct constructors being
- // passed zero-size constants.
- if let hir::ExprPath(..) = expr.node {
- match bcx.tcx().expect_def(expr.id) {
- Def::Const(_) | Def::AssociatedConst(_) => {
- assert!(type_is_zero_size(bcx.ccx(), bcx.tcx().node_id_to_type(expr.id)));
- return bcx;
- }
- _ => {}
- }
- }
-
- // Even if we don't have a value to emit, and the expression
- // doesn't have any side-effects, we still have to translate the
- // body of any closures.
- // FIXME: Find a better way of handling this case.
- } else {
- // The only way we're going to see a `const` at this point is if
- // it prefers in-place instantiation, likely because it contains
- // `[x; N]` somewhere within.
- match expr.node {
- hir::ExprPath(..) => {
- match bcx.tcx().expect_def(expr.id) {
- Def::Const(did) | Def::AssociatedConst(did) => {
- let empty_substs = Substs::empty(bcx.tcx());
- let const_expr = consts::get_const_expr(bcx.ccx(), did, expr,
- empty_substs);
- // Temporarily get cleanup scopes out of the way,
- // as they require sub-expressions to be contained
- // inside the current AST scope.
- // These should record no cleanups anyways, `const`
- // can't have destructors.
- let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
- vec![]);
- // Lock emitted debug locations to the location of
- // the constant reference expression.
- debuginfo::with_source_location_override(bcx.fcx,
- expr.debug_loc(),
- || {
- bcx = trans_into(bcx, const_expr, dest)
- });
- let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(),
- scopes);
- assert!(scopes.is_empty());
- return bcx;
- }
- _ => {}
- }
- }
- _ => {}
- }
- }
- }
-
- debug!("trans_into() expr={:?}", expr);
-
- let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
- expr.id,
- expr.span,
- false);
- bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc);
-
- let kind = expr_kind(bcx.tcx(), expr);
- bcx = match kind {
- ExprKind::Lvalue | ExprKind::RvalueDatum => {
- trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id)
- }
- ExprKind::RvalueDps => {
- trans_rvalue_dps_unadjusted(bcx, expr, dest)
- }
- ExprKind::RvalueStmt => {
- trans_rvalue_stmt_unadjusted(bcx, expr)
- }
- };
-
- bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
-}
-
-/// Translates an expression, returning a datum (and new block) encapsulating the result. When
-/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the
-/// stack.
-pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- debug!("trans(expr={:?})", expr);
-
- let mut bcx = bcx;
- let fcx = bcx.fcx;
- let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap();
- let adjusted_global = !qualif.intersects(ConstQualif::NON_STATIC_BORROWS);
- let global = if !qualif.intersects(ConstQualif::NOT_CONST | ConstQualif::NEEDS_DROP) {
- match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif,
- bcx.fcx.param_substs,
- consts::TrueConst::No) {
- Ok(global) => {
- if qualif.intersects(ConstQualif::HAS_STATIC_BORROWS) {
- // Is borrowed as 'static, must return lvalue.
-
- // Cast pointer to global, because constants have different types.
- let const_ty = expr_ty_adjusted(bcx, expr);
- let llty = type_of::type_of(bcx.ccx(), const_ty);
- let global = PointerCast(bcx, global, llty.ptr_to());
- let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans"));
- return DatumBlock::new(bcx, datum.to_expr_datum());
- }
-
- // Otherwise, keep around and perform adjustments, if needed.
- let const_ty = if adjusted_global {
- expr_ty_adjusted(bcx, expr)
- } else {
- expr_ty(bcx, expr)
- };
-
- // This could use a better heuristic.
- Some(if type_is_immediate(bcx.ccx(), const_ty) {
- // Cast pointer to global, because constants have different types.
- let llty = type_of::type_of(bcx.ccx(), const_ty);
- let global = PointerCast(bcx, global, llty.ptr_to());
- // Maybe just get the value directly, instead of loading it?
- immediate_rvalue(load_ty(bcx, global, const_ty), const_ty)
- } else {
- let scratch = alloc_ty(bcx, const_ty, "const");
- call_lifetime_start(bcx, scratch);
- let lldest = if !const_ty.is_structural() {
- // Cast pointer to slot, because constants have different types.
- PointerCast(bcx, scratch, val_ty(global))
- } else {
- // In this case, memcpy_ty calls llvm.memcpy after casting both
- // source and destination to i8*, so we don't need any casts.
- scratch
- };
- memcpy_ty(bcx, lldest, global, const_ty);
- Datum::new(scratch, const_ty, Rvalue::new(ByRef))
- })
- },
- Err(consts::ConstEvalFailure::Runtime(_)) => {
- // in case const evaluation errors, translate normally
- // debug assertions catch the same errors
- // see RFC 1229
- None
- },
- Err(consts::ConstEvalFailure::Compiletime(_)) => {
- // generate a dummy llvm value
- let const_ty = expr_ty(bcx, expr);
- let llty = type_of::type_of(bcx.ccx(), const_ty);
- let dummy = C_undef(llty.ptr_to());
- Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef)))
- },
- }
- } else {
- None
- };
-
- let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
- expr.id,
- expr.span,
- false);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
- let datum = match global {
- Some(rvalue) => rvalue.to_expr_datum(),
- None => unpack_datum!(bcx, trans_unadjusted(bcx, expr))
- };
- let datum = if adjusted_global {
- datum // trans::consts already performed adjustments.
- } else {
- unpack_datum!(bcx, apply_adjustments(bcx, expr, datum))
- };
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id);
- return DatumBlock::new(bcx, datum);
-}
-
-pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
- StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
-}
-
-pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
- StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
-}
-
-pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) {
- Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr));
- Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr));
-}
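A standalone model of the fat-pointer layout assumed by `get_dataptr`/`get_meta`: a (data, extra) word pair, where `extra` is a slice length or a vtable pointer:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct FatPtr { data: usize, extra: usize }

    fn copy_fat_ptr(src: FatPtr, dst: &mut FatPtr) {
        dst.data = src.data;   // the data pointer word
        dst.extra = src.extra; // the metadata word travels with it
    }

    fn main() {
        let s: &[u8] = &[1, 2, 3];
        let src = FatPtr { data: s.as_ptr() as usize, extra: s.len() };
        let mut dst = FatPtr { data: 0, extra: 0 };
        copy_fat_ptr(src, &mut dst);
        assert_eq!(src, dst);
    }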
-
-fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr) -> bool {
- let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
- None => { return false; }
- Some(adj) => adj
- };
-
- // Don't skip a conversion from Box<T> to &T, etc.
- if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
- return true;
- }
-
- match adjustment {
- AdjustNeverToAny(..) => true,
- AdjustReifyFnPointer => true,
- AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
- // purely a type-level thing
- false
- }
- AdjustDerefRef(ref adj) => {
- // We are a bit paranoid about adjustments and thus might have a re-
- // borrow here which merely derefs and then refs again (it might have
- // a different region or mutability, but we don't care here).
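-            // For example, an implicit reborrow `&*x` of some `x: &T` leaves
-            // the value bits unchanged, so no adjustment code needs to run.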
- !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none())
- }
- }
-}
-
-/// Helper for `trans` that applies the adjustments from `expr` to `datum`, which should be the
-/// unadjusted translation of `expr`.
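-///
-/// For example, a deref coercion such as passing a `&String` where a `&str`
-/// is expected is recorded as autoderefs plus an autoref, and is applied here.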
-fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- datum: Datum<'tcx, Expr>)
- -> DatumBlock<'blk, 'tcx, Expr>
-{
- let mut bcx = bcx;
- let mut datum = datum;
- let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() {
- None => {
- return DatumBlock::new(bcx, datum);
- }
- Some(adj) => { adj }
- };
- debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
- expr, datum, adjustment);
- match adjustment {
- AdjustNeverToAny(ref target) => {
- let mono_target = bcx.monomorphize(target);
- let llty = type_of::type_of(bcx.ccx(), mono_target);
- let dummy = C_undef(llty.ptr_to());
- datum = Datum::new(dummy, mono_target, Lvalue::new("never")).to_expr_datum();
- }
- AdjustReifyFnPointer => {
- match datum.ty.sty {
- ty::TyFnDef(def_id, substs, _) => {
- datum = Callee::def(bcx.ccx(), def_id, substs)
- .reify(bcx.ccx()).to_expr_datum();
- }
- _ => {
- bug!("{} cannot be reified to a fn ptr", datum.ty)
- }
- }
- }
- AdjustUnsafeFnPointer | AdjustMutToConstPointer => {
- // purely a type-level thing
- }
- AdjustDerefRef(ref adj) => {
- let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() {
- // We are a bit paranoid about adjustments and thus might have a re-
- // borrow here which merely derefs and then refs again (it might have
- // a different region or mutability, but we don't care here).
- match datum.ty.sty {
- // Don't skip a conversion from Box<T> to &T, etc.
- ty::TyRef(..) => {
- if bcx.tcx().is_overloaded_autoderef(expr.id, 0) {
- // Don't skip an overloaded deref.
- 0
- } else {
- 1
- }
- }
- _ => 0
- }
- } else {
- 0
- };
-
- if adj.autoderefs > skip_reborrows {
- // Schedule cleanup.
- let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id));
- datum = unpack_datum!(bcx, deref_multiple(bcx, expr,
- lval.to_expr_datum(),
- adj.autoderefs - skip_reborrows));
- }
-
- // (You might think there is a more elegant way to do this than a
- // skip_reborrows bool, but then you remember that the borrow checker exists).
- if skip_reborrows == 0 && adj.autoref.is_some() {
- datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr));
- }
-
- if let Some(target) = adj.unsize {
- // We do not arrange cleanup ourselves; if we already are an
- // L-value, then cleanup will have already been scheduled (and
- // the `datum.to_rvalue_datum` call below will emit code to zero
- // the drop flag when moving out of the L-value). If we are an
- // R-value, then we do not need to schedule cleanup.
- let source_datum = unpack_datum!(bcx,
- datum.to_rvalue_datum(bcx, "__coerce_source"));
-
- let target = bcx.monomorphize(&target);
-
- let scratch = alloc_ty(bcx, target, "__coerce_target");
- call_lifetime_start(bcx, scratch);
- let target_datum = Datum::new(scratch, target,
- Rvalue::new(ByRef));
- bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum);
- datum = Datum::new(scratch, target,
- RvalueExpr(Rvalue::new(ByRef)));
- }
- }
- }
- debug!("after adjustments, datum={:?}", datum);
- DatumBlock::new(bcx, datum)
-}
-
-fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- span: syntax_pos::Span,
- source: Datum<'tcx, Rvalue>,
- target: Datum<'tcx, Rvalue>)
- -> Block<'blk, 'tcx> {
- let mut bcx = bcx;
- debug!("coerce_unsized({:?} -> {:?})", source, target);
-
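-    // Unsizing attaches metadata to a pointer. At the source level, e.g.:
-    //
-    //     let s: &[i32] = &[1, 2, 3];       // &[i32; 3] -> &[i32], meta = len
-    //     let d: &std::fmt::Debug = &5i32;  // &i32 -> &Debug, meta = vtable
-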
- match (&source.ty.sty, &target.ty.sty) {
- (&ty::TyBox(a), &ty::TyBox(b)) |
- (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
- &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
- (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
- &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
- let (inner_source, inner_target) = (a, b);
-
- let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) {
- // Normally, the source is a thin pointer and we are
- // adding extra info to make a fat pointer. The exception
- // is when we are upcasting an existing object fat pointer
- // to use a different vtable. In that case, we want to
- // load out the original data pointer so we can repackage
- // it.
- (Load(bcx, get_dataptr(bcx, source.val)),
- Some(Load(bcx, get_meta(bcx, source.val))))
- } else {
- let val = if source.kind.is_by_ref() {
- load_ty(bcx, source.val, source.ty)
- } else {
- source.val
- };
- (val, None)
- };
-
- let info = unsized_info(bcx.ccx(), inner_source, inner_target, old_info);
-
- // Compute the base pointer. This doesn't change the pointer value,
- // but merely its type.
- let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to();
- let base = PointerCast(bcx, base, ptr_ty);
-
- Store(bcx, base, get_dataptr(bcx, target.val));
- Store(bcx, info, get_meta(bcx, target.val));
- }
-
- // This can be extended to enums and tuples in the future.
- // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
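-        // A struct-to-struct unsize, e.g. `Rc<[i32; 3]> -> Rc<[i32]>`: only
-        // the one field singled out by the `CoerceUnsized` impl changes
-        // representation; all other fields are copied over unchanged.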
- (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => {
- assert_eq!(def_id_a, def_id_b);
-
- // The target is already by-ref because it's to be written to.
- let source = unpack_datum!(bcx, source.to_ref_datum(bcx));
- assert!(target.kind.is_by_ref());
-
- let kind = custom_coerce_unsize_info(bcx.ccx().shared(),
- source.ty,
- target.ty);
-
- let repr_source = adt::represent_type(bcx.ccx(), source.ty);
- let src_fields = match &*repr_source {
- &adt::Repr::Univariant(ref s, _) => &s.fields,
- _ => span_bug!(span,
- "Non univariant struct? (repr_source: {:?})",
- repr_source),
- };
- let repr_target = adt::represent_type(bcx.ccx(), target.ty);
- let target_fields = match &*repr_target {
- &adt::Repr::Univariant(ref s, _) => &s.fields,
- _ => span_bug!(span,
- "Non univariant struct? (repr_target: {:?})",
- repr_target),
- };
-
- let coerce_index = match kind {
- CustomCoerceUnsized::Struct(i) => i
- };
- assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
-
- let source_val = adt::MaybeSizedValue::sized(source.val);
- let target_val = adt::MaybeSizedValue::sized(target.val);
-
- let iter = src_fields.iter().zip(target_fields).enumerate();
- for (i, (src_ty, target_ty)) in iter {
- let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i);
- let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i);
-
- // If this is the field we need to coerce, recurse on it.
- if i == coerce_index {
- coerce_unsized(bcx, span,
- Datum::new(ll_source, src_ty,
- Rvalue::new(ByRef)),
- Datum::new(ll_target, target_ty,
- Rvalue::new(ByRef)));
- } else {
- // Otherwise, simply copy the data from the source.
- assert!(src_ty.is_phantom_data() || src_ty == target_ty);
- memcpy_ty(bcx, ll_target, ll_source, src_ty);
- }
- }
- }
- _ => bug!("coerce_unsized: invalid coercion {:?} -> {:?}",
- source.ty,
- target.ty)
- }
- bcx
-}
-
-/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory
-/// that the expr represents.
-///
-/// If this expression is an rvalue, this implies introducing a temporary. In other words,
-/// something like `x().f` is translated into roughly the equivalent of
-///
-/// { tmp = x(); tmp.f }
-pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- name: &str)
- -> DatumBlock<'blk, 'tcx, Lvalue> {
- let mut bcx = bcx;
- let datum = unpack_datum!(bcx, trans(bcx, expr));
- return datum.to_lvalue_datum(bcx, name, expr.id);
-}
-
-/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this
-/// directly.
-fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
-
- debug!("trans_unadjusted(expr={:?})", expr);
- let _indenter = indenter();
-
- expr.debug_loc().apply(bcx.fcx);
-
- return match expr_kind(bcx.tcx(), expr) {
- ExprKind::Lvalue | ExprKind::RvalueDatum => {
- let datum = unpack_datum!(bcx, {
- trans_datum_unadjusted(bcx, expr)
- });
-
- DatumBlock {bcx: bcx, datum: datum}
- }
-
- ExprKind::RvalueStmt => {
- bcx = trans_rvalue_stmt_unadjusted(bcx, expr);
- nil(bcx, expr_ty(bcx, expr))
- }
-
- ExprKind::RvalueDps => {
- let ty = expr_ty(bcx, expr);
- if type_is_zero_size(bcx.ccx(), ty) {
- bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore);
- nil(bcx, ty)
- } else {
- let scratch = rvalue_scratch_datum(bcx, ty, "");
- bcx = trans_rvalue_dps_unadjusted(
- bcx, expr, SaveIn(scratch.val));
-
- // Note: this is not obviously a good idea. It causes
- // immediate values to be loaded immediately after a
- // return from a call or other similar expression,
- // which in turn leads to alloca's having shorter
- // lifetimes and hence larger stack frames. However,
-                // it can also lead to more register pressure.
- // Still, in practice it seems to increase
- // performance, since we have fewer problems with
- // morestack churn.
- let scratch = unpack_datum!(
- bcx, scratch.to_appropriate_datum(bcx));
-
- DatumBlock::new(bcx, scratch.to_expr_datum())
- }
- }
- };
-
- fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
- let datum = immediate_rvalue(llval, ty);
- DatumBlock::new(bcx, datum.to_expr_datum())
- }
-}
-
-fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
- let fcx = bcx.fcx;
- let _icx = push_ctxt("trans_datum_unadjusted");
-
- match expr.node {
- hir::ExprType(ref e, _) => {
- trans(bcx, &e)
- }
- hir::ExprPath(..) => {
- let var = trans_var(bcx, bcx.tcx().expect_def(expr.id));
- DatumBlock::new(bcx, var.to_expr_datum())
- }
- hir::ExprField(ref base, name) => {
- trans_rec_field(bcx, &base, name.node)
- }
- hir::ExprTupField(ref base, idx) => {
- trans_rec_tup_field(bcx, &base, idx.node)
- }
- hir::ExprIndex(ref base, ref idx) => {
- trans_index(bcx, expr, &base, &idx, MethodCall::expr(expr.id))
- }
- hir::ExprBox(ref contents) => {
- // Special case for `Box<T>`
- let box_ty = expr_ty(bcx, expr);
- let contents_ty = expr_ty(bcx, &contents);
- match box_ty.sty {
- ty::TyBox(..) => {
- trans_uniq_expr(bcx, expr, box_ty, &contents, contents_ty)
- }
- _ => span_bug!(expr.span,
- "expected unique box")
- }
-
- }
- hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &lit),
- hir::ExprBinary(op, ref lhs, ref rhs) => {
- trans_binary(bcx, expr, op, &lhs, &rhs)
- }
- hir::ExprUnary(op, ref x) => {
- trans_unary(bcx, expr, op, &x)
- }
- hir::ExprAddrOf(_, ref x) => {
- match x.node {
- hir::ExprRepeat(..) | hir::ExprVec(..) => {
- // Special case for slices.
- let cleanup_debug_loc =
- debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(),
- x.id,
- x.span,
- false);
- fcx.push_ast_cleanup_scope(cleanup_debug_loc);
- let datum = unpack_datum!(
- bcx, tvec::trans_slice_vec(bcx, expr, &x));
- bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id);
- DatumBlock::new(bcx, datum)
- }
- _ => {
- trans_addr_of(bcx, expr, &x)
- }
- }
- }
- hir::ExprCast(ref val, _) => {
- // Datum output mode means this is a scalar cast:
- trans_imm_cast(bcx, &val, expr.id)
- }
- _ => {
- span_bug!(
- expr.span,
- "trans_rvalue_datum_unadjusted reached \
- fall-through case: {:?}",
- expr.node);
- }
- }
-}
-
-fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- base: &hir::Expr,
- get_idx: F)
- -> DatumBlock<'blk, 'tcx, Expr> where
- F: FnOnce(TyCtxt<'blk, 'tcx, 'tcx>, &VariantInfo<'tcx>) -> usize,
-{
- let mut bcx = bcx;
- let _icx = push_ctxt("trans_rec_field");
-
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field"));
- let bare_ty = base_datum.ty;
- let repr = adt::represent_type(bcx.ccx(), bare_ty);
- let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None);
-
- let ix = get_idx(bcx.tcx(), &vinfo);
- let d = base_datum.get_element(
- bcx,
- vinfo.fields[ix].1,
- |srcval| {
- adt::trans_field_ptr(bcx, &repr, srcval, vinfo.discr, ix)
- });
-
- if type_is_sized(bcx.tcx(), d.ty) {
- DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
- } else {
- let scratch = rvalue_scratch_datum(bcx, d.ty, "");
- Store(bcx, d.val, get_dataptr(bcx, scratch.val));
- let info = Load(bcx, get_meta(bcx, base_datum.val));
- Store(bcx, info, get_meta(bcx, scratch.val));
-
- // Always generate an lvalue datum, because this pointer doesn't own
- // the data and cleanup is scheduled elsewhere.
- DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind)))
- }
-}
-
-/// Translates `base.field`.
-fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- base: &hir::Expr,
- field: ast::Name)
- -> DatumBlock<'blk, 'tcx, Expr> {
- trans_field(bcx, base, |_, vinfo| vinfo.field_index(field))
-}
-
-/// Translates `base.<idx>`.
-fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- base: &hir::Expr,
- idx: usize)
- -> DatumBlock<'blk, 'tcx, Expr> {
- trans_field(bcx, base, |_, _| idx)
-}
-
-fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- index_expr: &hir::Expr,
- base: &hir::Expr,
- idx: &hir::Expr,
- method_call: MethodCall)
- -> DatumBlock<'blk, 'tcx, Expr> {
- //! Translates `base[idx]`.
-
- let _icx = push_ctxt("trans_index");
- let ccx = bcx.ccx();
- let mut bcx = bcx;
-
- let index_expr_debug_loc = index_expr.debug_loc();
-
- // Check for overloaded index.
- let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
- let elt_datum = match method {
- Some(method) => {
- let method_ty = monomorphize_type(bcx, method.ty);
-
- let base_datum = unpack_datum!(bcx, trans(bcx, base));
-
- // Translate index expression.
- let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
-
- let ref_ty = // invoked methods have LB regions instantiated:
- bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap();
- let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) {
- None => {
- span_bug!(index_expr.span,
- "index method didn't return a \
- dereferenceable type?!")
- }
- Some(elt_tm) => elt_tm.ty,
- };
-
- // Overloaded. Invoke the index() method, which basically
- // yields a `&T` pointer. We can then proceed down the
- // normal path (below) to dereference that `&T`.
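-            // For example, `v[i]` for `v: Vec<T>` invokes
-            // `Index::index(&v, i)`, yielding a `&T`.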
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt");
-
- bcx = Callee::method(bcx, method)
- .call(bcx, index_expr_debug_loc,
- ArgOverloadedOp(base_datum, Some(ix_datum)),
- Some(SaveIn(scratch.val))).bcx;
-
- let datum = scratch.to_expr_datum();
- let lval = Lvalue::new("expr::trans_index overload");
- if type_is_sized(bcx.tcx(), elt_ty) {
- Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval))
- } else {
- Datum::new(datum.val, elt_ty, LvalueExpr(lval))
- }
- }
- None => {
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx,
- base,
- "index"));
-
- // Translate index expression and cast to a suitable LLVM integer.
- // Rust is less strict than LLVM in this regard.
- let ix_datum = unpack_datum!(bcx, trans(bcx, idx));
- let ix_val = ix_datum.to_llscalarish(bcx);
- let ix_size = machine::llbitsize_of_real(bcx.ccx(),
- val_ty(ix_val));
- let int_size = machine::llbitsize_of_real(bcx.ccx(),
- ccx.int_type());
- let ix_val = {
- if ix_size < int_size {
- if expr_ty(bcx, idx).is_signed() {
- SExt(bcx, ix_val, ccx.int_type())
- } else { ZExt(bcx, ix_val, ccx.int_type()) }
- } else if ix_size > int_size {
- Trunc(bcx, ix_val, ccx.int_type())
- } else {
- ix_val
- }
- };
-
- let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx());
-
- let (base, len) = base_datum.get_vec_base_and_len(bcx);
-
- debug!("trans_index: base {:?}", Value(base));
- debug!("trans_index: len {:?}", Value(len));
-
- let bounds_check = ICmp(bcx,
- llvm::IntUGE,
- ix_val,
- len,
- index_expr_debug_loc);
- let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
- let expected = Call(bcx,
- expect,
- &[bounds_check, C_bool(ccx, false)],
- index_expr_debug_loc);
- bcx = with_cond(bcx, expected, |bcx| {
- controlflow::trans_fail_bounds_check(bcx,
- expr_info(index_expr),
- ix_val,
- len)
- });
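-            // Conceptually the code emitted here is:
-            //
-            //     if ix >= len { panic!("index out of bounds") }
-            //     // ...then compute the element pointer for base[ix]
-            //
-            // with the failing branch hinted cold via `llvm.expect.i1`.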
- let elt = InBoundsGEP(bcx, base, &[ix_val]);
- let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to());
- let lval = Lvalue::new("expr::trans_index fallback");
- Datum::new(elt, unit_ty, LvalueExpr(lval))
- }
- };
-
- DatumBlock::new(bcx, elt_datum)
-}
-
-/// Translates a reference to a variable.
-pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def)
- -> Datum<'tcx, Lvalue> {
-
- match def {
- Def::Static(did, _) => consts::get_static(bcx.ccx(), did),
- Def::Upvar(_, nid, _, _) => {
- // Can't move upvars, so this is never a ZeroMemLastUse.
- let local_ty = node_id_type(bcx, nid);
- let lval = Lvalue::new_with_hint("expr::trans_var (upvar)",
- bcx, nid, HintKind::ZeroAndMaintain);
- match bcx.fcx.llupvars.borrow().get(&nid) {
- Some(&val) => Datum::new(val, local_ty, lval),
- None => {
- bug!("trans_var: no llval for upvar {} found", nid);
- }
- }
- }
- Def::Local(_, nid) => {
- let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
- Some(&v) => v,
- None => {
- bug!("trans_var: no datum for local/arg {} found", nid);
- }
- };
- debug!("take_local(nid={}, v={:?}, ty={})",
- nid, Value(datum.val), datum.ty);
- datum
- }
- _ => bug!("{:?} should not reach expr::trans_var", def)
- }
-}
-
-fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr)
- -> Block<'blk, 'tcx> {
- let mut bcx = bcx;
- let _icx = push_ctxt("trans_rvalue_stmt");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- expr.debug_loc().apply(bcx.fcx);
-
- match expr.node {
- hir::ExprBreak(label_opt) => {
- controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node))
- }
- hir::ExprType(ref e, _) => {
- trans_into(bcx, &e, Ignore)
- }
- hir::ExprAgain(label_opt) => {
- controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node))
- }
- hir::ExprRet(ref ex) => {
- // Check to see if the return expression itself is reachable.
- // This can occur when the inner expression contains a return
- let reachable = if let Some(ref cfg) = bcx.fcx.cfg {
- cfg.node_is_reachable(expr.id)
- } else {
- true
- };
-
- if reachable {
- controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e))
- } else {
- // If it's not reachable, just translate the inner expression
- // directly. This avoids having to manage a return slot when
- // it won't actually be used anyway.
- if let &Some(ref x) = ex {
- bcx = trans_into(bcx, &x, Ignore);
- }
- // Mark the end of the block as unreachable. Once we get to
- // a return expression, there's no more we should be doing
- // after this.
- Unreachable(bcx);
- bcx
- }
- }
- hir::ExprWhile(ref cond, ref body, _) => {
- controlflow::trans_while(bcx, expr, &cond, &body)
- }
- hir::ExprLoop(ref body, _) => {
- controlflow::trans_loop(bcx, expr, &body)
- }
- hir::ExprAssign(ref dst, ref src) => {
- let src_datum = unpack_datum!(bcx, trans(bcx, &src));
- let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &dst, "assign"));
-
- if bcx.fcx.type_needs_drop(dst_datum.ty) {
- // If there are destructors involved, make sure we
-            // are copying from an rvalue, since that cannot possibly
- // alias an lvalue. We are concerned about code like:
- //
- // a = a
- //
- // but also
- //
- // a = a.b
- //
- // where e.g. a : Option<Foo> and a.b :
- // Option<Foo>. In that case, freeing `a` before the
- // assignment may also free `a.b`!
- //
- // We could avoid this intermediary with some analysis
- // to determine whether `dst` may possibly own `src`.
- expr.debug_loc().apply(bcx.fcx);
- let src_datum = unpack_datum!(
- bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign"));
- let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx);
- let opt_hint_val = opt_hint_datum.map(|d|d.to_value());
-
- // 1. Drop the data at the destination, passing the
- // drop-hint in case the lvalue has already been
- // dropped or moved.
- bcx = glue::drop_ty_core(bcx,
- dst_datum.val,
- dst_datum.ty,
- expr.debug_loc(),
- false,
- opt_hint_val);
-
- // 2. We are overwriting the destination; ensure that
- // its drop-hint (if any) says "initialized."
- if let Some(hint_val) = opt_hint_val {
- let hint_llval = hint_val.value();
- let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT);
- Store(bcx, drop_needed, hint_llval);
- }
- src_datum.store_to(bcx, dst_datum.val)
- } else {
- src_datum.store_to(bcx, dst_datum.val)
- }
- }
- hir::ExprAssignOp(op, ref dst, ref src) => {
- let method = bcx.tcx().tables
- .borrow()
- .method_map
- .get(&MethodCall::expr(expr.id)).cloned();
-
- if let Some(method) = method {
- let dst = unpack_datum!(bcx, trans(bcx, &dst));
- let src_datum = unpack_datum!(bcx, trans(bcx, &src));
-
- Callee::method(bcx, method)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(dst, Some(src_datum)), None).bcx
- } else {
- trans_assign_op(bcx, expr, op, &dst, &src)
- }
- }
- hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
- let outputs = outputs.iter().map(|output| {
- let out_datum = unpack_datum!(bcx, trans(bcx, output));
- unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id))
- }).collect();
- let inputs = inputs.iter().map(|input| {
- let input = unpack_datum!(bcx, trans(bcx, input));
- let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in"));
- input.to_llscalarish(bcx)
- }).collect();
- asm::trans_inline_asm(bcx, a, outputs, inputs);
- bcx
- }
- _ => {
- span_bug!(
- expr.span,
- "trans_rvalue_stmt_unadjusted reached \
- fall-through case: {:?}",
- expr.node);
- }
- }
-}
-
-fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
- let mut bcx = bcx;
-
- expr.debug_loc().apply(bcx.fcx);
-
- // Entry into the method table if this is an overloaded call/op.
- let method_call = MethodCall::expr(expr.id);
-
- match expr.node {
- hir::ExprType(ref e, _) => {
- trans_into(bcx, &e, dest)
- }
- hir::ExprPath(..) => {
- trans_def_dps_unadjusted(bcx, expr, bcx.tcx().expect_def(expr.id), dest)
- }
- hir::ExprIf(ref cond, ref thn, ref els) => {
- controlflow::trans_if(bcx, expr.id, &cond, &thn, els.as_ref().map(|e| &**e), dest)
- }
- hir::ExprMatch(ref discr, ref arms, _) => {
- _match::trans_match(bcx, expr, &discr, &arms[..], dest)
- }
- hir::ExprBlock(ref blk) => {
- controlflow::trans_block(bcx, &blk, dest)
- }
- hir::ExprStruct(_, ref fields, ref base) => {
- trans_struct(bcx,
- &fields[..],
- base.as_ref().map(|e| &**e),
- expr.span,
- expr.id,
- node_id_type(bcx, expr.id),
- dest)
- }
- hir::ExprTup(ref args) => {
- let numbered_fields: Vec<(usize, &hir::Expr)> =
- args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect();
- trans_adt(bcx,
- expr_ty(bcx, expr),
- Disr(0),
- &numbered_fields[..],
- None,
- dest,
- expr.debug_loc())
- }
- hir::ExprLit(ref lit) => {
- match lit.node {
- ast::LitKind::Str(ref s, _) => {
- tvec::trans_lit_str(bcx, expr, (*s).clone(), dest)
- }
- _ => {
- span_bug!(expr.span,
- "trans_rvalue_dps_unadjusted shouldn't be \
- translating this type of literal")
- }
- }
- }
- hir::ExprVec(..) | hir::ExprRepeat(..) => {
- tvec::trans_fixed_vstore(bcx, expr, dest)
- }
- hir::ExprClosure(_, ref decl, ref body, _) => {
- let dest = match dest {
- SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest),
- Ignore => closure::Dest::Ignore(bcx.ccx())
- };
-
- // NB. To get the id of the closure, we don't use
- // `local_def_id(id)`, but rather we extract the closure
- // def-id from the expr's type. This is because this may
- // be an inlined expression from another crate, and we
- // want to get the ORIGINAL closure def-id, since that is
- // the key we need to find the closure-kind and
- // closure-type etc.
- let (def_id, substs) = match expr_ty(bcx, expr).sty {
- ty::TyClosure(def_id, substs) => (def_id, substs),
- ref t =>
- span_bug!(
- expr.span,
- "closure expr without closure type: {:?}", t),
- };
-
- closure::trans_closure_expr(dest,
- decl,
- body,
- expr.id,
- def_id,
- substs).unwrap_or(bcx)
- }
- hir::ExprCall(ref f, ref args) => {
- let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
- let (callee, args) = if let Some(method) = method {
- let mut all_args = vec![&**f];
- all_args.extend(args.iter().map(|e| &**e));
-
- (Callee::method(bcx, method), ArgOverloadedCall(all_args))
- } else {
- let f = unpack_datum!(bcx, trans(bcx, f));
- (match f.ty.sty {
- ty::TyFnDef(def_id, substs, _) => {
- Callee::def(bcx.ccx(), def_id, substs)
- }
- ty::TyFnPtr(_) => {
- let f = unpack_datum!(bcx,
- f.to_rvalue_datum(bcx, "callee"));
- Callee::ptr(f)
- }
- _ => {
- span_bug!(expr.span,
- "type of callee is not a fn: {}", f.ty);
- }
- }, ArgExprs(&args))
- };
- callee.call(bcx, expr.debug_loc(), args, Some(dest)).bcx
- }
- hir::ExprMethodCall(_, _, ref args) => {
- Callee::method_call(bcx, method_call)
- .call(bcx, expr.debug_loc(), ArgExprs(&args), Some(dest)).bcx
- }
- hir::ExprBinary(op, ref lhs, ref rhs_expr) => {
- // if not overloaded, would be RvalueDatumExpr
- let lhs = unpack_datum!(bcx, trans(bcx, &lhs));
- let mut rhs = unpack_datum!(bcx, trans(bcx, &rhs_expr));
- if !op.node.is_by_value() {
- rhs = unpack_datum!(bcx, auto_ref(bcx, rhs, rhs_expr));
- }
-
- Callee::method_call(bcx, method_call)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(lhs, Some(rhs)), Some(dest)).bcx
- }
- hir::ExprUnary(_, ref subexpr) => {
- // if not overloaded, would be RvalueDatumExpr
- let arg = unpack_datum!(bcx, trans(bcx, &subexpr));
-
- Callee::method_call(bcx, method_call)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(arg, None), Some(dest)).bcx
- }
- hir::ExprCast(..) => {
- // Trait casts used to come this way, now they should be coercions.
- span_bug!(expr.span, "DPS expr_cast (residual trait cast?)")
- }
- hir::ExprAssignOp(op, _, _) => {
- span_bug!(
- expr.span,
- "augmented assignment `{}=` should always be a rvalue_stmt",
- op.node.as_str())
- }
- _ => {
- span_bug!(
- expr.span,
- "trans_rvalue_dps_unadjusted reached fall-through \
- case: {:?}",
- expr.node);
- }
- }
-}
-
-fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- ref_expr: &hir::Expr,
- def: Def,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_def_dps_unadjusted");
-
- let lldest = match dest {
- SaveIn(lldest) => lldest,
- Ignore => { return bcx; }
- };
-
- let ty = expr_ty(bcx, ref_expr);
- if let ty::TyFnDef(..) = ty.sty {
- // Zero-sized function or ctor.
- return bcx;
- }
-
- match def {
- Def::Variant(tid, vid) => {
- let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid);
- // Nullary variant.
- let ty = expr_ty(bcx, ref_expr);
- let repr = adt::represent_type(bcx.ccx(), ty);
- adt::trans_set_discr(bcx, &repr, lldest, Disr::from(variant.disr_val));
- bcx
- }
- Def::Struct(..) => {
- match ty.sty {
- ty::TyStruct(def, _) if def.has_dtor() => {
- let repr = adt::represent_type(bcx.ccx(), ty);
- adt::trans_set_discr(bcx, &repr, lldest, Disr(0));
- }
- _ => {}
- }
- bcx
- }
- _ => {
- span_bug!(ref_expr.span,
- "Non-DPS def {:?} referened by {}",
- def, bcx.node_id_to_string(ref_expr.id));
- }
- }
-}
-
-fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- fields: &[hir::Field],
- base: Option<&hir::Expr>,
- expr_span: syntax_pos::Span,
- expr_id: ast::NodeId,
- ty: Ty<'tcx>,
- dest: Dest) -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_rec");
-
- let tcx = bcx.tcx();
- let vinfo = VariantInfo::of_node(tcx, ty, expr_id);
-
- let mut need_base = vec![true; vinfo.fields.len()];
-
- let numbered_fields = fields.iter().map(|field| {
- let pos = vinfo.field_index(field.name.node);
- need_base[pos] = false;
- (pos, &*field.expr)
- }).collect::<Vec<_>>();
-
- let optbase = match base {
- Some(base_expr) => {
- let mut leftovers = Vec::new();
- for (i, b) in need_base.iter().enumerate() {
- if *b {
- leftovers.push((i, vinfo.fields[i].1));
- }
- }
- Some(StructBaseInfo {expr: base_expr,
- fields: leftovers })
- }
- None => {
- if need_base.iter().any(|b| *b) {
- span_bug!(expr_span, "missing fields and no base expr")
- }
- None
- }
- };
-
- trans_adt(bcx,
- ty,
- vinfo.discr,
- &numbered_fields,
- optbase,
- dest,
- DebugLoc::At(expr_id, expr_span))
-}
-
-/// Information that `trans_adt` needs in order to fill in the fields
-/// of a struct copied from a base struct (e.g., from an expression
-/// like `Foo { a: b, ..base }`).
-///
-/// Note that `fields` may be empty; the base expression must always be
-/// evaluated for side-effects.
-pub struct StructBaseInfo<'a, 'tcx> {
- /// The base expression; will be evaluated after all explicit fields.
- expr: &'a hir::Expr,
- /// The indices of fields to copy paired with their types.
- fields: Vec<(usize, Ty<'tcx>)>
-}
-
-/// Constructs an ADT instance:
-///
-/// - `fields` should be a list of field indices paired with the
-/// expression to store into that field. The initializers will be
-/// evaluated in the order specified by `fields`.
-///
-/// - `optbase` contains information on the base struct (if any) from
-/// which remaining fields are copied; see comments on `StructBaseInfo`.
-pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- ty: Ty<'tcx>,
- discr: Disr,
- fields: &[(usize, &hir::Expr)],
- optbase: Option<StructBaseInfo<'a, 'tcx>>,
- dest: Dest,
- debug_location: DebugLoc)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_adt");
- let fcx = bcx.fcx;
- let repr = adt::represent_type(bcx.ccx(), ty);
-
- debug_location.apply(bcx.fcx);
-
- // If we don't care about the result, just make a
- // temporary stack slot
- let addr = match dest {
- SaveIn(pos) => pos,
- Ignore => {
- let llresult = alloc_ty(bcx, ty, "temp");
- call_lifetime_start(bcx, llresult);
- llresult
- }
- };
-
- debug!("trans_adt");
-
-    // This scope holds intermediates that must be cleaned up should a
-    // panic occur before the ADT as a whole is ready.
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
-
- if ty.is_simd() {
-        // Issue 23112: The original logic appeared vulnerable to the same
-        // order-of-eval bug. But SIMD values are tuple-structs;
- // i.e. functional record update (FRU) syntax is unavailable.
- //
- // To be safe, double-check that we did not get here via FRU.
- assert!(optbase.is_none());
-
- // This is the constructor of a SIMD type, such types are
- // always primitive machine types and so do not have a
- // destructor or require any clean-up.
- let llty = type_of::type_of(bcx.ccx(), ty);
-
-        // Keep the vector in a register by running through the fields and
-        // `insertelement`ing each value directly into that register
-        // (i.e. avoid GEPi and `store`s to an alloca).
- let mut vec_val = C_undef(llty);
-
- for &(i, ref e) in fields {
- let block_datum = trans(bcx, &e);
- bcx = block_datum.bcx;
- let position = C_uint(bcx.ccx(), i);
- let value = block_datum.datum.to_llscalarish(bcx);
- vec_val = InsertElement(bcx, vec_val, value, position);
- }
- Store(bcx, vec_val, addr);
- } else if let Some(base) = optbase {
- // Issue 23112: If there is a base, then order-of-eval
- // requires field expressions eval'ed before base expression.
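-        // For example, in `Foo { a: f(), ..g() }` the call `f()` runs before
-        // `g()`, even though `g()` supplies the base that fills the remaining
-        // fields.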
-
- // First, trans field expressions to temporary scratch values.
- let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| {
- let datum = unpack_datum!(bcx, trans(bcx, &e));
- (i, datum)
- }).collect();
-
- debug_location.apply(bcx.fcx);
-
- // Second, trans the base to the dest.
- assert_eq!(discr, Disr(0));
-
- let addr = adt::MaybeSizedValue::sized(addr);
- match expr_kind(bcx.tcx(), &base.expr) {
- ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
- bcx = trans_into(bcx, &base.expr, SaveIn(addr.value));
- },
- ExprKind::RvalueStmt => {
- bug!("unexpected expr kind for struct base expr")
- }
- _ => {
- let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &base.expr, "base"));
- for &(i, t) in &base.fields {
- let datum = base_datum.get_element(
- bcx, t, |srcval| adt::trans_field_ptr(bcx, &repr, srcval, discr, i));
- assert!(type_is_sized(bcx.tcx(), datum.ty));
- let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
- bcx = datum.store_to(bcx, dest);
- }
- }
- }
-
- // Finally, move scratch field values into actual field locations
- for (i, datum) in scratch_vals {
- let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
- bcx = datum.store_to(bcx, dest);
- }
- } else {
- // No base means we can write all fields directly in place.
- let addr = adt::MaybeSizedValue::sized(addr);
- for &(i, ref e) in fields {
- let dest = adt::trans_field_ptr(bcx, &repr, addr, discr, i);
- let e_ty = expr_ty_adjusted(bcx, &e);
- bcx = trans_into(bcx, &e, SaveIn(dest));
- let scope = cleanup::CustomScope(custom_cleanup_scope);
- fcx.schedule_lifetime_end(scope, dest);
- // FIXME: nonzeroing move should generalize to fields
- fcx.schedule_drop_mem(scope, dest, e_ty, None);
- }
- }
-
- adt::trans_set_discr(bcx, &repr, addr, discr);
-
- fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
-
-    // If we don't care about the result, drop the temporary we made
- match dest {
- SaveIn(_) => bcx,
- Ignore => {
- bcx = glue::drop_ty(bcx, addr, ty, debug_location);
- base::call_lifetime_end(bcx, addr);
- bcx
- }
- }
-}
-
-
-fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- lit: &ast::Lit)
- -> DatumBlock<'blk, 'tcx, Expr> {
-    // This must not be a string constant; that is an RvalueDpsExpr
- let _icx = push_ctxt("trans_immediate_lit");
- let ty = expr_ty(bcx, expr);
- let v = consts::const_lit(bcx.ccx(), expr, lit);
- immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
-}
-
-fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- op: hir::UnOp,
- sub_expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let ccx = bcx.ccx();
- let mut bcx = bcx;
- let _icx = push_ctxt("trans_unary_datum");
-
- let method_call = MethodCall::expr(expr.id);
-
- // The only overloaded operator that is translated to a datum
-    // is an overloaded deref, since it always yields a `&T`.
- // Otherwise, we should be in the RvalueDpsExpr path.
- assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id));
-
- let un_ty = expr_ty(bcx, expr);
-
- let debug_loc = expr.debug_loc();
-
- match op {
- hir::UnNot => {
- let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc);
- immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock()
- }
- hir::UnNeg => {
- let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- let val = datum.to_llscalarish(bcx);
- let (bcx, llneg) = {
- if un_ty.is_fp() {
- let result = FNeg(bcx, val, debug_loc);
- (bcx, result)
- } else {
- let is_signed = un_ty.is_signed();
- let result = Neg(bcx, val, debug_loc);
- let bcx = if bcx.ccx().check_overflow() && is_signed {
- let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty);
- let is_min = ICmp(bcx, llvm::IntEQ, val,
- C_integral(llty, min, true), debug_loc);
- with_cond(bcx, is_min, |bcx| {
- let msg = InternedString::new(
- "attempt to negate with overflow");
- controlflow::trans_fail(bcx, expr_info(expr), msg)
- })
- } else {
- bcx
- };
- (bcx, result)
- }
- };
- immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock()
- }
- hir::UnDeref => {
- let datum = unpack_datum!(bcx, trans(bcx, sub_expr));
- deref_once(bcx, expr, datum, method_call)
- }
- }
-}
-
-fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- box_expr: &hir::Expr,
- box_ty: Ty<'tcx>,
- contents: &hir::Expr,
- contents_ty: Ty<'tcx>)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_uniq_expr");
- let fcx = bcx.fcx;
- assert!(type_is_sized(bcx.tcx(), contents_ty));
- let llty = type_of::type_of(bcx.ccx(), contents_ty);
- let size = llsize_of(bcx.ccx(), llty);
- let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty));
- let llty_ptr = llty.ptr_to();
- let Result { bcx, val } = malloc_raw_dyn(bcx,
- llty_ptr,
- box_ty,
- size,
- align,
- box_expr.debug_loc());
- // Unique boxes do not allocate for zero-size types. The standard library
- // may assume that `free` is never called on the pointer returned for
- // `Box<ZeroSizeType>`.
- let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 {
- trans_into(bcx, contents, SaveIn(val))
- } else {
- let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
- fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope),
- val, cleanup::HeapExchange, contents_ty);
- let bcx = trans_into(bcx, contents, SaveIn(val));
- fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
- bcx
- };
- immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
-}
-
-fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- subexpr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_addr_of");
- let mut bcx = bcx;
- let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
- let ty = expr_ty(bcx, expr);
- if !type_is_sized(bcx.tcx(), sub_datum.ty) {
- // Always generate an lvalue datum, because this pointer doesn't own
- // the data and cleanup is scheduled elsewhere.
- DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind)))
- } else {
- // Sized value, ref to a thin pointer
- immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock()
- }
-}
-
-fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- binop_expr: &hir::Expr,
- binop_ty: Ty<'tcx>,
- op: hir::BinOp,
- lhs: Datum<'tcx, Rvalue>,
- rhs: Datum<'tcx, Rvalue>)
- -> DatumBlock<'blk, 'tcx, Expr>
-{
- let _icx = push_ctxt("trans_scalar_binop");
-
- let lhs_t = lhs.ty;
- assert!(!lhs_t.is_simd());
- let is_float = lhs_t.is_fp();
- let is_signed = lhs_t.is_signed();
- let info = expr_info(binop_expr);
-
- let binop_debug_loc = binop_expr.debug_loc();
-
- let mut bcx = bcx;
- let lhs = lhs.to_llscalarish(bcx);
- let rhs = rhs.to_llscalarish(bcx);
- let val = match op.node {
- hir::BiAdd => {
- if is_float {
- FAdd(bcx, lhs, rhs, binop_debug_loc)
- } else {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- }
- hir::BiSub => {
- if is_float {
- FSub(bcx, lhs, rhs, binop_debug_loc)
- } else {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- }
- hir::BiMul => {
- if is_float {
- FMul(bcx, lhs, rhs, binop_debug_loc)
- } else {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- }
- hir::BiDiv => {
- if is_float {
- FDiv(bcx, lhs, rhs, binop_debug_loc)
- } else {
- // Only zero-check integers; fp /0 is NaN
- bcx = base::fail_if_zero_or_overflows(bcx,
- expr_info(binop_expr),
- op,
- lhs,
- rhs,
- lhs_t);
- if is_signed {
- SDiv(bcx, lhs, rhs, binop_debug_loc)
- } else {
- UDiv(bcx, lhs, rhs, binop_debug_loc)
- }
- }
- }
- hir::BiRem => {
- if is_float {
- FRem(bcx, lhs, rhs, binop_debug_loc)
- } else {
- // Only zero-check integers; fp %0 is NaN
- bcx = base::fail_if_zero_or_overflows(bcx,
- expr_info(binop_expr),
- op, lhs, rhs, lhs_t);
- if is_signed {
- SRem(bcx, lhs, rhs, binop_debug_loc)
- } else {
- URem(bcx, lhs, rhs, binop_debug_loc)
- }
- }
- }
- hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc),
- hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc),
- hir::BiBitXor => Xor(bcx, lhs, rhs, binop_debug_loc),
- hir::BiShl => {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- hir::BiShr => {
- let (newbcx, res) = with_overflow_check(
- bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc);
- bcx = newbcx;
- res
- }
- hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => {
- base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc)
- }
- _ => {
- span_bug!(binop_expr.span, "unexpected binop");
- }
- };
-
- immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
-}
-
-// refinement types would obviate the need for this
-#[derive(Clone, Copy)]
-enum lazy_binop_ty {
- lazy_and,
- lazy_or,
-}
-
-
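-// In `a && b` the operand `b` is evaluated only when `a` is true, and in
-// `a || b` only when `a` is false; e.g. `false && panic!()` must not panic.
-// The blocks below build the corresponding CFG diamond with a phi node at
-// the join point.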
-fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- binop_expr: &hir::Expr,
- op: lazy_binop_ty,
- a: &hir::Expr,
- b: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_lazy_binop");
- let binop_ty = expr_ty(bcx, binop_expr);
- let fcx = bcx.fcx;
-
- let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a);
- let lhs = lhs.to_llscalarish(past_lhs);
-
- if past_lhs.unreachable.get() {
- return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
- }
-
- // If the rhs can never be reached, don't generate code for it.
- if let Some(cond_val) = const_to_opt_uint(lhs) {
- match (cond_val, op) {
- (0, lazy_and) |
- (1, lazy_or) => {
- return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock();
- }
- _ => { /* continue */ }
- }
- }
-
- let join = fcx.new_id_block("join", binop_expr.id);
- let before_rhs = fcx.new_id_block("before_rhs", b.id);
-
- match op {
- lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None),
- lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None)
- }
-
- let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b);
- let rhs = rhs.to_llscalarish(past_rhs);
-
- if past_rhs.unreachable.get() {
- return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock();
- }
-
- Br(past_rhs, join.llbb, DebugLoc::None);
- let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs],
- &[past_lhs.llbb, past_rhs.llbb]);
-
- return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
-}
-
-fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- op: hir::BinOp,
- lhs: &hir::Expr,
- rhs: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let _icx = push_ctxt("trans_binary");
- let ccx = bcx.ccx();
-
- // if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx().is_method_call(expr.id));
-
- match op.node {
- hir::BiAnd => {
- trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs)
- }
- hir::BiOr => {
- trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs)
- }
- _ => {
- let mut bcx = bcx;
- let binop_ty = expr_ty(bcx, expr);
-
- let lhs = unpack_datum!(bcx, trans(bcx, lhs));
- let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
- debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs);
- let rhs = unpack_datum!(bcx, trans(bcx, rhs));
- let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
- debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs);
-
- if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
- assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
- "built-in binary operators on fat pointers are homogeneous");
- assert_eq!(binop_ty, bcx.tcx().types.bool);
- let val = base::compare_scalar_types(
- bcx,
- lhs.val,
- rhs.val,
- lhs.ty,
- op.node,
- expr.debug_loc());
- immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock()
- } else {
- assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty),
- "built-in binary operators on fat pointers are homogeneous");
- trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs)
- }
- }
- }
-}
-
-pub fn cast_is_noop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
- expr: &hir::Expr,
- t_in: Ty<'tcx>,
- t_out: Ty<'tcx>)
- -> bool {
- if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) {
- return true;
- }
-
- match (t_in.builtin_deref(true, ty::NoPreference),
- t_out.builtin_deref(true, ty::NoPreference)) {
- (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => {
- t_in == t_out
- }
- _ => {
- // This condition isn't redundant with the check for CoercionCast:
- // different types can be substituted into the same type, and
- // == equality can be overconservative if there are regions.
- t_in == t_out
- }
- }
-}
-
-fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- id: ast::NodeId)
- -> DatumBlock<'blk, 'tcx, Expr>
-{
- use rustc::ty::cast::CastTy::*;
- use rustc::ty::cast::IntTy::*;
-
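-    // Integer casts follow the usual Rust `as` rules: same-width casts are
-    // bit-for-bit, narrowing truncates, and widening zero- or sign-extends
-    // according to the *source* signedness. For example:
-    //
-    //     255u8   as u32 == 255         // zero-extend
-    //     (-1i8)  as u32 == 4294967295  // sign-extend, then reinterpret
-    //     300i32  as u8  == 44          // truncate (300 mod 256)
-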
- fn int_cast(bcx: Block,
- lldsttype: Type,
- llsrctype: Type,
- llsrc: ValueRef,
- signed: bool)
- -> ValueRef
- {
- let _icx = push_ctxt("int_cast");
- let srcsz = llsrctype.int_width();
- let dstsz = lldsttype.int_width();
- return if dstsz == srcsz {
- BitCast(bcx, llsrc, lldsttype)
- } else if srcsz > dstsz {
- TruncOrBitCast(bcx, llsrc, lldsttype)
- } else if signed {
- SExtOrBitCast(bcx, llsrc, lldsttype)
- } else {
- ZExtOrBitCast(bcx, llsrc, lldsttype)
- }
- }
-
- fn float_cast(bcx: Block,
- lldsttype: Type,
- llsrctype: Type,
- llsrc: ValueRef)
- -> ValueRef
- {
- let _icx = push_ctxt("float_cast");
- let srcsz = llsrctype.float_width();
- let dstsz = lldsttype.float_width();
- return if dstsz > srcsz {
- FPExt(bcx, llsrc, lldsttype)
- } else if srcsz > dstsz {
- FPTrunc(bcx, llsrc, lldsttype)
- } else { llsrc };
- }
-
- let _icx = push_ctxt("trans_cast");
- let mut bcx = bcx;
- let ccx = bcx.ccx();
-
- let t_in = expr_ty_adjusted(bcx, expr);
- let t_out = node_id_type(bcx, id);
-
- debug!("trans_cast({:?} as {:?})", t_in, t_out);
- let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
- let ll_t_out = type_of::immediate_type_of(ccx, t_out);
- // Convert the value to be cast into a ValueRef, either by-ref or
- // by-value as appropriate given its type:
- let mut datum = unpack_datum!(bcx, trans(bcx, expr));
-
- let datum_ty = monomorphize_type(bcx, datum.ty);
-
- if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) {
- datum.ty = t_out;
- return DatumBlock::new(bcx, datum);
- }
-
- if type_is_fat_ptr(bcx.tcx(), t_in) {
- assert!(datum.kind.is_by_ref());
- if type_is_fat_ptr(bcx.tcx(), t_out) {
- return DatumBlock::new(bcx, Datum::new(
- PointerCast(bcx, datum.val, ll_t_out.ptr_to()),
- t_out,
- Rvalue::new(ByRef)
- )).to_expr_datumblock();
- } else {
- // Return the address
- return immediate_rvalue_bcx(bcx,
- PointerCast(bcx,
- Load(bcx, get_dataptr(bcx, datum.val)),
- ll_t_out),
- t_out).to_expr_datumblock();
- }
- }
-
- let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast");
- let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast");
-
- let (llexpr, signed) = if let Int(CEnum) = r_t_in {
- let repr = adt::represent_type(ccx, t_in);
- let datum = unpack_datum!(
- bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id));
- let llexpr_ptr = datum.to_llref();
- let discr = adt::trans_get_discr(bcx, &repr, llexpr_ptr,
- Some(Type::i64(ccx)), true);
- ll_t_in = val_ty(discr);
- (discr, adt::is_discr_signed(&repr))
- } else {
- (datum.to_llscalarish(bcx), t_in.is_signed())
- };
-
- let newval = match (r_t_in, r_t_out) {
- (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => {
- PointerCast(bcx, llexpr, ll_t_out)
- }
- (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out),
- (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out),
-
- (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed),
- (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr),
- (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out),
- (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out),
- (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out),
- (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out),
-
- _ => span_bug!(expr.span,
- "translating unsupported cast: \
- {:?} -> {:?}",
- t_in,
- t_out)
- };
- return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
-}
-
-fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- op: hir::BinOp,
- dst: &hir::Expr,
- src: &hir::Expr)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("trans_assign_op");
- let mut bcx = bcx;
-
- debug!("trans_assign_op(expr={:?})", expr);
-
- // User-defined operator methods cannot be used with `+=` etc right now
- assert!(!bcx.tcx().is_method_call(expr.id));
-
- // Evaluate LHS (destination), which should be an lvalue
- let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op"));
- assert!(!bcx.fcx.type_needs_drop(dst.ty));
- let lhs = load_ty(bcx, dst.val, dst.ty);
- let lhs = immediate_rvalue(lhs, dst.ty);
-
- // Evaluate RHS - FIXME(#28160) this sucks
- let rhs = unpack_datum!(bcx, trans(bcx, &src));
- let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs"));
-
- // Perform computation and store the result
- let result_datum = unpack_datum!(
- bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs));
- return result_datum.store_to(bcx, dst.val);
-}
-
-fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- datum: Datum<'tcx, Expr>,
- expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
-
- // Ensure cleanup of `datum` if not already scheduled and obtain
- // a "by ref" pointer.
- let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id));
-
- // Compute final type. Note that we are loose with the region and
- // mutability, since those things don't matter in trans.
- let referent_ty = lv_datum.ty;
- let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReErased), referent_ty);
-
-    // Construct the resulting datum. The right datum to return here would be an Lvalue datum,
-    // because there is cleanup scheduled and the datum doesn't own the data, but for thin
-    // pointers we micro-optimize it to be an Rvalue datum to avoid the extra alloca and level
-    // of indirection; since the pointer is thin, this has no ill effects.
- let kind = if type_is_sized(bcx.tcx(), referent_ty) {
- RvalueExpr(Rvalue::new(ByValue))
- } else {
- LvalueExpr(lv_datum.kind)
- };
-
- // Get the pointer.
- let llref = lv_datum.to_llref();
- DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind))
-}
-
-fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- datum: Datum<'tcx, Expr>,
- times: usize)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let mut bcx = bcx;
- let mut datum = datum;
- for i in 0..times {
- let method_call = MethodCall::autoderef(expr.id, i as u32);
- datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call));
- }
- DatumBlock { bcx: bcx, datum: datum }
-}
-
-fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- datum: Datum<'tcx, Expr>,
- method_call: MethodCall)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let ccx = bcx.ccx();
-
- debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
- expr, datum, method_call);
-
- let mut bcx = bcx;
-
- // Check for overloaded deref.
- let method = ccx.tcx().tables.borrow().method_map.get(&method_call).cloned();
- let datum = match method {
- Some(method) => {
- let method_ty = monomorphize_type(bcx, method.ty);
-
- // Overloaded. Invoke the deref() method, which basically
- // converts from the `Smaht<T>` pointer that we have into
- // a `&T` pointer. We can then proceed down the normal
- // path (below) to dereference that `&T`.
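-            // For example, `*rc` for some `rc: Rc<T>` first calls
-            // `Deref::deref(&rc)` to obtain a `&T`, then derefs that `&T`.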
- let datum = if method_call.autoderef == 0 {
- datum
- } else {
- // Always perform an AutoPtr when applying an overloaded auto-deref
- unpack_datum!(bcx, auto_ref(bcx, datum, expr))
- };
-
- let ref_ty = // invoked methods have their LB regions instantiated
- ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap();
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
-
- bcx = Callee::method(bcx, method)
- .call(bcx, expr.debug_loc(),
- ArgOverloadedOp(datum, None),
- Some(SaveIn(scratch.val))).bcx;
- scratch.to_expr_datum()
- }
- None => {
- // Not overloaded. We already have a pointer we know how to deref.
- datum
- }
- };
-
- let r = match datum.ty.sty {
- ty::TyBox(content_ty) => {
- // Make sure we have an lvalue datum here to get the
- // proper cleanups scheduled
- let datum = unpack_datum!(
- bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
-
- if type_is_sized(bcx.tcx(), content_ty) {
- let ptr = load_ty(bcx, datum.val, datum.ty);
- DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind)))
- } else {
-                // A fat pointer and a DST lvalue have the same representation,
- // just different types. Since there is no temporary for `*e`
- // here (because it is unsized), we cannot emulate the sized
- // object code path for running drop glue and free. Instead,
- // we schedule cleanup for `e`, turning it into an lvalue.
-
- let lval = Lvalue::new("expr::deref_once ty_uniq");
- let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval));
- DatumBlock::new(bcx, datum)
- }
- }
-
- ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) |
- ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => {
- let lval = Lvalue::new("expr::deref_once ptr");
- if type_is_sized(bcx.tcx(), content_ty) {
- let ptr = datum.to_llscalarish(bcx);
-
- // Always generate an lvalue datum, even if datum.mode is
- // an rvalue. This is because datum.mode is only an
- // rvalue for non-owning pointers like &T or *T, in which
- // case cleanup *is* scheduled elsewhere, by the true
- // owner (or, in the case of *T, by the user).
- DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval)))
- } else {
-                // A fat pointer and a DST lvalue have the same representation,
- // just different types.
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval)))
- }
- }
-
- _ => {
- span_bug!(
- expr.span,
- "deref invoked on expr of invalid type {:?}",
- datum.ty);
- }
- };
-
- debug!("deref_once(expr={}, method_call={:?}, result={:?})",
- expr.id, method_call, r.datum);
-
- return r;
-}
-
-#[derive(Debug)]
-enum OverflowOp {
- Add,
- Sub,
- Mul,
- Shl,
- Shr,
-}
-
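-// With overflow checks enabled, integer arithmetic panics on wrap instead of
-// wrapping silently; e.g. `255u8 + 1` panics rather than yielding 0. Add, Sub
-// and Mul are checked via LLVM `*.with.overflow` intrinsics, while Shl and Shr
-// validate the shift amount against the LHS bit-width before shifting.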
-impl OverflowOp {
- fn codegen_strategy(&self) -> OverflowCodegen {
- use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck};
- match *self {
- OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add),
- OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub),
- OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul),
-
- OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl),
- OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr),
- }
- }
-}
-
-enum OverflowCodegen {
- ViaIntrinsic(OverflowOpViaIntrinsic),
- ViaInputCheck(OverflowOpViaInputCheck),
-}
-
-enum OverflowOpViaInputCheck { Shl, Shr, }
-
-#[derive(Debug)]
-enum OverflowOpViaIntrinsic { Add, Sub, Mul, }
-
-impl OverflowOpViaIntrinsic {
- fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef {
- let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty);
- bcx.ccx().get_intrinsic(&name)
- }
- fn to_intrinsic_name(&self, tcx: TyCtxt, ty: Ty) -> &'static str {
- use syntax::ast::IntTy::*;
- use syntax::ast::UintTy::*;
- use rustc::ty::{TyInt, TyUint};
-
- let new_sty = match ty.sty {
- TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] {
- "16" => TyInt(I16),
- "32" => TyInt(I32),
- "64" => TyInt(I64),
- _ => bug!("unsupported target word size")
- },
- TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] {
- "16" => TyUint(U16),
- "32" => TyUint(U32),
- "64" => TyUint(U64),
- _ => bug!("unsupported target word size")
- },
- ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(),
- _ => bug!("tried to get overflow intrinsic for {:?} applied to non-int type",
- *self)
- };
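-        // For example, on a 64-bit target a checked `isize + isize` addition
-        // lowers to "llvm.sadd.with.overflow.i64".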
-
- match *self {
- OverflowOpViaIntrinsic::Add => match new_sty {
- TyInt(I8) => "llvm.sadd.with.overflow.i8",
- TyInt(I16) => "llvm.sadd.with.overflow.i16",
- TyInt(I32) => "llvm.sadd.with.overflow.i32",
- TyInt(I64) => "llvm.sadd.with.overflow.i64",
-
- TyUint(U8) => "llvm.uadd.with.overflow.i8",
- TyUint(U16) => "llvm.uadd.with.overflow.i16",
- TyUint(U32) => "llvm.uadd.with.overflow.i32",
- TyUint(U64) => "llvm.uadd.with.overflow.i64",
-
- _ => bug!(),
- },
- OverflowOpViaIntrinsic::Sub => match new_sty {
- TyInt(I8) => "llvm.ssub.with.overflow.i8",
- TyInt(I16) => "llvm.ssub.with.overflow.i16",
- TyInt(I32) => "llvm.ssub.with.overflow.i32",
- TyInt(I64) => "llvm.ssub.with.overflow.i64",
-
- TyUint(U8) => "llvm.usub.with.overflow.i8",
- TyUint(U16) => "llvm.usub.with.overflow.i16",
- TyUint(U32) => "llvm.usub.with.overflow.i32",
- TyUint(U64) => "llvm.usub.with.overflow.i64",
-
- _ => bug!(),
- },
- OverflowOpViaIntrinsic::Mul => match new_sty {
- TyInt(I8) => "llvm.smul.with.overflow.i8",
- TyInt(I16) => "llvm.smul.with.overflow.i16",
- TyInt(I32) => "llvm.smul.with.overflow.i32",
- TyInt(I64) => "llvm.smul.with.overflow.i64",
-
- TyUint(U8) => "llvm.umul.with.overflow.i8",
- TyUint(U16) => "llvm.umul.with.overflow.i16",
- TyUint(U32) => "llvm.umul.with.overflow.i32",
- TyUint(U64) => "llvm.umul.with.overflow.i64",
-
- _ => bug!(),
- },
- }
- }
-
- fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>,
- info: NodeIdAndSpan,
- lhs_t: Ty<'tcx>, lhs: ValueRef,
- rhs: ValueRef,
- binop_debug_loc: DebugLoc)
- -> (Block<'blk, 'tcx>, ValueRef) {
- use rustc_const_math::{ConstMathErr, Op};
-
- let llfn = self.to_intrinsic(bcx, lhs_t);
-
- let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
- let result = ExtractValue(bcx, val, 0); // iN operation result
- let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
-
- let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false),
- binop_debug_loc);
-
- let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
- let expected = Call(bcx, expect, &[cond, C_bool(bcx.ccx(), false)],
- binop_debug_loc);
-
- let op = match *self {
- OverflowOpViaIntrinsic::Add => Op::Add,
- OverflowOpViaIntrinsic::Sub => Op::Sub,
- OverflowOpViaIntrinsic::Mul => Op::Mul
- };
-
- let bcx =
- base::with_cond(bcx, expected, |bcx|
- controlflow::trans_fail(bcx, info,
- InternedString::new(ConstMathErr::Overflow(op).description())));
-
- (bcx, result)
- }
-}
-
-impl OverflowOpViaInputCheck {
- fn build_with_input_check<'blk, 'tcx>(&self,
- bcx: Block<'blk, 'tcx>,
- info: NodeIdAndSpan,
- lhs_t: Ty<'tcx>,
- lhs: ValueRef,
- rhs: ValueRef,
- binop_debug_loc: DebugLoc)
- -> (Block<'blk, 'tcx>, ValueRef)
- {
- use rustc_const_math::{ConstMathErr, Op};
-
- let lhs_llty = val_ty(lhs);
- let rhs_llty = val_ty(rhs);
-
- // Panic if any bits are set outside of the bits that we always
- // mask in.
- //
- // Note that the mask's value is derived from the LHS type
- // (since that is where the 32/64 distinction is relevant) but
- // the mask's type must match the RHS type (since they will
- // both be fed into an and-binop).
- let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true);
-
- let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc);
- let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc);
- let (result, op) = match *self {
- OverflowOpViaInputCheck::Shl =>
- (build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), Op::Shl),
- OverflowOpViaInputCheck::Shr =>
- (build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), Op::Shr)
- };
- let bcx =
- base::with_cond(bcx, cond, |bcx|
- controlflow::trans_fail(bcx, info,
- InternedString::new(ConstMathErr::Overflow(op).description())));
-
- (bcx, result)
- }
-}
-
-// Check if an integer or vector contains a nonzero element.
-fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- value: ValueRef,
- binop_debug_loc: DebugLoc) -> ValueRef {
- let llty = val_ty(value);
- let kind = llty.kind();
- match kind {
- TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc),
- TypeKind::Vector => {
- // Check if any elements of the vector are nonzero by treating
- // it as a wide integer and checking if the integer is nonzero.
- let width = llty.vector_length() as u64 * llty.element_type().int_width();
- let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width));
- build_nonzero_check(bcx, int_value, binop_debug_loc)
- },
- _ => bug!("build_nonzero_check: expected Integer or Vector, found {:?}", kind),
- }
-}
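// The vector case above is just "OR all the lanes together"; viewing the
// lanes as one wide integer makes that a single compare. A plain-Rust
// sketch of the same predicate (illustrative only, not trans code):
//
//     fn any_nonzero(lanes: [u32; 4]) -> bool {
//         // Conceptually: bitcast <4 x i32> to one wide int, compare != 0.
//         lanes.iter().any(|&lane| lane != 0)
//     }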
-
-fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan,
- lhs_t: Ty<'tcx>, lhs: ValueRef,
- rhs: ValueRef,
- binop_debug_loc: DebugLoc)
- -> (Block<'blk, 'tcx>, ValueRef) {
- if bcx.unreachable.get() { return (bcx, _Undef(lhs)); }
- if bcx.ccx().check_overflow() {
-
- match oop.codegen_strategy() {
- OverflowCodegen::ViaIntrinsic(oop) =>
- oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
- OverflowCodegen::ViaInputCheck(oop) =>
- oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc),
- }
- } else {
- let res = match oop {
- OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc),
- OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc),
- OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc),
-
- OverflowOp::Shl =>
- build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc),
- OverflowOp::Shr =>
- build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc),
- };
- (bcx, res)
- }
-}
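// In source-level terms: when check_overflow() is on, `a + b` panics on
// overflow; when it is off, it wraps. A small sketch of both behaviours
// using std methods (not the trans code itself):
//
//     fn add_both_ways(a: u8, b: u8) -> (Option<u8>, u8) {
//         (a.checked_add(b),   // checked path: None signals overflow
//          a.wrapping_add(b))  // unchecked path: wraps modulo 2^8
//     }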
-
-/// We categorize expressions into four kinds: lvalues and three kinds of
-/// rvalues. The distinction between lvalue/rvalue is fundamental to the
-/// language. The distinction between the three kinds of rvalues is an
-/// artifact of trans which reflects how we will generate code for that
-/// kind of expression. See trans/expr.rs for more information.
-#[derive(Copy, Clone)]
-enum ExprKind {
- Lvalue,
- RvalueDps,
- RvalueDatum,
- RvalueStmt
-}
-
-fn expr_kind<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, expr: &hir::Expr) -> ExprKind {
- if tcx.is_method_call(expr.id) {
- // Overloaded operations are generally calls, and hence they are
- // generated via DPS, but there are a few exceptions:
- return match expr.node {
- // `a += b` has a unit result.
- hir::ExprAssignOp(..) => ExprKind::RvalueStmt,
-
- // the deref method invoked for `*a` always yields an `&T`
- hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue,
-
- // the index method invoked for `a[i]` always yields an `&T`
- hir::ExprIndex(..) => ExprKind::Lvalue,
-
- // in the general case, result could be any type, use DPS
- _ => ExprKind::RvalueDps
- };
- }
-
- match expr.node {
- hir::ExprPath(..) => {
- match tcx.expect_def(expr.id) {
- // Put functions and ctors with the ADTs, as they
- // are zero-sized, so DPS is the cheapest option.
- Def::Struct(..) | Def::Variant(..) |
- Def::Fn(..) | Def::Method(..) => {
- ExprKind::RvalueDps
- }
-
- // Note: there is actually a good case to be made that
- // DefArg's, particularly those of immediate type, ought to be
- // considered rvalues.
- Def::Static(..) |
- Def::Upvar(..) |
- Def::Local(..) => ExprKind::Lvalue,
-
- Def::Const(..) |
- Def::AssociatedConst(..) => ExprKind::RvalueDatum,
-
- def => {
- span_bug!(
- expr.span,
- "uncategorized def for expr {}: {:?}",
- expr.id,
- def);
- }
- }
- }
-
- hir::ExprType(ref expr, _) => {
- expr_kind(tcx, expr)
- }
-
- hir::ExprUnary(hir::UnDeref, _) |
- hir::ExprField(..) |
- hir::ExprTupField(..) |
- hir::ExprIndex(..) => {
- ExprKind::Lvalue
- }
-
- hir::ExprCall(..) |
- hir::ExprMethodCall(..) |
- hir::ExprStruct(..) |
- hir::ExprTup(..) |
- hir::ExprIf(..) |
- hir::ExprMatch(..) |
- hir::ExprClosure(..) |
- hir::ExprBlock(..) |
- hir::ExprRepeat(..) |
- hir::ExprVec(..) => {
- ExprKind::RvalueDps
- }
-
- hir::ExprLit(ref lit) if lit.node.is_str() => {
- ExprKind::RvalueDps
- }
-
- hir::ExprBreak(..) |
- hir::ExprAgain(..) |
- hir::ExprRet(..) |
- hir::ExprWhile(..) |
- hir::ExprLoop(..) |
- hir::ExprAssign(..) |
- hir::ExprInlineAsm(..) |
- hir::ExprAssignOp(..) => {
- ExprKind::RvalueStmt
- }
-
- hir::ExprLit(_) | // Note: LitStr is carved out above
- hir::ExprUnary(..) |
- hir::ExprBox(_) |
- hir::ExprAddrOf(..) |
- hir::ExprBinary(..) |
- hir::ExprCast(..) => {
- ExprKind::RvalueDatum
- }
- }
-}
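// Concrete pairings for the classification above (illustrative):
//
//     v[i]                 // hir::ExprIndex   -> ExprKind::Lvalue
//     S { f: 0 }           // hir::ExprStruct  -> ExprKind::RvalueDps
//     1 + 2                // hir::ExprBinary  -> ExprKind::RvalueDatum
//     return x             // hir::ExprRet     -> ExprKind::RvalueStmt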
use rustc::traits;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use adt;
-use adt::GetDtorType; // for tcx.dtor_type()
use base::*;
use build::*;
-use callee::{Callee, ArgVals};
-use cleanup;
-use cleanup::CleanupMethods;
+use callee::{Callee};
use common::*;
use debuginfo::DebugLoc;
-use expr;
use machine::*;
use monomorphize;
use trans_item::TransItem;
let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx()))
- .call(bcx, debug_loc, ArgVals(&args), None).bcx
+ .call(bcx, debug_loc, &args, None).bcx
}
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> {
- drop_ty_core(bcx, v, t, debug_loc, false, None)
+ drop_ty_core(bcx, v, t, debug_loc, false)
}
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef,
t: Ty<'tcx>,
debug_loc: DebugLoc,
- skip_dtor: bool,
- drop_hint: Option<cleanup::DropHintValue>)
+ skip_dtor: bool)
-> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
- debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint);
+ debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
let _icx = push_ctxt("drop_ty");
- let mut bcx = bcx;
if bcx.fcx.type_needs_drop(t) {
let ccx = bcx.ccx();
let g = if skip_dtor {
v
};
- match drop_hint {
- Some(drop_hint) => {
- let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8);
- let moved_val =
- C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false);
- let may_need_drop =
- ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
- bcx = with_cond(bcx, may_need_drop, |cx| {
- Call(cx, glue, &[ptr], debug_loc);
- cx
- })
- }
- None => {
- // No drop-hint ==> call standard drop glue
- Call(bcx, glue, &[ptr], debug_loc);
- }
- }
+ // No drop-hint ==> call standard drop glue
+ Call(bcx, glue, &[ptr], debug_loc);
}
bcx
}
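// Note the contract spelled out above: `v` is an *alias* of a value of
// type `t`, which is the same contract as std's `ptr::drop_in_place`.
// A minimal sketch of that contract:
//
//     unsafe fn drop_alias<T>(p: *mut T) {
//         std::ptr::drop_in_place(p); // runs T's destructor in place
//     }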
let vp = alloc_ty(bcx, t, "");
call_lifetime_start(bcx, vp);
store_ty(bcx, v, vp, t);
- let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None);
+ let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
call_lifetime_end(bcx, vp);
bcx
}
arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena);
- let bcx = fcx.init(false, None);
+ let bcx = fcx.init(false);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
fcx.finish(bcx, DebugLoc::None);
}
-
-fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
- t: Ty<'tcx>,
- struct_data: ValueRef)
- -> Block<'blk, 'tcx> {
- assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized");
-
- let repr = adt::represent_type(bcx.ccx(), t);
- let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &repr, struct_data));
- let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type());
- let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
- let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false);
-
- let bcx = if !bcx.ccx().check_drop_flag_for_sanity() {
- bcx
- } else {
- let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type());
- let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false);
- let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None);
- let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None);
- let drop_flag_neither_initialized_nor_cleared =
- And(bcx, not_init, not_done, DebugLoc::None);
- with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
- let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
- Call(cx, llfn, &[], DebugLoc::None);
- cx
- })
- };
-
- let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None);
- with_cond(bcx, drop_flag_dtor_needed, |cx| {
- trans_struct_drop(cx, t, struct_data)
- })
-}
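// The removed flag check boils down to: load one byte, trap if it is in
// neither known state, and run the dtor only while it still says
// "needed". A stand-alone sketch (flag values hypothetical; the real
// constants are adt::DTOR_NEEDED / adt::DTOR_DONE):
//
//     const NEEDED: u8 = 1; // hypothetical stand-in for DTOR_NEEDED
//     const DONE: u8 = 0;   // hypothetical stand-in for DTOR_DONE
//     fn maybe_drop(flag: u8, run_dtor: impl FnOnce()) {
//         debug_assert!(flag == NEEDED || flag == DONE); // the debugtrap
//         if flag == NEEDED {
//             run_dtor();
//         }
//     }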
fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
v0: ValueRef)
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
- bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t);
+ bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t);
let (sized_args, unsized_args);
let args: &[ValueRef] = if type_is_sized(tcx, t) {
sized_args = [v0];
&sized_args
} else {
- unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))];
+ unsized_args = [
+ Load(bcx, get_dataptr(bcx, v0)),
+ Load(bcx, get_meta(bcx, v0))
+ ];
&unsized_args
};
};
let dtor_did = def.destructor().unwrap();
bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
- .call(bcx, DebugLoc::None, ArgVals(args), None).bcx;
+ .call(bcx, DebugLoc::None, args, None).bcx;
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
}
// must definitely check for special bit-patterns corresponding to
// the special dtor markings.
- let inttype = Type::int(bcx.ccx());
- let dropped_pattern = C_integral(inttype, adt::DTOR_DONE_U64, false);
-
match t.sty {
ty::TyBox(content_ty) => {
// Support for TyBox is built-in and its drop glue is special; as
// a safe-guard, assert TyBox is not used with TyContents.
assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) {
- let llval = expr::get_dataptr(bcx, v0);
+ let llval = get_dataptr(bcx, v0);
let llbox = Load(bcx, llval);
- let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx()));
- let drop_flag_not_dropped_already =
- ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
- with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
- let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
- let info = expr::get_meta(bcx, v0);
- let info = Load(bcx, info);
- let (llsize, llalign) =
- size_and_align_of_dst(&bcx.build(), content_ty, info);
-
- // `Box<ZeroSizeType>` does not allocate.
- let needs_free = ICmp(bcx,
- llvm::IntNE,
- llsize,
- C_uint(bcx.ccx(), 0u64),
- DebugLoc::None);
- with_cond(bcx, needs_free, |bcx| {
- trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
- })
+ let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
+ let info = get_meta(bcx, v0);
+ let info = Load(bcx, info);
+ let (llsize, llalign) =
+ size_and_align_of_dst(&bcx.build(), content_ty, info);
+
+ // `Box<ZeroSizeType>` does not allocate.
+ let needs_free = ICmp(bcx,
+ llvm::IntNE,
+ llsize,
+ C_uint(bcx.ccx(), 0u64),
+ DebugLoc::None);
+ with_cond(bcx, needs_free, |bcx| {
+ trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
})
} else {
let llval = v0;
let llbox = Load(bcx, llval);
- let llbox_as_usize = PtrToInt(bcx, llbox, inttype);
- let drop_flag_not_dropped_already =
- ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None);
- with_cond(bcx, drop_flag_not_dropped_already, |bcx| {
- let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
- trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
- })
+ let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
+ trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
}
}
ty::TyStruct(def, _) | ty::TyEnum(def, _) => {
match (def.dtor_kind(), skip_dtor) {
- (ty::TraitDtor(true), false) => {
- // FIXME(16758) Since the struct is unsized, it is hard to
- // find the drop flag (which is at the end of the struct).
- // Let's just ignore the flag and pretend everything will be
- // OK.
- if type_is_sized(bcx.tcx(), t) {
- trans_struct_drop_flag(bcx, t, v0)
- } else {
- // Give the user a heads up that we are doing something
- // stupid and dangerous.
- bcx.sess().warn(&format!("Ignoring drop flag in destructor for {} \
- because the struct is unsized. See issue \
- #16758", t));
- trans_struct_drop(bcx, t, v0)
- }
- }
- (ty::TraitDtor(false), false) => {
+ (ty::TraitDtor(_), false) => {
trans_struct_drop(bcx, t, v0)
}
(ty::NoDtor, _) | (_, true) => {
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
- let data_ptr = expr::get_dataptr(bcx, v0);
- let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0));
+ let data_ptr = get_dataptr(bcx, v0);
+ let vtable_ptr = Load(bcx, get_meta(bcx, v0));
let dtor = Load(bcx, vtable_ptr);
Call(bcx,
dtor,
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
-use llvm::{ValueRef, TypeKind};
-use rustc::ty::subst::Substs;
+use llvm::{ValueRef};
use abi::{Abi, FnType};
use adt;
use base::*;
use build::*;
-use callee::{self, Callee};
-use cleanup;
-use cleanup::CleanupMethods;
use common::*;
-use consts;
-use datum::*;
use debuginfo::DebugLoc;
use declare;
-use expr;
use glue;
use type_of;
use machine;
use Disr;
use rustc::hir;
use syntax::ast;
-use syntax::ptr::P;
use syntax::parse::token;
use rustc::session::Session;
-use rustc_const_eval::fatal_const_eval_err;
use syntax_pos::{Span, DUMMY_SP};
use std::cmp::Ordering;
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
- args: callee::CallArgs<'a, 'tcx>,
- dest: expr::Dest,
+ llargs: &[ValueRef],
+ llresult: ValueRef,
call_debug_location: DebugLoc)
-> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
}
};
- let cleanup_scope = fcx.push_custom_cleanup_scope();
-
- // For `transmute` we can just trans the input expr directly into dest
- if name == "transmute" {
- let llret_ty = type_of::type_of(ccx, ret_ty);
- match args {
- callee::ArgExprs(arg_exprs) => {
- assert_eq!(arg_exprs.len(), 1);
-
- let (in_type, out_type) = (substs.types[0],
- substs.types[1]);
- let llintype = type_of::type_of(ccx, in_type);
- let llouttype = type_of::type_of(ccx, out_type);
-
- let in_type_size = machine::llbitsize_of_real(ccx, llintype);
- let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
-
- if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
- if out_type_size != 0 {
- // FIXME #19925 Remove this hack after a release cycle.
- let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
- let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
- let llfnty = val_ty(llfn);
- let llresult = match dest {
- expr::SaveIn(d) => d,
- expr::Ignore => alloc_ty(bcx, out_type, "ret")
- };
- Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
- if dest == expr::Ignore {
- bcx = glue::drop_ty(bcx, llresult, out_type,
- call_debug_location);
- }
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
- return Result::new(bcx, llresult);
- }
- }
-
- // This should be caught by the intrinsicck pass
- assert_eq!(in_type_size, out_type_size);
-
- let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
- use llvm::TypeKind::*;
- match llkind {
- Half | Float | Double | X86_FP80 | FP128 |
- PPC_FP128 | Integer | Vector | X86_MMX => true,
- _ => false
- }
- };
-
- // An approximation to which types can be directly cast via
- // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
- // but does, importantly, cover SIMD types.
- let in_kind = llintype.kind();
- let ret_kind = llret_ty.kind();
- let bitcast_compatible =
- (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
- in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
- };
-
- let dest = if bitcast_compatible {
- // if we're here, the type is scalar-like (a primitive, a
- // SIMD type or a pointer), and so can be handled as a
- // by-value ValueRef and can also be directly bitcast to the
- // target type. Doing this special case makes conversions
- // like `u32x4` -> `u64x2` much nicer for LLVM and so more
- // efficient (these are done efficiently implicitly in C
- // with the `__m128i` type and so this means Rust doesn't
- // lose out there).
- let expr = &arg_exprs[0];
- let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
- let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
- let val = if datum.kind.is_by_ref() {
- load_ty(bcx, datum.val, datum.ty)
- } else {
- from_immediate(bcx, datum.val)
- };
-
- let cast_val = BitCast(bcx, val, llret_ty);
-
- match dest {
- expr::SaveIn(d) => {
- // this often occurs in a sequence like `Store(val,
- // d); val2 = Load(d)`, so it disappears easily.
- Store(bcx, cast_val, d);
- }
- expr::Ignore => {}
- }
- dest
- } else {
- // The types are too complicated to do with a by-value
- // bitcast, so we use a pointer cast instead. We need to cast
- // the dest so the types work out.
- let dest = match dest {
- expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
- expr::Ignore => expr::Ignore
- };
- bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
- dest
- };
-
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
- return match dest {
- expr::SaveIn(d) => Result::new(bcx, d),
- expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
- };
-
- }
-
- _ => {
- bug!("expected expr as argument for transmute");
- }
- }
- }
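// The bitcast fast path above covers same-size scalar and SIMD
// transmutes; e.g. reinterpreting float bits (std::mem::transmute, with
// equal sizes enforced by intrinsicck):
//
//     let bits: u32 = unsafe { std::mem::transmute(1.0f32) };
//     assert_eq!(bits, 0x3f80_0000); // IEEE-754 encoding of 1.0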
-
- // For `move_val_init` we can evaluate the destination address
- // (the first argument) and then trans the source value (the
- // second argument) directly into the resulting destination
- // address.
- if name == "move_val_init" {
- if let callee::ArgExprs(ref exprs) = args {
- let (dest_expr, source_expr) = if exprs.len() != 2 {
- bug!("expected two exprs as arguments for `move_val_init` intrinsic");
- } else {
- (&exprs[0], &exprs[1])
- };
-
- // evaluate destination address
- let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
- let dest_datum = unpack_datum!(
- bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
- let dest_datum = unpack_datum!(
- bcx, dest_datum.to_appropriate_datum(bcx));
-
- // `expr::trans_into(bcx, expr, dest)` is equiv to
- //
- // `trans(bcx, expr).store_to_dest(dest)`,
- //
- // which for `dest == expr::SaveIn(addr)`, is equivalent to:
- //
- // `trans(bcx, expr).store_to(bcx, addr)`.
- let lldest = expr::Dest::SaveIn(dest_datum.val);
- bcx = expr::trans_into(bcx, source_expr, lldest);
-
- let llresult = C_nil(ccx);
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
- return Result::new(bcx, llresult);
- } else {
- bug!("expected two exprs as arguments for `move_val_init` intrinsic");
- }
- }
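// `move_val_init(dest, src)` is morally `ptr::write`: initialize a raw
// destination without reading or dropping whatever it held before. In
// std terms:
//
//     unsafe fn init<T>(dest: *mut T, value: T) {
//         std::ptr::write(dest, value); // moves value in, no drop of *dest
//     }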
-
- // save the actual AST arguments for later (some places need to do
- // const-evaluation on them)
- let expr_arguments = match args {
- callee::ArgExprs(args) => Some(args),
- _ => None,
- };
-
- // Push the arguments.
- let mut llargs = Vec::new();
- bcx = callee::trans_args(bcx,
- Abi::RustIntrinsic,
- fn_ty,
- &mut callee::Intrinsic,
- args,
- &mut llargs,
- cleanup::CustomScope(cleanup_scope));
-
- fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
-
// These are the only intrinsic functions that diverge.
if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], call_debug_location);
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
} else if &name[..] == "unreachable" {
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_nil(ccx));
}
let llret_ty = type_of::type_of(ccx, ret_ty);
- // Get location to store the result. If the user does
- // not care about the result, just make a stack slot
- let llresult = match dest {
- expr::SaveIn(d) => d,
- expr::Ignore => {
- if !type_is_zero_size(ccx, ret_ty) {
- let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
- call_lifetime_start(bcx, llresult);
- llresult
- } else {
- C_undef(llret_ty.ptr_to())
- }
- }
- };
-
let simple = get_simple_intrinsic(ccx, &name);
let llval = match (simple, &name[..]) {
(Some(llfn), _) => {
}
(_, "drop_in_place") => {
let tp_ty = substs.types[0];
- let ptr = if type_is_sized(tcx, tp_ty) {
+ let is_sized = type_is_sized(tcx, tp_ty);
+ let ptr = if is_sized {
llargs[0]
} else {
- let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
- Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
- Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
- fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
- scratch.val
+ let scratch = alloc_ty(bcx, tp_ty, "drop");
+ call_lifetime_start(bcx, scratch);
+ Store(bcx, llargs[0], get_dataptr(bcx, scratch));
+ Store(bcx, llargs[1], get_meta(bcx, scratch));
+ scratch
};
glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
+ if !is_sized {
+ call_lifetime_end(bcx, ptr);
+ }
C_nil(ccx)
}
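// From the caller's side, the fat-pointer branch corresponds to dropping
// an unsized place, where data pointer and metadata arrive separately;
// in std terms:
//
//     unsafe fn drop_slice_in_place(p: *mut [String]) {
//         std::ptr::drop_in_place(p); // p carries data ptr + len, as above
//     }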
(_, "type_name") => {
C_u64(ccx, ccx.tcx().type_id_hash(substs.types[0]))
}
(_, "init_dropped") => {
- let tp_ty = substs.types[0];
- if !type_is_zero_size(ccx, tp_ty) {
- drop_done_fill_mem(bcx, llresult, tp_ty);
- }
- C_nil(ccx)
+ span_bug!(span, "init_dropped intrinsic unsupported");
}
(_, "init") => {
let tp_ty = substs.types[0];
(_, "volatile_store") => {
let tp_ty = substs.types[0];
if type_is_fat_ptr(bcx.tcx(), tp_ty) {
- VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
- VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
+ VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0]));
+ VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0]));
} else {
let val = if fn_ty.args[1].is_indirect() {
Load(bcx, llargs[1])
}
(_, name) if name.starts_with("simd_") => {
generic_simd_intrinsic(bcx, name,
- substs,
callee_ty,
- expr_arguments,
&llargs,
ret_ty, llret_ty,
call_debug_location,
let llargs = if !any_changes_needed {
// no aggregates to flatten, so no change needed
- llargs
+ llargs.to_vec()
} else {
// there are some aggregates that need to be flattened
// in the LLVM call, so we need to run over the types
// again to find them and extract the arguments
intr.inputs.iter()
- .zip(&llargs)
+ .zip(llargs)
.zip(&arg_tys)
.flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
.collect()
}
}
- // If we made a temporary stack slot, let's clean it up
- match dest {
- expr::Ignore => {
- bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
- call_lifetime_end(bcx, llresult);
- }
- expr::SaveIn(_) => {}
- }
-
- fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
-
Result::new(bcx, llresult)
}
SetPersonalityFn(bcx, bcx.fcx.eh_personality());
- let normal = bcx.fcx.new_temp_block("normal");
- let catchswitch = bcx.fcx.new_temp_block("catchswitch");
- let catchpad = bcx.fcx.new_temp_block("catchpad");
- let caught = bcx.fcx.new_temp_block("caught");
+ let normal = bcx.fcx.new_block("normal");
+ let catchswitch = bcx.fcx.new_block("catchswitch");
+ let catchpad = bcx.fcx.new_block("catchpad");
+ let caught = bcx.fcx.new_block("caught");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let tcx = ccx.tcx();
let tydesc = match tcx.lang_items.msvc_try_filter() {
- Some(did) => ::consts::get_static(ccx, did).to_llref(),
+ Some(did) => ::consts::get_static(ccx, did),
None => bug!("msvc_try_filter not defined"),
};
let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
- let then = bcx.fcx.new_temp_block("then");
- let catch = bcx.fcx.new_temp_block("catch");
+ let then = bcx.fcx.new_block("then");
+ let catch = bcx.fcx.new_block("catch");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let bcx = fcx.init(true, None);
- trans(bcx);
+ trans(fcx.init(true));
fcx.cleanup();
llfn
}
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
(bcx: Block<'blk, 'tcx>,
name: &str,
- substs: &'tcx Substs<'tcx>,
callee_ty: Ty<'tcx>,
- args: Option<&[P<hir::Expr>]>,
llargs: &[ValueRef],
ret_ty: Ty<'tcx>,
llret_ty: Type,
let total_len = in_len as u64 * 2;
- let vector = match args {
- Some(args) => {
- match consts::const_expr(bcx.ccx(), &args[2], substs, None,
- // this should probably help simd error reporting
- consts::TrueConst::Yes) {
- Ok((vector, _)) => vector,
- Err(err) => {
- fatal_const_eval_err(bcx.tcx(), err.as_inner(), span,
- "shuffle indices");
- }
- }
- }
- None => llargs[2]
- };
+ let vector = llargs[2];
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
mod common;
mod consts;
mod context;
-mod controlflow;
-mod datum;
mod debuginfo;
mod declare;
mod disr;
-mod expr;
mod glue;
mod inline;
mod intrinsic;
mod machine;
-mod _match;
mod meth;
mod mir;
mod monomorphize;
use abi::FnType;
use base::*;
use build::*;
-use callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim};
+use callee::{Callee, Virtual, trans_fn_pointer_shim};
use closure;
use common::*;
use consts;
use debuginfo::DebugLoc;
use declare;
-use expr;
use glue;
use machine;
use type_::Type;
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
- let mut bcx = fcx.init(false, None);
- assert!(!fcx.needs_ret_allocas);
+ let mut bcx = fcx.init(false);
-
- let dest =
- fcx.llretslotptr.get().map(
- |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
+ let dest = fcx.llretslotptr.get();
debug!("trans_object_shim: method_offset_in_vtable={}",
vtable_index);
let llargs = get_params(fcx.llfn);
- let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]);
let callee = Callee {
data: Virtual(vtable_index),
ty: method_ty
};
- bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx;
+ bcx = callee.call(bcx, DebugLoc::None,
+ &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx;
fcx.finish(bcx, DebugLoc::None);
get_vtable_methods(tcx, id, substs)
.into_iter()
.map(|opt_mth| opt_mth.map_or(nullptr, |mth| {
- Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val
+ Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx)
}))
.collect::<Vec<_>>()
.into_iter()
// except according to those terms.
use llvm::{self, ValueRef};
-use rustc_const_eval::ErrKind;
+use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
use rustc::middle::lang_items;
use rustc::ty;
use rustc::mir::repr as mir;
debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
- let trampoline = this.fcx.new_block(name, None).build();
+ let trampoline = this.fcx.new_block(name).build();
trampoline.set_personality_fn(this.fcx.eh_personality());
trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb()
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
- let panic_block = self.fcx.new_block("panic", None);
+ let panic_block = self.fcx.new_block("panic");
if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb);
} else {
// is also constant, then we can produce a warning.
if const_cond == Some(!expected) {
if let Some(err) = const_err {
- let _ = consts::const_err(bcx.ccx(), span,
- Err::<(), _>(err),
- consts::TrueConst::No);
+ let err = ConstEvalErr { span: span, kind: err };
+ let mut diag = bcx.tcx().sess.struct_span_warn(
+ span, "this expression will panic at run-time");
+ note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag);
+ diag.emit();
}
}
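// e.g. a constant-foldable overflow takes exactly this path: the check's
// condition is known to fail at trans time, so the warning fires here
// and the run-time panic branch is still emitted:
//
//     let _x = 255u8 + 1; // warns "this expression will panic at run-time"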
let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
let callee = Callee::def(bcx.ccx(), def_id,
bcx.ccx().empty_substs_for_def_id(def_id));
- let llfn = callee.reify(bcx.ccx()).val;
+ let llfn = callee.reify(bcx.ccx());
// Translate the actual panic invoke/call.
if let Some(unwind) = cleanup {
let fn_ptr = match callee.data {
NamedTupleConstructor(_) => {
// FIXME translate this like mir::Rvalue::Aggregate.
- callee.reify(bcx.ccx()).val
+ callee.reify(bcx.ccx())
}
Intrinsic => {
- use callee::ArgVals;
- use expr::{Ignore, SaveIn};
use intrinsic::trans_intrinsic_call;
let (dest, llargs) = match ret_dest {
_ if fn_ty.ret.is_indirect() => {
- (SaveIn(llargs[0]), &llargs[1..])
+ (llargs[0], &llargs[1..])
+ }
+ ReturnDest::Nothing => {
+ (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..])
}
- ReturnDest::Nothing => (Ignore, &llargs[..]),
ReturnDest::IndirectOperand(dst, _) |
- ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]),
+ ReturnDest::Store(dst) => (dst, &llargs[..]),
ReturnDest::DirectOperand(_) =>
bug!("Cannot use direct operand with an intrinsic call")
};
bcx.with_block(|bcx| {
trans_intrinsic_call(bcx, callee.ty, &fn_ty,
- ArgVals(llargs), dest,
- debug_loc);
+ &llargs, dest, debug_loc);
});
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
let target = self.bcx(target_bb);
- let block = self.fcx.new_block("cleanup", None);
+ let block = self.fcx.new_block("cleanup");
self.landing_pads[target_bb] = Some(block);
let bcx = block.build();
fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
self.unreachable_block.unwrap_or_else(|| {
- let bl = self.fcx.new_block("unreachable", None);
+ let bl = self.fcx.new_block("unreachable");
bl.build().unreachable();
self.unreachable_block = Some(bl);
bl
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx(), def_id, substs);
- let datum = f.reify(bcx.ccx());
+ let ty = match f.ty.sty {
+ ty::TyFnDef(_, _, f) => bcx.tcx().mk_fn_ptr(f),
+ _ => f.ty
+ };
val = OperandRef {
- val: Immediate(datum.val),
- ty: datum.ty
+ val: Immediate(f.reify(bcx.ccx())),
+ ty: ty
};
}
}
use llvm::{self, ValueRef};
use rustc::middle::const_val::ConstVal;
-use rustc_const_eval::ErrKind;
+use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err};
use rustc_const_math::ConstInt::*;
use rustc_const_math::ConstFloat::*;
-use rustc_const_math::ConstMathErr;
+use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstMathErr};
use rustc::hir::def_id::DefId;
use rustc::infer::TransNormalize;
use rustc::mir::repr as mir;
use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty};
use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral};
use common::{C_null, C_struct, C_str_slice, C_undef, C_uint};
-use consts::{self, ConstEvalFailure, TrueConst, to_const_int};
+use common::{const_to_opt_int, const_to_opt_uint};
+use consts;
use monomorphize::{self, Instance};
use type_of;
use type_::Type;
use value::Value;
+use syntax::ast;
use syntax_pos::{Span, DUMMY_SP};
use std::ptr;
fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
mut instance: Instance<'tcx>,
args: IndexVec<mir::Arg, Const<'tcx>>)
- -> Result<Const<'tcx>, ConstEvalFailure> {
+ -> Result<Const<'tcx>, ConstEvalErr> {
// Try to resolve associated constants.
if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) {
let trait_ref = ty::TraitRef::new(trait_id, instance.substs);
value)
}
- fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalFailure> {
+ fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
let mut bb = mir::START_BLOCK;
ErrKind::Math(err.clone())
}
};
- match consts::const_err(self.ccx, span, Err(err), TrueConst::Yes) {
- Ok(()) => {}
- Err(err) => if failure.is_ok() { failure = Err(err); }
- }
+
+ let err = ConstEvalErr { span: span, kind: err };
+ report_const_eval_err(tcx, &err, span, "expression").emit();
+ failure = Err(err);
}
target
}
}
fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span)
- -> Result<ConstLvalue<'tcx>, ConstEvalFailure> {
+ -> Result<ConstLvalue<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
if let Some(index) = self.mir.local_index(lvalue) {
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
ConstLvalue {
- base: Base::Static(consts::get_static(self.ccx, def_id).val),
+ base: Base::Static(consts::get_static(self.ccx, def_id)),
llextra: ptr::null_mut(),
ty: lvalue.ty(self.mir, tcx).to_ty(tcx)
}
} else if let ty::TyStr = projected_ty.sty {
(Base::Str(base), extra)
} else {
- let val = consts::load_const(self.ccx, base, projected_ty);
+ let v = base;
+ let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v);
+ let mut val = unsafe { llvm::LLVMGetInitializer(v) };
if val.is_null() {
span_bug!(span, "dereference of non-constant pointer `{:?}`",
Value(base));
}
+ if projected_ty.is_bool() {
+ unsafe {
+ val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref());
+ }
+ }
(Base::Value(val), extra)
}
}
}
fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span)
- -> Result<Const<'tcx>, ConstEvalFailure> {
+ -> Result<Const<'tcx>, ConstEvalErr> {
match *operand {
mir::Operand::Consume(ref lvalue) => {
Ok(self.const_lvalue(lvalue, span)?.to_const(span))
fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
dest_ty: Ty<'tcx>, span: Span)
- -> Result<Const<'tcx>, ConstEvalFailure> {
+ -> Result<Const<'tcx>, ConstEvalErr> {
let tcx = self.ccx.tcx();
let val = match *rvalue {
mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => {
Callee::def(self.ccx, def_id, substs)
- .reify(self.ccx).val
+ .reify(self.ccx)
}
_ => {
span_bug!(span, "{} cannot be reified to a fn ptr",
}
+fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option<ConstInt> {
+ match t.sty {
+ ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type {
+ ast::IntTy::I8 => {
+ assert_eq!(input as i8 as i64, input);
+ Some(ConstInt::I8(input as i8))
+ },
+ ast::IntTy::I16 => {
+ assert_eq!(input as i16 as i64, input);
+ Some(ConstInt::I16(input as i16))
+ },
+ ast::IntTy::I32 => {
+ assert_eq!(input as i32 as i64, input);
+ Some(ConstInt::I32(input as i32))
+ },
+ ast::IntTy::I64 => {
+ Some(ConstInt::I64(input))
+ },
+ ast::IntTy::Is => {
+ ConstIsize::new(input, tcx.sess.target.int_type)
+ .ok().map(ConstInt::Isize)
+ },
+ }),
+ ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type {
+ ast::UintTy::U8 => {
+ assert_eq!(input as u8 as u64, input);
+ Some(ConstInt::U8(input as u8))
+ },
+ ast::UintTy::U16 => {
+ assert_eq!(input as u16 as u64, input);
+ Some(ConstInt::U16(input as u16))
+ },
+ ast::UintTy::U32 => {
+ assert_eq!(input as u32 as u64, input);
+ Some(ConstInt::U32(input as u32))
+ },
+ ast::UintTy::U64 => {
+ Some(ConstInt::U64(input))
+ },
+ ast::UintTy::Us => {
+ ConstUsize::new(input, tcx.sess.target.uint_type)
+ .ok().map(ConstInt::Usize)
+ },
+ }),
+ _ => None,
+ }
+}
+
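// The narrowing pattern above (cast down, cast back, assert a round
// trip) can be checked in isolation; a sketch that returns None instead
// of asserting:
//
//     fn narrow_i8(input: i64) -> Option<i8> {
//         if input as i8 as i64 == input { Some(input as i8) } else { None }
//     }
//     // narrow_i8(-1) == Some(-1); narrow_i8(300) == None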
pub fn const_scalar_binop(op: mir::BinOp,
lhs: ValueRef,
rhs: ValueRef,
}
};
- match result {
- Ok(v) => v,
- Err(ConstEvalFailure::Compiletime(_)) => {
- // We've errored, so we don't have to produce working code.
- let llty = type_of::type_of(bcx.ccx(), ty);
- Const::new(C_undef(llty), ty)
- }
- Err(ConstEvalFailure::Runtime(err)) => {
- span_bug!(constant.span,
- "MIR constant {:?} results in runtime panic: {:?}",
- constant, err.description())
- }
- }
+ result.unwrap_or_else(|_| {
+ // We've errored, so we don't have to produce working code.
+ let llty = type_of::type_of(bcx.ccx(), ty);
+ Const::new(C_undef(llty), ty)
+ })
}
}
pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId)
- -> Result<ValueRef, ConstEvalFailure> {
+ -> Result<ValueRef, ConstEvalErr> {
let instance = Instance::mono(ccx.shared(), def_id);
MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval)
}
mir::Lvalue::ReturnPointer => bug!(), // handled above
mir::Lvalue::Static(def_id) => {
let const_ty = self.monomorphized_lvalue_ty(lvalue);
- LvalueRef::new_sized(consts::get_static(ccx, def_id).val,
+ LvalueRef::new_sized(consts::get_static(ccx, def_id),
LvalueTy::from_ty(const_ty))
},
mir::Lvalue::Projection(box mir::Projection {
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
- let bcx = fcx.init(true, None).build();
+ let bcx = fcx.init(true).build();
let mir = bcx.mir();
// Analyze the temps to determine which must be lvalues
let block_bcxs: IndexVec<mir::BasicBlock, Block<'blk,'tcx>> =
mir.basic_blocks().indices().map(|bb| {
if bb == mir::START_BLOCK {
- fcx.new_block("start", None)
+ fcx.new_block("start")
} else {
- fcx.new_block(&format!("{:?}", bb), None)
+ fcx.new_block(&format!("{:?}", bb))
}
}).collect();
use base;
use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result};
-use datum::{Datum, Lvalue};
use debuginfo::DebugLoc;
use adt;
use machine;
mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
let outputs = outputs.iter().map(|output| {
let lvalue = self.trans_lvalue(&bcx, output);
- Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
- Lvalue::new("out"))
+ (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
}).collect();
let input_vals = inputs.iter().map(|input| {
ty::TyFnDef(def_id, substs, _) => {
OperandValue::Immediate(
Callee::def(bcx.ccx(), def_id, substs)
- .reify(bcx.ccx()).val)
+ .reify(bcx.ccx()))
}
_ => {
bug!("{} cannot be reified to a fn ptr", operand.ty)
let def_id = ccx.tcx().map.local_def_id(node_id);
let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*)
let item = ccx.tcx().map.expect_item(node_id);
- if let hir::ItemStatic(_, m, ref expr) = item.node {
- match consts::trans_static(&ccx, m, expr, item.id, &item.attrs) {
+ if let hir::ItemStatic(_, m, _) = item.node {
+ match consts::trans_static(&ccx, m, item.id, &item.attrs) {
Ok(_) => { /* Cool, everything's alright. */ },
Err(err) => {
// FIXME: shouldn't this be a `span_err`?
fatal_const_eval_err(
- ccx.tcx(), &err, expr.span, "static");
+ ccx.tcx(), &err, item.span, "static");
}
};
} else {
use llvm;
use llvm::ValueRef;
use base::*;
-use base;
use build::*;
-use cleanup;
-use cleanup::CleanupMethods;
use common::*;
-use consts;
-use datum::*;
use debuginfo::DebugLoc;
-use expr::{Dest, Ignore, SaveIn};
-use expr;
-use machine::llsize_of_alloc;
-use type_::Type;
-use type_of;
-use value::Value;
-use rustc::ty::{self, Ty};
-
-use rustc::hir;
-use rustc_const_eval::eval_length;
-
-use syntax::ast;
-use syntax::parse::token::InternedString;
-
-#[derive(Copy, Clone, Debug)]
-struct VecTypes<'tcx> {
- unit_ty: Ty<'tcx>,
- llunit_ty: Type
-}
-
-pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- expr: &hir::Expr,
- dest: expr::Dest)
- -> Block<'blk, 'tcx> {
- // [...] allocates a fixed-size array and moves it around "by value".
- // In this case, it means that the caller has already given us a location
- // to store the array of the suitable size, so all we have to do is
- // generate the content.
-
- debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest);
-
- let vt = vec_types_from_expr(bcx, expr);
-
- return match dest {
- Ignore => write_content(bcx, &vt, expr, expr, dest),
- SaveIn(lldest) => {
- // lldest will have type *[T x N], but we want the type *T,
- // so use GEP to convert:
- let lldest = StructGEP(bcx, lldest, 0);
- write_content(bcx, &vt, expr, expr, SaveIn(lldest))
- }
- };
-}
-
-/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the
-/// caller must make the reference). "..." is similar except that the memory can be statically
-/// allocated and we return a reference (strings are always by-ref).
-pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- slice_expr: &hir::Expr,
- content_expr: &hir::Expr)
- -> DatumBlock<'blk, 'tcx, Expr> {
- let fcx = bcx.fcx;
- let mut bcx = bcx;
-
- debug!("trans_slice_vec(slice_expr={:?})",
- slice_expr);
-
- let vec_ty = node_id_type(bcx, slice_expr.id);
-
- // Handle the "..." case (returns a slice since strings are always unsized):
- if let hir::ExprLit(ref lit) = content_expr.node {
- if let ast::LitKind::Str(ref s, _) = lit.node {
- let scratch = rvalue_scratch_datum(bcx, vec_ty, "");
- bcx = trans_lit_str(bcx,
- content_expr,
- s.clone(),
- SaveIn(scratch.val));
- return DatumBlock::new(bcx, scratch.to_expr_datum());
- }
- }
-
- // Handle the &[...] case:
- let vt = vec_types_from_expr(bcx, content_expr);
- let count = elements_required(bcx, content_expr);
- debug!(" vt={:?}, count={}", vt, count);
-
- let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
-
- // Always create an alloca even if zero-sized, to preserve
- // the non-null invariant of the inner slice ptr
- let llfixed;
- // Issue 30018: ensure state is initialized as dropped if necessary.
- if fcx.type_needs_drop(vt.unit_ty) {
- llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, "");
- } else {
- let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop");
- llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, "");
- call_lifetime_start(bcx, llfixed);
- };
-
- if count > 0 {
- // Arrange for the backing array to be cleaned up.
- let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id);
- fcx.schedule_lifetime_end(cleanup_scope, llfixed);
- fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None);
-
- // Generate the content into the backing array.
- // llfixed has type *[T x N], but we want the type *T,
- // so use GEP to convert
- bcx = write_content(bcx, &vt, slice_expr, content_expr,
- SaveIn(StructGEP(bcx, llfixed, 0)));
- };
-
- immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
-}
-
-/// Literal strings translate to slices into static memory. This is different from
-/// trans_slice_vstore() above because it doesn't need to copy the content anywhere.
-pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- lit_expr: &hir::Expr,
- str_lit: InternedString,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest);
-
- match dest {
- Ignore => bcx,
- SaveIn(lldest) => {
- let bytes = str_lit.len();
- let llbytes = C_uint(bcx.ccx(), bytes);
- let llcstr = C_cstr(bcx.ccx(), str_lit, false);
- let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx()));
- Store(bcx, llcstr, expr::get_dataptr(bcx, lldest));
- Store(bcx, llbytes, expr::get_meta(bcx, lldest));
- bcx
- }
- }
-}
-
-fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- vt: &VecTypes<'tcx>,
- vstore_expr: &hir::Expr,
- content_expr: &hir::Expr,
- dest: Dest)
- -> Block<'blk, 'tcx> {
- let _icx = push_ctxt("tvec::write_content");
- let fcx = bcx.fcx;
- let mut bcx = bcx;
-
- debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})",
- vt, dest, vstore_expr);
-
- match content_expr.node {
- hir::ExprLit(ref lit) => {
- match lit.node {
- ast::LitKind::Str(ref s, _) => {
- match dest {
- Ignore => return bcx,
- SaveIn(lldest) => {
- let bytes = s.len();
- let llbytes = C_uint(bcx.ccx(), bytes);
- let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
- if !bcx.unreachable.get() {
- base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1);
- }
- return bcx;
- }
- }
- }
- _ => {
- span_bug!(content_expr.span, "unexpected evec content");
- }
- }
- }
- hir::ExprVec(ref elements) => {
- match dest {
- Ignore => {
- for element in elements {
- bcx = expr::trans_into(bcx, &element, Ignore);
- }
- }
-
- SaveIn(lldest) => {
- let temp_scope = fcx.push_custom_cleanup_scope();
- for (i, element) in elements.iter().enumerate() {
- let lleltptr = GEPi(bcx, lldest, &[i]);
- debug!("writing index {} with lleltptr={:?}",
- i, Value(lleltptr));
- bcx = expr::trans_into(bcx, &element,
- SaveIn(lleltptr));
- let scope = cleanup::CustomScope(temp_scope);
- // Issue #30822: mark memory as dropped after running destructor
- fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None);
- }
- fcx.pop_custom_cleanup_scope(temp_scope);
- }
- }
- return bcx;
- }
- hir::ExprRepeat(ref element, ref count_expr) => {
- match dest {
- Ignore => {
- return expr::trans_into(bcx, &element, Ignore);
- }
- SaveIn(lldest) => {
- match eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap() {
- 0 => expr::trans_into(bcx, &element, Ignore),
- 1 => expr::trans_into(bcx, &element, SaveIn(lldest)),
- count => {
- let elem = unpack_datum!(bcx, expr::trans(bcx, &element));
- let bcx = iter_vec_loop(bcx, lldest, vt,
- C_uint(bcx.ccx(), count),
- |set_bcx, lleltptr, _| {
- elem.shallow_copy(set_bcx, lleltptr)
- });
- bcx
- }
- }
- }
- }
- }
- _ => {
- span_bug!(content_expr.span, "unexpected vec content");
- }
- }
-}
-
-fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr)
- -> VecTypes<'tcx> {
- let vec_ty = node_id_type(bcx, vec_expr.id);
- vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx()))
-}
-
-fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>)
- -> VecTypes<'tcx> {
- VecTypes {
- unit_ty: unit_ty,
- llunit_ty: type_of::type_of(bcx.ccx(), unit_ty)
- }
-}
-
-fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize {
- //! Figure out the number of elements we need to store this content
-
- match content_expr.node {
- hir::ExprLit(ref lit) => {
- match lit.node {
- ast::LitKind::Str(ref s, _) => s.len(),
- _ => {
- span_bug!(content_expr.span, "unexpected evec content")
- }
- }
- },
- hir::ExprVec(ref es) => es.len(),
- hir::ExprRepeat(_, ref count_expr) => {
- eval_length(bcx.tcx(), &count_expr, "repeat count").unwrap()
- }
- _ => span_bug!(content_expr.span, "unexpected vec content")
- }
-}
-
-/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval`
-/// which should be by ref.
-pub fn get_fixed_base_and_len(bcx: Block,
- llval: ValueRef,
- vec_length: usize)
- -> (ValueRef, ValueRef) {
- let ccx = bcx.ccx();
-
- let base = expr::get_dataptr(bcx, llval);
- let len = C_uint(ccx, vec_length);
- (base, len)
-}
-
-/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be
-/// by-reference. If you have a datum, you would probably prefer to call
-/// `Datum::get_base_and_len()` which will handle any conversions for you.
-pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
- llval: ValueRef,
- vec_ty: Ty<'tcx>)
- -> (ValueRef, ValueRef) {
- match vec_ty.sty {
- ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n),
- ty::TySlice(_) | ty::TyStr => {
- let base = Load(bcx, expr::get_dataptr(bcx, llval));
- let len = Load(bcx, expr::get_meta(bcx, llval));
- (base, len)
- }
-
- // Only used for pattern matching.
- ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => {
- let inner = if type_is_sized(bcx.tcx(), ty) {
- Load(bcx, llval)
- } else {
- llval
- };
- get_base_and_len(bcx, inner, ty)
- },
- _ => bug!("unexpected type in get_base_and_len"),
- }
-}
-
-fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
- data_ptr: ValueRef,
- vt: &VecTypes<'tcx>,
- count: ValueRef,
- f: F)
- -> Block<'blk, 'tcx> where
- F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
-{
- let _icx = push_ctxt("tvec::iter_vec_loop");
-
- if bcx.unreachable.get() {
- return bcx;
- }
-
- let fcx = bcx.fcx;
- let loop_bcx = fcx.new_temp_block("expr_repeat");
- let next_bcx = fcx.new_temp_block("expr_repeat: next");
-
- Br(bcx, loop_bcx.llbb, DebugLoc::None);
-
- let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
- &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
-
- let bcx = loop_bcx;
-
- let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
- data_ptr
- } else {
- InBoundsGEP(bcx, data_ptr, &[loop_counter])
- };
- let bcx = f(bcx, lleltptr, vt.unit_ty);
- let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
- AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
-
- let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None);
- CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
-
- next_bcx
-}
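// Shape of the removed counter loop in source terms (a sketch, not
// trans output):
//
//     fn for_each_elt<T>(data: &mut [T], mut f: impl FnMut(&mut T)) {
//         let mut i = 0;             // the Phi node's counter
//         while i < data.len() {     // ICmp IntULT against `count`
//             f(&mut data[i]);       // body applied to lleltptr
//             i += 1;                // Add feeding AddIncomingToPhi
//         }
//     }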
+use rustc::ty::Ty;
pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
data_ptr: ValueRef,
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
- let vt = vec_types(bcx, unit_ty);
-
- if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 {
+ if type_is_zero_size(bcx.ccx(), unit_ty) {
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
- iter_vec_loop(bcx, data_ptr, &vt, len, f)
+ if bcx.unreachable.get() {
+ return bcx;
+ }
+
+ let loop_bcx = fcx.new_block("expr_repeat");
+ let next_bcx = fcx.new_block("expr_repeat: next");
+
+ Br(bcx, loop_bcx.llbb, DebugLoc::None);
+
+ let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(),
+ &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]);
+
+ let bcx = loop_bcx;
+ let bcx = f(bcx, data_ptr, unit_ty);
+ let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None);
+ AddIncomingToPhi(loop_counter, plusone, bcx.llbb);
+
+ let cond_val = ICmp(bcx, llvm::IntULT, plusone, len, DebugLoc::None);
+ CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None);
+
+ next_bcx
} else {
// Calculate the last pointer address we want to handle.
let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]);
// Now perform the iteration.
- let header_bcx = fcx.new_temp_block("iter_vec_loop_header");
+ let header_bcx = fcx.new_block("iter_vec_loop_header");
Br(bcx, header_bcx.llbb, DebugLoc::None);
let data_ptr =
Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]);
let not_yet_at_end =
ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None);
- let body_bcx = fcx.new_temp_block("iter_vec_loop_body");
- let next_bcx = fcx.new_temp_block("iter_vec_next");
+ let body_bcx = fcx.new_block("iter_vec_loop_body");
+ let next_bcx = fcx.new_block("iter_vec_next");
CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None);
let body_bcx = f(body_bcx, data_ptr, unit_ty);
AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr,