"match checking",
|| middle::check_match::check_crate(tcx));
+ // this must run before MIR dump, because
+ // "not all control paths return a value" is reported here.
+ //
+ // maybe move the check to a MIR pass?
+ time(time_passes,
+ "liveness checking",
+ || middle::liveness::check_crate(tcx));
+
+ time(time_passes,
+ "rvalue checking",
+ || rvalues::check_crate(tcx));
+
let mut mir_map =
time(time_passes,
"MIR dump",
"MIR passes",
|| mir_map.run_passes(&mut sess.plugin_mir_passes.borrow_mut(), tcx));
- time(time_passes,
- "liveness checking",
- || middle::liveness::check_crate(tcx));
-
time(time_passes,
"borrow checking",
|| borrowck::check_crate(tcx));
- time(time_passes,
- "rvalue checking",
- || rvalues::check_crate(tcx));
-
// Avoid overwhelming user with errors if type checking failed.
// I'm not sure how helpful this is, to be honest, but it avoids
// a
debug!("Expr::make_mirror(): id={}, span={:?}", self.id, self.span);
let expr_ty = cx.tcx.expr_ty(self); // note: no adjustments (yet)!
+ let temp_lifetime = cx.tcx.region_maps.temporary_scope(self.id);
+ let expr_extent = cx.tcx.region_maps.node_extent(self.id);
let kind = match self.node {
// Here comes the interesting stuff:
let tupled_args = Expr {
ty: sig.inputs[1],
- temp_lifetime: cx.tcx.region_maps.temporary_scope(self.id),
+ temp_lifetime: temp_lifetime,
span: self.span,
kind: ExprKind::Tuple {
fields: args.iter().map(ToRef::to_ref).collect()
}
hir::ExprAssignOp(op, ref lhs, ref rhs) => {
- let op = bin_op(op.node);
- ExprKind::AssignOp {
- op: op,
- lhs: lhs.to_ref(),
- rhs: rhs.to_ref(),
+ if cx.tcx.is_method_call(self.id) {
+ let pass_args = if hir_util::is_by_value_binop(op.node) {
+ PassArgs::ByValue
+ } else {
+ PassArgs::ByRef
+ };
+ overloaded_operator(cx, self, ty::MethodCall::expr(self.id),
+ pass_args, lhs.to_ref(), vec![rhs])
+ } else {
+ ExprKind::AssignOp {
+ op: bin_op(op.node),
+ lhs: lhs.to_ref(),
+ rhs: rhs.to_ref(),
+ }
}
}
ExprKind::Tuple { fields: fields.to_ref() },
};
- let temp_lifetime = cx.tcx.region_maps.temporary_scope(self.id);
- let expr_extent = cx.tcx.region_maps.node_extent(self.id);
-
let mut expr = Expr {
temp_lifetime: temp_lifetime,
ty: expr_ty,
// except according to those terms.
//! This pass type-checks the MIR to ensure it is not broken.
+#![allow(unreachable_code)]
use rustc::middle::infer;
use rustc::middle::ty::{self, Ty};
_tcx: &ty::ctxt<'tcx_>) {
// FIXME: pass param_env to run_on_mir
let mir: &mut Mir<'tcx> = unsafe { ::std::mem::transmute(mir) };
+
+ if self.tcx().sess.err_count() > 0 {
+ // compiling a broken program can obviously result in a
+ // broken MIR, so try not to report duplicate errors.
+ return;
+ }
+
let mut type_verifier = TypeVerifier::new(self.infcx, mir);
type_verifier.visit_mir(mir);
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(augmented_assignments)]
+#![feature(op_assign_traits)]
+#![feature(rustc_attrs)]
+
+use std::mem;
+use std::ops::{
+ AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, DivAssign, MulAssign, RemAssign,
+ ShlAssign, ShrAssign, SubAssign,
+};
+
+/// Newtype around `i32` used to exercise user-defined compound-assignment
+/// (`OpAssign`) trait impls; `Debug`/`PartialEq` are needed by `assert_eq!`.
+#[derive(Debug, PartialEq)]
+struct Int(i32);
+
+struct Slice([i32]);
+
+impl Slice {
+    // Reinterpret a mutable slice as the `Slice` newtype.
+    fn new(slice: &mut [i32]) -> &mut Slice {
+        unsafe {
+            // SAFETY(review): `Slice` is a single-field newtype over `[i32]`,
+            // so both references are fat pointers to the same data — this
+            // presumably relies on the newtype having identical layout;
+            // confirm (no #[repr] attribute guarantees it here).
+            mem::transmute(slice)
+        }
+    }
+}
+
+fn main() {
+    // Delegate to a #[rustc_mir] function so the whole test body is
+    // translated through MIR rather than the old trans path.
+    main_mir();
+}
+
+// Exercises every overloadable compound-assignment operator on a user type,
+// plus three tricky shapes: an overloaded RHS type, an indexed LHS, and an
+// unsized LHS.
+#[rustc_mir]
+fn main_mir() {
+    let mut x = Int(1);
+
+    x += Int(2);
+    assert_eq!(x, Int(0b11));
+
+    x &= Int(0b01);
+    assert_eq!(x, Int(0b01));
+
+    x |= Int(0b10);
+    assert_eq!(x, Int(0b11));
+
+    x ^= Int(0b01);
+    assert_eq!(x, Int(0b10));
+
+    x /= Int(2);
+    assert_eq!(x, Int(1));
+
+    x *= Int(3);
+    assert_eq!(x, Int(3));
+
+    x %= Int(2);
+    assert_eq!(x, Int(1));
+
+    // overloaded RHS: ShlAssign/ShrAssign are implemented for both u8 and
+    // u16, so each pair below exercises dispatch on the RHS type.
+    x <<= 1u8;
+    assert_eq!(x, Int(2));
+
+    x <<= 1u16;
+    assert_eq!(x, Int(4));
+
+    x >>= 1u8;
+    assert_eq!(x, Int(2));
+
+    x >>= 1u16;
+    assert_eq!(x, Int(1));
+
+    x -= Int(1);
+    assert_eq!(x, Int(0));
+
+    // indexed LHS: the place being assigned to goes through `IndexMut`.
+    let mut v = vec![Int(1), Int(2)];
+    v[0] += Int(2);
+    assert_eq!(v[0], Int(3));
+
+    // unsized LHS: `*Slice::new(..)` is the unsized `Slice`, so the receiver
+    // of `add_assign` is a fat `&mut Slice` (the RHS here is a plain i32).
+    let mut array = [0, 1, 2];
+    *Slice::new(&mut array) += 1;
+    assert_eq!(array[0], 1);
+    assert_eq!(array[1], 2);
+    assert_eq!(array[2], 3);
+}
+
+// Arithmetic/bitwise `OpAssign` impls for `Int`. Each method carries
+// #[rustc_mir] so the *callee* side of the overloaded operator is also
+// translated through MIR; the bodies use the primitive i32 compound
+// assignments, which MIR lowers as built-in ops.
+impl AddAssign for Int {
+    #[rustc_mir]
+    fn add_assign(&mut self, rhs: Int) {
+        self.0 += rhs.0;
+    }
+}
+
+impl BitAndAssign for Int {
+    #[rustc_mir]
+    fn bitand_assign(&mut self, rhs: Int) {
+        self.0 &= rhs.0;
+    }
+}
+
+impl BitOrAssign for Int {
+    #[rustc_mir]
+    fn bitor_assign(&mut self, rhs: Int) {
+        self.0 |= rhs.0;
+    }
+}
+
+impl BitXorAssign for Int {
+    #[rustc_mir]
+    fn bitxor_assign(&mut self, rhs: Int) {
+        self.0 ^= rhs.0;
+    }
+}
+
+impl DivAssign for Int {
+    #[rustc_mir]
+    fn div_assign(&mut self, rhs: Int) {
+        self.0 /= rhs.0;
+    }
+}
+
+impl MulAssign for Int {
+    #[rustc_mir]
+    fn mul_assign(&mut self, rhs: Int) {
+        self.0 *= rhs.0;
+    }
+}
+
+impl RemAssign for Int {
+    #[rustc_mir]
+    fn rem_assign(&mut self, rhs: Int) {
+        self.0 %= rhs.0;
+    }
+}
+
+// Shift impls are provided for two distinct RHS types (u8 and u16) so the
+// test can check that `<<=`/`>>=` dispatch on the right-hand side's type.
+impl ShlAssign<u8> for Int {
+    #[rustc_mir]
+    fn shl_assign(&mut self, rhs: u8) {
+        self.0 <<= rhs;
+    }
+}
+
+impl ShlAssign<u16> for Int {
+    #[rustc_mir]
+    fn shl_assign(&mut self, rhs: u16) {
+        self.0 <<= rhs;
+    }
+}
+
+impl ShrAssign<u8> for Int {
+    #[rustc_mir]
+    fn shr_assign(&mut self, rhs: u8) {
+        self.0 >>= rhs;
+    }
+}
+
+impl ShrAssign<u16> for Int {
+    #[rustc_mir]
+    fn shr_assign(&mut self, rhs: u16) {
+        self.0 >>= rhs;
+    }
+}
+
+// `-=` for `Int`; same MIR-callee pattern as the other arithmetic impls.
+impl SubAssign for Int {
+    #[rustc_mir]
+    fn sub_assign(&mut self, rhs: Int) {
+        self.0 -= rhs.0;
+    }
+}
+
+// `+=` with an unsized receiver: `Slice` wraps `[i32]`, so `self` is a fat
+// `&mut Slice`. Adds `rhs` to every element in place.
+impl AddAssign<i32> for Slice {
+    #[rustc_mir]
+    fn add_assign(&mut self, rhs: i32) {
+        for lhs in &mut self.0 {
+            *lhs += rhs;
+        }
+    }
+}