]> git.lizzy.rs Git - rust.git/blob - src/librustc_mir/interpret/intrinsics.rs
forward read_c_str method from Memory to Alloc
[rust.git] / src / librustc_mir / interpret / intrinsics.rs
1 //! Intrinsics and other functions that the miri engine executes without
2 //! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
3 //! and miri.
4
5 use syntax::symbol::Symbol;
6 use rustc::ty;
7 use rustc::ty::layout::{LayoutOf, Primitive, Size};
8 use rustc::mir::BinOp;
9 use rustc::mir::interpret::{
10     InterpResult, InterpError, Scalar,
11 };
12
13 use super::{
14     Machine, PlaceTy, OpTy, InterpretCx, Immediate,
15 };
16
17 mod type_name;
18
19 pub use type_name::*;
20
21 fn numeric_intrinsic<'tcx, Tag>(
22     name: &str,
23     bits: u128,
24     kind: Primitive,
25 ) -> InterpResult<'tcx, Scalar<Tag>> {
26     let size = match kind {
27         Primitive::Int(integer, _) => integer.size(),
28         _ => bug!("invalid `{}` argument: {:?}", name, bits),
29     };
30     let extra = 128 - size.bits() as u128;
31     let bits_out = match name {
32         "ctpop" => bits.count_ones() as u128,
33         "ctlz" => bits.leading_zeros() as u128 - extra,
34         "cttz" => (bits << extra).trailing_zeros() as u128 - extra,
35         "bswap" => (bits << extra).swap_bytes(),
36         "bitreverse" => (bits << extra).reverse_bits(),
37         _ => bug!("not a numeric intrinsic: {}", name),
38     };
39     Ok(Scalar::from_uint(bits_out, size))
40 }
41
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpretCx<'mir, 'tcx, M> {
    /// Emulates a call to an intrinsic without executing any MIR.
    ///
    /// `instance` identifies the intrinsic (resolved by its name), `args` are
    /// the already-evaluated operands, and the result is written to `dest`.
    ///
    /// Returns `true` if emulation happened; `false` means the intrinsic is
    /// not handled here and the caller must deal with it some other way.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;

        // Intrinsics are dispatched by name, not by DefId.
        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.layout_of(elem_ty)?.align.abi.bytes();
                let align_val = Scalar::from_uint(elem_align, dest.layout.size);
                self.write_scalar(align_val, dest)?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let ty_needs_drop = ty.needs_drop(self.tcx.tcx, self.param_env);
                let val = Scalar::from_bool(ty_needs_drop);
                self.write_scalar(val, dest)?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.layout_of(ty)?.size.bytes() as u128;
                let size_val = Scalar::from_uint(size, dest.layout.size);
                self.write_scalar(size_val, dest)?;
            }

            "type_id" => {
                let ty = substs.type_at(0);
                let type_id = self.tcx.type_id_hash(ty) as u128;
                let id_val = Scalar::from_uint(type_id, dest.layout.size);
                self.write_scalar(id_val, dest)?;
            }

            "type_name" => {
                // Render the type's name into a fresh static allocation and
                // hand out a fat pointer (&str: ptr + length) to it.
                let alloc = alloc_type_name(self.tcx.tcx, substs.type_at(0));
                let name_id = self.tcx.alloc_map.lock().create_memory_alloc(alloc);
                let id_ptr = self.memory.tag_static_base_pointer(name_id.into());
                let alloc_len = alloc.bytes.len() as u64;
                let name_val = Immediate::new_slice(Scalar::Ptr(id_ptr), alloc_len, self);
                self.write_immediate(name_val, dest)?;
            }

            | "ctpop"
            | "cttz"
            | "cttz_nonzero"
            | "ctlz"
            | "ctlz_nonzero"
            | "bswap"
            | "bitreverse" => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let bits = self.read_scalar(args[0])?.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    ty::layout::Abi::Scalar(ref scalar) => scalar.value,
                    _ => Err(::rustc::mir::interpret::InterpError::TypeNotPrimitive(ty))?,
                };
                // The `_nonzero` variants have undefined behavior on 0, so we
                // report that as an interpreter error before delegating to the
                // shared implementation of the plain variant.
                let out_val = if intrinsic_name.ends_with("_nonzero") {
                    if bits == 0 {
                        return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
                    }
                    numeric_intrinsic(intrinsic_name.trim_end_matches("_nonzero"), bits, kind)?
                } else {
                    numeric_intrinsic(intrinsic_name, bits, kind)?
                };
                self.write_scalar(out_val, dest)?;
            }
            | "overflowing_add"
            | "overflowing_sub"
            | "overflowing_mul"
            | "add_with_overflow"
            | "sub_with_overflow"
            | "mul_with_overflow" => {
                let lhs = self.read_immediate(args[0])?;
                let rhs = self.read_immediate(args[1])?;
                // `overflowing_*` wrap silently; `*_with_overflow` additionally
                // return a bool flag, so `dest` is a pair for those.
                let (bin_op, ignore_overflow) = match intrinsic_name {
                    "overflowing_add" => (BinOp::Add, true),
                    "overflowing_sub" => (BinOp::Sub, true),
                    "overflowing_mul" => (BinOp::Mul, true),
                    "add_with_overflow" => (BinOp::Add, false),
                    "sub_with_overflow" => (BinOp::Sub, false),
                    "mul_with_overflow" => (BinOp::Mul, false),
                    _ => bug!("Already checked for int ops")
                };
                if ignore_overflow {
                    self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
                } else {
                    self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
                }
            }
            "saturating_add" | "saturating_sub" => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let is_add = intrinsic_name == "saturating_add";
                let (val, overflowed) = self.binary_op(if is_add {
                    BinOp::Add
                } else {
                    BinOp::Sub
                }, l, r)?;
                // On overflow, clamp to the appropriate bound of the type;
                // otherwise keep the wrapped result (which is exact here).
                let val = if overflowed {
                    let num_bits = l.layout.size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either is 0 no
                        // overflow can occur)
                        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits-1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint((1u128 << (num_bits - 1)) - 1,  // max positive
                                Size::from_bits(num_bits))
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {  // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(u128::max_value() >> (128 - num_bits),
                                Size::from_bits(num_bits))
                        } else {  // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            "unchecked_shl" | "unchecked_shr" => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    "unchecked_shl" => BinOp::Shl,
                    "unchecked_shr" => BinOp::Shr,
                    _ => bug!("Already checked for int ops")
                };
                let (val, overflowed) = self.binary_op(bin_op, l, r)?;
                // A shift amount >= bit width is UB for the unchecked
                // intrinsics, so surface it as an interpreter error.
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val =  r.to_scalar()?.to_bits(layout.size)?;
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in {}", r_val, intrinsic_name),
                    ));
                }
                self.write_scalar(val, dest)?;
            }
            "rotate_left" | "rotate_right" => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val_bits = self.read_scalar(args[0])?.to_bits(layout.size)?;
                let raw_shift_bits = self.read_scalar(args[1])?.to_bits(layout.size)?;
                let width_bits = layout.size.bits() as u128;
                // Reduce the shift modulo the width; the second `% width_bits`
                // maps a zero shift's complement (BW) back to 0 so we never
                // shift a u128 by >= its own width below.
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == "rotate_left" {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                // Drop any bits that spilled above the type's width.
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            "transmute" => {
                self.copy_op_transmute(args[0], dest)?;
            }

            _ => return Ok(false),
        }

        Ok(true)
    }

    /// "Intercept" a function call because we have something special to do for it.
    /// Returns `true` if an intercept happened.
    ///
    /// Handles two cases: functions that are really 128-bit binop lowerings
    /// (lang items), and the `panic`/`begin_panic` lang items, which are
    /// turned into an `InterpError::Panic` carrying the message/file/line/col.
    /// `dest` may be `None` only for diverging calls (the panic paths).
    pub fn hook_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        dest: Option<PlaceTy<'tcx, M::PointerTag>>,
    ) -> InterpResult<'tcx, bool> {
        let def_id = instance.def_id();
        // Some fn calls are actually BinOp intrinsics
        if let Some((op, oflo)) = self.tcx.is_binop_lang_item(def_id) {
            let dest = dest.expect("128 lowerings can't diverge");
            let l = self.read_immediate(args[0])?;
            let r = self.read_immediate(args[1])?;
            if oflo {
                self.binop_with_overflow(op, l, r, dest)?;
            } else {
                self.binop_ignore_overflow(op, l, r, dest)?;
            }
            return Ok(true);
        } else if Some(def_id) == self.tcx.lang_items().panic_fn() {
            assert!(args.len() == 1);
            // The single argument points at the panic-location tuple:
            // &(&'static str, &'static str, u32, u32)
            let place = self.deref_operand(args[0])?;
            let (msg, file, line, col) = (
                self.mplace_field(place, 0)?,
                self.mplace_field(place, 1)?,
                self.mplace_field(place, 2)?,
                self.mplace_field(place, 3)?,
            );

            // Read the interpreted program's strings into host Symbols.
            let msg_place = self.deref_operand(msg.into())?;
            let msg = Symbol::intern(self.read_str(msg_place)?);
            let file_place = self.deref_operand(file.into())?;
            let file = Symbol::intern(self.read_str(file_place)?);
            let line = self.read_scalar(line.into())?.to_u32()?;
            let col = self.read_scalar(col.into())?.to_u32()?;
            return Err(InterpError::Panic { msg, file, line, col }.into());
        } else if Some(def_id) == self.tcx.lang_items().begin_panic_fn() {
            assert!(args.len() == 2);
            // Unlike `panic_fn`, the message comes first and the location
            // tuple has no message field:
            // &'static str, &(&'static str, u32, u32)
            let msg = args[0];
            let place = self.deref_operand(args[1])?;
            let (file, line, col) = (
                self.mplace_field(place, 0)?,
                self.mplace_field(place, 1)?,
                self.mplace_field(place, 2)?,
            );

            let msg_place = self.deref_operand(msg.into())?;
            let msg = Symbol::intern(self.read_str(msg_place)?);
            let file_place = self.deref_operand(file.into())?;
            let file = Symbol::intern(self.read_str(file_place)?);
            let line = self.read_scalar(line.into())?.to_u32()?;
            let col = self.read_scalar(col.into())?.to_u32()?;
            return Err(InterpError::Panic { msg, file, line, col }.into());
        } else {
            return Ok(false);
        }
    }
}