1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 /*! rustc compiler intrinsics.
13 The corresponding definitions are in librustc/middle/trans/foreign.rs.
17 The atomic intrinsics provide common atomic operations on machine
18 words, with multiple possible memory orderings. They obey the same
semantics as C++11. See the LLVM documentation on [atomics].
21 [atomics]: http://llvm.org/docs/Atomics.html
23 A quick refresher on memory ordering:
* Acquire - a barrier for acquiring a lock. Subsequent reads and writes
26 take place after the barrier.
27 * Release - a barrier for releasing a lock. Preceding reads and writes
28 take place before the barrier.
29 * Sequentially consistent - sequentially consistent operations are
30 guaranteed to happen in order. This is the standard mode for working
31 with atomic types and is equivalent to Java's `volatile`.
35 // This is needed to prevent duplicate lang item definitions.
37 pub use realstd::unstable::intrinsics::{TyDesc, Opaque, TyVisitor};
// Signature of a compiler-generated "glue" function; takes a raw
// pointer to the value it operates on. NOTE(review): presumably this
// is drop/free glue emitted by trans -- confirm in librustc.
pub type GlueFn = extern "Rust" fn(*i8);
41 // NB: this has to be kept in sync with the Rust ABI.
    // Visitor callbacks invoked by `visit_tydesc` as it walks a type
    // descriptor (the trait header lies outside this excerpt). Each
    // callback returns a bool; NOTE(review): presumably returning
    // `false` aborts the traversal early -- confirm against the
    // reflection glue in librustc.

    // -- Nullary / primitive leaf types --------------------------------
    fn visit_bot(&self) -> bool;
    fn visit_nil(&self) -> bool;
    fn visit_bool(&self) -> bool;

    // Signed integer types.
    fn visit_int(&self) -> bool;
    fn visit_i8(&self) -> bool;
    fn visit_i16(&self) -> bool;
    fn visit_i32(&self) -> bool;
    fn visit_i64(&self) -> bool;

    // Unsigned integer types.
    fn visit_uint(&self) -> bool;
    fn visit_u8(&self) -> bool;
    fn visit_u16(&self) -> bool;
    fn visit_u32(&self) -> bool;
    fn visit_u64(&self) -> bool;

    // Floating-point types.
    fn visit_float(&self) -> bool;
    fn visit_f32(&self) -> bool;
    fn visit_f64(&self) -> bool;

    fn visit_char(&self) -> bool;

    // String flavors: managed box, unique box, slice, fixed-size.
    fn visit_estr_box(&self) -> bool;
    fn visit_estr_uniq(&self) -> bool;
    fn visit_estr_slice(&self) -> bool;
    // `n` is the element count; `sz`/`align` are size and alignment in
    // bytes (by naming convention -- TODO confirm).
    fn visit_estr_fixed(&self, n: uint, sz: uint, align: uint) -> bool;
    // -- Pointer-like types --------------------------------------------
    // `mtbl` appears to encode the pointee's mutability and `inner` is
    // the descriptor of the pointee/element type -- TODO confirm the
    // encoding of `mtbl` against the reflection code.
    fn visit_box(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_uniq(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_uniq_managed(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_ptr(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_rptr(&self, mtbl: uint, inner: *TyDesc) -> bool;

    // -- Vector types --------------------------------------------------
    fn visit_vec(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_unboxed_vec(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_box(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_uniq(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_uniq_managed(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_slice(&self, mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_evec_fixed(&self, n: uint, sz: uint, align: uint,
                        mtbl: uint, inner: *TyDesc) -> bool;
    // -- Record types: enter/field/leave bracket a walk over
    // `n_fields` fields; `i` is the field index, `name` its name.
    fn visit_enter_rec(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    fn visit_rec_field(&self, i: uint, name: &str,
                       mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_leave_rec(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;

    // -- Class types: same enter/field/leave protocol as records.
    fn visit_enter_class(&self, n_fields: uint,
                         sz: uint, align: uint) -> bool;
    fn visit_class_field(&self, i: uint, name: &str,
                         mtbl: uint, inner: *TyDesc) -> bool;
    fn visit_leave_class(&self, n_fields: uint,
                         sz: uint, align: uint) -> bool;

    // -- Tuple types: fields are positional, so no name parameter.
    fn visit_enter_tup(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    fn visit_tup_field(&self, i: uint, inner: *TyDesc) -> bool;
    fn visit_leave_tup(&self, n_fields: uint,
                       sz: uint, align: uint) -> bool;
    // -- Enum types. `get_disr` extracts the discriminant from a raw
    // value pointer.
    // NOTE(review): the parameter lists of visit_enter_enum_variant and
    // visit_leave_enum_variant are truncated in this excerpt -- the
    // remaining parameters are elided here, not removed.
    fn visit_enter_enum(&self, n_variants: uint,
                        get_disr: extern unsafe fn(ptr: *Opaque) -> int,
                        sz: uint, align: uint) -> bool;
    fn visit_enter_enum_variant(&self, variant: uint,
    fn visit_enum_variant_field(&self, i: uint, offset: uint, inner: *TyDesc) -> bool;
    fn visit_leave_enum_variant(&self, variant: uint,
    fn visit_leave_enum(&self, n_variants: uint,
                        get_disr: extern unsafe fn(ptr: *Opaque) -> int,
                        sz: uint, align: uint) -> bool;

    // -- Function types: enter, one callback per input, the output,
    // then leave. `purity`/`proto`/`mode`/`retstyle` are numeric codes
    // -- TODO confirm their encodings against the reflection code.
    fn visit_enter_fn(&self, purity: uint, proto: uint,
                      n_inputs: uint, retstyle: uint) -> bool;
    fn visit_fn_input(&self, i: uint, mode: uint, inner: *TyDesc) -> bool;
    fn visit_fn_output(&self, retstyle: uint, inner: *TyDesc) -> bool;
    fn visit_leave_fn(&self, purity: uint, proto: uint,
                      n_inputs: uint, retstyle: uint) -> bool;
    // -- Miscellaneous / opaque types -----------------------------------
    fn visit_trait(&self) -> bool;
    fn visit_var(&self) -> bool;
    fn visit_var_integral(&self) -> bool;
    // `i` is the index of the type parameter.
    fn visit_param(&self, i: uint) -> bool;
    fn visit_self(&self) -> bool;
    fn visit_type(&self) -> bool;
    fn visit_opaque_box(&self) -> bool;
    fn visit_constr(&self, inner: *TyDesc) -> bool;
    // `ck` presumably encodes the closure kind -- TODO confirm.
    fn visit_closure_ptr(&self, ck: uint) -> bool;
156 #[abi = "rust-intrinsic"]
157 extern "rust-intrinsic" {
    /// Atomic compare and exchange, sequentially consistent.
    pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, acquire ordering.
    pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, release ordering.
    pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, acquire-release ordering.
    pub fn atomic_cxchg_acqrel(dst: &mut int, old: int, src: int) -> int;
    /// Atomic compare and exchange, relaxed ordering.
    pub fn atomic_cxchg_relaxed(dst: &mut int, old: int, src: int) -> int;

    /// Atomic load, sequentially consistent.
    pub fn atomic_load(src: &int) -> int;
    /// Atomic load, acquire ordering.
    pub fn atomic_load_acq(src: &int) -> int;
    /// Atomic load, relaxed ordering.
    pub fn atomic_load_relaxed(src: &int) -> int;

    /// Atomic store, sequentially consistent.
    pub fn atomic_store(dst: &mut int, val: int);
    /// Atomic store, release ordering.
    pub fn atomic_store_rel(dst: &mut int, val: int);
    /// Atomic store, relaxed ordering.
    pub fn atomic_store_relaxed(dst: &mut int, val: int);

    /// Atomic exchange, sequentially consistent.
    pub fn atomic_xchg(dst: &mut int, src: int) -> int;
    /// Atomic exchange, acquire ordering.
    pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
    /// Atomic exchange, release ordering.
    pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
    /// Atomic exchange, acquire-release ordering.
    pub fn atomic_xchg_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic exchange, relaxed ordering.
    pub fn atomic_xchg_relaxed(dst: &mut int, src: int) -> int;
    /// Atomic addition, sequentially consistent.
    pub fn atomic_xadd(dst: &mut int, src: int) -> int;
    /// Atomic addition, acquire ordering.
    pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
    /// Atomic addition, release ordering.
    pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
    /// Atomic addition, acquire-release ordering.
    pub fn atomic_xadd_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic addition, relaxed ordering.
    pub fn atomic_xadd_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic subtraction, sequentially consistent.
    pub fn atomic_xsub(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, acquire ordering.
    pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, release ordering.
    pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, acquire-release ordering.
    pub fn atomic_xsub_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic subtraction, relaxed ordering.
    pub fn atomic_xsub_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise and, sequentially consistent.
    pub fn atomic_and(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, acquire ordering.
    pub fn atomic_and_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, release ordering.
    pub fn atomic_and_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, acquire-release ordering.
    pub fn atomic_and_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise and, relaxed ordering.
    pub fn atomic_and_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise nand, sequentially consistent.
    pub fn atomic_nand(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, acquire ordering.
    pub fn atomic_nand_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, release ordering.
    pub fn atomic_nand_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, acquire-release ordering.
    pub fn atomic_nand_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise nand, relaxed ordering.
    pub fn atomic_nand_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise or, sequentially consistent.
    pub fn atomic_or(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, acquire ordering.
    pub fn atomic_or_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, release ordering.
    pub fn atomic_or_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, acquire-release ordering.
    pub fn atomic_or_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise or, relaxed ordering.
    pub fn atomic_or_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic bitwise xor, sequentially consistent.
    pub fn atomic_xor(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, acquire ordering.
    pub fn atomic_xor_acq(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, release ordering.
    pub fn atomic_xor_rel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, acquire-release ordering.
    pub fn atomic_xor_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic bitwise xor, relaxed ordering.
    pub fn atomic_xor_relaxed(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, sequentially consistent.
    pub fn atomic_max(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, acquire ordering.
    pub fn atomic_max_acq(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, release ordering.
    pub fn atomic_max_rel(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, acquire-release ordering.
    pub fn atomic_max_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic signed maximum, relaxed ordering.
    pub fn atomic_max_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic signed minimum, sequentially consistent.
    pub fn atomic_min(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, acquire ordering.
    pub fn atomic_min_acq(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, release ordering.
    pub fn atomic_min_rel(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, acquire-release ordering.
    pub fn atomic_min_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic signed minimum, relaxed ordering.
    pub fn atomic_min_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic unsigned minimum, sequentially consistent.
    pub fn atomic_umin(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, acquire ordering.
    pub fn atomic_umin_acq(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, release ordering.
    pub fn atomic_umin_rel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, acquire-release ordering.
    pub fn atomic_umin_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned minimum, relaxed ordering.
    pub fn atomic_umin_relaxed(dst: &mut int, src: int) -> int;

    /// Atomic unsigned maximum, sequentially consistent.
    pub fn atomic_umax(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, acquire ordering.
    pub fn atomic_umax_acq(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, release ordering.
    pub fn atomic_umax_rel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, acquire-release ordering.
    pub fn atomic_umax_acqrel(dst: &mut int, src: int) -> int;
    /// Atomic unsigned maximum, relaxed ordering.
    pub fn atomic_umax_relaxed(dst: &mut int, src: int) -> int;

    /// An atomic fence, sequentially consistent.
    pub fn atomic_fence();
    /// An atomic fence, acquire ordering.
    pub fn atomic_fence_acq();
    /// An atomic fence, release ordering.
    pub fn atomic_fence_rel();
    /// An atomic fence, acquire-release ordering.
    pub fn atomic_fence_acqrel();
    /// The size of a type in bytes.
    ///
    /// This is the exact number of bytes in memory taken up by a
    /// value of the given type. In other words, a memset of this size
    /// would *exactly* overwrite a value. When laid out in vectors
    /// and structures there may be additional padding between
    /// elements.
    pub fn size_of<T>() -> uint;

    /// Move a value to a memory location containing a value.
    ///
    /// Drop glue is run on the destination, which must contain a
    /// valid Rust value.
    pub fn move_val<T>(dst: &mut T, src: T);

    /// Move a value to an uninitialized memory location.
    ///
    /// Drop glue is not run on the destination.
    pub fn move_val_init<T>(dst: &mut T, src: T);

    /// The minimum (required) alignment of a type, in bytes.
    pub fn min_align_of<T>() -> uint;
    /// The preferred alignment of a type, in bytes.
    pub fn pref_align_of<T>() -> uint;

    /// Get a static pointer to a type descriptor.
    pub fn get_tydesc<T>() -> *TyDesc;

    /// Create a value initialized to zero.
    ///
    /// `init` is unsafe because it returns a zeroed-out datum,
    /// which is unsafe unless T is POD. We don't have a POD
    /// kind yet. (See #4074).
    pub fn init<T>() -> T;

    /// Create an uninitialized value.
    pub fn uninit<T>() -> T;

    /// Move a value out of scope without running drop glue.
    ///
    /// `forget` is unsafe because the caller is responsible for
    /// ensuring the argument is deallocated already.
    pub fn forget<T>(_: T) -> ();
    /// Unsafely reinterpret the bits of a value of one type as a value
    /// of another type. NOTE(review): presumably requires the two
    /// types to have the same size -- confirm against the compiler's
    /// transmute checks.
    pub fn transmute<T,U>(e: T) -> U;

    /// Returns `true` if a type requires drop glue.
    pub fn needs_drop<T>() -> bool;

    /// Returns `true` if a type is managed (will be allocated on the local heap)
    pub fn contains_managed<T>() -> bool;

    /// Walk the type descriptor `td`, invoking the callbacks of the
    /// visitor `tv` for each component of the type.
    pub fn visit_tydesc(td: *TyDesc, tv: @TyVisitor);

    /// Invoke the given one-shot closure with the frame address.
    pub fn frame_address(f: &once fn(*u8));

    /// Get the address of the `__morestack` stack growth function.
    pub fn morestack_addr() -> *();
    /// Adjust a pointer by an offset.
    ///
    /// This is implemented as an intrinsic to avoid converting to and from an
    /// integer, since the conversion would throw away aliasing information.
    pub fn offset<T>(dst: *T, offset: int) -> *T;

    /// Equivalent to the `llvm.memcpy.p0i8.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memcpy32<T>(dst: *mut T, src: *T, count: u32);
    /// Equivalent to the `llvm.memcpy.p0i8.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memcpy64<T>(dst: *mut T, src: *T, count: u64);
    /// Equivalent to the `llvm.memmove.p0i8.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memmove32<T>(dst: *mut T, src: *T, count: u32);
    /// Equivalent to the `llvm.memmove.p0i8.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memmove64<T>(dst: *mut T, src: *T, count: u64);
    /// Equivalent to the `llvm.memset.p0i8.i32` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memset32<T>(dst: *mut T, val: u8, count: u32);
    /// Equivalent to the `llvm.memset.p0i8.i64` intrinsic, with a size of
    /// `count` * `size_of::<T>()` and an alignment of `min_align_of::<T>()`
    pub fn memset64<T>(dst: *mut T, val: u8, count: u64);
    // Square root.
    pub fn sqrtf32(x: f32) -> f32;
    pub fn sqrtf64(x: f64) -> f64;

    // `a` raised to the integer power `x`.
    pub fn powif32(a: f32, x: i32) -> f32;
    pub fn powif64(a: f64, x: i32) -> f64;

    // the following kill the stack canary without
    // `fixed_stack_segment`. This possibly only affects the f64
    // variants, but it's hard to be sure since it seems to only
    // occur with fairly specific arguments.
    // Sine.
    #[fixed_stack_segment]
    pub fn sinf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn sinf64(x: f64) -> f64;

    // Cosine.
    #[fixed_stack_segment]
    pub fn cosf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn cosf64(x: f64) -> f64;

    // `a` raised to the power `x`.
    #[fixed_stack_segment]
    pub fn powf32(a: f32, x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn powf64(a: f64, x: f64) -> f64;

    // Base-e exponential.
    #[fixed_stack_segment]
    pub fn expf32(x: f32) -> f32;
    #[fixed_stack_segment]
    pub fn expf64(x: f64) -> f64;

    // Base-2 exponential.
    pub fn exp2f32(x: f32) -> f32;
    pub fn exp2f64(x: f64) -> f64;

    // Natural logarithm.
    pub fn logf32(x: f32) -> f32;
    pub fn logf64(x: f64) -> f64;

    // Base-10 logarithm.
    pub fn log10f32(x: f32) -> f32;
    pub fn log10f64(x: f64) -> f64;

    // Base-2 logarithm.
    pub fn log2f32(x: f32) -> f32;
    pub fn log2f64(x: f64) -> f64;

    // Fused multiply-add: (a * b) + c with a single rounding, per the
    // `llvm.fma.*` intrinsics.
    pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
    pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;

    // Absolute value.
    pub fn fabsf32(x: f32) -> f32;
    pub fn fabsf64(x: f64) -> f64;

    // Round toward negative infinity.
    pub fn floorf32(x: f32) -> f32;
    pub fn floorf64(x: f64) -> f64;

    // Round toward positive infinity.
    pub fn ceilf32(x: f32) -> f32;
    pub fn ceilf64(x: f64) -> f64;

    // Round toward zero.
    pub fn truncf32(x: f32) -> f32;
    pub fn truncf64(x: f64) -> f64;

    // Population count (number of bits set).
    pub fn ctpop8(x: i8) -> i8;
    pub fn ctpop16(x: i16) -> i16;
    pub fn ctpop32(x: i32) -> i32;
    pub fn ctpop64(x: i64) -> i64;

    // Count leading zero bits.
    pub fn ctlz8(x: i8) -> i8;
    pub fn ctlz16(x: i16) -> i16;
    pub fn ctlz32(x: i32) -> i32;
    pub fn ctlz64(x: i64) -> i64;

    // Count trailing zero bits.
    pub fn cttz8(x: i8) -> i8;
    pub fn cttz16(x: i16) -> i16;
    pub fn cttz32(x: i32) -> i32;
    pub fn cttz64(x: i64) -> i64;

    // Reverse the byte order of the value.
    pub fn bswap16(x: i16) -> i16;
    pub fn bswap32(x: i32) -> i32;
    pub fn bswap64(x: i64) -> i64;
// Endian conversion helpers built on the `bswap*` intrinsics above:
// `to_le*` converts a host-order value to little-endian, `to_be*` to
// big-endian. On a target whose native order already matches, the
// conversion is the identity; otherwise the bytes are swapped.
#[cfg(target_endian = "little")] pub fn to_le16(x: i16) -> i16 { x }
#[cfg(target_endian = "big")] pub fn to_le16(x: i16) -> i16 { unsafe { bswap16(x) } }
#[cfg(target_endian = "little")] pub fn to_le32(x: i32) -> i32 { x }
#[cfg(target_endian = "big")] pub fn to_le32(x: i32) -> i32 { unsafe { bswap32(x) } }
#[cfg(target_endian = "little")] pub fn to_le64(x: i64) -> i64 { x }
#[cfg(target_endian = "big")] pub fn to_le64(x: i64) -> i64 { unsafe { bswap64(x) } }

#[cfg(target_endian = "little")] pub fn to_be16(x: i16) -> i16 { unsafe { bswap16(x) } }
#[cfg(target_endian = "big")] pub fn to_be16(x: i16) -> i16 { x }
#[cfg(target_endian = "little")] pub fn to_be32(x: i32) -> i32 { unsafe { bswap32(x) } }
#[cfg(target_endian = "big")] pub fn to_be32(x: i32) -> i32 { x }
#[cfg(target_endian = "little")] pub fn to_be64(x: i64) -> i64 { unsafe { bswap64(x) } }
#[cfg(target_endian = "big")] pub fn to_be64(x: i64) -> i64 { x }