1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! rustc compiler intrinsics.
13 //! The corresponding definitions are in librustc/middle/trans/foreign.rs.
17 //! The volatile intrinsics provide operations intended to act on I/O
18 //! memory, which are guaranteed to not be reordered by the compiler
19 //! across other volatile intrinsics. See the LLVM documentation on
22 //! [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
26 //! The atomic intrinsics provide common atomic operations on machine
27 //! words, with multiple possible memory orderings. They obey the same
//! semantics as C++11. See the LLVM documentation on [atomics].
30 //! [atomics]: http://llvm.org/docs/Atomics.html
32 //! A quick refresher on memory ordering:
34 //! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
35 //! take place after the barrier.
36 //! * Release - a barrier for releasing a lock. Preceding reads and writes
37 //! take place before the barrier.
38 //! * Sequentially consistent - sequentially consistent operations are
39 //! guaranteed to happen in order. This is the standard mode for working
40 //! with atomic types and is equivalent to Java's `volatile`.
43 #![allow(missing_docs)]
// Signature of a "glue" routine generated by the compiler (e.g. drop glue);
// it receives a raw pointer to the value being operated on.
pub type GlueFn = extern "Rust" fn(*const i8);
55 // Called when a value of type `T` is no longer needed
56 pub drop_glue: GlueFn,
58 // Name corresponding to the type
59 pub name: &'static str,
62 extern "rust-intrinsic" {
64 // NB: These intrinsics take unsafe pointers because they mutate aliased
65 // memory, which is not valid for either `&` or `&mut`.
    // Atomic compare-and-exchange: if `*dst` equals `old`, write `src` into
    // `dst`. Returns the value previously stored at `dst`.
    //
    // For every atomic operation below, the name suffix selects the memory
    // ordering (see the module docs): no suffix = sequentially consistent,
    // `_acq` = acquire, `_rel` = release, `_acqrel` = acquire-release,
    // `_relaxed` = relaxed.
    pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> T;
    pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> T;

    // Atomic load of the value at `src`.
    pub fn atomic_load<T>(src: *const T) -> T;
    pub fn atomic_load_acq<T>(src: *const T) -> T;
    pub fn atomic_load_relaxed<T>(src: *const T) -> T;

    // Atomic store of `val` into `dst`.
    pub fn atomic_store<T>(dst: *mut T, val: T);
    pub fn atomic_store_rel<T>(dst: *mut T, val: T);
    pub fn atomic_store_relaxed<T>(dst: *mut T, val: T);

    // Atomic exchange: stores `src` into `dst` and returns the old value.
    pub fn atomic_xchg<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xchg_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic add: `*dst += src`; returns the old value.
    pub fn atomic_xadd<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xadd_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic subtract: `*dst -= src`; returns the old value.
    pub fn atomic_xsub<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xsub_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic bitwise AND: `*dst &= src`; returns the old value.
    pub fn atomic_and<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_and_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic bitwise NAND: `*dst = !(*dst & src)`; returns the old value.
    pub fn atomic_nand<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_nand_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic bitwise OR: `*dst |= src`; returns the old value.
    pub fn atomic_or<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_or_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic bitwise XOR: `*dst ^= src`; returns the old value.
    pub fn atomic_xor<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_xor_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic signed maximum: stores `max(*dst, src)`; returns the old value.
    pub fn atomic_max<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_max_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic signed minimum: stores `min(*dst, src)`; returns the old value.
    pub fn atomic_min<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_min_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic unsigned minimum; returns the old value.
    pub fn atomic_umin<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umin_relaxed<T>(dst: *mut T, src: T) -> T;

    // Atomic unsigned maximum; returns the old value.
    pub fn atomic_umax<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_acq<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_rel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_acqrel<T>(dst: *mut T, src: T) -> T;
    pub fn atomic_umax_relaxed<T>(dst: *mut T, src: T) -> T;
148 extern "rust-intrinsic" {
    /// An atomic memory fence with sequentially consistent ordering.
    pub fn atomic_fence();
    /// An atomic memory fence with acquire ordering.
    pub fn atomic_fence_acq();
    /// An atomic memory fence with release ordering.
    pub fn atomic_fence_rel();
    /// An atomic memory fence with acquire-release ordering.
    pub fn atomic_fence_acqrel();
155 /// Abort the execution of the process.
158 /// Tell LLVM that this point in the code is not reachable,
159 /// enabling further optimizations.
161 /// NB: This is very different from the `unreachable!()` macro!
162 pub fn unreachable() -> !;
164 /// Inform the optimizer that a condition is always true.
165 /// If the condition is false, the behavior is undefined.
167 /// No code is generated for this intrinsic, but the optimizer will try
168 /// to preserve it (and its condition) between passes, which may interfere
169 /// with optimization of surrounding code and reduce performance. It should
170 /// not be used if the invariant can be discovered by the optimizer on its
171 /// own, or if it does not enable any significant optimizations.
172 pub fn assume(b: bool);
174 /// Execute a breakpoint trap, for inspection by a debugger.
177 /// The size of a type in bytes.
179 /// This is the exact number of bytes in memory taken up by a
180 /// value of the given type. In other words, a memset of this size
181 /// would *exactly* overwrite a value. When laid out in vectors
    /// and structures there may be additional padding between elements.
184 pub fn size_of<T>() -> uint;
186 /// Move a value to an uninitialized memory location.
188 /// Drop glue is not run on the destination.
189 pub fn move_val_init<T>(dst: &mut T, src: T);
    /// The minimum (required) alignment of a type, in bytes.
    pub fn min_align_of<T>() -> uint;
    /// The preferred alignment of a type, in bytes.
    pub fn pref_align_of<T>() -> uint;
194 /// Get a static pointer to a type descriptor.
195 pub fn get_tydesc<T>() -> *const TyDesc;
197 /// Gets an identifier which is globally unique to the specified type. This
198 /// function will return the same value for a type regardless of whichever
199 /// crate it is invoked in.
200 pub fn type_id<T: 'static>() -> TypeId;
203 /// Create a value initialized to zero.
205 /// `init` is unsafe because it returns a zeroed-out datum,
206 /// which is unsafe unless T is Copy.
207 pub fn init<T>() -> T;
209 /// Create an uninitialized value.
210 pub fn uninit<T>() -> T;
212 /// Move a value out of scope without running drop glue.
214 /// `forget` is unsafe because the caller is responsible for
215 /// ensuring the argument is deallocated already.
216 pub fn forget<T>(_: T) -> ();
218 /// Unsafely transforms a value of one type into a value of another type.
220 /// Both types must have the same size and alignment, and this guarantee
221 /// is enforced at compile-time.
228 /// let v: &[u8] = unsafe { mem::transmute("L") };
229 /// assert!(v == &[76u8]);
231 pub fn transmute<T,U>(e: T) -> U;
233 /// Gives the address for the return value of the enclosing function.
235 /// Using this intrinsic in a function that does not use an out pointer
236 /// will trigger a compiler error.
237 pub fn return_address() -> *const u8;
239 /// Returns `true` if a type requires drop glue.
240 pub fn needs_drop<T>() -> bool;
242 /// Returns `true` if a type is managed (will be allocated on the local heap)
243 pub fn owns_managed<T>() -> bool;
245 /// Calculates the offset from a pointer. The offset *must* be in-bounds of
246 /// the object, or one-byte-past-the-end. An arithmetic overflow is also
247 /// undefined behaviour.
249 /// This is implemented as an intrinsic to avoid converting to and from an
250 /// integer, since the conversion would throw away aliasing information.
251 pub fn offset<T>(dst: *const T, offset: int) -> *const T;
253 /// Copies data from one location to another.
255 /// Copies `count` elements (not bytes) from `src` to `dst`. The source
256 /// and destination may *not* overlap.
258 /// `copy_nonoverlapping_memory` is semantically equivalent to C's `memcpy`.
262 /// A safe swap function:
268 /// fn swap<T>(x: &mut T, y: &mut T) {
270 /// // Give ourselves some scratch space to work with
271 /// let mut t: T = mem::uninitialized();
273 /// // Perform the swap, `&mut` pointers never alias
274 /// ptr::copy_nonoverlapping_memory(&mut t, &*x, 1);
275 /// ptr::copy_nonoverlapping_memory(x, &*y, 1);
276 /// ptr::copy_nonoverlapping_memory(y, &t, 1);
278 /// // y and t now point to the same thing, but we need to completely forget `tmp`
279 /// // because it's no longer relevant.
287 /// If the source and destination overlap then the behavior of this
288 /// function is undefined.
290 pub fn copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: uint);
292 /// Copies data from one location to another.
294 /// Copies `count` elements (not bytes) from `src` to `dst`. The source
295 /// and destination may overlap.
297 /// `copy_memory` is semantically equivalent to C's `memmove`.
301 /// Efficiently create a Rust vector from an unsafe buffer:
306 /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: uint) -> Vec<T> {
307 /// let mut dst = Vec::with_capacity(elts);
308 /// dst.set_len(elts);
309 /// ptr::copy_memory(dst.as_mut_ptr(), ptr, elts);
315 pub fn copy_memory<T>(dst: *mut T, src: *const T, count: uint);
317 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
318 /// bytes of memory starting at `dst` to `c`.
319 #[experimental = "uncertain about naming and semantics"]
320 pub fn set_memory<T>(dst: *mut T, val: u8, count: uint);
322 /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
323 /// a size of `count` * `size_of::<T>()` and an alignment of
324 /// `min_align_of::<T>()`
    /// The volatile parameter is set to `true`, so it will not be optimized out.
327 pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
329 /// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
330 /// a size of `count` * `size_of::<T>()` and an alignment of
331 /// `min_align_of::<T>()`
    /// The volatile parameter is set to `true`, so it will not be optimized out.
334 pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: uint);
335 /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
336 /// size of `count` * `size_of::<T>()` and an alignment of
337 /// `min_align_of::<T>()`.
    /// The volatile parameter is set to `true`, so it will not be optimized out.
340 pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: uint);
342 /// Perform a volatile load from the `src` pointer.
343 pub fn volatile_load<T>(src: *const T) -> T;
344 /// Perform a volatile store to the `dst` pointer.
345 pub fn volatile_store<T>(dst: *mut T, val: T);
347 /// Returns the square root of an `f32`
348 pub fn sqrtf32(x: f32) -> f32;
349 /// Returns the square root of an `f64`
350 pub fn sqrtf64(x: f64) -> f64;
352 /// Raises an `f32` to an integer power.
353 pub fn powif32(a: f32, x: i32) -> f32;
354 /// Raises an `f64` to an integer power.
355 pub fn powif64(a: f64, x: i32) -> f64;
357 /// Returns the sine of an `f32`.
358 pub fn sinf32(x: f32) -> f32;
359 /// Returns the sine of an `f64`.
360 pub fn sinf64(x: f64) -> f64;
362 /// Returns the cosine of an `f32`.
363 pub fn cosf32(x: f32) -> f32;
364 /// Returns the cosine of an `f64`.
365 pub fn cosf64(x: f64) -> f64;
367 /// Raises an `f32` to an `f32` power.
368 pub fn powf32(a: f32, x: f32) -> f32;
369 /// Raises an `f64` to an `f64` power.
370 pub fn powf64(a: f64, x: f64) -> f64;
372 /// Returns the exponential of an `f32`.
373 pub fn expf32(x: f32) -> f32;
374 /// Returns the exponential of an `f64`.
375 pub fn expf64(x: f64) -> f64;
377 /// Returns 2 raised to the power of an `f32`.
378 pub fn exp2f32(x: f32) -> f32;
379 /// Returns 2 raised to the power of an `f64`.
380 pub fn exp2f64(x: f64) -> f64;
382 /// Returns the natural logarithm of an `f32`.
383 pub fn logf32(x: f32) -> f32;
384 /// Returns the natural logarithm of an `f64`.
385 pub fn logf64(x: f64) -> f64;
387 /// Returns the base 10 logarithm of an `f32`.
388 pub fn log10f32(x: f32) -> f32;
389 /// Returns the base 10 logarithm of an `f64`.
390 pub fn log10f64(x: f64) -> f64;
392 /// Returns the base 2 logarithm of an `f32`.
393 pub fn log2f32(x: f32) -> f32;
394 /// Returns the base 2 logarithm of an `f64`.
395 pub fn log2f64(x: f64) -> f64;
397 /// Returns `a * b + c` for `f32` values.
398 pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
399 /// Returns `a * b + c` for `f64` values.
400 pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
402 /// Returns the absolute value of an `f32`.
403 pub fn fabsf32(x: f32) -> f32;
404 /// Returns the absolute value of an `f64`.
405 pub fn fabsf64(x: f64) -> f64;
407 /// Copies the sign from `y` to `x` for `f32` values.
408 pub fn copysignf32(x: f32, y: f32) -> f32;
409 /// Copies the sign from `y` to `x` for `f64` values.
410 pub fn copysignf64(x: f64, y: f64) -> f64;
412 /// Returns the largest integer less than or equal to an `f32`.
413 pub fn floorf32(x: f32) -> f32;
414 /// Returns the largest integer less than or equal to an `f64`.
415 pub fn floorf64(x: f64) -> f64;
417 /// Returns the smallest integer greater than or equal to an `f32`.
418 pub fn ceilf32(x: f32) -> f32;
419 /// Returns the smallest integer greater than or equal to an `f64`.
420 pub fn ceilf64(x: f64) -> f64;
422 /// Returns the integer part of an `f32`.
423 pub fn truncf32(x: f32) -> f32;
424 /// Returns the integer part of an `f64`.
425 pub fn truncf64(x: f64) -> f64;
427 /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
428 /// if the argument is not an integer.
429 pub fn rintf32(x: f32) -> f32;
430 /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
431 /// if the argument is not an integer.
432 pub fn rintf64(x: f64) -> f64;
434 /// Returns the nearest integer to an `f32`.
435 pub fn nearbyintf32(x: f32) -> f32;
436 /// Returns the nearest integer to an `f64`.
437 pub fn nearbyintf64(x: f64) -> f64;
439 /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
440 pub fn roundf32(x: f32) -> f32;
441 /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
442 pub fn roundf64(x: f64) -> f64;
444 /// Returns the number of bits set in a `u8`.
445 pub fn ctpop8(x: u8) -> u8;
446 /// Returns the number of bits set in a `u16`.
447 pub fn ctpop16(x: u16) -> u16;
448 /// Returns the number of bits set in a `u32`.
449 pub fn ctpop32(x: u32) -> u32;
450 /// Returns the number of bits set in a `u64`.
451 pub fn ctpop64(x: u64) -> u64;
453 /// Returns the number of leading bits unset in a `u8`.
454 pub fn ctlz8(x: u8) -> u8;
455 /// Returns the number of leading bits unset in a `u16`.
456 pub fn ctlz16(x: u16) -> u16;
457 /// Returns the number of leading bits unset in a `u32`.
458 pub fn ctlz32(x: u32) -> u32;
459 /// Returns the number of leading bits unset in a `u64`.
460 pub fn ctlz64(x: u64) -> u64;
462 /// Returns the number of trailing bits unset in a `u8`.
463 pub fn cttz8(x: u8) -> u8;
464 /// Returns the number of trailing bits unset in a `u16`.
465 pub fn cttz16(x: u16) -> u16;
466 /// Returns the number of trailing bits unset in a `u32`.
467 pub fn cttz32(x: u32) -> u32;
468 /// Returns the number of trailing bits unset in a `u64`.
469 pub fn cttz64(x: u64) -> u64;
471 /// Reverses the bytes in a `u16`.
472 pub fn bswap16(x: u16) -> u16;
473 /// Reverses the bytes in a `u32`.
474 pub fn bswap32(x: u32) -> u32;
475 /// Reverses the bytes in a `u64`.
476 pub fn bswap64(x: u64) -> u64;
478 /// Performs checked `i8` addition.
479 pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
480 /// Performs checked `i16` addition.
481 pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
482 /// Performs checked `i32` addition.
483 pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
484 /// Performs checked `i64` addition.
485 pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);
487 /// Performs checked `u8` addition.
488 pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
489 /// Performs checked `u16` addition.
490 pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
491 /// Performs checked `u32` addition.
492 pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
493 /// Performs checked `u64` addition.
494 pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);
496 /// Performs checked `i8` subtraction.
497 pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
498 /// Performs checked `i16` subtraction.
499 pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
500 /// Performs checked `i32` subtraction.
501 pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
502 /// Performs checked `i64` subtraction.
503 pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);
505 /// Performs checked `u8` subtraction.
506 pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
507 /// Performs checked `u16` subtraction.
508 pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
509 /// Performs checked `u32` subtraction.
510 pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
511 /// Performs checked `u64` subtraction.
512 pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);
514 /// Performs checked `i8` multiplication.
515 pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
516 /// Performs checked `i16` multiplication.
517 pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
518 /// Performs checked `i32` multiplication.
519 pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
520 /// Performs checked `i64` multiplication.
521 pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);
523 /// Performs checked `u8` multiplication.
524 pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
525 /// Performs checked `u16` multiplication.
526 pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
527 /// Performs checked `u32` multiplication.
528 pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
529 /// Performs checked `u64` multiplication.
530 pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);
534 /// `TypeId` represents a globally unique identifier for a type
535 #[lang="type_id"] // This needs to be kept in lockstep with the code in trans/intrinsic.rs and
536 // middle/lang_items.rs
537 #[deriving(Clone, PartialEq, Eq, Show)]
543 /// Returns the `TypeId` of the type this generic function has been instantiated with
544 pub fn of<T: 'static>() -> TypeId {
545 unsafe { type_id::<T>() }
    /// Returns the underlying `u64` identifier of this `TypeId`.
    pub fn hash(&self) -> u64 { self.t }