1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 /*! rustc compiler intrinsics.
13 The corresponding definitions are in librustc/middle/trans/foreign.rs.
17 The volatile intrinsics provide operations intended to act on I/O
18 memory, which are guaranteed to not be reordered by the compiler
19 across other volatile intrinsics. See the LLVM documentation on
22 [volatile]: http://llvm.org/docs/LangRef.html#volatile-memory-accesses
26 The atomic intrinsics provide common atomic operations on machine
27 words, with multiple possible memory orderings. They obey the same
semantics as C++11. See the LLVM documentation on [atomics].
30 [atomics]: http://llvm.org/docs/Atomics.html
32 A quick refresher on memory ordering:
34 * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
35 take place after the barrier.
36 * Release - a barrier for releasing a lock. Preceding reads and writes
37 take place before the barrier.
38 * Sequentially consistent - sequentially consistent operations are
39 guaranteed to happen in order. This is the standard mode for working
40 with atomic types and is equivalent to Java's `volatile`.
45 #![allow(missing_doc)]
47 pub type GlueFn = extern "Rust" fn(*const i8);
57 // Called when a value of type `T` is no longer needed
58 pub drop_glue: GlueFn,
60 // Called by reflection visitor to visit a value of type `T`
62 pub visit_glue: GlueFn,
64 // Name corresponding to the type
65 pub name: &'static str,
78 fn visit_bot(&mut self) -> bool;
79 fn visit_nil(&mut self) -> bool;
80 fn visit_bool(&mut self) -> bool;
82 fn visit_int(&mut self) -> bool;
83 fn visit_i8(&mut self) -> bool;
84 fn visit_i16(&mut self) -> bool;
85 fn visit_i32(&mut self) -> bool;
86 fn visit_i64(&mut self) -> bool;
88 fn visit_uint(&mut self) -> bool;
89 fn visit_u8(&mut self) -> bool;
90 fn visit_u16(&mut self) -> bool;
91 fn visit_u32(&mut self) -> bool;
92 fn visit_u64(&mut self) -> bool;
94 fn visit_f32(&mut self) -> bool;
95 fn visit_f64(&mut self) -> bool;
97 fn visit_char(&mut self) -> bool;
99 fn visit_estr_slice(&mut self) -> bool;
101 fn visit_box(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
102 fn visit_uniq(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
103 fn visit_ptr(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
104 fn visit_rptr(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
106 fn visit_evec_slice(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
107 fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
108 inner: *const TyDesc) -> bool;
110 fn visit_enter_rec(&mut self, n_fields: uint,
111 sz: uint, align: uint) -> bool;
112 fn visit_rec_field(&mut self, i: uint, name: &str,
113 mtbl: uint, inner: *const TyDesc) -> bool;
114 fn visit_leave_rec(&mut self, n_fields: uint,
115 sz: uint, align: uint) -> bool;
117 fn visit_enter_class(&mut self, name: &str, named_fields: bool, n_fields: uint,
118 sz: uint, align: uint) -> bool;
119 fn visit_class_field(&mut self, i: uint, name: &str, named: bool,
120 mtbl: uint, inner: *const TyDesc) -> bool;
121 fn visit_leave_class(&mut self, name: &str, named_fields: bool, n_fields: uint,
122 sz: uint, align: uint) -> bool;
124 fn visit_enter_tup(&mut self, n_fields: uint,
125 sz: uint, align: uint) -> bool;
126 fn visit_tup_field(&mut self, i: uint, inner: *const TyDesc) -> bool;
127 fn visit_leave_tup(&mut self, n_fields: uint,
128 sz: uint, align: uint) -> bool;
130 fn visit_enter_enum(&mut self, n_variants: uint,
131 get_disr: unsafe extern fn(ptr: *const Opaque) -> Disr,
132 sz: uint, align: uint) -> bool;
133 fn visit_enter_enum_variant(&mut self, variant: uint,
137 fn visit_enum_variant_field(&mut self, i: uint, offset: uint,
138 inner: *const TyDesc) -> bool;
139 fn visit_leave_enum_variant(&mut self, variant: uint,
143 fn visit_leave_enum(&mut self, n_variants: uint,
144 get_disr: unsafe extern fn(ptr: *const Opaque) -> Disr,
145 sz: uint, align: uint) -> bool;
147 fn visit_enter_fn(&mut self, purity: uint, proto: uint,
148 n_inputs: uint, retstyle: uint) -> bool;
149 fn visit_fn_input(&mut self, i: uint, mode: uint,
150 inner: *const TyDesc) -> bool;
151 fn visit_fn_output(&mut self, retstyle: uint, variadic: bool,
152 inner: *const TyDesc) -> bool;
153 fn visit_leave_fn(&mut self, purity: uint, proto: uint,
154 n_inputs: uint, retstyle: uint) -> bool;
156 fn visit_trait(&mut self, name: &str) -> bool;
157 fn visit_param(&mut self, i: uint) -> bool;
158 fn visit_self(&mut self) -> bool;
161 extern "rust-intrinsic" {
163 // NB: These intrinsics take unsafe pointers because they mutate aliased
164 // memory, which is not valid for either `&` or `&mut`.
166 pub fn atomic_cxchg<T>(dst: *mut T, old: T, src: T) -> T;
167 pub fn atomic_cxchg_acq<T>(dst: *mut T, old: T, src: T) -> T;
168 pub fn atomic_cxchg_rel<T>(dst: *mut T, old: T, src: T) -> T;
169 pub fn atomic_cxchg_acqrel<T>(dst: *mut T, old: T, src: T) -> T;
170 pub fn atomic_cxchg_relaxed<T>(dst: *mut T, old: T, src: T) -> T;
172 pub fn atomic_load<T>(src: *const T) -> T;
173 pub fn atomic_load_acq<T>(src: *const T) -> T;
174 pub fn atomic_load_relaxed<T>(src: *const T) -> T;
176 pub fn atomic_store<T>(dst: *mut T, val: T);
177 pub fn atomic_store_rel<T>(dst: *mut T, val: T);
178 pub fn atomic_store_relaxed<T>(dst: *mut T, val: T);
180 pub fn atomic_xchg<T>(dst: *mut T, src: T) -> T;
181 pub fn atomic_xchg_acq<T>(dst: *mut T, src: T) -> T;
182 pub fn atomic_xchg_rel<T>(dst: *mut T, src: T) -> T;
183 pub fn atomic_xchg_acqrel<T>(dst: *mut T, src: T) -> T;
184 pub fn atomic_xchg_relaxed<T>(dst: *mut T, src: T) -> T;
186 pub fn atomic_xadd<T>(dst: *mut T, src: T) -> T;
187 pub fn atomic_xadd_acq<T>(dst: *mut T, src: T) -> T;
188 pub fn atomic_xadd_rel<T>(dst: *mut T, src: T) -> T;
189 pub fn atomic_xadd_acqrel<T>(dst: *mut T, src: T) -> T;
190 pub fn atomic_xadd_relaxed<T>(dst: *mut T, src: T) -> T;
192 pub fn atomic_xsub<T>(dst: *mut T, src: T) -> T;
193 pub fn atomic_xsub_acq<T>(dst: *mut T, src: T) -> T;
194 pub fn atomic_xsub_rel<T>(dst: *mut T, src: T) -> T;
195 pub fn atomic_xsub_acqrel<T>(dst: *mut T, src: T) -> T;
196 pub fn atomic_xsub_relaxed<T>(dst: *mut T, src: T) -> T;
198 pub fn atomic_and<T>(dst: *mut T, src: T) -> T;
199 pub fn atomic_and_acq<T>(dst: *mut T, src: T) -> T;
200 pub fn atomic_and_rel<T>(dst: *mut T, src: T) -> T;
201 pub fn atomic_and_acqrel<T>(dst: *mut T, src: T) -> T;
202 pub fn atomic_and_relaxed<T>(dst: *mut T, src: T) -> T;
204 pub fn atomic_nand<T>(dst: *mut T, src: T) -> T;
205 pub fn atomic_nand_acq<T>(dst: *mut T, src: T) -> T;
206 pub fn atomic_nand_rel<T>(dst: *mut T, src: T) -> T;
207 pub fn atomic_nand_acqrel<T>(dst: *mut T, src: T) -> T;
208 pub fn atomic_nand_relaxed<T>(dst: *mut T, src: T) -> T;
210 pub fn atomic_or<T>(dst: *mut T, src: T) -> T;
211 pub fn atomic_or_acq<T>(dst: *mut T, src: T) -> T;
212 pub fn atomic_or_rel<T>(dst: *mut T, src: T) -> T;
213 pub fn atomic_or_acqrel<T>(dst: *mut T, src: T) -> T;
214 pub fn atomic_or_relaxed<T>(dst: *mut T, src: T) -> T;
216 pub fn atomic_xor<T>(dst: *mut T, src: T) -> T;
217 pub fn atomic_xor_acq<T>(dst: *mut T, src: T) -> T;
218 pub fn atomic_xor_rel<T>(dst: *mut T, src: T) -> T;
219 pub fn atomic_xor_acqrel<T>(dst: *mut T, src: T) -> T;
220 pub fn atomic_xor_relaxed<T>(dst: *mut T, src: T) -> T;
222 pub fn atomic_max<T>(dst: *mut T, src: T) -> T;
223 pub fn atomic_max_acq<T>(dst: *mut T, src: T) -> T;
224 pub fn atomic_max_rel<T>(dst: *mut T, src: T) -> T;
225 pub fn atomic_max_acqrel<T>(dst: *mut T, src: T) -> T;
226 pub fn atomic_max_relaxed<T>(dst: *mut T, src: T) -> T;
228 pub fn atomic_min<T>(dst: *mut T, src: T) -> T;
229 pub fn atomic_min_acq<T>(dst: *mut T, src: T) -> T;
230 pub fn atomic_min_rel<T>(dst: *mut T, src: T) -> T;
231 pub fn atomic_min_acqrel<T>(dst: *mut T, src: T) -> T;
232 pub fn atomic_min_relaxed<T>(dst: *mut T, src: T) -> T;
234 pub fn atomic_umin<T>(dst: *mut T, src: T) -> T;
235 pub fn atomic_umin_acq<T>(dst: *mut T, src: T) -> T;
236 pub fn atomic_umin_rel<T>(dst: *mut T, src: T) -> T;
237 pub fn atomic_umin_acqrel<T>(dst: *mut T, src: T) -> T;
238 pub fn atomic_umin_relaxed<T>(dst: *mut T, src: T) -> T;
240 pub fn atomic_umax<T>(dst: *mut T, src: T) -> T;
241 pub fn atomic_umax_acq<T>(dst: *mut T, src: T) -> T;
242 pub fn atomic_umax_rel<T>(dst: *mut T, src: T) -> T;
243 pub fn atomic_umax_acqrel<T>(dst: *mut T, src: T) -> T;
244 pub fn atomic_umax_relaxed<T>(dst: *mut T, src: T) -> T;
247 extern "rust-intrinsic" {
249 pub fn atomic_fence();
250 pub fn atomic_fence_acq();
251 pub fn atomic_fence_rel();
252 pub fn atomic_fence_acqrel();
254 /// Abort the execution of the process.
257 /// Tell LLVM that this point in the code is not reachable,
258 /// enabling further optimizations.
260 /// NB: This is very different from the `unreachable!()` macro!
261 pub fn unreachable() -> !;
263 /// Inform the optimizer that a condition is always true.
264 /// If the condition is false, the behavior is undefined.
266 /// No code is generated for this intrinsic, but the optimizer will try
267 /// to preserve it (and its condition) between passes, which may interfere
268 /// with optimization of surrounding code and reduce performance. It should
269 /// not be used if the invariant can be discovered by the optimizer on its
270 /// own, or if it does not enable any significant optimizations.
272 pub fn assume(b: bool);
274 /// Execute a breakpoint trap, for inspection by a debugger.
277 /// The size of a type in bytes.
279 /// This is the exact number of bytes in memory taken up by a
280 /// value of the given type. In other words, a memset of this size
281 /// would *exactly* overwrite a value. When laid out in vectors
282 /// and structures there may be additional padding between
284 pub fn size_of<T>() -> uint;
286 /// Move a value to an uninitialized memory location.
288 /// Drop glue is not run on the destination.
289 pub fn move_val_init<T>(dst: &mut T, src: T);
291 pub fn min_align_of<T>() -> uint;
292 pub fn pref_align_of<T>() -> uint;
294 /// Get a static pointer to a type descriptor.
295 pub fn get_tydesc<T>() -> *const TyDesc;
297 /// Gets an identifier which is globally unique to the specified type. This
298 /// function will return the same value for a type regardless of whichever
299 /// crate it is invoked in.
300 pub fn type_id<T: 'static>() -> TypeId;
303 /// Create a value initialized to zero.
305 /// `init` is unsafe because it returns a zeroed-out datum,
306 /// which is unsafe unless T is Copy.
307 pub fn init<T>() -> T;
309 /// Create an uninitialized value.
310 pub fn uninit<T>() -> T;
312 /// Move a value out of scope without running drop glue.
314 /// `forget` is unsafe because the caller is responsible for
315 /// ensuring the argument is deallocated already.
316 pub fn forget<T>(_: T) -> ();
318 /// Unsafely transforms a value of one type into a value of another type.
320 /// Both types must have the same size and alignment, and this guarantee
321 /// is enforced at compile-time.
328 /// let v: &[u8] = unsafe { mem::transmute("L") };
329 /// assert!(v == [76u8]);
331 pub fn transmute<T,U>(e: T) -> U;
333 /// Gives the address for the return value of the enclosing function.
335 /// Using this intrinsic in a function that does not use an out pointer
336 /// will trigger a compiler error.
337 pub fn return_address() -> *const u8;
339 /// Returns `true` if a type requires drop glue.
340 pub fn needs_drop<T>() -> bool;
342 /// Returns `true` if a type is managed (will be allocated on the local heap)
343 pub fn owns_managed<T>() -> bool;
345 /// Calculates the offset from a pointer. The offset *must* be in-bounds of
346 /// the object, or one-byte-past-the-end. An arithmetic overflow is also
347 /// undefined behaviour.
349 /// This is implemented as an intrinsic to avoid converting to and from an
350 /// integer, since the conversion would throw away aliasing information.
351 pub fn offset<T>(dst: *const T, offset: int) -> *const T;
353 /// Copies data from one location to another.
355 /// Copies `count` elements (not bytes) from `src` to `dst`. The source
356 /// and destination may *not* overlap.
358 /// `copy_nonoverlapping_memory` is semantically equivalent to C's `memcpy`.
362 /// A safe swap function:
368 /// fn swap<T>(x: &mut T, y: &mut T) {
370 /// // Give ourselves some scratch space to work with
371 /// let mut t: T = mem::uninitialized();
373 /// // Perform the swap, `&mut` pointers never alias
374 /// ptr::copy_nonoverlapping_memory(&mut t, &*x, 1);
375 /// ptr::copy_nonoverlapping_memory(x, &*y, 1);
376 /// ptr::copy_nonoverlapping_memory(y, &t, 1);
    /// // y and t now point to the same thing, but we need to completely forget `t`
379 /// // because it's no longer relevant.
387 /// If the source and destination overlap then the behavior of this
388 /// function is undefined.
390 pub fn copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T, count: uint);
392 /// Copies data from one location to another.
394 /// Copies `count` elements (not bytes) from `src` to `dst`. The source
395 /// and destination may overlap.
397 /// `copy_memory` is semantically equivalent to C's `memmove`.
401 /// Efficiently create a Rust vector from an unsafe buffer:
406 /// unsafe fn from_buf_raw<T>(ptr: *const T, elts: uint) -> Vec<T> {
407 /// let mut dst = Vec::with_capacity(elts);
408 /// dst.set_len(elts);
409 /// ptr::copy_memory(dst.as_mut_ptr(), ptr, elts);
415 pub fn copy_memory<T>(dst: *mut T, src: *const T, count: uint);
417 /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
418 /// bytes of memory starting at `dst` to `c`.
419 #[experimental = "uncertain about naming and semantics"]
420 pub fn set_memory<T>(dst: *mut T, val: u8, count: uint);
422 /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
423 /// a size of `count` * `size_of::<T>()` and an alignment of
424 /// `min_align_of::<T>()`
    /// The volatile parameter is set to `true`, so it will not be optimized out.
427 pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *const T,
429 /// Equivalent to the appropriate `llvm.memmove.p0i8.0i8.*` intrinsic, with
430 /// a size of `count` * `size_of::<T>()` and an alignment of
431 /// `min_align_of::<T>()`
    /// The volatile parameter is set to `true`, so it will not be optimized out.
434 pub fn volatile_copy_memory<T>(dst: *mut T, src: *const T, count: uint);
435 /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
436 /// size of `count` * `size_of::<T>()` and an alignment of
437 /// `min_align_of::<T>()`.
    /// The volatile parameter is set to `true`, so it will not be optimized out.
440 pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: uint);
442 /// Perform a volatile load from the `src` pointer.
443 pub fn volatile_load<T>(src: *const T) -> T;
444 /// Perform a volatile store to the `dst` pointer.
445 pub fn volatile_store<T>(dst: *mut T, val: T);
447 /// Returns the square root of an `f32`
448 pub fn sqrtf32(x: f32) -> f32;
449 /// Returns the square root of an `f64`
450 pub fn sqrtf64(x: f64) -> f64;
452 /// Raises an `f32` to an integer power.
453 pub fn powif32(a: f32, x: i32) -> f32;
454 /// Raises an `f64` to an integer power.
455 pub fn powif64(a: f64, x: i32) -> f64;
457 /// Returns the sine of an `f32`.
458 pub fn sinf32(x: f32) -> f32;
459 /// Returns the sine of an `f64`.
460 pub fn sinf64(x: f64) -> f64;
462 /// Returns the cosine of an `f32`.
463 pub fn cosf32(x: f32) -> f32;
464 /// Returns the cosine of an `f64`.
465 pub fn cosf64(x: f64) -> f64;
467 /// Raises an `f32` to an `f32` power.
468 pub fn powf32(a: f32, x: f32) -> f32;
469 /// Raises an `f64` to an `f64` power.
470 pub fn powf64(a: f64, x: f64) -> f64;
472 /// Returns the exponential of an `f32`.
473 pub fn expf32(x: f32) -> f32;
474 /// Returns the exponential of an `f64`.
475 pub fn expf64(x: f64) -> f64;
477 /// Returns 2 raised to the power of an `f32`.
478 pub fn exp2f32(x: f32) -> f32;
479 /// Returns 2 raised to the power of an `f64`.
480 pub fn exp2f64(x: f64) -> f64;
482 /// Returns the natural logarithm of an `f32`.
483 pub fn logf32(x: f32) -> f32;
484 /// Returns the natural logarithm of an `f64`.
485 pub fn logf64(x: f64) -> f64;
487 /// Returns the base 10 logarithm of an `f32`.
488 pub fn log10f32(x: f32) -> f32;
489 /// Returns the base 10 logarithm of an `f64`.
490 pub fn log10f64(x: f64) -> f64;
492 /// Returns the base 2 logarithm of an `f32`.
493 pub fn log2f32(x: f32) -> f32;
494 /// Returns the base 2 logarithm of an `f64`.
495 pub fn log2f64(x: f64) -> f64;
497 /// Returns `a * b + c` for `f32` values.
498 pub fn fmaf32(a: f32, b: f32, c: f32) -> f32;
499 /// Returns `a * b + c` for `f64` values.
500 pub fn fmaf64(a: f64, b: f64, c: f64) -> f64;
502 /// Returns the absolute value of an `f32`.
503 pub fn fabsf32(x: f32) -> f32;
504 /// Returns the absolute value of an `f64`.
505 pub fn fabsf64(x: f64) -> f64;
507 /// Copies the sign from `y` to `x` for `f32` values.
508 pub fn copysignf32(x: f32, y: f32) -> f32;
509 /// Copies the sign from `y` to `x` for `f64` values.
510 pub fn copysignf64(x: f64, y: f64) -> f64;
512 /// Returns the largest integer less than or equal to an `f32`.
513 pub fn floorf32(x: f32) -> f32;
514 /// Returns the largest integer less than or equal to an `f64`.
515 pub fn floorf64(x: f64) -> f64;
517 /// Returns the smallest integer greater than or equal to an `f32`.
518 pub fn ceilf32(x: f32) -> f32;
519 /// Returns the smallest integer greater than or equal to an `f64`.
520 pub fn ceilf64(x: f64) -> f64;
522 /// Returns the integer part of an `f32`.
523 pub fn truncf32(x: f32) -> f32;
524 /// Returns the integer part of an `f64`.
525 pub fn truncf64(x: f64) -> f64;
527 /// Returns the nearest integer to an `f32`. May raise an inexact floating-point exception
528 /// if the argument is not an integer.
529 pub fn rintf32(x: f32) -> f32;
530 /// Returns the nearest integer to an `f64`. May raise an inexact floating-point exception
531 /// if the argument is not an integer.
532 pub fn rintf64(x: f64) -> f64;
534 /// Returns the nearest integer to an `f32`.
535 pub fn nearbyintf32(x: f32) -> f32;
536 /// Returns the nearest integer to an `f64`.
537 pub fn nearbyintf64(x: f64) -> f64;
539 /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero.
540 pub fn roundf32(x: f32) -> f32;
541 /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero.
542 pub fn roundf64(x: f64) -> f64;
544 /// Returns the number of bits set in a `u8`.
545 pub fn ctpop8(x: u8) -> u8;
546 /// Returns the number of bits set in a `u16`.
547 pub fn ctpop16(x: u16) -> u16;
548 /// Returns the number of bits set in a `u32`.
549 pub fn ctpop32(x: u32) -> u32;
550 /// Returns the number of bits set in a `u64`.
551 pub fn ctpop64(x: u64) -> u64;
553 /// Returns the number of leading bits unset in a `u8`.
554 pub fn ctlz8(x: u8) -> u8;
555 /// Returns the number of leading bits unset in a `u16`.
556 pub fn ctlz16(x: u16) -> u16;
557 /// Returns the number of leading bits unset in a `u32`.
558 pub fn ctlz32(x: u32) -> u32;
559 /// Returns the number of leading bits unset in a `u64`.
560 pub fn ctlz64(x: u64) -> u64;
562 /// Returns the number of trailing bits unset in a `u8`.
563 pub fn cttz8(x: u8) -> u8;
564 /// Returns the number of trailing bits unset in a `u16`.
565 pub fn cttz16(x: u16) -> u16;
566 /// Returns the number of trailing bits unset in a `u32`.
567 pub fn cttz32(x: u32) -> u32;
568 /// Returns the number of trailing bits unset in a `u64`.
569 pub fn cttz64(x: u64) -> u64;
571 /// Reverses the bytes in a `u16`.
572 pub fn bswap16(x: u16) -> u16;
573 /// Reverses the bytes in a `u32`.
574 pub fn bswap32(x: u32) -> u32;
575 /// Reverses the bytes in a `u64`.
576 pub fn bswap64(x: u64) -> u64;
578 /// Performs checked `i8` addition.
579 pub fn i8_add_with_overflow(x: i8, y: i8) -> (i8, bool);
580 /// Performs checked `i16` addition.
581 pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool);
582 /// Performs checked `i32` addition.
583 pub fn i32_add_with_overflow(x: i32, y: i32) -> (i32, bool);
584 /// Performs checked `i64` addition.
585 pub fn i64_add_with_overflow(x: i64, y: i64) -> (i64, bool);
587 /// Performs checked `u8` addition.
588 pub fn u8_add_with_overflow(x: u8, y: u8) -> (u8, bool);
589 /// Performs checked `u16` addition.
590 pub fn u16_add_with_overflow(x: u16, y: u16) -> (u16, bool);
591 /// Performs checked `u32` addition.
592 pub fn u32_add_with_overflow(x: u32, y: u32) -> (u32, bool);
593 /// Performs checked `u64` addition.
594 pub fn u64_add_with_overflow(x: u64, y: u64) -> (u64, bool);
596 /// Performs checked `i8` subtraction.
597 pub fn i8_sub_with_overflow(x: i8, y: i8) -> (i8, bool);
598 /// Performs checked `i16` subtraction.
599 pub fn i16_sub_with_overflow(x: i16, y: i16) -> (i16, bool);
600 /// Performs checked `i32` subtraction.
601 pub fn i32_sub_with_overflow(x: i32, y: i32) -> (i32, bool);
602 /// Performs checked `i64` subtraction.
603 pub fn i64_sub_with_overflow(x: i64, y: i64) -> (i64, bool);
605 /// Performs checked `u8` subtraction.
606 pub fn u8_sub_with_overflow(x: u8, y: u8) -> (u8, bool);
607 /// Performs checked `u16` subtraction.
608 pub fn u16_sub_with_overflow(x: u16, y: u16) -> (u16, bool);
609 /// Performs checked `u32` subtraction.
610 pub fn u32_sub_with_overflow(x: u32, y: u32) -> (u32, bool);
611 /// Performs checked `u64` subtraction.
612 pub fn u64_sub_with_overflow(x: u64, y: u64) -> (u64, bool);
614 /// Performs checked `i8` multiplication.
615 pub fn i8_mul_with_overflow(x: i8, y: i8) -> (i8, bool);
616 /// Performs checked `i16` multiplication.
617 pub fn i16_mul_with_overflow(x: i16, y: i16) -> (i16, bool);
618 /// Performs checked `i32` multiplication.
619 pub fn i32_mul_with_overflow(x: i32, y: i32) -> (i32, bool);
620 /// Performs checked `i64` multiplication.
621 pub fn i64_mul_with_overflow(x: i64, y: i64) -> (i64, bool);
623 /// Performs checked `u8` multiplication.
624 pub fn u8_mul_with_overflow(x: u8, y: u8) -> (u8, bool);
625 /// Performs checked `u16` multiplication.
626 pub fn u16_mul_with_overflow(x: u16, y: u16) -> (u16, bool);
627 /// Performs checked `u32` multiplication.
628 pub fn u32_mul_with_overflow(x: u32, y: u32) -> (u32, bool);
629 /// Performs checked `u64` multiplication.
630 pub fn u64_mul_with_overflow(x: u64, y: u64) -> (u64, bool);
634 /// `TypeId` represents a globally unique identifier for a type
635 #[lang="type_id"] // This needs to be kept in lockstep with the code in trans/intrinsic.rs and
636 // middle/lang_items.rs
637 #[deriving(PartialEq, Eq, Show)]
643 /// Returns the `TypeId` of the type this generic function has been instantiated with
644 pub fn of<T: 'static>() -> TypeId {
645 unsafe { type_id::<T>() }
647 pub fn hash(&self) -> u64 { self.t }