// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};

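// Maps Rust's "simple" intrinsics directly to their LLVM counterparts; for
// example, a call to the Rust intrinsic `sqrtf32` is lowered to a call to
// `llvm.sqrt.f32` with the same argument.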
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
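///
/// For instance (an illustrative example, not from this file): a call such as
/// `mem::transmute::<u16, u32>(x)` is rejected here, since the two types
/// differ in size (16 vs. 32 bits).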
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with potentially different sizes: \
                             {} (could be {} bit{}) to {} (could be {} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with different sizes: \
                             {} ({} bit{}) to {} ({} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            }
        }
    }
    ccx.sess().abort_if_errors();
}

pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeInfo)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => {
            ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output())
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if name.get() == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |&: llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
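                // For instance, a `<4 x i32>` vector or an `f64` is
                // bitcast-compatible by this test, while an aggregate such as
                // `{ i32, i32 }` is not.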
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.pop_custom_cleanup_scope(cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.pop_custom_cleanup_scope(cleanup_scope);

    // These are the only intrinsic functions that diverge.
    if name.get() == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if name.get() == "unreachable" {
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
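        // (`llsize_of_alloc` is the allocation size of the type, trailing
        // padding included, i.e. what `size_of::<T>()` reports.)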
318 (_, "min_align_of") => {
319 let tp_ty = *substs.types.get(FnSpace, 0);
320 C_uint(ccx, type_of::align_of(ccx, tp_ty))
322 (_, "pref_align_of") => {
323 let tp_ty = *substs.types.get(FnSpace, 0);
324 let lltp_ty = type_of::type_of(ccx, tp_ty);
325 C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
327 (_, "move_val_init") => {
328 // Create a datum reflecting the value being moved.
329 // Use `appropriate_mode` so that the datum is by ref
330 // if the value is non-immediate. Note that, with
331 // intrinsics, there are no argument cleanups to
332 // concern ourselves with, so we can use an rvalue datum.
333 let tp_ty = *substs.types.get(FnSpace, 0);
334 let mode = appropriate_rvalue_mode(ccx, tp_ty);
338 kind: Rvalue::new(mode)
340 bcx = src.store_to(bcx, llargs[0]);
343 (_, "get_tydesc") => {
344 let tp_ty = *substs.types.get(FnSpace, 0);
345 let static_ti = get_tydesc(ccx, tp_ty);
347 // FIXME (#3730): ideally this shouldn't need a cast,
348 // but there's a circularity between translating rust types to llvm
349 // types and having a tydesc type available. So I can't directly access
350 // the llvm type of intrinsic::TyDesc struct.
351 PointerCast(bcx, static_ti.tydesc, llret_ty)
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            //     the intrinsic module
            C_named_struct(llret_ty, &[C_u64(ccx, hash)])
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            if return_type_is_void(ccx, tp_ty) {
                C_nil(ccx)
            } else {
                C_null(lltp_ty)
            }
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
389 (_, "copy_nonoverlapping_memory") => {
390 copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
391 llargs[0], llargs[1], llargs[2])
393 (_, "copy_memory") => {
394 copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
395 llargs[0], llargs[1], llargs[2])
397 (_, "set_memory") => {
398 memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
399 llargs[0], llargs[1], llargs[2])
402 (_, "volatile_copy_nonoverlapping_memory") => {
403 copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
404 llargs[0], llargs[1], llargs[2])
406 (_, "volatile_copy_memory") => {
407 copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
408 llargs[0], llargs[1], llargs[2])
410 (_, "volatile_set_memory") => {
411 memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
412 llargs[0], llargs[1], llargs[2])
414 (_, "volatile_load") => {
415 VolatileLoad(bcx, llargs[0])
417 (_, "volatile_store") => {
418 VolatileStore(bcx, llargs[1], llargs[0]);
422 (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", llargs[0]),
423 (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", llargs[0]),
424 (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", llargs[0]),
425 (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", llargs[0]),
426 (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", llargs[0]),
427 (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", llargs[0]),
428 (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", llargs[0]),
429 (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", llargs[0]),
431 (_, "i8_add_with_overflow") =>
432 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
433 llargs[0], llargs[1]),
434 (_, "i16_add_with_overflow") =>
435 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
436 llargs[0], llargs[1]),
437 (_, "i32_add_with_overflow") =>
438 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
439 llargs[0], llargs[1]),
440 (_, "i64_add_with_overflow") =>
441 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
442 llargs[0], llargs[1]),
444 (_, "u8_add_with_overflow") =>
445 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
446 llargs[0], llargs[1]),
447 (_, "u16_add_with_overflow") =>
448 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
449 llargs[0], llargs[1]),
450 (_, "u32_add_with_overflow") =>
451 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
452 llargs[0], llargs[1]),
453 (_, "u64_add_with_overflow") =>
454 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
455 llargs[0], llargs[1]),
457 (_, "i8_sub_with_overflow") =>
458 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
459 llargs[0], llargs[1]),
460 (_, "i16_sub_with_overflow") =>
461 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
462 llargs[0], llargs[1]),
463 (_, "i32_sub_with_overflow") =>
464 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
465 llargs[0], llargs[1]),
466 (_, "i64_sub_with_overflow") =>
467 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
468 llargs[0], llargs[1]),
470 (_, "u8_sub_with_overflow") =>
471 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
472 llargs[0], llargs[1]),
473 (_, "u16_sub_with_overflow") =>
474 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
475 llargs[0], llargs[1]),
476 (_, "u32_sub_with_overflow") =>
477 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
478 llargs[0], llargs[1]),
479 (_, "u64_sub_with_overflow") =>
480 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
481 llargs[0], llargs[1]),
483 (_, "i8_mul_with_overflow") =>
484 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
485 llargs[0], llargs[1]),
486 (_, "i16_mul_with_overflow") =>
487 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
488 llargs[0], llargs[1]),
489 (_, "i32_mul_with_overflow") =>
490 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
491 llargs[0], llargs[1]),
492 (_, "i64_mul_with_overflow") =>
493 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
494 llargs[0], llargs[1]),
496 (_, "u8_mul_with_overflow") =>
497 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
498 llargs[0], llargs[1]),
499 (_, "u16_mul_with_overflow") =>
500 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
501 llargs[0], llargs[1]),
502 (_, "u32_mul_with_overflow") =>
503 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
504 llargs[0], llargs[1]),
505 (_, "u64_mul_with_overflow") =>
506 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
507 llargs[0], llargs[1]),
509 (_, "return_address") => {
510 if !fcx.caller_expects_out_pointer {
511 tcx.sess.span_err(call_info.span,
512 "invalid use of `return_address` intrinsic: function \
513 does not use out pointer");
514 C_null(Type::i8p(ccx))
516 PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their implementation
                    // of this, I assume that it's good enough for us to use for
                    // now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };
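                    // (Illustration: for "atomic_cxchg_acqrel" the success
                    // ordering is AcquireRelease, so the failure ordering
                    // computed above is Acquire.)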
                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
                                            order,
                                            strongest_failure_ordering);
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
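                        // (LLVM 3.5 changed cmpxchg to return a `{ T, i1 }`
                        // pair, so the old value must be extracted; earlier
                        // versions returned the value directly.)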
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }
                "load" => {
                    AtomicLoad(bcx, llargs[0], order)
                }
                "store" => {
                    AtomicStore(bcx, llargs[1], llargs[0], order);
                    C_nil(ccx)
                }
                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and" => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or" => llvm::AtomicOr,
                        "xor" => llvm::AtomicXor,
                        "max" => llvm::AtomicMax,
                        "min" => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };
                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, Some(call_info));
        }
        expr::SaveIn(_) => {}
    }

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>,
                              dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };
    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, src_ptr, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, volatile: bool, tp_ty: Ty<'tcx>,
                                dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, val, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
    let y = C_bool(bcx.ccx(), false);
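    // `y` is the `is_zero_undef` flag of `llvm.ctlz`/`llvm.cttz`: passing
    // `false` keeps the result defined (the bit width) for a zero input.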
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, name: &'static str,
                                       t: Ty<'tcx>, a: ValueRef, b: ValueRef) -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
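    // (The `llvm.*.with.overflow.*` intrinsics return a `{ iN, i1 }` pair:
    // the wrapped result plus an overflow flag.)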
    let val = Call(bcx, llfn, &[a, b], None);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);

    ret
}