// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::debuginfo::DebugLoc;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{self, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
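///
/// For example, a `transmute` from `u32` to `[u8; 4]` (32 bits to 32 bits)
/// passes this check, while a `u32` to `u64` transmute is reported as an
/// error here.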
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in &*ccx.tcx().transmute_restrictions.borrow() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
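        // (Concretely: once one from/to pair for a given call's node id has
        // failed, `last_failing_id` below makes us skip the remaining pairs
        // registered under that same id.)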
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with potentially different sizes: \
                             {} (could be {} bit{}) to {} (could be {} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with different sizes: \
                             {} ({} bit{}) to {} ({} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            }
        }
    }
    ccx.sess().abort_if_errors();
}

pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeIdAndSpan)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => {
            ty::erase_late_bound_regions(bcx.tcx(), &f.sig.output())
        }
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if name.get() == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |&: llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                        PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
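                // E.g. `f32` -> `u32` and `u32x4` -> `u64x2` satisfy the
                // first arm of `bitcast_compatible` below, `*const T` ->
                // `*mut U` the second; aggregates such as structs fall
                // through to the pointer-cast path instead.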
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
                fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();

    let call_debug_location = DebugLoc::At(call_info.id, call_info.span);

    // These are the only intrinsic functions that diverge.
    if name.get() == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None, call_debug_location);
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if name.get() == "unreachable" {
        fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None, call_debug_location)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None, call_debug_location)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
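            // (`appropriate_rvalue_mode` returns `ByValue` for immediates
            // such as `i32`, and `ByRef` for aggregates.)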
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "get_tydesc") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let static_ti = get_tydesc(ccx, tp_ty);

            // FIXME (#3730): ideally this shouldn't need a cast,
            // but there's a circularity between translating rust types to llvm
            // types and having a tydesc type available. So I can't directly access
            // the llvm type of intrinsic::TyDesc struct.
            PointerCast(bcx, static_ti.tydesc, llret_ty)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            C_u64(ccx, hash)
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            if !return_type_is_void(ccx, tp_ty) {
                // Just zero out the stack slot. (See comment on base::memzero for explanation.)
                zero_mem(bcx, llresult, tp_ty);
            }
            C_nil(ccx)
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }

        (_, "copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           false,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "set_memory") => {
            memset_intrinsic(bcx,
                             false,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }

        (_, "volatile_copy_nonoverlapping_memory") => {
            copy_intrinsic(bcx,
                           false,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_copy_memory") => {
            copy_intrinsic(bcx,
                           true,
                           true,
                           *substs.types.get(FnSpace, 0),
                           llargs[0],
                           llargs[1],
                           llargs[2],
                           call_debug_location)
        }
        (_, "volatile_set_memory") => {
            memset_intrinsic(bcx,
                             true,
                             *substs.types.get(FnSpace, 0),
                             llargs[0],
                             llargs[1],
                             llargs[2],
                             call_debug_location)
        }
        (_, "volatile_load") => {
            VolatileLoad(bcx, llargs[0])
        }
        (_, "volatile_store") => {
            VolatileStore(bcx, llargs[1], llargs[0], order: ());
            C_nil(ccx)
        }

        (_, "ctlz8") => count_zeros_intrinsic(bcx,
                                              "llvm.ctlz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "ctlz16") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz32") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "ctlz64") => count_zeros_intrinsic(bcx,
                                               "llvm.ctlz.i64",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz8") => count_zeros_intrinsic(bcx,
                                              "llvm.cttz.i8",
                                              llargs[0],
                                              call_debug_location),
        (_, "cttz16") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i16",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz32") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i32",
                                               llargs[0],
                                               call_debug_location),
        (_, "cttz64") => count_zeros_intrinsic(bcx,
                                               "llvm.cttz.i64",
                                               llargs[0],
                                               call_debug_location),

        (_, "i8_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.sadd.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),

        (_, "u8_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_add_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.uadd.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.ssub.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u8_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_sub_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.usub.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "i64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.smul.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u8_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i8",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u16_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i16",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u32_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i32",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "u64_mul_with_overflow") =>
            with_overflow_intrinsic(bcx,
                                    "llvm.umul.with.overflow.i64",
                                    ret_ty,
                                    llargs[0],
                                    llargs[1],
                                    call_debug_location),
        (_, "return_address") => {
            if !fcx.caller_expects_out_pointer {
                tcx.sess.span_err(call_info.span,
                                  "invalid use of `return_address` intrinsic: function \
                                   does not use out pointer");
                C_null(Type::i8p(ccx))
            } else {
                PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
            }
        }

        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
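        // E.g. "atomic_xadd_relaxed" selects the AtomicAdd RMW op with
        // Monotonic ordering, while a bare "atomic_fence" defaults to
        // SequentiallyConsistent.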
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };

            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their implementation
                    // of this, I assume that it's good enough for us to use for
                    // now.
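                    // E.g. an AcquireRelease success ordering is paired with
                    // an Acquire failure ordering: the failure path performs
                    // no store, so it can never be Release.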
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
                                            llargs[2], order,
                                            strongest_failure_ordering);
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                        // LLVM 3.5+ returns a `{ T, i1 }` pair; extract the value.
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }

                "load" => {
                    AtomicLoad(bcx, llargs[0], order)
                }
                "store" => {
                    AtomicStore(bcx, llargs[1], llargs[0], order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and"  => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or"   => llvm::AtomicOr,
                        "xor"  => llvm::AtomicXor,
                        "max"  => llvm::AtomicMax,
                        "min"  => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
        }
        expr::SaveIn(_) => {}
    }

    fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool,
                              volatile: bool,
                              tp_ty: Ty<'tcx>,
                              dst: ValueRef,
                              src: ValueRef,
                              count: ValueRef,
                              call_debug_location: DebugLoc)
                              -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           src_ptr,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                volatile: bool,
                                tp_ty: Ty<'tcx>,
                                dst: ValueRef,
                                val: ValueRef,
                                count: ValueRef,
                                call_debug_location: DebugLoc)
                                -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx,
         llfn,
         &[dst_ptr,
           val,
           Mul(bcx, size, count, DebugLoc::None),
           align,
           C_bool(ccx, volatile)],
         None,
         call_debug_location)
}

fn count_zeros_intrinsic(bcx: Block,
                         name: &'static str,
                         val: ValueRef,
                         call_debug_location: DebugLoc)
                         -> ValueRef {
    // The second argument to llvm.ctlz/llvm.cttz says whether a zero input
    // is undefined behaviour; passing `false` keeps it well-defined.
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None, call_debug_location)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       name: &'static str,
                                       t: Ty<'tcx>,
                                       a: ValueRef,
                                       b: ValueRef,
                                       call_debug_location: DebugLoc)
                                       -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

    // Convert `i1` to a `bool`, and write it to the out parameter
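    // (The LLVM intrinsic returns a `{ iN, i1 }` pair; the `i1` flag is
    // widened below so the aggregate matches the Rust-level `(T, bool)`
    // return value.)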
    let val = Call(bcx, llfn, &[a, b], None, call_debug_location);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);
    if type_is_immediate(bcx.ccx(), t) {
        let tmp = alloc_ty(bcx, t, "tmp");
        Store(bcx, ret, tmp);
        load_ty(bcx, tmp, t)
    } else {
        ret
    }
}