// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]

use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, AtomicXchg, ValueRef, TypeKind};
use middle::subst;
use middle::subst::FnSpace;
use trans::base::*;
use trans::build::*;
use trans::callee;
use trans::cleanup;
use trans::cleanup::CleanupMethods;
use trans::common::*;
use trans::datum::*;
use trans::expr;
use trans::glue;
use trans::type_of::*;
use trans::type_of;
use trans::machine;
use trans::machine::llsize_of;
use trans::type_::Type;
use middle::ty::{mod, Ty};
use syntax::abi::RustIntrinsic;
use syntax::ast;
use syntax::parse::token;
use util::ppaux::{Repr, ty_to_string};
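
/// Maps a Rust intrinsic that needs no special lowering, such as `sqrtf32`,
/// to the LLVM intrinsic with the same meaning (`llvm.sqrt.f32` here).
/// Returns `None` for intrinsics that `trans_intrinsic_call` must lower by
/// hand.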
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &ast::ForeignItem) -> Option<ValueRef> {
    let name = match token::get_ident(item.ident).get() {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "ctpop8" => "llvm.ctpop.i8",
        "ctpop16" => "llvm.ctpop.i16",
        "ctpop32" => "llvm.ctpop.i32",
        "ctpop64" => "llvm.ctpop.i64",
        "bswap16" => "llvm.bswap.i16",
        "bswap32" => "llvm.bswap.i32",
        "bswap64" => "llvm.bswap.i64",
        "assume" => "llvm.assume",
        _ => return None
    };
    Some(ccx.get_intrinsic(&name))
}

/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
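///
/// For example, a monomorphized call like `mem::transmute::<u32, u64>(x)` is
/// rejected here because the two types differ in size (32 bits vs. 64 bits).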
pub fn check_intrinsics(ccx: &CrateContext) {
    let mut last_failing_id = None;
    for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
        // Sometimes, a single call to transmute will push multiple
        // type pairs to test in order to exhaustively test the
        // possibility around a type parameter. If one of those fails,
        // there is no sense reporting errors on the others.
        if last_failing_id == Some(transmute_restriction.id) {
            continue;
        }

        debug!("transmute_restriction: {}", transmute_restriction.repr(ccx.tcx()));

        assert!(!ty::type_has_params(transmute_restriction.substituted_from));
        assert!(!ty::type_has_params(transmute_restriction.substituted_to));

        let llfromtype = type_of::sizing_type_of(ccx,
                                                 transmute_restriction.substituted_from);
        let lltotype = type_of::sizing_type_of(ccx,
                                               transmute_restriction.substituted_to);
        let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
        let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
        if from_type_size != to_type_size {
            last_failing_id = Some(transmute_restriction.id);

            if transmute_restriction.original_from != transmute_restriction.substituted_from {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with potentially different sizes: \
                             {} (could be {} bit{}) to {} (could be {} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            } else {
                ccx.sess().span_err(
                    transmute_restriction.span,
                    format!("transmute called on types with different sizes: \
                             {} ({} bit{}) to {} ({} bit{})",
                            ty_to_string(ccx.tcx(), transmute_restriction.original_from),
                            from_type_size as uint,
                            if from_type_size == 1 {""} else {"s"},
                            ty_to_string(ccx.tcx(), transmute_restriction.original_to),
                            to_type_size as uint,
                            if to_type_size == 1 {""} else {"s"}).as_slice());
            }
        }
    }
    ccx.sess().abort_if_errors();
}
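
/// Translates a call to a Rust intrinsic. Most intrinsics map directly to a
/// single LLVM intrinsic (see `get_simple_intrinsic`); the rest, such as
/// `transmute` and the atomics, are lowered by hand below.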
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                                            node: ast::NodeId,
                                            callee_ty: Ty<'tcx>,
                                            cleanup_scope: cleanup::CustomScopeIndex,
                                            args: callee::CallArgs<'a, 'tcx>,
                                            dest: expr::Dest,
                                            substs: subst::Substs<'tcx>,
                                            call_info: NodeInfo)
                                            -> Result<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let ccx = fcx.ccx;
    let tcx = bcx.tcx();

    let ret_ty = match callee_ty.sty {
        ty::ty_bare_fn(_, ref f) => f.sig.0.output,
        _ => panic!("expected bare_fn in trans_intrinsic_call")
    };
    let foreign_item = tcx.map.expect_foreign_item(node);
    let name = token::get_ident(foreign_item.ident);

    // For `transmute` we can just trans the input expr directly into dest
    if name.get() == "transmute" {
        let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
        match args {
            callee::ArgExprs(arg_exprs) => {
                assert_eq!(arg_exprs.len(), 1);

                let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
                                           *substs.types.get(FnSpace, 1));
                let llintype = type_of::type_of(ccx, in_type);
                let llouttype = type_of::type_of(ccx, out_type);

                let in_type_size = machine::llbitsize_of_real(ccx, llintype);
                let out_type_size = machine::llbitsize_of_real(ccx, llouttype);

                // This should be caught by the intrinsicck pass
                assert_eq!(in_type_size, out_type_size);

                let nonpointer_nonaggregate = |&: llkind: TypeKind| -> bool {
                    use llvm::TypeKind::*;
                    match llkind {
                        Half | Float | Double | X86_FP80 | FP128 |
                            PPC_FP128 | Integer | Vector | X86_MMX => true,
                        _ => false
                    }
                };

                // An approximation to which types can be directly cast via
                // LLVM's bitcast. This doesn't cover pointer -> pointer casts,
                // but does, importantly, cover SIMD types.
                let in_kind = llintype.kind();
                let ret_kind = llret_ty.kind();
                let bitcast_compatible =
                    (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
                        in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
                    };

                let dest = if bitcast_compatible {
                    // if we're here, the type is scalar-like (a primitive, a
                    // SIMD type or a pointer), and so can be handled as a
                    // by-value ValueRef and can also be directly bitcast to the
                    // target type. Doing this special case makes conversions
                    // like `u32x4` -> `u64x2` much nicer for LLVM and so more
                    // efficient (these are done efficiently implicitly in C
                    // with the `__m128i` type and so this means Rust doesn't
                    // lose out there).
                    let expr = &*arg_exprs[0];
                    let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
                    let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
                    let val = if datum.kind.is_by_ref() {
                        load_ty(bcx, datum.val, datum.ty)
                    } else {
                        datum.val
                    };

                    let cast_val = BitCast(bcx, val, llret_ty);

                    match dest {
                        expr::SaveIn(d) => {
                            // this often occurs in a sequence like `Store(val,
                            // d); val2 = Load(d)`, so disappears easily.
                            Store(bcx, cast_val, d);
                        }
                        expr::Ignore => {}
                    }
                    dest
                } else {
                    // The types are too complicated to do with a by-value
                    // bitcast, so pointer cast instead. We need to cast the
                    // dest so the types work out.
                    let dest = match dest {
                        expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
                        expr::Ignore => expr::Ignore
                    };
                    bcx = expr::trans_into(bcx, &*arg_exprs[0], dest);
                    dest
                };

                fcx.pop_custom_cleanup_scope(cleanup_scope);

                return match dest {
                    expr::SaveIn(d) => Result::new(bcx, d),
                    expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
                };
            }

            _ => {
                ccx.sess().bug("expected expr as argument for transmute");
            }
        }
    }

    // Push the arguments.
    let mut llargs = Vec::new();
    bcx = callee::trans_args(bcx,
                             args,
                             callee_ty,
                             &mut llargs,
                             cleanup::CustomScope(cleanup_scope),
                             false,
                             RustIntrinsic);

    fcx.pop_custom_cleanup_scope(cleanup_scope);

    // These are the only intrinsic functions that diverge.
    if name.get() == "abort" {
        let llfn = ccx.get_intrinsic(&("llvm.trap"));
        Call(bcx, llfn, &[], None);
        Unreachable(bcx);
        return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
    } else if name.get() == "unreachable" {
        Unreachable(bcx);
        return Result::new(bcx, C_nil(ccx));
    }

    let ret_ty = match ret_ty {
        ty::FnConverging(ret_ty) => ret_ty,
        ty::FnDiverging => unreachable!()
    };

    let llret_ty = type_of::type_of(ccx, ret_ty);

    // Get location to store the result. If the user does
    // not care about the result, just make a stack slot
    let llresult = match dest {
        expr::SaveIn(d) => d,
        expr::Ignore => {
            if !type_is_zero_size(ccx, ret_ty) {
                alloc_ty(bcx, ret_ty, "intrinsic_result")
            } else {
                C_undef(llret_ty.ptr_to())
            }
        }
    };

    let simple = get_simple_intrinsic(ccx, &*foreign_item);
    let llval = match (simple, name.get()) {
        (Some(llfn), _) => {
            Call(bcx, llfn, llargs.as_slice(), None)
        }
        (_, "breakpoint") => {
            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
            Call(bcx, llfn, &[], None)
        }
        (_, "size_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llsize_of_real(ccx, lltp_ty))
        }
        (_, "min_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_uint(ccx, type_of::align_of(ccx, tp_ty))
        }
        (_, "pref_align_of") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
        }
        (_, "move_val_init") => {
            // Create a datum reflecting the value being moved.
            // Use `appropriate_mode` so that the datum is by ref
            // if the value is non-immediate. Note that, with
            // intrinsics, there are no argument cleanups to
            // concern ourselves with, so we can use an rvalue datum.
            let tp_ty = *substs.types.get(FnSpace, 0);
            let mode = appropriate_rvalue_mode(ccx, tp_ty);
            let src = Datum {
                val: llargs[1],
                ty: tp_ty,
                kind: Rvalue::new(mode)
            };
            bcx = src.store_to(bcx, llargs[0]);
            C_nil(ccx)
        }
        (_, "get_tydesc") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let static_ti = get_tydesc(ccx, tp_ty);

            // FIXME (#3730): ideally this shouldn't need a cast,
            // but there's a circularity between translating rust types to llvm
            // types and having a tydesc type available. So I can't directly access
            // the llvm type of intrinsic::TyDesc struct.
            PointerCast(bcx, static_ti.tydesc, llret_ty)
        }
        (_, "type_id") => {
            let hash = ty::hash_crate_independent(
                ccx.tcx(),
                *substs.types.get(FnSpace, 0),
                &ccx.link_meta().crate_hash);
            // NB: This needs to be kept in lockstep with the TypeId struct in
            // the intrinsic module
            C_named_struct(llret_ty, &[C_u64(ccx, hash)])
        }
        (_, "init") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            let lltp_ty = type_of::type_of(ccx, tp_ty);
            if return_type_is_void(ccx, tp_ty) {
                C_nil(ccx)
            } else {
                C_null(lltp_ty)
            }
        }
        // Effectively no-ops
        (_, "uninit") | (_, "forget") => {
            C_nil(ccx)
        }
        (_, "needs_drop") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, type_needs_drop(ccx.tcx(), tp_ty))
        }
        (_, "owns_managed") => {
            let tp_ty = *substs.types.get(FnSpace, 0);
            C_bool(ccx, ty::type_contents(ccx.tcx(), tp_ty).owns_managed())
        }
        (_, "offset") => {
            let ptr = llargs[0];
            let offset = llargs[1];
            InBoundsGEP(bcx, ptr, &[offset])
        }
387 (_, "copy_nonoverlapping_memory") => {
388 copy_intrinsic(bcx, false, false, *substs.types.get(FnSpace, 0),
389 llargs[0], llargs[1], llargs[2])
391 (_, "copy_memory") => {
392 copy_intrinsic(bcx, true, false, *substs.types.get(FnSpace, 0),
393 llargs[0], llargs[1], llargs[2])
395 (_, "set_memory") => {
396 memset_intrinsic(bcx, false, *substs.types.get(FnSpace, 0),
397 llargs[0], llargs[1], llargs[2])
400 (_, "volatile_copy_nonoverlapping_memory") => {
401 copy_intrinsic(bcx, false, true, *substs.types.get(FnSpace, 0),
402 llargs[0], llargs[1], llargs[2])
404 (_, "volatile_copy_memory") => {
405 copy_intrinsic(bcx, true, true, *substs.types.get(FnSpace, 0),
406 llargs[0], llargs[1], llargs[2])
408 (_, "volatile_set_memory") => {
409 memset_intrinsic(bcx, true, *substs.types.get(FnSpace, 0),
410 llargs[0], llargs[1], llargs[2])
412 (_, "volatile_load") => {
413 VolatileLoad(bcx, llargs[0])
415 (_, "volatile_store") => {
416 VolatileStore(bcx, llargs[1], llargs[0]);
420 (_, "ctlz8") => count_zeros_intrinsic(bcx, "llvm.ctlz.i8", llargs[0]),
421 (_, "ctlz16") => count_zeros_intrinsic(bcx, "llvm.ctlz.i16", llargs[0]),
422 (_, "ctlz32") => count_zeros_intrinsic(bcx, "llvm.ctlz.i32", llargs[0]),
423 (_, "ctlz64") => count_zeros_intrinsic(bcx, "llvm.ctlz.i64", llargs[0]),
424 (_, "cttz8") => count_zeros_intrinsic(bcx, "llvm.cttz.i8", llargs[0]),
425 (_, "cttz16") => count_zeros_intrinsic(bcx, "llvm.cttz.i16", llargs[0]),
426 (_, "cttz32") => count_zeros_intrinsic(bcx, "llvm.cttz.i32", llargs[0]),
427 (_, "cttz64") => count_zeros_intrinsic(bcx, "llvm.cttz.i64", llargs[0]),
429 (_, "i8_add_with_overflow") =>
430 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i8", ret_ty,
431 llargs[0], llargs[1]),
432 (_, "i16_add_with_overflow") =>
433 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i16", ret_ty,
434 llargs[0], llargs[1]),
435 (_, "i32_add_with_overflow") =>
436 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i32", ret_ty,
437 llargs[0], llargs[1]),
438 (_, "i64_add_with_overflow") =>
439 with_overflow_intrinsic(bcx, "llvm.sadd.with.overflow.i64", ret_ty,
440 llargs[0], llargs[1]),
442 (_, "u8_add_with_overflow") =>
443 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i8", ret_ty,
444 llargs[0], llargs[1]),
445 (_, "u16_add_with_overflow") =>
446 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i16", ret_ty,
447 llargs[0], llargs[1]),
448 (_, "u32_add_with_overflow") =>
449 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i32", ret_ty,
450 llargs[0], llargs[1]),
451 (_, "u64_add_with_overflow") =>
452 with_overflow_intrinsic(bcx, "llvm.uadd.with.overflow.i64", ret_ty,
453 llargs[0], llargs[1]),
455 (_, "i8_sub_with_overflow") =>
456 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i8", ret_ty,
457 llargs[0], llargs[1]),
458 (_, "i16_sub_with_overflow") =>
459 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i16", ret_ty,
460 llargs[0], llargs[1]),
461 (_, "i32_sub_with_overflow") =>
462 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i32", ret_ty,
463 llargs[0], llargs[1]),
464 (_, "i64_sub_with_overflow") =>
465 with_overflow_intrinsic(bcx, "llvm.ssub.with.overflow.i64", ret_ty,
466 llargs[0], llargs[1]),
468 (_, "u8_sub_with_overflow") =>
469 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i8", ret_ty,
470 llargs[0], llargs[1]),
471 (_, "u16_sub_with_overflow") =>
472 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i16", ret_ty,
473 llargs[0], llargs[1]),
474 (_, "u32_sub_with_overflow") =>
475 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i32", ret_ty,
476 llargs[0], llargs[1]),
477 (_, "u64_sub_with_overflow") =>
478 with_overflow_intrinsic(bcx, "llvm.usub.with.overflow.i64", ret_ty,
479 llargs[0], llargs[1]),
481 (_, "i8_mul_with_overflow") =>
482 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i8", ret_ty,
483 llargs[0], llargs[1]),
484 (_, "i16_mul_with_overflow") =>
485 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i16", ret_ty,
486 llargs[0], llargs[1]),
487 (_, "i32_mul_with_overflow") =>
488 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i32", ret_ty,
489 llargs[0], llargs[1]),
490 (_, "i64_mul_with_overflow") =>
491 with_overflow_intrinsic(bcx, "llvm.smul.with.overflow.i64", ret_ty,
492 llargs[0], llargs[1]),
494 (_, "u8_mul_with_overflow") =>
495 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i8", ret_ty,
496 llargs[0], llargs[1]),
497 (_, "u16_mul_with_overflow") =>
498 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i16", ret_ty,
499 llargs[0], llargs[1]),
500 (_, "u32_mul_with_overflow") =>
501 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i32", ret_ty,
502 llargs[0], llargs[1]),
503 (_, "u64_mul_with_overflow") =>
504 with_overflow_intrinsic(bcx, "llvm.umul.with.overflow.i64", ret_ty,
505 llargs[0], llargs[1]),
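
        // `return_address` only makes sense in functions that return through
        // an out pointer, which is always the first LLVM argument; when the
        // value is returned directly there is no such slot to point at.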
507 (_, "return_address") => {
508 if !fcx.caller_expects_out_pointer {
509 tcx.sess.span_err(call_info.span,
510 "invalid use of `return_address` intrinsic: function \
511 does not use out pointer");
512 C_null(Type::i8p(ccx))
514 PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
        // This requires that atomic intrinsics follow a specific naming pattern:
        // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
        (_, name) if name.starts_with("atomic_") => {
            let split: Vec<&str> = name.split('_').collect();
            assert!(split.len() >= 2, "Atomic intrinsic not correct format");

            let order = if split.len() == 2 {
                llvm::SequentiallyConsistent
            } else {
                match split[2] {
                    "unordered" => llvm::Unordered,
                    "relaxed" => llvm::Monotonic,
                    "acq" => llvm::Acquire,
                    "rel" => llvm::Release,
                    "acqrel" => llvm::AcquireRelease,
                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
                }
            };
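
            // e.g. `atomic_xadd_relaxed` is an atomic add with Monotonic
            // ordering, while a bare `atomic_load` is a SeqCst load.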
            match split[1] {
                "cxchg" => {
                    // See include/llvm/IR/Instructions.h for their implementation
                    // of this, I assume that it's good enough for us to use for
                    // now.
                    let strongest_failure_ordering = match order {
                        llvm::NotAtomic | llvm::Unordered =>
                            ccx.sess().fatal("cmpxchg must be atomic"),

                        llvm::Monotonic | llvm::Release =>
                            llvm::Monotonic,

                        llvm::Acquire | llvm::AcquireRelease =>
                            llvm::Acquire,

                        llvm::SequentiallyConsistent =>
                            llvm::SequentiallyConsistent
                    };

                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
                                            llargs[2], order,
                                            strongest_failure_ordering);
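                    // LLVM 3.5 changed cmpxchg to return a { value, success }
                    // pair; extract just the old value so both versions yield
                    // the same result.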
                    if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                        ExtractValue(bcx, res, 0)
                    } else {
                        res
                    }
                }

                "load" => {
                    AtomicLoad(bcx, llargs[0], order)
                }
                "store" => {
                    AtomicStore(bcx, llargs[1], llargs[0], order);
                    C_nil(ccx)
                }

                "fence" => {
                    AtomicFence(bcx, order);
                    C_nil(ccx)
                }

                // These are all AtomicRMW ops
                op => {
                    let atom_op = match op {
                        "xchg" => llvm::AtomicXchg,
                        "xadd" => llvm::AtomicAdd,
                        "xsub" => llvm::AtomicSub,
                        "and" => llvm::AtomicAnd,
                        "nand" => llvm::AtomicNand,
                        "or" => llvm::AtomicOr,
                        "xor" => llvm::AtomicXor,
                        "max" => llvm::AtomicMax,
                        "min" => llvm::AtomicMin,
                        "umax" => llvm::AtomicUMax,
                        "umin" => llvm::AtomicUMin,
                        _ => ccx.sess().fatal("unknown atomic operation")
                    };

                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
                }
            }
        }

        (_, _) => ccx.sess().span_bug(foreign_item.span, "unknown intrinsic")
    };

    if val_ty(llval) != Type::void(ccx) &&
       machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
        store_ty(bcx, llval, llresult, ret_ty);
    }

    // If we made a temporary stack slot, let's clean it up
    match dest {
        expr::Ignore => {
            bcx = glue::drop_ty(bcx, llresult, ret_ty, Some(call_info));
        }
        expr::SaveIn(_) => {}
    }

    Result::new(bcx, llresult)
}

fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                              allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>,
                              dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
    let name = if allow_overlap {
        if int_size == 32 {
            "llvm.memmove.p0i8.p0i8.i32"
        } else {
            "llvm.memmove.p0i8.p0i8.i64"
        }
    } else {
        if int_size == 32 {
            "llvm.memcpy.p0i8.p0i8.i32"
        } else {
            "llvm.memcpy.p0i8.p0i8.i64"
        }
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

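    // `count` is a number of elements, but the LLVM intrinsic takes a byte
    // length, so pass `size * count`.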
    Call(bcx, llfn, &[dst_ptr, src_ptr, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, volatile: bool, tp_ty: Ty<'tcx>,
                                dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
    let ccx = bcx.ccx();
    let lltp_ty = type_of::type_of(ccx, tp_ty);
    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
    let size = machine::llsize_of(ccx, lltp_ty);
    let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
        "llvm.memset.p0i8.i32"
    } else {
        "llvm.memset.p0i8.i64"
    };

    let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
    let llfn = ccx.get_intrinsic(&name);

    Call(bcx, llfn, &[dst_ptr, val, Mul(bcx, size, count), align,
                      C_bool(ccx, volatile)], None)
}

fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
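    // The second argument is LLVM's `is_zero_undef` flag; passing `false`
    // asks for a fully defined result (the operand's bit width) when `val`
    // is zero.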
    let y = C_bool(bcx.ccx(), false);
    let llfn = bcx.ccx().get_intrinsic(&name);
    Call(bcx, llfn, &[val, y], None)
}

fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, name: &'static str,
                                       t: Ty<'tcx>, a: ValueRef, b: ValueRef) -> ValueRef {
    let llfn = bcx.ccx().get_intrinsic(&name);

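    // LLVM's `*.with.overflow.*` intrinsics return a `{ iN, i1 }` pair: the
    // wrapped result and an overflow flag.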
    // Convert `i1` to a `bool`, and write it to the out parameter
    let val = Call(bcx, llfn, &[a, b], None);
    let result = ExtractValue(bcx, val, 0);
    let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
    let ret = C_undef(type_of::type_of(bcx.ccx(), t));
    let ret = InsertValue(bcx, ret, result, 0);
    let ret = InsertValue(bcx, ret, overflow, 1);

    ret
}