1 //! Module to handle integer operations.
2 //! This module exists because some integer types are not supported on some gcc platforms, e.g.
3 //! 128-bit integers on 32-bit platforms; such types therefore need to be handled manually.
5 use std::convert::TryFrom;
7 use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
8 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
9 use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
10 use rustc_middle::ty::Ty;
12 use crate::builder::ToGccComp;
13 use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
15 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
/// Unsigned integer remainder (`%`). Native types use a plain `BinaryOp::Modulo`;
/// non-native 128-bit operands are lowered to the libgcc `__umodti3` routine
/// (the "u" + "mod" name is assembled by `multiplicative_operation`).
16 pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
17 // 128-bit unsigned %: __umodti3
18 self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
/// Signed integer remainder (`%`). Native types use a plain `BinaryOp::Modulo`;
/// non-native 128-bit operands are lowered to the libgcc `__modti3` routine.
21 pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
22 // 128-bit signed %: __modti3
23 self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
/// Bitwise/logical NOT of `a`.
///
/// For native integer (or bool) types this emits a single unary op — presumably
/// `LogicalNegate` for bool and `BitwiseNegate` for integers (the selection code
/// is partly elided here; confirm against the full source). For non-native
/// 128-bit values, each 64-bit half is bitwise-negated independently and the
/// result is rebuilt as the two-element array that represents the wide integer.
26 pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
27 let typ = a.get_type();
28 if self.is_native_int_type_or_bool(typ) {
31 UnaryOp::LogicalNegate
34 UnaryOp::BitwiseNegate
36 self.cx.context.new_unary_op(None, operation, typ, a)
// Non-native path: negate the low and high 64-bit halves separately.
39 // TODO(antoyo): use __negdi2 and __negti2 instead?
40 let element_type = typ.dyncast_array().expect("element type");
42 self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
43 self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
45 self.cx.context.new_array_constructor(None, typ, &values)
/// Arithmetic negation (`-a`). Native types use `UnaryOp::Minus` directly;
/// non-native 128-bit values are negated by calling the extern libgcc routine
/// `__negti2`, declared here on the fly with signature `fn(a_type) -> a_type`.
49 pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
50 let a_type = a.get_type();
51 if self.is_native_int_type(a_type) {
52 self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
// Non-native path: declare and call __negti2.
55 let param_a = self.context.new_parameter(None, a_type, "a");
56 let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
57 self.context.new_call(None, func, &[a])
/// Bitwise AND; delegates to the shared native/non-native dispatch in
/// `CodegenCx::bitwise_operation`.
61 pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
62 self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
/// Logical shift right.
///
/// Three cases:
/// * both operands native: shift directly (after casting `b` to `a`'s type
///   when their signedness differs, to satisfy libgccjit);
/// * `a` native, `b` non-native: cast `b` down and recurse;
/// * `a` non-native (128-bit): build an explicit CFG over the two 64-bit
///   halves, branching on whether the shift amount is >= 64, == 0, or in
///   between. The result is accumulated in the local `shiftResult`.
65 pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
66 let a_type = a.get_type();
67 let b_type = b.get_type();
68 let a_native = self.is_native_int_type(a_type);
69 let b_native = self.is_native_int_type(b_type);
70 if a_native && b_native {
71 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
72 // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
73 if a_type.is_signed(self) != b_type.is_signed(self) {
74 let b = self.context.new_cast(None, b, a_type);
81 else if a_native && !b_native {
// Only the shift amount is wide: narrow it to `a`'s type and retry.
82 self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
85 // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most
86 // significant half of the number) which uses lshr.
88 let native_int_type = a_type.dyncast_array().expect("get element type");
// CFG: then = shift >= 64, b0 = shift == 0, actual_else = 0 < shift < 64.
90 let func = self.current_func();
91 let then_block = func.new_block("then");
92 let else_block = func.new_block("else");
93 let after_block = func.new_block("after");
94 let b0_block = func.new_block("b0");
95 let actual_else_block = func.new_block("actual_else");
97 let result = func.new_local(None, a_type, "shiftResult");
99 let sixty_four = self.gcc_int(native_int_type, 64);
100 let sixty_three = self.gcc_int(native_int_type, 63);
101 let zero = self.gcc_zero(native_int_type);
102 let b = self.gcc_int_cast(b, native_int_type);
// `b & 64 != 0` detects a shift amount of 64 or more (for valid amounts < 128).
103 let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
104 self.llbb().end_with_conditional(None, condition, then_block, else_block);
106 // TODO(antoyo): take endianness into account.
// then: shift >= 64 — the high half (shifted by b - 64) becomes the new low half.
107 let shift_value = self.gcc_sub(b, sixty_four);
108 let high = self.high(a);
// For signed values, the new high half is the sign extension (uses sixty_three);
// that arm is partly elided here — confirm against the full source.
110 if a_type.is_signed(self) {
120 let array_value = self.context.new_array_constructor(None, a_type, &values);
121 then_block.add_assignment(None, result, array_value);
122 then_block.end_with_jump(None, after_block);
// else: shift < 64 — split further on shift == 0 (identity) vs 0 < shift < 64.
124 let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
125 else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
// b0: shifting by zero returns `a` unchanged.
127 b0_block.add_assignment(None, result, a);
128 b0_block.end_with_jump(None, after_block);
// actual_else: 0 < shift < 64 — low = (low >>logical b) | (high << (64 - b)).
130 let shift_value = self.gcc_sub(sixty_four, b);
131 // NOTE: cast low to its unsigned type in order to perform a logical right shift.
132 let unsigned_type = native_int_type.to_unsigned(&self.cx);
133 let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
134 let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
135 let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
137 (high << shift_value) | shifted_low,
140 let array_value = self.context.new_array_constructor(None, a_type, &values);
141 actual_else_block.add_assignment(None, result, array_value);
142 actual_else_block.end_with_jump(None, after_block);
144 // NOTE: since jumps were added in a place rustc does not expect, the current block in the
145 // state need to be updated.
146 self.switch_to_block(after_block);
/// Shared implementation for `gcc_add`/`gcc_sub`.
///
/// Native operands: cast `b` to `a`'s type if needed and emit the binary op.
/// Non-native 128-bit operands: call one of the compiler-builtins helpers
/// `__rust_{i,u}128_{add,sub}`, chosen by signedness (compatibility with
/// `i128_type`) and the requested operation. Only `Plus`/`Minus` are legal here.
152 fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
153 let a_type = a.get_type();
154 let b_type = b.get_type();
155 if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
156 if a.get_type() != b.get_type() {
157 b = self.context.new_cast(None, b, a.get_type());
159 self.context.new_binary_op(None, operation, a_type, a, b)
// Non-native path: pick the runtime helper by (operation, signedness).
162 let signed = a_type.is_compatible_with(self.i128_type);
164 match (operation, signed) {
165 (BinaryOp::Plus, true) => "__rust_i128_add",
166 (BinaryOp::Plus, false) => "__rust_u128_add",
167 (BinaryOp::Minus, true) => "__rust_i128_sub",
168 (BinaryOp::Minus, false) => "__rust_u128_sub",
169 _ => unreachable!("unexpected additive operation {:?}", operation),
171 let param_a = self.context.new_parameter(None, a_type, "a");
172 let param_b = self.context.new_parameter(None, b_type, "b");
173 let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
174 self.context.new_call(None, func, &[a, b])
/// Integer addition; see `additive_operation` for the native/128-bit dispatch.
178 pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
179 self.additive_operation(BinaryOp::Plus, a, b)
/// Integer multiplication. Non-native 128-bit operands lower to `__multi3`
/// (sign-agnostic for multiplication; `signed = true` here only affects the
/// helper-name prefix logic in `multiplicative_operation`).
182 pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
183 self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
/// Integer subtraction; see `additive_operation` for the native/128-bit dispatch.
186 pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
187 self.additive_operation(BinaryOp::Minus, a, b)
/// Shared implementation for mul/div/mod.
///
/// Native operands: emit the binary op directly. Non-native 128-bit operands:
/// call the libgcc routine `__{sign}{operation_name}ti3` (e.g. `__udivti3`,
/// `__modti3`), where `sign` is derived from `signed` (derivation elided here).
190 fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
191 let a_type = a.get_type();
192 let b_type = b.get_type();
193 if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
194 self.context.new_binary_op(None, operation, a_type, a, b)
// Non-native path: declare and call the appropriate libgcc ti3 routine.
204 let func_name = format!("__{}{}ti3", sign, operation_name);
205 let param_a = self.context.new_parameter(None, a_type, "a");
206 let param_b = self.context.new_parameter(None, b_type, "b");
207 let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
208 self.context.new_call(None, func, &[a, b])
/// Signed integer division. Non-native 128-bit operands lower to `__divti3`.
212 pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
213 // TODO(antoyo): check if the types are signed?
214 // 128-bit, signed: __divti3
215 // TODO(antoyo): convert the arguments to signed?
216 self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
/// Unsigned integer division. Non-native 128-bit operands lower to `__udivti3`.
219 pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
220 // 128-bit, unsigned: __udivti3
221 self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
/// Overflow-checked binary operation (add/sub/mul), returning
/// `(result, overflow_flag)`.
///
/// `typ` is first normalized so `isize`/`usize` become their fixed-width
/// equivalents for the current target's pointer width. Dispatch then goes:
/// * native int type: pick a `__builtin_*_overflow` gcc builtin by operation,
///   signedness, and width, and call it via `overflow_call` with an out-pointer
///   local for the result;
/// * 128-bit (non-native): call a compiler-builtins helper
///   (`__rust_{i,u}128_{add,sub,mul}o`) returning a `{ result, overflow }`
///   struct declared here ad hoc, and destructure its two fields.
/// Some match scaffolding between the listed arms is elided in this view.
224 pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
225 use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
// Normalize pointer-sized ints to their concrete width for this target.
229 Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
230 Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
231 t @ (Uint(_) | Int(_)) => t.clone(),
232 _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
235 // TODO(antoyo): remove duplication with intrinsic?
237 if self.is_native_int_type(lhs.get_type()) {
// Addition builtins: generic __builtin_add_overflow for widths without a
// dedicated typed variant, typed s/u variants for 32- and 64-bit.
241 Int(I8) => "__builtin_add_overflow",
242 Int(I16) => "__builtin_add_overflow",
243 Int(I32) => "__builtin_sadd_overflow",
244 Int(I64) => "__builtin_saddll_overflow",
245 Int(I128) => "__builtin_add_overflow",
247 Uint(U8) => "__builtin_add_overflow",
248 Uint(U16) => "__builtin_add_overflow",
249 Uint(U32) => "__builtin_uadd_overflow",
250 Uint(U64) => "__builtin_uaddll_overflow",
251 Uint(U128) => "__builtin_add_overflow",
// Subtraction builtins, same width/signedness scheme.
257 Int(I8) => "__builtin_sub_overflow",
258 Int(I16) => "__builtin_sub_overflow",
259 Int(I32) => "__builtin_ssub_overflow",
260 Int(I64) => "__builtin_ssubll_overflow",
261 Int(I128) => "__builtin_sub_overflow",
263 Uint(U8) => "__builtin_sub_overflow",
264 Uint(U16) => "__builtin_sub_overflow",
265 Uint(U32) => "__builtin_usub_overflow",
266 Uint(U64) => "__builtin_usubll_overflow",
267 Uint(U128) => "__builtin_sub_overflow",
// Multiplication builtins, same width/signedness scheme.
273 Int(I8) => "__builtin_mul_overflow",
274 Int(I16) => "__builtin_mul_overflow",
275 Int(I32) => "__builtin_smul_overflow",
276 Int(I64) => "__builtin_smulll_overflow",
277 Int(I128) => "__builtin_mul_overflow",
279 Uint(U8) => "__builtin_mul_overflow",
280 Uint(U16) => "__builtin_mul_overflow",
281 Uint(U32) => "__builtin_umul_overflow",
282 Uint(U64) => "__builtin_umulll_overflow",
283 Uint(U128) => "__builtin_mul_overflow",
// Non-native 128-bit path: use the compiler-builtins "...o" helpers.
291 Int(I128) | Uint(U128) => {
296 Int(I128) => "__rust_i128_addo",
297 Uint(U128) => "__rust_u128_addo",
302 Int(I128) => "__rust_i128_subo",
303 Uint(U128) => "__rust_u128_subo",
308 Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
309 Uint(U128) => "__rust_u128_mulo",
// Declare the helper as extern: fn(a, b) -> struct { result, overflow: bool }.
313 let a_type = lhs.get_type();
314 let b_type = rhs.get_type();
315 let param_a = self.context.new_parameter(None, a_type, "a");
316 let param_b = self.context.new_parameter(None, b_type, "b");
317 let result_field = self.context.new_field(None, a_type, "result");
318 let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
319 let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
320 let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
321 let result = self.context.new_call(None, func, &[lhs, rhs]);
322 let overflow = result.access_field(None, overflow_field);
323 let int_result = result.access_field(None, result_field);
324 return (int_result, overflow);
// Fallback helpers for signed multiply overflow at 32/64 bits.
330 Int(I32) => "__mulosi4",
331 Int(I64) => "__mulodi4",
334 _ => unimplemented!("overflow operation for {:?}", new_kind),
// Builtin path: call with an out-pointer local; the builtin returns the flag.
340 let intrinsic = self.context.get_builtin_function(&name);
341 let res = self.current_func()
342 // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
343 .new_local(None, rhs.get_type(), "binopResult")
345 let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
346 (res.dereference(None).to_rvalue(), overflow)
/// Integer comparison for the given `IntPredicate`, producing a boolean rvalue.
///
/// Non-native 128-bit operands: call the libgcc helper `__{u,}cmpti2`, whose
/// result encodes the ordering as an int — the arms below show the convention
/// 0 = less, 1 = equal, 2 = greater — and translate the predicate into a
/// comparison of that result against a limit. Native operands: compare
/// directly (after working around libgccjit limitations on mismatched types).
349 pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
350 let a_type = lhs.get_type();
351 let b_type = rhs.get_type();
352 if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
353 let signed = a_type.is_compatible_with(self.i128_type);
// `sign` is derived from `signed` (derivation elided in this view).
361 let func_name = format!("__{}cmpti2", sign);
362 let param_a = self.context.new_parameter(None, a_type, "a");
363 let param_b = self.context.new_parameter(None, b_type, "b");
364 let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
365 let cmp = self.context.new_call(None, func, &[lhs, rhs]);
// Equality predicates compare the helper result against 1 directly.
368 IntPredicate::IntEQ => {
369 return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
371 IntPredicate::IntNE => {
372 return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
// Ordering predicates map to (comparison op, limit) pairs over the 0/1/2 code.
374 IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
375 IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
376 IntPredicate::IntULT => (ComparisonOp::Equals, 0),
377 IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
378 IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
379 IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
380 IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
381 IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
383 self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
// Native path: make the operand types agree before comparing.
386 let left_type = lhs.get_type();
387 let right_type = rhs.get_type();
388 if left_type != right_type {
389 // NOTE: because libgccjit cannot compare function pointers.
390 if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
391 lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
392 rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
394 // NOTE: hack because we try to cast a vector type to the same vector type.
395 else if format!("{:?}", left_type) != format!("{:?}", right_type) {
396 rhs = self.context.new_cast(None, rhs, left_type);
399 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
/// Bitwise XOR. Native types emit the op directly (that arm is partly elided
/// here); non-native 128-bit values XOR the low and high 64-bit halves
/// independently and rebuild the array representation.
403 pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
404 let a_type = a.get_type();
405 let b_type = b.get_type();
406 if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
411 self.low(a) ^ self.low(b),
412 self.high(a) ^ self.high(b),
414 self.context.new_array_constructor(None, a_type, &values)
/// Shift left.
///
/// Mirrors `gcc_lshr`: native operands shift directly (with casts to
/// reconcile mismatched signedness, as libgccjit requires); a native `a` with
/// a non-native `b` narrows `b` and recurses; a non-native 128-bit `a` is
/// handled with an explicit CFG over its two 64-bit halves, branching on
/// whether the shift amount is >= 64, == 0, or strictly between.
418 pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
419 let a_type = a.get_type();
420 let b_type = b.get_type();
421 let a_native = self.is_native_int_type(a_type);
422 let b_native = self.is_native_int_type(b_type);
423 if a_native && b_native {
424 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
425 if a_type.is_unsigned(self) && b_type.is_signed(self) {
426 let a = self.context.new_cast(None, a, b_type);
428 self.context.new_cast(None, result, a_type)
430 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
431 let b = self.context.new_cast(None, b, a_type);
438 else if a_native && !b_native {
// Only the shift amount is wide: narrow it to `a`'s type and retry.
439 self.gcc_shl(a, self.gcc_int_cast(b, a_type))
442 // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
443 let native_int_type = a_type.dyncast_array().expect("get element type");
// CFG: then = shift >= 64, b0 = shift == 0, actual_else = 0 < shift < 64.
445 let func = self.current_func();
446 let then_block = func.new_block("then");
447 let else_block = func.new_block("else");
448 let after_block = func.new_block("after");
449 let b0_block = func.new_block("b0");
450 let actual_else_block = func.new_block("actual_else");
452 let result = func.new_local(None, a_type, "shiftResult");
454 let b = self.gcc_int_cast(b, native_int_type);
455 let sixty_four = self.gcc_int(native_int_type, 64);
456 let zero = self.gcc_zero(native_int_type);
// `b & 64 != 0` detects a shift amount of 64 or more (for valid amounts < 128).
457 let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
458 self.llbb().end_with_conditional(None, condition, then_block, else_block);
460 // TODO(antoyo): take endianness into account.
// then: shift >= 64 — the low half (shifted by b - 64) becomes the new high half.
463 self.low(a) << (b - sixty_four),
465 let array_value = self.context.new_array_constructor(None, a_type, &values);
466 then_block.add_assignment(None, result, array_value);
467 then_block.end_with_jump(None, after_block);
// else: split further on shift == 0 (identity) vs 0 < shift < 64.
469 let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
470 else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
472 b0_block.add_assignment(None, result, a);
473 b0_block.end_with_jump(None, after_block);
// actual_else: 0 < shift < 64 — high = (high << b) | (low >>logical (64 - b)).
475 // NOTE: cast low to its unsigned type in order to perform a logical right shift.
476 let unsigned_type = native_int_type.to_unsigned(&self.cx);
477 let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
478 let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
479 let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
482 (self.high(a) << b) | high_low,
485 let array_value = self.context.new_array_constructor(None, a_type, &values);
486 actual_else_block.add_assignment(None, result, array_value);
487 actual_else_block.end_with_jump(None, after_block);
489 // NOTE: since jumps were added in a place rustc does not expect, the current block in the
490 // state need to be updated.
491 self.switch_to_block(after_block);
/// Byte-swap `arg`, a `width`-bit integer.
///
/// Non-native 128-bit values: recursively byte-swap each 64-bit half
/// (`width / 2`) and swap the halves' positions in the resulting array.
/// Native values: call the matching `__builtin_bswap{width}` builtin, with a
/// bitcast to the builtin's parameter type when ours does not match exactly.
497 pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
498 let arg_type = arg.get_type();
499 if !self.is_native_int_type(arg_type) {
500 let native_int_type = arg_type.dyncast_array().expect("get element type");
501 let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
502 let swapped_lsb = self.gcc_bswap(lsb, width / 2);
503 let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
504 let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
505 let swapped_msb = self.gcc_bswap(msb, width / 2);
506 let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
508 // NOTE: we also need to swap the two elements here, in addition to swapping inside
509 // the elements themselves like done above.
510 return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
513 // TODO(antoyo): check if it's faster to use string literals and a
514 // match instead of format!.
515 let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
516 // FIXME(antoyo): this cast should not be necessary. Remove
517 // when having proper sized integer types.
518 let param_type = bswap.get_param(0).to_rvalue().get_type();
519 if param_type != arg_type {
520 arg = self.bitcast(arg, param_type);
522 self.cx.context.new_call(None, bswap, &[arg])
526 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
/// Constant of type `typ` from a signed 64-bit value. Non-native 128-bit
/// types store `int` in the low half and sign-extend into the high half
/// (all-ones when `int` is negative, zero otherwise).
527 pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
528 if self.is_native_int_type_or_bool(typ) {
529 self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
532 // NOTE: set the sign in high.
533 self.from_low_high(typ, int, -(int.is_negative() as i64))
/// Constant of type `typ` from an unsigned 64-bit value. Non-native 128-bit
/// types store `int` in the low half with a zero high half (no sign extension).
537 pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
538 if self.is_native_int_type_or_bool(typ) {
539 self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
542 self.from_low_high(typ, int as i64, 0)
/// Constant of type `typ` from a full 128-bit unsigned value.
///
/// The value is split into 64-bit `low`/`high` words. When 128-bit ints are
/// native the constant is assembled as `(high << 64) | low`; when they are
/// non-native it is built via `from_low_high`. The remaining arms (partly
/// elided here) handle narrower destination types.
546 pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
547 let low = num as u64;
548 let high = (num >> 64) as u64;
550 // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
551 if self.is_native_int_type(typ) {
552 let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
553 let high = self.context.new_rvalue_from_long(typ, high as i64);
// Combine the two words: (high << 64) | zero-extended low.
555 let sixty_four = self.context.new_rvalue_from_long(typ, 64);
556 let shift = high << sixty_four;
557 shift | self.context.new_cast(None, low, typ)
560 self.from_low_high(typ, low as i64, high as i64)
// Narrow destination: truncate to 64 bits, then cast to the requested type.
563 else if typ.is_i128(self) {
564 let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
565 self.gcc_int_cast(num, typ)
568 self.gcc_uint(typ, num as u64)
/// Zero constant of type `typ`; non-native 128-bit types get both halves zero.
572 pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
573 if self.is_native_int_type_or_bool(typ) {
574 self.context.new_rvalue_zero(typ)
577 self.from_low_high(typ, 0, 0)
/// Bit width of integer type `typ`. Native types derive it from the gcc type
/// size; the non-native arm (elided here) can only be u128/i128 per the NOTE.
581 pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
582 if self.is_native_int_type_or_bool(typ) {
583 typ.get_size() as u64 * 8
586 // NOTE: the only unsupported types are u128 and i128.
/// Shared implementation for bitwise and/or/xor-style operations.
///
/// Both operands native: cast `b` to `a`'s type if needed and emit the op.
/// Both non-native: apply the op half-by-half on the 64-bit elements and
/// rebuild the array. Mixed native/non-native operands are rejected.
591 fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
592 let a_type = a.get_type();
593 let b_type = b.get_type();
594 let a_native = self.is_native_int_type_or_bool(a_type);
595 let b_native = self.is_native_int_type_or_bool(b_type);
596 if a_native && b_native {
597 if a_type != b_type {
598 b = self.context.new_cast(None, b, a_type);
600 self.context.new_binary_op(None, operation, a_type, a, b)
603 assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
604 let native_int_type = a_type.dyncast_array().expect("get element type");
606 self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
607 self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
609 self.context.new_array_constructor(None, a_type, &values)
/// Bitwise OR; delegates to the shared native/non-native `bitwise_operation`.
613 pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
614 self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
617 // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
/// Integer-to-integer cast between any mix of native and non-native types.
///
/// * native -> native: plain gcc cast;
/// * non-native -> native: truncate by casting just the low half;
/// * native -> non-native: widen — low half is the cast value, high half is
///   the sign extension (0 or -1, derived from a `< 0` comparison; note this
///   sign-extends based on the source value's signed comparison);
/// * non-native -> non-native: same size on both sides, so a bitcast suffices.
618 pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
619 let value_type = value.get_type();
620 if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
621 self.context.new_cast(None, value, dest_typ)
623 else if self.is_native_int_type_or_bool(dest_typ) {
624 self.context.new_cast(None, self.low(value), dest_typ)
626 else if self.is_native_int_type_or_bool(value_type) {
627 let dest_element_type = dest_typ.dyncast_array().expect("get element type");
629 // NOTE: set the sign of the value.
630 let zero = self.context.new_rvalue_zero(value_type);
631 let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
632 let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
// High half = -(value < 0): all-ones for negative values, zero otherwise.
634 self.context.new_cast(None, value, dest_element_type),
635 self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
637 self.context.new_array_constructor(None, dest_typ, &values)
640 // Since u128 and i128 are the only types that can be unsupported, we know the type of
641 // value and the destination type have the same size, so a bitcast is fine.
642 self.context.new_bitcast(None, value, dest_typ)
/// Cast an integer `value` to the float type `dest_typ`.
///
/// Native sources use a plain gcc cast. Non-native 128-bit sources call the
/// libgcc conversion `__float{sign}ti{sf,df}` (suffix chosen from the float
/// kind; the `sign` derivation from `signed` is elided in this view).
646 fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
647 let value_type = value.get_type();
648 if self.is_native_int_type_or_bool(value_type) {
649 return self.context.new_cast(None, value, dest_typ);
// Pick the libgcc name suffix from the destination float kind.
653 match self.type_kind(dest_typ) {
654 TypeKind::Float => "tisf",
655 TypeKind::Double => "tidf",
656 kind => panic!("cannot cast a non-native integer to type {:?}", kind),
665 let func_name = format!("__float{}{}", sign, name_suffix);
666 let param = self.context.new_parameter(None, value_type, "n");
667 let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
668 self.context.new_call(None, func, &[value])
/// Signed integer -> float cast; see `int_to_float_cast`.
671 pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
672 self.int_to_float_cast(true, value, dest_typ)
/// Unsigned integer -> float cast; see `int_to_float_cast`.
675 pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
676 self.int_to_float_cast(false, value, dest_typ)
/// Cast a float `value` to the integer type `dest_typ`.
///
/// Native destinations use a plain gcc cast. Non-native 128-bit destinations
/// call the libgcc conversion `__fix{sign}{sf,df}ti` (suffix chosen from the
/// source float kind; the `sign` derivation from `signed` is elided here).
679 fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
680 let value_type = value.get_type();
681 if self.is_native_int_type_or_bool(dest_typ) {
682 return self.context.new_cast(None, value, dest_typ);
// Pick the libgcc name suffix from the source float kind.
686 match self.type_kind(value_type) {
687 TypeKind::Float => "sfti",
688 TypeKind::Double => "dfti",
689 kind => panic!("cannot cast a {:?} to non-native integer", kind),
698 let func_name = format!("__fix{}{}", sign, name_suffix);
699 let param = self.context.new_parameter(None, value_type, "n");
700 let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
701 self.context.new_call(None, func, &[value])
/// Float -> signed integer cast; see `float_to_int_cast`.
704 pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
705 self.float_to_int_cast(true, value, dest_typ)
/// Float -> unsigned integer cast; see `float_to_int_cast`.
708 pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
709 self.float_to_int_cast(false, value, dest_typ)
/// Most-significant 64-bit half of a non-native 128-bit value (array index 1).
/// NOTE(review): index 1 as "high" assumes a fixed element order — the TODOs
/// elsewhere in this file say endianness is not yet taken into account.
712 fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
713 self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
/// Least-significant 64-bit half of a non-native 128-bit value (array index 0).
717 fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
718 self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
/// Build a non-native 128-bit constant of type `typ` from its two 64-bit
/// halves, as a two-element array constructor `[low, high]`.
722 fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
723 let native_int_type = typ.dyncast_array().expect("get element type");
725 self.context.new_rvalue_from_long(native_int_type, low),
726 self.context.new_rvalue_from_long(native_int_type, high),
728 self.context.new_array_constructor(None, typ, &values)