1 //! Module to handle integer operations.
2 //! This module exists because some integer types are not supported on some gcc platforms, e.g.
3 //! 128-bit integers on 32-bit platforms and thus require to be handled manually.
5 use std::convert::TryFrom;
7 use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
8 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
9 use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
10 use rustc_middle::ty::Ty;
12 use crate::builder::ToGccComp;
13 use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
15 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// Unsigned integer remainder. Delegates to multiplicative_operation with
// signed = false; on targets without native 128-bit integers that lowers
// to the libgcc routine __umodti3 (see the comment below).
16 pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
17 // 128-bit unsigned %: __umodti3
18 self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
// Signed integer remainder. Same as gcc_urem but signed = true, so the
// non-native 128-bit fallback is __modti3.
21 pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
22 // 128-bit signed %: __modti3
23 self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
// Logical/bitwise NOT of an integer (or bool) rvalue.
// NOTE(review): the branch condition choosing between LogicalNegate and
// BitwiseNegate is elided in this listing — presumably bool uses
// LogicalNegate and plain integers use BitwiseNegate; confirm upstream.
26 pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
27 let typ = a.get_type();
28 if self.is_native_int_type_or_bool(typ) {
31 UnaryOp::LogicalNegate
34 UnaryOp::BitwiseNegate
36 self.cx.context.new_unary_op(None, operation, typ, a)
// Non-native (128-bit) path: the wide integer is represented as a
// two-element array, so bitwise-negate each half independently and
// rebuild the array.
39 // TODO(antoyo): use __negdi2 and __negti2 instead?
40 let element_type = typ.dyncast_array().expect("element type");
42 self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
43 self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
45 self.cx.context.new_array_constructor(None, typ, &values)
// Arithmetic negation. Native integer types use a plain unary minus;
// the non-native 128-bit path declares and calls the libgcc routine
// __negti2 as an extern function taking and returning the 128-bit type.
49 pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
50 let a_type = a.get_type();
51 if self.is_native_int_type(a_type) {
52 self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
55 let param_a = self.context.new_parameter(None, a_type, "a");
56 let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
57 self.context.new_call(None, func, &[a])
// Bitwise AND; all native/non-native dispatch happens in bitwise_operation.
61 pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
62 self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
// Logical shift right.
// - both native: emit a plain shift, casting `b` to `a`'s type when the
//   two operands disagree on signedness (libgccjit limitation, see FIXME);
// - native `a`, non-native `b`: cast the wide shift amount down and recurse;
// - non-native `a` (128-bit): open-code the shift with basic blocks, since
//   the libgcc builtin cannot be used here (see NOTE below).
65 pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
66 let a_type = a.get_type();
67 let b_type = b.get_type();
68 let a_native = self.is_native_int_type(a_type);
69 let b_native = self.is_native_int_type(b_type);
70 if a_native && b_native {
71 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
72 // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
73 if a_type.is_signed(self) != b_type.is_signed(self) {
74 let b = self.context.new_cast(None, b, a_type);
81 else if a_native && !b_native {
82 self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
85 // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most
86 // significant half of the number) which uses lshr.
88 let native_int_type = a_type.dyncast_array().expect("get element type");
// Control flow: `then` handles shift amounts >= 64 (tested via `b & 64`),
// `b0` handles a zero shift (identity), `actual_else` handles 0 < b < 64;
// all three store into `result` and fall through to `after`.
90 let func = self.current_func();
91 let then_block = func.new_block("then");
92 let else_block = func.new_block("else");
93 let after_block = func.new_block("after");
94 let b0_block = func.new_block("b0");
95 let actual_else_block = func.new_block("actual_else");
97 let result = func.new_local(None, a_type, "shiftResult");
99 let sixty_four = self.gcc_int(native_int_type, 64);
100 let sixty_three = self.gcc_int(native_int_type, 63);
101 let zero = self.gcc_zero(native_int_type);
102 let b = self.gcc_int_cast(b, native_int_type);
// NOTE(review): `b & 64 != 0` only distinguishes shifts >= 64 when b < 128;
// larger shift amounts are UB for a 128-bit shift anyway.
103 let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
104 self.llbb().end_with_conditional(None, condition, then_block, else_block);
106 // TODO(antoyo): take endianness into account.
// then-branch: shift >= 64 — the result's low half comes from the original
// high half shifted by (b - 64). NOTE(review): the lines building `values`
// (including the signed sign-fill using `sixty_three`) are elided in this
// listing; confirm against the full source.
107 let shift_value = self.gcc_sub(b, sixty_four);
108 let high = self.high(a);
110 if a_type.is_signed(self) {
120 let array_value = self.context.new_array_constructor(None, a_type, &values);
121 then_block.add_assignment(None, result, array_value);
122 then_block.end_with_jump(None, after_block);
// else-branch: split zero shifts (identity) from 0 < b < 64.
124 let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
125 else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
127 b0_block.add_assignment(None, result, a);
128 b0_block.end_with_jump(None, after_block);
// 0 < b < 64: low half = logical shift of low by b (done on the unsigned
// type so the shift is logical, not arithmetic); the bits shifted out of
// high are moved into the low half via `high << (64 - b)`.
130 let shift_value = self.gcc_sub(sixty_four, b);
131 // NOTE: cast low to its unsigned type in order to perform a logical right shift.
132 let unsigned_type = native_int_type.to_unsigned(&self.cx);
133 let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
134 let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
135 let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
137 (high << shift_value) | shifted_low,
140 let array_value = self.context.new_array_constructor(None, a_type, &values);
141 actual_else_block.add_assignment(None, result, array_value);
142 actual_else_block.end_with_jump(None, after_block);
144 // NOTE: since jumps were added in a place rustc does not expect, the current block in the
145 // state need to be updated.
146 self.switch_to_block(after_block);
// Shared implementation of + and -. Native path: coerce `b` to `a`'s type
// (bitcast for vectors, regular cast otherwise) and emit one binary op.
// Non-native 128-bit path: call the compiler-builtins helpers
// __rust_{i,u}128_{add,sub}, declared here as extern functions.
152 fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
153 let a_type = a.get_type();
154 let b_type = b.get_type();
155 if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
156 if a_type != b_type {
157 if a_type.is_vector() {
158 // Vector types need to be bitcast.
159 // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
160 b = self.context.new_bitcast(None, b, a.get_type());
163 b = self.context.new_cast(None, b, a.get_type());
166 self.context.new_binary_op(None, operation, a_type, a, b)
// Signedness is derived from the type: only i128 (vs u128) compares
// compatible with i128_type here.
169 let signed = a_type.is_compatible_with(self.i128_type);
171 match (operation, signed) {
172 (BinaryOp::Plus, true) => "__rust_i128_add",
173 (BinaryOp::Plus, false) => "__rust_u128_add",
174 (BinaryOp::Minus, true) => "__rust_i128_sub",
175 (BinaryOp::Minus, false) => "__rust_u128_sub",
176 _ => unreachable!("unexpected additive operation {:?}", operation),
178 let param_a = self.context.new_parameter(None, a_type, "a");
179 let param_b = self.context.new_parameter(None, b_type, "b");
180 let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
181 self.context.new_call(None, func, &[a, b])
// Wrapping addition (additive_operation handles the 128-bit fallback).
185 pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
186 self.additive_operation(BinaryOp::Plus, a, b)
// Wrapping multiplication. NOTE(review): `signed` is hard-coded to true,
// so the non-native path always names __multi3 — multiplication has no
// separate unsigned libgcc routine, which is presumably why this is fine.
189 pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
190 self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
// Wrapping subtraction.
193 pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
194 self.additive_operation(BinaryOp::Minus, a, b)
// Shared implementation of *, / and %. Native path emits one binary op.
// Non-native 128-bit path builds a libgcc routine name of the form
// __{u?}{mul,div,mod}ti3 and calls it as an extern function.
// NOTE(review): the lines computing `sign` from the `signed` flag are
// elided in this listing (presumably "" for signed, "u" for unsigned).
197 fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
198 let a_type = a.get_type();
199 let b_type = b.get_type();
200 if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
201 self.context.new_binary_op(None, operation, a_type, a, b)
211 let func_name = format!("__{}{}ti3", sign, operation_name);
212 let param_a = self.context.new_parameter(None, a_type, "a");
213 let param_b = self.context.new_parameter(None, b_type, "b");
214 let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
215 self.context.new_call(None, func, &[a, b])
// Signed division; the non-native 128-bit fallback is __divti3.
219 pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
220 // TODO(antoyo): check if the types are signed?
221 // 128-bit, signed: __divti3
222 // TODO(antoyo): convert the arguments to signed?
223 self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
// Unsigned division; the non-native 128-bit fallback is __udivti3.
226 pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
227 // 128-bit, unsigned: __udivti3
228 self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
// Overflow-checked add/sub/mul. Returns (result, overflow-flag).
// Strategy: normalize isize/usize to their fixed-width equivalents, then
// (1) native widths use the GCC __builtin_*_overflow family,
// (2) 128-bit types call the compiler-builtins helpers __rust_*o which
//     return a {result, overflow} struct,
// (3) a few remaining signed-mul widths use the __mulo*4 libgcc routines.
// NOTE(review): several match arms, the OverflowOp dispatch lines, and the
// final overflow_call plumbing are elided in this listing.
231 pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
232 use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
// Replace Isize/Usize with the pointer-width fixed type so the builtin
// name selection below only deals with concrete widths.
236 Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
237 Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
238 t @ (Uint(_) | Int(_)) => t.clone(),
239 _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
242 // TODO(antoyo): remove duplication with intrinsic?
244 if self.is_native_int_type(lhs.get_type()) {
// Addition: typed s/u variants exist only for 32/64 bits; other widths
// use the generic (type-inferred) __builtin_add_overflow.
248 Int(I8) => "__builtin_add_overflow",
249 Int(I16) => "__builtin_add_overflow",
250 Int(I32) => "__builtin_sadd_overflow",
251 Int(I64) => "__builtin_saddll_overflow",
252 Int(I128) => "__builtin_add_overflow",
254 Uint(U8) => "__builtin_add_overflow",
255 Uint(U16) => "__builtin_add_overflow",
256 Uint(U32) => "__builtin_uadd_overflow",
257 Uint(U64) => "__builtin_uaddll_overflow",
258 Uint(U128) => "__builtin_add_overflow",
// Subtraction: same pattern as addition.
264 Int(I8) => "__builtin_sub_overflow",
265 Int(I16) => "__builtin_sub_overflow",
266 Int(I32) => "__builtin_ssub_overflow",
267 Int(I64) => "__builtin_ssubll_overflow",
268 Int(I128) => "__builtin_sub_overflow",
270 Uint(U8) => "__builtin_sub_overflow",
271 Uint(U16) => "__builtin_sub_overflow",
272 Uint(U32) => "__builtin_usub_overflow",
273 Uint(U64) => "__builtin_usubll_overflow",
274 Uint(U128) => "__builtin_sub_overflow",
// Multiplication: same pattern again.
280 Int(I8) => "__builtin_mul_overflow",
281 Int(I16) => "__builtin_mul_overflow",
282 Int(I32) => "__builtin_smul_overflow",
283 Int(I64) => "__builtin_smulll_overflow",
284 Int(I128) => "__builtin_mul_overflow",
286 Uint(U8) => "__builtin_mul_overflow",
287 Uint(U16) => "__builtin_mul_overflow",
288 Uint(U32) => "__builtin_umul_overflow",
289 Uint(U64) => "__builtin_umulll_overflow",
290 Uint(U128) => "__builtin_mul_overflow",
// Non-native 128-bit path: call the __rust_*o helpers, which return a
// struct with the wrapped result and a bool overflow flag.
298 Int(I128) | Uint(U128) => {
303 Int(I128) => "__rust_i128_addo",
304 Uint(U128) => "__rust_u128_addo",
309 Int(I128) => "__rust_i128_subo",
310 Uint(U128) => "__rust_u128_subo",
315 Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
316 Uint(U128) => "__rust_u128_mulo",
// Declare the extern helper with an ad-hoc {result, overflow} struct
// return type and unpack both fields from the call result.
320 let a_type = lhs.get_type();
321 let b_type = rhs.get_type();
322 let param_a = self.context.new_parameter(None, a_type, "a");
323 let param_b = self.context.new_parameter(None, b_type, "b");
324 let result_field = self.context.new_field(None, a_type, "result");
325 let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
326 let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
327 let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
328 let result = self.context.new_call(None, func, &[lhs, rhs]);
329 let overflow = result.access_field(None, overflow_field);
330 let int_result = result.access_field(None, result_field);
331 return (int_result, overflow);
// Remaining case: signed multiply via the libgcc __mulo*4 routines.
337 Int(I32) => "__mulosi4",
338 Int(I64) => "__mulodi4",
341 _ => unimplemented!("overflow operation for {:?}", new_kind),
// Builtin path: the builtin writes the result through an out-pointer and
// returns the overflow flag.
347 let intrinsic = self.context.get_builtin_function(&name);
348 let res = self.current_func()
349 // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
350 .new_local(None, rhs.get_type(), "binopResult")
352 let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
353 (res.dereference(None).to_rvalue(), overflow)
// Integer comparison producing a boolean rvalue.
// Non-native 128-bit operands are compared by calling __cmpti2/__ucmpti2,
// which return 0 (lt), 1 (eq) or 2 (gt); the requested predicate is then
// mapped to a comparison of that return value against a small constant.
// NOTE(review): the lines computing `sign` from `signed` are elided here.
356 pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
357 let a_type = lhs.get_type();
358 let b_type = rhs.get_type();
359 if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
360 let signed = a_type.is_compatible_with(self.i128_type);
368 let func_name = format!("__{}cmpti2", sign);
369 let param_a = self.context.new_parameter(None, a_type, "a");
370 let param_b = self.context.new_parameter(None, b_type, "b");
371 let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
372 let cmp = self.context.new_call(None, func, &[lhs, rhs]);
// EQ/NE can be answered directly against the "equal" return value 1.
375 IntPredicate::IntEQ => {
376 return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
378 IntPredicate::IntNE => {
379 return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
// Ordered predicates: e.g. GT means cmp == 2, GE means cmp >= 1, etc.
// Signed and unsigned predicates map identically because the sign was
// already folded into the choice of __cmpti2 vs __ucmpti2.
381 IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
382 IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
383 IntPredicate::IntULT => (ComparisonOp::Equals, 0),
384 IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
385 IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
386 IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
387 IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
388 IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
390 self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
// Native path: libgccjit cannot compare mismatched types directly, so
// coerce function pointers to usize pointers and cast other mismatches.
393 let left_type = lhs.get_type();
394 let right_type = rhs.get_type();
395 if left_type != right_type {
396 // NOTE: because libgccjit cannot compare function pointers.
397 if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
398 lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
399 rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
401 // NOTE: hack because we try to cast a vector type to the same vector type.
402 else if format!("{:?}", left_type) != format!("{:?}", right_type) {
403 rhs = self.context.new_cast(None, rhs, left_type);
406 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
// Bitwise XOR. The non-native 128-bit path xors the low and high halves
// independently and rebuilds the two-element array representation.
// NOTE(review): the native-path body between the `if` and the array
// construction is elided in this listing.
410 pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
411 let a_type = a.get_type();
412 let b_type = b.get_type();
413 if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
418 self.low(a) ^ self.low(b),
419 self.high(a) ^ self.high(b),
421 self.context.new_array_constructor(None, a_type, &values)
// Shift left. Mirror image of gcc_lshr:
// - both native: emit a plain shift, casting so operand signedness agrees
//   (libgccjit limitation, see FIXME);
// - native `a`, non-native `b`: cast the wide shift amount down and recurse;
// - non-native `a` (128-bit): open-code the shift with basic blocks.
425 pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
426 let a_type = a.get_type();
427 let b_type = b.get_type();
428 let a_native = self.is_native_int_type(a_type);
429 let b_native = self.is_native_int_type(b_type);
430 if a_native && b_native {
431 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
432 if a_type.is_unsigned(self) && b_type.is_signed(self) {
433 let a = self.context.new_cast(None, a, b_type);
435 self.context.new_cast(None, result, a_type)
437 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
438 let b = self.context.new_cast(None, b, a_type);
445 else if a_native && !b_native {
446 self.gcc_shl(a, self.gcc_int_cast(b, a_type))
449 // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
450 let native_int_type = a_type.dyncast_array().expect("get element type");
// Control flow: `then` handles shift amounts >= 64 (tested via `b & 64`),
// `b0` handles a zero shift (identity), `actual_else` handles 0 < b < 64.
452 let func = self.current_func();
453 let then_block = func.new_block("then");
454 let else_block = func.new_block("else");
455 let after_block = func.new_block("after");
456 let b0_block = func.new_block("b0");
457 let actual_else_block = func.new_block("actual_else");
459 let result = func.new_local(None, a_type, "shiftResult");
461 let b = self.gcc_int_cast(b, native_int_type);
462 let sixty_four = self.gcc_int(native_int_type, 64);
463 let zero = self.gcc_zero(native_int_type);
464 let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
465 self.llbb().end_with_conditional(None, condition, then_block, else_block);
467 // TODO(antoyo): take endianness into account.
// then-branch: shift >= 64 — the new high half is the old low half shifted
// by (b - 64); the low half becomes zero (the zero element is elided from
// this listing).
470 self.low(a) << (b - sixty_four),
472 let array_value = self.context.new_array_constructor(None, a_type, &values);
473 then_block.add_assignment(None, result, array_value);
474 then_block.end_with_jump(None, after_block);
476 let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
477 else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
479 b0_block.add_assignment(None, result, a);
480 b0_block.end_with_jump(None, after_block);
// 0 < b < 64: the high half receives (high << b) | (low >> (64 - b)); the
// low-half shift is done on the unsigned type so it is logical.
482 // NOTE: cast low to its unsigned type in order to perform a logical right shift.
483 let unsigned_type = native_int_type.to_unsigned(&self.cx);
484 let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
485 let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
486 let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
489 (self.high(a) << b) | high_low,
492 let array_value = self.context.new_array_constructor(None, a_type, &values);
493 actual_else_block.add_assignment(None, result, array_value);
494 actual_else_block.end_with_jump(None, after_block);
496 // NOTE: since jumps were added in a place rustc does not expect, the current block in the
497 // state need to be updated.
498 self.switch_to_block(after_block);
// Byte-swap `arg` (width in bits). Non-native 128-bit values are handled
// recursively: byte-swap each 64-bit half, then swap the halves' positions
// in the array. Native widths call the matching __builtin_bswap{width}.
504 pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
505 let arg_type = arg.get_type();
506 if !self.is_native_int_type(arg_type) {
507 let native_int_type = arg_type.dyncast_array().expect("get element type");
// Index 0 holds the least-significant half, index 1 the most-significant
// (see low()/high() in CodegenCx).
508 let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
509 let swapped_lsb = self.gcc_bswap(lsb, width / 2);
510 let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
511 let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
512 let swapped_msb = self.gcc_bswap(msb, width / 2);
513 let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
515 // NOTE: we also need to swap the two elements here, in addition to swapping inside
516 // the elements themselves like done above.
517 return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
520 // TODO(antoyo): check if it's faster to use string literals and a
521 // match instead of format!.
522 let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
523 // FIXME(antoyo): this cast should not be necessary. Remove
524 // when having proper sized integer types.
// The builtin's parameter type may differ from arg_type (e.g. signedness),
// so bitcast to match before calling.
525 let param_type = bswap.get_param(0).to_rvalue().get_type();
526 if param_type != arg_type {
527 arg = self.bitcast(arg, param_type);
529 self.cx.context.new_call(None, bswap, &[arg])
533 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
// Build a signed constant of type `typ`. Non-native 128-bit constants are
// built from (low, high) halves; the high half is all-ones when the value
// is negative (sign extension).
// NOTE(review): `i64::try_from` on an `i64` is a no-op — likely leftover
// from a previous signature; same for `u64::try_from` in gcc_uint below.
534 pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
535 if self.is_native_int_type_or_bool(typ) {
536 self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
539 // NOTE: set the sign in high.
540 self.from_low_high(typ, int, -(int.is_negative() as i64))
// Build an unsigned constant of type `typ`; the high half of a non-native
// 128-bit value is always zero (zero extension).
544 pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
545 if self.is_native_int_type_or_bool(typ) {
546 self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
549 self.from_low_high(typ, int as i64, 0)
// Build a constant of type `typ` from a full 128-bit value by splitting it
// into 64-bit halves.
// NOTE(review): the branch conditions between the cases below are elided
// in this listing; the visible cases are (1) native type with a nonzero
// high half: assemble via (high << 64) | low, (2) non-native type: build
// the two-element array, (3) i128 with a small value: widen via a cast,
// (4) fallback: truncate to u64. Confirm ordering against the full source.
553 pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
554 let low = num as u64;
555 let high = (num >> 64) as u64;
557 // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
558 if self.is_native_int_type(typ) {
559 let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
560 let high = self.context.new_rvalue_from_long(typ, high as i64);
562 let sixty_four = self.context.new_rvalue_from_long(typ, 64);
563 let shift = high << sixty_four;
564 shift | self.context.new_cast(None, low, typ)
567 self.from_low_high(typ, low as i64, high as i64)
570 else if typ.is_i128(self) {
571 let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
572 self.gcc_int_cast(num, typ)
575 self.gcc_uint(typ, num as u64)
// Zero constant of `typ`; non-native 128-bit zero is the (0, 0) half pair.
579 pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
580 if self.is_native_int_type_or_bool(typ) {
581 self.context.new_rvalue_zero(typ)
584 self.from_low_high(typ, 0, 0)
// Width of `typ` in bits. Native types derive it from their byte size;
// the non-native return value is elided in this listing (per the NOTE,
// only u128/i128 can be non-native, so it is presumably a constant 128).
588 pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
589 if self.is_native_int_type_or_bool(typ) {
590 typ.get_size() as u64 * 8
593 // NOTE: the only unsupported types are u128 and i128.
// Shared implementation of the bitwise ops (&, |). Vectors and matching
// native ints map to a single binary op (casting `b` when types differ);
// non-native 128-bit values apply the op to each half. Mixed native /
// non-native operands are a caller bug, hence the assert.
598 fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
599 let a_type = a.get_type();
600 let b_type = b.get_type();
601 let a_native = self.is_native_int_type_or_bool(a_type);
602 let b_native = self.is_native_int_type_or_bool(b_type);
603 if a_type.is_vector() && b_type.is_vector() {
604 self.context.new_binary_op(None, operation, a_type, a, b)
606 else if a_native && b_native {
607 if a_type != b_type {
608 b = self.context.new_cast(None, b, a_type);
610 self.context.new_binary_op(None, operation, a_type, a, b)
613 assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
614 let native_int_type = a_type.dyncast_array().expect("get element type");
616 self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
617 self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
619 self.context.new_array_constructor(None, a_type, &values)
// Bitwise OR; dispatch handled by bitwise_operation above.
623 pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
624 self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
627 // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
// Integer-to-integer cast covering all four native/non-native combinations:
// native -> native: plain cast; non-native -> native: truncate by taking
// the low half; native -> non-native: widen, filling the high half with
// the sign (0 or -1); non-native -> non-native: same-size bitcast.
628 pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
629 let value_type = value.get_type();
630 if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
631 self.context.new_cast(None, value, dest_typ)
633 else if self.is_native_int_type_or_bool(dest_typ) {
634 self.context.new_cast(None, self.low(value), dest_typ)
636 else if self.is_native_int_type_or_bool(value_type) {
637 let dest_element_type = dest_typ.dyncast_array().expect("get element type");
639 // NOTE: set the sign of the value.
// `is_negative` is a bool 0/1; negating it yields 0 or -1 (all-ones),
// which is exactly the sign-extended high half.
640 let zero = self.context.new_rvalue_zero(value_type);
641 let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
642 let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
644 self.context.new_cast(None, value, dest_element_type),
645 self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
647 self.context.new_array_constructor(None, dest_typ, &values)
650 // Since u128 and i128 are the only types that can be unsupported, we know the type of
651 // value and the destination type have the same size, so a bitcast is fine.
653 // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
654 self.context.new_bitcast(None, value, dest_typ)
// Convert a (possibly 128-bit) integer to a float. Native ints use a plain
// cast; non-native 128-bit values call the libgcc conversion routine
// __float{ti,unti}{sf,df} built from the signedness and destination kind.
// NOTE(review): the lines binding `sign` and `name_suffix` from the match
// are elided in this listing.
658 fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
659 let value_type = value.get_type();
660 if self.is_native_int_type_or_bool(value_type) {
661 return self.context.new_cast(None, value, dest_typ);
665 match self.type_kind(dest_typ) {
666 TypeKind::Float => "tisf",
667 TypeKind::Double => "tidf",
668 kind => panic!("cannot cast a non-native integer to type {:?}", kind),
677 let func_name = format!("__float{}{}", sign, name_suffix);
678 let param = self.context.new_parameter(None, value_type, "n");
679 let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
680 self.context.new_call(None, func, &[value])
// Signed variant of the conversion above.
683 pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
684 self.int_to_float_cast(true, value, dest_typ)
// Unsigned variant of the conversion above.
687 pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
688 self.int_to_float_cast(false, value, dest_typ)
// Convert a float to a (possibly 128-bit) integer. Native destinations use
// a plain cast; non-native 128-bit destinations call the libgcc routine
// __fix{,uns}{sf,df}ti built from the signedness and source float kind.
// NOTE(review): the lines binding `sign` and `name_suffix` are elided in
// this listing.
691 fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
692 let value_type = value.get_type();
693 if self.is_native_int_type_or_bool(dest_typ) {
694 return self.context.new_cast(None, value, dest_typ);
698 match self.type_kind(value_type) {
699 TypeKind::Float => "sfti",
700 TypeKind::Double => "dfti",
701 kind => panic!("cannot cast a {:?} to non-native integer", kind),
710 let func_name = format!("__fix{}{}", sign, name_suffix);
711 let param = self.context.new_parameter(None, value_type, "n");
712 let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
713 self.context.new_call(None, func, &[value])
// Signed variant of the conversion above.
716 pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
717 self.float_to_int_cast(true, value, dest_typ)
// Unsigned variant of the conversion above.
720 pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
721 self.float_to_int_cast(false, value, dest_typ)
// Most-significant 64-bit half of a non-native 128-bit value (array index 1).
// NOTE(review): this fixes little-endian half ordering; a TODO about
// endianness exists elsewhere in this module.
724 fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
725 self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
// Least-significant 64-bit half (array index 0).
729 fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
730 self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
// Build a non-native 128-bit constant from its two 64-bit halves by
// constructing the backing two-element array ([low, high]).
734 fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
735 let native_int_type = typ.dyncast_array().expect("get element type");
737 self.context.new_rvalue_from_long(native_int_type, low),
738 self.context.new_rvalue_from_long(native_int_type, high),
740 self.context.new_array_constructor(None, typ, &values)