3 use std::convert::TryFrom;
4 use std::ops::{Deref, Range};
6 use gccjit::FunctionType;
18 use rustc_codegen_ssa::MemFlags;
19 use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
20 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
21 use rustc_codegen_ssa::mir::place::PlaceRef;
22 use rustc_codegen_ssa::traits::{
32 use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
33 use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, TyAndLayout};
35 use rustc_span::def_id::DefId;
36 use rustc_target::abi::{
44 use rustc_target::spec::{HasTargetSpec, Target};
46 use crate::common::{SignType, TypeReflection, type_is_pointer};
47 use crate::context::CodegenCx;
48 use crate::type_of::LayoutGccExt;
53 // TODO: remove this variable.
54 static mut RETURN_VALUE_COUNT: usize = 0;
56 enum ExtremumOperation {
62 fn clone(&self) -> Self;
65 impl EnumClone for AtomicOrdering {
66 fn clone(&self) -> Self {
68 AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
69 AtomicOrdering::Unordered => AtomicOrdering::Unordered,
70 AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
71 AtomicOrdering::Acquire => AtomicOrdering::Acquire,
72 AtomicOrdering::Release => AtomicOrdering::Release,
73 AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
74 AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
79 pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
80 pub cx: &'a CodegenCx<'gcc, 'tcx>,
81 pub block: Option<Block<'gcc>>,
82 stack_var_count: Cell<usize>,
85 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
86 fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
90 stack_var_count: Cell::new(0),
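// NOTE (editorial sketch): `atomic_extremum` below lowers atomic max/min to a
// compare-and-swap loop, since libgccjit exposes no fetch-max/min builtin. The
// emitted control flow is roughly (pseudocode, not part of the build):
//
//     previous = atomic_load(dst, load_ordering);
//     return_value = previous;
//     while previous OP src    // `<` for Max, `>` for Min
//         && !compare_exchange(dst, &mut previous, src, ...) {
//         // on failure, compare_exchange writes the current value of *dst
//         // into `previous`, so the loop re-tests against fresh data
//     }
//     return_value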
94 fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
95 let size = self.cx.int_width(src.get_type()) / 8;
97 let func = self.current_func();
101 // TODO: does this make sense?
102 AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
105 let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
106 let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
107 let return_value = func.new_local(None, previous_value.get_type(), "return_value");
108 self.llbb().add_assignment(None, previous_var, previous_value);
109 self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
111 let while_block = func.new_block("while");
112 let after_block = func.new_block("after_while");
113 self.llbb().end_with_jump(None, while_block);
115 // NOTE: since jumps were added in a place compare_exchange does not expect, the current
116 // blocks in the state need to be updated.
117 self.block = Some(while_block);
118 *self.cx.current_block.borrow_mut() = Some(while_block);
120 let comparison_operator =
122 ExtremumOperation::Max => ComparisonOp::LessThan,
123 ExtremumOperation::Min => ComparisonOp::GreaterThan,
126 let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
127 let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
128 let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
129 let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
131 while_block.end_with_conditional(None, cond, while_block, after_block);
133 // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
134 // state need to be updated.
135 self.block = Some(after_block);
136 *self.cx.current_block.borrow_mut() = Some(after_block);
138 return_value.to_rvalue()
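// The builtin resolved below has GCC's documented C prototype (for N-byte
// integer types):
//
//     bool __atomic_compare_exchange_N(type *ptr, type *expected, type desired,
//                                      bool weak, int success_order, int failure_order);
//
// On failure it writes the current value of *ptr into *expected, which is what
// makes the retry loop in `atomic_extremum` above converge.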
141 fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
142 let size = self.cx.int_width(src.get_type());
143 let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
144 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
145 let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
146 let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
148 let void_ptr_type = self.context.new_type::<*mut ()>();
149 let volatile_void_ptr_type = void_ptr_type.make_volatile();
150 let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
151 let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
153 // NOTE: not sure why, but the type of src does not match the builtin's parameter type, so cast to the expected type.
154 let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
155 let src = self.context.new_cast(None, src, int_type);
156 self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
159 pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
160 self.llbb().add_assignment(None, lvalue, value);
163 fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
164 //let mut fn_ty = self.cx.val_ty(func);
165 // Strip off pointers
166 /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
167 fn_ty = self.cx.element_type(fn_ty);
171 self.cx.type_kind(fn_ty) == TypeKind::Function,
172 "builder::{} not passed a function, but {:?}",
177 let param_tys = self.cx.func_params_types(fn_ty);
179 let all_args_match = param_tys
181 .zip(args.iter().map(|&v| self.val_ty(v)))
182 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
184 let mut all_args_match = true;
185 let mut param_types = vec![];
186 let param_count = func.get_param_count();
187 for (index, arg) in args.iter().enumerate().take(param_count) {
188 let param = func.get_param(index as i32);
189 let param = param.to_rvalue().get_type();
190 if param != arg.get_type() {
191 all_args_match = false;
193 param_types.push(param);
197 return Cow::Borrowed(args);
200 let casted_args: Vec<_> = param_types
204 .map(|(_i, (expected_ty, &actual_val))| {
205 let actual_ty = actual_val.get_type();
206 if expected_ty != actual_ty {
208 "type mismatch in function call of {:?}. \
209 Expected {:?} for param {}, got {:?}; injecting bitcast",
210 func, expected_ty, i, actual_ty
213 "type mismatch in function call of {:?}. \
214 Expected {:?} for param {}, got {:?}; injecting bitcast",
215 func, expected_ty, i, actual_ty
217 self.bitcast(actual_val, expected_ty)
225 Cow::Owned(casted_args)
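// Usage note for the two checkers (this one and `check_ptr_call` below): when
// every argument type already matches, they hand back `Cow::Borrowed(args)`
// and allocate nothing; only a mismatch produces an owned, bitcast-adjusted
// vector. A sketch of the common call site:
//
//     let args = self.check_call("call", func, args); // Cow::Borrowed if all types match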
228 fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
229 //let mut fn_ty = self.cx.val_ty(func);
230 // Strip off pointers
231 /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
232 fn_ty = self.cx.element_type(fn_ty);
236 self.cx.type_kind(fn_ty) == TypeKind::Function,
237 "builder::{} not passed a function, but {:?}",
242 let param_tys = self.cx.func_params_types(fn_ty);
244 let all_args_match = param_tys
246 .zip(args.iter().map(|&v| self.val_ty(v)))
247 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
249 let mut all_args_match = true;
250 let mut param_types = vec![];
251 let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
252 for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
253 let param = gcc_func.get_param_type(index);
254 if param != arg.get_type() {
255 all_args_match = false;
257 param_types.push(param);
261 return Cow::Borrowed(args);
264 let casted_args: Vec<_> = param_types
268 .map(|(_i, (expected_ty, &actual_val))| {
269 let actual_ty = actual_val.get_type();
270 if expected_ty != actual_ty {
272 "type mismatch in function call of {:?}. \
273 Expected {:?} for param {}, got {:?}; injecting bitcast",
274 func, expected_ty, i, actual_ty
277 "type mismatch in function call of {:?}. \
278 Expected {:?} for param {}, got {:?}; injecting bitcast",
279 func, expected_ty, i, actual_ty
281 self.bitcast(actual_val, expected_ty)
289 Cow::Owned(casted_args)
292 fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
293 let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here.
294 let stored_ty = self.cx.val_ty(val);
295 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
297 //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
299 if dest_ptr_ty == stored_ptr_ty {
304 "type mismatch in store. \
305 Expected {:?}, got {:?}; inserting bitcast",
306 dest_ptr_ty, stored_ptr_ty
309 "type mismatch in store. \
310 Expected {:?}, got {:?}; inserting bitcast",
311 dest_ptr_ty, stored_ptr_ty
314 self.bitcast(ptr, stored_ptr_ty)
318 pub fn current_func(&self) -> Function<'gcc> {
319 self.block.expect("block").get_function()
322 fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
323 //debug!("call {:?} with args ({:?})", func, args);
325 // TODO: remove when the API supports a different type for functions.
326 let func: Function<'gcc> = self.cx.rvalue_as_function(func);
327 let args = self.check_call("call", func, args);
328 //let bundle = funclet.map(|funclet| funclet.bundle());
329 //let bundle = bundle.as_ref().map(|b| &*b.raw);
331 // gccjit requires the result of function calls to be used, even when rustc does not use it.
332 // That's why we assign the result to a local or call add_eval().
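// For example (a sketch of the constraint, not verified against every gccjit
// version): instead of emitting a bare expression statement `foo(x);` for a
// non-void `foo`, we emit `returnValueN = foo(x);`; void calls go through
// add_eval() instead.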
333 let return_type = func.get_return_type();
334 let current_block = self.current_block.borrow().expect("block");
335 let void_type = self.context.new_type::<()>();
336 let current_func = current_block.get_function();
337 if return_type != void_type {
338 unsafe { RETURN_VALUE_COUNT += 1 };
339 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
340 current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
344 current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
345 // Return a dummy value when there is no return value.
346 self.context.new_rvalue_from_long(self.isize_type, 0)
350 fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
351 //debug!("func ptr call {:?} with args ({:?})", func, args);
353 let args = self.check_ptr_call("call", func_ptr, args);
354 //let bundle = funclet.map(|funclet| funclet.bundle());
355 //let bundle = bundle.as_ref().map(|b| &*b.raw);
357 // gccjit requires the result of function calls to be used, even when rustc does not use it.
358 // That's why we assign the result to a local or call add_eval().
359 let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
360 let mut return_type = gcc_func.get_return_type();
361 let current_block = self.current_block.borrow().expect("block");
362 let void_type = self.context.new_type::<()>();
363 let current_func = current_block.get_function();
365 // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
366 if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
367 return_type = self.int_type;
370 if return_type != void_type {
371 unsafe { RETURN_VALUE_COUNT += 1 };
372 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
373 current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
377 if gcc_func.get_param_count() == 0 {
378 // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
379 current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
382 current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
384 // Return a dummy value when there is no return value.
385 let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
386 current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
391 pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
392 //debug!("overflow_call {:?} with args ({:?})", func, args);
394 //let bundle = funclet.map(|funclet| funclet.bundle());
395 //let bundle = bundle.as_ref().map(|b| &*b.raw);
397 // gccjit requires the result of function calls to be used, even when rustc does not use it.
398 // That's why we assign the result to a local.
399 let return_type = self.context.new_type::<bool>();
400 let current_block = self.current_block.borrow().expect("block");
401 let current_func = current_block.get_function();
402 // TODO: return the result of new_call() directly, since the overflow function has no side effects?
403 unsafe { RETURN_VALUE_COUNT += 1 };
404 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
405 current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
410 impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
411 type CodegenCx = CodegenCx<'gcc, 'tcx>;
414 impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
415 fn tcx(&self) -> TyCtxt<'tcx> {
420 impl HasDataLayout for Builder<'_, '_, '_> {
421 fn data_layout(&self) -> &TargetDataLayout {
422 self.cx.data_layout()
426 impl<'tcx> LayoutOf for Builder<'_, '_, 'tcx> {
428 type TyAndLayout = TyAndLayout<'tcx>;
430 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
431 self.cx.layout_of(ty)
435 impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
436 type Target = CodegenCx<'gcc, 'tcx>;
438 fn deref(&self) -> &Self::Target {
443 impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
444 type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
445 type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
446 type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
447 type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
448 type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
450 type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
451 type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
452 type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
455 impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
456 fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
457 let mut bx = Builder::with_cx(cx);
458 *cx.current_block.borrow_mut() = Some(block);
459 bx.block = Some(block);
463 fn build_sibling_block(&mut self, name: &str) -> Self {
464 let block = self.append_sibling_block(name);
465 Self::build(self.cx, block)
468 fn llbb(&self) -> Block<'gcc> {
469 self.block.expect("block")
472 fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
473 let func = cx.rvalue_as_function(func);
477 fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
478 let func = self.current_func();
482 fn ret_void(&mut self) {
483 self.llbb().end_with_void_return(None)
486 fn ret(&mut self, value: RValue<'gcc>) {
488 if self.structs_as_pointer.borrow().contains(&value) {
489 // NOTE: hack to workaround a limitation of the rustc API: see comment on
490 // CodegenCx.structs_as_pointer
491 value.dereference(None).to_rvalue()
496 self.llbb().end_with_return(None, value);
499 fn br(&mut self, dest: Block<'gcc>) {
500 self.llbb().end_with_jump(None, dest)
503 fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
504 self.llbb().end_with_conditional(None, cond, then_block, else_block)
507 fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
508 let mut gcc_cases = vec![];
509 let typ = self.val_ty(value);
510 for (on_val, dest) in cases {
511 let on_val = self.const_uint_big(typ, on_val);
512 gcc_cases.push(self.context.new_case(on_val, on_val, dest));
514 self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
517 fn invoke(&mut self, _func: RValue<'gcc>, _args: &[RValue<'gcc>], _then: Block<'gcc>, _catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
519 /*debug!("invoke {:?} with args ({:?})", func, args);
521 let args = self.check_call("invoke", func, args);
522 let bundle = funclet.map(|funclet| funclet.bundle());
523 let bundle = bundle.as_ref().map(|b| &*b.raw);
526 llvm::LLVMRustBuildInvoke(
530 args.len() as c_uint,
539 fn unreachable(&mut self) {
540 let func = self.context.get_builtin_function("__builtin_unreachable");
541 let block = self.block.expect("block");
542 block.add_eval(None, self.context.new_call(None, func, &[]));
543 let return_type = block.get_function().get_return_type();
544 let void_type = self.context.new_type::<()>();
545 if return_type == void_type {
546 block.end_with_void_return(None)
549 let return_value = self.current_func()
550 .new_local(None, return_type, "unreachableReturn");
551 block.end_with_return(None, return_value)
555 fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
556 // FIXME: this should not be required.
557 if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
558 b = self.context.new_cast(None, b, a.get_type());
563 fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
567 fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
568 if a.get_type() != b.get_type() {
569 b = self.context.new_cast(None, b, a.get_type());
574 fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
578 fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
582 fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
586 fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
587 // TODO: convert the arguments to unsigned?
591 fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
592 // TODO: convert the arguments to unsigned?
593 // TODO: poison if not exact.
597 fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
598 // TODO: convert the arguments to signed?
602 fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
603 // TODO: poison if not exact.
604 // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
605 // should be the same.
606 let typ = a.get_type().to_signed(self);
607 let a = self.context.new_cast(None, a, typ);
608 let b = self.context.new_cast(None, b, typ);
612 fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
616 fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
620 fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
624 fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
625 if a.get_type() == self.cx.float_type {
626 let fmodf = self.context.get_builtin_function("fmodf");
627 // FIXME: this seems to produce the wrong result.
628 return self.context.new_call(None, fmodf, &[a, b]);
630 assert_eq!(a.get_type(), self.cx.double_type);
632 let fmod = self.context.get_builtin_function("fmod");
633 return self.context.new_call(None, fmod, &[a, b]);
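// The three shift methods below share one workaround: as the FIXMEs note,
// libgccjit cannot yet shift an unsigned number by an unsigned number, so when
// operand signedness differs, one operand is cast to the other's type and the
// result is cast back. Sketch for `a << b` with unsigned `a` and signed `b`:
//
//     cast_to(a_type, cast_to(b_type, a) << b)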
636 fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
637 // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
638 let a_type = a.get_type();
639 let b_type = b.get_type();
640 if a_type.is_unsigned(self) && b_type.is_signed(self) {
641 //println!("shl: {:?} -> {:?}", a, b_type);
642 let a = self.context.new_cast(None, a, b_type);
644 //println!("shl: {:?} -> {:?}", result, a_type);
645 self.context.new_cast(None, result, a_type)
647 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
648 //println!("shl: {:?} -> {:?}", b, a_type);
649 let b = self.context.new_cast(None, b, a_type);
657 fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
658 // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
659 // TODO: if this does not produce a logical shift, cast to unsigned first.
660 let a_type = a.get_type();
661 let b_type = b.get_type();
662 if a_type.is_unsigned(self) && b_type.is_signed(self) {
663 //println!("lshl: {:?} -> {:?}", a, b_type);
664 let a = self.context.new_cast(None, a, b_type);
666 //println!("lshl: {:?} -> {:?}", result, a_type);
667 self.context.new_cast(None, result, a_type)
669 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
670 //println!("lshl: {:?} -> {:?}", b, a_type);
671 let b = self.context.new_cast(None, b, a_type);
679 fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
680 // TODO: check whether >> behaves as an arithmetic shift here.
681 // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
682 let a_type = a.get_type();
683 let b_type = b.get_type();
684 if a_type.is_unsigned(self) && b_type.is_signed(self) {
685 //println!("ashl: {:?} -> {:?}", a, b_type);
686 let a = self.context.new_cast(None, a, b_type);
688 //println!("ashl: {:?} -> {:?}", result, a_type);
689 self.context.new_cast(None, result, a_type)
691 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
692 //println!("ashl: {:?} -> {:?}", b, a_type);
693 let b = self.context.new_cast(None, b, a_type);
701 fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
702 // FIXME: hack by putting the result in a variable to workaround this bug:
703 // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
704 if a.get_type() != b.get_type() {
705 b = self.context.new_cast(None, b, a.get_type());
707 let res = self.current_func().new_local(None, b.get_type(), "andResult");
708 self.llbb().add_assignment(None, res, a & b);
712 fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
713 // FIXME: hack by putting the result in a variable to workaround this bug:
714 // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
715 let res = self.current_func().new_local(None, b.get_type(), "orResult");
716 self.llbb().add_assignment(None, res, a | b);
720 fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
724 fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
725 // TODO: use new_unary_op()?
726 self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
729 fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
730 self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
733 fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
735 if a.get_type().is_bool() {
736 UnaryOp::LogicalNegate
739 UnaryOp::BitwiseNegate
741 self.cx.context.new_unary_op(None, operation, a.get_type(), a)
744 fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
748 fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
752 fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
756 fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
757 // TODO: should generate poison value?
761 fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
765 fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
769 fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
772 let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
773 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
778 fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
781 let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
782 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
787 fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
790 let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
791 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
796 fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
799 let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
800 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
805 fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
808 let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
809 llvm::LLVMRustSetHasUnsafeAlgebra(instr);
814 fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
815 use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
819 Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
820 Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
821 t @ (Uint(_) | Int(_)) => t.clone(),
822 _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
825 // TODO: remove duplication with intrinsic?
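// The names selected below are GCC's overflow-checking builtins, which share
// the documented C shape
//
//     bool __builtin_OP_overflow(T a, T b, T *res);
//
// returning true on overflow and writing the (wrapped) result through `res`.
// This is why `overflow_call` below receives the address of a fresh local as
// the third argument and yields only the boolean.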
830 Int(I8) => "__builtin_add_overflow",
831 Int(I16) => "__builtin_add_overflow",
832 Int(I32) => "__builtin_sadd_overflow",
833 Int(I64) => "__builtin_saddll_overflow",
834 Int(I128) => "__builtin_add_overflow",
836 Uint(U8) => "__builtin_add_overflow",
837 Uint(U16) => "__builtin_add_overflow",
838 Uint(U32) => "__builtin_uadd_overflow",
839 Uint(U64) => "__builtin_uaddll_overflow",
840 Uint(U128) => "__builtin_add_overflow",
846 Int(I8) => "__builtin_sub_overflow",
847 Int(I16) => "__builtin_sub_overflow",
848 Int(I32) => "__builtin_ssub_overflow",
849 Int(I64) => "__builtin_ssubll_overflow",
850 Int(I128) => "__builtin_sub_overflow",
852 Uint(U8) => "__builtin_sub_overflow",
853 Uint(U16) => "__builtin_sub_overflow",
854 Uint(U32) => "__builtin_usub_overflow",
855 Uint(U64) => "__builtin_usubll_overflow",
856 Uint(U128) => "__builtin_sub_overflow",
862 Int(I8) => "__builtin_mul_overflow",
863 Int(I16) => "__builtin_mul_overflow",
864 Int(I32) => "__builtin_smul_overflow",
865 Int(I64) => "__builtin_smulll_overflow",
866 Int(I128) => "__builtin_mul_overflow",
868 Uint(U8) => "__builtin_mul_overflow",
869 Uint(U16) => "__builtin_mul_overflow",
870 Uint(U32) => "__builtin_umul_overflow",
871 Uint(U64) => "__builtin_umulll_overflow",
872 Uint(U128) => "__builtin_mul_overflow",
878 let intrinsic = self.context.get_builtin_function(&name);
879 let res = self.current_func()
880 // TODO: is it correct to use rhs type instead of the parameter typ?
881 .new_local(None, rhs.get_type(), "binopResult")
883 let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
884 (res.dereference(None).to_rvalue(), overflow)
887 fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
888 // FIXME: this checks that we don't call get_aligned() a second time on a type.
889 // Ideally, we shouldn't need to do this check.
891 if ty == self.cx.u128_type || ty == self.cx.i128_type {
895 ty.get_aligned(align.bytes())
897 // TODO: It might be better to return an LValue, but fixing the rustc API is non-trivial.
898 self.stack_var_count.set(self.stack_var_count.get() + 1);
899 self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
902 fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
905 let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
906 llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
911 fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
914 let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
915 llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
920 fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
922 let block = self.llbb();
923 let function = block.get_function();
924 // NOTE: instead of returning the dereference here, we have to assign it to a variable in
925 // the current basic block. Otherwise, it could be used in another basic block, causing a
926 // dereference after a drop, for instance.
927 // TODO: handle align.
928 let deref = ptr.dereference(None).to_rvalue();
929 let value_type = deref.get_type();
930 unsafe { RETURN_VALUE_COUNT += 1 };
931 let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
932 block.add_assignment(None, loaded_value, deref);
933 loaded_value.to_rvalue()
936 fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
938 //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile());
939 let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
941 ptr.dereference(None).to_rvalue()
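// `atomic_load` resolves GCC's size-suffixed builtin, whose documented C
// prototype (for N in {1, 2, 4, 8, 16}) is:
//
//     type __atomic_load_N(type *ptr, int memorder);
//
// hence the cast of `ptr` to a const volatile void pointer and the integer
// memory-order argument produced by `order.to_gcc()`.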
944 fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
946 // TODO: handle alignment.
947 let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
948 let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
950 let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
951 let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
952 self.context.new_call(None, atomic_load, &[ptr, ordering])
955 fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
956 //debug!("PlaceRef::load: {:?}", place);
958 assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
960 if place.layout.is_zst() {
961 return OperandRef::new_zst(self, place.layout);
964 fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
965 let vr = scalar.valid_range.clone();
968 let range = scalar.valid_range_exclusive(bx);
969 if range.start != range.end {
970 bx.range_metadata(load, range);
973 abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
974 bx.nonnull_metadata(load);
981 if let Some(llextra) = place.llextra {
982 OperandValue::Ref(place.llval, Some(llextra), place.align)
984 else if place.layout.is_gcc_immediate() {
985 let const_llval = None;
987 if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
988 if llvm::LLVMIsGlobalConstant(global) == llvm::True {
989 const_llval = llvm::LLVMGetInitializer(global);
993 let llval = const_llval.unwrap_or_else(|| {
994 let load = self.load(place.llval.get_type(), place.llval, place.align);
995 if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
996 scalar_load_metadata(self, load, scalar);
1000 OperandValue::Immediate(self.to_immediate(llval, place.layout))
1002 else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
1003 let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
1005 let mut load = |i, scalar: &abi::Scalar, align| {
1006 let llptr = self.struct_gep(place.llval, i as u64);
1007 let load = self.load(llptr.get_type(), llptr, align);
1008 scalar_load_metadata(self, load, scalar);
1009 if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
1013 load(0, a, place.align),
1014 load(1, b, place.align.restrict_for_offset(b_offset)),
1018 OperandValue::Ref(place.llval, None, place.align)
1021 OperandRef { val, layout: place.layout }
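// `write_operand_repeatedly` builds the fill loop out of sibling blocks. The
// emitted structure is roughly (sketch):
//
//     loop_var = start;
//     repeat_loop_header: if (loop_var != end) goto repeat_loop_body;
//                         else goto repeat_loop_next;
//     repeat_loop_body:   *loop_var = cg_elem;
//                         loop_var = &loop_var[1];
//                         goto repeat_loop_header;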
1024 fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
1025 let zero = self.const_usize(0);
1026 let count = self.const_usize(count);
1027 let start = dest.project_index(&mut self, zero).llval;
1028 let end = dest.project_index(&mut self, count).llval;
1030 let mut header_bx = self.build_sibling_block("repeat_loop_header");
1031 let mut body_bx = self.build_sibling_block("repeat_loop_body");
1032 let next_bx = self.build_sibling_block("repeat_loop_next");
1034 let ptr_type = start.get_type();
1035 let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
1036 let current_val = current.to_rvalue();
1037 self.assign(current, start);
1039 self.br(header_bx.llbb());
1041 let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
1042 header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
1044 let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
1045 cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
1047 let next = body_bx.inbounds_gep(current.to_rvalue(), &[self.const_usize(1)]);
1048 body_bx.llbb().add_assignment(None, current, next);
1049 body_bx.br(header_bx.llbb());
1054 fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) {
1056 /*if self.sess().target.target.arch == "amdgpu" {
1057 // amdgpu/LLVM does something weird and thinks a i64 value is
1058 // split into a v2i32, halving the bitwidth LLVM expects,
1059 // tripping an assertion. So, for now, just disable this
1065 let llty = self.cx.val_ty(load);
1067 self.cx.const_uint_big(llty, range.start),
1068 self.cx.const_uint_big(llty, range.end),
1071 llvm::LLVMSetMetadata(
1073 llvm::MD_range as c_uint,
1074 llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
1079 fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
1082 llvm::LLVMSetMetadata(
1084 llvm::MD_nonnull as c_uint,
1085 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
1090 fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
1091 self.store_with_flags(val, ptr, align, MemFlags::empty())
1094 fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
1095 //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
1096 let ptr = self.check_store(val, ptr);
1097 self.llbb().add_assignment(None, ptr.dereference(None), val);
1099 if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
1100 llvm::LLVMSetAlignment(store, align);
1101 if flags.contains(MemFlags::VOLATILE) {
1102 llvm::LLVMSetVolatile(store, llvm::True);
1104 if flags.contains(MemFlags::NONTEMPORAL) {
1105 // According to LLVM [1] building a nontemporal store must
1106 // *always* point to a metadata value of the integer 1.
1108 // [1]: http://llvm.org/docs/LangRef.html#store-instruction
1109 let one = self.cx.const_i32(1);
1110 let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
1111 llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
1113 // NOTE: dummy value here since it's never used. FIXME: should the API not return a value here?
1114 self.cx.context.new_rvalue_zero(self.type_i32())
1117 fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
1118 // TODO: handle alignment.
1119 let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
1120 let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1121 let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
1122 let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
1124 // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because
1125 // the following cast is required to avoid this error:
1126 // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
1127 let int_type = atomic_store.get_param(1).to_rvalue().get_type();
1128 let value = self.context.new_cast(None, value, int_type);
1130 .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
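// `gep` lowers each index to an array access whose address feeds the next
// access, so gep(ptr, [i, j]) is in effect (sketch):
//
//     &(&ptr[i])[j]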
1133 fn gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
1134 let mut result = ptr;
1135 for index in indices {
1136 result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
1141 fn inbounds_gep(&mut self, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
1142 // FIXME: it would be safer to do the same thing as gep (a loop over the indices).
1143 // TODO: specify inbounds somehow.
1144 match indices.len() {
1146 self.context.new_array_access(None, ptr, indices[0]).get_address(None)
1149 let array = ptr.dereference(None); // TODO: assert that first index is 0?
1150 self.context.new_array_access(None, array, indices[1]).get_address(None)
1152 _ => unimplemented!(),
1156 fn struct_gep(&mut self, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1157 // FIXME: it would be better if the API only called this on structs, not on arrays.
1158 assert_eq!(idx as usize as u64, idx);
1159 let value = ptr.dereference(None).to_rvalue();
1160 let value_type = value.get_type();
1162 if value_type.is_array().is_some() {
1163 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1164 let element = self.context.new_array_access(None, value, index);
1165 element.get_address(None)
1167 else if let Some(vector_type) = value_type.is_vector() {
1168 let array_type = vector_type.get_element_type().make_pointer();
1169 let array = self.bitcast(ptr, array_type);
1170 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1171 let element = self.context.new_array_access(None, array, index);
1172 element.get_address(None)
1174 else if let Some(struct_type) = value_type.is_struct() {
1175 ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
1178 panic!("Unexpected type {:?}", value_type);
1183 fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1184 // TODO: check that it indeed truncates the value.
1185 //println!("trunc: {:?} -> {:?}", value, dest_ty);
1186 self.context.new_cast(None, value, dest_ty)
1189 fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1190 // TODO: check that it indeed sign extends the value.
1191 //println!("Sext {:?} to {:?}", value, dest_ty);
1192 //if let Some(vector_type) = value.get_type().is_vector() {
1193 if dest_ty.is_vector().is_some() {
1194 // TODO: nothing to do as it is only for LLVM?
1196 /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64);
1197 println!("Casting {:?} to {:?}", value, dest_type);
1198 return self.context.new_cast(None, value, dest_type);*/
1200 self.context.new_cast(None, value, dest_ty)
1203 fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1204 //println!("7: fptoui: {:?} to {:?}", value, dest_ty);
1205 let ret = self.context.new_cast(None, value, dest_ty);
1208 //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
1211 fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1212 self.context.new_cast(None, value, dest_ty)
1215 fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1216 //println!("1: uitofp: {:?} -> {:?}", value, dest_ty);
1217 let ret = self.context.new_cast(None, value, dest_ty);
1222 fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1223 //println!("3: sitofp: {:?} -> {:?}", value, dest_ty);
1224 let ret = self.context.new_cast(None, value, dest_ty);
1229 fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1230 // TODO: make sure it truncates.
1231 self.context.new_cast(None, value, dest_ty)
1234 fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1235 self.context.new_cast(None, value, dest_ty)
1238 fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1239 self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
1242 fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1243 self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
1246 fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1247 self.cx.const_bitcast(value, dest_ty)
1250 fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
1251 // NOTE: is_signed is for value, not dest_typ.
1252 //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ);
1253 self.cx.context.new_cast(None, value, dest_typ)
1256 fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1257 //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty);
1258 let val_type = value.get_type();
1259 match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
1261 // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
1262 // a pointer, which is not supported by gccjit.
1263 return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
1266 // When they are not pointers, we want a transmute (or reinterpret_cast).
1267 //self.cx.context.new_cast(None, value, dest_ty)
1268 self.bitcast(value, dest_ty)
1270 (true, true) => self.cx.context.new_cast(None, value, dest_ty),
1271 (true, false) => unimplemented!(),
1276 fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
1277 if lhs.get_type() != rhs.get_type() {
1278 // NOTE: hack because we try to cast a vector type to the same vector type.
1279 if format!("{:?}", lhs.get_type()) != format!("{:?}", rhs.get_type()) {
1280 rhs = self.context.new_cast(None, rhs, lhs.get_type());
1283 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
1286 fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
1287 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
1290 /* Miscellaneous instructions */
1291 fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
1292 if flags.contains(MemFlags::NONTEMPORAL) {
1293 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
1294 let val = self.load(src.get_type(), src, src_align);
1295 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
1296 self.store_with_flags(val, ptr, dst_align, flags);
1299 let size = self.intcast(size, self.type_size_t(), false);
1300 let _is_volatile = flags.contains(MemFlags::VOLATILE);
1301 let dst = self.pointercast(dst, self.type_i8p());
1302 let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
1303 let memcpy = self.context.get_builtin_function("memcpy");
1304 let block = self.block.expect("block");
1305 // TODO: handle aligns and is_volatile.
1306 block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
1309 fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
1310 if flags.contains(MemFlags::NONTEMPORAL) {
1311 // HACK(nox): This is inefficient but there is no nontemporal memmove.
1312 let val = self.load(src.get_type(), src, src_align);
1313 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
1314 self.store_with_flags(val, ptr, dst_align, flags);
1317 let size = self.intcast(size, self.type_size_t(), false);
1318 let _is_volatile = flags.contains(MemFlags::VOLATILE);
1319 let dst = self.pointercast(dst, self.type_i8p());
1320 let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
1322 let memmove = self.context.get_builtin_function("memmove");
1323 let block = self.block.expect("block");
1324 // TODO: handle is_volatile.
1325 block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
1328 fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
1329 let _is_volatile = flags.contains(MemFlags::VOLATILE);
1330 let ptr = self.pointercast(ptr, self.type_i8p());
1331 let memset = self.context.get_builtin_function("memset");
1332 let block = self.block.expect("block");
1333 // TODO: handle aligns and is_volatile.
1334 //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type);
1335 let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
1336 let size = self.intcast(size, self.type_size_t(), false);
1337 block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
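// `select` has no conditional expression to lean on in gccjit (at the time of
// writing), so it materializes the choice as a diamond of blocks writing to a
// temporary (sketch):
//
//     if (cond) goto then; else goto else;
//     then:  selectVar = then_val; goto after;
//     else:  selectVar = else_val; goto after;
//     after: // the result is selectVar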
1340 fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
1341 let func = self.current_func();
1342 let variable = func.new_local(None, then_val.get_type(), "selectVar");
1343 let then_block = func.new_block("then");
1344 let else_block = func.new_block("else");
1345 let after_block = func.new_block("after");
1346 self.llbb().end_with_conditional(None, cond, then_block, else_block);
1348 then_block.add_assignment(None, variable, then_val);
1349 then_block.end_with_jump(None, after_block);
1351 if then_val.get_type() != else_val.get_type() {
1352 else_val = self.context.new_cast(None, else_val, then_val.get_type());
1354 else_block.add_assignment(None, variable, else_val);
1355 else_block.end_with_jump(None, after_block);
1357 // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
1358 // state need to be updated.
1359 self.block = Some(after_block);
1360 *self.cx.current_block.borrow_mut() = Some(after_block);
1362 variable.to_rvalue()
1366 fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
1368 //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
1371 fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
1373 //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
1376 fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
1379 let elt_ty = self.cx.val_ty(elt);
1380 let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
1381 let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
1382 let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
1383 self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
1387 fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1388 // FIXME: it would be better if the API only called this on structs, not on arrays.
1389 assert_eq!(idx as usize as u64, idx);
1390 let value_type = aggregate_value.get_type();
1392 if value_type.is_array().is_some() {
1393 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1394 let element = self.context.new_array_access(None, aggregate_value, index);
1395 element.get_address(None)
1397 else if value_type.is_vector().is_some() {
1400 else if let Some(pointer_type) = value_type.get_pointee() {
1401 if let Some(struct_type) = pointer_type.is_struct() {
1402 // NOTE: hack to workaround a limitation of the rustc API: see comment on
1403 // CodegenCx.structs_as_pointer
1404 aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
1407 panic!("Unexpected type {:?}", value_type);
1410 else if let Some(struct_type) = value_type.is_struct() {
1411 aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
1414 panic!("Unexpected type {:?}", value_type);
1416 /*assert_eq!(idx as c_uint as u64, idx);
1417 unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
1420 fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1421 // FIXME: it would be better if the API only called this on structs, not on arrays.
1422 assert_eq!(idx as usize as u64, idx);
1423 let value_type = aggregate_value.get_type();
1426 if value_type.is_array().is_some() {
1427 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1428 self.context.new_array_access(None, aggregate_value, index)
1430 else if value_type.is_vector().is_some() {
1433 else if let Some(pointer_type) = value_type.get_pointee() {
1434 if let Some(struct_type) = pointer_type.is_struct() {
1435 // NOTE: hack to workaround a limitation of the rustc API: see comment on
1436 // CodegenCx.structs_as_pointer
1437 aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
1440 panic!("Unexpected type {:?}", value_type);
1444 panic!("Unexpected type {:?}", value_type);
1446 self.llbb().add_assignment(None, lvalue, value);
1451 fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
1454 llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
1458 fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
1461 llvm::LLVMSetCleanup(landing_pad, llvm::True);
1465 fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
1467 //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
1470 fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
1472 /*let name = const_cstr!("cleanuppad");
1474 llvm::LLVMRustBuildCleanupPad(
1477 args.len() as c_uint,
1482 Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
1485 fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
1488 unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
1489 ret.expect("LLVM does not have support for cleanupret")*/
1492 fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
1494 /*let name = const_cstr!("catchpad");
1496 llvm::LLVMRustBuildCatchPad(
1499 args.len() as c_uint,
1504 Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
1507 fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
1509 /*let name = const_cstr!("catchswitch");
1511 llvm::LLVMRustBuildCatchSwitch(
1515 num_handlers as c_uint,
1519 ret.expect("LLVM does not have support for catchswitch")*/
1522 fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
1525 llvm::LLVMRustAddHandler(catch_switch, handler);
1529 fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
1532 llvm::LLVMSetPersonalityFn(self.llfn(), personality);
1536 // Atomic Operations
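// rustc expects `atomic_cmpxchg` to yield a (value, success) pair. gccjit has
// no first-class pairs, so the method below fills a two-field struct, roughly
// (sketch):
//
//     expected = cmp;
//     success = __atomic_compare_exchange_N(dst, &expected, src, weak, order, failure_order);
//     result.success = success;
//     result.value = expected; // `expected` holds the old value after the call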
1537 fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
1538 let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
1539 self.llbb().add_assignment(None, expected, cmp);
1540 let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
1542 let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
1543 let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
1544 let align = Align::from_bits(64).expect("align"); // TODO: use the correct alignment.
1546 let value_type = result.to_rvalue().get_type();
1547 if let Some(struct_type) = value_type.is_struct() {
1548 self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
1549 // NOTE: since `success` contains the call to the intrinsic, it must be stored before
1550 // `expected` so that `expected` is only stored after the call.
1551 self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
1553 // TODO: handle when value is not a struct.
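// Except for max/min, which fall back to the `atomic_extremum` loop above,
// `atomic_rmw` maps directly onto GCC's fetch-and-op builtins, whose
// documented C shape is:
//
//     type __atomic_fetch_OP_N(type *ptr, type val, int memorder);
//
// returning the value *ptr held before the operation, which is exactly the
// result rustc expects from an atomic RMW.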
1558 fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
1559 let size = self.cx.int_width(src.get_type()) / 8;
1562 AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
1563 AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
1564 AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
1565 AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
1566 AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
1567 AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
1568 AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
1569 AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
1570 AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
1571 AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
1572 AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
1576 let atomic_function = self.context.get_builtin_function(name);
1577 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1579 let void_ptr_type = self.context.new_type::<*mut ()>();
1580 let volatile_void_ptr_type = void_ptr_type.make_volatile();
1581 let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
1582 // NOTE: not sure why, but the type of src does not match the builtin's parameter type, so cast to the expected type.
1583 let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
1584 let src = self.context.new_cast(None, src, new_src_type);
1585 let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
1586 self.context.new_cast(None, res, src.get_type())
1589 fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
1592 SynchronizationScope::SingleThread => "__atomic_signal_fence",
1593 SynchronizationScope::CrossThread => "__atomic_thread_fence",
1595 let thread_fence = self.context.get_builtin_function(name);
1596 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1597 self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
1600 fn set_invariant_load(&mut self, load: RValue<'gcc>) {
1601 // NOTE: hack to treat a vtable function pointer as a non-global-variable function pointer.
1602 self.normal_function_addresses.borrow_mut().insert(load);
1605 llvm::LLVMSetMetadata(
1607 llvm::MD_invariant_load as c_uint,
1608 llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
1613 fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
1615 //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
1618 fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
1620 //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
1623 fn call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
1624 // FIXME: remove once there is a proper API.
1625 let gcc_func = unsafe { std::mem::transmute(func) };
1626 if self.functions.borrow().values().any(|value| *value == gcc_func) {
1627 self.function_call(func, args, funclet)
1630 // If it's not a function that was defined, it's a function pointer.
1631 self.function_ptr_call(func, args, funclet)
1635 fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
1636 // FIXME: this does not zero-extend.
1637 if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
1638 // FIXME: hack because base::from_immediate converts i1 to i8.
1639 // Fix the code in codegen_ssa::base::from_immediate.
1642 //println!("zext: {:?} -> {:?}", value, dest_typ);
1643 self.context.new_cast(None, value, dest_typ)
1646 fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
1650 fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
1652 //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
1655 fn set_span(&mut self, _span: Span) {}
1657 fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
1658 if self.cx().val_ty(val) == self.cx().type_i1() {
1659 self.zext(val, self.cx().type_i8())
1666 fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
1667 if scalar.is_bool() {
1668 return self.trunc(val, self.cx().type_i1());
1673 fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
1677 fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
1681 fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
1684 "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
1685 fn_name, hash, num_counters, index
1688 let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
1689 let args = &[fn_name, hash, num_counters, index];
1690 let args = self.check_call("call", llfn, args);
1693 let _ = llvm::LLVMRustBuildCall(
1696 args.as_ptr() as *const &llvm::Value,
1697 args.len() as c_uint,
1704 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
1705 pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
1706 let return_type = v1.get_type();
1708 self.context.new_parameter(None, return_type, "v1"),
1709 self.context.new_parameter(None, return_type, "v2"),
1710 self.context.new_parameter(None, mask.get_type(), "mask"),
1712 let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
1713 self.context.new_call(None, shuffle, &[v1, v2, mask])
1717 impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
1718 fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
1719 // Forward to the `get_static` method of `CodegenCx`
1720 self.cx().get_static(def_id)
1724 impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
1725 fn param_env(&self) -> ParamEnv<'tcx> {
1730 impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
1731 fn target_spec(&self) -> &Target {
1732 &self.cx.target_spec()
1737 fn to_gcc_comparison(&self) -> ComparisonOp;
1740 impl ToGccComp for IntPredicate {
1741 fn to_gcc_comparison(&self) -> ComparisonOp {
1743 IntPredicate::IntEQ => ComparisonOp::Equals,
1744 IntPredicate::IntNE => ComparisonOp::NotEquals,
1745 IntPredicate::IntUGT => ComparisonOp::GreaterThan,
1746 IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
1747 IntPredicate::IntULT => ComparisonOp::LessThan,
1748 IntPredicate::IntULE => ComparisonOp::LessThanEquals,
1749 IntPredicate::IntSGT => ComparisonOp::GreaterThan,
1750 IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
1751 IntPredicate::IntSLT => ComparisonOp::LessThan,
1752 IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
1757 impl ToGccComp for RealPredicate {
1758 fn to_gcc_comparison(&self) -> ComparisonOp {
1759 // TODO: check that ordered vs non-ordered is respected.
1761 RealPredicate::RealPredicateFalse => unreachable!(),
1762 RealPredicate::RealOEQ => ComparisonOp::Equals,
1763 RealPredicate::RealOGT => ComparisonOp::GreaterThan,
1764 RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
1765 RealPredicate::RealOLT => ComparisonOp::LessThan,
1766 RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
1767 RealPredicate::RealONE => ComparisonOp::NotEquals,
1768 RealPredicate::RealORD => unreachable!(),
1769 RealPredicate::RealUNO => unreachable!(),
1770 RealPredicate::RealUEQ => ComparisonOp::Equals,
1771 RealPredicate::RealUGT => ComparisonOp::GreaterThan,
1772 RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
1773 RealPredicate::RealULT => ComparisonOp::LessThan,
1774 RealPredicate::RealULE => ComparisonOp::LessThanEquals,
1775 RealPredicate::RealUNE => ComparisonOp::NotEquals,
1776 RealPredicate::RealPredicateTrue => unreachable!(),
1782 #[allow(non_camel_case_types)]
1792 trait ToGccOrdering {
1793 fn to_gcc(self) -> i32;
1796 impl ToGccOrdering for AtomicOrdering {
1797 fn to_gcc(self) -> i32 {
1802 AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same.
1803 AtomicOrdering::Unordered => __ATOMIC_RELAXED,
1804 AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same.
1805 AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
1806 AtomicOrdering::Release => __ATOMIC_RELEASE,
1807 AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
1808 AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,