use std::borrow::Cow;
use std::cell::Cell;
use std::convert::TryFrom;
use std::ops::Deref;

use gccjit::FunctionType;
use gccjit::{
    BinaryOp,
    Block,
    ComparisonOp,
    Function,
    LValue,
    RValue,
    ToRValue,
    Type,
    UnaryOp,
};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
    BackendTypes,
    BaseTypeMethods,
    BuilderMethods,
    ConstMethods,
    DerivedTypeMethods,
    HasCodegen,
    LayoutTypeMethods,
    OverflowOp,
    StaticBuilderMethods,
};
use rustc_data_structures::stable_set::FxHashSet;
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_span::Span;
use rustc_span::def_id::DefId;
use rustc_target::abi::{
    self,
    call::FnAbi,
    Align,
    HasDataLayout,
    Size,
    TargetDataLayout,
    WrappingRange,
};
use rustc_target::spec::{HasTargetSpec, Target};

use crate::common::{SignType, TypeReflection, type_is_pointer};
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;

// TODO(antoyo): remove this variable.
static mut RETURN_VALUE_COUNT: usize = 0;

enum ExtremumOperation {
    Max,
    Min,
}

trait EnumClone {
    fn clone(&self) -> Self;
}

impl EnumClone for AtomicOrdering {
    fn clone(&self) -> Self {
        match *self {
            AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
            AtomicOrdering::Unordered => AtomicOrdering::Unordered,
            AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
            AtomicOrdering::Acquire => AtomicOrdering::Acquire,
            AtomicOrdering::Release => AtomicOrdering::Release,
            AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
            AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
        }
    }
}

pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
    pub cx: &'a CodegenCx<'gcc, 'tcx>,
    pub block: Block<'gcc>,
    stack_var_count: Cell<usize>,
}

impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
        Builder {
            cx,
            block,
            stack_var_count: Cell::new(0),
        }
    }
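
    // NOTE: there seems to be no __atomic_fetch_max/min builtin to call for atomic min/max,
    // so emulate them with a compare-and-swap loop: load the current value, compare it
    // against `src`, and retry until the compare_exchange succeeds.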
    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
        let size = src.get_type().get_size();

        let func = self.current_func();

        let load_ordering =
            match order {
                // TODO(antoyo): does this make sense?
                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
                _ => order.clone(),
            };
        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
        self.llbb().add_assignment(None, previous_var, previous_value);
        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());

        let while_block = func.new_block("while");
        let after_block = func.new_block("after_while");
        self.llbb().end_with_jump(None, while_block);

        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current
        // block in the state needs to be updated.
        self.switch_to_block(while_block);

        let comparison_operator =
            match operation {
                ExtremumOperation::Max => ComparisonOp::LessThan,
                ExtremumOperation::Min => ComparisonOp::GreaterThan,
            };

        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);

        while_block.end_with_conditional(None, cond, while_block, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current block in
        // the state needs to be updated.
        self.switch_to_block(after_block);

        return_value.to_rvalue()
    }

    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
        let size = src.get_type().get_size();
        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);

        let void_ptr_type = self.context.new_type::<*mut ()>();
        let volatile_void_ptr_type = void_ptr_type.make_volatile();
        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);

        // NOTE: not sure why, but we have the wrong type here.
        let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
        let src = self.context.new_cast(None, src, int_type);
        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
    }

    pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
        self.llbb().add_assignment(None, lvalue, value);
    }
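
    // Cast the arguments when their types do not match the declared parameter types: rustc
    // sometimes passes values whose gccjit type differs from the parameter type, which
    // libgccjit rejects (see the mismatching-types error quoted in atomic_store below).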
    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
        let mut all_args_match = true;
        let mut param_types = vec![];
        let param_count = func.get_param_count();
        for (index, arg) in args.iter().enumerate().take(param_count) {
            let param = func.get_param(index as i32);
            let param = param.to_rvalue().get_type();
            if param != arg.get_type() {
                all_args_match = false;
            }
            param_types.push(param);
        }

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_types
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(_i, (expected_ty, &actual_val))| {
                let actual_ty = actual_val.get_type();
                if expected_ty != actual_ty {
                    self.bitcast(actual_val, expected_ty)
                }
                else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
        let mut all_args_match = true;
        let mut param_types = vec![];
        let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
        for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
            let param = gcc_func.get_param_type(index);
            if param != arg.get_type() {
                all_args_match = false;
            }
            param_types.push(param);
        }

        let mut on_stack_param_indices = FxHashSet::default();
        if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
            on_stack_param_indices = indices.clone();
        }

        if all_args_match {
            return Cow::Borrowed(args);
        }

        let casted_args: Vec<_> = param_types
            .into_iter()
            .zip(args.iter())
            .enumerate()
            .map(|(index, (expected_ty, &actual_val))| {
                let actual_ty = actual_val.get_type();
                if expected_ty != actual_ty {
                    if on_stack_param_indices.contains(&index) {
                        actual_val.dereference(None).to_rvalue()
                    }
                    else {
                        self.bitcast(actual_val, expected_ty)
                    }
                }
                else {
                    actual_val
                }
            })
            .collect();

        Cow::Owned(casted_args)
    }

    fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
        let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
        let stored_ty = self.cx.val_ty(val);
        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);

        if dest_ptr_ty == stored_ptr_ty {
            ptr
        }
        else {
            self.bitcast(ptr, stored_ptr_ty)
        }
    }

    pub fn current_func(&self) -> Function<'gcc> {
        self.block.get_function()
    }
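
    // NOTE: `func` arrives as an RValue because the rustc API has a single Value type for
    // both plain values and functions; it is converted back to a Function before emitting
    // the call.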
    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // TODO(antoyo): remove when the API supports a different type for functions.
        let func: Function<'gcc> = self.cx.rvalue_as_function(func);
        let args = self.check_call("call", func, args);

        // gccjit requires using the result of functions, even when it's not used.
        // That's why we assign the result to a local or call add_eval().
        let return_type = func.get_return_type();
        let void_type = self.context.new_type::<()>();
        let current_func = self.block.get_function();
        if return_type != void_type {
            unsafe { RETURN_VALUE_COUNT += 1 };
            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
            self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
            result.to_rvalue()
        }
        else {
            self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
            // Return a dummy value when there is no return value.
            self.context.new_rvalue_from_long(self.isize_type, 0)
        }
    }

    fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        let args = self.check_ptr_call("call", func_ptr, args);

        // gccjit requires using the result of functions, even when it's not used.
        // That's why we assign the result to a local or call add_eval().
        let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
        let mut return_type = gcc_func.get_return_type();
        let void_type = self.context.new_type::<()>();
        let current_func = self.block.get_function();

        // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
        if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
            return_type = self.int_type;
        }

        if return_type != void_type {
            unsafe { RETURN_VALUE_COUNT += 1 };
            let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
            self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
            result.to_rvalue()
        }
        else {
            if gcc_func.get_param_count() == 0 {
                // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
            }
            else {
                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
            }
            // Return a dummy value when there is no return value.
            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
            result.to_rvalue()
        }
    }

    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // gccjit requires using the result of functions, even when it's not used.
        // That's why we assign the result to a local.
        let return_type = self.context.new_type::<bool>();
        let current_func = self.block.get_function();
        // TODO(antoyo): return the new_call() directly? Since the overflow function has no side effects.
        unsafe { RETURN_VALUE_COUNT += 1 };
        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
        result.to_rvalue()
    }
}

impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
    type CodegenCx = CodegenCx<'gcc, 'tcx>;
}

impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.cx.tcx()
    }
}

impl HasDataLayout for Builder<'_, '_, '_> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.cx.data_layout()
    }
}

impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type LayoutOfResult = TyAndLayout<'tcx>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
        self.cx.handle_layout_err(err, span, ty)
    }
}

impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;

    #[inline]
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> ! {
        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
    }
}

impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
    type Target = CodegenCx<'gcc, 'tcx>;

    fn deref(&self) -> &Self::Target {
        self.cx
    }
}

impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
    type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
    type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
    type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
    type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
    type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;

    type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
    type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
    type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
}

impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
    fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
        Builder::with_cx(cx, block)
    }

    fn llbb(&self) -> Block<'gcc> {
        self.block
    }

    fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
        let func = cx.rvalue_as_function(func);
        func.new_block(name)
    }

    fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
        let func = self.current_func();
        func.new_block(name)
    }

    fn switch_to_block(&mut self, block: Self::BasicBlock) {
        self.block = block;
    }

    fn ret_void(&mut self) {
        self.llbb().end_with_void_return(None)
    }

    fn ret(&mut self, value: RValue<'gcc>) {
        let value =
            if self.structs_as_pointer.borrow().contains(&value) {
                // NOTE: hack to work around a limitation of the rustc API: see comment on
                // CodegenCx.structs_as_pointer
                value.dereference(None).to_rvalue()
            }
            else {
                value
            };
        self.llbb().end_with_return(None, value);
    }

    fn br(&mut self, dest: Block<'gcc>) {
        self.llbb().end_with_jump(None, dest)
    }

    fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
        self.llbb().end_with_conditional(None, cond, then_block, else_block)
    }

    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
        let mut gcc_cases = vec![];
        let typ = self.val_ty(value);
        for (on_val, dest) in cases {
            let on_val = self.const_uint_big(typ, on_val);
            gcc_cases.push(self.context.new_case(on_val, on_val, dest));
        }
        self.block.end_with_switch(None, value, default_block, &gcc_cases);
    }

    fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
        // TODO(bjorn3): Properly implement unwinding.
        let call_site = self.call(typ, func, args, None);
        let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
        self.llbb().end_with_conditional(None, condition, then, catch);
        call_site
    }

    fn unreachable(&mut self) {
        let func = self.context.get_builtin_function("__builtin_unreachable");
        self.block.add_eval(None, self.context.new_call(None, func, &[]));
        let return_type = self.block.get_function().get_return_type();
        let void_type = self.context.new_type::<()>();
        if return_type == void_type {
            self.block.end_with_void_return(None)
        }
        else {
            let return_value = self.current_func()
                .new_local(None, return_type, "unreachableReturn");
            self.block.end_with_return(None, return_value)
        }
    }
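
    // NOTE: the integer operations below are delegated to gcc_* helpers (cf. gcc_int_cast
    // and gcc_checked_binop used elsewhere in this file), which presumably also take care
    // of the 128-bit integers that libgccjit does not handle natively (see the u128/i128
    // special case in alloca below).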
    fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_add(a, b)
    }

    fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a + b
    }

    fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_sub(a, b)
    }

    fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a - b
    }

    fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_mul(a, b)
    }

    fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a * b
    }

    fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_udiv(a, b)
    }

    fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): convert the arguments to unsigned?
        // TODO(antoyo): poison if not exact.
        self.gcc_udiv(a, b)
    }

    fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_sdiv(a, b)
    }

    fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): poison if not exact.
        // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
        // should be the same.
        let typ = a.get_type().to_signed(self);
        let b = self.context.new_cast(None, b, typ);
        a / b
    }

    fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        a / b
    }

    fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_urem(a, b)
    }

    fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_srem(a, b)
    }

    fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        if a.get_type() == self.cx.float_type {
            let fmodf = self.context.get_builtin_function("fmodf");
            // FIXME(antoyo): this seems to produce the wrong result.
            return self.context.new_call(None, fmodf, &[a, b]);
        }
        assert_eq!(a.get_type(), self.cx.double_type);

        let fmod = self.context.get_builtin_function("fmod");
        return self.context.new_call(None, fmod, &[a, b]);
    }

    fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_shl(a, b)
    }

    fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_lshr(a, b)
    }

    fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check whether the behavior is an arithmetic shift for >>.
        // It seems to be if the value is signed.
        self.gcc_lshr(a, b)
    }

    fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_and(a, b)
    }

    fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_or(a, b)
    }

    fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_xor(a, b)
    }

    fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_neg(a)
    }

    fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
    }

    fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_not(a)
    }

    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_add(a, b)
    }

    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_add(a, b)
    }

    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_sub(a, b)
    }

    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): should generate a poison value?
        self.gcc_sub(a, b)
    }

    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_mul(a, b)
    }

    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_mul(a, b)
    }

    fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
        self.gcc_checked_binop(oop, typ, lhs, rhs)
    }

    fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
        // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
        // Ideally, we shouldn't need to do this check.
        let aligned_type =
            if ty == self.cx.u128_type || ty == self.cx.i128_type {
                ty
            }
            else {
                ty.get_aligned(align.bytes())
            };
        // TODO(antoyo): it might be better to return a LValue, but fixing the rustc API is non-trivial.
        self.stack_var_count.set(self.stack_var_count.get() + 1);
        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
    }

    fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
        unimplemented!();
    }

    fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
        unimplemented!();
    }

    fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        let block = self.llbb();
        let function = block.get_function();
        // NOTE: instead of returning the dereference here, we have to assign it to a variable in
        // the current basic block. Otherwise, it could be used in another basic block, causing a
        // dereference after a drop, for instance.
        // TODO(antoyo): handle align.
        let deref = ptr.dereference(None).to_rvalue();
        let value_type = deref.get_type();
        unsafe { RETURN_VALUE_COUNT += 1 };
        let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
        block.add_assignment(None, loaded_value, deref);
        loaded_value.to_rvalue()
    }

    fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
        ptr.dereference(None).to_rvalue()
    }
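
    // Atomic loads go through the __atomic_load_N builtins; the rustc ordering is lowered
    // to GCC's __ATOMIC_* constants by to_gcc() (see ToGccOrdering at the bottom of this file).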
    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
        // TODO(antoyo): use ty.
        // TODO(antoyo): handle alignment.
        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());

        let volatile_const_void_ptr_type = self.context.new_type::<()>()
            .make_const()
            .make_volatile()
            .make_pointer();
        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
        self.context.new_call(None, atomic_load, &[ptr, ordering])
    }

    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

        if place.layout.is_zst() {
            return OperandRef::new_zst(self, place.layout);
        }

        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                abi::Int(..) => {
                    if !scalar.is_always_valid(bx) {
                        bx.range_metadata(load, scalar.valid_range);
                    }
                }
                abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        }

        let val =
            if let Some(llextra) = place.llextra {
                OperandValue::Ref(place.llval, Some(llextra), place.align)
            }
            else if place.layout.is_gcc_immediate() {
                let load = self.load(place.llval.get_type(), place.llval, place.align);
                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar);
                }
                OperandValue::Immediate(self.to_immediate(load, place.layout))
            }
            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
                let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
                let pair_type = place.layout.gcc_type(self, false);

                let mut load = |i, scalar: &abi::Scalar, align| {
                    let llptr = self.struct_gep(pair_type, place.llval, i as u64);
                    let load = self.load(llptr.get_type(), llptr, align);
                    scalar_load_metadata(self, load, scalar);
                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
                };

                OperandValue::Pair(
                    load(0, a, place.align),
                    load(1, b, place.align.restrict_for_offset(b_offset)),
                )
            }
            else {
                OperandValue::Ref(place.llval, None, place.align)
            };

        OperandRef { val, layout: place.layout }
    }
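
    // Lower the repeat of an operand as an explicit loop over the destination elements:
    // the header block tests the end condition, the body stores one element and advances
    // the pointer, and control continues in the next block.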
    fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
        let start = dest.project_index(&mut self, zero).llval;
        let end = dest.project_index(&mut self, count).llval;

        let header_bb = self.append_sibling_block("repeat_loop_header");
        let body_bb = self.append_sibling_block("repeat_loop_body");
        let next_bb = self.append_sibling_block("repeat_loop_next");

        let ptr_type = start.get_type();
        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
        let current_val = current.to_rvalue();
        self.assign(current, start);

        self.br(header_bb);

        self.switch_to_block(header_bb);
        let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
        self.cond_br(keep_going, body_bb, next_bb);

        self.switch_to_block(body_bb);
        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));

        let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
        self.llbb().add_assignment(None, current, next);
        self.br(header_bb);

        self.switch_to_block(next_bb);
        self
    }

    fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
        // TODO(antoyo)
    }

    fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
        // TODO(antoyo)
    }

    fn type_metadata(&mut self, _function: RValue<'gcc>, _typeid: String) {
        // Unsupported.
    }

    fn typeid_metadata(&mut self, _typeid: String) -> RValue<'gcc> {
        // Unsupported.
        self.context.new_rvalue_from_int(self.int_type, 0)
    }

    fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
        self.store_with_flags(val, ptr, align, MemFlags::empty())
    }

    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
        let ptr = self.check_store(val, ptr);
        self.llbb().add_assignment(None, ptr.dereference(None), val);
        // TODO(antoyo): handle align and flags.
        // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
        self.cx.context.new_rvalue_zero(self.type_i32())
    }

    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
        // TODO(antoyo): handle alignment.
        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
        let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        let volatile_const_void_ptr_type = self.context.new_type::<()>()
            .make_volatile()
            .make_pointer();
        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);

        // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
        // the following cast is required to avoid this error:
        // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
        let int_type = atomic_store.get_param(1).to_rvalue().get_type();
        let value = self.context.new_cast(None, value, int_type);
        self.llbb()
            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
    }

    fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
        let mut result = ptr;
        for index in indices {
            result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
        }
        result
    }

    fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
        // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep.
        // TODO(antoyo): specify inbounds somehow.
        match indices.len() {
            1 => {
                self.context.new_array_access(None, ptr, indices[0]).get_address(None)
            },
            2 => {
                let array = ptr.dereference(None); // TODO(antoyo): assert that the first index is 0?
                self.context.new_array_access(None, array, indices[1]).get_address(None)
            },
            _ => unimplemented!(),
        }
    }

    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value = ptr.dereference(None).to_rvalue();

        if value_type.dyncast_array().is_some() {
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, value, index);
            element.get_address(None)
        }
        else if let Some(vector_type) = value_type.dyncast_vector() {
            let array_type = vector_type.get_element_type().make_pointer();
            let array = self.bitcast(ptr, array_type);
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, array, index);
            element.get_address(None)
        }
        else if let Some(struct_type) = value_type.is_struct() {
            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
        }
        else {
            panic!("Unexpected type {:?}", value_type);
        }
    }

    /* Casts */
    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check that it indeed truncates the value.
        self.gcc_int_cast(value, dest_ty)
    }

    fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check that it indeed sign-extends the value.
        if dest_ty.dyncast_vector().is_some() {
            // TODO(antoyo): nothing to do as it is only for LLVM?
            return value;
        }
        self.context.new_cast(None, value, dest_ty)
    }

    fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_float_to_uint_cast(value, dest_ty)
    }

    fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_float_to_int_cast(value, dest_ty)
    }

    fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_uint_to_float_cast(value, dest_ty)
    }

    fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.gcc_int_to_float_cast(value, dest_ty)
    }

    fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): make sure it truncates.
        self.context.new_cast(None, value, dest_ty)
    }

    fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.context.new_cast(None, value, dest_ty)
    }

    fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
        self.intcast(usize_value, dest_ty, false)
    }

    fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let usize_value = self.intcast(value, self.cx.type_isize(), false);
        self.cx.const_bitcast(usize_value, dest_ty)
    }

    fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        self.cx.const_bitcast(value, dest_ty)
    }

    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
        // NOTE: is_signed is for value, not dest_typ.
        self.gcc_int_cast(value, dest_typ)
    }

    fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        let val_type = value.get_type();
        match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
            (false, true) => {
                // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
                // a pointer, which is not supported by gccjit.
                return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
            },
            (false, false) => {
                // When they are not pointers, we want a transmute (or reinterpret_cast).
                self.bitcast(value, dest_ty)
            },
            (true, true) => self.cx.context.new_cast(None, value, dest_ty),
            (true, false) => unimplemented!(),
        }
    }

    /* Comparisons */
    fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        self.gcc_icmp(op, lhs, rhs)
    }

    fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
    }

    /* Miscellaneous instructions */
    fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
        let size = self.intcast(size, self.type_size_t(), false);
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
        let memcpy = self.context.get_builtin_function("memcpy");
        // TODO(antoyo): handle aligns and is_volatile.
        self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
    }

    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memmove.
            let val = self.load(src.get_type(), src, src_align);
            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
            self.store_with_flags(val, ptr, dst_align, flags);
            return;
        }
        let size = self.intcast(size, self.type_size_t(), false);
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let dst = self.pointercast(dst, self.type_i8p());
        let src = self.pointercast(src, self.type_ptr_to(self.type_void()));

        let memmove = self.context.get_builtin_function("memmove");
        // TODO(antoyo): handle is_volatile.
        self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
    }

    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
        let _is_volatile = flags.contains(MemFlags::VOLATILE);
        let ptr = self.pointercast(ptr, self.type_i8p());
        let memset = self.context.get_builtin_function("memset");
        // TODO(antoyo): handle align and is_volatile.
        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
        let size = self.intcast(size, self.type_size_t(), false);
        self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
    }
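
    // The libgccjit API used here has no conditional (select) expression, so lower select
    // to a diamond of basic blocks that assign either value to a local.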
    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
        let func = self.current_func();
        let variable = func.new_local(None, then_val.get_type(), "selectVar");
        let then_block = func.new_block("then");
        let else_block = func.new_block("else");
        let after_block = func.new_block("after");
        self.llbb().end_with_conditional(None, cond, then_block, else_block);

        then_block.add_assignment(None, variable, then_val);
        then_block.end_with_jump(None, after_block);

        if !then_val.get_type().is_compatible_with(else_val.get_type()) {
            else_val = self.context.new_cast(None, else_val, then_val.get_type());
        }
        else_block.add_assignment(None, variable, else_val);
        else_block.end_with_jump(None, after_block);

        // NOTE: since jumps were added in a place rustc does not expect, the current block in
        // the state needs to be updated.
        self.switch_to_block(after_block);

        variable.to_rvalue()
    }

    fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
        unimplemented!();
    }

    fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value_type = aggregate_value.get_type();

        if value_type.dyncast_array().is_some() {
            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
            let element = self.context.new_array_access(None, aggregate_value, index);
            element.get_address(None)
        }
        else if value_type.dyncast_vector().is_some() {
            panic!();
        }
        else if let Some(pointer_type) = value_type.get_pointee() {
            if let Some(struct_type) = pointer_type.is_struct() {
                // NOTE: hack to work around a limitation of the rustc API: see comment on
                // CodegenCx.structs_as_pointer
                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
            }
            else {
                panic!("Unexpected type {:?}", value_type);
            }
        }
        else if let Some(struct_type) = value_type.is_struct() {
            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
        }
        else {
            panic!("Unexpected type {:?}", value_type);
        }
    }

    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
        // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
        assert_eq!(idx as usize as u64, idx);
        let value_type = aggregate_value.get_type();

        let lvalue =
            if value_type.dyncast_array().is_some() {
                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
                self.context.new_array_access(None, aggregate_value, index)
            }
            else if value_type.dyncast_vector().is_some() {
                panic!();
            }
            else if let Some(pointer_type) = value_type.get_pointee() {
                if let Some(struct_type) = pointer_type.is_struct() {
                    // NOTE: hack to work around a limitation of the rustc API: see comment on
                    // CodegenCx.structs_as_pointer
                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
                }
                else {
                    panic!("Unexpected type {:?}", value_type);
                }
            }
            else {
                panic!("Unexpected type {:?}", value_type);
            };

        let lvalue_type = lvalue.to_rvalue().get_type();
        let value =
            // NOTE: sometimes, rustc will create a value with the wrong type.
            if lvalue_type != value.get_type() {
                self.context.new_cast(None, value, lvalue_type)
            }
            else {
                value
            };

        self.llbb().add_assignment(None, lvalue, value);

        aggregate_value
    }

    fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
        // TODO(antoyo)
    }

    fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> {
        let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1");
        let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
        let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
        self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
            .to_rvalue()
        // TODO(antoyo): Properly implement unwinding.
        // The code above is just to make the compilation work as it seems
        // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort.
    }

    fn resume(&mut self, _exn: RValue<'gcc>) {
        // TODO(bjorn3): Properly implement unwinding.
        self.unreachable();
    }

    fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
        unimplemented!();
    }

    fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) {
        unimplemented!();
    }

    fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
        unimplemented!();
    }

    fn catch_switch(
        &mut self,
        _parent: Option<RValue<'gcc>>,
        _unwind: Option<Block<'gcc>>,
        _handlers: &[Block<'gcc>],
    ) -> RValue<'gcc> {
        unimplemented!();
    }

    // Atomic Operations
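    // rustc expects atomic_cmpxchg to produce a (value, success) pair, while the
    // __atomic_compare_exchange builtin only returns the success flag and writes the old
    // value back through `expected`, so the pair is rebuilt manually in a struct local.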
    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
        self.llbb().add_assignment(None, expected, cmp);
        let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);

        let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
        let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
        let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use a good align.

        let value_type = result.to_rvalue().get_type();
        if let Some(struct_type) = value_type.is_struct() {
            self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
            // NOTE: since success contains the call to the intrinsic, it must be stored before
            // expected so that we store expected after the call.
            self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
        }
        // TODO(antoyo): handle when the value is not a struct.

        result.to_rvalue()
    }

    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
        let size = src.get_type().get_size();
        let name =
            match op {
                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
            };

        let atomic_function = self.context.get_builtin_function(name);
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());

        let void_ptr_type = self.context.new_type::<*mut ()>();
        let volatile_void_ptr_type = void_ptr_type.make_volatile();
        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
        // FIXME(antoyo): not sure why, but we have the wrong type here.
        let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
        let src = self.context.new_cast(None, src, new_src_type);
        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
        self.context.new_cast(None, res, src.get_type())
    }

    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
        let name =
            match scope {
                SynchronizationScope::SingleThread => "__atomic_signal_fence",
                SynchronizationScope::CrossThread => "__atomic_thread_fence",
            };
        let thread_fence = self.context.get_builtin_function(name);
        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
    }

    fn set_invariant_load(&mut self, load: RValue<'gcc>) {
        // NOTE: Hack to consider vtable function pointers as non-global-variable function pointers.
        self.normal_function_addresses.borrow_mut().insert(load);
    }

    fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
        // TODO(antoyo)
    }

    fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
        // TODO(antoyo)
    }

    fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
        // FIXME(antoyo): remove when having a proper API.
        let gcc_func = unsafe { std::mem::transmute(func) };
        if self.functions.borrow().values().any(|value| *value == gcc_func) {
            self.function_call(func, args, funclet)
        }
        else {
            // If it's not a function that was defined, it's a function pointer.
            self.function_ptr_call(func, args, funclet)
        }
    }

    fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
        // FIXME(antoyo): this does not zero-extend.
        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
            // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
            // Fix the code in codegen_ssa::base::from_immediate.
            return value;
        }
        self.gcc_int_cast(value, dest_typ)
    }

    fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
        self.cx
    }

    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
        // TODO(antoyo)
    }

    fn set_span(&mut self, _span: Span) {}

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
        if self.cx().val_ty(val) == self.cx().type_i1() {
            self.zext(val, self.cx().type_i8())
        }
        else {
            val
        }
    }

    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
        if scalar.is_bool() {
            return self.trunc(val, self.cx().type_i1());
        }
        val
    }

    fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
        None
    }

    fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
        None
    }

    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
        unimplemented!();
    }
}
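
// NOTE: shuffle_vector declares _mm_shuffle_epi8 (the SSSE3 pshufb intrinsic) as an extern
// function and calls it directly, so it assumes the target provides that symbol.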
impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
        let return_type = v1.get_type();
        let params = [
            self.context.new_parameter(None, return_type, "v1"),
            self.context.new_parameter(None, return_type, "v2"),
            self.context.new_parameter(None, mask.get_type(), "mask"),
        ];
        let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
        self.context.new_call(None, shuffle, &[v1, v2, mask])
    }
}

impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
    fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
        // Forward to the `get_static` method of `CodegenCx`.
        self.cx().get_static(def_id).get_address(None)
    }
}

impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
    fn param_env(&self) -> ParamEnv<'tcx> {
        self.cx.param_env()
    }
}

impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
    fn target_spec(&self) -> &Target {
        &self.cx.target_spec()
    }
}
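
// Map rustc's comparison predicates to GCC comparison operators. The signed and unsigned
// integer predicates map to the same ComparisonOp because, in GCC, signedness comes from
// the operand types rather than from the operator itself.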
pub trait ToGccComp {
    fn to_gcc_comparison(&self) -> ComparisonOp;
}

impl ToGccComp for IntPredicate {
    fn to_gcc_comparison(&self) -> ComparisonOp {
        match *self {
            IntPredicate::IntEQ => ComparisonOp::Equals,
            IntPredicate::IntNE => ComparisonOp::NotEquals,
            IntPredicate::IntUGT => ComparisonOp::GreaterThan,
            IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
            IntPredicate::IntULT => ComparisonOp::LessThan,
            IntPredicate::IntULE => ComparisonOp::LessThanEquals,
            IntPredicate::IntSGT => ComparisonOp::GreaterThan,
            IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
            IntPredicate::IntSLT => ComparisonOp::LessThan,
            IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
        }
    }
}

impl ToGccComp for RealPredicate {
    fn to_gcc_comparison(&self) -> ComparisonOp {
        // TODO(antoyo): check that ordered vs non-ordered is respected.
        match *self {
            RealPredicate::RealPredicateFalse => unreachable!(),
            RealPredicate::RealOEQ => ComparisonOp::Equals,
            RealPredicate::RealOGT => ComparisonOp::GreaterThan,
            RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
            RealPredicate::RealOLT => ComparisonOp::LessThan,
            RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
            RealPredicate::RealONE => ComparisonOp::NotEquals,
            RealPredicate::RealORD => unreachable!(),
            RealPredicate::RealUNO => unreachable!(),
            RealPredicate::RealUEQ => ComparisonOp::Equals,
            RealPredicate::RealUGT => ComparisonOp::GreaterThan,
            RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
            RealPredicate::RealULT => ComparisonOp::LessThan,
            RealPredicate::RealULE => ComparisonOp::LessThanEquals,
            RealPredicate::RealUNE => ComparisonOp::NotEquals,
            RealPredicate::RealPredicateTrue => unreachable!(),
        }
    }
}
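
// Mirror GCC's __ATOMIC_* memory-order constants so that to_gcc() can pass rustc's atomic
// orderings to the __atomic_* builtins as plain integers.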
#[allow(non_camel_case_types)]
enum MemOrdering {
    __ATOMIC_RELAXED,
    __ATOMIC_CONSUME,
    __ATOMIC_ACQUIRE,
    __ATOMIC_RELEASE,
    __ATOMIC_ACQ_REL,
    __ATOMIC_SEQ_CST,
}

trait ToGccOrdering {
    fn to_gcc(self) -> i32;
}

impl ToGccOrdering for AtomicOrdering {
    fn to_gcc(self) -> i32 {
        use MemOrdering::*;

        let ordering =
            match self {
                AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
                AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
                AtomicOrdering::Release => __ATOMIC_RELEASE,
                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
            };
        ordering as i32
    }
}