git add .
echo "[GIT] commit"
-# This is needed on virgin system where nothing is configured.
+# This is needed on systems where nothing is configured.
# git really needs something here, or it will fail.
# Even using --author is not enough.
git config user.email || git config user.email "none@example.com"
fi
export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'
-#export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot -Clto=fat -Cembed-bitcode=yes'
-# FIXME remove once the atomic shim is gone
+# FIXME(antoyo): remove once the atomic shim is gone
if [[ `uname` == 'Darwin' ]]; then
export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
fi
export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
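# With the flags and library paths above exported, routing codegen through
# libgccjit needs no extra arguments; a usage sketch (assuming a test crate in
# the current directory, since cargo is what reads RUSTFLAGS):
#   cargo build --release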
-
-export CG_CLIF_DISPLAY_CG_TIME=1
-export CG_CLIF_INCR_CACHE_DISABLED=1
}
}
- // TODO: not sure about this assert. ABC is not defined, so should it be really 0?
+ // TODO(antoyo): to make this work, support weak linkage.
//unsafe { assert_eq!(ABC as usize, 0); }
&mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
let stderr = ::std::io::stderr();
let mut stderr = stderr.lock();
- // FIXME: this thread panics.
std::thread::spawn(move || {
println!("Hello from another thread!");
});
assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
assert_eq!(32767i16, 32767i16.saturating_add(1));
- /*assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
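// Worked check for the two assertions above: the first literal has 26 zero
// bits before its highest 1, so leading_zeros() == 26; the second ends in
// ...10000000, i.e. 7 zero bits below its lowest 1, so trailing_zeros() == 7.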
let _d = 0i128.checked_div(2i128);
assert_eq!(houndred_i128 as f32, 100.0);
assert_eq!(houndred_i128 as f64, 100.0);
assert_eq!(houndred_f32 as i128, 100);
- assert_eq!(houndred_f64 as i128, 100);*/
+ assert_eq!(houndred_f64 as i128, 100);
let _a = 1u32 << 2u8;
+/home/bouanto/Ordinateur/Programmation/Projets/gcc-build/build/gcc
impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
- // TODO
- //fn_abi.apply_attrs_callsite(self, callsite)
+ // TODO(antoyo)
}
fn get_param(&self, index: usize) -> Self::Value {
}
pub trait FnAbiGccExt<'gcc, 'tcx> {
- // TODO: return a function pointer type instead?
+ // TODO(antoyo): return a function pointer type instead?
fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
- /*fn llvm_cconv(&self) -> llvm::CallConv;
- fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
- fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);*/
}
impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
continue;
}
PassMode::Indirect { extra_attrs: Some(_), .. } => {
- /*let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
- let ptr_layout = cx.layout_of(ptr_ty);
- argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 0, true));
- argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 1, true));*/
unimplemented!();
- //continue;
}
PassMode::Cast(cast) => cast.gcc_type(cx),
PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
pointer_type
}
-
- /*fn llvm_cconv(&self) -> llvm::CallConv {
- match self.conv {
- Conv::C | Conv::Rust => llvm::CCallConv,
- Conv::AmdGpuKernel => llvm::AmdGpuKernel,
- Conv::ArmAapcs => llvm::ArmAapcsCallConv,
- Conv::Msp430Intr => llvm::Msp430Intr,
- Conv::PtxKernel => llvm::PtxKernel,
- Conv::X86Fastcall => llvm::X86FastcallCallConv,
- Conv::X86Intr => llvm::X86_Intr,
- Conv::X86Stdcall => llvm::X86StdcallCallConv,
- Conv::X86ThisCall => llvm::X86_ThisCall,
- Conv::X86VectorCall => llvm::X86_VectorCall,
- Conv::X86_64SysV => llvm::X86_64_SysV,
- Conv::X86_64Win64 => llvm::X86_64_Win64,
- }
- }
-
- fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
- // FIXME(eddyb) can this also be applied to callsites?
- if self.ret.layout.abi.is_uninhabited() {
- llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
- }
-
- // FIXME(eddyb, wesleywiser): apply this to callsites as well?
- if !self.can_unwind {
- llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
- }
-
- let mut i = 0;
- let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
- attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
- i += 1;
- };
- match self.ret.mode {
- PassMode::Direct(ref attrs) => {
- attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
- }
- PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(cx))),
- _ => {}
- }
- for arg in &self.args {
- if arg.pad.is_some() {
- apply(&ArgAttributes::new(), None);
- }
- match arg.mode {
- PassMode::Ignore => {}
- PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
- apply(attrs, Some(arg.layout.gcc_type(cx)))
- }
- PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
- apply(attrs, None);
- apply(extra_attrs, None);
- }
- PassMode::Pair(ref a, ref b) => {
- apply(a, None);
- apply(b, None);
- }
- PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
- }
- }
- }
-
- fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
- // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
-
- let mut i = 0;
- let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
- attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
- i += 1;
- };
- match self.ret.mode {
- PassMode::Direct(ref attrs) => {
- attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
- }
- PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(bx))),
- _ => {}
- }
- if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
- // If the value is a boolean, the range is 0..2 and that ultimately
- // become 0..0 when the type becomes i1, which would be rejected
- // by the LLVM verifier.
- if let Int(..) = scalar.value {
- if !scalar.is_bool() {
- let range = scalar.valid_range_exclusive(bx);
- if range.start != range.end {
- bx.range_metadata(callsite, range);
- }
- }
- }
- }
- for arg in &self.args {
- if arg.pad.is_some() {
- apply(&ArgAttributes::new(), None);
- }
- match arg.mode {
- PassMode::Ignore => {}
- PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
- apply(attrs, Some(arg.layout.gcc_type(bx)))
- }
- PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
- apply(attrs, None);
- apply(extra_attrs, None);
- }
- PassMode::Pair(ref a, ref b) => {
- apply(a, None);
- apply(b, None);
- }
- PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
- }
- }
-
- let cconv = self.llvm_cconv();
- if cconv != llvm::CCallConv {
- llvm::SetInstructionCallConv(callsite, cconv);
- }
- }*/
}
-//use crate::attributes;
use gccjit::{FunctionType, ToRValue};
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
use rustc_middle::bug;
let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
if tcx.sess.target.options.default_hidden_visibility {
- //llvm::LLVMRustSetVisibility(func, llvm::Visibility::Hidden);
+ // TODO(antoyo): set visibility.
}
if tcx.sess.must_emit_unwind_tables() {
- // TODO
- //attributes::emit_uwtable(func, true);
+ // TODO(antoyo): emit unwind tables.
}
let callee = kind.fn_name(method.name);
.map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
.collect();
let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
- //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+ // TODO(antoyo): set visibility.
let block = func.new_block("entry");
impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, mut _inputs: Vec<RValue<'gcc>>, _span: Span) -> bool {
- // TODO
+ // TODO(antoyo)
return true;
-
- /*let mut ext_constraints = vec![];
- let mut output_types = vec![];
-
- // Prepare the output operands
- let mut indirect_outputs = vec![];
- for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
- if out.is_rw {
- let operand = self.load_operand(place);
- if let OperandValue::Immediate(_) = operand.val {
- inputs.push(operand.immediate());
- }
- ext_constraints.push(i.to_string());
- }
- if out.is_indirect {
- let operand = self.load_operand(place);
- if let OperandValue::Immediate(_) = operand.val {
- indirect_outputs.push(operand.immediate());
- }
- } else {
- output_types.push(place.layout.gcc_type(self.cx()));
- }
- }
- if !indirect_outputs.is_empty() {
- indirect_outputs.extend_from_slice(&inputs);
- inputs = indirect_outputs;
- }
-
- let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
-
- // Default per-arch clobbers
- // Basically what clang does
- let arch_clobbers = match &self.sess().target.target.arch[..] {
- "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
- "mips" | "mips64" => vec!["~{$1}"],
- _ => Vec::new(),
- };
-
- let all_constraints = ia
- .outputs
- .iter()
- .map(|out| out.constraint.to_string())
- .chain(ia.inputs.iter().map(|s| s.to_string()))
- .chain(ext_constraints)
- .chain(clobbers)
- .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
- .collect::<Vec<String>>()
- .join(",");
-
- debug!("Asm Constraints: {}", &all_constraints);
-
- // Depending on how many outputs we have, the return type is different
- let num_outputs = output_types.len();
- let output_type = match num_outputs {
- 0 => self.type_void(),
- 1 => output_types[0],
- _ => self.type_struct(&output_types, false),
- };
-
- let asm = ia.asm.as_str();
- let r = inline_asm_call(
- self,
- &asm,
- &all_constraints,
- &inputs,
- output_type,
- ia.volatile,
- ia.alignstack,
- ia.dialect,
- );
- if r.is_none() {
- return false;
- }
- let r = r.unwrap();
-
- // Again, based on how many outputs we have
- let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
- for (i, (_, &place)) in outputs.enumerate() {
- let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
- OperandValue::Immediate(v).store(self, place);
- }
-
- // Store mark in a metadata node so we can map LLVM errors
- // back to source locations. See #17552.
- unsafe {
- let key = "srcloc";
- let kind = llvm::LLVMGetMDKindIDInContext(
- self.llcx,
- key.as_ptr() as *const c_char,
- key.len() as c_uint,
- );
-
- let val: &'ll Value = self.const_i32(span.ctxt().outer_expn().as_u32() as i32);
-
- llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
- }
-
- true*/
}
fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
};
// Collect the types of output operands
- // FIXME: we do this here instead of later because of a bug in libgccjit where creating the
+ // FIXME(antoyo): we do this here instead of later because of a bug in libgccjit where creating the
// variable after the extended asm expression causes a segfault:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380
let mut output_vars = FxHashMap::default();
match out_place {
Some(place) => place.layout.gcc_type(self.cx, false),
None => {
- // If the output is discarded, we don't really care what
- // type is used. We're just using this to tell GCC to
- // reserve the register.
- //dummy_output_type(self.cx, reg.reg_class())
-
// NOTE: if no output value, we should not create one.
continue;
},
template_str
}
else {
- // FIXME: this might break the "m" memory constraint:
+ // FIXME(antoyo): this might break the "m" memory constraint:
// https://stackoverflow.com/a/9347957/389119
- // TODO: only set on x86 platforms.
+ // TODO(antoyo): only set on x86 platforms.
format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str)
};
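// Context for the wrapping above: the whole context is compiled with
// -masm=intel (see the add_command_line_option("-masm=intel") call further
// down), so an AT&T-syntax template is bracketed with .att_syntax/.intel_syntax
// directives to switch the assembler dialect only for this asm block.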
let extended_asm = block.add_extended_asm(None, &template_str);
},
};
output_types.push(ty);
- //op_idx.insert(idx, constraints.len());
let prefix = if late { "=" } else { "=&" };
let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
None => dummy_output_type(self.cx, reg.reg_class())
};
output_types.push(ty);
- //op_idx.insert(idx, constraints.len());
- // TODO: prefix of "+" for reading and writing?
+ // TODO(antoyo): prefix of "+" for reading and writing?
let prefix = if late { "=" } else { "=&" };
let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
if out_place.is_some() {
let var = output_vars[&idx];
- // TODO: also specify an output operand when out_place is none: that would
+ // TODO(antoyo): also specify an output operand when out_place is none: that would
// be the clobber but clobbers do not support general constraint like reg;
// they only support named registers.
// Not sure how we can do this. And the LLVM backend does not seem to add a
}
}
- /*if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
- match asm_arch {
- InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
- constraints.push("~{cc}".to_string());
- }
- InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
- constraints.extend_from_slice(&[
- "~{dirflag}".to_string(),
- "~{fpsr}".to_string(),
- "~{flags}".to_string(),
- ]);
- }
- InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
- }
- }
- if !options.contains(InlineAsmOptions::NOMEM) {
- // This is actually ignored by LLVM, but it's probably best to keep
- // it just in case. LLVM instead uses the ReadOnly/ReadNone
- // attributes on the call instruction to optimize.
- constraints.push("~{memory}".to_string());
- }
- let volatile = !options.contains(InlineAsmOptions::PURE);
- let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
- let output_type = match &output_types[..] {
- [] => self.type_void(),
- [ty] => ty,
- tys => self.type_struct(&tys, false),
- };*/
-
- /*let result = inline_asm_call(
- self,
- &template_str,
- &constraints.join(","),
- &inputs,
- output_type,
- volatile,
- alignstack,
- dialect,
- span,
- )
- .unwrap_or_else(|| span_bug!(span, "LLVM asm constraint validation failed"));
-
- if options.contains(InlineAsmOptions::PURE) {
- if options.contains(InlineAsmOptions::NOMEM) {
- llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
- } else if options.contains(InlineAsmOptions::READONLY) {
- llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
- }
- } else {
- if options.contains(InlineAsmOptions::NOMEM) {
- llvm::Attribute::InaccessibleMemOnly
- .apply_callsite(llvm::AttributePlace::Function, result);
- } else {
- // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
- }
- }*/
-
// Write results to outputs
for (idx, op) in operands.iter().enumerate() {
if let InlineAsmOperandRef::Out { place: Some(place), .. }
}
/// Converts a register class to a GCC constraint code.
-// TODO: return &'static str instead?
+// TODO(antoyo): return &'static str instead?
fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
match reg {
// For vector registers LLVM wants the register name to match the type size.
InlineAsmRegOrRegClass::Reg(reg) => {
- // TODO: add support for vector register.
+ // TODO(antoyo): add support for vector register.
let constraint =
match reg.name() {
"ax" => "a",
"dx" => "d",
"si" => "S",
"di" => "D",
- // TODO: for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
- // TODO: in this case though, it's a clobber, so it should work as r11.
+ // TODO(antoyo): for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+ // TODO(antoyo): in this case though, it's a clobber, so it should work as r11.
// Recent nightlies support the clobber() syntax, so update to that; it does
// not seem to be implemented yet, though.
- name => name, // FIXME: probably wrong.
+ name => name, // FIXME(antoyo): probably wrong.
};
constraint.to_string()
},
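// For reference, the single-letter strings above are GCC's x86 machine
// constraints: "a" = the ax register, "d" = dx, "S" = si, "D" = di. A name
// that falls through unchanged is what the FIXME above flags as dubious.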
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
unimplemented!()
- //if modifier == Some('v') { None } else { modifier }
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
unimplemented!()
- /*if modifier.is_none() {
- Some('q')
- } else {
- modifier
- }*/
}
InlineAsmRegClass::Bpf(_) => unimplemented!(),
InlineAsmRegClass::Hexagon(_) => unimplemented!(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
- | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!() /*match (reg, modifier) {
- (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
- (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
- (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
- (_, Some('x')) => Some('x'),
- (_, Some('y')) => Some('t'),
- (_, Some('z')) => Some('g'),
- _ => unreachable!(),
- }*/,
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
{
let context = &module.module_llvm.context;
- //let llcx = &*module.module_llvm.llcx;
- //let tm = &*module.module_llvm.tm;
let module_name = module.name.clone();
let module_name = Some(&module_name[..]);
- //let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
-
- /*if cgcx.msvc_imps_needed {
- create_msvc_imps(cgcx, llcx, llmod);
- }*/
-
- // A codegen-specific pass manager is used to generate object
- // files for an GCC module.
- //
- // Apparently each of these pass managers is a one-shot kind of
- // thing, so we create a new one for each type of output. The
- // pass manager passed to the closure should be ensured to not
- // escape the closure itself, and the manager should only be
- // used once.
- /*unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, llmod: &'ll llvm::Module, no_builtins: bool, f: F) -> R
- where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
- {
- let cpm = llvm::LLVMCreatePassManager();
- llvm::LLVMAddAnalysisPasses(tm, cpm);
- llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
- f(cpm)
- }*/
-
- // Two things to note:
- // - If object files are just LLVM bitcode we write bitcode, copy it to
- // the .o file, and delete the bitcode if it wasn't otherwise
- // requested.
- // - If we don't have the integrated assembler then we need to emit
- // asm from LLVM and use `gcc` to create the object file.
let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
if config.bitcode_needed() {
- // TODO
- /*let _timer = cgcx
- .prof
- .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
- let thin = ThinBuffer::new(llmod);
- let data = thin.data();
-
- if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_emit_bitcode",
- &module.name[..],
- );
- if let Err(e) = fs::write(&bc_out, data) {
- let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
- diag_handler.err(&msg);
- }
- }
-
- if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_embed_bitcode",
- &module.name[..],
- );
- embed_bitcode(cgcx, llcx, llmod, Some(data));
- }
-
- if config.emit_bc_compressed {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_emit_compressed_bitcode",
- &module.name[..],
- );
- let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
- let data = bytecode::encode(&module.name, data);
- if let Err(e) = fs::write(&dst, data) {
- let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
- diag_handler.err(&msg);
- }
- }*/
- } /*else if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Marker) {
- unimplemented!();
- //embed_bitcode(cgcx, llcx, llmod, None);
- }*/
+ // TODO(antoyo)
+ }
if config.emit_ir {
unimplemented!();
- /*let _timer = cgcx
- .prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
- let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
- let out_c = path_to_c_string(&out);
-
- extern "C" fn demangle_callback(
- input_ptr: *const c_char,
- input_len: size_t,
- output_ptr: *mut c_char,
- output_len: size_t,
- ) -> size_t {
- let input =
- unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
-
- let input = match str::from_utf8(input) {
- Ok(s) => s,
- Err(_) => return 0,
- };
-
- let output = unsafe {
- slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
- };
- let mut cursor = io::Cursor::new(output);
-
- let demangled = match rustc_demangle::try_demangle(input) {
- Ok(d) => d,
- Err(_) => return 0,
- };
-
- if write!(cursor, "{:#}", demangled).is_err() {
- // Possible only if provided buffer is not big enough
- return 0;
- }
-
- cursor.position() as size_t
- }
-
- let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
- result.into_result().map_err(|()| {
- let msg = format!("failed to write LLVM IR to {}", out.display());
- llvm_err(diag_handler, &msg)
- })?;*/
}
if config.emit_asm {
.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
-
- /*with_codegen(tm, llmod, config.no_builtins, |cpm| {
- write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
- })?;*/
}
match config.emit_obj {
let _timer = cgcx
.prof
.generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
- //with_codegen(tm, llmod, config.no_builtins, |cpm| {
- //println!("1: {}", module.name);
- match &*module.name {
- "std_example.7rcbfp3g-cgu.15" => {
- println!("Dumping reproducer {}", module.name);
- let _ = fs::create_dir("/tmp/reproducers");
- // FIXME: segfault in dump_reproducer_to_file() might be caused by
- // transmuting an rvalue to an lvalue.
- // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
- context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
- println!("Dumped reproducer {}", module.name);
- },
- _ => (),
- }
- /*let _ = fs::create_dir("/tmp/dumps");
- context.dump_to_file(&format!("/tmp/dumps/{}.c", module.name), true);
- println!("Dumped {}", module.name);*/
- //println!("Compile module {}", module.name);
- context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
- //})?;
+ match &*module.name {
+ "std_example.7rcbfp3g-cgu.15" => {
+ println!("Dumping reproducer {}", module.name);
+ let _ = fs::create_dir("/tmp/reproducers");
+ // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
+ // transmuting an rvalue to an lvalue.
+ // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
+ context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
+ println!("Dumped reproducer {}", module.name);
+ },
+ _ => (),
+ }
+ context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
}
EmitObj::Bitcode => {
- //unimplemented!();
- /*debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
- if let Err(e) = link_or_copy(&bc_out, &obj_out) {
- diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
- }
-
- if !config.emit_bc {
- debug!("removing_bitcode {:?}", bc_out);
- if let Err(e) = fs::remove_file(&bc_out) {
- diag_handler.err(&format!("failed to remove bitcode: {}", e));
- }
- }*/
+ // TODO(antoyo)
}
EmitObj::None => {}
}
-
- //drop(handlers);
}
Ok(module.into_compiled_module(
pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
unimplemented!();
- /*use super::lto::{Linker, ModuleBuffer};
- // Sort the modules by name to ensure to ensure deterministic behavior.
- modules.sort_by(|a, b| a.name.cmp(&b.name));
- let (first, elements) =
- modules.split_first().expect("Bug! modules must contain at least one module.");
-
- let mut linker = Linker::new(first.module_llvm.llmod());
- for module in elements {
- let _timer =
- cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
- let buffer = ModuleBuffer::new(module.module_llvm.llmod());
- linker.add(&buffer.data()).map_err(|()| {
- let msg = format!("failed to serialize module {:?}", module.name);
- llvm_err(&diag_handler, &msg)
- })?;
- }
- drop(linker);
- Ok(modules.remove(0))*/
}
Linkage::Appending => unimplemented!(),
Linkage::Internal => GlobalKind::Internal,
Linkage::Private => GlobalKind::Internal,
- Linkage::ExternalWeak => GlobalKind::Imported, // TODO: should be weak linkage.
+ Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
Linkage::Common => unimplemented!(),
}
}
Linkage::AvailableExternally => FunctionType::Extern,
Linkage::LinkOnceAny => unimplemented!(),
Linkage::LinkOnceODR => unimplemented!(),
- Linkage::WeakAny => FunctionType::Exported, // FIXME: should be similar to linkonce.
+ Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
Linkage::WeakODR => unimplemented!(),
Linkage::Appending => unimplemented!(),
Linkage::Internal => FunctionType::Internal,
// Instantiate monomorphizations without filling out definitions yet...
//let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
let context = Context::default();
- // TODO: only set on x86 platforms.
+ // TODO(antoyo): only set on x86 platforms.
context.add_command_line_option("-masm=intel");
for arg in &tcx.sess.opts.cg.llvm_args {
context.add_command_line_option(arg);
}
context.add_command_line_option("-fno-semantic-interposition");
- //context.set_dump_code_on_compile(true);
+ if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+ context.set_dump_code_on_compile(true);
+ }
if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
context.set_dump_initial_gimple(true);
}
context.set_debug_info(true);
- //context.set_dump_everything(true);
- //context.set_keep_intermediates(true);
+ if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+ context.set_dump_everything(true);
+ }
+ if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+ context.set_keep_intermediates(true);
+ }
{
let cx = CodegenCx::new(&context, cgu, tcx);
block.end_with_void_return(None);
});
- //println!("module_codegen: {:?} {:?}", cgu_name, &cx.context as *const _);
let mono_items = cgu.items_in_deterministic_order(tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
-// TODO
+// TODO(antoyo)
type Funclet = ();
-// TODO: remove this variable.
+// TODO(antoyo): remove this variable.
static mut RETURN_VALUE_COUNT: usize = 0;
enum ExtremumOperation {
let load_ordering =
match order {
- // TODO: does this make sense?
+ // TODO(antoyo): does this make sense?
AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
_ => order.clone(),
};
}
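// A sketch of the downgrade rule above using std atomics (illustrative: the
// real code matches on rustc's AtomicOrdering, not std's Ordering):
fn load_ordering(order: std::sync::atomic::Ordering) -> std::sync::atomic::Ordering {
    use std::sync::atomic::Ordering::*;
    match order {
        // A plain load cannot carry a Release ordering, so the read half of
        // a read-modify-write degrades to Acquire.
        AcqRel | Release => Acquire,
        other => other,
    }
}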
fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
- //let mut fn_ty = self.cx.val_ty(func);
- // Strip off pointers
- /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
- fn_ty = self.cx.element_type(fn_ty);
- }*/
-
- /*assert!(
- self.cx.type_kind(fn_ty) == TypeKind::Function,
- "builder::{} not passed a function, but {:?}",
- typ,
- fn_ty
- );
-
- let param_tys = self.cx.func_params_types(fn_ty);
-
- let all_args_match = param_tys
- .iter()
- .zip(args.iter().map(|&v| self.val_ty(v)))
- .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
-
let mut all_args_match = true;
let mut param_types = vec![];
let param_count = func.get_param_count();
.map(|(_i, (expected_ty, &actual_val))| {
let actual_ty = actual_val.get_type();
if expected_ty != actual_ty {
- /*debug!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
- /*println!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
self.bitcast(actual_val, expected_ty)
}
else {
}
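// A self-contained sketch of the mismatch-then-bitcast pattern implemented
// by check_call above, with strings standing in for gccjit types (all names
// here are illustrative, not the real gccjit API):
fn coerce_args(expected: &[&'static str], actual: &[(i64, &'static str)]) -> Vec<(i64, &'static str)> {
    expected.iter().zip(actual)
        .map(|(&want, &(value, got))| {
            // On a type mismatch, inject a cast rather than rejecting the
            // call, mirroring self.bitcast(actual_val, expected_ty) above.
            if got != want { (value, want) } else { (value, got) }
        })
        .collect()
}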
fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
- //let mut fn_ty = self.cx.val_ty(func);
- // Strip off pointers
- /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
- fn_ty = self.cx.element_type(fn_ty);
- }*/
-
- /*assert!(
- self.cx.type_kind(fn_ty) == TypeKind::Function,
- "builder::{} not passed a function, but {:?}",
- typ,
- fn_ty
- );
-
- let param_tys = self.cx.func_params_types(fn_ty);
-
- let all_args_match = param_tys
- .iter()
- .zip(args.iter().map(|&v| self.val_ty(v)))
- .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
-
let mut all_args_match = true;
let mut param_types = vec![];
let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
.map(|(_i, (expected_ty, &actual_val))| {
let actual_ty = actual_val.get_type();
if expected_ty != actual_ty {
- /*debug!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
- /*println!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
self.bitcast(actual_val, expected_ty)
}
else {
}
fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
- let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here.
+ let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
let stored_ty = self.cx.val_ty(val);
let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
- //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
if dest_ptr_ty == stored_ptr_ty {
ptr
}
else {
- /*debug!(
- "type mismatch in store. \
- Expected {:?}, got {:?}; inserting bitcast",
- dest_ptr_ty, stored_ptr_ty
- );*/
- /*println!(
- "type mismatch in store. \
- Expected {:?}, got {:?}; inserting bitcast",
- dest_ptr_ty, stored_ptr_ty
- );*/
- //ptr
self.bitcast(ptr, stored_ptr_ty)
}
}
}
fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
- //debug!("call {:?} with args ({:?})", func, args);
-
- // TODO: remove when the API supports a different type for functions.
+ // TODO(antoyo): remove when the API supports a different type for functions.
let func: Function<'gcc> = self.cx.rvalue_as_function(func);
let args = self.check_call("call", func, args);
- //let bundle = funclet.map(|funclet| funclet.bundle());
- //let bundle = bundle.as_ref().map(|b| &*b.raw);
// gccjit requires the result of functions to be used, even when it's not used in the source.
// That's why we assign the result to a local or call add_eval().
}
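// Sketch of the fresh-local pattern the comment above refers to (the real
// code uses the static RETURN_VALUE_COUNT declared near the top of the file):
fn fresh_local_name(counter: &mut usize, prefix: &str) -> String {
    // Each synthesized local gets a unique name like "returnValue42",
    // so the call result is always bound and therefore "used".
    *counter += 1;
    format!("{}{}", prefix, *counter)
}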
fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
- //debug!("func ptr call {:?} with args ({:?})", func, args);
-
let args = self.check_ptr_call("call", func_ptr, args);
- //let bundle = funclet.map(|funclet| funclet.bundle());
- //let bundle = bundle.as_ref().map(|b| &*b.raw);
// gccjit requires the result of functions to be used, even when it's not used in the source.
// That's why we assign the result to a local or call add_eval().
let void_type = self.context.new_type::<()>();
let current_func = current_block.get_function();
- // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
+ // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
return_type = self.int_type;
}
}
else {
if gcc_func.get_param_count() == 0 {
- // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
+ // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
}
else {
}
pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
- //debug!("overflow_call {:?} with args ({:?})", func, args);
-
- //let bundle = funclet.map(|funclet| funclet.bundle());
- //let bundle = bundle.as_ref().map(|b| &*b.raw);
-
// gccjit requires the result of functions to be used, even when it's not used in the source.
// That's why we assign the result to a local.
let return_type = self.context.new_type::<bool>();
let current_block = self.current_block.borrow().expect("block");
let current_func = current_block.get_function();
- // TODO: return the new_call() directly? Since the overflow function has no side-effects.
+ // TODO(antoyo): return the new_call() directly, since the overflow function has no side effects?
unsafe { RETURN_VALUE_COUNT += 1 };
let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
self.llbb().end_with_conditional(None, condition, then, catch);
self.context.new_rvalue_from_int(self.int_type, 0)
- // TODO
- /*debug!("invoke {:?} with args ({:?})", func, args);
-
- let args = self.check_call("invoke", func, args);
- let bundle = funclet.map(|funclet| funclet.bundle());
- let bundle = bundle.as_ref().map(|b| &*b.raw);
-
- unsafe {
- llvm::LLVMRustBuildInvoke(
- self.llbuilder,
- func,
- args.as_ptr(),
- args.len() as c_uint,
- then,
- catch,
- bundle,
- UNNAMED,
- )
- }*/
+ // TODO(antoyo)
}
fn unreachable(&mut self) {
}
fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: this should not be required.
+ // FIXME(antoyo): this should not be required.
if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
b = self.context.new_cast(None, b, a.get_type());
}
}
fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: convert the arguments to unsigned?
+ // TODO(antoyo): convert the arguments to unsigned?
a / b
}
fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: convert the arguments to unsigned?
- // TODO: poison if not exact.
+ // TODO(antoyo): convert the arguments to unsigned?
+ // TODO(antoyo): poison if not exact.
a / b
}
fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: convert the arguments to signed?
+ // TODO(antoyo): convert the arguments to signed?
a / b
}
fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: posion if not exact.
- // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+ // TODO(antoyo): posion if not exact.
+ // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
// should be the same.
let typ = a.get_type().to_signed(self);
let a = self.context.new_cast(None, a, typ);
fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
if a.get_type() == self.cx.float_type {
let fmodf = self.context.get_builtin_function("fmodf");
- // FIXME: this seems to produce the wrong result.
+ // FIXME(antoyo): this seems to produce the wrong result.
return self.context.new_call(None, fmodf, &[a, b]);
}
assert_eq!(a.get_type(), self.cx.double_type);
}
fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
let a_type = a.get_type();
let b_type = b.get_type();
if a_type.is_unsigned(self) && b_type.is_signed(self) {
- //println!("shl: {:?} -> {:?}", a, b_type);
let a = self.context.new_cast(None, a, b_type);
let result = a << b;
- //println!("shl: {:?} -> {:?}", result, a_type);
self.context.new_cast(None, result, a_type)
}
else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- //println!("shl: {:?} -> {:?}", b, a_type);
let b = self.context.new_cast(None, b, a_type);
a << b
}
}
fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
- // TODO: cast to unsigned to do a logical shift if that does not work.
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
let a_type = a.get_type();
let b_type = b.get_type();
if a_type.is_unsigned(self) && b_type.is_signed(self) {
- //println!("lshl: {:?} -> {:?}", a, b_type);
let a = self.context.new_cast(None, a, b_type);
let result = a >> b;
- //println!("lshl: {:?} -> {:?}", result, a_type);
self.context.new_cast(None, result, a_type)
}
else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- //println!("lshl: {:?} -> {:?}", b, a_type);
let b = self.context.new_cast(None, b, a_type);
a >> b
}
}
fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: check whether behavior is an arithmetic shift for >> .
- // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ // TODO(antoyo): check whether the behavior of >> is an arithmetic shift.
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
let a_type = a.get_type();
let b_type = b.get_type();
if a_type.is_unsigned(self) && b_type.is_signed(self) {
- //println!("ashl: {:?} -> {:?}", a, b_type);
let a = self.context.new_cast(None, a, b_type);
let result = a >> b;
- //println!("ashl: {:?} -> {:?}", result, a_type);
self.context.new_cast(None, result, a_type)
}
else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- //println!("ashl: {:?} -> {:?}", b, a_type);
let b = self.context.new_cast(None, b, a_type);
a >> b
}
}
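// The three shift methods above share one workaround: libgccjit apparently
// cannot shift when the operand types disagree in signedness (see the FIXMEs
// above), so one operand is cast over and the result cast back. The same
// dance in plain Rust:
fn shl_mixed(a: u32, b: i32) -> u32 {
    // Cast a to b's (signed) type, shift, cast the result back to a's type,
    // mirroring the new_cast() calls in shl/lshr/ashr.
    ((a as i32) << b) as u32
}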
fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: hack by putting the result in a variable to workaround this bug:
+ // FIXME(antoyo): hack by putting the result in a variable to workaround this bug:
// https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
if a.get_type() != b.get_type() {
b = self.context.new_cast(None, b, a.get_type());
}
fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: hack by putting the result in a variable to workaround this bug:
+ // FIXME(antoyo): hack by putting the result in a variable to workaround this bug:
// https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
let res = self.current_func().new_local(None, b.get_type(), "orResult");
self.llbb().add_assignment(None, res, a | b);
}
fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: use new_unary_op()?
+ // TODO(antoyo): use new_unary_op()?
self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
}
}
fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: should generate poison value?
+ // TODO(antoyo): should generate poison value?
a - b
}
fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
}
fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
}
fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
}
fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
}
fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
}
fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
};
- // TODO: remove duplication with intrinsic?
+ // TODO(antoyo): remove duplication with intrinsic?
let name =
match oop {
OverflowOp::Add =>
let intrinsic = self.context.get_builtin_function(&name);
let res = self.current_func()
- // TODO: is it correct to use rhs type instead of the parameter typ?
+ // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
.new_local(None, rhs.get_type(), "binopResult")
.get_address(None);
let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
}
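// What checked_binop computes, expressed with plain Rust integer ops
// (illustrative; the real code calls a GCC overflow builtin and writes the
// result through the `res` pointer created above):
fn checked_add_i32(lhs: i32, rhs: i32) -> (i32, bool) {
    // (value, overflow flag), matching the (Self::Value, Self::Value) pair
    // that checked_binop returns.
    lhs.overflowing_add(rhs)
}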
fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
- // FIXME: this check that we don't call get_aligned() a second time on a time.
+ // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
// Ideally, we shouldn't need to do this check.
let aligned_type =
if ty == self.cx.u128_type || ty == self.cx.i128_type {
else {
ty.get_aligned(align.bytes())
};
- // TODO: It might be better to return a LValue, but fixing the rustc API is non-trivial.
+ // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
self.stack_var_count.set(self.stack_var_count.get() + 1);
self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
}
fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
- llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
- alloca
- }*/
}
fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
- llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
- alloca
- }*/
}
fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
- // TODO: use ty.
+ // TODO(antoyo): use ty.
let block = self.llbb();
let function = block.get_function();
// NOTE: instead of returning the dereference here, we have to assign it to a variable in
// the current basic block. Otherwise, it could be used in another basic block, causing a
// dereference after a drop, for instance.
- // TODO: handle align.
+ // TODO(antoyo): handle align.
let deref = ptr.dereference(None).to_rvalue();
let value_type = deref.get_type();
unsafe { RETURN_VALUE_COUNT += 1 };
}
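// Shape of load() as described in the NOTE above (pseudo-gccjit, assumed API
// shapes from the gccjit crate):
//   let deref = ptr.dereference(None).to_rvalue();   // *ptr as an rvalue
//   let var = function.new_local(None, value_type, "loadedValueN");
//   block.add_assignment(None, var, deref);          // pin it in this block
//   var.to_rvalue()                                  // safe to use in later blocks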
fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: use ty.
- //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile());
+ // TODO(antoyo): use ty.
let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
- //println!("6");
ptr.dereference(None).to_rvalue()
}
fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
- // TODO: use ty.
- // TODO: handle alignment.
+ // TODO(antoyo): use ty.
+ // TODO(antoyo): handle alignment.
let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
}
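// The builtin is chosen by operand size; a sketch of the name construction,
// grounded in the format!() call above:
fn atomic_load_builtin_name(size_in_bytes: u64) -> String {
    // e.g. 4 -> "__atomic_load_4", 8 -> "__atomic_load_8"
    format!("__atomic_load_{}", size_in_bytes)
}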
fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
- //debug!("PlaceRef::load: {:?}", place);
-
assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
if place.layout.is_zst() {
OperandValue::Ref(place.llval, Some(llextra), place.align)
}
else if place.layout.is_gcc_immediate() {
- let const_llval = None;
- /*unsafe {
- if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
- if llvm::LLVMIsGlobalConstant(global) == llvm::True {
- const_llval = llvm::LLVMGetInitializer(global);
- }
- }
- }*/
- let llval = const_llval.unwrap_or_else(|| {
- let load = self.load(place.llval.get_type(), place.llval, place.align);
- if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
- scalar_load_metadata(self, load, scalar);
- }
- load
- });
- OperandValue::Immediate(self.to_immediate(llval, place.layout))
+ let load = self.load(place.llval.get_type(), place.llval, place.align);
+ if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+ scalar_load_metadata(self, load, scalar);
+ }
+ OperandValue::Immediate(self.to_immediate(load, place.layout))
}
else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
}
fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) {
- // TODO
- /*if self.sess().target.target.arch == "amdgpu" {
- // amdgpu/LLVM does something weird and thinks a i64 value is
- // split into a v2i32, halving the bitwidth LLVM expects,
- // tripping an assertion. So, for now, just disable this
- // optimization.
- return;
- }
-
- unsafe {
- let llty = self.cx.val_ty(load);
- let v = [
- self.cx.const_uint_big(llty, range.start),
- self.cx.const_uint_big(llty, range.end),
- ];
-
- llvm::LLVMSetMetadata(
- load,
- llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
- );
- }*/
+ // TODO(antoyo)
}
fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
- // TODO
- /*unsafe {
- llvm::LLVMSetMetadata(
- load,
- llvm::MD_nonnull as c_uint,
- llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
- );
- }*/
+ // TODO(antoyo)
}
fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
}
fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
- //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
let ptr = self.check_store(val, ptr);
self.llbb().add_assignment(None, ptr.dereference(None), val);
- /*let align =
- if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
- llvm::LLVMSetAlignment(store, align);
- if flags.contains(MemFlags::VOLATILE) {
- llvm::LLVMSetVolatile(store, llvm::True);
- }
- if flags.contains(MemFlags::NONTEMPORAL) {
- // According to LLVM [1] building a nontemporal store must
- // *always* point to a metadata value of the integer 1.
- //
- // [1]: http://llvm.org/docs/LangRef.html#store-instruction
- let one = self.cx.const_i32(1);
- let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
- llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
- }*/
- // NOTE: dummy value here since it's never used. FIXME: API should not return a value here?
+ // TODO(antoyo): handle align and flags.
+ // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
self.cx.context.new_rvalue_zero(self.type_i32())
}
fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
- // TODO: handle alignment.
+ // TODO(antoyo): handle alignment.
let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
- // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because
+ // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
// the following cast is required to avoid this error:
// gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
let int_type = atomic_store.get_param(1).to_rvalue().get_type();
}
fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
- // FIXME: would be safer if doing the same thing (loop) as gep.
- // TODO: specify inbounds somehow.
+ // FIXME(antoyo): would be safer if doing the same thing (loop) as gep.
+ // TODO(antoyo): specify inbounds somehow.
match indices.len() {
1 => {
self.context.new_array_access(None, ptr, indices[0]).get_address(None)
},
2 => {
- let array = ptr.dereference(None); // TODO: assert that first index is 0?
+ let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
self.context.new_array_access(None, array, indices[1]).get_address(None)
},
_ => unimplemented!(),
}
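// How the two supported index counts map onto array accesses (illustrative,
// with plain Rust slices in place of gccjit values):
fn gep_like(base: &[[i32; 4]], indices: &[usize]) -> *const i32 {
    match indices {
        // One index: address of base[i], as with new_array_access(ptr, indices[0]).
        [i] => &base[*i] as *const [i32; 4] as *const i32,
        // Two indices: the first selects the array object behind the pointer,
        // the second the element, as in the 2 => arm above.
        [i, j] => &base[*i][*j],
        _ => unimplemented!(),
    }
}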
fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
- // FIXME: it would be better if the API only called this on struct, not on arrays.
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
assert_eq!(idx as usize as u64, idx);
let value = ptr.dereference(None).to_rvalue();
/* Casts */
fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: check that it indeed truncate the value.
- //println!("trunc: {:?} -> {:?}", value, dest_ty);
+ // TODO(antoyo): check that it indeed truncates the value.
self.context.new_cast(None, value, dest_ty)
}
fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: check that it indeed sign extend the value.
- //println!("Sext {:?} to {:?}", value, dest_ty);
- //if let Some(vector_type) = value.get_type().is_vector() {
+ // TODO(antoyo): check that it indeed sign extends the value.
if dest_ty.is_vector().is_some() {
- // TODO: nothing to do as it is only for LLVM?
+ // TODO(antoyo): nothing to do as it is only for LLVM?
return value;
- /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64);
- println!("Casting {:?} to {:?}", value, dest_type);
- return self.context.new_cast(None, value, dest_type);*/
}
self.context.new_cast(None, value, dest_ty)
}
fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("7: fptoui: {:?} to {:?}", value, dest_ty);
- let ret = self.context.new_cast(None, value, dest_ty);
- //println!("8");
- ret
- //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
+ self.context.new_cast(None, value, dest_ty)
}
fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
}
fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("1: uitofp: {:?} -> {:?}", value, dest_ty);
- let ret = self.context.new_cast(None, value, dest_ty);
- //println!("2");
- ret
+ self.context.new_cast(None, value, dest_ty)
}
fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("3: sitofp: {:?} -> {:?}", value, dest_ty);
- let ret = self.context.new_cast(None, value, dest_ty);
- //println!("4");
- ret
+ self.context.new_cast(None, value, dest_ty)
}
fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: make sure it trancates.
+ // TODO(antoyo): make sure it truncates.
self.context.new_cast(None, value, dest_ty)
}
fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
// NOTE: is_signed is for value, not dest_typ.
- //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ);
self.cx.context.new_cast(None, value, dest_typ)
}
fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty);
let val_type = value.get_type();
match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
(false, true) => {
},
(false, false) => {
// When they are not pointers, we want a transmute (or reinterpret_cast).
- //self.cx.context.new_cast(None, value, dest_ty)
self.bitcast(value, dest_ty)
},
(true, true) => self.cx.context.new_cast(None, value, dest_ty),
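// Decision table implemented by pointercast (the two arms not shown here are
// elided above):
//   (value is ptr?, dest is ptr?)  =>  operation
//   (false, false)                 =>  bit-level transmute via bitcast
//   (true,  true)                  =>  ordinary pointer cast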
let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
let memcpy = self.context.get_builtin_function("memcpy");
let block = self.block.expect("block");
- // TODO: handle aligns and is_volatile.
+ // TODO(antoyo): handle aligns and is_volatile.
block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
}
let memmove = self.context.get_builtin_function("memmove");
let block = self.block.expect("block");
- // TODO: handle is_volatile.
+ // TODO(antoyo): handle is_volatile.
block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
}
let ptr = self.pointercast(ptr, self.type_i8p());
let memset = self.context.get_builtin_function("memset");
let block = self.block.expect("block");
- // TODO: handle aligns and is_volatile.
- //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type);
+ // TODO(antoyo): handle align and is_volatile.
let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
let size = self.intcast(size, self.type_size_t(), false);
block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
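// Common shape of the three builtin-backed memory ops above (memcpy, memmove,
// memset): cast the pointers to the parameter types GCC expects, cast the
// size to size_t, then add_eval() the call since its result is discarded.
// For reference, the C signatures behind the builtins:
//   void *memcpy(void *dst, const void *src, size_t n);
//   void *memmove(void *dst, const void *src, size_t n);
//   void *memset(void *s, int c, size_t n);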
#[allow(dead_code)]
fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
unimplemented!();
- //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
}
fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
}
fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*unsafe {
- let elt_ty = self.cx.val_ty(elt);
- let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
- let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
- let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
- self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
- }*/
}
fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
- // FIXME: it would be better if the API only called this on struct, not on arrays.
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
assert_eq!(idx as usize as u64, idx);
let value_type = aggregate_value.get_type();
else {
panic!("Unexpected type {:?}", value_type);
}
- /*assert_eq!(idx as c_uint as u64, idx);
- unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
}
fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
- // FIXME: it would be better if the API only called this on struct, not on arrays.
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
assert_eq!(idx as usize as u64, idx);
let value_type = aggregate_value.get_type();
let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
.to_rvalue()
- // TODO
- /*unsafe {
- llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
- }*/
+ // TODO(antoyo): Properly implement unwinding.
+ // The code above is just there to make compilation work: rustc_codegen_ssa
+ // now seems to call the unwinding builder methods even with panic=abort.
}
fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
- // TODO
- /*unsafe {
- llvm::LLVMSetCleanup(landing_pad, llvm::True);
- }*/
+ // TODO(antoyo)
}
fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
}
fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
unimplemented!();
- /*let name = const_cstr!("cleanuppad");
- let ret = unsafe {
- llvm::LLVMRustBuildCleanupPad(
- self.llbuilder,
- parent,
- args.len() as c_uint,
- args.as_ptr(),
- name.as_ptr(),
- )
- };
- Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
}
fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
unimplemented!();
- /*let ret =
- unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
- ret.expect("LLVM does not have support for cleanupret")*/
}
fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
unimplemented!();
- /*let name = const_cstr!("catchpad");
- let ret = unsafe {
- llvm::LLVMRustBuildCatchPad(
- self.llbuilder,
- parent,
- args.len() as c_uint,
- args.as_ptr(),
- name.as_ptr(),
- )
- };
- Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
}
fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
unimplemented!();
- /*let name = const_cstr!("catchswitch");
- let ret = unsafe {
- llvm::LLVMRustBuildCatchSwitch(
- self.llbuilder,
- parent,
- unwind,
- num_handlers as c_uint,
- name.as_ptr(),
- )
- };
- ret.expect("LLVM does not have support for catchswitch")*/
}
fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
unimplemented!();
- /*unsafe {
- llvm::LLVMRustAddHandler(catch_switch, handler);
- }*/
}
fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
- // TODO
- /*unsafe {
- llvm::LLVMSetPersonalityFn(self.llfn(), personality);
- }*/
+ // TODO(antoyo)
}
// Atomic Operations
let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
- let align = Align::from_bits(64).expect("align"); // TODO: use good align.
+ let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.
let value_type = result.to_rvalue().get_type();
if let Some(struct_type) = value_type.is_struct() {
// expected so that we store expected after the call.
self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
}
- // TODO: handle when value is not a struct.
+ // TODO(antoyo): handle when value is not a struct.
result.to_rvalue()
}
let void_ptr_type = self.context.new_type::<*mut ()>();
let volatile_void_ptr_type = void_ptr_type.make_volatile();
let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
- // NOTE: not sure why, but we have the wrong type here.
+ // FIXME(antoyo): not sure why, but we have the wrong type here.
let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
let src = self.context.new_cast(None, src, new_src_type);
let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
fn set_invariant_load(&mut self, load: RValue<'gcc>) {
// NOTE: hack to treat a vtable function pointer as a non-global-variable function pointer.
self.normal_function_addresses.borrow_mut().insert(load);
- // TODO
- /*unsafe {
- llvm::LLVMSetMetadata(
- load,
- llvm::MD_invariant_load as c_uint,
- llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
- );
- }*/
+ // TODO(antoyo)
}
fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
- // TODO
- //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
+ // TODO(antoyo)
}
fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
- // TODO
- //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
+ // TODO(antoyo)
}
fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
- // FIXME: remove when having a proper API.
+ // FIXME(antoyo): remove when having a proper API.
let gcc_func = unsafe { std::mem::transmute(func) };
if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
self.function_call(func, args, funclet)
}
fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
- // FIXME: this does not zero-extend.
+ // FIXME(antoyo): this does not zero-extend.
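+ // A correct zero-extension would presumably need to cast through the unsigned
+ // type of the source width first (e.g. i8 -> u8 -> u64), since a direct cast
+ // from a signed type sign-extends.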
if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
- // FIXME: hack because base::from_immediate converts i1 to i8.
+ // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
// Fix the code in codegen_ssa::base::from_immediate.
return value;
}
- //println!("zext: {:?} -> {:?}", value, dest_typ);
self.context.new_cast(None, value, dest_typ)
}
fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
unimplemented!();
- //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
}
fn set_span(&mut self, _span: Span) {}
fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
unimplemented!();
- /*debug!(
- "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
- fn_name, hash, num_counters, index
- );
-
- let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
- let args = &[fn_name, hash, num_counters, index];
- let args = self.check_call("call", llfn, args);
-
- unsafe {
- let _ = llvm::LLVMRustBuildCall(
- self.llbuilder,
- llfn,
- args.as_ptr() as *const &llvm::Value,
- args.len() as c_uint,
- None,
- );
- }*/
}
}
impl ToGccComp for RealPredicate {
fn to_gcc_comparison(&self) -> ComparisonOp {
- // TODO: check that ordered vs non-ordered is respected.
+ // TODO(antoyo): check that ordered vs non-ordered is respected.
match *self {
RealPredicate::RealPredicateFalse => unreachable!(),
RealPredicate::RealOEQ => ComparisonOp::Equals,
let ordering =
match self {
- AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same.
+ AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Unordered => __ATOMIC_RELAXED,
- AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same.
+ AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
AtomicOrdering::Release => __ATOMIC_RELEASE,
AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
let tcx = cx.tcx();
- //debug!("get_fn(instance={:?})", instance);
-
assert!(!instance.substs.needs_infer());
assert!(!instance.substs.has_escaping_bound_vars());
assert!(!instance.substs.has_param_types_or_consts());
}
let sym = tcx.symbol_name(instance).name;
- //debug!("get_fn({:?}: {:?}) => {}", instance, instance.monomorphic_ty(cx.tcx()), sym);
let fn_abi = FnAbi::of_instance(cx, instance, &[]);
- // TODO
let func =
if let Some(func) = cx.get_declared_value(&sym) {
// Create a fn pointer with the new signature.
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if cx.val_ty(func) != ptrty {
- //debug!("get_fn: casting {:?} to {:?}", func, ptrty);
- // TODO
- //cx.const_ptrcast(func, ptrty)
+ // TODO(antoyo): cast the pointer.
func
}
else {
- //debug!("get_fn: not casting pointer!");
func
}
}
else {
cx.linkage.set(FunctionType::Extern);
let func = cx.declare_fn(&sym, &fn_abi);
- //cx.linkage.set(FunctionType::Internal);
- //debug!("get_fn: not casting pointer!");
-
- // TODO
- //attributes::from_fn_attrs(cx, func, instance);
-
- //let instance_def_id = instance.def_id();
-
- // TODO
- /*if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) {
- unsafe {
- llvm::LLVMSetDLLStorageClass(func, llvm::DLLStorageClass::DllImport);
- }
- }*/
+ // TODO(antoyo): set linkage and attributes.
func
};
}
fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> {
- // TODO: handle null_terminated.
+ // TODO(antoyo): handle null_terminated.
if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) {
return value.to_rvalue();
}
}
fn global_string(&self, string: &str) -> RValue<'gcc> {
- // TODO: handle non-null-terminated strings.
+ // TODO(antoyo): handle non-null-terminated strings.
let string = self.context.new_string_literal(&*string);
let sym = self.generate_local_symbol_name("str");
// NOTE: TLS is always off for a string literal.
.unwrap_or_else(|| bug!("symbol `{}` is already defined", sym));
self.global_init_block.add_assignment(None, global.dereference(None), string);
global.to_rvalue()
- //llvm::LLVMRustSetLinkage(global, llvm::Linkage::InternalLinkage);
+ // TODO(antoyo): set linkage.
}
pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
}
pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: when libgccjit allow casting from pointer to int, remove this.
+ // TODO(antoyo): when libgccjit allows casting from pointer to int, remove this.
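+ // In the meantime, the workaround below spills the value to a local, then
+ // reinterprets the local's address as a pointer to the destination type and
+ // loads through it.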
let func = block.get_function();
let local = func.new_local(None, value.get_type(), "ptrLocal");
block.add_assignment(None, local, value);
let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
ptr.dereference(None).to_rvalue()
}
-
- /*pub fn const_vector(&self, elements: &[RValue<'gcc>]) -> RValue<'gcc> {
- self.context.new_rvalue_from_vector(None, elements[0].get_type(), elements)
- }*/
}
pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
let num64: Result<i64, _> = num.try_into();
if let Ok(num) = num64 {
- // FIXME: workaround for a bug where libgccjit is expecting a constant.
+ // FIXME(antoyo): workaround for a bug where libgccjit is expecting a constant.
// The operations >> 64 and | low would make the normal case non-constant.
return self.context.new_rvalue_from_long(typ, num as i64);
}
if num >> 64 != 0 {
- // FIXME: use a new function new_rvalue_from_unsigned_long()?
+ // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
fn const_u8(&self, _i: u8) -> RValue<'gcc> {
unimplemented!();
- //self.const_uint(self.type_i8(), i as u64)
}
fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> {
unimplemented!();
- //unsafe { llvm::LLVMConstReal(t, val) }
}
fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) {
let fields: Vec<_> = values.iter()
.map(|value| value.get_type())
.collect();
- // TODO: cache the type? It's anonymous, so probably not.
+ // TODO(antoyo): cache the type? It's anonymous, so probably not.
let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
let typ = self.type_struct(&fields, packed);
let structure = self.global_init_func.new_local(None, typ, &name);
}
fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
- // TODO
+ // TODO(antoyo)
None
- //try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
}
fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
- // TODO
+ // TODO(antoyo)
None
- /*try_as_const_integral(v).and_then(|v| unsafe {
- let (mut lo, mut hi) = (0u64, 0u64);
- let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
- success.then_some(hi_lo_to_u128(lo, hi))
- })*/
}
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
Scalar::Int(int) => {
let data = int.assert_bits(layout.value.size(self));
- // FIXME: there's some issues with using the u128 code that follows, so hard-code
+ // FIXME(antoyo): there are some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values.
if ty == self.float_type {
return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
_ => self.static_addr_of(init, alloc.align, None),
};
if !self.sess().fewer_names() {
- // TODO
- //llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
+ // TODO(antoyo): set value name.
}
value
},
impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
if let Some(global_value) = self.const_globals.borrow().get(&cv) {
- // TODO
- /*unsafe {
- // Upgrade the alignment in cases where the same constant is used with different
- // alignment requirements
- let llalign = align.bytes() as u32;
- if llalign > llvm::LLVMGetAlignment(gv) {
- llvm::LLVMSetAlignment(gv, llalign);
- }
- }*/
+ // TODO(antoyo): upgrade alignment.
return *global_value;
}
let global_value = self.static_addr_of_mut(cv, align, kind);
- // TODO
- /*unsafe {
- llvm::LLVMSetGlobalConstant(global_value, True);
- }*/
+ // TODO(antoyo): set global constant.
self.const_globals.borrow_mut().insert(cv, global_value);
global_value
}
let val_llty = self.val_ty(value);
let value =
if val_llty == self.type_i1() {
- //val_llty = self.type_i8();
unimplemented!();
- //llvm::LLVMConstZExt(value, val_llty)
}
else {
value
else {
// If we created the global with the wrong type,
// correct the type.
- /*let name = llvm::get_value_name(global).to_vec();
- llvm::set_value_name(global, b"");
-
- let linkage = llvm::LLVMRustGetLinkage(global);
- let visibility = llvm::LLVMRustGetVisibility(global);*/
+ // TODO(antoyo): set value name, linkage and visibility.
let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section);
- /*llvm::LLVMRustSetLinkage(new_global, linkage);
- llvm::LLVMRustSetVisibility(new_global, visibility);*/
-
// To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it
// with the new global later. (See base::codegen_backend.)
//self.statics_to_rauw.borrow_mut().push((global, new_global));
new_global
};
- // TODO
- //set_global_alignment(&self, global, self.align_of(ty));
- //llvm::LLVMSetInitializer(global, value);
+ // TODO(antoyo): set alignment and initializer.
let value = self.rvalue_as_lvalue(value);
let value = value.get_address(None);
let dest_typ = global.get_type();
// NOTE: do not init the variables related to argc/argv because it seems we cannot
// overwrite those variables.
- // FIXME: correctly support global variable initialization.
+ // FIXME(antoyo): correctly support global variable initialization.
let skip_init = [
ARGV_INIT_ARRAY,
ARGC,
ARGV,
];
if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) {
- // TODO: switch to set_initializer when libgccjit supports that.
+ // TODO(antoyo): switch to set_initializer when libgccjit supports that.
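+ // In the meantime, initialization is emulated with a memcpy of the value into
+ // the global.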
let memcpy = self.context.get_builtin_function("memcpy");
let dst = self.context.new_cast(None, global, self.type_i8p());
let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void()));
// mutability are placed into read-only memory.
if !is_mutable {
if self.type_is_freeze(ty) {
- // TODO
- //llvm::LLVMSetGlobalConstant(global, llvm::True);
+ // TODO(antoyo): set global constant.
}
}
- //debuginfo::create_global_var_metadata(&self, def_id, global);
-
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
// Do not allow LLVM to change the alignment of a TLS on macOS.
//
// happens to be zero. Instead, we should only check the value of defined bytes
// and set all undefined bytes to zero if this allocation is headed for the
// BSS.
- /*let all_bytes_are_zero = alloc.relocations().is_empty()
- && alloc
- .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
- .iter()
- .all(|&byte| byte == 0);
-
- let sect_name = if all_bytes_are_zero {
- CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
- } else {
- CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
- };*/
unimplemented!();
- //llvm::LLVMSetSection(global, sect_name.as_ptr());
}
}
if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
if let Some(_section) = attrs.link_section {
unimplemented!();
- /*let section = llvm::LLVMMDStringInContext(
- self.llcx,
- section.as_str().as_ptr().cast(),
- section.as_str().len() as c_uint,
- );
- assert!(alloc.relocations().is_empty());
-
- // The `inspect` method is okay here because we checked relocations, and
- // because we are doing this access to inspect the final interpreter state (not
- // as part of the interpreter execution).
- let bytes =
- alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
- let alloc = llvm::LLVMMDStringInContext(
- self.llcx,
- bytes.as_ptr().cast(),
- bytes.len() as c_uint,
- );
- let data = [section, alloc];
- let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
- llvm::LLVMAddNamedMetadataOperand(
- self.llmod,
- "wasm.custom_sections\0".as_ptr().cast(),
- meta,
- );*/
}
} else {
- // TODO
- //base::set_link_section(global, &attrs);
+ // TODO(antoyo): set link section.
}
if attrs.flags.contains(CodegenFnAttrFlags::USED) {
/// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
fn add_used_global(&self, _global: RValue<'gcc>) {
- // TODO
- //let cast = self.context.new_cast(None, global, self.type_i8p());
- //self.used_statics.borrow_mut().push(cast);
+ // TODO(antoyo)
}
}
match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
- // TODO: check if it's okay that TLS is off here.
- // TODO: check if it's okay that link_section is None here.
- // TODO: set alignment here as well.
+ // TODO(antoyo): check if it's okay that TLS is off here.
+ // TODO(antoyo): check if it's okay that link_section is None here.
+ // TODO(antoyo): set alignment here as well.
let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| {
bug!("symbol `{}` is already defined", name);
});
- //llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
+ // TODO(antoyo): set linkage.
(name, gv)
}
_ => {
(name, global)
},
};
- // FIXME: I think the name coming from generate_local_symbol_name() above cannot be used
+ // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used
// globally.
// NOTE: a global seems to be visible only within a module, so save the name instead of
// the value to import it later.
self.global_names.borrow_mut().insert(cv, name);
self.global_init_block.add_assignment(None, gv.dereference(None), cv);
- //llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
+ // TODO(antoyo): set unnamed address.
gv
}
let instance = Instance::mono(self.tcx, def_id);
let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
if let Some(&global) = self.instances.borrow().get(&instance) {
- /*let attrs = self.tcx.codegen_fn_attrs(def_id);
- let name = &*self.tcx.symbol_name(instance).name;
- let name =
- if let Some(linkage) = attrs.linkage {
- // This is to match what happens in check_and_apply_linkage.
- Cow::from(format!("_rust_extern_with_linkage_{}", name))
- }
- else {
- Cow::from(name)
- };
- let global = self.context.new_global(None, GlobalKind::Imported, global.get_type(), &name)
- .get_address(None);
- self.global_names.borrow_mut().insert(global, name.to_string());*/
return global;
}
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
let sym = self.tcx.symbol_name(instance).name;
- //debug!("get_static: sym={} instance={:?}", sym, instance);
-
let global =
if let Some(def_id) = def_id.as_local() {
let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
if !self.tcx.is_reachable_non_generic(def_id) {
- /*unsafe {
- llvm::LLVMRustSetVisibility(global, llvm::Visibility::Hidden);
- }*/
+ // TODO(antoyo): set visibility.
}
global
item => bug!("get_static: expected static, found {:?}", item),
};
- //debug!("get_static: sym={} attrs={:?}", sym, attrs);
-
global
}
else {
let span = self.tcx.def_span(def_id);
let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
- let needs_dll_storage_attr = false; /*self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
- // ThinLTO can't handle this workaround in all cases, so we don't
- // emit the attrs. Instead we make them unnecessary by disallowing
- // dynamic linking when linker plugin based LTO is enabled.
- !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();*/
+ let needs_dll_storage_attr = false; // TODO(antoyo)
// If this assertion triggers, there's something wrong with commandline
// argument validation.
// is_codegened_item query.
if !self.tcx.is_codegened_item(def_id) {
unimplemented!();
- /*unsafe {
- llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
- }*/
}
}
global
};
- /*if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
- // For foreign (native) libs we know the exact storage type to use.
- unsafe {
- llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
- }
- }*/
+ // TODO(antoyo): set dll storage class.
self.instances.borrow_mut().insert(instance, global);
global
let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
let llty = cx.layout_of(ty).gcc_type(cx, true);
if let Some(linkage) = attrs.linkage {
- //debug!("get_static: sym={} linkage={:?}", sym, linkage);
-
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a
cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| {
cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
});
- //llvm::LLVMRustSetLinkage(global2, llvm::Linkage::InternalLinkage);
+ // TODO(antoyo): set linkage.
let lvalue = global2.dereference(None);
cx.global_init_block.add_assignment(None, lvalue, global1);
- //llvm::LLVMSetInitializer(global2, global1);
+ // TODO(antoyo): use global_set_initializer() once it works.
global2
}
else {
pub codegen_unit: &'tcx CodegenUnit<'tcx>,
pub context: &'gcc Context<'gcc>,
- // TODO: First set it to a dummy block to avoid using Option?
+ // TODO(antoyo): First set it to a dummy block to avoid using Option?
pub current_block: RefCell<Option<Block<'gcc>>>,
pub current_func: RefCell<Option<Function<'gcc>>>,
pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
/// Cache of globals.
pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
- // TODO: remove global_names.
+ // TODO(antoyo): remove global_names.
pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
/// A counter that is used for generating local symbol names
/// `const_undef()` returns structs as pointers so that they can later be assigned a value.
/// As such, this set remembers which of these pointers were returned by this function so that
/// they can be dereferenced later.
- /// FIXME: fix the rustc API to avoid having this hack.
+ /// FIXME(antoyo): fix the rustc API to avoid having this hack.
pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
/// Store pointers of different types for safety.
/// When casting the values back to their original types, check that they are indeed of that
/// type with these sets.
- /// FIXME: remove when the API supports more types.
+ /// FIXME(antoyo): remove when the API supports more types.
#[cfg(debug_assertions)]
lvalues: RefCell<FxHashSet<LValue<'gcc>>>,
}
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
let check_overflow = tcx.sess.overflow_checks();
- // TODO: fix this mess. libgccjit seems to return random type when using new_int_type().
- //let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true);
+ // TODO(antoyo): fix this mess. libgccjit seems to return random type when using new_int_type().
let isize_type = context.new_c_type(CType::LongLong);
- //let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false);
let usize_type = context.new_c_type(CType::ULongLong);
let bool_type = context.new_type::<bool>();
let i8_type = context.new_type::<i8>();
let i16_type = context.new_type::<i16>();
let i32_type = context.new_type::<i32>();
let i64_type = context.new_c_type(CType::LongLong);
- let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded?
+ let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
let u8_type = context.new_type::<u8>();
let u16_type = context.new_type::<u16>();
let u32_type = context.new_type::<u32>();
let u64_type = context.new_c_type(CType::ULongLong);
- let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded?
+ let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> {
let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) };
- //debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value);
lvalue
}
type BasicBlock = Block<'gcc>;
type Type = Type<'gcc>;
- type Funclet = (); // TODO
+ type Funclet = (); // TODO(antoyo)
- type DIScope = (); // TODO
- type DILocation = (); // TODO
- type DIVariable = (); // TODO
+ type DIScope = (); // TODO(antoyo)
+ type DILocation = (); // TODO(antoyo)
+ type DIVariable = (); // TODO(antoyo)
}
impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
- //let symbol = self.tcx.symbol_name(instance).name;
-
let func = get_fn(self, instance);
let func = self.rvalue_as_function(func);
let ptr = func.get_address(None);
- // TODO: don't do this twice: i.e. in declare_fn and here.
- //let fn_abi = FnAbi::of_instance(self, instance, &[]);
- //let (return_type, params, _) = fn_abi.gcc_type(self);
- // FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
- //let pointer_type = ptr.get_type();
+ // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
+ // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
self.normal_function_addresses.borrow_mut().insert(ptr);
"rust_eh_personality"
};
//let func = self.declare_func(name, self.type_i32(), &[], true);
- // FIXME: this hack should not be needed. That will probably be removed when
+ // FIXME(antoyo): this hack should not be needed. That will probably be removed when
// unwinding support is added.
self.context.new_rvalue_from_int(self.int_type, 0)
}
};
- //attributes::apply_target_cpu_attr(self, llfn);
+ // TODO(antoyo): apply target cpu attributes.
self.eh_personality.set(Some(llfn));
llfn
}
fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
unimplemented!();
- //&self.used_statics
}
fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
- // TODO
- //attributes::set_frame_pointer_type(self, llfn)
+ // TODO(antoyo)
}
fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
- // TODO
- //attributes::apply_target_cpu_attr(self, llfn)
+ // TODO(antoyo)
}
fn create_used_variable(&self) {
unimplemented!();
- /*let name = const_cstr!("llvm.used");
- let section = const_cstr!("llvm.metadata");
- let array =
- self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
-
- unsafe {
- let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
- llvm::LLVMSetInitializer(g, array);
- llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
- llvm::LLVMSetSection(g, section.as_ptr());
- }*/
}
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
_function_source_hash: u64,
) -> bool {
unimplemented!();
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "ensuring function source hash is set for instance={:?}; function_source_hash={}",
- instance, function_source_hash,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .set_function_source_hash(function_source_hash);
- true
- } else {
- false
- }*/
}
fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
- at {:?}",
- instance, function_source_hash, id, region,
- );
- let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter(function_source_hash, id, region);
- true
- } else {
- false
- }*/
- // TODO
+ // TODO(antoyo)
false
}
fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
- at {:?}",
- instance, id, lhs, op, rhs, region,
- );
- let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter_expression(id, lhs, op, rhs, region);
- true
- } else {
- false
- }*/
- // TODO
+ // TODO(antoyo)
false
}
fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding unreachable code to coverage_regions: instance={:?}, at {:?}",
- instance, region,
- );
- let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_unreachable_region(region);
- true
- } else {
- false
- }*/
- // TODO
+ // TODO(antoyo)
false
}
}
impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn coverageinfo_finalize(&self) {
- // TODO
- //mapgen::finalize(self)
+ // TODO(antoyo)
}
fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
unimplemented!();
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!("getting pgo_func_name_var for instance={:?}", instance);
- let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
- pgo_func_name_var_map
- .entry(instance)
- .or_insert_with(|| create_pgo_func_name_var(self, instance))
- } else {
- bug!("Could not get the `coverage_context`");
- }*/
}
/// Functions with MIR-based coverage are normally codegenned _only_ if
/// added as `unreachable_region`s.
fn define_unused_fn(&self, _def_id: DefId) {
unimplemented!();
- /*let instance = declare_unused_fn(self, &def_id);
- codegen_unused_fn_and_counter(self, instance);
- add_unused_function_coverage(self, instance, def_id);*/
}
}
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
unimplemented!();
- /*let cx = self.cx();
-
- // Convert the direct and indirect offsets to address ops.
- // FIXME(eddyb) use `const`s instead of getting the values via FFI,
- // the values should match the ones in the DWARF standard anyway.
- let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
- let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
- let mut addr_ops = SmallVec::<[_; 8]>::new();
-
- if direct_offset.bytes() > 0 {
- addr_ops.push(op_plus_uconst());
- addr_ops.push(direct_offset.bytes() as i64);
- }
- for &offset in indirect_offsets {
- addr_ops.push(op_deref());
- if offset.bytes() > 0 {
- addr_ops.push(op_plus_uconst());
- addr_ops.push(offset.bytes() as i64);
- }
- }
-
- // FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
- // to avoid having to pass it down in both places?
- // NB: `var` doesn't seem to know about the column, so that's a limitation.
- let dbg_loc = cx.create_debug_loc(scope_metadata, span);
- unsafe {
- // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
- llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
- DIB(cx),
- variable_alloca,
- dbg_var,
- addr_ops.as_ptr(),
- addr_ops.len() as c_uint,
- dbg_loc,
- self.llbb(),
- );
- }*/
}
- /*fn set_source_location(&mut self, scope: Self::DIScope, span: Span) {
- unimplemented!();
- /*debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
-
- let dbg_loc = self.cx().create_debug_loc(scope, span);
-
- unsafe {
- llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
- }*/
- }*/
-
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
- // TODO: replace with gcc_jit_context_new_global_with_initializer() if it's added:
+ // TODO(antoyo): replace with gcc_jit_context_new_global_with_initializer() if it's added:
// https://gcc.gnu.org/pipermail/jit/2020q3/001225.html
//
// Call the function to initialize global values here.
use std::iter;
for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) {
- // FIXME: better way to find if a crate is of proc-macro type?
+ // FIXME(antoyo): better way to find if a crate is of proc-macro type?
if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly {
// NOTE: proc-macro crates are not included in the executable, so don't call their
// initialization routine.
}
}
- // TODO
- //gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
+ // TODO(antoyo): insert reference to gdb debug scripts section global.
}
fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
unimplemented!();
- // Avoid wasting time if LLVM value names aren't even enabled.
- /*if self.sess().fewer_names() {
- return;
- }
-
- // Only function parameters and instructions are local to a function,
- // don't change the name of anything else (e.g. globals).
- let param_or_inst = unsafe {
- llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
- };
- if !param_or_inst {
- return;
- }
-
- // Avoid replacing the name if it already exists.
- // While we could combine the names somehow, it'd
- // get noisy quick, and the usefulness is dubious.
- if llvm::get_value_name(value).is_empty() {
- llvm::set_value_name(value, name.as_bytes());
- }*/
}
fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
unimplemented!();
- /*unsafe {
- let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
- llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
- }*/
}
}
impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
- //metadata::create_vtable_metadata(self, ty, vtable)
+ // TODO(antoyo)
}
fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
- // TODO
+ // TODO(antoyo)
None
}
}
fn debuginfo_finalize(&self) {
- //unimplemented!();
+ // TODO(antoyo)
}
fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
unimplemented!();
- /*let def_id = instance.def_id();
- let containing_scope = get_containing_scope(self, instance);
- let span = self.tcx.def_span(def_id);
- let loc = self.lookup_debug_loc(span.lo());
- let file_metadata = file_metadata(self, &loc.file);
-
- let function_type_metadata = unsafe {
- let fn_signature = get_function_signature(self, fn_abi);
- llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
- };
-
- // Find the enclosing function, in case this is a closure.
- let def_key = self.tcx().def_key(def_id);
- let mut name = def_key.disambiguated_data.data.to_string();
-
- let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
-
- // Get_template_parameters() will append a `<...>` clause to the function
- // name if necessary.
- let generics = self.tcx().generics_of(enclosing_fn_def_id);
- let substs = instance.substs.truncate_to(self.tcx(), generics);
- let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
-
- let linkage_name = &mangled_name_of_instance(self, instance).name;
- // Omit the linkage_name if it is the same as subprogram name.
- let linkage_name = if &name == linkage_name { "" } else { linkage_name };
-
- // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
- let scope_line = loc.line;
-
- let mut flags = DIFlags::FlagPrototyped;
-
- if fn_abi.ret.layout.abi.is_uninhabited() {
- flags |= DIFlags::FlagNoReturn;
- }
-
- let mut spflags = DISPFlags::SPFlagDefinition;
- if is_node_local_to_unit(self, def_id) {
- spflags |= DISPFlags::SPFlagLocalToUnit;
- }
- if self.sess().opts.optimize != config::OptLevel::No {
- spflags |= DISPFlags::SPFlagOptimized;
- }
- if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
- if id.to_def_id() == def_id {
- spflags |= DISPFlags::SPFlagMainSubprogram;
- }
- }
-
- unsafe {
- return llvm::LLVMRustDIBuilderCreateFunction(
- DIB(self),
- containing_scope,
- name.as_ptr().cast(),
- name.len(),
- linkage_name.as_ptr().cast(),
- linkage_name.len(),
- file_metadata,
- loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
- function_type_metadata,
- scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
- flags,
- spflags,
- maybe_definition_llfn,
- template_parameters,
- None,
- );
- }
-
- fn get_function_signature<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
- ) -> &'ll DIArray {
- if cx.sess().opts.debuginfo == DebugInfo::Limited {
- return create_DIArray(DIB(cx), &[]);
- }
-
- let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
-
- // Return type -- llvm::DIBuilder wants this at index 0
- signature.push(if fn_abi.ret.is_ignore() {
- None
- } else {
- Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP))
- });
-
- // Arguments types
- if cx.sess().target.options.is_like_msvc {
- // FIXME(#42800):
- // There is a bug in MSDIA that leads to a crash when it encounters
- // a fixed-size array of `u8` or something zero-sized in a
- // function-type (see #40477).
- // As a workaround, we replace those fixed-size arrays with a
- // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
- // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
- // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
- // This transformed type is wrong, but these function types are
- // already inaccurate due to ABI adjustments (see #42800).
- signature.extend(fn_abi.args.iter().map(|arg| {
- let t = arg.layout.ty;
- let t = match t.kind() {
- ty::Array(ct, _)
- if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
- {
- cx.tcx.mk_imm_ptr(ct)
- }
- _ => t,
- };
- Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
- }));
- } else {
- signature.extend(
- fn_abi
- .args
- .iter()
- .map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
- );
- }
-
- create_DIArray(DIB(cx), &signature[..])
- }
-
- fn get_template_parameters<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- generics: &ty::Generics,
- substs: SubstsRef<'tcx>,
- name_to_append_suffix_to: &mut String,
- ) -> &'ll DIArray {
- if substs.types().next().is_none() {
- return create_DIArray(DIB(cx), &[]);
- }
-
- name_to_append_suffix_to.push('<');
- for (i, actual_type) in substs.types().enumerate() {
- if i != 0 {
- name_to_append_suffix_to.push(',');
- }
-
- let actual_type =
- cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
- // Add actual type name to <...> clause of function name
- let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
- name_to_append_suffix_to.push_str(&actual_type_name[..]);
- }
- name_to_append_suffix_to.push('>');
-
- // Again, only create type information if full debuginfo is enabled
- let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
- let names = get_parameter_names(cx, generics);
- substs
- .iter()
- .zip(names)
- .filter_map(|(kind, name)| {
- if let GenericArgKind::Type(ty) = kind.unpack() {
- let actual_type =
- cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
- let actual_type_metadata =
- type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
- let name = name.as_str();
- Some(unsafe {
- Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
- DIB(cx),
- None,
- name.as_ptr().cast(),
- name.len(),
- actual_type_metadata,
- ))
- })
- } else {
- None
- }
- })
- .collect()
- } else {
- vec![]
- };
-
- create_DIArray(DIB(cx), &template_params[..])
- }
-
- fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
- let mut names = generics
- .parent
- .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
- names.extend(generics.params.iter().map(|param| param.name));
- names
- }
-
- fn get_containing_scope<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- instance: Instance<'tcx>,
- ) -> &'ll DIScope {
- // First, let's see if this is a method within an inherent impl. Because
- // if yes, we want to make the result subroutine DIE a child of the
- // subroutine's self-type.
- let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
- // If the method does *not* belong to a trait, proceed
- if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
- let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
- instance.substs,
- ty::ParamEnv::reveal_all(),
- &cx.tcx.type_of(impl_def_id),
- );
-
- // Only "class" methods are generally understood by LLVM,
- // so avoid methods on other types (e.g., `<*mut T>::null`).
- match impl_self_ty.kind() {
- ty::Adt(def, ..) if !def.is_box() => {
- // Again, only create type information if full debuginfo is enabled
- if cx.sess().opts.debuginfo == DebugInfo::Full
- && !impl_self_ty.needs_subst()
- {
- Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
- } else {
- Some(namespace::item_namespace(cx, def.did))
- }
- }
- _ => None,
- }
- } else {
- // For trait method impls we still use the "parallel namespace"
- // strategy
- None
- }
- });
-
- self_type.unwrap_or_else(|| {
- namespace::item_namespace(
- cx,
- DefId {
- krate: instance.def_id().krate,
- index: cx
- .tcx
- .def_key(instance.def_id())
- .parent
- .expect("get_containing_scope: missing parent?"),
- },
- )
- })
- }*/
}
fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
unimplemented!();
- /*let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
-
- unsafe {
- llvm::LLVMRustDIBuilderCreateDebugLocation(
- utils::debug_context(self).llcontext,
- line.unwrap_or(UNKNOWN_LINE_NUMBER),
- col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
- scope,
- inlined_at,
- )
- }*/
}
}
}
pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> {
- //debug!("declare_global_with_linkage(name={:?})", name);
let global = self.context.new_global(None, linkage, ty, name)
.get_address(None);
self.globals.borrow_mut().insert(name.to_string(), global);
pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
self.linkage.set(FunctionType::Exported);
let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
- // FIXME: this is a wrong cast. That requires changing the compiler API.
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) }
}
pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
- //debug!("declare_global(name={:?})", name);
- // FIXME: correctly support global variable initialization.
+ // FIXME(antoyo): correctly support global variable initialization.
if name.starts_with(ARGV_INIT_ARRAY) {
// NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the
// name of the variable now to actually declare it later.
}
pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
- // TODO: use the fn_type parameter.
+ // TODO(antoyo): use the fn_type parameter.
let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
let return_type = self.type_i32();
let variadic = false;
// NOTE: the current_func must also be set here, because get_fn() is not called
// for the main function.
*self.current_func.borrow_mut() = Some(func);
- // FIXME: this is a wrong cast. That requires changing the compiler API.
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) }
}
self.global_names.borrow_mut().insert(global, global_name.to_string());
self.argv_initialized.set(true);
}
- //debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
let (return_type, params, variadic) = fn_abi.gcc_type(self);
let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
- //fn_abi.apply_attrs_llfn(self, func);
- // FIXME: this is a wrong cast. That requires changing the compiler API.
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) }
}
}
pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
- //debug!("get_declared_value(name={:?})", name);
- // TODO: use a different field than globals, because this seems to return a function?
+ // TODO(antoyo): use a different field than globals, because this seems to return a function?
self.globals.borrow().get(name).cloned()
}
-
- /*fn get_defined_value(&self, name: &str) -> Option<RValue<'gcc>> {
- // TODO: gcc does not allow global initialization.
- None
- /*self.get_declared_value(name).and_then(|val| {
- let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
- if !declaration { Some(val) } else { None }
- })*/
- }*/
}
/// Declare a function.
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return the existing Value instead.
fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
- //debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
- /*let llfn = unsafe {
- llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
- };*/
-
if name.starts_with("llvm.") {
return llvm::intrinsic(name, cx);
}
}
else {
let params: Vec<_> = param_types.into_iter().enumerate()
- .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO: set name.
+ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
.collect();
let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
cx.functions.borrow_mut().insert(name.to_string(), func);
func
};
- //llvm::SetFunctionCallConv(llfn, callconv); // TODO
- // Function addresses in Rust are never significant, allowing functions to
- // be merged.
- //llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); // TODO
-
- /*if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
- llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
- }*/
-
- //attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
- //attributes::non_lazy_bind(cx.sess(), llfn);
+ // TODO(antoyo): set function calling convention.
+ // TODO(antoyo): set unnamed address.
+ // TODO(antoyo): set no red zone function attribute.
+ // TODO(antoyo): set attributes for optimisation.
+ // TODO(antoyo): set attributes for non lazy bind.
- // FIXME: invalid cast.
- // TODO: is this line useful?
- //cx.globals.borrow_mut().insert(name.to_string(), unsafe { std::mem::transmute(func) });
+ // FIXME(antoyo): invalid cast.
func
}
-// FIXME: this is a hack because libgccjit currently only supports alpha, num and _.
+// FIXME(antoyo): this is a hack because libgccjit currently only supports alphanumeric characters and '_'.
// Unsupported characters: `$` and `.`.
pub fn mangle_name(name: &str) -> String {
name.replace(|char: char| {
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func;
},
- // TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+ // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
"llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
"llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
"llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
_ => unimplemented!("unsupported LLVM intrinsic {}", name)
};
- println!("Get target builtin");
unimplemented!();
- /*let func = cx.context.get_target_builtin_function(gcc_name);
- cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
- func*/
}
let llval =
match name {
_ if simple.is_some() => {
- // FIXME: remove this cast when the API supports function.
+ // FIXME(antoyo): remove this cast when the API supports function.
let func = unsafe { std::mem::transmute(simple.expect("simple")) };
self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
},
}
sym::breakpoint => {
unimplemented!();
- /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
- self.call(llfn, &[], None)*/
}
sym::va_copy => {
unimplemented!();
- /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
- self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
}
sym::va_arg => {
unimplemented!();
- /*match fn_abi.ret.layout.abi {
- abi::Abi::Scalar(ref scalar) => {
- match scalar.value {
- Primitive::Int(..) => {
- if self.cx().size_of(ret_ty).bytes() < 4 {
- // `va_arg` should not be called on a integer type
- // less than 4 bytes in length. If it is, promote
- // the integer to a `i32` and truncate the result
- // back to the smaller type.
- let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
- self.trunc(promoted_result, llret_ty)
- } else {
- emit_va_arg(self, args[0], ret_ty)
- }
- }
- Primitive::F64 | Primitive::Pointer => {
- emit_va_arg(self, args[0], ret_ty)
- }
- // `va_arg` should never be used with the return type f32.
- Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
- }
- }
- _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
- }*/
}
sym::volatile_load | sym::unaligned_volatile_load => {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
}
let load = self.volatile_load(ptr.get_type(), ptr);
- // TODO
- /*let align = if name == sym::unaligned_volatile_load {
- 1
- } else {
- self.align_of(tp_ty).bytes() as u32
- };
- unsafe {
- llvm::LLVMSetAlignment(load, align);
- }*/
+ // TODO(antoyo): set alignment.
self.to_immediate(load, self.layout_of(tp_ty))
}
sym::volatile_store => {
| sym::prefetch_read_instruction
| sym::prefetch_write_instruction => {
unimplemented!();
- /*let expect = self.get_intrinsic(&("llvm.prefetch"));
- let (rw, cache_type) = match name {
- sym::prefetch_read_data => (0, 1),
- sym::prefetch_write_data => (1, 1),
- sym::prefetch_read_instruction => (0, 0),
- sym::prefetch_write_instruction => (1, 0),
- _ => bug!(),
- };
- self.call(
- expect,
- &[
- args[0].immediate(),
- self.const_i32(rw),
- args[1].immediate(),
- self.const_i32(cache_type),
- ],
- None,
- )*/
}
sym::ctlz
| sym::ctlz_nonzero
self.block = Some(after_block);
result.to_rvalue()
-
- /*let y = self.const_bool(false);
- let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
- self.call(llfn, &[args[0].immediate(), y], None)*/
}
sym::ctlz_nonzero => {
self.count_leading_zeroes(width, args[0].immediate())
args[0].immediate() // byte swapping a u8/i8 is just a no-op
}
else {
- // TODO: check if it's faster to use string literals and a
+ // TODO(antoyo): check if it's faster to use string literals and a
// match instead of format!.
let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
let mut arg = args[0].immediate();
- // FIXME: this cast should not be necessary. Remove
+ // FIXME(antoyo): this cast should not be necessary. Remove
// when having proper sized integer types.
let param_type = bswap.get_param(0).to_rvalue().get_type();
if param_type != arg.get_type() {
},
sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
sym::rotate_left | sym::rotate_right => {
- // TODO: implement using algorithm from:
+ // TODO(antoyo): implement using algorithm from:
// https://blog.regehr.org/archives/1063
// for other platforms.
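+ // For reference, the branch-free rotate from that post looks like this in
+ // Rust (sketch of the 32-bit left-rotate case):
+ // fn rotate_left32(x: u32, n: u32) -> u32 {
+ //     (x << (n & 31)) | (x >> (n.wrapping_neg() & 31))
+ // }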
let is_left = name == sym::rotate_left;
self.const_bool(true)
}
/*else if use_integer_compare {
- let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+ let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
let ptr_ty = self.type_ptr_to(integer_ty);
let a_ptr = self.bitcast(a, ptr_ty);
let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
}
fn assume(&mut self, value: Self::Value) {
- // TODO: switch to asumme when it exists.
+ // TODO(antoyo): switch to assume when it exists.
// Or use something like this:
// #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
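+ // In Rust terms, that macro amounts to (hypothetical sketch):
+ // if !cond { unsafe { std::hint::unreachable_unchecked() } }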
self.expect(value, true);
}
fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
- // TODO
- /*let expect = self.context.get_builtin_function("__builtin_expect");
- let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
- self.call(expect, &[cond, self.const_bool(expected)], None)*/
+ // TODO(antoyo)
cond
}
fn sideeffect(&mut self) {
- // TODO
- /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
- let fnname = self.get_intrinsic(&("llvm.sideeffect"));
- self.call(fnname, &[], None);
- }*/
+ // TODO(antoyo)
}
fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
- self.call(intrinsic, &[va_list], None)*/
}
fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
- /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
- self.call(intrinsic, &[va_list], None)*/
}
}
step4
},
32 => {
- // TODO: Refactor with other implementations.
+ // TODO(antoyo): Refactor with other implementations.
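+ // The steps below perform the classic divide-and-conquer bit reversal:
+ // swap ever-larger groups of bits (single bits, then pairs, and so on)
+ // until the whole word is reversed.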
// First step.
let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
// Second step.
let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
- let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
+ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
let step2 = self.or(left, right);
step5
},
128 => {
- // TODO: find a more efficient implementation?
+ // TODO(antoyo): find a more efficient implementation?
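+ // Split the value into two u64 halves, reverse each half, then swap them:
+ // bitreverse(x) == (bitreverse(low) << 64) | bitreverse(high).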
let sixty_four = self.context.new_rvalue_from_long(typ, 64);
let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
let low = self.context.new_cast(None, value, self.u64_type);
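                // Recombining relies on the identity:
                //     reverse(value) == (reverse(low) << 64) | reverse(high)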
}
fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: use width?
+ // TODO(antoyo): use width?
let arg_type = arg.get_type();
let count_leading_zeroes =
if arg_type.is_uint(&self.cx) {
}
fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: use the optimized version with fewer operations.
+ // TODO(antoyo): use the optimized version with fewer operations.
let value_type = value.get_type();
if value_type.is_u128(&self.cx) {
- // TODO: implement in the normal algorithm below to have a more efficient
+            // TODO(antoyo): fold this into the normal algorithm below to get a more efficient
// implementation (that does not require a call to __popcountdi2).
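            // Splits the value and sums the halves' counts:
            //     popcount(value) == popcount(value >> 64) + popcount(value & u64::MAX)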
let popcount = self.context.get_builtin_function("__builtin_popcountll");
let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
}
else if wants_msvc_seh(bx.sess()) {
unimplemented!();
- //codegen_msvc_try(bx, try_func, data, catch_func, dest);
}
else {
unimplemented!();
- //codegen_gnu_try(bx, try_func, data, catch_func, dest);
}
}
-
-// MSVC's definition of the `rust_try` function.
-//
-// This implementation uses the new exception handling instructions in LLVM
-// which have support in LLVM for SEH on MSVC targets. Although these
-// instructions are meant to work for all targets, as of the time of this
-// writing, however, LLVM does not recommend the usage of these new instructions
-// as the old ones are still more optimized.
-/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
- unimplemented!();
- /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
- bx.set_personality_fn(bx.eh_personality());
- bx.sideeffect();
-
- let mut normal = bx.build_sibling_block("normal");
- let mut catchswitch = bx.build_sibling_block("catchswitch");
- let mut catchpad = bx.build_sibling_block("catchpad");
- let mut caught = bx.build_sibling_block("caught");
-
- let try_func = llvm::get_param(bx.llfn(), 0);
- let data = llvm::get_param(bx.llfn(), 1);
- let catch_func = llvm::get_param(bx.llfn(), 2);
-
- // We're generating an IR snippet that looks like:
- //
- // declare i32 @rust_try(%try_func, %data, %catch_func) {
- // %slot = alloca u8*
- // invoke %try_func(%data) to label %normal unwind label %catchswitch
- //
- // normal:
- // ret i32 0
- //
- // catchswitch:
- // %cs = catchswitch within none [%catchpad] unwind to caller
- //
- // catchpad:
- // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
- // %ptr = load %slot
- // call %catch_func(%data, %ptr)
- // catchret from %tok to label %caught
- //
- // caught:
- // ret i32 1
- // }
- //
- // This structure follows the basic usage of throw/try/catch in LLVM.
- // For example, compile this C++ snippet to see what LLVM generates:
- //
- // #include <stdint.h>
- //
- // struct rust_panic {
- // rust_panic(const rust_panic&);
- // ~rust_panic();
- //
- // uint64_t x[2];
- // };
- //
- // int __rust_try(
- // void (*try_func)(void*),
- // void *data,
- // void (*catch_func)(void*, void*) noexcept
- // ) {
- // try {
- // try_func(data);
- // return 0;
- // } catch(rust_panic& a) {
- // catch_func(data, &a);
- // return 1;
- // }
- // }
- //
- // More information can be found in libstd's seh.rs implementation.
- let ptr_align = bx.tcx().data_layout.pointer_align.abi;
- let slot = bx.alloca(bx.type_i8p(), ptr_align);
- bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
-
- normal.ret(bx.const_i32(0));
-
- let cs = catchswitch.catch_switch(None, None, 1);
- catchswitch.add_handler(cs, catchpad.llbb());
-
- // We can't use the TypeDescriptor defined in libpanic_unwind because it
- // might be in another DLL and the SEH encoding only supports specifying
- // a TypeDescriptor from the current module.
- //
- // However this isn't an issue since the MSVC runtime uses string
- // comparison on the type name to match TypeDescriptors rather than
- // pointer equality.
- //
- // So instead we generate a new TypeDescriptor in each module that uses
- // `try` and let the linker merge duplicate definitions in the same
- // module.
- //
- // When modifying, make sure that the type_name string exactly matches
- // the one used in src/libpanic_unwind/seh.rs.
- let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
- let type_name = bx.const_bytes(b"rust_panic\0");
- let type_info =
- bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
- let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
- unsafe {
- llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
- llvm::SetUniqueComdat(bx.llmod, tydesc);
- llvm::LLVMSetInitializer(tydesc, type_info);
- }
-
- // The flag value of 8 indicates that we are catching the exception by
- // reference instead of by value. We can't use catch by value because
- // that requires copying the exception object, which we don't support
- // since our exception object effectively contains a Box.
- //
- // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
- let flags = bx.const_i32(8);
- let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
- let ptr = catchpad.load(slot, ptr_align);
- catchpad.call(catch_func, &[data, ptr], Some(&funclet));
-
- catchpad.catch_ret(&funclet, caught.llbb());
-
- caught.ret(bx.const_i32(1));
- });
-
- // Note that no invoke is used here because by definition this function
- // can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
- let i32_align = bx.tcx().data_layout.i32_align.abi;
- bx.store(ret, dest, i32_align);*/
-}*/
-
-// Definition of the standard `try` function for Rust using the GNU-like model
-// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
-// instructions).
-//
-// This codegen is a little surprising because we always call a shim
-// function instead of inlining the call to `invoke` manually here. This is done
-// because in LLVM we're only allowed to have one personality per function
-// definition. The call to the `try` intrinsic is being inlined into the
-// function calling it, and that function may already have other personality
-// functions in play. By calling a shim we're guaranteed that our shim will have
-// the right personality function.
-/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
- unimplemented!();
- /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
- // Codegens the shims described above:
- //
- // bx:
- // invoke %try_func(%data) normal %normal unwind %catch
- //
- // normal:
- // ret 0
- //
- // catch:
- // (%ptr, _) = landingpad
- // call %catch_func(%data, %ptr)
- // ret 1
-
- bx.sideeffect();
-
- let mut then = bx.build_sibling_block("then");
- let mut catch = bx.build_sibling_block("catch");
-
- let try_func = llvm::get_param(bx.llfn(), 0);
- let data = llvm::get_param(bx.llfn(), 1);
- let catch_func = llvm::get_param(bx.llfn(), 2);
- bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
- then.ret(bx.const_i32(0));
-
- // Type indicator for the exception being thrown.
- //
- // The first value in this tuple is a pointer to the exception object
- // being thrown. The second value is a "selector" indicating which of
- // the landing pad clauses the exception's type had been matched to.
- // rust_try ignores the selector.
- let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
- let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
- let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
- Some(tydesc) => {
- let tydesc = bx.get_static(tydesc);
- bx.bitcast(tydesc, bx.type_i8p())
- }
- None => bx.const_null(bx.type_i8p()),
- };
- catch.add_clause(vals, tydesc);
- let ptr = catch.extract_value(vals, 0);
- catch.call(catch_func, &[data, ptr], None);
- catch.ret(bx.const_i32(1));
- });
-
- // Note that no invoke is used here because by definition this function
- // can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
- let i32_align = bx.tcx().data_layout.i32_align.abi;
- bx.store(ret, dest, i32_align);*/
-}*/
use crate::builder::Builder;
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
- //println!("Generic simd: {}", name);
-
// macros for error handling:
macro_rules! emit_error {
($msg: tt) => {
let arg_tys = sig.inputs();
let name_str = &*name.as_str();
- /*if name == sym::simd_select_bitmask {
- let in_ty = arg_tys[0];
- let m_len = match in_ty.kind() {
- // Note that this `.unwrap()` crashes for isize/usize, that's sort
- // of intentional as there's not currently a use case for that.
- ty::Int(i) => i.bit_width().unwrap(),
- ty::Uint(i) => i.bit_width().unwrap(),
- _ => return_error!("`{}` is not an integral type", in_ty),
- };
- require_simd!(arg_tys[1], "argument");
- let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- require!(
- // Allow masks for vectors with fewer than 8 elements to be
- // represented with a u8 or i8.
- m_len == v_len || (m_len == 8 && v_len < 8),
- "mismatched lengths: mask length `{}` != other vector length `{}`",
- m_len,
- v_len
- );
- let i1 = bx.type_i1();
- let im = bx.type_ix(v_len);
- let i1xn = bx.type_vector(i1, v_len);
- let m_im = bx.trunc(args[0].immediate(), im);
- let m_i1s = bx.bitcast(m_im, i1xn);
- return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
- }*/
-
// every intrinsic below takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input");
let in_ty = arg_tys[0];
out_ty
);
- //let total_len = u128::from(in_len) * 2;
-
let vector = args[2].immediate();
- // TODO:
- /*let indices: Option<Vec<_>> = (0..n)
- .map(|i| {
- let arg_idx = i;
- let val = bx.const_get_vector_element(vector, i as u64);
- match bx.const_to_opt_u128(val, true) {
- None => {
- emit_error!("shuffle index #{} is not a constant", arg_idx);
- None
- }
- Some(idx) if idx >= total_len => {
- emit_error!(
- "shuffle index #{} is out of bounds (limit {})",
- arg_idx,
- total_len
- );
- None
- }
- Some(idx) => Some(bx.const_i32(idx as i32)),
- }
- })
- .collect();
- let indices = match indices {
- Some(i) => i,
- None => return Ok(bx.const_null(llret_ty)),
- };*/
-
return Ok(bx.shuffle_vector(
args[0].immediate(),
args[1].immediate(),
));
}
- /*if name == sym::simd_insert {
- require!(
- in_elem == arg_tys[2],
- "expected inserted type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- arg_tys[2]
- );
- return Ok(bx.insert_element(
- args[0].immediate(),
- args[2].immediate(),
- args[1].immediate(),
- ));
- }
- if name == sym::simd_extract {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
- }
-
- if name == sym::simd_select {
- let m_elem_ty = in_elem;
- let m_len = in_len;
- require_simd!(arg_tys[1], "argument");
- let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- require!(
- m_len == v_len,
- "mismatched lengths: mask length `{}` != other vector length `{}`",
- m_len,
- v_len
- );
- match m_elem_ty.kind() {
- ty::Int(_) => {}
- _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
- }
- // truncate the mask to a vector of i1s
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, m_len as u64);
- let m_i1s = bx.trunc(args[0].immediate(), i1xn);
- return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
- }
-
- if name == sym::simd_bitmask {
- // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
- // vector mask and returns an unsigned integer containing the most
- // significant bit (MSB) of each lane.
-
- // If the vector has less than 8 lanes, an u8 is returned with zeroed
- // trailing bits.
- let expected_int_bits = in_len.max(8);
- match ret_ty.kind() {
- ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
- _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
- }
-
- // Integer vector <i{in_bitwidth} x in_len>:
- let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
- ty::Int(i) => (
- args[0].immediate(),
- i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
- ),
- ty::Uint(i) => (
- args[0].immediate(),
- i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
- ),
- _ => return_error!(
- "vector argument `{}`'s element type `{}`, expected integer element type",
- in_ty,
- in_elem
- ),
- };
-
- // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
- let shift_indices =
- vec![
- bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
- in_len as _
- ];
- let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
- // Truncate vector to an <i1 x N>
- let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
- // Bitcast <i1 x N> to iN:
- let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
- // Zero-extend iN to the bitmask type:
- return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
- }
-
- fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>(
- name: Symbol,
- in_elem: &::rustc_middle::ty::TyS<'_>,
- in_ty: &::rustc_middle::ty::TyS<'_>,
- in_len: u64,
- bx: &mut Builder<'a, 'gcc, 'tcx>,
- span: Span,
- args: &[OperandRef<'tcx, RValue<'gcc>>],
- ) -> Result<RValue<'gcc>, ()> {
- macro_rules! emit_error {
- ($msg: tt) => {
- emit_error!($msg, )
- };
- ($msg: tt, $($fmt: tt)*) => {
- span_invalid_monomorphization_error(
- bx.sess(), span,
- &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
- name, $($fmt)*));
- }
- }
- macro_rules! return_error {
- ($($fmt: tt)*) => {
- {
- emit_error!($($fmt)*);
- return Err(());
- }
- }
- }
-
- let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
- let elem_ty = bx.cx.type_float_from_ty(*f);
- match f.bit_width() {
- 32 => ("f32", elem_ty),
- 64 => ("f64", elem_ty),
- _ => {
- return_error!(
- "unsupported element type `{}` of floating-point vector `{}`",
- f.name_str(),
- in_ty
- );
- }
- }
- } else {
- return_error!("`{}` is not a floating-point type", in_ty);
- };
-
- let vec_ty = bx.type_vector(elem_ty, in_len);
-
- let (intr_name, fn_ty) = match name {
- sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
- sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
- sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
- sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
- _ => return_error!("unrecognized intrinsic `{}`", name),
- };
- let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
- let f = bx.declare_cfn(&llvm_name, fn_ty);
- let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
- Ok(c)
- }
-
- if std::matches!(
- name,
- sym::simd_ceil
- | sym::simd_fabs
- | sym::simd_fcos
- | sym::simd_fexp2
- | sym::simd_fexp
- | sym::simd_flog10
- | sym::simd_flog2
- | sym::simd_flog
- | sym::simd_floor
- | sym::simd_fma
- | sym::simd_fpow
- | sym::simd_fpowi
- | sym::simd_fsin
- | sym::simd_fsqrt
- | sym::simd_round
- | sym::simd_trunc
- ) {
- return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
- }
-
- // FIXME: use:
- // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
- // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
- fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
- let p0s: String = "p0".repeat(no_pointers);
- match *elem_ty.kind() {
- ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
- _ => unreachable!(),
- }
- }
-
- fn gcc_vector_ty<'gcc>(
- cx: &CodegenCx<'gcc, '_>,
- elem_ty: Ty<'_>,
- vec_len: u64,
- mut no_pointers: usize,
- ) -> Type<'gcc> {
- // FIXME: use cx.layout_of(ty).llvm_type() ?
- let mut elem_ty = match *elem_ty.kind() {
- ty::Int(v) => cx.type_int_from_ty(v),
- ty::Uint(v) => cx.type_uint_from_ty(v),
- ty::Float(v) => cx.type_float_from_ty(v),
- _ => unreachable!(),
- };
- while no_pointers > 0 {
- elem_ty = cx.type_ptr_to(elem_ty);
- no_pointers -= 1;
- }
- cx.type_vector(elem_ty, vec_len)
- }
-
- if name == sym::simd_gather {
- // simd_gather(values: <N x T>, pointers: <N x *_ T>,
- // mask: <N x i{M}>) -> <N x T>
- // * N: number of elements in the input vectors
- // * T: type of the element to load
- // * M: any integer width is supported, will be truncated to i1
-
- // All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
- require_simd!(ret_ty, "return");
-
- // Of the same length:
- let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
- require!(
- in_len == out_len,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "second",
- in_len,
- in_ty,
- arg_tys[1],
- out_len
- );
- require!(
- in_len == out_len2,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "third",
- in_len,
- in_ty,
- arg_tys[2],
- out_len2
- );
-
- // The return type must match the first argument type
- require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
-
- // This counts how many pointers
- fn ptr_count(t: Ty<'_>) -> usize {
- match t.kind() {
- ty::RawPtr(p) => 1 + ptr_count(p.ty),
- _ => 0,
- }
- }
-
- // Non-ptr type
- fn non_ptr(t: Ty<'_>) -> Ty<'_> {
- match t.kind() {
- ty::RawPtr(p) => non_ptr(p.ty),
- _ => t,
- }
- }
-
- // The second argument must be a simd vector with an element type that's a pointer
- // to the element type of the first argument
- let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
- let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (pointer_count, underlying_ty) = match element_ty1.kind() {
- ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
- _ => {
- require!(
- false,
- "expected element type `{}` of second argument `{}` \
- to be a pointer to the element type `{}` of the first \
- argument `{}`, found `{}` != `*_ {}`",
- element_ty1,
- arg_tys[1],
- in_elem,
- in_ty,
- element_ty1,
- in_elem
- );
- unreachable!();
- }
- };
- assert!(pointer_count > 0);
- assert_eq!(pointer_count - 1, ptr_count(element_ty0));
- assert_eq!(underlying_ty, non_ptr(element_ty0));
-
- // The element type of the third argument must be a signed integer type of any width:
- let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
- match element_ty2.kind() {
- ty::Int(_) => (),
- _ => {
- require!(
- false,
- "expected element type `{}` of third argument `{}` \
- to be a signed integer type",
- element_ty2,
- arg_tys[2]
- );
- }
- }
-
- // Alignment of T, must be a constant integer value:
- let alignment_ty = bx.type_i32();
- let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
-
- // Truncate the mask vector to a vector of i1s:
- let (mask, mask_ty) = {
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, in_len);
- (bx.trunc(args[2].immediate(), i1xn), i1xn)
- };
-
- // Type of the vector of pointers:
- let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
- let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
-
- // Type of the vector of elements:
- let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
- let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
-
- let llvm_intrinsic =
- format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- bx.type_func(
- &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
- llvm_elem_vec_ty,
- ),
- );
- let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
- return Ok(v);
- }
-
- if name == sym::simd_scatter {
- // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
- // mask: <N x i{M}>) -> ()
- // * N: number of elements in the input vectors
- // * T: type of the element to load
- // * M: any integer width is supported, will be truncated to i1
-
- // All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
-
- // Of the same length:
- let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
- require!(
- in_len == element_len1,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "second",
- in_len,
- in_ty,
- arg_tys[1],
- element_len1
- );
- require!(
- in_len == element_len2,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "third",
- in_len,
- in_ty,
- arg_tys[2],
- element_len2
- );
-
- // This counts how many pointers
- fn ptr_count(t: Ty<'_>) -> usize {
- match t.kind() {
- ty::RawPtr(p) => 1 + ptr_count(p.ty),
- _ => 0,
- }
- }
-
- // Non-ptr type
- fn non_ptr(t: Ty<'_>) -> Ty<'_> {
- match t.kind() {
- ty::RawPtr(p) => non_ptr(p.ty),
- _ => t,
- }
- }
-
- // The second argument must be a simd vector with an element type that's a pointer
- // to the element type of the first argument
- let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
- let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
- let (pointer_count, underlying_ty) = match element_ty1.kind() {
- ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
- (ptr_count(element_ty1), non_ptr(element_ty1))
- }
- _ => {
- require!(
- false,
- "expected element type `{}` of second argument `{}` \
- to be a pointer to the element type `{}` of the first \
- argument `{}`, found `{}` != `*mut {}`",
- element_ty1,
- arg_tys[1],
- in_elem,
- in_ty,
- element_ty1,
- in_elem
- );
- unreachable!();
- }
- };
- assert!(pointer_count > 0);
- assert_eq!(pointer_count - 1, ptr_count(element_ty0));
- assert_eq!(underlying_ty, non_ptr(element_ty0));
-
- // The element type of the third argument must be a signed integer type of any width:
- match element_ty2.kind() {
- ty::Int(_) => (),
- _ => {
- require!(
- false,
- "expected element type `{}` of third argument `{}` \
- be a signed integer type",
- element_ty2,
- arg_tys[2]
- );
- }
- }
-
- // Alignment of T, must be a constant integer value:
- let alignment_ty = bx.type_i32();
- let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
-
- // Truncate the mask vector to a vector of i1s:
- let (mask, mask_ty) = {
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, in_len);
- (bx.trunc(args[2].immediate(), i1xn), i1xn)
- };
-
- let ret_t = bx.type_void();
-
- // Type of the vector of pointers:
- let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
- let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
-
- // Type of the vector of elements:
- let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
- let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
-
- let llvm_intrinsic =
- format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
- );
- let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
- return Ok(v);
- }
-
- macro_rules! arith_red {
- ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
- $identity:expr) => {
- if name == sym::$name {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- return match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => {
- let r = bx.$integer_reduce(args[0].immediate());
- if $ordered {
- // if overflow occurs, the result is the
- // mathematical result modulo 2^n:
- Ok(bx.$op(args[1].immediate(), r))
- } else {
- Ok(bx.$integer_reduce(args[0].immediate()))
- }
- }
- ty::Float(f) => {
- let acc = if $ordered {
- // ordered arithmetic reductions take an accumulator
- args[1].immediate()
- } else {
- // unordered arithmetic reductions use the identity accumulator
- match f.bit_width() {
- 32 => bx.const_real(bx.type_f32(), $identity),
- 64 => bx.const_real(bx.type_f64(), $identity),
- v => return_error!(
- r#"
-unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
- sym::$name,
- in_ty,
- in_elem,
- v,
- ret_ty
- ),
- }
- };
- Ok(bx.$float_reduce(acc, args[0].immediate()))
- }
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- };
- }
- };
- }
-
- arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
- arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
- arith_red!(
- simd_reduce_add_unordered: vector_reduce_add,
- vector_reduce_fadd_fast,
- false,
- add,
- 0.0
- );
- arith_red!(
- simd_reduce_mul_unordered: vector_reduce_mul,
- vector_reduce_fmul_fast,
- false,
- mul,
- 1.0
- );
-
- macro_rules! minmax_red {
- ($name:ident: $int_red:ident, $float_red:ident) => {
- if name == sym::$name {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- return match in_elem.kind() {
- ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
- ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
- ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- };
- }
- };
- }
-
- minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
- minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
-
- minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
- minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
-
- macro_rules! bitwise_red {
- ($name:ident : $red:ident, $boolean:expr) => {
- if name == sym::$name {
- let input = if !$boolean {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- args[0].immediate()
- } else {
- match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => {}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- }
-
- // boolean reductions operate on vectors of i1s:
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, in_len as u64);
- bx.trunc(args[0].immediate(), i1xn)
- };
- return match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => {
- let r = bx.$red(input);
- Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
- }
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- };
- }
- };
- }
-
- bitwise_red!(simd_reduce_and: vector_reduce_and, false);
- bitwise_red!(simd_reduce_or: vector_reduce_or, false);
- bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
- bitwise_red!(simd_reduce_all: vector_reduce_and, true);
- bitwise_red!(simd_reduce_any: vector_reduce_or, true);
-
- if name == sym::simd_cast {
- require_simd!(ret_ty, "return");
- let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
- require!(
- in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
- );
- // casting cares about nominal type, not just structural type
- if in_elem == out_elem {
- return Ok(args[0].immediate());
- }
-
- enum Style {
- Float,
- Int(/* is signed? */ bool),
- Unsupported,
- }
-
- let (in_style, in_width) = match in_elem.kind() {
- // vectors of pointer-sized integers should've been
- // disallowed before here, so this unwrap is safe.
- ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::Float(f) => (Style::Float, f.bit_width()),
- _ => (Style::Unsupported, 0),
- };
- let (out_style, out_width) = match out_elem.kind() {
- ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::Float(f) => (Style::Float, f.bit_width()),
- _ => (Style::Unsupported, 0),
- };
-
- match (in_style, out_style) {
- (Style::Int(in_is_signed), Style::Int(_)) => {
- return Ok(match in_width.cmp(&out_width) {
- Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
- Ordering::Equal => args[0].immediate(),
- Ordering::Less => {
- if in_is_signed {
- bx.sext(args[0].immediate(), llret_ty)
- } else {
- bx.zext(args[0].immediate(), llret_ty)
- }
- }
- });
- }
- (Style::Int(in_is_signed), Style::Float) => {
- return Ok(if in_is_signed {
- bx.sitofp(args[0].immediate(), llret_ty)
- } else {
- bx.uitofp(args[0].immediate(), llret_ty)
- });
- }
- (Style::Float, Style::Int(out_is_signed)) => {
- return Ok(if out_is_signed {
- bx.fptosi(args[0].immediate(), llret_ty)
- } else {
- bx.fptoui(args[0].immediate(), llret_ty)
- });
- }
- (Style::Float, Style::Float) => {
- return Ok(match in_width.cmp(&out_width) {
- Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
- Ordering::Equal => args[0].immediate(),
- Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
- });
- }
- _ => { /* Unsupported. Fallthrough. */ }
- }
- require!(
- false,
- "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
- in_ty,
- in_elem,
- ret_ty,
- out_elem
- );
- }*/
-
macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
simd_shl: Uint, Int => shl;
simd_shr: Uint => lshr, Int => ashr;
simd_and: Uint, Int => and;
- simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors.
+ simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
simd_xor: Uint, Int => xor;
- /*simd_fmax: Float => maxnum;
- simd_fmin: Float => minnum;*/
}
- /*macro_rules! arith_unary {
- ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
- $(if name == sym::$name {
- match in_elem.kind() {
- $($(ty::$p(_))|* => {
- return Ok(bx.$call(args[0].immediate()))
- })*
- _ => {},
- }
- require!(false,
- "unsupported operation on `{}` with element `{}`",
- in_ty,
- in_elem)
- })*
- }
- }
-
- arith_unary! {
- simd_neg: Int => neg, Float => fneg;
- }
-
- if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
- let lhs = args[0].immediate();
- let rhs = args[1].immediate();
- let is_add = name == sym::simd_saturating_add;
- let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
- let (signed, elem_width, elem_ty) = match *in_elem.kind() {
- ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
- ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
- _ => {
- return_error!(
- "expected element type `{}` of vector type `{}` \
- to be a signed or unsigned integer type",
- arg_tys[0].simd_size_and_type(bx.tcx()).1,
- arg_tys[0]
- );
- }
- };
- let llvm_intrinsic = &format!(
- "llvm.{}{}.sat.v{}i{}",
- if signed { 's' } else { 'u' },
- if is_add { "add" } else { "sub" },
- in_len,
- elem_width
- );
- let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
-
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- bx.type_func(&[vec_ty, vec_ty], vec_ty),
- );
- let v = bx.call(f, &[lhs, rhs], None);
- return Ok(v);
- }*/
-
unimplemented!("simd {}", name);
-
- //span_bug!(span, "unknown SIMD intrinsic");
}
/*
- * TODO: support #[inline] attributes.
- * TODO: support LTO.
+ * TODO(antoyo): support #[inline] attributes.
+ * TODO(antoyo): support LTO.
*
- * TODO: remove the local gccjit LD_LIBRARY_PATH in config.sh.
- * TODO: remove the object dependency.
- * TODO: remove the patches.
+ * TODO(antoyo): remove the patches.
*/
#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
-/*extern crate flate2;
-extern crate libc;*/
extern crate rustc_ast;
extern crate rustc_codegen_ssa;
extern crate rustc_data_structures;
extern crate rustc_errors;
-//extern crate rustc_fs_util;
extern crate rustc_hir;
extern crate rustc_metadata;
extern crate rustc_middle;
mod mono_item;
mod type_;
mod type_of;
-mod va_arg;
use std::any::Any;
use std::sync::Arc;
fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> {
use rustc_codegen_ssa::back::link::link_binary;
if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) {
- // TODO: remove when global initializer work without calling a function at runtime.
+        // TODO(antoyo): remove when global initializers work without calling a function at runtime.
        // HACK: since this codegen adds some symbols (e.g. __gccGlobalCrateInit), the UI
        // tests load libstd.so as a dynamic library, and rustc uses a version-script to specify
        // symbol visibility, we add * to export all symbols.
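        // A version script that exports everything looks roughly like: { global: *; };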
}
fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> {
- // TODO: set opt level.
+ // TODO(antoyo): set opt level.
Arc::new(|_| {
Ok(())
})
fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
None
- // TODO
- //llvm_util::tune_cpu(sess)
+ // TODO(antoyo)
}
}
}
unsafe impl Send for GccContext {}
-// FIXME: that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
+// FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
unsafe impl Sync for GccContext {}
impl WriteBackendMethods for GccCodegenBackend {
type ThinBuffer = ThinBuffer;
fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
- // TODO: implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
+ // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
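        // (With GCC, -flto has to be passed both when compiling and when linking, and
        // the linker must load GCC's LTO plugin.)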
// NOTE: implemented elsewhere.
let module =
match modules.remove(0) {
FatLTOInput::InMemory(module) => module,
FatLTOInput::Serialized { .. } => {
unimplemented!();
- /*info!("pushing serialized module {:?}", name);
- let buffer = SerializedModule::Local(buffer);
- serialized_modules.push((buffer, CString::new(name).unwrap()));*/
}
};
Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] })
}
unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
- //if cgcx.lto == Lto::Fat {
- //module.module_llvm.context.add_driver_option("-flto");
- //}
module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
Ok(())
}
}
fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> {
- // TODO
+ // TODO(antoyo)
Ok(())
}
}
}
-/*fn target_triple(sess: &Session) -> target_lexicon::Triple {
- sess.target.llvm_target.parse().unwrap()
-}*/
-
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
}
unimplemented!();
- /*unsafe {
- let mut len = 0;
- let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
- str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
- }*/
}
pub fn target_cpu(sess: &Session) -> &str {
},
)
.filter(|_feature| {
- /*if feature.starts_with("sse") {
- return true;
- }*/
- // TODO: implement a way to get enabled feature in libgccjit.
- //println!("Feature: {}", feature);
- /*let llvm_feature = to_llvm_feature(sess, feature);
- let cstr = CString::new(llvm_feature).unwrap();
- unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }*/
+ // TODO(antoyo): implement a way to get enabled feature in libgccjit.
false
})
.map(|feature| Symbol::intern(feature))
)
});
- // TODO
- /*unsafe {
- llvm::LLVMRustSetLinkage(global, base::linkage_to_llvm(linkage));
- llvm::LLVMRustSetVisibility(global, base::visibility_to_llvm(visibility));
- }*/
-
+ // TODO(antoyo): set linkage and visibility.
self.instances.borrow_mut().insert(instance, global);
}
let _decl = self.declare_fn(symbol_name, &fn_abi);
//let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
- // TODO: call set_link_section() to allow initializing argc/argv.
- //base::set_link_section(decl, &attrs);
- /*if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
- llvm::SetUniqueComdat(self.llmod, decl);
- }*/
-
- //debug!("predefine_fn: instance = {:?}", instance);
-
- // TODO: use inline attribute from there in linkage.set() above:
- //attributes::from_fn_attrs(self, decl, instance);
-
- //self.instances.borrow_mut().insert(instance, decl);
+ // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
+ // TODO(antoyo): set unique comdat.
+ // TODO(antoyo): use inline attribute from there in linkage.set() above.
}
}
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
        // gcc only supports 1, 2, 4, 8 or 16-byte integers.
+        // FIXME(antoyo): it is misleading to use the next power of two here, as rustc_codegen_ssa
+        // sometimes uses 96-bit integers and the following code will give an integer of a
+        // different size.
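+        // For example, num_bits == 96 gives bytes == (96 / 8).next_power_of_two() == 16,
+        // i.e. an i128 rather than a 96-bit integer.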
let bytes = (num_bits / 8).next_power_of_two() as i32;
match bytes {
1 => self.i8_type,
16 => self.i128_type,
_ => panic!("unexpected num_bits: {}", num_bits),
}
- /*
- let bytes = (num_bits / 8).next_power_of_two() as i32;
- println!("num_bits: {}, bytes: {}", num_bits, bytes);
- self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer.
- */
}
- /*pub fn type_bool(&self) -> Type<'gcc> {
- self.bool_type
- }*/
-
pub fn type_void(&self) -> Type<'gcc> {
self.context.new_type::<()>()
}
let ity = Integer::approximate_align(self, align);
self.type_from_integer(ity)
}
-
- /*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
- match t {
- ty::IntTy::Isize => self.type_isize(),
- ty::IntTy::I8 => self.type_i8(),
- ty::IntTy::I16 => self.type_i16(),
- ty::IntTy::I32 => self.type_i32(),
- ty::IntTy::I64 => self.type_i64(),
- ty::IntTy::I128 => self.type_i128(),
- }
- }
-
- pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
- match t {
- ty::UintTy::Usize => self.type_isize(),
- ty::UintTy::U8 => self.type_i8(),
- ty::UintTy::U16 => self.type_i16(),
- ty::UintTy::U32 => self.type_i32(),
- ty::UintTy::U64 => self.type_i64(),
- ty::UintTy::U128 => self.type_i128(),
- }
- }
-
- pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
- match t {
- ty::FloatTy::F32 => self.type_f32(),
- ty::FloatTy::F64 => self.type_f64(),
- }
- }
-
- pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
- self.context.new_vector_type(ty, len)
- }*/
}
impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
.collect();
- // TODO: use packed.
- //let name = types.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
- //let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type();
+ // TODO(antoyo): use packed.
let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
self.struct_types.borrow_mut().insert(types, typ);
typ
TypeKind::Vector
}
else {
- // TODO
+ // TODO(antoyo): support other types.
TypeKind::Void
}
}
fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
- // TODO
- /*assert_ne!(self.type_kind(ty), TypeKind::Function,
- "don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead"
- );*/
ty.make_pointer()
}
fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
- // TODO: use address_space
+ // TODO(antoyo): use address_space
ty.make_pointer()
}
fn vector_length(&self, _ty: Type<'gcc>) -> usize {
unimplemented!();
- //unsafe { llvm::LLVMGetVectorSize(ty) as usize }
}
fn float_width(&self, typ: Type<'gcc>) -> usize {
else {
panic!("Cannot get width of float type {:?}", typ);
}
- // TODO: support other sizes.
- /*match self.type_kind(ty) {
- TypeKind::Float => 32,
- TypeKind::Double => 64,
- TypeKind::X86_FP80 => 80,
- TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
- _ => bug!("llvm_float_width called on a non-float type"),
- }*/
+ // TODO(antoyo): support other sizes.
}
fn int_width(&self, typ: Type<'gcc>) -> u64 {
}
pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) {
- // TODO: use packed.
+ // TODO(antoyo): use packed.
let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
.collect();
typ.set_fields(None, &fields);
}
- /*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
- // TODO: use packed.
- let fields: Vec<_> = fields.iter().enumerate()
- .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
- .collect();
- return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type();
- }*/
-
pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
self.context.new_opaque_struct_type(None, name)
}
// NOTE: since gccjit only supports i32 for the array size and libcore's tests uses a
// size of usize::MAX in test_binary_search, we workaround this by setting the size to
// zero for ZSTs.
- // FIXME: fix gccjit API.
+ // FIXME(antoyo): fix gccjit API.
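+            // Any length is correct for a ZST array: it occupies zero bytes either way,
+            // while keeping the element type's alignment.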
len = 0;
}
}
}
pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
- //debug!("struct_fields: {:#?}", layout);
let field_count = layout.fields.count();
let mut packed = false;
layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
packed |= effective_field_align < field.align.abi;
- /*debug!(
- "struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \
- effective_field_align: {}",
- i,
- field,
- offset,
- target_offset,
- effective_field_align.bytes()
- );*/
assert!(target_offset >= offset);
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.align_to(padding_align) + padding, target_offset);
result.push(cx.type_padding_filler(padding, padding_align));
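        // For example, a field ending at offset 1 whose successor must start at
        // target_offset 4 gets a 3-byte filler here.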
- //debug!(" padding before: {:?}", padding);
- result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box<Type>.
+ result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box<Type>.
offset = target_offset + field.size;
prev_effective_align = effective_field_align;
}
let padding = layout.size - offset;
let padding_align = prev_effective_align;
assert_eq!(offset.align_to(padding_align) + padding, layout.size);
- /*debug!(
- "struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}",
- padding, offset, layout.size
- );*/
result.push(cx.type_padding_filler(padding, padding_align));
assert_eq!(result.len(), 1 + field_count * 2);
- } else {
- //debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size);
}
(result, packed)
// If `Some` is returned then a named struct is created in LLVM. Name collisions are
// avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
// can improve perf.
- // FIXME: I don't think that's true for libgccjit.
+ // FIXME(antoyo): I don't think that's true for libgccjit.
Some(String::new())
}
_ => None,
/// with the inner-most trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
+    // TODO(antoyo): do we still need the set_fields parameter?
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
if let Abi::Scalar(ref scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
return ty;
}
- //debug!("gcc_type({:#?})", self);
-
assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
// Make sure lifetimes are erased, to avoid generating distinct LLVM
else {
uncached_gcc_type(cx, *self, &mut defer)
};
- //debug!("--> mapped {:#?} to ty={:?}", self, ty);
cx.types.borrow_mut().insert((self.ty, variant_index), ty);
if let Some((ty, layout)) = defer {
- //TODO: do we still need this conditions and the set_fields parameter?
- //if set_fields {
- let (fields, packed) = struct_fields(cx, layout);
- cx.set_struct_body(ty, &fields, packed);
- /*}
- else {
- // Since we might be trying to generate a type containing another type which is not
- // completely generated yet, we don't set the fields right now, but we save the
- // type to set the fields later.
- cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout));
- }*/
+ let (fields, packed) = struct_fields(cx, layout);
+ cx.set_struct_body(ty, &fields, packed);
}
ty
}
fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
- // TODO: remove llvm hack:
+ // TODO(antoyo): remove llvm hack:
// HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`.
match self.ty.kind() {
// immediate, just like `bool` is typically `i8` in memory and only `i1`
// when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
- // TODO: this bugs certainly don't happen in this case since the bool type is used instead of i1.
- if /*immediate &&*/ scalar.is_bool() {
+            // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
+ if scalar.is_bool() {
return cx.type_i1();
}
fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
unimplemented!();
- //ty.gcc_type(self)
}
fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
- // FIXME: return correct type.
+ // FIXME(antoyo): return correct type.
self.type_void()
- //fn_abi.gcc_type(self)
}
}
+++ /dev/null
-/*use gccjit::{RValue, ToRValue, Type};
-use rustc_codegen_ssa::mir::operand::OperandRef;
-use rustc_codegen_ssa::{
- common::IntPredicate,
- traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
-};
-use rustc_middle::ty::layout::HasTyCtxt;
-use rustc_middle::ty::Ty;
-use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size};
-
-use crate::builder::Builder;
-use crate::type_of::LayoutGccExt;
-
-fn round_pointer_up_to_alignment<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: RValue<'gcc>, align: Align, ptr_ty: Type<'gcc>) -> RValue<'gcc> {
- let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
- ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
- ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
- bx.inttoptr(ptr_as_int, ptr_ty)
-}
-
-fn emit_direct_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, llty: Type<'gcc>, size: Size, align: Align, slot_size: Align, allow_higher_align: bool) -> (RValue<'gcc>, Align) {
- let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
- let va_list_addr =
- if list.layout.gcc_type(bx.cx, true) != va_list_ptr_ty {
- bx.bitcast(list.immediate(), va_list_ptr_ty)
- }
- else {
- list.immediate()
- };
-
- let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
-
- let (addr, addr_align) = if allow_higher_align && align > slot_size {
- (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
- } else {
- (ptr, slot_size)
- };
-
- let aligned_size = size.align_to(slot_size).bytes() as i32;
- let full_direct_size = bx.cx().const_i32(aligned_size);
- let next = bx.inbounds_gep(addr, &[full_direct_size]);
- bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
-
- if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
- let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
- let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
- (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
- } else {
- (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
- }
-}
-
-fn emit_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>, indirect: bool, slot_size: Align, allow_higher_align: bool) -> RValue<'gcc> {
- let layout = bx.cx.layout_of(target_ty);
- let (llty, size, align) =
- if indirect {
- (
- bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).gcc_type(bx.cx, true),
- bx.cx.data_layout().pointer_size,
- bx.cx.data_layout().pointer_align,
- )
- }
- else {
- (layout.gcc_type(bx.cx, true), layout.size, layout.align)
- };
- let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
- if indirect {
- let tmp_ret = bx.load(addr, addr_align);
- bx.load(tmp_ret, align.abi)
- }
- else {
- bx.load(addr, addr_align)
- }
-}
-
-fn emit_aapcs_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> {
- // Implementation of the AAPCS64 calling convention for va_args see
- // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
- let va_list_addr = list.immediate();
- let layout = bx.cx.layout_of(target_ty);
- let gcc_type = layout.immediate_gcc_type(bx);
-
- let function = bx.llbb().get_function();
- let variable = function.new_local(None, gcc_type, "va_arg");
-
- let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
- let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
- let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
- let end = bx.build_sibling_block("va_arg.end");
- let zero = bx.const_i32(0);
- let offset_align = Align::from_bytes(4).unwrap();
- assert!(bx.tcx().sess.target.endian == Endian::Little);
-
- let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
- let (reg_off, reg_top_index, slot_size) = if gr_type {
- let gr_offs = bx.struct_gep(va_list_addr, 7);
- let nreg = (layout.size.bytes() + 7) / 8;
- (gr_offs, 3, nreg * 8)
- } else {
- let vr_off = bx.struct_gep(va_list_addr, 9);
- let nreg = (layout.size.bytes() + 15) / 16;
- (vr_off, 5, nreg * 16)
- };
-
- // if the offset >= 0 then the value will be on the stack
- let mut reg_off_v = bx.load(reg_off, offset_align);
- let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
- bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb());
-
- // The value at this point might be in a register, but there is a chance that
- // it could be on the stack so we have to update the offset and then check
- // the offset again.
-
- if gr_type && layout.align.abi.bytes() > 8 {
- reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
- reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
- }
- let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
-
- maybe_reg.store(new_reg_off_v, reg_off, offset_align);
-
- // Check to see if we have overflowed the registers as a result of this.
- // If we have then we need to use the stack for this value
- let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
- maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb());
-
- let top = in_reg.struct_gep(va_list_addr, reg_top_index);
- let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
-
- // reg_value = *(@top + reg_off_v);
- let top = in_reg.gep(top, &[reg_off_v]);
- let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.gcc_type(bx, true)));
- let reg_value = in_reg.load(top, layout.align.abi);
- in_reg.assign(variable, reg_value);
- in_reg.br(end.llbb());
-
- // On Stack block
- let stack_value =
- emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
- on_stack.assign(variable, stack_value);
- on_stack.br(end.llbb());
-
- *bx = end;
- variable.to_rvalue()
-}
-
-pub(super) fn emit_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> {
- // Determine the va_arg implementation to use. The LLVM va_arg instruction
- // is lacking in some instances, so we should only use it as a fallback.
- let target = &bx.cx.tcx.sess.target;
- let arch = &bx.cx.tcx.sess.target.arch;
- match &**arch {
- // Windows x86
- "x86" if target.options.is_like_windows => {
- emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
- }
- // Generic x86
- "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
- // Windows AArch64
- "aarch64" if target.options.is_like_windows => {
- emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
- }
- // macOS / iOS AArch64
- "aarch64" if target.options.is_like_osx => {
- emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
- }
- "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
- // Windows x86_64
- "x86_64" if target.options.is_like_windows => {
- let target_ty_size = bx.cx.size_of(target_ty).bytes();
- let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
- emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
- }
- // For all other architecture/OS combinations fall back to using
- // the LLVM va_arg instruction.
- // https://llvm.org/docs/LangRef.html#va-arg-instruction
- _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).gcc_type(bx.cx, true)),
- }
-}*/
#!/bin/bash
-# TODO: rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed?
+# TODO(antoyo): rewrite with cargo-make (or `just`, or something similar) to only rebuild the sysroot when needed?
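+# A possible guard, as a sketch (assuming the sysroot output lands in
+# build_sysroot/sysroot):
+#     if [ build_sysroot/build_sysroot.sh -nt build_sysroot/sysroot ]; then
+#         ./build_sysroot/build_sysroot.sh
+#     fi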
-#set -x
set -e
export GCC_PATH=$(cat gcc_path)
echo "[BUILD] example"
$RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
-#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- #echo "[JIT] mini_core_hello_world"
- #CG_CLIF_JIT=1 CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --crate-type bin -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
-#else
- #echo "[JIT] mini_core_hello_world (skipped)"
-#fi
-
echo "[AOT] mini_core_hello_world"
$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
-# (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
echo "[BUILD] sysroot"
time ./build_sysroot/build_sysroot.sh
echo "[AOT] alloc_system"
$RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
-# FIXME: this requires linking an additional lib for __popcountdi2
-#echo "[AOT] alloc_example"
-#$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
-#$RUN_WRAPPER ./target/out/alloc_example
-
-#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- #echo "[JIT] std_example"
- #CG_CLIF_JIT=1 $RUSTC --crate-type bin -Cprefer-dynamic example/std_example.rs --target $HOST_TRIPLE
-#else
- #echo "[JIT] std_example (skipped)"
-#fi
+echo "[AOT] alloc_example"
+$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
+$RUN_WRAPPER ./target/out/alloc_example
echo "[AOT] dst_field_align"
-# FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+# FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
$RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
$RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
$RUN_WRAPPER ./target/out/track-caller-attribute
-# FIXME: this requires linking an additional lib for __popcountdi2
-#echo "[BUILD] mod_bench"
-#$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
+echo "[BUILD] mod_bench"
+$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
-# FIXME linker gives multiple definitions error on Linux
+# FIXME(antoyo): linker gives multiple definitions error on Linux
#echo "[BUILD] sysroot in release mode"
#./build_sysroot/build_sysroot.sh --release
+# TODO(antoyo): uncomment when it works.
#pushd simple-raytracer
#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
#echo "[BENCH COMPILE] ebobby/simple-raytracer"
../../../../../cargo.sh test
popd
+# TODO(antoyo): uncomment when it works.
#pushd regex
#echo "[TEST] rust-lang/regex example shootout-regex-dna"
#../cargo.sh clean
git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
export RUSTFLAGS=
-#git apply ../rust_lang.patch
-
-
rm config.toml || true
cat > config.toml <<EOF
done
git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
-rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO: Enable back this test if I ever implement the llvm_asm! macro.
-#rm src/test/ui/consts/const-size_of-cycle.rs || true # Error file path difference
-#rm src/test/ui/impl-trait/impl-generic-mismatch.rs || true # ^
-#rm src/test/ui/type_length_limit.rs || true
-#rm src/test/ui/issues/issue-50993.rs || true # Target `thumbv7em-none-eabihf` is not supported
-#rm src/test/ui/macros/same-sequence-span.rs || true # Proc macro .rustc section not found?
-#rm src/test/ui/suggestions/issue-61963.rs || true # ^
+rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO(antoyo): Re-enable this test if I ever implement the llvm_asm! macro.
RUSTC_ARGS="-Zpanic-abort-tests -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
echo "[TEST] rustc test suite"
-# TODO: remove excluded tests when they stop stalling.
-COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS" --exclude src/test/ui/numbers-arithmetic/saturating-float-casts.rs --exclude src/test/ui/issues/issue-50811.rs
+COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS"