}
CPlaceInner::VarPair(place_local, var1, var2) => {
assert_eq!(local, place_local);
- ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+ (
+ "ssa",
+ Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())),
+ )
}
CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
CPlaceInner::Addr(ptr, meta) => {
Cow::Borrowed("")
};
match ptr.base_and_offset() {
- (crate::pointer::PointerBase::Addr(addr), offset) => {
- ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
- }
- (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
- ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
- }
- (crate::pointer::PointerBase::Dangling(align), offset) => {
- ("zst", format!("align={},offset={}", align.bytes(), offset).into())
- }
+ (crate::pointer::PointerBase::Addr(addr), offset) => (
+ "reuse",
+ format!("storage={}{}{}", addr, offset, meta).into(),
+ ),
+ (crate::pointer::PointerBase::Stack(stack_slot), offset) => (
+ "stack",
+ format!("storage={}{}{}", stack_slot, offset, meta).into(),
+ ),
+ (crate::pointer::PointerBase::Dangling(align), offset) => (
+ "zst",
+ format!("align={},offset={}", align.bytes(), offset).into(),
+ ),
}
}
};
size.bytes(),
align.abi.bytes(),
align.pref.bytes(),
- if extra.is_empty() { "" } else { " " },
+ if extra.is_empty() {
+ ""
+ } else {
+ " "
+ },
extra,
));
}
mod pass_mode;
mod returning;
-use rustc_target::spec::abi::Abi;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_target::spec::abi::Abi;
use cranelift_codegen::ir::{AbiParam, ArgumentPurpose};
pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
// Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301
-pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> {
+pub(crate) fn fn_sig_for_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+) -> ty::PolyFnSig<'tcx> {
use rustc_middle::ty::subst::Subst;
// FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
sig.map_bound(|sig| {
- let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorStateLangItem, None);
+ let state_did =
+ tcx.require_lang_item(rustc_hir::LangItem::GeneratorStateLangItem, None);
let state_adt_ref = tcx.adt_def(state_did);
- let state_substs =
- tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
+ let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
tcx.mk_fn_sig(
abi => abi,
};
let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi {
- Abi::Rust => (CallConv::triple_default(triple), sig.inputs().to_vec(), sig.output()),
- Abi::C | Abi::Unadjusted => (CallConv::triple_default(triple), sig.inputs().to_vec(), sig.output()),
+ Abi::Rust => (
+ CallConv::triple_default(triple),
+ sig.inputs().to_vec(),
+ sig.output(),
+ ),
+ Abi::C | Abi::Unadjusted => (
+ CallConv::triple_default(triple),
+ sig.inputs().to_vec(),
+ sig.output(),
+ ),
Abi::SysV64 => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
Abi::RustCall => {
assert_eq!(sig.inputs().len(), 2);
(CallConv::triple_default(triple), inputs, sig.output())
}
Abi::System => unreachable!(),
- Abi::RustIntrinsic => (CallConv::triple_default(triple), sig.inputs().to_vec(), sig.output()),
+ Abi::RustIntrinsic => (
+ CallConv::triple_default(triple),
+ sig.inputs().to_vec(),
+ sig.output(),
+ ),
_ => unimplemented!("unsupported abi {:?}", sig.abi),
};
tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(),
) {
PassMode::NoPass => (inputs.collect(), vec![]),
- PassMode::ByVal(ret_ty) => (
- inputs.collect(),
- vec![AbiParam::new(ret_ty)],
- ),
+ PassMode::ByVal(ret_ty) => (inputs.collect(), vec![AbiParam::new(ret_ty)]),
PassMode::ByValPair(ret_ty_a, ret_ty_b) => (
inputs.collect(),
vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)],
support_vararg: bool,
) -> (String, Signature) {
assert!(!inst.substs.needs_infer());
- let fn_sig =
- tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_sig_for_fn_abi(tcx, inst));
+ let fn_sig = tcx.normalize_erasing_late_bound_regions(
+ ParamEnv::reveal_all(),
+ &fn_sig_for_fn_abi(tcx, inst),
+ );
if fn_sig.c_variadic && !support_vararg {
- tcx.sess.span_fatal(tcx.def_span(inst.def_id()), "Variadic function definitions are not yet supported");
+ tcx.sess.span_fatal(
+ tcx.def_span(inst.def_id()),
+ "Variadic function definitions are not yet supported",
+ );
}
- let sig = clif_sig_from_fn_sig(tcx, triple, fn_sig, tcx.def_span(inst.def_id()), false, inst.def.requires_caller_location(tcx));
+ let sig = clif_sig_from_fn_sig(
+ tcx,
+ triple,
+ fn_sig,
+ tcx.def_span(inst.def_id()),
+ false,
+ inst.def.requires_caller_location(tcx),
+ );
(tcx.symbol_name(inst).name.to_string(), sig)
}
pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
let func_id = import_function(self.tcx, &mut self.cx.module, inst);
let func_ref = self
- .cx.module
+ .cx
+ .module
.declare_func_in_func(func_id, &mut self.bcx.func);
#[cfg(debug_assertions)]
call_conv: CallConv::triple_default(self.triple()),
};
let func_id = self
- .cx.module
+ .cx
+ .module
.declare_function(&name, Linkage::Import, &sig)
.unwrap();
let func_ref = self
- .cx.module
+ .cx
+ .module
.declare_func_in_func(func_id, &mut self.bcx.func);
let call_inst = self.bcx.ins().call(func_ref, args);
#[cfg(debug_assertions)]
assert!(fx.caller_location.is_none());
if fx.instance.def.requires_caller_location(fx.tcx) {
// Store caller location for `#[track_caller]`.
- fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap());
+ fx.caller_location = Some(
+ cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap(),
+ );
}
fx.bcx.switch_to_block(start_block);
fx.bcx.ins().jump(ret_block, &[]);
return;
}
- _ => Some(instance)
+ _ => Some(instance),
}
} else {
None
};
- let is_cold =
- instance.map(|inst|
- fx.tcx.codegen_fn_attrs(inst.def_id())
- .flags.contains(CodegenFnAttrFlags::COLD))
- .unwrap_or(false);
+ let is_cold = instance
+ .map(|inst| {
+ fx.tcx
+ .codegen_fn_attrs(inst.def_id())
+ .flags
+ .contains(CodegenFnAttrFlags::COLD)
+ })
+ .unwrap_or(false);
if is_cold {
fx.cold_blocks.insert(current_block);
}
let pack_arg = trans_operand(fx, &args[1]);
let tupled_arguments = match pack_arg.layout().ty.kind {
- ty::Tuple(ref tupled_arguments) => {
- tupled_arguments
- }
+ ty::Tuple(ref tupled_arguments) => tupled_arguments,
_ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
};
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(nop_inst, "indirect call");
}
- let func = trans_operand(fx, func)
- .load_scalar(fx);
+ let func = trans_operand(fx, func).load_scalar(fx);
(
Some(func),
args.get(0)
)
.collect::<Vec<_>>();
- if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ if instance
+ .map(|inst| inst.def.requires_caller_location(fx.tcx))
+ .unwrap_or(false)
+ {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
// FIXME find a cleaner way to support varargs
if fn_sig.c_variadic {
if fn_sig.abi != Abi::C {
- fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
+ fx.tcx.sess.span_fatal(
+ span,
+ &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+ );
}
let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
let abi_params = call_args
let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported
- fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+ fx.tcx
+ .sess
+ .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
}
AbiParam::new(ty)
})
_ => {
assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _)));
- let arg_value = drop_place.place_ref(fx, fx.layout_of(fx.tcx.mk_ref(
- &ty::RegionKind::ReErased,
- TypeAndMut {
- ty,
- mutbl: crate::rustc_hir::Mutability::Mut,
- },
- )));
+ let arg_value = drop_place.place_ref(
+ fx,
+ fx.layout_of(fx.tcx.mk_ref(
+ &ty::RegionKind::ReErased,
+ TypeAndMut {
+ ty,
+ mutbl: crate::rustc_hir::Mutability::Mut,
+ },
+ )),
+ );
let arg_value = adjust_arg_for_abi(fx, arg_value);
let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
} else {
match &layout.abi {
Abi::Uninhabited => PassMode::NoPass,
- Abi::Scalar(scalar) => {
- PassMode::ByVal(scalar_to_clif_type(tcx, scalar.clone()))
- }
+ Abi::Scalar(scalar) => PassMode::ByVal(scalar_to_clif_type(tcx, scalar.clone())),
Abi::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a.clone());
let b = scalar_to_clif_type(tcx, b.clone());
// Returning (i128, i128) by-val-pair would take 4 regs, while only 3 are
// available on x86_64. Cranelift gets confused when too many return params
// are used.
- PassMode::ByRef { size: Some(layout.size) }
+ PassMode::ByRef {
+ size: Some(layout.size),
+ }
} else {
PassMode::ByValPair(a, b)
}
if let Some(vector_ty) = crate::intrinsics::clif_vector_type(tcx, layout) {
PassMode::ByVal(vector_ty)
} else {
- PassMode::ByRef { size: Some(layout.size) }
+ PassMode::ByRef {
+ size: Some(layout.size),
+ }
}
}
- Abi::Aggregate { sized: true } => PassMode::ByRef { size: Some(layout.size) },
+ Abi::Aggregate { sized: true } => PassMode::ByRef {
+ size: Some(layout.size),
+ },
Abi::Aggregate { sized: false } => PassMode::ByRef { size: None },
}
}
let (a, b) = arg.load_scalar_pair(fx);
Pair(a, b)
}
- PassMode::ByRef { size: _ } => {
- match arg.force_stack(fx) {
- (ptr, None) => Single(ptr.get_addr(fx)),
- (ptr, Some(meta)) => Pair(ptr.get_addr(fx), meta),
- }
- }
+ PassMode::ByRef { size: _ } => match arg.force_stack(fx) {
+ (ptr, None) => Single(ptr.get_addr(fx)),
+ (ptr, Some(meta)) => Pair(ptr.get_addr(fx), meta),
+ },
}
}
pub(super) fn cvalue_for_param<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
start_block: Block,
- #[cfg_attr(not(debug_assertions), allow(unused_variables))]
- local: Option<mir::Local>,
- #[cfg_attr(not(debug_assertions), allow(unused_variables))]
- local_field: Option<usize>,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] local: Option<mir::Local>,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] local_field: Option<usize>,
arg_ty: Ty<'tcx>,
) -> Option<CValue<'tcx>> {
let layout = fx.layout_of(arg_ty);
let (a, b) = block_params.assert_pair();
Some(CValue::by_val_pair(a, b, layout))
}
- PassMode::ByRef { size: Some(_) } => Some(CValue::by_ref(Pointer::new(block_params.assert_single()), layout)),
+ PassMode::ByRef { size: Some(_) } => Some(CValue::by_ref(
+ Pointer::new(block_params.assert_single()),
+ layout,
+ )),
PassMode::ByRef { size: None } => {
let (ptr, meta) = block_params.assert_pair();
Some(CValue::by_ref_unsized(Pointer::new(ptr), meta, layout))
fx.layout_of(fx.monomorphize(&fx.mir.local_decls[RETURN_PLACE].ty))
}
-pub(crate) fn can_return_to_ssa_var<'tcx>(tcx: TyCtxt<'tcx>, dest_layout: TyAndLayout<'tcx>) -> bool {
+pub(crate) fn can_return_to_ssa_var<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+) -> bool {
match get_pass_mode(tcx, dest_layout) {
PassMode::NoPass | PassMode::ByVal(_) => true,
// FIXME Make it possible to return ByValPair and ByRef to an ssa var.
- PassMode::ByValPair(_, _) | PassMode::ByRef { size: _ } => false
+ PassMode::ByValPair(_, _) | PassMode::ByRef { size: _ } => false,
}
}
}
PassMode::ByRef { size: Some(_) } => {
let ret_param = fx.bcx.append_block_param(start_block, fx.pointer_type);
- fx.local_map
- .insert(RETURN_PLACE, CPlace::for_ptr(Pointer::new(ret_param), ret_layout));
+ fx.local_map.insert(
+ RETURN_PLACE,
+ CPlace::for_ptr(Pointer::new(ret_param), ret_layout),
+ );
Single(ret_param)
}
let output_pass_mode = get_pass_mode(fx.tcx, ret_layout);
let return_ptr = match output_pass_mode {
PassMode::NoPass => None,
- PassMode::ByRef { size: Some(_)} => match ret_place {
+ PassMode::ByRef { size: Some(_) } => match ret_place {
Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
},
bcx.seal_all_blocks();
bcx.finalize();
}
- module.define_function(
- func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- ).unwrap();
+ module
+ .define_function(
+ func_id,
+ &mut ctx,
+ &mut cranelift_codegen::binemit::NullTrapSink {},
+ )
+ .unwrap();
unwind_context.add_function(func_id, &ctx, module.isa());
}
}
use crate::prelude::*;
-use rustc_middle::mir::StatementKind::*;
use rustc_index::vec::IndexVec;
+use rustc_middle::mir::StatementKind::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SsaKind {
}
pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Backend>) -> IndexVec<Local, SsaKind> {
- let mut flag_map = fx.mir.local_decls.iter().map(|local_decl| {
- let ty = fx.monomorphize(&local_decl.ty);
- if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
- SsaKind::Ssa
- } else {
- SsaKind::NotSsa
- }
- }).collect::<IndexVec<Local, SsaKind>>();
+ let mut flag_map = fx
+ .mir
+ .local_decls
+ .iter()
+ .map(|local_decl| {
+ let ty = fx.monomorphize(&local_decl.ty);
+ if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
+ SsaKind::Ssa
+ } else {
+ SsaKind::NotSsa
+ }
+ })
+ .collect::<IndexVec<Local, SsaKind>>();
for bb in fx.mir.basic_blocks().iter() {
for stmt in bb.statements.iter() {
match &stmt.kind {
Assign(place_and_rval) => match &place_and_rval.1 {
- Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place)=> {
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
not_ssa(&mut flag_map, place.local)
}
_ => {}
match &bb.terminator().kind {
TerminatorKind::Call { destination, .. } => {
if let Some((dest_place, _dest_bb)) = destination {
- let dest_layout = fx.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.tcx).ty));
+ let dest_layout = fx
+ .layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.tcx).ty));
if !crate::abi::can_return_to_ssa_var(fx.tcx, dest_layout) {
not_ssa(&mut flag_map, dest_place.local)
}
use std::fs::File;
use std::path::{Path, PathBuf};
-use rustc_session::Session;
use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
use rustc_codegen_ssa::METADATA_FILENAME;
+use rustc_session::Session;
use object::{Object, SymbolKind};
entry.read_to_end(&mut data).unwrap();
data
}
- ArchiveEntry::File(file) => {
- std::fs::read(file).unwrap_or_else(|err| {
- sess.fatal(&format!("error while reading object file during archive building: {}", err));
- })
- }
+ ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error while reading object file during archive building: {}",
+ err
+ ));
+ }),
};
if !self.no_builtin_ranlib {
match object::File::parse(&data) {
Ok(object) => {
- symbol_table.insert(entry_name.as_bytes().to_vec(), object.symbols().filter_map(|(_index, symbol)| {
- if symbol.is_undefined() || symbol.is_local() || symbol.kind() != SymbolKind::Data && symbol.kind() != SymbolKind::Text && symbol.kind() != SymbolKind::Tls {
- None
- } else {
- symbol.name().map(|name| name.as_bytes().to_vec())
- }
- }).collect::<Vec<_>>());
+ symbol_table.insert(
+ entry_name.as_bytes().to_vec(),
+ object
+ .symbols()
+ .filter_map(|(_index, symbol)| {
+ if symbol.is_undefined()
+ || symbol.is_local()
+ || symbol.kind() != SymbolKind::Data
+ && symbol.kind() != SymbolKind::Text
+ && symbol.kind() != SymbolKind::Tls
+ {
+ None
+ } else {
+ symbol.name().map(|name| name.as_bytes().to_vec())
+ }
+ })
+ .collect::<Vec<_>>(),
+ );
}
Err(err) => {
let err = err.to_string();
if err == "Unknown file magic" {
// Not an object file; skip it.
} else {
- sess.fatal(&format!("error parsing `{}` during archive creation: {}", entry_name, err));
+ sess.fatal(&format!(
+ "error parsing `{}` during archive creation: {}",
+ entry_name, err
+ ));
}
}
}
}
let mut builder = if self.use_gnu_style_archive {
- BuilderKind::Gnu(ar::GnuBuilder::new(
- File::create(&self.dst).unwrap_or_else(|err| {
- sess.fatal(&format!("error opening destination during archive building: {}", err));
- }),
- entries
- .iter()
- .map(|(name, _)| name.as_bytes().to_vec())
- .collect(),
- ar::GnuSymbolTableFormat::Size32,
- symbol_table,
- ).unwrap())
+ BuilderKind::Gnu(
+ ar::GnuBuilder::new(
+ File::create(&self.dst).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ entries
+ .iter()
+ .map(|(name, _)| name.as_bytes().to_vec())
+ .collect(),
+ ar::GnuSymbolTableFormat::Size32,
+ symbol_table,
+ )
+ .unwrap(),
+ )
} else {
- BuilderKind::Bsd(ar::Builder::new(
- File::create(&self.dst).unwrap_or_else(|err| {
- sess.fatal(&format!("error opening destination during archive building: {}", err));
- }),
- symbol_table,
- ).unwrap())
+ BuilderKind::Bsd(
+ ar::Builder::new(
+ File::create(&self.dst).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ symbol_table,
+ )
+ .unwrap(),
+ )
};
// Add all files
for (entry_name, data) in entries.into_iter() {
let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
match builder {
- BuilderKind::Bsd(ref mut builder) => builder
- .append(&header, &mut &*data)
- .unwrap(),
- BuilderKind::Gnu(ref mut builder) => builder
- .append(&header, &mut &*data)
- .unwrap(),
+ BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
}
}
.expect("Couldn't run ranlib");
if !status.success() {
- self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ self.sess
+ .fatal(&format!("Ranlib exited with code {:?}", status.code()));
}
}
}
let mut i = 0;
while let Some(entry) = archive.next_entry() {
let entry = entry?;
- let file_name = String::from_utf8(entry.header().identifier().to_vec()).map_err(|err| {
- std::io::Error::new(std::io::ErrorKind::InvalidData, err)
- })?;
+ let file_name = String::from_utf8(entry.header().identifier().to_vec())
+ .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
if !skip(&file_name) {
self.entries.push((
file_name,
#[cfg(feature = "jit")]
#[no_mangle]
-pub static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER;
+pub static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t =
+ libc::PTHREAD_MUTEX_INITIALIZER;
pub(crate) fn init_global_lock(module: &mut Module<impl Backend>, bcx: &mut FunctionBuilder<'_>) {
if std::env::var("CG_CLIF_JIT").is_ok() {
let mut data_ctx = DataContext::new();
data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
- let atomic_mutex = module.declare_data(
- "__cg_clif_global_atomic_mutex",
- Linkage::Export,
- true,
- false,
- Some(16),
- ).unwrap();
+ let atomic_mutex = module
+ .declare_data(
+ "__cg_clif_global_atomic_mutex",
+ Linkage::Export,
+ true,
+ false,
+ Some(16),
+ )
+ .unwrap();
module.define_data(atomic_mutex, &data_ctx).unwrap();
- let pthread_mutex_init = module.declare_function("pthread_mutex_init", Linkage::Import, &cranelift_codegen::ir::Signature {
- call_conv: module.target_config().default_call_conv,
- params: vec![
- AbiParam::new(module.target_config().pointer_type() /* *mut pthread_mutex_t */),
- AbiParam::new(module.target_config().pointer_type() /* *const pthread_mutex_attr_t */),
- ],
- returns: vec![AbiParam::new(types::I32 /* c_int */)],
- }).unwrap();
+ let pthread_mutex_init = module
+ .declare_function(
+ "pthread_mutex_init",
+ Linkage::Import,
+ &cranelift_codegen::ir::Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![
+ AbiParam::new(
+ module.target_config().pointer_type(), /* *mut pthread_mutex_t */
+ ),
+ AbiParam::new(
+ module.target_config().pointer_type(), /* *const pthread_mutex_attr_t */
+ ),
+ ],
+ returns: vec![AbiParam::new(types::I32 /* c_int */)],
+ },
+ )
+ .unwrap();
let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);
let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
- let atomic_mutex = bcx.ins().global_value(module.target_config().pointer_type(), atomic_mutex);
+ let atomic_mutex = bcx
+ .ins()
+ .global_value(module.target_config().pointer_type(), atomic_mutex);
let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);
pub(crate) fn init_global_lock_constructor(
module: &mut Module<impl Backend>,
- constructor_name: &str
+ constructor_name: &str,
) -> FuncId {
let sig = Signature::new(CallConv::SystemV);
let init_func_id = module
bcx.seal_all_blocks();
bcx.finalize();
}
- module.define_function(
- init_func_id,
- &mut ctx,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- ).unwrap();
+ module
+ .define_function(
+ init_func_id,
+ &mut ctx,
+ &mut cranelift_codegen::binemit::NullTrapSink {},
+ )
+ .unwrap();
init_func_id
}
pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
- let atomic_mutex = fx.cx.module.declare_data(
- "__cg_clif_global_atomic_mutex",
- Linkage::Import,
- true,
- false,
- None,
- ).unwrap();
-
- let pthread_mutex_lock = fx.cx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature {
- call_conv: fx.cx.module.target_config().default_call_conv,
- params: vec![
- AbiParam::new(fx.cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
- ],
- returns: vec![AbiParam::new(types::I32 /* c_int */)],
- }).unwrap();
-
- let pthread_mutex_lock = fx.cx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
+ let atomic_mutex = fx
+ .cx
+ .module
+ .declare_data(
+ "__cg_clif_global_atomic_mutex",
+ Linkage::Import,
+ true,
+ false,
+ None,
+ )
+ .unwrap();
+
+ let pthread_mutex_lock = fx
+ .cx
+ .module
+ .declare_function(
+ "pthread_mutex_lock",
+ Linkage::Import,
+ &cranelift_codegen::ir::Signature {
+ call_conv: fx.cx.module.target_config().default_call_conv,
+ params: vec![AbiParam::new(
+ fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
+ )],
+ returns: vec![AbiParam::new(types::I32 /* c_int */)],
+ },
+ )
+ .unwrap();
+
+ let pthread_mutex_lock = fx
+ .cx
+ .module
+ .declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
- let atomic_mutex = fx.bcx.ins().global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
+ let atomic_mutex = fx
+ .bcx
+ .ins()
+ .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
}
pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
- let atomic_mutex = fx.cx.module.declare_data(
- "__cg_clif_global_atomic_mutex",
- Linkage::Import,
- true,
- false,
- None,
- ).unwrap();
-
- let pthread_mutex_unlock = fx.cx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature {
- call_conv: fx.cx.module.target_config().default_call_conv,
- params: vec![
- AbiParam::new(fx.cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
- ],
- returns: vec![AbiParam::new(types::I32 /* c_int */)],
- }).unwrap();
-
- let pthread_mutex_unlock = fx.cx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
+ let atomic_mutex = fx
+ .cx
+ .module
+ .declare_data(
+ "__cg_clif_global_atomic_mutex",
+ Linkage::Import,
+ true,
+ false,
+ None,
+ )
+ .unwrap();
+
+ let pthread_mutex_unlock = fx
+ .cx
+ .module
+ .declare_function(
+ "pthread_mutex_unlock",
+ Linkage::Import,
+ &cranelift_codegen::ir::Signature {
+ call_conv: fx.cx.module.target_config().default_call_conv,
+ params: vec![AbiParam::new(
+ fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
+ )],
+ returns: vec![AbiParam::new(types::I32 /* c_int */)],
+ },
+ )
+ .unwrap();
+
+ let pthread_mutex_unlock = fx
+ .cx
+ .module
+ .declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
- let atomic_mutex = fx.bcx.ins().global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
+ let atomic_mutex = fx
+ .bcx
+ .ins()
+ .global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
}
use cranelift_module::{FuncId, Module};
-use object::{SectionKind, SymbolFlags, RelocationKind, RelocationEncoding};
use object::write::*;
+use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
use cranelift_object::{ObjectBackend, ObjectBuilder, ObjectProduct};
impl WriteMetadata for object::write::Object {
fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
- let segment = self.segment_name(object::write::StandardSegment::Data).to_vec();
+ let segment = self
+ .segment_name(object::write::StandardSegment::Data)
+ .to_vec();
let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
let offset = self.append_section_data(section_id, &data, 1);
// For MachO and probably PE this is necessary to prevent the linker from throwing away the
id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info
} else {
id.name().to_string()
- }.into_bytes();
+ }
+ .into_bytes();
let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
let section_id = self.object.add_section(segment, name, SectionKind::Debug);
reloc: &DebugReloc,
) {
let (symbol, symbol_offset) = match reloc.name {
- DebugRelocName::Section(id) => {
- (section_map.get(&id).unwrap().1, 0)
- }
+ DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
DebugRelocName::Symbol(id) => {
let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
- self.object.symbol_section_and_offset(symbol_id).expect("Debug reloc for undef sym???")
+ self.object
+ .symbol_section_and_offset(symbol_id)
+ .expect("Debug reloc for undef sym???")
}
};
- self.object.add_relocation(from.0, Relocation {
- offset: u64::from(reloc.offset),
- symbol,
- kind: RelocationKind::Absolute,
- encoding: RelocationEncoding::Generic,
- size: reloc.size * 8,
- addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
- }).unwrap();
+ self.object
+ .add_relocation(
+ from.0,
+ Relocation {
+ offset: u64::from(reloc.offset),
+ symbol,
+ kind: RelocationKind::Absolute,
+ encoding: RelocationEncoding::Generic,
+ size: reloc.size * 8,
+ addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
+ },
+ )
+ .unwrap();
}
}
impl AddConstructor for ObjectProduct {
fn add_constructor(&mut self, func_id: FuncId) {
let symbol = self.function_symbol(func_id);
- let segment = self.object.segment_name(object::write::StandardSegment::Data);
- let init_array_section = self.object.add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
+ let segment = self
+ .object
+ .segment_name(object::write::StandardSegment::Data);
+ let init_array_section =
+ self.object
+ .add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
self.object.append_section_data(
init_array_section,
- &std::iter::repeat(0).take(8 /*FIXME pointer size*/).collect::<Vec<u8>>(),
+ &std::iter::repeat(0)
+ .take(8 /*FIXME pointer size*/)
+ .collect::<Vec<u8>>(),
8,
);
- self.object.add_relocation(init_array_section, object::write::Relocation {
- offset: 0,
- size: 64, // FIXME pointer size
- kind: RelocationKind::Absolute,
- encoding: RelocationEncoding::Generic,
- symbol,
- addend: 0,
- }).unwrap();
+ self.object
+ .add_relocation(
+ init_array_section,
+ object::write::Relocation {
+ offset: 0,
+ size: 64, // FIXME pointer size
+ kind: RelocationKind::Absolute,
+ encoding: RelocationEncoding::Generic,
+ symbol,
+ addend: 0,
+ },
+ )
+ .unwrap();
}
}
architecture => sess.fatal(&format!(
"target architecture {:?} is unsupported",
architecture,
- ))
+ )),
};
let endian = match triple.endianness().unwrap() {
target_lexicon::Endianness::Little => object::Endianness::Little,
metadata_object.write().unwrap()
}
-pub(crate) type Backend = impl cranelift_module::Backend<Product: AddConstructor + Emit + WriteDebugInfo>;
+pub(crate) type Backend =
+ impl cranelift_module::Backend<Product: AddConstructor + Emit + WriteDebugInfo>;
pub(crate) fn make_module(sess: &Session, name: String) -> Module<Backend> {
let module: Module<ObjectBackend> = Module::new(
crate::build_isa(sess, true),
name + ".o",
cranelift_module::default_libcall_names(),
- ).unwrap(),
+ )
+ .unwrap(),
);
module
}
-use rustc_middle::ty::adjustment::PointerCast;
use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
use crate::prelude::*;
// Predefine blocks
let start_block = bcx.create_block();
- let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+ let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
+ .map(|_| bcx.create_block())
+ .collect();
// Make FunctionCx
let pointer_type = cx.module.target_config().pointer_type();
inline_asm_index: 0,
};
- let arg_uninhabited = fx.mir.args_iter().any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+ let arg_uninhabited = fx.mir.args_iter().any(|arg| {
+ fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
+ .abi
+ .is_uninhabited()
+ });
if arg_uninhabited {
- fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx
+ .append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
} else {
tcx.sess.time("codegen clif ir", || {
- tcx.sess.time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
+ tcx.sess.time("codegen prelude", || {
+ crate::abi::codegen_fn_prelude(&mut fx, start_block)
+ });
codegen_fn_content(&mut fx);
});
}
let context = &mut cx.cached_context;
context.func = func;
- crate::pretty_clif::write_clif_file(
- tcx,
- "unopt",
- None,
- instance,
- &context,
- &clif_comments,
- );
+ crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
// Verify function
verify_func(tcx, &clif_comments, &context.func);
// Perform rust specific optimizations
tcx.sess.time("optimize clif ir", || {
- crate::optimize::optimize_function(tcx, instance, context, &cold_blocks, &mut clif_comments);
+ crate::optimize::optimize_function(
+ tcx,
+ instance,
+ context,
+ &cold_blocks,
+ &mut clif_comments,
+ );
});
// If the return block is not reachable, then the SSA builder may have inserted a `iconst.i128`
// Define function
let module = &mut cx.module;
- tcx.sess.time(
- "define function",
- || module.define_function(
- func_id,
- context,
- &mut cranelift_codegen::binemit::NullTrapSink {},
- ).unwrap(),
- );
+ tcx.sess.time("define function", || {
+ module
+ .define_function(
+ func_id,
+ context,
+ &mut cranelift_codegen::binemit::NullTrapSink {},
+ )
+ .unwrap()
+ });
// Write optimized function to file for debugging
crate::pretty_clif::write_clif_file(
let unwind_context = &mut cx.unwind_context;
tcx.sess.time("generate debug info", || {
if let Some(debug_context) = debug_context {
- debug_context.define_function(instance, func_id, &name, isa, context, &source_info_set, local_map);
+ debug_context.define_function(
+ instance,
+ func_id,
+ &name,
+ isa,
+ context,
+ &source_info_set,
+ local_map,
+ );
}
unwind_context.add_function(func_id, &context, isa);
});
context.clear();
}
-pub(crate) fn verify_func(tcx: TyCtxt<'_>, writer: &crate::pretty_clif::CommentWriter, func: &Function) {
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
tcx.sess.time("verify clif ir", || {
let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
match cranelift_codegen::verify_function(&func, &flags) {
fx.bcx.switch_to_block(failure);
- let location = fx.get_caller_location(bb_data.terminator().source_info.span).load_scalar(fx);
+ let location = fx
+ .get_caller_location(bb_data.terminator().source_info.span)
+ .load_scalar(fx);
let args;
let lang_item = match msg {
_ => {
let msg_str = msg.description();
let msg_ptr = fx.anonymous_str("assert", msg_str);
- let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let msg_len = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
args = [msg_ptr, msg_len, location];
rustc_hir::lang_items::PanicFnLangItem
}
};
let def_id = fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| {
- fx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, &s)
+ fx.tcx
+ .sess
+ .span_fatal(bb_data.terminator().source_info.span, &s)
});
let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
let symbol_name = fx.tcx.symbol_name(instance).name;
- fx.lib_call(&*symbol_name, vec![fx.pointer_type, fx.pointer_type, fx.pointer_type], vec![], &args);
+ fx.lib_call(
+ &*symbol_name,
+ vec![fx.pointer_type, fx.pointer_type, fx.pointer_type],
+ vec![],
+ &args,
+ );
crate::trap::trap_unreachable(fx, "panic lang item returned");
}
cleanup: _,
from_hir_call: _,
} => {
- fx.tcx.sess.time("codegen call", || crate::abi::codegen_terminator_call(
- fx,
- *fn_span,
- block,
- func,
- args,
- *destination,
- ));
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(
+ fx,
+ *fn_span,
+ block,
+ func,
+ args,
+ *destination,
+ )
+ });
}
TerminatorKind::InlineAsm {
template,
fx.bcx.ins().jump(destination_block, &[]);
}
None => {
- crate::trap::trap_unreachable(fx, "[corruption] Returned from noreturn inline asm");
+ crate::trap::trap_unreachable(
+ fx,
+ "[corruption] Returned from noreturn inline asm",
+ );
}
}
}
fn trans_stmt<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
- #[allow(unused_variables)]
- cur_block: Block,
+ #[allow(unused_variables)] cur_block: Block,
stmt: &Statement<'tcx>,
) {
let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
let layout = operand.layout();
let val = operand.load_scalar(fx);
let res = match un_op {
- UnOp::Not => {
- match layout.ty.kind {
- ty::Bool => {
- let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
- CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
- }
- ty::Uint(_) | ty::Int(_) => {
- CValue::by_val(fx.bcx.ins().bnot(val), layout)
- }
- _ => unreachable!("un op Not for {:?}", layout.ty),
+ UnOp::Not => match layout.ty.kind {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
}
- }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
UnOp::Neg => match layout.ty.kind {
ty::Int(IntTy::I128) => {
// FIXME remove this case once ineg.i128 works
let zero = CValue::const_val(fx, layout, 0);
crate::num::trans_int_binop(fx, BinOp::Sub, zero, operand)
}
- ty::Int(_) => {
- CValue::by_val(fx.bcx.ins().ineg(val), layout)
- }
- ty::Float(_) => {
- CValue::by_val(fx.bcx.ins().fneg(val), layout)
- }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
_ => unreachable!("un op Neg for {:?}", layout.ty),
},
};
match from_ty.kind {
ty::FnDef(def_id, substs) => {
let func_ref = fx.get_function_ref(
- Instance::resolve_for_fn_ptr(fx.tcx, ParamEnv::reveal_all(), def_id, substs)
- .unwrap()
- .polymorphize(fx.tcx),
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
|ty::TypeAndMut {
ty: pointee_ty,
mutbl: _,
- }| has_ptr_meta(fx.tcx, pointee_ty),
+ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ },
)
.unwrap_or(false)
}
_ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
}
- use rustc_target::abi::{TagEncoding, Int, Variants};
+ use rustc_target::abi::{Int, TagEncoding, Variants};
match &operand.layout().variants {
Variants::Single { index } => {
- let discr = operand.layout().ty.discriminant_for_variant(fx.tcx, *index).unwrap();
+ let discr = operand
+ .layout()
+ .ty
+ .discriminant_for_variant(fx.tcx, *index)
+ .unwrap();
let discr = if discr.ty.is_signed() {
- rustc_middle::mir::interpret::sign_extend(discr.val, fx.layout_of(discr.ty).size)
+ rustc_middle::mir::interpret::sign_extend(
+ discr.val,
+ fx.layout_of(discr.ty).size,
+ )
} else {
discr.val
};
let cast_to = fx.clif_type(dest_layout.ty).unwrap();
// Read the tag/niche-encoded discriminant from memory.
- let encoded_discr = operand.value_field(fx, mir::Field::new(*tag_field));
+ let encoded_discr =
+ operand.value_field(fx, mir::Field::new(*tag_field));
let encoded_discr = encoded_discr.load_scalar(fx);
// Decode the discriminant (specifically if it's niche-encoded).
let val = CValue::by_val(val, dest_layout);
lval.write_cvalue(fx, val);
}
- Variants::Multiple { ..} => unreachable!(),
+ Variants::Multiple { .. } => unreachable!(),
}
} else {
let to_clif_ty = fx.clif_type(to_ty).unwrap();
lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
}
}
- Rvalue::Cast(CastKind::Pointer(PointerCast::ClosureFnPointer(_)), operand, _to_ty) => {
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ operand,
+ _to_ty,
+ ) => {
let operand = trans_operand(fx, operand);
match operand.layout().ty.kind {
ty::Closure(def_id, substs) => {
def_id,
substs,
ty::ClosureKind::FnOnce,
- ).polymorphize(fx.tcx);
+ )
+ .polymorphize(fx.tcx);
let func_ref = fx.get_function_ref(instance);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
.ty
.is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
- let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
+ let val =
+ CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
lval.write_cvalue(fx, val);
}
Rvalue::Aggregate(kind, operands) => match **kind {
inputs,
} = &**asm;
let rustc_hir::LlvmInlineAsmInner {
- asm: asm_code, // Name
+ asm: asm_code, // Name
outputs: output_names, // Vec<LlvmInlineAsmOutput>
inputs: input_names, // Vec<Name>
- clobbers, // Vec<Name>
- volatile, // bool
- alignstack, // bool
+ clobbers, // Vec<Name>
+ volatile, // bool
+ alignstack, // bool
dialect: _,
asm_str_style: _,
} = asm;
// Black box
}
"mov %rbx, %rsi\n cpuid\n xchg %rbx, %rsi" => {
- assert_eq!(input_names, &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]);
+ assert_eq!(
+ input_names,
+ &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
+ );
assert_eq!(output_names.len(), 4);
- for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"]).iter().enumerate() {
+ for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
+ .iter()
+ .enumerate()
+ {
assert_eq!(&output_names[i].constraint.as_str(), c);
assert!(!output_names[i].is_rw);
assert!(!output_names[i].is_indirect);
let leaf = trans_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
let subleaf = trans_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
- let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
+ let (eax, ebx, ecx, edx) =
+ crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
assert_eq!(outputs.len(), 4);
- trans_place(fx, outputs[0]).write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
- trans_place(fx, outputs[1]).write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
- trans_place(fx, outputs[2]).write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
- trans_place(fx, outputs[3]).write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ trans_place(fx, outputs[0])
+ .write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ trans_place(fx, outputs[1])
+ .write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ trans_place(fx, outputs[2])
+ .write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ trans_place(fx, outputs[3])
+ .write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
}
"xgetbv" => {
assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
}
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
- _ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
+ _ if fx
+ .tcx
+ .symbol_name(fx.instance)
+ .name
+ .starts_with("___chkstk") =>
+ {
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
}
_ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
"int $$0x29" => {
crate::trap::trap_unimplemented(fx, "Windows abort");
}
- _ => fx.tcx.sess.span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
+ _ => fx
+ .tcx
+ .sess
+ .span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
}
}
StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
) -> Value {
match place.layout().ty.kind {
ty::Array(_elem_ty, len) => {
- let len = fx.monomorphize(&len)
+ let len = fx
+ .monomorphize(&len)
.eval(fx.tcx, ParamEnv::reveal_all())
.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
fx.bcx.ins().iconst(fx.pointer_type, len)
let len = len.unwrap();
cplace = CPlace::for_ptr_with_extra(
ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * i64::from(from)),
- fx.bcx.ins().iadd_imm(len, -(i64::from(from) + i64::from(to))),
+ fx.bcx
+ .ins()
+ .iadd_imm(len, -(i64::from(from) + i64::from(to))),
cplace.layout(),
);
}
// Optimize `val >> 64`, because compiler_builtins uses it to deconstruct an 128bit
// integer into its lsb and msb.
// https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217
- if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
+ if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val);
let all_zeros = fx.bcx.ins().iconst(types::I64, 0);
let val = match (bin_op, is_signed) {
+use rustc_index::vec::IndexVec;
use rustc_target::abi::{Integer, Primitive};
use rustc_target::spec::{HasTargetSpec, Target};
-use rustc_index::vec::IndexVec;
use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
FloatTy::F64 => types::F64,
},
ty::FnPtr(_) => pointer_ty(tcx),
- ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ ty::RawPtr(TypeAndMut {
+ ty: pointee_ty,
+ mutbl: _,
+ })
+ | ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, pointee_ty) {
return None;
} else {
}
}
ty::Adt(adt_def, _) if adt_def.repr.simd() => {
- let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi {
+ let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+ {
Abi::Vector { element, count } => (element.clone(), *count),
_ => unreachable!(),
};
})
}
-fn clif_pair_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<(types::Type, types::Type)> {
+fn clif_pair_type_from_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
Some(match ty.kind {
ty::Tuple(substs) if substs.len() == 2 => {
let mut types = substs.types();
}
(a, b)
}
- ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ ty::RawPtr(TypeAndMut {
+ ty: pointee_ty,
+ mutbl: _,
+ })
+ | ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, pointee_ty) {
(pointer_ty(tcx), pointer_ty(tcx))
} else {
- return None
+ return None;
}
}
_ => return None,
/// Is a pointer to this type a fat ptr?
pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
- let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
- match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
+ let ptr_ty = tcx.mk_ptr(TypeAndMut {
+ ty,
+ mutbl: rustc_hir::Mutability::Not,
+ });
+ match &tcx
+ .layout_of(ParamEnv::reveal_all().and(ptr_ty))
+ .unwrap()
+ .abi
+ {
Abi::Scalar(_) => false,
Abi::ScalarPair(_, _) => true,
abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
}
}
-pub(crate) fn type_min_max_value(bcx: &mut FunctionBuilder<'_>, ty: Type, signed: bool) -> (Value, Value) {
+pub(crate) fn type_min_max_value(
+ bcx: &mut FunctionBuilder<'_>,
+ ty: Type,
+ signed: bool,
+) -> (Value, Value) {
assert!(ty.is_int());
if ty == types::I128 {
T: TypeFoldable<'tcx> + Copy,
{
if let Some(substs) = self.instance.substs_for_mir_body() {
- self.tcx.subst_and_normalize_erasing_regions(
- substs,
- ty::ParamEnv::reveal_all(),
- value,
- )
+ self.tcx
+ .subst_and_normalize_erasing_regions(substs, ty::ParamEnv::reveal_all(), value)
} else {
- self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
+ self.tcx
+ .normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
}
}
caller.line as u32,
caller.col_display as u32 + 1,
));
- crate::constant::trans_const_value(
- self,
- const_loc,
- self.tcx.caller_location_ty(),
- )
+ crate::constant::trans_const_value(self, const_loc, self.tcx.caller_location_ty())
}
pub(crate) fn triple(&self) -> &target_lexicon::Triple {
let mut data_ctx = DataContext::new();
data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
let msg_id = self
- .cx.module
+ .cx
+ .module
.declare_data(
&format!("__{}_{:08x}", prefix, msg_hash),
Linkage::Local,
use rustc_span::DUMMY_SP;
+use rustc_data_structures::fx::FxHashSet;
use rustc_errors::ErrorReported;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{
};
use rustc_middle::ty::{Const, ConstKind};
use rustc_target::abi::Align;
-use rustc_data_structures::fx::FxHashSet;
use cranelift_codegen::ir::GlobalValueData;
use cranelift_module::*;
match const_.val {
ConstKind::Value(_) => {}
ConstKind::Unevaluated(def, ref substs, promoted) => {
- if let Err(err) = fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
+ if let Err(err) =
+ fx.tcx
+ .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+ {
match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
- fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ fx.tcx
+ .sess
+ .span_err(constant.span, "erroneous constant encountered");
}
ErrorHandled::TooGeneric => {
- span_bug!(constant.span, "codgen encountered polymorphic constant: {:?}", err);
+ span_bug!(
+ constant.span,
+ "codgen encountered polymorphic constant: {:?}",
+ err
+ );
}
}
}
}
- ConstKind::Param(_) | ConstKind::Infer(_) | ConstKind::Bound(_, _)
- | ConstKind::Placeholder(_) | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
}
}
}
fx.add_comment(local_data_id, format!("{:?}", def_id));
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
assert!(!layout.is_unsized(), "unsized statics aren't supported");
- assert!(matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}), "tls static referenced without Rvalue::ThreadLocalRef");
+ assert!(
+ matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}),
+ "tls static referenced without Rvalue::ThreadLocalRef"
+ );
CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
}
fx,
def.did,
fx.layout_of(fx.monomorphize(&constant.literal.ty)),
- ).to_cvalue(fx);
+ )
+ .to_cvalue(fx);
}
ConstKind::Unevaluated(def, ref substs, promoted) => {
- match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
+ match fx
+ .tcx
+ .const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
+ {
Ok(const_val) => const_val,
Err(_) => {
if promoted.is_none() {
- fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ fx.tcx
+ .sess
+ .span_err(constant.span, "erroneous constant encountered");
}
return crate::trap::trap_unreachable_ret_value(
fx,
}
}
}
- ConstKind::Param(_) | ConstKind::Infer(_) | ConstKind::Bound(_, _)
- | ConstKind::Placeholder(_) | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
};
trans_const_value(fx, const_val, const_.ty)
pub(crate) fn trans_const_value<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
const_val: ConstValue<'tcx>,
- ty: Ty<'tcx>
+ ty: Ty<'tcx>,
) -> CValue<'tcx> {
let layout = fx.layout_of(ty);
assert!(!layout.is_unsized(), "sized const value");
if fx.clif_type(layout.ty).is_none() {
let (size, align) = (layout.size, layout.align.pref);
let mut alloc = Allocation::from_bytes(
- std::iter::repeat(0).take(size.bytes_usize()).collect::<Vec<u8>>(),
+ std::iter::repeat(0)
+ .take(size.bytes_usize())
+ .collect::<Vec<u8>>(),
align,
);
let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => {
fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
- let data_id = data_id_for_alloc_id(&mut fx.cx.module, ptr.alloc_id, alloc.align, alloc.mutability);
- let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ let data_id = data_id_for_alloc_id(
+ &mut fx.cx.module,
+ ptr.alloc_id,
+ alloc.align,
+ alloc.mutability,
+ );
+ let local_data_id =
+ fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
Some(GlobalAlloc::Function(instance)) => {
- let func_id = crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
- let local_func_id = fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ let func_id =
+ crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
+ let local_func_id =
+ fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
Some(GlobalAlloc::Static(def_id)) => {
assert!(fx.tcx.is_static(def_id));
- let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
- let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ let data_id =
+ data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
+ let local_data_id =
+ fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
- let val = fx.bcx.ins().iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap());
+ let val = fx
+ .bcx
+ .ins()
+ .iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap());
return CValue::by_val(val, layout);
}
}
}
- ConstValue::ByRef { alloc, offset } => {
- CValue::by_ref(
- pointer_for_allocation(fx, alloc)
- .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
- layout,
- )
- }
+ ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc)
+ .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+ layout,
+ ),
ConstValue::Slice { data, start, end } => {
let ptr = pointer_for_allocation(fx, data)
.offset_i64(fx, i64::try_from(start).unwrap())
.get_addr(fx);
- let len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+ let len = fx.bcx.ins().iconst(
+ fx.pointer_type,
+ i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
+ );
CValue::by_val_pair(ptr, len, layout)
}
}
crate::linkage::get_static_linkage(tcx, def_id)
} else {
if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
- || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny) {
+ || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+ {
Linkage::Preemptible
} else {
Linkage::Import
.unwrap();
let mut data_ctx = DataContext::new();
let data = module.declare_data_in_data(data_id, &mut data_ctx);
- data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
+ data_ctx.define(
+ std::iter::repeat(0)
+ .take(pointer_ty(tcx).bytes() as usize)
+ .collect(),
+ );
data_ctx.write_data_addr(0, data, 0);
match module.define_data(ref_data_id, &data_ctx) {
// Every time the static is referenced there will be another definition of this global,
TodoItem::Static(def_id) => {
//println!("static {:?}", def_id);
- let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str());
+ let section_name = tcx
+ .codegen_fn_attrs(def_id)
+ .link_section
+ .map(|s| s.as_str());
let const_ = tcx.const_eval_poly(def_id).unwrap();
data_ctx.set_segment_section("", &*section_name);
}
- let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
+ let bytes = alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+ .to_vec();
data_ctx.define(bytes.into_boxed_slice());
for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
let endianness = tcx.data_layout.endian;
let offset = offset.bytes() as usize;
let ptr_size = tcx.data_layout.pointer_size;
- let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..offset + ptr_size.bytes() as usize);
+ let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+ offset..offset + ptr_size.bytes() as usize,
+ );
read_target_uint(endianness, bytes).unwrap()
};
data_id_for_alloc_id(module, reloc, target_alloc.align, target_alloc.mutability)
}
GlobalAlloc::Static(def_id) => {
- if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
- tcx.sess.fatal(&format!("Allocation {:?} contains reference to TLS value {:?}", alloc, def_id));
+ if tcx
+ .codegen_fn_attrs(def_id)
+ .flags
+ .contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ {
+ tcx.sess.fatal(&format!(
+ "Allocation {:?} contains reference to TLS value {:?}",
+ alloc, def_id
+ ));
}
// Don't push a `TodoItem::Static` here, as it will cause statics used by
) -> Option<&'tcx Const<'tcx>> {
match operand {
Operand::Copy(_) | Operand::Move(_) => None,
- Operand::Constant(const_) => {
- Some(fx.monomorphize(&const_.literal).eval(fx.tcx, ParamEnv::reveal_all()))
- }
+ Operand::Constant(const_) => Some(
+ fx.monomorphize(&const_.literal)
+ .eval(fx.tcx, ParamEnv::reveal_all()),
+ ),
}
}
cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
);
let val = (addr as u64 as i64 + reloc.addend) as u64;
- self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
+ self.writer
+ .write_udata_at(reloc.offset as usize, val, reloc.size)
+ .unwrap();
}
}
}
use crate::prelude::*;
-use rustc_span::{FileName, SourceFile, SourceFileAndLine, Pos, SourceFileHash, SourceFileHashAlgorithm};
+use rustc_span::{
+ FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
+};
use cranelift_codegen::binemit::CodeOffset;
use cranelift_codegen::machinst::MachSrcLoc;
use gimli::write::{
- Address, AttributeValue, FileId, LineProgram, LineString, FileInfo, LineStringTable, UnitEntryId,
+ Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
+ UnitEntryId,
};
// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
let file_name = match iter.next_back() {
Some(Component::Normal(p)) => p,
component => {
- panic!("Path component {:?} of path {} is an invalid filename", component, path.display());
+ panic!(
+ "Path component {:?} of path {} is an invalid filename",
+ component,
+ path.display()
+ );
}
};
let parent = iter.as_path();
// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
- #[cfg(unix)] {
+ #[cfg(unix)]
+ {
use std::os::unix::ffi::OsStrExt;
return path.as_bytes();
}
- #[cfg(not(unix))] {
+ #[cfg(not(unix))]
+ {
return path.to_str().unwrap().as_bytes();
}
}
} else {
line_program.default_directory()
};
- let file_name = LineString::new(
- file_name,
- line_program.encoding(),
- line_strings,
- );
+ let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
let info = make_file_info(file.src_hash);
// In order to have a good line stepping behavior in debugger, we overwrite debug
// locations of macro expansions with that of the outermost expansion site
// (unless the crate is being compiled with `-Z debug-macros`).
- let span = if !span.from_expansion() ||
- tcx.sess.opts.debugging_opts.debug_macros {
+ let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
span
} else {
// Walk up the macro expansion chain until we reach a non-expanded span.
Ok(SourceFileAndLine { sf: file, line }) => {
let line_pos = file.line_begin_pos(span.lo());
- (file, u64::try_from(line).unwrap() + 1, u64::from((span.lo() - line_pos).to_u32()) + 1)
+ (
+ file,
+ u64::try_from(line).unwrap() + 1,
+ u64::from((span.lo() - line_pos).to_u32()) + 1,
+ )
}
- Err(file) => (file, 0, 0)
+ Err(file) => (file, 0, 0),
};
// line_program_add_file is very slow.
line_program.generate_row();
};
- line_program.begin_sequence(Some(Address::Symbol {
- symbol,
- addend: 0,
- }));
+ line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
let mut func_end = 0;
let srcloc = func.srclocs[inst];
line_program.row().address_offset = u64::from(offset);
if !srcloc.is_default() {
- let source_info = *source_info_set.get_index(srcloc.bits() as usize).unwrap();
+ let source_info =
+ *source_info_set.get_index(srcloc.bits() as usize).unwrap();
create_row_for_span(line_program, source_info.span);
} else {
create_row_for_span(line_program, function_span);
let entry = self.dwarf.unit.get_mut(entry_id);
entry.set(
gimli::DW_AT_low_pc,
- AttributeValue::Address(Address::Symbol {
- symbol,
- addend: 0,
- }),
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
+ entry.set(
+ gimli::DW_AT_high_pc,
+ AttributeValue::Udata(u64::from(func_end)),
);
- entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
self.emit_location(entry_id, function_span);
use cranelift_codegen::ValueLocRange;
use gimli::write::{
- Address, AttributeValue, DwarfUnit, Expression, LineProgram,
- LineString, Location, LocationList, Range, RangeList, UnitEntryId,
+ Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
+ LocationList, Range, RangeList, UnitEntryId,
};
use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
Some(path) => {
let name = path.to_string_lossy().into_owned();
(name, None)
- },
+ }
None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
};
};
let type_entry = self.dwarf.unit.get_mut(type_id);
- type_entry.set(gimli::DW_AT_name, AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes()));
+ type_entry.set(
+ gimli::DW_AT_name,
+ AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes()),
+ );
type_entry.set(
gimli::DW_AT_byte_size,
AttributeValue::Udata(u64::from(ty.bytes())),
for (field_idx, field_def) in variant.fields.iter().enumerate() {
let field_offset = layout.fields.offset(field_idx);
- let field_layout = layout.field(&layout::LayoutCx {
- tcx: self.tcx,
- param_env: ParamEnv::reveal_all(),
- }, field_idx).unwrap();
+ let field_layout = layout
+ .field(
+ &layout::LayoutCx {
+ tcx: self.tcx,
+ param_env: ParamEnv::reveal_all(),
+ },
+ field_idx,
+ )
+ .unwrap();
let field_type = self.dwarf_ty(field_layout.ty);
let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
let field_entry = self.dwarf.unit.get_mut(field_id);
- field_entry.set(gimli::DW_AT_name, AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()));
- field_entry.set(gimli::DW_AT_data_member_location, AttributeValue::Udata(field_offset.bytes()));
+ field_entry.set(
+ gimli::DW_AT_name,
+ AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()),
+ );
+ field_entry.set(
+ gimli::DW_AT_data_member_location,
+ AttributeValue::Udata(field_offset.bytes()),
+ );
field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
}
fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
let dw_ty = self.dwarf_ty(ty);
- let var_id = self
- .dwarf
- .unit
- .add(scope, gimli::DW_TAG_variable);
+ let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
let var_entry = self.dwarf.unit.get_mut(var_id);
var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
// FIXME: add to appropriate scope instead of root
let scope = self.dwarf.unit.root();
- let entry_id = self
- .dwarf
- .unit
- .add(scope, gimli::DW_TAG_subprogram);
+ let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
let entry = self.dwarf.unit.get_mut(entry_id);
let name_id = self.dwarf.strings.add(name);
// Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
- entry.set(
- gimli::DW_AT_name,
- AttributeValue::StringRef(name_id),
- );
+ entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
entry.set(
gimli::DW_AT_linkage_name,
AttributeValue::StringRef(name_id),
);
- let end = self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set);
+ let end =
+ self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set);
- self
- .unit_range_list
- .0
- .push(Range::StartLength {
- begin: Address::Symbol {
- symbol,
- addend: 0,
- },
- length: u64::from(end),
- });
+ self.unit_range_list.0.push(Range::StartLength {
+ begin: Address::Symbol { symbol, addend: 0 },
+ length: u64::from(end),
+ });
if isa.get_mach_backend().is_some() {
return; // Not yet implemented for the AArch64 backend.
let func_entry = self.dwarf.unit.get_mut(entry_id);
// Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
- func_entry.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Symbol {
- symbol,
- addend: 0,
- }));
+ func_entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
// Using Udata for DW_AT_high_pc requires at least DWARF4
func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
// FIXME Remove once actual debuginfo for locals works.
- for (i, (param, &val)) in context.func.signature.params.iter().zip(context.func.dfg.block_params(context.func.layout.entry_block().unwrap())).enumerate() {
+ for (i, (param, &val)) in context
+ .func
+ .signature
+ .params
+ .iter()
+ .zip(
+ context
+ .func
+ .dfg
+ .block_params(context.func.layout.entry_block().unwrap()),
+ )
+ .enumerate()
+ {
use cranelift_codegen::ir::ArgumentPurpose;
let base_name = match param.purpose {
ArgumentPurpose::Normal => "arg",
ArgumentPurpose::StructArgument(_) => "struct_arg",
ArgumentPurpose::StructReturn => "sret",
- ArgumentPurpose::Link | ArgumentPurpose::FramePointer | ArgumentPurpose::CalleeSaved => continue,
- ArgumentPurpose::VMContext | ArgumentPurpose::SignatureId | ArgumentPurpose::StackLimit => unreachable!(),
+ ArgumentPurpose::Link
+ | ArgumentPurpose::FramePointer
+ | ArgumentPurpose::CalleeSaved => continue,
+ ArgumentPurpose::VMContext
+ | ArgumentPurpose::SignatureId
+ | ArgumentPurpose::StackLimit => unreachable!(),
};
let name = format!("{}{}", base_name, i);
let dw_ty = self.dwarf_ty_for_clif_ty(param.value_type);
- let loc = translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap();
+ let loc =
+ translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap();
- let arg_id = self.dwarf.unit.add(entry_id, gimli::DW_TAG_formal_parameter);
+ let arg_id = self
+ .dwarf
+ .unit
+ .add(entry_id, gimli::DW_TAG_formal_parameter);
let var_entry = self.dwarf.unit.get_mut(arg_id);
var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
symbol: usize,
context: &Context,
local_map: &FxHashMap<mir::Local, CPlace<'tcx>>,
- #[allow(rustc::default_hash_types)]
- value_labels_ranges: &std::collections::HashMap<ValueLabel, Vec<ValueLocRange>>,
+ #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
+ ValueLabel,
+ Vec<ValueLocRange>,
+ >,
place: Place<'tcx>,
) -> AttributeValue {
assert!(place.projection.is_empty()); // FIXME implement them
symbol,
addend: i64::from(value_loc_range.end),
},
- data: translate_loc(isa, value_loc_range.loc, &context.func.stack_slots).unwrap(),
+ data: translate_loc(
+ isa,
+ value_loc_range.loc,
+ &context.func.stack_slots,
+ )
+ .unwrap(),
})
.collect(),
);
}
// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
-fn translate_loc(isa: &dyn TargetIsa, loc: ValueLoc, stack_slots: &StackSlots) -> Option<Expression> {
+fn translate_loc(
+ isa: &dyn TargetIsa,
+ loc: ValueLoc,
+ stack_slots: &StackSlots,
+) -> Option<Expression> {
match loc {
ValueLoc::Reg(reg) => {
let machine_reg = isa.map_dwarf_register(reg).unwrap();
use crate::prelude::*;
-use cranelift_codegen::isa::{TargetIsa, unwind::UnwindInfo};
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
}
impl<'tcx> UnwindContext<'tcx> {
- pub(crate) fn new(
- tcx: TyCtxt<'tcx>,
- isa: &dyn TargetIsa,
- ) -> Self {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
let mut frame_table = FrameTable::default();
-
let cie_id = if let Some(cie) = isa.create_systemv_cie() {
Some(frame_table.add_cie(cie))
} else {
match unwind_info {
UnwindInfo::SystemV(unwind_info) => {
- self.frame_table.add_fde(self.cie_id.unwrap(), unwind_info.to_fde(Address::Symbol {
- symbol: func_id.as_u32() as usize,
- addend: 0,
- }));
- },
+ self.frame_table.add_fde(
+ self.cie_id.unwrap(),
+ unwind_info.to_fde(Address::Symbol {
+ symbol: func_id.as_u32() as usize,
+ addend: 0,
+ }),
+ );
+ }
UnwindInfo::WindowsX64(_) => {
// FIXME implement this
}
}
pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx)));
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
+ self.tcx,
+ )));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if !eh_frame.0.writer.slice().is_empty() {
self,
jit_module: &mut Module<cranelift_simplejit::SimpleJITBackend>,
) -> Option<UnwindRegistry> {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx)));
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
+ self.tcx,
+ )));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if eh_frame.0.writer.slice().is_empty() {
//! Adapted from https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs
-use rustc_target::abi::{TagEncoding, Int, Variants};
+use rustc_target::abi::{Int, TagEncoding, Variants};
use crate::prelude::*;
use std::path::PathBuf;
+use rustc_codegen_ssa::back::linker::LinkerInfo;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::cstore::EncodedMetadata;
use rustc_middle::mir::mono::CodegenUnit;
-use rustc_session::config::{DebugInfo, OutputType};
use rustc_session::cgu_reuse_tracker::CguReuse;
-use rustc_codegen_ssa::back::linker::LinkerInfo;
-use rustc_codegen_ssa::{CrateInfo, CodegenResults, CompiledModule, ModuleKind};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_session::config::{DebugInfo, OutputType};
use crate::prelude::*;
struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
-
impl<HCX> HashStable<HCX> for ModuleCodegenResult {
fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
// do nothing
unwind_context: UnwindContext<'_>,
map_product: impl FnOnce(B::Product) -> B::Product,
) -> ModuleCodegenResult
- where B::Product: AddConstructor + Emit + WriteDebugInfo,
+where
+ B::Product: AddConstructor + Emit + WriteDebugInfo,
{
module.finalize_definitions();
let mut product = module.finish();
.temp_path(OutputType::Object, Some(&name));
let obj = product.emit();
if let Err(err) = std::fs::write(&tmp_file, obj) {
- tcx.sess.fatal(&format!("error writing object file: {}", err));
+ tcx.sess
+ .fatal(&format!("error writing object file: {}", err));
}
let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() {
let mut object = None;
let work_product = cgu.work_product(tcx);
if let Some(saved_file) = &work_product.saved_file {
- let obj_out = tcx.output_filenames(LOCAL_CRATE).temp_path(OutputType::Object, Some(&cgu.name().as_str()));
+ let obj_out = tcx
+ .output_filenames(LOCAL_CRATE)
+ .temp_path(OutputType::Object, Some(&cgu.name().as_str()));
object = Some(obj_out.clone());
let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
// Initialize the global atomic mutex using a constructor for proc-macros.
// FIXME implement atomic instructions in Cranelift.
let mut init_atomics_mutex_from_constructor = None;
- if tcx.sess.crate_types().contains(&rustc_session::config::CrateType::ProcMacro) {
- if mono_items.iter().any(|(mono_item, _)| {
- match mono_item {
- rustc_middle::mir::mono::MonoItem::Static(def_id) => {
- tcx.symbol_name(Instance::mono(tcx, *def_id)).name.contains("__rustc_proc_macro_decls_")
- }
- _ => false,
- }
+ if tcx
+ .sess
+ .crate_types()
+ .contains(&rustc_session::config::CrateType::ProcMacro)
+ {
+ if mono_items.iter().any(|(mono_item, _)| match mono_item {
+ rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
+ .symbol_name(Instance::mono(tcx, *def_id))
+ .name
+ .contains("__rustc_proc_macro_decls_"),
+ _ => false,
}) {
- init_atomics_mutex_from_constructor = Some(crate::atomic_shim::init_global_lock_constructor(&mut module, &format!("{}_init_atomics_mutex", cgu_name.as_str())));
+ init_atomics_mutex_from_constructor =
+ Some(crate::atomic_shim::init_global_lock_constructor(
+ &mut module,
+ &format!("{}_init_atomics_mutex", cgu_name.as_str()),
+ ));
}
}
let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None);
super::codegen_mono_items(&mut cx, mono_items);
- let (mut module, global_asm, debug, mut unwind_context) = tcx.sess.time("finalize CodegenCx", || cx.finalize());
+ let (mut module, global_asm, debug, mut unwind_context) =
+ tcx.sess.time("finalize CodegenCx", || cx.finalize());
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context);
let codegen_result = emit_module(
}
product
- }
+ },
);
codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
}
let modules = super::time(tcx, "codegen mono items", || {
- cgus.iter().map(|cgu| {
- let cgu_reuse = determine_cgu_reuse(tcx, cgu);
- tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
-
- match cgu_reuse {
- _ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
- CguReuse::No => {}
- CguReuse::PreLto => {
- return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+ tcx.sess
+ .cgu_reuse_tracker
+ .set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ _ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
+ CguReuse::No => {}
+ CguReuse::PreLto => {
+ return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ }
+ CguReuse::PostLto => unreachable!(),
}
- CguReuse::PostLto => unreachable!(),
- }
-
- let dep_node = cgu.codegen_dep_node(tcx);
- let (ModuleCodegenResult(module, work_product), _) =
- tcx.dep_graph.with_task(dep_node, tcx, cgu.name(), module_codegen, rustc_middle::dep_graph::hash_result);
- if let Some((id, product)) = work_product {
- work_products.insert(id, product);
- }
+ let dep_node = cgu.codegen_dep_node(tcx);
+ let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ cgu.name(),
+ module_codegen,
+ rustc_middle::dep_graph::hash_result,
+ );
+
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
- module
- }).collect::<Vec<_>>()
+ module
+ })
+ .collect::<Vec<_>>()
});
tcx.sess.abort_if_errors();
let mut allocator_module = new_module(tcx, "allocator_shim".to_string());
let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa());
- let created_alloc_shim = crate::allocator::codegen(
- tcx,
- &mut allocator_module,
- &mut allocator_unwind_context,
- );
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
let allocator_module = if created_alloc_shim {
let ModuleCodegenResult(module, work_product) = emit_module(
});
if let Err(err) = std::fs::write(&tmp_file, obj) {
- tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ tcx.sess
+ .fatal(&format!("error writing metadata object file: {}", err));
}
(metadata_cgu_name, tmp_file)
rustc_incremental::assert_module_sources::assert_module_sources(tcx);
}
- Box::new((CodegenResults {
- crate_name: tcx.crate_name(LOCAL_CRATE),
- modules,
- allocator_module,
- metadata_module,
- crate_hash: tcx.crate_hash(LOCAL_CRATE),
- metadata,
- windows_subsystem: None, // Windows is not yet supported
- linker_info: LinkerInfo::new(tcx),
- crate_info: CrateInfo::new(tcx),
- }, work_products))
+ Box::new((
+ CodegenResults {
+ crate_name: tcx.crate_name(LOCAL_CRATE),
+ modules,
+ allocator_module,
+ metadata_module,
+ crate_hash: tcx.crate_hash(LOCAL_CRATE),
+ metadata,
+ windows_subsystem: None, // Windows is not yet supported
+ linker_info: LinkerInfo::new(tcx),
+ crate_info: CrateInfo::new(tcx),
+ },
+ work_products,
+ ))
}
fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
// FIXME fix linker error on macOS
if cfg!(not(feature = "inline_asm")) {
- tcx.sess.fatal("asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift");
+ tcx.sess.fatal(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+ );
} else {
- tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ tcx.sess
+ .fatal("asm! and global_asm! are not yet supported on macOS and Windows");
}
}
let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
// Remove all LLVM style comments
- let global_asm = global_asm.lines().map(|line| {
- if let Some(index) = line.find("//") {
- &line[0..index]
- } else {
- line
- }
- }).collect::<Vec<_>>().join("\n");
+ let global_asm = global_asm
+ .lines()
+ .map(|line| {
+ if let Some(index) = line.find("//") {
+ &line[0..index]
+ } else {
+ line
+ }
+ })
+ .collect::<Vec<_>>()
+ .join("\n");
let output_object_file = tcx
.output_filenames(LOCAL_CRATE)
// Assemble `global_asm`
let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
let mut child = Command::new(assembler)
- .arg("-o").arg(&global_asm_object_file)
+ .arg("-o")
+ .arg(&global_asm_object_file)
.stdin(Stdio::piped())
.spawn()
.expect("Failed to spawn `as`.");
- child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ child
+ .stdin
+ .take()
+ .unwrap()
+ .write_all(global_asm.as_bytes())
+ .unwrap();
let status = child.wait().expect("Failed to wait for `as`.");
if !status.success() {
- tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
+ tcx.sess
+ .fatal(&format!("Failed to assemble `{}`", global_asm));
}
// Link the global asm and main object file together
std::fs::rename(&output_object_file, &main_object_file).unwrap();
let status = Command::new(linker)
.arg("-r") // Create a new object file
- .arg("-o").arg(output_object_file)
+ .arg("-o")
+ .arg(output_object_file)
.arg(&main_object_file)
.arg(&global_asm_object_file)
.status()
}
let work_product_id = &cgu.work_product_id();
- if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ if tcx
+ .dep_graph
+ .previous_work_product(work_product_id)
+ .is_none()
+ {
// We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session.
return CguReuse::No;
// Rustc opens us without the RTLD_GLOBAL flag, so __cg_clif_global_atomic_mutex will not be
// exported. We fix this by opening ourself again as global.
// FIXME remove once atomic_shim is gone
- let cg_dylib = std::ffi::OsString::from(&tcx.sess.opts.debugging_opts.codegen_backend.as_ref().unwrap());
- std::mem::forget(libloading::os::unix::Library::open(Some(cg_dylib), libc::RTLD_NOW | libc::RTLD_GLOBAL).unwrap());
-
+ let cg_dylib = std::ffi::OsString::from(
+ &tcx.sess
+ .opts
+ .debugging_opts
+ .codegen_backend
+ .as_ref()
+ .unwrap(),
+ );
+ std::mem::forget(
+ libloading::os::unix::Library::open(Some(cg_dylib), libc::RTLD_NOW | libc::RTLD_GLOBAL)
+ .unwrap(),
+ );
let imported_symbols = load_imported_symbols_for_jit(tcx);
let mut cx = crate::CodegenCx::new(tcx, jit_module, false);
- let (mut jit_module, global_asm, _debug, mut unwind_context) = super::time(tcx, "codegen mono items", || {
- super::codegen_mono_items(&mut cx, mono_items);
- tcx.sess.time("finalize CodegenCx", || cx.finalize())
- });
+ let (mut jit_module, global_asm, _debug, mut unwind_context) =
+ super::time(tcx, "codegen mono items", || {
+ super::codegen_mono_items(&mut cx, mono_items);
+ tcx.sess.time("finalize CodegenCx", || cx.finalize())
+ });
if !global_asm.is_empty() {
tcx.sess.fatal("Global asm is not supported in JIT mode");
}
tcx.sess.abort_if_errors();
if std::env::var("CG_CLIF_JIT").is_ok()
- && tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable)
+ && tcx
+ .sess
+ .crate_types()
+ .contains(&rustc_session::config::CrateType::Executable)
{
#[cfg(feature = "jit")]
let _: ! = jit::run_jit(tcx);
#[cfg(not(feature = "jit"))]
- tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+ tcx.sess
+ .fatal("jit support was disabled when compiling rustc_codegen_cranelift");
}
aot::run_aot(tcx, metadata, need_metadata_module)
for &(mono_item, (linkage, visibility)) in &mono_items {
match mono_item {
MonoItem::Fn(instance) => {
- let (name, sig) =
- get_function_name_and_sig(cx.tcx, cx.module.isa().triple(), instance, false);
+ let (name, sig) = get_function_name_and_sig(
+ cx.tcx,
+ cx.module.isa().triple(),
+ instance,
+ false,
+ );
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
cx.module.declare_function(&name, linkage, &sig).unwrap();
}
}
});
- tcx.sess.time("codegen fn", || crate::base::trans_fn(cx, inst, linkage));
+ tcx.sess
+ .time("codegen fn", || crate::base::trans_fn(cx, inst, linkage));
}
MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
- if std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_ref().map(|val| &**val) == Ok("1") {
+ if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
+ .as_ref()
+ .map(|val| &**val)
+ == Ok("1")
+ {
println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
let before = std::time::Instant::now();
let res = tcx.sess.time(name, f);
let after = std::time::Instant::now();
- println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
+ println!(
+ "[{:<30}: {}] end time: {:?}",
+ tcx.crate_name(LOCAL_CRATE),
+ name,
+ after - before
+ );
res
} else {
tcx.sess.time(name, f)
use std::fmt::Write;
-use rustc_ast::ast::{InlineAsmTemplatePiece, InlineAsmOptions};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_target::asm::*;
InlineAsmOperand::In { reg, ref value } => {
let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class())));
- inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, value).load_scalar(fx)));
+ inputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::trans_operand(fx, value).load_scalar(fx),
+ ));
}
- InlineAsmOperand::Out { reg, late: _, place } => {
+ InlineAsmOperand::Out {
+ reg,
+ late: _,
+ place,
+ } => {
let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class())));
if let Some(place) = place {
- outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, place)));
+ outputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::trans_place(fx, place),
+ ));
}
}
- InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
+ InlineAsmOperand::InOut {
+ reg,
+ late: _,
+ ref in_value,
+ out_place,
+ } => {
let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class())));
- inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, in_value).load_scalar(fx)));
+ inputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::trans_operand(fx, in_value).load_scalar(fx),
+ ));
if let Some(out_place) = out_place {
- outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, out_place)));
+ outputs.push((
+ reg,
+ new_slot(reg.reg_class()),
+ crate::base::trans_place(fx, out_place),
+ ));
}
}
InlineAsmOperand::Const { value: _ } => todo!(),
let inline_asm_index = fx.inline_asm_index;
fx.inline_asm_index += 1;
- let asm_name = format!("{}__inline_asm_{}", fx.tcx.symbol_name(fx.instance).name, inline_asm_index);
+ let asm_name = format!(
+ "{}__inline_asm_{}",
+ fx.tcx.symbol_name(fx.instance).name,
+ inline_asm_index
+ );
- let generated_asm = generate_asm_wrapper(&asm_name, InlineAsmArch::X86_64, options, template, clobbered_regs, &inputs, &outputs);
+ let generated_asm = generate_asm_wrapper(
+ &asm_name,
+ InlineAsmArch::X86_64,
+ options,
+ template,
+ clobbered_regs,
+ &inputs,
+ &outputs,
+ );
fx.cx.global_asm.push_str(&generated_asm);
call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
let mut generated_asm = String::new();
writeln!(generated_asm, ".globl {}", asm_name).unwrap();
writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
- writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+ writeln!(
+ generated_asm,
+ ".section .text.{},\"ax\",@progbits",
+ asm_name
+ )
+ .unwrap();
writeln!(generated_asm, "{}:", asm_name).unwrap();
generated_asm.push_str(".intel_syntax noprefix\n");
InlineAsmTemplatePiece::String(s) => {
generated_asm.push_str(s);
}
- InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: _,
+ modifier: _,
+ span: _,
+ } => todo!(),
}
}
generated_asm.push('\n');
}
generated_asm.push_str(".att_syntax\n");
- writeln!(generated_asm, ".size {name}, .-{name}", name=asm_name).unwrap();
+ writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
generated_asm.push_str(".text\n");
generated_asm.push_str("\n\n");
#[cfg(debug_assertions)]
fx.add_comment(stack_slot, "inline asm scratch slot");
- let inline_asm_func = fx.cx.module.declare_function(asm_name, Linkage::Import, &Signature {
- call_conv: CallConv::SystemV,
- params: vec![AbiParam::new(fx.pointer_type)],
- returns: vec![],
- }).unwrap();
- let inline_asm_func = fx.cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ let inline_asm_func = fx
+ .cx
+ .module
+ .declare_function(
+ asm_name,
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::SystemV,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![],
+ },
+ )
+ .unwrap();
+ let inline_asm_func = fx
+ .cx
+ .module
+ .declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(inline_asm_func, asm_name);
for (_reg, offset, value) in inputs {
- fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ fx.bcx
+ .ins()
+ .stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
}
let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
for (_reg, offset, place) in outputs {
let ty = fx.clif_type(place.layout().ty).unwrap();
- let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ let value = fx
+ .bcx
+ .ins()
+ .stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
place.write_cvalue(fx, CValue::by_val(value, place.layout()));
}
}
match arch {
InlineAsmArch::X86_64 => {
write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
- reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None)
+ .unwrap();
generated_asm.push('\n');
}
_ => unimplemented!("save_register for {:?}", arch),
}
}
-fn restore_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
+fn restore_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+) {
match arch {
InlineAsmArch::X86_64 => {
generated_asm.push_str(" mov ");
- reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None)
+ .unwrap();
writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
}
_ => unimplemented!("restore_register for {:?}", arch),
fx.bcx.switch_to_block(leaf_0);
let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
- let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
- let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
- let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
- fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+ let vend0 = fx
+ .bcx
+ .ins()
+ .iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
+ let vend2 = fx
+ .bcx
+ .ins()
+ .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
+ let vend1 = fx
+ .bcx
+ .ins()
+ .iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
+ fx.bcx
+ .ins()
+ .jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
fx.bcx.switch_to_block(leaf_1);
let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
let additional_information = fx.bcx.ins().iconst(types::I32, 0);
- let ecx_features = fx.bcx.ins().iconst(
- types::I32,
- 0,
+ let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
+ let edx_features = fx
+ .bcx
+ .ins()
+ .iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
+ fx.bcx.ins().jump(
+ dest,
+ &[
+ cpu_signature,
+ additional_information,
+ ecx_features,
+ edx_features,
+ ],
);
- let edx_features = fx.bcx.ins().iconst(
- types::I32,
- 1 << 25 /* sse */ | 1 << 26 /* sse2 */,
- );
- fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
fx.bcx.switch_to_block(leaf_8000_0000);
let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
let zero = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+ fx.bcx
+ .ins()
+ .jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
fx.bcx.switch_to_block(leaf_8000_0001);
let zero = fx.bcx.ins().iconst(types::I32, 0);
let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+ fx.bcx
+ .ins()
+ .jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
fx.bcx.switch_to_block(unsupported_leaf);
- crate::trap::trap_unreachable(fx, "__cpuid_count arch intrinsic doesn't yet support specified leaf");
+ crate::trap::trap_unreachable(
+ fx,
+ "__cpuid_count arch intrinsic doesn't yet support specified leaf",
+ );
fx.bcx.switch_to_block(dest);
fx.bcx.ins().nop();
}
}
-macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
+macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
crate::atomic_shim::lock_global_lock($fx);
let clif_ty = $fx.clif_type($T).unwrap();
match $ty.kind {
ty::Uint(_) | ty::Int(_) => {}
_ => {
- $fx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
+ $fx.tcx.sess.span_err(
+ $span,
+ &format!(
+ "`{}` intrinsic: expected basic integer type, found `{:?}`",
+ $intrinsic, $ty
+ ),
+ );
// Prevent verifier error
crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
return;
rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(),
_ => unreachable!("lane_type_and_count({:?})", layout),
};
- let lane_layout = layout.field(&ty::layout::LayoutCx {
- tcx,
- param_env: ParamEnv::reveal_all(),
- }, 0).unwrap();
+ let lane_layout = layout
+ .field(
+ &ty::layout::LayoutCx {
+ tcx,
+ param_env: ParamEnv::reveal_all(),
+ },
+ 0,
+ )
+ .unwrap();
(lane_layout, lane_count)
}
trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
}
"transmute" => {
- trap_unreachable(
- fx,
- "[corruption] Transmuting to uninhabited type.",
- );
+ trap_unreachable(fx, "[corruption] Transmuting to uninhabited type.");
}
_ => unimplemented!("unsupported instrinsic {}", intrinsic),
}
-use crate::prelude::*;
use super::*;
+use crate::prelude::*;
pub(super) fn codegen_simd_intrinsic_call<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-#![feature(rustc_private, decl_macro, type_alias_impl_trait, associated_type_bounds, never_type, try_blocks)]
+#![feature(
+ rustc_private,
+ decl_macro,
+ type_alias_impl_trait,
+ associated_type_bounds,
+ never_type,
+ try_blocks
+)]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
extern crate libc;
#[macro_use]
extern crate rustc_middle;
+extern crate rustc_ast;
extern crate rustc_codegen_ssa;
extern crate rustc_data_structures;
extern crate rustc_errors;
extern crate rustc_span;
extern crate rustc_symbol_mangling;
extern crate rustc_target;
-extern crate rustc_ast;
// This prevents duplicating functions and statics that are already part of the host rustc process.
#[allow(unused_extern_crates)]
use std::any::Any;
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
use rustc_errors::ErrorReported;
use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId};
use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
-use rustc_session::Session;
-use rustc_session::config::OutputFilenames;
use rustc_middle::ty::query::Providers;
-use rustc_codegen_ssa::CodegenResults;
-use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
use cranelift_codegen::settings::{self, Configurable};
mod analyze;
mod archive;
mod atomic_shim;
-mod base;
mod backend;
+mod base;
mod cast;
mod codegen_i128;
mod common;
pub(crate) use rustc_ast::ast::{FloatTy, IntTy, UintTy};
pub(crate) use rustc_span::Span;
- pub(crate) use rustc_middle::bug;
pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+ pub(crate) use rustc_middle::bug;
pub(crate) use rustc_middle::mir::{self, *};
pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
- pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
pub(crate) use rustc_middle::ty::{
self, FnSig, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeAndMut, TypeFoldable,
};
+ pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
pub(crate) use rustc_data_structures::fx::FxHashMap;
pub(crate) use rustc_index::vec::Idx;
- pub(crate) use cranelift_codegen::Context;
pub(crate) use cranelift_codegen::entity::EntitySet;
- pub(crate) use cranelift_codegen::ir::{AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc, StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value};
pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
pub(crate) use cranelift_codegen::ir::function::Function;
pub(crate) use cranelift_codegen::ir::types;
+ pub(crate) use cranelift_codegen::ir::{
+ AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+ StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ };
pub(crate) use cranelift_codegen::isa::{self, CallConv};
+ pub(crate) use cranelift_codegen::Context;
pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
pub(crate) use cranelift_module::{
self, Backend, DataContext, DataId, FuncId, Linkage, Module,
}
impl<'tcx, B: Backend + 'static> CodegenCx<'tcx, B> {
- fn new(
- tcx: TyCtxt<'tcx>,
- module: Module<B>,
- debug_info: bool,
- ) -> Self {
+ fn new(tcx: TyCtxt<'tcx>, module: Module<B>, debug_info: bool) -> Self {
let unwind_context = UnwindContext::new(tcx, module.isa());
let debug_context = if debug_info {
- Some(DebugContext::new(
- tcx,
- module.isa(),
- ))
+ Some(DebugContext::new(tcx, module.isa()))
} else {
None
};
}
}
- fn finalize(mut self) -> (Module<B>, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
+ fn finalize(
+ mut self,
+ ) -> (
+ Module<B>,
+ String,
+ Option<DebugContext<'tcx>>,
+ UnwindContext<'tcx>,
+ ) {
self.constants_cx.finalize(self.tcx, &mut self.module);
- (self.module, self.global_asm, self.debug_context, self.unwind_context)
+ (
+ self.module,
+ self.global_asm,
+ self.debug_context,
+ self.unwind_context,
+ )
}
}
sess: &Session,
dep_graph: &DepGraph,
) -> Result<Box<dyn Any>, ErrorReported> {
- let (codegen_results, work_products) = *ongoing_codegen.downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>().unwrap();
+ let (codegen_results, work_products) = *ongoing_codegen
+ .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+ .unwrap();
sess.time("serialize_work_products", move || {
rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
use crate::prelude::*;
-pub(crate) fn get_clif_linkage(mono_item: MonoItem<'_>, linkage: RLinkage, visibility: Visibility) -> Linkage {
+pub(crate) fn get_clif_linkage(
+ mono_item: MonoItem<'_>,
+ linkage: RLinkage,
+ visibility: Visibility,
+) -> Linkage {
match (linkage, visibility) {
(RLinkage::External, Visibility::Default) => Linkage::Export,
(RLinkage::Internal, Visibility::Default) => Linkage::Local,
return;
}
- create_entry_fn(tcx, module, unwind_context, main_def_id, use_start_lang_item);
+ create_entry_fn(
+ tcx,
+ module,
+ unwind_context,
+ main_def_id,
+ use_start_lang_item,
+ );
fn create_entry_fn(
tcx: TyCtxt<'_>,
cmain_func_id,
&mut ctx,
&mut cranelift_codegen::binemit::NullTrapSink {},
- ).unwrap();
+ )
+ .unwrap();
unwind_context.add_function(cmain_func_id, &ctx, m.isa());
}
}
use std::fs::File;
use std::path::Path;
-use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
-use rustc_session::config;
-use rustc_middle::ty::TyCtxt;
use rustc_codegen_ssa::METADATA_FILENAME;
use rustc_data_structures::owning_ref::{self, OwningRef};
use rustc_data_structures::rustc_erase_owner;
+use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config;
use rustc_target::spec::Target;
use crate::backend::WriteMetadata;
if entry.header().identifier() == METADATA_FILENAME.as_bytes() {
let mut buf = Vec::with_capacity(
usize::try_from(entry.header().size())
- .expect("Rlib metadata file too big to load into memory.")
+ .expect("Rlib metadata file too big to load into memory."),
);
::std::io::copy(&mut entry, &mut buf).map_err(|e| format!("{:?}", e))?;
let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf).into();
}
// Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112
-pub(crate) fn write_metadata<P: WriteMetadata>(tcx: TyCtxt<'_>, product: &mut P) -> EncodedMetadata {
+pub(crate) fn write_metadata<P: WriteMetadata>(
+ tcx: TyCtxt<'_>,
+ product: &mut P,
+) -> EncodedMetadata {
use flate2::write::DeflateEncoder;
use flate2::Compression;
use std::io::Write;
let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs);
- let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, (1 << ty.bits()) - 1);
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::UnsignedGreaterThan,
+ val,
+ (1 << ty.bits()) - 1,
+ );
let val = fx.bcx.ins().ireduce(ty, val);
(val, has_overflow)
}
let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs);
- let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
- let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, (1 << (ty.bits() - 1)) - 1);
+ let has_underflow =
+ fx.bcx
+ .ins()
+ .icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::SignedGreaterThan,
+ val,
+ (1 << (ty.bits() - 1)) - 1,
+ );
let val = fx.bcx.ins().ireduce(ty, val);
(val, fx.bcx.ins().bor(has_underflow, has_overflow))
}
} else {
let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
let not_all_zero = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0);
- let not_all_ones = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64);
+ let not_all_ones = fx.bcx.ins().icmp_imm(
+ IntCC::NotEqual,
+ val_hi,
+ u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64,
+ );
fx.bcx.ins().band(not_all_zero, not_all_ones)
};
(val, has_overflow)
}
- types::I128 => unreachable!("i128 should have been handled by codegen_i128::maybe_codegen"),
+ types::I128 => {
+ unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+ }
_ => unreachable!("invalid non-integer type {}", ty),
}
}
let val = fx.bcx.ins().ishl(lhs, actual_shift);
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow =
- fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ let has_overflow = fx
+ .bcx
+ .ins()
+ .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
BinOp::Shr => {
};
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow =
- fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ let has_overflow = fx
+ .bcx
+ .ins()
+ .icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
_ => bug!(
// FIXME directly write to result place instead
let out_place = CPlace::new_stack_slot(
fx,
- fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter())),
+ fx.layout_of(
+ fx.tcx
+ .mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
+ ),
);
let out_layout = out_place.layout();
out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
- let is_thin_ptr = in_lhs.layout().ty.builtin_deref(true).map(|TypeAndMut { ty, mutbl: _}| {
- !has_ptr_meta(fx.tcx, ty)
- }).unwrap_or(true);
+ let is_thin_ptr = in_lhs
+ .layout()
+ .ty
+ .builtin_deref(true)
+ .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+ .unwrap_or(true);
if is_thin_ptr {
match bin_op {
// bytecodealliance/cranelift#1339 is implemented.
let mut block_insts = FxHashMap::default();
- for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) {
+ for block in cold_blocks
+ .keys()
+ .filter(|&block| cold_blocks.contains(block))
+ {
let insts = ctx.func.layout.block_insts(block).collect::<Vec<_>>();
for &inst in &insts {
ctx.func.layout.remove_inst(inst);
}
// And then append them at the back again.
- for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) {
+ for block in cold_blocks
+ .keys()
+ .filter(|&block| cold_blocks.contains(block))
+ {
ctx.func.layout.append_block(block);
for inst in block_insts.remove(&block).unwrap() {
ctx.func.layout.append_inst(inst, block);
pub(crate) fn optimize_function<'tcx>(
tcx: TyCtxt<'tcx>,
- #[cfg_attr(not(debug_assertions), allow(unused_variables))]
- instance: Instance<'tcx>,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] instance: Instance<'tcx>,
ctx: &mut Context,
cold_blocks: &EntitySet<Block>,
clif_comments: &mut crate::pretty_clif::CommentWriter,
use rustc_data_structures::fx::{FxHashSet, FxHasher};
use cranelift_codegen::cursor::{Cursor, FuncCursor};
-use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
use cranelift_codegen::ir::immediates::Offset32;
+use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
use hashbrown::HashSet;
use std::hash::BuildHasherDefault;
impl StackSlotUsage {
fn potential_stores_for_load(&self, ctx: &Context, load: Inst) -> Vec<Inst> {
- self.stack_store.iter().cloned().filter(|&store| {
- match spatial_overlap(&ctx.func, store, load) {
- SpatialOverlap::No => false, // Can never be the source of the loaded value.
- SpatialOverlap::Partial | SpatialOverlap::Full => true,
- }
- }).filter(|&store| {
- match temporal_order(ctx, store, load) {
- TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
- TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
- }
- }).collect::<Vec<Inst>>()
+ self.stack_store
+ .iter()
+ .cloned()
+ .filter(|&store| {
+ match spatial_overlap(&ctx.func, store, load) {
+ SpatialOverlap::No => false, // Can never be the source of the loaded value.
+ SpatialOverlap::Partial | SpatialOverlap::Full => true,
+ }
+ })
+ .filter(|&store| {
+ match temporal_order(ctx, store, load) {
+ TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
+ TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
+ }
+ })
+ .collect::<Vec<Inst>>()
}
fn potential_loads_of_store(&self, ctx: &Context, store: Inst) -> Vec<Inst> {
- self.stack_load.iter().cloned().filter(|&load| {
- match spatial_overlap(&ctx.func, store, load) {
- SpatialOverlap::No => false, // Can never be the source of the loaded value.
- SpatialOverlap::Partial | SpatialOverlap::Full => true,
- }
- }).filter(|&load| {
- match temporal_order(ctx, store, load) {
- TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
- TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
- }
- }).collect::<Vec<Inst>>()
+ self.stack_load
+ .iter()
+ .cloned()
+ .filter(|&load| {
+ match spatial_overlap(&ctx.func, store, load) {
+ SpatialOverlap::No => false, // Can never be the source of the loaded value.
+ SpatialOverlap::Partial | SpatialOverlap::Full => true,
+ }
+ })
+ .filter(|&load| {
+ match temporal_order(ctx, store, load) {
+ TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
+ TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
+ }
+ })
+ .collect::<Vec<Inst>>()
}
fn remove_unused_stack_addr(func: &mut Function, inst: Inst) {
stack_slot,
offset: _,
} => {
- stack_slot_usage_map.entry(OrdStackSlot(stack_slot)).or_insert_with(StackSlotUsage::default).stack_addr.insert(inst);
+ stack_slot_usage_map
+ .entry(OrdStackSlot(stack_slot))
+ .or_insert_with(StackSlotUsage::default)
+ .stack_addr
+ .insert(inst);
}
InstructionData::StackLoad {
opcode: Opcode::StackLoad,
stack_slot,
offset: _,
} => {
- stack_slot_usage_map.entry(OrdStackSlot(stack_slot)).or_insert_with(StackSlotUsage::default).stack_load.insert(inst);
+ stack_slot_usage_map
+ .entry(OrdStackSlot(stack_slot))
+ .or_insert_with(StackSlotUsage::default)
+ .stack_load
+ .insert(inst);
}
InstructionData::StackStore {
opcode: Opcode::StackStore,
stack_slot,
offset: _,
} => {
- stack_slot_usage_map.entry(OrdStackSlot(stack_slot)).or_insert_with(StackSlotUsage::default).stack_store.insert(inst);
+ stack_slot_usage_map
+ .entry(OrdStackSlot(stack_slot))
+ .or_insert_with(StackSlotUsage::default)
+ .stack_store
+ .insert(inst);
}
_ => {}
}
pub(super) fn optimize_function(
ctx: &mut Context,
- #[cfg_attr(not(debug_assertions), allow(unused_variables))]
clif_comments: &mut crate::pretty_clif::CommentWriter,
) {
combine_stack_addr_with_load_store(&mut ctx.func);
remove_unused_stack_addr_and_stack_load(&mut opt_ctx);
- #[cfg(debug_assertions)] {
+ #[cfg(debug_assertions)]
+ {
for (&OrdStackSlot(stack_slot), usage) in &opt_ctx.stack_slot_usage_map {
clif_comments.add_comment(stack_slot, format!("used by: {:?}", usage));
}
#[cfg(debug_assertions)]
for &store in &potential_stores {
- clif_comments.add_comment(load, format!(
- "Potential store -> load forwarding {} -> {} ({:?}, {:?})",
- opt_ctx.ctx.func.dfg.display_inst(store, None),
- opt_ctx.ctx.func.dfg.display_inst(load, None),
- spatial_overlap(&opt_ctx.ctx.func, store, load),
- temporal_order(&opt_ctx.ctx, store, load),
- ));
+ clif_comments.add_comment(
+ load,
+ format!(
+ "Potential store -> load forwarding {} -> {} ({:?}, {:?})",
+ opt_ctx.ctx.func.dfg.display_inst(store, None),
+ opt_ctx.ctx.func.dfg.display_inst(load, None),
+ spatial_overlap(&opt_ctx.ctx.func, store, load),
+ temporal_order(&opt_ctx.ctx, store, load),
+ ),
+ );
}
match *potential_stores {
#[cfg(debug_assertions)]
clif_comments.add_comment(load, format!("[BUG?] Reading uninitialized memory"));
}
- [store] if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full && temporal_order(&opt_ctx.ctx, store, load) == TemporalOrder::DefinitivelyBefore => {
+ [store]
+ if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full
+ && temporal_order(&opt_ctx.ctx, store, load)
+ == TemporalOrder::DefinitivelyBefore =>
+ {
// Only one store could have been the origin of the value.
let stored_value = opt_ctx.ctx.func.dfg.inst_args(store)[0];
#[cfg(debug_assertions)]
- clif_comments.add_comment(load, format!("Store to load forward {} -> {}", store, load));
+ clif_comments
+ .add_comment(load, format!("Store to load forward {} -> {}", store, load));
users.change_load_to_alias(&mut opt_ctx.ctx.func, load, stored_value);
}
#[cfg(debug_assertions)]
for &load in &potential_loads {
- clif_comments.add_comment(store, format!(
- "Potential load from store {} <- {} ({:?}, {:?})",
- opt_ctx.ctx.func.dfg.display_inst(load, None),
- opt_ctx.ctx.func.dfg.display_inst(store, None),
- spatial_overlap(&opt_ctx.ctx.func, store, load),
- temporal_order(&opt_ctx.ctx, store, load),
- ));
+ clif_comments.add_comment(
+ store,
+ format!(
+ "Potential load from store {} <- {} ({:?}, {:?})",
+ opt_ctx.ctx.func.dfg.display_inst(load, None),
+ opt_ctx.ctx.func.dfg.display_inst(store, None),
+ spatial_overlap(&opt_ctx.ctx.func, store, load),
+ temporal_order(&opt_ctx.ctx, store, load),
+ ),
+ );
}
if potential_loads.is_empty() {
// FIXME also remove stores when there is always a next store before a load.
#[cfg(debug_assertions)]
- clif_comments.add_comment(store, format!("Remove dead stack store {} of {}", opt_ctx.ctx.func.dfg.display_inst(store, None), stack_slot.0));
+ clif_comments.add_comment(
+ store,
+ format!(
+ "Remove dead stack store {} of {}",
+ opt_ctx.ctx.func.dfg.display_inst(store, None),
+ stack_slot.0
+ ),
+ );
users.remove_dead_store(&mut opt_ctx.ctx.func, store);
}
while let Some(_block) = cursor.next_block() {
while let Some(inst) = cursor.next_inst() {
match cursor.func.dfg[inst] {
- InstructionData::Load { opcode: Opcode::Load, arg: addr, flags: _, offset } => {
- if cursor.func.dfg.ctrl_typevar(inst) == types::I128 || cursor.func.dfg.ctrl_typevar(inst).is_vector() {
+ InstructionData::Load {
+ opcode: Opcode::Load,
+ arg: addr,
+ flags: _,
+ offset,
+ } => {
+ if cursor.func.dfg.ctrl_typevar(inst) == types::I128
+ || cursor.func.dfg.ctrl_typevar(inst).is_vector()
+ {
                        continue; // WORKAROUND: stack_load.i128 not yet implemented
}
- if let Some((stack_slot, stack_addr_offset)) = try_get_stack_slot_and_offset_for_addr(cursor.func, addr) {
- if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into()) {
+ if let Some((stack_slot, stack_addr_offset)) =
+ try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
+ {
+ if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
+ {
let ty = cursor.func.dfg.ctrl_typevar(inst);
- cursor.func.dfg.replace(inst).stack_load(ty, stack_slot, combined_offset);
+ cursor.func.dfg.replace(inst).stack_load(
+ ty,
+ stack_slot,
+ combined_offset,
+ );
}
}
}
- InstructionData::Store { opcode: Opcode::Store, args: [value, addr], flags: _, offset } => {
- if cursor.func.dfg.ctrl_typevar(inst) == types::I128 || cursor.func.dfg.ctrl_typevar(inst).is_vector() {
+ InstructionData::Store {
+ opcode: Opcode::Store,
+ args: [value, addr],
+ flags: _,
+ offset,
+ } => {
+ if cursor.func.dfg.ctrl_typevar(inst) == types::I128
+ || cursor.func.dfg.ctrl_typevar(inst).is_vector()
+ {
continue; // WORKAROUND: stack_store.i128 not yet implemented
}
- if let Some((stack_slot, stack_addr_offset)) = try_get_stack_slot_and_offset_for_addr(cursor.func, addr) {
- if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into()) {
- cursor.func.dfg.replace(inst).stack_store(value, stack_slot, combined_offset);
+ if let Some((stack_slot, stack_addr_offset)) =
+ try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
+ {
+ if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
+ {
+ cursor.func.dfg.replace(inst).stack_store(
+ value,
+ stack_slot,
+ combined_offset,
+ );
}
}
}
if let ValueDef::Result(arg_origin, 0) = cursor.func.dfg.value_def(arg) {
match cursor.func.dfg[arg_origin].opcode() {
Opcode::StackAddr | Opcode::StackLoad => {
- stack_addr_load_insts_users.entry(arg_origin).or_insert_with(FxHashSet::default).insert(inst);
+ stack_addr_load_insts_users
+ .entry(arg_origin)
+ .or_insert_with(FxHashSet::default)
+ .insert(inst);
}
_ => {}
}
for inst in stack_addr_load_insts_users.keys() {
let mut is_recorded_stack_addr_or_stack_load = false;
for stack_slot_users in opt_ctx.stack_slot_usage_map.values() {
- is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst) || stack_slot_users.stack_load.contains(inst);
+ is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst)
+ || stack_slot_users.stack_load.contains(inst);
}
assert!(is_recorded_stack_addr_or_stack_load);
}
for stack_slot_users in opt_ctx.stack_slot_usage_map.values_mut() {
stack_slot_users
.stack_addr
- .drain_filter(|inst| !(stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true)))
+ .drain_filter(|inst| {
+ !(stack_addr_load_insts_users
+ .get(inst)
+ .map(|users| users.is_empty())
+ .unwrap_or(true))
+ })
.for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst));
stack_slot_users
.stack_load
- .drain_filter(|inst| !(stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true)))
+ .drain_filter(|inst| {
+ !(stack_addr_load_insts_users
+ .get(inst)
+ .map(|users| users.is_empty())
+ .unwrap_or(true))
+ })
.for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst));
}
}
-fn try_get_stack_slot_and_offset_for_addr(func: &Function, addr: Value) -> Option<(StackSlot, Offset32)> {
+fn try_get_stack_slot_and_offset_for_addr(
+ func: &Function,
+ addr: Value,
+) -> Option<(StackSlot, Offset32)> {
if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) {
if let InstructionData::StackLoad {
opcode: Opcode::StackAddr,
stack_slot,
offset,
- } = func.dfg[addr_inst] {
+ } = func.dfg[addr_inst]
+ {
return Some((stack_slot, offset));
}
}
}
let src_end: i64 = src_offset.try_add_i64(i64::from(src_size)).unwrap().into();
- let dest_end: i64 = dest_offset.try_add_i64(i64::from(dest_size)).unwrap().into();
+ let dest_end: i64 = dest_offset
+ .try_add_i64(i64::from(dest_size))
+ .unwrap()
+ .into();
if src_end <= dest_offset.into() || dest_end <= src_offset.into() {
return SpatialOverlap::No;
}
}
}
- pub(crate) fn const_addr<'a, 'tcx>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>, addr: i64) -> Self {
+ pub(crate) fn const_addr<'a, 'tcx>(
+ fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
+ addr: i64,
+ ) -> Self {
let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
Pointer {
base: PointerBase::Addr(addr),
fx.bcx.ins().iadd_imm(base_addr, offset)
}
}
- PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset),
- PointerBase::Dangling(align) => {
- fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx
+ .ins()
+ .stack_addr(fx.pointer_type, stack_slot, self.offset)
}
+ PointerBase::Dangling(align) => fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
}
}
}
} else {
let base_offset: i64 = self.offset.into();
- if let Some(new_offset) = base_offset.checked_add(extra_offset){
+ if let Some(new_offset) = base_offset.checked_add(extra_offset) {
let base_addr = match self.base {
PointerBase::Addr(addr) => addr,
- PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0),
- PointerBase::Dangling(align) => fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
+ }
+ PointerBase::Dangling(align) => fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
};
let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
Pointer {
offset: Offset32::new(0),
}
} else {
- panic!("self.offset ({}) + extra_offset ({}) not representable in i64", base_offset, extra_offset);
+ panic!(
+ "self.offset ({}) + extra_offset ({}) not representable in i64",
+ base_offset, extra_offset
+ );
}
}
}
offset: self.offset,
},
PointerBase::Stack(stack_slot) => {
- let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
+ let base_addr = fx
+ .bcx
+ .ins()
+ .stack_addr(fx.pointer_type, stack_slot, self.offset);
Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
offset: Offset32::new(0),
}
}
PointerBase::Dangling(align) => {
- let addr = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+ let addr = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
offset: self.offset,
) -> Value {
match self.base {
PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
- PointerBase::Stack(stack_slot) => if ty == types::I128 || ty.is_vector() {
- // WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented
- let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
- fx.bcx.ins().load(ty, flags, base_addr, self.offset)
- } else {
- fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
+ PointerBase::Stack(stack_slot) => {
+ if ty == types::I128 || ty.is_vector() {
+ // WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented
+ let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+ fx.bcx.ins().load(ty, flags, base_addr, self.offset)
+ } else {
+ fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
+ }
}
PointerBase::Dangling(_align) => unreachable!(),
}
entity: E,
comment: S,
) {
- self.clif_comments.add_comment(entity, comment);
+ self.clif_comments.add_comment(entity, comment);
}
}
) {
use std::io::Write;
- if !cfg!(debug_assertions) && !tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly) {
+ if !cfg!(debug_assertions)
+ && !tcx
+ .sess
+ .opts
+ .output_types
+ .contains_key(&OutputType::LlvmAssembly)
+ {
return;
}
("vsx", Some(sym::powerpc_target_feature)),
];
-const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
- &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
+const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("fp64", Some(sym::mips_target_feature)),
+ ("msa", Some(sym::mips_target_feature)),
+];
const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("m", Some(sym::riscv_target_feature)),
/// Tries to infer the path of a binary for the target toolchain from the linker name.
pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
let (mut linker, _linker_flavor) = linker_and_flavor(sess);
- let linker_file_name = linker.file_name().and_then(|name| name.to_str()).unwrap_or_else(|| {
- sess.fatal("couldn't extract file name from specified linker")
- });
+ let linker_file_name = linker
+ .file_name()
+ .and_then(|name| name.to_str())
+ .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
if linker_file_name == "ld.lld" {
if tool != "ld" {
flavor,
)),
(Some(linker), None) => {
- let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| {
- sess.fatal("couldn't extract file stem from specified linker")
- });
+ let stem = linker
+ .file_stem()
+ .and_then(|stem| stem.to_str())
+ .unwrap_or_else(|| {
+ sess.fatal("couldn't extract file stem from specified linker")
+ });
let flavor = if stem == "emcc" {
LinkerFlavor::Em
// linker and linker flavor specified via command line have precedence over what the target
// specification specifies
- if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
+ if let Some(ret) = infer_from(
+ sess,
+ sess.opts.cg.linker.clone(),
+ sess.opts.cg.linker_flavor,
+ ) {
return ret;
}
fn codegen_print(fx: &mut FunctionCx<'_, '_, impl cranelift_module::Backend>, msg: &str) {
let puts = fx
- .cx.module
+ .cx
+ .module
.declare_function(
"puts",
Linkage::Import,
_ => {
// We have to align the offset for DST's
let unaligned_offset = field_offset.bytes();
- let (_, unsized_align) = crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+ let (_, unsized_align) =
+ crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
let offset = fx.bcx.ins().band(and_lhs, and_rhs);
- (
- base.offset_value(fx, offset),
- field_layout,
- )
+ (base.offset_value(fx, offset), field_layout)
}
}
} else {
}
}
-fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar: &Scalar) -> Offset32 {
+fn scalar_pair_calculate_b_offset(
+ tcx: TyCtxt<'_>,
+ a_scalar: &Scalar,
+ b_scalar: &Scalar,
+) -> Offset32 {
let b_offset = a_scalar
.value
.size(&tcx)
CValue(CValueInner::ByRef(ptr, None), layout)
}
- pub(crate) fn by_ref_unsized(ptr: Pointer, meta: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ pub(crate) fn by_ref_unsized(
+ ptr: Pointer,
+ meta: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
}
CValue(CValueInner::ByVal(value), layout)
}
- pub(crate) fn by_val_pair(value: Value, extra: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ pub(crate) fn by_val_pair(
+ value: Value,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
CValue(CValueInner::ByValPair(value, extra), layout)
}
}
// FIXME remove
- pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> (Pointer, Option<Value>) {
+ pub(crate) fn force_stack(
+ self,
+ fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+ ) -> (Pointer, Option<Value>) {
let layout = self.1;
match self.0 {
CValueInner::ByRef(ptr, meta) => (ptr, meta),
Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
Abi::Vector { ref element, count } => {
scalar_to_clif_type(fx.tcx, element.clone())
- .by(u16::try_from(count).unwrap()).unwrap()
+ .by(u16::try_from(count).unwrap())
+ .unwrap()
}
_ => unreachable!("{:?}", layout.ty),
};
let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
(val1, val2)
}
- CValueInner::ByRef(_, Some(_)) => bug!("load_scalar_pair for unsized value not allowed"),
+ CValueInner::ByRef(_, Some(_)) => {
+ bug!("load_scalar_pair for unsized value not allowed")
+ }
CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
CValueInner::ByValPair(val1, val2) => (val1, val2),
}
) -> CValue<'tcx> {
let layout = self.1;
match self.0 {
- CValueInner::ByVal(val) => {
- match layout.abi {
- Abi::Vector { element: _, count } => {
- let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
- let field = u8::try_from(field.index()).unwrap();
- assert!(field < count);
- let lane = fx.bcx.ins().extractlane(val, field);
- let field_layout = layout.field(&*fx, usize::from(field));
- CValue::by_val(lane, field_layout)
- }
- _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count } => {
+ let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+ let field = u8::try_from(field.index()).unwrap();
+ assert!(field < count);
+ let lane = fx.bcx.ins().extractlane(val, field);
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(lane, field_layout)
}
- }
- CValueInner::ByValPair(val1, val2) => {
- match layout.abi {
- Abi::ScalarPair(_, _) => {
- let val = match field.as_u32() {
- 0 => val1,
- 1 => val2,
- _ => bug!("field should be 0 or 1"),
- };
- let field_layout = layout.field(&*fx, usize::from(field));
- CValue::by_val(val, field_layout)
- }
- _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(val1, val2) => match layout.abi {
+ Abi::ScalarPair(_, _) => {
+ let val = match field.as_u32() {
+ 0 => val1,
+ 1 => val2,
+ _ => bug!("field should be 0 or 1"),
+ };
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(val, field_layout)
}
- }
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ },
CValueInner::ByRef(ptr, None) => {
let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
CValue::by_ref(field_ptr, field_layout)
}
}
- pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) {
+ pub(crate) fn unsize_value(
+ self,
+ fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+ dest: CPlace<'tcx>,
+ ) {
crate::unsize::coerce_unsized_into(fx, self, dest);
}
match layout.ty.kind {
ty::Bool => {
- assert!(const_val == 0 || const_val == 1, "Invalid bool 0x{:032X}", const_val);
+ assert!(
+ const_val == 0 || const_val == 1,
+ "Invalid bool 0x{:032X}",
+ const_val
+ );
}
_ => {}
}
}
pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
- assert!(matches!(self.layout().ty.kind, ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
- assert!(matches!(layout.ty.kind, ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert!(matches!(
+ self.layout().ty.kind,
+ ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
+ ));
+ assert!(matches!(
+ layout.ty.kind,
+ ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
+ ));
assert_eq!(self.layout().abi, layout.abi);
CValue(self.0, layout)
}
) -> CPlace<'tcx> {
let var = Variable::with_u32(fx.next_ssa_var);
fx.next_ssa_var += 1;
- fx.bcx
- .declare_var(var, fx.clif_type(layout.ty).unwrap());
+ fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
CPlace {
inner: CPlaceInner::Var(local, var),
layout,
}
}
- pub(crate) fn for_ptr_with_extra(ptr: Pointer, extra: Value, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+ pub(crate) fn for_ptr_with_extra(
+ ptr: Pointer,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
CPlace {
inner: CPlaceInner::Addr(ptr, Some(extra)),
layout,
match self.inner {
CPlaceInner::Var(_local, var) => {
let val = fx.bcx.use_var(var);
- fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx
+ .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
CValue::by_val(val, layout)
}
CPlaceInner::VarPair(_local, var1, var2) => {
let val1 = fx.bcx.use_var(var1);
- fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+ fx.bcx
+ .set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
let val2 = fx.bcx.use_var(var2);
- fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+ fx.bcx
+ .set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
CValue::by_val_pair(val1, val2, layout)
}
CPlaceInner::VarLane(_local, var, lane) => {
let val = fx.bcx.use_var(var);
- fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx
+ .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
let val = fx.bcx.ins().extractlane(val, lane);
CValue::by_val(val, layout)
}
}
}
- pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, from: CValue<'tcx>) {
+ pub(crate) fn write_cvalue(
+ self,
+ fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+ from: CValue<'tcx>,
+ ) {
fn assert_assignable<'tcx>(
fx: &FunctionCx<'_, 'tcx, impl Backend>,
from_ty: Ty<'tcx>,
) {
match (&from_ty.kind, &to_ty.kind) {
(ty::Ref(_, a, _), ty::Ref(_, b, _))
- | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _}), ty::RawPtr(TypeAndMut { ty: b, mutbl: _})) => {
+ | (
+ ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+ ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+ ) => {
assert_assignable(fx, a, b);
}
(ty::FnPtr(_), ty::FnPtr(_)) => {
self,
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
from: CValue<'tcx>,
- #[cfg_attr(not(debug_assertions), allow(unused_variables))]
- method: &'static str,
+ #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
) {
fn transmute_value<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
(_, _) if src_ty == dst_ty => data,
// This is a `write_cvalue_transmute`.
- (types::I32, types::F32) | (types::F32, types::I32)
- | (types::I64, types::F64) | (types::F64, types::I64) => {
- fx.bcx.ins().bitcast(dst_ty, data)
- }
+ (types::I32, types::F32)
+ | (types::F32, types::I32)
+ | (types::I64, types::F64)
+ | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
_ if src_ty.is_vector() && dst_ty.is_vector() => {
fx.bcx.ins().raw_bitcast(dst_ty, data)
}
_ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
};
- fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx
+ .set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, data);
}
};
fx.add_comment(
fx.bcx.func.layout.last_inst(cur_block).unwrap(),
- format!("{}: {:?}: {:?} <- {:?}: {:?}", method, self.inner(), self.layout().ty, from.0, from.layout().ty),
+ format!(
+ "{}: {:?}: {:?} <- {:?}: {:?}",
+ method,
+ self.inner(),
+ self.layout().ty,
+ from.0,
+ from.layout().ty
+ ),
);
}
// First get the old vector
let vector = fx.bcx.use_var(var);
- fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx
+ .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
// Next insert the written lane into the vector
let vector = fx.bcx.ins().insertlane(vector, data, lane);
// Finally write the new vector
- fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx
+ .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, vector);
return;
let (value, extra) = from.load_scalar_pair(fx);
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, MemFlags::new());
- to_ptr.offset(fx, b_offset).store(fx, extra, MemFlags::new());
+ to_ptr
+ .offset(fx, b_offset)
+ .store(fx, extra, MemFlags::new());
return;
}
_ => {}
let layout = layout.field(&*fx, field.index());
match field.as_u32() {
- 0 => return CPlace {
- inner: CPlaceInner::Var(local, var1),
- layout,
- },
- 1 => return CPlace {
- inner: CPlaceInner::Var(local, var2),
- layout,
- },
+ 0 => {
+ return CPlace {
+ inner: CPlaceInner::Var(local, var1),
+ layout,
+ }
+ }
+ 1 => {
+ return CPlace {
+ inner: CPlaceInner::Var(local, var2),
+ layout,
+ }
+ }
_ => unreachable!("field should be 0 or 1"),
}
}
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else {
- CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
+ CPlace::for_ptr(
+ Pointer::new(self.to_cvalue(fx).load_scalar(fx)),
+ inner_layout,
+ )
}
}
arg.load_scalar_pair(fx)
} else {
let (ptr, vtable) = arg.try_to_ptr().unwrap();
- (
- ptr.get_addr(fx),
- vtable.unwrap()
- )
+ (ptr.get_addr(fx), vtable.unwrap())
};
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
let tcx = fx.tcx;
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
- let drop_in_place_fn =
- import_function(tcx, &mut fx.cx.module, Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx));
+ let drop_in_place_fn = import_function(
+ tcx,
+ &mut fx.cx.module,
+ Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
+ );
let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
Some(import_function(
tcx,
&mut fx.cx.module,
- Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs).unwrap().polymorphize(fx.tcx),
+ Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .polymorphize(fx.tcx),
))
})
});
}
let data_id = fx
- .cx.module
+ .cx
+ .module
.declare_data(
&format!(
"__vtable.{}.for.{:?}.{}",