let _a = 1u32 << 2u8;
+ println!("{:?}", unsafe { std::intrinsics::caller_location() });
+
unsafe {
test_simd();
}
func: &Operand<'tcx>,
args: &[Operand<'tcx>],
destination: &Option<(Place<'tcx>, BasicBlock)>,
+ span: Span,
) {
let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.tcx));
let sig = fx
match instance.def {
InstanceDef::Intrinsic(_) => {
- crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination);
+ crate::intrinsics::codegen_intrinsic_call(fx, instance, args, destination, span);
return;
}
InstanceDef::DropGlue(_, None) => {
cleanup: _,
from_hir_call: _,
} => {
- crate::abi::codegen_terminator_call(fx, func, args, destination);
+ crate::abi::codegen_terminator_call(
+ fx,
+ func,
+ args,
+ destination,
+ bb_data.terminator().source_info.span,
+ );
}
TerminatorKind::Resume | TerminatorKind::Abort => {
trap_unreachable(fx, "[corruption] Unwinding bb reached.");
-use rustc::ty::layout::{FloatTy, Integer, Primitive};
+use rustc::ty::layout::{Integer, Primitive};
use rustc_target::spec::{HasTargetSpec, Target};
use cranelift::codegen::ir::{InstructionData, Opcode, ValueDef};
Integer::I64 => types::I64,
Integer::I128 => types::I128,
},
- Primitive::Float(flt) => match flt {
- FloatTy::F32 => types::F32,
- FloatTy::F64 => types::F64,
- },
+ Primitive::F32 => types::F32,
+ Primitive::F64 => types::F64,
Primitive::Pointer => pointer_ty(tcx),
}
}
let (index, _) = self.source_info_set.insert_full((source_info.span, source_info.scope));
self.bcx.set_srcloc(SourceLoc::new(index as u32));
}
+
+ /// Builds the constant `core::panic::Location` value describing the caller
+ /// of the code at `span`, for use by the `caller_location` intrinsic.
+ pub fn get_caller_location(&mut self, span: Span) -> CValue<'tcx> {
+ // Walk out of macro expansions: attribute the location to the outermost
+ // expansion site, falling back to `span` itself when there is none.
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ // Have the compiler intern the `Location` allocation for (file, line, col).
+ // NOTE(review): the `+ 1` presumably converts the 0-based `col_display`
+ // to `Location`'s 1-based column convention — confirm against rustc.
+ let const_loc = self.tcx.const_caller_location((
+ syntax::symbol::Symbol::intern(&caller.file.name.to_string()),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ // Lower the interned constant into a codegen-level value.
+ crate::constant::trans_const_value(self, const_loc)
+ }
}
ecx.copy_op(op, ptr.into())?;
let alloc = ecx
.memory
- .get(ptr.to_ref().to_scalar()?.to_ptr()?.alloc_id)?;
+ .get_raw(ptr.to_ref().to_scalar()?.to_ptr()?.alloc_id)?;
Ok(fx.tcx.intern_const_alloc(alloc.clone()))
};
let alloc = result().expect("unable to convert ConstValue to Allocation");
let (data_id, alloc) = match todo_item {
TodoItem::Alloc(alloc_id) => {
//println!("alloc_id {}", alloc_id);
- let alloc = memory.get(alloc_id).unwrap();
+ let alloc = memory.get_raw(alloc_id).unwrap();
let data_id = data_id_for_alloc_id(module, alloc_id, alloc.align);
(data_id, alloc)
}
instance: Instance<'tcx>,
args: &[mir::Operand<'tcx>],
destination: Option<(CPlace<'tcx>, BasicBlock)>,
+ span: Span,
) {
let def_id = instance.def_id();
let substs = instance.substs;
ret.write_cvalue(fx, val);
};
+ ptr_offset_from, <T> (v ptr, v base) {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+
+ let pointee_size: u64 = fx.layout_of(T).size.bytes();
+ let diff = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = CValue::by_val(fx.bcx.ins().udiv_imm(diff, pointee_size as i64), isize_layout);
+ ret.write_cvalue(fx, val);
+ };
+
+ caller_location, () {
+ let caller_location = fx.get_caller_location(span);
+ ret.write_cvalue(fx, caller_location);
+ };
+
_ if intrinsic.starts_with("atomic_fence"), () {};
_ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
_ if intrinsic.starts_with("atomic_load"), (c ptr) {