// based function.
debug!("function inlining is disabled when compiling with `instrument_coverage`");
} else {
- Inliner { tcx, codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()) }
- .run_pass(body);
+ Inliner {
+ tcx,
+ param_env: tcx.param_env_reveal_all_normalized(body.source.def_id()),
+ codegen_fn_attrs: tcx.codegen_fn_attrs(body.source.def_id()),
+ }
+ .run_pass(body);
}
}
}
struct Inliner<'tcx> {
tcx: TyCtxt<'tcx>,
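+    /// Reveal-all, normalized param_env of the body being inlined into; computed once
+    /// and reused for every call site (a descriptive comment added for clarity).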
+ param_env: ParamEnv<'tcx>,
codegen_fn_attrs: &'tcx CodegenFnAttrs,
}
let def_id = caller_body.source.def_id();
- let param_env = self.tcx.param_env_reveal_all_normalized(def_id);
-
// Only do inlining into fn bodies.
let self_hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
if self.tcx.hir().body_owner_kind(self_hir_id).is_fn_or_closure()
&& caller_body.source.promoted.is_none()
{
for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated() {
- if let Some(callsite) =
- self.get_valid_function_call(bb, bb_data, caller_body, param_env)
- {
+ if let Some(callsite) = self.get_valid_function_call(bb, bb_data, caller_body) {
callsites.push_back(callsite);
}
}
let callee_body = if self.consider_optimizing(callsite, callee_body) {
self.tcx.subst_and_normalize_erasing_regions(
&callsite.substs,
- param_env,
+ self.param_env,
callee_body,
)
} else {
// Add callsites from inlined function
for (bb, bb_data) in caller_body.basic_blocks().iter_enumerated().skip(start) {
if let Some(new_callsite) =
- self.get_valid_function_call(bb, bb_data, caller_body, param_env)
+ self.get_valid_function_call(bb, bb_data, caller_body)
{
// Don't inline the same function multiple times.
if callsite.callee != new_callsite.callee {
bb: BasicBlock,
bb_data: &BasicBlockData<'tcx>,
caller_body: &Body<'tcx>,
- param_env: ParamEnv<'tcx>,
) -> Option<CallSite<'tcx>> {
// Don't inline calls that are in cleanup blocks.
if bb_data.is_cleanup {
let terminator = bb_data.terminator();
if let TerminatorKind::Call { func: ref op, .. } = terminator.kind {
if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() {
- let instance =
- Instance::resolve(self.tcx, param_env, callee_def_id, substs).ok().flatten()?;
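+            // Resolve the callee through the Inliner's shared param_env (the caller's, with Reveal::All).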
+ let instance = Instance::resolve(self.tcx, self.param_env, callee_def_id, substs)
+ .ok()
+ .flatten()?;
if let InstanceDef::Virtual(..) = instance.def {
return None;
debug!(" final inline threshold = {}", threshold);
// FIXME: Give a bonus to functions with only a single caller
-
- let param_env = tcx.param_env(callee_body.source.def_id());
-
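+        // The drop and size estimates below use the Inliner's shared param_env.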
let mut first_block = true;
let mut cost = 0;
// If the place doesn't actually need dropping, treat it like
// a regular goto.
let ty = place.ty(callee_body, tcx).subst(tcx, callsite.substs).ty;
- if ty.needs_drop(tcx, param_env) {
+ if ty.needs_drop(tcx, self.param_env) {
cost += CALL_PENALTY;
if let Some(unwind) = unwind {
cost += LANDINGPAD_PENALTY;
let ty = v.ty.subst(tcx, callsite.substs);
// Cost of the var is the size in machine-words, if we know
// it.
- if let Some(size) = type_size_of(tcx, param_env, ty) {
+ if let Some(size) = type_size_of(tcx, self.param_env, ty) {
cost += (size / ptr_size) as usize;
} else {
cost += UNKNOWN_SIZE_COST;
--- /dev/null
+// run-pass
+// compile-flags:-Zmir-opt-level=2
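+//
+// Exercises MIR inlining of a generic trait default method: `collect_array`
+// calls `inner_array` through `Self`, and the inliner resolves that call
+// using the caller's param_env.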
+
+use std::mem::MaybeUninit;
+const N: usize = 2;
+
+trait CollectArray<A>: Iterator<Item = A> {
+ fn inner_array(&mut self) -> [A; N];
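+    // Default method: fills an array from the iterator, then asserts the iterator is exhausted.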
+ fn collect_array(&mut self) -> [A; N] {
+ let result = self.inner_array();
+ assert!(self.next().is_none());
+ result
+ }
+}
+
+impl<A, I: ?Sized> CollectArray<A> for I
+where
+ I: Iterator<Item = A>,
+{
+ fn inner_array(&mut self) -> [A; N] {
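+        // An uninitialized `[MaybeUninit<A>; N]` is itself a valid value, so this `assume_init` is sound.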
+ let mut result: [MaybeUninit<A>; N] = unsafe { MaybeUninit::uninit().assume_init() };
+ for (dest, item) in result.iter_mut().zip(self) {
+ *dest = MaybeUninit::new(item);
+ }
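+        // `MaybeUninit<A>` has the same layout as `A`, so the filled buffer can be read out as `[A; N]`.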
+ let temp_ptr: *const [MaybeUninit<A>; N] = &result;
+ unsafe { std::ptr::read(temp_ptr as *const [A; N]) }
+ }
+}
+
+fn main() {
+ assert_eq!(
+ [[1, 2], [3, 4]]
+ .iter()
+ .map(|row| row.iter().collect_array())
+ .collect_array(),
+ [[&1, &2], [&3, &4]]
+ );
+}