pub fn grow(&mut self, num_bits: usize) {
let num_words = u64s(num_bits);
- let extra_words = self.data.len() - num_words;
- if extra_words > 0 {
- self.data.extend((0..extra_words).map(|_| 0));
+ if self.data.len() < num_words {
+ self.data.resize(num_words, 0)
}
}
#[test]
fn grow() {
let mut vec1 = BitVector::new(65);
- assert!(vec1.insert(3));
- assert!(!vec1.insert(3));
- assert!(vec1.insert(5));
- assert!(vec1.insert(64));
+ for index in 0 .. 65 {
+ assert!(vec1.insert(index));
+ assert!(!vec1.insert(index));
+ }
vec1.grow(128);
- assert!(vec1.contains(3));
- assert!(vec1.contains(5));
- assert!(vec1.contains(64));
- assert!(!vec1.contains(126));
+
+ // Check if the bits set before growing are still set
+ for index in 0 .. 65 {
+ assert!(vec1.contains(index));
+ }
+
+ // Check that the newly added bits are all unset
+ for index in 65 .. 128 {
+ assert!(!vec1.contains(index));
+ }
+
+ // Check that we can set all new bits without running out of bounds
+ for index in 65 .. 128 {
+ assert!(vec1.insert(index));
+ assert!(!vec1.insert(index));
+ }
}
#[test]
// DLLExportLinkage, GhostLinkage and LinkOnceODRAutoHideLinkage.
// LinkerPrivateLinkage and LinkerPrivateWeakLinkage are not included either;
// they've been removed in upstream LLVM commit r203866.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum Linkage {
ExternalLinkage = 0,
AvailableExternallyLinkage = 1,
use build::*;
use builder::{Builder, noname};
use callee::{Callee, CallArgs, ArgExprs, ArgVals};
-use partitioning::{self, PartitioningStrategy};
use cleanup::{self, CleanupMethods, DropHint};
use closure;
use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
use meth;
use mir;
use monomorphize::{self, Instance};
+use partitioning::{self, PartitioningStrategy, InstantiationMode};
use symbol_names_test;
use tvec;
use type_::Type;
None => TransItemCollectionMode::Lazy
};
- let (items, inlining_map) = time(time_passes, "translation item collection", || {
+ let (items, reference_map) = time(time_passes, "translation item collection", || {
collector::collect_crate_translation_items(&ccx, collection_mode)
});
partitioning::partition(ccx.tcx(),
items.iter().cloned(),
strategy,
- &inlining_map)
+ &reference_map)
});
if ccx.sess().opts.debugging_opts.print_trans_items.is_some() {
output.push_str(&cgu_name[..]);
let linkage_abbrev = match linkage {
- llvm::ExternalLinkage => "External",
- llvm::AvailableExternallyLinkage => "Available",
- llvm::LinkOnceAnyLinkage => "OnceAny",
- llvm::LinkOnceODRLinkage => "OnceODR",
- llvm::WeakAnyLinkage => "WeakAny",
- llvm::WeakODRLinkage => "WeakODR",
- llvm::AppendingLinkage => "Appending",
- llvm::InternalLinkage => "Internal",
- llvm::PrivateLinkage => "Private",
- llvm::ExternalWeakLinkage => "ExternalWeak",
- llvm::CommonLinkage => "Common",
+ InstantiationMode::Def(llvm::ExternalLinkage) => "External",
+ InstantiationMode::Def(llvm::AvailableExternallyLinkage) => "Available",
+ InstantiationMode::Def(llvm::LinkOnceAnyLinkage) => "OnceAny",
+ InstantiationMode::Def(llvm::LinkOnceODRLinkage) => "OnceODR",
+ InstantiationMode::Def(llvm::WeakAnyLinkage) => "WeakAny",
+ InstantiationMode::Def(llvm::WeakODRLinkage) => "WeakODR",
+ InstantiationMode::Def(llvm::AppendingLinkage) => "Appending",
+ InstantiationMode::Def(llvm::InternalLinkage) => "Internal",
+ InstantiationMode::Def(llvm::PrivateLinkage) => "Private",
+ InstantiationMode::Def(llvm::ExternalWeakLinkage) => "ExternalWeak",
+ InstantiationMode::Def(llvm::CommonLinkage) => "Common",
+ InstantiationMode::Decl => "Declaration",
};
output.push_str("[");
//! this is not implemented however: a translation item will be produced
//! regardless of whether it is actually needed or not.
+use rustc_data_structures::bitvec::BitVector;
+
use rustc::hir;
use rustc::hir::intravisit as hir_visit;
}
}
-pub type InliningMap<'tcx> = FnvHashMap<TransItem<'tcx>, FnvHashSet<TransItem<'tcx>>>;
+/// Maps every translation item to all translation items it references in its
+/// body.
+pub struct ReferenceMap<'tcx> {
+ // Maps a source translation item to a range of target translation items.
+ // The two numbers in the tuple are the start (inclusive) and
+ // end index (exclusive) within the `targets` vec and the `inlined` bit-vector.
+ index: FnvHashMap<TransItem<'tcx>, (usize, usize)>,
+ targets: Vec<TransItem<'tcx>>,
+ inlined: BitVector
+}
+
+impl<'tcx> ReferenceMap<'tcx> {
+
+ fn new() -> ReferenceMap<'tcx> {
+ ReferenceMap {
+ index: FnvHashMap(),
+ targets: Vec::new(),
+ inlined: BitVector::new(64 * 256),
+ }
+ }
+
+ fn record_references<I>(&mut self, source: TransItem<'tcx>, targets: I)
+ where I: Iterator<Item=(TransItem<'tcx>, bool)>
+ {
+ assert!(!self.index.contains_key(&source));
+
+ let start_index = self.targets.len();
+
+ for (target, inlined) in targets {
+ let index = self.targets.len();
+ self.targets.push(target);
+ self.inlined.grow(index + 1);
+
+ if inlined {
+ self.inlined.insert(index);
+ }
+ }
+
+ let end_index = self.targets.len();
+ self.index.insert(source, (start_index, end_index));
+ }
+
+ // Internally iterate over all items referenced by `source` which will be
+ // made available for inlining.
+ pub fn with_inlining_candidates<F>(&self, source: TransItem<'tcx>, mut f: F)
+ where F: FnMut(TransItem<'tcx>) {
+ if let Some(&(start_index, end_index)) = self.index.get(&source)
+ {
+ for index in start_index .. end_index {
+ if self.inlined.contains(index) {
+ f(self.targets[index])
+ }
+ }
+ }
+ }
+
+ pub fn get_direct_references_from(&self, source: TransItem<'tcx>) -> &[TransItem<'tcx>]
+ {
+ if let Some(&(start_index, end_index)) = self.index.get(&source) {
+ &self.targets[start_index .. end_index]
+ } else {
+ &self.targets[0 .. 0]
+ }
+ }
+}
pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
mode: TransItemCollectionMode)
-> (FnvHashSet<TransItem<'tcx>>,
- InliningMap<'tcx>) {
+ ReferenceMap<'tcx>) {
// We are not tracking dependencies of this pass as it has to be re-executed
// every time no matter what.
ccx.tcx().dep_graph.with_ignore(|| {
debug!("Building translation item graph, beginning at roots");
let mut visited = FnvHashSet();
let mut recursion_depths = DefIdMap();
- let mut inlining_map = FnvHashMap();
+ let mut reference_map = ReferenceMap::new();
for root in roots {
collect_items_rec(ccx,
root,
&mut visited,
&mut recursion_depths,
- &mut inlining_map);
+ &mut reference_map);
}
- (visited, inlining_map)
+ (visited, reference_map)
})
}
starting_point: TransItem<'tcx>,
visited: &mut FnvHashSet<TransItem<'tcx>>,
recursion_depths: &mut DefIdMap<usize>,
- inlining_map: &mut InliningMap<'tcx>) {
+ reference_map: &mut ReferenceMap<'tcx>) {
if !visited.insert(starting_point.clone()) {
// We've been here already, no need to search again.
return;
}
}
+ record_references(ccx, starting_point, &neighbors[..], reference_map);
+
for neighbour in neighbors {
- record_inlined_use(ccx, starting_point, neighbour, inlining_map);
- collect_items_rec(ccx, neighbour, visited, recursion_depths, inlining_map);
+ collect_items_rec(ccx, neighbour, visited, recursion_depths, reference_map);
}
if let Some((def_id, depth)) = recursion_depth_reset {
debug!("END collect_items_rec({})", starting_point.to_string(ccx));
}
-fn record_inlined_use<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- caller: TransItem<'tcx>,
- callee: TransItem<'tcx>,
- inlining_map: &mut InliningMap<'tcx>) {
- if callee.is_from_extern_crate() ||
- callee.requests_inline(ccx.tcx()) {
- inlining_map.entry(caller)
- .or_insert_with(|| FnvHashSet())
- .insert(callee);
- }
+fn record_references<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ caller: TransItem<'tcx>,
+ callees: &[TransItem<'tcx>],
+ reference_map: &mut ReferenceMap<'tcx>) {
+ let iter = callees.into_iter()
+ .map(|callee| {
+ let is_inlining_candidate = callee.is_from_extern_crate() ||
+ callee.requests_inline(ccx.tcx());
+ (*callee, is_inlining_candidate)
+ });
+ reference_map.record_references(caller, iter);
}
fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
//! source-level module, functions from the same module will be available for
//! inlining, even when they are not marked #[inline].
-use collector::{InliningMap, TransItem};
-use context::CrateContext;
+use collector::{TransItem, ReferenceMap};
use monomorphize;
use rustc::hir::def_id::DefId;
use rustc::hir::map::DefPathData;
use syntax::parse::token::{self, InternedString};
use util::nodemap::{FnvHashMap, FnvHashSet};
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+pub enum InstantiationMode {
+ /// This variant indicates that a translation item should be placed in some
+ /// codegen unit as a definition and with the given linkage.
+ Def(llvm::Linkage),
+
+ /// This variant indicates that only a declaration of some translation item
+ /// should be placed in a given codegen unit.
+ Decl
+}
+
pub struct CodegenUnit<'tcx> {
pub name: InternedString,
- pub items: FnvHashMap<TransItem<'tcx>, llvm::Linkage>,
+ pub items: FnvHashMap<TransItem<'tcx>, InstantiationMode>,
}
pub enum PartitioningStrategy {
+ /// Generate one codegen unit per source-level module.
PerModule,
+
+ /// Partition the whole crate into a fixed number of codegen units.
FixedUnitCount(usize)
}
pub fn partition<'tcx, I>(tcx: &TyCtxt<'tcx>,
trans_items: I,
strategy: PartitioningStrategy,
- inlining_map: &InliningMap<'tcx>)
+ reference_map: &ReferenceMap<'tcx>)
-> Vec<CodegenUnit<'tcx>>
where I: Iterator<Item = TransItem<'tcx>>
{
// functions and statics defined in the local crate.
let mut initial_partitioning = place_root_translation_items(tcx, trans_items);
+ // If the partitioning should produce a fixed count of codegen units, merge
+ // until that count is reached.
if let PartitioningStrategy::FixedUnitCount(count) = strategy {
merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name[..]);
}
// translation items have to go into each codegen unit. These additional
// translation items can be drop-glue, functions from external crates, and
// local functions the definition of which is marked with #[inline].
- place_inlined_translation_items(initial_partitioning, inlining_map)
+ let post_inlining = place_inlined_translation_items(initial_partitioning,
+ reference_map);
+
+ // Now we know all *definitions* within all codegen units, thus we can
+ // easily determine which declarations need to be placed within each one.
+ let post_declarations = place_declarations(post_inlining, reference_map);
+
+ post_declarations.0
}
-struct InitialPartitioning<'tcx> {
+struct PreInliningPartitioning<'tcx> {
codegen_units: Vec<CodegenUnit<'tcx>>,
roots: FnvHashSet<TransItem<'tcx>>,
}
+struct PostInliningPartitioning<'tcx>(Vec<CodegenUnit<'tcx>>);
+struct PostDeclarationsPartitioning<'tcx>(Vec<CodegenUnit<'tcx>>);
+
fn place_root_translation_items<'tcx, I>(tcx: &TyCtxt<'tcx>,
trans_items: I)
- -> InitialPartitioning<'tcx>
+ -> PreInliningPartitioning<'tcx>
where I: Iterator<Item = TransItem<'tcx>>
{
let mut roots = FnvHashSet();
}
};
- codegen_unit.items.insert(trans_item, linkage);
+ codegen_unit.items.insert(trans_item,
+ InstantiationMode::Def(linkage));
roots.insert(trans_item);
}
}
- InitialPartitioning {
+ PreInliningPartitioning {
codegen_units: codegen_units.into_iter()
.map(|(_, codegen_unit)| codegen_unit)
.collect(),
}
}
-fn merge_codegen_units<'tcx>(initial_partitioning: &mut InitialPartitioning<'tcx>,
+fn merge_codegen_units<'tcx>(initial_partitioning: &mut PreInliningPartitioning<'tcx>,
target_cgu_count: usize,
crate_name: &str) {
if target_cgu_count >= initial_partitioning.codegen_units.len() {
assert!(target_cgu_count >= 1);
let codegen_units = &mut initial_partitioning.codegen_units;
- // Merge the two smallest codegen units until the target size is reached
+ // Merge the two smallest codegen units until the target size is reached.
+ // Note that "size" is estimated here rather inaccurately as the number of
+ // translation items in a given unit. This could be improved on.
while codegen_units.len() > target_cgu_count {
// Sort small cgus to the back
codegen_units.as_mut_slice().sort_by_key(|cgu| -(cgu.items.len() as i64));
}
}
-fn place_inlined_translation_items<'tcx>(initial_partitioning: InitialPartitioning<'tcx>,
- inlining_map: &InliningMap<'tcx>)
- -> Vec<CodegenUnit<'tcx>> {
- let mut final_partitioning = Vec::new();
+fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartitioning<'tcx>,
+ reference_map: &ReferenceMap<'tcx>)
+ -> PostInliningPartitioning<'tcx> {
+ let mut new_partitioning = Vec::new();
for codegen_unit in &initial_partitioning.codegen_units[..] {
// Collect all items that need to be available in this codegen unit
let mut reachable = FnvHashSet();
for root in codegen_unit.items.keys() {
- follow_inlining(*root, inlining_map, &mut reachable);
+ follow_inlining(*root, reference_map, &mut reachable);
}
- let mut final_codegen_unit = CodegenUnit {
+ let mut new_codegen_unit = CodegenUnit {
name: codegen_unit.name.clone(),
items: FnvHashMap(),
};
// Add all translation items that are not already there
for trans_item in reachable {
- if let Some(linkage) = codegen_unit.items.get(&trans_item) {
+ if let Some(instantiation_mode) = codegen_unit.items.get(&trans_item) {
// This is a root, just copy it over
- final_codegen_unit.items.insert(trans_item, *linkage);
+ new_codegen_unit.items.insert(trans_item, *instantiation_mode);
} else {
if initial_partitioning.roots.contains(&trans_item) {
// This item will be instantiated in some other codegen unit,
// so we just add it here with AvailableExternallyLinkage
- final_codegen_unit.items.insert(trans_item, llvm::AvailableExternallyLinkage);
+ new_codegen_unit.items.insert(trans_item,
+ InstantiationMode::Def(llvm::AvailableExternallyLinkage));
} else {
// We can't be sure if this will also be instantiated
// somewhere else, so we add an instance here with
// LinkOnceODRLinkage. That way the item can be discarded if
// it's not needed (inlined) after all.
- final_codegen_unit.items.insert(trans_item, llvm::LinkOnceODRLinkage);
+ new_codegen_unit.items.insert(trans_item,
+ InstantiationMode::Def(llvm::LinkOnceODRLinkage));
}
}
}
- final_partitioning.push(final_codegen_unit);
+ new_partitioning.push(new_codegen_unit);
}
- return final_partitioning;
+ return PostInliningPartitioning(new_partitioning);
fn follow_inlining<'tcx>(trans_item: TransItem<'tcx>,
- inlining_map: &InliningMap<'tcx>,
+ reference_map: &ReferenceMap<'tcx>,
visited: &mut FnvHashSet<TransItem<'tcx>>) {
if !visited.insert(trans_item) {
return;
}
- if let Some(inlined_items) = inlining_map.get(&trans_item) {
- for &inlined_item in inlined_items {
- follow_inlining(inlined_item, inlining_map, visited);
+ reference_map.with_inlining_candidates(trans_item, |target| {
+ follow_inlining(target, reference_map, visited);
+ });
+ }
+}
+
+fn place_declarations<'tcx>(codegen_units: PostInliningPartitioning<'tcx>,
+ reference_map: &ReferenceMap<'tcx>)
+ -> PostDeclarationsPartitioning<'tcx> {
+ let PostInliningPartitioning(mut codegen_units) = codegen_units;
+
+ for codegen_unit in codegen_units.iter_mut() {
+ let mut declarations = FnvHashSet();
+
+ for (trans_item, _) in &codegen_unit.items {
+ for referenced_item in reference_map.get_direct_references_from(*trans_item) {
+ if !codegen_unit.items.contains_key(referenced_item) {
+ declarations.insert(*referenced_item);
+ }
}
}
+
+ codegen_unit.items
+ .extend(declarations.iter()
+ .map(|trans_item| (*trans_item,
+ InstantiationMode::Decl)));
}
+
+ PostDeclarationsPartitioning(codegen_units)
}
fn characteristic_def_id_of_trans_item<'tcx>(tcx: &TyCtxt<'tcx>,
return token::intern_and_get_ident(&mod_path[..]);
}
-
-impl<'tcx> CodegenUnit<'tcx> {
- pub fn _dump<'a>(&self, ccx: &CrateContext<'a, 'tcx>) {
- println!("CodegenUnit {} (", self.name);
-
- let mut items: Vec<_> = self.items
- .iter()
- .map(|(trans_item, inst)| {
- format!("{} -- ({:?})", trans_item.to_string(ccx), inst)
- })
- .collect();
-
- items.as_mut_slice().sort();
-
- for s in items {
- println!(" {}", s);
- }
-
- println!(")");
- }
-}
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=eager -Zincremental=""
+// compile-flags:-Zprint-trans-items=eager -Zincremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![crate_type="lib"]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
}
impl Drop for Struct {
- //~ TRANS_ITEM fn local_drop_glue::{{impl}}[0]::drop[0] @@ local_drop_glue[WeakODR]
+ //~ TRANS_ITEM fn local_drop_glue::{{impl}}[0]::drop[0] @@ local_drop_glue[WeakODR] local_drop_glue-mod1[Declaration]
fn drop(&mut self) {}
}
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=eager -Zincremental=""
+// compile-flags:-Zprint-trans-items=eager -Zincremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
// Used in different modules/codegen units but always instantiated in the same
// codegen unit.
-//~ TRANS_ITEM fn local_generic::generic[0]<u32> @@ local_generic.volatile[WeakODR]
-//~ TRANS_ITEM fn local_generic::generic[0]<u64> @@ local_generic.volatile[WeakODR]
-//~ TRANS_ITEM fn local_generic::generic[0]<char> @@ local_generic.volatile[WeakODR]
-//~ TRANS_ITEM fn local_generic::generic[0]<&str> @@ local_generic.volatile[WeakODR]
+//~ TRANS_ITEM fn local_generic::generic[0]<u32> @@ local_generic.volatile[WeakODR] local_generic[Declaration]
+//~ TRANS_ITEM fn local_generic::generic[0]<u64> @@ local_generic.volatile[WeakODR] local_generic-mod1[Declaration]
+//~ TRANS_ITEM fn local_generic::generic[0]<char> @@ local_generic.volatile[WeakODR] local_generic-mod1-mod1[Declaration]
+//~ TRANS_ITEM fn local_generic::generic[0]<&str> @@ local_generic.volatile[WeakODR] local_generic-mod2[Declaration]
pub fn generic<T>(x: T) -> T { x }
//~ TRANS_ITEM fn local_generic::user[0] @@ local_generic[WeakODR]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![allow(dead_code)]
//~ TRANS_ITEM fn methods_are_with_self_type::main[0]
fn main()
{
- //~ TRANS_ITEM fn methods_are_with_self_type::mod1[0]::{{impl}}[1]::method[0]<u32, u64> @@ methods_are_with_self_type.volatile[WeakODR]
+ //~ TRANS_ITEM fn methods_are_with_self_type::mod1[0]::{{impl}}[1]::method[0]<u32, u64> @@ methods_are_with_self_type.volatile[WeakODR] methods_are_with_self_type[Declaration]
SomeGenericType(0u32, 0u64).method();
- //~ TRANS_ITEM fn methods_are_with_self_type::mod1[0]::{{impl}}[1]::associated_fn[0]<char, &str> @@ methods_are_with_self_type.volatile[WeakODR]
+ //~ TRANS_ITEM fn methods_are_with_self_type::mod1[0]::{{impl}}[1]::associated_fn[0]<char, &str> @@ methods_are_with_self_type.volatile[WeakODR] methods_are_with_self_type[Declaration]
SomeGenericType::associated_fn('c', "&str");
- //~ TRANS_ITEM fn methods_are_with_self_type::{{impl}}[0]::foo[0]<methods_are_with_self_type::type1[0]::Struct[0]> @@ methods_are_with_self_type-type1.volatile[WeakODR]
+ //~ TRANS_ITEM fn methods_are_with_self_type::{{impl}}[0]::foo[0]<methods_are_with_self_type::type1[0]::Struct[0]> @@ methods_are_with_self_type-type1.volatile[WeakODR] methods_are_with_self_type[Declaration]
type1::Struct.foo();
- //~ TRANS_ITEM fn methods_are_with_self_type::{{impl}}[0]::foo[0]<methods_are_with_self_type::type2[0]::Struct[0]> @@ methods_are_with_self_type-type2.volatile[WeakODR]
+ //~ TRANS_ITEM fn methods_are_with_self_type::{{impl}}[0]::foo[0]<methods_are_with_self_type::type2[0]::Struct[0]> @@ methods_are_with_self_type-type2.volatile[WeakODR] methods_are_with_self_type[Declaration]
type2::Struct.foo();
- //~ TRANS_ITEM fn methods_are_with_self_type::Trait[0]::default[0]<methods_are_with_self_type::type1[0]::Struct[0]> @@ methods_are_with_self_type-type1.volatile[WeakODR]
+ //~ TRANS_ITEM fn methods_are_with_self_type::Trait[0]::default[0]<methods_are_with_self_type::type1[0]::Struct[0]> @@ methods_are_with_self_type-type1.volatile[WeakODR] methods_are_with_self_type[Declaration]
type1::Struct.default();
- //~ TRANS_ITEM fn methods_are_with_self_type::Trait[0]::default[0]<methods_are_with_self_type::type2[0]::Struct[0]> @@ methods_are_with_self_type-type2.volatile[WeakODR]
+ //~ TRANS_ITEM fn methods_are_with_self_type::Trait[0]::default[0]<methods_are_with_self_type::type2[0]::Struct[0]> @@ methods_are_with_self_type-type2.volatile[WeakODR] methods_are_with_self_type[Declaration]
type2::Struct.default();
}
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=eager -Z incremental=""
+// compile-flags:-Zprint-trans-items=eager -Z incremental=tmp
#![allow(dead_code)]
#![crate_type="lib"]
// except according to those terms.
// ignore-tidy-linelength
-// compile-flags:-Zprint-trans-items=lazy -Zincremental=""
+// compile-flags:-Zprint-trans-items=lazy -Zincremental=tmp
#![crate_type="lib"]