bug!("invalid output type `{:?}` for target os `{}`",
crate_type, sess.opts.target_triple);
}
- let mut out_files = link_binary_output(sess,
- codegen_results,
- crate_type,
- outputs,
- crate_name);
- out_filenames.append(&mut out_files);
+ let out_files = link_binary_output(sess,
+ codegen_results,
+ crate_type,
+ outputs,
+ crate_name);
+ out_filenames.extend(out_files);
}
// Remove the temporary object file and metadata if we aren't saving temps
// and we want to move everything to the same LLVM context. Currently the
// way we know of to do that is to serialize them to a string and then parse
// them later. Not great but hey, that's why it's "fat" LTO, right?
- for module in modules {
+ serialized_modules.extend(modules.into_iter().map(|module| {
let buffer = ModuleBuffer::new(module.module_llvm.llmod());
let llmod_id = CString::new(&module.name[..]).unwrap();
- serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
- }
+
+ (SerializedModule::Local(buffer), llmod_id)
+ }));
// For all serialized bitcode files we parse them and link them in as we did
// above, this is all mostly handled in C++. Like above, though, we don't
.map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone()))
.collect();
- let mut thin_buffers = Vec::new();
- let mut module_names = Vec::new();
- let mut thin_modules = Vec::new();
+ let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+ let mut thin_buffers = Vec::with_capacity(modules.len());
+ let mut module_names = Vec::with_capacity(full_scope_len);
+ let mut thin_modules = Vec::with_capacity(full_scope_len);
// FIXME: right now, like with fat LTO, we serialize all in-memory
// modules before working with them and ThinLTO. We really
// into the global index. It turns out that this loop is by far
// the most expensive portion of this small bit of global
// analysis!
- for (i, module) in modules.iter().enumerate() {
+ for (i, module) in modules.into_iter().enumerate() {
info!("local module: {} - {}", i, module.name);
let name = CString::new(module.name.clone()).unwrap();
let buffer = ThinBuffer::new(module.module_llvm.llmod());
// incremental ThinLTO first where we could actually avoid
// looking at upstream modules entirely sometimes (the contents,
// we must always unconditionally look at the index).
- let mut serialized = Vec::new();
+ let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
let cached_modules = cached_modules.into_iter().map(|(sm, wp)| {
(sm, CString::new(wp.cgu_name).unwrap())
return Vec::new();
}
- let mut flags = Vec::new();
-
debug!("preparing the RPATH!");
let libs = config.used_crates.clone();
let libs = libs.iter().filter_map(|&(_, ref l)| l.option()).collect::<Vec<_>>();
let rpaths = get_rpaths(config, &libs);
- flags.extend_from_slice(&rpaths_to_flags(&rpaths));
+ let mut flags = rpaths_to_flags(&rpaths);
// Use DT_RUNPATH instead of DT_RPATH if available
if config.linker_is_gnu {
}
fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
- let mut ret = Vec::new();
+ let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity
+
for rpath in rpaths {
if rpath.contains(',') {
ret.push("-Wl,-rpath".into());
"__llvm_profile_raw_version",
"__llvm_profile_filename",
];
- for sym in &PROFILER_WEAK_SYMBOLS {
+
+ symbols.extend(PROFILER_WEAK_SYMBOLS.iter().map(|sym| {
let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(sym));
- symbols.push((exported_symbol, SymbolExportLevel::C));
- }
+ (exported_symbol, SymbolExportLevel::C)
+ }));
}
if tcx.sess.crate_types.borrow().contains(&config::CrateType::Dylib) {