// codegen tests (vs. clang)
-fn make_o_name(config: &Config, testfile: &Path) -> Path {
- output_base_name(config, testfile).with_extension("o")
-}
-
fn append_suffix_to_stem(p: &Path, suffix: &str) -> Path {
if suffix.len() == 0 {
(*p).clone()
// FIXME (#9639): This needs to handle non-utf8 paths
let link_args = vec!("-L".to_string(),
aux_dir.as_str().unwrap().to_string());
- let llvm_args = vec!("--emit=obj".to_string(),
- "--crate-type=lib".to_string(),
- "-C".to_string(),
- "save-temps".to_string());
+ let llvm_args = vec!("--emit=bc,obj".to_string(),
+ "--crate-type=lib".to_string());
let args = make_compile_args(config,
props,
link_args.append(llvm_args.as_slice()),
- |a, b| ThisFile(make_o_name(a, b)), testfile);
+ |a, b| ThisDirectory(output_base_name(a, b).dir_path()),
+ testfile);
compose_and_run_compiler(config, props, testfile, args, None)
}
common thing to do.
The first thing that we need to do is make a file to put our code in. I like
-to make a projects directory in my home directory, and keep all my projects
+to make a `projects` directory in my home directory, and keep all my projects
there. Rust does not care where your code lives.
This actually leads to one other concern we should address: this tutorial will
languages which have it, like Haskell, often suggest that documenting your
types explicitly is a best-practice. We agree that forcing functions to declare
types while allowing for inference inside of function bodies is a wonderful
-compromise between full inference and no inference.
+sweet spot between full inference and no inference.
What about returning a value? Here's a function that adds one to an integer:
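A minimal sketch of such a function (the guide's exact listing is elided
here; this uses the `int` type of this era's Rust):

    fn add_one(x: int) -> int {
        x + 1
    }

The final expression `x + 1` carries no trailing semicolon, so it is the
function's return value.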
: This type does not implement "copy", even if eligible
* `no_send_bound`
: This type does not implement "send", even if eligible
-* `no_share_bound`
- : This type does not implement "share", even if eligible
+* `no_sync_bound`
+ : This type does not implement "sync", even if eligible
* `eh_personality`
: ___Needs filling in___
* `exchange_free`
use core::ptr::RawPtr;
#[cfg(not(test))] use core::raw;
-#[cfg(not(test))] use util;
+#[cfg(stage0, not(test))] use util;
/// Returns a pointer to `size` bytes of memory.
///
}
// FIXME: #7496
-#[cfg(not(test))]
+#[cfg(stage0, not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
#[allow(deprecated)]
alloc as *mut u8
}
+// FIXME: #7496
+#[cfg(not(stage0), not(test))]
+#[lang="closure_exchange_malloc"]
+#[inline]
+#[allow(deprecated)]
+unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
+ align: uint) -> *mut u8 {
+ let p = allocate(size, align);
+
+ let alloc = p as *mut raw::Box<()>;
+ (*alloc).drop_glue = drop_glue;
+
+ alloc as *mut u8
+}
+
#[cfg(jemalloc)]
mod imp {
use core::option::{None, Option};
use std::num;
use std::ptr;
use std::rc::Rc;
-use std::rt::heap::allocate;
+use std::rt::heap::{allocate, deallocate};
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
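// For example (hypothetical illustration): `Vec::with_capacity(1024)`
// yields `capacity() == 1024` but `len() == 0`, which is why `Chunk`
// tracks how much of the buffer is actually in use via its `fill` field.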
#[deriving(Clone, PartialEq)]
struct Chunk {
- data: Rc<RefCell<Vec<u8> >>,
+ data: Rc<RefCell<Vec<u8>>>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
+
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
end: Cell<*const T>,
/// A pointer to the first arena segment.
- first: RefCell<TypedArenaChunkRef<T>>,
+ first: RefCell<*mut TypedArenaChunk<T>>,
}
-type TypedArenaChunkRef<T> = Option<Box<TypedArenaChunk<T>>>;
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
- next: TypedArenaChunkRef<T>,
+ next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
+fn calculate_size<T>(capacity: uint) -> uint {
+ let mut size = mem::size_of::<TypedArenaChunk<T>>();
+ size = round_up(size, mem::min_align_of::<T>());
+ let elem_size = mem::size_of::<T>();
+ let elems_size = elem_size.checked_mul(&capacity).unwrap();
+ size = size.checked_add(&elems_size).unwrap();
+ size
+}
+
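+// As a worked illustration of this computation (hypothetical, assuming a
+// 64-bit target): for TypedArenaChunk<u64> the header is `next` (8 bytes)
+// plus `capacity` (8 bytes) = 16 bytes; round_up(16, 8) leaves it at 16;
+// with capacity = 8 the elements add 8 * 8 = 64 bytes, for an 80-byte
+// allocation in total.
+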
impl<T> TypedArenaChunk<T> {
#[inline]
- fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
- -> Box<TypedArenaChunk<T>> {
- let mut size = mem::size_of::<TypedArenaChunk<T>>();
- size = round_up(size, mem::min_align_of::<T>());
- let elem_size = mem::size_of::<T>();
- let elems_size = elem_size.checked_mul(&capacity).unwrap();
- size = size.checked_add(&elems_size).unwrap();
-
- let mut chunk = unsafe {
- let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
- let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
- ptr::write(&mut chunk.next, next);
- chunk
- };
-
- chunk.capacity = capacity;
+ unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
+ -> *mut TypedArenaChunk<T> {
+ let size = calculate_size::<T>(capacity);
+ let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
+ as *mut TypedArenaChunk<T>;
+ (*chunk).next = next;
+ (*chunk).capacity = capacity;
chunk
}
}
// Destroy the next chunk.
- let next_opt = mem::replace(&mut self.next, None);
- match next_opt {
- None => {}
- Some(mut next) => {
- // We assume that the next chunk is completely filled.
- let capacity = next.capacity;
- next.destroy(capacity)
- }
+ let next = self.next;
+ let size = calculate_size::<T>(self.capacity);
+ deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
+ mem::min_align_of::<TypedArenaChunk<T>>());
+ if next.is_not_null() {
+ let capacity = (*next).capacity;
+ (*next).destroy(capacity);
}
}
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
- let chunk = TypedArenaChunk::<T>::new(None, capacity);
- TypedArena {
- ptr: Cell::new(chunk.start() as *const T),
- end: Cell::new(chunk.end() as *const T),
- first: RefCell::new(Some(chunk)),
+ unsafe {
+ let chunk = TypedArenaChunk::<T>::new(ptr::mut_null(), capacity);
+ TypedArena {
+ ptr: Cell::new((*chunk).start() as *const T),
+ end: Cell::new((*chunk).end() as *const T),
+ first: RefCell::new(chunk),
+ }
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
- let chunk = self.first.borrow_mut().take().unwrap();
- let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
- let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
- self.ptr.set(chunk.start() as *const T);
- self.end.set(chunk.end() as *const T);
- *self.first.borrow_mut() = Some(chunk)
+ unsafe {
+ let chunk = *self.first.borrow_mut();
+ let new_capacity = (*chunk).capacity.checked_mul(&2).unwrap();
+ let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
+ self.ptr.set((*chunk).start() as *const T);
+ self.end.set((*chunk).end() as *const T);
+ *self.first.borrow_mut() = chunk
+ }
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
- // Determine how much was filled.
- let start = self.first.borrow().as_ref().unwrap().start() as uint;
- let end = self.ptr.get() as uint;
- let diff = (end - start) / mem::size_of::<T>();
-
- // Pass that to the `destroy` method.
unsafe {
- self.first.borrow_mut().as_mut().unwrap().destroy(diff)
+ // Determine how much was filled.
+ let start = self.first.borrow().as_ref().unwrap().start() as uint;
+ let end = self.ptr.get() as uint;
+ let diff = (end - start) / mem::size_of::<T>();
+
+ // Pass that to the `destroy` method.
+ (**self.first.borrow_mut()).destroy(diff)
}
}
}
pub use core::slice::{Chunks, Slice, ImmutableSlice, ImmutablePartialEqSlice};
pub use core::slice::{ImmutableOrdSlice, MutableSlice, Items, MutItems};
pub use core::slice::{MutSplits, MutChunks, Splits};
-pub use core::slice::{bytes, ref_slice, MutableCloneableSlice};
+pub use core::slice::{bytes, mut_ref_slice, ref_slice, MutableCloneableSlice};
pub use core::slice::{Found, NotFound};
// Functional utilities
/// A type which is considered "not sync", meaning that
/// its contents are not threadsafe, hence they cannot be
/// shared between tasks.
- #[lang="no_share_bound"]
+ #[lang="no_sync_bound"]
#[deriving(PartialEq,Clone)]
pub struct NoSync;
let mut i: uint = 0;
let ln = self.len();
while i < ln / 2 {
- self.swap(i, ln - i - 1);
+ // Unsafe swap to avoid the bounds check in safe swap.
+ unsafe {
+ let pa: *mut T = self.unsafe_mut_ref(i);
+ let pb: *mut T = self.unsafe_mut_ref(ln - i - 1);
+ ptr::swap(pa, pb);
+ }
i += 1;
}
}
}
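// A hypothetical trace of the pairing above: reversing [1, 2, 3, 4] gives
// ln = 4 and ln / 2 = 2, so i = 0 swaps indices 0 and 3, and i = 1 swaps
// indices 1 and 2; an odd length leaves the middle element untouched.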
#[test]
+ #[ignore(cfg(windows))] // FIXME (#9406)
fn test_lots_of_files() {
// this is a good test because it touches lots of differently named files
glob("/*/*/*/*").skip(10000).next();
Ratio::from_integer(self.numer / self.denom)
}
- ///Returns the fractional part of a number.
+ /// Returns the fractional part of a number.
#[inline]
pub fn fract(&self) -> Ratio<T> {
Ratio::new_raw(self.numer % self.denom, self.denom.clone())
}
}
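// A quick worked example (illustrative): for 7/3, `trunc` yields
// Ratio::from_integer(7 / 3) = 2 and `fract` yields
// Ratio::new_raw(7 % 3, 3) = 1/3, so trunc + fract recovers 7/3.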
-// a/b + c/d = (a*d + b*c)/(b*d
+// a/b + c/d = (a*d + b*c)/(b*d)
arith_impl!(impl Add, add)
// a/b - c/d = (a*d - b*c)/(b*d)
use super::rpath;
use super::rpath::RPathConfig;
use super::svh::Svh;
+use super::write::{OutputTypeBitcode, OutputTypeExe, OutputTypeObject};
use driver::driver::{CrateTranslation, OutputFilenames, Input, FileInput};
use driver::config::NoDebugInfo;
use driver::session::Session;
use driver::config;
-use llvm;
-use llvm::ModuleRef;
use metadata::common::LinkMeta;
use metadata::{encoder, cstore, filesearch, csearch, loader, creader};
use middle::trans::context::CrateContext;
use util::ppaux;
use util::sha2::{Digest, Sha256};
-use std::c_str::{ToCStr, CString};
use std::char;
use std::collections::HashSet;
use std::io::{fs, TempDir, Command};
use std::io;
use std::mem;
-use std::ptr;
use std::str;
use std::string::String;
use flate;
RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8;
-#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
-pub enum OutputType {
- OutputTypeBitcode,
- OutputTypeAssembly,
- OutputTypeLlvmAssembly,
- OutputTypeObject,
- OutputTypeExe,
-}
-
-pub fn llvm_err(sess: &Session, msg: String) -> ! {
- unsafe {
- let cstr = llvm::LLVMRustGetLastError();
- if cstr == ptr::null() {
- sess.fatal(msg.as_slice());
- } else {
- let err = CString::new(cstr, true);
- let err = String::from_utf8_lossy(err.as_bytes());
- sess.fatal(format!("{}: {}",
- msg.as_slice(),
- err.as_slice()).as_slice());
- }
- }
-}
-
-pub fn write_output_file(
- sess: &Session,
- target: llvm::TargetMachineRef,
- pm: llvm::PassManagerRef,
- m: ModuleRef,
- output: &Path,
- file_type: llvm::FileType) {
- unsafe {
- output.with_c_str(|output| {
- let result = llvm::LLVMRustWriteOutputFile(
- target, pm, m, output, file_type);
- if !result {
- llvm_err(sess, "could not write output".to_string());
- }
- })
- }
-}
-
-pub mod write {
-
- use super::super::lto;
- use super::{write_output_file, OutputType};
- use super::{OutputTypeAssembly, OutputTypeBitcode};
- use super::{OutputTypeExe, OutputTypeLlvmAssembly};
- use super::{OutputTypeObject};
- use driver::driver::{CrateTranslation, OutputFilenames};
- use driver::config::NoDebugInfo;
- use driver::session::Session;
- use driver::config;
- use llvm;
- use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
- use util::common::time;
- use syntax::abi;
-
- use std::c_str::ToCStr;
- use std::io::{Command};
- use libc::{c_uint, c_int};
- use std::str;
-
- // On android, we by default compile for armv7 processors. This enables
- // things like double word CAS instructions (rather than emulating them)
- // which are *far* more efficient. This is obviously undesirable in some
- // cases, so if any sort of target feature is specified we don't append v7
- // to the feature list.
- //
- // On iOS only armv7 and newer are supported. So it is useful to
- // get all hardware potential via VFP3 (hardware floating point)
- // and NEON (SIMD) instructions supported by LLVM.
- // Note that without those flags various linking errors might
- // arise as some of intrinsics are converted into function calls
- // and nobody provides implementations those functions
- fn target_feature<'a>(sess: &'a Session) -> &'a str {
- match sess.targ_cfg.os {
- abi::OsAndroid => {
- if "" == sess.opts.cg.target_feature.as_slice() {
- "+v7"
- } else {
- sess.opts.cg.target_feature.as_slice()
- }
- },
- abi::OsiOS if sess.targ_cfg.arch == abi::Arm => {
- "+v7,+thumb2,+vfp3,+neon"
- },
- _ => sess.opts.cg.target_feature.as_slice()
- }
- }
-
- pub fn run_passes(sess: &Session,
- trans: &CrateTranslation,
- output_types: &[OutputType],
- output: &OutputFilenames) {
- let llmod = trans.module;
- let llcx = trans.context;
- unsafe {
- configure_llvm(sess);
-
- if sess.opts.cg.save_temps {
- output.with_extension("no-opt.bc").with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
-
- let opt_level = match sess.opts.optimize {
- config::No => llvm::CodeGenLevelNone,
- config::Less => llvm::CodeGenLevelLess,
- config::Default => llvm::CodeGenLevelDefault,
- config::Aggressive => llvm::CodeGenLevelAggressive,
- };
- let use_softfp = sess.opts.cg.soft_float;
-
- // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a parameter.
- // FIXME: #11954: mac64 unwinding may not work with fp elim
- let no_fp_elim = (sess.opts.debuginfo != NoDebugInfo) ||
- (sess.targ_cfg.os == abi::OsMacos &&
- sess.targ_cfg.arch == abi::X86_64);
-
- // OSX has -dead_strip, which doesn't rely on ffunction_sections
- // FIXME(#13846) this should be enabled for windows
- let ffunction_sections = sess.targ_cfg.os != abi::OsMacos &&
- sess.targ_cfg.os != abi::OsWindows;
- let fdata_sections = ffunction_sections;
-
- let reloc_model = match sess.opts.cg.relocation_model.as_slice() {
- "pic" => llvm::RelocPIC,
- "static" => llvm::RelocStatic,
- "default" => llvm::RelocDefault,
- "dynamic-no-pic" => llvm::RelocDynamicNoPic,
- _ => {
- sess.err(format!("{} is not a valid relocation mode",
- sess.opts
- .cg
- .relocation_model).as_slice());
- sess.abort_if_errors();
- return;
- }
- };
-
- let code_model = match sess.opts.cg.code_model.as_slice() {
- "default" => llvm::CodeModelDefault,
- "small" => llvm::CodeModelSmall,
- "kernel" => llvm::CodeModelKernel,
- "medium" => llvm::CodeModelMedium,
- "large" => llvm::CodeModelLarge,
- _ => {
- sess.err(format!("{} is not a valid code model",
- sess.opts
- .cg
- .code_model).as_slice());
- sess.abort_if_errors();
- return;
- }
- };
-
- let tm = sess.targ_cfg
- .target_strs
- .target_triple
- .as_slice()
- .with_c_str(|t| {
- sess.opts.cg.target_cpu.as_slice().with_c_str(|cpu| {
- target_feature(sess).with_c_str(|features| {
- llvm::LLVMRustCreateTargetMachine(
- t, cpu, features,
- code_model,
- reloc_model,
- opt_level,
- true /* EnableSegstk */,
- use_softfp,
- no_fp_elim,
- ffunction_sections,
- fdata_sections,
- )
- })
- })
- });
-
- // Create the two optimizing pass managers. These mirror what clang
- // does, and are by populated by LLVM's default PassManagerBuilder.
- // Each manager has a different set of passes, but they also share
- // some common passes.
- let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
- let mpm = llvm::LLVMCreatePassManager();
-
- // If we're verifying or linting, add them to the function pass
- // manager.
- let addpass = |pass: &str| {
- pass.as_slice().with_c_str(|s| llvm::LLVMRustAddPass(fpm, s))
- };
- if !sess.no_verify() { assert!(addpass("verify")); }
-
- if !sess.opts.cg.no_prepopulate_passes {
- llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
- llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
- populate_llvm_passes(fpm, mpm, llmod, opt_level,
- trans.no_builtins);
- }
-
- for pass in sess.opts.cg.passes.iter() {
- pass.as_slice().with_c_str(|s| {
- if !llvm::LLVMRustAddPass(mpm, s) {
- sess.warn(format!("unknown pass {}, ignoring",
- *pass).as_slice());
- }
- })
- }
-
- // Finally, run the actual optimization passes
- time(sess.time_passes(), "llvm function passes", (), |()|
- llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
- time(sess.time_passes(), "llvm module passes", (), |()|
- llvm::LLVMRunPassManager(mpm, llmod));
-
- // Deallocate managers that we're now done with
- llvm::LLVMDisposePassManager(fpm);
- llvm::LLVMDisposePassManager(mpm);
-
- // Emit the bytecode if we're either saving our temporaries or
- // emitting an rlib. Whenever an rlib is created, the bytecode is
- // inserted into the archive in order to allow LTO against it.
- if sess.opts.cg.save_temps ||
- (sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
- sess.opts.output_types.contains(&OutputTypeExe)) {
- output.temp_path(OutputTypeBitcode).with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
-
- if sess.lto() {
- time(sess.time_passes(), "all lto passes", (), |()|
- lto::run(sess, llmod, tm, trans.reachable.as_slice()));
-
- if sess.opts.cg.save_temps {
- output.with_extension("lto.bc").with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
- }
-
- // A codegen-specific pass manager is used to generate object
- // files for an LLVM module.
- //
- // Apparently each of these pass managers is a one-shot kind of
- // thing, so we create a new one for each type of output. The
- // pass manager passed to the closure should be ensured to not
- // escape the closure itself, and the manager should only be
- // used once.
- fn with_codegen(tm: TargetMachineRef, llmod: ModuleRef,
- no_builtins: bool, f: |PassManagerRef|) {
- unsafe {
- let cpm = llvm::LLVMCreatePassManager();
- llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
- llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
- f(cpm);
- llvm::LLVMDisposePassManager(cpm);
- }
- }
-
- let mut object_file = None;
- let mut needs_metadata = false;
- for output_type in output_types.iter() {
- let path = output.path(*output_type);
- match *output_type {
- OutputTypeBitcode => {
- path.with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
- OutputTypeLlvmAssembly => {
- path.with_c_str(|output| {
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- llvm::LLVMRustPrintModule(cpm, llmod, output);
- })
- })
- }
- OutputTypeAssembly => {
- // If we're not using the LLVM assembler, this function
- // could be invoked specially with output_type_assembly,
- // so in this case we still want the metadata object
- // file.
- let ty = OutputTypeAssembly;
- let path = if sess.opts.output_types.contains(&ty) {
- path
- } else {
- needs_metadata = true;
- output.temp_path(OutputTypeAssembly)
- };
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- write_output_file(sess, tm, cpm, llmod, &path,
- llvm::AssemblyFile);
- });
- }
- OutputTypeObject => {
- object_file = Some(path);
- }
- OutputTypeExe => {
- object_file = Some(output.temp_path(OutputTypeObject));
- needs_metadata = true;
- }
- }
- }
-
- time(sess.time_passes(), "codegen passes", (), |()| {
- match object_file {
- Some(ref path) => {
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- write_output_file(sess, tm, cpm, llmod, path,
- llvm::ObjectFile);
- });
- }
- None => {}
- }
- if needs_metadata {
- with_codegen(tm, trans.metadata_module,
- trans.no_builtins, |cpm| {
- let out = output.temp_path(OutputTypeObject)
- .with_extension("metadata.o");
- write_output_file(sess, tm, cpm,
- trans.metadata_module, &out,
- llvm::ObjectFile);
- })
- }
- });
-
- llvm::LLVMRustDisposeTargetMachine(tm);
- llvm::LLVMDisposeModule(trans.metadata_module);
- llvm::LLVMDisposeModule(llmod);
- llvm::LLVMContextDispose(llcx);
- if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
- }
- }
-
- pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
- let pname = super::get_cc_prog(sess);
- let mut cmd = Command::new(pname.as_slice());
-
- cmd.arg("-c").arg("-o").arg(outputs.path(OutputTypeObject))
- .arg(outputs.temp_path(OutputTypeAssembly));
- debug!("{}", &cmd);
-
- match cmd.output() {
- Ok(prog) => {
- if !prog.status.success() {
- sess.err(format!("linking with `{}` failed: {}",
- pname,
- prog.status).as_slice());
- sess.note(format!("{}", &cmd).as_slice());
- let mut note = prog.error.clone();
- note.push_all(prog.output.as_slice());
- sess.note(str::from_utf8(note.as_slice()).unwrap());
- sess.abort_if_errors();
- }
- },
- Err(e) => {
- sess.err(format!("could not exec the linker `{}`: {}",
- pname,
- e).as_slice());
- sess.abort_if_errors();
- }
- }
- }
-
- unsafe fn configure_llvm(sess: &Session) {
- use std::sync::{Once, ONCE_INIT};
- static mut INIT: Once = ONCE_INIT;
-
- // Copy what clang does by turning on loop vectorization at O2 and
- // slp vectorization at O3
- let vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
- (sess.opts.optimize == config::Default ||
- sess.opts.optimize == config::Aggressive);
- let vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
- sess.opts.optimize == config::Aggressive;
-
- let mut llvm_c_strs = Vec::new();
- let mut llvm_args = Vec::new();
- {
- let add = |arg: &str| {
- let s = arg.to_c_str();
- llvm_args.push(s.as_ptr());
- llvm_c_strs.push(s);
- };
- add("rustc"); // fake program name
- if vectorize_loop { add("-vectorize-loops"); }
- if vectorize_slp { add("-vectorize-slp"); }
- if sess.time_llvm_passes() { add("-time-passes"); }
- if sess.print_llvm_passes() { add("-debug-pass=Structure"); }
-
- for arg in sess.opts.cg.llvm_args.iter() {
- add((*arg).as_slice());
- }
- }
-
- INIT.doit(|| {
- llvm::LLVMInitializePasses();
-
- // Only initialize the platforms supported by Rust here, because
- // using --llvm-root will have multiple platforms that rustllvm
- // doesn't actually link to and it's pointless to put target info
- // into the registry that Rust cannot generate machine code for.
- llvm::LLVMInitializeX86TargetInfo();
- llvm::LLVMInitializeX86Target();
- llvm::LLVMInitializeX86TargetMC();
- llvm::LLVMInitializeX86AsmPrinter();
- llvm::LLVMInitializeX86AsmParser();
-
- llvm::LLVMInitializeARMTargetInfo();
- llvm::LLVMInitializeARMTarget();
- llvm::LLVMInitializeARMTargetMC();
- llvm::LLVMInitializeARMAsmPrinter();
- llvm::LLVMInitializeARMAsmParser();
-
- llvm::LLVMInitializeMipsTargetInfo();
- llvm::LLVMInitializeMipsTarget();
- llvm::LLVMInitializeMipsTargetMC();
- llvm::LLVMInitializeMipsAsmPrinter();
- llvm::LLVMInitializeMipsAsmParser();
-
- llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
- llvm_args.as_ptr());
- });
- }
-
- unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
- mpm: llvm::PassManagerRef,
- llmod: ModuleRef,
- opt: llvm::CodeGenOptLevel,
- no_builtins: bool) {
- // Create the PassManagerBuilder for LLVM. We configure it with
- // reasonable defaults and prepare it to actually populate the pass
- // manager.
- let builder = llvm::LLVMPassManagerBuilderCreate();
- match opt {
- llvm::CodeGenLevelNone => {
- // Don't add lifetime intrinsics at O0
- llvm::LLVMRustAddAlwaysInlinePass(builder, false);
- }
- llvm::CodeGenLevelLess => {
- llvm::LLVMRustAddAlwaysInlinePass(builder, true);
- }
- // numeric values copied from clang
- llvm::CodeGenLevelDefault => {
- llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
- 225);
- }
- llvm::CodeGenLevelAggressive => {
- llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
- 275);
- }
- }
- llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt as c_uint);
- llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, no_builtins);
-
- // Use the builder to populate the function/module pass managers.
- llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
- llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
- llvm::LLVMPassManagerBuilderDispose(builder);
-
- match opt {
- llvm::CodeGenLevelDefault | llvm::CodeGenLevelAggressive => {
- "mergefunc".with_c_str(|s| llvm::LLVMRustAddPass(mpm, s));
- }
- _ => {}
- };
- }
-}
-
-
/*
* Name mangling and its relationship to metadata. This is complex. Read
* carefully.
}
fn get_symbol_hash(ccx: &CrateContext, t: ty::t) -> String {
- match ccx.type_hashcodes.borrow().find(&t) {
+ match ccx.type_hashcodes().borrow().find(&t) {
Some(h) => return h.to_string(),
None => {}
}
- let mut symbol_hasher = ccx.symbol_hasher.borrow_mut();
- let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, &ccx.link_meta);
- ccx.type_hashcodes.borrow_mut().insert(t, hash.clone());
+ let mut symbol_hasher = ccx.symbol_hasher().borrow_mut();
+ let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, ccx.link_meta());
+ ccx.type_hashcodes().borrow_mut().insert(t, hash.clone());
hash
}
}
}
-fn remove(sess: &Session, path: &Path) {
+pub fn remove(sess: &Session, path: &Path) {
match fs::unlink(path) {
Ok(..) => {}
Err(e) => {
// contain the metadata in a separate file. We use a temp directory
// here so concurrent builds in the same directory don't try to use
// the same filename for metadata (stomping over one another)
- let tmpdir = TempDir::new("rustc").expect("needs a temp dir");
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
let metadata = tmpdir.path().join(METADATA_FILENAME);
match fs::File::create(&metadata).write(trans.metadata
.as_slice()) {
ab.add_file(&metadata).unwrap();
remove(sess, &metadata);
- // For LTO purposes, the bytecode of this library is also inserted
- // into the archive.
- //
- // Note that we make sure that the bytecode filename in the archive
- // is never exactly 16 bytes long by adding a 16 byte extension to
- // it. This is to work around a bug in LLDB that would cause it to
- // crash if the name of a file in an archive was exactly 16 bytes.
- let bc_filename = obj_filename.with_extension("bc");
- let bc_deflated_filename = obj_filename.with_extension("bytecode.deflate");
-
- let bc_data = match fs::File::open(&bc_filename).read_to_end() {
- Ok(buffer) => buffer,
- Err(e) => sess.fatal(format!("failed to read bytecode: {}",
- e).as_slice())
- };
+ if sess.opts.cg.codegen_units == 1 {
+ // For LTO purposes, the bytecode of this library is also
+ // inserted into the archive. We currently do this only when
+ // codegen_units == 1, so we don't have to deal with multiple
+ // bitcode files per crate.
+ //
+ // Note that we make sure that the bytecode filename in the
+ // archive is never exactly 16 bytes long by adding a 16 byte
+ // extension to it. This is to work around a bug in LLDB that
+ // would cause it to crash if the name of a file in an archive
+ // was exactly 16 bytes.
+ let bc_filename = obj_filename.with_extension("bc");
+ let bc_deflated_filename = obj_filename.with_extension("bytecode.deflate");
+
+ let bc_data = match fs::File::open(&bc_filename).read_to_end() {
+ Ok(buffer) => buffer,
+ Err(e) => sess.fatal(format!("failed to read bytecode: {}",
+ e).as_slice())
+ };
- let bc_data_deflated = match flate::deflate_bytes(bc_data.as_slice()) {
- Some(compressed) => compressed,
- None => sess.fatal(format!("failed to compress bytecode from {}",
- bc_filename.display()).as_slice())
- };
+ let bc_data_deflated = match flate::deflate_bytes(bc_data.as_slice()) {
+ Some(compressed) => compressed,
+ None => sess.fatal(format!("failed to compress bytecode from {}",
+ bc_filename.display()).as_slice())
+ };
- let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) {
- Ok(file) => file,
- Err(e) => {
- sess.fatal(format!("failed to create compressed bytecode \
- file: {}", e).as_slice())
- }
- };
+ let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) {
+ Ok(file) => file,
+ Err(e) => {
+ sess.fatal(format!("failed to create compressed bytecode \
+ file: {}", e).as_slice())
+ }
+ };
- match write_rlib_bytecode_object_v1(&mut bc_file_deflated,
- bc_data_deflated.as_slice()) {
- Ok(()) => {}
- Err(e) => {
- sess.err(format!("failed to write compressed bytecode: \
- {}", e).as_slice());
- sess.abort_if_errors()
- }
- };
+ match write_rlib_bytecode_object_v1(&mut bc_file_deflated,
+ bc_data_deflated.as_slice()) {
+ Ok(()) => {}
+ Err(e) => {
+ sess.err(format!("failed to write compressed bytecode: \
+ {}", e).as_slice());
+ sess.abort_if_errors()
+ }
+ };
- ab.add_file(&bc_deflated_filename).unwrap();
- remove(sess, &bc_deflated_filename);
- if !sess.opts.cg.save_temps &&
- !sess.opts.output_types.contains(&OutputTypeBitcode) {
- remove(sess, &bc_filename);
+ ab.add_file(&bc_deflated_filename).unwrap();
+ remove(sess, &bc_deflated_filename);
+ if !sess.opts.cg.save_temps &&
+ !sess.opts.output_types.contains(&OutputTypeBitcode) {
+ remove(sess, &bc_filename);
+ }
}
}
// links to all upstream files as well.
fn link_natively(sess: &Session, trans: &CrateTranslation, dylib: bool,
obj_filename: &Path, out_filename: &Path) {
- let tmpdir = TempDir::new("rustc").expect("needs a temp dir");
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
// The invocations of cc share some flags across platforms
let pname = get_cc_prog(sess);
// except according to those terms.
use super::link;
+use super::write;
use driver::session;
use driver::config;
use llvm;
archive.read(format!("{}.bytecode.deflate",
file).as_slice())
});
- let bc_encoded = bc_encoded.expect("missing compressed bytecode in archive!");
+ let bc_encoded = match bc_encoded {
+ Some(data) => data,
+ None => {
+ sess.fatal(format!("missing compressed bytecode in {} \
+ (perhaps it was compiled with -C codegen-units > 1)",
+ path.display()).as_slice());
+ },
+ };
let bc_extractor = if is_versioned_bytecode_format(bc_encoded) {
|_| {
// Read the version
if !llvm::LLVMRustLinkInExternalBitcode(llmod,
ptr as *const libc::c_char,
bc_decoded.len() as libc::size_t) {
- link::llvm_err(sess,
- format!("failed to load bc of `{}`",
- name.as_slice()));
+ write::llvm_err(sess.diagnostic().handler(),
+ format!("failed to load bc of `{}`",
+ name.as_slice()));
}
});
}
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::lto;
+use back::link::{get_cc_prog, remove};
+use driver::driver::{CrateTranslation, ModuleTranslation, OutputFilenames};
+use driver::config::NoDebugInfo;
+use driver::session::Session;
+use driver::config;
+use llvm;
+use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
+use util::common::time;
+use syntax::abi;
+use syntax::codemap;
+use syntax::diagnostic;
+use syntax::diagnostic::{Emitter, Handler, Level, mk_handler};
+
+use std::c_str::{ToCStr, CString};
+use std::io::Command;
+use std::io::fs;
+use std::iter::Unfold;
+use std::ptr;
+use std::str;
+use std::sync::{Arc, Mutex};
+use std::task::TaskBuilder;
+use libc::{c_uint, c_int};
+
+
+#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
+pub enum OutputType {
+ OutputTypeBitcode,
+ OutputTypeAssembly,
+ OutputTypeLlvmAssembly,
+ OutputTypeObject,
+ OutputTypeExe,
+}
+
+
+pub fn llvm_err(handler: &diagnostic::Handler, msg: String) -> ! {
+ unsafe {
+ let cstr = llvm::LLVMRustGetLastError();
+ if cstr == ptr::null() {
+ handler.fatal(msg.as_slice());
+ } else {
+ let err = CString::new(cstr, true);
+ let err = String::from_utf8_lossy(err.as_bytes());
+ handler.fatal(format!("{}: {}",
+ msg.as_slice(),
+ err.as_slice()).as_slice());
+ }
+ }
+}
+
+pub fn write_output_file(
+ handler: &diagnostic::Handler,
+ target: llvm::TargetMachineRef,
+ pm: llvm::PassManagerRef,
+ m: ModuleRef,
+ output: &Path,
+ file_type: llvm::FileType) {
+ unsafe {
+ output.with_c_str(|output| {
+ let result = llvm::LLVMRustWriteOutputFile(
+ target, pm, m, output, file_type);
+ if !result {
+ llvm_err(handler, "could not write output".to_string());
+ }
+ })
+ }
+}
+
+
+struct Diagnostic {
+ msg: String,
+ code: Option<String>,
+ lvl: Level,
+}
+
+// We use an Arc instead of just returning a list of diagnostics from the
+// child task because we need to make sure that the messages are seen even
+// if the child task fails (for example, when `fatal` is called).
+#[deriving(Clone)]
+struct SharedEmitter {
+ buffer: Arc<Mutex<Vec<Diagnostic>>>,
+}
+
+impl SharedEmitter {
+ fn new() -> SharedEmitter {
+ SharedEmitter {
+ buffer: Arc::new(Mutex::new(Vec::new())),
+ }
+ }
+
+ fn dump(&mut self, handler: &Handler) {
+ let mut buffer = self.buffer.lock();
+ for diag in buffer.iter() {
+ match diag.code {
+ Some(ref code) => {
+ handler.emit_with_code(None,
+ diag.msg.as_slice(),
+ code.as_slice(),
+ diag.lvl);
+ },
+ None => {
+ handler.emit(None,
+ diag.msg.as_slice(),
+ diag.lvl);
+ },
+ }
+ }
+ buffer.clear();
+ }
+}
+
+impl Emitter for SharedEmitter {
+ fn emit(&mut self, cmsp: Option<(&codemap::CodeMap, codemap::Span)>,
+ msg: &str, code: Option<&str>, lvl: Level) {
+ assert!(cmsp.is_none(), "SharedEmitter doesn't support spans");
+
+ self.buffer.lock().push(Diagnostic {
+ msg: msg.to_string(),
+ code: code.map(|s| s.to_string()),
+ lvl: lvl,
+ });
+ }
+
+ fn custom_emit(&mut self, _cm: &codemap::CodeMap,
+ _sp: diagnostic::RenderSpan, _msg: &str, _lvl: Level) {
+ fail!("SharedEmitter doesn't support custom_emit");
+ }
+}
+
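+// A sketch of the intended flow (hypothetical; the real wiring is in
+// `run_work_multithreaded` below):
+//
+//     let mut emitter = SharedEmitter::new();
+//     let handler = mk_handler(box emitter.clone()); // worker-side handler
+//     handler.warn("this warning is buffered, not printed");
+//     emitter.dump(sess.diagnostic().handler());     // main thread replays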
+
+// On android, we compile for armv7 processors by default. This enables
+// things like double word CAS instructions (rather than emulating them)
+// which are *far* more efficient. This is obviously undesirable in some
+// cases, so if any sort of target feature is specified we don't append v7
+// to the feature list.
+//
+// On iOS, only armv7 and newer are supported, so it is useful to exploit
+// the full hardware potential via the VFP3 (hardware floating point) and
+// NEON (SIMD) instructions supported by LLVM. Note that without those
+// flags, various linking errors might arise, as some intrinsics are
+// converted into function calls and nobody provides implementations of
+// those functions.
+fn target_feature<'a>(sess: &'a Session) -> &'a str {
+ match sess.targ_cfg.os {
+ abi::OsAndroid => {
+ if "" == sess.opts.cg.target_feature.as_slice() {
+ "+v7"
+ } else {
+ sess.opts.cg.target_feature.as_slice()
+ }
+ },
+ abi::OsiOS if sess.targ_cfg.arch == abi::Arm => {
+ "+v7,+thumb2,+vfp3,+neon"
+ },
+ _ => sess.opts.cg.target_feature.as_slice()
+ }
+}
+
+fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel {
+ match optimize {
+ config::No => llvm::CodeGenLevelNone,
+ config::Less => llvm::CodeGenLevelLess,
+ config::Default => llvm::CodeGenLevelDefault,
+ config::Aggressive => llvm::CodeGenLevelAggressive,
+ }
+}
+
+fn create_target_machine(sess: &Session) -> TargetMachineRef {
+ let reloc_model = match sess.opts.cg.relocation_model.as_slice() {
+ "pic" => llvm::RelocPIC,
+ "static" => llvm::RelocStatic,
+ "default" => llvm::RelocDefault,
+ "dynamic-no-pic" => llvm::RelocDynamicNoPic,
+ _ => {
+ sess.err(format!("{} is not a valid relocation mode",
+ sess.opts
+ .cg
+ .relocation_model).as_slice());
+ sess.abort_if_errors();
+ unreachable!();
+ }
+ };
+
+ let opt_level = get_llvm_opt_level(sess.opts.optimize);
+ let use_softfp = sess.opts.cg.soft_float;
+
+ // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a parameter.
+ // FIXME: #11954: mac64 unwinding may not work with fp elim
+ let no_fp_elim = (sess.opts.debuginfo != NoDebugInfo) ||
+ (sess.targ_cfg.os == abi::OsMacos &&
+ sess.targ_cfg.arch == abi::X86_64);
+
+ // OSX has -dead_strip, which doesn't rely on ffunction_sections
+ // FIXME(#13846) this should be enabled for windows
+ let ffunction_sections = sess.targ_cfg.os != abi::OsMacos &&
+ sess.targ_cfg.os != abi::OsWindows;
+ let fdata_sections = ffunction_sections;
+
+ let code_model = match sess.opts.cg.code_model.as_slice() {
+ "default" => llvm::CodeModelDefault,
+ "small" => llvm::CodeModelSmall,
+ "kernel" => llvm::CodeModelKernel,
+ "medium" => llvm::CodeModelMedium,
+ "large" => llvm::CodeModelLarge,
+ _ => {
+ sess.err(format!("{} is not a valid code model",
+ sess.opts
+ .cg
+ .code_model).as_slice());
+ sess.abort_if_errors();
+ unreachable!();
+ }
+ };
+
+ unsafe {
+ sess.targ_cfg
+ .target_strs
+ .target_triple
+ .as_slice()
+ .with_c_str(|t| {
+ sess.opts.cg.target_cpu.as_slice().with_c_str(|cpu| {
+ target_feature(sess).with_c_str(|features| {
+ llvm::LLVMRustCreateTargetMachine(
+ t, cpu, features,
+ code_model,
+ reloc_model,
+ opt_level,
+ true /* EnableSegstk */,
+ use_softfp,
+ no_fp_elim,
+ ffunction_sections,
+ fdata_sections,
+ )
+ })
+ })
+ })
+ }
+}
+
+
+/// Module-specific configuration for `optimize_and_codegen`.
+#[deriving(Clone)]
+struct ModuleConfig {
+ /// LLVM TargetMachine to use for codegen.
+ tm: TargetMachineRef,
+ /// Names of additional optimization passes to run.
+ passes: Vec<String>,
+ /// Some(level) to optimize at a certain level, or None to run
+ /// absolutely no optimizations (used for the metadata module).
+ opt_level: Option<llvm::CodeGenOptLevel>,
+
+ // Flags indicating which outputs to produce.
+ emit_no_opt_bc: bool,
+ emit_bc: bool,
+ emit_lto_bc: bool,
+ emit_ir: bool,
+ emit_asm: bool,
+ emit_obj: bool,
+
+ // Miscellaneous flags. These are mostly copied from command-line
+ // options.
+ no_verify: bool,
+ no_prepopulate_passes: bool,
+ no_builtins: bool,
+ time_passes: bool,
+}
+
+impl ModuleConfig {
+ fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig {
+ ModuleConfig {
+ tm: tm,
+ passes: passes,
+ opt_level: None,
+
+ emit_no_opt_bc: false,
+ emit_bc: false,
+ emit_lto_bc: false,
+ emit_ir: false,
+ emit_asm: false,
+ emit_obj: false,
+
+ no_verify: false,
+ no_prepopulate_passes: false,
+ no_builtins: false,
+ time_passes: false,
+ }
+ }
+
+ fn set_flags(&mut self, sess: &Session, trans: &CrateTranslation) {
+ self.no_verify = sess.no_verify();
+ self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes;
+ self.no_builtins = trans.no_builtins;
+ self.time_passes = sess.time_passes();
+ }
+}
+
+/// Additional resources used by optimize_and_codegen (not module specific)
+struct CodegenContext<'a> {
+ // Extra resources used for LTO: (sess, reachable). This will be `None`
+ // when running in a worker thread.
+ lto_ctxt: Option<(&'a Session, &'a [String])>,
+ // Handler to use for diagnostics produced during codegen.
+ handler: &'a Handler,
+}
+
+impl<'a> CodegenContext<'a> {
+ fn new(handler: &'a Handler) -> CodegenContext<'a> {
+ CodegenContext {
+ lto_ctxt: None,
+ handler: handler,
+ }
+ }
+
+ fn new_with_session(sess: &'a Session, reachable: &'a [String]) -> CodegenContext<'a> {
+ CodegenContext {
+ lto_ctxt: Some((sess, reachable)),
+ handler: sess.diagnostic().handler(),
+ }
+ }
+}
+
+// Unsafe due to LLVM calls.
+unsafe fn optimize_and_codegen(cgcx: &CodegenContext,
+ mtrans: ModuleTranslation,
+ config: ModuleConfig,
+ name_extra: String,
+ output_names: OutputFilenames) {
+ let ModuleTranslation { llmod, llcx } = mtrans;
+ let tm = config.tm;
+
+ if config.emit_no_opt_bc {
+ let ext = format!("{}.no-opt.bc", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+
+ match config.opt_level {
+ Some(opt_level) => {
+ // Create the two optimizing pass managers. These mirror what clang
+ // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+ // If we're verifying or linting, add them to the function pass
+ // manager.
+ let addpass = |pass: &str| {
+ pass.as_slice().with_c_str(|s| llvm::LLVMRustAddPass(fpm, s))
+ };
+ if !config.no_verify { assert!(addpass("verify")); }
+
+ if !config.no_prepopulate_passes {
+ llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
+ llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
+ populate_llvm_passes(fpm, mpm, llmod, opt_level,
+ config.no_builtins);
+ }
+
+ for pass in config.passes.iter() {
+ pass.as_slice().with_c_str(|s| {
+ if !llvm::LLVMRustAddPass(mpm, s) {
+ cgcx.handler.warn(format!("unknown pass {}, ignoring",
+ *pass).as_slice());
+ }
+ })
+ }
+
+ // Finally, run the actual optimization passes
+ time(config.time_passes, "llvm function passes", (), |()|
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
+ time(config.time_passes, "llvm module passes", (), |()|
+ llvm::LLVMRunPassManager(mpm, llmod));
+
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
+
+ match cgcx.lto_ctxt {
+ Some((sess, reachable)) if sess.lto() => {
+ time(sess.time_passes(), "all lto passes", (), |()|
+ lto::run(sess, llmod, tm, reachable));
+
+ if config.emit_lto_bc {
+ let name = format!("{}.lto.bc", name_extra);
+ output_names.with_extension(name.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+ },
+ _ => {},
+ }
+ },
+ None => {},
+ }
+
+ // A codegen-specific pass manager is used to generate object
+ // files for an LLVM module.
+ //
+ // Apparently each of these pass managers is a one-shot kind of
+ // thing, so we create a new one for each type of output. The
+ // pass manager passed to the closure must not escape the closure
+ // itself, and it should only be used once.
+ unsafe fn with_codegen(tm: TargetMachineRef, llmod: ModuleRef,
+ no_builtins: bool, f: |PassManagerRef|) {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm);
+ llvm::LLVMDisposePassManager(cpm);
+ }
+
+ if config.emit_bc {
+ let ext = format!("{}.bc", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+
+ time(config.time_passes, "codegen passes", (), |()| {
+ if config.emit_ir {
+ let ext = format!("{}.ll", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|output| {
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ llvm::LLVMRustPrintModule(cpm, llmod, output);
+ })
+ })
+ }
+
+ if config.emit_asm {
+ let path = output_names.with_extension(format!("{}.s", name_extra).as_slice());
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::AssemblyFile);
+ });
+ }
+
+ if config.emit_obj {
+ let path = output_names.with_extension(format!("{}.o", name_extra).as_slice());
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::ObjectFile);
+ });
+ }
+ });
+
+ llvm::LLVMDisposeModule(llmod);
+ llvm::LLVMContextDispose(llcx);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+}
+
+pub fn run_passes(sess: &Session,
+ trans: &CrateTranslation,
+ output_types: &[OutputType],
+ crate_output: &OutputFilenames) {
+ // It's possible that we have `codegen_units > 1` but only one item in
+ // `trans.modules`. We could theoretically proceed and do LTO in that
+ // case, but it would be confusing to have the validity of
+ // `-Z lto -C codegen-units=2` depend on details of the crate being
+ // compiled, so we complain regardless.
+ if sess.lto() && sess.opts.cg.codegen_units > 1 {
+ // This case is impossible to handle because LTO expects to be able
+ // to combine the entire crate and all its dependencies into a
+ // single compilation unit, but each codegen unit is in a separate
+ // LLVM context, so they can't easily be combined.
+ sess.fatal("can't perform LTO when using multiple codegen units");
+ }
+
+ // Sanity check
+ assert!(trans.modules.len() == sess.opts.cg.codegen_units);
+
+ unsafe {
+ configure_llvm(sess);
+ }
+
+ let tm = create_target_machine(sess);
+
+ // Figure out what we actually need to build.
+
+ let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone());
+ let mut metadata_config = ModuleConfig::new(tm, vec!());
+
+ modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize));
+
+ // Save all versions of the bytecode if we're saving our temporaries.
+ if sess.opts.cg.save_temps {
+ modules_config.emit_no_opt_bc = true;
+ modules_config.emit_bc = true;
+ modules_config.emit_lto_bc = true;
+ metadata_config.emit_bc = true;
+ }
+
+ // Emit a bitcode file for the crate if we're emitting an rlib.
+ // Whenever an rlib is created, the bitcode is inserted into the
+ // archive in order to allow LTO against it.
+ let needs_crate_bitcode =
+ sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
+ sess.opts.output_types.contains(&OutputTypeExe) &&
+ sess.opts.cg.codegen_units == 1;
+ if needs_crate_bitcode {
+ modules_config.emit_bc = true;
+ }
+
+ for output_type in output_types.iter() {
+ match *output_type {
+ OutputTypeBitcode => { modules_config.emit_bc = true; },
+ OutputTypeLlvmAssembly => { modules_config.emit_ir = true; },
+ OutputTypeAssembly => {
+ modules_config.emit_asm = true;
+ // If we're not using the LLVM assembler, this function
+ // could be invoked specially with output_type_assembly, so
+ // in this case we still want the metadata object file.
+ if !sess.opts.output_types.contains(&OutputTypeAssembly) {
+ metadata_config.emit_obj = true;
+ }
+ },
+ OutputTypeObject => { modules_config.emit_obj = true; },
+ OutputTypeExe => {
+ modules_config.emit_obj = true;
+ metadata_config.emit_obj = true;
+ },
+ }
+ }
+
+ modules_config.set_flags(sess, trans);
+ metadata_config.set_flags(sess, trans);
+
+
+ // Populate a buffer with a list of codegen tasks. Items are processed in
+ // LIFO order, just because it's a tiny bit simpler that way. (The order
+ // doesn't actually matter.)
+ let mut work_items = Vec::with_capacity(1 + trans.modules.len());
+
+ {
+ let work = build_work_item(sess,
+ trans.metadata_module,
+ metadata_config.clone(),
+ crate_output.clone(),
+ "metadata".to_string());
+ work_items.push(work);
+ }
+
+ for (index, mtrans) in trans.modules.iter().enumerate() {
+ let work = build_work_item(sess,
+ *mtrans,
+ modules_config.clone(),
+ crate_output.clone(),
+ format!("{}", index));
+ work_items.push(work);
+ }
+
+ // Process the work items, optionally using worker threads.
+ if sess.opts.cg.codegen_units == 1 {
+ run_work_singlethreaded(sess, trans.reachable.as_slice(), work_items);
+
+ if needs_crate_bitcode {
+ // The only bitcode file produced (aside from metadata) was
+ // "crate.0.bc". Rename to "crate.bc" since that's what
+ // `link_rlib` expects to find.
+ fs::copy(&crate_output.with_extension("0.bc"),
+ &crate_output.temp_path(OutputTypeBitcode)).unwrap();
+ }
+ } else {
+ run_work_multithreaded(sess, work_items, sess.opts.cg.codegen_units);
+
+ assert!(!needs_crate_bitcode,
+ "can't produce a crate bitcode file from multiple compilation units");
+ }
+
+ // All codegen is finished.
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(tm);
+ }
+
+ // Produce final compile outputs.
+
+ let copy_if_one_unit = |ext: &str, output_type: OutputType| {
+ // Three cases:
+ if sess.opts.cg.codegen_units == 1 {
+ // 1) Only one codegen unit. In this case it's trivial to copy
+ // `foo.0.x` to `foo.x`.
+ fs::copy(&crate_output.with_extension(ext),
+ &crate_output.path(output_type)).unwrap();
+ if !sess.opts.cg.save_temps {
+ // The user just wants `foo.x`, not `foo.0.x`.
+ remove(sess, &crate_output.with_extension(ext));
+ }
+ } else {
+ if crate_output.single_output_file.is_some() {
+ // 2) Multiple codegen units, with `-o some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(format!("ignoring -o because multiple .{} files were produced",
+ ext).as_slice());
+ } else {
+ // 3) Multiple codegen units, but no `-o some_name`. We
+ // just leave the `foo.0.x` files in place.
+ // (We don't have to do any work in this case.)
+ }
+ }
+ };
+
+ let link_obj = |output_path: &Path| {
+ // Running `ld -r` on a single input is kind of pointless.
+ if sess.opts.cg.codegen_units == 1 {
+ fs::copy(&crate_output.with_extension("0.o"),
+ output_path).unwrap();
+ // Leave the .0.o file around, to mimic the behavior of the normal
+ // code path.
+ return;
+ }
+
+ // Some builds of MinGW GCC will pass --force-exe-suffix to ld, which
+ // will automatically add a .exe extension if the extension is not
+ // already .exe or .dll. To ensure consistent behavior on Windows, we
+ // add the .exe suffix explicitly and then rename the output file to
+ // the desired path. This will give the correct behavior whether or
+ // not GCC adds --force-exe-suffix.
+ let windows_output_path =
+ if sess.targ_cfg.os == abi::OsWindows {
+ Some(output_path.with_extension("o.exe"))
+ } else {
+ None
+ };
+
+ let pname = get_cc_prog(sess);
+ let mut cmd = Command::new(pname.as_slice());
+
+ cmd.args(sess.targ_cfg.target_strs.cc_args.as_slice());
+ cmd.arg("-nostdlib");
+
+ for index in range(0, trans.modules.len()) {
+ cmd.arg(crate_output.with_extension(format!("{}.o", index).as_slice()));
+ }
+
+ cmd.arg("-r")
+ .arg("-o")
+ .arg(windows_output_path.as_ref().unwrap_or(output_path));
+
+ if (sess.opts.debugging_opts & config::PRINT_LINK_ARGS) != 0 {
+ println!("{}", &cmd);
+ }
+
+ cmd.stdin(::std::io::process::Ignored)
+ .stdout(::std::io::process::InheritFd(1))
+ .stderr(::std::io::process::InheritFd(2));
+ match cmd.status() {
+ Ok(_) => {},
+ Err(e) => {
+ sess.err(format!("could not exec the linker `{}`: {}",
+ pname,
+ e).as_slice());
+ sess.abort_if_errors();
+ },
+ }
+
+ match windows_output_path {
+ Some(ref windows_path) => {
+ fs::rename(windows_path, output_path).unwrap();
+ },
+ None => {
+ // The file is already named according to `output_path`.
+ }
+ }
+ };
+
+ // Flag to indicate whether the user explicitly requested bitcode.
+ // Otherwise, we produced it only as a temporary output, and will need
+ // to get rid of it.
+ // FIXME: Since we don't support LTO anyway, maybe we can avoid
+ // producing the temporary .0.bc's in the first place?
+ let mut save_bitcode = false;
+ for output_type in output_types.iter() {
+ match *output_type {
+ OutputTypeBitcode => {
+ save_bitcode = true;
+ copy_if_one_unit("0.bc", OutputTypeBitcode);
+ },
+ OutputTypeLlvmAssembly => { copy_if_one_unit("0.ll", OutputTypeLlvmAssembly); },
+ OutputTypeAssembly => { copy_if_one_unit("0.s", OutputTypeAssembly); },
+ OutputTypeObject => { link_obj(&crate_output.path(OutputTypeObject)); },
+ OutputTypeExe => {
+ // If OutputTypeObject is already in the list, then
+ // `crate.o` will be handled by the OutputTypeObject case.
+ // Otherwise, we need to create the temporary object so we
+ // can run the linker.
+ if !sess.opts.output_types.contains(&OutputTypeObject) {
+ link_obj(&crate_output.temp_path(OutputTypeObject));
+ }
+ },
+ }
+ }
+ let save_bitcode = save_bitcode;
+
+ // Clean up unwanted temporary files.
+
+ // We create the following files by default:
+ // - crate.0.bc
+ // - crate.0.o
+ // - crate.metadata.bc
+ // - crate.metadata.o
+ // - crate.o (linked from crate.##.o)
+ // - crate.bc (copied from crate.0.bc)
+ // We may create additional files if requested by the user (through
+ // `-C save-temps` or `--emit=` flags).
+
+ if !sess.opts.cg.save_temps {
+ // Remove the temporary .0.o objects. If the user didn't
+ // explicitly request bitcode (with --emit=bc), we must remove
+ // .0.bc as well. (We don't touch the crate.bc that may have been
+ // produced earlier.)
+ for i in range(0, trans.modules.len()) {
+ if modules_config.emit_obj {
+ let ext = format!("{}.o", i);
+ remove(sess, &crate_output.with_extension(ext.as_slice()));
+ }
+
+ if modules_config.emit_bc && !save_bitcode {
+ let ext = format!("{}.bc", i);
+ remove(sess, &crate_output.with_extension(ext.as_slice()));
+ }
+ }
+
+ if metadata_config.emit_bc && !save_bitcode {
+ remove(sess, &crate_output.with_extension("metadata.bc"));
+ }
+ }
+
+ // We leave the following files around by default:
+ // - crate.o
+ // - crate.metadata.o
+ // - crate.bc
+ // These are used in linking steps and will be cleaned up afterward.
+
+ // FIXME: time_llvm_passes support - does this use a global context or
+ // something?
+ //if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
+}
+
+type WorkItem = proc(&CodegenContext):Send;
+
+fn build_work_item(sess: &Session,
+ mtrans: ModuleTranslation,
+ config: ModuleConfig,
+ output_names: OutputFilenames,
+ name_extra: String) -> WorkItem {
+ let mut config = config;
+ config.tm = create_target_machine(sess);
+
+ proc(cgcx) unsafe {
+ optimize_and_codegen(cgcx, mtrans, config, name_extra, output_names);
+ }
+}
+
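+// Note that `build_work_item` installs a freshly created TargetMachine in
+// each config: `optimize_and_codegen` disposes its machine when it
+// finishes, and the shared `tm` made in `run_passes` is disposed there
+// separately.
+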
+fn run_work_singlethreaded(sess: &Session,
+ reachable: &[String],
+ work_items: Vec<WorkItem>) {
+ let cgcx = CodegenContext::new_with_session(sess, reachable);
+ let mut work_items = work_items;
+
+ // Since we're running single-threaded, we can pass the session to
+ // the proc, allowing `optimize_and_codegen` to perform LTO.
+ for work in Unfold::new((), |_| work_items.pop()) {
+ work(&cgcx);
+ }
+}
+
+fn run_work_multithreaded(sess: &Session,
+ work_items: Vec<WorkItem>,
+ num_workers: uint) {
+ // Run some workers to process the work items.
+ let work_items_arc = Arc::new(Mutex::new(work_items));
+ let mut diag_emitter = SharedEmitter::new();
+ let mut futures = Vec::with_capacity(num_workers);
+
+ for i in range(0, num_workers) {
+ let work_items_arc = work_items_arc.clone();
+ let diag_emitter = diag_emitter.clone();
+
+ let future = TaskBuilder::new().named(format!("codegen-{}", i)).try_future(proc() {
+ let diag_handler = mk_handler(box diag_emitter);
+
+ // Must construct cgcx inside the proc because it has non-Send
+ // fields.
+ let cgcx = CodegenContext::new(&diag_handler);
+
+ loop {
+ // Avoid holding the lock for the entire duration of the match.
+ let maybe_work = work_items_arc.lock().pop();
+ match maybe_work {
+ Some(work) => {
+ work(&cgcx);
+
+ // Make sure to fail the worker so the main thread can
+ // tell that there were errors.
+ cgcx.handler.abort_if_errors();
+ }
+ None => break,
+ }
+ }
+ });
+ futures.push(future);
+ }
+
+ let mut failed = false;
+ for future in futures.move_iter() {
+ match future.unwrap() {
+ Ok(()) => {},
+ Err(_) => {
+ failed = true;
+ },
+ }
+ // Display any new diagnostics.
+ diag_emitter.dump(sess.diagnostic().handler());
+ }
+ if failed {
+ sess.fatal("aborting due to worker thread failure");
+ }
+}
+
+pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
+ let pname = get_cc_prog(sess);
+ let mut cmd = Command::new(pname.as_slice());
+
+ cmd.arg("-c").arg("-o").arg(outputs.path(OutputTypeObject))
+ .arg(outputs.temp_path(OutputTypeAssembly));
+ debug!("{}", &cmd);
+
+ match cmd.output() {
+ Ok(prog) => {
+ if !prog.status.success() {
+ sess.err(format!("linking with `{}` failed: {}",
+ pname,
+ prog.status).as_slice());
+ sess.note(format!("{}", &cmd).as_slice());
+ let mut note = prog.error.clone();
+ note.push_all(prog.output.as_slice());
+ sess.note(str::from_utf8(note.as_slice()).unwrap());
+ sess.abort_if_errors();
+ }
+ },
+ Err(e) => {
+ sess.err(format!("could not exec the linker `{}`: {}",
+ pname,
+ e).as_slice());
+ sess.abort_if_errors();
+ }
+ }
+}
+
+unsafe fn configure_llvm(sess: &Session) {
+ use std::sync::{Once, ONCE_INIT};
+ static mut INIT: Once = ONCE_INIT;
+
+ // Copy what clang does by turning on loop vectorization at O2 and
+ // slp vectorization at O3
+ let vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
+ (sess.opts.optimize == config::Default ||
+ sess.opts.optimize == config::Aggressive);
+ let vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
+ sess.opts.optimize == config::Aggressive;
+
+ let mut llvm_c_strs = Vec::new();
+ let mut llvm_args = Vec::new();
+ {
+ let add = |arg: &str| {
+ let s = arg.to_c_str();
+ llvm_args.push(s.as_ptr());
+ llvm_c_strs.push(s);
+ };
+ add("rustc"); // fake program name
+ if vectorize_loop { add("-vectorize-loops"); }
+ if vectorize_slp { add("-vectorize-slp"); }
+ if sess.time_llvm_passes() { add("-time-passes"); }
+ if sess.print_llvm_passes() { add("-debug-pass=Structure"); }
+
+ for arg in sess.opts.cg.llvm_args.iter() {
+ add((*arg).as_slice());
+ }
+ }
+
+ INIT.doit(|| {
+ llvm::LLVMInitializePasses();
+
+ // Only initialize the platforms supported by Rust here, because
+ // using --llvm-root will have multiple platforms that rustllvm
+ // doesn't actually link to and it's pointless to put target info
+ // into the registry that Rust cannot generate machine code for.
+ llvm::LLVMInitializeX86TargetInfo();
+ llvm::LLVMInitializeX86Target();
+ llvm::LLVMInitializeX86TargetMC();
+ llvm::LLVMInitializeX86AsmPrinter();
+ llvm::LLVMInitializeX86AsmParser();
+
+ llvm::LLVMInitializeARMTargetInfo();
+ llvm::LLVMInitializeARMTarget();
+ llvm::LLVMInitializeARMTargetMC();
+ llvm::LLVMInitializeARMAsmPrinter();
+ llvm::LLVMInitializeARMAsmParser();
+
+ llvm::LLVMInitializeMipsTargetInfo();
+ llvm::LLVMInitializeMipsTarget();
+ llvm::LLVMInitializeMipsTargetMC();
+ llvm::LLVMInitializeMipsAsmPrinter();
+ llvm::LLVMInitializeMipsAsmParser();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
+ llvm_args.as_ptr());
+ });
+}
+
+unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
+ mpm: llvm::PassManagerRef,
+ llmod: ModuleRef,
+ opt: llvm::CodeGenOptLevel,
+ no_builtins: bool) {
+ // Create the PassManagerBuilder for LLVM. We configure it with
+ // reasonable defaults and prepare it to actually populate the pass
+ // manager.
+ let builder = llvm::LLVMPassManagerBuilderCreate();
+ match opt {
+ llvm::CodeGenLevelNone => {
+ // Don't add lifetime intrinsics at O0
+ llvm::LLVMRustAddAlwaysInlinePass(builder, false);
+ }
+ llvm::CodeGenLevelLess => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, true);
+ }
+ // numeric values copied from clang
+ llvm::CodeGenLevelDefault => {
+ llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
+ 225);
+ }
+ llvm::CodeGenLevelAggressive => {
+ llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
+ 275);
+ }
+ }
+ llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt as c_uint);
+ llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, no_builtins);
+
+ // Use the builder to populate the function/module pass managers.
+ llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
+ llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
+ llvm::LLVMPassManagerBuilderDispose(builder);
+
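+ // At -O2 and above, also run LLVM's `mergefunc` pass, which merges
+ // identical functions (common after monomorphization).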
+ match opt {
+ llvm::CodeGenLevelDefault | llvm::CodeGenLevelAggressive => {
+ "mergefunc".with_c_str(|s| llvm::LLVMRustAddPass(mpm, s));
+ }
+ _ => {}
+ };
+}
E0146,
E0147,
E0148,
- E0149,
- E0150,
E0151,
E0152,
E0153,
use driver::session::Session;
use back;
-use back::link;
+use back::write;
use back::target_strs;
use back::{arm, x86, x86_64, mips, mipsel};
use lint;
pub debuginfo: DebugInfoLevel,
pub lint_opts: Vec<(String, lint::Level)>,
pub describe_lints: bool,
- pub output_types: Vec<back::link::OutputType> ,
+ pub output_types: Vec<back::write::OutputType> ,
// This was mutable for rustpkg, which updates search paths based on the
// parsed code. It remains mutable in case its replacement wants to use
// this.
}
}
+ fn parse_uint(slot: &mut uint, v: Option<&str>) -> bool {
+ use std::from_str::FromStr;
+ match v.and_then(FromStr::from_str) {
+ Some(i) => { *slot = i; true },
+ None => false
+ }
+ }
}
) )
"metadata to mangle symbol names with"),
extra_filename: String = ("".to_string(), parse_string,
"extra data to put in each output filename"),
+ codegen_units: uint = (1, parse_uint,
+ "divide crate into N units to optimize in parallel"),
)
pub fn build_codegen_options(matches: &getopts::Matches) -> CodegenOptions
for unparsed_output_type in unparsed_output_types.iter() {
for part in unparsed_output_type.as_slice().split(',') {
let output_type = match part.as_slice() {
- "asm" => link::OutputTypeAssembly,
- "ir" => link::OutputTypeLlvmAssembly,
- "bc" => link::OutputTypeBitcode,
- "obj" => link::OutputTypeObject,
- "link" => link::OutputTypeExe,
+ "asm" => write::OutputTypeAssembly,
+ "ir" => write::OutputTypeLlvmAssembly,
+ "bc" => write::OutputTypeBitcode,
+ "obj" => write::OutputTypeObject,
+ "link" => write::OutputTypeExe,
_ => {
early_error(format!("unknown emission type: `{}`",
part).as_slice())
output_types.as_mut_slice().sort();
output_types.dedup();
if output_types.len() == 0 {
- output_types.push(link::OutputTypeExe);
+ output_types.push(write::OutputTypeExe);
}
let sysroot_opt = matches.opt_str("sysroot").map(|m| Path::new(m));
use back::link;
+use back::write;
use driver::session::Session;
use driver::config;
use front;
middle::save::process_crate(sess, krate, analysis, odir));
}
+pub struct ModuleTranslation {
+ pub llcx: ContextRef,
+ pub llmod: ModuleRef,
+}
+
pub struct CrateTranslation {
- pub context: ContextRef,
- pub module: ModuleRef,
- pub metadata_module: ModuleRef,
+ pub modules: Vec<ModuleTranslation>,
+ pub metadata_module: ModuleTranslation,
pub link: LinkMeta,
pub metadata: Vec<u8>,
pub reachable: Vec<String>,
trans: &CrateTranslation,
outputs: &OutputFilenames) {
if sess.opts.cg.no_integrated_as {
- let output_type = link::OutputTypeAssembly;
+ let output_type = write::OutputTypeAssembly;
time(sess.time_passes(), "LLVM passes", (), |_|
- link::write::run_passes(sess, trans, [output_type], outputs));
+ write::run_passes(sess, trans, [output_type], outputs));
- link::write::run_assembler(sess, outputs);
+ write::run_assembler(sess, outputs);
// Remove assembly source, unless --save-temps was specified
if !sess.opts.cg.save_temps {
- fs::unlink(&outputs.temp_path(link::OutputTypeAssembly)).unwrap();
+ fs::unlink(&outputs.temp_path(write::OutputTypeAssembly)).unwrap();
}
} else {
time(sess.time_passes(), "LLVM passes", (), |_|
- link::write::run_passes(sess,
- trans,
- sess.opts.output_types.as_slice(),
- outputs));
+ write::run_passes(sess,
+ trans,
+ sess.opts.output_types.as_slice(),
+ outputs));
}
}
}
pub fn stop_after_phase_5(sess: &Session) -> bool {
- if !sess.opts.output_types.iter().any(|&i| i == link::OutputTypeExe) {
+ if !sess.opts.output_types.iter().any(|&i| i == write::OutputTypeExe) {
debug!("not building executable, returning early from compile_input");
return true;
}
for output_type in sess.opts.output_types.iter() {
let file = outputs.path(*output_type);
match *output_type {
- link::OutputTypeExe => {
+ write::OutputTypeExe => {
for output in sess.crate_types.borrow().iter() {
let p = link::filename_for_input(sess, *output,
id, &file);
session.opts.cg.metadata.clone()
}
+#[deriving(Clone)]
pub struct OutputFilenames {
pub out_directory: Path,
pub out_filestem: String,
}
impl OutputFilenames {
- pub fn path(&self, flavor: link::OutputType) -> Path {
+ pub fn path(&self, flavor: write::OutputType) -> Path {
match self.single_output_file {
Some(ref path) => return path.clone(),
None => {}
self.temp_path(flavor)
}
- pub fn temp_path(&self, flavor: link::OutputType) -> Path {
+ pub fn temp_path(&self, flavor: write::OutputType) -> Path {
let base = self.out_directory.join(self.filestem());
match flavor {
- link::OutputTypeBitcode => base.with_extension("bc"),
- link::OutputTypeAssembly => base.with_extension("s"),
- link::OutputTypeLlvmAssembly => base.with_extension("ll"),
- link::OutputTypeObject => base.with_extension("o"),
- link::OutputTypeExe => base,
+ write::OutputTypeBitcode => base.with_extension("bc"),
+ write::OutputTypeAssembly => base.with_extension("s"),
+ write::OutputTypeLlvmAssembly => base.with_extension("ll"),
+ write::OutputTypeObject => base.with_extension("o"),
+ write::OutputTypeExe => base,
}
}
pub mod link;
pub mod lto;
+ pub mod write;
}
}
pub fn get_lint_groups<'t>(&'t self) -> Vec<(&'static str, Vec<LintId>, bool)> {
- self.lint_groups.iter().map(|(k, &(ref v, b))| (*k, v.clone(), b)).collect()
+ self.lint_groups.iter().map(|(k, v)| (*k,
+ v.ref0().clone(),
+ *v.ref1())).collect()
}
pub fn register_pass(&mut self, sess: Option<&Session>,
match self.by_name.find_equiv(&lint_name.as_slice()) {
Some(&lint_id) => self.set_level(lint_id, (level, CommandLine)),
None => {
- match self.lint_groups.iter().map(|(&x, &(ref y, _))| (x, y.clone()))
+ match self.lint_groups.iter().map(|(&x, pair)| (x, pair.ref0().clone()))
.collect::<HashMap<&'static str, Vec<LintId>>>()
.find_equiv(&lint_name.as_slice()) {
Some(v) => {
IITraitItemRef(local_def(parent_id),
RequiredInlinedTraitItemRef(
&*ast_method)));
- } else {
+ }
+ if !any_types {
encode_symbol(ecx, rbml_w, m.def_id.node);
}
encode_method_argument_names(rbml_w, &*ast_method.pe_fn_decl());
encode_attributes(rbml_w, item.attrs.as_slice());
if tps_len > 0u || should_inline(item.attrs.as_slice()) {
encode_inlined_item(ecx, rbml_w, IIItemRef(item));
- } else {
+ }
+ if tps_len == 0 {
encode_symbol(ecx, rbml_w, item.id);
}
encode_visibility(rbml_w, vis);
encode_name(rbml_w, nitem.ident.name);
if abi == abi::RustIntrinsic {
encode_inlined_item(ecx, rbml_w, IIForeignRef(nitem));
- } else {
- encode_symbol(ecx, rbml_w, nitem.id);
}
+ encode_symbol(ecx, rbml_w, nitem.id);
}
ForeignItemStatic(_, mutbl) => {
if mutbl {
fn check_item(cx: &mut Context, item: &Item) {
if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
- ItemImpl(_, Some(ref trait_ref), ref self_type, _) => {
- check_impl_of_trait(cx, item, trait_ref, &**self_type);
-
+ ItemImpl(_, ref trait_ref, ref self_type, _) => {
let parameter_environment =
ParameterEnvironment::for_item(cx.tcx, item.id);
cx.parameter_environments.push(parameter_environment);
item.span,
ty::node_id_to_type(cx.tcx, item.id));
- // Check bounds on the trait ref.
- match ty::impl_trait_ref(cx.tcx,
- ast_util::local_def(item.id)) {
- None => {}
- Some(trait_ref) => {
- check_bounds_on_structs_or_enums_in_trait_ref(
- cx,
- item.span,
- &*trait_ref);
+ match trait_ref {
+ &Some(ref trait_ref) => {
+ check_impl_of_trait(cx, item, trait_ref, &**self_type);
+
+ // Check bounds on the trait ref.
+ match ty::impl_trait_ref(cx.tcx,
+ ast_util::local_def(item.id)) {
+ None => {}
+ Some(trait_ref) => {
+ check_bounds_on_structs_or_enums_in_trait_ref(
+ cx,
+ item.span,
+ &*trait_ref);
+ }
+ }
}
+ &None => {}
}
drop(cx.parameter_environments.pop());
NoSendItem, "no_send_bound", no_send_bound;
NoCopyItem, "no_copy_bound", no_copy_bound;
- NoSyncItem, "no_share_bound", no_share_bound;
+ NoSyncItem, "no_sync_bound", no_sync_bound;
ManagedItem, "managed_bound", managed_bound;
IteratorItem, "iterator", iterator;
}
ast::PatIdent(..) | ast::PatEnum(..) | ast::PatStruct(..) => {
// This is either an enum variant or a variable binding.
- let opt_def = ccx.tcx.def_map.borrow().find_copy(&cur.id);
+ let opt_def = ccx.tcx().def_map.borrow().find_copy(&cur.id);
match opt_def {
Some(def::DefVariant(enum_id, var_id, _)) => {
let variant = ty::enum_variant_with_id(ccx.tcx(), enum_id, var_id);
/// Decides how to represent a given type.
pub fn represent_type(cx: &CrateContext, t: ty::t) -> Rc<Repr> {
debug!("Representing: {}", ty_to_string(cx.tcx(), t));
- match cx.adt_reprs.borrow().find(&t) {
+ match cx.adt_reprs().borrow().find(&t) {
Some(repr) => return repr.clone(),
None => {}
}
let repr = Rc::new(represent_type_uncached(cx, t));
debug!("Represented as: {:?}", repr)
- cx.adt_reprs.borrow_mut().insert(t, repr.clone());
+ cx.adt_reprs().borrow_mut().insert(t, repr.clone());
repr
}
attempts = choose_shortest;
},
attr::ReprPacked => {
- cx.tcx.sess.bug("range_to_inttype: found ReprPacked on an enum");
+ cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
}
}
for &ity in attempts.iter() {
use back::{link, abi};
use driver::config;
use driver::config::{NoDebugInfo, FullDebugInfo};
-use driver::driver::{CrateAnalysis, CrateTranslation};
+use driver::driver::{CrateAnalysis, CrateTranslation, ModuleTranslation};
use driver::session::Session;
use lint;
use llvm::{BasicBlockRef, ModuleRef, ValueRef, Vector, get_param};
use middle::trans::callee;
use middle::trans::cleanup::{CleanupMethods, ScopeId};
use middle::trans::cleanup;
-use middle::trans::common::{Block, C_bool, C_bytes, C_i32, C_integral, C_nil};
-use middle::trans::common::{C_null, C_struct, C_u64, C_u8, C_uint, C_undef};
+use middle::trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_integral, C_nil};
+use middle::trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_uint, C_undef};
use middle::trans::common::{CrateContext, ExternMap, FunctionContext};
use middle::trans::common::{NodeInfo, Result, SubstP, monomorphize_type};
use middle::trans::common::{node_id_type, param_substs, return_type_is_void};
use middle::trans::common::{type_is_zero_size, val_ty};
use middle::trans::common;
use middle::trans::consts;
+use middle::trans::context::SharedCrateContext;
use middle::trans::controlflow;
use middle::trans::datum;
use middle::trans::debuginfo;
use middle::trans::inline;
use middle::trans::intrinsic;
use middle::trans::machine;
-use middle::trans::machine::{llsize_of, llsize_of_real};
+use middle::trans::machine::{llsize_of, llsize_of_real, llalign_of_min};
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::tvec;
use libc::{c_uint, uint64_t};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
+use std::collections::HashSet;
use std::rc::Rc;
use std::{i8, i16, i32, i64};
use syntax::abi::{X86, X86_64, Arm, Mips, Mipsel, Rust, RustCall};
}
pub struct StatRecorder<'a> {
- ccx: &'a CrateContext,
+ ccx: &'a CrateContext<'a>,
name: Option<String>,
start: u64,
istart: uint,
} else {
0
};
- let istart = ccx.stats.n_llvm_insns.get();
+ let istart = ccx.stats().n_llvm_insns.get();
StatRecorder {
ccx: ccx,
name: Some(name),
if self.ccx.sess().trans_stats() {
let end = time::precise_time_ns();
let elapsed = ((end - self.start) / 1_000_000) as uint;
- let iend = self.ccx.stats.n_llvm_insns.get();
- self.ccx.stats.fn_stats.borrow_mut().push((self.name.take().unwrap(),
+ let iend = self.ccx.stats().n_llvm_insns.get();
+ self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
elapsed,
iend - self.istart));
- self.ccx.stats.n_fns.set(self.ccx.stats.n_fns.get() + 1);
+ self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
// Reset LLVM insn count to avoid compound costs.
- self.ccx.stats.n_llvm_insns.set(self.istart);
+ self.ccx.stats().n_llvm_insns.set(self.istart);
}
}
}
let llfn: ValueRef = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMGetOrInsertFunction(ccx.llmod, buf, ty.to_ref())
+ llvm::LLVMGetOrInsertFunction(ccx.llmod(), buf, ty.to_ref())
}
});
_ => {}
}
- if ccx.tcx.sess.opts.cg.no_redzone {
+ if ccx.tcx().sess.opts.cg.no_redzone {
unsafe {
llvm::LLVMAddFunctionAttribute(llfn,
llvm::FunctionIndex as c_uint,
}
fn get_extern_rust_fn(ccx: &CrateContext, fn_ty: ty::t, name: &str, did: ast::DefId) -> ValueRef {
- match ccx.externs.borrow().find_equiv(&name) {
+ match ccx.externs().borrow().find_equiv(&name) {
Some(n) => return *n,
None => ()
}
set_llvm_fn_attrs(attrs.as_slice(), f)
});
- ccx.externs.borrow_mut().insert(name.to_string(), f);
+ ccx.externs().borrow_mut().insert(name.to_string(), f);
f
}
let unboxed_closure_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_id);
match unboxed_closure.kind {
ty::FnUnboxedClosureKind => {
- ty::mk_imm_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnMutUnboxedClosureKind => {
- ty::mk_mut_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnOnceUnboxedClosureKind => unboxed_closure_type,
}
pub fn kind_for_unboxed_closure(ccx: &CrateContext, closure_id: ast::DefId)
-> ty::UnboxedClosureKind {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
unboxed_closures.get(&closure_id).kind
}
(f.sig.inputs.clone(), f.sig.output, f.abi, Some(Type::i8p(ccx)))
}
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_did);
let function_type = unboxed_closure.closure_type.clone();
let self_type = self_type_for_unboxed_closure(ccx, closure_did);
let llfty = type_of_rust_fn(ccx, env, inputs.as_slice(), output, abi);
debug!("decl_rust_fn(input count={},type={})",
inputs.len(),
- ccx.tn.type_to_string(llfty));
+ ccx.tn().type_to_string(llfty));
let llfn = decl_fn(ccx, name, llvm::CCallConv, llfty, output);
let attrs = get_fn_llvm_attributes(ccx, fn_ty);
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
+pub fn malloc_raw_dyn_proc<'a>(
+ bcx: &'a Block<'a>,
+ t: ty::t, alloc_fn: LangItem) -> Result<'a> {
+ let _icx = push_ctxt("malloc_raw_dyn_proc");
+ let ccx = bcx.ccx();
+
+ let langcall = require_alloc_fn(bcx, t, alloc_fn);
+
+ // Grab the LLVM TypeRef corresponding to ptr_ty.
+ let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
+ let ptr_llty = type_of(ccx, ptr_ty);
+
+ let llty = type_of(bcx.ccx(), t);
+ let size = llsize_of(bcx.ccx(), llty);
+ let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
+
+ // Allocate space:
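+ // The allocator lang item takes (drop_glue, size, align) and
+ // returns a pointer to the new box.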
+ let drop_glue = glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t));
+ let r = callee::trans_lang_call(
+ bcx,
+ langcall,
+ [
+ PointerCast(bcx, drop_glue, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to()),
+ size,
+ llalign
+ ],
+ None);
+ Result::new(r.bcx, PointerCast(r.bcx, r.val, ptr_llty))
+}
+
+
pub fn malloc_raw_dyn_managed<'a>(
bcx: &'a Block<'a>,
t: ty::t,
alloc_fn: LangItem,
size: ValueRef)
-> Result<'a> {
- let _icx = push_ctxt("malloc_raw_managed");
+ let _icx = push_ctxt("malloc_raw_dyn_managed");
let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn);
// Type descriptor and type glue stuff
pub fn get_tydesc(ccx: &CrateContext, t: ty::t) -> Rc<tydesc_info> {
- match ccx.tydescs.borrow().find(&t) {
+ match ccx.tydescs().borrow().find(&t) {
Some(inf) => return inf.clone(),
_ => { }
}
- ccx.stats.n_static_tydescs.set(ccx.stats.n_static_tydescs.get() + 1u);
+ ccx.stats().n_static_tydescs.set(ccx.stats().n_static_tydescs.get() + 1u);
let inf = Rc::new(glue::declare_tydesc(ccx, t));
- ccx.tydescs.borrow_mut().insert(t, inf.clone());
+ ccx.tydescs().borrow_mut().insert(t, inf.clone());
inf
}
// Double-check that we never ask LLVM to declare the same symbol twice. It
// silently mangles such symbols, breaking our linkage model.
pub fn note_unique_llvm_symbol(ccx: &CrateContext, sym: String) {
- if ccx.all_llvm_symbols.borrow().contains(&sym) {
+ if ccx.all_llvm_symbols().borrow().contains(&sym) {
ccx.sess().bug(format!("duplicate LLVM symbol: {}", sym).as_slice());
}
- ccx.all_llvm_symbols.borrow_mut().insert(sym);
+ ccx.all_llvm_symbols().borrow_mut().insert(sym);
}
let dtor_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[glue::get_drop_glue_type(ccx, t)], ty::mk_nil());
get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name.as_slice(),
llvm::CCallConv,
llty,
}
_ => {
let llty = type_of(ccx, t);
- get_extern_const(&mut *ccx.externs.borrow_mut(),
- ccx.llmod,
+ get_extern_const(&mut *ccx.externs().borrow_mut(),
+ ccx.llmod(),
name.as_slice(),
llty)
}
let memcpy = ccx.get_intrinsic(&key);
let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
- let size = IntCast(cx, n_bytes, ccx.int_type);
+ let size = IntCast(cx, n_bytes, ccx.int_type());
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
Call(cx, memcpy, [dst_ptr, src_ptr, size, align, volatile], None);
if id == -1 {
"".to_string()
} else {
- ccx.tcx.map.path_to_string(id).to_string()
+ ccx.tcx().map.path_to_string(id).to_string()
},
id, param_substs.repr(ccx.tcx()));
is_unboxed_closure: IsUnboxedClosureFlag,
maybe_load_env: <'a>|&'a Block<'a>, ScopeId|
-> &'a Block<'a>) {
- ccx.stats.n_closures.set(ccx.stats.n_closures.get() + 1);
+ ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
set_uwtable(llfndecl);
ty_to_string(ccx.tcx(), *monomorphized_arg_type));
}
debug!("trans_closure: function lltype: {}",
- bcx.fcx.ccx.tn.val_to_string(bcx.fcx.llfn));
+ bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
let arg_datums = if abi != RustCall {
create_datums_for_fn_args(&fcx,
param_substs: ¶m_substs,
id: ast::NodeId,
attrs: &[ast::Attribute]) {
- let _s = StatRecorder::new(ccx, ccx.tcx.map.path_to_string(id).to_string());
+ let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
let _icx = push_ctxt("trans_fn");
let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
dest: expr::Dest) -> Result<'a> {
let ccx = bcx.fcx.ccx;
- let tcx = &ccx.tcx;
+ let tcx = ccx.tcx();
let result_ty = match ty::get(ctor_ty).sty {
ty::ty_bare_fn(ref bft) => bft.sig.output,
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
- let levels = ccx.tcx.node_lint_levels.borrow();
+ let levels = ccx.tcx().node_lint_levels.borrow();
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCE);
let lvlsrc = match levels.find(&(id, lint_id)) {
None | Some(&(lint::Allow, _)) => return,
}
pub struct TransItemVisitor<'a> {
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a>,
}
impl<'a> Visitor<()> for TransItemVisitor<'a> {
}
}
+/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
+pub enum ValueOrigin {
+ /// The LLVM `Value` is in this context because the corresponding item was
+ /// assigned to the current compilation unit.
+ OriginalTranslation,
+ /// The `Value`'s corresponding item was assigned to some other compilation
+ /// unit, but the `Value` was translated in this context anyway because the
+ /// item is marked `#[inline]`.
+ InlinedCopy,
+}
+
+/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
+/// If the `llval` is the direct translation of a specific Rust item, `id`
+/// should be set to the `NodeId` of that item. (This mapping should be
+/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
+/// `None`.) `llval_origin` indicates whether `llval` is the translation of an
+/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
+/// assigned to a different compilation unit.
+pub fn update_linkage(ccx: &CrateContext,
+ llval: ValueRef,
+ id: Option<ast::NodeId>,
+ llval_origin: ValueOrigin) {
+ match llval_origin {
+ InlinedCopy => {
+ // `llval` is a translation of an item defined in a separate
+ // compilation unit. This only makes sense if there are at least
+ // two compilation units.
+ assert!(ccx.sess().opts.cg.codegen_units > 1);
+ // `llval` is a copy of something defined elsewhere, so use
+ // `AvailableExternallyLinkage` to avoid duplicating code in the
+ // output.
+ llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
+ return;
+ },
+ OriginalTranslation => {},
+ }
+
+ match id {
+ Some(id) if ccx.reachable().contains(&id) => {
+ llvm::SetLinkage(llval, llvm::ExternalLinkage);
+ },
+ _ => {
+ // `id` does not refer to an item in `ccx.reachable`.
+ if ccx.sess().opts.cg.codegen_units > 1 {
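+ // Another compilation unit may still reference this item, so
+ // keep it external for now; internalize_symbols() will demote
+ // it later if nothing else actually uses it.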
+ llvm::SetLinkage(llval, llvm::ExternalLinkage);
+ } else {
+ llvm::SetLinkage(llval, llvm::InternalLinkage);
+ }
+ },
+ }
+}
+
pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
let _icx = push_ctxt("trans_item");
+
+ let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
+
match item.node {
ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
if !generics.is_type_parameterized() {
- let llfn = get_item_val(ccx, item.id);
- if abi != Rust {
- foreign::trans_rust_fn_with_foreign_abi(ccx,
- &**decl,
- &**body,
- item.attrs.as_slice(),
- llfn,
- ¶m_substs::empty(),
- item.id,
- None);
- } else {
- trans_fn(ccx,
- &**decl,
- &**body,
- llfn,
- ¶m_substs::empty(),
- item.id,
- item.attrs.as_slice());
+ let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
+ // Ignore `trans_everywhere` for cross-crate inlined items
+ // (`from_external`). `trans_item` will be called once for each
+ // compilation unit that references the item, so it will still get
+ // translated everywhere it's needed.
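+ // For #[inline] items, `maybe_iter` yields every compilation unit
+ // (so each one gets a local copy it can inline); otherwise it yields
+ // only this one. `is_origin` marks the item's home unit.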
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ let llfn = get_item_val(ccx, item.id);
+ if abi != Rust {
+ foreign::trans_rust_fn_with_foreign_abi(ccx,
+ &**decl,
+ &**body,
+ item.attrs.as_slice(),
+ llfn,
+ ¶m_substs::empty(),
+ item.id,
+ None);
+ } else {
+ trans_fn(ccx,
+ &**decl,
+ &**body,
+ llfn,
+ ¶m_substs::empty(),
+ item.id,
+ item.attrs.as_slice());
+ }
+ update_linkage(ccx,
+ llfn,
+ Some(item.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
}
}
item.id);
}
ast::ItemMod(ref m) => {
- trans_mod(ccx, m);
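+ // `rotate()` hands back the next compilation unit in round-robin
+ // order, so module contents get spread across the available units.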
+ trans_mod(&ccx.rotate(), m);
}
ast::ItemEnum(ref enum_definition, _) => {
enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
// Recurse on the expression to catch items in blocks
let mut v = TransItemVisitor{ ccx: ccx };
v.visit_expr(&**expr, ());
- consts::trans_const(ccx, m, item.id);
+
+ let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ consts::trans_const(ccx, m, item.id);
+
+ let g = get_item_val(ccx, item.id);
+ update_linkage(ccx,
+ g,
+ Some(item.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
+ }
+
// Do static_assert checking. It can't really be done much earlier
// because we need to get the value of the bool out of LLVM
if attr::contains_name(item.attrs.as_slice(), "static_assert") {
static");
}
- let v = ccx.const_values.borrow().get_copy(&item.id);
+ let v = ccx.const_values().borrow().get_copy(&item.id);
unsafe {
if !(llvm::LLVMConstIntGetZExtValue(v) != 0) {
ccx.sess().span_fatal(expr.span, "static assertion failed");
fn finish_register_fn(ccx: &CrateContext, sp: Span, sym: String, node_id: ast::NodeId,
llfn: ValueRef) {
- ccx.item_symbols.borrow_mut().insert(node_id, sym);
-
- if !ccx.reachable.contains(&node_id) {
- llvm::SetLinkage(llfn, llvm::InternalLinkage);
- }
+ ccx.item_symbols().borrow_mut().insert(node_id, sym);
// The stack exhaustion lang item shouldn't have a split stack because
// otherwise it would continue to be exhausted (bad), and both it and the
// eh_personality functions need to be externally linkable.
let def = ast_util::local_def(node_id);
- if ccx.tcx.lang_items.stack_exhausted() == Some(def) {
+ if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
unset_split_stack(llfn);
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
- if ccx.tcx.lang_items.eh_personality() == Some(def) {
+ if ccx.tcx().lang_items.eh_personality() == Some(def) {
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
ty::ty_bare_fn(ref f) => (f.sig.clone(), f.abi, false),
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let ref function_type = unboxed_closures.get(&closure_did)
.closure_type;
fn create_entry_fn(ccx: &CrateContext,
rust_main: ValueRef,
use_start_lang_item: bool) {
- let llfty = Type::func([ccx.int_type, Type::i8p(ccx).ptr_to()],
- &ccx.int_type);
+ let llfty = Type::func([ccx.int_type(), Type::i8p(ccx).ptr_to()],
+ &ccx.int_type());
let llfn = decl_cdecl_fn(ccx, "main", llfty, ty::mk_nil());
let llbb = "top".with_c_str(|buf| {
unsafe {
- llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, buf)
}
});
- let bld = ccx.builder.b;
+ let bld = ccx.raw_builder();
unsafe {
llvm::LLVMPositionBuilderAtEnd(bld, llbb);
let (start_fn, args) = if use_start_lang_item {
- let start_def_id = match ccx.tcx.lang_items.require(StartFnLangItem) {
+ let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
Ok(id) => id,
Err(s) => { ccx.sess().fatal(s.as_slice()); }
};
fn exported_name(ccx: &CrateContext, id: ast::NodeId,
ty: ty::t, attrs: &[ast::Attribute]) -> String {
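+ // Items inlined from another crate reuse the symbol recorded in that
+ // crate's metadata, so all references resolve to a single definition.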
+ match ccx.external_srcs().borrow().find(&id) {
+ Some(&did) => {
+ let sym = csearch::get_symbol(&ccx.sess().cstore, did);
+ debug!("found item {} in other crate...", sym);
+ return sym;
+ }
+ None => {}
+ }
+
match attr::first_attr_value_str_by_name(attrs, "export_name") {
// Use provided name
Some(name) => name.get().to_string(),
- _ => ccx.tcx.map.with_path(id, |mut path| {
+ _ => ccx.tcx().map.with_path(id, |mut path| {
if attr::contains_name(attrs, "no_mangle") {
// Don't mangle
path.last().unwrap().to_string()
pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
debug!("get_item_val(id=`{:?}`)", id);
- match ccx.item_vals.borrow().find_copy(&id) {
+ match ccx.item_vals().borrow().find_copy(&id) {
Some(v) => return v,
None => {}
}
- let mut foreign = false;
- let item = ccx.tcx.map.get(id);
+ let item = ccx.tcx().map.get(id);
let val = match item {
ast_map::NodeItem(i) => {
let ty = ty::node_id_to_type(ccx.tcx(), i.id);
// using the current crate's name/version
// information in the hash of the symbol
debug!("making {}", sym);
- let (sym, is_local) = {
- match ccx.external_srcs.borrow().find(&i.id) {
- Some(&did) => {
- debug!("but found in other crate...");
- (csearch::get_symbol(&ccx.sess().cstore,
- did), false)
- }
- None => (sym, true)
- }
- };
+ let is_local = !ccx.external_srcs().borrow().contains_key(&id);
// We need the translated value here, because for enums the
// LLVM type is not fully determined by the Rust type.
let (v, inlineable, _) = consts::const_expr(ccx, &**expr, is_local);
- ccx.const_values.borrow_mut().insert(id, v);
+ ccx.const_values().borrow_mut().insert(id, v);
let mut inlineable = inlineable;
unsafe {
let llty = llvm::LLVMTypeOf(v);
let g = sym.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty, buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty, buf)
});
- if !ccx.reachable.contains(&id) {
- llvm::SetLinkage(g, llvm::InternalLinkage);
- }
-
// Apply the `unnamed_addr` attribute if
// requested
if !ast_util::static_has_significant_address(
if !inlineable {
debug!("{} not inlined", sym);
- ccx.non_inlineable_statics.borrow_mut()
+ ccx.non_inlineable_statics().borrow_mut()
.insert(id);
}
- ccx.item_symbols.borrow_mut().insert(i.id, sym);
+ ccx.item_symbols().borrow_mut().insert(i.id, sym);
g
}
}
}
ast_map::NodeForeignItem(ni) => {
- foreign = true;
-
match ni.node {
ast::ForeignItemFn(..) => {
- let abi = ccx.tcx.map.get_foreign_abi(id);
+ let abi = ccx.tcx().map.get_foreign_abi(id);
let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
let name = foreign::link_name(&*ni);
foreign::register_foreign_item_fn(ccx, abi, ty,
};
assert!(args.len() != 0u);
let ty = ty::node_id_to_type(ccx.tcx(), id);
- let parent = ccx.tcx.map.get_parent(id);
- let enm = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let enm = ccx.tcx().map.expect_item(parent);
let sym = exported_name(ccx,
id,
ty,
}
Some(ctor_id) => ctor_id,
};
- let parent = ccx.tcx.map.get_parent(id);
- let struct_item = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let struct_item = ccx.tcx().map.expect_item(parent);
let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
let sym = exported_name(ccx,
id,
}
};
- // foreign items (extern fns and extern statics) don't have internal
- // linkage b/c that doesn't quite make sense. Otherwise items can
- // have internal linkage if they're not reachable.
- if !foreign && !ccx.reachable.contains(&id) {
- llvm::SetLinkage(val, llvm::InternalLinkage);
- }
+ // All LLVM globals and functions are initially created as external-linkage
+ // declarations. If `trans_item`/`trans_fn` later turns the declaration
+ // into a definition, it adjusts the linkage then (using `update_linkage`).
+ //
+ // The exception is foreign items, which have their linkage set inside the
+ // call to `foreign::register_*` above. We don't touch the linkage after
+ // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
+ // other item translation functions do).
- ccx.item_vals.borrow_mut().insert(id, val);
+ ccx.item_vals().borrow_mut().insert(id, val);
val
}
pub fn p2i(ccx: &CrateContext, v: ValueRef) -> ValueRef {
unsafe {
- return llvm::LLVMConstPtrToInt(v, ccx.int_type.to_ref());
+ return llvm::LLVMConstPtrToInt(v, ccx.int_type().to_ref());
}
}
-pub fn crate_ctxt_to_encode_parms<'r>(cx: &'r CrateContext, ie: encoder::EncodeInlinedItem<'r>)
+pub fn crate_ctxt_to_encode_parms<'r>(cx: &'r SharedCrateContext,
+ ie: encoder::EncodeInlinedItem<'r>)
-> encoder::EncodeParams<'r> {
encoder::EncodeParams {
diag: cx.sess().diagnostic(),
tcx: cx.tcx(),
- reexports2: &cx.exp_map2,
- item_symbols: &cx.item_symbols,
- non_inlineable_statics: &cx.non_inlineable_statics,
- link_meta: &cx.link_meta,
+ reexports2: cx.exp_map2(),
+ item_symbols: cx.item_symbols(),
+ non_inlineable_statics: cx.non_inlineable_statics(),
+ link_meta: cx.link_meta(),
cstore: &cx.sess().cstore,
encode_inlined_item: ie,
- reachable: &cx.reachable,
+ reachable: cx.reachable(),
}
}
-pub fn write_metadata(cx: &CrateContext, krate: &ast::Crate) -> Vec<u8> {
+pub fn write_metadata(cx: &SharedCrateContext, krate: &ast::Crate) -> Vec<u8> {
use flate;
let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
cx.sess().fatal("failed to compress metadata")
}
}.as_slice());
- let llmeta = C_bytes(cx, compressed.as_slice());
- let llconst = C_struct(cx, [llmeta], false);
+ let llmeta = C_bytes_in_context(cx.metadata_llcx(), compressed.as_slice());
+ let llconst = C_struct_in_context(cx.metadata_llcx(), [llmeta], false);
let name = format!("rust_metadata_{}_{}",
- cx.link_meta.crate_name,
- cx.link_meta.crate_hash);
+ cx.link_meta().crate_name,
+ cx.link_meta().crate_hash);
let llglobal = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(cx.metadata_llmod, val_ty(llconst).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf)
}
});
unsafe {
return metadata;
}
+/// Find any symbols that are defined in one compilation unit, but not declared
+/// in any other compilation unit, and are not reachable from other crates.
+/// Give these symbols internal linkage.
+fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<String>) {
+ use std::c_str::CString;
+
+ unsafe {
+ let mut declared = HashSet::new();
+
+ let iter_globals = |llmod| {
+ ValueIter {
+ cur: llvm::LLVMGetFirstGlobal(llmod),
+ step: llvm::LLVMGetNextGlobal,
+ }
+ };
+
+ let iter_functions = |llmod| {
+ ValueIter {
+ cur: llvm::LLVMGetFirstFunction(llmod),
+ step: llvm::LLVMGetNextFunction,
+ }
+ };
+
+ // Collect all external declarations in all compilation units.
+ for ccx in cx.iter() {
+ for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+ let linkage = llvm::LLVMGetLinkage(val);
+ // We only care about external declarations (not definitions)
+ // and available_externally definitions.
+ if !(linkage == llvm::ExternalLinkage as c_uint &&
+ llvm::LLVMIsDeclaration(val) != 0) &&
+ !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
+ continue
+ }
+
+ let name = CString::new(llvm::LLVMGetValueName(val), false);
+ declared.insert(name);
+ }
+ }
+
+ // Examine each external definition. If the definition is not used in
+ // any other compilation unit, and is not reachable from other crates,
+ // then give it internal linkage.
+ for ccx in cx.iter() {
+ for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+ // We only care about external definitions.
+ if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
+ llvm::LLVMIsDeclaration(val) == 0) {
+ continue
+ }
+
+ let name = CString::new(llvm::LLVMGetValueName(val), false);
+ if !declared.contains(&name) &&
+ !reachable.contains_equiv(&name.as_str().unwrap()) {
+ llvm::SetLinkage(val, llvm::InternalLinkage);
+ }
+ }
+ }
+ }
+
+
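+ // Walks an LLVM module's globals or functions, starting from `cur`
+ // and advancing with the LLVMGetNext* function passed in `step`.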
+ struct ValueIter {
+ cur: ValueRef,
+ step: unsafe extern "C" fn(ValueRef) -> ValueRef,
+ }
+
+ impl Iterator<ValueRef> for ValueIter {
+ fn next(&mut self) -> Option<ValueRef> {
+ let old = self.cur;
+ if !old.is_null() {
+ self.cur = unsafe { (self.step)(old) };
+ Some(old)
+ } else {
+ None
+ }
+ }
+ }
+}
+
pub fn trans_crate(krate: ast::Crate,
analysis: CrateAnalysis) -> (ty::ctxt, CrateTranslation) {
let CrateAnalysis { ty_cx: tcx, exp_map2, reachable, name, .. } = analysis;
let link_meta = link::build_link_meta(&tcx.sess, &krate, name);
- // Append ".rs" to crate name as LLVM module identifier.
- //
- // LLVM code generator emits a ".file filename" directive
- // for ELF backends. Value of the "filename" is set as the
- // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
- // crashes if the module identifier is same as other symbols
- // such as a function name in the module.
- // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
- let mut llmod_id = link_meta.crate_name.clone();
- llmod_id.push_str(".rs");
-
- let ccx = CrateContext::new(llmod_id.as_slice(), tcx, exp_map2,
- Sha256::new(), link_meta, reachable);
-
- // First, verify intrinsics.
- intrinsic::check_intrinsics(&ccx);
-
- // Next, translate the module.
+ let codegen_units = tcx.sess.opts.cg.codegen_units;
+ let shared_ccx = SharedCrateContext::new(link_meta.crate_name.as_slice(),
+ codegen_units,
+ tcx,
+ exp_map2,
+ Sha256::new(),
+ link_meta.clone(),
+ reachable);
+
{
- let _icx = push_ctxt("text");
- trans_mod(&ccx, &krate.module);
+ let ccx = shared_ccx.get_ccx(0);
+
+ // First, verify intrinsics.
+ intrinsic::check_intrinsics(&ccx);
+
+ // Next, translate the module.
+ {
+ let _icx = push_ctxt("text");
+ trans_mod(&ccx, &krate.module);
+ }
}
- glue::emit_tydescs(&ccx);
- if ccx.sess().opts.debuginfo != NoDebugInfo {
- debuginfo::finalize(&ccx);
+ for ccx in shared_ccx.iter() {
+ glue::emit_tydescs(&ccx);
+ if ccx.sess().opts.debuginfo != NoDebugInfo {
+ debuginfo::finalize(&ccx);
+ }
}
// Translate the metadata.
- let metadata = write_metadata(&ccx, &krate);
- if ccx.sess().trans_stats() {
+ let metadata = write_metadata(&shared_ccx, &krate);
+
+ if shared_ccx.sess().trans_stats() {
+ let stats = shared_ccx.stats();
println!("--- trans stats ---");
- println!("n_static_tydescs: {}", ccx.stats.n_static_tydescs.get());
- println!("n_glues_created: {}", ccx.stats.n_glues_created.get());
- println!("n_null_glues: {}", ccx.stats.n_null_glues.get());
- println!("n_real_glues: {}", ccx.stats.n_real_glues.get());
-
- println!("n_fns: {}", ccx.stats.n_fns.get());
- println!("n_monos: {}", ccx.stats.n_monos.get());
- println!("n_inlines: {}", ccx.stats.n_inlines.get());
- println!("n_closures: {}", ccx.stats.n_closures.get());
+ println!("n_static_tydescs: {}", stats.n_static_tydescs.get());
+ println!("n_glues_created: {}", stats.n_glues_created.get());
+ println!("n_null_glues: {}", stats.n_null_glues.get());
+ println!("n_real_glues: {}", stats.n_real_glues.get());
+
+ println!("n_fns: {}", stats.n_fns.get());
+ println!("n_monos: {}", stats.n_monos.get());
+ println!("n_inlines: {}", stats.n_inlines.get());
+ println!("n_closures: {}", stats.n_closures.get());
println!("fn stats:");
- ccx.stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
+ stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
insns_b.cmp(&insns_a)
});
- for tuple in ccx.stats.fn_stats.borrow().iter() {
+ for tuple in stats.fn_stats.borrow().iter() {
match *tuple {
(ref name, ms, insns) => {
println!("{} insns, {} ms, {}", insns, ms, *name);
}
}
}
- if ccx.sess().count_llvm_insns() {
- for (k, v) in ccx.stats.llvm_insns.borrow().iter() {
+ if shared_ccx.sess().count_llvm_insns() {
+ for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
println!("{:7u} {}", *v, *k);
}
}
- let llcx = ccx.llcx;
- let link_meta = ccx.link_meta.clone();
- let llmod = ccx.llmod;
+ let modules = shared_ccx.iter()
+ .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
+ .collect();
- let mut reachable: Vec<String> = ccx.reachable.iter().filter_map(|id| {
- ccx.item_symbols.borrow().find(id).map(|s| s.to_string())
+ let mut reachable: Vec<String> = shared_ccx.reachable().iter().filter_map(|id| {
+ shared_ccx.item_symbols().borrow().find(id).map(|s| s.to_string())
}).collect();
// For the purposes of LTO, we add to the reachable set all of the upstream
// reachable extern fns. These functions are all part of the public ABI of
// the final product, so LTO needs to preserve them.
- ccx.sess().cstore.iter_crate_data(|cnum, _| {
- let syms = csearch::get_reachable_extern_fns(&ccx.sess().cstore, cnum);
+ shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
+ let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
reachable.extend(syms.move_iter().map(|did| {
- csearch::get_symbol(&ccx.sess().cstore, did)
+ csearch::get_symbol(&shared_ccx.sess().cstore, did)
}));
});
// referenced from rt/rust_try.ll
reachable.push("rust_eh_personality_catch".to_string());
- let metadata_module = ccx.metadata_llmod;
- let formats = ccx.tcx.dependency_formats.borrow().clone();
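+ // With one codegen unit, update_linkage() already gave internal
+ // linkage to everything it could. With several units, symbols were
+ // left external, so demote any that no other unit (and no other
+ // crate) actually references.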
+ if codegen_units > 1 {
+ internalize_symbols(&shared_ccx, &reachable.iter().map(|x| x.clone()).collect());
+ }
+
+ let metadata_module = ModuleTranslation {
+ llcx: shared_ccx.metadata_llcx(),
+ llmod: shared_ccx.metadata_llmod(),
+ };
+ let formats = shared_ccx.tcx().dependency_formats.borrow().clone();
let no_builtins = attr::contains_name(krate.attrs.as_slice(), "no_builtins");
- (ccx.tcx, CrateTranslation {
- context: llcx,
- module: llmod,
- link: link_meta,
+ let translation = CrateTranslation {
+ modules: modules,
metadata_module: metadata_module,
+ link: link_meta,
metadata: metadata,
reachable: reachable,
crate_formats: formats,
no_builtins: no_builtins,
- })
+ };
+
+ (shared_ccx.take_tcx(), translation)
}
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
return llvm::LLVMGetUndef(eltty.to_ref());
}
unsafe {
let ccx = cx.fcx.ccx;
if cx.unreachable.get() {
- return llvm::LLVMGetUndef(ccx.int_type.to_ref());
+ return llvm::LLVMGetUndef(ccx.int_type().to_ref());
}
B(cx).atomic_load(pointer_val, order)
}
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
unsafe {
llvm::LLVMGetUndef(eltty.to_ref())
let retty = if ty.kind() == llvm::Integer {
ty.return_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
B(cx).count_insn("ret_undef");
llvm::LLVMGetUndef(retty.to_ref())
pub fn PtrDiff(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
- if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type.to_ref()); }
+ if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); }
B(cx).ptrdiff(lhs, rhs)
}
}
pub struct Builder<'a> {
pub llbuilder: BuilderRef,
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a>,
}
// This is a really awful way to get a zero-length c-string, but better (and a
impl<'a> Builder<'a> {
pub fn new(ccx: &'a CrateContext) -> Builder<'a> {
Builder {
- llbuilder: ccx.builder.b,
+ llbuilder: ccx.raw_builder(),
ccx: ccx,
}
}
pub fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
- self.ccx.stats.n_llvm_insns.set(self.ccx
- .stats
+ self.ccx.stats().n_llvm_insns.set(self.ccx
+ .stats()
.n_llvm_insns
.get() + 1);
}
+ self.ccx.count_llvm_insn();
if self.ccx.sess().count_llvm_insns() {
base::with_insn_ctxt(|v| {
- let mut h = self.ccx.stats.llvm_insns.borrow_mut();
+ let mut h = self.ccx.stats().llvm_insns.borrow_mut();
// Build version of path with cycles removed.
self.count_insn("invoke");
debug!("Invoke {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let v = [min, max];
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.ccx.llcx,
+ llvm::LLVMMDNodeInContext(self.ccx.llcx(),
v.as_ptr(), v.len() as c_uint));
}
pub fn store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store");
unsafe {
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store.volatile");
unsafe {
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
self.count_insn("store.atomic");
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
else { llvm::False };
let argtys = inputs.iter().map(|v| {
- debug!("Asm Input Type: {:?}", self.ccx.tn.val_to_string(*v));
+ debug!("Asm Input Type: {:?}", self.ccx.tn().val_to_string(*v));
val_ty(*v)
}).collect::<Vec<_>>();
- debug!("Asm Output Type: {:?}", self.ccx.tn.type_to_string(output));
+ debug!("Asm Output Type: {:?}", self.ccx.tn().type_to_string(output));
let fty = Type::func(argtys.as_slice(), &output);
unsafe {
let v = llvm::LLVMInlineAsm(
self.count_insn("call");
debug!("Call {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let r = size % 32;
if r > 0 {
unsafe {
- args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx, r as c_uint)));
+ args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
}
}
*/
debug!("push_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
// FIXME(#2202) -- currently closure bodies have a parent
// region, which messes up the assertion below, since there
// this new AST scope had better be its immediate child.
let top_scope = self.top_ast_scope();
if top_scope.is_some() {
- assert_eq!(self.ccx.tcx.region_maps.opt_encl_scope(id), top_scope);
+ assert_eq!(self.ccx.tcx().region_maps.opt_encl_scope(id), top_scope);
}
self.push_scope(CleanupScope::new(AstScopeKind(id)));
id: ast::NodeId,
exits: [&'a Block<'a>, ..EXIT_MAX]) {
debug!("push_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
assert_eq!(Some(id), self.top_ast_scope());
self.push_scope(CleanupScope::new(LoopScopeKind(id, exits)));
*/
debug!("pop_and_trans_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
*/
debug!("pop_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
debug!("schedule_lifetime_end({:?}, val={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val));
+ self.ccx.tn().val_to_string(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
debug!("schedule_drop_mem({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
true);
debug!("schedule_drop_immediate({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_free_value({:?}, val={}, heap={:?})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
+ heap);
+
+ self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ }
+
+ fn schedule_free_slice(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap) {
+ /*!
+ * Schedules a call to free `val`, a slice allocation with the given
+ * size and alignment. Note that this is a shallow operation.
+ */
+
+ let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
+
+ debug!("schedule_free_slice({:?}, val={}, heap={:?})",
+ cleanup_scope,
+ self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
self.ccx.sess().bug(
format!("no cleanup scope {} found",
- self.ccx.tcx.map.node_to_string(cleanup_scope)).as_slice());
+ self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
}
fn schedule_clean_in_custom_scope(&self,
let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
None => {
- let mut personality = self.ccx.eh_personality.borrow_mut();
+ let mut personality = self.ccx.eh_personality().borrow_mut();
match *personality {
Some(llpersonality) => llpersonality,
None => {
}
}
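+/// Cleanup that frees a heap-allocated slice; for the exchange heap, the
+/// recorded size and alignment are passed through to the allocator.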
+pub struct FreeSlice {
+ ptr: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap,
+}
+
+impl Cleanup for FreeSlice {
+ fn must_unwind(&self) -> bool {
+ true
+ }
+
+ fn clean_on_unwind(&self) -> bool {
+ true
+ }
+
+ fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ match self.heap {
+ HeapManaged => {
+ glue::trans_free(bcx, self.ptr)
+ }
+ HeapExchange => {
+ glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
+ }
+ }
+ }
+}
+
pub struct LifetimeEnd {
ptr: ValueRef,
}
val: ValueRef,
heap: Heap,
content_ty: ty::t);
+ fn schedule_free_slice(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj);
use middle::trans::datum::{Datum, DatumBlock, Expr, Lvalue, rvalue_scratch_datum};
use middle::trans::debuginfo;
use middle::trans::expr;
-use middle::trans::machine::llsize_of;
use middle::trans::type_of::*;
use middle::trans::type_::Type;
use middle::ty;
let tcx = bcx.tcx();
// Allocate and initialize the box:
+ let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
match store {
ty::UniqTraitStore => {
- let ty = type_of(bcx.ccx(), cdata_ty);
- let size = llsize_of(bcx.ccx(), ty);
- // we treat proc as @ here, which isn't ideal
- malloc_raw_dyn_managed(bcx, cdata_ty, ClosureExchangeMallocFnLangItem, size)
+ malloc_raw_dyn_proc(bcx, cbox_ty, ClosureExchangeMallocFnLangItem)
}
ty::RegionTraitStore(..) => {
- let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = alloc_ty(bcx, cbox_ty, "__closure");
Result::new(bcx, llbox)
}
pub fn get_or_create_declaration_if_unboxed_closure(ccx: &CrateContext,
closure_id: ast::DefId)
-> Option<ValueRef> {
- if !ccx.tcx.unboxed_closures.borrow().contains_key(&closure_id) {
+ if !ccx.tcx().unboxed_closures.borrow().contains_key(&closure_id) {
// Not an unboxed closure.
return None
}
- match ccx.unboxed_closure_vals.borrow().find(&closure_id) {
+ match ccx.unboxed_closure_vals().borrow().find(&closure_id) {
Some(llfn) => {
debug!("get_or_create_declaration_if_unboxed_closure(): found \
closure");
None => {}
}
- let function_type = ty::mk_unboxed_closure(&ccx.tcx,
+ let function_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let symbol = ccx.tcx.map.with_path(closure_id.node, |path| {
+ let symbol = ccx.tcx().map.with_path(closure_id.node, |path| {
mangle_internal_name_by_path_and_seq(path, "unboxed_closure")
});
debug!("get_or_create_declaration_if_unboxed_closure(): inserting new \
closure {} (type {})",
closure_id,
- ccx.tn.type_to_string(val_ty(llfn)));
- ccx.unboxed_closure_vals.borrow_mut().insert(closure_id, llfn);
+ ccx.tn().type_to_string(val_ty(llfn)));
+ ccx.unboxed_closure_vals().borrow_mut().insert(closure_id, llfn);
Some(llfn)
}
}
};
- match ccx.closure_bare_wrapper_cache.borrow().find(&fn_ptr) {
+ match ccx.closure_bare_wrapper_cache().borrow().find(&fn_ptr) {
Some(&llval) => return llval,
None => {}
}
decl_rust_fn(ccx, closure_ty, name.as_slice())
};
- ccx.closure_bare_wrapper_cache.borrow_mut().insert(fn_ptr, llfn);
+ ccx.closure_bare_wrapper_cache().borrow_mut().insert(fn_ptr, llfn);
// This is only used by statics inlined from a different crate.
if !is_local {
use driver::session::Session;
use llvm;
-use llvm::{ValueRef, BasicBlockRef, BuilderRef};
+use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef};
use llvm::{True, False, Bool};
use middle::def;
use middle::freevars;
ty::ty_struct(..) | ty::ty_enum(..) | ty::ty_tup(..) |
ty::ty_unboxed_closure(..) => {
let llty = sizing_type_of(ccx, ty);
- llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type)
+ llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
}
_ => type_is_zero_size(ccx, ty)
}
pub block_arena: &'a TypedArena<Block<'a>>,
// This function's enclosing crate context.
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a>,
// Used and maintained by the debuginfo module.
pub debug_context: debuginfo::FunctionDebugContext,
self.llreturn.set(Some(unsafe {
"return".with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx, self.llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, buf)
})
}))
}
-> &'a Block<'a> {
unsafe {
let llbb = name.with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx,
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
buf)
});
})
}
- pub fn ccx(&self) -> &'a CrateContext { self.fcx.ccx }
+ pub fn ccx(&self) -> &'a CrateContext<'a> { self.fcx.ccx }
pub fn tcx(&self) -> &'a ty::ctxt {
- &self.fcx.ccx.tcx
+ self.fcx.ccx.tcx()
}
pub fn sess(&self) -> &'a Session { self.fcx.ccx.sess() }
}
pub fn val_to_string(&self, val: ValueRef) -> String {
- self.ccx().tn.val_to_string(val)
+ self.ccx().tn().val_to_string(val)
}
pub fn llty_str(&self, ty: Type) -> String {
- self.ccx().tn.type_to_string(ty)
+ self.ccx().tn().type_to_string(ty)
}
pub fn ty_to_string(&self, t: ty::t) -> String {
}
pub fn C_int(ccx: &CrateContext, i: int) -> ValueRef {
- C_integral(ccx.int_type, i as u64, true)
+ C_integral(ccx.int_type(), i as u64, true)
}
pub fn C_uint(ccx: &CrateContext, i: uint) -> ValueRef {
- C_integral(ccx.int_type, i as u64, false)
+ C_integral(ccx.int_type(), i as u64, false)
}
pub fn C_u8(ccx: &CrateContext, i: uint) -> ValueRef {
// our boxed-and-length-annotated strings.
pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
unsafe {
- match cx.const_cstr_cache.borrow().find(&s) {
+ match cx.const_cstr_cache().borrow().find(&s) {
Some(&llval) => return llval,
None => ()
}
- let sc = llvm::LLVMConstStringInContext(cx.llcx,
+ let sc = llvm::LLVMConstStringInContext(cx.llcx(),
s.get().as_ptr() as *const c_char,
s.get().len() as c_uint,
!null_terminated as Bool);
let gsym = token::gensym("str");
let g = format!("str{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(sc).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(sc).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::SetLinkage(g, llvm::InternalLinkage);
- cx.const_cstr_cache.borrow_mut().insert(s, g);
+ cx.const_cstr_cache().borrow_mut().insert(s, g);
g
}
}
let len = s.get().len();
let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s, false),
Type::i8p(cx).to_ref());
- C_named_struct(cx.tn.find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
+ C_named_struct(cx.tn().find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
}
}
let gsym = token::gensym("binary");
let g = format!("binary{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(lldata).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(lldata).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, lldata);
llvm::LLVMSetGlobalConstant(g, True);
}
}
-pub fn C_struct(ccx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+ C_struct_in_context(cx.llcx(), elts, packed)
+}
+
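+// The `_in_context` variants build constants in an explicitly supplied LLVM
+// context; write_metadata uses them because the metadata module lives in its
+// own context, separate from the codegen units.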
+pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
unsafe {
- llvm::LLVMConstStructInContext(ccx.llcx,
+ llvm::LLVMConstStructInContext(llcx,
elts.as_ptr(), elts.len() as c_uint,
packed as Bool)
}
}
}
-pub fn C_bytes(ccx: &CrateContext, bytes: &[u8]) -> ValueRef {
+pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
+ C_bytes_in_context(cx.llcx(), bytes)
+}
+
+pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
- return llvm::LLVMConstStringInContext(ccx.llcx, ptr, bytes.len() as c_uint, True);
+ return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
}
}
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={}, us={:?}, r={})",
- cx.tn.val_to_string(v), us, cx.tn.val_to_string(r));
+ cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
return r;
}
pub fn const_ptrcast(cx: &CrateContext, a: ValueRef, t: Type) -> ValueRef {
unsafe {
let b = llvm::LLVMConstPointerCast(a, t.ptr_to().to_ref());
- assert!(cx.const_globals.borrow_mut().insert(b as int, a));
+ assert!(cx.const_globals().borrow_mut().insert(b as int, a));
b
}
}
pub fn const_addr_of(cx: &CrateContext, cv: ValueRef, mutbl: ast::Mutability) -> ValueRef {
unsafe {
let gv = "const".with_c_str(|name| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(cv).to_ref(), name)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(cv).to_ref(), name)
});
llvm::LLVMSetInitializer(gv, cv);
llvm::LLVMSetGlobalConstant(gv,
}
fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef {
- let v = match cx.const_globals.borrow().find(&(v as int)) {
+ let v = match cx.const_globals().borrow().find(&(v as int)) {
Some(&v) => v,
None => v
};
pub fn get_const_val(cx: &CrateContext,
mut def_id: ast::DefId) -> (ValueRef, bool) {
- let contains_key = cx.const_values.borrow().contains_key(&def_id.node);
+ let contains_key = cx.const_values().borrow().contains_key(&def_id.node);
if !ast_util::is_local(def_id) || !contains_key {
if !ast_util::is_local(def_id) {
def_id = inline::maybe_instantiate_inline(cx, def_id);
}
- match cx.tcx.map.expect_item(def_id.node).node {
+ match cx.tcx().map.expect_item(def_id.node).node {
ast::ItemStatic(_, ast::MutImmutable, _) => {
trans_const(cx, ast::MutImmutable, def_id.node);
}
}
}
- (cx.const_values.borrow().get_copy(&def_id.node),
- !cx.non_inlineable_statics.borrow().contains(&def_id.node))
+ (cx.const_values().borrow().get_copy(&def_id.node),
+ !cx.non_inlineable_statics().borrow().contains(&def_id.node))
}
pub fn const_expr(cx: &CrateContext, e: &ast::Expr, is_local: bool) -> (ValueRef, bool, ty::t) {
let mut inlineable = inlineable;
let ety = ty::expr_ty(cx.tcx(), e);
let mut ety_adjusted = ty::expr_ty_adjusted(cx.tcx(), e);
- let opt_adj = cx.tcx.adjustments.borrow().find_copy(&e.id);
+ let opt_adj = cx.tcx().adjustments.borrow().find_copy(&e.id);
match opt_adj {
None => { }
Some(adj) => {
(expr::cast_enum, expr::cast_integral) => {
let repr = adt::represent_type(cx, basety);
let discr = adt::const_get_discrim(cx, &*repr, v);
- let iv = C_integral(cx.int_type, discr, false);
+ let iv = C_integral(cx.int_type(), discr, false);
let ety_cast = expr::cast_type_kind(cx.tcx(), ety);
match ety_cast {
expr::cast_integral => {
let g = base::get_item_val(ccx, id);
// At this point, get_item_val has already translated the
// constant's initializer to determine its LLVM type.
- let v = ccx.const_values.borrow().get_copy(&id);
+ let v = ccx.const_values().borrow().get_copy(&id);
llvm::LLVMSetInitializer(g, v);
+
+ // `get_item_val` left `g` with external linkage, but we just set an
+ // initializer for it. We don't yet know whether `g` should really be
+ // defined in this compilation unit, so we set its linkage to
+ // `AvailableExternallyLinkage`. (It's still a definition, but acts
+ // like a declaration for most purposes.) If `g` really should be
+ // declared here, then `trans_item` will fix up the linkage later on.
+ llvm::SetLinkage(g, llvm::AvailableExternallyLinkage);
+
if m != ast::MutMutable {
llvm::LLVMSetGlobalConstant(g, True);
}
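The comment above describes a two-phase linkage pattern. A minimal sketch of the
idea, assuming a global `g` that already has an initializer (only the first
`SetLinkage` call mirrors the source; the fix-up value is an assumption about
what `trans_item` does later):

// Phase 1: mark the definition so it links like a declaration, while LLVM
// can still inline and constant-fold through it.
llvm::SetLinkage(g, llvm::AvailableExternallyLinkage);

// Phase 2: if this compilation unit turns out to own the item, tighten the
// linkage back (assumed fix-up; the source defers this to `trans_item`).
llvm::SetLinkage(g, llvm::ExternalLinkage);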
use driver::config::NoDebugInfo;
use driver::session::Session;
use llvm;
-use llvm::{ContextRef, ModuleRef, ValueRef};
+use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
use llvm::{TargetData};
use llvm::mk_target_data;
use metadata::common::LinkMeta;
pub fn_stats: RefCell<Vec<(String, uint, uint)> >,
}
-pub struct CrateContext {
- pub llmod: ModuleRef,
- pub llcx: ContextRef,
- pub metadata_llmod: ModuleRef,
- pub td: TargetData,
- pub tn: TypeNames,
- pub externs: RefCell<ExternMap>,
- pub item_vals: RefCell<NodeMap<ValueRef>>,
- pub exp_map2: resolve::ExportMap2,
- pub reachable: NodeSet,
- pub item_symbols: RefCell<NodeMap<String>>,
- pub link_meta: LinkMeta,
- pub drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
- pub tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
+/// The shared portion of a `CrateContext`. There is one `SharedCrateContext`
+/// per crate. The data here is shared between all compilation units of the
+/// crate, so it must not contain references to any LLVM data structures
+/// (aside from metadata-related ones).
+pub struct SharedCrateContext {
+ local_ccxs: Vec<LocalCrateContext>,
+
+ metadata_llmod: ModuleRef,
+ metadata_llcx: ContextRef,
+
+ exp_map2: resolve::ExportMap2,
+ reachable: NodeSet,
+ item_symbols: RefCell<NodeMap<String>>,
+ link_meta: LinkMeta,
+ /// A set of static items which cannot be inlined into other crates. This
+ /// prevents the corresponding IIItem structures from being encoded into
+ /// the metadata that is generated.
+ non_inlineable_statics: RefCell<NodeSet>,
+ symbol_hasher: RefCell<Sha256>,
+ tcx: ty::ctxt,
+ stats: Stats,
+
+ available_monomorphizations: RefCell<HashSet<String>>,
+ available_drop_glues: RefCell<HashMap<ty::t, String>>,
+ available_visit_glues: RefCell<HashMap<ty::t, String>>,
+}
+
+/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
+/// per compilation unit. Each one has its own LLVM `ContextRef` so that
+/// several compilation units may be optimized in parallel. All other LLVM
+/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
+pub struct LocalCrateContext {
+ llmod: ModuleRef,
+ llcx: ContextRef,
+ td: TargetData,
+ tn: TypeNames,
+ externs: RefCell<ExternMap>,
+ item_vals: RefCell<NodeMap<ValueRef>>,
+ drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
+ tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
/// Set when running emit_tydescs to enforce that no more tydescs are
/// created.
- pub finished_tydescs: Cell<bool>,
+ finished_tydescs: Cell<bool>,
/// Track mapping of external ids to local items imported for inlining
- pub external: RefCell<DefIdMap<Option<ast::NodeId>>>,
+ external: RefCell<DefIdMap<Option<ast::NodeId>>>,
/// Backwards version of the `external` map (inlined items to where they
/// came from)
- pub external_srcs: RefCell<NodeMap<ast::DefId>>,
- /// A set of static items which cannot be inlined into other crates. This
- /// will prevent in IIItem() structures from being encoded into the metadata
- /// that is generated
- pub non_inlineable_statics: RefCell<NodeSet>,
+ external_srcs: RefCell<NodeMap<ast::DefId>>,
/// Cache instances of monomorphized functions
- pub monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
- pub monomorphizing: RefCell<DefIdMap<uint>>,
+ monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
+ monomorphizing: RefCell<DefIdMap<uint>>,
/// Cache generated vtables
- pub vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
+ vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
/// Cache of constant strings.
- pub const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
+ const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
/// Reverse-direction for const ptrs cast from globals.
/// The key is an int, cast from a ValueRef holding a *T. We need the
/// reverse mapping because we have to ptrcast during translation of a
/// [T] const: we form a slice (a (*T, int) pair) rather than a pointer
/// to an LLVM array type.
- pub const_globals: RefCell<HashMap<int, ValueRef>>,
+ const_globals: RefCell<HashMap<int, ValueRef>>,
/// Cache of emitted const values
- pub const_values: RefCell<NodeMap<ValueRef>>,
+ const_values: RefCell<NodeMap<ValueRef>>,
/// Cache of external const values
- pub extern_const_values: RefCell<DefIdMap<ValueRef>>,
+ extern_const_values: RefCell<DefIdMap<ValueRef>>,
- pub impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
+ impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
/// Cache of closure wrappers for bare fn's.
- pub closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
-
- pub lltypes: RefCell<HashMap<ty::t, Type>>,
- pub llsizingtypes: RefCell<HashMap<ty::t, Type>>,
- pub adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
- pub symbol_hasher: RefCell<Sha256>,
- pub type_hashcodes: RefCell<HashMap<ty::t, String>>,
- pub all_llvm_symbols: RefCell<HashSet<String>>,
- pub tcx: ty::ctxt,
- pub stats: Stats,
- pub int_type: Type,
- pub opaque_vec_type: Type,
- pub builder: BuilderRef_res,
+ closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
+
+ lltypes: RefCell<HashMap<ty::t, Type>>,
+ llsizingtypes: RefCell<HashMap<ty::t, Type>>,
+ adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
+ type_hashcodes: RefCell<HashMap<ty::t, String>>,
+ all_llvm_symbols: RefCell<HashSet<String>>,
+ int_type: Type,
+ opaque_vec_type: Type,
+ builder: BuilderRef_res,
/// Holds the LLVM values for closure IDs.
- pub unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
+ unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
- pub dbg_cx: Option<debuginfo::CrateDebugContext>,
+ dbg_cx: Option<debuginfo::CrateDebugContext>,
- pub eh_personality: RefCell<Option<ValueRef>>,
+ eh_personality: RefCell<Option<ValueRef>>,
intrinsics: RefCell<HashMap<&'static str, ValueRef>>,
+
+ /// Number of LLVM instructions translated into this `LocalCrateContext`.
+ /// This is used to perform some basic load-balancing to keep all LLVM
+ /// contexts around the same size.
+ n_llvm_insns: Cell<uint>,
+}
+
+pub struct CrateContext<'a> {
+ shared: &'a SharedCrateContext,
+ local: &'a LocalCrateContext,
+ /// The index of `local` in `shared.local_ccxs`. This is used in
+ /// `maybe_iter(true)` to identify the original `LocalCrateContext`.
+ index: uint,
+}
+
+pub struct CrateContextIterator<'a> {
+ shared: &'a SharedCrateContext,
+ index: uint,
+}
+
+impl<'a> Iterator<CrateContext<'a>> for CrateContextIterator<'a> {
+ fn next(&mut self) -> Option<CrateContext<'a>> {
+ if self.index >= self.shared.local_ccxs.len() {
+ return None;
+ }
+
+ let index = self.index;
+ self.index += 1;
+
+ Some(CrateContext {
+ shared: self.shared,
+ local: &self.shared.local_ccxs[index],
+ index: index,
+ })
+ }
+}
+
+/// The iterator produced by `CrateContext::maybe_iter`.
+pub struct CrateContextMaybeIterator<'a> {
+ shared: &'a SharedCrateContext,
+ index: uint,
+ single: bool,
+ origin: uint,
}
-impl CrateContext {
- pub fn new(name: &str,
+impl<'a> Iterator<(CrateContext<'a>, bool)> for CrateContextMaybeIterator<'a> {
+ fn next(&mut self) -> Option<(CrateContext<'a>, bool)> {
+ if self.index >= self.shared.local_ccxs.len() {
+ return None;
+ }
+
+ let index = self.index;
+ self.index += 1;
+ if self.single {
+ self.index = self.shared.local_ccxs.len();
+ }
+
+ let ccx = CrateContext {
+ shared: self.shared,
+ local: &self.shared.local_ccxs[index],
+ index: index,
+ };
+ Some((ccx, index == self.origin))
+ }
+}
+
+
+unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
+ let llcx = llvm::LLVMContextCreate();
+ let llmod = mod_name.with_c_str(|buf| {
+ llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
+ });
+ sess.targ_cfg
+ .target_strs
+ .data_layout
+ .as_slice()
+ .with_c_str(|buf| {
+ llvm::LLVMSetDataLayout(llmod, buf);
+ });
+ sess.targ_cfg
+ .target_strs
+ .target_triple
+ .as_slice()
+ .with_c_str(|buf| {
+ llvm::LLVMRustSetNormalizedTarget(llmod, buf);
+ });
+ (llcx, llmod)
+}
+
+impl SharedCrateContext {
+ pub fn new(crate_name: &str,
+ local_count: uint,
tcx: ty::ctxt,
emap2: resolve::ExportMap2,
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet)
- -> CrateContext {
+ -> SharedCrateContext {
+ let (metadata_llcx, metadata_llmod) = unsafe {
+ create_context_and_module(&tcx.sess, "metadata")
+ };
+
+ let mut shared_ccx = SharedCrateContext {
+ local_ccxs: Vec::with_capacity(local_count),
+ metadata_llmod: metadata_llmod,
+ metadata_llcx: metadata_llcx,
+ exp_map2: emap2,
+ reachable: reachable,
+ item_symbols: RefCell::new(NodeMap::new()),
+ link_meta: link_meta,
+ non_inlineable_statics: RefCell::new(NodeSet::new()),
+ symbol_hasher: RefCell::new(symbol_hasher),
+ tcx: tcx,
+ stats: Stats {
+ n_static_tydescs: Cell::new(0u),
+ n_glues_created: Cell::new(0u),
+ n_null_glues: Cell::new(0u),
+ n_real_glues: Cell::new(0u),
+ n_fns: Cell::new(0u),
+ n_monos: Cell::new(0u),
+ n_inlines: Cell::new(0u),
+ n_closures: Cell::new(0u),
+ n_llvm_insns: Cell::new(0u),
+ llvm_insns: RefCell::new(HashMap::new()),
+ fn_stats: RefCell::new(Vec::new()),
+ },
+ available_monomorphizations: RefCell::new(HashSet::new()),
+ available_drop_glues: RefCell::new(HashMap::new()),
+ available_visit_glues: RefCell::new(HashMap::new()),
+ };
+
+ for i in range(0, local_count) {
+ // Append ".rs" to crate name as LLVM module identifier.
+ //
+ // The LLVM code generator emits a ".file filename" directive
+ // for ELF backends. The value of "filename" is set to the
+ // LLVM module identifier. Due to an LLVM MC bug[1], LLVM
+ // crashes if the module identifier is the same as another
+ // symbol in the module, such as a function name.
+ // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
+ let llmod_id = format!("{}.{}.rs", crate_name, i);
+ let local_ccx = LocalCrateContext::new(&shared_ccx, llmod_id.as_slice());
+ shared_ccx.local_ccxs.push(local_ccx);
+ }
+
+ shared_ccx
+ }
+
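As a concrete illustration of the naming scheme in the loop above: compiling a
crate named `mycrate` with `local_count == 3` yields `mycrate.0.rs`,
`mycrate.1.rs`, and `mycrate.2.rs` (the crate name is illustrative; the values
follow directly from the `format!` call):

let llmod_id = format!("{}.{}.rs", "mycrate", 2u);
assert_eq!(llmod_id.as_slice(), "mycrate.2.rs");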
+ pub fn iter<'a>(&'a self) -> CrateContextIterator<'a> {
+ CrateContextIterator {
+ shared: self,
+ index: 0,
+ }
+ }
+
+ pub fn get_ccx<'a>(&'a self, index: uint) -> CrateContext<'a> {
+ CrateContext {
+ shared: self,
+ local: &self.local_ccxs[index],
+ index: index,
+ }
+ }
+
+ fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a> {
+ let (local_ccx, index) =
+ self.local_ccxs
+ .iter()
+ .zip(range(0, self.local_ccxs.len()))
+ .min_by(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
+ .unwrap();
+ CrateContext {
+ shared: self,
+ local: local_ccx,
+ index: index,
+ }
+ }
+
+
+ pub fn metadata_llmod(&self) -> ModuleRef {
+ self.metadata_llmod
+ }
+
+ pub fn metadata_llcx(&self) -> ContextRef {
+ self.metadata_llcx
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.link_meta
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.non_inlineable_statics
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.symbol_hasher
+ }
+
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+ &self.tcx
+ }
+
+ pub fn take_tcx(self) -> ty::ctxt {
+ self.tcx
+ }
+
+ pub fn sess<'a>(&'a self) -> &'a Session {
+ &self.tcx.sess
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.stats
+ }
+}
+
+impl LocalCrateContext {
+ fn new(shared: &SharedCrateContext,
+ name: &str)
+ -> LocalCrateContext {
unsafe {
- let llcx = llvm::LLVMContextCreate();
- let llmod = name.with_c_str(|buf| {
- llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
- });
- let metadata_llmod = format!("{}_metadata", name).with_c_str(|buf| {
- llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
- });
- tcx.sess
- .targ_cfg
- .target_strs
- .data_layout
- .as_slice()
- .with_c_str(|buf| {
- llvm::LLVMSetDataLayout(llmod, buf);
- llvm::LLVMSetDataLayout(metadata_llmod, buf);
- });
- tcx.sess
- .targ_cfg
- .target_strs
- .target_triple
- .as_slice()
- .with_c_str(|buf| {
- llvm::LLVMRustSetNormalizedTarget(llmod, buf);
- llvm::LLVMRustSetNormalizedTarget(metadata_llmod, buf);
- });
-
- let td = mk_target_data(tcx.sess
- .targ_cfg
- .target_strs
- .data_layout
- .as_slice());
-
- let dbg_cx = if tcx.sess.opts.debuginfo != NoDebugInfo {
+ let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
+
+ let td = mk_target_data(shared.tcx
+ .sess
+ .targ_cfg
+ .target_strs
+ .data_layout
+ .as_slice());
+
+ let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
Some(debuginfo::CrateDebugContext::new(llmod))
} else {
None
};
- let mut ccx = CrateContext {
+ let mut local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
- metadata_llmod: metadata_llmod,
td: td,
tn: TypeNames::new(),
externs: RefCell::new(HashMap::new()),
item_vals: RefCell::new(NodeMap::new()),
- exp_map2: emap2,
- reachable: reachable,
- item_symbols: RefCell::new(NodeMap::new()),
- link_meta: link_meta,
drop_glues: RefCell::new(HashMap::new()),
tydescs: RefCell::new(HashMap::new()),
finished_tydescs: Cell::new(false),
external: RefCell::new(DefIdMap::new()),
external_srcs: RefCell::new(NodeMap::new()),
- non_inlineable_statics: RefCell::new(NodeSet::new()),
monomorphized: RefCell::new(HashMap::new()),
monomorphizing: RefCell::new(DefIdMap::new()),
vtables: RefCell::new(HashMap::new()),
lltypes: RefCell::new(HashMap::new()),
llsizingtypes: RefCell::new(HashMap::new()),
adt_reprs: RefCell::new(HashMap::new()),
- symbol_hasher: RefCell::new(symbol_hasher),
type_hashcodes: RefCell::new(HashMap::new()),
all_llvm_symbols: RefCell::new(HashSet::new()),
- tcx: tcx,
- stats: Stats {
- n_static_tydescs: Cell::new(0u),
- n_glues_created: Cell::new(0u),
- n_null_glues: Cell::new(0u),
- n_real_glues: Cell::new(0u),
- n_fns: Cell::new(0u),
- n_monos: Cell::new(0u),
- n_inlines: Cell::new(0u),
- n_closures: Cell::new(0u),
- n_llvm_insns: Cell::new(0u),
- llvm_insns: RefCell::new(HashMap::new()),
- fn_stats: RefCell::new(Vec::new()),
- },
int_type: Type::from_ref(ptr::mut_null()),
opaque_vec_type: Type::from_ref(ptr::mut_null()),
builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
dbg_cx: dbg_cx,
eh_personality: RefCell::new(None),
intrinsics: RefCell::new(HashMap::new()),
+ n_llvm_insns: Cell::new(0u),
};
- ccx.int_type = Type::int(&ccx);
- ccx.opaque_vec_type = Type::opaque_vec(&ccx);
+ local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
+ local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared));
- let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
- str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type], false);
- ccx.tn.associate_type("str_slice", &str_slice_ty);
+ // Done mutating local_ccx directly. (The rest of the
+ // initialization goes through RefCell.)
+ {
+ let ccx = local_ccx.dummy_ccx(shared);
- ccx.tn.associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+ let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
+ str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type()], false);
+ ccx.tn().associate_type("str_slice", &str_slice_ty);
- if ccx.sess().count_llvm_insns() {
- base::init_insn_ctxt()
+ ccx.tn().associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+
+ if ccx.sess().count_llvm_insns() {
+ base::init_insn_ctxt()
+ }
}
- ccx
+ local_ccx
}
}
+ /// Create a dummy `CrateContext` from `self` and the provided
+ /// `SharedCrateContext`. This is somewhat dangerous because `self` may
+ /// not actually be an element of `shared.local_ccxs`, which can cause some
+ /// operations to `fail` unexpectedly.
+ ///
+ /// This is used in the `LocalCrateContext` constructor to allow calling
+ /// functions that expect a complete `CrateContext`, even before the local
+ /// portion is fully initialized and attached to the `SharedCrateContext`.
+ fn dummy_ccx<'a>(&'a self, shared: &'a SharedCrateContext) -> CrateContext<'a> {
+ CrateContext {
+ shared: shared,
+ local: self,
+ index: -1 as uint,
+ }
+ }
+}
+
+impl<'b> CrateContext<'b> {
+ pub fn shared(&self) -> &'b SharedCrateContext {
+ self.shared
+ }
+
+ pub fn local(&self) -> &'b LocalCrateContext {
+ self.local
+ }
+
+
+ /// Get a (possibly) different `CrateContext` from the same
+ /// `SharedCrateContext`.
+ pub fn rotate(&self) -> CrateContext<'b> {
+ self.shared.get_smallest_ccx()
+ }
+
+ /// Either iterate over only `self`, or iterate over all `CrateContext`s in
+ /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)`
+ /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false`
+ /// otherwise. This method is useful for avoiding code duplication in
+ /// cases where it may or may not be necessary to translate code into every
+ /// context.
+ pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b> {
+ CrateContextMaybeIterator {
+ shared: self.shared,
+ index: if iter_all { 0 } else { self.index },
+ single: !iter_all,
+ origin: self.index,
+ }
+ }
+
+
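Taken together, `iter`, `rotate`, and `maybe_iter` give translation code three
access patterns. A usage sketch under stated assumptions -- every function name
other than those three accessors and `get_ccx` is hypothetical:

fn translate_all(shared: &SharedCrateContext) {
    // (1) Visit every codegen unit, e.g. to emit per-unit boilerplate.
    for ccx in shared.iter() {
        emit_unit_prelude(&ccx);
    }

    // (2) Start each new item in the least-loaded unit (by n_llvm_insns).
    let ccx = shared.get_ccx(0).rotate();

    // (3) Translate into one unit, or into every unit when a copy is wanted
    // everywhere; `is_origin` is true exactly once.
    for (ccx, is_origin) in ccx.maybe_iter(wants_copy_everywhere()) {
        translate_item_into(&ccx, is_origin);
    }
}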
pub fn tcx<'a>(&'a self) -> &'a ty::ctxt {
- &self.tcx
+ &self.shared.tcx
}
pub fn sess<'a>(&'a self) -> &'a Session {
- &self.tcx.sess
+ &self.shared.tcx.sess
}
pub fn builder<'a>(&'a self) -> Builder<'a> {
Builder::new(self)
}
+ pub fn raw_builder<'a>(&'a self) -> BuilderRef {
+ self.local.builder.b
+ }
+
pub fn tydesc_type(&self) -> Type {
- self.tn.find_type("tydesc").unwrap()
+ self.local.tn.find_type("tydesc").unwrap()
}
pub fn get_intrinsic(&self, key: & &'static str) -> ValueRef {
- match self.intrinsics.borrow().find_copy(key) {
+ match self.intrinsics().borrow().find_copy(key) {
Some(v) => return v,
_ => {}
}
let ref cfg = self.sess().targ_cfg;
cfg.os != abi::OsiOS || cfg.arch != abi::Arm
}
+
+
+ pub fn llmod(&self) -> ModuleRef {
+ self.local.llmod
+ }
+
+ pub fn llcx(&self) -> ContextRef {
+ self.local.llcx
+ }
+
+ pub fn td<'a>(&'a self) -> &'a TargetData {
+ &self.local.td
+ }
+
+ pub fn tn<'a>(&'a self) -> &'a TypeNames {
+ &self.local.tn
+ }
+
+ pub fn externs<'a>(&'a self) -> &'a RefCell<ExternMap> {
+ &self.local.externs
+ }
+
+ pub fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.local.item_vals
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.shared.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.shared.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.shared.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.shared.link_meta
+ }
+
+ pub fn drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, ValueRef>> {
+ &self.local.drop_glues
+ }
+
+ pub fn tydescs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<tydesc_info>>> {
+ &self.local.tydescs
+ }
+
+ pub fn finished_tydescs<'a>(&'a self) -> &'a Cell<bool> {
+ &self.local.finished_tydescs
+ }
+
+ pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
+ &self.local.external
+ }
+
+ pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<ast::DefId>> {
+ &self.local.external_srcs
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.shared.non_inlineable_statics
+ }
+
+ pub fn monomorphized<'a>(&'a self) -> &'a RefCell<HashMap<MonoId, ValueRef>> {
+ &self.local.monomorphized
+ }
+
+ pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<uint>> {
+ &self.local.monomorphizing
+ }
+
+ pub fn vtables<'a>(&'a self) -> &'a RefCell<HashMap<(ty::t, MonoId), ValueRef>> {
+ &self.local.vtables
+ }
+
+ pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<HashMap<InternedString, ValueRef>> {
+ &self.local.const_cstr_cache
+ }
+
+ pub fn const_globals<'a>(&'a self) -> &'a RefCell<HashMap<int, ValueRef>> {
+ &self.local.const_globals
+ }
+
+ pub fn const_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.local.const_values
+ }
+
+ pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.local.extern_const_values
+ }
+
+ pub fn impl_method_cache<'a>(&'a self)
+ -> &'a RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>> {
+ &self.local.impl_method_cache
+ }
+
+ pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<HashMap<ValueRef, ValueRef>> {
+ &self.local.closure_bare_wrapper_cache
+ }
+
+ pub fn lltypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.local.lltypes
+ }
+
+ pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.local.llsizingtypes
+ }
+
+ pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<adt::Repr>>> {
+ &self.local.adt_reprs
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.shared.symbol_hasher
+ }
+
+ pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.local.type_hashcodes
+ }
+
+ pub fn all_llvm_symbols<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.local.all_llvm_symbols
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.shared.stats
+ }
+
+ pub fn available_monomorphizations<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.shared.available_monomorphizations
+ }
+
+ pub fn available_drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.shared.available_drop_glues
+ }
+
+ pub fn available_visit_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.shared.available_visit_glues
+ }
+
+ pub fn int_type(&self) -> Type {
+ self.local.int_type
+ }
+
+ pub fn opaque_vec_type(&self) -> Type {
+ self.local.opaque_vec_type
+ }
+
+ pub fn unboxed_closure_vals<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.local.unboxed_closure_vals
+ }
+
+ pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext> {
+ &self.local.dbg_cx
+ }
+
+ pub fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
+ &self.local.eh_personality
+ }
+
+ fn intrinsics<'a>(&'a self) -> &'a RefCell<HashMap<&'static str, ValueRef>> {
+ &self.local.intrinsics
+ }
+
+ pub fn count_llvm_insn(&self) {
+ self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
+ }
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {
($name:expr fn() -> $ret:expr) => (
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name, Type::func([], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
);
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name,
Type::func([$($arg),*], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
let f = base::decl_cdecl_fn(ccx, stringify!($cname),
Type::func([$($arg),*], &$ret),
ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
#[allow(dead_code)] // useful for debugging
pub fn to_string(&self, ccx: &CrateContext) -> String {
format!("Datum({}, {}, {:?})",
- ccx.tn.val_to_string(self.val),
+ ccx.tn().val_to_string(self.val),
ty_to_string(ccx.tcx(), self.ty),
self.kind)
}
// First, find out the 'real' def_id of the type. Items inlined from
// other crates have to be mapped back to their source.
let source_def_id = if def_id.krate == ast::LOCAL_CRATE {
- match cx.external_srcs.borrow().find_copy(&def_id.node) {
+ match cx.external_srcs().borrow().find_copy(&def_id.node) {
Some(source_def_id) => {
// The given def_id identifies the inlined copy of a
// type definition; take the source of the copy instead.
// Get the crate hash as the first part of the identifier.
let crate_hash = if source_def_id.krate == ast::LOCAL_CRATE {
- cx.link_meta.crate_hash.clone()
+ cx.link_meta().crate_hash.clone()
} else {
cx.sess().cstore.get_crate_hash(source_def_id.krate)
};
/// Create any deferred debug metadata nodes
pub fn finalize(cx: &CrateContext) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
if cx.sess().targ_cfg.os == abi::OsMacos ||
cx.sess().targ_cfg.os == abi::OsiOS {
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 2));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 2));
} else {
// FIXME(#13611): this is a kludge fix because the Linux bots have
// gdb 7.4, which doesn't understand dwarf4; we should do something
// more graceful here.
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 3));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 3));
}
// Prevent bitcode readers from deleting the debug info.
"Debug Info Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s,
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s,
llvm::LLVMRustDebugMetadataVersion));
};
}
pub fn create_global_var_metadata(cx: &CrateContext,
node_id: ast::NodeId,
global: ValueRef) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
// crate should already contain debuginfo for it. More importantly, the
// global might not even exist in un-inlined form anywhere, which would lead
// to linker errors.
- if cx.external_srcs.borrow().contains_key(&node_id) {
+ if cx.external_srcs().borrow().contains_key(&node_id) {
return;
}
- let var_item = cx.tcx.map.get(node_id);
+ let var_item = cx.tcx().map.get(node_id);
let (ident, span) = match var_item {
ast_map::NodeItem(item) => {
}
let cx = bcx.ccx();
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, path1| {
let var_ident = path1.node;
let cx = bcx.ccx();
- let ast_item = cx.tcx.map.find(node_id);
+ let ast_item = cx.tcx().map.find(node_id);
let variable_ident = match ast_item {
None => {
let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
let aops = unsafe {
- [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type.to_ref())]
+ [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type().to_ref())]
};
// Regardless of the actual type (`T`) we're always passed the stack slot (alloca)
// for the binding. For ByRef bindings that's a `T*` but for ByMove bindings we
let fcx = bcx.fcx;
let cx = fcx.ccx;
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path1| {
let empty_generics = ast_util::empty_generics();
- let fnitem = cx.tcx.map.get(fn_ast_id);
+ let fnitem = cx.tcx().map.get(fn_ast_id);
let (ident, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
ast_map::NodeItem(ref item) => {
// externally visible or by being inlined into something externally visible).
// It might be better to use the `exported_items` set from `driver::CrateAnalysis`
// in the future, but (atm) this set is not available in the translation pass.
- !cx.reachable.contains(&node_id)
+ !cx.reachable().contains(&node_id)
}
#[allow(non_snake_case)]
});
fn fallback_path(cx: &CrateContext) -> CString {
- cx.link_meta.crate_name.as_slice().to_c_str()
+ cx.link_meta().crate_name.as_slice().to_c_str()
}
}
match scope_map.borrow().find_copy(&node_id) {
Some(scope_metadata) => scope_metadata,
None => {
- let node = fcx.ccx.tcx.map.get(node_id);
+ let node = fcx.ccx.tcx().map.get(node_id);
fcx.ccx.sess().span_bug(span,
format!("debuginfo: Could not find scope info for node {:?}",
def_id: ast::DefId)
-> token::InternedString {
let name = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.get_path_elem(def_id.node).name()
+ cx.tcx().map.get_path_elem(def_id.node).name()
} else {
- csearch::get_item_path(&cx.tcx, def_id).last().unwrap().name()
+ csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
token::get_name(name)
content_llvm_type: Type)
-> bool {
member_llvm_types.len() == 5 &&
- member_llvm_types[0] == cx.int_type &&
+ member_llvm_types[0] == cx.int_type() &&
member_llvm_types[1] == Type::generic_glue_fn(cx).ptr_to() &&
member_llvm_types[2] == Type::i8(cx).ptr_to() &&
member_llvm_types[3] == Type::i8(cx).ptr_to() &&
-> bool {
member_llvm_types.len() == 2 &&
member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
- member_llvm_types[1] == cx.int_type
+ member_llvm_types[1] == cx.int_type()
}
}
};
unsafe {
- llvm::LLVMSetCurrentDebugLocation(cx.builder.b, metadata_node);
+ llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
}
debug_context(cx).current_debug_location.set(debug_location);
#[inline]
fn debug_context<'a>(cx: &'a CrateContext) -> &'a CrateDebugContext {
- let debug_context: &'a CrateDebugContext = cx.dbg_cx.get_ref();
+ let debug_context: &'a CrateDebugContext = cx.dbg_cx().get_ref();
debug_context
}
#[inline]
#[allow(non_snake_case)]
fn DIB(cx: &CrateContext) -> DIBuilderRef {
- cx.dbg_cx.get_ref().builder
+ cx.dbg_cx().get_ref().builder
}
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
}
fn assert_type_for_node_id(cx: &CrateContext, node_id: ast::NodeId, error_span: Span) {
- if !cx.tcx.node_types.borrow().contains_key(&(node_id as uint)) {
+ if !cx.tcx().node_types.borrow().contains_key(&(node_id as uint)) {
cx.sess().span_bug(error_span, "debuginfo: Could not find type for node id!");
}
}
-> (DIScope, Span) {
let containing_scope = namespace_for_item(cx, def_id).scope;
let definition_span = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.span(def_id.node)
+ cx.tcx().map.span(def_id.node)
} else {
// For external items there is no span information
codemap::DUMMY_SP
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
scope_metadata: DIScope,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
}
fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str {
- cx.link_meta.crate_name.as_slice()
+ cx.link_meta().crate_name.as_slice()
}
fn namespace_for_item(cx: &CrateContext, def_id: ast::DefId) -> Rc<NamespaceTreeNode> {
impl Dest {
pub fn to_string(&self, ccx: &CrateContext) -> String {
match *self {
- SaveIn(v) => format!("SaveIn({})", ccx.tn.val_to_string(v)),
+ SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
Ignore => "Ignore".to_string()
}
}
let mut bcx = bcx;
// Check for overloaded index.
- let method_ty = ccx.tcx
+ let method_ty = ccx.tcx()
.method_map
.borrow()
.find(&method_call)
let ix_size = machine::llbitsize_of_real(bcx.ccx(),
val_ty(ix_val));
let int_size = machine::llbitsize_of_real(bcx.ccx(),
- ccx.int_type);
+ ccx.int_type());
let ix_val = {
if ix_size < int_size {
if ty::type_is_signed(expr_ty(bcx, idx)) {
- SExt(bcx, ix_val, ccx.int_type)
- } else { ZExt(bcx, ix_val, ccx.int_type) }
+ SExt(bcx, ix_val, ccx.int_type())
+ } else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
- Trunc(bcx, ix_val, ccx.int_type)
+ Trunc(bcx, ix_val, ccx.int_type())
} else {
ix_val
}
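The width adjustment above follows one rule: bring the index to the platform
int width, sign-extending only when the index type is signed. The distinction
matters because the same bit pattern widens differently; a tiny standalone
example (not from the source):

let signed: i8 = -1;     // bit pattern 0xFF
let unsigned: u8 = 255;  // the same bit pattern
assert_eq!(signed as int, -1);    // SExt: the sign bit is replicated
assert_eq!(unsigned as int, 255); // ZExt: the new high bits are zero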
trans_def_fn_unadjusted(bcx, ref_expr, def)
}
def::DefStatic(did, _) => {
+ // There are three things that may happen here:
+ // 1) If the static item is defined in this crate, it will be
+ // translated using `get_item_val`, and we return a pointer to
+ // the result.
+ // 2) If the static item is defined in another crate, but is
+ // marked inlineable, then it will be inlined into this crate
+ // and then translated with `get_item_val`. Again, we return a
+ // pointer to the result.
+ // 3) If the static item is defined in another crate and is not
+ // marked inlineable, then we add (or reuse) a declaration of
+ // an external global, and return a pointer to that.
let const_ty = expr_ty(bcx, ref_expr);
fn get_did(ccx: &CrateContext, did: ast::DefId)
-> ast::DefId {
if did.krate != ast::LOCAL_CRATE {
+ // Case 2 or 3. Which one we're in is determined by
+ // whether the DefId produced by `maybe_instantiate_inline`
+ // is in the LOCAL_CRATE or not.
inline::maybe_instantiate_inline(ccx, did)
} else {
+ // Case 1.
did
}
}
-> ValueRef {
// For external constants, we don't inline.
if did.krate == ast::LOCAL_CRATE {
+ // Case 1 or 2. (The inlining in case 2 produces a new
+ // DefId in LOCAL_CRATE.)
+
// The LLVM global has the type of its initializer,
// which may not be equal to the enum's type for
// non-C-like enums.
let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
PointerCast(bcx, val, pty)
} else {
- match bcx.ccx().extern_const_values.borrow().find(&did) {
+ // Case 3.
+ match bcx.ccx().extern_const_values().borrow().find(&did) {
None => {} // Continue.
Some(llval) => {
return *llval;
&bcx.ccx().sess().cstore,
did);
let llval = symbol.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(bcx.ccx().llmod,
+ llvm::LLVMAddGlobal(bcx.ccx().llmod(),
llty.to_ref(),
buf)
});
- bcx.ccx().extern_const_values.borrow_mut()
+ bcx.ccx().extern_const_values().borrow_mut()
.insert(did, llval);
llval
}
// Otherwise, we should be in the RvalueDpsExpr path.
assert!(
op == ast::UnDeref ||
- !ccx.tcx.method_map.borrow().contains_key(&method_call));
+ !ccx.tcx().method_map.borrow().contains_key(&method_call));
let un_ty = expr_ty(bcx, expr);
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx.method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
+ assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
match op {
ast::BiAnd => {
let mut bcx = bcx;
// Check for overloaded deref.
- let method_ty = ccx.tcx.method_map.borrow()
+ let method_ty = ccx.tcx().method_map.borrow()
.find(&method_call).map(|method| method.ty);
let datum = match method_ty {
Some(method_ty) => {
}
};
unsafe {
+ // Declare a symbol `foo` with the desired linkage.
let g1 = ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty2.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty2.to_ref(), buf)
});
llvm::SetLinkage(g1, linkage);
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(ident.get());
let g2 = real_name.with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
}
}
None => unsafe {
+ // Generate an external declaration.
ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
})
}
}
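The resulting IR makes the two-global trick above easier to see. A hand-written
sketch (the symbol name and the i32 type are illustrative):

// @foo = extern_weak global i32
// @_rust_extern_with_linkage_foo = internal global i32* @foo
//
// If `foo` is discarded at link time, the extern_weak reference resolves to
// null, so the internal shim ends up holding zero rather than a dangling
// address.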
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let llfn = base::get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name,
cc,
llfn_ty,
llfn={}, \
llretptr={})",
callee_ty.repr(tcx),
- ccx.tn.val_to_string(llfn),
- ccx.tn.val_to_string(llretptr));
+ ccx.tn().val_to_string(llfn),
+ ccx.tn().val_to_string(llretptr));
let (fn_abi, fn_sig) = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref fn_ty) => (fn_ty.abi, fn_ty.sig.clone()),
debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
i,
- ccx.tn.val_to_string(llarg_rust),
+ ccx.tn().val_to_string(llarg_rust),
rust_indirect,
- ccx.tn.type_to_string(arg_tys[i].ty));
+ ccx.tn().type_to_string(arg_tys[i].ty));
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
}
debug!("llarg_rust={} (after indirection)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Check whether we need to do any casting
match arg_tys[i].cast {
}
debug!("llarg_rust={} (after casting)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_tys[i].is_indirect();
};
debug!("argument {}, llarg_foreign={}",
- i, ccx.tn.val_to_string(llarg_foreign));
+ i, ccx.tn().val_to_string(llarg_foreign));
// fill padding with undef value
match arg_tys[i].pad {
None => fn_type.ret_ty.ty
};
- debug!("llretptr={}", ccx.tn.val_to_string(llretptr));
- debug!("llforeign_retval={}", ccx.tn.val_to_string(llforeign_retval));
- debug!("llrust_ret_ty={}", ccx.tn.type_to_string(llrust_ret_ty));
- debug!("llforeign_ret_ty={}", ccx.tn.type_to_string(llforeign_ret_ty));
+ debug!("llretptr={}", ccx.tn().val_to_string(llretptr));
+ debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval));
+ debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty));
+ debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty));
if llrust_ret_ty == llforeign_ret_ty {
base::store_ty(bcx, llforeign_retval, llretptr, fn_sig.output)
register_foreign_item_fn(ccx, abi, ty,
lname.get().as_slice(),
Some(foreign_item.span));
+ // Unlike for other items, we shouldn't call
+ // `base::update_linkage` here. Foreign items have
+ // special linkage requirements, which are handled
+ // inside `foreign::register_*`.
}
}
}
_ => {}
}
- ccx.item_symbols.borrow_mut().insert(foreign_item.id,
+ ccx.item_symbols().borrow_mut().insert(foreign_item.id,
lname.get().to_string());
}
}
let llfn = base::decl_fn(ccx, name, cconv, llfn_ty, ty::mk_nil());
add_argument_attributes(&tys, llfn);
debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
- ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
add_argument_attributes(&tys, llfn);
debug!("register_rust_fn_with_foreign_abi(node_id={:?}, llfn_ty={}, llfn={})",
- node_id, ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let t = ty::node_id_to_type(tcx, id).subst(
ccx.tcx(), ¶m_substs.substs);
- let ps = ccx.tcx.map.with_path(id, |path| {
+ let ps = ccx.tcx().map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
link::mangle(path.chain(abi.move_iter()), hash)
});
_ => {
ccx.sess().bug(format!("build_rust_fn: extern fn {} has ty {}, \
expected a bare fn ty",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
t.repr(tcx)).as_slice());
}
};
debug!("build_rust_fn: path={} id={} t={}",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
id, t.repr(tcx));
let llfn = base::decl_internal_rust_fn(ccx, t, ps.as_slice());
let tcx = ccx.tcx();
debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={})",
- ccx.tn.val_to_string(llrustfn),
- ccx.tn.val_to_string(llwrapfn),
+ ccx.tn().val_to_string(llrustfn),
+ ccx.tn().val_to_string(llwrapfn),
t.repr(ccx.tcx()));
// Avoid all the Rust generation stuff and just generate raw
let the_block =
"the block".with_c_str(
- |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llwrapfn, s));
+ |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn, s));
let builder = ccx.builder();
builder.position_at_end(the_block);
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={}",
- ccx.tn.val_to_string(llforeign_outptr));
+ ccx.tn().val_to_string(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={} (casted)",
- ccx.tn.val_to_string(llrust_retptr));
+ ccx.tn().val_to_string(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
allocad={}, \
llrust_ret_ty={}, \
return_ty={}",
- ccx.tn.val_to_string(slot),
- ccx.tn.type_to_string(llrust_ret_ty),
+ ccx.tn().val_to_string(slot),
+ ccx.tn().type_to_string(llrust_ret_ty),
tys.fn_sig.output.repr(tcx));
llrust_args.push(slot);
return_alloca = Some(slot);
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
debug!("llforeign_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llforeign_arg));
+ i, ccx.tn().val_to_string(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
};
debug!("llrust_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llrust_arg));
+ i, ccx.tn().val_to_string(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
- debug!("calling llrustfn = {}, t = {}", ccx.tn.val_to_string(llrustfn), t.repr(ccx.tcx()));
+ debug!("calling llrustfn = {}, t = {}",
+ ccx.tn().val_to_string(llrustfn), t.repr(ccx.tcx()));
let attributes = base::get_fn_llvm_attributes(ccx, t);
let llrust_ret_val = builder.call(llrustfn, llrust_args.as_slice(), Some(attributes));
fn_ty={} -> {}, \
ret_def={}",
ty.repr(ccx.tcx()),
- ccx.tn.types_to_str(llsig.llarg_tys.as_slice()),
- ccx.tn.type_to_string(llsig.llret_ty),
- ccx.tn.types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
- ccx.tn.type_to_string(fn_ty.ret_ty.ty),
+ ccx.tn().types_to_str(llsig.llarg_tys.as_slice()),
+ ccx.tn().type_to_string(llsig.llret_ty),
+ ccx.tn().types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
+ ccx.tn().type_to_string(fn_ty.ret_ty.ty),
ret_def);
ForeignTypes {
Some(expr::Ignore)).bcx
}
-fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
+pub fn trans_exchange_free_dyn<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
align: ValueRef) -> &'a Block<'a> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
align: u64) -> &'a Block<'a> {
- trans_exchange_free_internal(cx,
- v,
- C_uint(cx.ccx(), size as uint),
- C_uint(cx.ccx(), align as uint))
+ trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
+ C_uint(cx.ccx(), align as uint))
}
pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
return ty::mk_i8();
}
match ty::get(t).sty {
- ty::ty_box(typ) if !ty::type_needs_drop(tcx, typ) =>
- ty::mk_box(tcx, ty::mk_i8()),
-
ty::ty_uniq(typ) if !ty::type_needs_drop(tcx, typ)
&& ty::type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8()
} else {
- ty::mk_uniq(tcx, ty::mk_i8())
+ t
}
}
_ => t
debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
let t = get_drop_glue_type(ccx, t);
debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
- match ccx.drop_glues.borrow().find(&t) {
+ match ccx.drop_glues().borrow().find(&t) {
Some(&glue) => return glue,
_ => { }
}
};
let llfnty = Type::glue_fn(ccx, llty);
- let glue = declare_generic_glue(ccx, t, llfnty, "drop");
- ccx.drop_glues.borrow_mut().insert(t, glue);
+ let (glue, new_sym) = match ccx.available_drop_glues().borrow().find(&t) {
+ Some(old_sym) => {
+ let glue = decl_cdecl_fn(ccx, old_sym.as_slice(), llfnty, ty::mk_nil());
+ (glue, None)
+ },
+ None => {
+ let (sym, glue) = declare_generic_glue(ccx, t, llfnty, "drop");
+ (glue, Some(sym))
+ },
+ };
- make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
+ ccx.drop_glues().borrow_mut().insert(t, glue);
+
+ // To avoid infinite recursion, don't call `make_drop_glue` until after
+ // we've added the entry to the `drop_glues` cache.
+ match new_sym {
+ Some(sym) => {
+ ccx.available_drop_glues().borrow_mut().insert(t, sym);
+ // We're creating a new drop glue, so also generate a body.
+ make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
+ },
+ None => {},
+ }
glue
}
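The cache-before-build ordering matters for self-referential types. A minimal
example of a type whose drop glue would otherwise recurse without bound:

// Generating drop glue for `List` requires the glue for `Option<Box<List>>`,
// which requires the glue for `List` again; inserting into `drop_glues`
// before calling `make_generic_glue` breaks the cycle by handing back the
// declared-but-not-yet-filled glue function.
struct List {
    next: Option<Box<List>>,
}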
Some(visit_glue) => visit_glue,
None => {
debug!("+++ lazily_emit_tydesc_glue VISIT {}", ppaux::ty_to_string(ccx.tcx(), ti.ty));
- let glue_fn = declare_generic_glue(ccx, ti.ty, llfnty, "visit");
+
+ let (glue_fn, new_sym) = match ccx.available_visit_glues().borrow().find(&ti.ty) {
+ Some(old_sym) => {
+ let glue_fn = decl_cdecl_fn(ccx, old_sym.as_slice(), llfnty, ty::mk_nil());
+ (glue_fn, None)
+ },
+ None => {
+ let (sym, glue_fn) = declare_generic_glue(ccx, ti.ty, llfnty, "visit");
+ (glue_fn, Some(sym))
+ },
+ };
+
ti.visit_glue.set(Some(glue_fn));
- make_generic_glue(ccx, ti.ty, glue_fn, make_visit_glue, "visit");
+
+ match new_sym {
+ Some(sym) => {
+ ccx.available_visit_glues().borrow_mut().insert(ti.ty, sym);
+ make_generic_glue(ccx, ti.ty, glue_fn, make_visit_glue, "visit");
+ },
+ None => {},
+ }
+
debug!("--- lazily_emit_tydesc_glue VISIT {}", ppaux::ty_to_string(ccx.tcx(), ti.ty));
glue_fn
}
let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
- trans_exchange_free_internal(bcx, llbox, llsize, llalign)
+ trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
})
}
_ => {
with_cond(bcx, IsNotNull(bcx, env), |bcx| {
let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]);
let dtor = Load(bcx, dtor_ptr);
- let cdata = GEPi(bcx, env, [0u, abi::box_field_body]);
- Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], None);
-
- // Free the environment itself
- // FIXME: #13994: pass align and size here
- trans_exchange_free(bcx, env, 0, 8)
+ Call(bcx, dtor, [PointerCast(bcx, box_cell_v, Type::i8p(bcx.ccx()))], None);
+ bcx
})
}
ty::ty_trait(..) => {
pub fn declare_tydesc(ccx: &CrateContext, t: ty::t) -> tydesc_info {
// If emit_tydescs already ran, then we shouldn't be creating any new
// tydescs.
- assert!(!ccx.finished_tydescs.get());
+ assert!(!ccx.finished_tydescs().get());
let llty = type_of(ccx, t);
debug!("+++ declare_tydesc {} {}", ppaux::ty_to_string(ccx.tcx(), t), name);
let gvar = name.as_slice().with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(ccx.llmod, ccx.tydesc_type().to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), ccx.tydesc_type().to_ref(), buf)
}
});
note_unique_llvm_symbol(ccx, name);
}
fn declare_generic_glue(ccx: &CrateContext, t: ty::t, llfnty: Type,
- name: &str) -> ValueRef {
+ name: &str) -> (String, ValueRef) {
let _icx = push_ctxt("declare_generic_glue");
let fn_nm = mangle_internal_name_by_type_and_seq(
ccx,
t,
format!("glue_{}", name).as_slice());
let llfn = decl_cdecl_fn(ccx, fn_nm.as_slice(), llfnty, ty::mk_nil());
- note_unique_llvm_symbol(ccx, fn_nm);
- return llfn;
+ note_unique_llvm_symbol(ccx, fn_nm.clone());
+ return (fn_nm, llfn);
}
fn make_generic_glue(ccx: &CrateContext,
let bcx = init_function(&fcx, false, ty::mk_nil());
- llvm::SetLinkage(llfn, llvm::InternalLinkage);
- ccx.stats.n_glues_created.set(ccx.stats.n_glues_created.get() + 1u);
+ update_linkage(ccx, llfn, None, OriginalTranslation);
+
+ ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1u);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
pub fn emit_tydescs(ccx: &CrateContext) {
let _icx = push_ctxt("emit_tydescs");
// As of this point, allow no more tydescs to be created.
- ccx.finished_tydescs.set(true);
+ ccx.finished_tydescs().set(true);
let glue_fn_ty = Type::generic_glue_fn(ccx).ptr_to();
- for (_, ti) in ccx.tydescs.borrow().iter() {
+ for (_, ti) in ccx.tydescs().borrow().iter() {
// Each of the glue functions needs to be cast to a generic type
// before being put into the tydesc because we only have a singleton
// tydesc type. Then we'll recast each function to its real type when
let drop_glue = unsafe {
llvm::LLVMConstPointerCast(get_drop_glue(ccx, ti.ty), glue_fn_ty.to_ref())
};
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() + 1);
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() + 1);
let visit_glue =
match ti.visit_glue.get() {
None => {
- ccx.stats.n_null_glues.set(ccx.stats.n_null_glues.get() +
+ ccx.stats().n_null_glues.set(ccx.stats().n_null_glues.get() +
1u);
C_null(glue_fn_ty)
}
Some(v) => {
unsafe {
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() +
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() +
1);
llvm::LLVMConstPointerCast(v, glue_fn_ty.to_ref())
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use llvm::{AvailableExternallyLinkage, SetLinkage};
+use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage};
use metadata::csearch;
use middle::astencode;
use middle::trans::base::{push_ctxt, trans_item, get_item_val, trans_fn};
pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
-> ast::DefId {
let _icx = push_ctxt("maybe_instantiate_inline");
- match ccx.external.borrow().find(&fn_id) {
+ match ccx.external().borrow().find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline({}): already inline as node id {}",
|a,b,c,d| astencode::decode_inlined_item(a, b, c, d));
return match csearch_result {
csearch::not_found => {
- ccx.external.borrow_mut().insert(fn_id, None);
+ ccx.external().borrow_mut().insert(fn_id, None);
fn_id
}
csearch::found(ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
trans_item(ccx, &*item);
- // We're bringing an external global into this crate, but we don't
- // want to create two copies of the global. If we do this, then if
- // you take the address of the global in two separate crates you get
- // two different addresses. This is bad for things like conditions,
- // but it could possibly have other adverse side effects. We still
- // want to achieve the optimizations related to this global,
- // however, so we use the available_externally linkage which llvm
- // provides
- match item.node {
+ let linkage = match item.node {
+ ast::ItemFn(_, _, _, ref generics, _) => {
+ if generics.is_type_parameterized() {
+ // Generics have no symbol, so they can't be given any
+ // linkage.
+ None
+ } else {
+ if ccx.sess().opts.cg.codegen_units == 1 {
+ // We could use AvailableExternallyLinkage here,
+ // but InternalLinkage allows LLVM to optimize more
+ // aggressively (at the cost of sometimes
+ // duplicating code).
+ Some(InternalLinkage)
+ } else {
+ // With multiple compilation units, duplicated code
+ // is more of a problem. Also, `codegen_units > 1`
+ // means the user is okay with losing some
+ // performance.
+ Some(AvailableExternallyLinkage)
+ }
+ }
+ }
ast::ItemStatic(_, mutbl, _) => {
- let g = get_item_val(ccx, item.id);
- // see the comment in get_item_val() as to why this check is
- // performed here.
- if ast_util::static_has_significant_address(
- mutbl,
- item.attrs.as_slice()) {
- SetLinkage(g, AvailableExternallyLinkage);
+ if !ast_util::static_has_significant_address(mutbl, item.attrs.as_slice()) {
+ // Inlined static items use internal linkage when
+ // possible, so that LLVM will coalesce globals with
+ // identical initializers. (It only does this for
+ // globals with unnamed_addr and either internal or
+ // private linkage.)
+ Some(InternalLinkage)
+ } else {
+ // The address is significant, so we can't create an
+ // internal copy of the static. (The copy would have a
+ // different address from the original.)
+ Some(AvailableExternallyLinkage)
}
}
- _ => {}
+ _ => unreachable!(),
+ };
+
+ match linkage {
+ Some(linkage) => {
+ let g = get_item_val(ccx, item.id);
+ SetLinkage(g, linkage);
+ }
+ None => {}
}
local_def(item.id)
}
csearch::found(ast::IIForeign(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
local_def(item.id)
}
csearch::found_parent(parent_id, ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(parent_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, parent_id);
+ ccx.external().borrow_mut().insert(parent_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
let mut my_id = 0;
match item.node {
let vs_there = ty::enum_variants(ccx.tcx(), parent_id);
for (here, there) in vs_here.iter().zip(vs_there.iter()) {
if there.id == fn_id { my_id = here.id.node; }
- ccx.external.borrow_mut().insert(there.id, Some(here.id.node));
+ ccx.external().borrow_mut().insert(there.id, Some(here.id.node));
}
}
ast::ItemStruct(ref struct_def, _) => {
match struct_def.ctor_id {
None => {}
Some(ctor_id) => {
- ccx.external.borrow_mut().insert(fn_id, Some(ctor_id));
+ ccx.external().borrow_mut().insert(fn_id, Some(ctor_id));
my_id = ctor_id;
}
}
match impl_item {
ast::ProvidedInlinedTraitItem(mth) |
ast::RequiredInlinedTraitItem(mth) => {
- ccx.external.borrow_mut().insert(fn_id, Some(mth.id));
- ccx.external_srcs.borrow_mut().insert(mth.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(mth.id));
+ ccx.external_srcs().borrow_mut().insert(mth.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
}
}
¶m_substs::empty(),
mth.id,
[]);
+ // Use InternalLinkage so LLVM can optimize more
+ // aggressively.
+ SetLinkage(llfn, InternalLinkage);
}
local_def(mth.id)
}
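The linkage choices scattered through the arms above reduce to a small policy
table. A condensed restatement as hypothetical helpers (these functions are not
in the source; they only mirror its logic):

fn inlined_fn_linkage(generic: bool, codegen_units: uint) -> Option<llvm::Linkage> {
    if generic {
        None // no symbol of its own, so no linkage to assign
    } else if codegen_units == 1 {
        Some(llvm::InternalLinkage) // duplicate freely; let LLVM optimize harder
    } else {
        Some(llvm::AvailableExternallyLinkage) // limit cross-unit duplication
    }
}

fn inlined_static_linkage(addr_significant: bool) -> Option<llvm::Linkage> {
    if addr_significant {
        Some(llvm::AvailableExternallyLinkage) // keep one address crate-wide
    } else {
        Some(llvm::InternalLinkage) // LLVM may coalesce identical initializers
    }
}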
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
- for transmute_restriction in ccx.tcx
+ for transmute_restriction in ccx.tcx()
.transmute_restrictions
.borrow()
.iter() {
let hash = ty::hash_crate_independent(
ccx.tcx(),
*substs.types.get(FnSpace, 0),
- &ccx.link_meta.crate_hash);
+ &ccx.link_meta().crate_hash);
// NB: This needs to be kept in lockstep with the TypeId struct in
// the intrinsic module
C_named_struct(llret_ty, [C_u64(ccx, hash)])
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
+ let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
let name = if allow_overlap {
if int_size == 32 {
"llvm.memmove.p0i8.p0i8.i32"
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
+ let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
"llvm.memset.p0i8.i32"
} else {
"llvm.memset.p0i8.i64"
impl LlvmRepr for Type {
fn llrepr(&self, ccx: &CrateContext) -> String {
- ccx.tn.type_to_string(*self)
+ ccx.tn().type_to_string(*self)
}
}
impl LlvmRepr for ValueRef {
fn llrepr(&self, ccx: &CrateContext) -> String {
- ccx.tn.val_to_string(*self)
+ ccx.tn().val_to_string(*self)
}
}
// Returns the number of bytes clobbered by a Store to this type.
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMStoreSizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABISizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// below.
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- let nbits = llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64;
+ let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64;
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
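
A worked instance of the rounding above, with an assumed 36-bit type width for illustration:

```rust
// 36 bits is not byte-aligned (36 & 7 == 4), so the size spills into a
// fifth byte: 1 + (36 >> 3) == 5.
fn main() {
    let nbits = 36u64;
    let bytes = if nbits & 7 != 0 { 1 + (nbits >> 3) } else { nbits >> 3 };
    assert_eq!(bytes, 5);
}
```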
/// Returns the "real" size of the type in bits.
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64
+ llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64
}
}
// space to be consumed.
pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
if llbitsize_of_real(cx, ty) == 0 {
- unsafe { llvm::LLVMConstInt(cx.int_type.to_ref(), 1, False) }
+ unsafe { llvm::LLVMConstInt(cx.int_type().to_ref(), 1, False) }
} else {
llsize_of(cx, ty)
}
// allocations inside a stack frame, which LLVM has a free hand in.
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// and similar ABI-mandated things.
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABIAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
pub fn llalign_of(cx: &CrateContext, ty: Type) -> ValueRef {
unsafe {
return llvm::LLVMConstIntCast(
- llvm::LLVMAlignOf(ty.to_ref()), cx.int_type.to_ref(), False);
+ llvm::LLVMAlignOf(ty.to_ref()), cx.int_type().to_ref(), False);
}
}
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: uint) -> u64 {
unsafe {
- return llvm::LLVMOffsetOfElement(cx.td.lltd, struct_ty.to_ref(), element as u32) as u64;
+ return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(), element as u32) as u64;
}
}
use std::c_str::ToCStr;
use syntax::abi::{Rust, RustCall};
use syntax::parse::token;
-use syntax::{ast, ast_map, visit};
+use syntax::{ast, ast_map, attr, visit};
use syntax::ast_util::PostExpansionMethod;
// drop_glue pointer, size, align.
match *impl_item {
ast::MethodImplItem(method) => {
if method.pe_generics().ty_params.len() == 0u {
- let llfn = get_item_val(ccx, method.id);
- trans_fn(ccx,
- &*method.pe_fn_decl(),
- &*method.pe_body(),
- llfn,
- &param_substs::empty(),
- method.id,
- []);
+ let trans_everywhere = attr::requests_inline(method.attrs.as_slice());
+ for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
+ let llfn = get_item_val(ccx, method.id);
+ trans_fn(ccx,
+ &*method.pe_fn_decl(),
+ &*method.pe_body(),
+ llfn,
+ &param_substs::empty(),
+ method.id,
+ []);
+ update_linkage(ccx,
+ llfn,
+ Some(method.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
+ }
}
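
The loop above emits one original translation of the method plus an inlined copy per additional compilation unit that wants it. The reason, sketched with a hypothetical item: a compilation unit can only inline a function whose body it carries.

```rust
// Illustrative only: cross-unit callers can inline `bump` only if the
// unit they live in holds a copy of its body, which is why #[inline]
// items are translated everywhere rather than once.
struct Counter { n: uint }

impl Counter {
    #[inline]
    pub fn bump(&mut self) { self.n += 1; }
}
```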
let mut v = TransItemVisitor {
ccx: ccx,
let vtable_key = MethodCall::expr(expr_id);
let vtbls = resolve_vtables_in_fn_ctxt(
bcx.fcx,
- ccx.tcx.vtable_map.borrow().get(&vtable_key));
+ ccx.tcx().vtable_map.borrow().get(&vtable_key));
match *vtbls.get_self().unwrap().get(0) {
typeck::vtable_static(impl_did, ref rcvr_substs, ref rcvr_origins) => {
fn method_with_name(ccx: &CrateContext, impl_id: ast::DefId, name: ast::Name)
-> ast::DefId {
- match ccx.impl_method_cache.borrow().find_copy(&(impl_id, name)) {
+ match ccx.impl_method_cache().borrow().find_copy(&(impl_id, name)) {
Some(m) => return m,
None => {}
}
- let impl_items = ccx.tcx.impl_items.borrow();
+ let impl_items = ccx.tcx().impl_items.borrow();
let impl_items =
impl_items.find(&impl_id)
.expect("could not find impl while translating");
.find(|&did| {
match *did {
ty::MethodTraitItemId(did) => {
- ty::impl_or_trait_item(&ccx.tcx,
+ ty::impl_or_trait_item(ccx.tcx(),
did).ident()
.name ==
name
}).expect("could not find method while \
translating");
- ccx.impl_method_cache.borrow_mut().insert((impl_id, name),
+ ccx.impl_method_cache().borrow_mut().insert((impl_id, name),
meth_did.def_id());
meth_did.def_id()
}
// Check the cache.
let hash_id = (self_ty, monomorphize::make_vtable_id(ccx, origins.get(0)));
- match ccx.vtables.borrow().find(&hash_id) {
+ match ccx.vtables().borrow().find(&hash_id) {
Some(&val) => { return val }
None => { }
}
let drop_glue = glue::get_drop_glue(ccx, self_ty);
let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
- ccx.vtables.borrow_mut().insert(hash_id, vtable);
+ ccx.vtables().borrow_mut().insert(hash_id, vtable);
vtable
}
let tbl = C_struct(ccx, components.as_slice(), false);
let sym = token::gensym("vtable");
let vt_gvar = format!("vtable{}", sym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, val_ty(tbl).to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), val_ty(tbl).to_ref(), buf)
});
llvm::LLVMSetInitializer(vt_gvar, tbl);
llvm::LLVMSetGlobalConstant(vt_gvar, llvm::True);
self_ty: ty::t) -> ValueRef {
let ccx = bcx.ccx();
let origins = {
- let vtable_map = ccx.tcx.vtable_map.borrow();
+ let vtable_map = ccx.tcx().vtable_map.borrow();
// This trait cast might be because of implicit coercion
- let adjs = ccx.tcx.adjustments.borrow();
+ let adjs = ccx.tcx().adjustments.borrow();
let adjust = adjs.find(&id);
let method_call = if adjust.is_some() && ty::adjust_is_object(adjust.unwrap()) {
MethodCall::autoobject(id)
use back::link::exported_name;
use driver::session;
use llvm::ValueRef;
+use llvm;
use middle::subst;
use middle::subst::Subst;
use middle::trans::base::{set_llvm_fn_attrs, set_inline_hint};
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util::{local_def, PostExpansionMethod};
+use syntax::attr;
use std::hash::{sip, Hash};
pub fn monomorphic_fn(ccx: &CrateContext,
params: real_substs.types.clone()
};
- match ccx.monomorphized.borrow().find(&hash_id) {
+ match ccx.monomorphized().borrow().find(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ty::item_path_str(ccx.tcx(), fn_id));
let map_node = session::expect(
ccx.sess(),
- ccx.tcx.map.find(fn_id.node),
+ ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
match map_node {
ast_map::NodeForeignItem(_) => {
- if ccx.tcx.map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
+ if ccx.tcx().map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), true);
}
debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx()));
let mono_ty = llitem_ty.subst(ccx.tcx(), real_substs);
- ccx.stats.n_monos.set(ccx.stats.n_monos.get() + 1);
+ ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
- let mut monomorphizing = ccx.monomorphizing.borrow_mut();
+ let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.find(&fn_id) {
Some(&d) => d, None => 0
};
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
- ccx.sess().span_fatal(ccx.tcx.map.span(fn_id.node),
+ ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
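
The limit exists because some instantiation chains never bottom out. A minimal, hypothetical program that would trip it:

```rust
// Each call instantiates `expand` at a strictly deeper type
// (T, Option<T>, Option<Option<T>>, ...), so monomorphization would
// recurse forever without the limit.
fn expand<T>(x: T) { expand(Some(x)) }

fn main() { expand(0u); }
```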
mono_ty.hash(&mut state);
hash = format!("h{}", state.result());
- ccx.tcx.map.with_path(fn_id.node, |path| {
+ ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, hash.as_slice())
})
};
decl_internal_rust_fn(ccx, mono_ty, s.as_slice())
};
- ccx.monomorphized.borrow_mut().insert(hash_id.take().unwrap(), lldecl);
+ ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
+ let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
+ base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
+ set_llvm_fn_attrs(attrs, lldecl);
+
+ let is_first = !ccx.available_monomorphizations().borrow().contains(&s);
+ if is_first {
+ ccx.available_monomorphizations().borrow_mut().insert(s.clone());
+ }
+
+ let trans_everywhere = attr::requests_inline(attrs);
+ if trans_everywhere && !is_first {
+ llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
+ }
+
+ // If `true`, then `lldecl` should be given a function body.
+ // Otherwise, it should be left as a declaration of an external
+ // function, with no definition in the current compilation unit.
+ trans_everywhere || is_first
+ };
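
Restating the policy in `setup_lldecl` (an illustrative truth table, not compiler code): the first local instantiation always gets a body, every `#[inline]` instantiation gets a body, and a repeat `#[inline]` instantiation is marked available-externally so LLVM can inline from it without emitting a duplicate symbol.

```rust
//   is_first  requests_inline  emit body?  linkage tweak
//   true      false            yes         none
//   true      true             yes         none
//   false     true             yes         AvailableExternally
//   false     false            no          declaration only
fn needs_body(is_first: bool, requests_inline: bool) -> bool {
    requests_inline || is_first
}
```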
let lldecl = match map_node {
ast_map::NodeItem(i) => {
..
} => {
let d = mk_lldecl(abi);
- set_llvm_fn_attrs(i.attrs.as_slice(), d);
-
- if abi != abi::Rust {
- foreign::trans_rust_fn_with_foreign_abi(
- ccx, &**decl, &**body, [], d, &psubsts, fn_id.node,
- Some(hash.as_slice()));
- } else {
- trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
+ let needs_body = setup_lldecl(d, i.attrs.as_slice());
+ if needs_body {
+ if abi != abi::Rust {
+ foreign::trans_rust_fn_with_foreign_abi(
+ ccx, &**decl, &**body, [], d, &psubsts, fn_id.node,
+ Some(hash.as_slice()));
+ } else {
+ trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
+ }
}
d
}
}
ast_map::NodeVariant(v) => {
- let parent = ccx.tcx.map.get_parent(fn_id.node);
+ let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ty::enum_variants(ccx.tcx(), local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
match *ii {
ast::MethodImplItem(mth) => {
let d = mk_lldecl(abi::Rust);
- set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx,
- &*mth.pe_fn_decl(),
- &*mth.pe_body(),
- d,
- &psubsts,
- mth.id,
- []);
+ let needs_body = setup_lldecl(d, mth.attrs.as_slice());
+ if needs_body {
+ trans_fn(ccx,
+ &*mth.pe_fn_decl(),
+ &*mth.pe_body(),
+ d,
+ &psubsts,
+ mth.id,
+ []);
+ }
d
}
}
match *method {
ast::ProvidedMethod(mth) => {
let d = mk_lldecl(abi::Rust);
- set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx, &*mth.pe_fn_decl(), &*mth.pe_body(), d,
- &psubsts, mth.id, []);
+ let needs_body = setup_lldecl(d, mth.attrs.as_slice());
+ if needs_body {
+ trans_fn(ccx, &*mth.pe_fn_decl(), &*mth.pe_body(), d,
+ &psubsts, mth.id, []);
+ }
d
}
_ => {
}
};
- ccx.monomorphizing.borrow_mut().insert(fn_id, depth);
+ ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id));
(lldecl, true)
let sym = mangle_internal_name_by_path_and_seq(
ast_map::Values([].iter()).chain(None), "get_disr");
- let fn_ty = ty::mk_ctor_fn(&ccx.tcx, ast::DUMMY_NODE_ID,
+ let fn_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[opaqueptrty], ty::mk_u64());
let llfdecl = decl_internal_rust_fn(ccx,
fn_ty,
use middle::trans::expr::{Dest, Ignore, SaveIn};
use middle::trans::expr;
use middle::trans::glue;
+use middle::trans::machine;
use middle::trans::machine::{nonzero_llsize_of, llsize_of_alloc};
use middle::trans::type_::Type;
use middle::trans::type_of;
-> &'a Block<'a> {
let not_null = IsNotNull(bcx, vptr);
with_cond(bcx, not_null, |bcx| {
+ let ccx = bcx.ccx();
let tcx = bcx.tcx();
let _icx = push_ctxt("tvec::make_drop_glue_unboxed");
if should_deallocate {
let not_null = IsNotNull(bcx, dataptr);
with_cond(bcx, not_null, |bcx| {
- // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation
- glue::trans_exchange_free(bcx, dataptr, 0, 8)
+ let llty = type_of::type_of(ccx, unit_ty);
+ let llsize = machine::llsize_of(ccx, llty);
+ let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
+ let size = Mul(bcx, llsize, get_len(bcx, vptr));
+ glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
})
} else {
bcx
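
With this change the glue computes the true deallocation size, `size_of(unit) * len`, at the unit type's minimum alignment, instead of the old placeholder size 0 and alignment 8. A worked instance, assuming a boxed slice of five `u64`s:

```rust
fn main() {
    let unit_size = 8u;          // size_of::<u64>()
    let len = 5u;
    let size = unit_size * len;  // bytes passed to exchange_free_dyn
    let align = 8u;              // minimum alignment of u64
    assert_eq!((size, align), (40u, 8u));
}
```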
format!("VecTypes {{unit_ty={}, llunit_ty={}, \
llunit_size={}, llunit_alloc_size={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
- ccx.tn.type_to_string(self.llunit_ty),
- ccx.tn.val_to_string(self.llunit_size),
+ ccx.tn().type_to_string(self.llunit_ty),
+ ccx.tn().val_to_string(self.llunit_size),
self.llunit_alloc_size)
}
}
debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let vec_ty = node_id_type(bcx, uniq_expr.id);
- let unit_sz = nonzero_llsize_of(ccx, type_of::type_of(ccx, vt.unit_ty));
+ let llty = type_of::type_of(ccx, vt.unit_ty);
+ let unit_sz = nonzero_llsize_of(ccx, llty);
let llcount = if count < 4u {
C_int(ccx, 4)
} else {
C_uint(ccx, count)
};
let alloc = Mul(bcx, llcount, unit_sz);
- let llty_ptr = type_of::type_of(ccx, vt.unit_ty).ptr_to();
- let align = C_uint(ccx, 8);
+ let llty_ptr = llty.ptr_to();
+ let align = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx,
llty_ptr,
vec_ty,
// Create a temporary scope lest execution should fail while
// constructing the vector.
let temp_scope = fcx.push_custom_cleanup_scope();
- // FIXME: #13994: the old `Box<[T]>` will not support sized deallocation,
- // this is a placeholder
- fcx.schedule_free_value(cleanup::CustomScope(temp_scope),
- dataptr, cleanup::HeapExchange, vt.unit_ty);
- debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
- bcx.val_to_string(dataptr), count);
+ fcx.schedule_free_slice(cleanup::CustomScope(temp_scope),
+ dataptr, alloc, align, cleanup::HeapExchange);
- let bcx = write_content(bcx, &vt, uniq_expr,
- content_expr, SaveIn(dataptr));
+ debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
+ bcx.val_to_string(dataptr), count);
+
+ let bcx = write_content(bcx, &vt, uniq_expr,
+ content_expr, SaveIn(dataptr));
fcx.pop_custom_cleanup_scope(temp_scope);
let loop_counter = {
// i = 0
- let i = alloca(loop_bcx, bcx.ccx().int_type, "__i");
+ let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
Store(loop_bcx, C_uint(bcx.ccx(), 0), i);
Br(loop_bcx, cond_bcx.llbb);
}
pub fn void(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMVoidTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
}
pub fn nil(ccx: &CrateContext) -> Type {
}
pub fn metadata(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx()))
}
pub fn i1(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt1TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
}
pub fn i8(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt8TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
}
pub fn i16(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt16TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
}
pub fn i32(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt32TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
}
pub fn i64(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt64TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
}
pub fn f32(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMFloatTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
}
pub fn f64(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
}
pub fn bool(ccx: &CrateContext) -> Type {
}
pub fn int(ccx: &CrateContext) -> Type {
- match ccx.tcx.sess.targ_cfg.arch {
+ match ccx.tcx().sess.targ_cfg.arch {
X86 | Arm | Mips | Mipsel => Type::i32(ccx),
X86_64 => Type::i64(ccx)
}
pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
match t {
- ast::TyI => ccx.int_type,
+ ast::TyI => ccx.int_type(),
ast::TyI8 => Type::i8(ccx),
ast::TyI16 => Type::i16(ccx),
ast::TyI32 => Type::i32(ccx),
pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
match t {
- ast::TyU => ccx.int_type,
+ ast::TyU => ccx.int_type(),
ast::TyU8 => Type::i8(ccx),
ast::TyU16 => Type::i16(ccx),
ast::TyU32 => Type::i32(ccx),
pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
let els : &[TypeRef] = unsafe { mem::transmute(els) };
- ty!(llvm::LLVMStructTypeInContext(ccx.llcx, els.as_ptr(),
+ ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
els.len() as c_uint,
packed as Bool))
}
pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
- ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx, s)))
+ ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx(), s)))
}
pub fn empty_struct(ccx: &CrateContext) -> Type {
}
pub fn generic_glue_fn(cx: &CrateContext) -> Type {
- match cx.tn.find_type("glue_fn") {
+ match cx.tn().find_type("glue_fn") {
Some(ty) => return ty,
None => ()
}
let ty = Type::glue_fn(cx, Type::i8p(cx));
- cx.tn.associate_type("glue_fn", &ty);
+ cx.tn().associate_type("glue_fn", &ty);
ty
}
// The box pointed to by @T.
pub fn at_box(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, [
- ccx.int_type, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
+ ccx.int_type(), Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
Type::i8p(ccx), Type::i8p(ccx), ty
], false)
}
// recursive types. For example, enum types rely on this behavior.
pub fn sizing_type_of(cx: &CrateContext, t: ty::t) -> Type {
- match cx.llsizingtypes.borrow().find_copy(&t) {
+ match cx.llsizingtypes().borrow().find_copy(&t) {
Some(t) => return t,
None => ()
}
ty::ty_vec(_, None) | ty::ty_trait(..) | ty::ty_str => fail!("unreachable")
};
- cx.llsizingtypes.borrow_mut().insert(t, llsizingty);
+ cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
llsizingty
}
}
// Check the cache.
- match cx.lltypes.borrow().find(&t) {
+ match cx.lltypes().borrow().find(&t) {
Some(&llty) => return llty,
None => ()
}
t,
t_norm.repr(cx.tcx()),
t_norm,
- cx.tn.type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.tn().type_to_string(llty));
+ cx.lltypes().borrow_mut().insert(t, llty);
return llty;
}
ty::ty_str => {
// This means we get a nicer name in the output (str is always
// unsized).
- cx.tn.find_type("str_slice").unwrap()
+ cx.tn().find_type("str_slice").unwrap()
}
ty::ty_trait(..) => Type::opaque_trait(cx),
_ if !ty::type_is_sized(cx.tcx(), ty) => {
debug!("--> mapped t={} {:?} to llty={}",
t.repr(cx.tcx()),
t,
- cx.tn.type_to_string(llty));
+ cx.tn().type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.lltypes().borrow_mut().insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
tc | TC::Managed
} else if Some(did) == cx.lang_items.no_copy_bound() {
tc | TC::OwnsAffine
- } else if Some(did) == cx.lang_items.no_share_bound() {
+ } else if Some(did) == cx.lang_items.no_sync_bound() {
tc | TC::ReachesNoSync
} else if Some(did) == cx.lang_items.unsafe_type() {
// FIXME(#13231): This shouldn't be needed after
AD_Intel = 1
}
-#[deriving(PartialEq)]
+#[deriving(PartialEq, Clone)]
#[repr(C)]
pub enum CodeGenOptLevel {
CodeGenLevelNone = 0,
}).unwrap_or(HashMap::new());
let mut cache = Cache {
impls: HashMap::new(),
- external_paths: paths.iter().map(|(&k, &(ref v, _))| (k, v.clone()))
+ external_paths: paths.iter().map(|(&k, v)| (k, v.ref0().clone()))
.collect(),
paths: paths,
implementors: HashMap::new(),
color: #333;
}
+.location a:first-child { font-weight: bold; }
+
.block {
padding: 0 10px;
margin-bottom: 14px;
use std::collections::{HashSet, HashMap};
use testing;
-use rustc::back::link;
+use rustc::back::write;
use rustc::driver::config;
use rustc::driver::driver;
use rustc::driver::session;
maybe_sysroot: Some(os::self_exe_path().unwrap().dir_path()),
addl_lib_search_paths: RefCell::new(libs),
crate_types: vec!(config::CrateTypeExecutable),
- output_types: vec!(link::OutputTypeExe),
+ output_types: vec!(write::OutputTypeExe),
no_trans: no_run,
externs: externs,
cg: config::CodegenOptions {
None,
span_diagnostic_handler);
- let outdir = TempDir::new("rustdoctest").expect("rustdoc needs a tempdir");
+ let outdir = TempDir::new("rustdoctest").ok().expect("rustdoc needs a tempdir");
let out = Some(outdir.path().clone());
let cfg = config::build_configuration(&sess);
let libdir = sess.target_filesearch().get_lib_path();
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-// ignore-lexer-test FIXME #15883
-
-//! Unordered containers, implemented as hash-tables (`HashSet` and `HashMap` types)
-
-use clone::Clone;
-use cmp::{max, Eq, Equiv, PartialEq};
-use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
-use default::Default;
-use fmt::Show;
-use fmt;
-use hash::{Hash, Hasher, RandomSipHasher};
-use iter::{Iterator, FilterMap, Chain, Repeat, Zip, Extendable};
-use iter::{range, range_inclusive, FromIterator};
-use iter;
-use mem::replace;
-use num;
-use option::{Some, None, Option};
-use result::{Ok, Err};
-use ops::Index;
-
-mod table {
- use clone::Clone;
- use cmp;
- use hash::{Hash, Hasher};
- use iter::range_step_inclusive;
- use iter::{Iterator, range};
- use kinds::marker;
- use mem::{min_align_of, size_of};
- use mem::{overwrite, transmute};
- use num::{CheckedMul, is_power_of_two};
- use ops::Drop;
- use option::{Some, None, Option};
- use ptr::RawPtr;
- use ptr::set_memory;
- use ptr;
- use rt::heap::{allocate, deallocate};
-
- static EMPTY_BUCKET: u64 = 0u64;
-
- /// The raw hashtable, providing safe-ish access to the unzipped and highly
- /// optimized arrays of hashes, keys, and values.
- ///
- /// This design uses less memory and is a lot faster than the naive
- /// `Vec<Option<u64, K, V>>`, because we don't pay for the overhead of an
- /// option on every element, and we get a generally more cache-aware design.
- ///
- /// Key invariants of this structure:
- ///
- /// - if hashes[i] == EMPTY_BUCKET, then keys[i] and vals[i] have
- /// 'undefined' contents. Don't read from them. This invariant is
- /// enforced outside this module with the `EmptyIndex`, `FullIndex`,
- /// and `SafeHash` types.
- ///
- /// - An `EmptyIndex` is only constructed for a bucket at an index with
- /// a hash of EMPTY_BUCKET.
- ///
- /// - A `FullIndex` is only constructed for a bucket at an index with a
- /// non-EMPTY_BUCKET hash.
- ///
- /// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
- /// around hashes of zero by changing them to 0x8000_0000_0000_0000,
- /// which will likely map to the same bucket, while not being confused
- /// with "empty".
- ///
- /// - All three "arrays represented by pointers" are the same length:
- /// `capacity`. This is set at creation and never changes. The arrays
- /// are unzipped to save space (we don't have to pay for the padding
- /// between odd-sized elements, such as in a map from u64 to u8), and
- /// to be more cache-aware (scanning through 8 hashes brings in 2 cache
- /// lines, since they're all right beside each other).
- ///
- /// You can kind of think of this module/data structure as a safe wrapper
- /// around just the "table" part of the hashtable. It enforces some
- /// invariants at the type level and employs some performance trickery,
- /// but in general is just a tricked out `Vec<Option<u64, K, V>>`.
- ///
- /// FIXME(cgaebel):
- ///
- /// Feb 11, 2014: This hashtable was just implemented, and, hard as I tried,
- /// isn't yet totally safe. There's a "known exploit" that you can create
- /// multiple FullIndexes for a bucket, `take` one, and then still `take`
- /// the other causing undefined behavior. Currently, there's no story
- /// for how to protect against this statically. Therefore, there are asserts
- /// on `take`, `get`, `get_mut`, and `put` which check the bucket state.
- /// With time, and when we're confident this works correctly, they should
- /// be removed. Also, the bounds check in `peek` is especially painful,
- /// as that's called in the innermost loops of the hashtable and has the
- /// potential to be a major performance drain. Remove this too.
- ///
- /// Or, better than remove, only enable these checks for debug builds.
- /// There are currently no "debug-only" asserts in Rust, so if you're reading
- /// this and going "what? of course there are debug-only asserts!", then
- /// please make this use them!
- #[unsafe_no_drop_flag]
- pub struct RawTable<K, V> {
- capacity: uint,
- size: uint,
- hashes: *mut u64,
- keys: *mut K,
- vals: *mut V,
- }
-
- /// Represents an index into a `RawTable` with no key or value in it.
- pub struct EmptyIndex {
- idx: int,
- nocopy: marker::NoCopy,
- }
-
- /// Represents an index into a `RawTable` with a key, value, and hash
- /// in it.
- pub struct FullIndex {
- idx: int,
- hash: SafeHash,
- nocopy: marker::NoCopy,
- }
-
- impl FullIndex {
- /// Since we get the hash for free whenever we check the bucket state,
- /// this function is provided for fast access, letting us avoid
- /// redundant trips back to the hashtable.
- #[inline(always)]
- pub fn hash(&self) -> SafeHash { self.hash }
-
- /// Same comment as with `hash`.
- #[inline(always)]
- pub fn raw_index(&self) -> uint { self.idx as uint }
- }
-
- /// Represents the state of a bucket: it can either have a key/value
- /// pair (be full) or not (be empty). You cannot `take` empty buckets,
- /// and you cannot `put` into full buckets.
- pub enum BucketState {
- Empty(EmptyIndex),
- Full(FullIndex),
- }
-
- /// A hash that is not zero, since we use a hash of zero to represent empty
- /// buckets.
- #[deriving(PartialEq)]
- pub struct SafeHash {
- hash: u64,
- }
-
- impl SafeHash {
- /// Peek at the hash value, which is guaranteed to be non-zero.
- #[inline(always)]
- pub fn inspect(&self) -> u64 { self.hash }
- }
-
- /// We need to remove hashes of 0. That's reserved for empty buckets.
- /// This function wraps up `hash_keyed` to be the only way outside this
- /// module to generate a SafeHash.
- pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
- match hasher.hash(t) {
- // This constant is exceedingly likely to hash to the same
- // bucket, but it won't be counted as empty!
- EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
- h => SafeHash { hash: h },
- }
- }
-
- fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
- assert!(is_power_of_two(target_alignment));
- (unrounded + target_alignment - 1) & !(target_alignment - 1)
- }
-
- #[test]
- fn test_rounding() {
- assert_eq!(round_up_to_next(0, 4), 0);
- assert_eq!(round_up_to_next(1, 4), 4);
- assert_eq!(round_up_to_next(2, 4), 4);
- assert_eq!(round_up_to_next(3, 4), 4);
- assert_eq!(round_up_to_next(4, 4), 4);
- assert_eq!(round_up_to_next(5, 4), 8);
- }
-
- // Returns a tuple of (minimum required malloc alignment, hash_offset,
- // key_offset, val_offset, array_size), from the start of a mallocated array.
- fn calculate_offsets(
- hash_size: uint, hash_align: uint,
- keys_size: uint, keys_align: uint,
- vals_size: uint, vals_align: uint) -> (uint, uint, uint, uint, uint) {
-
- let hash_offset = 0;
- let end_of_hashes = hash_offset + hash_size;
-
- let keys_offset = round_up_to_next(end_of_hashes, keys_align);
- let end_of_keys = keys_offset + keys_size;
-
- let vals_offset = round_up_to_next(end_of_keys, vals_align);
- let end_of_vals = vals_offset + vals_size;
-
- let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
-
- (min_align, hash_offset, keys_offset, vals_offset, end_of_vals)
- }
-
- #[test]
- fn test_offset_calculation() {
- assert_eq!(calculate_offsets(128, 8, 15, 1, 4, 4 ), (8, 0, 128, 144, 148));
- assert_eq!(calculate_offsets(3, 1, 2, 1, 1, 1 ), (1, 0, 3, 5, 6));
- assert_eq!(calculate_offsets(6, 2, 12, 4, 24, 8), (8, 0, 8, 24, 48));
- }
-
- impl<K, V> RawTable<K, V> {
-
- /// Does not initialize the buckets. The caller should ensure they,
- /// at the very least, set every hash to EMPTY_BUCKET.
- unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
- let hashes_size = capacity.checked_mul(&size_of::<u64>())
- .expect("capacity overflow");
- let keys_size = capacity.checked_mul(&size_of::< K >())
- .expect("capacity overflow");
- let vals_size = capacity.checked_mul(&size_of::< V >())
- .expect("capacity overflow");
-
- // Allocating hashmaps is a little tricky. We need to allocate three
- // arrays, but since we know their sizes and alignments up front,
- // we just allocate a single array, and then have the subarrays
- // point into it.
- //
- // This is great in theory, but in practice getting the alignment
- // right is a little subtle. Therefore, calculating offsets has been
- // factored out into a different function.
- let (malloc_alignment, hash_offset, keys_offset, vals_offset, size) =
- calculate_offsets(
- hashes_size, min_align_of::<u64>(),
- keys_size, min_align_of::< K >(),
- vals_size, min_align_of::< V >());
-
- let buffer = allocate(size, malloc_alignment);
-
- let hashes = buffer.offset(hash_offset as int) as *mut u64;
- let keys = buffer.offset(keys_offset as int) as *mut K;
- let vals = buffer.offset(vals_offset as int) as *mut V;
-
- RawTable {
- capacity: capacity,
- size: 0,
- hashes: hashes,
- keys: keys,
- vals: vals,
- }
- }
-
- /// Creates a new raw table from a given capacity. All buckets are
- /// initially empty.
- #[allow(experimental)]
- pub fn new(capacity: uint) -> RawTable<K, V> {
- unsafe {
- let ret = RawTable::new_uninitialized(capacity);
- set_memory(ret.hashes, 0u8, capacity);
- ret
- }
- }
-
- /// Reads a bucket at a given index, returning an enum indicating whether
- /// there's anything there or not. You need to match on this enum to get
- /// the appropriate types to pass on to most of the other functions in
- /// this module.
- pub fn peek(&self, index: uint) -> BucketState {
- debug_assert!(index < self.capacity);
-
- let idx = index as int;
- let hash = unsafe { *self.hashes.offset(idx) };
-
- let nocopy = marker::NoCopy;
-
- match hash {
- EMPTY_BUCKET =>
- Empty(EmptyIndex {
- idx: idx,
- nocopy: nocopy
- }),
- full_hash =>
- Full(FullIndex {
- idx: idx,
- hash: SafeHash { hash: full_hash },
- nocopy: nocopy,
- })
- }
- }
-
- /// Gets references to the key and value at a given index.
- pub fn read<'a>(&'a self, index: &FullIndex) -> (&'a K, &'a V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (&*self.keys.offset(idx), &*self.vals.offset(idx))
- }
- }
-
- /// Gets references to the key and value at a given index, with the
- /// value's reference being mutable.
- pub fn read_mut<'a>(&'a mut self, index: &FullIndex) -> (&'a K, &'a mut V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (&*self.keys.offset(idx), &mut *self.vals.offset(idx))
- }
- }
-
- /// Read everything, mutably.
- pub fn read_all_mut<'a>(&'a mut self, index: &FullIndex)
- -> (&'a mut SafeHash, &'a mut K, &'a mut V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (transmute(self.hashes.offset(idx)),
- &mut *self.keys.offset(idx), &mut *self.vals.offset(idx))
- }
- }
-
- /// Puts a key and value pair, along with the key's hash, into a given
- /// index in the hashtable. Note how the `EmptyIndex` is 'moved' into this
- /// function, because that slot will no longer be empty when we return!
- /// A FullIndex is returned for later use, pointing to the newly-filled
- /// slot in the hashtable.
- ///
- /// Use `make_hash` to construct a `SafeHash` to pass to this function.
- pub fn put(&mut self, index: EmptyIndex, hash: SafeHash, k: K, v: V) -> FullIndex {
- let idx = index.idx;
-
- unsafe {
- debug_assert_eq!(*self.hashes.offset(idx), EMPTY_BUCKET);
- *self.hashes.offset(idx) = hash.inspect();
- overwrite(&mut *self.keys.offset(idx), k);
- overwrite(&mut *self.vals.offset(idx), v);
- }
-
- self.size += 1;
-
- FullIndex { idx: idx, hash: hash, nocopy: marker::NoCopy }
- }
-
- /// Removes a key and value from the hashtable.
- ///
- /// This works similarly to `put`, building an `EmptyIndex` out of the
- /// taken FullIndex.
- pub fn take(&mut self, index: FullIndex) -> (EmptyIndex, K, V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
-
- *self.hashes.offset(idx) = EMPTY_BUCKET;
-
- // Drop the mutable constraint.
- let keys = self.keys as *const K;
- let vals = self.vals as *const V;
-
- let k = ptr::read(keys.offset(idx));
- let v = ptr::read(vals.offset(idx));
-
- self.size -= 1;
-
- (EmptyIndex { idx: idx, nocopy: marker::NoCopy }, k, v)
- }
- }
-
- /// The hashtable's capacity, similar to a vector's.
- pub fn capacity(&self) -> uint {
- self.capacity
- }
-
- /// The number of elements ever `put` in the hashtable, minus the number
- /// of elements ever `take`n.
- pub fn size(&self) -> uint {
- self.size
- }
-
- pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
- Entries { table: self, idx: 0, elems_seen: 0 }
- }
-
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
- MutEntries { table: self, idx: 0, elems_seen: 0 }
- }
-
- pub fn move_iter(self) -> MoveEntries<K, V> {
- MoveEntries { table: self, idx: 0 }
- }
- }
-
- // `read_all_mut` casts a `*u64` to a `*SafeHash`. Since we statically
- // ensure that a `FullIndex` points to an index with a non-zero hash,
- // and a `SafeHash` is just a `u64` with a different name, this is
- // safe.
- //
- // This test ensures that a `SafeHash` really IS the same size as a
- // `u64`. If you need to change the size of `SafeHash` (and
- // consequently make this test fail), `read_all_mut` needs to be
- // modified to no longer assume this.
- #[test]
- fn can_alias_safehash_as_u64() {
- assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
- }
-
- /// Iterator over shared references to entries in a table.
- pub struct Entries<'a, K:'a, V:'a> {
- table: &'a RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over mutable references to entries in a table.
- pub struct MutEntries<'a, K:'a, V:'a> {
- table: &'a mut RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over the entries in a table, consuming the table.
- pub struct MoveEntries<K, V> {
- table: RawTable<K, V>,
- idx: uint
- }
-
- impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
- fn next(&mut self) -> Option<(&'a K, &'a V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- Full(idx) => {
- self.elems_seen += 1;
- return Some(self.table.read(&idx));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size() - self.elems_seen;
- (size, Some(size))
- }
- }
-
- impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
- fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- // the transmute here fixes:
- // error: lifetime of `self` is too short to guarantee its contents
- // can be safely reborrowed
- Full(idx) => unsafe {
- self.elems_seen += 1;
- return Some(transmute(self.table.read_mut(&idx)));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size() - self.elems_seen;
- (size, Some(size))
- }
- }
-
- impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
- fn next(&mut self) -> Option<(SafeHash, K, V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- Full(idx) => {
- let h = idx.hash();
- let (_, k, v) = self.table.take(idx);
- return Some((h, k, v));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size();
- (size, Some(size))
- }
- }
-
- impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
- fn clone(&self) -> RawTable<K, V> {
- unsafe {
- let mut new_ht = RawTable::new_uninitialized(self.capacity());
-
- for i in range(0, self.capacity()) {
- match self.peek(i) {
- Empty(_) => {
- *new_ht.hashes.offset(i as int) = EMPTY_BUCKET;
- },
- Full(idx) => {
- let hash = idx.hash().inspect();
- let (k, v) = self.read(&idx);
- *new_ht.hashes.offset(i as int) = hash;
- overwrite(&mut *new_ht.keys.offset(i as int), (*k).clone());
- overwrite(&mut *new_ht.vals.offset(i as int), (*v).clone());
- }
- }
- }
-
- new_ht.size = self.size();
-
- new_ht
- }
- }
- }
-
- #[unsafe_destructor]
- impl<K, V> Drop for RawTable<K, V> {
- fn drop(&mut self) {
- // This is in reverse because we're likely to have partially taken
- // some elements out with `.move_iter()` from the front.
- for i in range_step_inclusive(self.capacity as int - 1, 0, -1) {
- // Check if the size is 0, so we don't do a useless scan when
- // dropping empty tables such as on resize.
- if self.size == 0 { break }
-
- match self.peek(i as uint) {
- Empty(_) => {},
- Full(idx) => { self.take(idx); }
- }
- }
-
- assert_eq!(self.size, 0);
-
- if self.hashes.is_not_null() {
- let hashes_size = self.capacity * size_of::<u64>();
- let keys_size = self.capacity * size_of::<K>();
- let vals_size = self.capacity * size_of::<V>();
- let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
- keys_size, min_align_of::<K>(),
- vals_size, min_align_of::<V>());
-
- unsafe {
- deallocate(self.hashes as *mut u8, size, align);
- // Remember how everything was allocated out of one buffer
- // during initialization? We only need one call to free here.
- }
-
- self.hashes = RawPtr::null();
- }
- }
- }
-}
-
-static INITIAL_LOG2_CAP: uint = 5;
-static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
-
-/// The default behavior of HashMap implements a load factor of 90.9%.
-/// This behavior is characterized by the following conditions:
-///
-/// - if `size * 1.1 < cap < size * 4` then shouldn't resize
-/// - if `cap < minimum_capacity * 2` then shouldn't shrink
-#[deriving(Clone)]
-struct DefaultResizePolicy {
- /// Doubled minimal capacity. The capacity must never drop below
- /// the minimum capacity. (The check happens before the capacity
- /// is potentially halved.)
- minimum_capacity2: uint
-}
-
-impl DefaultResizePolicy {
- fn new(new_capacity: uint) -> DefaultResizePolicy {
- DefaultResizePolicy {
- minimum_capacity2: new_capacity << 1
- }
- }
-
- #[inline]
- fn capacity_range(&self, new_size: uint) -> (uint, uint) {
- ((new_size * 11) / 10, max(new_size << 3, self.minimum_capacity2))
- }
-
- #[inline]
- fn reserve(&mut self, new_capacity: uint) {
- self.minimum_capacity2 = new_capacity << 1;
- }
-}
-
-// The main performance trick in this hashmap is called Robin Hood Hashing.
-// It gains its excellent performance from one key invariant:
-//
-// If an insertion collides with an existing element, and that element's
-// "probe distance" (how far away the element is from its ideal location)
-// is higher than how far we've already probed, swap the elements.
-//
-// This massively lowers variance in probe distance, and allows us to get very
-// high load factors with good performance. The 90% load factor I use is rather
-// conservative.
-//
-// > Why a load factor of approximately 90%?
-//
-// In general, all the distances to initial buckets will converge on the mean.
-// At a load factor of α, the odds of finding the target bucket after k
-// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
-// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
-// this down to make the math easier on the CPU and avoid its FPU.
-// Since on average we start the probing in the middle of a cache line, this
-// strategy pulls in two cache lines of hashes on every lookup. I think that's
-// pretty good, but if you want to trade off some space, it could go down to one
-// cache line on average with an α of 0.84.
-//
-// > Wait, what? Where did you get 1-α^k from?
-//
-// On the first probe, your odds of a collision with an existing element are α.
-// The odds of doing this twice in a row is approximately α^2. For three times,
-// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
-// colliding after k tries is 1-α^k.
-//
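
The arithmetic above, worked out (k = 8 probes spans the two cache lines the comment describes; the one-cache-line variant presumably corresponds to k = 4):

```latex
\Pr[\text{target found within } k \text{ probes}] \approx 1 - \alpha^{k},
\qquad
1 - \alpha^{8} = \tfrac{1}{2}
\;\Rightarrow\;
\alpha = 2^{-1/8} \approx 0.917,
\qquad
\alpha = 2^{-1/4} \approx 0.841 .
```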
-// Future Improvements (FIXME!)
-// ============================
-//
-// Allow the load factor to be changed dynamically and/or at initialization.
-//
-// Also, would it be possible for us to reuse storage when growing the
-// underlying table? This is exactly the use case for 'realloc', and may
-// be worth exploring.
-//
-// Future Optimizations (FIXME!)
-// =============================
-//
-// The paper cited below mentions an implementation which keeps track of the
-// distance-to-initial-bucket histogram. I'm suspicious of this approach because
-// it requires maintaining an internal map. If this map were replaced with a
-// hashmap, it would be faster, but now our data structure is self-referential
-// and blows up. Also, this allows very good first guesses, but array accesses
-// are no longer linear and in one direction, as we have now. There is also
-// memory and cache pressure that this map would entail that would be very
-// difficult to properly see in a microbenchmark.
-//
-// Another possible design choice that I made without any real reason is
-// parameterizing the raw table over keys and values. Technically, all we need
-// is the size and alignment of keys and values, and the code should be just as
-// efficient (well, we might need one for power-of-two size and one for not...).
-// This has the potential to reduce code bloat in rust executables, without
-// really losing anything except 4 words (key size, key alignment, val size,
-// val alignment) which can be passed in to every call of a `RawTable` function.
-// This would definitely be an avenue worth exploring if people start complaining
-// about the size of rust executables.
-//
-// There's also an "optimization" that has been omitted regarding how the
-// hashtable allocates. The vector type has set the expectation that a hashtable
-// which never has an element inserted should not allocate. I'm suspicious of
-// implementing this for hashtables, because supporting it has no performance
-// benefit over using an `Option<HashMap<K, V>>`, and is significantly more
-// complicated.
-
-/// A hash map implementation which uses linear probing with Robin
-/// Hood bucket stealing.
-///
-/// The hashes are all keyed by the task-local random number generator
-/// on creation by default. This means that the ordering of the keys is
-/// randomized, but makes the tables more resistant to
-/// denial-of-service attacks (Hash DoS). This behaviour can be
-/// overridden with one of the constructors.
-///
-/// It is required that the keys implement the `Eq` and `Hash` traits, although
-/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
-///
-/// Relevant papers/articles:
-///
-/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
-/// 2. Emmanuel Goossaert. ["Robin Hood
-/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
-/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
-/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
-///
-/// # Example
-///
-/// ```
-/// use std::collections::HashMap;
-///
-/// // type inference lets us omit an explicit type signature (which
-/// // would be `HashMap<&str, &str>` in this example).
-/// let mut book_reviews = HashMap::new();
-///
-/// // review some books.
-/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
-/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
-/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
-/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
-///
-/// // check for a specific one.
-/// if !book_reviews.contains_key(&("Les Misérables")) {
-/// println!("We've got {} reviews, but Les Misérables ain't one.",
-/// book_reviews.len());
-/// }
-///
-/// // oops, this review has a lot of spelling mistakes, let's delete it.
-/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
-///
-/// // look up the values associated with some keys.
-/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
-/// for book in to_find.iter() {
-/// match book_reviews.find(book) {
-/// Some(review) => println!("{}: {}", *book, *review),
-/// None => println!("{} is unreviewed.", *book)
-/// }
-/// }
-///
-/// // iterate over everything.
-/// for (book, review) in book_reviews.iter() {
-/// println!("{}: \"{}\"", *book, *review);
-/// }
-/// ```
-///
-/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
-/// We must also derive `PartialEq`.
-///
-/// ```
-/// use std::collections::HashMap;
-///
-/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
-/// }
-///
-/// let mut vikings = HashMap::new();
-///
-/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
-/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
-/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
-///
-/// // Use derived implementation to print the vikings.
-/// for (land, viking) in vikings.iter() {
-/// println!("{} at {}", viking, land);
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct HashMap<K, V, H = RandomSipHasher> {
- // All hashes are keyed on these values, to prevent hash collision attacks.
- hasher: H,
-
- table: table::RawTable<K, V>,
-
- // We keep this at the end since it might as well have tail padding.
- resize_policy: DefaultResizePolicy,
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
- // Probe the `idx`th bucket for a given hash, returning the index of the
- // target bucket.
- //
- // This exploits the power-of-two size of the hashtable. As long as this
- // is always true, we can use a bitmask of cap-1 to do modular arithmetic.
- //
- // Prefer using this with increasing values of `idx` rather than repeatedly
- // calling `probe_next`. This reduces data-dependencies between loops, which
- // can help the optimizer, and certainly won't hurt it. `probe_next` is
- // simply for convenience, and is no more efficient than `probe`.
- fn probe(&self, hash: &table::SafeHash, idx: uint) -> uint {
- let hash_mask = self.table.capacity() - 1;
-
- // So I heard a rumor that unsigned overflow is safe in rust..
- ((hash.inspect() as uint) + idx) & hash_mask
- }
-
- // Generate the next probe in a sequence. Prefer using 'probe' by itself,
- // but this can sometimes be useful.
- fn probe_next(&self, probe: uint) -> uint {
- let hash_mask = self.table.capacity() - 1;
- (probe + 1) & hash_mask
- }
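
Because the capacity is always a power of two, the mask is exact modular reduction. A quick self-contained check with illustrative values:

```rust
// (h + i) & (cap - 1) == (h + i) % cap whenever cap is a power of two.
fn main() {
    let cap = 8u;
    let mask = cap - 1;        // 0b111
    let (h, i) = (13u, 5u);
    assert_eq!((h + i) & mask, (h + i) % cap); // both are 2
}
```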
-
- fn make_hash<X: Hash<S>>(&self, x: &X) -> table::SafeHash {
- table::make_hash(&self.hasher, x)
- }
-
- /// Get the distance between the bucket at the given index and its
- /// 'ideal' location.
- ///
- /// In the cited blog posts above, this is called the "distance to
- /// initial bucket", or DIB.
- fn bucket_distance(&self, index_of_elem: &table::FullIndex) -> uint {
- // where the hash of the element that happens to reside at
- // `index_of_elem` tried to place itself first.
- let first_probe_index = self.probe(&index_of_elem.hash(), 0);
-
- let raw_index = index_of_elem.raw_index();
-
- if first_probe_index <= raw_index {
- // probe just went forward
- raw_index - first_probe_index
- } else {
- // probe wrapped around the hashtable
- raw_index + (self.table.capacity() - first_probe_index)
- }
- }
-
- /// Search for a pre-hashed key.
- fn search_hashed_generic(&self, hash: &table::SafeHash, is_match: |&K| -> bool)
- -> Option<table::FullIndex> {
- for num_probes in range(0u, self.table.size()) {
- let probe = self.probe(hash, num_probes);
-
- let idx = match self.table.peek(probe) {
- table::Empty(_) => return None, // hit an empty bucket
- table::Full(idx) => idx
- };
-
- // We can finish the search early if we hit any bucket
- // with a lower distance to initial bucket than we've probed.
- if self.bucket_distance(&idx) < num_probes { return None }
-
- // If the hash doesn't match, it can't be this one..
- if *hash != idx.hash() { continue }
-
- let (k, _) = self.table.read(&idx);
-
- // If the key doesn't match, it can't be this one..
- if !is_match(k) { continue }
-
- return Some(idx);
- }
-
- return None
- }
-
- fn search_hashed(&self, hash: &table::SafeHash, k: &K) -> Option<table::FullIndex> {
- self.search_hashed_generic(hash, |k_| *k == *k_)
- }
-
- fn search_equiv<Q: Hash<S> + Equiv<K>>(&self, q: &Q) -> Option<table::FullIndex> {
- self.search_hashed_generic(&self.make_hash(q), |k| q.equiv(k))
- }
-
- /// Search for a key, yielding the index if it's found in the hashtable.
- /// If you already have the hash for the key lying around, use
- /// search_hashed.
- fn search(&self, k: &K) -> Option<table::FullIndex> {
- self.search_hashed(&self.make_hash(k), k)
- }
-
- fn pop_internal(&mut self, starting_index: table::FullIndex) -> Option<V> {
- let starting_probe = starting_index.raw_index();
-
- let ending_probe = {
- let mut probe = self.probe_next(starting_probe);
- for _ in range(0u, self.table.size()) {
- match self.table.peek(probe) {
- table::Empty(_) => {}, // empty bucket. this is the end of our shifting.
- table::Full(idx) => {
- // Bucket that isn't us, which has a non-zero probe distance.
- // This isn't the ending index, so keep searching.
- if self.bucket_distance(&idx) != 0 {
- probe = self.probe_next(probe);
- continue;
- }
-
- // if we do have a bucket_distance of zero, we're at the end
- // of what we need to shift.
- }
- }
- break;
- }
-
- probe
- };
-
- let (_, _, retval) = self.table.take(starting_index);
-
- let mut probe = starting_probe;
- let mut next_probe = self.probe_next(probe);
-
- // backwards-shift all the elements after our newly-deleted one.
- while next_probe != ending_probe {
- match self.table.peek(next_probe) {
- table::Empty(_) => {
- // nothing to shift in. just empty it out.
- match self.table.peek(probe) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
- },
- table::Full(next_idx) => {
- // something to shift. move it over!
- let next_hash = next_idx.hash();
- let (_, next_key, next_val) = self.table.take(next_idx);
- match self.table.peek(probe) {
- table::Empty(idx) => {
- self.table.put(idx, next_hash, next_key, next_val);
- },
- table::Full(idx) => {
- let (emptyidx, _, _) = self.table.take(idx);
- self.table.put(emptyidx, next_hash, next_key, next_val);
- }
- }
- }
- }
-
- probe = next_probe;
- next_probe = self.probe_next(next_probe);
- }
-
- // Done the backwards shift, but there's still an element left!
- // Empty it out.
- match self.table.peek(probe) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
-
- // Now we're done all our shifting. Return the value we grabbed
- // earlier.
- return Some(retval);
- }
-}
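
A worked trace of the backward shift in `pop_internal`, with hypothetical bucket contents; `D0`/`D1` mark each element's distance to its initial bucket:

```rust
// before pop(b):  [ a:D0 | b:D0 | c:D1 | d:D1 | _ ]
// take b:         [ a:D0 | _    | c:D1 | d:D1 | _ ]
// shift c back:   [ a:D0 | c:D0 | _    | d:D1 | _ ]
// shift d back:   [ a:D0 | c:D0 | d:D0 | _    | _ ]
//
// The shift stops at the first empty bucket or the first element whose
// distance is already zero, since moving such an element back would put
// it before its initial bucket.
```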
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
- /// Return the number of elements in the map.
- fn len(&self) -> uint { self.table.size() }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
- /// Clear the map, removing all key-value pairs. Keeps the allocated memory
- /// for reuse.
- fn clear(&mut self) {
- // Prevent reallocations from happening from now on. Makes it possible
- // for the map to be reused but has a downside: reserves permanently.
- self.resize_policy.reserve(self.table.size());
-
- for i in range(0, self.table.capacity()) {
- match self.table.peek(i) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
- }
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
- fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
- self.search(k).map(|idx| {
- let (_, v) = self.table.read(&idx);
- v
- })
- }
-
- fn contains_key(&self, k: &K) -> bool {
- self.search(k).is_some()
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
- fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
- match self.search(k) {
- None => None,
- Some(idx) => {
- let (_, v) = self.table.read_mut(&idx);
- Some(v)
- }
- }
- }
-
- fn swap(&mut self, k: K, v: V) -> Option<V> {
- let hash = self.make_hash(&k);
- let potential_new_size = self.table.size() + 1;
- self.make_some_room(potential_new_size);
-
- for dib in range_inclusive(0u, self.table.size()) {
- let probe = self.probe(&hash, dib);
-
- let idx = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Found a hole!
- self.table.put(idx, hash, k, v);
- return None;
- },
- table::Full(idx) => idx
- };
-
- if idx.hash() == hash {
- let (bucket_k, bucket_v) = self.table.read_mut(&idx);
- if k == *bucket_k {
- // Found an existing value.
- return Some(replace(bucket_v, v));
- }
- }
-
- let probe_dib = self.bucket_distance(&idx);
-
- if probe_dib < dib {
- // Found a luckier bucket. This implies that the key does not
- // already exist in the hashtable. Just do a robin hood
- // insertion, then.
- self.robin_hood(idx, probe_dib, hash, k, v);
- return None;
- }
- }
-
- // We really shouldn't be here.
- fail!("Internal HashMap error: Out of space.");
- }
-
- fn pop(&mut self, k: &K) -> Option<V> {
- if self.table.size() == 0 {
- return None
- }
-
- let potential_new_size = self.table.size() - 1;
- self.make_some_room(potential_new_size);
-
- let starting_index = match self.search(k) {
- Some(idx) => idx,
- None => return None,
- };
-
- self.pop_internal(starting_index)
- }
-
-}
-
-impl<K: Hash + Eq, V> HashMap<K, V, RandomSipHasher> {
- /// Create an empty HashMap.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::new();
- /// ```
- #[inline]
- pub fn new() -> HashMap<K, V, RandomSipHasher> {
- HashMap::with_capacity(INITIAL_CAPACITY)
- }
-
- /// Creates an empty hash map with the given initial capacity.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
- /// ```
- #[inline]
- pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomSipHasher> {
- let hasher = RandomSipHasher::new();
- HashMap::with_capacity_and_hasher(capacity, hasher)
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
- /// Creates an empty hashmap which will use the given hasher to hash keys.
- ///
- /// The created map has the default initial capacity.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut map = HashMap::with_hasher(h);
- /// map.insert(1i, 2u);
- /// ```
- #[inline]
- pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
- HashMap::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
- }
-
- /// Create an empty HashMap with space for at least `capacity`
- /// elements, using `hasher` to hash the keys.
- ///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut map = HashMap::with_capacity_and_hasher(10, h);
- /// map.insert(1i, 2u);
- /// ```
- #[inline]
- pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
- let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
- HashMap {
- hasher: hasher,
- resize_policy: DefaultResizePolicy::new(cap),
- table: table::RawTable::new(cap),
- }
- }
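
How the constructor above rounds the requested capacity (`INITIAL_CAPACITY` is 32, so small requests are bumped up to it first):

```rust
// with_capacity(10)  -> max(32, 10)  = 32  -> next_power_of_two = 32
// with_capacity(100) -> max(32, 100) = 100 -> next_power_of_two = 128
fn main() {
    assert_eq!(std::num::next_power_of_two(std::cmp::max(32u, 10)), 32);
    assert_eq!(std::num::next_power_of_two(std::cmp::max(32u, 100)), 128);
}
```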
-
- /// The hashtable will never try to shrink below this size. You can use
- /// this function to reduce reallocations if your hashtable frequently
- /// grows and shrinks by large amounts.
- ///
- /// This function has no effect on the operational semantics of the
- /// hashtable, only on performance.
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::new();
- /// map.reserve(10);
- /// ```
- pub fn reserve(&mut self, new_minimum_capacity: uint) {
- let cap = num::next_power_of_two(
- max(INITIAL_CAPACITY, new_minimum_capacity));
-
- self.resize_policy.reserve(cap);
-
- if self.table.capacity() < cap {
- self.resize(cap);
- }
- }
-
- /// Resizes the internal vectors to a new capacity. It's your responsibility to:
- /// 1) Make sure the new capacity is enough for all the elements, accounting
- /// for the load factor.
- /// 2) Ensure new_capacity is a power of two.
- fn resize(&mut self, new_capacity: uint) {
- assert!(self.table.size() <= new_capacity);
- assert!(num::is_power_of_two(new_capacity));
-
- let old_table = replace(&mut self.table, table::RawTable::new(new_capacity));
- let old_size = old_table.size();
-
- for (h, k, v) in old_table.move_iter() {
- self.insert_hashed_nocheck(h, k, v);
- }
-
- assert_eq!(self.table.size(), old_size);
- }
-
- /// Performs any necessary resize operations, such that there's space for
- /// new_size elements.
- fn make_some_room(&mut self, new_size: uint) {
- let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
- let cap = self.table.capacity();
-
- // An invalid value shouldn't make us run out of space.
- debug_assert!(grow_at >= new_size);
-
- if cap <= grow_at {
- let new_capacity = cap << 1;
- self.resize(new_capacity);
- } else if shrink_at <= cap {
- let new_capacity = cap >> 1;
- self.resize(new_capacity);
- }
- }
-
- /// Perform robin hood bucket stealing at the given 'index'. You must
- /// also pass that probe's "distance to initial bucket" so we don't have
- /// to recalculate it, as well as the total number of probes already done
- /// so we have some sort of upper bound on the number of probes to do.
- ///
- /// 'hash', 'k', and 'v' are the elements to robin hood into the hashtable.
- fn robin_hood(&mut self, mut index: table::FullIndex, mut dib_param: uint,
- mut hash: table::SafeHash, mut k: K, mut v: V) {
- 'outer: loop {
- let (old_hash, old_key, old_val) = {
- let (old_hash_ref, old_key_ref, old_val_ref) =
- self.table.read_all_mut(&index);
-
- let old_hash = replace(old_hash_ref, hash);
- let old_key = replace(old_key_ref, k);
- let old_val = replace(old_val_ref, v);
-
- (old_hash, old_key, old_val)
- };
-
- let mut probe = self.probe_next(index.raw_index());
-
- for dib in range(dib_param + 1, self.table.size()) {
- let full_index = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Finally. A hole!
- self.table.put(idx, old_hash, old_key, old_val);
- return;
- },
- table::Full(idx) => idx
- };
-
- let probe_dib = self.bucket_distance(&full_index);
-
- // Robin hood! Steal the spot.
- if probe_dib < dib {
- index = full_index;
- dib_param = probe_dib;
- hash = old_hash;
- k = old_key;
- v = old_val;
- continue 'outer;
- }
-
- probe = self.probe_next(probe);
- }
-
- fail!("HashMap fatal error: 100% load factor?");
- }
- }
-
- /// Insert a pre-hashed key-value pair, without first checking
- /// that there's enough room in the buckets. Returns a reference to the
- /// newly inserted value.
- ///
- /// If the key already exists, the hashtable will be left untouched and a
- /// reference to the existing element will be returned.
- fn insert_hashed_nocheck<'a>(
- &'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
-
- for dib in range_inclusive(0u, self.table.size()) {
- let probe = self.probe(&hash, dib);
-
- let idx = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Found a hole!
- let fullidx = self.table.put(idx, hash, k, v);
- let (_, val) = self.table.read_mut(&fullidx);
- return val;
- },
- table::Full(idx) => idx
- };
-
- if idx.hash() == hash {
- let (bucket_k, bucket_v) = self.table.read_mut(&idx);
- // FIXME #12147 the conditional return confuses
- // borrowck if we return bucket_v directly
- let bv: *mut V = bucket_v;
- if k == *bucket_k {
- // Key already exists. Get its reference.
- return unsafe {&mut *bv};
- }
- }
-
- let probe_dib = self.bucket_distance(&idx);
-
- if probe_dib < dib {
- // Found a luckier bucket than me. Better steal his spot.
- self.robin_hood(idx, probe_dib, hash, k, v);
-
- // Now that it's stolen, just read the value's pointer
- // right out of the table!
- match self.table.peek(probe) {
- table::Empty(_) => fail!("Just stole a spot, but now that spot's empty."),
- table::Full(idx) => {
- let (_, v) = self.table.read_mut(&idx);
- return v;
- }
- }
- }
- }
-
- // We really shouldn't be here.
- fail!("Internal HashMap error: Out of space.");
- }
-
- /// Inserts an element which has already been hashed, returning a reference
- /// to that element inside the hashtable. This is more efficient than using
- /// `insert`, since the key will not be rehashed.
- fn insert_hashed<'a>(&'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
- let potential_new_size = self.table.size() + 1;
- self.make_some_room(potential_new_size);
- self.insert_hashed_nocheck(hash, k, v)
- }
-
- /// Return the value corresponding to the key in the map, or insert
- /// and return the value if it doesn't exist.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 1i with key "a"
- /// assert_eq!(*map.find_or_insert("a", 1i), 1);
- ///
- /// // Find the existing key
- /// assert_eq!(*map.find_or_insert("a", -2), 1);
- /// ```
- pub fn find_or_insert<'a>(&'a mut self, k: K, v: V) -> &'a mut V {
- self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
- }
-
- /// Return the value corresponding to the key in the map, or create,
- /// insert, and return a new value if it doesn't exist.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 10 with key 2
- /// assert_eq!(*map.find_or_insert_with(2i, |&key| 5 * key as uint), 10u);
- ///
- /// // Find the existing key
- /// assert_eq!(*map.find_or_insert_with(2, |&key| key as uint), 10);
- /// ```
- pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
- -> &'a mut V {
- self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
- }
-
- /// Insert a key-value pair into the map if the key is not already present.
- /// Otherwise, modify the existing value for the key.
- /// Returns the new or modified value for the key.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 2 with key "a"
- /// assert_eq!(*map.insert_or_update_with("a", 2u, |_key, val| *val = 3), 2);
- ///
- /// // Update and return the existing value
- /// assert_eq!(*map.insert_or_update_with("a", 9, |_key, val| *val = 7), 7);
- /// assert_eq!(map["a"], 7);
- /// ```
- pub fn insert_or_update_with<'a>(
- &'a mut self,
- k: K,
- v: V,
- f: |&K, &mut V|)
- -> &'a mut V {
- self.find_with_or_insert_with(k, v, |k, v, _a| f(k, v), |_k, a| a)
- }
-
- /// Modify and return the value corresponding to the key in the map, or
- /// insert and return a new value if it doesn't exist.
- ///
- /// This method allows for all insertion behaviours of a hashmap;
- /// see methods like
- /// [`insert`](../trait.MutableMap.html#tymethod.insert),
- /// [`find_or_insert`](#method.find_or_insert) and
- /// [`insert_or_update_with`](#method.insert_or_update_with)
- /// for less general and more friendly variations of this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// // map some strings to vectors of strings
- /// let mut map = HashMap::new();
- /// map.insert("a key", vec!["value"]);
- /// map.insert("z key", vec!["value"]);
- ///
- /// let new = vec!["a key", "b key", "z key"];
- ///
- /// for k in new.move_iter() {
- /// map.find_with_or_insert_with(
- /// k, "new value",
- /// // if the key does exist either prepend or append this
- /// // new value based on the first letter of the key.
- /// |key, already, new| {
- /// if key.as_slice().starts_with("z") {
- /// already.insert(0, new);
- /// } else {
- /// already.push(new);
- /// }
- /// },
- /// // if the key doesn't exist in the map yet, add it in
- /// // the obvious way.
- /// |_k, v| vec![v]);
- /// }
- ///
- /// assert_eq!(map.len(), 3);
- /// assert_eq!(map["a key"], vec!["value", "new value"]);
- /// assert_eq!(map["b key"], vec!["new value"]);
- /// assert_eq!(map["z key"], vec!["new value", "value"]);
- /// ```
- pub fn find_with_or_insert_with<'a, A>(&'a mut self,
- k: K,
- a: A,
- found: |&K, &mut V, A|,
- not_found: |&K, A| -> V)
- -> &'a mut V {
- let hash = self.make_hash(&k);
- match self.search_hashed(&hash, &k) {
- None => {
- let v = not_found(&k, a);
- self.insert_hashed(hash, k, v)
- },
- Some(idx) => {
- let (_, v_ref) = self.table.read_mut(&idx);
- found(&k, v_ref, a);
- v_ref
- }
- }
- }
-
- /// Retrieves a value for the given key.
- /// See [`find`](../trait.Map.html#tymethod.find) for a non-failing alternative.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// #![allow(deprecated)]
- ///
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// assert_eq!(map.get(&"a"), &1);
- /// ```
- #[deprecated = "prefer indexing instead, e.g., map[key]"]
- pub fn get<'a>(&'a self, k: &K) -> &'a V {
- match self.find(k) {
- Some(v) => v,
- None => fail!("no entry found for key")
- }
- }
-
- /// Retrieves a mutable value for the given key.
- /// See [`find_mut`](../trait.MutableMap.html#tymethod.find_mut) for a non-failing alternative.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// {
- /// // val will freeze map to prevent usage during its lifetime
- /// let val = map.get_mut(&"a");
- /// *val = 40;
- /// }
- /// assert_eq!(map["a"], 40);
- ///
- /// // A more direct way could be:
- /// *map.get_mut(&"a") = -2;
- /// assert_eq!(map["a"], -2);
- /// ```
- pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
- match self.find_mut(k) {
- Some(v) => v,
- None => fail!("no entry found for key")
- }
- }
-
- /// Return true if the map contains a value for the specified key,
- /// using equivalence.
- ///
- /// See [pop_equiv](#method.pop_equiv) for an extended example.
- pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
- self.search_equiv(key).is_some()
- }
-
- /// Return the value corresponding to the key in the map, using
- /// equivalence.
- ///
- /// See [pop_equiv](#method.pop_equiv) for an extended example.
- pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
- match self.search_equiv(k) {
- None => None,
- Some(idx) => {
- let (_, v_ref) = self.table.read(&idx);
- Some(v_ref)
- }
- }
- }
-
- /// Remove an equivalent key from the map, returning the value at the
- /// key if the key was previously in the map.
- ///
- /// # Example
- ///
- /// This is a slightly silly example where we define the number's parity as
- /// the equivalence class. It is important that the values hash the same,
- /// which is why we override `Hash`.
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::Hash;
- /// use std::hash::sip::SipState;
- ///
- /// #[deriving(Eq, PartialEq)]
- /// struct EvenOrOdd {
- /// num: uint
- /// }
- ///
- /// impl Hash for EvenOrOdd {
- /// fn hash(&self, state: &mut SipState) {
- /// let parity = self.num % 2;
- /// parity.hash(state);
- /// }
- /// }
- ///
- /// impl Equiv<EvenOrOdd> for EvenOrOdd {
- /// fn equiv(&self, other: &EvenOrOdd) -> bool {
- /// self.num % 2 == other.num % 2
- /// }
- /// }
- ///
- /// let mut map = HashMap::new();
- /// map.insert(EvenOrOdd { num: 3 }, "foo");
- ///
- /// assert!(map.contains_key_equiv(&EvenOrOdd { num: 1 }));
- /// assert!(!map.contains_key_equiv(&EvenOrOdd { num: 4 }));
- ///
- /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 5 }), Some(&"foo"));
- /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 2 }), None);
- ///
- /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 1 }), Some("foo"));
- /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 2 }), None);
- ///
- /// ```
- #[experimental]
- pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
- if self.table.size() == 0 {
- return None
- }
-
- let potential_new_size = self.table.size() - 1;
- self.make_some_room(potential_new_size);
-
- let starting_index = match self.search_equiv(k) {
- Some(idx) => idx,
- None => return None,
- };
-
- self.pop_internal(starting_index)
- }
-
- /// An iterator visiting all keys in arbitrary order.
- /// Iterator element type is `&'a K`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for key in map.keys() {
- /// println!("{}", key);
- /// }
- /// ```
- pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
- self.iter().map(|(k, _v)| k)
- }
-
- /// An iterator visiting all values in arbitrary order.
- /// Iterator element type is `&'a V`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for val in map.values() {
- ///     println!("{}", val);
- /// }
- /// ```
- pub fn values<'a>(&'a self) -> Values<'a, K, V> {
- self.iter().map(|(_k, v)| v)
- }
-
- /// An iterator visiting all key-value pairs in arbitrary order.
- /// Iterator element type is `(&'a K, &'a V)`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for (key, val) in map.iter() {
- /// println!("key: {} val: {}", key, val);
- /// }
- /// ```
- pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
- self.table.iter()
- }
-
- /// An iterator visiting all key-value pairs in arbitrary order,
- /// with mutable references to the values.
- /// Iterator element type is `(&'a K, &'a mut V)`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// // Update all values
- /// for (_, val) in map.mut_iter() {
- /// *val *= 2;
- /// }
- ///
- /// for (key, val) in map.iter() {
- /// println!("key: {} val: {}", key, val);
- /// }
- /// ```
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
- self.table.mut_iter()
- }
-
- /// Creates a consuming iterator, that is, one that moves each key-value
- /// pair out of the map in arbitrary order. The map cannot be used after
- /// calling this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// // Not possible with .iter()
- /// let vec: Vec<(&str, int)> = map.move_iter().collect();
- /// ```
- pub fn move_iter(self) -> MoveEntries<K, V> {
- self.table.move_iter().map(|(_, k, v)| (k, v))
- }
-}
-
-impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
- /// Return a copy of the value corresponding to the key.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map: HashMap<uint, String> = HashMap::new();
- /// map.insert(1u, "foo".to_string());
- /// let s: String = map.find_copy(&1).unwrap();
- /// ```
- pub fn find_copy(&self, k: &K) -> Option<V> {
- self.find(k).map(|v| (*v).clone())
- }
-
- /// Return a copy of the value corresponding to the key.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map: HashMap<uint, String> = HashMap::new();
- /// map.insert(1u, "foo".to_string());
- /// let s: String = map.get_copy(&1);
- /// ```
- pub fn get_copy(&self, k: &K) -> V {
- (*self.get(k)).clone()
- }
-}
-
-impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
- fn eq(&self, other: &HashMap<K, V, H>) -> bool {
- if self.len() != other.len() { return false; }
-
- self.iter()
- .all(|(key, value)| {
- match other.find(key) {
- None => false,
- Some(v) => *value == *v
- }
- })
- }
-}
-
-impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
-
-impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- try!(write!(f, "{{"));
-
- for (i, (k, v)) in self.iter().enumerate() {
- if i != 0 { try!(write!(f, ", ")); }
- try!(write!(f, "{}: {}", *k, *v));
- }
-
- write!(f, "}}")
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
- fn default() -> HashMap<K, V, H> {
- HashMap::with_hasher(Default::default())
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Index<K, V> for HashMap<K, V, H> {
- #[inline]
- fn index<'a>(&'a self, index: &K) -> &'a V {
- self.get(index)
- }
-}
-
-// FIXME(#12825) Indexing will always try IndexMut first and that causes issues.
-/*impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> ops::IndexMut<K, V> for HashMap<K, V, H> {
- #[inline]
- fn index_mut<'a>(&'a mut self, index: &K) -> &'a mut V {
- self.get_mut(index)
- }
-}*/
-
-/// HashMap iterator
-pub type Entries<'a, K, V> = table::Entries<'a, K, V>;
-
-/// HashMap mutable values iterator
-pub type MutEntries<'a, K, V> = table::MutEntries<'a, K, V>;
-
-/// HashMap move iterator
-pub type MoveEntries<K, V> =
- iter::Map<'static, (table::SafeHash, K, V), (K, V), table::MoveEntries<K, V>>;
-
-/// HashMap keys iterator
-pub type Keys<'a, K, V> =
- iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
-
-/// HashMap values iterator
-pub type Values<'a, K, V> =
- iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
- fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
- let (lower, _) = iter.size_hint();
- let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
- map.extend(iter);
- map
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
- fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
- for (k, v) in iter {
- self.insert(k, v);
- }
- }
-}
-
-/// HashSet iterator
-pub type SetItems<'a, K> =
- iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
-
-/// HashSet move iterator
-pub type SetMoveItems<K> =
- iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
-
-/// An implementation of a hash set using the underlying representation of a
-/// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
-/// requires that the elements implement the `Eq` and `Hash` traits.
-///
-/// # Example
-///
-/// ```
-/// use std::collections::HashSet;
-///
-/// // Type inference lets us omit an explicit type signature (which
-/// // would be `HashSet<&str>` in this example).
-/// let mut books = HashSet::new();
-///
-/// // Add some books.
-/// books.insert("A Dance With Dragons");
-/// books.insert("To Kill a Mockingbird");
-/// books.insert("The Odyssey");
-/// books.insert("The Great Gatsby");
-///
-/// // Check for a specific one.
-/// if !books.contains(&("The Winds of Winter")) {
-/// println!("We have {} books, but The Winds of Winter ain't one.",
-/// books.len());
-/// }
-///
-/// // Remove a book.
-/// books.remove(&"The Odyssey");
-///
-/// // Iterate over everything.
-/// for book in books.iter() {
-/// println!("{}", *book);
-/// }
-/// ```
-///
-/// The easiest way to use `HashSet` with a custom type is to derive
-/// `Eq` and `Hash`. We must also derive `PartialEq`; in the future this
-/// will be implied by `Eq`.
-///
-/// ```rust
-/// use std::collections::HashSet;
-///
-/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
-/// }
-///
-/// let mut vikings = HashSet::new();
-///
-/// vikings.insert(Viking { name: "Einar", power: 9u });
-/// vikings.insert(Viking { name: "Einar", power: 9u });
-/// vikings.insert(Viking { name: "Olaf", power: 4u });
-/// vikings.insert(Viking { name: "Harald", power: 8u });
-///
-/// // Use derived implementation to print the vikings.
-/// for x in vikings.iter() {
-/// println!("{}", x);
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct HashSet<T, H = RandomSipHasher> {
- map: HashMap<T, (), H>
-}
-
-impl<T: Hash + Eq> HashSet<T, RandomSipHasher> {
- /// Create an empty HashSet.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::new();
- /// ```
- #[inline]
- pub fn new() -> HashSet<T, RandomSipHasher> {
- HashSet::with_capacity(INITIAL_CAPACITY)
- }
-
- /// Create an empty HashSet with space for at least `n` elements in
- /// the hash table.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::with_capacity(10);
- /// ```
- #[inline]
- pub fn with_capacity(capacity: uint) -> HashSet<T, RandomSipHasher> {
- HashSet { map: HashMap::with_capacity(capacity) }
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
- /// Creates a new empty hash set which will use the given hasher to hash
- /// keys.
- ///
- /// The hash set is also created with the default initial capacity.
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut set = HashSet::with_hasher(h);
- /// set.insert(2u);
- /// ```
- #[inline]
- pub fn with_hasher(hasher: H) -> HashSet<T, H> {
- HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
- }
-
- /// Create an empty HashSet with space for at least `capacity`
- /// elements in the hash table, using `hasher` to hash the keys.
- ///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut set = HashSet::with_capacity_and_hasher(10u, h);
- /// set.insert(1i);
- /// ```
- #[inline]
- pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
- HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
- }
-
- /// Reserve space for at least `n` elements in the hash table.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::new();
- /// set.reserve(10);
- /// ```
- pub fn reserve(&mut self, n: uint) {
- self.map.reserve(n)
- }
-
- /// Returns true if the hash set contains a value equivalent to the
- /// given query value.
- ///
- /// # Example
- ///
- /// This is a slightly silly example where we define the number's
- /// parity as the equivalence class. It is important that the
- /// values hash the same, which is why we implement `Hash`.
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::Hash;
- /// use std::hash::sip::SipState;
- ///
- /// #[deriving(Eq, PartialEq)]
- /// struct EvenOrOdd {
- /// num: uint
- /// }
- ///
- /// impl Hash for EvenOrOdd {
- /// fn hash(&self, state: &mut SipState) {
- /// let parity = self.num % 2;
- /// parity.hash(state);
- /// }
- /// }
- ///
- /// impl Equiv<EvenOrOdd> for EvenOrOdd {
- /// fn equiv(&self, other: &EvenOrOdd) -> bool {
- /// self.num % 2 == other.num % 2
- /// }
- /// }
- ///
- /// let mut set = HashSet::new();
- /// set.insert(EvenOrOdd { num: 3u });
- ///
- /// assert!(set.contains_equiv(&EvenOrOdd { num: 3u }));
- /// assert!(set.contains_equiv(&EvenOrOdd { num: 5u }));
- /// assert!(!set.contains_equiv(&EvenOrOdd { num: 4u }));
- /// assert!(!set.contains_equiv(&EvenOrOdd { num: 2u }));
- ///
- /// ```
- pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
- self.map.contains_key_equiv(value)
- }
-
- /// An iterator visiting all elements in arbitrary order.
- /// Iterator element type is `&'a T`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set = HashSet::new();
- /// set.insert("a");
- /// set.insert("b");
- ///
- /// // Will print in an arbitrary order.
- /// for x in set.iter() {
- /// println!("{}", x);
- /// }
- /// ```
- pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
- self.map.keys()
- }
-
- /// Creates a consuming iterator, that is, one that moves each value out
- /// of the set in arbitrary order. The set cannot be used after calling
- /// this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set = HashSet::new();
- /// set.insert("a".to_string());
- /// set.insert("b".to_string());
- ///
- /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
- /// let v: Vec<String> = set.move_iter().collect();
- ///
- /// // Will print in an arbitrary order.
- /// for x in v.iter() {
- /// println!("{}", x);
- /// }
- /// ```
- pub fn move_iter(self) -> SetMoveItems<T> {
- self.map.move_iter().map(|(k, _)| k)
- }
-
- /// Visit the values representing the difference.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Can be seen as `a - b`.
- /// for x in a.difference(&b) {
- /// println!("{}", x); // Print 1
- /// }
- ///
- /// let diff: HashSet<int> = a.difference(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [1i].iter().map(|&x| x).collect());
- ///
- /// // Note that difference is not symmetric,
- /// // and `b - a` means something else:
- /// let diff: HashSet<int> = b.difference(&a).map(|&x| x).collect();
- /// assert_eq!(diff, [4i].iter().map(|&x| x).collect());
- /// ```
- pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
- Repeat::new(other).zip(self.iter())
- .filter_map(|(other, elt)| {
- if !other.contains(elt) { Some(elt) } else { None }
- })
- }
-
- /// Visit the values representing the symmetric difference.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 1, 4 in arbitrary order.
- /// for x in a.symmetric_difference(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff1: HashSet<int> = a.symmetric_difference(&b).map(|&x| x).collect();
- /// let diff2: HashSet<int> = b.symmetric_difference(&a).map(|&x| x).collect();
- ///
- /// assert_eq!(diff1, diff2);
- /// assert_eq!(diff1, [1i, 4].iter().map(|&x| x).collect());
- /// ```
- pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
- -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
- self.difference(other).chain(other.difference(self))
- }
-
- /// Visit the values representing the intersection.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 2, 3 in arbitrary order.
- /// for x in a.intersection(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let inter: HashSet<int> = a.intersection(&b).map(|&x| x).collect();
- /// assert_eq!(inter, [2i, 3].iter().map(|&x| x).collect());
- /// ```
- pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
- -> SetAlgebraItems<'a, T, H> {
- Repeat::new(other).zip(self.iter())
- .filter_map(|(other, elt)| {
- if other.contains(elt) { Some(elt) } else { None }
- })
- }
-
- /// Visit the values representing the union.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order.
- /// for x in a.union(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let combined: HashSet<int> = a.union(&b).map(|&x| x).collect();
- /// assert_eq!(combined, [1i, 2, 3, 4].iter().map(|&x| x).collect());
- /// ```
- pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
- -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
- self.iter().chain(other.difference(self))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
- fn eq(&self, other: &HashSet<T, H>) -> bool {
- if self.len() != other.len() { return false; }
-
- self.iter().all(|key| other.contains(key))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
- fn len(&self) -> uint { self.map.len() }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
- fn clear(&mut self) { self.map.clear() }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
- fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
-
- fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
- self.iter().all(|v| !other.contains(v))
- }
-
- fn is_subset(&self, other: &HashSet<T, H>) -> bool {
- self.iter().all(|v| other.contains(v))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
- fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
-
- fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
-}
-
-
-impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- try!(write!(f, "{{"));
-
- for (i, x) in self.iter().enumerate() {
- if i != 0 { try!(write!(f, ", ")); }
- try!(write!(f, "{}", *x));
- }
-
- write!(f, "}}")
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
- fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
- let (lower, _) = iter.size_hint();
- let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
- set.extend(iter);
- set
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
- fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
- for k in iter {
- self.insert(k);
- }
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
- fn default() -> HashSet<T, H> {
- HashSet::with_hasher(Default::default())
- }
-}
-
-// `Repeat` is used to feed the filter closure an explicit capture
-// of a reference to the other set
-/// Set operations iterator
-pub type SetAlgebraItems<'a, T, H> =
- FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
- Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
-
-#[cfg(test)]
-mod test_map {
- use prelude::*;
-
- use super::HashMap;
- use cmp::Equiv;
- use hash;
- use iter::{Iterator,range_inclusive,range_step_inclusive};
- use cell::RefCell;
-
- struct KindaIntLike(int);
-
- impl Equiv<int> for KindaIntLike {
- fn equiv(&self, other: &int) -> bool {
- let KindaIntLike(this) = *self;
- this == *other
- }
- }
- impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
- fn hash(&self, state: &mut S) {
- let KindaIntLike(this) = *self;
- this.hash(state)
- }
- }
-
- #[test]
- fn test_create_capacity_zero() {
- let mut m = HashMap::with_capacity(0);
-
- assert!(m.insert(1i, 1i));
-
- assert!(m.contains_key(&1));
- assert!(!m.contains_key(&0));
- }
-
- #[test]
- fn test_insert() {
- let mut m = HashMap::new();
- assert_eq!(m.len(), 0);
- assert!(m.insert(1i, 2i));
- assert_eq!(m.len(), 1);
- assert!(m.insert(2i, 4i));
- assert_eq!(m.len(), 2);
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&2).unwrap(), 4);
- }
-
- local_data_key!(drop_vector: RefCell<Vec<int>>)
-
- #[deriving(Hash, PartialEq, Eq)]
- struct Dropable {
- k: uint
- }
-
-
- impl Dropable {
- fn new(k: uint) -> Dropable {
- let v = drop_vector.get().unwrap();
- v.borrow_mut().as_mut_slice()[k] += 1;
-
- Dropable { k: k }
- }
- }
-
- impl Drop for Dropable {
- fn drop(&mut self) {
- let v = drop_vector.get().unwrap();
- v.borrow_mut().as_mut_slice()[self.k] -= 1;
- }
- }
-
- #[test]
- fn test_drops() {
- drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
-
- {
- let mut m = HashMap::new();
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- }
- drop(v);
-
- for i in range(0u, 100) {
- let d1 = Dropable::new(i);
- let d2 = Dropable::new(i+100);
- m.insert(d1, d2);
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 1);
- }
- drop(v);
-
- for i in range(0u, 50) {
- let k = Dropable::new(i);
- let v = m.pop(&k);
-
- assert!(v.is_some());
-
- let v = drop_vector.get().unwrap();
- assert_eq!(v.borrow().as_slice()[i], 1);
- assert_eq!(v.borrow().as_slice()[i+100], 1);
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 50) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- assert_eq!(v.borrow().as_slice()[i+100], 0);
- }
-
- for i in range(50u, 100) {
- assert_eq!(v.borrow().as_slice()[i], 1);
- assert_eq!(v.borrow().as_slice()[i+100], 1);
- }
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- }
- }
-
- #[test]
- fn test_empty_pop() {
- let mut m: HashMap<int, bool> = HashMap::new();
- assert_eq!(m.pop(&0), None);
- }
-
- #[test]
- fn test_lots_of_insertions() {
- let mut m = HashMap::new();
-
- // Try this a few times to make sure we never screw up the hashmap's
- // internal state.
- for _ in range(0i, 10) {
- assert!(m.is_empty());
-
- for i in range_inclusive(1i, 1000) {
- assert!(m.insert(i, i));
-
- for j in range_inclusive(1, i) {
- let r = m.find(&j);
- assert_eq!(r, Some(&j));
- }
-
- for j in range_inclusive(i+1, 1000) {
- let r = m.find(&j);
- assert_eq!(r, None);
- }
- }
-
- for i in range_inclusive(1001i, 2000) {
- assert!(!m.contains_key(&i));
- }
-
- // remove forwards
- for i in range_inclusive(1i, 1000) {
- assert!(m.remove(&i));
-
- for j in range_inclusive(1, i) {
- assert!(!m.contains_key(&j));
- }
-
- for j in range_inclusive(i+1, 1000) {
- assert!(m.contains_key(&j));
- }
- }
-
- for i in range_inclusive(1i, 1000) {
- assert!(!m.contains_key(&i));
- }
-
- for i in range_inclusive(1i, 1000) {
- assert!(m.insert(i, i));
- }
-
- // remove backwards
- for i in range_step_inclusive(1000i, 1, -1) {
- assert!(m.remove(&i));
-
- for j in range_inclusive(i, 1000) {
- assert!(!m.contains_key(&j));
- }
-
- for j in range_inclusive(1, i-1) {
- assert!(m.contains_key(&j));
- }
- }
- }
- }
-
- #[test]
- fn test_find_mut() {
- let mut m = HashMap::new();
- assert!(m.insert(1i, 12i));
- assert!(m.insert(2i, 8i));
- assert!(m.insert(5i, 14i));
- let new = 100;
- match m.find_mut(&5) {
- None => fail!(), Some(x) => *x = new
- }
- assert_eq!(m.find(&5), Some(&new));
- }
-
- #[test]
- fn test_insert_overwrite() {
- let mut m = HashMap::new();
- assert!(m.insert(1i, 2i));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert!(!m.insert(1i, 3i));
- assert_eq!(*m.find(&1).unwrap(), 3);
- }
-
- #[test]
- fn test_insert_conflicts() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert!(m.insert(5i, 3i));
- assert!(m.insert(9i, 4i));
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert_eq!(*m.find(&1).unwrap(), 2);
- }
-
- #[test]
- fn test_conflict_remove() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert!(m.insert(5, 3));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert!(m.insert(9, 4));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert!(m.remove(&1));
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert_eq!(*m.find(&5).unwrap(), 3);
- }
-
- #[test]
- fn test_is_empty() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert!(!m.is_empty());
- assert!(m.remove(&1));
- assert!(m.is_empty());
- }
-
- #[test]
- fn test_pop() {
- let mut m = HashMap::new();
- m.insert(1i, 2i);
- assert_eq!(m.pop(&1), Some(2));
- assert_eq!(m.pop(&1), None);
- }
-
- #[test]
- #[allow(experimental)]
- fn test_pop_equiv() {
- let mut m = HashMap::new();
- m.insert(1i, 2i);
- assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
- assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
- }
-
- #[test]
- fn test_swap() {
- let mut m = HashMap::new();
- assert_eq!(m.swap(1i, 2i), None);
- assert_eq!(m.swap(1i, 3i), Some(2));
- assert_eq!(m.swap(1i, 4i), Some(3));
- }
-
- #[test]
- fn test_move_iter() {
- let hm = {
- let mut hm = HashMap::new();
-
- hm.insert('a', 1i);
- hm.insert('b', 2i);
-
- hm
- };
-
- let v = hm.move_iter().collect::<Vec<(char, int)>>();
- assert!([('a', 1), ('b', 2)] == v.as_slice() || [('b', 2), ('a', 1)] == v.as_slice());
- }
-
- #[test]
- fn test_iterate() {
- let mut m = HashMap::with_capacity(4);
- for i in range(0u, 32) {
- assert!(m.insert(i, i*2));
- }
- assert_eq!(m.len(), 32);
-
- let mut observed: u32 = 0;
-
- for (k, v) in m.iter() {
- assert_eq!(*v, *k * 2);
- observed |= 1 << *k;
- }
- assert_eq!(observed, 0xFFFF_FFFF);
- }
-
- #[test]
- fn test_keys() {
- let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
- let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
- assert_eq!(keys.len(), 3);
- assert!(keys.contains(&1));
- assert!(keys.contains(&2));
- assert!(keys.contains(&3));
- }
-
- #[test]
- fn test_values() {
- let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
- let values = map.values().map(|&v| v).collect::<Vec<char>>();
- assert_eq!(values.len(), 3);
- assert!(values.contains(&'a'));
- assert!(values.contains(&'b'));
- assert!(values.contains(&'c'));
- }
-
- #[test]
- fn test_find() {
- let mut m = HashMap::new();
- assert!(m.find(&1i).is_none());
- m.insert(1i, 2i);
- match m.find(&1) {
- None => fail!(),
- Some(v) => assert_eq!(*v, 2)
- }
- }
-
- #[test]
- fn test_eq() {
- let mut m1 = HashMap::new();
- m1.insert(1i, 2i);
- m1.insert(2i, 3i);
- m1.insert(3i, 4i);
-
- let mut m2 = HashMap::new();
- m2.insert(1i, 2i);
- m2.insert(2i, 3i);
-
- assert!(m1 != m2);
-
- m2.insert(3i, 4i);
-
- assert_eq!(m1, m2);
- }
-
- #[test]
- fn test_show() {
- let mut map: HashMap<int, int> = HashMap::new();
- let empty: HashMap<int, int> = HashMap::new();
-
- map.insert(1i, 2i);
- map.insert(3i, 4i);
-
- let map_str = format!("{}", map);
-
- assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
- assert_eq!(format!("{}", empty), "{}".to_string());
- }
-
- #[test]
- fn test_expand() {
- let mut m = HashMap::new();
-
- assert_eq!(m.len(), 0);
- assert!(m.is_empty());
-
- let mut i = 0u;
- let old_cap = m.table.capacity();
- while old_cap == m.table.capacity() {
- m.insert(i, i);
- i += 1;
- }
-
- assert_eq!(m.len(), i);
- assert!(!m.is_empty());
- }
-
- #[test]
- fn test_resize_policy() {
- let mut m = HashMap::new();
-
- assert_eq!(m.len(), 0);
- assert!(m.is_empty());
-
- let initial_cap = m.table.capacity();
- m.reserve(initial_cap * 2);
- let cap = m.table.capacity();
-
- assert_eq!(cap, initial_cap * 2);
-
- let mut i = 0u;
- for _ in range(0, cap * 3 / 4) {
- m.insert(i, i);
- i += 1;
- }
-
- assert_eq!(m.len(), i);
- assert_eq!(m.table.capacity(), cap);
-
- for _ in range(0, cap / 4) {
- m.insert(i, i);
- i += 1;
- }
-
- let new_cap = m.table.capacity();
- assert_eq!(new_cap, cap * 2);
-
- for _ in range(0, cap / 2) {
- i -= 1;
- m.remove(&i);
- assert_eq!(m.table.capacity(), new_cap);
- }
-
- for _ in range(0, cap / 2 - 1) {
- i -= 1;
- m.remove(&i);
- }
-
- assert_eq!(m.table.capacity(), cap);
- assert_eq!(m.len(), i);
- assert!(!m.is_empty());
- }
-
- #[test]
- fn test_find_equiv() {
- let mut m = HashMap::new();
-
- let (foo, bar, baz) = (1i,2i,3i);
- m.insert("foo".to_string(), foo);
- m.insert("bar".to_string(), bar);
- m.insert("baz".to_string(), baz);
-
-
- assert_eq!(m.find_equiv(&("foo")), Some(&foo));
- assert_eq!(m.find_equiv(&("bar")), Some(&bar));
- assert_eq!(m.find_equiv(&("baz")), Some(&baz));
-
- assert_eq!(m.find_equiv(&("qux")), None);
- }
-
- #[test]
- fn test_from_iter() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- for &(k, v) in xs.iter() {
- assert_eq!(map.find(&k), Some(&v));
- }
- }
-
- #[test]
- fn test_size_hint() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- let mut iter = map.iter();
-
- for _ in iter.by_ref().take(3) {}
-
- assert_eq!(iter.size_hint(), (3, Some(3)));
- }
-
- #[test]
- fn test_mut_size_hint() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- let mut iter = map.mut_iter();
-
- for _ in iter.by_ref().take(3) {}
-
- assert_eq!(iter.size_hint(), (3, Some(3)));
- }
-
- #[test]
- fn test_index() {
- let mut map: HashMap<int, int> = HashMap::new();
-
- map.insert(1, 2);
- map.insert(2, 1);
- map.insert(3, 4);
-
- assert_eq!(map[2], 1);
- }
-
- #[test]
- #[should_fail]
- fn test_index_nonexistent() {
- let mut map: HashMap<int, int> = HashMap::new();
-
- map.insert(1, 2);
- map.insert(2, 1);
- map.insert(3, 4);
-
- map[4];
- }
-}
-
-#[cfg(test)]
-mod test_set {
- use prelude::*;
-
- use super::HashSet;
- use slice::ImmutablePartialEqSlice;
- use collections::Collection;
-
- #[test]
- fn test_disjoint() {
- let mut xs = HashSet::new();
- let mut ys = HashSet::new();
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(xs.insert(5i));
- assert!(ys.insert(11i));
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(xs.insert(7));
- assert!(xs.insert(19));
- assert!(xs.insert(4));
- assert!(ys.insert(2));
- assert!(ys.insert(-11));
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(ys.insert(7));
- assert!(!xs.is_disjoint(&ys));
- assert!(!ys.is_disjoint(&xs));
- }
-
- #[test]
- fn test_subset_and_superset() {
- let mut a = HashSet::new();
- assert!(a.insert(0i));
- assert!(a.insert(5));
- assert!(a.insert(11));
- assert!(a.insert(7));
-
- let mut b = HashSet::new();
- assert!(b.insert(0i));
- assert!(b.insert(7));
- assert!(b.insert(19));
- assert!(b.insert(250));
- assert!(b.insert(11));
- assert!(b.insert(200));
-
- assert!(!a.is_subset(&b));
- assert!(!a.is_superset(&b));
- assert!(!b.is_subset(&a));
- assert!(!b.is_superset(&a));
-
- assert!(b.insert(5));
-
- assert!(a.is_subset(&b));
- assert!(!a.is_superset(&b));
- assert!(!b.is_subset(&a));
- assert!(b.is_superset(&a));
- }
-
- #[test]
- fn test_iterate() {
- let mut a = HashSet::new();
- for i in range(0u, 32) {
- assert!(a.insert(i));
- }
- let mut observed: u32 = 0;
- for k in a.iter() {
- observed |= 1 << *k;
- }
- assert_eq!(observed, 0xFFFF_FFFF);
- }
-
- #[test]
- fn test_intersection() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(11i));
- assert!(a.insert(1));
- assert!(a.insert(3));
- assert!(a.insert(77));
- assert!(a.insert(103));
- assert!(a.insert(5));
- assert!(a.insert(-5));
-
- assert!(b.insert(2i));
- assert!(b.insert(11));
- assert!(b.insert(77));
- assert!(b.insert(-9));
- assert!(b.insert(-42));
- assert!(b.insert(5));
- assert!(b.insert(3));
-
- let mut i = 0;
- let expected = [3, 5, 11, 77];
- for x in a.intersection(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_difference() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
-
- assert!(b.insert(3i));
- assert!(b.insert(9));
-
- let mut i = 0;
- let expected = [1, 5, 11];
- for x in a.difference(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_symmetric_difference() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
-
- assert!(b.insert(-2i));
- assert!(b.insert(3));
- assert!(b.insert(9));
- assert!(b.insert(14));
- assert!(b.insert(22));
-
- let mut i = 0;
- let expected = [-2, 1, 5, 11, 14, 22];
- for x in a.symmetric_difference(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_union() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
- assert!(a.insert(16));
- assert!(a.insert(19));
- assert!(a.insert(24));
-
- assert!(b.insert(-2i));
- assert!(b.insert(1));
- assert!(b.insert(5));
- assert!(b.insert(9));
- assert!(b.insert(13));
- assert!(b.insert(19));
-
- let mut i = 0;
- let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
- for x in a.union(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_from_iter() {
- let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
-
- let set: HashSet<int> = xs.iter().map(|&x| x).collect();
-
- for x in xs.iter() {
- assert!(set.contains(x));
- }
- }
-
- #[test]
- fn test_move_iter() {
- let hs = {
- let mut hs = HashSet::new();
-
- hs.insert('a');
- hs.insert('b');
-
- hs
- };
-
- let v = hs.move_iter().collect::<Vec<char>>();
- assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
- }
-
- #[test]
- fn test_eq() {
- // These constants once happened to expose a bug in insert().
- // I'm keeping them around to prevent a regression.
- let mut s1 = HashSet::new();
-
- s1.insert(1i);
- s1.insert(2);
- s1.insert(3);
-
- let mut s2 = HashSet::new();
-
- s2.insert(1i);
- s2.insert(2);
-
- assert!(s1 != s2);
-
- s2.insert(3);
-
- assert_eq!(s1, s2);
- }
-
- #[test]
- fn test_show() {
- let mut set: HashSet<int> = HashSet::new();
- let empty: HashSet<int> = HashSet::new();
-
- set.insert(1i);
- set.insert(2);
-
- let set_str = format!("{}", set);
-
- assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
- assert_eq!(format!("{}", empty), "{}".to_string());
- }
-}
-
-#[cfg(test)]
-mod bench {
- extern crate test;
- use prelude::*;
-
- use self::test::Bencher;
- use iter::{range_inclusive};
-
- #[bench]
- fn new_drop(b : &mut Bencher) {
- use super::HashMap;
-
- b.iter(|| {
- let m : HashMap<int, int> = HashMap::new();
- assert_eq!(m.len(), 0);
- })
- }
-
- #[bench]
- fn new_insert_drop(b : &mut Bencher) {
- use super::HashMap;
-
- b.iter(|| {
- let mut m = HashMap::new();
- m.insert(0i, 0i);
- assert_eq!(m.len(), 1);
- })
- }
-
- #[bench]
- fn insert(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1001;
-
- b.iter(|| {
- m.insert(k, k);
- k += 1;
- });
- }
-
- #[bench]
- fn find_existing(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- b.iter(|| {
- for i in range_inclusive(1i, 1000) {
- m.contains_key(&i);
- }
- });
- }
-
- #[bench]
- fn find_nonexisting(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- b.iter(|| {
- for i in range_inclusive(1001i, 2000) {
- m.contains_key(&i);
- }
- });
- }
-
- #[bench]
- fn hashmap_as_queue(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1i;
-
- b.iter(|| {
- m.pop(&k);
- m.insert(k + 1000, k + 1000);
- k += 1;
- });
- }
-
- #[bench]
- fn find_pop_insert(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1i;
-
- b.iter(|| {
- m.find(&(k + 400));
- m.find(&(k + 2000));
- m.pop(&k);
- m.insert(k + 1000, k + 1000);
- k += 1;
- })
- }
-}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![cfg(test)]
+
+extern crate test;
+use prelude::*;
+
+use self::test::Bencher;
+use iter::range_inclusive;
+
+#[bench]
+fn new_drop(b: &mut Bencher) {
+ use super::HashMap;
+
+ b.iter(|| {
+ let m: HashMap<int, int> = HashMap::new();
+ assert_eq!(m.len(), 0);
+ })
+}
+
+#[bench]
+fn new_insert_drop(b: &mut Bencher) {
+ use super::HashMap;
+
+ b.iter(|| {
+ let mut m = HashMap::new();
+ m.insert(0i, 0i);
+ assert_eq!(m.len(), 1);
+ })
+}
+
+#[bench]
+fn grow_by_insertion(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1001;
+
+ b.iter(|| {
+ m.insert(k, k);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_existing(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in range_inclusive(1i, 1000) {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn find_nonexisting(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in range_inclusive(1001i, 2000) {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn hashmap_as_queue(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1i;
+
+ b.iter(|| {
+ m.pop(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_pop_insert(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1i;
+
+ b.iter(|| {
+ m.find(&(k + 400));
+ m.find(&(k + 2000));
+ m.pop(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ })
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp::{max, Eq, Equiv, PartialEq};
+use collections::{Collection, Mutable, MutableSet, Map, MutableMap};
+use default::Default;
+use fmt::Show;
+use fmt;
+use hash::{Hash, Hasher, RandomSipHasher};
+use iter::{Iterator, FromIterator, Extendable};
+use iter;
+use mem::replace;
+use num;
+use ops::{Deref, DerefMut};
+use option::{Some, None, Option};
+use result::{Ok, Err};
+use ops::Index;
+
+use super::table;
+use super::table::{
+ Bucket,
+ Empty,
+ Full,
+ FullBucket,
+ FullBucketImm,
+ FullBucketMut,
+ RawTable,
+ SafeHash
+};
+
+static INITIAL_LOG2_CAP: uint = 5;
+pub static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
+
+/// The default behavior of HashMap implements a load factor of 90.9%.
+/// This behavior is characterized by the following conditions:
+///
+/// - if size > 0.909 * capacity: grow
+/// - if size < 0.25 * capacity: shrink (if this won't bring capacity lower
+/// than the minimum)
+#[deriving(Clone)]
+struct DefaultResizePolicy {
+ /// Doubled minimal capacity. The capacity must never drop below
+ /// the minimum capacity. (The check happens before the capacity
+ /// is potentially halved.)
+ minimum_capacity2: uint
+}
+
+impl DefaultResizePolicy {
+ fn new(new_capacity: uint) -> DefaultResizePolicy {
+ DefaultResizePolicy {
+ minimum_capacity2: new_capacity << 1
+ }
+ }
+
+ #[inline]
+ fn capacity_range(&self, new_size: uint) -> (uint, uint) {
+ // Here, we are rephrasing the logic by specifying the ranges:
+ //
+ // - if `size * 1.1 < cap < size * 4`: don't resize
+ // - if `cap < minimum_capacity * 2`: don't shrink
+ // - otherwise, resize accordingly
+ ((new_size * 11) / 10, max(new_size << 2, self.minimum_capacity2))
+ }
+
+ #[inline]
+ fn reserve(&mut self, new_capacity: uint) {
+ self.minimum_capacity2 = new_capacity << 1;
+ }
+}
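+
+// A quick numeric sketch of the resize policy above (illustrative values
+// only; `capacity_range` is internal to this module). With a minimum
+// capacity of 32, `minimum_capacity2` is 64, and for a hypothetical size of
+// 100 elements the thresholds work out as:
+//
+//     let policy = DefaultResizePolicy::new(32); // minimum_capacity2 = 64
+//     let (grow_at, shrink_at) = policy.capacity_range(100);
+//     assert_eq!(grow_at, 110);   // (100 * 11) / 10
+//     assert_eq!(shrink_at, 400); // max(100 << 2, 64)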
+
+// The main performance trick in this hashmap is called Robin Hood Hashing.
+// It gains its excellent performance from one essential operation:
+//
+// If an insertion collides with an existing element, and that element's
+// "probe distance" (how far away the element is from its ideal location)
+// is higher than how far we've already probed, swap the elements.
+//
+// This massively lowers variance in probe distance, and allows us to get very
+// high load factors with good performance. The 90% load factor I use is rather
+// conservative.
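+//
+// As a small worked example (with made-up numbers): suppose an insertion has
+// already probed 3 buckets past its ideal slot, and the next bucket holds an
+// element that sits only 1 bucket away from its own ideal slot. Since 1 < 3,
+// the "richer" element is displaced: the new element takes that bucket, and
+// probing continues with the displaced element, now at probe distance 1.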
+//
+// > Why a load factor of approximately 90%?
+//
+// In general, all the distances to initial buckets will converge on the mean.
+// At a load factor of α, the odds of finding the target bucket after k
+// probes are approximately 1-α^k. If we set this equal to 50% (since we converge
+// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
+// this down to make the math easier on the CPU and avoid its FPU.
+// Since on average we start the probing in the middle of a cache line, this
+// strategy pulls in two cache lines of hashes on every lookup. I think that's
+// pretty good, but if you want to trade off some space, it could go down to one
+// cache line on average with an α of 0.84.
+//
+// > Wait, what? Where did you get 1-α^k from?
+//
+// On the first probe, your odds of a collision with an existing element are α.
+// The odds of doing this twice in a row are approximately α^2. For three
+// times, α^3, etc. Therefore, the odds of colliding k times are α^k, and the
+// odds of NOT colliding after k tries are 1-α^k.
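+//
+// Concretely, at α ≈ 0.909 we get α^8 ≈ 0.47, so a little over half of all
+// lookups are expected to find their target within the first 8 probes, i.e.
+// within roughly one cache line's worth of hashes.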
+//
+// The paper from 1986 cited below mentions an implementation which keeps track
+// of the distance-to-initial-bucket histogram. This approach is not suitable
+// for modern architectures because it requires maintaining an internal data
+// structure. This allows very good first guesses, but we are most concerned
+// with guessing entire cache lines, not individual indexes. Furthermore, array
+// accesses would no longer be linear and in one direction, as they are now.
+// The memory and cache pressure this would entail would also be very
+// difficult to observe properly in a microbenchmark.
+//
+// Future Improvements (FIXME!)
+// ============================
+//
+// Allow the load factor to be changed dynamically and/or at initialization.
+//
+// Also, would it be possible for us to reuse storage when growing the
+// underlying table? This is exactly the use case for 'realloc', and may
+// be worth exploring.
+//
+// Future Optimizations (FIXME!)
+// =============================
+//
+// Another possible design choice that I made without any real reason is
+// parameterizing the raw table over keys and values. Technically, all we need
+// is the size and alignment of keys and values, and the code should be just as
+// efficient (well, we might need one for power-of-two size and one for not...).
+// This has the potential to reduce code bloat in rust executables, without
+// really losing anything except 4 words (key size, key alignment, val size,
+// val alignment) which can be passed in to every call of a `RawTable` function.
+// This would definitely be an avenue worth exploring if people start complaining
+// about the size of rust executables.
+//
+// Annotate exceedingly likely branches in `table::make_hash`
+// and `search_hashed_generic` to reduce instruction cache pressure
+// and mispredictions once it becomes possible (blocked on issue #11092).
+//
+// Shrinking the table could simply reallocate in place after moving buckets
+// to the first half.
+//
+// The growth algorithm (fragment of the Proof of Correctness)
+// --------------------
+//
+// The growth algorithm is basically a fast path of the naive reinsertion-
+// during-resize algorithm. Other paths should never be taken.
+//
+// Consider growing a robin hood hashtable of capacity n. Normally, we do this
+// by allocating a new table of capacity `2n`, and then individually reinserting
+// each element from the old table into the new one. This guarantees that the
+// new table is a valid robin hood hashtable with all the desired statistical
+// properties. Note that the order in which we reinsert the elements should not
+// matter. For simplicity and efficiency, we will consider only linear
+// reinsertions, which consist of reinserting all elements in the old table
+// into the new one by increasing order of index. However we will not be
+// starting our reinsertions from index 0 in general. If we start from index
+// i, for the purpose of reinsertion we will consider all elements with real
+// index j < i to have virtual index n + j.
+//
+// Our hash generation scheme consists of generating a 64-bit hash and
+// truncating the most significant bits. When moving to the new table, we
+// simply introduce a new bit to the front of the hash. Therefore, if an
+// element has ideal index i in the old table, it can have one of two ideal
+// locations in the new table. If the new bit is 0, then the new ideal index
+// is i. If the new bit is 1, then the new ideal index is n + i. Intuitively,
+// we are producing two independent tables of size n, and for each element we
+// independently choose which table to insert it into with equal probability.
+// However, rather than wrapping around themselves on overflowing their
+// indexes, the first table overflows into the second, and the second into the
+// first. Visually, our new table will look something like:
+//
+// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
+//
+// Where x's are elements inserted into the first table, y's are elements
+// inserted into the second, and _'s are empty sections. We now define a few
+// key concepts that we will use later. Note that this is a very abstract
+// perspective of the table. A real resized table would be at least half
+// empty.
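+//
+// In code terms (a sketch, not the actual implementation), the two candidate
+// ideal locations differ only in one extra mask bit:
+//
+//     let old_ideal = (hash as uint) & (n - 1);   // low log2(n) bits
+//     let new_ideal = (hash as uint) & (2*n - 1); // old_ideal, or n + old_ideal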
+//
+// Theorem: A linear robin hood reinsertion from the first ideal element
+// produces identical results to a linear naive reinsertion from the same
+// element.
+//
+// FIXME(Gankro, pczarn): review the proof and put it all in a separate doc.rs
+
+/// A hash map implementation which uses linear probing with Robin
+/// Hood bucket stealing.
+///
+/// The hashes are all keyed by the task-local random number generator
+/// on creation by default. This means that the ordering of the keys is
+/// randomized, but makes the tables more resistant to
+/// denial-of-service attacks (Hash DoS). This behaviour can be
+/// overridden with one of the constructors.
+///
+/// It is required that the keys implement the `Eq` and `Hash` traits, although
+/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
+///
+/// Relevant papers/articles:
+///
+/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
+/// 2. Emmanuel Goossaert. ["Robin Hood
+/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
+/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
+/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `HashMap<&str, &str>` in this example).
+/// let mut book_reviews = HashMap::new();
+///
+/// // review some books.
+/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
+/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
+/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
+/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
+///
+/// // check for a specific one.
+/// if !book_reviews.contains_key(&("Les Misérables")) {
+/// println!("We've got {} reviews, but Les Misérables ain't one.",
+/// book_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
+///
+/// // look up the values associated with some keys.
+/// let to_find = ["Pride and Prejudice", "Alice's Adventures in Wonderland"];
+/// for book in to_find.iter() {
+/// match book_reviews.find(book) {
+/// Some(review) => println!("{}: {}", *book, *review),
+/// None => println!("{} is unreviewed.", *book)
+/// }
+/// }
+///
+/// // iterate over everything.
+/// for (book, review) in book_reviews.iter() {
+/// println!("{}: \"{}\"", *book, *review);
+/// }
+/// ```
+///
+/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
+/// We must also derive `PartialEq`.
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// #[deriving(Hash, Eq, PartialEq, Show)]
+/// struct Viking<'a> {
+/// name: &'a str,
+/// power: uint,
+/// }
+///
+/// let mut vikings = HashMap::new();
+///
+/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
+/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
+/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
+///
+/// // Use derived implementation to print the vikings.
+/// for (land, viking) in vikings.iter() {
+/// println!("{} at {}", viking, land);
+/// }
+/// ```
+#[deriving(Clone)]
+pub struct HashMap<K, V, H = RandomSipHasher> {
+ // All hashes are keyed on these values, to prevent hash collision attacks.
+ hasher: H,
+
+ table: RawTable<K, V>,
+
+ // We keep this at the end since it might as well have tail padding.
+ resize_policy: DefaultResizePolicy,
+}
+
+/// Search for a pre-hashed key.
+fn search_hashed_generic<K, V, M: Deref<RawTable<K, V>>>(table: M,
+ hash: &SafeHash,
+ is_match: |&K| -> bool)
+ -> SearchResult<K, V, M> {
+ let size = table.size();
+ let mut probe = Bucket::new(table, hash);
+ let ib = probe.index();
+
+ while probe.index() != ib + size {
+ let full = match probe.peek() {
+ Empty(b) => return TableRef(b.into_table()), // hit an empty bucket
+ Full(b) => b
+ };
+
+ if full.distance() + ib < full.index() {
+ // We can finish the search early if we hit any bucket
+ // with a lower distance to initial bucket than we've probed.
+ return TableRef(full.into_table());
+ }
+
+ // If the hash doesn't match, it can't be this one..
+ if *hash == full.hash() {
+ let matched = {
+ let (k, _) = full.read();
+ is_match(k)
+ };
+
+ // If the key doesn't match, it can't be this one..
+ if matched {
+ return FoundExisting(full);
+ }
+ }
+
+ probe = full.next();
+ }
+
+ TableRef(probe.into_table())
+}
+
+fn search_hashed<K: Eq, V, M: Deref<RawTable<K, V>>>(table: M, hash: &SafeHash, k: &K)
+ -> SearchResult<K, V, M> {
+ search_hashed_generic(table, hash, |k_| *k == *k_)
+}
+
+fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> V {
+ let (empty, _k, retval) = starting_bucket.take();
+ let mut gap = match empty.gap_peek() {
+ Some(b) => b,
+ None => return retval
+ };
+
+ while gap.full().distance() != 0 {
+ gap = match gap.shift() {
+ Some(b) => b,
+ None => break
+ };
+ }
+
+ // Now we've done all our shifting. Return the value we grabbed earlier.
+ return retval;
+}
+
+/// Perform robin hood bucket stealing at the given `bucket`. You must
+/// also pass the position of that bucket's initial bucket so we don't have
+/// to recalculate it.
+///
+/// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable.
+fn robin_hood<'a, K: 'a, V: 'a>(mut bucket: FullBucketMut<'a, K, V>,
+ mut ib: uint,
+ mut hash: SafeHash,
+ mut k: K,
+ mut v: V)
+ -> &'a mut V {
+ let starting_index = bucket.index();
+ let size = {
+ let table = bucket.table(); // FIXME "lifetime too short".
+ table.size()
+ };
+    // There can be at most `size - distance` buckets to displace, because
+ // in the worst case, there are `size` elements and we already are
+ // `distance` buckets away from the initial one.
+ let idx_end = starting_index + size - bucket.distance();
+
+ loop {
+ let (old_hash, old_key, old_val) = bucket.replace(hash, k, v);
+ loop {
+ let probe = bucket.next();
+ assert!(probe.index() != idx_end);
+
+ let full_bucket = match probe.peek() {
+ table::Empty(bucket) => {
+ // Found a hole!
+ let b = bucket.put(old_hash, old_key, old_val);
+ // Now that it's stolen, just read the value's pointer
+ // right out of the table!
+ let (_, v) = Bucket::at_index(b.into_table(), starting_index).peek()
+ .expect_full()
+ .into_mut_refs();
+ return v;
+ },
+ table::Full(bucket) => bucket
+ };
+
+ let probe_ib = full_bucket.index() - full_bucket.distance();
+
+ bucket = full_bucket;
+
+ // Robin hood! Steal the spot.
+ if ib < probe_ib {
+ ib = probe_ib;
+ hash = old_hash;
+ k = old_key;
+ v = old_val;
+ break;
+ }
+ }
+ }
+}
+
+/// A result that works like Option<FullBucket<..>> but preserves
+/// the reference that grants us access to the table in any case.
+enum SearchResult<K, V, M> {
+ // This is an entry that holds the given key:
+ FoundExisting(FullBucket<K, V, M>),
+
+ // There was no such entry. The reference is given back:
+ TableRef(M)
+}
+
+impl<K, V, M> SearchResult<K, V, M> {
+ fn into_option(self) -> Option<FullBucket<K, V, M>> {
+ match self {
+ FoundExisting(bucket) => Some(bucket),
+ TableRef(_) => None
+ }
+ }
+}
+
+/// A newtyped mutable reference to the hashmap that allows e.g. Deref to be
+/// implemented without making changes to the visible interface of HashMap.
+/// Used internally because it's accepted by the search functions above.
+struct MapMutRef<'a, K: 'a, V: 'a, H: 'a> {
+ map_ref: &'a mut HashMap<K, V, H>
+}
+
+impl<'a, K, V, H> Deref<RawTable<K, V>> for MapMutRef<'a, K, V, H> {
+ fn deref(&self) -> &RawTable<K, V> {
+ &self.map_ref.table
+ }
+}
+
+impl<'a, K, V, H> DerefMut<RawTable<K, V>> for MapMutRef<'a, K, V, H> {
+ fn deref_mut(&mut self) -> &mut RawTable<K, V> {
+ &mut self.map_ref.table
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
+ fn make_hash<X: Hash<S>>(&self, x: &X) -> SafeHash {
+ table::make_hash(&self.hasher, x)
+ }
+
+ fn search_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, q: &Q)
+ -> Option<FullBucketImm<'a, K, V>> {
+ let hash = self.make_hash(q);
+ search_hashed_generic(&self.table, &hash, |k| q.equiv(k)).into_option()
+ }
+
+ fn search_equiv_mut<'a, Q: Hash<S> + Equiv<K>>(&'a mut self, q: &Q)
+ -> Option<FullBucketMut<'a, K, V>> {
+ let hash = self.make_hash(q);
+ search_hashed_generic(&mut self.table, &hash, |k| q.equiv(k)).into_option()
+ }
+
+    /// Search for a key, yielding the matching bucket if it's found in the hashtable.
+ /// If you already have the hash for the key lying around, use
+ /// search_hashed.
+ fn search<'a>(&'a self, k: &K) -> Option<FullBucketImm<'a, K, V>> {
+ let hash = self.make_hash(k);
+ search_hashed(&self.table, &hash, k).into_option()
+ }
+
+ fn search_mut<'a>(&'a mut self, k: &K) -> Option<FullBucketMut<'a, K, V>> {
+ let hash = self.make_hash(k);
+ search_hashed(&mut self.table, &hash, k).into_option()
+ }
+
+    // The caller should ensure that the invariants of Robin Hood hashing hold.
+ fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
+ let cap = self.table.capacity();
+ let mut buckets = Bucket::new(&mut self.table, &hash);
+ let ib = buckets.index();
+
+ while buckets.index() != ib + cap {
+ // We don't need to compare hashes for value swap.
+ // Not even DIBs for Robin Hood.
+ buckets = match buckets.peek() {
+ Empty(empty) => {
+ empty.put(hash, k, v);
+ return;
+ }
+ Full(b) => b.into_bucket()
+ };
+ buckets.next();
+ }
+ fail!("Internal HashMap error: Out of space.");
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
+ /// Return the number of elements in the map.
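+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut a = HashMap::new();
+    /// assert_eq!(a.len(), 0);
+    /// a.insert(1u, "a");
+    /// assert_eq!(a.len(), 1);
+    /// ```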
+ fn len(&self) -> uint { self.table.size() }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
+ /// Clear the map, removing all key-value pairs. Keeps the allocated memory
+ /// for reuse.
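+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut a = HashMap::new();
+    /// a.insert(1u, "a");
+    /// a.clear();
+    /// assert!(a.is_empty());
+    /// ```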
+ fn clear(&mut self) {
+ // Prevent reallocations from happening from now on. Makes it possible
+ // for the map to be reused but has a downside: reserves permanently.
+ self.resize_policy.reserve(self.table.size());
+
+ let cap = self.table.capacity();
+ let mut buckets = Bucket::first(&mut self.table);
+
+ while buckets.index() != cap {
+ buckets = match buckets.peek() {
+ Empty(b) => b.next(),
+ Full(full) => {
+ let (b, _, _) = full.take();
+ b.next()
+ }
+ };
+ }
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
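+    /// Return a reference to the value corresponding to the key,
+    /// or `None` if the key is not present.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1u, "a");
+    /// assert_eq!(map.find(&1), Some(&"a"));
+    /// assert_eq!(map.find(&2), None);
+    /// ```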
+ fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
+ self.search(k).map(|bucket| {
+ let (_, v) = bucket.into_refs();
+ v
+ })
+ }
+
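+    /// Return true if the map contains a value for the specified key.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1u, "a");
+    /// assert_eq!(map.contains_key(&1), true);
+    /// assert_eq!(map.contains_key(&2), false);
+    /// ```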
+ fn contains_key(&self, k: &K) -> bool {
+ self.search(k).is_some()
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
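+    /// Return a mutable reference to the value corresponding to the key,
+    /// or `None` if the key is not present.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1u, "a");
+    /// match map.find_mut(&1) {
+    ///     Some(x) => *x = "b",
+    ///     None => ()
+    /// }
+    /// assert_eq!(map[1], "b");
+    /// ```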
+ fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
+ match self.search_mut(k) {
+ Some(bucket) => {
+ let (_, v) = bucket.into_mut_refs();
+ Some(v)
+ }
+ _ => None
+ }
+ }
+
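+    /// Insert a key-value pair into the map. If the key already had a value
+    /// present in the map, that value is returned. Otherwise, `None` is
+    /// returned.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// assert_eq!(map.swap(1u, "a"), None);
+    /// assert_eq!(map.swap(1u, "b"), Some("a"));
+    /// assert_eq!(map[1], "b");
+    /// ```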
+ fn swap(&mut self, k: K, v: V) -> Option<V> {
+ let hash = self.make_hash(&k);
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+
+ let mut retval = None;
+ self.insert_or_replace_with(hash, k, v, |_, val_ref, val| {
+ retval = Some(replace(val_ref, val));
+ });
+ retval
+ }
+
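+    /// Remove a key from the map, returning the value at the key if the key
+    /// was previously in the map.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// use std::collections::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1u, "a");
+    /// assert_eq!(map.pop(&1), Some("a"));
+    /// assert_eq!(map.pop(&1), None);
+    /// ```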
+ fn pop(&mut self, k: &K) -> Option<V> {
+ if self.table.size() == 0 {
+ return None
+ }
+
+ let potential_new_size = self.table.size() - 1;
+ self.make_some_room(potential_new_size);
+
+ self.search_mut(k).map(|bucket| {
+ pop_internal(bucket)
+ })
+ }
+}
+
+impl<K: Hash + Eq, V> HashMap<K, V, RandomSipHasher> {
+ /// Create an empty HashMap.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+    /// let mut map: HashMap<&str, int> = HashMap::new();
+ /// ```
+ #[inline]
+ pub fn new() -> HashMap<K, V, RandomSipHasher> {
+ let hasher = RandomSipHasher::new();
+ HashMap::with_hasher(hasher)
+ }
+
+ /// Creates an empty hash map with the given initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomSipHasher> {
+ let hasher = RandomSipHasher::new();
+ HashMap::with_capacity_and_hasher(capacity, hasher)
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
+ /// Creates an empty hashmap which will use the given hasher to hash keys.
+ ///
+    /// The created map has the default initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut map = HashMap::with_hasher(h);
+ /// map.insert(1i, 2u);
+ /// ```
+ #[inline]
+ pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
+ HashMap {
+ hasher: hasher,
+ resize_policy: DefaultResizePolicy::new(INITIAL_CAPACITY),
+ table: RawTable::new(0),
+ }
+ }
+
+ /// Create an empty HashMap with space for at least `capacity`
+ /// elements, using `hasher` to hash the keys.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow HashMaps to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut map = HashMap::with_capacity_and_hasher(10, h);
+ /// map.insert(1i, 2u);
+ /// ```
+ #[inline]
+ pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
+ let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
+ HashMap {
+ hasher: hasher,
+ resize_policy: DefaultResizePolicy::new(cap),
+ table: RawTable::new(cap),
+ }
+ }
+
+ /// The hashtable will never try to shrink below this size. You can use
+ /// this function to reduce reallocations if your hashtable frequently
+ /// grows and shrinks by large amounts.
+ ///
+ /// This function has no effect on the operational semantics of the
+ /// hashtable, only on performance.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::new();
+ /// map.reserve(10);
+ /// ```
+ pub fn reserve(&mut self, new_minimum_capacity: uint) {
+ let cap = num::next_power_of_two(
+ max(INITIAL_CAPACITY, new_minimum_capacity));
+
+ self.resize_policy.reserve(cap);
+
+ if self.table.capacity() < cap {
+ self.resize(cap);
+ }
+ }
+
+ /// Resizes the internal vectors to a new capacity. It's your responsibility to:
+ /// 1) Make sure the new capacity is enough for all the elements, accounting
+ /// for the load factor.
+ /// 2) Ensure new_capacity is a power of two.
+ fn resize(&mut self, new_capacity: uint) {
+ assert!(self.table.size() <= new_capacity);
+ assert!(num::is_power_of_two(new_capacity));
+
+ let mut old_table = replace(&mut self.table, RawTable::new(new_capacity));
+ let old_size = old_table.size();
+
+ if old_table.capacity() == 0 || old_table.size() == 0 {
+ return;
+ }
+
+ if new_capacity < old_table.capacity() {
+ // Shrink the table. Naive algorithm for resizing:
+ for (h, k, v) in old_table.move_iter() {
+ self.insert_hashed_nocheck(h, k, v);
+ }
+ } else {
+ // Grow the table.
+ // Specialization of the other branch.
+ let mut bucket = Bucket::first(&mut old_table);
+
+ // "So a few of the first shall be last: for many be called,
+ // but few chosen."
+ //
+ // We'll most likely encounter a few buckets at the beginning that
+ // have their initial buckets near the end of the table. They were
+ // placed at the beginning as the probe wrapped around the table
+ // during insertion. We must skip forward to a bucket that won't
+            // get reinserted too early and won't unfairly steal others' spots.
+ // This eliminates the need for robin hood.
+ loop {
+ bucket = match bucket.peek() {
+ Full(full) => {
+ if full.distance() == 0 {
+ // This bucket occupies its ideal spot.
+ // It indicates the start of another "cluster".
+ bucket = full.into_bucket();
+ break;
+ }
+ // Leaving this bucket in the last cluster for later.
+ full.into_bucket()
+ }
+ Empty(b) => {
+ // Encountered a hole between clusters.
+ b.into_bucket()
+ }
+ };
+ bucket.next();
+ }
+
+ // This is how the buckets might be laid out in memory:
+ // ($ marks an initialized bucket)
+ // ________________
+ // |$$$_$$$$$$_$$$$$|
+ //
+ // But we've skipped the entire initial cluster of buckets
+ // and will continue iteration in this order:
+ // ________________
+ // |$$$$$$_$$$$$
+ // ^ wrap around once end is reached
+ // ________________
+ // $$$_____________|
+ // ^ exit once table.size == 0
+ loop {
+ bucket = match bucket.peek() {
+ Full(bucket) => {
+ let h = bucket.hash();
+ let (b, k, v) = bucket.take();
+ self.insert_hashed_ordered(h, k, v);
+ {
+ let t = b.table(); // FIXME "lifetime too short".
+ if t.size() == 0 { break }
+ };
+ b.into_bucket()
+ }
+ Empty(b) => b.into_bucket()
+ };
+ bucket.next();
+ }
+ }
+
+ assert_eq!(self.table.size(), old_size);
+ }
+
+ /// Performs any necessary resize operations, such that there's space for
+ /// new_size elements.
+ fn make_some_room(&mut self, new_size: uint) {
+ let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
+ let cap = self.table.capacity();
+
+ // An invalid value shouldn't make us run out of space.
+ debug_assert!(grow_at >= new_size);
+
+ if cap <= grow_at {
+ let new_capacity = max(cap << 1, INITIAL_CAPACITY);
+ self.resize(new_capacity);
+ } else if shrink_at <= cap {
+ let new_capacity = cap >> 1;
+ self.resize(new_capacity);
+ }
+ }
+
+ /// Insert a pre-hashed key-value pair, without first checking
+ /// that there's enough room in the buckets. Returns a reference to the
+    /// newly inserted value.
+ ///
+ /// If the key already exists, the hashtable will be returned untouched
+ /// and a reference to the existing element will be returned.
+ fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
+ self.insert_or_replace_with(hash, k, v, |_, _, _| ())
+ }
+
+ fn insert_or_replace_with<'a>(&'a mut self,
+ hash: SafeHash,
+ k: K,
+ v: V,
+ found_existing: |&mut K, &mut V, V|)
+ -> &'a mut V {
+ // Worst case, we'll find one empty bucket among `size + 1` buckets.
+ let size = self.table.size();
+ let mut probe = Bucket::new(&mut self.table, &hash);
+ let ib = probe.index();
+
+ loop {
+ let mut bucket = match probe.peek() {
+ Empty(bucket) => {
+ // Found a hole!
+ let bucket = bucket.put(hash, k, v);
+ let (_, val) = bucket.into_mut_refs();
+ return val;
+ },
+ Full(bucket) => bucket
+ };
+
+ if bucket.hash() == hash {
+ let found_match = {
+ let (bucket_k, _) = bucket.read_mut();
+ k == *bucket_k
+ };
+ if found_match {
+ let (bucket_k, bucket_v) = bucket.into_mut_refs();
+ debug_assert!(k == *bucket_k);
+ // Key already exists. Get its reference.
+ found_existing(bucket_k, bucket_v, v);
+ return bucket_v;
+ }
+ }
+
+ let robin_ib = bucket.index() as int - bucket.distance() as int;
+
+ if (ib as int) < robin_ib {
+ // Found a luckier bucket than me. Better steal his spot.
+ return robin_hood(bucket, robin_ib as uint, hash, k, v);
+ }
+
+ probe = bucket.next();
+ assert!(probe.index() != ib + size + 1);
+ }
+ }
+
+ /// Inserts an element which has already been hashed, returning a reference
+    /// to that element inside the hashtable. This is more efficient than using
+ /// `insert`, since the key will not be rehashed.
+ fn insert_hashed(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+ self.insert_hashed_nocheck(hash, k, v)
+ }
+
+ /// Return the value corresponding to the key in the map, or insert
+ /// and return the value if it doesn't exist.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 1i with key "a"
+ /// assert_eq!(*map.find_or_insert("a", 1i), 1);
+ ///
+ /// // Find the existing key
+ /// assert_eq!(*map.find_or_insert("a", -2), 1);
+ /// ```
+ pub fn find_or_insert(&mut self, k: K, v: V) -> &mut V {
+ self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
+ }
+
+ /// Return the value corresponding to the key in the map, or create,
+ /// insert, and return a new value if it doesn't exist.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 10 with key 2
+ /// assert_eq!(*map.find_or_insert_with(2i, |&key| 5 * key as uint), 10u);
+ ///
+ /// // Find the existing key
+ /// assert_eq!(*map.find_or_insert_with(2, |&key| key as uint), 10);
+ /// ```
+ pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
+ -> &'a mut V {
+ self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
+ }
+
+ /// Insert a key-value pair into the map if the key is not already present.
+ /// Otherwise, modify the existing value for the key.
+ /// Returns the new or modified value for the key.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 2 with key "a"
+ /// assert_eq!(*map.insert_or_update_with("a", 2u, |_key, val| *val = 3), 2);
+ ///
+ /// // Update and return the existing value
+ /// assert_eq!(*map.insert_or_update_with("a", 9, |_key, val| *val = 7), 7);
+ /// assert_eq!(map["a"], 7);
+ /// ```
+ pub fn insert_or_update_with<'a>(
+ &'a mut self,
+ k: K,
+ v: V,
+ f: |&K, &mut V|)
+ -> &'a mut V {
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+
+ let hash = self.make_hash(&k);
+ self.insert_or_replace_with(hash, k, v, |kref, vref, _v| f(kref, vref))
+ }
+
+ /// Modify and return the value corresponding to the key in the map, or
+ /// insert and return a new value if it doesn't exist.
+ ///
+ /// This method allows for all insertion behaviours of a hashmap;
+ /// see methods like
+ /// [`insert`](../trait.MutableMap.html#tymethod.insert),
+ /// [`find_or_insert`](#method.find_or_insert) and
+ /// [`insert_or_update_with`](#method.insert_or_update_with)
+ /// for less general and more friendly variations of this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// // map some strings to vectors of strings
+ /// let mut map = HashMap::new();
+ /// map.insert("a key", vec!["value"]);
+ /// map.insert("z key", vec!["value"]);
+ ///
+ /// let new = vec!["a key", "b key", "z key"];
+ ///
+ /// for k in new.move_iter() {
+ /// map.find_with_or_insert_with(
+ /// k, "new value",
+ /// // if the key does exist either prepend or append this
+ /// // new value based on the first letter of the key.
+ /// |key, already, new| {
+ /// if key.as_slice().starts_with("z") {
+ /// already.insert(0, new);
+ /// } else {
+ /// already.push(new);
+ /// }
+ /// },
+ /// // if the key doesn't exist in the map yet, add it in
+ /// // the obvious way.
+ /// |_k, v| vec![v]);
+ /// }
+ ///
+ /// assert_eq!(map.len(), 3);
+ /// assert_eq!(map["a key"], vec!["value", "new value"]);
+ /// assert_eq!(map["b key"], vec!["new value"]);
+ /// assert_eq!(map["z key"], vec!["new value", "value"]);
+ /// ```
+ pub fn find_with_or_insert_with<'a, A>(&'a mut self,
+ k: K,
+ a: A,
+ found: |&K, &mut V, A|,
+ not_found: |&K, A| -> V)
+ -> &'a mut V
+ {
+ let hash = self.make_hash(&k);
+ let this = MapMutRef { map_ref: self };
+
+ match search_hashed(this, &hash, &k) {
+ FoundExisting(bucket) => {
+ let (_, v_ref) = bucket.into_mut_refs();
+ found(&k, v_ref, a);
+ v_ref
+ }
+ TableRef(this) => {
+ let v = not_found(&k, a);
+ this.map_ref.insert_hashed(hash, k, v)
+ }
+ }
+ }
+
+ /// Retrieves a value for the given key.
+ /// See [`find`](../trait.Map.html#tymethod.find) for a non-failing alternative.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![allow(deprecated)]
+ ///
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// assert_eq!(map.get(&"a"), &1);
+ /// ```
+ #[deprecated = "prefer indexing instead, e.g., map[key]"]
+ pub fn get<'a>(&'a self, k: &K) -> &'a V {
+ match self.find(k) {
+ Some(v) => v,
+ None => fail!("no entry found for key")
+ }
+ }
+
+ /// Retrieves a mutable value for the given key.
+ /// See [`find_mut`](../trait.MutableMap.html#tymethod.find_mut) for a non-failing alternative.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// {
+ /// // val will freeze map to prevent usage during its lifetime
+ /// let val = map.get_mut(&"a");
+ /// *val = 40;
+ /// }
+ /// assert_eq!(map["a"], 40);
+ ///
+ /// // A more direct way could be:
+ /// *map.get_mut(&"a") = -2;
+ /// assert_eq!(map["a"], -2);
+ /// ```
+ pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
+ match self.find_mut(k) {
+ Some(v) => v,
+ None => fail!("no entry found for key")
+ }
+ }
+
+ /// Return true if the map contains a value for the specified key,
+ /// using equivalence.
+ ///
+ /// See [pop_equiv](#method.pop_equiv) for an extended example.
+ pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
+ self.search_equiv(key).is_some()
+ }
+
+ /// Return the value corresponding to the key in the map, using
+ /// equivalence.
+ ///
+ /// See [pop_equiv](#method.pop_equiv) for an extended example.
+ pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
+ match self.search_equiv(k) {
+ None => None,
+ Some(bucket) => {
+ let (_, v_ref) = bucket.into_refs();
+ Some(v_ref)
+ }
+ }
+ }
+
+ /// Remove an equivalent key from the map, returning the value at the
+ /// key if the key was previously in the map.
+ ///
+ /// # Example
+ ///
+ /// This is a slightly silly example where we define the number's
+ /// parity as the equivalence class. It is important that the
+ /// values hash the same, which is why we implement `Hash`.
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::Hash;
+ /// use std::hash::sip::SipState;
+ ///
+ /// #[deriving(Eq, PartialEq)]
+ /// struct EvenOrOdd {
+ /// num: uint
+ /// };
+ ///
+ /// impl Hash for EvenOrOdd {
+ /// fn hash(&self, state: &mut SipState) {
+ /// let parity = self.num % 2;
+ /// parity.hash(state);
+ /// }
+ /// }
+ ///
+ /// impl Equiv<EvenOrOdd> for EvenOrOdd {
+ /// fn equiv(&self, other: &EvenOrOdd) -> bool {
+ /// self.num % 2 == other.num % 2
+ /// }
+ /// }
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(EvenOrOdd { num: 3 }, "foo");
+ ///
+ /// assert!(map.contains_key_equiv(&EvenOrOdd { num: 1 }));
+ /// assert!(!map.contains_key_equiv(&EvenOrOdd { num: 4 }));
+ ///
+ /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 5 }), Some(&"foo"));
+ /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 2 }), None);
+ ///
+ /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 1 }), Some("foo"));
+ /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 2 }), None);
+ ///
+ /// ```
+ #[experimental]
+ pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
+ if self.table.size() == 0 {
+ return None
+ }
+
+ let potential_new_size = self.table.size() - 1;
+ self.make_some_room(potential_new_size);
+
+ match self.search_equiv_mut(k) {
+ Some(bucket) => {
+ Some(pop_internal(bucket))
+ }
+ _ => None
+ }
+ }
+
+ /// An iterator visiting all keys in arbitrary order.
+ /// Iterator element type is `&'a K`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for key in map.keys() {
+ /// println!("{}", key);
+ /// }
+ /// ```
+ pub fn keys(&self) -> Keys<K, V> {
+ self.iter().map(|(k, _v)| k)
+ }
+
+ /// An iterator visiting all values in arbitrary order.
+ /// Iterator element type is `&'a V`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+    /// for val in map.values() {
+    ///     println!("{}", val);
+ /// }
+ /// ```
+ pub fn values(&self) -> Values<K, V> {
+ self.iter().map(|(_k, v)| v)
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order.
+ /// Iterator element type is `(&'a K, &'a V)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {} val: {}", key, val);
+ /// }
+ /// ```
+ pub fn iter(&self) -> Entries<K, V> {
+ Entries { inner: self.table.iter() }
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order,
+ /// with mutable references to the values.
+ /// Iterator element type is `(&'a K, &'a mut V)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// // Update all values
+ /// for (_, val) in map.mut_iter() {
+ /// *val *= 2;
+ /// }
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {} val: {}", key, val);
+ /// }
+ /// ```
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ MutEntries { inner: self.table.mut_iter() }
+ }
+
+ /// Creates a consuming iterator, that is, one that moves each key-value
+ /// pair out of the map in arbitrary order. The map cannot be used after
+ /// calling this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// // Not possible with .iter()
+ /// let vec: Vec<(&str, int)> = map.move_iter().collect();
+ /// ```
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ MoveEntries {
+ inner: self.table.move_iter().map(|(_, k, v)| (k, v))
+ }
+ }
+}
+
+impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
+ /// Return a copy of the value corresponding to the key.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<uint, String> = HashMap::new();
+ /// map.insert(1u, "foo".to_string());
+ /// let s: String = map.find_copy(&1).unwrap();
+ /// ```
+ pub fn find_copy(&self, k: &K) -> Option<V> {
+ self.find(k).map(|v| (*v).clone())
+ }
+
+ /// Return a copy of the value corresponding to the key.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<uint, String> = HashMap::new();
+ /// map.insert(1u, "foo".to_string());
+ /// let s: String = map.get_copy(&1);
+ /// ```
+ pub fn get_copy(&self, k: &K) -> V {
+ (*self.get(k)).clone()
+ }
+}
+
+impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
+ fn eq(&self, other: &HashMap<K, V, H>) -> bool {
+ if self.len() != other.len() { return false; }
+
+ self.iter().all(|(key, value)|
+ other.find(key).map_or(false, |v| *value == *v)
+ )
+ }
+}
+
+impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
+
+impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "{{"));
+
+ for (i, (k, v)) in self.iter().enumerate() {
+ if i != 0 { try!(write!(f, ", ")); }
+ try!(write!(f, "{}: {}", *k, *v));
+ }
+
+ write!(f, "}}")
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
+ fn default() -> HashMap<K, V, H> {
+ HashMap::with_hasher(Default::default())
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Index<K, V> for HashMap<K, V, H> {
+ #[inline]
+ fn index<'a>(&'a self, index: &K) -> &'a V {
+ self.get(index)
+ }
+}
+
+// FIXME(#12825) Indexing will always try IndexMut first and that causes issues.
+/*impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> ops::IndexMut<K, V> for HashMap<K, V, H> {
+ #[inline]
+ fn index_mut<'a>(&'a mut self, index: &K) -> &'a mut V {
+ self.get_mut(index)
+ }
+}*/
+
+/// HashMap iterator
+pub struct Entries<'a, K: 'a, V: 'a> {
+ inner: table::Entries<'a, K, V>
+}
+
+/// HashMap mutable values iterator
+pub struct MutEntries<'a, K: 'a, V: 'a> {
+ inner: table::MutEntries<'a, K, V>
+}
+
+/// HashMap move iterator
+pub struct MoveEntries<K, V> {
+ inner: iter::Map<'static, (SafeHash, K, V), (K, V), table::MoveEntries<K, V>>
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<K, V> Iterator<(K, V)> for MoveEntries<K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(K, V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+/// HashMap keys iterator
+pub type Keys<'a, K, V> =
+ iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
+
+/// HashMap values iterator
+pub type Values<'a, K, V> =
+ iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
+ fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
+ let (lower, _) = iter.size_hint();
+ let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
+ map.extend(iter);
+ map
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
+ fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
+ for (k, v) in iter {
+ self.insert(k, v);
+ }
+ }
+}
+
+#[cfg(test)]
+mod test_map {
+ use prelude::*;
+
+ use super::HashMap;
+ use cmp::Equiv;
+ use hash;
+ use iter::{Iterator,range_inclusive,range_step_inclusive};
+ use cell::RefCell;
+
+ struct KindaIntLike(int);
+
+ impl Equiv<int> for KindaIntLike {
+ fn equiv(&self, other: &int) -> bool {
+ let KindaIntLike(this) = *self;
+ this == *other
+ }
+ }
+ impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
+ fn hash(&self, state: &mut S) {
+ let KindaIntLike(this) = *self;
+ this.hash(state)
+ }
+ }
+
+ #[test]
+ fn test_create_capacity_zero() {
+ let mut m = HashMap::with_capacity(0);
+
+ assert!(m.insert(1i, 1i));
+
+ assert!(m.contains_key(&1));
+ assert!(!m.contains_key(&0));
+ }
+
+ #[test]
+ fn test_insert() {
+ let mut m = HashMap::new();
+ assert_eq!(m.len(), 0);
+ assert!(m.insert(1i, 2i));
+ assert_eq!(m.len(), 1);
+ assert!(m.insert(2i, 4i));
+ assert_eq!(m.len(), 2);
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&2).unwrap(), 4);
+ }
+
+ local_data_key!(drop_vector: RefCell<Vec<int>>)
+
+ #[deriving(Hash, PartialEq, Eq)]
+ struct Dropable {
+ k: uint
+ }
+
+ impl Dropable {
+ fn new(k: uint) -> Dropable {
+ let v = drop_vector.get().unwrap();
+ v.borrow_mut().as_mut_slice()[k] += 1;
+
+ Dropable { k: k }
+ }
+ }
+
+ impl Drop for Dropable {
+ fn drop(&mut self) {
+ let v = drop_vector.get().unwrap();
+ v.borrow_mut().as_mut_slice()[self.k] -= 1;
+ }
+ }
+
+ impl Clone for Dropable {
+ fn clone(&self) -> Dropable {
+ Dropable::new(self.k)
+ }
+ }
+
+ #[test]
+ fn test_drops() {
+ drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
+
+ {
+ let mut m = HashMap::new();
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ drop(v);
+
+ for i in range(0u, 100) {
+ let d1 = Dropable::new(i);
+ let d2 = Dropable::new(i+100);
+ m.insert(d1, d2);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ for i in range(0u, 50) {
+ let k = Dropable::new(i);
+ let v = m.pop(&k);
+
+ assert!(v.is_some());
+
+ let v = drop_vector.get().unwrap();
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ assert_eq!(v.borrow().as_slice()[i+100], 1);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 50) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ assert_eq!(v.borrow().as_slice()[i+100], 0);
+ }
+
+ for i in range(50u, 100) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ assert_eq!(v.borrow().as_slice()[i+100], 1);
+ }
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ }
+
+ #[test]
+ fn test_move_iter_drops() {
+ drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
+
+ let hm = {
+ let mut hm = HashMap::new();
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ drop(v);
+
+ for i in range(0u, 100) {
+ let d1 = Dropable::new(i);
+ let d2 = Dropable::new(i+100);
+ hm.insert(d1, d2);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ hm
+ };
+
+ // By the way, ensure that cloning doesn't screw up the dropping.
+ drop(hm.clone());
+
+ {
+ let mut half = hm.move_iter().take(50);
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ for _ in half {}
+
+ let v = drop_vector.get().unwrap();
+ let nk = range(0u, 100).filter(|&i| {
+ v.borrow().as_slice()[i] == 1
+ }).count();
+
+ let nv = range(0u, 100).filter(|&i| {
+ v.borrow().as_slice()[i+100] == 1
+ }).count();
+
+ assert_eq!(nk, 50);
+ assert_eq!(nv, 50);
+ };
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ }
+
+ #[test]
+ fn test_empty_pop() {
+ let mut m: HashMap<int, bool> = HashMap::new();
+ assert_eq!(m.pop(&0), None);
+ }
+
+ #[test]
+ fn test_lots_of_insertions() {
+ let mut m = HashMap::new();
+
+ // Try this a few times to make sure we never screw up the hashmap's
+ // internal state.
+ for _ in range(0i, 10) {
+ assert!(m.is_empty());
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.insert(i, i));
+
+ for j in range_inclusive(1, i) {
+ let r = m.find(&j);
+ assert_eq!(r, Some(&j));
+ }
+
+ for j in range_inclusive(i+1, 1000) {
+ let r = m.find(&j);
+ assert_eq!(r, None);
+ }
+ }
+
+ for i in range_inclusive(1001i, 2000) {
+ assert!(!m.contains_key(&i));
+ }
+
+ // remove forwards
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.remove(&i));
+
+ for j in range_inclusive(1, i) {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in range_inclusive(i+1, 1000) {
+ assert!(m.contains_key(&j));
+ }
+ }
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(!m.contains_key(&i));
+ }
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.insert(i, i));
+ }
+
+ // remove backwards
+ for i in range_step_inclusive(1000i, 1, -1) {
+ assert!(m.remove(&i));
+
+ for j in range_inclusive(i, 1000) {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in range_inclusive(1, i-1) {
+ assert!(m.contains_key(&j));
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_find_mut() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1i, 12i));
+ assert!(m.insert(2i, 8i));
+ assert!(m.insert(5i, 14i));
+ let new = 100;
+ match m.find_mut(&5) {
+ None => fail!(), Some(x) => *x = new
+ }
+ assert_eq!(m.find(&5), Some(&new));
+ }
+
+ #[test]
+ fn test_insert_overwrite() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1i, 2i));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert!(!m.insert(1i, 3i));
+ assert_eq!(*m.find(&1).unwrap(), 3);
+ }
+
+ #[test]
+ fn test_insert_conflicts() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert!(m.insert(5i, 3i));
+ assert!(m.insert(9i, 4i));
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ }
+
+ #[test]
+ fn test_update_with() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+
+ for i in range(1i, 1000) {
+ assert_eq!(
+ i + 2,
+ *m.insert_or_update_with(i + 1, i + 2, |_k, _v| {
+ fail!("Key not yet present");
+ })
+ );
+ assert_eq!(
+ i + 1,
+ *m.insert_or_update_with(i, i + 3, |k, v| {
+ assert_eq!(*k, i);
+ assert_eq!(*v, i + 1);
+ })
+ );
+ }
+ }
+
+ #[test]
+ fn test_conflict_remove() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert!(m.insert(5, 3));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert!(m.insert(9, 4));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert!(m.remove(&1));
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ }
+
+ #[test]
+ fn test_is_empty() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert!(!m.is_empty());
+ assert!(m.remove(&1));
+ assert!(m.is_empty());
+ }
+
+ #[test]
+ fn test_pop() {
+ let mut m = HashMap::new();
+ m.insert(1i, 2i);
+ assert_eq!(m.pop(&1), Some(2));
+ assert_eq!(m.pop(&1), None);
+ }
+
+ #[test]
+ #[allow(experimental)]
+ fn test_pop_equiv() {
+ let mut m = HashMap::new();
+ m.insert(1i, 2i);
+ assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
+ assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
+ }
+
+ #[test]
+ fn test_swap() {
+ let mut m = HashMap::new();
+ assert_eq!(m.swap(1i, 2i), None);
+ assert_eq!(m.swap(1i, 3i), Some(2));
+ assert_eq!(m.swap(1i, 4i), Some(3));
+ }
+
+ #[test]
+ fn test_iterate() {
+ let mut m = HashMap::with_capacity(4);
+ for i in range(0u, 32) {
+ assert!(m.insert(i, i*2));
+ }
+ assert_eq!(m.len(), 32);
+
+ let mut observed: u32 = 0;
+
+ for (k, v) in m.iter() {
+ assert_eq!(*v, *k * 2);
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_keys() {
+ let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
+ let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn test_values() {
+ let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
+ let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let values = map.values().map(|&v| v).collect::<Vec<char>>();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ fn test_find() {
+ let mut m = HashMap::new();
+ assert!(m.find(&1i).is_none());
+ m.insert(1i, 2i);
+ match m.find(&1) {
+ None => fail!(),
+ Some(v) => assert_eq!(*v, 2)
+ }
+ }
+
+ #[test]
+ fn test_find_copy() {
+ let mut m = HashMap::new();
+ assert!(m.find(&1i).is_none());
+
+ for i in range(1i, 10000) {
+ m.insert(i, i + 7);
+ match m.find_copy(&i) {
+ None => fail!(),
+ Some(v) => assert_eq!(v, i + 7)
+ }
+ for j in range(1i, i/100) {
+ match m.find_copy(&j) {
+ None => fail!(),
+ Some(v) => assert_eq!(v, j + 7)
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_eq() {
+ let mut m1 = HashMap::new();
+ m1.insert(1i, 2i);
+ m1.insert(2i, 3i);
+ m1.insert(3i, 4i);
+
+ let mut m2 = HashMap::new();
+ m2.insert(1i, 2i);
+ m2.insert(2i, 3i);
+
+ assert!(m1 != m2);
+
+ m2.insert(3i, 4i);
+
+ assert_eq!(m1, m2);
+ }
+
+ #[test]
+ fn test_show() {
+ let mut map: HashMap<int, int> = HashMap::new();
+ let empty: HashMap<int, int> = HashMap::new();
+
+ map.insert(1i, 2i);
+ map.insert(3i, 4i);
+
+ let map_str = format!("{}", map);
+
+ assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
+ assert_eq!(format!("{}", empty), "{}".to_string());
+ }
+
+ #[test]
+ fn test_expand() {
+ let mut m = HashMap::new();
+
+ assert_eq!(m.len(), 0);
+ assert!(m.is_empty());
+
+ let mut i = 0u;
+ let old_cap = m.table.capacity();
+ while old_cap == m.table.capacity() {
+ m.insert(i, i);
+ i += 1;
+ }
+
+ assert_eq!(m.len(), i);
+ assert!(!m.is_empty());
+ }
+
+ #[test]
+ fn test_resize_policy() {
+ let mut m = HashMap::new();
+
+ assert_eq!(m.len(), 0);
+ assert_eq!(m.table.capacity(), 0);
+ assert!(m.is_empty());
+
+ m.insert(0, 0);
+ m.remove(&0);
+ assert!(m.is_empty());
+ let initial_cap = m.table.capacity();
+ m.reserve(initial_cap * 2);
+ let cap = m.table.capacity();
+
+ assert_eq!(cap, initial_cap * 2);
+
+ let mut i = 0u;
+ for _ in range(0, cap * 3 / 4) {
+ m.insert(i, i);
+ i += 1;
+ }
+ // three quarters full
+
+ assert_eq!(m.len(), i);
+ assert_eq!(m.table.capacity(), cap);
+
+ for _ in range(0, cap / 4) {
+ m.insert(i, i);
+ i += 1;
+ }
+ // half full
+
+ let new_cap = m.table.capacity();
+ assert_eq!(new_cap, cap * 2);
+
+ for _ in range(0, cap / 2 - 1) {
+ i -= 1;
+ m.remove(&i);
+ assert_eq!(m.table.capacity(), new_cap);
+ }
+ // A little more than one quarter full.
+ // Shrinking starts as we remove more elements:
+ for _ in range(0, cap / 2 - 1) {
+ i -= 1;
+ m.remove(&i);
+ }
+
+ assert_eq!(m.len(), i);
+ assert!(!m.is_empty());
+ assert_eq!(m.table.capacity(), cap);
+ }
+
+ #[test]
+ fn test_find_equiv() {
+ let mut m = HashMap::new();
+
+ let (foo, bar, baz) = (1i,2i,3i);
+ m.insert("foo".to_string(), foo);
+ m.insert("bar".to_string(), bar);
+ m.insert("baz".to_string(), baz);
+
+ assert_eq!(m.find_equiv(&("foo")), Some(&foo));
+ assert_eq!(m.find_equiv(&("bar")), Some(&bar));
+ assert_eq!(m.find_equiv(&("baz")), Some(&baz));
+
+ assert_eq!(m.find_equiv(&("qux")), None);
+ }
+
+ #[test]
+ fn test_from_iter() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ for &(k, v) in xs.iter() {
+ assert_eq!(map.find(&k), Some(&v));
+ }
+ }
+
+ #[test]
+ fn test_size_hint() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ let mut iter = map.iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+ }
+
+ #[test]
+ fn test_mut_size_hint() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ let mut iter = map.mut_iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+ }
+
+ #[test]
+ fn test_index() {
+ let mut map: HashMap<int, int> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ assert_eq!(map[2], 1);
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_index_nonexistent() {
+ let mut map: HashMap<int, int> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ map[4];
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unordered containers, implemented as hash-tables
+
+pub use self::map::HashMap;
+pub use self::map::Entries;
+pub use self::map::MutEntries;
+pub use self::map::MoveEntries;
+pub use self::map::Keys;
+pub use self::map::Values;
+pub use self::map::INITIAL_CAPACITY;
+pub use self::set::HashSet;
+pub use self::set::SetItems;
+pub use self::set::SetMoveItems;
+pub use self::set::SetAlgebraItems;
+
+mod bench;
+mod map;
+mod set;
+mod table;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp::{Eq, Equiv, PartialEq};
+use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
+use default::Default;
+use fmt::Show;
+use fmt;
+use hash::{Hash, Hasher, RandomSipHasher};
+use iter::{Iterator, FromIterator, FilterMap, Chain, Repeat, Zip, Extendable};
+use iter;
+use option::{Some, None};
+use result::{Ok, Err};
+
+use super::{HashMap, Entries, MoveEntries, INITIAL_CAPACITY};
+
+
+// Future Optimization (FIXME!)
+// =============================
+//
+// Iteration over zero sized values is a noop. There is no need
+// for `bucket.val` in the case of HashSet. I suppose we would need HKT
+// to get rid of it properly.
+
+/// An implementation of a hash set using the underlying representation of a
+/// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
+/// requires that the elements implement the `Eq` and `Hash` traits.
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashSet;
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `HashSet<&str>` in this example).
+/// let mut books = HashSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons");
+/// books.insert("To Kill a Mockingbird");
+/// books.insert("The Odyssey");
+/// books.insert("The Great Gatsby");
+///
+/// // Check for a specific one.
+/// if !books.contains(&("The Winds of Winter")) {
+/// println!("We have {} books, but The Winds of Winter ain't one.",
+/// books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove(&"The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in books.iter() {
+/// println!("{}", *book);
+/// }
+/// ```
+///
+/// The easiest way to use `HashSet` with a custom type is to derive
+/// `Eq` and `Hash`. We must also derive `PartialEq`; in the future this
+/// will be implied by `Eq`.
+///
+/// ```
+/// use std::collections::HashSet;
+/// #[deriving(Hash, Eq, PartialEq, Show)]
+/// struct Viking<'a> {
+/// name: &'a str,
+/// power: uint,
+/// }
+///
+/// let mut vikings = HashSet::new();
+///
+/// vikings.insert(Viking { name: "Einar", power: 9u });
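+/// // Inserting a duplicate element has no effect: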
+/// vikings.insert(Viking { name: "Einar", power: 9u });
+/// vikings.insert(Viking { name: "Olaf", power: 4u });
+/// vikings.insert(Viking { name: "Harald", power: 8u });
+///
+/// // Use derived implementation to print the vikings.
+/// for x in vikings.iter() {
+/// println!("{}", x);
+/// }
+/// ```
+#[deriving(Clone)]
+pub struct HashSet<T, H = RandomSipHasher> {
+ map: HashMap<T, (), H>
+}
+
+impl<T: Hash + Eq> HashSet<T, RandomSipHasher> {
+ /// Create an empty HashSet.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::new();
+ /// ```
+ #[inline]
+ pub fn new() -> HashSet<T, RandomSipHasher> {
+ HashSet::with_capacity(INITIAL_CAPACITY)
+ }
+
+ /// Create an empty HashSet with space for at least `n` elements in
+ /// the hash table.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::with_capacity(10);
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: uint) -> HashSet<T, RandomSipHasher> {
+ HashSet { map: HashMap::with_capacity(capacity) }
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
+ /// Creates a new empty hash set which will use the given hasher to hash
+ /// keys.
+ ///
+ /// The hash set is also created with the default initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut set = HashSet::with_hasher(h);
+ /// set.insert(2u);
+ /// ```
+ #[inline]
+ pub fn with_hasher(hasher: H) -> HashSet<T, H> {
+ HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
+ }
+
+ /// Create an empty HashSet with space for at least `capacity`
+ /// elements in the hash table, using `hasher` to hash the keys.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow `HashSet`s to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut set = HashSet::with_capacity_and_hasher(10u, h);
+ /// set.insert(1i);
+ /// ```
+ #[inline]
+ pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
+ HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
+ }
+
+ /// Reserve space for at least `n` elements in the hash table.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::new();
+ /// set.reserve(10);
+ /// ```
+ pub fn reserve(&mut self, n: uint) {
+ self.map.reserve(n)
+ }
+
+ /// Returns true if the hash set contains a value equivalent to the
+ /// given query value.
+ ///
+ /// # Example
+ ///
+ /// This is a slightly silly example where we define the number's
+    /// parity as the equivalence class. It is important that the
+ /// values hash the same, which is why we implement `Hash`.
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::Hash;
+ /// use std::hash::sip::SipState;
+ ///
+ /// #[deriving(Eq, PartialEq)]
+ /// struct EvenOrOdd {
+ /// num: uint
+ /// };
+ ///
+ /// impl Hash for EvenOrOdd {
+ /// fn hash(&self, state: &mut SipState) {
+ /// let parity = self.num % 2;
+ /// parity.hash(state);
+ /// }
+ /// }
+ ///
+ /// impl Equiv<EvenOrOdd> for EvenOrOdd {
+ /// fn equiv(&self, other: &EvenOrOdd) -> bool {
+ /// self.num % 2 == other.num % 2
+ /// }
+ /// }
+ ///
+ /// let mut set = HashSet::new();
+ /// set.insert(EvenOrOdd { num: 3u });
+ ///
+ /// assert!(set.contains_equiv(&EvenOrOdd { num: 3u }));
+ /// assert!(set.contains_equiv(&EvenOrOdd { num: 5u }));
+ /// assert!(!set.contains_equiv(&EvenOrOdd { num: 4u }));
+ /// assert!(!set.contains_equiv(&EvenOrOdd { num: 2u }));
+ ///
+ /// ```
+ pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
+ self.map.contains_key_equiv(value)
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+    /// Iterator element type is `&'a T`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a");
+ /// set.insert("b");
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in set.iter() {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
+ self.map.keys()
+ }
+
+ /// Creates a consuming iterator, that is, one that moves each value out
+ /// of the set in arbitrary order. The set cannot be used after calling
+ /// this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a".to_string());
+ /// set.insert("b".to_string());
+ ///
+ /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
+ /// let v: Vec<String> = set.move_iter().collect();
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in v.iter() {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ pub fn move_iter(self) -> SetMoveItems<T> {
+ self.map.move_iter().map(|(k, _)| k)
+ }
+
+ /// Visit the values representing the difference.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Can be seen as `a - b`.
+ /// for x in a.difference(&b) {
+    ///     println!("{}", x); // Prints 1
+ /// }
+ ///
+ /// let diff: HashSet<int> = a.difference(&b).map(|&x| x).collect();
+ /// assert_eq!(diff, [1i].iter().map(|&x| x).collect());
+ ///
+ /// // Note that difference is not symmetric,
+ /// // and `b - a` means something else:
+ /// let diff: HashSet<int> = b.difference(&a).map(|&x| x).collect();
+ /// assert_eq!(diff, [4i].iter().map(|&x| x).collect());
+ /// ```
+ pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
+ Repeat::new(other).zip(self.iter())
+ .filter_map(|(other, elt)| {
+ if !other.contains(elt) { Some(elt) } else { None }
+ })
+ }
+
+ /// Visit the values representing the symmetric difference.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+    /// // Prints 1 and 4 in arbitrary order.
+ /// for x in a.symmetric_difference(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let diff1: HashSet<int> = a.symmetric_difference(&b).map(|&x| x).collect();
+ /// let diff2: HashSet<int> = b.symmetric_difference(&a).map(|&x| x).collect();
+ ///
+ /// assert_eq!(diff1, diff2);
+ /// assert_eq!(diff1, [1i, 4].iter().map(|&x| x).collect());
+ /// ```
+ pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
+ self.difference(other).chain(other.difference(self))
+ }
+
+ /// Visit the values representing the intersection.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+    /// // Prints 2 and 3 in arbitrary order.
+ /// for x in a.intersection(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+    /// let intersection: HashSet<int> = a.intersection(&b).map(|&x| x).collect();
+    /// assert_eq!(intersection, [2i, 3].iter().map(|&x| x).collect());
+ /// ```
+ pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> SetAlgebraItems<'a, T, H> {
+ Repeat::new(other).zip(self.iter())
+ .filter_map(|(other, elt)| {
+ if other.contains(elt) { Some(elt) } else { None }
+ })
+ }
+
+ /// Visit the values representing the union.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+    /// // Prints 1, 2, 3, 4 in arbitrary order.
+ /// for x in a.union(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+    /// let union: HashSet<int> = a.union(&b).map(|&x| x).collect();
+    /// assert_eq!(union, [1i, 2, 3, 4].iter().map(|&x| x).collect());
+ /// ```
+ pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
+ self.iter().chain(other.difference(self))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
+ fn eq(&self, other: &HashSet<T, H>) -> bool {
+ if self.len() != other.len() { return false; }
+
+ self.iter().all(|key| other.contains(key))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
+ fn len(&self) -> uint { self.map.len() }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
+ fn clear(&mut self) { self.map.clear() }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
+ fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
+
+ fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
+ self.iter().all(|v| !other.contains(v))
+ }
+
+ fn is_subset(&self, other: &HashSet<T, H>) -> bool {
+ self.iter().all(|v| other.contains(v))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
+ fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
+
+ fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
+}
+
+impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "{{"));
+
+ for (i, x) in self.iter().enumerate() {
+ if i != 0 { try!(write!(f, ", ")); }
+ try!(write!(f, "{}", *x));
+ }
+
+ write!(f, "}}")
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
+ fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
+ let (lower, _) = iter.size_hint();
+ let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
+ set.extend(iter);
+ set
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
+ fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
+ for k in iter {
+ self.insert(k);
+ }
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
+ fn default() -> HashSet<T, H> {
+ HashSet::with_hasher(Default::default())
+ }
+}
+
+/// HashSet iterator
+pub type SetItems<'a, K> =
+ iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
+
+/// HashSet move iterator
+pub type SetMoveItems<K> =
+ iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
+
+// `Repeat` is used to feed the filter closure an explicit capture
+// of a reference to the other set
+/// Set operations iterator
+pub type SetAlgebraItems<'a, T, H> =
+ FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
+ Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
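+
+// A hedged sketch of the pattern: `a.difference(&b)` is built as
+//
+//     Repeat::new(&b).zip(a.iter())
+//         .filter_map(|(other, elt)| if !other.contains(elt) { Some(elt) }
+//                                    else { None })
+//
+// so the closure receives the pair `(&b, elt)` at every step, threading the
+// borrow of `b` through the iterator without needing a capturing closure.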
+
+#[cfg(test)]
+mod test_set {
+ use prelude::*;
+
+ use super::HashSet;
+ use slice::ImmutablePartialEqSlice;
+ use collections::Collection;
+
+ #[test]
+ fn test_disjoint() {
+ let mut xs = HashSet::new();
+ let mut ys = HashSet::new();
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(5i));
+ assert!(ys.insert(11i));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(7));
+ assert!(xs.insert(19));
+ assert!(xs.insert(4));
+ assert!(ys.insert(2));
+ assert!(ys.insert(-11));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(ys.insert(7));
+ assert!(!xs.is_disjoint(&ys));
+ assert!(!ys.is_disjoint(&xs));
+ }
+
+ #[test]
+ fn test_subset_and_superset() {
+ let mut a = HashSet::new();
+ assert!(a.insert(0i));
+ assert!(a.insert(5));
+ assert!(a.insert(11));
+ assert!(a.insert(7));
+
+ let mut b = HashSet::new();
+ assert!(b.insert(0i));
+ assert!(b.insert(7));
+ assert!(b.insert(19));
+ assert!(b.insert(250));
+ assert!(b.insert(11));
+ assert!(b.insert(200));
+
+ assert!(!a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(!b.is_superset(&a));
+
+ assert!(b.insert(5));
+
+ assert!(a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(b.is_superset(&a));
+ }
+
+ #[test]
+ fn test_iterate() {
+ let mut a = HashSet::new();
+ for i in range(0u, 32) {
+ assert!(a.insert(i));
+ }
+ let mut observed: u32 = 0;
+ for k in a.iter() {
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_intersection() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(11i));
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(77));
+ assert!(a.insert(103));
+ assert!(a.insert(5));
+ assert!(a.insert(-5));
+
+ assert!(b.insert(2i));
+ assert!(b.insert(11));
+ assert!(b.insert(77));
+ assert!(b.insert(-9));
+ assert!(b.insert(-42));
+ assert!(b.insert(5));
+ assert!(b.insert(3));
+
+ let mut i = 0;
+ let expected = [3, 5, 11, 77];
+ for x in a.intersection(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(3i));
+ assert!(b.insert(9));
+
+ let mut i = 0;
+ let expected = [1, 5, 11];
+ for x in a.difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_symmetric_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(-2i));
+ assert!(b.insert(3));
+ assert!(b.insert(9));
+ assert!(b.insert(14));
+ assert!(b.insert(22));
+
+ let mut i = 0;
+ let expected = [-2, 1, 5, 11, 14, 22];
+ for x in a.symmetric_difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_union() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+ assert!(a.insert(16));
+ assert!(a.insert(19));
+ assert!(a.insert(24));
+
+ assert!(b.insert(-2i));
+ assert!(b.insert(1));
+ assert!(b.insert(5));
+ assert!(b.insert(9));
+ assert!(b.insert(13));
+ assert!(b.insert(19));
+
+ let mut i = 0;
+ let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
+ for x in a.union(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_from_iter() {
+ let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
+
+ let set: HashSet<int> = xs.iter().map(|&x| x).collect();
+
+ for x in xs.iter() {
+ assert!(set.contains(x));
+ }
+ }
+
+ #[test]
+ fn test_move_iter() {
+ let hs = {
+ let mut hs = HashSet::new();
+
+ hs.insert('a');
+ hs.insert('b');
+
+ hs
+ };
+
+ let v = hs.move_iter().collect::<Vec<char>>();
+ assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
+ }
+
+ #[test]
+ fn test_eq() {
+ // These constants once happened to expose a bug in insert().
+ // I'm keeping them around to prevent a regression.
+ let mut s1 = HashSet::new();
+
+ s1.insert(1i);
+ s1.insert(2);
+ s1.insert(3);
+
+ let mut s2 = HashSet::new();
+
+ s2.insert(1i);
+ s2.insert(2);
+
+ assert!(s1 != s2);
+
+ s2.insert(3);
+
+ assert_eq!(s1, s2);
+ }
+
+ #[test]
+ fn test_show() {
+ let mut set: HashSet<int> = HashSet::new();
+ let empty: HashSet<int> = HashSet::new();
+
+ set.insert(1i);
+ set.insert(2);
+
+ let set_str = format!("{}", set);
+
+ assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
+ assert_eq!(format!("{}", empty), "{}".to_string());
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp;
+use hash::{Hash, Hasher};
+use iter::{Iterator, count};
+use kinds::marker;
+use mem::{min_align_of, size_of};
+use mem;
+use num::{CheckedAdd, CheckedMul, is_power_of_two};
+use ops::{Deref, DerefMut, Drop};
+use option::{Some, None, Option};
+use ptr::{RawPtr, copy_nonoverlapping_memory, zero_memory};
+use ptr;
+use rt::heap::{allocate, deallocate};
+
+static EMPTY_BUCKET: u64 = 0u64;
+
+/// The raw hashtable, providing safe-ish access to the unzipped and highly
+/// optimized arrays of hashes, keys, and values.
+///
+/// This design uses less memory and is a lot faster than the naive
+/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
+/// option on every element, and we get a generally more cache-aware design.
+///
+/// Essential invariants of this structure:
+///
+/// - if t.hashes[i] == EMPTY_BUCKET, then `Bucket::at_index(&t, i).raw`
+/// points to 'undefined' contents. Don't read from it. This invariant is
+/// enforced outside this module with the `EmptyBucket`, `FullBucket`,
+/// and `SafeHash` types.
+///
+/// - An `EmptyBucket` is only constructed at an index with
+/// a hash of EMPTY_BUCKET.
+///
+/// - A `FullBucket` is only constructed at an index with a
+/// non-EMPTY_BUCKET hash.
+///
+/// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hashes. We get
+/// around hashes of zero by changing them to 0x8000_0000_0000_0000,
+/// which will likely map to the same bucket, while not being confused
+/// with "empty".
+///
+/// - All three "arrays represented by pointers" are the same length:
+/// `capacity`. This is set at creation and never changes. The arrays
+/// are unzipped to save space (we don't have to pay for the padding
+/// between odd-sized elements, such as in a map from u64 to u8) and to
+/// be more cache-aware (scanning through 8 hashes brings in at most
+/// 2 cache lines, since they're all right beside each other).
+///
+/// You can kind of think of this module/data structure as a safe wrapper
+/// around just the "table" part of the hashtable. It enforces some
+/// invariants at the type level and employs some performance trickery,
+/// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
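+///
+/// As a rough sketch, a table with capacity 4 is one allocation laid out as
+///
+///     [h0 h1 h2 h3 | k0 k1 k2 k3 | v0 v1 v2 v3]
+///
+/// (plus any alignment padding between the subarrays), with the hash, key,
+/// and value pointers each addressing their own subarray.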
+#[unsafe_no_drop_flag]
+pub struct RawTable<K, V> {
+ capacity: uint,
+ size: uint,
+ hashes: *mut u64,
+ // Because K/V do not appear directly in any of the types in the struct,
+ // inform rustc that in fact instances of K and V are reachable from here.
+ marker: marker::CovariantType<(K,V)>,
+}
+
+struct RawBucket<K, V> {
+ hash: *mut u64,
+ key: *mut K,
+ val: *mut V
+}
+
+pub struct Bucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub struct EmptyBucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub struct FullBucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub type EmptyBucketImm<'table, K, V> = EmptyBucket<K, V, &'table RawTable<K, V>>;
+pub type FullBucketImm<'table, K, V> = FullBucket<K, V, &'table RawTable<K, V>>;
+
+pub type EmptyBucketMut<'table, K, V> = EmptyBucket<K, V, &'table mut RawTable<K, V>>;
+pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;
+
+pub enum BucketState<K, V, M> {
+ Empty(EmptyBucket<K, V, M>),
+ Full(FullBucket<K, V, M>),
+}
+
+// A GapThenFull encapsulates the state of two consecutive buckets at once.
+// The first bucket, called the gap, is known to be empty.
+// The second bucket is full.
+struct GapThenFull<K, V, M> {
+ gap: EmptyBucket<K, V, ()>,
+ full: FullBucket<K, V, M>,
+}
+
+/// A hash that is not zero, since we use a hash of zero to represent empty
+/// buckets.
+#[deriving(PartialEq)]
+pub struct SafeHash {
+ hash: u64,
+}
+
+impl SafeHash {
+ /// Peek at the hash value, which is guaranteed to be non-zero.
+ #[inline(always)]
+ pub fn inspect(&self) -> u64 { self.hash }
+}
+
+/// We need to avoid hashes of 0, since that value is reserved for empty
+/// buckets. This function wraps the hasher's `hash` so that it is the only
+/// way outside this module to generate a `SafeHash`.
+pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
+ match hasher.hash(t) {
+ // This constant is exceedingly likely to hash to the same
+ // bucket, but it won't be counted as empty! Just so we can maintain
+ // our precious uniform distribution of initial indexes.
+ EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
+ h => SafeHash { hash: h },
+ }
+}
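+
+// A minimal sketch of the guarantee (assuming some `hasher` is in scope):
+//
+//     let sh = make_hash(&hasher, &key);
+//     assert!(sh.inspect() != EMPTY_BUCKET);
+//
+// even when the key's raw hash is 0, since that value is remapped to
+// 0x8000_0000_0000_0000.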
+
+// `replace` casts a `*u64` to a `*SafeHash`. Since we statically
+// ensure that a `FullBucket` points to an index with a non-zero hash,
+// and a `SafeHash` is just a `u64` with a different name, this is
+// safe.
+//
+// This test ensures that a `SafeHash` really IS the same size as a
+// `u64`. If you need to change the size of `SafeHash` (and
+// consequently make this test fail), `replace` needs to be
+// modified to no longer assume this.
+#[test]
+fn can_alias_safehash_as_u64() {
+ assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
+}
+
+impl<K, V> RawBucket<K, V> {
+ unsafe fn offset(self, count: int) -> RawBucket<K, V> {
+ RawBucket {
+ hash: self.hash.offset(count),
+ key: self.key.offset(count),
+ val: self.val.offset(count),
+ }
+ }
+}
+
+// For parameterizing over mutability.
+impl<'t, K, V> Deref<RawTable<K, V>> for &'t RawTable<K, V> {
+ fn deref(&self) -> &RawTable<K, V> {
+ &**self
+ }
+}
+
+impl<'t, K, V> Deref<RawTable<K, V>> for &'t mut RawTable<K, V> {
+ fn deref(&self) -> &RawTable<K,V> {
+ &**self
+ }
+}
+
+impl<'t, K, V> DerefMut<RawTable<K, V>> for &'t mut RawTable<K, V> {
+ fn deref_mut(&mut self) -> &mut RawTable<K,V> {
+ &mut **self
+ }
+}
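+
+// As a consequence, `Bucket<K, V, &'t RawTable<K, V>>` supports read-only
+// probing, while `Bucket<K, V, &'t mut RawTable<K, V>>` additionally allows
+// `put` and `take`; both satisfy the same `Deref<RawTable<K, V>>` bound.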
+
+// Buckets hold references to the table.
+impl<K, V, M> FullBucket<K, V, M> {
+ /// Borrow a reference to the table.
+ pub fn table(&self) -> &M {
+ &self.table
+ }
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+ /// Get the raw index.
+ pub fn index(&self) -> uint {
+ self.idx
+ }
+}
+
+impl<K, V, M> EmptyBucket<K, V, M> {
+ /// Borrow a reference to the table.
+ pub fn table(&self) -> &M {
+ &self.table
+ }
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+}
+
+impl<K, V, M> Bucket<K, V, M> {
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+ /// Get the raw index.
+ pub fn index(&self) -> uint {
+ self.idx
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> Bucket<K, V, M> {
+ pub fn new(table: M, hash: &SafeHash) -> Bucket<K, V, M> {
+ Bucket::at_index(table, hash.inspect() as uint)
+ }
+
+ pub fn at_index(table: M, ib_index: uint) -> Bucket<K, V, M> {
+ let ib_index = ib_index & (table.capacity() - 1);
+ Bucket {
+ raw: unsafe {
+ table.first_bucket_raw().offset(ib_index as int)
+ },
+ idx: ib_index,
+ table: table
+ }
+ }
+
+ pub fn first(table: M) -> Bucket<K, V, M> {
+ Bucket {
+ raw: table.first_bucket_raw(),
+ idx: 0,
+ table: table
+ }
+ }
+
+ /// Reads a bucket at a given index, returning an enum indicating whether
+ /// it's initialized or not. You need to match on this enum to get
+ /// the appropriate types to call most of the other functions in
+ /// this module.
+ pub fn peek(self) -> BucketState<K, V, M> {
+ match unsafe { *self.raw.hash } {
+ EMPTY_BUCKET =>
+ Empty(EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }),
+ _ =>
+ Full(FullBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ })
+ }
+ }
+
+ /// Modifies the bucket pointer in place to make it point to the next slot.
+ pub fn next(&mut self) {
+ // Branchless bucket iteration step.
+ // As we reach the end of the table...
+ // We take the current idx: 0111111b
+ // Xor it by its increment: ^ 1000000b
+ // ------------
+ // 1111111b
+ // Then AND with the capacity: & 1000000b
+ // ------------
+ // to get the backwards offset: 1000000b
+ // ... and it's zero at all other times.
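+        //
+        // Worked example: with capacity 8 and idx == 7 (the last slot),
+        // 7 ^ 8 == 0b1111 and 0b1111 & 8 == 8, so dist == 1 - 8 == -7,
+        // wrapping the raw pointers back around to bucket 0. With idx == 3,
+        // 3 ^ 4 == 0b0111, 0b0111 & 8 == 0, and dist is the usual 1.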
+ let maybe_wraparound_dist = (self.idx ^ (self.idx + 1)) & self.table.capacity();
+ // Finally, we obtain the offset 1 or the offset -cap + 1.
+ let dist = 1i - (maybe_wraparound_dist as int);
+
+ self.idx += 1;
+
+ unsafe {
+ self.raw = self.raw.offset(dist);
+ }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> EmptyBucket<K, V, M> {
+ #[inline]
+ pub fn next(self) -> Bucket<K, V, M> {
+ let mut bucket = self.into_bucket();
+ bucket.next();
+ bucket
+ }
+
+ #[inline]
+ pub fn into_bucket(self) -> Bucket<K, V, M> {
+ Bucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }
+ }
+
+ pub fn gap_peek(self) -> Option<GapThenFull<K, V, M>> {
+ let gap = EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: ()
+ };
+
+ match self.next().peek() {
+ Full(bucket) => {
+ Some(GapThenFull {
+ gap: gap,
+ full: bucket
+ })
+ }
+ Empty(..) => None
+ }
+ }
+}
+
+impl<K, V, M: DerefMut<RawTable<K, V>>> EmptyBucket<K, V, M> {
+ /// Puts given key and value pair, along with the key's hash,
+ /// into this bucket in the hashtable. Note how `self` is 'moved' into
+ /// this function, because this slot will no longer be empty when
+ /// we return! A `FullBucket` is returned for later use, pointing to
+ /// the newly-filled slot in the hashtable.
+ ///
+ /// Use `make_hash` to construct a `SafeHash` to pass to this function.
+ pub fn put(mut self, hash: SafeHash, key: K, value: V)
+ -> FullBucket<K, V, M> {
+ unsafe {
+ *self.raw.hash = hash.inspect();
+ ptr::write(self.raw.key, key);
+ ptr::write(self.raw.val, value);
+ }
+
+ self.table.size += 1;
+
+ FullBucket { raw: self.raw, idx: self.idx, table: self.table }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> FullBucket<K, V, M> {
+ #[inline]
+ pub fn next(self) -> Bucket<K, V, M> {
+ let mut bucket = self.into_bucket();
+ bucket.next();
+ bucket
+ }
+
+ #[inline]
+ pub fn into_bucket(self) -> Bucket<K, V, M> {
+ Bucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }
+ }
+
+ /// Get the distance between this bucket and the 'ideal' location
+ /// as determined by the key's hash stored in it.
+ ///
+ /// In the cited blog posts above, this is called the "distance to
+ /// initial bucket", or DIB. Also known as "probe count".
+ pub fn distance(&self) -> uint {
+ // Calculates the distance one has to travel when going from
+ // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
+ // if the destination is not reached before the end of the table.
+ (self.idx - self.hash().inspect() as uint) & (self.table.capacity() - 1)
+ }
+
+ #[inline]
+ pub fn hash(&self) -> SafeHash {
+ unsafe {
+ SafeHash {
+ hash: *self.raw.hash
+ }
+ }
+ }
+
+ /// Gets references to the key and value at a given index.
+ pub fn read(&self) -> (&K, &V) {
+ unsafe {
+ (&*self.raw.key,
+ &*self.raw.val)
+ }
+ }
+}
+
+impl<K, V, M: DerefMut<RawTable<K, V>>> FullBucket<K, V, M> {
+ /// Removes this bucket's key and value from the hashtable.
+ ///
+ /// This works similarly to `put`, building an `EmptyBucket` out of the
+ /// taken bucket.
+ pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
+ let key = self.raw.key as *const K;
+ let val = self.raw.val as *const V;
+
+ self.table.size -= 1;
+
+ unsafe {
+ *self.raw.hash = EMPTY_BUCKET;
+ (
+ EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ },
+ ptr::read(key),
+ ptr::read(val)
+ )
+ }
+ }
+
+ pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
+ unsafe {
+ let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
+ let old_key = ptr::replace(self.raw.key, k);
+ let old_val = ptr::replace(self.raw.val, v);
+
+ (old_hash, old_key, old_val)
+ }
+ }
+
+ /// Gets mutable references to the key and value at a given index.
+ pub fn read_mut(&mut self) -> (&mut K, &mut V) {
+ unsafe {
+ (&mut *self.raw.key,
+ &mut *self.raw.val)
+ }
+ }
+}
+
+impl<'t, K, V, M: Deref<RawTable<K, V>> + 't> FullBucket<K, V, M> {
+ /// Exchange a bucket state for immutable references into the table.
+ /// Because the underlying reference to the table is also consumed,
+ /// no further changes to the structure of the table are possible;
+ /// in exchange for this, the returned references have a longer lifetime
+ /// than the references returned by `read()`.
+ pub fn into_refs(self) -> (&'t K, &'t V) {
+ unsafe {
+ (&*self.raw.key,
+ &*self.raw.val)
+ }
+ }
+}
+
+impl<'t, K, V, M: DerefMut<RawTable<K, V>> + 't> FullBucket<K, V, M> {
+ /// This works similarly to `into_refs`, exchanging a bucket state
+ /// for mutable references into the table.
+ pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
+ unsafe {
+ (&mut *self.raw.key,
+ &mut *self.raw.val)
+ }
+ }
+}
+
+impl<K, V, M> BucketState<K, V, M> {
+ // For convenience.
+ pub fn expect_full(self) -> FullBucket<K, V, M> {
+ match self {
+ Full(full) => full,
+ Empty(..) => fail!("Expected full bucket")
+ }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> GapThenFull<K, V, M> {
+ #[inline]
+ pub fn full(&self) -> &FullBucket<K, V, M> {
+ &self.full
+ }
+
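+    /// Moves the full bucket's hash, key, and value back into the gap,
+    /// then steps both cursors forward. Returns `Some` while the chain of
+    /// displaced buckets continues, and `None` once an empty bucket ends it.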
+ pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
+ unsafe {
+ *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
+ copy_nonoverlapping_memory(self.gap.raw.key, self.full.raw.key as *const K, 1);
+ copy_nonoverlapping_memory(self.gap.raw.val, self.full.raw.val as *const V, 1);
+ }
+
+ let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
+
+ match self.full.next().peek() {
+ Full(bucket) => {
+ self.gap.raw = prev_raw;
+ self.gap.idx = prev_idx;
+
+ self.full = bucket;
+
+ Some(self)
+ }
+ Empty(..) => None
+ }
+ }
+}
+
+
+/// Rounds up to a multiple of a power of two. Returns the closest multiple
+/// of `target_alignment` that is greater than or equal to `unrounded`.
+///
+/// # Failure
+///
+/// Fails if `target_alignment` is not a power of two.
+fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
+ assert!(is_power_of_two(target_alignment));
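+    // Adding `target_alignment - 1` carries `unrounded` past the next
+    // boundary unless it is already aligned, and masking with
+    // `!(target_alignment - 1)` clears the low bits; e.g.
+    // round_up_to_next(5, 4) == (5 + 3) & !3 == 8.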
+ (unrounded + target_alignment - 1) & !(target_alignment - 1)
+}
+
+#[test]
+fn test_rounding() {
+ assert_eq!(round_up_to_next(0, 4), 0);
+ assert_eq!(round_up_to_next(1, 4), 4);
+ assert_eq!(round_up_to_next(2, 4), 4);
+ assert_eq!(round_up_to_next(3, 4), 4);
+ assert_eq!(round_up_to_next(4, 4), 4);
+ assert_eq!(round_up_to_next(5, 4), 8);
+}
+
+// Returns a tuple of (key_offset, val_offset),
+// from the start of a mallocated array.
+fn calculate_offsets(hashes_size: uint,
+ keys_size: uint, keys_align: uint,
+ vals_align: uint)
+ -> (uint, uint) {
+ let keys_offset = round_up_to_next(hashes_size, keys_align);
+ let end_of_keys = keys_offset + keys_size;
+
+ let vals_offset = round_up_to_next(end_of_keys, vals_align);
+
+ (keys_offset, vals_offset)
+}
+
+// Returns a tuple of (minimum required malloc alignment, hash_offset,
+// array_size), from the start of a mallocated array.
+fn calculate_allocation(hash_size: uint, hash_align: uint,
+ keys_size: uint, keys_align: uint,
+ vals_size: uint, vals_align: uint)
+ -> (uint, uint, uint) {
+ let hash_offset = 0;
+ let (_, vals_offset) = calculate_offsets(hash_size,
+ keys_size, keys_align,
+ vals_align);
+ let end_of_vals = vals_offset + vals_size;
+
+ let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
+
+ (min_align, hash_offset, end_of_vals)
+}
+
+#[test]
+fn test_offset_calculation() {
+ assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
+ assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
+ assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
+ assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
+ assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
+ assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
+}
+
+impl<K, V> RawTable<K, V> {
+    /// Does not initialize the buckets. The caller should ensure that,
+    /// at the very least, every hash is set to EMPTY_BUCKET.
+ unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
+ if capacity == 0 {
+ return RawTable {
+ size: 0,
+ capacity: 0,
+ hashes: 0 as *mut u64,
+ marker: marker::CovariantType,
+ };
+ }
+ // No need for `checked_mul` before a more restrictive check performed
+ // later in this method.
+ let hashes_size = capacity * size_of::<u64>();
+        let keys_size = capacity * size_of::<K>();
+        let vals_size = capacity * size_of::<V>();
+
+ // Allocating hashmaps is a little tricky. We need to allocate three
+ // arrays, but since we know their sizes and alignments up front,
+ // we just allocate a single array, and then have the subarrays
+ // point into it.
+ //
+ // This is great in theory, but in practice getting the alignment
+ // right is a little subtle. Therefore, calculating offsets has been
+ // factored out into a different function.
+ let (malloc_alignment, hash_offset, size) =
+ calculate_allocation(
+ hashes_size, min_align_of::<u64>(),
+                keys_size, min_align_of::<K>(),
+                vals_size, min_align_of::<V>());
+
+ // One check for overflow that covers calculation and rounding of size.
+ let size_of_bucket = size_of::<u64>().checked_add(&size_of::<K>()).unwrap()
+ .checked_add(&size_of::<V>()).unwrap();
+ assert!(size >= capacity.checked_mul(&size_of_bucket)
+ .expect("capacity overflow"),
+ "capacity overflow");
+
+ let buffer = allocate(size, malloc_alignment);
+
+ let hashes = buffer.offset(hash_offset as int) as *mut u64;
+
+ RawTable {
+ capacity: capacity,
+ size: 0,
+ hashes: hashes,
+ marker: marker::CovariantType,
+ }
+ }
+
+ fn first_bucket_raw(&self) -> RawBucket<K, V> {
+ let hashes_size = self.capacity * size_of::<u64>();
+ let keys_size = self.capacity * size_of::<K>();
+
+ let buffer = self.hashes as *mut u8;
+ let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
+ keys_size, min_align_of::<K>(),
+ min_align_of::<V>());
+
+ unsafe {
+ RawBucket {
+ hash: self.hashes,
+ key: buffer.offset(keys_offset as int) as *mut K,
+ val: buffer.offset(vals_offset as int) as *mut V
+ }
+ }
+ }
+
+ /// Creates a new raw table from a given capacity. All buckets are
+ /// initially empty.
+ #[allow(experimental)]
+ pub fn new(capacity: uint) -> RawTable<K, V> {
+ unsafe {
+ let ret = RawTable::new_uninitialized(capacity);
+ zero_memory(ret.hashes, capacity);
+ ret
+ }
+ }
+
+ /// The hashtable's capacity, similar to a vector's.
+ pub fn capacity(&self) -> uint {
+ self.capacity
+ }
+
+ /// The number of elements ever `put` in the hashtable, minus the number
+ /// of elements ever `take`n.
+ pub fn size(&self) -> uint {
+ self.size
+ }
+
+ fn raw_buckets(&self) -> RawBuckets<K, V> {
+ RawBuckets {
+ raw: self.first_bucket_raw(),
+ hashes_end: unsafe {
+ self.hashes.offset(self.capacity as int)
+ }
+ }
+ }
+
+ pub fn iter(&self) -> Entries<K, V> {
+ Entries {
+ iter: self.raw_buckets(),
+ elems_left: self.size(),
+ }
+ }
+
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ MutEntries {
+ iter: self.raw_buckets(),
+ elems_left: self.size(),
+ }
+ }
+
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ MoveEntries {
+ iter: self.raw_buckets(),
+ table: self,
+ }
+ }
+
+    /// Returns an iterator that moves out each entry. Used while the table
+ /// is being dropped.
+ unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> {
+ let raw_bucket = self.first_bucket_raw();
+ RevMoveBuckets {
+ raw: raw_bucket.offset(self.capacity as int),
+ hashes_end: raw_bucket.hash,
+ elems_left: self.size
+ }
+ }
+}
+
+/// A raw iterator. The basis for some other iterators in this module. Although
+/// this interface is safe, it's not used outside this module.
+struct RawBuckets<'a, K, V> {
+ raw: RawBucket<K, V>,
+ hashes_end: *mut u64
+}
+
+impl<'a, K, V> Iterator<RawBucket<K, V>> for RawBuckets<'a, K, V> {
+ fn next(&mut self) -> Option<RawBucket<K, V>> {
+ while self.raw.hash != self.hashes_end {
+ unsafe {
+ // We are swapping out the pointer to a bucket and replacing
+ // it with the pointer to the next one.
+ let prev = ptr::replace(&mut self.raw, self.raw.offset(1));
+ if *prev.hash != EMPTY_BUCKET {
+ return Some(prev);
+ }
+ }
+ }
+
+ None
+ }
+}
+
+/// An iterator that moves out buckets in reverse order. It leaves the table
+/// in an inconsistent state and should only be used for dropping
+/// the table's remaining entries. It's used in the implementation of Drop.
+struct RevMoveBuckets<'a, K, V> {
+ raw: RawBucket<K, V>,
+ hashes_end: *mut u64,
+ elems_left: uint
+}
+
+impl<'a, K, V> Iterator<(K, V)> for RevMoveBuckets<'a, K, V> {
+ fn next(&mut self) -> Option<(K, V)> {
+ if self.elems_left == 0 {
+ return None;
+ }
+
+ loop {
+ debug_assert!(self.raw.hash != self.hashes_end);
+
+ unsafe {
+ self.raw = self.raw.offset(-1);
+
+ if *self.raw.hash != EMPTY_BUCKET {
+ self.elems_left -= 1;
+ return Some((
+ ptr::read(self.raw.key as *const K),
+ ptr::read(self.raw.val as *const V)
+ ));
+ }
+ }
+ }
+ }
+}
+
+/// Iterator over shared references to entries in a table.
+pub struct Entries<'a, K: 'a, V: 'a> {
+ iter: RawBuckets<'a, K, V>,
+ elems_left: uint,
+}
+
+/// Iterator over mutable references to entries in a table.
+pub struct MutEntries<'a, K: 'a, V: 'a> {
+ iter: RawBuckets<'a, K, V>,
+ elems_left: uint,
+}
+
+/// Iterator over the entries in a table, consuming the table.
+pub struct MoveEntries<K, V> {
+ table: RawTable<K, V>,
+ iter: RawBuckets<'static, K, V>
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.iter.next().map(|bucket| {
+ self.elems_left -= 1;
+ unsafe {
+ (&*bucket.key,
+ &*bucket.val)
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ (self.elems_left, Some(self.elems_left))
+ }
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.iter.next().map(|bucket| {
+ self.elems_left -= 1;
+ unsafe {
+ (&*bucket.key,
+ &mut *bucket.val)
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ (self.elems_left, Some(self.elems_left))
+ }
+}
+
+impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
+ fn next(&mut self) -> Option<(SafeHash, K, V)> {
+ self.iter.next().map(|bucket| {
+ self.table.size -= 1;
+ unsafe {
+ (
+ SafeHash {
+ hash: *bucket.hash,
+ },
+ ptr::read(bucket.key as *const K),
+ ptr::read(bucket.val as *const V)
+ )
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ let size = self.table.size();
+ (size, Some(size))
+ }
+}
+
+impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
+ fn clone(&self) -> RawTable<K, V> {
+ unsafe {
+ let mut new_ht = RawTable::new_uninitialized(self.capacity());
+
+ {
+ let cap = self.capacity();
+ let mut new_buckets = Bucket::first(&mut new_ht);
+ let mut buckets = Bucket::first(self);
+ while buckets.index() != cap {
+ match buckets.peek() {
+ Full(full) => {
+ let (h, k, v) = {
+ let (k, v) = full.read();
+ (full.hash(), k.clone(), v.clone())
+ };
+ *new_buckets.raw.hash = h.inspect();
+ mem::overwrite(new_buckets.raw.key, k);
+ mem::overwrite(new_buckets.raw.val, v);
+ }
+ Empty(..) => {
+ *new_buckets.raw.hash = EMPTY_BUCKET;
+ }
+ }
+ new_buckets.next();
+ buckets.next();
+ }
+ };
+
+ new_ht.size = self.size();
+
+ new_ht
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<K, V> Drop for RawTable<K, V> {
+ fn drop(&mut self) {
+ if self.hashes.is_null() {
+ return;
+ }
+ // This is done in reverse because we've likely partially taken
+ // some elements out with `.move_iter()` from the front.
+ // Check if the size is 0, so we don't do a useless scan when
+ // dropping empty tables such as on resize.
+ // Also avoid double drop of elements that have been already moved out.
+ unsafe {
+ for _ in self.rev_move_buckets() {}
+ }
+
+ let hashes_size = self.capacity * size_of::<u64>();
+ let keys_size = self.capacity * size_of::<K>();
+ let vals_size = self.capacity * size_of::<V>();
+ let (align, _, size) = calculate_allocation(hashes_size, min_align_of::<u64>(),
+ keys_size, min_align_of::<K>(),
+ vals_size, min_align_of::<V>());
+
+ unsafe {
+ deallocate(self.hashes as *mut u8, size, align);
+ // Remember how everything was allocated out of one buffer
+ // during initialization? We only need one call to free here.
+ }
+ }
+}
}
}
+#[cfg(stage0)]
impl Reader for Box<Reader+'static> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) }
}
+#[cfg(not(stage0))]
+impl<'a> Reader for Box<Reader+'a> {
+ fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) }
+}
+
impl<'a> Reader for &'a mut Reader+'a {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) }
}
}
}
+#[cfg(stage0)]
impl Writer for Box<Writer+'static> {
#[inline]
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) }
fn flush(&mut self) -> IoResult<()> { self.flush() }
}
+#[cfg(not(stage0))]
+impl<'a> Writer for Box<Writer+'a> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) }
+
+ #[inline]
+ fn flush(&mut self) -> IoResult<()> { self.flush() }
+}
+
impl<'a> Writer for &'a mut Writer+'a {
#[inline]
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) }
use io::{fs, IoResult};
use io;
-use iter::range;
use libc;
use ops::Drop;
use option::{Option, None, Some};
/// will have the suffix `suffix`. The directory will be automatically
/// deleted once the returned wrapper is destroyed.
///
- /// If no directory can be created, None is returned.
- pub fn new_in(tmpdir: &Path, suffix: &str) -> Option<TempDir> {
+ /// If no directory can be created, `Err` is returned.
+ pub fn new_in(tmpdir: &Path, suffix: &str) -> IoResult<TempDir> {
if !tmpdir.is_absolute() {
return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
}
static mut CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
- for _ in range(0u, 1000) {
+ let mut attempts = 0u;
+ loop {
let filename =
format!("rs-{}-{}-{}",
unsafe { libc::getpid() },
suffix);
let p = tmpdir.join(filename);
match fs::mkdir(&p, io::UserRWX) {
- Err(..) => {}
- Ok(()) => return Some(TempDir { path: Some(p), disarmed: false })
+ Err(error) => {
+ if attempts >= 1000 {
+ return Err(error)
+ }
+ attempts += 1;
+ }
+ Ok(()) => return Ok(TempDir { path: Some(p), disarmed: false })
}
}
- None
}
/// Attempts to make a temporary directory inside of `os::tmpdir()` whose
/// name will have the suffix `suffix`. The directory will be automatically
/// deleted once the returned wrapper is destroyed.
///
- /// If no directory can be created, None is returned.
- pub fn new(suffix: &str) -> Option<TempDir> {
+ /// If no directory can be created, `Err` is returned.
+ pub fn new(suffix: &str) -> IoResult<TempDir> {
TempDir::new_in(&os::tmpdir(), suffix)
}
InlineNever,
}
-/// True if something like #[inline] is found in the list of attrs.
+/// Determine what `#[inline]` attribute is present in `attrs`, if any.
pub fn find_inline_attr(attrs: &[Attribute]) -> InlineAttr {
// FIXME (#2809)---validate the usage of #[inline] and #[inline]
attrs.iter().fold(InlineNone, |ia,attr| {
})
}
+/// True if `#[inline]` or `#[inline(always)]` is present in `attrs`.
+pub fn requests_inline(attrs: &[Attribute]) -> bool {
+ match find_inline_attr(attrs) {
+ InlineHint | InlineAlways => true,
+ InlineNone | InlineNever => false,
+ }
+}
+
/// Tests if any `cfg(...)` meta items in `metas` match `cfg`. e.g.
///
/// test_cfg(`[foo="a", bar]`, `[cfg(foo), cfg(bar)]`) == true
if len == 0 {
OwnedSlice::empty()
} else {
+ // drop excess capacity to avoid breaking sized deallocation
+ v.shrink_to_fit();
+
let p = v.as_mut_ptr();
// we own the allocation now
- unsafe {mem::forget(v)}
+ unsafe { mem::forget(v) }
OwnedSlice { data: p, len: len }
}
let span = self.span;
self.span_warn(span,
format!("this extern crate syntax is deprecated. \
- Use: extern create \"{}\" as {};",
+ Use: extern crate \"{}\" as {};",
the_ident.as_str(), path.ref0().get() ).as_slice()
);
Some(path)
#[test]
pub fn ratchet_test() {
- let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
+ let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
let pth = dpth.path().join("ratchet.json");
let mut m1 = MetricMap::new();
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[no_mangle]
+pub extern "C" fn foo() -> uint {
+ 1234
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline]
+pub fn cci_fn() -> uint {
+ 1200
+}
+
+#[inline]
+pub static CCI_STATIC: uint = 34;
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3 --crate-type=rlib,dylib
+
+pub mod a {
+ pub fn one() -> uint {
+ 1
+ }
+}
+
+pub mod b {
+ pub fn two() -> uint {
+ 2
+ }
+}
+
+pub mod c {
+ use a::one;
+ use b::two;
+ pub fn three() -> uint {
+ one() + two()
+ }
+}
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-use std::cmp::max;
+use std::{cmp, iter, mem};
+use std::sync::Future;
-fn fact(n: uint) -> uint {
- range(1, n + 1).fold(1, |accu, i| accu * i)
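+// Rotates `x` left by one, e.g. [1, 2, 3] becomes [2, 3, 1].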
+fn rotate(x: &mut [i32]) {
+ let mut prev = x[0];
+ for place in x.mut_iter().rev() {
+ prev = mem::replace(place, prev)
+ }
}
-fn fannkuch(n: uint, i: uint) -> (int, int) {
- let mut perm = Vec::from_fn(n, |e| ((n + e - i) % n + 1) as i32);
- let mut tperm = perm.clone();
- let mut count = Vec::from_elem(n, 0u);
- let mut perm_count = 0i;
- let mut checksum = 0;
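+// Advances `perm` to its successor in the enumeration order used by this
+// benchmark: the prefix of length i + 1 is rotated and `count[i]` is
+// incremented; once `count[i]` reaches i it resets and the carry moves on
+// to position i + 1.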
+fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
+ for i in range(1, perm.len()) {
+ rotate(perm.mut_slice_to(i + 1));
+ let count_i = &mut count[i];
+ if *count_i >= i as i32 {
+ *count_i = 0;
+ } else {
+ *count_i += 1;
+ break
+ }
+ }
+}
+
+struct P {
+ p: [i32, .. 16],
+}
+
+struct Perm {
+ cnt: [i32, .. 16],
+ fact: [u32, .. 16],
+ n: u32,
+ permcount: u32,
+ perm: P,
+}
+
+impl Perm {
+ fn new(n: u32) -> Perm {
+ let mut fact = [1, .. 16];
+ for i in range(1, n as uint + 1) {
+ fact[i] = fact[i - 1] * i as u32;
+ }
+ Perm {
+ cnt: [0, .. 16],
+ fact: fact,
+ n: n,
+ permcount: 0,
+ perm: P { p: [0, .. 16 ] }
+ }
+ }
+
+ fn get(&mut self, mut idx: i32) -> P {
+ let mut pp = [0u8, .. 16];
+ self.permcount = idx as u32;
+ for (i, place) in self.perm.p.mut_iter().enumerate() {
+ *place = i as i32 + 1;
+ }
- for countdown in range(1, fact(n - 1) + 1).rev() {
- for i in range(1, n) {
- let perm0 = *perm.get(0);
- for j in range(0, i) {
- *perm.get_mut(j) = *perm.get(j + 1);
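+        // Decode `idx` in the factorial number system: the digit
+        // d = idx / fact[i] (with 0 <= d <= i) selects how far the first
+        // i + 1 elements are rotated at position i.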
+ for i in range(1, self.n as uint).rev() {
+ let d = idx / self.fact[i] as i32;
+ self.cnt[i] = d;
+ idx %= self.fact[i] as i32;
+ for (place, val) in pp.mut_iter().zip(self.perm.p.slice_to(i + 1).iter()) {
+ *place = (*val) as u8
}
- *perm.get_mut(i) = perm0;
-
- let count_i = count.get_mut(i);
- if *count_i >= i {
- *count_i = 0;
- } else {
- *count_i += 1;
- break;
+
+ let d = d as uint;
+ for j in range(0, i + 1) {
+                self.perm.p[j] = (if j + d <= i { pp[j + d] } else { pp[j + d - i - 1] }) as i32;
}
}
- tperm.clone_from(&perm);
- let mut flips_count = 0;
- loop {
- let k = *tperm.get(0);
- if k == 1 { break; }
- tperm.mut_slice_to(k as uint).reverse();
- flips_count += 1;
+ self.perm
+ }
+
+ fn count(&self) -> u32 { self.permcount }
+ fn max(&self) -> u32 { self.fact[self.n as uint] }
+
+ fn next(&mut self) -> P {
+ next_permutation(self.perm.p, self.cnt);
+ self.permcount += 1;
+
+ self.perm
+ }
+}
+
+
+fn reverse(tperm: &mut [i32], k: uint) {
+ tperm.mut_slice_to(k).reverse()
+}
+
+fn work(mut perm: Perm, n: uint, max: uint) -> (i32, i32) {
+ let mut checksum = 0;
+ let mut maxflips = 0;
+
+ let mut p = perm.get(n as i32);
+
+ while perm.count() < max as u32 {
+ let mut flips = 0;
+
+ while p.p[0] != 1 {
+ let k = p.p[0] as uint;
+ reverse(p.p, k);
+ flips += 1;
}
- perm_count = max(perm_count, flips_count);
- checksum += if countdown & 1 == 1 {flips_count} else {-flips_count}
+
+ checksum += if perm.count() % 2 == 0 {flips} else {-flips};
+ maxflips = cmp::max(maxflips, flips);
+
+ p = perm.next();
}
- (checksum, perm_count)
+
+ (checksum, maxflips)
}
-fn main() {
- let n = std::os::args().as_slice()
- .get(1)
- .and_then(|arg| from_str(arg.as_slice()))
- .unwrap_or(2u);
-
- let (tx, rx) = channel();
- for i in range(0, n) {
- let tx = tx.clone();
- spawn(proc() tx.send(fannkuch(n, i)));
+fn fannkuch(n: i32) -> (i32, i32) {
+ let perm = Perm::new(n as u32);
+
+ let N = 4;
+ let mut futures = vec![];
+ let k = perm.max() / N;
+
+    for (_, j) in range(0, N).zip(iter::count(0, k)) {
+ let max = cmp::min(j+k, perm.max());
+
+ futures.push(Future::spawn(proc() {
+ work(perm, j as uint, max as uint)
+ }))
}
- drop(tx);
let mut checksum = 0;
- let mut perm = 0;
- for (cur_cks, cur_perm) in rx.iter() {
- checksum += cur_cks;
- perm = max(perm, cur_perm);
+ let mut maxflips = 0;
+ for fut in futures.mut_iter() {
+ let (cs, mf) = fut.get();
+ checksum += cs;
+ maxflips = cmp::max(maxflips, mf);
}
- println!("{}\nPfannkuchen({}) = {}", checksum, n, perm);
+ (checksum, maxflips)
+}
+
+fn main() {
+ let n = std::os::args().as_slice()
+ .get(1)
+ .and_then(|arg| from_str(arg.as_slice()))
+ .unwrap_or(2i32);
+
+ let (checksum, maxflips) = fannkuch(n);
+ println!("{}\nPfannkuchen({}) = {}", checksum, n, maxflips);
}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure we give a sane error message when the user requests LTO with a
+// library built with -C codegen-units > 1.
+
+// aux-build:sepcomp_lib.rs
+// compile-flags: -Z lto
+// error-pattern:missing compressed bytecode
+// no-prefer-dynamic
+
+extern crate sepcomp_lib;
+use sepcomp_lib::a::one;
+use sepcomp_lib::b::two;
+use sepcomp_lib::c::three;
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(two(), 2);
+ assert_eq!(three(), 3);
+}
//~^ ERROR failed to find an implementation
//~^^ ERROR instantiating a type parameter with an incompatible type
-impl<T> Foo<T> {
+impl<T> Foo<T> { //~ ERROR failed to find an implementation
+//~^ ERROR instantiating a type parameter with an incompatible type
fn uhoh() {}
}
struct S4<Y>;
impl<Sized? X> T3<X> for S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
}
+impl<Sized? X> S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
+}
pub fn main() {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:capacity overflow
+
+use std::collections::hashmap::HashMap;
+use std::uint;
+use std::mem::size_of;
+
+fn main() {
+ let threshold = uint::MAX / size_of::<(u64, u64, u64)>();
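+    // Each bucket of a HashMap<u64, u64> stores (hash, key, value), i.e.
+    // three u64s, so requesting threshold + 100 buckets overflows the uint
+    // size computation and must fail with "capacity overflow".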
+ let mut h = HashMap::<u64, u64>::with_capacity(threshold + 100);
+ h.insert(0, 0);
+}
$(call REMOVE_RLIBS,bar)
$(call REMOVE_DYLIBS,bar)
rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
+ # Check that $(TMPDIR) is empty.
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=bin
rm $(TMPDIR)/$(call BIN,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=asm,ir,bc,obj,link
rm $(TMPDIR)/bar.ll
rm $(TMPDIR)/bar.bc
rm $(TMPDIR)/bar.s
rm $(TMPDIR)/bar.o
rm $(TMPDIR)/$(call BIN,bar)
- $(RUSTC) foo.rs --emit=asm,ir,bc,obj,link --crate-type=staticlib
- rm $(TMPDIR)/bar.ll
- rm $(TMPDIR)/bar.s
- rm $(TMPDIR)/bar.o
- rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=asm -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=bc -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=ir -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=obj -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=link -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=rlib -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=dylib -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo) # FIXME 13794
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=staticlib -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=bin -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
+ $(RUSTC) foo.rs --emit=asm,ir,bc,obj,link --crate-type=staticlib
+ rm $(TMPDIR)/bar.ll
+ rm $(TMPDIR)/bar.s
+ rm $(TMPDIR)/bar.o
+ rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
mv $(TMPDIR)/bar.bc $(TMPDIR)/foo.bc
+ # Don't check that the $(TMPDIR) is empty - we left `foo.bc` for later
+ # comparison.
+
$(RUSTC) foo.rs --emit=bc,link --crate-type=rlib
cmp $(TMPDIR)/foo.bc $(TMPDIR)/bar.bc
rm $(TMPDIR)/bar.bc
rm $(TMPDIR)/foo.bc
$(call REMOVE_RLIBS,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
--- /dev/null
+-include ../tools.mk
+
+# Check that cross-crate inlined items are inlined in all compilation units
+# that refer to them, and not in any other compilation units.
+
+all:
+ $(RUSTC) cci_lib.rs
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ .*cci_fn)" -eq "2" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c CCI_STATIC.*=.*constant)" -eq "2" ]
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "rlib"]
+
+#[inline]
+pub fn cci_fn() -> uint {
+ 1234
+}
+
+#[inline]
+pub static CCI_STATIC: uint = 2345;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate cci_lib;
+use cci_lib::{cci_fn, CCI_STATIC};
+
+fn call1() -> uint {
+ cci_fn() + CCI_STATIC
+}
+
+mod a {
+ use cci_lib::cci_fn;
+ pub fn call2() -> uint {
+ cci_fn()
+ }
+}
+
+mod b {
+ use cci_lib::CCI_STATIC;
+ pub fn call3() -> uint {
+ CCI_STATIC
+ }
+}
+
+fn main() {
+ call1();
+ a::call2();
+ b::call3();
+}
--- /dev/null
+-include ../tools.mk
+
+# Test that #[inline(always)] functions still get inlined across compilation
+# unit boundaries. Compilation should produce three IR files, with each one
+# containing a definition of the inlined function. Also, the non-#[inline]
+# function should be defined in only one compilation unit.
+
+all:
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ i32\ .*inlined)" -eq "1" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ available_externally\ i32\ .*inlined)" -eq "2" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ i32\ .*normal)" -eq "1" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c declare\ i32\ .*normal)" -eq "2" ]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline]
+fn inlined() -> u32 {
+ 1234
+}
+
+fn normal() -> u32 {
+ 2345
+}
+
+mod a {
+ pub fn f() -> u32 {
+ ::inlined() + ::normal()
+ }
+}
+
+mod b {
+ pub fn f() -> u32 {
+ ::inlined() + ::normal()
+ }
+}
+
+fn main() {
+ a::f();
+ b::f();
+}
--- /dev/null
+-include ../tools.mk
+
+# Test that separate compilation actually puts code into separate compilation
+# units. `foo.rs` defines `magic_fn` in three different modules, which should
+# wind up in three different compilation units.
+
+all:
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ .*magic_fn)" -eq "3" ]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn magic_fn() -> uint {
+ 1234
+}
+
+mod a {
+ pub fn magic_fn() -> uint {
+ 2345
+ }
+}
+
+mod b {
+ pub fn magic_fn() -> uint {
+ 3456
+ }
+}
+
+fn main() { }
unsafe {
static U_RWX: i32 = (libc::S_IRUSR | libc::S_IWUSR | libc::S_IXUSR) as i32;
- let tmpdir = TempDir::new("rename_directory").expect("rename_directory failed");
+ let tmpdir = TempDir::new("rename_directory").ok().expect("rename_directory failed");
let tmpdir = tmpdir.path();
let old_path = tmpdir.join_many(["foo", "bar", "baz"]);
fs::mkdir_recursive(&old_path, io::UserRWX);
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+// aux-build:sepcomp_cci_lib.rs
+
+// Test accessing cross-crate inlined items from multiple compilation units.
+
+extern crate sepcomp_cci_lib;
+use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+
+fn call1() -> uint {
+ cci_fn() + CCI_STATIC
+}
+
+mod a {
+ use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+ pub fn call2() -> uint {
+ cci_fn() + CCI_STATIC
+ }
+}
+
+mod b {
+ use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+ pub fn call3() -> uint {
+ cci_fn() + CCI_STATIC
+ }
+}
+
+fn main() {
+ assert_eq!(call1(), 1234);
+ assert_eq!(a::call2(), 1234);
+ assert_eq!(b::call3(), 1234);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+// aux-build:sepcomp-extern-lib.rs
+
+// Test accessing external items from multiple compilation units.
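+//
+// The `sepcomp-extern-lib` aux file is not shown in this hunk. A minimal
+// sketch, assuming it is a Rust staticlib exposing an unmangled `foo`
+// (everything here except the expected return value of 1234 is an
+// assumption):
+//
+//     #![crate_type = "staticlib"]
+//
+//     #[no_mangle]
+//     pub extern fn foo() -> uint {
+//         1234
+//     }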
+
+#[link(name = "sepcomp-extern-lib")]
+extern {
+ #[allow(ctypes)]
+ fn foo() -> uint;
+}
+
+fn call1() -> uint {
+ unsafe { foo() }
+}
+
+mod a {
+ pub fn call2() -> uint {
+ unsafe { ::foo() }
+ }
+}
+
+mod b {
+ pub fn call3() -> uint {
+ unsafe { ::foo() }
+ }
+}
+
+fn main() {
+ assert_eq!(call1(), 1234);
+ assert_eq!(a::call2(), 1234);
+ assert_eq!(b::call3(), 1234);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test references to items that haven't been translated yet.
+
+// Generate some code in the first compilation unit before declaring any
+// modules. This ensures that the first module doesn't go into the same
+// compilation unit as the top-level module.
+fn pad() -> uint { 0 }
+
+mod b {
+ pub fn three() -> uint {
+ ::one() + ::a::two()
+ }
+}
+
+mod a {
+ pub fn two() -> uint {
+ ::one() + ::one()
+ }
+}
+
+fn one() -> uint {
+ 1
+}
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(a::two(), 2);
+ assert_eq!(b::three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test basic separate compilation functionality. The functions should be able
+// to call each other even though they will be placed in different compilation
+// units.
+
+// Generate some code in the first compilation unit before declaring any
+// modules. This ensures that the first module doesn't go into the same
+// compilation unit as the top-level module.
+fn one() -> uint { 1 }
+
+mod a {
+ pub fn two() -> uint {
+ ::one() + ::one()
+ }
+}
+
+mod b {
+ pub fn three() -> uint {
+ ::one() + ::a::two()
+ }
+}
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(a::two(), 2);
+ assert_eq!(b::three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:sepcomp_lib.rs
+
+// Test linking against a library built with -C codegen-units > 1
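+//
+// The `sepcomp_lib` aux crate is not shown in this hunk. A minimal sketch
+// consistent with the imports and assertions below (presumably the aux file
+// itself passes `-C codegen-units=3` via its own compile-flags so that the
+// library really is built from multiple units):
+//
+//     pub mod a { pub fn one() -> uint { 1 } }
+//     pub mod b { pub fn two() -> uint { 2 } }
+//     pub mod c { pub fn three() -> uint { 3 } }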
+
+extern crate sepcomp_lib;
+use sepcomp_lib::a::one;
+use sepcomp_lib::b::two;
+use sepcomp_lib::c::three;
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(two(), 2);
+ assert_eq!(three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test references to static items across compilation units.
+
+fn pad() -> uint { 0 }
+
+static ONE: uint = 1;
+
+mod b {
+    // Separate compilation always places the next item in the LLVM module
+    // with the fewest instructions so far. Make sure we have some
+    // instructions in this module so that `a` and `b` don't go into the same
+    // compilation unit.
+ fn pad() -> uint { 0 }
+
+ pub static THREE: uint = ::ONE + ::a::TWO;
+}
+
+mod a {
+ fn pad() -> uint { 0 }
+
+ pub static TWO: uint = ::ONE + ::ONE;
+}
+
+fn main() {
+ assert_eq!(ONE, 1);
+ assert_eq!(a::TWO, 2);
+ assert_eq!(b::THREE, 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test unwinding through multiple compilation units.
+
+// According to acrichto, in the distant past `ld -r` (which is used during
+// linking when codegen-units > 1) was known to produce object files with
+// damaged unwinding tables. This may be related to GNU binutils bug #6893
+// ("Partial linking results in corrupt .eh_frame_hdr"), but I'm not certain.
+// In any case, this test should let us know if enabling parallel codegen ever
+// breaks unwinding.
+
+fn pad() -> uint { 0 }
+
+mod a {
+ pub fn f() {
+ fail!();
+ }
+}
+
+mod b {
+ pub fn g() {
+ ::a::f();
+ }
+}
+
+fn main() {
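+    // `std::task::try` runs the proc in a new task and returns an Err if the
+    // task failed; `unwrap_err()` therefore asserts that the fail! in a::f
+    // unwound cleanly across all three compilation units.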
+ std::task::try(proc() { ::b::g() }).unwrap_err();
+}
pub fn test_rmdir_recursive_ok() {
let rwx = io::UserRWX;
- let tmpdir = TempDir::new("test").expect("test_rmdir_recursive_ok: \
- couldn't create temp dir");
+ let tmpdir = TempDir::new("test").ok().expect("test_rmdir_recursive_ok: \
+ couldn't create temp dir");
let tmpdir = tmpdir.path();
let root = tmpdir.join("foo");
}
fn in_tmpdir(f: ||) {
- let tmpdir = TempDir::new("test").expect("can't make tmpdir");
+ let tmpdir = TempDir::new("test").ok().expect("can't make tmpdir");
assert!(os::change_dir(tmpdir.path()));
f();