probe CFG_GDB gdb
probe CFG_LLDB lldb
+if [ ! -z "$CFG_GDB" ]
+then
+ # Extract the version
+ CFG_GDB_VERSION=$($CFG_GDB --version 2>/dev/null | head -1)
+ putvar CFG_GDB_VERSION
+fi
+
if [ ! -z "$CFG_LLDB" ]
then
# If CFG_LLDB_PYTHON_DIR is not already set from the outside and valid, try to read it from
DEPS_time := std serialize
DEPS_rand := core
DEPS_url := std
-DEPS_log := std
+DEPS_log := std regex
DEPS_regex := std
DEPS_regex_macros = rustc syntax std regex
DEPS_fmt_macros = std
--stage-id stage$(1)-$(2) \
--target $(2) \
--host $(3) \
+ --gdb-version="$(CFG_GDB_VERSION)" \
--android-cross-path=$(CFG_ANDROID_CROSS_PATH) \
--adb-path=$(CFG_ADB) \
--adb-test-dir=$(CFG_ADB_TEST_DIR) \
// Host triple for the compiler being invoked
pub host: String,
+ // Version of GDB
+ pub gdb_version: Option<String>,
+
// Path to the android tools
pub android_cross_path: Path,
optflag("", "jit", "run tests under the JIT"),
optopt("", "target", "the target to build for", "TARGET"),
optopt("", "host", "the host to build for", "HOST"),
+ optopt("", "gdb-version", "the version of GDB used", "MAJOR.MINOR"),
optopt("", "android-cross-path", "Android NDK standalone path", "PATH"),
optopt("", "adb-path", "path to the android debugger", "PATH"),
optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"),
jit: matches.opt_present("jit"),
target: opt_str2(matches.opt_str("target")),
host: opt_str2(matches.opt_str("host")),
+ gdb_version: extract_gdb_version(matches.opt_str("gdb-version")),
android_cross_path: opt_path(matches, "android-cross-path"),
adb_path: opt_str2(matches.opt_str("adb-path")),
adb_test_dir: opt_str2(matches.opt_str("adb-test-dir")),
runtest::run_metrics(config, testfile, mm)
})
}
+
+/// Extracts a "MAJOR.MINOR" GDB version number from the first line of
+/// `gdb --version` output (captured by the configure script and handed to
+/// compiletest via the --gdb-version flag). Returns `None` when the input
+/// is absent, blank, or contains no recognizable version pattern.
+fn extract_gdb_version(full_version_line: Option<String>) -> Option<String> {
+    match full_version_line {
+        Some(ref full_version_line)
+            if full_version_line.as_slice().trim().len() > 0 => {
+            let full_version_line = full_version_line.as_slice().trim();
+
+            // Look for a digit.digit token delimited by non-digits (or the
+            // start/end of the line), e.g. the "7.4" in "GNU gdb (GDB) 7.4".
+            let re = Regex::new(r"(^|[^0-9])([0-9]\.[0-9])([^0-9]|$)").unwrap();
+
+            match re.captures(full_version_line) {
+                Some(captures) => {
+                    // Capture group 2 is the bare MAJOR.MINOR component.
+                    Some(captures.at(2).to_string())
+                }
+                None => {
+                    println!("Could not extract GDB version from line '{}'",
+                             full_version_line);
+                    None
+                }
+            }
+        },
+        _ => None
+    }
+}
\ No newline at end of file
use common;
use util;
+use std::from_str::FromStr;
+
pub struct TestProps {
// Lines that should be expected, in order, on standard out
pub error_patterns: Vec<String> ,
format!("ignore-{}",
config.stage_id.as_slice().split('-').next().unwrap())
}
+ fn ignore_gdb(config: &Config, line: &str) -> bool {
+ if config.mode != common::DebugInfoGdb {
+ return false;
+ }
- let val = iter_header(testfile, |ln| {
- if parse_name_directive(ln, "ignore-test") {
- false
- } else if parse_name_directive(ln, ignore_target(config).as_slice()) {
- false
- } else if parse_name_directive(ln, ignore_stage(config).as_slice()) {
- false
- } else if config.mode == common::Pretty &&
- parse_name_directive(ln, "ignore-pretty") {
- false
- } else if config.target != config.host &&
- parse_name_directive(ln, "ignore-cross-compile") {
- false
- } else {
- true
+ if parse_name_directive(line, "ignore-gdb") {
+ return true;
}
+
+ match config.gdb_version {
+ Some(ref actual_version) => {
+ if line.contains("min-gdb-version") {
+ let min_version = line.trim()
+ .split(' ')
+ .last()
+ .expect("Malformed GDB version directive");
+ // Ignore if actual version is smaller the minimum required
+ // version
+ gdb_version_to_int(actual_version.as_slice()) <
+ gdb_version_to_int(min_version.as_slice())
+ } else {
+ false
+ }
+ }
+ None => false
+ }
+ }
+
+ let val = iter_header(testfile, |ln| {
+ !parse_name_directive(ln, "ignore-test") &&
+ !parse_name_directive(ln, ignore_target(config).as_slice()) &&
+ !parse_name_directive(ln, ignore_stage(config).as_slice()) &&
+ !(config.mode == common::Pretty && parse_name_directive(ln, "ignore-pretty")) &&
+ !(config.target != config.host && parse_name_directive(ln, "ignore-cross-compile")) &&
+ !ignore_gdb(config, ln) &&
+ !(config.mode == common::DebugInfoLldb && parse_name_directive(ln, "ignore-lldb"))
});
!val
None => None
}
}
+
+/// Converts a "MAJOR.MINOR" GDB version string into a single integer
+/// (major * 1000 + minor) so that versions can be compared numerically,
+/// e.g. for the `min-gdb-version` test header directive.
+/// Fails with a descriptive message on any other format.
+pub fn gdb_version_to_int(version_string: &str) -> int {
+    let error_string = format!(
+        "Encountered GDB version string with unexpected format: {}",
+        version_string);
+    let error_string = error_string.as_slice();
+
+    let components: Vec<&str> = version_string.trim().split('.').collect();
+
+    // Exactly two components are expected (e.g. "7.4").
+    if components.len() != 2 {
+        fail!("{}", error_string);
+    }
+
+    let major: int = FromStr::from_str(components[0]).expect(error_string);
+    let minor: int = FromStr::from_str(components[1]).expect(error_string);
+
+    return major * 1000 + minor;
+}
match cmd.spawn() {
Ok(mut process) => {
for input in input.iter() {
- process.stdin.get_mut_ref().write(input.as_bytes()).unwrap();
+ process.stdin.as_mut().unwrap().write(input.as_bytes()).unwrap();
}
let ProcessOutput { status, output, error } =
process.wait_with_output().unwrap();
match cmd.spawn() {
Ok(mut process) => {
for input in input.iter() {
- process.stdin.get_mut_ref().write(input.as_bytes()).unwrap();
+ process.stdin.as_mut().unwrap().write(input.as_bytes()).unwrap();
}
Some(process)
};
let config = &mut config;
- let DebuggerCommands { commands, check_lines, .. } = parse_debugger_commands(testfile, "gdb");
+ let DebuggerCommands {
+ commands,
+ check_lines,
+ use_gdb_pretty_printer,
+ ..
+ } = parse_debugger_commands(testfile, "gdb");
let mut cmds = commands.connect("\n");
// compile test file (it should have 'compile-flags:-g' in the header)
let exe_file = make_exe_name(config, testfile);
- let mut proc_args;
let debugger_run_result;
match config.target.as_slice() {
"arm-linux-androideabi" => {
}
_=> {
+ let rust_src_root = find_rust_src_root(config)
+ .expect("Could not find Rust source root");
+ let rust_pp_module_rel_path = Path::new("./src/etc");
+ let rust_pp_module_abs_path = rust_src_root.join(rust_pp_module_rel_path)
+ .as_str()
+ .unwrap()
+ .to_string();
// write debugger script
- let script_str = [
- "set charset UTF-8".to_string(),
- cmds,
- "quit\n".to_string()
- ].connect("\n");
+ let mut script_str = String::with_capacity(2048);
+
+ script_str.push_str("set charset UTF-8\n");
+ script_str.push_str("show version\n");
+
+ match config.gdb_version {
+ Some(ref version) => {
+ println!("NOTE: compiletest thinks it is using GDB version {}",
+ version.as_slice());
+
+ if header::gdb_version_to_int(version.as_slice()) >
+ header::gdb_version_to_int("7.4") {
+ // Add the directory containing the pretty printers to
+ // GDB's script auto loading safe path ...
+ script_str.push_str(
+ format!("add-auto-load-safe-path {}\n",
+ rust_pp_module_abs_path.as_slice())
+ .as_slice());
+ // ... and also the test directory
+ script_str.push_str(
+ format!("add-auto-load-safe-path {}\n",
+ config.build_base.as_str().unwrap())
+ .as_slice());
+ }
+ }
+ _ => {
+ println!("NOTE: compiletest does not know which version of \
+ GDB it is using");
+ }
+ }
+
+ // Load the target executable
+ script_str.push_str(format!("file {}\n",
+ exe_file.as_str().unwrap())
+ .as_slice());
+
+ script_str.push_str(cmds.as_slice());
+ script_str.push_str("quit\n");
+
debug!("script_str = {}", script_str);
dump_output_file(config,
testfile,
script_str.as_slice(),
"debugger.script");
+ if use_gdb_pretty_printer {
+ // Only emit the gdb auto-loading script if pretty printers
+ // should actually be loaded
+ dump_gdb_autoload_script(config, testfile);
+ }
+
// run debugger script with gdb
#[cfg(windows)]
fn debugger() -> String {
vec!("-quiet".to_string(),
"-batch".to_string(),
"-nx".to_string(),
- format!("-command={}", debugger_script.as_str().unwrap()),
- exe_file.as_str().unwrap().to_string());
- proc_args = ProcArgs {
+ format!("-command={}", debugger_script.as_str().unwrap()));
+
+ let proc_args = ProcArgs {
prog: debugger(),
args: debugger_opts,
};
+
+ let environment = vec![("PYTHONPATH".to_string(), rust_pp_module_abs_path)];
+
debugger_run_result = compose_and_run(config,
testfile,
proc_args,
- Vec::new(),
+ environment,
config.run_lib_path.as_slice(),
None,
None);
}
check_debugger_output(&debugger_run_result, check_lines.as_slice());
+
+    // Writes a "<test-name>-gdb.py" script next to the other test outputs.
+    // GDB auto-loads it alongside the test executable (given the
+    // add-auto-load-safe-path entries set up above), and the script in turn
+    // registers the Rust pretty printers for the test binary.
+    fn dump_gdb_autoload_script(config: &Config, testfile: &Path) {
+        let mut script_path = output_base_name(config, testfile);
+        let mut script_file_name = script_path.filename().unwrap().to_vec();
+        script_file_name.push_all("-gdb.py".as_bytes());
+        script_path.set_filename(script_file_name.as_slice());
+
+        let script_content = "import gdb_rust_pretty_printing\n\
+gdb_rust_pretty_printing.register_printers(gdb.current_objfile())\n"
+            .as_bytes();
+
+        File::create(&script_path).write(script_content).unwrap();
+    }
+}
+
+/// Walks upwards from the test source directory until it finds an ancestor
+/// containing src/etc/lldb_batchmode.py, which is taken to mark the Rust
+/// source root (needed to locate the pretty-printer support modules under
+/// src/etc). Returns `None` if no ancestor directory qualifies.
+fn find_rust_src_root(config: &Config) -> Option<Path> {
+    let mut path = config.src_base.clone();
+    let path_postfix = Path::new("src/etc/lldb_batchmode.py");
+
+    // `pop` returns false once the filesystem root is reached.
+    while path.pop() {
+        if path.join(path_postfix.clone()).is_file() {
+            return Some(path);
+        }
+    }
+
+    return None;
}
fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testfile: &Path) {
let DebuggerCommands {
commands,
check_lines,
- breakpoint_lines
+ breakpoint_lines,
+ ..
} = parse_debugger_commands(testfile, "lldb");
// Write debugger script:
commands: Vec<String>,
check_lines: Vec<String>,
breakpoint_lines: Vec<uint>,
+ use_gdb_pretty_printer: bool
}
fn parse_debugger_commands(file_path: &Path, debugger_prefix: &str)
let mut breakpoint_lines = vec!();
let mut commands = vec!();
let mut check_lines = vec!();
+ let mut use_gdb_pretty_printer = false;
let mut counter = 1;
let mut reader = BufferedReader::new(File::open(file_path).unwrap());
for line in reader.lines() {
breakpoint_lines.push(counter);
}
+ if line.as_slice().contains("gdb-use-pretty-printer") {
+ use_gdb_pretty_printer = true;
+ }
+
header::parse_name_value_directive(
line.as_slice(),
command_directive.as_slice()).map(|cmd| {
DebuggerCommands {
commands: commands,
check_lines: check_lines,
- breakpoint_lines: breakpoint_lines
+ breakpoint_lines: breakpoint_lines,
+ use_gdb_pretty_printer: use_gdb_pretty_printer,
}
}
// codegen tests (vs. clang)
-fn make_o_name(config: &Config, testfile: &Path) -> Path {
- output_base_name(config, testfile).with_extension("o")
-}
-
fn append_suffix_to_stem(p: &Path, suffix: &str) -> Path {
if suffix.len() == 0 {
(*p).clone()
// FIXME (#9639): This needs to handle non-utf8 paths
let link_args = vec!("-L".to_string(),
aux_dir.as_str().unwrap().to_string());
- let llvm_args = vec!("--emit=obj".to_string(),
- "--crate-type=lib".to_string(),
- "-C".to_string(),
- "save-temps".to_string());
+ let llvm_args = vec!("--emit=bc,obj".to_string(),
+ "--crate-type=lib".to_string());
let args = make_compile_args(config,
props,
link_args.append(llvm_args.as_slice()),
- |a, b| ThisFile(make_o_name(a, b)), testfile);
+ |a, b| ThisDirectory(output_base_name(a, b).dir_path()),
+ testfile);
compose_and_run_compiler(config, props, testfile, args, None)
}
let testcc = testfile.with_extension("cc");
let proc_args = ProcArgs {
// FIXME (#9639): This needs to handle non-utf8 paths
- prog: config.clang_path.get_ref().as_str().unwrap().to_string(),
+ prog: config.clang_path.as_ref().unwrap().as_str().unwrap().to_string(),
args: vec!("-c".to_string(),
"-emit-llvm".to_string(),
"-o".to_string(),
let bitcodefile = output_base_name(config, testfile).with_extension("bc");
let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix);
let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract");
- let prog = config.llvm_bin_path.get_ref().join("llvm-extract");
+ let prog = config.llvm_bin_path.as_ref().unwrap().join("llvm-extract");
let proc_args = ProcArgs {
// FIXME (#9639): This needs to handle non-utf8 paths
prog: prog.as_str().unwrap().to_string(),
let bitcodefile = append_suffix_to_stem(&bitcodefile, suffix);
let extracted_bc = append_suffix_to_stem(&bitcodefile, "extract");
let extracted_ll = extracted_bc.with_extension("ll");
- let prog = config.llvm_bin_path.get_ref().join("llvm-dis");
+ let prog = config.llvm_bin_path.as_ref().unwrap().join("llvm-dis");
let proc_args = ProcArgs {
// FIXME (#9639): This needs to handle non-utf8 paths
prog: prog.as_str().unwrap().to_string(),
## Why aren't values type-parametric? Why only items?
-Doing so would make type inference much more complex, and require the implementation strategy of runtime parametrization.
+Doing so would make type inference much more complex, and require the implementation strategy of runtime parameterization.
## Why are enumerations nominal and closed?
#[cfg(target_os = "win32", target_arch = "x86")]
#[link(name = "kernel32")]
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
extern "stdcall" {
fn SetEnvironmentVariableA(n: *const u8, v: *const u8) -> libc::c_int;
}
# The "nullable pointer optimization"
Certain types are defined to not be `null`. This includes references (`&T`,
-`&mut T`), owning pointers (`~T`), and function pointers (`extern "abi"
-fn()`). When interfacing with C, pointers that might be null are often used.
+`&mut T`), boxes (`Box<T>`), and function pointers (`extern "abi" fn()`).
+When interfacing with C, pointers that might be null are often used.
As a special case, a generic `enum` that contains exactly two variants, one of
which contains no data and the other containing a single field, is eligible
for the "nullable pointer optimization". When such an enum is instantiated
This part is coming soon.
+# Patterns and `ref`
+
+When you're trying to match something that's stored in a pointer, there may be
+a situation where matching directly isn't the best option available. Let's see
+how to properly handle this:
+
+```{rust,ignore}
+fn possibly_print(x: &Option<String>) {
+ match *x {
+ // BAD: cannot move out of a `&`
+        Some(s) => println!("{}", s),
+
+ // GOOD: instead take a reference into the memory of the `Option`
+ Some(ref s) => println!("{}", *s),
+ None => {}
+ }
+}
+```
+
+The `ref s` here means that `s` will be of type `&String`, rather than type
+`String`.
+
+This is important when the type you're trying to get access to has a destructor
+and you don't want to move it, you just want a reference to it.
+
# Cheat Sheet
Here's a quick rundown of Rust's pointer types:
common thing to do.
The first thing that we need to do is make a file to put our code in. I like
-to make a projects directory in my home directory, and keep all my projects
+to make a `projects` directory in my home directory, and keep all my projects
there. Rust does not care where your code lives.
This actually leads to one other concern we should address: this tutorial will
```
Cargo expects your source files to live inside a `src` directory. That leaves
-the top level for other things, like READMEs, licence information, and anything
+the top level for other things, like READMEs, license information, and anything
not related to your code. Cargo helps us keep our projects nice and tidy. A
place for everything, and everything in its place.
[package]
name = "hello_world"
-version = "0.1.0"
+version = "0.0.1"
authors = [ "Your name <you@example.com>" ]
[[bin]]
following will produce a compile-time error:
```{ignore}
-let x = (let y = 5i); // found `let` in ident position
+let x = (let y = 5i); // expected identifier, found keyword `let`
```
The compiler is telling us here that it was expecting to see the beginning of
languages which have it, like Haskell, often suggest that documenting your
types explicitly is a best-practice. We agree that forcing functions to declare
types while allowing for inference inside of function bodies is a wonderful
-compromise between full inference and no inference.
+sweet spot between full inference and no inference.
What about returning a value? Here's a function that adds one to an integer:
}
```
-Sometimes, this makes things more readable. Sometimes, less. Use your judgement
+Sometimes, this makes things more readable. Sometimes, less. Use your judgment
here.
That's all you need to get basic input from the standard input! It's not too
guide.
Before we move on, let me show you one more Cargo command: `run`. `cargo run`
-is kind of like `cargo build`, but it also then runs the produced exectuable.
+is kind of like `cargo build`, but it also then runs the produced executable.
Try it out:
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Hello, world!
```
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Guess the number!
The secret number is: 7
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Guess the number!
The secret number is: 57
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Guess the number!
The secret number is: 17
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Guess the number!
The secret number is: 58
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Guess the number!
The secret number is: 59
```{notrust,ignore}
$ cargo run
- Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)
+ Compiling guessing_game v0.0.1 (file:///home/you/projects/guessing_game)
Running `target/guessing_game`
Guess the number!
The secret number is: 61
Note that we haven't mentioned anything about files yet. Rust does not impose a
particular relationship between your filesystem structure and your module
structure. That said, there is a conventional approach to how Rust looks for
-modules on the file system, but it's also overrideable.
+modules on the file system, but it's also overridable.
Enough talk, let's build something! Let's make a new project called `modules`.
Let's double check our work by compiling:
-```{bash,ignore}
-$ cargo build
+```{bash,notrust}
+$ cargo run
Compiling modules v0.0.1 (file:///home/you/projects/modules)
-$ ./target/modules
+ Running `target/modules`
Hello, world!
```
Rust provides six attributes to indicate the stability level of various
parts of your library. The six levels are:
-* deprecated: this item should no longer be used. No guarantee of backwards
+* deprecated: This item should no longer be used. No guarantee of backwards
compatibility.
* experimental: This item was only recently introduced or is otherwise in a
state of flux. It may change significantly, or even be removed. No guarantee
Rust can't find this function. That makes sense, as we didn't write it yet!
-In order to share this codes with our tests, we'll need to make a library crate.
+In order to share this code with our tests, we'll need to make a library crate.
This is also just good software design: as we mentioned before, it's a good idea
to put most of your functionality into a library crate, and have your executable
crate use that library. This allows for code re-use.
We've now covered the basics of testing. Rust's tools are primitive, but they
work well in the simple cases. There are some Rustaceans working on building
-more complicated frameworks on top of all of this, but thery're just starting
+more complicated frameworks on top of all of this, but they're just starting
out.
# Pointers
deallocated, leaving it up to your runtime can make this difficult.
Rust chooses a different path, and that path is called **ownership**. Any
-binding that creates a resource is the **owner** of that resource. Being an
-owner gives you three privileges, with two restrictions:
+binding that creates a resource is the **owner** of that resource.
+
+Being an owner affords you some privileges:
1. You control when that resource is deallocated.
2. You may lend that resource, immutably, to as many borrowers as you'd like.
-3. You may lend that resource, mutably, to a single borrower. **BUT**
-4. Once you've done so, you may not also lend it out otherwise, mutably or
- immutably.
-5. You may not lend it out mutably if you're currently lending it to someone.
+3. You may lend that resource, mutably, to a single borrower.
+
+But it also comes with some restrictions:
+
+1. If someone is borrowing your resource (either mutably or immutably), you may
+ not mutate the resource or mutably lend it to someone.
+2. If someone is mutably borrowing your resource, you may not lend it out at
+ all (mutably or immutably) or access it in any way.
What's up with all this 'lending' and 'borrowing'? When you allocate memory,
you get a pointer to that memory. This pointer allows you to manipulate said
}
```
-If you have a struct, you can desugar it inside of a pattern:
+If you have a struct, you can destructure it inside of a pattern:
```{rust}
struct Point {
default to returning unit (`()`).
There's one big difference between a closure and named functions, and it's in
-the name: a function "closes over its environment." What's that mean? It means
+the name: a closure "closes over its environment." What's that mean? It means
this:
```{rust}
Whew! This isn't too terrible. You can see that we still `let x = 5i`,
but then things get a little bit hairy. Three more bindings get set: a
-static format string, an argument vector, and the aruments. We then
+static format string, an argument vector, and the arguments. We then
invoke the `println_args` function with the generated arguments.
This is the code (well, the full version) that Rust actually compiles. You can
# Unsafe
-Finally, there's one more concept that you should be aware in Rust: `unsafe`.
+Finally, there's one more Rust concept that you should be aware of: `unsafe`.
There are two circumstances where Rust's safety provisions don't work well.
The first is when interfacing with C code, and the second is when building
certain kinds of abstractions.
- `repr` - specifies the representation to use for this struct. Takes a list
of options. The currently accepted ones are `C` and `packed`, which may be
- combined. `C` will use a C ABI comptible struct layout, and `packed` will
+ combined. `C` will use a C ABI compatible struct layout, and `packed` will
remove any padding between fields (note that this is very fragile and may
break platforms which require aligned access).
* `begin_unwind`
: ___Needs filling in___
+* `managed_bound`
+ : This type implements "managed"
* `no_copy_bound`
: This type does not implement "copy", even if eligible
* `no_send_bound`
: This type does not implement "send", even if eligible
* `no_sync_bound`
: This type does not implement "sync", even if eligible
-* `managed_bound`
- : This type implements "managed"
* `eh_personality`
: ___Needs filling in___
* `exchange_free`
These levels are directly inspired by
[Node.js' "stability index"](http://nodejs.org/api/documentation.html).
-Stability levels are inherited, so an items's stability attribute is the
+Stability levels are inherited, so an item's stability attribute is the
default stability for everything nested underneath it.
There are lints for disallowing items marked with certain levels: `deprecated`,
* `concat_idents` - Allows use of the `concat_idents` macro, which is in many
ways insufficient for concatenating identifiers, and may
- be removed entirely for something more wholsome.
+ be removed entirely for something more wholesome.
* `default_type_params` - Allows use of default type parameters. The future of
this feature is uncertain.
New instances of a `struct` can be constructed with a [struct expression](#structure-expressions).
-The memory layout of a `struct` is undefined by default to allow for compiler optimziations like
+The memory layout of a `struct` is undefined by default to allow for compiler optimizations like
field reordering, but it can be fixed with the `#[repr(...)]` attribute.
In either case, fields may be given in any order in a corresponding struct *expression*;
the resulting `struct` value will always have the same memory layout.
All pointers in Rust are explicit first-class values.
They can be copied, stored into data structures, and returned from functions.
-There are four varieties of pointer in Rust:
-
-* Owning pointers (`Box`)
- : These point to owned heap allocations (or "boxes") in the shared, inter-task heap.
- Each owned box has a single owning pointer; pointer and pointee retain a 1:1 relationship at all times.
- Owning pointers are written `Box<content>`,
- for example `Box<int>` means an owning pointer to an owned box containing an integer.
- Copying an owned box is a "deep" operation:
- it involves allocating a new owned box and copying the contents of the old box into the new box.
- Releasing an owning pointer immediately releases its corresponding owned box.
+There are two varieties of pointer in Rust:
* References (`&`)
: These point to memory _owned by some other value_.
- References arise by (automatic) conversion from owning pointers, managed pointers,
- or by applying the borrowing operator `&` to some other value,
- including [lvalues, rvalues or temporaries](#lvalues,-rvalues-and-temporaries).
- A borrow expression is written `&content`.
-
- A reference type is written `&'f type` for some lifetime-variable `f`,
- or just `&type` when the lifetime can be elided;
- for example `&int` means a reference to an integer.
+    A reference type is written `&type` when the lifetime can be elided,
+    or `&'a type` when an explicit lifetime `'a` is needed.
Copying a reference is a "shallow" operation:
it involves only copying the pointer itself.
Releasing a reference typically has no effect on the value it points to,
- with the exception of temporary values,
- which are released when the last reference to them is released.
+ with the exception of temporary values, which are released when the last
+ reference to them is released.
* Raw pointers (`*`)
: Raw pointers are pointers without safety or liveness guarantees.
they exist to support interoperability with foreign code,
and writing performance-critical or low-level functions.
+The standard library contains additional 'smart pointer' types beyond references
+and raw pointers.
+
### Function types
The function type constructor `fn` forms new function types.
purpose of this output type is to create a static library containing all of
the local crate's code along with all upstream dependencies. The static
library is actually a `*.a` archive on linux and osx and a `*.lib` file on
- windows. This format is recommended for use in situtations such as linking
+ windows. This format is recommended for use in situations such as linking
Rust code into an existing non-Rust application because it will not have
dynamic dependencies on other Rust code.
// except according to those terms.
#[cfg(rustdoc)]
-extern crate this = "rustdoc";
+extern crate "rustdoc" as this;
#[cfg(rustc)]
-extern crate this = "rustc";
+extern crate "rustc" as this;
fn main() { this::main() }
--- /dev/null
+# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+# file at the top-level directory of this distribution and at
+# http://rust-lang.org/COPYRIGHT.
+#
+# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+# option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+import gdb
+
+#===============================================================================
+# GDB Pretty Printing Module for Rust
+#===============================================================================
+
+def register_printers(objfile):
+    "Registers Rust pretty printers for the given objfile"
+    # GDB invokes the lookup function once for every value it is about to
+    # print in the context of this objfile.
+    objfile.pretty_printers.append(rust_pretty_printer_lookup_function)
+
+def rust_pretty_printer_lookup_function(val):
+ "Returns the correct Rust pretty printer for the given value if there is one"
+ type_code = val.type.code
+
+ if type_code == gdb.TYPE_CODE_STRUCT:
+ struct_kind = classify_struct(val.type)
+
+ if struct_kind == STRUCT_KIND_STR_SLICE:
+ return RustStringSlicePrinter(val)
+
+ if struct_kind == STRUCT_KIND_TUPLE:
+ return RustTuplePrinter(val)
+
+ if struct_kind == STRUCT_KIND_TUPLE_STRUCT:
+ return RustTupleStructPrinter(val, False)
+
+ if struct_kind == STRUCT_KIND_CSTYLE_VARIANT:
+ return RustCStyleEnumPrinter(val[get_field_at_index(val, 0)])
+
+ if struct_kind == STRUCT_KIND_TUPLE_VARIANT:
+ return RustTupleStructPrinter(val, True)
+
+ if struct_kind == STRUCT_KIND_STRUCT_VARIANT:
+ return RustStructPrinter(val, True)
+
+ return RustStructPrinter(val, False)
+
+ # Enum handling
+ if type_code == gdb.TYPE_CODE_UNION:
+ enum_members = list(val.type.fields())
+ enum_member_count = len(enum_members)
+
+ if enum_member_count == 0:
+ return RustStructPrinter(val, false)
+
+ if enum_member_count == 1:
+ if enum_members[0].name == None:
+ # This is a singleton enum
+ return rust_pretty_printer_lookup_function(val[enum_members[0]])
+ else:
+ assert enum_members[0].name.startswith("RUST$ENCODED$ENUM$")
+ # This is a space-optimized enum
+ last_separator_index = enum_members[0].name.rfind("$")
+ second_last_separator_index = first_variant_name.rfind("$", 0, last_separator_index)
+ disr_field_index = first_variant_name[second_last_separator_index + 1 :
+ last_separator_index]
+ disr_field_index = int(disr_field_index)
+
+ sole_variant_val = val[enum_members[0]]
+ disr_field = get_field_at_index(sole_variant_val, disr_field_index)
+ discriminant = int(sole_variant_val[disr_field])
+
+ if discriminant == 0:
+ null_variant_name = first_variant_name[last_separator_index + 1:]
+ return IdentityPrinter(null_variant_name)
+
+ return rust_pretty_printer_lookup_function(sole_variant_val)
+
+ # This is a regular enum, extract the discriminant
+ discriminant_name, discriminant_val = extract_discriminant_value(val)
+ return rust_pretty_printer_lookup_function(val[enum_members[discriminant_val]])
+
+ # No pretty printer has been found
+ return None
+
+#=------------------------------------------------------------------------------
+# Pretty Printer Classes
+#=------------------------------------------------------------------------------
+
+class RustStructPrinter:
+    # Prints regular structs and struct-like enum variants. For a struct
+    # variant, the first (artificial) field is the discriminant and is
+    # suppressed via `hide_first_field`.
+    def __init__(self, val, hide_first_field):
+        self.val = val
+        self.hide_first_field = hide_first_field
+
+    def to_string(self):
+        return self.val.type.tag
+
+    def children(self):
+        cs = []
+        for field in self.val.type.fields():
+            field_name = field.name
+            # Normally the field name is used as a key to access the field value,
+            # because that's also supported in older versions of GDB...
+            field_key = field_name
+            if field_name == None:
+                field_name = ""
+                # ... but for fields without a name (as in tuples), we have to fall back
+                # to the newer method of using the field object directly as key. In
+                # older versions of GDB, this will just fail.
+                field_key = field
+            name_value_tuple = ( field_name, self.val[field_key] )
+            cs.append( name_value_tuple )
+
+        if self.hide_first_field:
+            cs = cs[1:]
+
+        return cs
+
+class RustTuplePrinter:
+    # Prints tuple values. Children are unnamed; the "array" display hint
+    # makes GDB render them as a positional sequence.
+    def __init__(self, val):
+        self.val = val
+
+    def to_string(self):
+        # No header text; the children carry all the information.
+        return None
+
+    def children(self):
+        cs = []
+        for field in self.val.type.fields():
+            cs.append( ("", self.val[field]) )
+
+        return cs
+
+    def display_hint(self):
+        return "array"
+
+class RustTupleStructPrinter:
+    # Prints tuple structs and tuple-like enum variants. For a tuple
+    # variant, the first (artificial) field is the discriminant and is
+    # suppressed via `hide_first_field`.
+    def __init__(self, val, hide_first_field):
+        self.val = val
+        self.hide_first_field = hide_first_field
+
+    def to_string(self):
+        return self.val.type.tag
+
+    def children(self):
+        cs = []
+        for field in self.val.type.fields():
+            cs.append( ("", self.val[field]) )
+
+        if self.hide_first_field:
+            cs = cs[1:]
+
+        return cs
+
+    def display_hint(self):
+        return "array"
+
+class RustStringSlicePrinter:
+    # Prints &str slices as a quoted string by reading `length` bytes from
+    # `data_ptr` and decoding them as UTF-8.
+    def __init__(self, val):
+        self.val = val
+
+    def to_string(self):
+        slice_byte_len = self.val["length"]
+        return '"%s"' % self.val["data_ptr"].string(encoding = "utf-8",
+                                                     length = slice_byte_len)
+
+class RustCStyleEnumPrinter:
+    # Prints C-style (data-less) enums. `val` is the underlying gdb enum
+    # value, whose default string conversion already yields the variant name.
+    def __init__(self, val):
+        assert val.type.code == gdb.TYPE_CODE_ENUM
+        self.val = val
+
+    def to_string(self):
+        return str(self.val)
+
+class IdentityPrinter:
+ def __init__(self, string):
+ self.string
+
+ def to_string(self):
+ return self.string
+
+# Classification constants returned by classify_struct(): structs, tuples,
+# string slices and the various flavors of enum variants all arrive as
+# TYPE_CODE_STRUCT and must be told apart from their debuginfo shape.
+STRUCT_KIND_REGULAR_STRUCT = 0
+STRUCT_KIND_TUPLE_STRUCT = 1
+STRUCT_KIND_TUPLE = 2
+STRUCT_KIND_TUPLE_VARIANT = 3
+STRUCT_KIND_STRUCT_VARIANT = 4
+STRUCT_KIND_CSTYLE_VARIANT = 5
+STRUCT_KIND_STR_SLICE = 6
+
+def classify_struct(type):
+    # Classifies a gdb struct type into one of the STRUCT_KIND_* constants,
+    # based on the conventions visible in the emitted debuginfo:
+    #   - the special tag "&str" marks string slices,
+    #   - an artificial first field marks an enum variant (the discriminant),
+    #   - unnamed fields mark tuple-like shapes,
+    #   - a tag starting with "(" distinguishes plain tuples from tuple structs.
+    if type.tag == "&str":
+        return STRUCT_KIND_STR_SLICE
+
+    fields = list(type.fields())
+    field_count = len(fields)
+
+    if field_count == 0:
+        return STRUCT_KIND_REGULAR_STRUCT
+
+    if fields[0].artificial:
+        if field_count == 1:
+            # Only the discriminant: a data-less variant of a C-style enum.
+            return STRUCT_KIND_CSTYLE_VARIANT
+        elif fields[1].name == None:
+            return STRUCT_KIND_TUPLE_VARIANT
+        else:
+            return STRUCT_KIND_STRUCT_VARIANT
+
+    if fields[0].name == None:
+        if type.tag.startswith("("):
+            return STRUCT_KIND_TUPLE
+        else:
+            return STRUCT_KIND_TUPLE_STRUCT
+
+    return STRUCT_KIND_REGULAR_STRUCT
+
+def extract_discriminant_value(enum_val):
+    # Returns (name, value) of the discriminant field of the given enum
+    # value. The first field of the first variant is read; returns on the
+    # first inner iteration, so only that single field is ever examined.
+    assert enum_val.type.code == gdb.TYPE_CODE_UNION
+    for variant_descriptor in enum_val.type.fields():
+        variant_val = enum_val[variant_descriptor]
+        for field in variant_val.type.fields():
+            return (field.name, int(variant_val[field]))
+
+def first_field(val):
+    # Returns the first field descriptor of `val`'s type, or None (by
+    # falling off the end) when the type has no fields.
+    for field in val.type.fields():
+        return field
+
+def get_field_at_index(val, index):
+ i = 0
+ for field in val.type.fields():
+ if i == index:
+ return field
+ return None
// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly
-#![allow(missing_doc, non_uppercase_statics, non_snake_case_functions)]
+#![allow(missing_doc, non_uppercase_statics, non_snake_case)]
'''
# Mapping taken from Table 12 from:
use core::ptr::RawPtr;
#[cfg(not(test))] use core::raw;
-#[cfg(not(test))] use util;
+#[cfg(stage0, not(test))] use util;
/// Returns a pointer to `size` bytes of memory.
///
}
// FIXME: #7496
-#[cfg(not(test))]
+#[cfg(stage0, not(test))]
#[lang="closure_exchange_malloc"]
#[inline]
#[allow(deprecated)]
alloc as *mut u8
}
+// FIXME: #7496
+// Post-stage0 twin of the lang item above: allocates storage for a boxed
+// closure and stores its drop glue in the box header before handing the
+// raw allocation back to the compiler-generated caller.
+#[cfg(not(stage0), not(test))]
+#[lang="closure_exchange_malloc"]
+#[inline]
+#[allow(deprecated)]
+unsafe fn closure_exchange_malloc(drop_glue: fn(*mut u8), size: uint,
+ align: uint) -> *mut u8 {
+ let p = allocate(size, align);
+
+ // Write the drop glue into the allocation's Box<()> header.
+ let alloc = p as *mut raw::Box<()>;
+ (*alloc).drop_glue = drop_glue;
+
+ alloc as *mut u8
+}
+
#[cfg(jemalloc)]
mod imp {
use core::option::{None, Option};
//!
//! This is the lowest level library through which allocation in Rust can be
//! performed where the allocation is assumed to succeed. This library will
-//! trigger a task failure when allocation fails.
+//! abort the process when allocation fails.
//!
//! This library, like libcore, is not intended for general usage, but rather as
//! a building block of other libraries. The types and interfaces in this
use std::num;
use std::ptr;
use std::rc::Rc;
-use std::rt::heap::allocate;
+use std::rt::heap::{allocate, deallocate};
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone, PartialEq)]
struct Chunk {
- data: Rc<RefCell<Vec<u8> >>,
+ data: Rc<RefCell<Vec<u8>>>,
fill: Cell<uint>,
is_copy: Cell<bool>,
}
+
impl Chunk {
fn capacity(&self) -> uint {
self.data.borrow().capacity()
end: Cell<*const T>,
/// A pointer to the first arena segment.
- first: RefCell<TypedArenaChunkRef<T>>,
+ first: RefCell<*mut TypedArenaChunk<T>>,
}
-type TypedArenaChunkRef<T> = Option<Box<TypedArenaChunk<T>>>;
struct TypedArenaChunk<T> {
/// Pointer to the next arena segment.
- next: TypedArenaChunkRef<T>,
+ next: *mut TypedArenaChunk<T>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
+// Compute the allocation size of a TypedArenaChunk<T> holding `capacity`
+// elements: the chunk header, rounded up to T's alignment, plus the
+// element payload. The unwrap()s fail on arithmetic overflow.
+fn calculate_size<T>(capacity: uint) -> uint {
+ let mut size = mem::size_of::<TypedArenaChunk<T>>();
+ size = round_up(size, mem::min_align_of::<T>());
+ let elem_size = mem::size_of::<T>();
+ let elems_size = elem_size.checked_mul(&capacity).unwrap();
+ size = size.checked_add(&elems_size).unwrap();
+ size
+}
+
impl<T> TypedArenaChunk<T> {
    #[inline]
- fn new(next: Option<Box<TypedArenaChunk<T>>>, capacity: uint)
- -> Box<TypedArenaChunk<T>> {
- let mut size = mem::size_of::<TypedArenaChunk<T>>();
- size = round_up(size, mem::min_align_of::<T>());
- let elem_size = mem::size_of::<T>();
- let elems_size = elem_size.checked_mul(&capacity).unwrap();
- size = size.checked_add(&elems_size).unwrap();
-
- let mut chunk = unsafe {
- let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>());
- let mut chunk: Box<TypedArenaChunk<T>> = mem::transmute(chunk);
- ptr::write(&mut chunk.next, next);
- chunk
- };
-
- chunk.capacity = capacity;
+ // Allocate a raw chunk (header plus element storage, as sized by
+ // calculate_size) and link it to `next`. The caller owns the raw
+ // pointer and must release it via `destroy`.
+ unsafe fn new(next: *mut TypedArenaChunk<T>, capacity: uint)
+ -> *mut TypedArenaChunk<T> {
+ let size = calculate_size::<T>(capacity);
+ let chunk = allocate(size, mem::min_align_of::<TypedArenaChunk<T>>())
+ as *mut TypedArenaChunk<T>;
+ (*chunk).next = next;
+ (*chunk).capacity = capacity;
 chunk
 }
 }
// Destroy the next chunk.
- let next_opt = mem::replace(&mut self.next, None);
- match next_opt {
- None => {}
- Some(mut next) => {
- // We assume that the next chunk is completely filled.
- let capacity = next.capacity;
- next.destroy(capacity)
- }
+ let next = self.next;
+ let size = calculate_size::<T>(self.capacity);
+ deallocate(self as *mut TypedArenaChunk<T> as *mut u8, size,
+ mem::min_align_of::<TypedArenaChunk<T>>());
+ if next.is_not_null() {
+ let capacity = (*next).capacity;
+ (*next).destroy(capacity);
}
}
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
- let chunk = TypedArenaChunk::<T>::new(None, capacity);
- TypedArena {
- ptr: Cell::new(chunk.start() as *const T),
- end: Cell::new(chunk.end() as *const T),
- first: RefCell::new(Some(chunk)),
+ unsafe {
+ // The first chunk has no successor, so it is linked to null.
+ let chunk = TypedArenaChunk::<T>::new(ptr::mut_null(), capacity);
+ TypedArena {
+ ptr: Cell::new((*chunk).start() as *const T),
+ end: Cell::new((*chunk).end() as *const T),
+ first: RefCell::new(chunk),
+ }
 }
}
/// Grows the arena.
#[inline(never)]
fn grow(&self) {
- let chunk = self.first.borrow_mut().take_unwrap();
- let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
- let chunk = TypedArenaChunk::<T>::new(Some(chunk), new_capacity);
- self.ptr.set(chunk.start() as *const T);
- self.end.set(chunk.end() as *const T);
- *self.first.borrow_mut() = Some(chunk)
+ unsafe {
+ let chunk = *self.first.borrow_mut();
+ // Double the previous chunk's capacity; unwrap() fails on
+ // overflow.
+ let new_capacity = (*chunk).capacity.checked_mul(&2).unwrap();
+ // The new chunk becomes the list head, linked to the old one.
+ let chunk = TypedArenaChunk::<T>::new(chunk, new_capacity);
+ self.ptr.set((*chunk).start() as *const T);
+ self.end.set((*chunk).end() as *const T);
+ *self.first.borrow_mut() = chunk
+ }
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
    fn drop(&mut self) {
- // Determine how much was filled.
- let start = self.first.borrow().get_ref().start() as uint;
- let end = self.ptr.get() as uint;
- let diff = (end - start) / mem::size_of::<T>();
-
- // Pass that to the `destroy` method.
 unsafe {
- self.first.borrow_mut().get_mut_ref().destroy(diff)
+ // Determine how much was filled.
+ // NOTE(review): `first` now holds a raw *mut chunk, so `as_ref()`
+ // here must be the raw-pointer-to-Option<&_> conversion — confirm
+ // this is intended rather than dereferencing with `(*...)`.
+ let start = self.first.borrow().as_ref().unwrap().start() as uint;
+ let end = self.ptr.get() as uint;
+ let diff = (end - start) / mem::size_of::<T>();
+
+ // Pass that to the `destroy` method.
+ (**self.first.borrow_mut()).destroy(diff)
 }
 }
 }
static TRUE: bool = true;
static FALSE: bool = false;
-#[deriving(Clone)]
-struct SmallBitv {
- /// only the lowest nbits of this value are used. the rest is undefined.
- bits: uint
-}
-
-#[deriving(Clone)]
-struct BigBitv {
- storage: Vec<uint>
-}
-
-#[deriving(Clone)]
-enum BitvVariant { Big(BigBitv), Small(SmallBitv) }
-
/// The bitvector type.
///
/// # Example
#[cfg(test)]
mod tests {
use std::prelude::*;
+ use std::iter::range_step;
use std::uint;
use std::rand;
use std::rand::Rng;
#[test]
fn test_bitv_iterator() {
- let bools = [true, false, true, true];
+ let bools = vec![true, false, true, true];
 let bitv: Bitv = bools.iter().map(|n| *n).collect();
- for (act, &ex) in bitv.iter().zip(bools.iter()) {
- assert_eq!(ex, act);
- }
+ // BUG FIX: the assert_eq! statements below were missing their
+ // trailing semicolons; the first one is followed by a `let`, which
+ // fails to parse without one.
+ assert_eq!(bitv.iter().collect::<Vec<bool>>(), bools);
+
+ // Also exercise a bitv longer than a single uint block.
+ let long = Vec::from_fn(10000, |i| i % 2 == 0);
+ let bitv: Bitv = long.iter().map(|n| *n).collect();
+ assert_eq!(bitv.iter().collect::<Vec<bool>>(), long);
}
#[test]
let idxs: Vec<uint> = bitv.iter().collect();
assert_eq!(idxs, vec!(0, 2, 3));
+
+ let long: BitvSet = range(0u, 10000).map(|n| n % 2 == 0).collect();
+ let real = range_step(0, 10000, 2).collect::<Vec<uint>>();
+
+ let idxs: Vec<uint> = long.iter().collect();
+ assert_eq!(idxs, real);
}
#[test]
}
#[bench]
- fn bench_bitv_big(b: &mut Bencher) {
+ fn bench_bitv_set_big_fixed(b: &mut Bencher) {
let mut r = rng();
let mut bitv = Bitv::with_capacity(BENCH_BITS, false);
b.iter(|| {
}
#[bench]
- fn bench_bitv_small(b: &mut Bencher) {
+ fn bench_bitv_set_big_variable(b: &mut Bencher) {
+ let mut r = rng();
+ let mut bitv = Bitv::with_capacity(BENCH_BITS, false);
+ b.iter(|| {
+ for i in range(0u, 100) {
+ bitv.set((r.next_u32() as uint) % BENCH_BITS, r.gen());
+ }
+ &bitv
+ })
+ }
+
+ #[bench]
+ fn bench_bitv_set_small(b: &mut Bencher) {
let mut r = rng();
let mut bitv = Bitv::with_capacity(uint::BITS, false);
b.iter(|| {
}
#[bench]
- fn bench_bitv_set_small(b: &mut Bencher) {
+ fn bench_bitvset_small(b: &mut Bencher) {
let mut r = rng();
let mut bitv = BitvSet::new();
b.iter(|| {
}
#[bench]
- fn bench_bitv_set_big(b: &mut Bencher) {
+ fn bench_bitvset_big(b: &mut Bencher) {
let mut r = rng();
let mut bitv = BitvSet::new();
b.iter(|| {
value: T,
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Items<'a, T> {
- head: &'a Link<T>,
- tail: Rawlink<Node<T>>,
- nelem: uint,
-}
-
/// An iterator over references to the items of a `DList`.
-#[cfg(not(stage0))]
pub struct Items<'a, T:'a> {
head: &'a Link<T>,
tail: Rawlink<Node<T>>,
fn clone(&self) -> Items<'a, T> { *self }
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct MutItems<'a, T> {
- list: &'a mut DList<T>,
- head: Rawlink<Node<T>>,
- tail: Rawlink<Node<T>>,
- nelem: uint,
-}
-
/// An iterator over mutable references to the items of a `DList`.
-#[cfg(not(stage0))]
pub struct MutItems<'a, T:'a> {
list: &'a mut DList<T>,
head: Rawlink<Node<T>>,
/// Convert the `Rawlink` into an Option value
fn resolve_immut<'a>(&self) -> Option<&'a T> {
unsafe {
- mem::transmute(self.p.to_option())
+ self.p.as_ref()
}
}
None => return self.list.push_front_node(ins_node),
Some(prev) => prev,
};
- let node_own = prev_node.next.take_unwrap();
+ let node_own = prev_node.next.take().unwrap();
ins_node.next = link_with_prev(node_own, Rawlink::some(&mut *ins_node));
prev_node.next = link_with_prev(ins_node, Rawlink::some(prev_node));
self.list.length += 1;
fn test_basic() {
let mut m: DList<Box<int>> = DList::new();
assert_eq!(m.pop_front(), None);
- assert_eq!(m.pop_back(), None);
+ assert_eq!(m.pop(), None);
assert_eq!(m.pop_front(), None);
m.push_front(box 1);
assert_eq!(m.pop_front(), Some(box 1));
- m.push_back(box 2);
- m.push_back(box 3);
+ m.push(box 2);
+ m.push(box 3);
assert_eq!(m.len(), 2);
assert_eq!(m.pop_front(), Some(box 2));
assert_eq!(m.pop_front(), Some(box 3));
assert_eq!(m.len(), 0);
assert_eq!(m.pop_front(), None);
- m.push_back(box 1);
- m.push_back(box 3);
- m.push_back(box 5);
- m.push_back(box 7);
+ m.push(box 1);
+ m.push(box 3);
+ m.push(box 5);
+ m.push(box 7);
assert_eq!(m.pop_front(), Some(box 1));
let mut n = DList::new();
{
let mut m = DList::new();
let mut n = DList::new();
- n.push_back(2i);
+ n.push(2i);
m.append(n);
assert_eq!(m.len(), 1);
- assert_eq!(m.pop_back(), Some(2));
+ assert_eq!(m.pop(), Some(2));
check_links(&m);
}
{
let mut m = DList::new();
let n = DList::new();
- m.push_back(2i);
+ m.push(2i);
m.append(n);
assert_eq!(m.len(), 1);
- assert_eq!(m.pop_back(), Some(2));
+ assert_eq!(m.pop(), Some(2));
check_links(&m);
}
{
let mut m = DList::new();
let mut n = DList::new();
- n.push_back(2i);
+ n.push(2i);
m.prepend(n);
assert_eq!(m.len(), 1);
- assert_eq!(m.pop_back(), Some(2));
+ assert_eq!(m.pop(), Some(2));
check_links(&m);
}
#[test]
fn test_iterator_clone() {
let mut n = DList::new();
- n.push_back(2i);
- n.push_back(3);
- n.push_back(4);
+ n.push(2i);
+ n.push(3);
+ n.push(4);
let mut it = n.iter();
it.next();
let mut jt = it.clone();
let mut n = DList::new();
assert!(n.mut_iter().next().is_none());
n.push_front(4i);
- n.push_back(5);
+ n.push(5);
let mut it = n.mut_iter();
assert_eq!(it.size_hint(), (2, Some(2)));
assert!(it.next().is_some());
assert_eq!(n.pop_front(), Some(1));
let mut m = DList::new();
- m.push_back(2i);
- m.push_back(4);
+ m.push(2i);
+ m.push(4);
m.insert_ordered(3);
check_links(&m);
assert_eq!(vec![2,3,4], m.move_iter().collect::<Vec<int>>());
assert!(n == m);
n.push_front(1);
assert!(n != m);
- m.push_back(1);
+ m.push(1);
assert!(n == m);
let n = list_from([2i,3,4]);
assert!(hash::hash(&x) == hash::hash(&y));
- x.push_back(1i);
- x.push_back(2);
- x.push_back(3);
+ x.push(1i);
+ x.push(2);
+ x.push(3);
y.push_front(3i);
y.push_front(2);
let r: u8 = rand::random();
match r % 6 {
0 => {
- m.pop_back();
+ m.pop();
v.pop();
}
1 => {
m.pop_front();
- v.shift();
+ v.remove(0);
}
2 | 4 => {
m.push_front(-i);
- v.unshift(-i);
+ v.insert(0, -i);
}
3 | 5 | _ => {
- m.push_back(i);
+ m.push(i);
v.push(i);
}
}
fn bench_push_back(b: &mut test::Bencher) {
let mut m: DList<int> = DList::new();
b.iter(|| {
- m.push_back(0);
+ m.push(0);
})
}
fn bench_push_back_pop_back(b: &mut test::Bencher) {
let mut m: DList<int> = DList::new();
b.iter(|| {
- m.push_back(0);
- m.pop_back();
+ m.push(0);
+ m.pop();
})
}
( $($name:ident)+) => (
impl<S: Writer, $($name: Hash<S>),*> Hash<S> for ($($name,)*) {
- #[allow(uppercase_variables)]
#[inline]
+ #[allow(non_snake_case)]
fn hash(&self, state: &mut S) {
match *self {
($(ref $name,)*) => {
#[cfg(test)]
mod tests {
- use std::prelude::*;
use std::mem;
use slice::ImmutableSlice;
#![feature(unsafe_destructor, import_shadowing)]
#![no_std]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
#[phase(plugin, link)] extern crate core;
extern crate unicode;
extern crate alloc;
}
/// A mutable collection of values which are distinct from one another that
-/// can be mutaed.
+/// can be mutated.
pub trait MutableSet<T>: Set<T> + Mutable {
/// Adds a value to the set. Returns `true` if the value was not already
/// present in the set.
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Items <'a, T> {
- iter: slice::Items<'a, T>,
-}
-
/// `PriorityQueue` iterator.
-#[cfg(not(stage0))]
pub struct Items <'a, T:'a> {
iter: slice::Items<'a, T>,
}
elts: Vec::from_fn(cmp::max(MINIMUM_CAPACITY, n), |_| None)}
}
- /// Retrieva an element in the `RingBuf` by index.
+ /// Retrieve an element in the `RingBuf` by index.
///
/// Fails if there is no element with the given index.
///
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Items<'a, T> {
- lo: uint,
- index: uint,
- rindex: uint,
- elts: &'a [Option<T>],
-}
-
/// `RingBuf` iterator.
-#[cfg(not(stage0))]
pub struct Items<'a, T:'a> {
lo: uint,
index: uint,
}
let raw_index = raw_index(self.lo, self.elts.len(), self.index);
self.index += 1;
- Some(self.elts[raw_index].get_ref())
+ Some(self.elts[raw_index].as_ref().unwrap())
}
#[inline]
}
self.rindex -= 1;
let raw_index = raw_index(self.lo, self.elts.len(), self.rindex);
- Some(self.elts[raw_index].get_ref())
+ Some(self.elts[raw_index].as_ref().unwrap())
}
}
None
} else {
let raw_index = raw_index(self.lo, self.elts.len(), self.index + j);
- Some(self.elts[raw_index].get_ref())
+ Some(self.elts[raw_index].as_ref().unwrap())
}
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct MutItems<'a, T> {
- remaining1: &'a mut [Option<T>],
- remaining2: &'a mut [Option<T>],
- nelts: uint,
-}
-
/// `RingBuf` mutable iterator.
-#[cfg(not(stage0))]
pub struct MutItems<'a, T:'a> {
remaining1: &'a mut [Option<T>],
remaining2: &'a mut [Option<T>],
assert_eq!(d.len(), 0u);
d.push_front(17i);
d.push_front(42i);
- d.push_back(137);
+ d.push(137);
assert_eq!(d.len(), 3u);
- d.push_back(137);
+ d.push(137);
assert_eq!(d.len(), 4u);
debug!("{:?}", d.front());
assert_eq!(*d.front().unwrap(), 42);
let mut i = d.pop_front();
debug!("{:?}", i);
assert_eq!(i, Some(42));
- i = d.pop_back();
+ i = d.pop();
debug!("{:?}", i);
assert_eq!(i, Some(137));
- i = d.pop_back();
+ i = d.pop();
debug!("{:?}", i);
assert_eq!(i, Some(137));
- i = d.pop_back();
+ i = d.pop();
debug!("{:?}", i);
assert_eq!(i, Some(17));
assert_eq!(d.len(), 0u);
- d.push_back(3);
+ d.push(3);
assert_eq!(d.len(), 1u);
d.push_front(2);
assert_eq!(d.len(), 2u);
- d.push_back(4);
+ d.push(4);
assert_eq!(d.len(), 3u);
d.push_front(1);
assert_eq!(d.len(), 4u);
assert_eq!(deq.len(), 0);
deq.push_front(a);
deq.push_front(b);
- deq.push_back(c);
+ deq.push(c);
assert_eq!(deq.len(), 3);
- deq.push_back(d);
+ deq.push(d);
assert_eq!(deq.len(), 4);
assert_eq!(deq.front(), Some(&b));
assert_eq!(deq.back(), Some(&d));
assert_eq!(deq.pop_front(), Some(b));
- assert_eq!(deq.pop_back(), Some(d));
- assert_eq!(deq.pop_back(), Some(c));
- assert_eq!(deq.pop_back(), Some(a));
+ assert_eq!(deq.pop(), Some(d));
+ assert_eq!(deq.pop(), Some(c));
+ assert_eq!(deq.pop(), Some(a));
assert_eq!(deq.len(), 0);
- deq.push_back(c);
+ deq.push(c);
assert_eq!(deq.len(), 1);
deq.push_front(b);
assert_eq!(deq.len(), 2);
- deq.push_back(d);
+ deq.push(d);
assert_eq!(deq.len(), 3);
deq.push_front(a);
assert_eq!(deq.len(), 4);
assert_eq!(deq.len(), 0);
deq.push_front(a.clone());
deq.push_front(b.clone());
- deq.push_back(c.clone());
+ deq.push(c.clone());
assert_eq!(deq.len(), 3);
- deq.push_back(d.clone());
+ deq.push(d.clone());
assert_eq!(deq.len(), 4);
assert_eq!((*deq.front().unwrap()).clone(), b.clone());
assert_eq!((*deq.back().unwrap()).clone(), d.clone());
assert_eq!(deq.pop_front().unwrap(), b.clone());
- assert_eq!(deq.pop_back().unwrap(), d.clone());
- assert_eq!(deq.pop_back().unwrap(), c.clone());
- assert_eq!(deq.pop_back().unwrap(), a.clone());
+ assert_eq!(deq.pop().unwrap(), d.clone());
+ assert_eq!(deq.pop().unwrap(), c.clone());
+ assert_eq!(deq.pop().unwrap(), a.clone());
assert_eq!(deq.len(), 0);
- deq.push_back(c.clone());
+ deq.push(c.clone());
assert_eq!(deq.len(), 1);
deq.push_front(b.clone());
assert_eq!(deq.len(), 2);
- deq.push_back(d.clone());
+ deq.push(d.clone());
assert_eq!(deq.len(), 3);
deq.push_front(a.clone());
assert_eq!(deq.len(), 4);
let mut deq = RingBuf::new();
for i in range(0u, 66) {
- deq.push_back(i);
+ deq.push(i);
}
for i in range(0u, 66) {
fn bench_push_back(b: &mut test::Bencher) {
let mut deq = RingBuf::new();
b.iter(|| {
- deq.push_back(0i);
+ deq.push(0i);
})
}
#[test]
fn test_with_capacity() {
let mut d = RingBuf::with_capacity(0);
- d.push_back(1i);
+ d.push(1i);
assert_eq!(d.len(), 1);
let mut d = RingBuf::with_capacity(50);
- d.push_back(1i);
+ d.push(1i);
assert_eq!(d.len(), 1);
}
#[test]
fn test_reserve_exact() {
let mut d = RingBuf::new();
- d.push_back(0u64);
+ d.push(0u64);
d.reserve_exact(50);
assert_eq!(d.elts.capacity(), 50);
let mut d = RingBuf::new();
- d.push_back(0u32);
+ d.push(0u32);
d.reserve_exact(50);
assert_eq!(d.elts.capacity(), 50);
}
#[test]
fn test_reserve() {
let mut d = RingBuf::new();
- d.push_back(0u64);
+ d.push(0u64);
d.reserve(50);
assert_eq!(d.elts.capacity(), 64);
let mut d = RingBuf::new();
- d.push_back(0u32);
+ d.push(0u32);
d.reserve(50);
assert_eq!(d.elts.capacity(), 64);
}
assert_eq!(d.iter().size_hint(), (0, Some(0)));
for i in range(0i, 5) {
- d.push_back(i);
+ d.push(i);
}
{
let b: &[_] = &[&0,&1,&2,&3,&4];
assert_eq!(d.iter().rev().next(), None);
for i in range(0i, 5) {
- d.push_back(i);
+ d.push(i);
}
{
let b: &[_] = &[&4,&3,&2,&1,&0];
let mut d = RingBuf::with_capacity(3);
assert!(d.mut_iter().rev().next().is_none());
- d.push_back(1i);
- d.push_back(2);
- d.push_back(3);
+ d.push(1i);
+ d.push(2);
+ d.push(3);
assert_eq!(d.pop_front(), Some(1));
- d.push_back(4);
+ d.push(4);
assert_eq!(d.mut_iter().rev().map(|x| *x).collect::<Vec<int>>(),
vec!(4, 3, 2));
let mut d = RingBuf::new();
d.push_front(17i);
d.push_front(42);
- d.push_back(137);
- d.push_back(137);
+ d.push(137);
+ d.push(137);
assert_eq!(d.len(), 4u);
let mut e = d.clone();
assert_eq!(e.len(), 4u);
while !d.is_empty() {
- assert_eq!(d.pop_back(), e.pop_back());
+ assert_eq!(d.pop(), e.pop());
}
assert_eq!(d.len(), 0u);
assert_eq!(e.len(), 0u);
d.push_front(137i);
d.push_front(17);
d.push_front(42);
- d.push_back(137);
+ d.push(137);
let mut e = RingBuf::with_capacity(0);
- e.push_back(42);
- e.push_back(17);
- e.push_back(137);
- e.push_back(137);
+ e.push(42);
+ e.push(17);
+ e.push(137);
+ e.push(137);
assert!(&e == &d);
- e.pop_back();
- e.push_back(0);
+ e.pop();
+ e.push(0);
assert!(e != d);
e.clear();
assert!(e == RingBuf::new());
pub use core::slice::{Chunks, Slice, ImmutableSlice, ImmutablePartialEqSlice};
pub use core::slice::{ImmutableOrdSlice, MutableSlice, Items, MutItems};
pub use core::slice::{MutSplits, MutChunks, Splits};
-pub use core::slice::{bytes, ref_slice, MutableCloneableSlice};
+pub use core::slice::{bytes, mut_ref_slice, ref_slice, MutableCloneableSlice};
pub use core::slice::{Found, NotFound};
// Functional utilities
/// Updates a value in the map. If the key already exists in the map,
/// modifies the value with `ff` taking `oldval, newval`.
/// Otherwise, sets the value to `newval`.
- /// Returasn `true` if the key did not already exist in the map.
+ /// Returns `true` if the key did not already exist in the map.
///
/// # Example
///
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Entries<'a, T> {
- front: uint,
- back: uint,
- iter: slice::Items<'a, Option<T>>
-}
-
/// Forward iterator over a map.
-#[cfg(not(stage0))]
pub struct Entries<'a, T:'a> {
front: uint,
back: uint,
iterator!(impl Entries -> (uint, &'a T), get_ref)
double_ended_iterator!(impl Entries -> (uint, &'a T), get_ref)
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct MutEntries<'a, T> {
- front: uint,
- back: uint,
- iter: slice::MutItems<'a, Option<T>>
-}
-
/// Forward iterator over the key-value pairs of a map, with the
/// values being mutable.
-#[cfg(not(stage0))]
pub struct MutEntries<'a, T:'a> {
front: uint,
back: uint,
//!
//! # Representation
//!
-//! Rust's string type, `str`, is a sequence of unicode scalar values encoded as a
+//! Rust's string type, `str`, is a sequence of Unicode scalar values encoded as a
//! stream of UTF-8 bytes. All strings are guaranteed to be validly encoded UTF-8
//! sequences. Additionally, strings are not null-terminated and can contain null
//! bytes.
use test::Bencher;
use test::black_box;
use super::*;
- use std::option::{None, Some};
use std::iter::{Iterator, DoubleEndedIterator};
use std::collections::Collection;
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Entries<'a, K, V> {
- stack: Vec<&'a TreeNode<K, V>>,
- // See the comment on MutEntries; this is just to allow
- // code-sharing (for this immutable-values iterator it *could* very
- // well be Option<&'a TreeNode<K,V>>).
- node: *const TreeNode<K, V>,
- remaining_min: uint,
- remaining_max: uint
-}
-
/// Lazy forward iterator over a map
-#[cfg(not(stage0))]
pub struct Entries<'a, K:'a, V:'a> {
stack: Vec<&'a TreeNode<K, V>>,
// See the comment on MutEntries; this is just to allow
remaining_max: uint
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct RevEntries<'a, K, V> {
- iter: Entries<'a, K, V>,
-}
-
/// Lazy backward iterator over a map
-#[cfg(not(stage0))]
pub struct RevEntries<'a, K:'a, V:'a> {
iter: Entries<'a, K, V>,
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct MutEntries<'a, K, V> {
- stack: Vec<&'a mut TreeNode<K, V>>,
- // Unfortunately, we require some unsafe-ness to get around the
- // fact that we would be storing a reference *into* one of the
- // nodes in the stack.
- //
- // As far as the compiler knows, this would let us invalidate the
- // reference by assigning a new value to this node's position in
- // its parent, which would cause this current one to be
- // deallocated so this reference would be invalid. (i.e. the
- // compilers complaints are 100% correct.)
- //
- // However, as far as you humans reading this code know (or are
- // about to know, if you haven't read far enough down yet), we are
- // only reading from the TreeNode.{left,right} fields. the only
- // thing that is ever mutated is the .value field (although any
- // actual mutation that happens is done externally, by the
- // iterator consumer). So, don't be so concerned, rustc, we've got
- // it under control.
- //
- // (This field can legitimately be null.)
- node: *mut TreeNode<K, V>,
- remaining_min: uint,
- remaining_max: uint
-}
-
/// Lazy forward iterator over a map that allows for the mutation of
/// the values.
-#[cfg(not(stage0))]
pub struct MutEntries<'a, K:'a, V:'a> {
stack: Vec<&'a mut TreeNode<K, V>>,
// Unfortunately, we require some unsafe-ness to get around the
remaining_max: uint
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct RevMutEntries<'a, K, V> {
- iter: MutEntries<'a, K, V>,
-}
-
/// Lazy backward iterator over a map
-#[cfg(not(stage0))]
pub struct RevMutEntries<'a, K:'a, V:'a> {
iter: MutEntries<'a, K, V>,
}
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct SetItems<'a, T> {
- iter: Entries<'a, T, ()>
-}
-
/// A lazy forward iterator over a set.
-#[cfg(not(stage0))]
pub struct SetItems<'a, T:'a> {
iter: Entries<'a, T, ()>
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct RevSetItems<'a, T> {
- iter: RevEntries<'a, T, ()>
-}
-
/// A lazy backward iterator over a set.
-#[cfg(not(stage0))]
pub struct RevSetItems<'a, T:'a> {
iter: RevEntries<'a, T, ()>
}
/// A lazy forward iterator over a set that consumes the set while iterating.
pub type MoveSetItems<T> = iter::Map<'static, (T, ()), T, MoveEntries<T, ()>>;
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct DifferenceItems<'a, T> {
- a: Peekable<&'a T, SetItems<'a, T>>,
- b: Peekable<&'a T, SetItems<'a, T>>,
-}
-
/// A lazy iterator producing elements in the set difference (in-order).
-#[cfg(not(stage0))]
pub struct DifferenceItems<'a, T:'a> {
a: Peekable<&'a T, SetItems<'a, T>>,
b: Peekable<&'a T, SetItems<'a, T>>,
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct SymDifferenceItems<'a, T> {
- a: Peekable<&'a T, SetItems<'a, T>>,
- b: Peekable<&'a T, SetItems<'a, T>>,
-}
-
/// A lazy iterator producing elements in the set symmetric difference (in-order).
-#[cfg(not(stage0))]
pub struct SymDifferenceItems<'a, T:'a> {
a: Peekable<&'a T, SetItems<'a, T>>,
b: Peekable<&'a T, SetItems<'a, T>>,
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct IntersectionItems<'a, T> {
- a: Peekable<&'a T, SetItems<'a, T>>,
- b: Peekable<&'a T, SetItems<'a, T>>,
-}
-
/// A lazy iterator producing elements in the set intersection (in-order).
-#[cfg(not(stage0))]
pub struct IntersectionItems<'a, T:'a> {
a: Peekable<&'a T, SetItems<'a, T>>,
b: Peekable<&'a T, SetItems<'a, T>>,
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct UnionItems<'a, T> {
- a: Peekable<&'a T, SetItems<'a, T>>,
- b: Peekable<&'a T, SetItems<'a, T>>,
-}
-
/// A lazy iterator producing elements in the set union (in-order).
-#[cfg(not(stage0))]
pub struct UnionItems<'a, T:'a> {
a: Peekable<&'a T, SetItems<'a, T>>,
b: Peekable<&'a T, SetItems<'a, T>>,
// Remove left horizontal link by rotating right
fn skew<K: Ord, V>(node: &mut Box<TreeNode<K, V>>) {
if node.left.as_ref().map_or(false, |x| x.level == node.level) {
- let mut save = node.left.take_unwrap();
+ let mut save = node.left.take().unwrap();
swap(&mut node.left, &mut save.right); // save.right now None
swap(node, &mut save);
node.right = Some(save);
fn split<K: Ord, V>(node: &mut Box<TreeNode<K, V>>) {
if node.right.as_ref().map_or(false,
|x| x.right.as_ref().map_or(false, |y| y.level == node.level)) {
- let mut save = node.right.take_unwrap();
+ let mut save = node.right.take().unwrap();
swap(&mut node.right, &mut save.left); // save.left now None
save.level += 1;
swap(node, &mut save);
Equal => {
if save.left.is_some() {
if save.right.is_some() {
- let mut left = save.left.take_unwrap();
+ let mut left = save.left.take().unwrap();
if left.right.is_some() {
heir_swap(save, &mut left.right);
} else {
save.left = Some(left);
(remove(&mut save.left, key), true)
} else {
- let new = save.left.take_unwrap();
+ let new = save.left.take().unwrap();
let box TreeNode{value, ..} = replace(save, new);
- *save = save.left.take_unwrap();
+ *save = save.left.take().unwrap();
(Some(value), true)
}
} else if save.right.is_some() {
- let new = save.right.take_unwrap();
+ let new = save.right.take().unwrap();
let box TreeNode{value, ..} = replace(save, new);
(Some(value), true)
} else {
return ret;
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Entries<'a, T> {
- stack: [slice::Items<'a, Child<T>>, .. NUM_CHUNKS],
- length: uint,
- remaining_min: uint,
- remaining_max: uint
-}
-
/// A forward iterator over a map.
-#[cfg(not(stage0))]
pub struct Entries<'a, T:'a> {
stack: [slice::Items<'a, Child<T>>, .. NUM_CHUNKS],
length: uint,
remaining_max: uint
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct MutEntries<'a, T> {
- stack: [slice::MutItems<'a, Child<T>>, .. NUM_CHUNKS],
- length: uint,
- remaining_min: uint,
- remaining_max: uint
-}
-
/// A forward iterator over the key-value pairs of a map, with the
/// values being mutable.
-#[cfg(not(stage0))]
pub struct MutEntries<'a, T:'a> {
stack: [slice::MutItems<'a, Child<T>>, .. NUM_CHUNKS],
length: uint,
iter: Items<'static, T>
}
+impl<T> MoveItems<T> {
+ #[inline]
+ /// Drops all items that have not yet been moved and returns the empty vector.
+ ///
+ /// The original allocation (pointer and capacity) is recycled into a
+ /// zero-length `Vec`, so no reallocation takes place.
+ pub fn unwrap(mut self) -> Vec<T> {
+ unsafe {
+ // Exhaust the iterator first so every unconsumed element is
+ // dropped before the buffer is reused.
+ for _x in self { }
+ // Salvage the allocation fields, then forget `self` so its
+ // destructor does not free the buffer we are about to reuse.
+ let MoveItems { allocation, cap, iter: _iter } = self;
+ mem::forget(self);
+ Vec { ptr: allocation, cap: cap, len: 0 }
+ }
+ }
+}
+
impl<T> Iterator<T> for MoveItems<T> {
#[inline]
fn next<'a>(&'a mut self) -> Option<T> {
assert_eq!(vec.swap_remove(0), None);
}
+ #[test]
+ fn test_move_iter_unwrap() {
+ // unwrap() must hand back the exact original allocation, emptied.
+ let mut vec: Vec<uint> = Vec::with_capacity(7);
+ vec.push(1);
+ vec.push(2);
+ let ptr = vec.as_ptr();
+ vec = vec.move_iter().unwrap();
+ // Same buffer and capacity as before, but no elements remain.
+ assert_eq!(vec.as_ptr(), ptr);
+ assert_eq!(vec.capacity(), 7);
+ assert_eq!(vec.len(), 0);
+ }
+
#[bench]
fn bench_new(b: &mut Bencher) {
b.iter(|| {
//! // Take a reference to the inside of cache cell
//! let mut cache = self.span_tree_cache.borrow_mut();
//! if cache.is_some() {
-//! return cache.get_ref().clone();
+//! return cache.as_ref().unwrap().clone();
//! }
//!
//! let span_tree = self.calc_span_tree();
/// Wraps a borrowed reference to a value in a `RefCell` box.
#[unstable]
-#[cfg(not(stage0))]
pub struct Ref<'b, T:'b> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_parent: &'b RefCell<T>
}
-/// Dox.
-#[unstable]
-#[cfg(stage0)]
-pub struct Ref<'b, T> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _parent: &'b RefCell<T>
-}
-
#[unsafe_destructor]
#[unstable]
impl<'b, T> Drop for Ref<'b, T> {
/// Wraps a mutable borrowed reference to a value in a `RefCell` box.
#[unstable]
-#[cfg(not(stage0))]
pub struct RefMut<'b, T:'b> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
_parent: &'b RefCell<T>
}
-/// Dox.
-#[unstable]
-#[cfg(stage0)]
-pub struct RefMut<'b, T> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _parent: &'b RefCell<T>
-}
-
#[unsafe_destructor]
#[unstable]
impl<'b, T> Drop for RefMut<'b, T> {
//!
//! For more details, see ::unicode::char (a.k.a. std::char)
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
#![doc(primitive = "char")]
use mem::transmute;
/// - Tab, CR and LF are escaped as '\t', '\r' and '\n' respectively.
/// - Single-quote, double-quote and backslash chars are backslash-escaped.
/// - Any other chars in the range [0x20,0x7e] are not escaped.
-/// - Any other chars are given hex unicode escapes; see `escape_unicode`.
+/// - Any other chars are given hex Unicode escapes; see `escape_unicode`.
///
pub fn escape_default(c: char, f: |char|) {
match c {
/// * Single-quote, double-quote and backslash chars are backslash-
/// escaped.
/// * Any other chars in the range [0x20,0x7e] are not escaped.
- /// * Any other chars are given hex unicode escapes; see `escape_unicode`.
+ /// * Any other chars are given hex Unicode escapes; see `escape_unicode`.
fn escape_default(&self, f: |char|);
/// Returns the number of bytes this character would need if encoded in
unsafe { intrinsics::abort() }
}
+#[cold] #[inline(never)]
+pub fn begin_unwind_string(msg: &str, file: &(&'static str, uint)) -> ! {
+ format_args!(|fmt| begin_unwind(fmt, file), "{}", msg)
+}
+
#[cold] #[inline(never)]
pub fn begin_unwind(fmt: &fmt::Arguments, file_line: &(&'static str, uint)) -> ! {
#[allow(ctypes)]
try_fn(&mut *f.mutate, drop)
}
-#[cfg(not(stage0))]
struct Finallyalizer<'a,A:'a> {
mutate: &'a mut A,
dtor: |&mut A|: 'a
}
-#[cfg(stage0)]
-struct Finallyalizer<'a,A> {
- mutate: &'a mut A,
- dtor: |&mut A|: 'a
-}
-
#[unsafe_destructor]
impl<'a,A> Drop for Finallyalizer<'a,A> {
#[inline]
use char::Char;
let align = match self.align {
rt::AlignUnknown => default,
- rt::AlignLeft | rt::AlignRight => self.align
+ _ => self.align
};
- if align == rt::AlignLeft {
- try!(f(self));
- }
+
+ let (pre_pad, post_pad) = match align {
+ rt::AlignLeft => (0u, padding),
+ rt::AlignRight | rt::AlignUnknown => (padding, 0u),
+ rt::AlignCenter => (padding / 2, (padding + 1) / 2),
+ };
+
let mut fill = [0u8, ..4];
let len = self.fill.encode_utf8(fill).unwrap_or(0);
- for _ in range(0, padding) {
+
+ for _ in range(0, pre_pad) {
try!(self.buf.write(fill.slice_to(len)));
}
- if align == rt::AlignRight {
- try!(f(self));
+
+ try!(f(self));
+
+ for _ in range(0, post_pad) {
+ try!(self.buf.write(fill.slice_to(len)));
}
+
Ok(())
}
() => ();
( $($name:ident,)+ ) => (
impl<$($name:Show),*> Show for ($($name,)*) {
- #[allow(uppercase_variables, dead_assignment)]
+ #[allow(non_snake_case, dead_assignment)]
fn fmt(&self, f: &mut Formatter) -> Result {
try!(write!(f, "("));
let ($(ref $name,)*) = *self;
AlignLeft,
/// Indication that contents should be right-aligned.
AlignRight,
+ /// Indication that contents should be center-aligned.
+ AlignCenter,
/// No alignment was requested.
AlignUnknown,
}
fn visit_char(&mut self) -> bool;
fn visit_estr_slice(&mut self) -> bool;
- // NOTE: remove after snapshot
- #[cfg(stage0)]
- fn visit_estr_fixed(&mut self, n: uint, sz: uint, align: uint) -> bool;
fn visit_box(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
fn visit_uniq(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
fn visit_rptr(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
fn visit_evec_slice(&mut self, mtbl: uint, inner: *const TyDesc) -> bool;
- // NOTE: remove after snapshot
- #[cfg(stage0)]
- fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
- mtbl: uint, inner: *const TyDesc) -> bool;
- #[cfg(not(stage0))]
fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
inner: *const TyDesc) -> bool;
/// Gives the address for the return value of the enclosing function.
///
- /// Using this instrinsic in a function that does not use an out pointer
+ /// Using this intrinsic in a function that does not use an out pointer
/// will trigger a compiler error.
pub fn return_address() -> *const u8;
/// A mutable reference to an iterator
#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
-#[cfg(not(stage0))]
pub struct ByRef<'a, T:'a> {
iter: &'a mut T
}
-/// Dox
-#[must_use = "iterator adaptors are lazy and do nothing unless consumed"]
-#[cfg(stage0)]
-pub struct ByRef<'a, T> {
- iter: &'a mut T
-}
-
impl<'a, A, T: Iterator<A>+'a> Iterator<A> for ByRef<'a, T> {
#[inline]
fn next(&mut self) -> Option<A> { self.iter.next() }
if *first {
*first = false;
} else {
- val.mutate(|x| (*f)(x));
+ match val.take() {
+ Some(x) => {
+ *val = Some((*f)(x))
+ }
+ None => {}
+ }
}
val.clone()
})
/// A type which is considered "not sync", meaning that
/// its contents are not threadsafe, hence they cannot be
/// shared between tasks.
- #[lang="no_share_bound"]
+ #[lang="no_sync_bound"]
#[deriving(PartialEq,Clone)]
pub struct NoSync;
#![no_std]
#![feature(globs, intrinsics, lang_items, macro_rules, managed_boxes, phase)]
-#![feature(simd, unsafe_destructor, issue_5723_bootstrap)]
+#![feature(simd, unsafe_destructor)]
#![deny(missing_doc)]
mod macros;
() => (
fail!("{}", "explicit failure")
);
- ($msg:expr) => (
- fail!("{}", $msg)
- );
+ ($msg:expr) => ({
+ static _FILE_LINE: (&'static str, uint) = (file!(), line!());
+ ::core::failure::begin_unwind_string($msg, &_FILE_LINE)
+ });
($fmt:expr, $($arg:tt)*) => ({
// a closure can't have return type !, so we need a full
// function to pass to format_args!, *and* we need the
/// Computes the absolute value.
///
/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`.
+ ///
+ /// For signed integers, `::MIN` will be returned if the number is `::MIN`.
fn abs(&self) -> Self;
/// The positive difference of two numbers.
/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
/// * `NaN` if the number is `NaN`
///
- /// For `int`:
+ /// For signed integers:
///
/// * `0` if the number is zero
/// * `1` if the number is positive
/// Computes the absolute value.
///
/// For `f32` and `f64`, `NaN` will be returned if the number is `NaN`
+///
+/// For signed integers, `::MIN` will be returned if the number is `::MIN`.
#[inline(always)]
pub fn abs<T: Signed>(value: T) -> T {
value.abs()
/// * `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
/// * `NaN` if the number is `NaN`
///
-/// For int:
+/// For signed integers:
///
/// * `0` if the number is zero
/// * `1` if the number is positive
///
/// assert_eq!(n.count_ones(), 3);
/// ```
- fn count_ones(self) -> Self;
+ fn count_ones(self) -> uint;
/// Returns the number of zeros in the binary representation of the integer.
///
/// assert_eq!(n.count_zeros(), 5);
/// ```
#[inline]
- fn count_zeros(self) -> Self {
+ fn count_zeros(self) -> uint {
(!self).count_ones()
}
///
/// assert_eq!(n.leading_zeros(), 10);
/// ```
- fn leading_zeros(self) -> Self;
+ fn leading_zeros(self) -> uint;
/// Returns the number of trailing zeros in the binary representation
/// of the integer.
///
/// assert_eq!(n.trailing_zeros(), 3);
/// ```
- fn trailing_zeros(self) -> Self;
+ fn trailing_zeros(self) -> uint;
/// Shifts the bits to the left by a specified amount, `n`, wrapping
/// the truncated bits to the end of the resulting integer.
($T:ty, $BITS:expr, $ctpop:path, $ctlz:path, $cttz:path, $bswap:path) => {
impl Int for $T {
#[inline]
- fn count_ones(self) -> $T { unsafe { $ctpop(self) } }
+ fn count_ones(self) -> uint { unsafe { $ctpop(self) as uint } }
#[inline]
- fn leading_zeros(self) -> $T { unsafe { $ctlz(self) } }
+ fn leading_zeros(self) -> uint { unsafe { $ctlz(self) as uint } }
#[inline]
- fn trailing_zeros(self) -> $T { unsafe { $cttz(self) } }
+ fn trailing_zeros(self) -> uint { unsafe { $cttz(self) as uint } }
#[inline]
fn rotate_left(self, n: uint) -> $T {
($T:ty, $U:ty) => {
impl Int for $T {
#[inline]
- fn count_ones(self) -> $T { (self as $U).count_ones() as $T }
+ fn count_ones(self) -> uint { (self as $U).count_ones() }
#[inline]
- fn leading_zeros(self) -> $T { (self as $U).leading_zeros() as $T }
+ fn leading_zeros(self) -> uint { (self as $U).leading_zeros() }
#[inline]
- fn trailing_zeros(self) -> $T { (self as $U).trailing_zeros() as $T }
+ fn trailing_zeros(self) -> uint { (self as $U).trailing_zeros() }
#[inline]
fn rotate_left(self, n: uint) -> $T { (self as $U).rotate_left(n) as $T }
FnMut<($($args,)*),Result>
for extern "Rust" fn($($args: $args,)*) -> Result {
#[rust_call_abi_hack]
- #[allow(uppercase_variables)]
+ #[allow(non_snake_case)]
fn call_mut(&mut self, args: ($($args,)*)) -> Result {
let ($($args,)*) = args;
(*self)($($args,)*)
//! }
//! ```
+#![stable]
+
use cmp::{PartialEq, Eq, Ord};
use default::Default;
use slice::Slice;
/// The `Option` type.
#[deriving(Clone, PartialEq, PartialOrd, Eq, Ord, Show)]
+#[stable]
pub enum Option<T> {
/// No value
None,
/// Returns `true` if the option is a `Some` value
#[inline]
+ #[stable]
pub fn is_some(&self) -> bool {
match *self {
Some(_) => true,
/// Returns `true` if the option is a `None` value
#[inline]
+ #[stable]
pub fn is_none(&self) -> bool {
!self.is_some()
}
/// println!("still can print num_as_str: {}", num_as_str);
/// ```
#[inline]
+ #[stable]
pub fn as_ref<'r>(&'r self) -> Option<&'r T> {
match *self { Some(ref x) => Some(x), None => None }
}
/// Convert from `Option<T>` to `Option<&mut T>`
#[inline]
+ #[unstable = "waiting for mut conventions"]
pub fn as_mut<'r>(&'r mut self) -> Option<&'r mut T> {
match *self { Some(ref mut x) => Some(x), None => None }
}
/// Convert from `Option<T>` to `&mut [T]` (without copying)
#[inline]
+ #[unstable = "waiting for mut conventions"]
pub fn as_mut_slice<'r>(&'r mut self) -> &'r mut [T] {
match *self {
Some(ref mut x) => {
/// Fails if the value is a `None` with a custom failure message provided by
/// `msg`.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn expect(self, msg: &str) -> T {
match self {
Some(val) => val,
/// Instead, prefer to use pattern matching and handle the `None`
/// case explicitly.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap(self) -> T {
match self {
Some(val) => val,
/// Returns the contained value or a default.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap_or(self, def: T) -> T {
match self {
Some(x) => x,
/// Returns the contained value or computes it from a closure.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap_or_else(self, f: || -> T) -> T {
match self {
Some(x) => x,
/// let num_as_int: Option<uint> = num_as_str.map(|n| n.len());
/// ```
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn map<U>(self, f: |T| -> U) -> Option<U> {
match self { Some(x) => Some(f(x)), None => None }
}
/// Applies a function to the contained value or returns a default.
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn map_or<U>(self, def: U, f: |T| -> U) -> U {
match self { None => def, Some(t) => f(t) }
}
+ /// Applies a function to the contained value or computes a default.
+ #[inline]
+ #[unstable = "waiting for unboxed closures"]
+ pub fn map_or_else<U>(self, def: || -> U, f: |T| -> U) -> U {
+ match self { None => def(), Some(t) => f(t) }
+ }
+
+ /// Deprecated.
+ ///
/// Applies a function to the contained value or does nothing.
/// Returns true if the contained value was mutated.
+ #[deprecated = "removed due to lack of use"]
pub fn mutate(&mut self, f: |T| -> T) -> bool {
if self.is_some() {
- *self = Some(f(self.take_unwrap()));
+ *self = Some(f(self.take().unwrap()));
true
} else { false }
}
+ /// Deprecated.
+ ///
/// Applies a function to the contained value or sets it to a default.
/// Returns true if the contained value was mutated, or false if set to the default.
+ #[deprecated = "removed due to lack of use"]
pub fn mutate_or_set(&mut self, def: T, f: |T| -> T) -> bool {
if self.is_some() {
- *self = Some(f(self.take_unwrap()));
+ *self = Some(f(self.take().unwrap()));
true
} else {
*self = Some(def);
/// Returns an iterator over the possibly contained value.
#[inline]
+ #[unstable = "waiting for iterator conventions"]
pub fn iter<'r>(&'r self) -> Item<&'r T> {
Item{opt: self.as_ref()}
}
/// Returns a mutable iterator over the possibly contained value.
#[inline]
+ #[unstable = "waiting for iterator conventions"]
pub fn mut_iter<'r>(&'r mut self) -> Item<&'r mut T> {
Item{opt: self.as_mut()}
}
/// Returns a consuming iterator over the possibly contained value.
#[inline]
+ #[unstable = "waiting for iterator conventions"]
pub fn move_iter(self) -> Item<T> {
Item{opt: self}
}
/// Returns `None` if the option is `None`, otherwise returns `optb`.
#[inline]
+ #[stable]
pub fn and<U>(self, optb: Option<U>) -> Option<U> {
match self {
Some(_) => optb,
/// Returns `None` if the option is `None`, otherwise calls `f` with the
/// wrapped value and returns the result.
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn and_then<U>(self, f: |T| -> Option<U>) -> Option<U> {
match self {
Some(x) => f(x),
/// Returns the option if it contains a value, otherwise returns `optb`.
#[inline]
+ #[stable]
pub fn or(self, optb: Option<T>) -> Option<T> {
match self {
Some(_) => self,
/// Returns the option if it contains a value, otherwise calls `f` and
/// returns the result.
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn or_else(self, f: || -> Option<T>) -> Option<T> {
match self {
Some(_) => self,
/// Takes the value out of the option, leaving a `None` in its place.
#[inline]
+ #[stable]
pub fn take(&mut self) -> Option<T> {
mem::replace(self, None)
}
+ /// Deprecated.
+ ///
/// Filters an optional value using a given function.
#[inline(always)]
+ #[deprecated = "removed due to lack of use"]
pub fn filtered(self, f: |t: &T| -> bool) -> Option<T> {
match self {
Some(x) => if f(&x) { Some(x) } else { None },
}
}
+ /// Deprecated.
+ ///
/// Applies a function zero or more times until the result is `None`.
#[inline]
+ #[deprecated = "removed due to lack of use"]
pub fn while_some(self, f: |v: T| -> Option<T>) {
let mut opt = self;
loop {
// Common special cases
/////////////////////////////////////////////////////////////////////////
+ /// Deprecated: use `take().unwrap()` instead.
+ ///
/// The option dance. Moves a value out of an option type and returns it,
/// replacing the original with `None`.
///
///
/// Fails if the value equals `None`.
#[inline]
+ #[deprecated = "use take().unwrap() instead"]
pub fn take_unwrap(&mut self) -> T {
match self.take() {
Some(x) => x,
}
}
+ /// Deprecated: use `as_ref().unwrap()` instead.
+ ///
/// Gets an immutable reference to the value inside an option.
///
/// # Failure
/// Instead, prefer to use pattern matching and handle the `None`
/// case explicitly.
#[inline]
+ #[deprecated = "use .as_ref().unwrap() instead"]
pub fn get_ref<'a>(&'a self) -> &'a T {
match *self {
Some(ref x) => x,
}
}
+ /// Deprecated: use `as_mut().unwrap()` instead.
+ ///
/// Gets a mutable reference to the value inside an option.
///
/// # Failure
/// Instead, prefer to use pattern matching and handle the `None`
/// case explicitly.
#[inline]
+ #[deprecated = "use .as_mut().unwrap() instead"]
pub fn get_mut_ref<'a>(&'a mut self) -> &'a mut T {
match *self {
Some(ref mut x) => x,
/// assert_eq!(0i, bad_year);
/// ```
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap_or_default(self) -> T {
match self {
Some(x) => x,
impl<T> Slice<T> for Option<T> {
/// Convert from `Option<T>` to `&[T]` (without copying)
#[inline]
+ #[stable]
fn as_slice<'a>(&'a self) -> &'a [T] {
match *self {
Some(ref x) => slice::ref_slice(x),
/// The `Item` iterator is returned by the `iter`, `mut_iter` and `move_iter`
/// methods on `Option`.
#[deriving(Clone)]
+#[unstable = "waiting for iterator conventions"]
pub struct Item<A> {
opt: Option<A>
}
// Free functions
/////////////////////////////////////////////////////////////////////////////
-/// Takes each element in the `Iterator`: if it is `None`, no further
-/// elements are taken, and the `None` is returned. Should no `None` occur, a
-/// vector containing the values of each `Option` is returned.
-///
-/// Here is an example which increments every integer in a vector,
-/// checking for overflow:
-///
-/// ```rust
-/// use std::option;
-/// use std::uint;
-///
-/// let v = vec!(1u, 2u);
-/// let res: Option<Vec<uint>> = option::collect(v.iter().map(|x: &uint|
-/// if *x == uint::MAX { None }
-/// else { Some(x + 1) }
-/// ));
-/// assert!(res == Some(vec!(2u, 3u)));
-/// ```
+/// Deprecated: use `Iterator::collect` instead.
#[inline]
-pub fn collect<T, Iter: Iterator<Option<T>>, V: FromIterator<T>>(iter: Iter) -> Option<V> {
- // FIXME(#11084): This could be replaced with Iterator::scan when this
- // performance bug is closed.
-
- struct Adapter<Iter> {
- iter: Iter,
- found_none: bool,
- }
-
- impl<T, Iter: Iterator<Option<T>>> Iterator<T> for Adapter<Iter> {
- #[inline]
- fn next(&mut self) -> Option<T> {
- match self.iter.next() {
- Some(Some(value)) => Some(value),
- Some(None) => {
- self.found_none = true;
- None
+#[deprecated = "use Iterator::collect instead"]
+pub fn collect<T, Iter: Iterator<Option<T>>, V: FromIterator<T>>(mut iter: Iter) -> Option<V> {
+ iter.collect()
+}
+
+impl<A, V: FromIterator<A>> FromIterator<Option<A>> for Option<V> {
+ /// Takes each element in the `Iterator`: if it is `None`, no further
+ /// elements are taken, and the `None` is returned. Should no `None` occur, a
+ /// container with the values of each `Option` is returned.
+ ///
+ /// Here is an example which increments every integer in a vector,
+ /// checking for overflow:
+ ///
+ /// ```rust
+ /// use std::uint;
+ ///
+ /// let v = vec!(1u, 2u);
+ /// let res: Option<Vec<uint>> = v.iter().map(|x: &uint|
+ /// if *x == uint::MAX { None }
+ /// else { Some(x + 1) }
+ /// ).collect();
+ /// assert!(res == Some(vec!(2u, 3u)));
+ /// ```
+ #[inline]
+ fn from_iter<I: Iterator<Option<A>>>(iter: I) -> Option<V> {
+ // FIXME(#11084): This could be replaced with Iterator::scan when this
+ // performance bug is closed.
+
+ struct Adapter<Iter> {
+ iter: Iter,
+ found_none: bool,
+ }
+
+ impl<T, Iter: Iterator<Option<T>>> Iterator<T> for Adapter<Iter> {
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ match self.iter.next() {
+ Some(Some(value)) => Some(value),
+ Some(None) => {
+ self.found_none = true;
+ None
+ }
+ None => None,
}
- None => None,
}
}
- }
- let mut adapter = Adapter { iter: iter, found_none: false };
- let v: V = FromIterator::from_iter(adapter.by_ref());
+ let mut adapter = Adapter { iter: iter, found_none: false };
+ let v: V = FromIterator::from_iter(adapter.by_ref());
- if adapter.found_none {
- None
- } else {
- Some(v)
+ if adapter.found_none {
+ None
+ } else {
+ Some(v)
+ }
}
}
pub trait RawPtr<T> {
/// Returns the null pointer.
fn null() -> Self;
+
/// Returns true if the pointer is equal to the null pointer.
fn is_null(&self) -> bool;
+
/// Returns true if the pointer is not equal to the null pointer.
fn is_not_null(&self) -> bool { !self.is_null() }
+
/// Returns the value of this pointer (ie, the address it points to)
fn to_uint(&self) -> uint;
- /// Returns `None` if the pointer is null, or else returns the value wrapped
- /// in `Some`.
+
+ /// Returns `None` if the pointer is null, or else returns a reference to the
+ /// value wrapped in `Some`.
///
/// # Safety Notes
///
- /// While this method is useful for null-safety, it is important to note
- /// that this is still an unsafe operation because the returned value could
- /// be pointing to invalid memory.
- unsafe fn to_option(&self) -> Option<&T>;
+ /// While this method and its mutable counterpart are useful for null-safety,
+ /// it is important to note that this is still an unsafe operation because
+ /// the returned value could be pointing to invalid memory.
+ unsafe fn as_ref<'a>(&self) -> Option<&'a T>;
+
+ /// A synonym for `as_ref`, except with incorrect lifetime semantics
+ #[deprecated="Use `as_ref` instead"]
+ unsafe fn to_option<'a>(&'a self) -> Option<&'a T> {
+ mem::transmute(self.as_ref())
+ }
+
/// Calculates the offset from a pointer. The offset *must* be in-bounds of
/// the object, or one-byte-past-the-end. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * size_of::<T>()` bytes.
unsafe fn offset(self, count: int) -> Self;
}
+/// Methods on mutable raw pointers
+pub trait RawMutPtr<T>{
+ /// Returns `None` if the pointer is null, or else returns a mutable reference
+ /// to the value wrapped in `Some`. As with `as_ref`, this is unsafe because
+ /// it cannot verify the validity of the returned pointer.
+ unsafe fn as_mut<'a>(&self) -> Option<&'a mut T>;
+}
+
impl<T> RawPtr<T> for *const T {
#[inline]
fn null() -> *const T { null() }
}
#[inline]
- unsafe fn to_option(&self) -> Option<&T> {
+ unsafe fn as_ref<'a>(&self) -> Option<&'a T> {
if self.is_null() {
None
} else {
}
#[inline]
- unsafe fn to_option(&self) -> Option<&T> {
+ unsafe fn as_ref<'a>(&self) -> Option<&'a T> {
if self.is_null() {
None
} else {
}
}
+impl<T> RawMutPtr<T> for *mut T {
+ #[inline]
+ unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> {
+ if self.is_null() {
+ None
+ } else {
+ Some(&mut **self)
+ }
+ }
+}
+
// Equality for pointers
impl<T> PartialEq for *const T {
#[inline]
///
/// This struct does not have a `Repr` implementation
/// because there is no way to refer to all trait objects generically.
-#[cfg(stage0)]
-pub struct TraitObject {
- pub vtable: *mut (),
- pub data: *mut (),
-}
-#[cfg(not(stage0))]
pub struct TraitObject {
pub data: *mut (),
pub vtable: *mut (),
//! the context. The caller of `fail!` should assume that execution
//! will not resume after failure, that failure is catastrophic.
+#![stable]
+
use clone::Clone;
use cmp::PartialEq;
use std::fmt::Show;
-use iter::{Iterator, FromIterator};
+use slice;
+use slice::Slice;
+use iter::{Iterator, DoubleEndedIterator, FromIterator, ExactSize};
use option::{None, Option, Some};
/// `Result` is a type that represents either success (`Ok`) or failure (`Err`).
/// See the [`std::result`](index.html) module documentation for details.
#[deriving(Clone, PartialEq, PartialOrd, Eq, Ord, Show)]
#[must_use]
+#[stable]
pub enum Result<T, E> {
/// Contains the success value
Ok(T),
/// # }
/// ~~~
#[inline]
+ #[stable]
pub fn is_ok(&self) -> bool {
match *self {
Ok(_) => true,
/// assert!(bogus.is_err());
/// ~~~
#[inline]
+ #[stable]
pub fn is_err(&self) -> bool {
!self.is_ok()
}
/// let bdays: File = bdays.ok().expect("unable to open birthday file");
/// ~~~
#[inline]
+ #[stable]
pub fn ok(self) -> Option<T> {
match self {
Ok(x) => Some(x),
/// Converts `self` into an `Option<T>`, consuming `self`,
/// and discarding the value, if any.
#[inline]
+ #[stable]
pub fn err(self) -> Option<E> {
match self {
Ok(_) => None,
/// Produces a new `Result`, containing a reference
/// into the original, leaving the original in place.
#[inline]
+ #[stable]
pub fn as_ref<'r>(&'r self) -> Result<&'r T, &'r E> {
match *self {
Ok(ref x) => Ok(x),
/// Convert from `Result<T, E>` to `Result<&mut T, &mut E>`
#[inline]
+ #[unstable = "waiting for mut conventions"]
pub fn as_mut<'r>(&'r mut self) -> Result<&'r mut T, &'r mut E> {
match *self {
Ok(ref mut x) => Ok(x),
}
}
+ /// Convert from `Result<T, E>` to `&mut [T]` (without copying)
+ #[inline]
+ #[unstable = "waiting for mut conventions"]
+ pub fn as_mut_slice<'r>(&'r mut self) -> &'r mut [T] {
+ match *self {
+ Ok(ref mut x) => slice::mut_ref_slice(x),
+ Err(_) => {
+ // work around lack of implicit coercion from fixed-size array to slice
+ let emp: &mut [_] = &mut [];
+ emp
+ }
+ }
+ }
+
/////////////////////////////////////////////////////////////////////////
// Transforming contained values
/////////////////////////////////////////////////////////////////////////
/// assert!(sum == 10);
/// ~~~
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn map<U>(self, op: |T| -> U) -> Result<U,E> {
match self {
Ok(t) => Ok(op(t)),
/// This function can be used to pass through a successful result while handling
/// an error.
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn map_err<F>(self, op: |E| -> F) -> Result<T,F> {
match self {
Ok(t) => Ok(t),
}
}
+
+ /////////////////////////////////////////////////////////////////////////
+ // Iterator constructors
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Returns an iterator over the possibly contained value.
+ #[inline]
+ #[unstable = "waiting for iterator conventions"]
+ pub fn iter<'r>(&'r self) -> Item<&'r T> {
+ Item{opt: self.as_ref().ok()}
+ }
+
+ /// Returns a mutable iterator over the possibly contained value.
+ #[inline]
+ #[unstable = "waiting for iterator conventions"]
+ pub fn mut_iter<'r>(&'r mut self) -> Item<&'r mut T> {
+ Item{opt: self.as_mut().ok()}
+ }
+
+ /// Returns a consuming iterator over the possibly contained value.
+ #[inline]
+ #[unstable = "waiting for iterator conventions"]
+ pub fn move_iter(self) -> Item<T> {
+ Item{opt: self.ok()}
+ }
+
////////////////////////////////////////////////////////////////////////
// Boolean operations on the values, eager and lazy
/////////////////////////////////////////////////////////////////////////
/// Returns `res` if the result is `Ok`, otherwise returns the `Err` value of `self`.
#[inline]
+ #[stable]
pub fn and<U>(self, res: Result<U, E>) -> Result<U, E> {
match self {
Ok(_) => res,
///
/// This function can be used for control flow based on result values
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn and_then<U>(self, op: |T| -> Result<U, E>) -> Result<U, E> {
match self {
Ok(t) => op(t),
/// Returns `res` if the result is `Err`, otherwise returns the `Ok` value of `self`.
#[inline]
+ #[stable]
pub fn or(self, res: Result<T, E>) -> Result<T, E> {
match self {
Ok(_) => self,
///
/// This function can be used for control flow based on result values
#[inline]
+ #[unstable = "waiting for unboxed closures"]
pub fn or_else<F>(self, op: |E| -> Result<T, F>) -> Result<T, F> {
match self {
Ok(t) => Ok(t),
/// Unwraps a result, yielding the content of an `Ok`.
/// Else it returns `optb`.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap_or(self, optb: T) -> T {
match self {
Ok(t) => t,
/// Unwraps a result, yielding the content of an `Ok`.
/// If the value is an `Err` then it calls `op` with its value.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap_or_else(self, op: |E| -> T) -> T {
match self {
Ok(t) => t,
/// Fails if the value is an `Err`, with a custom failure message provided
/// by the `Err`'s value.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap(self) -> T {
match self {
Ok(t) => t,
/// Fails if the value is an `Ok`, with a custom failure message provided
/// by the `Ok`'s value.
#[inline]
+ #[unstable = "waiting for conventions"]
pub fn unwrap_err(self) -> E {
match self {
Ok(t) =>
}
/////////////////////////////////////////////////////////////////////////////
-// Free functions
+// Trait implementations
/////////////////////////////////////////////////////////////////////////////
-/// Takes each element in the `Iterator`: if it is an `Err`, no further
-/// elements are taken, and the `Err` is returned. Should no `Err` occur, a
-/// vector containing the values of each `Result` is returned.
-///
-/// Here is an example which increments every integer in a vector,
-/// checking for overflow:
-///
-/// ```rust
-/// use std::result;
-/// use std::uint;
+impl<T, E> Slice<T> for Result<T, E> {
+ /// Convert from `Result<T, E>` to `&[T]` (without copying)
+ #[inline]
+ #[stable]
+ fn as_slice<'a>(&'a self) -> &'a [T] {
+ match *self {
+ Ok(ref x) => slice::ref_slice(x),
+ Err(_) => {
+ // work around lack of implicit coercion from fixed-size array to slice
+ let emp: &[_] = &[];
+ emp
+ }
+ }
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// The Result Iterator
+/////////////////////////////////////////////////////////////////////////////
+
+/// A `Result` iterator that yields either one or zero elements
///
-/// let v = vec!(1u, 2u);
-/// let res: Result<Vec<uint>, &'static str> = result::collect(v.iter().map(|x: &uint|
-/// if *x == uint::MAX { Err("Overflow!") }
-/// else { Ok(x + 1) }
-/// ));
-/// assert!(res == Ok(vec!(2u, 3u)));
-/// ```
-#[inline]
-pub fn collect<T, E, Iter: Iterator<Result<T, E>>, V: FromIterator<T>>(iter: Iter) -> Result<V, E> {
- // FIXME(#11084): This could be replaced with Iterator::scan when this
- // performance bug is closed.
+/// The `Item` iterator is returned by the `iter`, `mut_iter` and `move_iter`
+/// methods on `Result`.
+#[deriving(Clone)]
+#[unstable = "waiting for iterator conventions"]
+pub struct Item<T> {
+ opt: Option<T>
+}
+
+impl<T> Iterator<T> for Item<T> {
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.opt.take()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ match self.opt {
+ Some(_) => (1, Some(1)),
+ None => (0, Some(0)),
+ }
+ }
+}
- struct Adapter<Iter, E> {
- iter: Iter,
- err: Option<E>,
+impl<A> DoubleEndedIterator<A> for Item<A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<A> {
+ self.opt.take()
}
+}
+
+impl<A> ExactSize<A> for Item<A> {}
- impl<T, E, Iter: Iterator<Result<T, E>>> Iterator<T> for Adapter<Iter, E> {
- #[inline]
- fn next(&mut self) -> Option<T> {
- match self.iter.next() {
- Some(Ok(value)) => Some(value),
- Some(Err(err)) => {
- self.err = Some(err);
- None
+/////////////////////////////////////////////////////////////////////////////
+// Free functions
+/////////////////////////////////////////////////////////////////////////////
+
+/// Deprecated: use `Iterator::collect`.
+#[inline]
+#[deprecated = "use Iterator::collect instead"]
+pub fn collect<T, E, Iter: Iterator<Result<T, E>>, V: FromIterator<T>>(mut iter: Iter)
+ -> Result<V, E> {
+ iter.collect()
+}
+
+impl<A, E, V: FromIterator<A>> FromIterator<Result<A, E>> for Result<V, E> {
+ /// Takes each element in the `Iterator`: if it is an `Err`, no further
+ /// elements are taken, and the `Err` is returned. Should no `Err` occur, a
+ /// container with the values of each `Result` is returned.
+ ///
+ /// Here is an example which increments every integer in a vector,
+ /// checking for overflow:
+ ///
+ /// ```rust
+ /// use std::uint;
+ ///
+ /// let v = vec!(1u, 2u);
+ /// let res: Result<Vec<uint>, &'static str> = v.iter().map(|x: &uint|
+ /// if *x == uint::MAX { Err("Overflow!") }
+ /// else { Ok(x + 1) }
+ /// ).collect();
+ /// assert!(res == Ok(vec!(2u, 3u)));
+ /// ```
+ #[inline]
+ fn from_iter<I: Iterator<Result<A, E>>>(iter: I) -> Result<V, E> {
+ // FIXME(#11084): This could be replaced with Iterator::scan when this
+ // performance bug is closed.
+
+ struct Adapter<Iter, E> {
+ iter: Iter,
+ err: Option<E>,
+ }
+
+ impl<T, E, Iter: Iterator<Result<T, E>>> Iterator<T> for Adapter<Iter, E> {
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ match self.iter.next() {
+ Some(Ok(value)) => Some(value),
+ Some(Err(err)) => {
+ self.err = Some(err);
+ None
+ }
+ None => None,
}
- None => None,
}
}
- }
- let mut adapter = Adapter { iter: iter, err: None };
- let v: V = FromIterator::from_iter(adapter.by_ref());
+ let mut adapter = Adapter { iter: iter, err: None };
+ let v: V = FromIterator::from_iter(adapter.by_ref());
- match adapter.err {
- Some(err) => Err(err),
- None => Ok(v),
+ match adapter.err {
+ Some(err) => Err(err),
+ None => Ok(v),
+ }
}
}
/// If an `Err` is encountered, it is immediately returned.
/// Otherwise, the folded value is returned.
#[inline]
+#[experimental]
pub fn fold<T,
V,
E,
Ok(init)
}
+/// Deprecated.
+///
/// Perform a trivial fold operation over the result values
/// from an iterator.
///
/// If an `Err` is encountered, it is immediately returned.
/// Otherwise, a simple `Ok(())` is returned.
#[inline]
+#[deprecated = "use fold instead"]
pub fn fold_<T,E,Iter:Iterator<Result<T,E>>>(iterator: Iter) -> Result<(),E> {
fold(iterator, (), |_, _| ())
}
// Extension traits
//
-/// Extension methods for vectors
+/// Extension methods for immutable slices.
#[unstable = "may merge with other traits; region parameter may disappear"]
pub trait ImmutableSlice<'a, T> {
- /**
- * Returns a slice of self spanning the interval [`start`, `end`).
- *
- * Fails when the slice (or part of it) is outside the bounds of self,
- * or when `start` > `end`.
- */
+ /// Returns a subslice spanning the interval [`start`, `end`).
+ ///
+ /// Fails when the end of the new slice lies beyond the end of the
+ /// original slice (i.e. when `end > self.len()`) or when `start > end`.
+ ///
+ /// Slicing with `start` equal to `end` yields an empty slice.
#[unstable]
fn slice(&self, start: uint, end: uint) -> &'a [T];
- /**
- * Returns a slice of self from `start` to the end of the vec.
- *
- * Fails when `start` points outside the bounds of self.
- */
+ /// Returns a subslice from `start` to the end of the slice.
+ ///
+ /// Fails when `start` is strictly greater than the length of the original slice.
+ ///
+ /// Slicing from `self.len()` yields an empty slice.
#[unstable]
fn slice_from(&self, start: uint) -> &'a [T];
- /**
- * Returns a slice of self from the start of the vec to `end`.
- *
- * Fails when `end` points outside the bounds of self.
- */
+ /// Returns a subslice from the start of the slice to `end`.
+ ///
+ /// Fails when `end` is strictly greater than the length of the original slice.
+ ///
+ /// Slicing to `0` yields an empty slice.
#[unstable]
fn slice_to(&self, end: uint) -> &'a [T];
/// Primarily intended for getting a &mut [T] from a [T, ..N].
fn as_mut_slice(self) -> &'a mut [T];
- /// Return a slice that points into another slice.
+ /// Returns a mutable subslice spanning the interval [`start`, `end`).
+ ///
+ /// Fails when the end of the new slice lies beyond the end of the
+ /// original slice (i.e. when `end > self.len()`) or when `start > end`.
+ ///
+ /// Slicing with `start` equal to `end` yields an empty slice.
fn mut_slice(self, start: uint, end: uint) -> &'a mut [T];
- /**
- * Returns a slice of self from `start` to the end of the vec.
- *
- * Fails when `start` points outside the bounds of self.
- */
+ /// Returns a mutable subslice from `start` to the end of the slice.
+ ///
+ /// Fails when `start` is strictly greater than the length of the original slice.
+ ///
+ /// Slicing from `self.len()` yields an empty slice.
fn mut_slice_from(self, start: uint) -> &'a mut [T];
- /**
- * Returns a slice of self from the start of the vec to `end`.
- *
- * Fails when `end` points outside the bounds of self.
- */
+ /// Returns a mutable subslice from the start of the slice to `end`.
+ ///
+ /// Fails when `end` is strictly greater than the length of the original slice.
+ ///
+ /// Slicing to `0` yields an empty slice.
fn mut_slice_to(self, end: uint) -> &'a mut [T];
/// Returns an iterator that allows modifying each value
fn mut_split(self, pred: |&T|: 'a -> bool) -> MutSplits<'a, T>;
/**
- * Returns an iterator over `size` elements of the vector at a time.
- * The chunks are mutable and do not overlap. If `size` does not divide the
- * length of the vector, then the last chunk will not have length
- * `size`.
+ * Returns an iterator over `chunk_size` elements of the vector at a time.
+ * The chunks are mutable and do not overlap. If `chunk_size` does
+ * not divide the length of the vector, then the last chunk will not
+ * have length `chunk_size`.
*
* # Failure
*
- * Fails if `size` is 0.
+ * Fails if `chunk_size` is 0.
*/
fn mut_chunks(self, chunk_size: uint) -> MutChunks<'a, T>;
let mut i: uint = 0;
let ln = self.len();
while i < ln / 2 {
- self.swap(i, ln - i - 1);
+ // Unsafe swap to avoid the bounds check in safe swap.
+ unsafe {
+ let pa: *mut T = self.unsafe_mut_ref(i);
+ let pb: *mut T = self.unsafe_mut_ref(ln - i - 1);
+ ptr::swap(pa, pb);
+ }
i += 1;
}
}
}
}
+#[experimental = "trait is experimental"]
+impl<'a, T> Collection for &'a mut [T] {
+ /// Returns the length of a slice.
+ #[inline]
+ fn len(&self) -> uint {
+ self.repr().len
+ }
+}
+
#[unstable = "waiting for DST"]
impl<'a, T> Default for &'a [T] {
fn default() -> &'a [T] { &[] }
/// An iterator over the slices of a vector separated by elements that
/// match a predicate function.
-#[cfg(not(stage0))]
#[experimental = "needs review"]
pub struct Splits<'a, T:'a> {
v: &'a [T],
finished: bool
}
-/// Dox.
-#[cfg(stage0)]
-pub struct Splits<'a, T> {
- v: &'a [T],
- pred: |t: &T|: 'a -> bool,
- finished: bool
-}
-
#[experimental = "needs review"]
impl<'a, T> Iterator<&'a [T]> for Splits<'a, T> {
#[inline]
/// An iterator over the subslices of the vector which are separated
/// by elements that match `pred`.
-#[cfg(not(stage0))]
#[experimental = "needs review"]
pub struct MutSplits<'a, T:'a> {
v: &'a mut [T],
finished: bool
}
-/// Dox
-#[cfg(stage0)]
-pub struct MutSplits<'a, T> {
- v: &'a mut [T],
- pred: |t: &T|: 'a -> bool,
- finished: bool
-}
-
#[experimental = "needs review"]
impl<'a, T> Iterator<&'a mut [T]> for MutSplits<'a, T> {
#[inline]
/// An iterator over the slices of a vector separated by elements that
/// match a predicate function, splitting at most a fixed number of times.
-#[cfg(not(stage0))]
#[experimental = "needs review"]
pub struct SplitsN<'a, T:'a> {
iter: Splits<'a, T>,
invert: bool
}
-/// Dox.
-#[cfg(stage0)]
-pub struct SplitsN<'a, T> {
- iter: Splits<'a, T>,
- count: uint,
- invert: bool
-}
-
#[experimental = "needs review"]
impl<'a, T> Iterator<&'a [T]> for SplitsN<'a, T> {
#[inline]
/// An iterator over the (overlapping) slices of length `size` within
/// a vector.
-#[cfg(stage0)]
-#[deriving(Clone)]
-#[experimental = "needs review"]
-pub struct Windows<'a, T> {
- v: &'a [T],
- size: uint
-}
-
-/// An iterator over the (overlapping) slices of length `size` within
-/// a vector.
-#[cfg(not(stage0))]
#[deriving(Clone)]
#[experimental = "needs review"]
pub struct Windows<'a, T:'a> {
///
/// When the vector len is not evenly divided by the chunk size,
/// the last slice of the iteration will be the remainder.
-#[cfg(stage0)]
#[deriving(Clone)]
#[experimental = "needs review"]
-pub struct Chunks<'a, T> {
- v: &'a [T],
- size: uint
-}
-
-/// An iterator over a vector in (non-overlapping) chunks (`size`
-/// elements at a time).
-///
-/// When the vector len is not evenly divided by the chunk size,
-/// the last slice of the iteration will be the remainder.
-#[cfg(not(stage0))]
-#[deriving(Clone)]
pub struct Chunks<'a, T:'a> {
v: &'a [T],
size: uint
/// An iterator over a vector in (non-overlapping) mutable chunks (`size` elements at a time). When
/// the vector len is not evenly divided by the chunk size, the last slice of the iteration will be
/// the remainder.
-#[cfg(not(stage0))]
#[experimental = "needs review"]
pub struct MutChunks<'a, T:'a> {
v: &'a mut [T],
chunk_size: uint
}
-/// Dox.
-#[cfg(stage0)]
-pub struct MutChunks<'a, T> {
- v: &'a mut [T],
- chunk_size: uint
-}
-
#[experimental = "needs review"]
impl<'a, T> Iterator<&'a mut [T]> for MutChunks<'a, T> {
#[inline]
fn next(&mut self, haystack: &[u8], needle: &[u8]) -> Option<(uint, uint)> {
while self.position + needle.len() <= haystack.len() {
if haystack.slice(self.position, self.position + needle.len()) == needle {
- let matchPos = self.position;
+ let match_pos = self.position;
self.position += needle.len(); // add 1 for all matches
- return Some((matchPos, matchPos + needle.len()));
+ return Some((match_pos, match_pos + needle.len()));
} else {
self.position += 1;
}
#[deriving(Clone)]
struct TwoWaySearcher {
// constants
- critPos: uint,
+ crit_pos: uint,
period: uint,
byteset: u64,
// Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
impl TwoWaySearcher {
fn new(needle: &[u8]) -> TwoWaySearcher {
- let (critPos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
- let (critPos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
+ let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
+ let (crit_pos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
- let critPos;
+ let crit_pos;
let period;
- if critPos1 > critPos2 {
- critPos = critPos1;
+ if crit_pos1 > crit_pos2 {
+ crit_pos = crit_pos1;
period = period1;
} else {
- critPos = critPos2;
+ crit_pos = crit_pos2;
period = period2;
}
let byteset = needle.iter()
.fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
-
- // The logic here (calculating critPos and period, the final if statement to see which
+ // The logic here (calculating crit_pos and period, the final if statement to see which
// period to use for the TwoWaySearcher) is essentially an implementation of the
// "small-period" function from the paper (p. 670)
//
- // In the paper they check whether `needle.slice_to(critPos)` is a suffix of
- // `needle.slice(critPos, critPos + period)`, which is precisely what this does
- if needle.slice_to(critPos) == needle.slice(period, period + critPos) {
+ // In the paper they check whether `needle.slice_to(crit_pos)` is a suffix of
+ // `needle.slice(crit_pos, crit_pos + period)`, which is precisely what this does
+ if needle.slice_to(crit_pos) == needle.slice(period, period + crit_pos) {
TwoWaySearcher {
- critPos: critPos,
+ crit_pos: crit_pos,
period: period,
byteset: byteset,
}
} else {
TwoWaySearcher {
- critPos: critPos,
- period: cmp::max(critPos, needle.len() - critPos) + 1,
+ crit_pos: crit_pos,
+ period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
byteset: byteset,
position: 0,
}
#[inline]
- fn next(&mut self, haystack: &[u8], needle: &[u8], longPeriod: bool) -> Option<(uint, uint)> {
+ fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
'search: loop {
// Check that we have room to search in
if self.position + needle.len() > haystack.len() {
}
// See if the right part of the needle matches
- let start = if longPeriod { self.critPos } else { cmp::max(self.critPos, self.memory) };
+ let start = if long_period { self.crit_pos }
+ else { cmp::max(self.crit_pos, self.memory) };
for i in range(start, needle.len()) {
if needle[i] != haystack[self.position + i] {
- self.position += i - self.critPos + 1;
- if !longPeriod {
+ self.position += i - self.crit_pos + 1;
+ if !long_period {
self.memory = 0;
}
continue 'search;
}
// See if the left part of the needle matches
- let start = if longPeriod { 0 } else { self.memory };
- for i in range(start, self.critPos).rev() {
+ let start = if long_period { 0 } else { self.memory };
+ for i in range(start, self.crit_pos).rev() {
if needle[i] != haystack[self.position + i] {
self.position += self.period;
- if !longPeriod {
+ if !long_period {
self.memory = needle.len() - self.period;
}
continue 'search;
}
// We have found a match!
- let matchPos = self.position;
+ let match_pos = self.position;
self.position += needle.len(); // add self.period for all matches
- if !longPeriod {
+ if !long_period {
self.memory = 0; // set to needle.len() - self.period for all matches
}
- return Some((matchPos, matchPos + needle.len()));
+ return Some((match_pos, match_pos + needle.len()));
}
}
fn contains_char(&self, needle: char) -> bool;
/// An iterator over the characters of `self`. Note, this iterates
- /// over unicode code-points, not unicode graphemes.
+ /// over Unicode code-points, not Unicode graphemes.
///
/// # Example
///
/// Pluck a character out of a string and return the index of the next
/// character.
///
- /// This function can be used to iterate over the unicode characters of a
+ /// This function can be used to iterate over the Unicode characters of a
/// string.
///
/// # Example
/// # Return value
///
/// A record {ch: char, next: uint} containing the char value and the byte
- /// index of the next unicode character.
+ /// index of the next Unicode character.
///
/// # Failure
///
/// Given a byte position and a str, return the previous char and its position.
///
- /// This function can be used to iterate over a unicode string in reverse.
+ /// This function can be used to iterate over a Unicode string in reverse.
///
/// Returns 0 for next index if called on start index 0.
///
#[test]
fn test_count_zeros() {
- assert!(A.count_zeros() == BITS as $T - 3);
- assert!(B.count_zeros() == BITS as $T - 2);
- assert!(C.count_zeros() == BITS as $T - 5);
+ assert!(A.count_zeros() == BITS - 3);
+ assert!(B.count_zeros() == BITS - 2);
+ assert!(C.count_zeros() == BITS - 5);
}
#[test]
#[test]
fn test_count_zeros() {
- assert!(A.count_zeros() == BITS as $T - 3);
- assert!(B.count_zeros() == BITS as $T - 2);
- assert!(C.count_zeros() == BITS as $T - 5);
+ assert!(A.count_zeros() == BITS - 3);
+ assert!(B.count_zeros() == BITS - 2);
+ assert!(C.count_zeros() == BITS - 5);
}
#[test]
let mut y = Some(5i);
let mut y2 = 0;
for _x in x.iter() {
- y2 = y.take_unwrap();
+ y2 = y.take().unwrap();
}
assert_eq!(y2, 5);
assert!(y.is_none());
#[test] #[should_fail]
fn test_option_too_much_dance() {
let mut y = Some(marker::NoCopy);
- let _y2 = y.take_unwrap();
- let _y3 = y.take_unwrap();
+ let _y2 = y.take().unwrap();
+ let _y3 = y.take().unwrap();
}
#[test]
}
#[test]
-fn test_to_option() {
+fn test_as_ref() {
unsafe {
let p: *const int = null();
- assert_eq!(p.to_option(), None);
+ assert_eq!(p.as_ref(), None);
let q: *const int = &2;
- assert_eq!(q.to_option().unwrap(), &2);
+ assert_eq!(q.as_ref().unwrap(), &2);
let p: *mut int = mut_null();
- assert_eq!(p.to_option(), None);
+ assert_eq!(p.as_ref(), None);
let q: *mut int = &mut 2;
- assert_eq!(q.to_option().unwrap(), &2);
+ assert_eq!(q.as_ref().unwrap(), &2);
+
+ // Lifetime inference
+ let u = 2i;
+ {
+ let p: *const int = &u as *const _;
+ assert_eq!(p.as_ref().unwrap(), &2);
+ }
+ }
+}
+
+#[test]
+fn test_as_mut() {
+ unsafe {
+ let p: *mut int = mut_null();
+ assert!(p.as_mut() == None);
+
+ let q: *mut int = &mut 2;
+ assert!(q.as_mut().unwrap() == &mut 2);
+
+ // Lifetime inference
+ let mut u = 2i;
+ {
+ let p: *mut int = &mut u as *mut _;
+ assert!(p.as_mut().unwrap() == &mut 2);
+ }
}
}
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/")]
#![experimental]
-#![feature(managed_boxes, macro_rules, issue_5723_bootstrap)]
+#![feature(managed_boxes, macro_rules)]
#![allow(experimental)]
pub mod fmt;
true
}
- // NOTE: remove after snapshot
- #[cfg(stage0)]
- fn visit_estr_fixed(&mut self, n: uint,
- sz: uint,
- align: uint) -> bool {
- self.align(align);
- if ! self.inner.visit_estr_fixed(n, sz, align) { return false; }
- self.bump(sz);
- true
- }
-
fn visit_box(&mut self, mtbl: uint, inner: *const TyDesc) -> bool {
self.align_to::<Gc<u8>>();
if ! self.inner.visit_box(mtbl, inner) { return false; }
true
}
- #[cfg(stage0)]
- fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
- mtbl: uint, inner: *const TyDesc) -> bool {
- self.align(align);
- if ! self.inner.visit_evec_fixed(n, sz, align, mtbl, inner) {
- return false;
- }
- self.bump(sz);
- true
- }
- #[cfg(not(stage0))]
fn visit_evec_fixed(&mut self, n: uint, sz: uint, align: uint,
inner: *const TyDesc) -> bool {
self.align(align);
self.get::<&str>(|this, s| this.write_escaped_slice(*s))
}
- // Type no longer exists, vestigial function.
- // NOTE: remove after snapshot
- #[cfg(stage0)]
- fn visit_estr_fixed(&mut self, _n: uint, _sz: uint,
- _align: uint) -> bool { fail!(); }
-
fn visit_box(&mut self, mtbl: uint, inner: *const TyDesc) -> bool {
try!(self, self.writer.write("box(GC) ".as_bytes()));
self.write_mut_qualifier(mtbl);
})
}
- // NOTE: remove after snapshot
- #[cfg(stage0)]
- fn visit_evec_fixed(&mut self, n: uint, sz: uint, _align: uint,
- _: uint, inner: *const TyDesc) -> bool {
- let assumed_size = if sz == 0 { n } else { sz };
- self.get::<()>(|this, b| {
- this.write_vec_range(b, assumed_size, inner)
- })
- }
-
- #[cfg(not(stage0))]
fn visit_evec_fixed(&mut self, n: uint, sz: uint, _align: uint,
inner: *const TyDesc) -> bool {
let assumed_size = if sz == 0 { n } else { sz };
#![crate_type = "dylib"]
#![feature(macro_rules, globs, import_shadowing)]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
use std::char;
use std::str;
AlignLeft,
/// The value will be aligned to the right.
AlignRight,
+ /// The value will be aligned in the center.
+ AlignCenter,
/// The value will take on a default alignment.
AlignUnknown,
}
match self.cur.clone().next() {
Some((_, c)) => {
match self.cur.clone().skip(1).next() {
- Some((_, '>')) | Some((_, '<')) => {
+ Some((_, '>')) | Some((_, '<')) | Some((_, '^')) => {
spec.fill = Some(c);
self.cur.next();
}
spec.align = AlignLeft;
} else if self.consume('>') {
spec.align = AlignRight;
+ } else if self.consume('^') {
+ spec.align = AlignCenter;
}
// Sign flags
if self.consume('+') {
#![feature(import_shadowing)]
#![deny(missing_doc)]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
#[cfg(test)] extern crate debug;
#[cfg(test)] #[phase(plugin, link)] extern crate log;
let mut root = os::getcwd();
let pat_root = Path::new(pattern).root_path();
if pat_root.is_some() {
- if check_windows_verbatim(pat_root.get_ref()) {
+ if check_windows_verbatim(pat_root.as_ref().unwrap()) {
// FIXME: How do we want to handle verbatim paths? I'm inclined to return nothing,
// since we can't very well find all UNC shares with a 1-letter server name.
return Paths {
todo: Vec::new(),
};
}
- root.push(pat_root.get_ref());
+ root.push(pat_root.as_ref().unwrap());
}
let root_len = pat_root.map_or(0u, |p| p.as_vec().len());
}
#[test]
+ #[ignore(cfg(windows))] // FIXME (#9406)
fn test_lots_of_files() {
// this is a good test because it touches lots of differently named files
glob("/*/*/*/*").skip(10000).next();
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![license = "MIT/ASL2"]
-#![feature(issue_5723_bootstrap)]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/")]
/// Some clients will have a pre-allocated vector ready to hand off in
/// a slice; others will want to create the set on the fly and hand
/// off ownership, via `Growable`.
-#[cfg(not(stage0))]
pub enum MaybeOwnedVector<'a,T:'a> {
Growable(Vec<T>),
Borrowed(&'a [T]),
}
-/// Stage0 only.
-#[cfg(stage0)]
-pub enum MaybeOwnedVector<'a,T> {
- Growable(Vec<T>),
- Borrowed(&'a [T]),
-}
-
/// Trait for moving into a `MaybeOwnedVector`
pub trait IntoMaybeOwnedVector<'a,T> {
/// Moves self into a `MaybeOwnedVector`
let mut main = Some(main);
let mut ret = None;
simple::task().run(|| {
- ret = Some(run(event_loop_factory, main.take_unwrap()));
+ ret = Some(run(event_loop_factory, main.take().unwrap()));
}).destroy();
// unsafe is ok b/c we're sure that the runtime is gone
unsafe { rt::cleanup() }
let mut sched_task = self.run(sched_task);
// Close the idle callback.
- let mut sched = sched_task.sched.take_unwrap();
+ let mut sched = sched_task.sched.take().unwrap();
sched.idle_callback.take();
// Make one go through the loop to run the close callback.
let mut stask = sched.run(sched_task);
assert!(sched.sched_task.is_none());
sched.sched_task = Some(stask);
});
- (cur.sched.take_unwrap(), cur)
+ (cur.sched.take().unwrap(), cur)
}
fn resume_task_immediately_cl(sched: Box<Scheduler>,
f: |&mut Scheduler, BlockedTask|) {
// Trickier - we need to get the scheduler task out of self
// and use it as the destination.
- let stask = self.sched_task.take_unwrap();
+ let stask = self.sched_task.take().unwrap();
// Otherwise this is the same as below.
self.switch_running_tasks_and_then(cur, stask, f)
}
sched.enqueue_task(last_task);
}
});
- (cur.sched.take_unwrap(), cur)
+ (cur.sched.take().unwrap(), cur)
}
// * Task Context Helpers
-> ! {
// Similar to deschedule running task and then, but cannot go through
// the task-blocking path. The task is already dying.
- let stask = self.sched_task.take_unwrap();
+ let stask = self.sched_task.take().unwrap();
let _cur = self.change_task_context(cur, stask, |sched, mut dead_task| {
- let coroutine = dead_task.coroutine.take_unwrap();
+ let coroutine = dead_task.coroutine.take().unwrap();
coroutine.recycle(&mut sched.stack_pool);
sched.task_state.decrement();
});
}
pub fn run_task_later(mut cur: Box<GreenTask>, next: Box<GreenTask>) {
- let mut sched = cur.sched.take_unwrap();
+ let mut sched = cur.sched.take().unwrap();
sched.enqueue_task(next);
cur.put_with_sched(sched);
}
self.yield_check_count = reset_yield_check(&mut self.rng);
// Tell the scheduler to start stealing on the next iteration
self.steal_for_yield = true;
- let stask = self.sched_task.take_unwrap();
+ let stask = self.sched_task.take().unwrap();
let cur = self.change_task_context(cur, stask, |sched, task| {
sched.enqueue_task(task);
});
pub fn sched_id(&self) -> uint { self as *const Scheduler as uint }
pub fn run_cleanup_job(&mut self) {
- let cleanup_job = self.cleanup_job.take_unwrap();
+ let cleanup_job = self.cleanup_job.take().unwrap();
cleanup_job.run(self)
}
fn run(next: Box<GreenTask>) {
let mut task = GreenTask::convert(Local::take());
- let sched = task.sched.take_unwrap();
+ let sched = task.sched.take().unwrap();
sched.run_task(task, next)
}
// requested. This is the "try/catch" block for this green task and
// is the wrapper for *all* code run in the task.
let mut start = Some(start);
- let task = task.swap().run(|| start.take_unwrap()()).destroy();
+ let task = task.swap().run(|| start.take().unwrap()()).destroy();
// Once the function has exited, it's time to run the termination
// routine. This means we need to context switch one more time but
pub fn take_unwrap_home(&mut self) -> Home {
match self.task_type {
- TypeGreen(ref mut home) => home.take_unwrap(),
+ TypeGreen(ref mut home) => home.take().unwrap(),
TypeSched => rtabort!("type error: used SchedTask as GreenTask"),
}
}
}
pub fn swap(mut self: Box<GreenTask>) -> Box<Task> {
- let mut task = self.task.take_unwrap();
+ let mut task = self.task.take().unwrap();
task.put_runtime(self);
return task;
}
}
fn terminate(mut self: Box<GreenTask>) -> ! {
- let sched = self.sched.take_unwrap();
+ let sched = self.sched.take().unwrap();
sched.terminate_current_task(self)
}
impl Runtime for GreenTask {
fn yield_now(mut self: Box<GreenTask>, cur_task: Box<Task>) {
self.put_task(cur_task);
- let sched = self.sched.take_unwrap();
+ let sched = self.sched.take().unwrap();
sched.yield_now(self);
}
fn maybe_yield(mut self: Box<GreenTask>, cur_task: Box<Task>) {
self.put_task(cur_task);
- let sched = self.sched.take_unwrap();
+ let sched = self.sched.take().unwrap();
sched.maybe_yield(self);
}
cur_task: Box<Task>,
f: |BlockedTask| -> Result<(), BlockedTask>) {
self.put_task(cur_task);
- let mut sched = self.sched.take_unwrap();
+ let mut sched = self.sched.take().unwrap();
// In order for this task to be reawoken in all possible contexts, we
// may need a handle back in to the current scheduler. When we're woken
match running_task.maybe_take_runtime::<GreenTask>() {
Some(mut running_green_task) => {
running_green_task.put_task(running_task);
- let sched = running_green_task.sched.take_unwrap();
+ let sched = running_green_task.sched.take().unwrap();
if sched.pool_id == self.pool_id {
sched.run_task(running_green_task, self);
* definitions common-to-all (held in modules named c95, c99, posix88, posix01
* and posix08) and definitions that appear only on *some* platforms (named
* 'extra'). This would be things like significant OSX foundation kit, or Windows
-* library kernel32.dll, or various fancy glibc, linux or BSD extensions.
+* library kernel32.dll, or various fancy glibc, Linux or BSD extensions.
*
* In addition to the per-platform 'extra' modules, we define a module of
* 'common BSD' libc routines that never quite made it into POSIX but show up
*/
#![allow(non_camel_case_types)]
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
#![allow(non_uppercase_statics)]
#![allow(missing_doc)]
-#![allow(uppercase_variables)]
+#![allow(non_snake_case)]
#[cfg(test)] extern crate std;
#[cfg(test)] extern crate test;
pub use consts::os::posix88::{S_IREAD, S_IRUSR, S_IRWXU, S_IWUSR};
pub use consts::os::posix88::{STDERR_FILENO, STDIN_FILENO, S_IXUSR};
pub use consts::os::posix88::{STDOUT_FILENO, W_OK, X_OK};
-pub use consts::os::bsd44::{AF_INET, AF_INET6, SOCK_STREAM, SOCK_DGRAM};
+pub use consts::os::bsd44::{AF_INET, AF_INET6, SOCK_STREAM, SOCK_DGRAM, SOCK_RAW};
pub use consts::os::bsd44::{IPPROTO_IP, IPPROTO_IPV6, IPPROTO_TCP, TCP_NODELAY};
pub use consts::os::bsd44::{SOL_SOCKET, SO_KEEPALIVE, SO_ERROR};
pub use consts::os::bsd44::{SO_REUSEADDR, SO_BROADCAST, SHUT_WR, IP_MULTICAST_LOOP};
pub use consts::os::bsd44::{IP_ADD_MEMBERSHIP, IP_DROP_MEMBERSHIP};
pub use consts::os::bsd44::{IPV6_ADD_MEMBERSHIP, IPV6_DROP_MEMBERSHIP};
-pub use consts::os::bsd44::{IP_MULTICAST_TTL, IP_TTL, SHUT_RD};
+pub use consts::os::bsd44::{IP_MULTICAST_TTL, IP_TTL, IP_HDRINCL, SHUT_RD};
+pub use consts::os::extra::{IPPROTO_RAW};
pub use funcs::c95::ctype::{isalnum, isalpha, iscntrl, isdigit};
pub use funcs::c95::ctype::{islower, isprint, ispunct, isspace};
#[cfg(unix)] pub use consts::os::posix88::{ECANCELED, SIGINT, EINPROGRESS};
#[cfg(unix)] pub use consts::os::posix88::{ENOSYS, ENOTTY, ETIMEDOUT, EMFILE};
#[cfg(unix)] pub use consts::os::posix88::{SIGTERM, SIGKILL, SIGPIPE, PROT_NONE};
-#[cfg(unix)] pub use consts::os::posix01::{SIG_IGN};
+#[cfg(unix)] pub use consts::os::posix01::{SIG_IGN, F_GETFL, F_SETFL};
#[cfg(unix)] pub use consts::os::bsd44::{AF_UNIX};
+#[cfg(unix)] pub use consts::os::extra::{O_NONBLOCK};
#[cfg(unix)] pub use types::os::common::posix01::{pthread_t, timespec, timezone};
#[cfg(unix)] pub use types::os::arch::posix88::{uid_t, gid_t};
#[cfg(unix)] pub use types::os::arch::posix01::{pthread_attr_t};
#[cfg(unix)] pub use types::os::arch::posix01::{stat, utimbuf};
+#[cfg(unix)] pub use types::os::common::bsd44::{ifaddrs};
#[cfg(unix)] pub use funcs::posix88::unistd::{sysconf, setgid, setsid, setuid, pread, pwrite};
#[cfg(unix)] pub use funcs::posix88::unistd::{getgid, getuid};
#[cfg(unix)] pub use funcs::posix88::unistd::{_PC_NAME_MAX, utime, nanosleep, pathconf, link};
#[cfg(unix)] pub use funcs::posix88::mman::{mmap, munmap, mprotect};
#[cfg(unix)] pub use funcs::posix88::dirent::{opendir, readdir_r, closedir};
#[cfg(unix)] pub use funcs::posix88::fcntl::{fcntl};
+#[cfg(unix)] pub use funcs::posix88::net::{if_nametoindex};
#[cfg(unix)] pub use funcs::posix01::stat_::{lstat};
#[cfg(unix)] pub use funcs::posix01::unistd::{fsync, ftruncate};
#[cfg(unix)] pub use funcs::posix01::unistd::{readlink, symlink};
+#[cfg(unix)] pub use funcs::bsd43::{getifaddrs, freeifaddrs};
#[cfg(windows)] pub use consts::os::c95::{WSAECONNREFUSED, WSAECONNRESET, WSAEACCES};
#[cfg(windows)] pub use consts::os::c95::{WSAEWOULDBLOCK, WSAENOTCONN, WSAECONNABORTED};
#[cfg(windows)] pub use consts::os::extra::{ERROR_PIPE_CONNECTED, WAIT_OBJECT_0};
#[cfg(windows)] pub use consts::os::extra::{ERROR_NOT_FOUND};
#[cfg(windows)] pub use consts::os::extra::{ERROR_OPERATION_ABORTED};
+#[cfg(windows)] pub use consts::os::extra::{FIONBIO};
#[cfg(windows)] pub use types::os::common::bsd44::{SOCKET};
#[cfg(windows)] pub use types::os::common::posix01::{stat, utimbuf};
#[cfg(windows)] pub use types::os::arch::extra::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES};
#[cfg(windows)] pub use types::os::arch::extra::{LARGE_INTEGER, LPVOID, LONG};
#[cfg(windows)] pub use types::os::arch::extra::{time64_t, OVERLAPPED, LPCWSTR};
#[cfg(windows)] pub use types::os::arch::extra::{LPOVERLAPPED, SIZE_T, LPDWORD};
-#[cfg(windows)] pub use types::os::arch::extra::{SECURITY_ATTRIBUTES};
+#[cfg(windows)] pub use types::os::arch::extra::{SECURITY_ATTRIBUTES, WIN32_FIND_DATAW};
#[cfg(windows)] pub use funcs::c95::string::{wcslen};
#[cfg(windows)] pub use funcs::posix88::stat_::{wstat, wutime, wchmod, wrmdir};
#[cfg(windows)] pub use funcs::bsd43::{closesocket};
#[cfg(windows)] pub use funcs::extra::kernel32::{DisconnectNamedPipe, OpenProcess};
#[cfg(windows)] pub use funcs::extra::kernel32::{MoveFileExW, VirtualProtect};
#[cfg(windows)] pub use funcs::extra::msvcrt::{get_osfhandle, open_osfhandle};
+#[cfg(windows)] pub use funcs::extra::winsock::{ioctlsocket};
#[cfg(target_os = "linux")] #[cfg(target_os = "android")]
#[cfg(target_os = "freebsd")] #[cfg(target_os = "dragonfly")]
#[cfg(target_os = "linux")] #[cfg(target_os = "android")]
pub use funcs::posix01::unistd::{fdatasync};
+#[cfg(target_os = "linux")] #[cfg(target_os = "android")]
+pub use types::os::arch::extra::{sockaddr_ll};
+#[cfg(target_os = "linux")] #[cfg(target_os = "android")]
+pub use consts::os::extra::{AF_PACKET};
#[cfg(unix, not(target_os = "freebsd"))]
pub use consts::os::extra::{MAP_STACK};
pub type sighandler_t = size_t;
}
pub mod bsd44 {
+ use types::common::c95::{c_void};
use types::os::arch::c95::{c_char, c_int, c_uint};
pub type socklen_t = u32;
pub sun_family: sa_family_t,
pub sun_path: [c_char, ..108]
}
+
+ #[repr(C)]
+ pub struct ifaddrs {
+ pub ifa_next: *mut ifaddrs,
+ pub ifa_name: *mut c_char,
+ pub ifa_flags: c_uint,
+ pub ifa_addr: *mut sockaddr,
+ pub ifa_netmask: *mut sockaddr,
+ pub ifa_ifu: *mut sockaddr, // FIXME This should be a union
+ pub ifa_data: *mut c_void
+ }
+
}
}
}
pub mod posix08 {}
pub mod bsd44 {}
- pub mod extra {}
+ pub mod extra {
+ use types::os::arch::c95::{c_ushort, c_int, c_uchar};
+ #[repr(C)]
+ pub struct sockaddr_ll {
+ pub sll_family: c_ushort,
+ pub sll_protocol: c_ushort,
+ pub sll_ifindex: c_int,
+ pub sll_hatype: c_ushort,
+ pub sll_pkttype: c_uchar,
+ pub sll_halen: c_uchar,
+ pub sll_addr: [c_uchar, ..8]
+ }
+ }
+
}
#[cfg(target_arch = "x86_64")]
pub mod bsd44 {
}
pub mod extra {
+ use types::os::arch::c95::{c_ushort, c_int, c_uchar};
+ pub struct sockaddr_ll {
+ pub sll_family: c_ushort,
+ pub sll_protocol: c_ushort,
+ pub sll_ifindex: c_int,
+ pub sll_hatype: c_ushort,
+ pub sll_pkttype: c_uchar,
+ pub sll_halen: c_uchar,
+ pub sll_addr: [c_uchar, ..8]
+ }
+
}
}
}
pub type sighandler_t = size_t;
}
pub mod bsd44 {
+ use types::common::c95::{c_void};
use types::os::arch::c95::{c_char, c_int, c_uint};
pub type socklen_t = u32;
pub sun_family: sa_family_t,
pub sun_path: [c_char, ..104]
}
+ #[repr(C)]
+ pub struct ifaddrs {
+ pub ifa_next: *mut ifaddrs,
+ pub ifa_name: *mut c_char,
+ pub ifa_flags: c_uint,
+ pub ifa_addr: *mut sockaddr,
+ pub ifa_netmask: *mut sockaddr,
+ pub ifa_dstaddr: *mut sockaddr,
+ pub ifa_data: *mut c_void
+ }
+
+
}
}
pub type LPWSAPROTOCOL_INFO = *mut WSAPROTOCOL_INFO;
pub type GROUP = c_uint;
+
+ #[repr(C)]
+ pub struct WIN32_FIND_DATAW {
+ pub dwFileAttributes: DWORD,
+ pub ftCreationTime: FILETIME,
+ pub ftLastAccessTime: FILETIME,
+ pub ftLastWriteTime: FILETIME,
+ pub nFileSizeHigh: DWORD,
+ pub nFileSizeLow: DWORD,
+ pub dwReserved0: DWORD,
+ pub dwReserved1: DWORD,
+ pub cFileName: [wchar_t, ..260], // #define MAX_PATH 260
+ pub cAlternateFileName: [wchar_t, ..14],
+ }
+
+ pub type LPWIN32_FIND_DATAW = *mut WIN32_FIND_DATAW;
}
}
}
}
pub mod bsd44 {
+ use types::common::c95::{c_void};
use types::os::arch::c95::{c_char, c_int, c_uint};
pub type socklen_t = c_int;
pub sun_family: sa_family_t,
pub sun_path: [c_char, ..104]
}
+ #[repr(C)]
+ pub struct ifaddrs {
+ pub ifa_next: *mut ifaddrs,
+ pub ifa_name: *mut c_char,
+ pub ifa_flags: c_uint,
+ pub ifa_addr: *mut sockaddr,
+ pub ifa_netmask: *mut sockaddr,
+ pub ifa_dstaddr: *mut sockaddr,
+ pub ifa_data: *mut c_void
+ }
}
}
pub static AF_INET6: c_int = 23;
pub static SOCK_STREAM: c_int = 1;
pub static SOCK_DGRAM: c_int = 2;
+ pub static SOCK_RAW: c_int = 3;
pub static IPPROTO_TCP: c_int = 6;
pub static IPPROTO_IP: c_int = 0;
pub static IPPROTO_IPV6: c_int = 41;
pub static IPV6_ADD_MEMBERSHIP: c_int = 5;
pub static IPV6_DROP_MEMBERSHIP: c_int = 6;
pub static IP_TTL: c_int = 4;
+ pub static IP_HDRINCL: c_int = 2;
pub static TCP_NODELAY: c_int = 0x0001;
pub static SOL_SOCKET: c_int = 0xffff;
pub static SO_REUSEADDR: c_int = 4;
pub static SO_ERROR: c_int = 0x1007;
+ pub static IFF_LOOPBACK: c_int = 4;
+
pub static SHUT_RD: c_int = 0;
pub static SHUT_WR: c_int = 1;
pub static SHUT_RDWR: c_int = 2;
}
pub mod extra {
- use types::os::arch::c95::c_int;
+ use types::os::arch::c95::{c_int, c_long};
use types::os::arch::extra::{WORD, DWORD, BOOL, HANDLE};
pub static TRUE : BOOL = 1;
pub static PIPE_ACCEPT_REMOTE_CLIENTS: DWORD = 0x00000000;
pub static PIPE_REJECT_REMOTE_CLIENTS: DWORD = 0x00000008;
pub static PIPE_UNLIMITED_INSTANCES: DWORD = 255;
+
+ pub static IPPROTO_RAW: c_int = 255;
+
+ pub static FIONBIO: c_long = -0x7FFB9982;
}
pub mod sysconf {
}
pub mod posix01 {
use types::os::arch::c95::{c_int, size_t};
+ pub static F_DUPFD : c_int = 0;
+ pub static F_GETFD : c_int = 1;
+ pub static F_SETFD : c_int = 2;
+ pub static F_GETFL : c_int = 3;
+ pub static F_SETFL : c_int = 4;
+
pub static SIGTRAP : c_int = 5;
pub static SIGPIPE: c_int = 13;
pub static SIG_IGN: size_t = 1;
pub static MADV_UNMERGEABLE : c_int = 13;
pub static MADV_HWPOISON : c_int = 100;
+ pub static IFF_LOOPBACK: c_int = 0x8;
+
pub static AF_UNIX: c_int = 1;
pub static AF_INET: c_int = 2;
pub static AF_INET6: c_int = 10;
pub static SOCK_STREAM: c_int = 1;
pub static SOCK_DGRAM: c_int = 2;
+ pub static SOCK_RAW: c_int = 3;
pub static IPPROTO_TCP: c_int = 6;
pub static IPPROTO_IP: c_int = 0;
pub static IPPROTO_IPV6: c_int = 41;
pub static IP_MULTICAST_TTL: c_int = 33;
pub static IP_MULTICAST_LOOP: c_int = 34;
pub static IP_TTL: c_int = 2;
+ pub static IP_HDRINCL: c_int = 3;
pub static IP_ADD_MEMBERSHIP: c_int = 35;
pub static IP_DROP_MEMBERSHIP: c_int = 36;
pub static IPV6_ADD_MEMBERSHIP: c_int = 20;
pub static AF_INET6: c_int = 10;
pub static SOCK_STREAM: c_int = 2;
pub static SOCK_DGRAM: c_int = 1;
+ pub static SOCK_RAW: c_int = 3;
pub static IPPROTO_TCP: c_int = 6;
pub static IPPROTO_IP: c_int = 0;
pub static IPPROTO_IPV6: c_int = 41;
pub static IP_MULTICAST_TTL: c_int = 33;
pub static IP_MULTICAST_LOOP: c_int = 34;
pub static IP_TTL: c_int = 2;
+ pub static IP_HDRINCL: c_int = 3;
pub static IP_ADD_MEMBERSHIP: c_int = 35;
pub static IP_DROP_MEMBERSHIP: c_int = 36;
pub static IPV6_ADD_MEMBERSHIP: c_int = 20;
pub mod extra {
use types::os::arch::c95::c_int;
+ pub static AF_PACKET : c_int = 17;
+ pub static IPPROTO_RAW : c_int = 255;
+
pub static O_RSYNC : c_int = 1052672;
pub static O_DSYNC : c_int = 4096;
+ pub static O_NONBLOCK : c_int = 2048;
pub static O_SYNC : c_int = 1052672;
pub static PROT_GROWSDOWN : c_int = 0x010000000;
pub mod extra {
use types::os::arch::c95::c_int;
+ pub static AF_PACKET : c_int = 17;
+ pub static IPPROTO_RAW : c_int = 255;
+
pub static O_RSYNC : c_int = 16400;
pub static O_DSYNC : c_int = 16;
+ pub static O_NONBLOCK : c_int = 128;
pub static O_SYNC : c_int = 16400;
pub static PROT_GROWSDOWN : c_int = 0x01000000;
pub mod posix01 {
use types::os::arch::c95::{c_int, size_t};
+ pub static F_DUPFD : c_int = 0;
+ pub static F_GETFD : c_int = 1;
+ pub static F_SETFD : c_int = 2;
+ pub static F_GETFL : c_int = 3;
+ pub static F_SETFL : c_int = 4;
+
pub static SIGTRAP : c_int = 5;
pub static SIGPIPE: c_int = 13;
pub static SIG_IGN: size_t = 1;
pub static AF_UNIX: c_int = 1;
pub static SOCK_STREAM: c_int = 1;
pub static SOCK_DGRAM: c_int = 2;
+ pub static SOCK_RAW: c_int = 3;
pub static IPPROTO_TCP: c_int = 6;
pub static IPPROTO_IP: c_int = 0;
pub static IPPROTO_IPV6: c_int = 41;
pub static IP_MULTICAST_TTL: c_int = 10;
pub static IP_MULTICAST_LOOP: c_int = 11;
pub static IP_TTL: c_int = 4;
+ pub static IP_HDRINCL: c_int = 2;
pub static IP_ADD_MEMBERSHIP: c_int = 12;
pub static IP_DROP_MEMBERSHIP: c_int = 13;
pub static IPV6_ADD_MEMBERSHIP: c_int = 12;
pub static SO_REUSEADDR: c_int = 0x0004;
pub static SO_ERROR: c_int = 0x1007;
+ pub static IFF_LOOPBACK: c_int = 0x8;
+
pub static SHUT_RD: c_int = 0;
pub static SHUT_WR: c_int = 1;
pub static SHUT_RDWR: c_int = 2;
use types::os::arch::c95::c_int;
pub static O_SYNC : c_int = 128;
+ pub static O_NONBLOCK : c_int = 4;
pub static CTL_KERN: c_int = 1;
pub static KERN_PROC: c_int = 14;
#[cfg(target_os = "freebsd")]
pub static MAP_STACK : c_int = 0x0400;
pub static MAP_NOSYNC : c_int = 0x0800;
pub static MAP_NOCORE : c_int = 0x020000;
+
+ pub static IPPROTO_RAW : c_int = 255;
}
pub mod sysconf {
use types::os::arch::c95::c_int;
pub mod posix01 {
use types::os::arch::c95::{c_int, size_t};
+ pub static F_DUPFD : c_int = 0;
+ pub static F_GETFD : c_int = 1;
+ pub static F_SETFD : c_int = 2;
+ pub static F_GETFL : c_int = 3;
+ pub static F_SETFL : c_int = 4;
+
pub static SIGTRAP : c_int = 5;
pub static SIGPIPE: c_int = 13;
pub static SIG_IGN: size_t = 1;
pub static AF_INET6: c_int = 30;
pub static SOCK_STREAM: c_int = 1;
pub static SOCK_DGRAM: c_int = 2;
+ pub static SOCK_RAW: c_int = 3;
pub static IPPROTO_TCP: c_int = 6;
pub static IPPROTO_IP: c_int = 0;
pub static IPPROTO_IPV6: c_int = 41;
pub static IP_MULTICAST_TTL: c_int = 10;
pub static IP_MULTICAST_LOOP: c_int = 11;
pub static IP_TTL: c_int = 4;
+ pub static IP_HDRINCL: c_int = 2;
pub static IP_ADD_MEMBERSHIP: c_int = 12;
pub static IP_DROP_MEMBERSHIP: c_int = 13;
pub static IPV6_ADD_MEMBERSHIP: c_int = 12;
pub static SO_REUSEADDR: c_int = 0x0004;
pub static SO_ERROR: c_int = 0x1007;
+ pub static IFF_LOOPBACK: c_int = 0x8;
+
pub static SHUT_RD: c_int = 0;
pub static SHUT_WR: c_int = 1;
pub static SHUT_RDWR: c_int = 2;
pub static O_DSYNC : c_int = 4194304;
pub static O_SYNC : c_int = 128;
+ pub static O_NONBLOCK : c_int = 4;
pub static F_FULLFSYNC : c_int = 51;
pub static MAP_COPY : c_int = 0x0002;
pub static MAP_NOCACHE : c_int = 0x0400;
pub static MAP_JIT : c_int = 0x0800;
pub static MAP_STACK : c_int = 0;
+
+ pub static IPPROTO_RAW : c_int = 255;
}
pub mod sysconf {
use types::os::arch::c95::c_int;
pub fn shm_unlink(name: *const c_char) -> c_int;
}
}
+
+    // Network-interface name lookups (POSIX <net/if.h>).
+    pub mod net {
+        use types::os::arch::c95::{c_char, c_uint};
+
+        extern {
+            // Maps an interface name (e.g. "eth0") to its numeric index;
+            // returns 0 on failure per if_nametoindex(3).
+            pub fn if_nametoindex(ifname: *const c_char) -> c_uint;
+        }
+    }
+
}
#[cfg(target_os = "linux")]
pub fn glob(pattern: *const c_char,
flags: c_int,
errfunc: ::Nullable<extern "C" fn(epath: *const c_char,
- errno: c_int) -> int>,
+ errno: c_int) -> c_int>,
pglob: *mut glob_t);
pub fn globfree(pglob: *mut glob_t);
}
pub mod mman {
}
+
+    // Intentionally empty — presumably kept so the module tree mirrors the
+    // unix `net` module added in this same patch; TODO confirm whether a
+    // Windows binding (e.g. via winsock) should live here instead.
+    pub mod net {
+    }
}
#[cfg(not(windows))]
pub mod bsd43 {
use types::common::c95::{c_void};
- use types::os::common::bsd44::{socklen_t, sockaddr};
+ use types::os::common::bsd44::{socklen_t, sockaddr, ifaddrs};
use types::os::arch::c95::{c_int, size_t};
use types::os::arch::posix88::ssize_t;
pub fn sendto(socket: c_int, buf: *const c_void, len: size_t,
flags: c_int, addr: *const sockaddr,
addrlen: socklen_t) -> ssize_t;
+ pub fn getifaddrs(ifap: *mut *mut ifaddrs) -> c_int;
+ pub fn freeifaddrs(ifa: *mut ifaddrs);
pub fn shutdown(socket: c_int, how: c_int) -> c_int;
}
}
#[cfg(target_os = "dragonfly")]
pub mod bsd44 {
use types::common::c95::{c_void};
- use types::os::arch::c95::{c_char, c_uchar, c_int, c_uint, size_t};
+ use types::os::arch::c95::{c_char, c_uchar, c_int, c_uint, c_ulong, size_t};
extern {
+ pub fn ioctl(d: c_int, request: c_ulong, ...) -> c_int;
pub fn sysctl(name: *mut c_int,
namelen: c_uint,
oldp: *mut c_void,
extern {
pub fn getdtablesize() -> c_int;
+ pub fn ioctl(d: c_int, request: c_int, ...) -> c_int;
pub fn madvise(addr: *mut c_void, len: size_t, advice: c_int)
-> c_int;
pub fn mincore(addr: *mut c_void, len: size_t, vec: *mut c_uchar)
LPMEMORY_BASIC_INFORMATION,
LPSYSTEM_INFO, HANDLE, LPHANDLE,
LARGE_INTEGER, PLARGE_INTEGER,
- LPFILETIME};
+ LPFILETIME, LPWIN32_FIND_DATAW};
extern "system" {
pub fn GetEnvironmentVariableW(n: LPCWSTR,
-> DWORD;
pub fn SetCurrentDirectoryW(lpPathName: LPCWSTR) -> BOOL;
pub fn GetLastError() -> DWORD;
- pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: HANDLE)
+ pub fn FindFirstFileW(fileName: LPCWSTR, findFileData: LPWIN32_FIND_DATAW)
-> HANDLE;
- pub fn FindNextFileW(findFile: HANDLE, findFileData: HANDLE)
+ pub fn FindNextFileW(findFile: HANDLE, findFileData: LPWIN32_FIND_DATAW)
-> BOOL;
pub fn FindClose(findFile: HANDLE) -> BOOL;
pub fn DuplicateHandle(hSourceProcessHandle: HANDLE,
flags: c_int) -> c_int;
}
}
+
+    // Winsock-specific entry points that have no direct POSIX equivalent.
+    pub mod winsock {
+        use types::os::arch::c95::{c_int, c_long, c_ulong};
+        use types::os::common::bsd44::SOCKET;
+
+        extern "system" {
+            // Controls a socket's I/O mode (e.g. cmd = FIONBIO, defined in
+            // this patch, toggles non-blocking mode).
+            pub fn ioctlsocket(s: SOCKET, cmd: c_long, argp: *mut c_ulong) -> c_int;
+        }
+    }
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use regex::Regex;
use std::ascii::AsciiExt;
use std::cmp;
}).map(|p| cmp::min(p, ::MAX_LOG_LEVEL))
}
-/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=1")
+/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=1/foo")
/// and return a vector with log directives.
///
/// Valid log levels are 0-255, with the most likely ones being 1-4 (defined in
/// std::). Also supports string log levels of error, warn, info, and debug
-pub fn parse_logging_spec(spec: &str) -> Vec<LogDirective> {
+pub fn parse_logging_spec(spec: &str) -> (Vec<LogDirective>, Option<Regex>) {
let mut dirs = Vec::new();
- for s in spec.split(',') {
+
+ let mut parts = spec.split('/');
+ let mods = parts.next();
+ let filter = parts.next();
+ if parts.next().is_some() {
+ println!("warning: invalid logging spec '{}', \
+ ignoring it (too many '/'s)", spec);
+ return (dirs, None);
+ }
+ mods.map(|m| { for s in m.split(',') {
if s.len() == 0 { continue }
let mut parts = s.split('=');
let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) {
name: name.map(|s| s.to_string()),
level: log_level,
});
- }
- return dirs;
+ }});
+
+ let filter = filter.map_or(None, |filter| {
+ match Regex::new(filter) {
+ Ok(re) => Some(re),
+ Err(e) => {
+ println!("warning: invalid regex filter - {}", e);
+ None
+ }
+ }
+ });
+
+ return (dirs, filter);
}
#[cfg(test)]
#[test]
fn parse_logging_spec_valid() {
- let dirs = parse_logging_spec("crate1::mod1=1,crate1::mod2,crate2=4");
+ let (dirs, filter) = parse_logging_spec("crate1::mod1=1,crate1::mod2,crate2=4");
let dirs = dirs.as_slice();
assert_eq!(dirs.len(), 3);
assert_eq!(dirs[0].name, Some("crate1::mod1".to_string()));
assert_eq!(dirs[2].name, Some("crate2".to_string()));
assert_eq!(dirs[2].level, 4);
+ assert!(filter.is_none());
}
    #[test]
    fn parse_logging_spec_invalid_crate() {
        // test parse_logging_spec with multiple = in specification
-        let dirs = parse_logging_spec("crate1::mod1=1=2,crate2=4");
+        let (dirs, filter) = parse_logging_spec("crate1::mod1=1=2,crate2=4");
        let dirs = dirs.as_slice();
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, 4);
+        // no '/' in the spec, so no regex filter is produced
+        assert!(filter.is_none());
    }
    #[test]
    fn parse_logging_spec_invalid_log_level() {
        // test parse_logging_spec with 'noNumber' as log level
-        let dirs = parse_logging_spec("crate1::mod1=noNumber,crate2=4");
+        let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=4");
        let dirs = dirs.as_slice();
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, 4);
+        // no '/' in the spec, so no regex filter is produced
+        assert!(filter.is_none());
    }
    #[test]
    fn parse_logging_spec_string_log_level() {
        // test parse_logging_spec with 'warn' as log level
-        let dirs = parse_logging_spec("crate1::mod1=wrong,crate2=warn");
+        let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn");
        let dirs = dirs.as_slice();
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, ::WARN);
+        // no '/' in the spec, so no regex filter is produced
+        assert!(filter.is_none());
    }
    #[test]
    fn parse_logging_spec_empty_log_level() {
        // test parse_logging_spec with '' as log level
-        let dirs = parse_logging_spec("crate1::mod1=wrong,crate2=");
+        let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=");
        let dirs = dirs.as_slice();
        assert_eq!(dirs.len(), 1);
        assert_eq!(dirs[0].name, Some("crate2".to_string()));
        assert_eq!(dirs[0].level, ::MAX_LOG_LEVEL);
+        // no '/' in the spec, so no regex filter is produced
+        assert!(filter.is_none());
    }
    #[test]
    fn parse_logging_spec_global() {
        // test parse_logging_spec with no crate
-        let dirs = parse_logging_spec("warn,crate2=4");
+        let (dirs, filter) = parse_logging_spec("warn,crate2=4");
        let dirs = dirs.as_slice();
        assert_eq!(dirs.len(), 2);
        assert_eq!(dirs[0].name, None);
        assert_eq!(dirs[0].level, 2);
        assert_eq!(dirs[1].name, Some("crate2".to_string()));
        assert_eq!(dirs[1].level, 4);
+        // no '/' in the spec, so no regex filter is produced
+        assert!(filter.is_none());
+    }
+
+ #[test]
+ fn parse_logging_spec_valid_filter() {
+ let (dirs, filter) = parse_logging_spec("crate1::mod1=1,crate1::mod2,crate2=4/abc");
+ let dirs = dirs.as_slice();
+ assert_eq!(dirs.len(), 3);
+ assert_eq!(dirs[0].name, Some("crate1::mod1".to_string()));
+ assert_eq!(dirs[0].level, 1);
+
+ assert_eq!(dirs[1].name, Some("crate1::mod2".to_string()));
+ assert_eq!(dirs[1].level, ::MAX_LOG_LEVEL);
+
+ assert_eq!(dirs[2].name, Some("crate2".to_string()));
+ assert_eq!(dirs[2].level, 4);
+ assert!(filter.is_some() && filter.unwrap().to_string().as_slice() == "abc");
+ }
+
+    #[test]
+    fn parse_logging_spec_invalid_crate_filter() {
+        // test parse_logging_spec with a malformed directive (two '=') plus a
+        // regex filter: the bad directive is dropped, the filter still parses
+        let (dirs, filter) = parse_logging_spec("crate1::mod1=1=2,crate2=4/a.c");
+        let dirs = dirs.as_slice();
+        assert_eq!(dirs.len(), 1);
+        assert_eq!(dirs[0].name, Some("crate2".to_string()));
+        assert_eq!(dirs[0].level, 4);
+        assert!(filter.is_some() && filter.unwrap().to_string().as_slice() == "a.c");
+    }
+
+    #[test]
+    fn parse_logging_spec_empty_with_filter() {
+        // test parse_logging_spec with a level-less module plus a regex filter
+        let (dirs, filter) = parse_logging_spec("crate1/a*c");
+        let dirs = dirs.as_slice();
+        assert_eq!(dirs.len(), 1);
+        assert_eq!(dirs[0].name, Some("crate1".to_string()));
+        assert_eq!(dirs[0].level, ::MAX_LOG_LEVEL);
+        assert!(filter.is_some() && filter.unwrap().to_string().as_slice() == "a*c");
+    }
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/*!
-
-Utilities for program-wide and customizable logging
-
-## Example
-
-```
-#![feature(phase)]
-#[phase(plugin, link)] extern crate log;
-
-fn main() {
- debug!("this is a debug {}", "message");
- error!("this is printed by default");
-
- if log_enabled!(log::INFO) {
- let x = 3i * 4i; // expensive computation
- info!("the answer was: {}", x);
- }
-}
-```
-
-## Logging Macros
-
-There are five macros that the logging subsystem uses:
-
-* `log!(level, ...)` - the generic logging macro, takes a level as a u32 and any
- related `format!` arguments
-* `debug!(...)` - a macro hard-wired to the log level of `DEBUG`
-* `info!(...)` - a macro hard-wired to the log level of `INFO`
-* `warn!(...)` - a macro hard-wired to the log level of `WARN`
-* `error!(...)` - a macro hard-wired to the log level of `ERROR`
-
-All of these macros use the same style of syntax as the `format!` syntax
-extension. Details about the syntax can be found in the documentation of
-`std::fmt` along with the Rust tutorial/manual.
-
-If you want to check at runtime if a given logging level is enabled (e.g. if the
-information you would want to log is expensive to produce), you can use the
-following macro:
-
-* `log_enabled!(level)` - returns true if logging of the given level is enabled
-
-## Enabling logging
-
-Log levels are controlled on a per-module basis, and by default all logging is
-disabled except for `error!` (a log level of 1). Logging is controlled via the
-`RUST_LOG` environment variable. The value of this environment variable is a
-comma-separated list of logging directives. A logging directive is of the form:
-
-```text
-path::to::module=log_level
-```
-
-The path to the module is rooted in the name of the crate it was compiled for,
-so if your program is contained in a file `hello.rs`, for example, to turn on
-logging for this file you would use a value of `RUST_LOG=hello`.
-Furthermore, this path is a prefix-search, so all modules nested in the
-specified module will also have logging enabled.
-
-The actual `log_level` is optional to specify. If omitted, all logging will be
-enabled. If specified, the it must be either a numeric in the range of 1-255, or
-it must be one of the strings `debug`, `error`, `info`, or `warn`. If a numeric
-is specified, then all logging less than or equal to that numeral is enabled.
-For example, if logging level 3 is active, error, warn, and info logs will be
-printed, but debug will be omitted.
-
-As the log level for a module is optional, the module to enable logging for is
-also optional. If only a `log_level` is provided, then the global log level for
-all modules is set to this value.
-
-Some examples of valid values of `RUST_LOG` are:
-
-```text
-hello // turns on all logging for the 'hello' module
-info // turns on all info logging
-hello=debug // turns on debug logging for 'hello'
-hello=3 // turns on info logging for 'hello'
-hello,std::option // turns on hello, and std's option logging
-error,hello=warn // turn on global error logging and also warn for hello
-```
-
-## Performance and Side Effects
-
-Each of these macros will expand to code similar to:
-
-```rust,ignore
-if log_level <= my_module_log_level() {
- ::log::log(log_level, format!(...));
-}
-```
-
-What this means is that each of these macros are very cheap at runtime if
-they're turned off (just a load and an integer comparison). This also means that
-if logging is disabled, none of the components of the log will be executed.
-
-*/
+//! Utilities for program-wide and customizable logging
+//!
+//! ## Example
+//!
+//! ```
+//! #![feature(phase)]
+//! #[phase(plugin, link)] extern crate log;
+//!
+//! fn main() {
+//! debug!("this is a debug {}", "message");
+//! error!("this is printed by default");
+//!
+//! if log_enabled!(log::INFO) {
+//! let x = 3i * 4i; // expensive computation
+//! info!("the answer was: {}", x);
+//! }
+//! }
+//! ```
+//!
+//! Assumes the binary is `main`:
+//!
+//! ```{.bash}
+//! $ RUST_LOG=error ./main
+//! ERROR:main: this is printed by default
+//! ```
+//!
+//! ```{.bash}
+//! $ RUST_LOG=info ./main
+//! ERROR:main: this is printed by default
+//! INFO:main: the answer was: 12
+//! ```
+//!
+//! ```{.bash}
+//! $ RUST_LOG=debug ./main
+//! DEBUG:main: this is a debug message
+//! ERROR:main: this is printed by default
+//! INFO:main: the answer was: 12
+//! ```
+//!
+//! You can also set the log level on a per module basis:
+//!
+//! ```{.bash}
+//! $ RUST_LOG=main=info ./main
+//! ERROR:main: this is printed by default
+//! INFO:main: the answer was: 12
+//! ```
+//!
+//! And enable all logging:
+//!
+//! ```{.bash}
+//! $ RUST_LOG=main ./main
+//! DEBUG:main: this is a debug message
+//! ERROR:main: this is printed by default
+//! INFO:main: the answer was: 12
+//! ```
+//!
+//!
+//! ## Logging Macros
+//!
+//! There are five macros that the logging subsystem uses:
+//!
+//! * `log!(level, ...)` - the generic logging macro, takes a level as a u32 and any
+//! related `format!` arguments
+//! * `debug!(...)` - a macro hard-wired to the log level of `DEBUG`
+//! * `info!(...)` - a macro hard-wired to the log level of `INFO`
+//! * `warn!(...)` - a macro hard-wired to the log level of `WARN`
+//! * `error!(...)` - a macro hard-wired to the log level of `ERROR`
+//!
+//! All of these macros use the same style of syntax as the `format!` syntax
+//! extension. Details about the syntax can be found in the documentation of
+//! `std::fmt` along with the Rust tutorial/manual.
+//!
+//! If you want to check at runtime if a given logging level is enabled (e.g. if the
+//! information you would want to log is expensive to produce), you can use the
+//! following macro:
+//!
+//! * `log_enabled!(level)` - returns true if logging of the given level is enabled
+//!
+//! ## Enabling logging
+//!
+//! Log levels are controlled on a per-module basis, and by default all logging is
+//! disabled except for `error!` (a log level of 1). Logging is controlled via the
+//! `RUST_LOG` environment variable. The value of this environment variable is a
+//! comma-separated list of logging directives. A logging directive is of the form:
+//!
+//! ```text
+//! path::to::module=log_level
+//! ```
+//!
+//! The path to the module is rooted in the name of the crate it was compiled for,
+//! so if your program is contained in a file `hello.rs`, for example, to turn on
+//! logging for this file you would use a value of `RUST_LOG=hello`.
+//! Furthermore, this path is a prefix-search, so all modules nested in the
+//! specified module will also have logging enabled.
+//!
+//! The actual `log_level` is optional to specify. If omitted, all logging will be
+//! enabled. If specified, it must be either a numeric in the range of 1-255, or
+//! it must be one of the strings `debug`, `error`, `info`, or `warn`. If a numeric
+//! is specified, then all logging less than or equal to that numeral is enabled.
+//! For example, if logging level 3 is active, error, warn, and info logs will be
+//! printed, but debug will be omitted.
+//!
+//! As the log level for a module is optional, the module to enable logging for is
+//! also optional. If only a `log_level` is provided, then the global log level for
+//! all modules is set to this value.
+//!
+//! Some examples of valid values of `RUST_LOG` are:
+//!
+//! * `hello` turns on all logging for the 'hello' module
+//! * `info` turns on all info logging
+//! * `hello=debug` turns on debug logging for 'hello'
+//! * `hello=3` turns on info logging for 'hello'
+//! * `hello,std::option` turns on hello, and std's option logging
+//! * `error,hello=warn` turn on global error logging and also warn for hello
+//!
+//! ## Filtering results
+//!
+//! A RUST_LOG directive may include a regex filter. The syntax is to append `/`
+//! followed by a regex. Each message is checked against the regex, and is only
+//! logged if it matches. Note that the matching is done after formatting the log
+//! string but before adding any logging meta-data. There is a single filter for all
+//! modules.
+//!
+//! Some examples:
+//!
+//! * `hello/foo` turns on all logging for the 'hello' module where the log message
+//! includes 'foo'.
+//! * `info/f.o` turns on all info logging where the log message includes 'foo',
+//! 'f1o', 'fao', etc.
+//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log
+//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc.
+//! * `error,hello=warn/[0-9] scopes` turn on global error logging and also warn for
+//! hello. In both cases the log message must include a single digit number
+//! followed by 'scopes'
+//!
+//! ## Performance and Side Effects
+//!
+//! Each of these macros will expand to code similar to:
+//!
+//! ```rust,ignore
+//! if log_level <= my_module_log_level() {
+//! ::log::log(log_level, format!(...));
+//! }
+//! ```
+//!
+//! What this means is that each of these macros are very cheap at runtime if
+//! they're turned off (just a load and an integer comparison). This also means that
+//! if logging is disabled, none of the components of the log will be executed.
#![crate_name = "log"]
#![experimental]
#![feature(macro_rules)]
#![deny(missing_doc)]
+extern crate regex;
+
+use regex::Regex;
use std::fmt;
use std::io::LineBufferedWriter;
use std::io;
static mut DIRECTIVES: *const Vec<directive::LogDirective> =
0 as *const Vec<directive::LogDirective>;
+/// Optional regex filter.
+static mut FILTER: *const Regex = 0 as *const _;
+
/// Debug log level
pub static DEBUG: u32 = 4;
/// Info log level
/// invoked through the logging family of macros.
#[doc(hidden)]
pub fn log(level: u32, loc: &'static LogLocation, args: &fmt::Arguments) {
+    // Test the literal string from args against the current filter, if there
+    // is one. Per the module docs added in this patch, a message is only
+    // logged when it *matches* the filter, so bail out early when it does
+    // NOT match. (The previous revision of this hunk returned on a match,
+    // inverting the documented behavior.)
+    match unsafe { FILTER.as_ref() } {
+        Some(filter) if !filter.is_match(args.to_string().as_slice()) => return,
+        _ => {}
+    }
+
// Completely remove the local logger from TLS in case anyone attempts to
// frob the slot while we're doing the logging. This will destroy any logger
// set during logging.
/// This is not threadsafe at all, so initialization os performed through a
/// `Once` primitive (and this function is called from that primitive).
fn init() {
- let mut directives = match os::getenv("RUST_LOG") {
+ let (mut directives, filter) = match os::getenv("RUST_LOG") {
Some(spec) => directive::parse_logging_spec(spec.as_slice()),
- None => Vec::new(),
+ None => (Vec::new(), None),
};
// Sort the provided directives by length of their name, this allows a
unsafe {
LOG_LEVEL = max_level;
+ assert!(FILTER.is_null());
+ match filter {
+ Some(f) => FILTER = mem::transmute(box f),
+ None => {}
+ }
+
assert!(DIRECTIVES.is_null());
DIRECTIVES = mem::transmute(box directives);
- // Schedule the cleanup for this global for when the runtime exits.
+ // Schedule the cleanup for the globals for when the runtime exits.
rt::at_exit(proc() {
assert!(!DIRECTIVES.is_null());
let _directives: Box<Vec<directive::LogDirective>> =
mem::transmute(DIRECTIVES);
DIRECTIVES = 0 as *const Vec<directive::LogDirective>;
+
+ if !FILTER.is_null() {
+ let _filter: Box<Regex> = mem::transmute(FILTER);
+ FILTER = 0 as *const _;
+ }
});
}
}
/// #![feature(phase)]
/// #[phase(plugin, link)] extern crate log;
///
-/// # fn main() {
-/// log!(log::DEBUG, "this is a debug message");
-/// log!(log::WARN, "this is a warning {}", "message");
-/// log!(6, "this is a custom logging level: {level}", level=6u);
-/// # }
+/// fn main() {
+/// log!(log::WARN, "this is a warning {}", "message");
+/// log!(log::DEBUG, "this is a debug message");
+/// log!(6, "this is a custom logging level: {level}", level=6u);
+/// }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=warn ./main
+/// WARN:main: this is a warning message
+/// ```
+///
+/// ```{.bash}
+/// $ RUST_LOG=debug ./main
+/// DEBUG:main: this is a debug message
+/// WARN:main: this is a warning message
+/// ```
+///
+/// ```{.bash}
+/// $ RUST_LOG=6 ./main
+/// DEBUG:main: this is a debug message
+/// WARN:main: this is a warning message
+/// 6:main: this is a custom logging level: 6
/// ```
#[macro_export]
macro_rules! log(
/// #![feature(phase)]
/// #[phase(plugin, link)] extern crate log;
///
-/// # fn main() {
-/// # let error = 3u;
-/// error!("the build has failed with error code: {}", error);
-/// # }
+/// fn main() {
+/// let error = 3u;
+/// error!("the build has failed with error code: {}", error);
+/// }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=error ./main
+/// ERROR:main: the build has failed with error code: 3
/// ```
+///
#[macro_export]
macro_rules! error(
($($arg:tt)*) => (log!(::log::ERROR, $($arg)*))
/// #![feature(phase)]
/// #[phase(plugin, link)] extern crate log;
///
-/// # fn main() {
-/// # let code = 3u;
-/// warn!("you may like to know that a process exited with: {}", code);
-/// # }
+/// fn main() {
+/// let code = 3u;
+/// warn!("you may like to know that a process exited with: {}", code);
+/// }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=warn ./main
+/// WARN:main: you may like to know that a process exited with: 3
/// ```
#[macro_export]
macro_rules! warn(
/// #![feature(phase)]
/// #[phase(plugin, link)] extern crate log;
///
-/// # fn main() {
-/// # let ret = 3i;
-/// info!("this function is about to return: {}", ret);
-/// # }
+/// fn main() {
+/// let ret = 3i;
+/// info!("this function is about to return: {}", ret);
+/// }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=info ./main
+/// INFO:main: this function is about to return: 3
/// ```
#[macro_export]
macro_rules! info(
/// #![feature(phase)]
/// #[phase(plugin, link)] extern crate log;
///
-/// # fn main() {
-/// debug!("x = {x}, y = {y}", x=10i, y=20i);
-/// # }
+/// fn main() {
+/// debug!("x = {x}, y = {y}", x=10i, y=20i);
+/// }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=debug ./main
+/// DEBUG:main: x = 10, y = 20
/// ```
#[macro_export]
macro_rules! debug(
/// #![feature(phase)]
/// #[phase(plugin, link)] extern crate log;
///
-/// # fn main() {
-/// # struct Point { x: int, y: int }
-/// # fn some_expensive_computation() -> Point { Point { x: 1, y: 2 } }
-/// if log_enabled!(log::DEBUG) {
-/// let x = some_expensive_computation();
-/// debug!("x.x = {}, x.y = {}", x.x, x.y);
+/// struct Point { x: int, y: int }
+/// fn some_expensive_computation() -> Point { Point { x: 1, y: 2 } }
+///
+/// fn main() {
+/// if log_enabled!(log::DEBUG) {
+/// let x = some_expensive_computation();
+/// debug!("x.x = {}, x.y = {}", x.x, x.y);
+/// }
/// }
-/// # }
+/// ```
+///
+/// Assumes the binary is `main`:
+///
+/// ```{.bash}
+/// $ RUST_LOG=error ./main
+/// ```
+///
+/// ```{.bash}
+/// $ RUST_LOG=debug ./main
+/// DEBUG:main: x.x = 1, x.y = 2
/// ```
#[macro_export]
macro_rules! log_enabled(
    fn fstat(&mut self) -> IoResult<rtio::FileStat> {
        // Zero-initialized buffer for fstat(2) to fill in.
        let mut stat: libc::stat = unsafe { mem::zeroed() };
-        match retry(|| unsafe { libc::fstat(self.fd(), &mut stat) }) {
+        // NOTE(review): this hunk drops the EINTR retry() wrapper; confirm
+        // fstat(2) cannot fail with EINTR on the supported platforms.
+        match unsafe { libc::fstat(self.fd(), &mut stat) } {
            0 => Ok(mkstat(&stat)),
            _ => Err(super::last_error()),
        }
    }
pub fn mkdir(p: &CString, mode: uint) -> IoResult<()> {
-    super::mkerr_libc(retry(|| unsafe {
-        libc::mkdir(p.as_ptr(), mode as libc::mode_t)
-    }))
+    // mkdir(2) wrapper; mkerr_libc converts the libc return code into an
+    // IoResult. NOTE(review): EINTR retry() removed by this hunk — confirm
+    // intentional.
+    super::mkerr_libc(unsafe { libc::mkdir(p.as_ptr(), mode as libc::mode_t) })
}
pub fn readdir(p: &CString) -> IoResult<Vec<CString>> {
}
pub fn unlink(p: &CString) -> IoResult<()> {
-    super::mkerr_libc(retry(|| unsafe { libc::unlink(p.as_ptr()) }))
+    // unlink(2) wrapper, error-mapped via mkerr_libc (retry() dropped here).
+    super::mkerr_libc(unsafe { libc::unlink(p.as_ptr()) })
}
pub fn rename(old: &CString, new: &CString) -> IoResult<()> {
-    super::mkerr_libc(retry(|| unsafe {
-        libc::rename(old.as_ptr(), new.as_ptr())
-    }))
+    // rename(2) wrapper, error-mapped via mkerr_libc (retry() dropped here).
+    super::mkerr_libc(unsafe { libc::rename(old.as_ptr(), new.as_ptr()) })
}
pub fn chmod(p: &CString, mode: uint) -> IoResult<()> {
}
pub fn rmdir(p: &CString) -> IoResult<()> {
-    super::mkerr_libc(retry(|| unsafe {
-        libc::rmdir(p.as_ptr())
-    }))
+    // rmdir(2) wrapper, error-mapped via mkerr_libc (retry() dropped here).
+    super::mkerr_libc(unsafe { libc::rmdir(p.as_ptr()) })
}
pub fn chown(p: &CString, uid: int, gid: int) -> IoResult<()> {
len = 1024; // FIXME: read PATH_MAX from C ffi?
}
let mut buf: Vec<u8> = Vec::with_capacity(len as uint);
- match retry(|| unsafe {
+ match unsafe {
libc::readlink(p, buf.as_ptr() as *mut libc::c_char,
len as libc::size_t) as libc::c_int
- }) {
+ } {
-1 => Err(super::last_error()),
n => {
assert!(n > 0);
}
pub fn symlink(src: &CString, dst: &CString) -> IoResult<()> {
-    super::mkerr_libc(retry(|| unsafe {
-        libc::symlink(src.as_ptr(), dst.as_ptr())
-    }))
+    // symlink(2) wrapper, error-mapped via mkerr_libc (retry() dropped here).
+    super::mkerr_libc(unsafe { libc::symlink(src.as_ptr(), dst.as_ptr()) })
}
pub fn link(src: &CString, dst: &CString) -> IoResult<()> {
-    super::mkerr_libc(retry(|| unsafe {
-        libc::link(src.as_ptr(), dst.as_ptr())
-    }))
+    // link(2) wrapper, error-mapped via mkerr_libc (retry() dropped here).
+    super::mkerr_libc(unsafe { libc::link(src.as_ptr(), dst.as_ptr()) })
}
fn mkstat(stat: &libc::stat) -> rtio::FileStat {
pub fn stat(p: &CString) -> IoResult<rtio::FileStat> {
let mut stat: libc::stat = unsafe { mem::zeroed() };
- match retry(|| unsafe { libc::stat(p.as_ptr(), &mut stat) }) {
+ match unsafe { libc::stat(p.as_ptr(), &mut stat) } {
0 => Ok(mkstat(&stat)),
_ => Err(super::last_error()),
}
pub fn lstat(p: &CString) -> IoResult<rtio::FileStat> {
let mut stat: libc::stat = unsafe { mem::zeroed() };
- match retry(|| unsafe { libc::lstat(p.as_ptr(), &mut stat) }) {
+ match unsafe { libc::lstat(p.as_ptr(), &mut stat) } {
0 => Ok(mkstat(&stat)),
_ => Err(super::last_error()),
}
actime: (atime / 1000) as libc::time_t,
modtime: (mtime / 1000) as libc::time_t,
};
- super::mkerr_libc(retry(|| unsafe {
- libc::utime(p.as_ptr(), &buf)
- }))
+ super::mkerr_libc(unsafe { libc::utime(p.as_ptr(), &buf) })
}
#[cfg(test)]
//! Blocking Windows-based file I/O
use alloc::arc::Arc;
-use libc::{c_int, c_void};
-use libc;
+use libc::{mod, c_int};
use std::c_str::CString;
use std::mem;
use std::os::windows::fill_utf16_buf_and_decode;
use std::rt::rtio;
use std::rt::rtio::{IoResult, IoError};
use std::str;
-use std::vec;
pub type fd_t = libc::c_int;
}
pub fn readdir(p: &CString) -> IoResult<Vec<CString>> {
- use std::rt::libc_heap::malloc_raw;
-
fn prune(root: &CString, dirs: Vec<Path>) -> Vec<CString> {
let root = unsafe { CString::new(root.as_ptr(), false) };
let root = Path::new(root);
}).map(|path| root.join(path).to_c_str()).collect()
}
- extern {
- fn rust_list_dir_wfd_size() -> libc::size_t;
- fn rust_list_dir_wfd_fp_buf(wfd: *mut libc::c_void) -> *const u16;
- }
let star = Path::new(unsafe {
CString::new(p.as_ptr(), false)
}).join("*");
let path = try!(to_utf16(&star.to_c_str()));
unsafe {
- let wfd_ptr = malloc_raw(rust_list_dir_wfd_size() as uint);
- let find_handle = libc::FindFirstFileW(path.as_ptr(),
- wfd_ptr as libc::HANDLE);
+ let mut wfd = mem::zeroed();
+ let find_handle = libc::FindFirstFileW(path.as_ptr(), &mut wfd);
if find_handle != libc::INVALID_HANDLE_VALUE {
- let mut paths = vec!();
- let mut more_files = 1 as libc::c_int;
+ let mut paths = vec![];
+ let mut more_files = 1 as libc::BOOL;
while more_files != 0 {
- let fp_buf = rust_list_dir_wfd_fp_buf(wfd_ptr as *mut c_void);
- if fp_buf as uint == 0 {
- fail!("os::list_dir() failure: got null ptr from wfd");
- } else {
- let fp_vec = vec::raw::from_buf(fp_buf, libc::wcslen(fp_buf) as uint);
- let fp_trimmed = str::truncate_utf16_at_nul(fp_vec.as_slice());
- let fp_str = String::from_utf16(fp_trimmed)
- .expect("rust_list_dir_wfd_fp_buf returned invalid UTF-16");
- paths.push(Path::new(fp_str));
+ {
+ let filename = str::truncate_utf16_at_nul(wfd.cFileName);
+ match String::from_utf16(filename) {
+ Some(filename) => paths.push(Path::new(filename)),
+ None => {
+ assert!(libc::FindClose(find_handle) != 0);
+ return Err(IoError {
+ code: super::c::ERROR_ILLEGAL_CHARACTER as uint,
+ extra: 0,
+ detail: Some(format!("path was not valid UTF-16: {}", filename)),
+ })
+ }, // FIXME #12056: Convert the UCS-2 to invalid utf-8 instead of erroring
+ }
}
- more_files = libc::FindNextFileW(find_handle,
- wfd_ptr as libc::HANDLE);
+ more_files = libc::FindNextFileW(find_handle, &mut wfd);
}
assert!(libc::FindClose(find_handle) != 0);
- libc::free(wfd_ptr as *mut c_void);
Ok(prune(p, paths))
} else {
Err(super::last_error())
//! play. The only dependencies of these modules are the normal system libraries
//! that you would find on the respective platform.
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
use libc::c_int;
use libc;
//
// It turns out that there's this nifty MSG_DONTWAIT flag which can be passed to
// send/recv, but the niftiness wears off once you realize it only works well on
-// linux [1] [2]. This means that it's pretty easy to get a nonblocking
-// operation on linux (no flag fiddling, no affecting other objects), but not on
+// Linux [1] [2]. This means that it's pretty easy to get a nonblocking
+// operation on Linux (no flag fiddling, no affecting other objects), but not on
// other platforms.
//
// To work around this constraint on other platforms, we end up using the
// operations performed in the lock are *nonblocking* to avoid holding the mutex
// forever.
//
-// So, in summary, linux uses MSG_DONTWAIT and doesn't need mutexes, everyone
+// So, in summary, Linux uses MSG_DONTWAIT and doesn't need mutexes, everyone
// else uses O_NONBLOCK and mutexes with some trickery to make sure blocking
// reads/writes are still blocking.
//
// wait for the socket to become readable again.
let _guard = lock();
match retry(|| read(deadline.is_some())) {
- -1 if util::wouldblock() => { assert!(deadline.is_some()); }
+ -1 if util::wouldblock() => {}
-1 => return Err(os::last_error()),
n => { ret = n; break }
}
// using the original server pipe.
let handle = self.listener.handle;
- // If we've had an artifical call to close_accept, be sure to never
+ // If we've had an artificial call to close_accept, be sure to never
// proceed in accepting new clients in the future
if self.inner.closed.load(atomic::SeqCst) { return Err(util::eof()) }
#[cfg(unix)] use libc::EINVAL as ERROR;
#[cfg(windows)] use libc::ERROR_NOTHING_TO_TERMINATE as ERROR;
- // On linux (and possibly other unices), a process that has exited will
+ // On Linux (and possibly other unices), a process that has exited will
// continue to accept signals because it is "defunct". The delivery of
// signals will only fail once the child has been reaped. For this
// reason, if the process hasn't exited yet, then we attempt to collect
#[cfg(unix)]
fn translate_status(status: c_int) -> rtio::ProcessExit {
- #![allow(non_snake_case_functions)]
+ #![allow(non_snake_case)]
#[cfg(target_os = "linux")]
#[cfg(target_os = "android")]
mod imp {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Timers for non-linux/non-windows OSes
+//! Timers for non-Linux/non-Windows OSes
//!
//! This module implements timers with a worker thread, select(), and a lot of
//! witchcraft that turns out to be horribly inaccurate timers. The unfortunate
//! part is that I'm at a loss of what else to do one these OSes. This is also
-//! why linux has a specialized timerfd implementation and windows has its own
+//! why Linux has a specialized timerfd implementation and windows has its own
//! implementation (they're more accurate than this one).
//!
//! The basic idea is that there is a worker thread that's communicated to via a
let mut timer = match active.shift() {
Some(timer) => timer, None => return
};
- let mut cb = timer.cb.take_unwrap();
+ let mut cb = timer.cb.take().unwrap();
cb.call();
if timer.repeat {
timer.cb = Some(cb);
//! This module contains the implementation of a Windows specific console TTY.
//! Also converts between UTF-16 and UTF-8. Windows has very poor support for
//! UTF-8 and some functions will fail. In particular ReadFile and ReadConsole
-//! will fail when the codepage is set to UTF-8 and a unicode character is
+//! will fail when the codepage is set to UTF-8 and a Unicode character is
//! entered.
//!
//! FIXME
unsafe {
rt::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
}
- exit_code = Some(run(main.take_unwrap()));
+ exit_code = Some(run(main.take().unwrap()));
}).destroy());
unsafe { rt::cleanup(); }
// If the exit code wasn't set, then the task block must have failed.
let mut f = Some(f);
let mut task = task;
task.put_runtime(ops);
- drop(task.run(|| { f.take_unwrap()() }).destroy());
+ drop(task.run(|| { f.take().unwrap()() }).destroy());
drop(token);
})
}
pub static ZERO_BIG_DIGIT: BigDigit = 0;
static ZERO_VEC: [BigDigit, ..1] = [ZERO_BIG_DIGIT];
+#[allow(non_snake_case)]
pub mod BigDigit {
use super::BigDigit;
use super::DoubleBigDigit;
pub fn bits(&self) -> uint {
if self.is_zero() { return 0; }
let zeros = self.data.last().unwrap().leading_zeros();
- return self.data.len()*BigDigit::bits - (zeros as uint);
+ return self.data.len()*BigDigit::bits - zeros;
}
}
//! Complex numbers.
use std::fmt;
-use std::num::{Zero,One,ToStrRadix};
+use std::num::{Zero, One, ToStrRadix};
// FIXME #1284: handle complex NaN & infinity etc. This
// probably doesn't map to C's _Complex correctly.
/// A complex number in Cartesian form.
-#[deriving(PartialEq,Clone)]
+#[deriving(PartialEq, Clone, Hash)]
pub struct Complex<T> {
/// Real portion of the complex number
pub re: T,
Complex { re: re, im: im }
}
- /**
- Returns the square of the norm (since `T` doesn't necessarily
- have a sqrt function), i.e. `re^2 + im^2`.
- */
+ /// Returns the square of the norm (since `T` doesn't necessarily
+ /// have a sqrt function), i.e. `re^2 + im^2`.
#[inline]
pub fn norm_sqr(&self) -> T {
self.re * self.re + self.im * self.im
#![allow(non_uppercase_statics)]
use super::{Complex64, Complex};
- use std::num::{Zero,One,Float};
+ use std::num::{Zero, One, Float};
+ use std::hash::hash;
pub static _0_0i : Complex64 = Complex { re: 0.0, im: 0.0 };
pub static _1_0i : Complex64 = Complex { re: 1.0, im: 0.0 };
test(-_neg1_1i, "1-1i".to_string());
test(_05_05i, "0.5+0.5i".to_string());
}
+
+ #[test]
+ fn test_hash() {
+ let a = Complex::new(0i32, 0i32);
+ let b = Complex::new(1i32, 0i32);
+ let c = Complex::new(0i32, 1i32);
+ assert!(hash(&a) != hash(&b));
+ assert!(hash(&b) != hash(&c));
+ assert!(hash(&c) != hash(&a));
+ }
}
use bigint::{BigInt, BigUint, Sign, Plus, Minus};
/// Represents the ratio between 2 numbers.
-#[deriving(Clone)]
+#[deriving(Clone, Hash)]
#[allow(missing_doc)]
pub struct Ratio<T> {
numer: T,
}
/// Rounds to the nearest integer. Rounds half-way cases away from zero.
- ///
- /// Note: This function is currently broken and always rounds away from zero.
#[inline]
pub fn round(&self) -> Ratio<T> {
- // FIXME(#15826)
if *self < Zero::zero() {
- Ratio::from_integer((self.numer - self.denom + One::one()) / self.denom)
+ // a/b - 1/2 = (2*a - b)/(2*b)
+ Ratio::from_integer((self.numer + self.numer - self.denom) / (self.denom + self.denom))
} else {
- Ratio::from_integer((self.numer + self.denom - One::one()) / self.denom)
+ // a/b + 1/2 = (2*a + b)/(2*b)
+ Ratio::from_integer((self.numer + self.numer + self.denom) / (self.denom + self.denom))
}
}
Ratio::from_integer(self.numer / self.denom)
}
- ///Returns the fractional part of a number.
+ /// Returns the fractional part of a number.
#[inline]
pub fn fract(&self) -> Ratio<T> {
Ratio::new_raw(self.numer % self.denom, self.denom.clone())
}
}
-// a/b + c/d = (a*d + b*c)/(b*d
+// a/b + c/d = (a*d + b*c)/(b*d)
arith_impl!(impl Add, add)
// a/b - c/d = (a*d - b*c)/(b*d)
use super::{Ratio, Rational, BigRational};
use std::num::{Zero, One, FromStrRadix, FromPrimitive, ToStrRadix};
use std::from_str::FromStr;
+ use std::hash::hash;
use std::num;
pub static _0 : Rational = Ratio { numer: 0, denom: 1};
pub static _2: Rational = Ratio { numer: 2, denom: 1};
pub static _1_2: Rational = Ratio { numer: 1, denom: 2};
pub static _3_2: Rational = Ratio { numer: 3, denom: 2};
- pub static _neg1_2: Rational = Ratio { numer: -1, denom: 2};
+ pub static _neg1_2: Rational = Ratio { numer: -1, denom: 2};
+ pub static _1_3: Rational = Ratio { numer: 1, denom: 3};
+ pub static _neg1_3: Rational = Ratio { numer: -1, denom: 3};
+ pub static _2_3: Rational = Ratio { numer: 2, denom: 3};
+ pub static _neg2_3: Rational = Ratio { numer: -2, denom: 3};
pub fn to_big(n: Rational) -> BigRational {
Ratio::new(
#[test]
fn test_round() {
+ assert_eq!(_1_3.ceil(), _1);
+ assert_eq!(_1_3.floor(), _0);
+ assert_eq!(_1_3.round(), _0);
+ assert_eq!(_1_3.trunc(), _0);
+
+ assert_eq!(_neg1_3.ceil(), _0);
+ assert_eq!(_neg1_3.floor(), -_1);
+ assert_eq!(_neg1_3.round(), _0);
+ assert_eq!(_neg1_3.trunc(), _0);
+
+ assert_eq!(_2_3.ceil(), _1);
+ assert_eq!(_2_3.floor(), _0);
+ assert_eq!(_2_3.round(), _1);
+ assert_eq!(_2_3.trunc(), _0);
+
+ assert_eq!(_neg2_3.ceil(), _0);
+ assert_eq!(_neg2_3.floor(), -_1);
+ assert_eq!(_neg2_3.round(), -_1);
+ assert_eq!(_neg2_3.trunc(), _0);
+
assert_eq!(_1_2.ceil(), _1);
assert_eq!(_1_2.floor(), _0);
assert_eq!(_1_2.round(), _1);
assert!(! _neg1_2.is_positive());
assert!(! _1_2.is_negative());
}
+
+ #[test]
+ fn test_hash() {
+ assert!(hash(&_0) != hash(&_1));
+ assert!(hash(&_0) != hash(&_3_2));
+ }
}
pub item: T,
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct WeightedChoice<'a, T> {
- items: &'a mut [Weighted<T>],
- weight_range: Range<uint>
-}
-
/// A distribution that selects from a finite collection of weighted items.
///
/// Each item has an associated weight that influences how likely it
/// println!("{}", wc.ind_sample(&mut rng));
/// }
/// ```
-#[cfg(not(stage0))]
pub struct WeightedChoice<'a, T:'a> {
items: &'a mut [Weighted<T>],
weight_range: Range<uint>
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct Generator<'a, T, R> {
- rng: &'a mut R,
-}
-
/// Iterator which will generate a stream of random items.
///
/// This iterator is created via the `gen_iter` method on `Rng`.
-#[cfg(not(stage0))]
pub struct Generator<'a, T, R:'a> {
rng: &'a mut R,
}
}
}
-/// Note: stage0-specific version.
-#[cfg(stage0)]
-pub struct AsciiGenerator<'a, R> {
- rng: &'a mut R,
-}
-
/// Iterator which will continuously generate random ascii characters.
///
/// This iterator is created via the `gen_ascii_chars` method on `Rng`.
-#[cfg(not(stage0))]
pub struct AsciiGenerator<'a, R:'a> {
rng: &'a mut R,
}
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
-#![feature(macro_rules, phase, issue_5723_bootstrap)]
+#![feature(macro_rules, phase)]
#![allow(missing_doc)]
extern crate serialize;
pub type EncodeResult = io::IoResult<()>;
// rbml writing
- #[cfg(stage0)]
- pub struct Encoder<'a, W> {
- pub writer: &'a mut W,
- size_positions: Vec<uint>,
- }
-
- // rbml writing
- #[cfg(not(stage0))]
pub struct Encoder<'a, W:'a> {
pub writer: &'a mut W,
size_positions: Vec<uint>,
#[cfg(test)]
mod bench {
- #![allow(non_snake_case_functions)]
+ #![allow(non_snake_case)]
use test::Bencher;
use super::reader;
#![deny(missing_doc)]
#[cfg(test)]
-extern crate stdtest = "test";
+extern crate "test" as stdtest;
#[cfg(test)]
extern crate rand;
#[cfg(test)]
extern crate regex;
-// unicode tables for character classes are defined in libunicode
+// Unicode tables for character classes are defined in libunicode
extern crate unicode;
pub use parse::Error;
}
}
- // Parses a unicode character class name, either of the form \pF where
- // F is a one letter unicode class name or of the form \p{name} where
- // name is the unicode class name.
+ // Parses a Unicode character class name, either of the form \pF where
+ // F is a one letter Unicode class name or of the form \p{name} where
+ // name is the Unicode class name.
// Assumes that \p or \P has been read (and 'p' or 'P' is the current
// character).
fn parse_unicode_name(&mut self) -> Result<Ast, Error> {
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
use std::rand::{Rng, task_rng};
use stdtest::Bencher;
}
/// Sets the previous and current character given any arbitrary byte
- /// index (at a unicode codepoint boundary).
+ /// index (at a Unicode codepoint boundary).
#[inline]
pub fn set(&mut self, ic: uint) -> uint {
self.prev = None;
// LLVM to optimize these function calls to themselves!
#![no_builtins]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
#[cfg(test)] extern crate native;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate debug;
use super::rpath;
use super::rpath::RPathConfig;
use super::svh::Svh;
+use super::write::{OutputTypeBitcode, OutputTypeExe, OutputTypeObject};
use driver::driver::{CrateTranslation, OutputFilenames, Input, FileInput};
use driver::config::NoDebugInfo;
use driver::session::Session;
use driver::config;
-use llvm;
-use llvm::ModuleRef;
use metadata::common::LinkMeta;
use metadata::{encoder, cstore, filesearch, csearch, loader, creader};
use middle::trans::context::CrateContext;
use util::ppaux;
use util::sha2::{Digest, Sha256};
-use std::c_str::{ToCStr, CString};
use std::char;
-use std::collections::HashSet;
use std::io::{fs, TempDir, Command};
use std::io;
use std::mem;
-use std::ptr;
use std::str;
use std::string::String;
use flate;
RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8;
-#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
-pub enum OutputType {
- OutputTypeBitcode,
- OutputTypeAssembly,
- OutputTypeLlvmAssembly,
- OutputTypeObject,
- OutputTypeExe,
-}
-
-pub fn llvm_err(sess: &Session, msg: String) -> ! {
- unsafe {
- let cstr = llvm::LLVMRustGetLastError();
- if cstr == ptr::null() {
- sess.fatal(msg.as_slice());
- } else {
- let err = CString::new(cstr, true);
- let err = String::from_utf8_lossy(err.as_bytes());
- sess.fatal(format!("{}: {}",
- msg.as_slice(),
- err.as_slice()).as_slice());
- }
- }
-}
-
-pub fn write_output_file(
- sess: &Session,
- target: llvm::TargetMachineRef,
- pm: llvm::PassManagerRef,
- m: ModuleRef,
- output: &Path,
- file_type: llvm::FileType) {
- unsafe {
- output.with_c_str(|output| {
- let result = llvm::LLVMRustWriteOutputFile(
- target, pm, m, output, file_type);
- if !result {
- llvm_err(sess, "could not write output".to_string());
- }
- })
- }
-}
-
-pub mod write {
-
- use super::super::lto;
- use super::{write_output_file, OutputType};
- use super::{OutputTypeAssembly, OutputTypeBitcode};
- use super::{OutputTypeExe, OutputTypeLlvmAssembly};
- use super::{OutputTypeObject};
- use driver::driver::{CrateTranslation, OutputFilenames};
- use driver::config::NoDebugInfo;
- use driver::session::Session;
- use driver::config;
- use llvm;
- use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
- use util::common::time;
- use syntax::abi;
-
- use std::c_str::ToCStr;
- use std::io::{Command};
- use libc::{c_uint, c_int};
- use std::str;
-
- // On android, we by default compile for armv7 processors. This enables
- // things like double word CAS instructions (rather than emulating them)
- // which are *far* more efficient. This is obviously undesirable in some
- // cases, so if any sort of target feature is specified we don't append v7
- // to the feature list.
- //
- // On iOS only armv7 and newer are supported. So it is useful to
- // get all hardware potential via VFP3 (hardware floating point)
- // and NEON (SIMD) instructions supported by LLVM.
- // Note that without those flags various linking errors might
- // arise as some of intrinsics are converted into function calls
- // and nobody provides implementations those functions
- fn target_feature<'a>(sess: &'a Session) -> &'a str {
- match sess.targ_cfg.os {
- abi::OsAndroid => {
- if "" == sess.opts.cg.target_feature.as_slice() {
- "+v7"
- } else {
- sess.opts.cg.target_feature.as_slice()
- }
- },
- abi::OsiOS if sess.targ_cfg.arch == abi::Arm => {
- "+v7,+thumb2,+vfp3,+neon"
- },
- _ => sess.opts.cg.target_feature.as_slice()
- }
- }
-
- pub fn run_passes(sess: &Session,
- trans: &CrateTranslation,
- output_types: &[OutputType],
- output: &OutputFilenames) {
- let llmod = trans.module;
- let llcx = trans.context;
- unsafe {
- configure_llvm(sess);
-
- if sess.opts.cg.save_temps {
- output.with_extension("no-opt.bc").with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
-
- let opt_level = match sess.opts.optimize {
- config::No => llvm::CodeGenLevelNone,
- config::Less => llvm::CodeGenLevelLess,
- config::Default => llvm::CodeGenLevelDefault,
- config::Aggressive => llvm::CodeGenLevelAggressive,
- };
- let use_softfp = sess.opts.cg.soft_float;
-
- // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a parameter.
- // FIXME: #11954: mac64 unwinding may not work with fp elim
- let no_fp_elim = (sess.opts.debuginfo != NoDebugInfo) ||
- (sess.targ_cfg.os == abi::OsMacos &&
- sess.targ_cfg.arch == abi::X86_64);
-
- // OSX has -dead_strip, which doesn't rely on ffunction_sections
- // FIXME(#13846) this should be enabled for windows
- let ffunction_sections = sess.targ_cfg.os != abi::OsMacos &&
- sess.targ_cfg.os != abi::OsWindows;
- let fdata_sections = ffunction_sections;
-
- let reloc_model = match sess.opts.cg.relocation_model.as_slice() {
- "pic" => llvm::RelocPIC,
- "static" => llvm::RelocStatic,
- "default" => llvm::RelocDefault,
- "dynamic-no-pic" => llvm::RelocDynamicNoPic,
- _ => {
- sess.err(format!("{} is not a valid relocation mode",
- sess.opts
- .cg
- .relocation_model).as_slice());
- sess.abort_if_errors();
- return;
- }
- };
-
- let code_model = match sess.opts.cg.code_model.as_slice() {
- "default" => llvm::CodeModelDefault,
- "small" => llvm::CodeModelSmall,
- "kernel" => llvm::CodeModelKernel,
- "medium" => llvm::CodeModelMedium,
- "large" => llvm::CodeModelLarge,
- _ => {
- sess.err(format!("{} is not a valid code model",
- sess.opts
- .cg
- .code_model).as_slice());
- sess.abort_if_errors();
- return;
- }
- };
-
- let tm = sess.targ_cfg
- .target_strs
- .target_triple
- .as_slice()
- .with_c_str(|t| {
- sess.opts.cg.target_cpu.as_slice().with_c_str(|cpu| {
- target_feature(sess).with_c_str(|features| {
- llvm::LLVMRustCreateTargetMachine(
- t, cpu, features,
- code_model,
- reloc_model,
- opt_level,
- true /* EnableSegstk */,
- use_softfp,
- no_fp_elim,
- ffunction_sections,
- fdata_sections,
- )
- })
- })
- });
-
- // Create the two optimizing pass managers. These mirror what clang
- // does, and are by populated by LLVM's default PassManagerBuilder.
- // Each manager has a different set of passes, but they also share
- // some common passes.
- let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
- let mpm = llvm::LLVMCreatePassManager();
-
- // If we're verifying or linting, add them to the function pass
- // manager.
- let addpass = |pass: &str| {
- pass.as_slice().with_c_str(|s| llvm::LLVMRustAddPass(fpm, s))
- };
- if !sess.no_verify() { assert!(addpass("verify")); }
-
- if !sess.opts.cg.no_prepopulate_passes {
- llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
- llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
- populate_llvm_passes(fpm, mpm, llmod, opt_level,
- trans.no_builtins);
- }
-
- for pass in sess.opts.cg.passes.iter() {
- pass.as_slice().with_c_str(|s| {
- if !llvm::LLVMRustAddPass(mpm, s) {
- sess.warn(format!("unknown pass {}, ignoring",
- *pass).as_slice());
- }
- })
- }
-
- // Finally, run the actual optimization passes
- time(sess.time_passes(), "llvm function passes", (), |()|
- llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
- time(sess.time_passes(), "llvm module passes", (), |()|
- llvm::LLVMRunPassManager(mpm, llmod));
-
- // Deallocate managers that we're now done with
- llvm::LLVMDisposePassManager(fpm);
- llvm::LLVMDisposePassManager(mpm);
-
- // Emit the bytecode if we're either saving our temporaries or
- // emitting an rlib. Whenever an rlib is created, the bytecode is
- // inserted into the archive in order to allow LTO against it.
- if sess.opts.cg.save_temps ||
- (sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
- sess.opts.output_types.contains(&OutputTypeExe)) {
- output.temp_path(OutputTypeBitcode).with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
-
- if sess.lto() {
- time(sess.time_passes(), "all lto passes", (), |()|
- lto::run(sess, llmod, tm, trans.reachable.as_slice()));
-
- if sess.opts.cg.save_temps {
- output.with_extension("lto.bc").with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
- }
-
- // A codegen-specific pass manager is used to generate object
- // files for an LLVM module.
- //
- // Apparently each of these pass managers is a one-shot kind of
- // thing, so we create a new one for each type of output. The
- // pass manager passed to the closure should be ensured to not
- // escape the closure itself, and the manager should only be
- // used once.
- fn with_codegen(tm: TargetMachineRef, llmod: ModuleRef,
- no_builtins: bool, f: |PassManagerRef|) {
- unsafe {
- let cpm = llvm::LLVMCreatePassManager();
- llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
- llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
- f(cpm);
- llvm::LLVMDisposePassManager(cpm);
- }
- }
-
- let mut object_file = None;
- let mut needs_metadata = false;
- for output_type in output_types.iter() {
- let path = output.path(*output_type);
- match *output_type {
- OutputTypeBitcode => {
- path.with_c_str(|buf| {
- llvm::LLVMWriteBitcodeToFile(llmod, buf);
- })
- }
- OutputTypeLlvmAssembly => {
- path.with_c_str(|output| {
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- llvm::LLVMRustPrintModule(cpm, llmod, output);
- })
- })
- }
- OutputTypeAssembly => {
- // If we're not using the LLVM assembler, this function
- // could be invoked specially with output_type_assembly,
- // so in this case we still want the metadata object
- // file.
- let ty = OutputTypeAssembly;
- let path = if sess.opts.output_types.contains(&ty) {
- path
- } else {
- needs_metadata = true;
- output.temp_path(OutputTypeAssembly)
- };
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- write_output_file(sess, tm, cpm, llmod, &path,
- llvm::AssemblyFile);
- });
- }
- OutputTypeObject => {
- object_file = Some(path);
- }
- OutputTypeExe => {
- object_file = Some(output.temp_path(OutputTypeObject));
- needs_metadata = true;
- }
- }
- }
-
- time(sess.time_passes(), "codegen passes", (), |()| {
- match object_file {
- Some(ref path) => {
- with_codegen(tm, llmod, trans.no_builtins, |cpm| {
- write_output_file(sess, tm, cpm, llmod, path,
- llvm::ObjectFile);
- });
- }
- None => {}
- }
- if needs_metadata {
- with_codegen(tm, trans.metadata_module,
- trans.no_builtins, |cpm| {
- let out = output.temp_path(OutputTypeObject)
- .with_extension("metadata.o");
- write_output_file(sess, tm, cpm,
- trans.metadata_module, &out,
- llvm::ObjectFile);
- })
- }
- });
-
- llvm::LLVMRustDisposeTargetMachine(tm);
- llvm::LLVMDisposeModule(trans.metadata_module);
- llvm::LLVMDisposeModule(llmod);
- llvm::LLVMContextDispose(llcx);
- if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
- }
- }
-
- pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
- let pname = super::get_cc_prog(sess);
- let mut cmd = Command::new(pname.as_slice());
-
- cmd.arg("-c").arg("-o").arg(outputs.path(OutputTypeObject))
- .arg(outputs.temp_path(OutputTypeAssembly));
- debug!("{}", &cmd);
-
- match cmd.output() {
- Ok(prog) => {
- if !prog.status.success() {
- sess.err(format!("linking with `{}` failed: {}",
- pname,
- prog.status).as_slice());
- sess.note(format!("{}", &cmd).as_slice());
- let mut note = prog.error.clone();
- note.push_all(prog.output.as_slice());
- sess.note(str::from_utf8(note.as_slice()).unwrap());
- sess.abort_if_errors();
- }
- },
- Err(e) => {
- sess.err(format!("could not exec the linker `{}`: {}",
- pname,
- e).as_slice());
- sess.abort_if_errors();
- }
- }
- }
-
- unsafe fn configure_llvm(sess: &Session) {
- use std::sync::{Once, ONCE_INIT};
- static mut INIT: Once = ONCE_INIT;
-
- // Copy what clang does by turning on loop vectorization at O2 and
- // slp vectorization at O3
- let vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
- (sess.opts.optimize == config::Default ||
- sess.opts.optimize == config::Aggressive);
- let vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
- sess.opts.optimize == config::Aggressive;
-
- let mut llvm_c_strs = Vec::new();
- let mut llvm_args = Vec::new();
- {
- let add = |arg: &str| {
- let s = arg.to_c_str();
- llvm_args.push(s.as_ptr());
- llvm_c_strs.push(s);
- };
- add("rustc"); // fake program name
- if vectorize_loop { add("-vectorize-loops"); }
- if vectorize_slp { add("-vectorize-slp"); }
- if sess.time_llvm_passes() { add("-time-passes"); }
- if sess.print_llvm_passes() { add("-debug-pass=Structure"); }
-
- for arg in sess.opts.cg.llvm_args.iter() {
- add((*arg).as_slice());
- }
- }
-
- INIT.doit(|| {
- llvm::LLVMInitializePasses();
-
- // Only initialize the platforms supported by Rust here, because
- // using --llvm-root will have multiple platforms that rustllvm
- // doesn't actually link to and it's pointless to put target info
- // into the registry that Rust cannot generate machine code for.
- llvm::LLVMInitializeX86TargetInfo();
- llvm::LLVMInitializeX86Target();
- llvm::LLVMInitializeX86TargetMC();
- llvm::LLVMInitializeX86AsmPrinter();
- llvm::LLVMInitializeX86AsmParser();
-
- llvm::LLVMInitializeARMTargetInfo();
- llvm::LLVMInitializeARMTarget();
- llvm::LLVMInitializeARMTargetMC();
- llvm::LLVMInitializeARMAsmPrinter();
- llvm::LLVMInitializeARMAsmParser();
-
- llvm::LLVMInitializeMipsTargetInfo();
- llvm::LLVMInitializeMipsTarget();
- llvm::LLVMInitializeMipsTargetMC();
- llvm::LLVMInitializeMipsAsmPrinter();
- llvm::LLVMInitializeMipsAsmParser();
-
- llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
- llvm_args.as_ptr());
- });
- }
-
- unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
- mpm: llvm::PassManagerRef,
- llmod: ModuleRef,
- opt: llvm::CodeGenOptLevel,
- no_builtins: bool) {
- // Create the PassManagerBuilder for LLVM. We configure it with
- // reasonable defaults and prepare it to actually populate the pass
- // manager.
- let builder = llvm::LLVMPassManagerBuilderCreate();
- match opt {
- llvm::CodeGenLevelNone => {
- // Don't add lifetime intrinsics at O0
- llvm::LLVMRustAddAlwaysInlinePass(builder, false);
- }
- llvm::CodeGenLevelLess => {
- llvm::LLVMRustAddAlwaysInlinePass(builder, true);
- }
- // numeric values copied from clang
- llvm::CodeGenLevelDefault => {
- llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
- 225);
- }
- llvm::CodeGenLevelAggressive => {
- llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
- 275);
- }
- }
- llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt as c_uint);
- llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, no_builtins);
-
- // Use the builder to populate the function/module pass managers.
- llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
- llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
- llvm::LLVMPassManagerBuilderDispose(builder);
-
- match opt {
- llvm::CodeGenLevelDefault | llvm::CodeGenLevelAggressive => {
- "mergefunc".with_c_str(|s| llvm::LLVMRustAddPass(mpm, s));
- }
- _ => {}
- };
- }
-}
-
-
/*
* Name mangling and its relationship to metadata. This is complex. Read
* carefully.
}
fn get_symbol_hash(ccx: &CrateContext, t: ty::t) -> String {
- match ccx.type_hashcodes.borrow().find(&t) {
+ match ccx.type_hashcodes().borrow().find(&t) {
Some(h) => return h.to_string(),
None => {}
}
- let mut symbol_hasher = ccx.symbol_hasher.borrow_mut();
- let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, &ccx.link_meta);
- ccx.type_hashcodes.borrow_mut().insert(t, hash.clone());
+ let mut symbol_hasher = ccx.symbol_hasher().borrow_mut();
+ let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, ccx.link_meta());
+ ccx.type_hashcodes().borrow_mut().insert(t, hash.clone());
hash
}
}
}
-fn remove(sess: &Session, path: &Path) {
+pub fn remove(sess: &Session, path: &Path) {
match fs::unlink(path) {
Ok(..) => {}
Err(e) => {
fn archive_search_paths(sess: &Session) -> Vec<Path> {
let mut rustpath = filesearch::rust_path();
rustpath.push(sess.target_filesearch().get_lib_path());
- // FIXME: Addl lib search paths are an unordered HashSet?
- // Shouldn't this search be done in some order?
- let addl_lib_paths: HashSet<Path> = sess.opts.addl_lib_search_paths.borrow().clone();
- let mut search: Vec<Path> = addl_lib_paths.move_iter().collect();
+ let mut search: Vec<Path> = sess.opts.addl_lib_search_paths.borrow().clone();
search.push_all(rustpath.as_slice());
return search;
}
// contain the metadata in a separate file. We use a temp directory
// here so concurrent builds in the same directory don't try to use
// the same filename for metadata (stomping over one another)
- let tmpdir = TempDir::new("rustc").expect("needs a temp dir");
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
let metadata = tmpdir.path().join(METADATA_FILENAME);
match fs::File::create(&metadata).write(trans.metadata
.as_slice()) {
ab.add_file(&metadata).unwrap();
remove(sess, &metadata);
- // For LTO purposes, the bytecode of this library is also inserted
- // into the archive.
- //
- // Note that we make sure that the bytecode filename in the archive
- // is never exactly 16 bytes long by adding a 16 byte extension to
- // it. This is to work around a bug in LLDB that would cause it to
- // crash if the name of a file in an archive was exactly 16 bytes.
- let bc_filename = obj_filename.with_extension("bc");
- let bc_deflated_filename = obj_filename.with_extension("bytecode.deflate");
-
- let bc_data = match fs::File::open(&bc_filename).read_to_end() {
- Ok(buffer) => buffer,
- Err(e) => sess.fatal(format!("failed to read bytecode: {}",
- e).as_slice())
- };
+ if sess.opts.cg.codegen_units == 1 {
+ // For LTO purposes, the bytecode of this library is also
+ // inserted into the archive. We currently do this only when
+ // codegen_units == 1, so we don't have to deal with multiple
+ // bitcode files per crate.
+ //
+ // Note that we make sure that the bytecode filename in the
+ // archive is never exactly 16 bytes long by adding a 16 byte
+ // extension to it. This is to work around a bug in LLDB that
+ // would cause it to crash if the name of a file in an archive
+ // was exactly 16 bytes.
+ let bc_filename = obj_filename.with_extension("bc");
+ let bc_deflated_filename = obj_filename.with_extension("bytecode.deflate");
+
+ let bc_data = match fs::File::open(&bc_filename).read_to_end() {
+ Ok(buffer) => buffer,
+ Err(e) => sess.fatal(format!("failed to read bytecode: {}",
+ e).as_slice())
+ };
- let bc_data_deflated = match flate::deflate_bytes(bc_data.as_slice()) {
- Some(compressed) => compressed,
- None => sess.fatal(format!("failed to compress bytecode from {}",
- bc_filename.display()).as_slice())
- };
+ let bc_data_deflated = match flate::deflate_bytes(bc_data.as_slice()) {
+ Some(compressed) => compressed,
+ None => sess.fatal(format!("failed to compress bytecode from {}",
+ bc_filename.display()).as_slice())
+ };
- let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) {
- Ok(file) => file,
- Err(e) => {
- sess.fatal(format!("failed to create compressed bytecode \
- file: {}", e).as_slice())
- }
- };
+ let mut bc_file_deflated = match fs::File::create(&bc_deflated_filename) {
+ Ok(file) => file,
+ Err(e) => {
+ sess.fatal(format!("failed to create compressed bytecode \
+ file: {}", e).as_slice())
+ }
+ };
- match write_rlib_bytecode_object_v1(&mut bc_file_deflated,
- bc_data_deflated.as_slice()) {
- Ok(()) => {}
- Err(e) => {
- sess.err(format!("failed to write compressed bytecode: \
- {}", e).as_slice());
- sess.abort_if_errors()
- }
- };
+ match write_rlib_bytecode_object_v1(&mut bc_file_deflated,
+ bc_data_deflated.as_slice()) {
+ Ok(()) => {}
+ Err(e) => {
+ sess.err(format!("failed to write compressed bytecode: \
+ {}", e).as_slice());
+ sess.abort_if_errors()
+ }
+ };
- ab.add_file(&bc_deflated_filename).unwrap();
- remove(sess, &bc_deflated_filename);
- if !sess.opts.cg.save_temps &&
- !sess.opts.output_types.contains(&OutputTypeBitcode) {
- remove(sess, &bc_filename);
+ ab.add_file(&bc_deflated_filename).unwrap();
+ remove(sess, &bc_deflated_filename);
+ if !sess.opts.cg.save_temps &&
+ !sess.opts.output_types.contains(&OutputTypeBitcode) {
+ remove(sess, &bc_filename);
+ }
}
}
// links to all upstream files as well.
fn link_natively(sess: &Session, trans: &CrateTranslation, dylib: bool,
obj_filename: &Path, out_filename: &Path) {
- let tmpdir = TempDir::new("rustc").expect("needs a temp dir");
+ let tmpdir = TempDir::new("rustc").ok().expect("needs a temp dir");
// The invocations of cc share some flags across platforms
let pname = get_cc_prog(sess);
// Mark all dynamic libraries and executables as compatible with ASLR
cmd.arg("-Wl,--dynamicbase");
+
+ // Mark all dynamic libraries and executables as compatible with the larger 4GiB address
+ // space available to x86 Windows binaries on x86_64.
+ if sess.targ_cfg.arch == abi::X86 {
+ cmd.arg("-Wl,--large-address-aware");
+ }
}
if sess.targ_cfg.os == abi::OsAndroid {
//
// We must continue to link to the upstream archives to be sure
// to pull in native static dependencies. As the final caveat,
- // on linux it is apparently illegal to link to a blank archive,
+ // on Linux it is apparently illegal to link to a blank archive,
// so if an archive no longer has any object files in it after
// we remove `lib.o`, then don't link against it at all.
//
// except according to those terms.
use super::link;
+use super::write;
use driver::session;
use driver::config;
use llvm;
archive.read(format!("{}.bytecode.deflate",
file).as_slice())
});
- let bc_encoded = bc_encoded.expect("missing compressed bytecode in archive!");
+ let bc_encoded = match bc_encoded {
+ Some(data) => data,
+ None => {
+ sess.fatal(format!("missing compressed bytecode in {} \
+ (perhaps it was compiled with -C codegen-units > 1)",
+ path.display()).as_slice());
+ },
+ };
let bc_extractor = if is_versioned_bytecode_format(bc_encoded) {
|_| {
// Read the version
if !llvm::LLVMRustLinkInExternalBitcode(llmod,
ptr as *const libc::c_char,
bc_decoded.len() as libc::size_t) {
- link::llvm_err(sess,
- format!("failed to load bc of `{}`",
- name.as_slice()));
+ write::llvm_err(sess.diagnostic().handler(),
+ format!("failed to load bc of `{}`",
+ name.as_slice()));
}
});
}
"verify".with_c_str(|s| llvm::LLVMRustAddPass(pm, s));
- time(sess.time_passes(), "LTO pases", (), |()|
+ time(sess.time_passes(), "LTO passes", (), |()|
llvm::LLVMRunPassManager(pm, llmod));
llvm::LLVMDisposePassManager(pm);
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::lto;
+use back::link::{get_cc_prog, remove};
+use driver::driver::{CrateTranslation, ModuleTranslation, OutputFilenames};
+use driver::config::NoDebugInfo;
+use driver::session::Session;
+use driver::config;
+use llvm;
+use llvm::{ModuleRef, TargetMachineRef, PassManagerRef};
+use util::common::time;
+use syntax::abi;
+use syntax::codemap;
+use syntax::diagnostic;
+use syntax::diagnostic::{Emitter, Handler, Level, mk_handler};
+
+use std::c_str::{ToCStr, CString};
+use std::io::Command;
+use std::io::fs;
+use std::iter::Unfold;
+use std::ptr;
+use std::str;
+use std::sync::{Arc, Mutex};
+use std::task::TaskBuilder;
+use libc::{c_uint, c_int};
+
+
+#[deriving(Clone, PartialEq, PartialOrd, Ord, Eq)]
+pub enum OutputType {
+ OutputTypeBitcode,
+ OutputTypeAssembly,
+ OutputTypeLlvmAssembly,
+ OutputTypeObject,
+ OutputTypeExe,
+}
+
+
+pub fn llvm_err(handler: &diagnostic::Handler, msg: String) -> ! {
+ unsafe {
+ let cstr = llvm::LLVMRustGetLastError();
+ if cstr == ptr::null() {
+ handler.fatal(msg.as_slice());
+ } else {
+ let err = CString::new(cstr, true);
+ let err = String::from_utf8_lossy(err.as_bytes());
+ handler.fatal(format!("{}: {}",
+ msg.as_slice(),
+ err.as_slice()).as_slice());
+ }
+ }
+}
+
+pub fn write_output_file(
+ handler: &diagnostic::Handler,
+ target: llvm::TargetMachineRef,
+ pm: llvm::PassManagerRef,
+ m: ModuleRef,
+ output: &Path,
+ file_type: llvm::FileType) {
+ unsafe {
+ output.with_c_str(|output| {
+ let result = llvm::LLVMRustWriteOutputFile(
+ target, pm, m, output, file_type);
+ if !result {
+ llvm_err(handler, "could not write output".to_string());
+ }
+ })
+ }
+}
+
+
+struct Diagnostic {
+ msg: String,
+ code: Option<String>,
+ lvl: Level,
+}
+
+// We use an Arc instead of just returning a list of diagnostics from the
+// child task because we need to make sure that the messages are seen even
+// if the child task fails (for example, when `fatal` is called).
+#[deriving(Clone)]
+struct SharedEmitter {
+ buffer: Arc<Mutex<Vec<Diagnostic>>>,
+}
+
+impl SharedEmitter {
+ fn new() -> SharedEmitter {
+ SharedEmitter {
+ buffer: Arc::new(Mutex::new(Vec::new())),
+ }
+ }
+
+ fn dump(&mut self, handler: &Handler) {
+ let mut buffer = self.buffer.lock();
+ for diag in buffer.iter() {
+ match diag.code {
+ Some(ref code) => {
+ handler.emit_with_code(None,
+ diag.msg.as_slice(),
+ code.as_slice(),
+ diag.lvl);
+ },
+ None => {
+ handler.emit(None,
+ diag.msg.as_slice(),
+ diag.lvl);
+ },
+ }
+ }
+ buffer.clear();
+ }
+}
+
+impl Emitter for SharedEmitter {
+ fn emit(&mut self, cmsp: Option<(&codemap::CodeMap, codemap::Span)>,
+ msg: &str, code: Option<&str>, lvl: Level) {
+ assert!(cmsp.is_none(), "SharedEmitter doesn't support spans");
+
+ self.buffer.lock().push(Diagnostic {
+ msg: msg.to_string(),
+ code: code.map(|s| s.to_string()),
+ lvl: lvl,
+ });
+ }
+
+ fn custom_emit(&mut self, _cm: &codemap::CodeMap,
+ _sp: diagnostic::RenderSpan, _msg: &str, _lvl: Level) {
+ fail!("SharedEmitter doesn't support custom_emit");
+ }
+}
+
+
+// On Android, we compile for armv7 processors by default. This enables
+// things like double word CAS instructions (rather than emulating them)
+// which are *far* more efficient. This is obviously undesirable in some
+// cases, so if any sort of target feature is specified we don't append v7
+// to the feature list.
+//
+// On iOS only armv7 and newer are supported. So it is useful to
+// get all hardware potential via VFP3 (hardware floating point)
+// and NEON (SIMD) instructions supported by LLVM.
+// Note that without those flags various linking errors might
+// arise as some of the intrinsics are converted into function calls
+// and nobody provides implementations of those functions
+fn target_feature<'a>(sess: &'a Session) -> &'a str {
+ match sess.targ_cfg.os {
+ abi::OsAndroid => {
+ if "" == sess.opts.cg.target_feature.as_slice() {
+ "+v7"
+ } else {
+ sess.opts.cg.target_feature.as_slice()
+ }
+ },
+ abi::OsiOS if sess.targ_cfg.arch == abi::Arm => {
+ "+v7,+thumb2,+vfp3,+neon"
+ },
+ _ => sess.opts.cg.target_feature.as_slice()
+ }
+}
+
+fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel {
+ match optimize {
+ config::No => llvm::CodeGenLevelNone,
+ config::Less => llvm::CodeGenLevelLess,
+ config::Default => llvm::CodeGenLevelDefault,
+ config::Aggressive => llvm::CodeGenLevelAggressive,
+ }
+}
+
+fn create_target_machine(sess: &Session) -> TargetMachineRef {
+ let reloc_model = match sess.opts.cg.relocation_model.as_slice() {
+ "pic" => llvm::RelocPIC,
+ "static" => llvm::RelocStatic,
+ "default" => llvm::RelocDefault,
+ "dynamic-no-pic" => llvm::RelocDynamicNoPic,
+ _ => {
+ sess.err(format!("{} is not a valid relocation mode",
+ sess.opts
+ .cg
+ .relocation_model).as_slice());
+ sess.abort_if_errors();
+ unreachable!();
+ }
+ };
+
+ let opt_level = get_llvm_opt_level(sess.opts.optimize);
+ let use_softfp = sess.opts.cg.soft_float;
+
+ // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a parameter.
+ // FIXME: #11954: mac64 unwinding may not work with fp elim
+ let no_fp_elim = (sess.opts.debuginfo != NoDebugInfo) ||
+ (sess.targ_cfg.os == abi::OsMacos &&
+ sess.targ_cfg.arch == abi::X86_64);
+
+ // OSX has -dead_strip, which doesn't rely on ffunction_sections
+ // FIXME(#13846) this should be enabled for windows
+ let ffunction_sections = sess.targ_cfg.os != abi::OsMacos &&
+ sess.targ_cfg.os != abi::OsWindows;
+ let fdata_sections = ffunction_sections;
+
+ let code_model = match sess.opts.cg.code_model.as_slice() {
+ "default" => llvm::CodeModelDefault,
+ "small" => llvm::CodeModelSmall,
+ "kernel" => llvm::CodeModelKernel,
+ "medium" => llvm::CodeModelMedium,
+ "large" => llvm::CodeModelLarge,
+ _ => {
+ sess.err(format!("{} is not a valid code model",
+ sess.opts
+ .cg
+ .code_model).as_slice());
+ sess.abort_if_errors();
+ unreachable!();
+ }
+ };
+
+ unsafe {
+ sess.targ_cfg
+ .target_strs
+ .target_triple
+ .as_slice()
+ .with_c_str(|t| {
+ sess.opts.cg.target_cpu.as_slice().with_c_str(|cpu| {
+ target_feature(sess).with_c_str(|features| {
+ llvm::LLVMRustCreateTargetMachine(
+ t, cpu, features,
+ code_model,
+ reloc_model,
+ opt_level,
+ true /* EnableSegstk */,
+ use_softfp,
+ no_fp_elim,
+ ffunction_sections,
+ fdata_sections,
+ )
+ })
+ })
+ })
+ }
+}
+
+
+/// Module-specific configuration for `optimize_and_codegen`.
+#[deriving(Clone)]
+struct ModuleConfig {
+ /// LLVM TargetMachine to use for codegen.
+ tm: TargetMachineRef,
+ /// Names of additional optimization passes to run.
+ passes: Vec<String>,
+ /// Some(level) to optimize at a certain level, or None to run
+ /// absolutely no optimizations (used for the metadata module).
+ opt_level: Option<llvm::CodeGenOptLevel>,
+
+ // Flags indicating which outputs to produce.
+ emit_no_opt_bc: bool,
+ emit_bc: bool,
+ emit_lto_bc: bool,
+ emit_ir: bool,
+ emit_asm: bool,
+ emit_obj: bool,
+
+ // Miscellaneous flags. These are mostly copied from command-line
+ // options.
+ no_verify: bool,
+ no_prepopulate_passes: bool,
+ no_builtins: bool,
+ time_passes: bool,
+}
+
+impl ModuleConfig {
+ fn new(tm: TargetMachineRef, passes: Vec<String>) -> ModuleConfig {
+ ModuleConfig {
+ tm: tm,
+ passes: passes,
+ opt_level: None,
+
+ emit_no_opt_bc: false,
+ emit_bc: false,
+ emit_lto_bc: false,
+ emit_ir: false,
+ emit_asm: false,
+ emit_obj: false,
+
+ no_verify: false,
+ no_prepopulate_passes: false,
+ no_builtins: false,
+ time_passes: false,
+ }
+ }
+
+ fn set_flags(&mut self, sess: &Session, trans: &CrateTranslation) {
+ self.no_verify = sess.no_verify();
+ self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes;
+ self.no_builtins = trans.no_builtins;
+ self.time_passes = sess.time_passes();
+ }
+}
+
+/// Additional resources used by optimize_and_codegen (not module specific)
+struct CodegenContext<'a> {
+ // Extra resources used for LTO: (sess, reachable). This will be `None`
+ // when running in a worker thread.
+ lto_ctxt: Option<(&'a Session, &'a [String])>,
+ // Handler to use for diagnostics produced during codegen.
+ handler: &'a Handler,
+}
+
+impl<'a> CodegenContext<'a> {
+ fn new(handler: &'a Handler) -> CodegenContext<'a> {
+ CodegenContext {
+ lto_ctxt: None,
+ handler: handler,
+ }
+ }
+
+ fn new_with_session(sess: &'a Session, reachable: &'a [String]) -> CodegenContext<'a> {
+ CodegenContext {
+ lto_ctxt: Some((sess, reachable)),
+ handler: sess.diagnostic().handler(),
+ }
+ }
+}
+
+// Unsafe due to LLVM calls.
+unsafe fn optimize_and_codegen(cgcx: &CodegenContext,
+ mtrans: ModuleTranslation,
+ config: ModuleConfig,
+ name_extra: String,
+ output_names: OutputFilenames) {
+ let ModuleTranslation { llmod, llcx } = mtrans;
+ let tm = config.tm;
+
+ if config.emit_no_opt_bc {
+ let ext = format!("{}.no-opt.bc", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+
+ match config.opt_level {
+ Some(opt_level) => {
+ // Create the two optimizing pass managers. These mirror what clang
+            // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+ // If we're verifying or linting, add them to the function pass
+ // manager.
+ let addpass = |pass: &str| {
+ pass.as_slice().with_c_str(|s| llvm::LLVMRustAddPass(fpm, s))
+ };
+ if !config.no_verify { assert!(addpass("verify")); }
+
+ if !config.no_prepopulate_passes {
+ llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod);
+ llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod);
+ populate_llvm_passes(fpm, mpm, llmod, opt_level,
+ config.no_builtins);
+ }
+
+ for pass in config.passes.iter() {
+ pass.as_slice().with_c_str(|s| {
+ if !llvm::LLVMRustAddPass(mpm, s) {
+ cgcx.handler.warn(format!("unknown pass {}, ignoring",
+ *pass).as_slice());
+ }
+ })
+ }
+
+ // Finally, run the actual optimization passes
+ time(config.time_passes, "llvm function passes", (), |()|
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod));
+ time(config.time_passes, "llvm module passes", (), |()|
+ llvm::LLVMRunPassManager(mpm, llmod));
+
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
+
+ match cgcx.lto_ctxt {
+ Some((sess, reachable)) if sess.lto() => {
+ time(sess.time_passes(), "all lto passes", (), |()|
+ lto::run(sess, llmod, tm, reachable));
+
+ if config.emit_lto_bc {
+ let name = format!("{}.lto.bc", name_extra);
+ output_names.with_extension(name.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+ },
+ _ => {},
+ }
+ },
+ None => {},
+ }
+
+ // A codegen-specific pass manager is used to generate object
+ // files for an LLVM module.
+ //
+ // Apparently each of these pass managers is a one-shot kind of
+ // thing, so we create a new one for each type of output. The
+ // pass manager passed to the closure should be ensured to not
+ // escape the closure itself, and the manager should only be
+ // used once.
+ unsafe fn with_codegen(tm: TargetMachineRef, llmod: ModuleRef,
+ no_builtins: bool, f: |PassManagerRef|) {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMRustAddAnalysisPasses(tm, cpm, llmod);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm);
+ llvm::LLVMDisposePassManager(cpm);
+ }
+
+ if config.emit_bc {
+ let ext = format!("{}.bc", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|buf| {
+ llvm::LLVMWriteBitcodeToFile(llmod, buf);
+ })
+ }
+
+ time(config.time_passes, "codegen passes", (), |()| {
+ if config.emit_ir {
+ let ext = format!("{}.ll", name_extra);
+ output_names.with_extension(ext.as_slice()).with_c_str(|output| {
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ llvm::LLVMRustPrintModule(cpm, llmod, output);
+ })
+ })
+ }
+
+ if config.emit_asm {
+ let path = output_names.with_extension(format!("{}.s", name_extra).as_slice());
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::AssemblyFile);
+ });
+ }
+
+ if config.emit_obj {
+ let path = output_names.with_extension(format!("{}.o", name_extra).as_slice());
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::ObjectFile);
+ });
+ }
+ });
+
+ llvm::LLVMDisposeModule(llmod);
+ llvm::LLVMContextDispose(llcx);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+}
+
+pub fn run_passes(sess: &Session,
+ trans: &CrateTranslation,
+ output_types: &[OutputType],
+ crate_output: &OutputFilenames) {
+ // It's possible that we have `codegen_units > 1` but only one item in
+ // `trans.modules`. We could theoretically proceed and do LTO in that
+ // case, but it would be confusing to have the validity of
+ // `-Z lto -C codegen-units=2` depend on details of the crate being
+ // compiled, so we complain regardless.
+ if sess.lto() && sess.opts.cg.codegen_units > 1 {
+ // This case is impossible to handle because LTO expects to be able
+ // to combine the entire crate and all its dependencies into a
+ // single compilation unit, but each codegen unit is in a separate
+ // LLVM context, so they can't easily be combined.
+ sess.fatal("can't perform LTO when using multiple codegen units");
+ }
+
+ // Sanity check
+ assert!(trans.modules.len() == sess.opts.cg.codegen_units);
+
+ unsafe {
+ configure_llvm(sess);
+ }
+
+ let tm = create_target_machine(sess);
+
+ // Figure out what we actually need to build.
+
+ let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone());
+ let mut metadata_config = ModuleConfig::new(tm, vec!());
+
+ modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize));
+
+ // Save all versions of the bytecode if we're saving our temporaries.
+ if sess.opts.cg.save_temps {
+ modules_config.emit_no_opt_bc = true;
+ modules_config.emit_bc = true;
+ modules_config.emit_lto_bc = true;
+ metadata_config.emit_bc = true;
+ }
+
+ // Emit a bitcode file for the crate if we're emitting an rlib.
+ // Whenever an rlib is created, the bitcode is inserted into the
+ // archive in order to allow LTO against it.
+ let needs_crate_bitcode =
+ sess.crate_types.borrow().contains(&config::CrateTypeRlib) &&
+ sess.opts.output_types.contains(&OutputTypeExe) &&
+ sess.opts.cg.codegen_units == 1;
+ if needs_crate_bitcode {
+ modules_config.emit_bc = true;
+ }
+
+ for output_type in output_types.iter() {
+ match *output_type {
+ OutputTypeBitcode => { modules_config.emit_bc = true; },
+ OutputTypeLlvmAssembly => { modules_config.emit_ir = true; },
+ OutputTypeAssembly => {
+ modules_config.emit_asm = true;
+ // If we're not using the LLVM assembler, this function
+ // could be invoked specially with output_type_assembly, so
+ // in this case we still want the metadata object file.
+ if !sess.opts.output_types.contains(&OutputTypeAssembly) {
+ metadata_config.emit_obj = true;
+ }
+ },
+ OutputTypeObject => { modules_config.emit_obj = true; },
+ OutputTypeExe => {
+ modules_config.emit_obj = true;
+ metadata_config.emit_obj = true;
+ },
+ }
+ }
+
+ modules_config.set_flags(sess, trans);
+ metadata_config.set_flags(sess, trans);
+
+
+ // Populate a buffer with a list of codegen tasks. Items are processed in
+ // LIFO order, just because it's a tiny bit simpler that way. (The order
+ // doesn't actually matter.)
+ let mut work_items = Vec::with_capacity(1 + trans.modules.len());
+
+ {
+ let work = build_work_item(sess,
+ trans.metadata_module,
+ metadata_config.clone(),
+ crate_output.clone(),
+ "metadata".to_string());
+ work_items.push(work);
+ }
+
+ for (index, mtrans) in trans.modules.iter().enumerate() {
+ let work = build_work_item(sess,
+ *mtrans,
+ modules_config.clone(),
+ crate_output.clone(),
+ format!("{}", index));
+ work_items.push(work);
+ }
+
+ // Process the work items, optionally using worker threads.
+ if sess.opts.cg.codegen_units == 1 {
+ run_work_singlethreaded(sess, trans.reachable.as_slice(), work_items);
+
+ if needs_crate_bitcode {
+ // The only bitcode file produced (aside from metadata) was
+ // "crate.0.bc". Rename to "crate.bc" since that's what
+ // `link_rlib` expects to find.
+ fs::copy(&crate_output.with_extension("0.bc"),
+ &crate_output.temp_path(OutputTypeBitcode)).unwrap();
+ }
+ } else {
+ run_work_multithreaded(sess, work_items, sess.opts.cg.codegen_units);
+
+ assert!(!needs_crate_bitcode,
+ "can't produce a crate bitcode file from multiple compilation units");
+ }
+
+ // All codegen is finished.
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(tm);
+ }
+
+ // Produce final compile outputs.
+
+ let copy_if_one_unit = |ext: &str, output_type: OutputType| {
+ // Three cases:
+ if sess.opts.cg.codegen_units == 1 {
+ // 1) Only one codegen unit. In this case it's no difficulty
+ // to copy `foo.0.x` to `foo.x`.
+ fs::copy(&crate_output.with_extension(ext),
+ &crate_output.path(output_type)).unwrap();
+ if !sess.opts.cg.save_temps {
+ // The user just wants `foo.x`, not `foo.0.x`.
+ remove(sess, &crate_output.with_extension(ext));
+ }
+ } else {
+ if crate_output.single_output_file.is_some() {
+ // 2) Multiple codegen units, with `-o some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(format!("ignoring -o because multiple .{} files were produced",
+ ext).as_slice());
+ } else {
+ // 3) Multiple codegen units, but no `-o some_name`. We
+ // just leave the `foo.0.x` files in place.
+ // (We don't have to do any work in this case.)
+ }
+ }
+ };
+
+ let link_obj = |output_path: &Path| {
+ // Running `ld -r` on a single input is kind of pointless.
+ if sess.opts.cg.codegen_units == 1 {
+ fs::copy(&crate_output.with_extension("0.o"),
+ output_path).unwrap();
+ // Leave the .0.o file around, to mimic the behavior of the normal
+ // code path.
+ return;
+ }
+
+ // Some builds of MinGW GCC will pass --force-exe-suffix to ld, which
+ // will automatically add a .exe extension if the extension is not
+ // already .exe or .dll. To ensure consistent behavior on Windows, we
+ // add the .exe suffix explicitly and then rename the output file to
+ // the desired path. This will give the correct behavior whether or
+ // not GCC adds --force-exe-suffix.
+ let windows_output_path =
+ if sess.targ_cfg.os == abi::OsWindows {
+ Some(output_path.with_extension("o.exe"))
+ } else {
+ None
+ };
+
+ let pname = get_cc_prog(sess);
+ let mut cmd = Command::new(pname.as_slice());
+
+ cmd.args(sess.targ_cfg.target_strs.cc_args.as_slice());
+ cmd.arg("-nostdlib");
+
+ for index in range(0, trans.modules.len()) {
+ cmd.arg(crate_output.with_extension(format!("{}.o", index).as_slice()));
+ }
+
+ cmd.arg("-r")
+ .arg("-o")
+ .arg(windows_output_path.as_ref().unwrap_or(output_path));
+
+ if (sess.opts.debugging_opts & config::PRINT_LINK_ARGS) != 0 {
+ println!("{}", &cmd);
+ }
+
+ cmd.stdin(::std::io::process::Ignored)
+ .stdout(::std::io::process::InheritFd(1))
+ .stderr(::std::io::process::InheritFd(2));
+ match cmd.status() {
+ Ok(_) => {},
+ Err(e) => {
+ sess.err(format!("could not exec the linker `{}`: {}",
+ pname,
+ e).as_slice());
+ sess.abort_if_errors();
+ },
+ }
+
+ match windows_output_path {
+ Some(ref windows_path) => {
+ fs::rename(windows_path, output_path).unwrap();
+ },
+ None => {
+ // The file is already named according to `output_path`.
+ }
+ }
+ };
+
+ // Flag to indicate whether the user explicitly requested bitcode.
+ // Otherwise, we produced it only as a temporary output, and will need
+ // to get rid of it.
+ // FIXME: Since we don't support LTO anyway, maybe we can avoid
+ // producing the temporary .0.bc's in the first place?
+ let mut save_bitcode = false;
+ for output_type in output_types.iter() {
+ match *output_type {
+ OutputTypeBitcode => {
+ save_bitcode = true;
+ copy_if_one_unit("0.bc", OutputTypeBitcode);
+ },
+ OutputTypeLlvmAssembly => { copy_if_one_unit("0.ll", OutputTypeLlvmAssembly); },
+ OutputTypeAssembly => { copy_if_one_unit("0.s", OutputTypeAssembly); },
+ OutputTypeObject => { link_obj(&crate_output.path(OutputTypeObject)); },
+ OutputTypeExe => {
+ // If OutputTypeObject is already in the list, then
+ // `crate.o` will be handled by the OutputTypeObject case.
+ // Otherwise, we need to create the temporary object so we
+ // can run the linker.
+ if !sess.opts.output_types.contains(&OutputTypeObject) {
+ link_obj(&crate_output.temp_path(OutputTypeObject));
+ }
+ },
+ }
+ }
+ let save_bitcode = save_bitcode;
+
+ // Clean up unwanted temporary files.
+
+ // We create the following files by default:
+ // - crate.0.bc
+ // - crate.0.o
+ // - crate.metadata.bc
+ // - crate.metadata.o
+ // - crate.o (linked from crate.##.o)
+ // - crate.bc (copied from crate.0.bc)
+ // We may create additional files if requested by the user (through
+ // `-C save-temps` or `--emit=` flags).
+
+ if !sess.opts.cg.save_temps {
+ // Remove the temporary .0.o objects. If the user didn't
+ // explicitly request bitcode (with --emit=bc), we must remove
+ // .0.bc as well. (We don't touch the crate.bc that may have been
+ // produced earlier.)
+ for i in range(0, trans.modules.len()) {
+ if modules_config.emit_obj {
+ let ext = format!("{}.o", i);
+ remove(sess, &crate_output.with_extension(ext.as_slice()));
+ }
+
+ if modules_config.emit_bc && !save_bitcode {
+ let ext = format!("{}.bc", i);
+ remove(sess, &crate_output.with_extension(ext.as_slice()));
+ }
+ }
+
+ if metadata_config.emit_bc && !save_bitcode {
+ remove(sess, &crate_output.with_extension("metadata.bc"));
+ }
+ }
+
+ // We leave the following files around by default:
+ // - crate.o
+ // - crate.metadata.o
+ // - crate.bc
+ // These are used in linking steps and will be cleaned up afterward.
+
+ // FIXME: time_llvm_passes support - does this use a global context or
+ // something?
+ //if sess.time_llvm_passes() { llvm::LLVMRustPrintPassTimings(); }
+}
+
+type WorkItem = proc(&CodegenContext):Send;
+
+fn build_work_item(sess: &Session,
+ mtrans: ModuleTranslation,
+ config: ModuleConfig,
+ output_names: OutputFilenames,
+ name_extra: String) -> WorkItem {
+ let mut config = config;
+ config.tm = create_target_machine(sess);
+
+ proc(cgcx) unsafe {
+ optimize_and_codegen(cgcx, mtrans, config, name_extra, output_names);
+ }
+}
+
+fn run_work_singlethreaded(sess: &Session,
+ reachable: &[String],
+ work_items: Vec<WorkItem>) {
+ let cgcx = CodegenContext::new_with_session(sess, reachable);
+ let mut work_items = work_items;
+
+ // Since we're running single-threaded, we can pass the session to
+ // the proc, allowing `optimize_and_codegen` to perform LTO.
+ for work in Unfold::new((), |_| work_items.pop()) {
+ work(&cgcx);
+ }
+}
+
+fn run_work_multithreaded(sess: &Session,
+ work_items: Vec<WorkItem>,
+ num_workers: uint) {
+ // Run some workers to process the work items.
+ let work_items_arc = Arc::new(Mutex::new(work_items));
+ let mut diag_emitter = SharedEmitter::new();
+ let mut futures = Vec::with_capacity(num_workers);
+
+ for i in range(0, num_workers) {
+ let work_items_arc = work_items_arc.clone();
+ let diag_emitter = diag_emitter.clone();
+
+ let future = TaskBuilder::new().named(format!("codegen-{}", i)).try_future(proc() {
+ let diag_handler = mk_handler(box diag_emitter);
+
+ // Must construct cgcx inside the proc because it has non-Send
+ // fields.
+ let cgcx = CodegenContext::new(&diag_handler);
+
+ loop {
+ // Avoid holding the lock for the entire duration of the match.
+ let maybe_work = work_items_arc.lock().pop();
+ match maybe_work {
+ Some(work) => {
+ work(&cgcx);
+
+ // Make sure to fail the worker so the main thread can
+ // tell that there were errors.
+ cgcx.handler.abort_if_errors();
+ }
+ None => break,
+ }
+ }
+ });
+ futures.push(future);
+ }
+
+ let mut failed = false;
+ for future in futures.move_iter() {
+ match future.unwrap() {
+ Ok(()) => {},
+ Err(_) => {
+ failed = true;
+ },
+ }
+ // Display any new diagnostics.
+ diag_emitter.dump(sess.diagnostic().handler());
+ }
+ if failed {
+ sess.fatal("aborting due to worker thread failure");
+ }
+}
+
+pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) {
+ let pname = get_cc_prog(sess);
+ let mut cmd = Command::new(pname.as_slice());
+
+ cmd.arg("-c").arg("-o").arg(outputs.path(OutputTypeObject))
+ .arg(outputs.temp_path(OutputTypeAssembly));
+ debug!("{}", &cmd);
+
+ match cmd.output() {
+ Ok(prog) => {
+ if !prog.status.success() {
+ sess.err(format!("linking with `{}` failed: {}",
+ pname,
+ prog.status).as_slice());
+ sess.note(format!("{}", &cmd).as_slice());
+ let mut note = prog.error.clone();
+ note.push_all(prog.output.as_slice());
+ sess.note(str::from_utf8(note.as_slice()).unwrap());
+ sess.abort_if_errors();
+ }
+ },
+ Err(e) => {
+ sess.err(format!("could not exec the linker `{}`: {}",
+ pname,
+ e).as_slice());
+ sess.abort_if_errors();
+ }
+ }
+}
+
+unsafe fn configure_llvm(sess: &Session) {
+ use std::sync::{Once, ONCE_INIT};
+ static mut INIT: Once = ONCE_INIT;
+
+ // Copy what clang does by turning on loop vectorization at O2 and
+ // slp vectorization at O3
+ let vectorize_loop = !sess.opts.cg.no_vectorize_loops &&
+ (sess.opts.optimize == config::Default ||
+ sess.opts.optimize == config::Aggressive);
+ let vectorize_slp = !sess.opts.cg.no_vectorize_slp &&
+ sess.opts.optimize == config::Aggressive;
+
+ let mut llvm_c_strs = Vec::new();
+ let mut llvm_args = Vec::new();
+ {
+ let add = |arg: &str| {
+ let s = arg.to_c_str();
+ llvm_args.push(s.as_ptr());
+ llvm_c_strs.push(s);
+ };
+ add("rustc"); // fake program name
+ if vectorize_loop { add("-vectorize-loops"); }
+ if vectorize_slp { add("-vectorize-slp"); }
+ if sess.time_llvm_passes() { add("-time-passes"); }
+ if sess.print_llvm_passes() { add("-debug-pass=Structure"); }
+
+ for arg in sess.opts.cg.llvm_args.iter() {
+ add((*arg).as_slice());
+ }
+ }
+
+ INIT.doit(|| {
+ llvm::LLVMInitializePasses();
+
+ // Only initialize the platforms supported by Rust here, because
+ // using --llvm-root will have multiple platforms that rustllvm
+ // doesn't actually link to and it's pointless to put target info
+ // into the registry that Rust cannot generate machine code for.
+ llvm::LLVMInitializeX86TargetInfo();
+ llvm::LLVMInitializeX86Target();
+ llvm::LLVMInitializeX86TargetMC();
+ llvm::LLVMInitializeX86AsmPrinter();
+ llvm::LLVMInitializeX86AsmParser();
+
+ llvm::LLVMInitializeARMTargetInfo();
+ llvm::LLVMInitializeARMTarget();
+ llvm::LLVMInitializeARMTargetMC();
+ llvm::LLVMInitializeARMAsmPrinter();
+ llvm::LLVMInitializeARMAsmParser();
+
+ llvm::LLVMInitializeMipsTargetInfo();
+ llvm::LLVMInitializeMipsTarget();
+ llvm::LLVMInitializeMipsTargetMC();
+ llvm::LLVMInitializeMipsAsmPrinter();
+ llvm::LLVMInitializeMipsAsmParser();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int,
+ llvm_args.as_ptr());
+ });
+}
+
+unsafe fn populate_llvm_passes(fpm: llvm::PassManagerRef,
+ mpm: llvm::PassManagerRef,
+ llmod: ModuleRef,
+ opt: llvm::CodeGenOptLevel,
+ no_builtins: bool) {
+ // Create the PassManagerBuilder for LLVM. We configure it with
+ // reasonable defaults and prepare it to actually populate the pass
+ // manager.
+ let builder = llvm::LLVMPassManagerBuilderCreate();
+ match opt {
+ llvm::CodeGenLevelNone => {
+ // Don't add lifetime intrinsics at O0
+ llvm::LLVMRustAddAlwaysInlinePass(builder, false);
+ }
+ llvm::CodeGenLevelLess => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, true);
+ }
+ // numeric values copied from clang
+ llvm::CodeGenLevelDefault => {
+ llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
+ 225);
+ }
+ llvm::CodeGenLevelAggressive => {
+ llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder,
+ 275);
+ }
+ }
+ llvm::LLVMPassManagerBuilderSetOptLevel(builder, opt as c_uint);
+ llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, no_builtins);
+
+ // Use the builder to populate the function/module pass managers.
+ llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(builder, fpm);
+ llvm::LLVMPassManagerBuilderPopulateModulePassManager(builder, mpm);
+ llvm::LLVMPassManagerBuilderDispose(builder);
+
+ match opt {
+ llvm::CodeGenLevelDefault | llvm::CodeGenLevelAggressive => {
+ "mergefunc".with_c_str(|s| llvm::LLVMRustAddPass(mpm, s));
+ }
+ _ => {}
+ };
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+#![allow(non_snake_case)]
+
register_diagnostic!(E0001, r##"
This error suggests that the expression arm corresponding to the noted pattern
will never be reached as for all possible values of the expression being matched,
E0146,
E0147,
E0148,
- E0149,
- E0150,
E0151,
E0152,
E0153,
E0155,
E0156,
E0157,
- E0158
+ E0158,
+ E0159,
+ E0160,
+ E0161
)
use driver::session::Session;
use back;
-use back::link;
+use back::write;
use back::target_strs;
use back::{arm, x86, x86_64, mips, mipsel};
use lint;
use syntax::parse;
use syntax::parse::token::InternedString;
-use std::collections::{HashSet, HashMap};
+use std::collections::HashMap;
use getopts::{optopt, optmulti, optflag, optflagopt};
use getopts;
use std::cell::{RefCell};
pub debuginfo: DebugInfoLevel,
pub lint_opts: Vec<(String, lint::Level)>,
pub describe_lints: bool,
- pub output_types: Vec<back::link::OutputType> ,
+ pub output_types: Vec<back::write::OutputType> ,
// This was mutable for rustpkg, which updates search paths based on the
// parsed code. It remains mutable in case its replacements wants to use
// this.
- pub addl_lib_search_paths: RefCell<HashSet<Path>>,
+ pub addl_lib_search_paths: RefCell<Vec<Path>>,
pub maybe_sysroot: Option<Path>,
pub target_triple: String,
// User-specified cfg meta items. The compiler itself will add additional
lint_opts: Vec::new(),
describe_lints: false,
output_types: Vec::new(),
- addl_lib_search_paths: RefCell::new(HashSet::new()),
+ addl_lib_search_paths: RefCell::new(Vec::new()),
maybe_sysroot: None,
target_triple: driver::host_triple().to_string(),
cfg: Vec::new(),
}
}
+ // Parses `v` as an unsigned integer into `*slot`. Returns `false`
+ // (leaving the slot untouched) when the value is absent or does not
+ // parse as a uint, so the options macro can report a usage error.
+ fn parse_uint(slot: &mut uint, v: Option<&str>) -> bool {
+ use std::from_str::FromStr;
+ match v.and_then(FromStr::from_str) {
+ Some(i) => { *slot = i; true },
+ None => false
+ }
+ }
}
) )
"metadata to mangle symbol names with"),
extra_filename: String = ("".to_string(), parse_string,
"extra data to put in each output filename"),
+ codegen_units: uint = (1, parse_uint,
+ "divide crate into N units to optimize in parallel"),
)
pub fn build_codegen_options(matches: &getopts::Matches) -> CodegenOptions
for unparsed_output_type in unparsed_output_types.iter() {
for part in unparsed_output_type.as_slice().split(',') {
let output_type = match part.as_slice() {
- "asm" => link::OutputTypeAssembly,
- "ir" => link::OutputTypeLlvmAssembly,
- "bc" => link::OutputTypeBitcode,
- "obj" => link::OutputTypeObject,
- "link" => link::OutputTypeExe,
+ "asm" => write::OutputTypeAssembly,
+ "ir" => write::OutputTypeLlvmAssembly,
+ "bc" => write::OutputTypeBitcode,
+ "obj" => write::OutputTypeObject,
+ "link" => write::OutputTypeExe,
_ => {
early_error(format!("unknown emission type: `{}`",
part).as_slice())
output_types.as_mut_slice().sort();
output_types.dedup();
if output_types.len() == 0 {
- output_types.push(link::OutputTypeExe);
+ output_types.push(write::OutputTypeExe);
}
let sysroot_opt = matches.opt_str("sysroot").map(|m| Path::new(m));
use back::link;
+use back::write;
use driver::session::Session;
-use driver::{config, PpMode, PpSourceMode};
-use driver::{PpmFlowGraph, PpmExpanded, PpmExpandedIdentified, PpmTyped};
-use driver::{PpmIdentified, PpmNormal, PpmSource};
+use driver::config;
use front;
use lint;
use llvm::{ContextRef, ModuleRef};
use metadata::common::LinkMeta;
use metadata::creader;
-use middle::borrowck::{FnPartsWithCFG};
-use middle::borrowck;
-use middle::borrowck::graphviz as borrowck_dot;
-use middle::cfg;
-use middle::cfg::graphviz::LabelledCFG;
use middle::{trans, freevars, stability, kind, ty, typeck, reachable};
use middle::dependency_format;
use middle;
use plugin;
use util::common::time;
-use util::ppaux;
use util::nodemap::{NodeSet};
-use graphviz as dot;
-
use serialize::{json, Encodable};
-use std::from_str::FromStr;
use std::io;
use std::io::fs;
-use std::io::MemReader;
-use std::option;
+use arena::TypedArena;
use syntax::ast;
-use syntax::ast_map;
-use syntax::ast_map::blocks;
-use syntax::ast_map::NodePrinter;
use syntax::attr;
use syntax::attr::{AttrMetaMethods};
use syntax::diagnostics;
use syntax::parse;
use syntax::parse::token;
-use syntax::print::{pp, pprust};
use syntax;
pub fn host_triple() -> &'static str {
if stop_after_phase_2(&sess) { return; }
+ let type_arena = TypedArena::new();
let analysis = phase_3_run_analysis_passes(sess, &expanded_crate,
- ast_map, id);
+ ast_map, &type_arena, id);
phase_save_analysis(&analysis.ty_cx.sess, &expanded_crate, &analysis, outdir);
if stop_after_phase_3(&analysis.ty_cx.sess) { return; }
let (tcx, trans) = phase_4_translate_to_llvm(expanded_crate, analysis);
let mut addl_plugins = Some(addl_plugins);
let Plugins { macros, registrars }
= time(time_passes, "plugin loading", (), |_|
- plugin::load::load_plugins(sess, &krate, addl_plugins.take_unwrap()));
+ plugin::load::load_plugins(sess, &krate, addl_plugins.take().unwrap()));
let mut registry = Registry::new(&krate);
}
});
- let Registry { syntax_exts, lint_passes, .. } = registry;
+ let Registry { syntax_exts, lint_passes, lint_groups, .. } = registry;
{
let mut ls = sess.lint_store.borrow_mut();
for pass in lint_passes.move_iter() {
ls.register_pass(Some(sess), true, pass);
}
+
+ for (name, to) in lint_groups.move_iter() {
+ ls.register_group(Some(sess), true, name, to);
+ }
}
// Lint plugins are registered; now we can process command line flags.
Some((krate, map))
}
+// Bundle of results produced by `phase_3_run_analysis_passes`. The `'tcx`
+// lifetime is that of the `TypedArena` backing the type context (`ty_cx`),
+// which is now allocated by the caller and threaded through analysis.
-pub struct CrateAnalysis {
+pub struct CrateAnalysis<'tcx> {
 pub exp_map2: middle::resolve::ExportMap2,
 pub exported_items: middle::privacy::ExportedItems,
 pub public_items: middle::privacy::PublicItems,
- pub ty_cx: ty::ctxt,
+ pub ty_cx: ty::ctxt<'tcx>,
 pub reachable: NodeSet,
 pub name: String,
 }
/// Run the resolution, typechecking, region checking and other
/// miscellaneous analysis passes on the crate. Return various
/// structures carrying the results of the analysis.
-pub fn phase_3_run_analysis_passes(sess: Session,
- krate: &ast::Crate,
- ast_map: syntax::ast_map::Map,
- name: String) -> CrateAnalysis {
+pub fn phase_3_run_analysis_passes<'tcx>(sess: Session,
+ krate: &ast::Crate,
+ ast_map: syntax::ast_map::Map,
+ type_arena: &'tcx TypedArena<ty::t_box_>,
+ name: String) -> CrateAnalysis<'tcx> {
let time_passes = sess.time_passes();
time(time_passes, "external crate/lib resolution", (), |_|
stability::Index::build(krate));
let ty_cx = ty::mk_ctxt(sess,
+ type_arena,
def_map,
named_region_map,
ast_map,
time(time_passes, "borrow checking", (), |_|
middle::borrowck::check_crate(&ty_cx, krate));
+ time(time_passes, "rvalue checking", (), |_|
+ middle::check_rvalues::check_crate(&ty_cx, krate));
+
time(time_passes, "kind checking", (), |_|
kind::check_crate(&ty_cx, krate));
middle::save::process_crate(sess, krate, analysis, odir));
}
+// An LLVM context paired with the module built inside it. `CrateTranslation`
+// now carries a `Vec` of these instead of a single context/module pair --
+// presumably one per codegen unit (cf. the new `-C codegen-units` option);
+// confirm against trans.
+pub struct ModuleTranslation {
+ pub llcx: ContextRef,
+ pub llmod: ModuleRef,
+}
+
pub struct CrateTranslation {
- pub context: ContextRef,
- pub module: ModuleRef,
- pub metadata_module: ModuleRef,
+ pub modules: Vec<ModuleTranslation>,
+ pub metadata_module: ModuleTranslation,
pub link: LinkMeta,
pub metadata: Vec<u8>,
pub reachable: Vec<String>,
trans: &CrateTranslation,
outputs: &OutputFilenames) {
if sess.opts.cg.no_integrated_as {
- let output_type = link::OutputTypeAssembly;
+ let output_type = write::OutputTypeAssembly;
time(sess.time_passes(), "LLVM passes", (), |_|
- link::write::run_passes(sess, trans, [output_type], outputs));
+ write::run_passes(sess, trans, [output_type], outputs));
- link::write::run_assembler(sess, outputs);
+ write::run_assembler(sess, outputs);
// Remove assembly source, unless --save-temps was specified
if !sess.opts.cg.save_temps {
- fs::unlink(&outputs.temp_path(link::OutputTypeAssembly)).unwrap();
+ fs::unlink(&outputs.temp_path(write::OutputTypeAssembly)).unwrap();
}
} else {
time(sess.time_passes(), "LLVM passes", (), |_|
- link::write::run_passes(sess,
- trans,
- sess.opts.output_types.as_slice(),
- outputs));
+ write::run_passes(sess,
+ trans,
+ sess.opts.output_types.as_slice(),
+ outputs));
}
}
}
pub fn stop_after_phase_5(sess: &Session) -> bool {
- if !sess.opts.output_types.iter().any(|&i| i == link::OutputTypeExe) {
+ if !sess.opts.output_types.iter().any(|&i| i == write::OutputTypeExe) {
debug!("not building executable, returning early from compile_input");
return true;
}
for output_type in sess.opts.output_types.iter() {
let file = outputs.path(*output_type);
match *output_type {
- link::OutputTypeExe => {
+ write::OutputTypeExe => {
for output in sess.crate_types.borrow().iter() {
let p = link::filename_for_input(sess, *output,
id, &file);
}
}
-// This slightly awkward construction is to allow for each PpMode to
-// choose whether it needs to do analyses (which can consume the
-// Session) and then pass through the session (now attached to the
-// analysis results) on to the chosen pretty-printer, along with the
-// `&PpAnn` object.
-//
-// Note that since the `&PrinterSupport` is freshly constructed on each
-// call, it would not make sense to try to attach the lifetime of `self`
-// to the lifetime of the `&PrinterObject`.
-//
-// (The `use_once_payload` is working around the current lack of once
-// functions in the compiler.)
-trait CratePrinter {
- /// Constructs a `PrinterSupport` object and passes it to `f`.
- fn call_with_pp_support<A,B>(&self,
- sess: Session,
- krate: &ast::Crate,
- ast_map: Option<syntax::ast_map::Map>,
- id: String,
- use_once_payload: B,
- f: |&PrinterSupport, B| -> A) -> A;
-}
-
-trait SessionCarrier {
- /// Provides a uniform interface for re-extracting a reference to a
- /// `Session` from a value that now owns it.
- fn sess<'a>(&'a self) -> &'a Session;
-}
-
-trait AstMapCarrier {
- /// Provides a uniform interface for re-extracting a reference to an
- /// `ast_map::Map` from a value that now owns it.
- fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map>;
-}
-
-trait PrinterSupport : SessionCarrier + AstMapCarrier {
- /// Produces the pretty-print annotation object.
- ///
- /// Usually implemented via `self as &pprust::PpAnn`.
- ///
- /// (Rust does not yet support upcasting from a trait object to
- /// an object for one of its super-traits.)
- fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn;
-}
-
-struct NoAnn {
- sess: Session,
- ast_map: Option<ast_map::Map>,
-}
-
-impl PrinterSupport for NoAnn {
- fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
-}
-
-impl SessionCarrier for NoAnn {
- fn sess<'a>(&'a self) -> &'a Session { &self.sess }
-}
-
-impl AstMapCarrier for NoAnn {
- fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
- self.ast_map.as_ref()
- }
-}
-
-impl pprust::PpAnn for NoAnn {}
-
-struct IdentifiedAnnotation {
- sess: Session,
- ast_map: Option<ast_map::Map>,
-}
-
-impl PrinterSupport for IdentifiedAnnotation {
- fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
-}
-
-impl SessionCarrier for IdentifiedAnnotation {
- fn sess<'a>(&'a self) -> &'a Session { &self.sess }
-}
-
-impl AstMapCarrier for IdentifiedAnnotation {
- fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
- self.ast_map.as_ref()
- }
-}
-
-impl pprust::PpAnn for IdentifiedAnnotation {
- fn pre(&self,
- s: &mut pprust::State,
- node: pprust::AnnNode) -> io::IoResult<()> {
- match node {
- pprust::NodeExpr(_) => s.popen(),
- _ => Ok(())
- }
- }
- fn post(&self,
- s: &mut pprust::State,
- node: pprust::AnnNode) -> io::IoResult<()> {
- match node {
- pprust::NodeItem(item) => {
- try!(pp::space(&mut s.s));
- s.synth_comment(item.id.to_string())
- }
- pprust::NodeBlock(blk) => {
- try!(pp::space(&mut s.s));
- s.synth_comment(format!("block {}", blk.id))
- }
- pprust::NodeExpr(expr) => {
- try!(pp::space(&mut s.s));
- try!(s.synth_comment(expr.id.to_string()));
- s.pclose()
- }
- pprust::NodePat(pat) => {
- try!(pp::space(&mut s.s));
- s.synth_comment(format!("pat {}", pat.id))
- }
- }
- }
-}
-
-struct TypedAnnotation {
- analysis: CrateAnalysis,
-}
-
-impl PrinterSupport for TypedAnnotation {
- fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
-}
-
-impl SessionCarrier for TypedAnnotation {
- fn sess<'a>(&'a self) -> &'a Session { &self.analysis.ty_cx.sess }
-}
-
-impl AstMapCarrier for TypedAnnotation {
- fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
- Some(&self.analysis.ty_cx.map)
- }
-}
-
-impl pprust::PpAnn for TypedAnnotation {
- fn pre(&self,
- s: &mut pprust::State,
- node: pprust::AnnNode) -> io::IoResult<()> {
- match node {
- pprust::NodeExpr(_) => s.popen(),
- _ => Ok(())
- }
- }
- fn post(&self,
- s: &mut pprust::State,
- node: pprust::AnnNode) -> io::IoResult<()> {
- let tcx = &self.analysis.ty_cx;
- match node {
- pprust::NodeExpr(expr) => {
- try!(pp::space(&mut s.s));
- try!(pp::word(&mut s.s, "as"));
- try!(pp::space(&mut s.s));
- try!(pp::word(&mut s.s,
- ppaux::ty_to_string(
- tcx,
- ty::expr_ty(tcx, expr)).as_slice()));
- s.pclose()
- }
- _ => Ok(())
- }
- }
-}
-
-fn gather_flowgraph_variants(sess: &Session) -> Vec<borrowck_dot::Variant> {
- let print_loans = config::FLOWGRAPH_PRINT_LOANS;
- let print_moves = config::FLOWGRAPH_PRINT_MOVES;
- let print_assigns = config::FLOWGRAPH_PRINT_ASSIGNS;
- let print_all = config::FLOWGRAPH_PRINT_ALL;
- let opt = |print_which| sess.debugging_opt(print_which);
- let mut variants = Vec::new();
- if opt(print_all) || opt(print_loans) {
- variants.push(borrowck_dot::Loans);
- }
- if opt(print_all) || opt(print_moves) {
- variants.push(borrowck_dot::Moves);
- }
- if opt(print_all) || opt(print_assigns) {
- variants.push(borrowck_dot::Assigns);
- }
- variants
-}
-
-#[deriving(Clone, Show)]
-pub enum UserIdentifiedItem {
- ItemViaNode(ast::NodeId),
- ItemViaPath(Vec<String>),
-}
-
-impl FromStr for UserIdentifiedItem {
- fn from_str(s: &str) -> Option<UserIdentifiedItem> {
- let extract_path_parts = || {
- let v : Vec<_> = s.split_str("::")
- .map(|x|x.to_string())
- .collect();
- Some(ItemViaPath(v))
- };
-
- from_str(s).map(ItemViaNode).or_else(extract_path_parts)
- }
-}
-
-enum NodesMatchingUII<'a> {
- NodesMatchingDirect(option::Item<ast::NodeId>),
- NodesMatchingSuffix(ast_map::NodesMatchingSuffix<'a, String>),
-}
-
-impl<'a> Iterator<ast::NodeId> for NodesMatchingUII<'a> {
- fn next(&mut self) -> Option<ast::NodeId> {
- match self {
- &NodesMatchingDirect(ref mut iter) => iter.next(),
- &NodesMatchingSuffix(ref mut iter) => iter.next(),
- }
- }
-}
-
-impl UserIdentifiedItem {
- fn reconstructed_input(&self) -> String {
- match *self {
- ItemViaNode(node_id) => node_id.to_string(),
- ItemViaPath(ref parts) => parts.connect("::"),
- }
- }
-
- fn all_matching_node_ids<'a>(&'a self, map: &'a ast_map::Map) -> NodesMatchingUII<'a> {
- match *self {
- ItemViaNode(node_id) =>
- NodesMatchingDirect(Some(node_id).move_iter()),
- ItemViaPath(ref parts) =>
- NodesMatchingSuffix(map.nodes_matching_suffix(parts.as_slice())),
- }
- }
-
- fn to_one_node_id(self, user_option: &str, sess: &Session, map: &ast_map::Map) -> ast::NodeId {
- let fail_because = |is_wrong_because| -> ast::NodeId {
- let message =
- format!("{:s} needs NodeId (int) or unique \
- path suffix (b::c::d); got {:s}, which {:s}",
- user_option,
- self.reconstructed_input(),
- is_wrong_because);
- sess.fatal(message.as_slice())
- };
-
- let mut saw_node = ast::DUMMY_NODE_ID;
- let mut seen = 0u;
- for node in self.all_matching_node_ids(map) {
- saw_node = node;
- seen += 1;
- if seen > 1 {
- fail_because("does not resolve uniquely");
- }
- }
- if seen == 0 {
- fail_because("does not resolve to any item");
- }
-
- assert!(seen == 1);
- return saw_node;
- }
-}
-
-impl CratePrinter for PpSourceMode {
- fn call_with_pp_support<A,B>(&self,
- sess: Session,
- krate: &ast::Crate,
- ast_map: Option<syntax::ast_map::Map>,
- id: String,
- payload: B,
- f: |&PrinterSupport, B| -> A) -> A {
- match *self {
- PpmNormal | PpmExpanded => {
- let annotation = NoAnn { sess: sess, ast_map: ast_map };
- f(&annotation, payload)
- }
-
- PpmIdentified | PpmExpandedIdentified => {
- let annotation = IdentifiedAnnotation { sess: sess, ast_map: ast_map };
- f(&annotation, payload)
- }
- PpmTyped => {
- let ast_map = ast_map.expect("--pretty=typed missing ast_map");
- let analysis = phase_3_run_analysis_passes(sess, krate, ast_map, id);
- let annotation = TypedAnnotation { analysis: analysis };
- f(&annotation, payload)
- }
- }
- }
-}
-
-fn needs_ast_map(ppm: &PpMode, opt_uii: &Option<UserIdentifiedItem>) -> bool {
- match *ppm {
- PpmSource(PpmNormal) |
- PpmSource(PpmIdentified) => opt_uii.is_some(),
-
- PpmSource(PpmExpanded) |
- PpmSource(PpmExpandedIdentified) |
- PpmSource(PpmTyped) |
- PpmFlowGraph => true
- }
-}
-
-fn needs_expansion(ppm: &PpMode) -> bool {
- match *ppm {
- PpmSource(PpmNormal) |
- PpmSource(PpmIdentified) => false,
-
- PpmSource(PpmExpanded) |
- PpmSource(PpmExpandedIdentified) |
- PpmSource(PpmTyped) |
- PpmFlowGraph => true
- }
-}
-pub fn pretty_print_input(sess: Session,
- cfg: ast::CrateConfig,
- input: &Input,
- ppm: PpMode,
- opt_uii: Option<UserIdentifiedItem>,
- ofile: Option<Path>) {
- let krate = phase_1_parse_input(&sess, cfg, input);
- let id = link::find_crate_name(Some(&sess), krate.attrs.as_slice(), input);
-
- let is_expanded = needs_expansion(&ppm);
- let (krate, ast_map) = if needs_ast_map(&ppm, &opt_uii) {
- let k = phase_2_configure_and_expand(&sess, krate, id.as_slice(), None);
- let (krate, ast_map) = match k {
- None => return,
- Some(p) => p,
- };
- (krate, Some(ast_map))
- } else {
- (krate, None)
- };
-
- let src_name = source_name(input);
- let src = Vec::from_slice(sess.codemap()
- .get_filemap(src_name.as_slice())
- .src
- .as_bytes());
- let mut rdr = MemReader::new(src);
-
- let out = match ofile {
- None => box io::stdout() as Box<Writer+'static>,
- Some(p) => {
- let r = io::File::create(&p);
- match r {
- Ok(w) => box w as Box<Writer+'static>,
- Err(e) => fail!("print-print failed to open {} due to {}",
- p.display(), e),
- }
- }
- };
-
- match (ppm, opt_uii) {
- (PpmSource(s), None) =>
- s.call_with_pp_support(
- sess, &krate, ast_map, id, out, |annotation, out| {
- debug!("pretty printing source code {}", s);
- let sess = annotation.sess();
- pprust::print_crate(sess.codemap(),
- sess.diagnostic(),
- &krate,
- src_name.to_string(),
- &mut rdr,
- out,
- annotation.pp_ann(),
- is_expanded)
- }),
-
- (PpmSource(s), Some(uii)) =>
- s.call_with_pp_support(
- sess, &krate, ast_map, id, (out,uii), |annotation, (out,uii)| {
- debug!("pretty printing source code {}", s);
- let sess = annotation.sess();
- let ast_map = annotation.ast_map()
- .expect("--pretty missing ast_map");
- let mut pp_state =
- pprust::State::new_from_input(sess.codemap(),
- sess.diagnostic(),
- src_name.to_string(),
- &mut rdr,
- out,
- annotation.pp_ann(),
- is_expanded);
- for node_id in uii.all_matching_node_ids(ast_map) {
- let node = ast_map.get(node_id);
- try!(pp_state.print_node(&node));
- try!(pp::space(&mut pp_state.s));
- try!(pp_state.synth_comment(ast_map.path_to_string(node_id)));
- try!(pp::hardbreak(&mut pp_state.s));
- }
- pp::eof(&mut pp_state.s)
- }),
-
- (PpmFlowGraph, opt_uii) => {
- debug!("pretty printing flow graph for {}", opt_uii);
- let uii = opt_uii.unwrap_or_else(|| {
- sess.fatal(format!("`pretty flowgraph=..` needs NodeId (int) or
- unique path suffix (b::c::d)").as_slice())
-
- });
- let ast_map = ast_map.expect("--pretty flowgraph missing ast_map");
- let nodeid = uii.to_one_node_id("--pretty", &sess, &ast_map);
-
- let node = ast_map.find(nodeid).unwrap_or_else(|| {
- sess.fatal(format!("--pretty flowgraph couldn't find id: {}",
- nodeid).as_slice())
- });
-
- let code = blocks::Code::from_node(node);
- match code {
- Some(code) => {
- let variants = gather_flowgraph_variants(&sess);
- let analysis = phase_3_run_analysis_passes(sess, &krate,
- ast_map, id);
- print_flowgraph(variants, analysis, code, out)
- }
- None => {
- let message = format!("--pretty=flowgraph needs \
- block, fn, or method; got {:?}",
- node);
-
- // point to what was found, if there's an
- // accessible span.
- match ast_map.opt_span(nodeid) {
- Some(sp) => sess.span_fatal(sp, message.as_slice()),
- None => sess.fatal(message.as_slice())
- }
- }
- }
- }
- }.unwrap()
-}
-
-fn print_flowgraph<W:io::Writer>(variants: Vec<borrowck_dot::Variant>,
- analysis: CrateAnalysis,
- code: blocks::Code,
- mut out: W) -> io::IoResult<()> {
- let ty_cx = &analysis.ty_cx;
- let cfg = match code {
- blocks::BlockCode(block) => cfg::CFG::new(ty_cx, &*block),
- blocks::FnLikeCode(fn_like) => cfg::CFG::new(ty_cx, &*fn_like.body()),
- };
- debug!("cfg: {:?}", cfg);
-
- match code {
- _ if variants.len() == 0 => {
- let lcfg = LabelledCFG {
- ast_map: &ty_cx.map,
- cfg: &cfg,
- name: format!("node_{}", code.id()),
- };
- let r = dot::render(&lcfg, &mut out);
- return expand_err_details(r);
- }
- blocks::BlockCode(_) => {
- ty_cx.sess.err("--pretty flowgraph with -Z flowgraph-print \
- annotations requires fn-like node id.");
- return Ok(())
- }
- blocks::FnLikeCode(fn_like) => {
- let fn_parts = FnPartsWithCFG::from_fn_like(&fn_like, &cfg);
- let (bccx, analysis_data) =
- borrowck::build_borrowck_dataflow_data_for_fn(ty_cx, fn_parts);
-
- let lcfg = LabelledCFG {
- ast_map: &ty_cx.map,
- cfg: &cfg,
- name: format!("node_{}", code.id()),
- };
- let lcfg = borrowck_dot::DataflowLabeller {
- inner: lcfg,
- variants: variants,
- borrowck_ctxt: &bccx,
- analysis_data: &analysis_data,
- };
- let r = dot::render(&lcfg, &mut out);
- return expand_err_details(r);
- }
- }
-
- fn expand_err_details(r: io::IoResult<()>) -> io::IoResult<()> {
- r.map_err(|ioerr| {
- let orig_detail = ioerr.detail.clone();
- let m = "graphviz::render failed";
- io::IoError {
- detail: Some(match orig_detail {
- None => m.into_string(),
- Some(d) => format!("{}: {}", m, d)
- }),
- ..ioerr
- }
- })
- }
-}
-
pub fn collect_crate_types(session: &Session,
attrs: &[ast::Attribute]) -> Vec<config::CrateType> {
// Unconditionally collect crate types from attributes to make them used
session.opts.cg.metadata.clone()
}
+#[deriving(Clone)]
pub struct OutputFilenames {
pub out_directory: Path,
pub out_filestem: String,
}
impl OutputFilenames {
- pub fn path(&self, flavor: link::OutputType) -> Path {
+ pub fn path(&self, flavor: write::OutputType) -> Path {
match self.single_output_file {
Some(ref path) => return path.clone(),
None => {}
self.temp_path(flavor)
}
- pub fn temp_path(&self, flavor: link::OutputType) -> Path {
+ pub fn temp_path(&self, flavor: write::OutputType) -> Path {
let base = self.out_directory.join(self.filestem());
match flavor {
- link::OutputTypeBitcode => base.with_extension("bc"),
- link::OutputTypeAssembly => base.with_extension("s"),
- link::OutputTypeLlvmAssembly => base.with_extension("ll"),
- link::OutputTypeObject => base.with_extension("o"),
- link::OutputTypeExe => base,
+ write::OutputTypeBitcode => base.with_extension("bc"),
+ write::OutputTypeAssembly => base.with_extension("s"),
+ write::OutputTypeLlvmAssembly => base.with_extension("ll"),
+ write::OutputTypeObject => base.with_extension("o"),
+ write::OutputTypeExe => base,
}
}
pub mod driver;
pub mod session;
pub mod config;
+pub mod pretty;
pub fn main_args(args: &[String]) -> int {
let ofile = matches.opt_str("o").map(|o| Path::new(o));
let pretty = matches.opt_default("pretty", "normal").map(|a| {
- parse_pretty(&sess, a.as_slice())
+ pretty::parse_pretty(&sess, a.as_slice())
});
match pretty {
Some((ppm, opt_uii)) => {
- driver::pretty_print_input(sess, cfg, &input, ppm, opt_uii, ofile);
+ pretty::pretty_print_input(sess, cfg, &input, ppm, opt_uii, ofile);
return;
}
None => {/* continue */ }
lints
}
+ // Drops the plugin/builtin `bool` flag from each (name, lints, flag)
+ // triple and returns the lint groups sorted by group name, for the
+ // `-W help` listing.
+ fn sort_lint_groups(lints: Vec<(&'static str, Vec<lint::LintId>, bool)>)
+ -> Vec<(&'static str, Vec<lint::LintId>)> {
+ let mut lints: Vec<_> = lints.move_iter().map(|(x, y, _)| (x, y)).collect();
+ lints.sort_by(|&(x, _): &(&'static str, Vec<lint::LintId>),
+ &(y, _): &(&'static str, Vec<lint::LintId>)| {
+ x.cmp(&y)
+ });
+ lints
+ }
+
let (plugin, builtin) = lint_store.get_lints().partitioned(|&(_, p)| p);
let plugin = sort_lints(plugin);
let builtin = sort_lints(builtin);
- // FIXME (#7043): We should use the width in character cells rather than
- // the number of codepoints.
+ let (plugin_groups, builtin_groups) = lint_store.get_lint_groups().partitioned(|&(_, _, p)| p);
+ let plugin_groups = sort_lint_groups(plugin_groups);
+ let builtin_groups = sort_lint_groups(builtin_groups);
+
let max_name_len = plugin.iter().chain(builtin.iter())
- .map(|&s| s.name.char_len())
+ .map(|&s| s.name.width(true))
.max().unwrap_or(0);
let padded = |x: &str| {
" ".repeat(max_name_len - x.char_len()).append(x)
print_lints(builtin);
- match (loaded_plugins, plugin.len()) {
- (false, 0) => {
- println!("Compiler plugins can provide additional lints. To see a listing of these, \
- re-run `rustc -W help` with a crate filename.");
+
+
+ let max_name_len = plugin_groups.iter().chain(builtin_groups.iter())
+ .map(|&(s, _)| s.width(true))
+ .max().unwrap_or(0);
+ let padded = |x: &str| {
+ " ".repeat(max_name_len - x.char_len()).append(x)
+ };
+
+ println!("Lint groups provided by rustc:\n");
+ println!(" {} {}", padded("name"), "sub-lints");
+ println!(" {} {}", padded("----"), "---------");
+
+ let print_lint_groups = |lints: Vec<(&'static str, Vec<lint::LintId>)>| {
+ for (name, to) in lints.move_iter() {
+ let name = name.chars().map(|x| x.to_lowercase())
+ .collect::<String>().replace("_", "-");
+ let desc = to.move_iter().map(|x| x.as_str()).collect::<Vec<String>>().connect(", ");
+ println!(" {} {}",
+ padded(name.as_slice()), desc);
}
- (false, _) => fail!("didn't load lint plugins but got them anyway!"),
- (true, 0) => println!("This crate does not load any lint plugins."),
- (true, _) => {
- println!("Lint checks provided by plugins loaded by this crate:\n");
- print_lints(plugin);
+ println!("\n");
+ };
+
+ print_lint_groups(builtin_groups);
+
+ match (loaded_plugins, plugin.len(), plugin_groups.len()) {
+ (false, 0, _) | (false, _, 0) => {
+ println!("Compiler plugins can provide additional lints and lint groups. To see a \
+ listing of these, re-run `rustc -W help` with a crate filename.");
+ }
+ (false, _, _) => fail!("didn't load lint plugins but got them anyway!"),
+ (true, 0, 0) => println!("This crate does not load any lint plugins or lint groups."),
+ (true, l, g) => {
+ if l > 0 {
+ println!("Lint checks provided by plugins loaded by this crate:\n");
+ print_lints(plugin);
+ }
+ if g > 0 {
+ println!("Lint groups provided by plugins loaded by this crate:\n");
+ print_lint_groups(plugin_groups);
+ }
}
}
}
}
}
-#[deriving(PartialEq, Show)]
-pub enum PpSourceMode {
- PpmNormal,
- PpmExpanded,
- PpmTyped,
- PpmIdentified,
- PpmExpandedIdentified,
-}
-
-#[deriving(PartialEq, Show)]
-pub enum PpMode {
- PpmSource(PpSourceMode),
- PpmFlowGraph,
-}
-
-fn parse_pretty(sess: &Session, name: &str) -> (PpMode, Option<driver::UserIdentifiedItem>) {
- let mut split = name.splitn(1, '=');
- let first = split.next().unwrap();
- let opt_second = split.next();
- let first = match first {
- "normal" => PpmSource(PpmNormal),
- "expanded" => PpmSource(PpmExpanded),
- "typed" => PpmSource(PpmTyped),
- "expanded,identified" => PpmSource(PpmExpandedIdentified),
- "identified" => PpmSource(PpmIdentified),
- "flowgraph" => PpmFlowGraph,
- _ => {
- sess.fatal(format!(
- "argument to `pretty` must be one of `normal`, \
- `expanded`, `flowgraph=<nodeid>`, `typed`, `identified`, \
- or `expanded,identified`; got {}", name).as_slice());
- }
- };
- let opt_second = opt_second.and_then::<driver::UserIdentifiedItem>(from_str);
- (first, opt_second)
-}
-
fn parse_crate_attrs(sess: &Session, input: &Input) ->
Vec<ast::Attribute> {
let result = match *input {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! The various pretty print routines.
+
+use back::link;
+
+use driver::config;
+use driver::driver::{mod, CrateAnalysis};
+use driver::session::Session;
+
+use middle::ty;
+use middle::borrowck::{mod, FnPartsWithCFG};
+use middle::borrowck::graphviz as borrowck_dot;
+use middle::cfg;
+use middle::cfg::graphviz::LabelledCFG;
+
+use util::ppaux;
+
+use syntax::ast;
+use syntax::ast_map::{mod, blocks, NodePrinter};
+use syntax::print::{pp, pprust};
+
+use graphviz as dot;
+
+use std::io::{mod, MemReader};
+use std::from_str::FromStr;
+use std::option;
+use arena::TypedArena;
+
+#[deriving(PartialEq, Show)]
+pub enum PpSourceMode {
+ PpmNormal,
+ PpmExpanded,
+ PpmTyped,
+ PpmIdentified,
+ PpmExpandedIdentified,
+ PpmExpandedHygiene,
+}
+
+#[deriving(PartialEq, Show)]
+pub enum PpMode {
+ PpmSource(PpSourceMode),
+ PpmFlowGraph,
+}
+
+pub fn parse_pretty(sess: &Session, name: &str) -> (PpMode, Option<UserIdentifiedItem>) {
+ let mut split = name.splitn(1, '=');
+ let first = split.next().unwrap();
+ let opt_second = split.next();
+ let first = match first {
+ "normal" => PpmSource(PpmNormal),
+ "expanded" => PpmSource(PpmExpanded),
+ "typed" => PpmSource(PpmTyped),
+ "expanded,identified" => PpmSource(PpmExpandedIdentified),
+ "expanded,hygiene" => PpmSource(PpmExpandedHygiene),
+ "identified" => PpmSource(PpmIdentified),
+ "flowgraph" => PpmFlowGraph,
+ _ => {
+            sess.fatal(format!(
+                "argument to `pretty` must be one of `normal`, \
+                 `expanded`, `flowgraph=<nodeid>`, `typed`, `identified`, \
+                 `expanded,identified`, or `expanded,hygiene`; got {}", name).as_slice());
+ }
+ };
+ let opt_second = opt_second.and_then::<UserIdentifiedItem>(from_str);
+ (first, opt_second)
+}
+
+
+
+// This slightly awkward construction is to allow for each PpMode to
+// choose whether it needs to do analyses (which can consume the
+// Session) and then pass through the session (now attached to the
+// analysis results) on to the chosen pretty-printer, along with the
+// `&PpAnn` object.
+//
+// Note that since the `&PrinterSupport` is freshly constructed on each
+// call, it would not make sense to try to attach the lifetime of `self`
+// to the lifetime of the returned `&PrinterSupport` object.
+//
+// (The `use_once_payload` is working around the current lack of once
+// functions in the compiler.)
+
+impl PpSourceMode {
+ /// Constructs a `PrinterSupport` object and passes it to `f`.
+ fn call_with_pp_support<A,B>(&self,
+ sess: Session,
+ krate: &ast::Crate,
+ ast_map: Option<ast_map::Map>,
+ id: String,
+ payload: B,
+ f: |&PrinterSupport, B| -> A) -> A {
+ match *self {
+ PpmNormal | PpmExpanded => {
+ let annotation = NoAnn { sess: sess, ast_map: ast_map };
+ f(&annotation, payload)
+ }
+
+ PpmIdentified | PpmExpandedIdentified => {
+ let annotation = IdentifiedAnnotation { sess: sess, ast_map: ast_map };
+ f(&annotation, payload)
+ }
+ PpmExpandedHygiene => {
+ let annotation = HygieneAnnotation { sess: sess, ast_map: ast_map };
+ f(&annotation, payload)
+ }
+ PpmTyped => {
+ let ast_map = ast_map.expect("--pretty=typed missing ast_map");
+ let type_arena = TypedArena::new();
+ let analysis = driver::phase_3_run_analysis_passes(sess, krate, ast_map,
+ &type_arena, id);
+ let annotation = TypedAnnotation { analysis: analysis };
+ f(&annotation, payload)
+ }
+ }
+ }
+}
+
+trait SessionCarrier {
+ /// Provides a uniform interface for re-extracting a reference to a
+ /// `Session` from a value that now owns it.
+ fn sess<'a>(&'a self) -> &'a Session;
+}
+
+trait AstMapCarrier {
+ /// Provides a uniform interface for re-extracting a reference to an
+ /// `ast_map::Map` from a value that now owns it.
+ fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map>;
+}
+
+trait PrinterSupport : SessionCarrier + AstMapCarrier {
+ /// Produces the pretty-print annotation object.
+ ///
+ /// Usually implemented via `self as &pprust::PpAnn`.
+ ///
+ /// (Rust does not yet support upcasting from a trait object to
+ /// an object for one of its super-traits.)
+ fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn;
+}
+
+struct NoAnn {
+ sess: Session,
+ ast_map: Option<ast_map::Map>,
+}
+
+impl PrinterSupport for NoAnn {
+ fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
+}
+
+impl SessionCarrier for NoAnn {
+ fn sess<'a>(&'a self) -> &'a Session { &self.sess }
+}
+
+impl AstMapCarrier for NoAnn {
+ fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
+ self.ast_map.as_ref()
+ }
+}
+
+impl pprust::PpAnn for NoAnn {}
+
+struct IdentifiedAnnotation {
+ sess: Session,
+ ast_map: Option<ast_map::Map>,
+}
+
+impl PrinterSupport for IdentifiedAnnotation {
+ fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
+}
+
+impl SessionCarrier for IdentifiedAnnotation {
+ fn sess<'a>(&'a self) -> &'a Session { &self.sess }
+}
+
+impl AstMapCarrier for IdentifiedAnnotation {
+ fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
+ self.ast_map.as_ref()
+ }
+}
+
+impl pprust::PpAnn for IdentifiedAnnotation {
+ fn pre(&self,
+ s: &mut pprust::State,
+ node: pprust::AnnNode) -> io::IoResult<()> {
+ match node {
+ pprust::NodeExpr(_) => s.popen(),
+ _ => Ok(())
+ }
+ }
+ fn post(&self,
+ s: &mut pprust::State,
+ node: pprust::AnnNode) -> io::IoResult<()> {
+ match node {
+ pprust::NodeIdent(_) | pprust::NodeName(_) => Ok(()),
+
+ pprust::NodeItem(item) => {
+ try!(pp::space(&mut s.s));
+ s.synth_comment(item.id.to_string())
+ }
+ pprust::NodeBlock(blk) => {
+ try!(pp::space(&mut s.s));
+ s.synth_comment(format!("block {}", blk.id))
+ }
+ pprust::NodeExpr(expr) => {
+ try!(pp::space(&mut s.s));
+ try!(s.synth_comment(expr.id.to_string()));
+ s.pclose()
+ }
+ pprust::NodePat(pat) => {
+ try!(pp::space(&mut s.s));
+ s.synth_comment(format!("pat {}", pat.id))
+ }
+ }
+ }
+}
+
+struct HygieneAnnotation {
+ sess: Session,
+ ast_map: Option<ast_map::Map>,
+}
+
+impl PrinterSupport for HygieneAnnotation {
+ fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
+}
+
+impl SessionCarrier for HygieneAnnotation {
+ fn sess<'a>(&'a self) -> &'a Session { &self.sess }
+}
+
+impl AstMapCarrier for HygieneAnnotation {
+ fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
+ self.ast_map.as_ref()
+ }
+}
+
+impl pprust::PpAnn for HygieneAnnotation {
+ fn post(&self,
+ s: &mut pprust::State,
+ node: pprust::AnnNode) -> io::IoResult<()> {
+ match node {
+ pprust::NodeIdent(&ast::Ident { name: ast::Name(nm), ctxt }) => {
+ try!(pp::space(&mut s.s));
+ // FIXME #16420: this doesn't display the connections
+ // between syntax contexts
+ s.synth_comment(format!("{}#{}", nm, ctxt))
+ }
+ pprust::NodeName(&ast::Name(nm)) => {
+ try!(pp::space(&mut s.s));
+ s.synth_comment(nm.to_string())
+ }
+ _ => Ok(())
+ }
+ }
+}
+
+
+struct TypedAnnotation<'tcx> {
+ analysis: CrateAnalysis<'tcx>,
+}
+
+impl<'tcx> PrinterSupport for TypedAnnotation<'tcx> {
+ fn pp_ann<'a>(&'a self) -> &'a pprust::PpAnn { self as &pprust::PpAnn }
+}
+
+impl<'tcx> SessionCarrier for TypedAnnotation<'tcx> {
+ fn sess<'a>(&'a self) -> &'a Session { &self.analysis.ty_cx.sess }
+}
+
+impl<'tcx> AstMapCarrier for TypedAnnotation<'tcx> {
+ fn ast_map<'a>(&'a self) -> Option<&'a ast_map::Map> {
+ Some(&self.analysis.ty_cx.map)
+ }
+}
+
+impl<'tcx> pprust::PpAnn for TypedAnnotation<'tcx> {
+ fn pre(&self,
+ s: &mut pprust::State,
+ node: pprust::AnnNode) -> io::IoResult<()> {
+ match node {
+ pprust::NodeExpr(_) => s.popen(),
+ _ => Ok(())
+ }
+ }
+ fn post(&self,
+ s: &mut pprust::State,
+ node: pprust::AnnNode) -> io::IoResult<()> {
+ let tcx = &self.analysis.ty_cx;
+ match node {
+ pprust::NodeExpr(expr) => {
+ try!(pp::space(&mut s.s));
+ try!(pp::word(&mut s.s, "as"));
+ try!(pp::space(&mut s.s));
+ try!(pp::word(&mut s.s,
+ ppaux::ty_to_string(
+ tcx,
+ ty::expr_ty(tcx, expr)).as_slice()));
+ s.pclose()
+ }
+ _ => Ok(())
+ }
+ }
+}
+
+fn gather_flowgraph_variants(sess: &Session) -> Vec<borrowck_dot::Variant> {
+ let print_loans = config::FLOWGRAPH_PRINT_LOANS;
+ let print_moves = config::FLOWGRAPH_PRINT_MOVES;
+ let print_assigns = config::FLOWGRAPH_PRINT_ASSIGNS;
+ let print_all = config::FLOWGRAPH_PRINT_ALL;
+ let opt = |print_which| sess.debugging_opt(print_which);
+ let mut variants = Vec::new();
+ if opt(print_all) || opt(print_loans) {
+ variants.push(borrowck_dot::Loans);
+ }
+ if opt(print_all) || opt(print_moves) {
+ variants.push(borrowck_dot::Moves);
+ }
+ if opt(print_all) || opt(print_assigns) {
+ variants.push(borrowck_dot::Assigns);
+ }
+ variants
+}
+
+#[deriving(Clone, Show)]
+pub enum UserIdentifiedItem {
+ ItemViaNode(ast::NodeId),
+ ItemViaPath(Vec<String>),
+}
+
+impl FromStr for UserIdentifiedItem {
+ fn from_str(s: &str) -> Option<UserIdentifiedItem> {
+ let extract_path_parts = || {
+ let v : Vec<_> = s.split_str("::")
+ .map(|x|x.to_string())
+ .collect();
+ Some(ItemViaPath(v))
+ };
+
+ from_str(s).map(ItemViaNode).or_else(extract_path_parts)
+ }
+}
+
+enum NodesMatchingUII<'a> {
+ NodesMatchingDirect(option::Item<ast::NodeId>),
+ NodesMatchingSuffix(ast_map::NodesMatchingSuffix<'a, String>),
+}
+
+impl<'a> Iterator<ast::NodeId> for NodesMatchingUII<'a> {
+ fn next(&mut self) -> Option<ast::NodeId> {
+ match self {
+ &NodesMatchingDirect(ref mut iter) => iter.next(),
+ &NodesMatchingSuffix(ref mut iter) => iter.next(),
+ }
+ }
+}
+
+impl UserIdentifiedItem {
+ fn reconstructed_input(&self) -> String {
+ match *self {
+ ItemViaNode(node_id) => node_id.to_string(),
+ ItemViaPath(ref parts) => parts.connect("::"),
+ }
+ }
+
+ fn all_matching_node_ids<'a>(&'a self, map: &'a ast_map::Map) -> NodesMatchingUII<'a> {
+ match *self {
+ ItemViaNode(node_id) =>
+ NodesMatchingDirect(Some(node_id).move_iter()),
+ ItemViaPath(ref parts) =>
+ NodesMatchingSuffix(map.nodes_matching_suffix(parts.as_slice())),
+ }
+ }
+
+ fn to_one_node_id(self, user_option: &str, sess: &Session, map: &ast_map::Map) -> ast::NodeId {
+ let fail_because = |is_wrong_because| -> ast::NodeId {
+ let message =
+ format!("{:s} needs NodeId (int) or unique \
+ path suffix (b::c::d); got {:s}, which {:s}",
+ user_option,
+ self.reconstructed_input(),
+ is_wrong_because);
+ sess.fatal(message.as_slice())
+ };
+
+ let mut saw_node = ast::DUMMY_NODE_ID;
+ let mut seen = 0u;
+ for node in self.all_matching_node_ids(map) {
+ saw_node = node;
+ seen += 1;
+ if seen > 1 {
+ fail_because("does not resolve uniquely");
+ }
+ }
+ if seen == 0 {
+ fail_because("does not resolve to any item");
+ }
+
+ assert!(seen == 1);
+ return saw_node;
+ }
+}
+
+fn needs_ast_map(ppm: &PpMode, opt_uii: &Option<UserIdentifiedItem>) -> bool {
+ match *ppm {
+ PpmSource(PpmNormal) |
+ PpmSource(PpmIdentified) => opt_uii.is_some(),
+
+ PpmSource(PpmExpanded) |
+ PpmSource(PpmExpandedIdentified) |
+ PpmSource(PpmExpandedHygiene) |
+ PpmSource(PpmTyped) |
+ PpmFlowGraph => true
+ }
+}
+
+fn needs_expansion(ppm: &PpMode) -> bool {
+ match *ppm {
+ PpmSource(PpmNormal) |
+ PpmSource(PpmIdentified) => false,
+
+ PpmSource(PpmExpanded) |
+ PpmSource(PpmExpandedIdentified) |
+ PpmSource(PpmExpandedHygiene) |
+ PpmSource(PpmTyped) |
+ PpmFlowGraph => true
+ }
+}
+
+pub fn pretty_print_input(sess: Session,
+ cfg: ast::CrateConfig,
+ input: &driver::Input,
+ ppm: PpMode,
+ opt_uii: Option<UserIdentifiedItem>,
+ ofile: Option<Path>) {
+ let krate = driver::phase_1_parse_input(&sess, cfg, input);
+ let id = link::find_crate_name(Some(&sess), krate.attrs.as_slice(), input);
+
+ let is_expanded = needs_expansion(&ppm);
+ let (krate, ast_map) = if needs_ast_map(&ppm, &opt_uii) {
+ let k = driver::phase_2_configure_and_expand(&sess, krate, id.as_slice(), None);
+ let (krate, ast_map) = match k {
+ None => return,
+ Some(p) => p,
+ };
+ (krate, Some(ast_map))
+ } else {
+ (krate, None)
+ };
+
+ let src_name = driver::source_name(input);
+ let src = Vec::from_slice(sess.codemap()
+ .get_filemap(src_name.as_slice())
+ .src
+ .as_bytes());
+ let mut rdr = MemReader::new(src);
+
+ let out = match ofile {
+ None => box io::stdout() as Box<Writer+'static>,
+ Some(p) => {
+ let r = io::File::create(&p);
+ match r {
+ Ok(w) => box w as Box<Writer+'static>,
+                Err(e) => fail!("pretty-print failed to open {} due to {}",
+                                p.display(), e),
+ }
+ }
+ };
+
+ match (ppm, opt_uii) {
+ (PpmSource(s), None) =>
+ s.call_with_pp_support(
+ sess, &krate, ast_map, id, out, |annotation, out| {
+ debug!("pretty printing source code {}", s);
+ let sess = annotation.sess();
+ pprust::print_crate(sess.codemap(),
+ sess.diagnostic(),
+ &krate,
+ src_name.to_string(),
+ &mut rdr,
+ out,
+ annotation.pp_ann(),
+ is_expanded)
+ }),
+
+ (PpmSource(s), Some(uii)) =>
+ s.call_with_pp_support(
+ sess, &krate, ast_map, id, (out,uii), |annotation, (out,uii)| {
+ debug!("pretty printing source code {}", s);
+ let sess = annotation.sess();
+ let ast_map = annotation.ast_map()
+ .expect("--pretty missing ast_map");
+ let mut pp_state =
+ pprust::State::new_from_input(sess.codemap(),
+ sess.diagnostic(),
+ src_name.to_string(),
+ &mut rdr,
+ out,
+ annotation.pp_ann(),
+ is_expanded);
+ for node_id in uii.all_matching_node_ids(ast_map) {
+ let node = ast_map.get(node_id);
+ try!(pp_state.print_node(&node));
+ try!(pp::space(&mut pp_state.s));
+ try!(pp_state.synth_comment(ast_map.path_to_string(node_id)));
+ try!(pp::hardbreak(&mut pp_state.s));
+ }
+ pp::eof(&mut pp_state.s)
+ }),
+
+ (PpmFlowGraph, opt_uii) => {
+ debug!("pretty printing flow graph for {}", opt_uii);
+ let uii = opt_uii.unwrap_or_else(|| {
+                sess.fatal(format!("`pretty flowgraph=..` needs NodeId (int) or \
+                                    unique path suffix (b::c::d)").as_slice())
+
+ });
+ let ast_map = ast_map.expect("--pretty flowgraph missing ast_map");
+ let nodeid = uii.to_one_node_id("--pretty", &sess, &ast_map);
+
+ let node = ast_map.find(nodeid).unwrap_or_else(|| {
+ sess.fatal(format!("--pretty flowgraph couldn't find id: {}",
+ nodeid).as_slice())
+ });
+
+ let code = blocks::Code::from_node(node);
+ match code {
+ Some(code) => {
+ let variants = gather_flowgraph_variants(&sess);
+ let type_arena = TypedArena::new();
+ let analysis = driver::phase_3_run_analysis_passes(sess, &krate,
+ ast_map, &type_arena, id);
+ print_flowgraph(variants, analysis, code, out)
+ }
+ None => {
+ let message = format!("--pretty=flowgraph needs \
+ block, fn, or method; got {:?}",
+ node);
+
+ // point to what was found, if there's an
+ // accessible span.
+ match ast_map.opt_span(nodeid) {
+ Some(sp) => sess.span_fatal(sp, message.as_slice()),
+ None => sess.fatal(message.as_slice())
+ }
+ }
+ }
+ }
+ }.unwrap()
+}
+
+fn print_flowgraph<W:io::Writer>(variants: Vec<borrowck_dot::Variant>,
+ analysis: CrateAnalysis,
+ code: blocks::Code,
+ mut out: W) -> io::IoResult<()> {
+ let ty_cx = &analysis.ty_cx;
+ let cfg = match code {
+ blocks::BlockCode(block) => cfg::CFG::new(ty_cx, &*block),
+ blocks::FnLikeCode(fn_like) => cfg::CFG::new(ty_cx, &*fn_like.body()),
+ };
+ debug!("cfg: {:?}", cfg);
+
+ match code {
+ _ if variants.len() == 0 => {
+ let lcfg = LabelledCFG {
+ ast_map: &ty_cx.map,
+ cfg: &cfg,
+ name: format!("node_{}", code.id()),
+ };
+ let r = dot::render(&lcfg, &mut out);
+ return expand_err_details(r);
+ }
+ blocks::BlockCode(_) => {
+ ty_cx.sess.err("--pretty flowgraph with -Z flowgraph-print \
+ annotations requires fn-like node id.");
+ return Ok(())
+ }
+ blocks::FnLikeCode(fn_like) => {
+ let fn_parts = FnPartsWithCFG::from_fn_like(&fn_like, &cfg);
+ let (bccx, analysis_data) =
+ borrowck::build_borrowck_dataflow_data_for_fn(ty_cx, fn_parts);
+
+ let lcfg = LabelledCFG {
+ ast_map: &ty_cx.map,
+ cfg: &cfg,
+ name: format!("node_{}", code.id()),
+ };
+ let lcfg = borrowck_dot::DataflowLabeller {
+ inner: lcfg,
+ variants: variants,
+ borrowck_ctxt: &bccx,
+ analysis_data: &analysis_data,
+ };
+ let r = dot::render(&lcfg, &mut out);
+ return expand_err_details(r);
+ }
+ }
+
+ fn expand_err_details(r: io::IoResult<()>) -> io::IoResult<()> {
+ r.map_err(|ioerr| {
+ let orig_detail = ioerr.detail.clone();
+ let m = "graphviz::render failed";
+ io::IoError {
+ detail: Some(match orig_detail {
+ None => m.into_string(),
+ Some(d) => format!("{}: {}", m, d)
+ }),
+ ..ioerr
+ }
+ })
+ }
+}
sess.span_err(mi.span, "feature has been removed");
}
Some(&(_, Accepted)) => {
- sess.span_warn(mi.span, "feature has added to rust, \
+ sess.span_warn(mi.span, "feature has been added to Rust, \
directive not necessary");
}
None => {
impl<'a> fold::Folder for StandardLibraryInjector<'a> {
fn fold_crate(&mut self, mut krate: ast::Crate) -> ast::Crate {
- // The name to use in `extern crate std = "name";`
+ // The name to use in `extern crate "name" as std;`
let actual_crate_name = match self.sess.opts.alt_std_name {
Some(ref s) => token::intern_and_get_ident(s.as_slice()),
None => token::intern_and_get_ident("std"),
path: Vec<ast::Ident>,
ext_cx: ExtCtxt<'a>,
testfns: Vec<Test>,
- reexport_mod_ident: ast::Ident,
reexport_test_harness_main: Option<InternedString>,
is_test_crate: bool,
config: ast::CrateConfig,
+
+ // top-level re-export submodule, filled out after folding is finished
+ toplevel_reexport: Option<ast::Ident>,
}
// Traverse the crate, collecting all the test functions, eliding any
struct TestHarnessGenerator<'a> {
cx: TestCtxt<'a>,
tests: Vec<ast::Ident>,
- tested_submods: Vec<ast::Ident>,
+
+ // submodule name, gensym'd identifier for re-exports
+ tested_submods: Vec<(ast::Ident, ast::Ident)>,
}
impl<'a> fold::Folder for TestHarnessGenerator<'a> {
*i = nomain(*i);
}
if !tests.is_empty() || !tested_submods.is_empty() {
- mod_folded.items.push(mk_reexport_mod(&mut self.cx, tests,
- tested_submods));
+ let (it, sym) = mk_reexport_mod(&mut self.cx, tests, tested_submods);
+ mod_folded.items.push(it);
+
if !self.cx.path.is_empty() {
- self.tested_submods.push(self.cx.path[self.cx.path.len()-1]);
+ self.tested_submods.push((self.cx.path[self.cx.path.len()-1], sym));
+ } else {
+ debug!("pushing nothing, sym: {}", sym);
+ self.cx.toplevel_reexport = Some(sym);
}
}
}
fn mk_reexport_mod(cx: &mut TestCtxt, tests: Vec<ast::Ident>,
- tested_submods: Vec<ast::Ident>) -> Gc<ast::Item> {
+ tested_submods: Vec<(ast::Ident, ast::Ident)>) -> (Gc<ast::Item>, ast::Ident) {
let mut view_items = Vec::new();
let super_ = token::str_to_ident("super");
cx.ext_cx.view_use_simple(DUMMY_SP, ast::Public,
cx.ext_cx.path(DUMMY_SP, vec![super_, r]))
}));
- view_items.extend(tested_submods.move_iter().map(|r| {
- let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, cx.reexport_mod_ident]);
+ view_items.extend(tested_submods.move_iter().map(|(r, sym)| {
+ let path = cx.ext_cx.path(DUMMY_SP, vec![super_, r, sym]);
cx.ext_cx.view_use_simple_(DUMMY_SP, ast::Public, r, path)
}));
view_items: view_items,
items: Vec::new(),
};
- box(GC) ast::Item {
- ident: cx.reexport_mod_ident.clone(),
+
+ let sym = token::gensym_ident("__test_reexports");
+ let it = box(GC) ast::Item {
+ ident: sym.clone(),
attrs: Vec::new(),
id: ast::DUMMY_NODE_ID,
node: ast::ItemMod(reexport_mod),
vis: ast::Public,
span: DUMMY_SP,
- }
+ };
+
+ (it, sym)
}
fn generate_test_harness(sess: &Session,
}),
path: Vec::new(),
testfns: Vec::new(),
- reexport_mod_ident: token::gensym_ident("__test_reexports"),
reexport_test_harness_main: reexport_test_harness_main,
is_test_crate: is_test_crate(&krate),
config: krate.config.clone(),
+ toplevel_reexport: None,
};
cx.ext_cx.bt_push(ExpnInfo {
field("should_fail", fail_expr)]);
- let mut visible_path = vec![cx.reexport_mod_ident.clone()];
+ let mut visible_path = match cx.toplevel_reexport {
+ Some(id) => vec![id],
+ None => {
+ cx.sess.bug(
+ "expected to find top-level re-export name, but found None"
+ );
+ }
+ };
visible_path.extend(path.move_iter());
let fn_expr = ecx.expr_path(ecx.path_global(span, visible_path));
#![allow(deprecated)]
#![feature(macro_rules, globs, struct_variant, managed_boxes, quote)]
#![feature(default_type_params, phase, unsafe_destructor)]
-#![feature(issue_5723_bootstrap)]
#![allow(unknown_features)] // NOTE: Remove after next snapshot
#![feature(rustc_diagnostic_macros)]
extern crate getopts;
extern crate graphviz;
extern crate libc;
-extern crate llvm = "rustc_llvm";
-extern crate rustc_back = "rustc_back";
+extern crate "rustc_llvm" as llvm;
+extern crate "rustc_back" as rustc_back;
extern crate serialize;
extern crate rbml;
extern crate time;
pub mod link;
pub mod lto;
+ pub mod write;
}
pub mod check_const;
pub mod check_loop;
pub mod check_match;
+ pub mod check_rvalues;
pub mod check_static;
pub mod const_eval;
pub mod dataflow;
pub mod common;
pub mod ppaux;
pub mod nodemap;
+ pub mod snapshot_vec;
}
pub mod lib {
use syntax::codemap::Span;
use syntax::parse::token;
use syntax::{ast, ast_util, visit};
+use syntax::visit::Visitor;
declare_lint!(WHILE_TRUE, Warn,
"suggest using `loop { }` instead of `while true { }`")
fn check_expr(&mut self, cx: &Context, e: &ast::Expr) {
match e.node {
- ast::ExprWhile(cond, _) => {
+ ast::ExprWhile(cond, _, _) => {
match cond.node {
ast::ExprLit(lit) => {
match lit.node {
declare_lint!(CTYPES, Warn,
"proper use of libc types in foreign modules")
+struct CTypesVisitor<'a, 'tcx: 'a> {
+ cx: &'a Context<'a, 'tcx>
+}
+
+impl<'a, 'tcx> CTypesVisitor<'a, 'tcx> {
+ fn check_def(&mut self, sp: Span, ty_id: ast::NodeId, path_id: ast::NodeId) {
+ match self.cx.tcx.def_map.borrow().get_copy(&path_id) {
+ def::DefPrimTy(ast::TyInt(ast::TyI)) => {
+ self.cx.span_lint(CTYPES, sp,
+ "found rust type `int` in foreign module, while \
+ libc::c_int or libc::c_long should be used");
+ }
+ def::DefPrimTy(ast::TyUint(ast::TyU)) => {
+ self.cx.span_lint(CTYPES, sp,
+ "found rust type `uint` in foreign module, while \
+ libc::c_uint or libc::c_ulong should be used");
+ }
+ def::DefTy(..) => {
+ let tty = match self.cx.tcx.ast_ty_to_ty_cache.borrow().find(&ty_id) {
+ Some(&ty::atttce_resolved(t)) => t,
+ _ => fail!("ast_ty_to_ty_cache was incomplete after typeck!")
+ };
+
+ if !ty::is_ffi_safe(self.cx.tcx, tty) {
+ self.cx.span_lint(CTYPES, sp,
+                    "found type without foreign-function-safe \
+ representation annotation in foreign module, consider \
+ adding a #[repr(...)] attribute to the type");
+ }
+ }
+ _ => ()
+ }
+ }
+}
+
+impl<'a, 'tcx> Visitor<()> for CTypesVisitor<'a, 'tcx> {
+ fn visit_ty(&mut self, ty: &ast::Ty, _: ()) {
+ match ty.node {
+ ast::TyPath(_, _, id) => self.check_def(ty.span, ty.id, id),
+ _ => (),
+ }
+ visit::walk_ty(self, ty, ());
+ }
+}
+
pub struct CTypes;
impl LintPass for CTypes {
fn check_item(&mut self, cx: &Context, it: &ast::Item) {
fn check_ty(cx: &Context, ty: &ast::Ty) {
- match ty.node {
- ast::TyPath(_, _, id) => {
- match cx.tcx.def_map.borrow().get_copy(&id) {
- def::DefPrimTy(ast::TyInt(ast::TyI)) => {
- cx.span_lint(CTYPES, ty.span,
- "found rust type `int` in foreign module, while \
- libc::c_int or libc::c_long should be used");
- }
- def::DefPrimTy(ast::TyUint(ast::TyU)) => {
- cx.span_lint(CTYPES, ty.span,
- "found rust type `uint` in foreign module, while \
- libc::c_uint or libc::c_ulong should be used");
- }
- def::DefTy(..) => {
- let tty = match cx.tcx.ast_ty_to_ty_cache.borrow().find(&ty.id) {
- Some(&ty::atttce_resolved(t)) => t,
- _ => fail!("ast_ty_to_ty_cache was incomplete after typeck!")
- };
-
- if !ty::is_ffi_safe(cx.tcx, tty) {
- cx.span_lint(CTYPES, ty.span,
- "found type without foreign-function-safe
- representation annotation in foreign module, consider \
- adding a #[repr(...)] attribute to the type");
- }
- }
- _ => ()
- }
- }
- ast::TyPtr(ref mt) => { check_ty(cx, &*mt.ty) }
- _ => {}
- }
+ let mut vis = CTypesVisitor { cx: cx };
+ vis.visit_ty(ty, ());
}
fn check_foreign_fn(cx: &Context, decl: &ast::FnDecl) {
}
match it.node {
- ast::ItemForeignMod(ref nmod) if nmod.abi != abi::RustIntrinsic => {
- for ni in nmod.items.iter() {
- match ni.node {
- ast::ForeignItemFn(decl, _) => check_foreign_fn(cx, &*decl),
- ast::ForeignItemStatic(t, _) => check_ty(cx, &*t)
+ ast::ItemForeignMod(ref nmod) if nmod.abi != abi::RustIntrinsic => {
+ for ni in nmod.items.iter() {
+ match ni.node {
+ ast::ForeignItemFn(decl, _) => check_foreign_fn(cx, &*decl),
+ ast::ForeignItemStatic(t, _) => check_ty(cx, &*t)
+ }
}
}
- }
- _ => {/* nothing to do */ }
+ _ => (),
}
}
}
declare_lint!(RAW_POINTER_DERIVING, Warn,
"uses of #[deriving] with raw pointers are rarely correct")
-struct RawPtrDerivingVisitor<'a> {
- cx: &'a Context<'a>
+struct RawPtrDerivingVisitor<'a, 'tcx: 'a> {
+ cx: &'a Context<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for RawPtrDerivingVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for RawPtrDerivingVisitor<'a, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty, _: ()) {
static MSG: &'static str = "use of `#[deriving]` with a raw pointer";
match ty.node {
}
}
-declare_lint!(NON_CAMEL_CASE_TYPES, Warn,
- "types, variants and traits should have camel case names")
+declare_lint!(pub NON_CAMEL_CASE_TYPES, Warn,
+ "types, variants, traits and type parameters should have camel case names")
pub struct NonCamelCaseTypes;
-impl LintPass for NonCamelCaseTypes {
- fn get_lints(&self) -> LintArray {
- lint_array!(NON_CAMEL_CASE_TYPES)
- }
-
- fn check_item(&mut self, cx: &Context, it: &ast::Item) {
+impl NonCamelCaseTypes {
+ fn check_case(&self, cx: &Context, sort: &str, ident: ast::Ident, span: Span) {
fn is_camel_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
- assert!(!ident.get().is_empty());
+ if ident.get().is_empty() { return true; }
let ident = ident.get().trim_chars('_');
// start with a non-lowercase letter rather than non-uppercase
)).collect()
}
- fn check_case(cx: &Context, sort: &str, ident: ast::Ident, span: Span) {
- let s = token::get_ident(ident);
+ let s = token::get_ident(ident);
- if !is_camel_case(ident) {
- let c = to_camel_case(s.get());
- let m = if c.is_empty() {
- format!("{} `{}` should have a camel case name such as `CamelCase`", sort, s)
- } else {
- format!("{} `{}` should have a camel case name such as `{}`", sort, s, c)
- };
- cx.span_lint(NON_CAMEL_CASE_TYPES, span, m.as_slice());
- }
+ if !is_camel_case(ident) {
+ let c = to_camel_case(s.get());
+ let m = if c.is_empty() {
+ format!("{} `{}` should have a camel case name such as `CamelCase`", sort, s)
+ } else {
+ format!("{} `{}` should have a camel case name such as `{}`", sort, s, c)
+ };
+ cx.span_lint(NON_CAMEL_CASE_TYPES, span, m.as_slice());
}
+ }
+}
+
+impl LintPass for NonCamelCaseTypes {
+ fn get_lints(&self) -> LintArray {
+ lint_array!(NON_CAMEL_CASE_TYPES)
+ }
+ fn check_item(&mut self, cx: &Context, it: &ast::Item) {
let has_extern_repr = it.attrs.iter().map(|attr| {
attr::find_repr_attrs(cx.tcx.sess.diagnostic(), attr).iter()
.any(|r| r == &attr::ReprExtern)
match it.node {
ast::ItemTy(..) | ast::ItemStruct(..) => {
- check_case(cx, "type", it.ident, it.span)
+ self.check_case(cx, "type", it.ident, it.span)
}
ast::ItemTrait(..) => {
- check_case(cx, "trait", it.ident, it.span)
+ self.check_case(cx, "trait", it.ident, it.span)
}
ast::ItemEnum(ref enum_definition, _) => {
if has_extern_repr { return }
- check_case(cx, "type", it.ident, it.span);
+ self.check_case(cx, "type", it.ident, it.span);
for variant in enum_definition.variants.iter() {
- check_case(cx, "variant", variant.node.name, variant.span);
+ self.check_case(cx, "variant", variant.node.name, variant.span);
}
}
_ => ()
}
}
+
+ fn check_generics(&mut self, cx: &Context, it: &ast::Generics) {
+ for gen in it.ty_params.iter() {
+ self.check_case(cx, "type parameter", gen.ident, gen.span);
+ }
+ }
}
#[deriving(PartialEq)]
}
}
-declare_lint!(NON_SNAKE_CASE_FUNCTIONS, Warn,
- "methods and functions should have snake case names")
+declare_lint!(pub NON_SNAKE_CASE, Warn,
+ "methods, functions, lifetime parameters and modules should have snake case names")
-pub struct NonSnakeCaseFunctions;
+pub struct NonSnakeCase;
-impl NonSnakeCaseFunctions {
+impl NonSnakeCase {
fn check_snake_case(&self, cx: &Context, sort: &str, ident: ast::Ident, span: Span) {
fn is_snake_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
- assert!(!ident.get().is_empty());
- let ident = ident.get().trim_chars('_');
+ if ident.get().is_empty() { return true; }
+ let ident = ident.get().trim_left_chars('\'');
+ let ident = ident.trim_chars('_');
let mut allow_underscore = true;
ident.chars().all(|c| {
let mut buf = String::new();
if s.is_empty() { continue; }
for ch in s.chars() {
- if !buf.is_empty() && ch.is_uppercase() {
+ if !buf.is_empty() && buf.as_slice() != "'" && ch.is_uppercase() {
words.push(buf);
buf = String::new();
}
let s = token::get_ident(ident);
if !is_snake_case(ident) {
- cx.span_lint(NON_SNAKE_CASE_FUNCTIONS, span,
+ cx.span_lint(NON_SNAKE_CASE, span,
format!("{} `{}` should have a snake case name such as `{}`",
sort, s, to_snake_case(s.get())).as_slice());
}
}
}
-impl LintPass for NonSnakeCaseFunctions {
+impl LintPass for NonSnakeCase {
fn get_lints(&self) -> LintArray {
- lint_array!(NON_SNAKE_CASE_FUNCTIONS)
+ lint_array!(NON_SNAKE_CASE)
}
fn check_fn(&mut self, cx: &Context,
}
}
+ fn check_item(&mut self, cx: &Context, it: &ast::Item) {
+ match it.node {
+ ast::ItemMod(_) => {
+ self.check_snake_case(cx, "module", it.ident, it.span);
+ }
+ _ => {}
+ }
+ }
+
fn check_ty_method(&mut self, cx: &Context, t: &ast::TypeMethod) {
self.check_snake_case(cx, "trait method", t.ident, t.span);
}
+
+ fn check_lifetime_decl(&mut self, cx: &Context, t: &ast::LifetimeDef) {
+ self.check_snake_case(cx, "lifetime", t.lifetime.name.ident(), t.lifetime.span);
+ }
+
+ fn check_pat(&mut self, cx: &Context, p: &ast::Pat) {
+ match &p.node {
+ &ast::PatIdent(_, ref path1, _) => {
+ match cx.tcx.def_map.borrow().find(&p.id) {
+ Some(&def::DefLocal(_, _)) | Some(&def::DefBinding(_, _)) |
+ Some(&def::DefArg(_, _)) => {
+ self.check_snake_case(cx, "variable", path1.node, p.span);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn check_struct_def(&mut self, cx: &Context, s: &ast::StructDef,
+ _: ast::Ident, _: &ast::Generics, _: ast::NodeId) {
+ for sf in s.fields.iter() {
+ match sf.node {
+ ast::StructField_ { kind: ast::NamedField(ident, _), .. } => {
+ self.check_snake_case(cx, "structure field", ident, sf.span);
+ }
+ _ => {}
+ }
+ }
+ }
}
-declare_lint!(NON_UPPERCASE_STATICS, Allow,
+declare_lint!(pub NON_UPPERCASE_STATICS, Allow,
"static constants should have uppercase identifiers")
pub struct NonUppercaseStatics;
_ => {}
}
}
-}
-
-declare_lint!(NON_UPPERCASE_PATTERN_STATICS, Warn,
- "static constants in match patterns should be all caps")
-
-pub struct NonUppercasePatternStatics;
-
-impl LintPass for NonUppercasePatternStatics {
- fn get_lints(&self) -> LintArray {
- lint_array!(NON_UPPERCASE_PATTERN_STATICS)
- }
fn check_pat(&mut self, cx: &Context, p: &ast::Pat) {
// Lint for constants that look like binding identifiers (#7526)
(&ast::PatIdent(_, ref path1, _), Some(&def::DefStatic(_, false))) => {
let s = token::get_ident(path1.node);
if s.get().chars().any(|c| c.is_lowercase()) {
- cx.span_lint(NON_UPPERCASE_PATTERN_STATICS, path1.span,
+ cx.span_lint(NON_UPPERCASE_STATICS, path1.span,
format!("static constant in pattern `{}` should have an uppercase \
name such as `{}`",
s.get(), s.get().chars().map(|c| c.to_uppercase())
}
}
-declare_lint!(UPPERCASE_VARIABLES, Warn,
- "variable and structure field names should start with a lowercase character")
-
-pub struct UppercaseVariables;
-
-impl LintPass for UppercaseVariables {
- fn get_lints(&self) -> LintArray {
- lint_array!(UPPERCASE_VARIABLES)
- }
-
- fn check_pat(&mut self, cx: &Context, p: &ast::Pat) {
- match &p.node {
- &ast::PatIdent(_, ref path1, _) => {
- match cx.tcx.def_map.borrow().find(&p.id) {
- Some(&def::DefLocal(_, _)) | Some(&def::DefBinding(_, _)) |
- Some(&def::DefArg(_, _)) => {
- let s = token::get_ident(path1.node);
- if s.get().len() > 0 && s.get().char_at(0).is_uppercase() {
- cx.span_lint(UPPERCASE_VARIABLES, path1.span,
- "variable names should start with \
- a lowercase character");
- }
- }
- _ => {}
- }
- }
- _ => {}
- }
- }
-
- fn check_struct_def(&mut self, cx: &Context, s: &ast::StructDef,
- _: ast::Ident, _: &ast::Generics, _: ast::NodeId) {
- for sf in s.fields.iter() {
- match sf.node {
- ast::StructField_ { kind: ast::NamedField(ident, _), .. } => {
- let s = token::get_ident(ident);
- if s.get().char_at(0).is_uppercase() {
- cx.span_lint(UPPERCASE_VARIABLES, sf.span,
- "structure field names should start with \
- a lowercase character");
- }
- }
- _ => {}
- }
- }
- }
-}
-
declare_lint!(UNNECESSARY_PARENS, Warn,
"`if`, `match`, `while` and `return` do not need parentheses")
fn check_expr(&mut self, cx: &Context, e: &ast::Expr) {
let (value, msg, struct_lit_needs_parens) = match e.node {
ast::ExprIf(cond, _, _) => (cond, "`if` condition", true),
- ast::ExprWhile(cond, _) => (cond, "`while` condition", true),
+ ast::ExprWhile(cond, _, _) => (cond, "`while` condition", true),
ast::ExprMatch(head, _) => (head, "`match` head expression", true),
ast::ExprRet(Some(value)) => (value, "`return` value", false),
ast::ExprAssign(_, value) => (value, "assigned value", false),
}
}
-declare_lint!(UNUSED_MUT, Warn,
+declare_lint!(pub UNUSED_MUT, Warn,
"detect mut variables which don't need to be mutable")
pub struct UnusedMut;
/// Current levels of each lint, and where they were set.
levels: HashMap<LintId, LevelSource>,
+
+ /// Map of registered lint groups to what lints they expand to. The bool
+ /// is true if the lint group was added by a plugin.
+ lint_groups: HashMap<&'static str, (Vec<LintId>, bool)>,
}
impl LintStore {
passes: Some(vec!()),
by_name: HashMap::new(),
levels: HashMap::new(),
+ lint_groups: HashMap::new(),
}
}
self.lints.as_slice()
}
+ /// Returns a snapshot of all registered lint groups as
+ /// (name, member lints, registered-by-plugin) tuples, cloning the
+ /// member lists so callers do not borrow the store.
+ pub fn get_lint_groups<'t>(&'t self) -> Vec<(&'static str, Vec<LintId>, bool)> {
+ self.lint_groups.iter().map(|(k, v)| (*k,
+ v.ref0().clone(),
+ *v.ref1())).collect()
+ }
+
pub fn register_pass(&mut self, sess: Option<&Session>,
from_plugin: bool, pass: LintPassObject) {
for &lint in pass.get_lints().iter() {
self.passes.get_mut_ref().push(pass);
}
+ /// Registers a named lint group expanding to the lints in `to`.
+ /// A duplicate name is diagnosed according to its origin: builtin
+ /// duplicates are a compiler bug (or an early error when no session
+ /// exists yet, e.g. `-W help` without a crate), while plugin
+ /// duplicates are reported as an ordinary user error.
+ pub fn register_group(&mut self, sess: Option<&Session>,
+ from_plugin: bool, name: &'static str,
+ to: Vec<LintId>) {
+ let new = self.lint_groups.insert(name, (to, from_plugin));
+
+ if !new {
+ let msg = format!("duplicate specification of lint group {}", name);
+ match (sess, from_plugin) {
+ // We load builtin lints first, so a duplicate is a compiler bug.
+ // Use early_error when handling -W help with no crate.
+ (None, _) => early_error(msg.as_slice()),
+ (Some(sess), false) => sess.bug(msg.as_slice()),
+
+ // A duplicate name from a plugin is a user error.
+ (Some(sess), true) => sess.err(msg.as_slice()),
+ }
+ }
+ }
+
pub fn register_builtin(&mut self, sess: Option<&Session>) {
macro_rules! add_builtin ( ( $sess:ident, $($name:ident),*, ) => (
{$(
)*}
))
+ macro_rules! add_lint_group ( ( $sess:ident, $name:expr, $($lint:ident),* ) => (
+ self.register_group($sess, false, $name, vec![$(LintId::of(builtin::$lint)),*]);
+ ))
+
add_builtin!(sess,
HardwiredLints,
WhileTrue,
PathStatement,
UnusedResult,
NonCamelCaseTypes,
- NonSnakeCaseFunctions,
+ NonSnakeCase,
NonUppercaseStatics,
- NonUppercasePatternStatics,
- UppercaseVariables,
UnnecessaryParens,
UnusedUnsafe,
UnsafeBlock,
MissingDoc,
)
+ add_lint_group!(sess, "bad_style",
+ NON_CAMEL_CASE_TYPES, NON_SNAKE_CASE, NON_UPPERCASE_STATICS)
+
+ add_lint_group!(sess, "unused",
+ UNUSED_IMPORTS, UNUSED_VARIABLE, DEAD_ASSIGNMENT, DEAD_CODE,
+ UNUSED_MUT, UNREACHABLE_CODE)
+
// We have one lint pass defined in this module.
self.register_pass(sess, false, box GatherNodeLevels as LintPassObject);
}
for &(ref lint_name, level) in sess.opts.lint_opts.iter() {
match self.by_name.find_equiv(&lint_name.as_slice()) {
Some(&lint_id) => self.set_level(lint_id, (level, CommandLine)),
- None => sess.err(format!("unknown {} flag: {}",
- level.as_str(), lint_name).as_slice()),
+ None => {
+ match self.lint_groups.iter().map(|(&x, pair)| (x, pair.ref0().clone()))
+ .collect::<HashMap<&'static str, Vec<LintId>>>()
+ .find_equiv(&lint_name.as_slice()) {
+ Some(v) => {
+ v.iter()
+ .map(|lint_id: &LintId|
+ self.set_level(*lint_id, (level, CommandLine)))
+ .collect::<Vec<()>>();
+ }
+ None => sess.err(format!("unknown {} flag: {}",
+ level.as_str(), lint_name).as_slice()),
+ }
+ }
}
}
}
}
/// Context for lint checking.
-pub struct Context<'a> {
+pub struct Context<'a, 'tcx: 'a> {
/// Type context we're checking in.
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
/// The crate being checked.
pub krate: &'a ast::Crate,
macro_rules! run_lints ( ($cx:expr, $f:ident, $($args:expr),*) => ({
// Move the vector of passes out of `$cx` so that we can
// iterate over it mutably while passing `$cx` to the methods.
- let mut passes = $cx.lints.passes.take_unwrap();
+ let mut passes = $cx.lints.passes.take().unwrap();
for obj in passes.mut_iter() {
obj.$f($cx, $($args),*);
}
}
}
-impl<'a> Context<'a> {
- fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> Context<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
krate: &'a ast::Crate,
- exported_items: &'a ExportedItems) -> Context<'a> {
+ exported_items: &'a ExportedItems) -> Context<'a, 'tcx> {
// We want to own the lint store, so move it out of the session.
let lint_store = mem::replace(&mut *tcx.sess.lint_store.borrow_mut(),
LintStore::new());
krate: krate,
exported_items: exported_items,
lints: lint_store,
- level_stack: vec!(),
+ level_stack: vec![],
node_levels: RefCell::new(HashMap::new()),
}
}
let mut pushed = 0u;
for result in gather_attrs(attrs).move_iter() {
- let (lint_id, level, span) = match result {
+ let v = match result {
Err(span) => {
self.tcx.sess.span_err(span, "malformed lint attribute");
continue;
}
Ok((lint_name, level, span)) => {
match self.lints.by_name.find_equiv(&lint_name.get()) {
- Some(&lint_id) => (lint_id, level, span),
+ Some(&lint_id) => vec![(lint_id, level, span)],
None => {
- self.span_lint(builtin::UNRECOGNIZED_LINT, span,
- format!("unknown `{}` attribute: `{}`",
- level.as_str(), lint_name).as_slice());
- continue;
+ match self.lints.lint_groups.find_equiv(&lint_name.get()) {
+ Some(&(ref v, _)) => v.iter()
+ .map(|lint_id: &LintId|
+ (*lint_id, level, span))
+ .collect(),
+ None => {
+ self.span_lint(builtin::UNRECOGNIZED_LINT, span,
+ format!("unknown `{}` attribute: `{}`",
+ level.as_str(), lint_name).as_slice());
+ continue;
+ }
+ }
}
}
}
};
- let now = self.lints.get_level_source(lint_id).val0();
- if now == Forbid && level != Forbid {
- let lint_name = lint_id.as_str();
- self.tcx.sess.span_err(span,
- format!("{}({}) overruled by outer forbid({})",
- level.as_str(), lint_name, lint_name).as_slice());
- } else if now != level {
- let src = self.lints.get_level_source(lint_id).val1();
- self.level_stack.push((lint_id, (now, src)));
- pushed += 1;
- self.lints.set_level(lint_id, (level, Node(span)));
+ for (lint_id, level, span) in v.move_iter() {
+ let now = self.lints.get_level_source(lint_id).val0();
+ if now == Forbid && level != Forbid {
+ let lint_name = lint_id.as_str();
+ self.tcx.sess.span_err(span,
+ format!("{}({}) overruled by outer forbid({})",
+ level.as_str(), lint_name,
+ lint_name).as_slice());
+ } else if now != level {
+ let src = self.lints.get_level_source(lint_id).val1();
+ self.level_stack.push((lint_id, (now, src)));
+ pushed += 1;
+ self.lints.set_level(lint_id, (level, Node(span)));
+ }
}
}
}
}
-impl<'a> AstConv for Context<'a>{
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> AstConv<'tcx> for Context<'a, 'tcx>{
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
ty::lookup_item_type(self.tcx, id)
}
}
-impl<'a> Visitor<()> for Context<'a> {
+impl<'a, 'tcx> Visitor<()> for Context<'a, 'tcx> {
fn visit_item(&mut self, it: &ast::Item, _: ()) {
self.with_lint_attrs(it.attrs.as_slice(), |cx| {
run_lints!(cx, check_item, it);
}
// Output any lints that were previously added to the session.
-impl<'a> IdVisitingOperation for Context<'a> {
+impl<'a, 'tcx> IdVisitingOperation for Context<'a, 'tcx> {
fn visit_id(&self, id: ast::NodeId) {
match self.tcx.sess.lints.borrow_mut().pop(&id) {
None => {}
rbml_w: &mut Encoder,
ii: InlinedItemRef|: 'a;
-pub struct EncodeParams<'a> {
+pub struct EncodeParams<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
pub reexports2: &'a middle::resolve::ExportMap2,
pub item_symbols: &'a RefCell<NodeMap<String>>,
pub non_inlineable_statics: &'a RefCell<NodeSet>,
pub reachable: &'a NodeSet,
}
-pub struct EncodeContext<'a> {
+pub struct EncodeContext<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
pub reexports2: &'a middle::resolve::ExportMap2,
pub item_symbols: &'a RefCell<NodeMap<String>>,
pub non_inlineable_statics: &'a RefCell<NodeSet>,
IITraitItemRef(local_def(parent_id),
RequiredInlinedTraitItemRef(
&*ast_method)));
- } else {
+ }
+ if !any_types {
encode_symbol(ecx, rbml_w, m.def_id.node);
}
encode_method_argument_names(rbml_w, &*ast_method.pe_fn_decl());
encode_attributes(rbml_w, item.attrs.as_slice());
if tps_len > 0u || should_inline(item.attrs.as_slice()) {
encode_inlined_item(ecx, rbml_w, IIItemRef(item));
- } else {
+ }
+ if tps_len == 0 {
encode_symbol(ecx, rbml_w, item.id);
}
encode_visibility(rbml_w, vis);
encode_name(rbml_w, nitem.ident.name);
if abi == abi::RustIntrinsic {
encode_inlined_item(ecx, rbml_w, IIForeignRef(nitem));
- } else {
- encode_symbol(ecx, rbml_w, nitem.id);
}
+ encode_symbol(ecx, rbml_w, nitem.id);
}
ForeignItemStatic(_, mutbl) => {
if mutbl {
-struct ImplVisitor<'a,'b:'a,'c:'a> {
- ecx: &'a EncodeContext<'b>,
+struct ImplVisitor<'a, 'b:'a, 'c:'a, 'tcx:'b> {
+ ecx: &'a EncodeContext<'b, 'tcx>,
rbml_w: &'a mut Encoder<'c>,
}
-impl<'a,'b,'c> Visitor<()> for ImplVisitor<'a,'b,'c> {
+impl<'a, 'b, 'c, 'tcx> Visitor<()> for ImplVisitor<'a, 'b, 'c, 'tcx> {
fn visit_item(&mut self, item: &Item, _: ()) {
match item.node {
ItemImpl(_, Some(ref trait_ref), _, _) => {
pub struct FileSearch<'a> {
pub sysroot: &'a Path,
- pub addl_lib_search_paths: &'a RefCell<HashSet<Path>>,
+ pub addl_lib_search_paths: &'a RefCell<Vec<Path>>,
pub triple: &'a str,
}
pub fn new(sysroot: &'a Path,
triple: &'a str,
- addl_lib_search_paths: &'a RefCell<HashSet<Path>>) -> FileSearch<'a> {
+ addl_lib_search_paths: &'a RefCell<Vec<Path>>) -> FileSearch<'a> {
debug!("using sysroot = {}, triple = {}", sysroot.display(), triple);
FileSearch {
sysroot: sysroot,
pub type conv_did<'a> =
|source: DefIdSource, ast::DefId|: 'a -> ast::DefId;
-pub struct PState<'a> {
+pub struct PState<'a, 'tcx: 'a> {
data: &'a [u8],
krate: ast::CrateNum,
pos: uint,
- tcx: &'a ty::ctxt
+ tcx: &'a ty::ctxt<'tcx>
}
fn peek(st: &PState) -> char {
})
}
-pub fn parse_state_from_data<'a>(data: &'a [u8], crate_num: ast::CrateNum,
- pos: uint, tcx: &'a ty::ctxt) -> PState<'a> {
+pub fn parse_state_from_data<'a, 'tcx>(data: &'a [u8], crate_num: ast::CrateNum,
+ pos: uint, tcx: &'a ty::ctxt<'tcx>)
+ -> PState<'a, 'tcx> {
PState {
data: data,
krate: crate_num,
macro_rules! mywrite( ($($arg:tt)*) => ({ write!($($arg)*); }) )
-pub struct ctxt<'a> {
+pub struct ctxt<'a, 'tcx: 'a> {
pub diag: &'a SpanHandler,
// Def -> str Callback:
pub ds: fn(DefId) -> String,
// The type context.
- pub tcx: &'a ty::ctxt,
+ pub tcx: &'a ty::ctxt<'tcx>,
pub abbrevs: &'a abbrev_map
}
#[cfg(test)] use syntax::print::pprust;
#[cfg(test)] use std::gc::Gc;
-struct DecodeContext<'a> {
+struct DecodeContext<'a, 'tcx: 'a> {
cdata: &'a cstore::crate_metadata,
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
}
-struct ExtendedDecodeContext<'a> {
- dcx: &'a DecodeContext<'a>,
+struct ExtendedDecodeContext<'a, 'tcx: 'a> {
+ dcx: &'a DecodeContext<'a, 'tcx>,
from_id_range: ast_util::IdRange,
to_id_range: ast_util::IdRange
}
ast_util::IdRange { min: to_id_min, max: to_id_max }
}
-impl<'a> ExtendedDecodeContext<'a> {
+impl<'a, 'tcx> ExtendedDecodeContext<'a, 'tcx> {
pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId {
/*!
* Translates an internal id, meaning a node id that is known
Decodable::decode(&mut d).unwrap()
}
-struct AstRenumberer<'a> {
- xcx: &'a ExtendedDecodeContext<'a>,
+struct AstRenumberer<'a, 'tcx: 'a> {
+ xcx: &'a ExtendedDecodeContext<'a, 'tcx>,
}
-impl<'a> ast_map::FoldOps for AstRenumberer<'a> {
+impl<'a, 'tcx> ast_map::FoldOps for AstRenumberer<'a, 'tcx> {
fn new_id(&self, id: ast::NodeId) -> ast::NodeId {
if id == ast::DUMMY_NODE_ID {
// Used by ast_map to map the NodeInlinedParent.
// ______________________________________________________________________
// Encoding and decoding the side tables
-trait get_ty_str_ctxt {
- fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a>;
+trait get_ty_str_ctxt<'tcx> {
+ fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a, 'tcx>;
}
-impl<'a> get_ty_str_ctxt for e::EncodeContext<'a> {
- fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a> {
+impl<'a, 'tcx> get_ty_str_ctxt<'tcx> for e::EncodeContext<'a, 'tcx> {
+ fn ty_str_ctxt<'a>(&'a self) -> tyencode::ctxt<'a, 'tcx> {
tyencode::ctxt {
diag: self.tcx.sess.diagnostic(),
ds: e::def_to_string,
this.emit_enum_variant_arg(0, |this| Ok(this.emit_unsize_kind(ecx, uk)))
})
}
- &ty::AutoUnsafe(m) => {
- this.emit_enum_variant("AutoUnsafe", 3, 1, |this| {
- this.emit_enum_variant_arg(0, |this| m.encode(this))
+ &ty::AutoUnsafe(m, None) => {
+ this.emit_enum_variant("AutoUnsafe", 3, 2, |this| {
+ this.emit_enum_variant_arg(0, |this| m.encode(this));
+ this.emit_enum_variant_arg(1,
+ |this| this.emit_option(|this| this.emit_option_none()))
+ })
+ }
+ &ty::AutoUnsafe(m, Some(box ref a)) => {
+ this.emit_enum_variant("AutoUnsafe", 3, 2, |this| {
+ this.emit_enum_variant_arg(0, |this| m.encode(this));
+ this.emit_enum_variant_arg(1, |this| this.emit_option(
+ |this| this.emit_option_some(|this| Ok(this.emit_autoref(ecx, a)))))
})
}
}
3 => {
let m: ast::Mutability =
this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap();
+ let a: Option<Box<ty::AutoRef>> =
+ this.read_enum_variant_arg(1, |this| this.read_option(|this, b| {
+ if b {
+ Ok(Some(box this.read_autoref(xcx)))
+ } else {
+ Ok(None)
+ }
+ })).unwrap();
- ty::AutoUnsafe(m)
+ ty::AutoUnsafe(m, a)
}
_ => fail!("bad enum variant for ty::AutoRef")
})
}
}
-struct CheckLoanCtxt<'a> {
- bccx: &'a BorrowckCtxt<'a>,
- dfcx_loans: &'a LoanDataFlow<'a>,
- move_data: move_data::FlowedMoveData<'a>,
+struct CheckLoanCtxt<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
+ dfcx_loans: &'a LoanDataFlow<'a, 'tcx>,
+ move_data: move_data::FlowedMoveData<'a, 'tcx>,
all_loans: &'a [Loan],
}
-impl<'a> euv::Delegate for CheckLoanCtxt<'a> {
+impl<'a, 'tcx> euv::Delegate for CheckLoanCtxt<'a, 'tcx> {
fn consume(&mut self,
consume_id: ast::NodeId,
consume_span: Span,
fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) { }
}
-pub fn check_loans(bccx: &BorrowckCtxt,
- dfcx_loans: &LoanDataFlow,
- move_data: move_data::FlowedMoveData,
- all_loans: &[Loan],
- decl: &ast::FnDecl,
- body: &ast::Block) {
+pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>,
+ dfcx_loans: &LoanDataFlow<'b, 'tcx>,
+ move_data: move_data::FlowedMoveData<'c, 'tcx>,
+ all_loans: &[Loan],
+ decl: &ast::FnDecl,
+ body: &ast::Block) {
debug!("check_loans(body id={:?})", body.id);
let mut clcx = CheckLoanCtxt {
borrow_kind1 == ty::ImmBorrow && borrow_kind2 == ty::ImmBorrow
}
-impl<'a> CheckLoanCtxt<'a> {
- pub fn tcx(&self) -> &'a ty::ctxt { self.bccx.tcx }
+impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
pub fn each_issued_loan(&self, scope_id: ast::NodeId, op: |&Loan| -> bool)
-> bool {
///////////////////////////////////////////////////////////////////////////
// Private
-struct GuaranteeLifetimeContext<'a> {
- bccx: &'a BorrowckCtxt<'a>,
+struct GuaranteeLifetimeContext<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
// the node id of the function body for the enclosing item
item_scope_id: ast::NodeId,
cmt_original: mc::cmt
}
-impl<'a> GuaranteeLifetimeContext<'a> {
+impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> {
fn check(&self, cmt: &mc::cmt, discr_scope: Option<ast::NodeId>) -> R {
//! Main routine. Walks down `cmt` until we find the "guarantor".
(all_loans, move_data)
}
-struct GatherLoanCtxt<'a> {
- bccx: &'a BorrowckCtxt<'a>,
+struct GatherLoanCtxt<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
move_data: move_data::MoveData,
move_error_collector: move_error::MoveErrorCollector,
all_loans: Vec<Loan>,
item_ub: ast::NodeId,
}
-impl<'a> euv::Delegate for GatherLoanCtxt<'a> {
+impl<'a, 'tcx> euv::Delegate for GatherLoanCtxt<'a, 'tcx> {
fn consume(&mut self,
consume_id: ast::NodeId,
_consume_span: Span,
}
}
-impl<'a> GatherLoanCtxt<'a> {
- pub fn tcx(&self) -> &'a ty::ctxt { self.bccx.tcx }
+impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx }
fn guarantee_valid(&mut self,
borrow_id: ast::NodeId,
///
/// This visitor walks static initializer's expressions and makes
/// sure the loans being taken are sound.
-struct StaticInitializerCtxt<'a> {
- bccx: &'a BorrowckCtxt<'a>
+struct StaticInitializerCtxt<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for StaticInitializerCtxt<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for StaticInitializerCtxt<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
match ex.node {
ast::ExprAddrOf(mutbl, ref base) => {
///////////////////////////////////////////////////////////////////////////
// Private
-struct RestrictionsContext<'a> {
- bccx: &'a BorrowckCtxt<'a>,
+struct RestrictionsContext<'a, 'tcx: 'a> {
+ bccx: &'a BorrowckCtxt<'a, 'tcx>,
span: Span,
loan_region: ty::Region,
cause: euv::LoanCause,
}
-impl<'a> RestrictionsContext<'a> {
+impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> {
fn restrict(&self,
cmt: mc::cmt) -> RestrictionResult {
debug!("restrict(cmt={})", cmt.repr(self.bccx.tcx));
}
}
-pub struct DataflowLabeller<'a> {
+pub struct DataflowLabeller<'a, 'tcx: 'a> {
pub inner: cfg_dot::LabelledCFG<'a>,
pub variants: Vec<Variant>,
- pub borrowck_ctxt: &'a BorrowckCtxt<'a>,
- pub analysis_data: &'a borrowck::AnalysisData<'a>,
+ pub borrowck_ctxt: &'a BorrowckCtxt<'a, 'tcx>,
+ pub analysis_data: &'a borrowck::AnalysisData<'a, 'tcx>,
}
-impl<'a> DataflowLabeller<'a> {
+impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> {
fn dataflow_for(&self, e: EntryOrExit, n: &Node<'a>) -> String {
let id = n.val1().data.id;
debug!("dataflow_for({}, id={}) {}", e, id, self.variants);
fn build_set<O:DataFlowOperator>(&self,
e: EntryOrExit,
cfgidx: CFGIndex,
- dfcx: &DataFlowContext<'a, O>,
+ dfcx: &DataFlowContext<'a, 'tcx, O>,
to_lp: |uint| -> Rc<LoanPath>) -> String {
let mut saw_some = false;
let mut set = "{".to_string();
}
}
-impl<'a> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a> {
+impl<'a, 'tcx> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> {
fn graph_id(&'a self) -> dot::Id<'a> { self.inner.graph_id() }
fn node_id(&'a self, n: &Node<'a>) -> dot::Id<'a> { self.inner.node_id(n) }
fn node_label(&'a self, n: &Node<'a>) -> dot::LabelText<'a> {
fn edge_label(&'a self, e: &Edge<'a>) -> dot::LabelText<'a> { self.inner.edge_label(e) }
}
-impl<'a> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a> {
+impl<'a, 'tcx> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> {
fn nodes(&self) -> dot::Nodes<'a, Node<'a>> { self.inner.nodes() }
fn edges(&self) -> dot::Edges<'a, Edge<'a>> { self.inner.edges() }
fn source(&self, edge: &Edge<'a>) -> Node<'a> { self.inner.source(edge) }
#[deriving(Clone)]
pub struct LoanDataFlowOperator;
-pub type LoanDataFlow<'a> = DataFlowContext<'a, LoanDataFlowOperator>;
+pub type LoanDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, LoanDataFlowOperator>;
-impl<'a> Visitor<()> for BorrowckCtxt<'a> {
+impl<'a, 'tcx> Visitor<()> for BorrowckCtxt<'a, 'tcx> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl,
b: &Block, s: Span, n: NodeId, _: ()) {
borrowck_fn(self, fk, fd, b, s, n);
}
/// Collection of conclusions determined via borrow checker analyses.
-pub struct AnalysisData<'a> {
+pub struct AnalysisData<'a, 'tcx: 'a> {
pub all_loans: Vec<Loan>,
- pub loans: DataFlowContext<'a, LoanDataFlowOperator>,
- pub move_data: move_data::FlowedMoveData<'a>,
+ pub loans: DataFlowContext<'a, 'tcx, LoanDataFlowOperator>,
+ pub move_data: move_data::FlowedMoveData<'a, 'tcx>,
}
fn borrowck_fn(this: &mut BorrowckCtxt,
visit::walk_fn(this, fk, decl, body, sp, ());
}
-fn build_borrowck_dataflow_data<'a>(this: &mut BorrowckCtxt<'a>,
- fk: &FnKind,
- decl: &ast::FnDecl,
- cfg: &cfg::CFG,
- body: &ast::Block,
- sp: Span,
- id: ast::NodeId) -> AnalysisData<'a> {
+fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>,
+ fk: &FnKind,
+ decl: &ast::FnDecl,
+ cfg: &cfg::CFG,
+ body: &ast::Block,
+ sp: Span,
+ id: ast::NodeId) -> AnalysisData<'a, 'tcx> {
// Check the body of fn items.
let id_range = ast_util::compute_id_range_for_fn_body(fk, decl, body, sp, id);
let (all_loans, move_data) =
/// Accessor for introspective clients inspecting `AnalysisData` and
/// the `BorrowckCtxt` itself , e.g. the flowgraph visualizer.
-pub fn build_borrowck_dataflow_data_for_fn<'a>(
- tcx: &'a ty::ctxt,
- input: FnPartsWithCFG<'a>) -> (BorrowckCtxt<'a>, AnalysisData<'a>) {
+pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>(
+ tcx: &'a ty::ctxt<'tcx>,
+ input: FnPartsWithCFG<'a>) -> (BorrowckCtxt<'a, 'tcx>, AnalysisData<'a, 'tcx>) {
let mut bccx = BorrowckCtxt {
tcx: tcx,
// ----------------------------------------------------------------------
// Type definitions
-pub struct BorrowckCtxt<'a> {
- tcx: &'a ty::ctxt,
+pub struct BorrowckCtxt<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
// Statistics:
stats: Gc<BorrowStats>,
///////////////////////////////////////////////////////////////////////////
// Misc
-impl<'a> BorrowckCtxt<'a> {
+impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> {
pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region)
-> bool {
self.tcx.region_maps.is_subregion_of(r_sub, r_sup)
self.tcx.region_maps.is_subscope_of(r_sub, r_sup)
}
- pub fn mc(&self) -> mc::MemCategorizationContext<'a,ty::ctxt> {
+ pub fn mc(&self) -> mc::MemCategorizationContext<'a, ty::ctxt<'tcx>> {
mc::MemCategorizationContext::new(self.tcx)
}
pub assignee_ids: RefCell<HashSet<ast::NodeId>>,
}
-pub struct FlowedMoveData<'a> {
+pub struct FlowedMoveData<'a, 'tcx: 'a> {
pub move_data: MoveData,
- pub dfcx_moves: MoveDataFlow<'a>,
+ pub dfcx_moves: MoveDataFlow<'a, 'tcx>,
// We could (and maybe should, for efficiency) combine both move
// and assign data flow into one, but this way it's easier to
// distinguish the bits that correspond to moves and assignments.
- pub dfcx_assign: AssignDataFlow<'a>
+ pub dfcx_assign: AssignDataFlow<'a, 'tcx>
}
/// Index into `MoveData.paths`, used like a pointer
#[deriving(Clone)]
pub struct MoveDataFlowOperator;
-pub type MoveDataFlow<'a> = DataFlowContext<'a, MoveDataFlowOperator>;
+pub type MoveDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, MoveDataFlowOperator>;
#[deriving(Clone)]
pub struct AssignDataFlowOperator;
-pub type AssignDataFlow<'a> = DataFlowContext<'a, AssignDataFlowOperator>;
+pub type AssignDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, AssignDataFlowOperator>;
fn loan_path_is_precise(loan_path: &LoanPath) -> bool {
match *loan_path {
}
}
-impl<'a> FlowedMoveData<'a> {
+impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> {
pub fn new(move_data: MoveData,
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
cfg: &cfg::CFG,
id_range: ast_util::IdRange,
decl: &ast::FnDecl,
body: &ast::Block)
- -> FlowedMoveData<'a> {
+ -> FlowedMoveData<'a, 'tcx> {
let mut dfcx_moves =
DataFlowContext::new(tcx,
"flowed_move_data_moves",
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
use std::gc::Gc;
-struct CFGBuilder<'a> {
- tcx: &'a ty::ctxt,
+struct CFGBuilder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
exit_map: NodeMap<CFGIndex>,
graph: CFGGraph,
fn_exit: CFGIndex,
g.add_node(CFGNodeData { id: ast::DUMMY_NODE_ID })
}
-impl<'a> CFGBuilder<'a> {
+impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
fn block(&mut self, blk: &ast::Block, pred: CFGIndex) -> CFGIndex {
let mut stmts_exit = pred;
for stmt in blk.stmts.iter() {
self.add_node(expr.id, [then_exit, else_exit]) // 4, 5
}
- ast::ExprWhile(ref cond, ref body) => {
+ ast::ExprWhile(ref cond, ref body, _) => {
//
// [pred]
// |
use syntax::visit::Visitor;
use syntax::visit;
-pub struct CheckCrateVisitor<'a> {
- tcx: &'a ty::ctxt,
+pub struct CheckCrateVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
}
-impl<'a> Visitor<bool> for CheckCrateVisitor<'a> {
+impl<'a, 'tcx> Visitor<bool> for CheckCrateVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &Item, env: bool) {
check_item(self, i, env);
}
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
fn visit_expr(&mut self, e: &ast::Expr, cx:Context) {
match e.node {
- ast::ExprWhile(ref e, ref b) => {
+ ast::ExprWhile(ref e, ref b, _) => {
self.visit_expr(&**e, cx);
self.visit_block(&**b, Loop);
}
use syntax::fold::{Folder, noop_fold_pat};
use syntax::print::pprust::pat_to_string;
use syntax::parse::token;
-use syntax::visit;
-use syntax::visit::{Visitor, FnKind};
+use syntax::visit::{mod, Visitor, FnKind};
use util::ppaux::ty_to_string;
struct Matrix(Vec<Vec<Gc<Pat>>>);
}
}
-pub struct MatchCheckCtxt<'a> {
- pub tcx: &'a ty::ctxt
+pub struct MatchCheckCtxt<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>
}
#[deriving(Clone, PartialEq)]
/// Ranges of literal values (2..5).
ConstantRange(const_val, const_val),
/// Array patterns of length n.
- Slice(uint)
+ Slice(uint),
+ /// Array patterns with a subslice.
+ SliceWithSubslice(uint, uint)
}
#[deriving(Clone, PartialEq)]
LeaveOutWitness
}
-impl<'a> Visitor<()> for MatchCheckCtxt<'a> {
+impl<'a, 'tcx> Visitor<()> for MatchCheckCtxt<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
check_expr(self, ex);
}
}
}
-fn raw_pat(p: Gc<Pat>) -> Gc<Pat> {
- match p.node {
- PatIdent(_, _, Some(s)) => { raw_pat(s) }
- _ => { p }
- }
-}
-
fn check_exhaustive(cx: &MatchCheckCtxt, sp: Span, matrix: &Matrix) {
match is_useful(cx, matrix, [wild()], ConstructWitness) {
UsefulWithWitness(pats) => {
}
}
-pub struct StaticInliner<'a> {
- pub tcx: &'a ty::ctxt,
+pub struct StaticInliner<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
pub failed: bool
}
-impl<'a> StaticInliner<'a> {
- pub fn new<'a>(tcx: &'a ty::ctxt) -> StaticInliner<'a> {
+impl<'a, 'tcx> StaticInliner<'a, 'tcx> {
+ pub fn new<'a>(tcx: &'a ty::ctxt<'tcx>) -> StaticInliner<'a, 'tcx> {
StaticInliner {
tcx: tcx,
failed: false
}
}
-impl<'a> Folder for StaticInliner<'a> {
+impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
fn fold_pat(&mut self, pat: Gc<Pat>) -> Gc<Pat> {
match pat.node {
PatIdent(..) | PatEnum(..) => {
pats.push_all(after.as_slice());
Some(pats)
},
+ SliceWithSubslice(prefix, suffix)
+ if before.len() == prefix
+ && after.len() == suffix
+ && slice.is_some() => {
+ let mut pats = before.clone();
+ pats.push_all(after.as_slice());
+ Some(pats)
+ }
_ => None
}
}
/// Ensures that a pattern guard doesn't borrow by mutable reference or
/// assign.
-fn check_for_mutation_in_guard<'a>(cx: &'a MatchCheckCtxt<'a>, guard: &Expr) {
+fn check_for_mutation_in_guard<'a, 'tcx>(cx: &'a MatchCheckCtxt<'a, 'tcx>, guard: &Expr) {
let mut checker = MutationChecker {
cx: cx,
};
visitor.walk_expr(guard);
}
-struct MutationChecker<'a> {
- cx: &'a MatchCheckCtxt<'a>,
+struct MutationChecker<'a, 'tcx: 'a> {
+ cx: &'a MatchCheckCtxt<'a, 'tcx>,
}
-impl<'a> Delegate for MutationChecker<'a> {
+impl<'a, 'tcx> Delegate for MutationChecker<'a, 'tcx> {
fn consume(&mut self, _: NodeId, _: Span, _: cmt, _: ConsumeMode) {}
fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {}
fn borrow(&mut self,
visitor.visit_pat(pat, true);
}
-struct AtBindingPatternVisitor<'a,'b:'a> {
- cx: &'a MatchCheckCtxt<'b>,
+struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> {
+ cx: &'a MatchCheckCtxt<'b, 'tcx>,
}
-impl<'a,'b> Visitor<bool> for AtBindingPatternVisitor<'a,'b> {
+impl<'a, 'b, 'tcx> Visitor<bool> for AtBindingPatternVisitor<'a, 'b, 'tcx> {
fn visit_pat(&mut self, pat: &Pat, bindings_allowed: bool) {
if !bindings_allowed && pat_is_binding(&self.cx.tcx.def_map, pat) {
self.cx.tcx.sess.span_err(pat.span,
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Checks that all rvalues in a crate have statically known size. check_crate
+// is the public starting point.
+
+use middle::expr_use_visitor as euv;
+use middle::mem_categorization as mc;
+use middle::ty;
+use util::ppaux::ty_to_string;
+
+use syntax::ast;
+use syntax::codemap::Span;
+use syntax::visit;
+
+/// Entry point: walks the whole crate, visiting every fn body.
+pub fn check_crate(tcx: &ty::ctxt,
+ krate: &ast::Crate) {
+ let mut rvcx = RvalueContext { tcx: tcx };
+ visit::walk_crate(&mut rvcx, krate, ());
+}
+
+// Carries the type context through the walk. The same struct implements
+// both visit::Visitor (to find fn bodies) and euv::Delegate (to receive
+// consume/borrow/mutate callbacks from the expression-use visitor).
+struct RvalueContext<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>
+}
+
+impl<'a, 'tcx> visit::Visitor<()> for RvalueContext<'a, 'tcx> {
+ fn visit_fn(&mut self,
+ _: &visit::FnKind,
+ fd: &ast::FnDecl,
+ b: &ast::Block,
+ _: Span,
+ _: ast::NodeId,
+ _: ()) {
+ // Hand the function body to ExprUseVisitor, which reports value
+ // consumption back through the euv::Delegate impl below.
+ let mut euv = euv::ExprUseVisitor::new(self, self.tcx);
+ euv.walk_fn(fd, b);
+ }
+}
+
+impl<'a, 'tcx> euv::Delegate for RvalueContext<'a, 'tcx> {
+ fn consume(&mut self,
+ _: ast::NodeId,
+ span: Span,
+ cmt: mc::cmt,
+ _: euv::ConsumeMode) {
+ debug!("consume; cmt: {:?}; type: {}", *cmt, ty_to_string(self.tcx, cmt.ty));
+ // Moving a value whose size is not statically known is an error
+ // (E0161): the move would require copying an unknown number of bytes.
+ if !ty::type_is_sized(self.tcx, cmt.ty) {
+ span_err!(self.tcx.sess, span, E0161,
+ "cannot move a value of type {0}: the size of {0} cannot be statically determined",
+ ty_to_string(self.tcx, cmt.ty));
+ }
+ }
+
+ // The remaining Delegate callbacks require no size check here and are
+ // deliberately left as no-ops.
+ fn consume_pat(&mut self,
+ _consume_pat: &ast::Pat,
+ _cmt: mc::cmt,
+ _mode: euv::ConsumeMode) {
+ }
+
+ fn borrow(&mut self,
+ _borrow_id: ast::NodeId,
+ _borrow_span: Span,
+ _cmt: mc::cmt,
+ _loan_region: ty::Region,
+ _bk: ty::BorrowKind,
+ _loan_cause: euv::LoanCause) {
+ }
+
+ fn decl_without_init(&mut self,
+ _id: ast::NodeId,
+ _span: Span) {
+ }
+
+ fn mutate(&mut self,
+ _assignment_id: ast::NodeId,
+ _assignment_span: Span,
+ _assignee_cmt: mc::cmt,
+ _mode: euv::MutateMode) {
+ }
+}
Some(format!("mutable static items are not allowed to have {}", suffix))
}
-struct CheckStaticVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct CheckStaticVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
}
pub fn check_crate(tcx: &ty::ctxt, krate: &ast::Crate) {
visit::walk_crate(&mut CheckStaticVisitor { tcx: tcx }, krate, false)
}
-impl<'a> CheckStaticVisitor<'a> {
+impl<'a, 'tcx> CheckStaticVisitor<'a, 'tcx> {
fn report_error(&self, span: Span, result: Option<String>) -> bool {
match result {
None => { false }
}
}
-impl<'a> Visitor<bool> for CheckStaticVisitor<'a> {
+impl<'a, 'tcx> Visitor<bool> for CheckStaticVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _is_const: bool) {
debug!("visit_item(item={})", pprust::item_to_string(i));
// copies of general constants
//
// (in theory, probably not at first: if/match on integer-const
-// conditions / descriminants)
+// conditions / discriminants)
//
// - Non-constants: everything else.
//
}
}
-struct ConstEvalVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct ConstEvalVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
ccache: constness_cache,
}
-impl<'a> ConstEvalVisitor<'a> {
+impl<'a, 'tcx> ConstEvalVisitor<'a, 'tcx> {
fn classify(&mut self, e: &Expr) -> constness {
let did = ast_util::local_def(e.id);
match self.ccache.find(&did) {
}
-impl<'a> Visitor<()> for ConstEvalVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for ConstEvalVisitor<'a, 'tcx> {
fn visit_ty(&mut self, t: &Ty, _: ()) {
match t.node {
TyFixedLengthVec(_, expr) => {
}
}
-pub fn eval_const_expr_partial<T: ty::ExprTyProvider>(tcx: &T, e: &Expr)
- -> Result<const_val, String> {
+pub fn eval_const_expr_partial(tcx: &ty::ctxt, e: &Expr) -> Result<const_val, String> {
fn fromb(b: bool) -> Result<const_val, String> { Ok(const_int(b as i64)) }
match e.node {
ExprUnary(UnNeg, ref inner) => {
// This tends to get called w/o the type actually having been
// populated in the ctxt, which was causing things to blow up
// (#5900). Fall back to doing a limited lookup to get past it.
- let ety = ty::expr_ty_opt(tcx.ty_ctxt(), e)
- .or_else(|| astconv::ast_ty_to_prim_ty(tcx.ty_ctxt(), &**target_ty))
+ let ety = ty::expr_ty_opt(tcx, e)
+ .or_else(|| astconv::ast_ty_to_prim_ty(tcx, &**target_ty))
.unwrap_or_else(|| {
- tcx.ty_ctxt().sess.span_fatal(target_ty.span,
- "target type not found for \
- const cast")
+ tcx.sess.span_fatal(target_ty.span,
+ "target type not found for const cast")
});
let base = eval_const_expr_partial(tcx, &**base);
}
}
ExprPath(_) => {
- match lookup_const(tcx.ty_ctxt(), e) {
- Some(actual_e) => eval_const_expr_partial(tcx.ty_ctxt(), &*actual_e),
+ match lookup_const(tcx, e) {
+ Some(actual_e) => eval_const_expr_partial(tcx, &*actual_e),
None => Err("non-constant path in constant expr".to_string())
}
}
pub enum EntryOrExit { Entry, Exit }
#[deriving(Clone)]
-pub struct DataFlowContext<'a, O> {
- tcx: &'a ty::ctxt,
+pub struct DataFlowContext<'a, 'tcx: 'a, O> {
+ tcx: &'a ty::ctxt<'tcx>,
/// a name for the analysis using this dataflow instance
analysis_name: &'static str,
fn initial_value(&self) -> bool;
}
-#[cfg(stage0)]
-struct PropagationContext<'a, 'b, O> {
- dfcx: &'a mut DataFlowContext<'b, O>,
- changed: bool
-}
-
-#[cfg(not(stage0))]
-struct PropagationContext<'a, 'b:'a, O:'a> {
- dfcx: &'a mut DataFlowContext<'b, O>,
+struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O: 'a> {
+ dfcx: &'a mut DataFlowContext<'b, 'tcx, O>,
changed: bool
}
})
}
-impl<'a, O:DataFlowOperator> DataFlowContext<'a, O> {
+impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
fn has_bitset_for_nodeid(&self, n: ast::NodeId) -> bool {
assert!(n != ast::DUMMY_NODE_ID);
self.nodeid_to_index.contains_key(&n)
}
}
-impl<'a, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, O> {
+impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O> {
fn pre(&self,
ps: &mut pprust::State,
node: pprust::AnnNode) -> io::IoResult<()> {
let id = match node {
+ pprust::NodeIdent(_) | pprust::NodeName(_) => 0,
pprust::NodeExpr(expr) => expr.id,
pprust::NodeBlock(blk) => blk.id,
pprust::NodeItem(_) => 0,
}
}
-impl<'a, O:DataFlowOperator> DataFlowContext<'a, O> {
- pub fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
+ pub fn new(tcx: &'a ty::ctxt<'tcx>,
analysis_name: &'static str,
decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG,
oper: O,
id_range: IdRange,
- bits_per_id: uint) -> DataFlowContext<'a, O> {
+ bits_per_id: uint) -> DataFlowContext<'a, 'tcx, O> {
let words_per_id = (bits_per_id + uint::BITS - 1) / uint::BITS;
let num_nodes = cfg.graph.all_nodes().len();
}
}
-impl<'a, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, O> {
-// ^^^^^^^^^^^^^ only needed for pretty printing
+impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
+// ^^^^^^^^^^^^^ only needed for pretty printing
pub fn propagate(&mut self, cfg: &cfg::CFG, blk: &ast::Block) {
//! Performs the data flow analysis.
}
}
-impl<'a, 'b, O:DataFlowOperator> PropagationContext<'a, 'b, O> {
+impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
in_out: &mut [uint]) {
}
}
-struct MarkSymbolVisitor<'a> {
+struct MarkSymbolVisitor<'a, 'tcx: 'a> {
worklist: Vec<ast::NodeId>,
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
struct_has_extern_repr: bool
}
-impl<'a> MarkSymbolVisitor<'a> {
- fn new(tcx: &'a ty::ctxt,
- worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a> {
+impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
+ worklist: Vec<ast::NodeId>) -> MarkSymbolVisitor<'a, 'tcx> {
MarkSymbolVisitor {
worklist: worklist,
tcx: tcx,
}
}
-impl<'a> Visitor<MarkSymbolVisitorContext> for MarkSymbolVisitor<'a> {
+impl<'a, 'tcx> Visitor<MarkSymbolVisitorContext> for MarkSymbolVisitor<'a, 'tcx> {
fn visit_struct_def(&mut self, def: &ast::StructDef, _: ast::Ident, _: &ast::Generics,
_: ast::NodeId, ctxt: MarkSymbolVisitorContext) {
}
}
-struct DeadVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct DeadVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
live_symbols: Box<HashSet<ast::NodeId>>,
}
-impl<'a> DeadVisitor<'a> {
+impl<'a, 'tcx> DeadVisitor<'a, 'tcx> {
fn should_warn_about_field(&mut self, node: &ast::StructField_) -> bool {
let is_named = node.ident().is_some();
let field_type = ty::node_id_to_type(self.tcx, node.id);
}
}
-impl<'a> Visitor<()> for DeadVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for DeadVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let ctor_id = get_struct_ctor_id(item);
if !self.symbol_is_live(item.id, ctor_id) && should_warn(item) {
}
}
-struct EffectCheckVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct EffectCheckVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
/// Whether we're in an unsafe context.
unsafe_context: UnsafeContext,
}
-impl<'a> EffectCheckVisitor<'a> {
+impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> {
fn require_unsafe(&mut self, span: Span, description: &str) {
match self.unsafe_context {
SafeContext => {
}
}
-impl<'a> Visitor<()> for EffectCheckVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for EffectCheckVisitor<'a, 'tcx> {
fn visit_fn(&mut self, fn_kind: &visit::FnKind, fn_decl: &ast::FnDecl,
block: &ast::Block, span: Span, _: ast::NodeId, _:()) {
// supplies types from the tree. After type checking is complete, you
// can just use the tcx as the typer.
-#[cfg(stage0)]
-pub struct ExprUseVisitor<'d,'t,TYPER> {
- typer: &'t TYPER,
- mc: mc::MemCategorizationContext<'t,TYPER>,
- delegate: &'d mut Delegate+'d,
-}
-
-#[cfg(not(stage0))]
pub struct ExprUseVisitor<'d,'t,TYPER:'t> {
typer: &'t TYPER,
mc: mc::MemCategorizationContext<'t,TYPER>,
)
)
-impl<'d,'t,TYPER:mc::Typer> ExprUseVisitor<'d,'t,TYPER> {
+impl<'d,'t,'tcx,TYPER:mc::Typer<'tcx>> ExprUseVisitor<'d,'t,TYPER> {
pub fn new(delegate: &'d mut Delegate,
typer: &'t TYPER)
-> ExprUseVisitor<'d,'t,TYPER> {
}
}
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+ fn tcx(&self) -> &'t ty::ctxt<'tcx> {
self.typer.tcx()
}
self.walk_block(&**blk);
}
- ast::ExprWhile(ref cond_expr, ref blk) => {
+ ast::ExprWhile(ref cond_expr, ref blk, _) => {
self.consume_expr(&**cond_expr);
self.walk_block(&**blk);
}
ty::BorrowKind::from_mutbl(m),
AutoRef);
}
- ty::AutoUnsizeUniq(_) | ty::AutoUnsize(_) | ty::AutoUnsafe(_) => {}
+ ty::AutoUnsizeUniq(_) | ty::AutoUnsize(_) | ty::AutoUnsafe(..) => {}
}
}
}
}
-pub fn get_capture_mode<T:Typer>(tcx: &T, closure_expr_id: ast::NodeId)
- -> CaptureMode {
+pub fn get_capture_mode<'tcx, T:Typer<'tcx>>(tcx: &T, closure_expr_id: ast::NodeId)
+ -> CaptureMode {
tcx.capture_mode(closure_expr_id)
}
result
}
-struct IntrinsicCheckingVisitor<'a> {
- tcx: &'a ctxt,
+struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ctxt<'tcx>,
}
-impl<'a> IntrinsicCheckingVisitor<'a> {
+impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> {
fn def_id_is_transmute(&self, def_id: DefId) -> bool {
let intrinsic = match ty::get(ty::lookup_item_type(self.tcx, def_id).ty).sty {
ty::ty_bare_fn(ref bfty) => bfty.abi == RustIntrinsic,
}
}
-impl<'a> Visitor<()> for IntrinsicCheckingVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for IntrinsicCheckingVisitor<'a, 'tcx> {
fn visit_expr(&mut self, expr: &ast::Expr, (): ()) {
match expr.node {
ast::ExprPath(..) => {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-
use middle::freevars::freevar_entry;
use middle::freevars;
use middle::subst;
// primitives in the stdlib are explicitly annotated to only take sendable
// types.
-pub struct Context<'a> {
- tcx: &'a ty::ctxt,
+pub struct Context<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
struct_and_enum_bounds_checked: HashSet<ty::t>,
parameter_environments: Vec<ParameterEnvironment>,
}
-impl<'a> Visitor<()> for Context<'a> {
+impl<'a, 'tcx> Visitor<()> for Context<'a, 'tcx> {
fn visit_expr(&mut self, ex: &Expr, _: ()) {
check_expr(self, ex);
}
tcx.sess.abort_if_errors();
}
-struct EmptySubstsFolder<'a> {
- tcx: &'a ty::ctxt
+struct EmptySubstsFolder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>
}
-impl<'a> ty_fold::TypeFolder for EmptySubstsFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'a, 'tcx> ty_fold::TypeFolder<'tcx> for EmptySubstsFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx
}
fn fold_substs(&mut self, _: &subst::Substs) -> subst::Substs {
fn check_item(cx: &mut Context, item: &Item) {
if !attr::contains_name(item.attrs.as_slice(), "unsafe_destructor") {
match item.node {
- ItemImpl(_, Some(ref trait_ref), ref self_type, _) => {
- check_impl_of_trait(cx, item, trait_ref, &**self_type);
-
+ ItemImpl(_, ref trait_ref, ref self_type, _) => {
let parameter_environment =
ParameterEnvironment::for_item(cx.tcx, item.id);
cx.parameter_environments.push(parameter_environment);
item.span,
ty::node_id_to_type(cx.tcx, item.id));
- // Check bounds on the trait ref.
- match ty::impl_trait_ref(cx.tcx,
- ast_util::local_def(item.id)) {
- None => {}
- Some(trait_ref) => {
- check_bounds_on_structs_or_enums_in_trait_ref(
- cx,
- item.span,
- &*trait_ref);
+ match trait_ref {
+ &Some(ref trait_ref) => {
+ check_impl_of_trait(cx, item, trait_ref, &**self_type);
+
+ // Check bounds on the trait ref.
+ match ty::impl_trait_ref(cx.tcx,
+ ast_util::local_def(item.id)) {
+ None => {}
+ Some(trait_ref) => {
+ check_bounds_on_structs_or_enums_in_trait_ref(
+ cx,
+ item.span,
+ &*trait_ref);
+ }
+ }
}
+ &None => {}
}
drop(cx.parameter_environments.pop());
match aty.node {
TyPath(_, _, id) => {
match cx.tcx.item_substs.borrow().find(&id) {
- None => { }
+ None => {}
Some(ref item_substs) => {
let def_map = cx.tcx.def_map.borrow();
let did = def_map.get_copy(&id).def_id();
for def in generics.types.iter() {
let ty = *item_substs.substs.types.get(def.space,
def.index);
- check_typaram_bounds(cx, aty.span, ty, def)
+ check_typaram_bounds(cx, aty.span, ty, def);
}
}
}
.zip(polytype.generics
.types
.iter()) {
- check_typaram_bounds(cx, span, *ty, type_param_def)
+ check_typaram_bounds(cx, span, *ty, type_param_def);
}
// Check trait bounds.
NoSendItem, "no_send_bound", no_send_bound;
NoCopyItem, "no_copy_bound", no_copy_bound;
- NoSyncItem, "no_share_bound", no_share_bound;
+ NoSyncItem, "no_sync_bound", no_sync_bound;
ManagedItem, "managed_bound", managed_bound;
IteratorItem, "iterator", iterator;
}
}
-impl<'a> Visitor<()> for IrMaps<'a> {
+impl<'a, 'tcx> Visitor<()> for IrMaps<'a, 'tcx> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl, b: &Block, s: Span, n: NodeId, _: ()) {
visit_fn(self, fk, fd, b, s, n);
}
ImplicitRet
}
-struct IrMaps<'a> {
- tcx: &'a ty::ctxt,
+struct IrMaps<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
num_live_nodes: uint,
num_vars: uint,
lnks: Vec<LiveNodeKind>,
}
-impl<'a> IrMaps<'a> {
- fn new(tcx: &'a ty::ctxt) -> IrMaps<'a> {
+impl<'a, 'tcx> IrMaps<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>) -> IrMaps<'a, 'tcx> {
IrMaps {
tcx: tcx,
num_live_nodes: 0,
}
}
-impl<'a> Visitor<()> for Liveness<'a> {
+impl<'a, 'tcx> Visitor<()> for Liveness<'a, 'tcx> {
fn visit_fn(&mut self, fk: &FnKind, fd: &FnDecl, b: &Block, s: Span, n: NodeId, _: ()) {
check_fn(self, fk, fd, b, s, n);
}
static ACC_WRITE: uint = 2u;
static ACC_USE: uint = 4u;
-struct Liveness<'a> {
- ir: &'a mut IrMaps<'a>,
+struct Liveness<'a, 'tcx: 'a> {
+ ir: &'a mut IrMaps<'a, 'tcx>,
s: Specials,
successors: Vec<LiveNode>,
users: Vec<Users>,
cont_ln: NodeMap<LiveNode>
}
-impl<'a> Liveness<'a> {
- fn new(ir: &'a mut IrMaps<'a>, specials: Specials) -> Liveness<'a> {
+impl<'a, 'tcx> Liveness<'a, 'tcx> {
+ fn new(ir: &'a mut IrMaps<'a, 'tcx>, specials: Specials) -> Liveness<'a, 'tcx> {
let num_live_nodes = ir.num_live_nodes;
let num_vars = ir.num_vars;
Liveness {
fn pat_bindings(&mut self,
pat: &Pat,
- f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) {
+ f: |&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId|) {
pat_util::pat_bindings(&self.ir.tcx.def_map, pat, |_bm, p_id, sp, _n| {
let ln = self.live_node(p_id, sp);
let var = self.variable(p_id, sp);
fn arm_pats_bindings(&mut self,
pats: &[Gc<Pat>],
- f: |&mut Liveness<'a>, LiveNode, Variable, Span, NodeId|) {
+ f: |&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId|) {
// only consider the first pattern; any later patterns must have
// the same bindings, and we also consider the first pattern to be
// the "authoritative" set of ids
fn indices2(&mut self,
ln: LiveNode,
succ_ln: LiveNode,
- op: |&mut Liveness<'a>, uint, uint|) {
+ op: |&mut Liveness<'a, 'tcx>, uint, uint|) {
let node_base_idx = self.idx(ln, Variable(0u));
let succ_base_idx = self.idx(succ_ln, Variable(0u));
for var_idx in range(0u, self.ir.num_vars) {
self.propagate_through_expr(&**cond, ln)
}
- ExprWhile(ref cond, ref blk) => {
+ ExprWhile(ref cond, ref blk, _) => {
self.propagate_through_loop(expr,
WhileLoop(cond.clone()),
&**blk,
loop_node_id: NodeId,
break_ln: LiveNode,
cont_ln: LiveNode,
- f: |&mut Liveness<'a>| -> R)
+ f: |&mut Liveness<'a, 'tcx>| -> R)
-> R {
debug!("with_loop_nodes: {} {}", loop_node_id, break_ln.get());
self.loop_scope.push(loop_node_id);
// do not check contents of nested fns
}
-impl<'a> Liveness<'a> {
+impl<'a, 'tcx> Liveness<'a, 'tcx> {
fn check_ret(&self,
id: NodeId,
sp: Span,
// like `*x`, the type of this deref node is the deref'd type (`T`),
// but in a pattern like `@x`, the `@x` pattern is again a
// dereference, but its type is the type *before* the dereference
-// (`@T`). So use `cmt.type` to find the type of the value in a consistent
+// (`@T`). So use `cmt.ty` to find the type of the value in a consistent
// fashion. For more details, see the method `cat_pattern`
#[deriving(Clone, PartialEq)]
pub struct cmt_ {
fn span(&self) -> Span { self.span }
}
-#[cfg(stage0)]
-pub struct MemCategorizationContext<'t,TYPER> {
- typer: &'t TYPER
-}
-
-#[cfg(not(stage0))]
pub struct MemCategorizationContext<'t,TYPER:'t> {
typer: &'t TYPER
}
* know that no errors have occurred, so we simply consult the tcx and we
* can be sure that only `Ok` results will occur.
*/
-pub trait Typer {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt;
+pub trait Typer<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn node_ty(&self, id: ast::NodeId) -> McResult<ty::t>;
fn node_method_ty(&self, method_call: typeck::MethodCall) -> Option<ty::t>;
fn adjustments<'a>(&'a self) -> &'a RefCell<NodeMap<ty::AutoAdjustment>>;
)
)
-impl<'t,TYPER:Typer> MemCategorizationContext<'t,TYPER> {
+impl<'t,'tcx,TYPER:Typer<'tcx>> MemCategorizationContext<'t,TYPER> {
pub fn new(typer: &'t TYPER) -> MemCategorizationContext<'t,TYPER> {
MemCategorizationContext { typer: typer }
}
- fn tcx(&self) -> &'t ty::ctxt {
+ fn tcx(&self) -> &'t ty::ctxt<'tcx> {
self.typer.tcx()
}
box (GC) Pat { id: 0, node: PatWild(PatWildSingle), span: DUMMY_SP }
}
+pub fn raw_pat(p: Gc<Pat>) -> Gc<Pat> {
+ match p.node {
+ PatIdent(_, _, Some(s)) => { raw_pat(s) }
+ _ => { p }
+ }
+}
+
pub fn def_to_path(tcx: &ty::ctxt, id: DefId) -> Path {
ty::with_path(tcx, id, |mut path| Path {
global: false,
/// The embargo visitor, used to determine the exports of the ast
////////////////////////////////////////////////////////////////////////////////
-struct EmbargoVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct EmbargoVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
exp_map2: &'a resolve::ExportMap2,
// This flag is an indicator of whether the previous item in the
prev_public: bool,
}
-impl<'a> EmbargoVisitor<'a> {
+impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> {
// There are checks inside of privacy which depend on knowing whether a
// trait should be exported or not. The two current consumers of this are:
//
}
}
-impl<'a> Visitor<()> for EmbargoVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for EmbargoVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let orig_all_pub = self.prev_public;
self.prev_public = orig_all_pub && item.vis == ast::Public;
/// The privacy visitor, where privacy checks take place (violations reported)
////////////////////////////////////////////////////////////////////////////////
-struct PrivacyVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct PrivacyVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
curitem: ast::NodeId,
in_foreign: bool,
parents: NodeMap<ast::NodeId>,
NamedField(ast::Ident),
}
-impl<'a> PrivacyVisitor<'a> {
+impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> {
// used when debugging
fn nodestr(&self, id: ast::NodeId) -> String {
self.tcx.map.node_to_string(id).to_string()
}
}
-impl<'a> Visitor<()> for PrivacyVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for PrivacyVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let orig_curitem = replace(&mut self.curitem, item.id);
visit::walk_item(self, item, ());
/// The privacy sanity check visitor, ensures unnecessary visibility isn't here
////////////////////////////////////////////////////////////////////////////////
-struct SanePrivacyVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct SanePrivacyVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
in_fn: bool,
}
-impl<'a> Visitor<()> for SanePrivacyVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for SanePrivacyVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
if self.in_fn {
self.check_all_inherited(item);
}
}
-impl<'a> SanePrivacyVisitor<'a> {
+impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> {
/// Validates all of the visibility qualifiers placed on the item given. This
/// ensures that there are no extraneous qualifiers that don't actually do
/// anything. In theory these qualifiers wouldn't parse, but that may happen
}
}
-struct VisiblePrivateTypesVisitor<'a> {
- tcx: &'a ty::ctxt,
+struct VisiblePrivateTypesVisitor<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
exported_items: &'a ExportedItems,
public_items: &'a PublicItems,
}
-struct CheckTypeForPrivatenessVisitor<'a, 'b:'a> {
- inner: &'a VisiblePrivateTypesVisitor<'b>,
+struct CheckTypeForPrivatenessVisitor<'a, 'b: 'a, 'tcx: 'b> {
+ inner: &'a VisiblePrivateTypesVisitor<'b, 'tcx>,
/// whether the type refers to private types.
contains_private: bool,
/// whether we've recurred at all (i.e. if we're pointing at the
outer_type_is_public_path: bool,
}
-impl<'a> VisiblePrivateTypesVisitor<'a> {
+impl<'a, 'tcx> VisiblePrivateTypesVisitor<'a, 'tcx> {
fn path_is_private_type(&self, path_id: ast::NodeId) -> bool {
let did = match self.tcx.def_map.borrow().find_copy(&path_id) {
// `int` etc. (None doesn't seem to occur.)
}
}
-impl<'a, 'b> Visitor<()> for CheckTypeForPrivatenessVisitor<'a, 'b> {
+impl<'a, 'b, 'tcx> Visitor<()> for CheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
fn visit_ty(&mut self, ty: &ast::Ty, _: ()) {
match ty.node {
ast::TyPath(_, _, path_id) => {
fn visit_expr(&mut self, _: &ast::Expr, _: ()) {}
}
-impl<'a> Visitor<()> for VisiblePrivateTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for VisiblePrivateTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
match item.node {
// contents of a private mod can be reexported, so we need
}
// Information needed while computing reachability.
-struct ReachableContext<'a> {
+struct ReachableContext<'a, 'tcx: 'a> {
// The type context.
- tcx: &'a ty::ctxt,
+ tcx: &'a ty::ctxt<'tcx>,
// The set of items which must be exported in the linkage sense.
reachable_symbols: NodeSet,
// A worklist of item IDs. Each item ID in this worklist will be inlined
any_library: bool,
}
-impl<'a> Visitor<()> for ReachableContext<'a> {
+impl<'a, 'tcx> Visitor<()> for ReachableContext<'a, 'tcx> {
fn visit_expr(&mut self, expr: &ast::Expr, _: ()) {
}
}
-impl<'a> ReachableContext<'a> {
+impl<'a, 'tcx> ReachableContext<'a, 'tcx> {
// Creates a new reachability computation context.
- fn new(tcx: &'a ty::ctxt) -> ReachableContext<'a> {
+ fn new(tcx: &'a ty::ctxt<'tcx>) -> ReachableContext<'a, 'tcx> {
let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| {
*ty != config::CrateTypeExecutable
});
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
visitor.region_maps.mark_as_terminating_scope(body.id);
}
- ast::ExprWhile(expr, body) => {
+ ast::ExprWhile(expr, body, _) => {
visitor.region_maps.mark_as_terminating_scope(expr.id);
visitor.region_maps.mark_as_terminating_scope(body.id);
}
use syntax::ast::{Arm, BindByRef, BindByValue, BindingMode, Block, Crate};
use syntax::ast::{DeclItem, DefId, Expr, ExprAgain, ExprBreak, ExprField};
-use syntax::ast::{ExprFnBlock, ExprForLoop, ExprLoop, ExprMethodCall};
+use syntax::ast::{ExprFnBlock, ExprForLoop, ExprLoop, ExprWhile, ExprMethodCall};
use syntax::ast::{ExprPath, ExprProc, ExprStruct, ExprUnboxedFn, FnDecl};
use syntax::ast::{ForeignItem, ForeignItemFn, ForeignItemStatic, Generics};
use syntax::ast::{Ident, ImplItem, Item, ItemEnum, ItemFn, ItemForeignMod};
.contains_key(&name) {
match import_resolution.type_target {
Some(ref target) if !target.shadowable => {
- self.session.span_err(import_span,
- "import conflicts with imported \
- crate in this module");
+ let msg = format!("import `{}` conflicts with imported \
+ crate in this module",
+ token::get_name(name).get());
+ self.session.span_err(import_span, msg.as_slice());
}
Some(_) | None => {}
}
match *name_bindings.value_def.borrow() {
None => {}
Some(ref value) => {
- self.session.span_err(import_span,
- "import conflicts with value \
- in this module");
+ let msg = format!("import `{}` conflicts with value \
+ in this module",
+ token::get_name(name).get());
+ self.session.span_err(import_span, msg.as_slice());
match value.value_span {
None => {}
Some(span) => {
match *name_bindings.type_def.borrow() {
None => {}
Some(ref ty) => {
- self.session.span_err(import_span,
- "import conflicts with type in \
- this module");
+ let msg = format!("import `{}` conflicts with type in \
+ this module",
+ token::get_name(name).get());
+ self.session.span_err(import_span, msg.as_slice());
match ty.type_span {
None => {}
Some(span) => {
visit::walk_expr(self, expr, ());
}
- ExprLoop(_, Some(label)) => {
+ ExprLoop(_, Some(label)) | ExprWhile(_, _, Some(label)) => {
self.with_label_rib(|this| {
let def_like = DlDef(DefLabel(expr.id));
span.expn_info.is_some() || span == DUMMY_SP
}
-struct DxrVisitor<'l> {
+struct DxrVisitor<'l, 'tcx: 'l> {
sess: &'l Session,
- analysis: &'l CrateAnalysis,
+ analysis: &'l CrateAnalysis<'tcx>,
collected_paths: Vec<(NodeId, ast::Path, bool, recorder::Row)>,
collecting: bool,
fmt: FmtStrs<'l>,
}
-impl <'l> DxrVisitor<'l> {
+impl <'l, 'tcx> DxrVisitor<'l, 'tcx> {
fn dump_crate_info(&mut self, name: &str, krate: &ast::Crate) {
// the current crate
self.fmt.crate_str(krate.span, name);
}
}
-impl<'l> Visitor<DxrVisitorEnv> for DxrVisitor<'l> {
+impl<'l, 'tcx> Visitor<DxrVisitorEnv> for DxrVisitor<'l, 'tcx> {
fn visit_item(&mut self, item:&ast::Item, e: DxrVisitorEnv) {
if generated_code(item.span) {
return
///////////////////////////////////////////////////////////////////////////
// The actual substitution engine itself is a type folder.
-struct SubstFolder<'a> {
- tcx: &'a ty::ctxt,
+struct SubstFolder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
substs: &'a Substs,
// The location for which the substitution is performed, if available.
ty_stack_depth: uint,
}
-impl<'a> TypeFolder for SubstFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn fold_region(&mut self, r: ty::Region) -> ty::Region {
// Note: This routine only handles regions that are bound on
*
*/
-#![allow(non_camel_case_types)]
-
use back::abi;
use driver::config::FullDebugInfo;
use llvm::{ValueRef, BasicBlockRef};
-use llvm;
use middle::check_match::StaticInliner;
use middle::check_match;
use middle::const_eval;
use middle::resolve::DefMap;
use middle::trans::adt;
use middle::trans::base::*;
-use middle::trans::build::{And, BitCast, Br, CondBr, GEPi, InBoundsGEP, Load};
-use middle::trans::build::{Mul, Not, Store, Sub, Switch, add_comment};
+use middle::trans::build::{AddCase, And, BitCast, Br, CondBr, GEPi, InBoundsGEP, Load};
+use middle::trans::build::{Mul, Not, Store, Sub, add_comment};
use middle::trans::build;
use middle::trans::callee;
-use middle::trans::cleanup;
-use middle::trans::cleanup::CleanupMethods;
+use middle::trans::cleanup::{mod, CleanupMethods};
use middle::trans::common::*;
use middle::trans::consts;
use middle::trans::datum::*;
-use middle::trans::expr::Dest;
-use middle::trans::expr;
+use middle::trans::expr::{mod, Dest};
use middle::trans::tvec;
use middle::trans::type_of;
use middle::trans::debuginfo;
use std;
use std::collections::HashMap;
-use std::rc::Rc;
use std::gc::{Gc};
+use std::rc::Rc;
use syntax::ast;
use syntax::ast::Ident;
use syntax::codemap::Span;
use syntax::fold::Folder;
-#[deriving(PartialEq)]
-pub enum VecLenOpt {
- vec_len_eq,
- vec_len_ge(/* length of prefix */uint)
+struct ConstantExpr<'a, 'tcx: 'a>(&'a ty::ctxt<'tcx>, Gc<ast::Expr>);
+
+impl<'a, 'tcx> Eq for ConstantExpr<'a, 'tcx> {
+ fn assert_receiver_is_total_eq(&self) {}
+}
+
+impl<'a, 'tcx> PartialEq for ConstantExpr<'a, 'tcx> {
+ fn eq(&self, other: &ConstantExpr<'a, 'tcx>) -> bool {
+ let &ConstantExpr(tcx, expr) = self;
+ let &ConstantExpr(_, other_expr) = other;
+ match const_eval::compare_lit_exprs(tcx, &*expr, &*other_expr) {
+ Some(val1) => val1 == 0,
+            None => fail!("compare_lit_exprs: type mismatch"),
+ }
+ }
}
-// An option identifying a branch (either a literal, an enum variant or a
-// range)
-enum Opt {
- lit(Gc<ast::Expr>),
- var(ty::Disr, Rc<adt::Repr>, ast::DefId),
- range(Gc<ast::Expr>, Gc<ast::Expr>),
- vec_len(/* length */ uint, VecLenOpt, /*range of matches*/(uint, uint))
+// An option identifying a branch (either a literal, an enum variant or a range)
+#[deriving(Eq, PartialEq)]
+enum Opt<'blk, 'tcx: 'blk> {
+ ConstantValue(ConstantExpr<'blk, 'tcx>),
+ ConstantRange(ConstantExpr<'blk, 'tcx>, ConstantExpr<'blk, 'tcx>),
+ Variant(ty::Disr, Rc<adt::Repr>, ast::DefId),
+ SliceLengthEqual(uint),
+ SliceLengthGreaterOrEqual(/* prefix length */ uint, /* suffix length */ uint),
}
-fn opt_eq(tcx: &ty::ctxt, a: &Opt, b: &Opt) -> bool {
- match (a, b) {
- (&lit(a_expr), &lit(b_expr)) => {
- match const_eval::compare_lit_exprs(tcx, &*a_expr, &*b_expr) {
- Some(val1) => val1 == 0,
- None => fail!("compare_list_exprs: type mismatch"),
+impl<'blk, 'tcx> Opt<'blk, 'tcx> {
+ fn trans(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> {
+ let _icx = push_ctxt("match::trans_opt");
+ let ccx = bcx.ccx();
+ match *self {
+ ConstantValue(ConstantExpr(_, lit_expr)) => {
+ let lit_ty = ty::node_id_to_type(bcx.tcx(), lit_expr.id);
+ let (llval, _, _) = consts::const_expr(ccx, &*lit_expr, true);
+ let lit_datum = immediate_rvalue(llval, lit_ty);
+ let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
+ SingleResult(Result::new(bcx, lit_datum.val))
}
- }
- (&range(ref a1, ref a2), &range(ref b1, ref b2)) => {
- let m1 = const_eval::compare_lit_exprs(tcx, &**a1, &**b1);
- let m2 = const_eval::compare_lit_exprs(tcx, &**a2, &**b2);
- match (m1, m2) {
- (Some(val1), Some(val2)) => (val1 == 0 && val2 == 0),
- _ => fail!("compare_list_exprs: type mismatch"),
+ ConstantRange(
+ ConstantExpr(_, ref l1),
+ ConstantExpr(_, ref l2)) => {
+ let (l1, _, _) = consts::const_expr(ccx, &**l1, true);
+ let (l2, _, _) = consts::const_expr(ccx, &**l2, true);
+ RangeResult(Result::new(bcx, l1), Result::new(bcx, l2))
+ }
+ Variant(disr_val, ref repr, _) => {
+ adt::trans_case(bcx, &**repr, disr_val)
+ }
+ SliceLengthEqual(length) => {
+ SingleResult(Result::new(bcx, C_uint(ccx, length)))
+ }
+ SliceLengthGreaterOrEqual(prefix, suffix) => {
+ LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix)))
}
}
- (&var(a, _, _), &var(b, _, _)) => a == b,
- (&vec_len(a1, a2, _), &vec_len(b1, b2, _)) =>
- a1 == b1 && a2 == b2,
- _ => false
}
}
-pub enum opt_result<'a> {
- single_result(Result<'a>),
- lower_bound(Result<'a>),
- range_result(Result<'a>, Result<'a>),
+#[deriving(PartialEq)]
+pub enum BranchKind {
+ NoBranch,
+ Single,
+ Switch,
+ Compare,
+ CompareSliceLength
}
-fn trans_opt<'a>(mut bcx: &'a Block<'a>, o: &Opt) -> opt_result<'a> {
- let _icx = push_ctxt("match::trans_opt");
- let ccx = bcx.ccx();
- match *o {
- lit(lit_expr) => {
- let lit_ty = ty::node_id_to_type(bcx.tcx(), lit_expr.id);
- let (llval, _, _) = consts::const_expr(ccx, &*lit_expr, true);
- let lit_datum = immediate_rvalue(llval, lit_ty);
- let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx));
- return single_result(Result::new(bcx, lit_datum.val));
- }
- var(disr_val, ref repr, _) => {
- return adt::trans_case(bcx, &**repr, disr_val);
- }
- range(ref l1, ref l2) => {
- let (l1, _, _) = consts::const_expr(ccx, &**l1, true);
- let (l2, _, _) = consts::const_expr(ccx, &**l2, true);
- return range_result(Result::new(bcx, l1), Result::new(bcx, l2));
- }
- vec_len(n, vec_len_eq, _) => {
- return single_result(Result::new(bcx, C_int(ccx, n as int)));
- }
- vec_len(n, vec_len_ge(_), _) => {
- return lower_bound(Result::new(bcx, C_int(ccx, n as int)));
- }
- }
+pub enum OptResult<'blk, 'tcx: 'blk> {
+ SingleResult(Result<'blk, 'tcx>),
+ RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>),
+ LowerBound(Result<'blk, 'tcx>)
}
#[deriving(Clone)]
type BindingsMap = HashMap<Ident, BindingInfo>;
-struct ArmData<'a, 'b> {
- bodycx: &'b Block<'b>,
+struct ArmData<'a, 'blk, 'tcx: 'blk> {
+ bodycx: Block<'blk, 'tcx>,
arm: &'a ast::Arm,
bindings_map: BindingsMap
}
* As we proceed `bound_ptrs` are filled with pointers to values to be bound,
* these pointers are stored in llmatch variables just before executing `data` arm.
*/
-#[cfg(not(stage0))]
-struct Match<'a, 'b:'a> {
- pats: Vec<Gc<ast::Pat>>,
- data: &'a ArmData<'a, 'b>,
- bound_ptrs: Vec<(Ident, ValueRef)>
-}
-
-///Dox
-#[cfg(stage0)]
-struct Match<'a, 'b> {
+struct Match<'a, 'blk: 'a, 'tcx: 'blk> {
pats: Vec<Gc<ast::Pat>>,
- data: &'a ArmData<'a, 'b>,
+ data: &'a ArmData<'a, 'blk, 'tcx>,
bound_ptrs: Vec<(Ident, ValueRef)>
}
-impl<'a, 'b> Repr for Match<'a, 'b> {
+impl<'a, 'blk, 'tcx> Repr for Match<'a, 'blk, 'tcx> {
fn repr(&self, tcx: &ty::ctxt) -> String {
if tcx.sess.verbose() {
// for many programs, this just take too long to serialize
return false;
}
-fn expand_nested_bindings<'a, 'b>(
- bcx: &'b Block<'b>,
- m: &'a [Match<'a, 'b>],
- col: uint,
- val: ValueRef)
- -> Vec<Match<'a, 'b>> {
+fn expand_nested_bindings<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ col: uint,
+ val: ValueRef)
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("expand_nested_bindings(bcx={}, m={}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
}).collect()
}
-type enter_pats<'a> = |&[Gc<ast::Pat>]|: 'a -> Option<Vec<Gc<ast::Pat>>>;
+type EnterPatterns<'a> = |&[Gc<ast::Pat>]|: 'a -> Option<Vec<Gc<ast::Pat>>>;
-fn enter_match<'a, 'b>(
- bcx: &'b Block<'b>,
- dm: &DefMap,
- m: &'a [Match<'a, 'b>],
- col: uint,
- val: ValueRef,
- e: enter_pats)
- -> Vec<Match<'a, 'b>> {
+fn enter_match<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ dm: &DefMap,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ col: uint,
+ val: ValueRef,
+ e: EnterPatterns)
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("enter_match(bcx={}, m={}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
let this = *br.pats.get(col);
let mut bound_ptrs = br.bound_ptrs.clone();
match this.node {
- ast::PatIdent(_, ref path1, None) => {
+ ast::PatIdent(_, ref path, None) => {
if pat_is_binding(dm, &*this) {
- bound_ptrs.push((path1.node, val));
+ bound_ptrs.push((path.node, val));
+ }
+ }
+ ast::PatVec(ref before, Some(slice), ref after) => {
+ match slice.node {
+ ast::PatIdent(_, ref path, None) => {
+ let subslice_val = bind_subslice_pat(
+ bcx, this.id, val,
+ before.len(), after.len());
+ bound_ptrs.push((path.node, subslice_val));
+ }
+ _ => {}
}
}
_ => {}
}).collect()
}
-fn enter_default<'a, 'b>(
- bcx: &'b Block<'b>,
- dm: &DefMap,
- m: &'a [Match<'a, 'b>],
- col: uint,
- val: ValueRef)
- -> Vec<Match<'a, 'b>> {
+fn enter_default<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ dm: &DefMap,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ col: uint,
+ val: ValueRef)
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("enter_default(bcx={}, m={}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
/// takes the complete row of patterns rather than just the first one.
/// Also, most of the enter_() family functions have been unified with
/// the check_match specialization step.
-fn enter_opt<'a, 'b>(
- bcx: &'b Block<'b>,
+fn enter_opt<'a, 'blk, 'tcx>(
+ bcx: Block<'blk, 'tcx>,
_: ast::NodeId,
dm: &DefMap,
- m: &'a [Match<'a, 'b>],
+ m: &'a [Match<'a, 'blk, 'tcx>],
opt: &Opt,
col: uint,
variant_size: uint,
val: ValueRef)
- -> Vec<Match<'a, 'b>> {
+ -> Vec<Match<'a, 'blk, 'tcx>> {
debug!("enter_opt(bcx={}, m={}, opt={:?}, col={}, val={})",
bcx.to_str(),
m.repr(bcx.tcx()),
let _indenter = indenter();
let ctor = match opt {
- &lit(expr) => check_match::ConstantValue(
+ &ConstantValue(ConstantExpr(_, expr)) => check_match::ConstantValue(
const_eval::eval_const_expr(bcx.tcx(), &*expr)
),
- &range(lo, hi) => check_match::ConstantRange(
+ &ConstantRange(ConstantExpr(_, lo), ConstantExpr(_, hi)) => check_match::ConstantRange(
const_eval::eval_const_expr(bcx.tcx(), &*lo),
const_eval::eval_const_expr(bcx.tcx(), &*hi)
),
- &vec_len(len, _, _) => check_match::Slice(len),
- &var(_, _, def_id) => check_match::Variant(def_id)
+ &SliceLengthEqual(n) =>
+ check_match::Slice(n),
+ &SliceLengthGreaterOrEqual(before, after) =>
+ check_match::SliceWithSubslice(before, after),
+ &Variant(_, _, def_id) =>
+ check_match::Variant(def_id)
};
- let mut i = 0;
- let tcx = bcx.tcx();
let mcx = check_match::MatchCheckCtxt { tcx: bcx.tcx() };
- enter_match(bcx, dm, m, col, val, |pats| {
- let span = pats[col].span;
- let specialized = match pats[col].node {
- ast::PatVec(ref before, slice, ref after) => {
- let (lo, hi) = match *opt {
- vec_len(_, _, (lo, hi)) => (lo, hi),
- _ => tcx.sess.span_bug(span,
- "vec pattern but not vec opt")
- };
-
- let elems = match slice {
- Some(slice) if i >= lo && i <= hi => {
- let n = before.len() + after.len();
- let this_opt = vec_len(n, vec_len_ge(before.len()),
- (lo, hi));
- if opt_eq(tcx, &this_opt, opt) {
- let mut new_before = Vec::new();
- for pat in before.iter() {
- new_before.push(*pat);
- }
- new_before.push(slice);
- for pat in after.iter() {
- new_before.push(*pat);
- }
- Some(new_before)
- } else {
- None
- }
- }
- None if i >= lo && i <= hi => {
- let n = before.len();
- if opt_eq(tcx, &vec_len(n, vec_len_eq, (lo,hi)), opt) {
- let mut new_before = Vec::new();
- for pat in before.iter() {
- new_before.push(*pat);
- }
- Some(new_before)
- } else {
- None
- }
- }
- _ => None
- };
- elems.map(|head| head.append(pats.slice_to(col)).append(pats.slice_from(col + 1)))
- }
- _ => {
- check_match::specialize(&mcx, pats.as_slice(), &ctor, col, variant_size)
- }
- };
- i += 1;
- specialized
- })
+ enter_match(bcx, dm, m, col, val, |pats|
+ check_match::specialize(&mcx, pats.as_slice(), &ctor, col, variant_size)
+ )
}
// Returns the options in one column of matches. An option is something that
// needs to be conditionally matched at runtime; for example, the discriminant
// on a set of enum variants or a literal.
-fn get_options(bcx: &Block, m: &[Match], col: uint) -> Vec<Opt> {
+fn get_branches<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ m: &[Match], col: uint)
+ -> Vec<Opt<'blk, 'tcx>> {
let ccx = bcx.ccx();
- fn add_to_set(tcx: &ty::ctxt, set: &mut Vec<Opt>, val: Opt) {
- if set.iter().any(|l| opt_eq(tcx, l, &val)) {return;}
- set.push(val);
- }
- // Vector comparisons are special in that since the actual
- // conditions over-match, we need to be careful about them. This
- // means that in order to properly handle things in order, we need
- // to not always merge conditions.
- fn add_veclen_to_set(set: &mut Vec<Opt> , i: uint,
- len: uint, vlo: VecLenOpt) {
- match set.last() {
- // If the last condition in the list matches the one we want
- // to add, then extend its range. Otherwise, make a new
- // vec_len with a range just covering the new entry.
- Some(&vec_len(len2, vlo2, (start, end)))
- if len == len2 && vlo == vlo2 => {
- let length = set.len();
- *set.get_mut(length - 1) =
- vec_len(len, vlo, (start, end+1))
- }
- _ => set.push(vec_len(len, vlo, (i, i)))
+
+ fn add_to_set<'blk, 'tcx>(set: &mut Vec<Opt<'blk, 'tcx>>, opt: Opt<'blk, 'tcx>) {
+ if !set.contains(&opt) {
+ set.push(opt);
}
}
let cur = *br.pats.get(col);
match cur.node {
ast::PatLit(l) => {
- add_to_set(ccx.tcx(), &mut found, lit(l));
+ add_to_set(&mut found, ConstantValue(ConstantExpr(ccx.tcx(), l)));
}
ast::PatIdent(..) | ast::PatEnum(..) | ast::PatStruct(..) => {
// This is either an enum variant or a variable binding.
- let opt_def = ccx.tcx.def_map.borrow().find_copy(&cur.id);
+ let opt_def = ccx.tcx().def_map.borrow().find_copy(&cur.id);
match opt_def {
Some(def::DefVariant(enum_id, var_id, _)) => {
let variant = ty::enum_variant_with_id(ccx.tcx(), enum_id, var_id);
- add_to_set(ccx.tcx(), &mut found,
- var(variant.disr_val,
- adt::represent_node(bcx, cur.id), var_id));
+ add_to_set(&mut found, Variant(
+ variant.disr_val,
+ adt::represent_node(bcx, cur.id), var_id
+ ));
}
_ => {}
}
}
ast::PatRange(l1, l2) => {
- add_to_set(ccx.tcx(), &mut found, range(l1, l2));
+ add_to_set(&mut found, ConstantRange(
+ ConstantExpr(ccx.tcx(), l1),
+ ConstantExpr(ccx.tcx(), l2)
+ ));
}
- ast::PatVec(ref before, slice, ref after) => {
- let (len, vec_opt) = match slice {
- None => (before.len(), vec_len_eq),
- Some(_) => (before.len() + after.len(),
- vec_len_ge(before.len()))
- };
- add_veclen_to_set(&mut found, i, len, vec_opt);
+ ast::PatVec(ref before, None, ref after) => {
+ add_to_set(&mut found, SliceLengthEqual(before.len() + after.len()));
+ }
+ ast::PatVec(ref before, Some(_), ref after) => {
+ add_to_set(&mut found, SliceLengthGreaterOrEqual(before.len(), after.len()));
}
_ => {}
}
}
- return found;
+ found
}
-struct ExtractedBlock<'a> {
- vals: Vec<ValueRef> ,
- bcx: &'a Block<'a>,
+struct ExtractedBlock<'blk, 'tcx: 'blk> {
+ vals: Vec<ValueRef>,
+ bcx: Block<'blk, 'tcx>,
}
-fn extract_variant_args<'a>(
- bcx: &'a Block<'a>,
- repr: &adt::Repr,
- disr_val: ty::Disr,
- val: ValueRef)
- -> ExtractedBlock<'a> {
+fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ repr: &adt::Repr,
+ disr_val: ty::Disr,
+ val: ValueRef)
+ -> ExtractedBlock<'blk, 'tcx> {
let _icx = push_ctxt("match::extract_variant_args");
let args = Vec::from_fn(adt::num_args(repr, disr_val), |i| {
adt::trans_field_ptr(bcx, repr, val, disr_val, i)
ExtractedBlock { vals: args, bcx: bcx }
}
-fn match_datum(bcx: &Block,
- val: ValueRef,
- pat_id: ast::NodeId)
- -> Datum<Lvalue> {
+fn match_datum(val: ValueRef, left_ty: ty::t) -> Datum<Lvalue> {
/*!
* Helper for converting from the ValueRef that we pass around in
* the match code, which is always an lvalue, into a Datum. Eventually
* we should just pass around a Datum and be done with it.
*/
-
- let ty = node_id_type(bcx, pat_id);
- Datum::new(val, ty, Lvalue)
+ Datum::new(val, left_ty, Lvalue)
}
-
-fn extract_vec_elems<'a>(
- bcx: &'a Block<'a>,
+fn bind_subslice_pat(bcx: Block,
pat_id: ast::NodeId,
- elem_count: uint,
- slice: Option<uint>,
- val: ValueRef)
- -> ExtractedBlock<'a> {
- let _icx = push_ctxt("match::extract_vec_elems");
- let vec_datum = match_datum(bcx, val, pat_id);
- let (base, len) = vec_datum.get_vec_base_and_len(bcx);
+ val: ValueRef,
+ offset_left: uint,
+ offset_right: uint) -> ValueRef {
+ let _icx = push_ctxt("match::bind_subslice_pat");
let vec_ty = node_id_type(bcx, pat_id);
let vt = tvec::vec_types(bcx, ty::sequence_element_type(bcx.tcx(), ty::type_content(vec_ty)));
+ let vec_datum = match_datum(val, vec_ty);
+ let (base, len) = vec_datum.get_vec_base_and_len(bcx);
- let mut elems = Vec::from_fn(elem_count, |i| {
- match slice {
- None => GEPi(bcx, base, [i]),
- Some(n) if i < n => GEPi(bcx, base, [i]),
- Some(n) if i > n => {
- InBoundsGEP(bcx, base, [
- Sub(bcx, len,
- C_int(bcx.ccx(), (elem_count - i) as int))])
- }
- _ => unsafe { llvm::LLVMGetUndef(vt.llunit_ty.to_ref()) }
- }
- });
- if slice.is_some() {
- let n = slice.unwrap();
- let slice_byte_offset = Mul(bcx, vt.llunit_size, C_uint(bcx.ccx(), n));
- let slice_begin = tvec::pointer_add_byte(bcx, base, slice_byte_offset);
- let slice_len_offset = C_uint(bcx.ccx(), elem_count - 1u);
- let slice_len = Sub(bcx, len, slice_len_offset);
- let slice_ty = ty::mk_slice(bcx.tcx(),
- ty::ReStatic,
- ty::mt {ty: vt.unit_ty, mutbl: ast::MutImmutable});
- let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
- Store(bcx, slice_begin,
- GEPi(bcx, scratch.val, [0u, abi::slice_elt_base]));
- Store(bcx, slice_len, GEPi(bcx, scratch.val, [0u, abi::slice_elt_len]));
- *elems.get_mut(n) = scratch.val;
- }
+ let slice_byte_offset = Mul(bcx, vt.llunit_size, C_uint(bcx.ccx(), offset_left));
+ let slice_begin = tvec::pointer_add_byte(bcx, base, slice_byte_offset);
+ let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
+ let slice_len = Sub(bcx, len, slice_len_offset);
+ let slice_ty = ty::mk_slice(bcx.tcx(),
+ ty::ReStatic,
+ ty::mt {ty: vt.unit_ty, mutbl: ast::MutImmutable});
+ let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
+ Store(bcx, slice_begin,
+ GEPi(bcx, scratch.val, [0u, abi::slice_elt_base]));
+ Store(bcx, slice_len, GEPi(bcx, scratch.val, [0u, abi::slice_elt_len]));
+ scratch.val
+}
+fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ left_ty: ty::t,
+ before: uint,
+ after: uint,
+ val: ValueRef)
+ -> ExtractedBlock<'blk, 'tcx> {
+ let _icx = push_ctxt("match::extract_vec_elems");
+ let vec_datum = match_datum(val, left_ty);
+ let (base, len) = vec_datum.get_vec_base_and_len(bcx);
+ let mut elems = vec![];
+ elems.extend(range(0, before).map(|i| GEPi(bcx, base, [i])));
+ elems.extend(range(0, after).rev().map(|i| {
+ InBoundsGEP(bcx, base, [
+ Sub(bcx, len, C_uint(bcx.ccx(), i + 1))
+ ])
+ }));
ExtractedBlock { vals: elems, bcx: bcx }
}
any_pat!(m, col, ast::PatRegion(_))
}
-fn any_irrefutable_adt_pat(bcx: &Block, m: &[Match], col: uint) -> bool {
+fn any_irrefutable_adt_pat(tcx: &ty::ctxt, m: &[Match], col: uint) -> bool {
m.iter().any(|br| {
let pat = *br.pats.get(col);
match pat.node {
ast::PatTup(_) => true,
ast::PatStruct(..) => {
- match bcx.tcx().def_map.borrow().find(&pat.id) {
+ match tcx.def_map.borrow().find(&pat.id) {
Some(&def::DefVariant(..)) => false,
_ => true,
}
}
ast::PatEnum(..) | ast::PatIdent(_, _, None) => {
- match bcx.tcx().def_map.borrow().find(&pat.id) {
+ match tcx.def_map.borrow().find(&pat.id) {
Some(&def::DefFn(..)) |
Some(&def::DefStruct(..)) => true,
_ => false
}
/// What to do when the pattern match fails.
-enum FailureHandler<'a> {
+enum FailureHandler {
Infallible,
JumpToBasicBlock(BasicBlockRef),
Unreachable
}
-impl<'a> FailureHandler<'a> {
- fn is_infallible(&self) -> bool {
+impl FailureHandler {
+ fn is_fallible(&self) -> bool {
match *self {
- Infallible => true,
- _ => false
+ Infallible => false,
+ _ => true
}
}
- fn is_fallible(&self) -> bool {
- !self.is_infallible()
+ fn is_infallible(&self) -> bool {
+ !self.is_fallible()
}
- fn handle_fail(&self, bcx: &Block) {
+ fn handle_fail(&self, bcx: Block) {
match *self {
Infallible =>
- fail!("attempted to fail in infallible failure handler!"),
+ fail!("attempted to fail in an infallible failure handler!"),
JumpToBasicBlock(basic_block) =>
Br(bcx, basic_block),
Unreachable =>
return best_col;
}
-#[deriving(PartialEq)]
-pub enum branch_kind { no_branch, single, switch, compare, compare_vec_len }
-
// Compiles a comparison between two things.
-fn compare_values<'a>(
- cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: ty::t)
- -> Result<'a> {
- fn compare_str<'a>(cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: ty::t)
- -> Result<'a> {
+fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: ty::t)
+ -> Result<'blk, 'tcx> {
+ fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: ty::t)
+ -> Result<'blk, 'tcx> {
let did = langcall(cx,
None,
format!("comparison of `{}`",
}
}
-fn insert_lllocals<'a>(mut bcx: &'a Block<'a>, bindings_map: &BindingsMap,
- cs: Option<cleanup::ScopeId>)
- -> &'a Block<'a> {
+fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ bindings_map: &BindingsMap,
+ cs: Option<cleanup::ScopeId>)
+ -> Block<'blk, 'tcx> {
/*!
* For each binding in `data.bindings_map`, adds an appropriate entry into
* the `fcx.lllocals` map
bcx
}
-fn compile_guard<'a, 'b>(
- bcx: &'b Block<'b>,
- guard_expr: &ast::Expr,
- data: &ArmData,
- m: &'a [Match<'a, 'b>],
- vals: &[ValueRef],
- chk: &FailureHandler,
- has_genuine_default: bool)
- -> &'b Block<'b> {
+fn compile_guard<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ guard_expr: &ast::Expr,
+ data: &ArmData,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ vals: &[ValueRef],
+ chk: &FailureHandler,
+ has_genuine_default: bool)
+ -> Block<'blk, 'tcx> {
debug!("compile_guard(bcx={}, guard_expr={}, m={}, vals={})",
bcx.to_str(),
bcx.expr_to_string(guard_expr),
}
}
- return with_cond(bcx, Not(bcx, val), |bcx| {
+ with_cond(bcx, Not(bcx, val), |bcx| {
// Guard does not match: remove all bindings from the lllocals table
for (_, &binding_info) in data.bindings_map.iter() {
call_lifetime_end(bcx, binding_info.llmatch);
}
};
bcx
- });
+ })
}
-fn compile_submatch<'a, 'b>(
- bcx: &'b Block<'b>,
- m: &'a [Match<'a, 'b>],
- vals: &[ValueRef],
- chk: &FailureHandler,
- has_genuine_default: bool) {
+fn compile_submatch<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ vals: &[ValueRef],
+ chk: &FailureHandler,
+ has_genuine_default: bool) {
debug!("compile_submatch(bcx={}, m={}, vals={})",
bcx.to_str(),
m.repr(bcx.tcx()),
}
return;
}
- if m[0].pats.len() == 0u {
+
+ let col_count = m[0].pats.len();
+ if col_count == 0u {
let data = &m[0].data;
for &(ref ident, ref value_ptr) in m[0].bound_ptrs.iter() {
let llmatch = data.bindings_map.get(ident).llmatch;
}
}
-fn compile_submatch_continue<'a, 'b>(
- mut bcx: &'b Block<'b>,
- m: &'a [Match<'a, 'b>],
- vals: &[ValueRef],
- chk: &FailureHandler,
- col: uint,
- val: ValueRef,
- has_genuine_default: bool) {
+fn compile_submatch_continue<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ m: &'a [Match<'a, 'blk, 'tcx>],
+ vals: &[ValueRef],
+ chk: &FailureHandler,
+ col: uint,
+ val: ValueRef,
+ has_genuine_default: bool) {
let fcx = bcx.fcx;
let tcx = bcx.tcx();
let dm = &tcx.def_map;
};
let mcx = check_match::MatchCheckCtxt { tcx: bcx.tcx() };
- let adt_vals = if any_irrefutable_adt_pat(bcx, m, col) {
+ let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) {
let repr = adt::represent_type(bcx.ccx(), left_ty);
let arg_count = adt::num_args(&*repr, 0);
let field_vals: Vec<ValueRef> = std::iter::range(0, arg_count).map(|ix|
} else if any_uniq_pat(m, col) || any_region_pat(m, col) {
Some(vec!(Load(bcx, val)))
} else {
- None
+ match ty::get(left_ty).sty {
+ ty::ty_vec(_, Some(n)) => {
+ let args = extract_vec_elems(bcx, left_ty, n, 0, val);
+ Some(args.vals)
+ }
+ _ => None
+ }
};
match adt_vals {
}
// Decide what kind of branch we need
- let opts = get_options(bcx, m, col);
+ let opts = get_branches(bcx, m, col);
debug!("options={:?}", opts);
- let mut kind = no_branch;
+ let mut kind = NoBranch;
let mut test_val = val;
debug!("test_val={}", bcx.val_to_string(test_val));
if opts.len() > 0u {
match *opts.get(0) {
- var(_, ref repr, _) => {
+ ConstantValue(_) | ConstantRange(_, _) => {
+ test_val = load_if_immediate(bcx, val, left_ty);
+ kind = if ty::type_is_integral(left_ty) {
+ Switch
+ } else {
+ Compare
+ };
+ }
+ Variant(_, ref repr, _) => {
let (the_kind, val_opt) = adt::trans_switch(bcx, &**repr, val);
kind = the_kind;
for &tval in val_opt.iter() { test_val = tval; }
}
- lit(_) => {
- test_val = load_if_immediate(bcx, val, left_ty);
- kind = if ty::type_is_integral(left_ty) { switch }
- else { compare };
- }
- range(_, _) => {
- test_val = Load(bcx, val);
- kind = compare;
- },
- vec_len(..) => {
+ SliceLengthEqual(_) | SliceLengthGreaterOrEqual(_, _) => {
let (_, len) = tvec::get_base_and_len(bcx, val, left_ty);
test_val = len;
- kind = compare_vec_len;
+ kind = Switch;
}
}
}
for o in opts.iter() {
match *o {
- range(_, _) => { kind = compare; break }
+ ConstantRange(_, _) => { kind = Compare; break },
+ SliceLengthGreaterOrEqual(_, _) => { kind = CompareSliceLength; break },
_ => ()
}
}
let else_cx = match kind {
- no_branch | single => bcx,
+ NoBranch | Single => bcx,
_ => bcx.fcx.new_temp_block("match_else")
};
- let sw = if kind == switch {
- Switch(bcx, test_val, else_cx.llbb, opts.len())
+ let sw = if kind == Switch {
+ build::Switch(bcx, test_val, else_cx.llbb, opts.len())
} else {
C_int(ccx, 0) // Placeholder for when not using a switch
};
// for the current conditional branch.
let mut branch_chk = None;
let mut opt_cx = else_cx;
- if !exhaustive || i+1 < len {
+ if !exhaustive || i + 1 < len {
opt_cx = bcx.fcx.new_temp_block("match_case");
match kind {
- single => Br(bcx, opt_cx.llbb),
- switch => {
- match trans_opt(bcx, opt) {
- single_result(r) => {
- unsafe {
- llvm::LLVMAddCase(sw, r.val, opt_cx.llbb);
- bcx = r.bcx;
+ Single => Br(bcx, opt_cx.llbb),
+ Switch => {
+ match opt.trans(bcx) {
+ SingleResult(r) => {
+ AddCase(sw, r.val, opt_cx.llbb);
+ bcx = r.bcx;
+ }
+ _ => {
+ bcx.sess().bug(
+ "in compile_submatch, expected \
+ opt.trans() to return a SingleResult")
}
- }
- _ => {
- bcx.sess().bug(
- "in compile_submatch, expected \
- trans_opt to return a single_result")
- }
- }
- }
- compare | compare_vec_len => {
- let t = if kind == compare {
- left_ty
- } else {
- ty::mk_uint() // vector length
- };
- let Result {bcx: after_cx, val: matches} = {
- match trans_opt(bcx, opt) {
- single_result(Result {bcx, val}) => {
- compare_values(bcx, test_val, val, t)
- }
- lower_bound(Result {bcx, val}) => {
- compare_scalar_types(bcx, test_val, val, t, ast::BiGe)
- }
- range_result(Result {val: vbegin, ..},
- Result {bcx, val: vend}) => {
- let Result {bcx, val: llge} =
- compare_scalar_types(
- bcx, test_val,
- vbegin, t, ast::BiGe);
- let Result {bcx, val: llle} =
- compare_scalar_types(
- bcx, test_val, vend,
- t, ast::BiLe);
- Result::new(bcx, And(bcx, llge, llle))
- }
- }
- };
- bcx = fcx.new_temp_block("compare_next");
-
- // If none of the sub-cases match, and the current condition
- // is guarded or has multiple patterns, move on to the next
- // condition, if there is any, rather than falling back to
- // the default.
- let guarded = m[i].data.arm.guard.is_some();
- let multi_pats = m[i].pats.len() > 1;
- if i + 1 < len && (guarded || multi_pats || kind == compare_vec_len) {
- branch_chk = Some(JumpToBasicBlock(bcx.llbb));
- }
- CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb);
- }
- _ => ()
+ }
+ }
+ Compare | CompareSliceLength => {
+ let t = if kind == Compare {
+ left_ty
+ } else {
+ ty::mk_uint() // vector length
+ };
+ let Result { bcx: after_cx, val: matches } = {
+ match opt.trans(bcx) {
+ SingleResult(Result { bcx, val }) => {
+ compare_values(bcx, test_val, val, t)
+ }
+ RangeResult(Result { val: vbegin, .. },
+ Result { bcx, val: vend }) => {
+ let Result { bcx, val: llge } =
+ compare_scalar_types(
+ bcx, test_val,
+ vbegin, t, ast::BiGe);
+ let Result { bcx, val: llle } =
+ compare_scalar_types(
+ bcx, test_val, vend,
+ t, ast::BiLe);
+ Result::new(bcx, And(bcx, llge, llle))
+ }
+ LowerBound(Result { bcx, val }) => {
+ compare_scalar_types(bcx, test_val, val, t, ast::BiGe)
+ }
+ }
+ };
+ bcx = fcx.new_temp_block("compare_next");
+
+ // If none of the sub-cases match, and the current condition
+ // is guarded or has multiple patterns, move on to the next
+ // condition, if there is any, rather than falling back to
+ // the default.
+ let guarded = m[i].data.arm.guard.is_some();
+ let multi_pats = m[i].pats.len() > 1;
+ if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) {
+ branch_chk = Some(JumpToBasicBlock(bcx.llbb));
+ }
+ CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb);
+ }
+ _ => ()
}
- } else if kind == compare || kind == compare_vec_len {
+ } else if kind == Compare || kind == CompareSliceLength {
Br(bcx, else_cx.llbb);
}
let mut size = 0u;
let mut unpacked = Vec::new();
match *opt {
- var(disr_val, ref repr, _) => {
+ Variant(disr_val, ref repr, _) => {
let ExtractedBlock {vals: argvals, bcx: new_bcx} =
extract_variant_args(opt_cx, &**repr, disr_val, val);
size = argvals.len();
unpacked = argvals;
opt_cx = new_bcx;
}
- vec_len(n, vt, _) => {
- let (n, slice) = match vt {
- vec_len_ge(i) => (n + 1u, Some(i)),
- vec_len_eq => (n, None)
- };
- let args = extract_vec_elems(opt_cx, pat_id, n,
- slice, val);
+ SliceLengthEqual(len) => {
+ let args = extract_vec_elems(opt_cx, left_ty, len, 0, val);
size = args.vals.len();
unpacked = args.vals.clone();
opt_cx = args.bcx;
}
- lit(_) | range(_, _) => ()
+ SliceLengthGreaterOrEqual(before, after) => {
+ let args = extract_vec_elems(opt_cx, left_ty, before, after, val);
+ size = args.vals.len();
+ unpacked = args.vals.clone();
+ opt_cx = args.bcx;
+ }
+ ConstantValue(_) | ConstantRange(_, _) => ()
}
let opt_ms = enter_opt(opt_cx, pat_id, dm, m, opt, col, size, val);
let opt_vals = unpacked.append(vals_left.as_slice());
-
- match branch_chk {
- None => {
- compile_submatch(opt_cx,
- opt_ms.as_slice(),
- opt_vals.as_slice(),
- chk,
- has_genuine_default)
- }
- Some(branch_chk) => {
- compile_submatch(opt_cx,
- opt_ms.as_slice(),
- opt_vals.as_slice(),
- &branch_chk,
- has_genuine_default)
- }
- }
+ compile_submatch(opt_cx,
+ opt_ms.as_slice(),
+ opt_vals.as_slice(),
+ branch_chk.as_ref().unwrap_or(chk),
+ has_genuine_default);
}
// Compile the fall-through case, if any
- if !exhaustive && kind != single {
- if kind == compare || kind == compare_vec_len {
+ if !exhaustive && kind != Single {
+ if kind == Compare || kind == CompareSliceLength {
Br(bcx, else_cx.llbb);
}
match chk {
}
}
-pub fn trans_match<'a>(
- bcx: &'a Block<'a>,
- match_expr: &ast::Expr,
- discr_expr: &ast::Expr,
- arms: &[ast::Arm],
- dest: Dest)
- -> &'a Block<'a> {
+pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ match_expr: &ast::Expr,
+ discr_expr: &ast::Expr,
+ arms: &[ast::Arm],
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::trans_match");
trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest)
}
/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body`
-fn is_discr_reassigned(bcx: &Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
+fn is_discr_reassigned(bcx: Block, discr: &ast::Expr, body: &ast::Expr) -> bool {
match discr.node {
ast::ExprPath(..) => match bcx.def(discr.id) {
def::DefArg(vid, _) | def::DefBinding(vid, _) |
}
}
-fn create_bindings_map(bcx: &Block, pat: Gc<ast::Pat>,
+fn create_bindings_map(bcx: Block, pat: Gc<ast::Pat>,
discr: &ast::Expr, body: &ast::Expr) -> BindingsMap {
// Create the bindings map, which is a mapping from each binding name
// to an alloca() that will be the value for that local variable.
return bindings_map;
}
-fn trans_match_inner<'a>(scope_cx: &'a Block<'a>,
- match_id: ast::NodeId,
- discr_expr: &ast::Expr,
- arms: &[ast::Arm],
- dest: Dest) -> &'a Block<'a> {
+fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>,
+ match_id: ast::NodeId,
+ discr_expr: &ast::Expr,
+ arms: &[ast::Arm],
+ dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::trans_match_inner");
let fcx = scope_cx.fcx;
let mut bcx = scope_cx;
BindArgument
}
-pub fn store_local<'a>(bcx: &'a Block<'a>,
- local: &ast::Local)
- -> &'a Block<'a> {
+pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ local: &ast::Local)
+ -> Block<'blk, 'tcx> {
/*!
* Generates code for a local variable declaration like
* `let <pat>;` or `let <pat> = <opt_init_expr>`.
}
};
- fn create_dummy_locals<'a>(mut bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>)
- -> &'a Block<'a> {
+ fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>)
+ -> Block<'blk, 'tcx> {
// create dummy memory for the variables if we have no
// value to store into them immediately
let tcx = bcx.tcx();
}
}
-pub fn store_arg<'a>(mut bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>,
- arg: Datum<Rvalue>,
- arg_scope: cleanup::ScopeId)
- -> &'a Block<'a> {
+pub fn store_arg<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>,
+ arg: Datum<Rvalue>,
+ arg_scope: cleanup::ScopeId)
+ -> Block<'blk, 'tcx> {
/*!
* Generates code for argument patterns like `fn foo(<pat>: T)`.
* Creates entries in the `llargs` map for each of the bindings
/// Generates code for the pattern binding in a `for` loop like
/// `for <pat> in <expr> { ... }`.
-pub fn store_for_loop_binding<'a>(
- bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>,
- llvalue: ValueRef,
- body_scope: cleanup::ScopeId)
- -> &'a Block<'a> {
+pub fn store_for_loop_binding<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>,
+ llvalue: ValueRef,
+ body_scope: cleanup::ScopeId)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("match::store_for_loop_binding");
if simple_identifier(&*pat).is_some() {
bind_irrefutable_pat(bcx, pat, llvalue, BindLocal, body_scope)
}
-fn mk_binding_alloca<'a,A>(bcx: &'a Block<'a>,
- p_id: ast::NodeId,
- ident: &ast::Ident,
- binding_mode: IrrefutablePatternBindingMode,
- cleanup_scope: cleanup::ScopeId,
- arg: A,
- populate: |A, &'a Block<'a>, ValueRef, ty::t| -> &'a Block<'a>)
- -> &'a Block<'a> {
+fn mk_binding_alloca<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
+ p_id: ast::NodeId,
+ ident: &ast::Ident,
+ binding_mode: IrrefutablePatternBindingMode,
+ cleanup_scope: cleanup::ScopeId,
+ arg: A,
+ populate: |A, Block<'blk, 'tcx>, ValueRef, ty::t|
+ -> Block<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let var_ty = node_id_type(bcx, p_id);
// Allocate memory on stack for the binding.
bcx
}
-fn bind_irrefutable_pat<'a>(
- bcx: &'a Block<'a>,
- pat: Gc<ast::Pat>,
- val: ValueRef,
- binding_mode: IrrefutablePatternBindingMode,
- cleanup_scope: cleanup::ScopeId)
- -> &'a Block<'a> {
+fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ pat: Gc<ast::Pat>,
+ val: ValueRef,
+ binding_mode: IrrefutablePatternBindingMode,
+ cleanup_scope: cleanup::ScopeId)
+ -> Block<'blk, 'tcx> {
/*!
* A simple version of the pattern matching code that only handles
* irrefutable patterns. This is used in let/argument patterns,
bcx = bind_irrefutable_pat(bcx, inner, loaded_val, binding_mode, cleanup_scope);
}
ast::PatVec(ref before, ref slice, ref after) => {
- let extracted = extract_vec_elems(
- bcx, pat.id, before.len() + 1u + after.len(),
- slice.map(|_| before.len()), val
- );
+ let pat_ty = node_id_type(bcx, pat.id);
+ let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val);
+ match slice {
+ &Some(_) => {
+ extracted.vals.insert(
+ before.len(),
+ bind_subslice_pat(bcx, pat.id, val, before.len(), after.len())
+ );
+ }
+ &None => ()
+ }
bcx = before
- .iter().map(|v| Some(*v))
- .chain(Some(*slice).move_iter())
- .chain(after.iter().map(|v| Some(*v)))
- .zip(extracted.vals.iter())
- .fold(bcx, |bcx, (inner, elem)| {
- inner.map_or(bcx, |inner| {
- bind_irrefutable_pat(bcx, inner, *elem, binding_mode, cleanup_scope)
- })
- });
+ .iter()
+ .chain(slice.iter())
+ .chain(after.iter())
+ .zip(extracted.vals.move_iter())
+ .fold(bcx, |bcx, (&inner, elem)|
+ bind_irrefutable_pat(bcx, inner, elem, binding_mode, cleanup_scope)
+ );
}
ast::PatMac(..) => {
bcx.sess().span_bug(pat.span, "unexpanded macro");
/// Representations.
+#[deriving(Eq, PartialEq)]
pub enum Repr {
/// C-like enums; basically an int.
CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
}
/// For structs, and struct-like parts of anything fancier.
+#[deriving(Eq, PartialEq)]
pub struct Struct {
// If the struct is DST, then the size and alignment do not take into
// account the unsized fields of the struct.
* these, for places in trans where the `ty::t` isn't directly
* available.
*/
-pub fn represent_node(bcx: &Block, node: ast::NodeId) -> Rc<Repr> {
+pub fn represent_node(bcx: Block, node: ast::NodeId) -> Rc<Repr> {
represent_type(bcx.ccx(), node_id_type(bcx, node))
}
/// Decides how to represent a given type.
pub fn represent_type(cx: &CrateContext, t: ty::t) -> Rc<Repr> {
debug!("Representing: {}", ty_to_string(cx.tcx(), t));
- match cx.adt_reprs.borrow().find(&t) {
+ match cx.adt_reprs().borrow().find(&t) {
Some(repr) => return repr.clone(),
None => {}
}
let repr = Rc::new(represent_type_uncached(cx, t));
debug!("Represented as: {:?}", repr)
- cx.adt_reprs.borrow_mut().insert(t, repr.clone());
+ cx.adt_reprs().borrow_mut().insert(t, repr.clone());
repr
}
}
-#[deriving(Show)]
+#[deriving(Eq, PartialEq, Show)]
pub enum PointerField {
ThinPointer(uint),
FatPointer(uint, uint)
for (i, &ty) in self.tys.iter().enumerate() {
match ty::get(ty).sty {
- // &T/&mut T could either be a thin or fat pointer depending on T
- ty::ty_rptr(_, ty::mt { ty, .. }) => match ty::get(ty).sty {
+ // &T/&mut T/*T could either be a thin or fat pointer depending on T
+ ty::ty_rptr(_, ty::mt { ty, .. })
+ | ty::ty_ptr(ty::mt { ty, .. }) => match ty::get(ty).sty {
// &[T] and &str are a pointer and length pair
ty::ty_vec(_, None) | ty::ty_str => return Some(FatPointer(i, slice_elt_base)),
attempts = choose_shortest;
},
attr::ReprPacked => {
- cx.tcx.sess.bug("range_to_inttype: found ReprPacked on an enum");
+ cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
}
}
for &ity in attempts.iter() {
*
* This should ideally be less tightly tied to `_match`.
*/
-pub fn trans_switch(bcx: &Block, r: &Repr, scrutinee: ValueRef)
- -> (_match::branch_kind, Option<ValueRef>) {
+pub fn trans_switch(bcx: Block, r: &Repr, scrutinee: ValueRef)
+ -> (_match::BranchKind, Option<ValueRef>) {
match *r {
CEnum(..) | General(..) |
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
- (_match::switch, Some(trans_get_discr(bcx, r, scrutinee, None)))
+ (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None)))
}
Univariant(..) => {
- (_match::single, None)
+ (_match::Single, None)
}
}
}
/// Obtain the actual discriminant of a value.
-pub fn trans_get_discr(bcx: &Block, r: &Repr, scrutinee: ValueRef, cast_to: Option<Type>)
+pub fn trans_get_discr(bcx: Block, r: &Repr, scrutinee: ValueRef, cast_to: Option<Type>)
-> ValueRef {
let signed;
let val;
}
}
-fn struct_wrapped_nullable_bitdiscr(bcx: &Block, nndiscr: Disr, ptrfield: PointerField,
+fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, ptrfield: PointerField,
scrutinee: ValueRef) -> ValueRef {
let llptrptr = match ptrfield {
ThinPointer(field) => GEPi(bcx, scrutinee, [0, field]),
}
/// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: &Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
+fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
-> ValueRef {
let llty = ll_inttype(bcx.ccx(), ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
*
* This should ideally be less tightly tied to `_match`.
*/
-pub fn trans_case<'a>(bcx: &'a Block<'a>, r: &Repr, discr: Disr)
- -> _match::opt_result<'a> {
+pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
+ -> _match::OptResult<'blk, 'tcx> {
match *r {
CEnum(ity, _, _) => {
- _match::single_result(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
+ _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
discr as u64, true)))
}
General(ity, _, _) => {
- _match::single_result(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
+ _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
discr as u64, true)))
}
Univariant(..) => {
RawNullablePointer { .. } |
StructWrappedNullablePointer { .. } => {
assert!(discr == 0 || discr == 1);
- _match::single_result(Result::new(bcx, C_bool(bcx.ccx(), discr != 0)))
+ _match::SingleResult(Result::new(bcx, C_bool(bcx.ccx(), discr != 0)))
}
}
}
* Set the discriminant for a new value of the given case of the given
* representation.
*/
-pub fn trans_set_discr(bcx: &Block, r: &Repr, val: ValueRef, discr: Disr) {
+pub fn trans_set_discr(bcx: Block, r: &Repr, val: ValueRef, discr: Disr) {
match *r {
CEnum(ity, min, max) => {
assert_discr_in_range(ity, min, max, discr);
}
/// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr(bcx: &Block, r: &Repr, val: ValueRef, discr: Disr,
+pub fn trans_field_ptr(bcx: Block, r: &Repr, val: ValueRef, discr: Disr,
ix: uint) -> ValueRef {
// Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr
}
}
-pub fn struct_field_ptr(bcx: &Block, st: &Struct, val: ValueRef,
+pub fn struct_field_ptr(bcx: Block, st: &Struct, val: ValueRef,
ix: uint, needs_cast: bool) -> ValueRef {
let val = if needs_cast {
let ccx = bcx.ccx();
GEPi(bcx, val, [0, ix])
}
-pub fn fold_variants<'r, 'b>(
- bcx: &'b Block<'b>, r: &Repr, value: ValueRef,
- f: |&'b Block<'b>, &Struct, ValueRef|: 'r -> &'b Block<'b>
-) -> &'b Block<'b> {
+pub fn fold_variants<'blk, 'tcx>(
+ bcx: Block<'blk, 'tcx>, r: &Repr, value: ValueRef,
+ f: |Block<'blk, 'tcx>, &Struct, ValueRef| -> Block<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
match *r {
Univariant(ref st, _) => {
}
/// Access the struct drop flag, if present.
-pub fn trans_drop_flag_ptr<'b>(mut bcx: &'b Block<'b>, r: &Repr,
- val: ValueRef) -> datum::DatumBlock<'b, datum::Expr> {
+pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr, val: ValueRef)
+ -> datum::DatumBlock<'blk, 'tcx, datum::Expr> {
let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), ty::mk_bool());
match *r {
Univariant(ref st, true) => {
use syntax::ast;
// Take an inline assembly expression and splat it out via LLVM
-pub fn trans_inline_asm<'a>(bcx: &'a Block<'a>, ia: &ast::InlineAsm)
- -> &'a Block<'a> {
+pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
+ -> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let mut bcx = bcx;
let mut constraints = Vec::new();
use back::{link, abi};
use driver::config;
use driver::config::{NoDebugInfo, FullDebugInfo};
-use driver::driver::{CrateAnalysis, CrateTranslation};
+use driver::driver::{CrateAnalysis, CrateTranslation, ModuleTranslation};
use driver::session::Session;
use lint;
use llvm::{BasicBlockRef, ModuleRef, ValueRef, Vector, get_param};
use middle::trans::callee;
use middle::trans::cleanup::{CleanupMethods, ScopeId};
use middle::trans::cleanup;
-use middle::trans::common::{Block, C_bool, C_bytes, C_i32, C_integral, C_nil};
-use middle::trans::common::{C_null, C_struct, C_u64, C_u8, C_uint, C_undef};
+use middle::trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_integral, C_nil};
+use middle::trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_uint, C_undef};
use middle::trans::common::{CrateContext, ExternMap, FunctionContext};
use middle::trans::common::{NodeInfo, Result, SubstP, monomorphize_type};
use middle::trans::common::{node_id_type, param_substs, return_type_is_void};
use middle::trans::common::{type_is_zero_size, val_ty};
use middle::trans::common;
use middle::trans::consts;
+use middle::trans::context::SharedCrateContext;
use middle::trans::controlflow;
use middle::trans::datum;
use middle::trans::debuginfo;
use middle::trans::inline;
use middle::trans::intrinsic;
use middle::trans::machine;
-use middle::trans::machine::{llsize_of, llsize_of_real};
+use middle::trans::machine::{llsize_of, llsize_of_real, llalign_of_min};
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::tvec;
use libc::{c_uint, uint64_t};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
+use std::collections::HashSet;
use std::rc::Rc;
use std::{i8, i16, i32, i64};
use syntax::abi::{X86, X86_64, Arm, Mips, Mipsel, Rust, RustCall};
_InsnCtxt { _cannot_construct_outside_of_this_module: () }
}
-pub struct StatRecorder<'a> {
- ccx: &'a CrateContext,
+pub struct StatRecorder<'a, 'tcx: 'a> {
+ ccx: &'a CrateContext<'a, 'tcx>,
name: Option<String>,
start: u64,
istart: uint,
}
-impl<'a> StatRecorder<'a> {
- pub fn new(ccx: &'a CrateContext, name: String) -> StatRecorder<'a> {
+impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
+ pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
+ -> StatRecorder<'a, 'tcx> {
let start = if ccx.sess().trans_stats() {
time::precise_time_ns()
} else {
0
};
- let istart = ccx.stats.n_llvm_insns.get();
+ let istart = ccx.stats().n_llvm_insns.get();
StatRecorder {
ccx: ccx,
name: Some(name),
}
#[unsafe_destructor]
-impl<'a> Drop for StatRecorder<'a> {
+impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
fn drop(&mut self) {
if self.ccx.sess().trans_stats() {
let end = time::precise_time_ns();
let elapsed = ((end - self.start) / 1_000_000) as uint;
- let iend = self.ccx.stats.n_llvm_insns.get();
- self.ccx.stats.fn_stats.borrow_mut().push((self.name.take_unwrap(),
+ let iend = self.ccx.stats().n_llvm_insns.get();
+ self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
elapsed,
iend - self.istart));
- self.ccx.stats.n_fns.set(self.ccx.stats.n_fns.get() + 1);
+ self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
// Reset LLVM insn count to avoid compound costs.
- self.ccx.stats.n_llvm_insns.set(self.istart);
+ self.ccx.stats().n_llvm_insns.set(self.istart);
}
}
}
let llfn: ValueRef = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMGetOrInsertFunction(ccx.llmod, buf, ty.to_ref())
+ llvm::LLVMGetOrInsertFunction(ccx.llmod(), buf, ty.to_ref())
}
});
_ => {}
}
- if ccx.tcx.sess.opts.cg.no_redzone {
+ if ccx.tcx().sess.opts.cg.no_redzone {
unsafe {
llvm::LLVMAddFunctionAttribute(llfn,
llvm::FunctionIndex as c_uint,
}
fn get_extern_rust_fn(ccx: &CrateContext, fn_ty: ty::t, name: &str, did: ast::DefId) -> ValueRef {
- match ccx.externs.borrow().find_equiv(&name) {
+ match ccx.externs().borrow().find_equiv(&name) {
Some(n) => return *n,
None => ()
}
set_llvm_fn_attrs(attrs.as_slice(), f)
});
- ccx.externs.borrow_mut().insert(name.to_string(), f);
+ ccx.externs().borrow_mut().insert(name.to_string(), f);
f
}
let unboxed_closure_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_id);
match unboxed_closure.kind {
ty::FnUnboxedClosureKind => {
- ty::mk_imm_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnMutUnboxedClosureKind => {
- ty::mk_mut_rptr(&ccx.tcx, ty::ReStatic, unboxed_closure_type)
+ ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, unboxed_closure_type)
}
ty::FnOnceUnboxedClosureKind => unboxed_closure_type,
}
pub fn kind_for_unboxed_closure(ccx: &CrateContext, closure_id: ast::DefId)
-> ty::UnboxedClosureKind {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
unboxed_closures.get(&closure_id).kind
}
(f.sig.inputs.clone(), f.sig.output, f.abi, Some(Type::i8p(ccx)))
}
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = unboxed_closures.get(&closure_did);
let function_type = unboxed_closure.closure_type.clone();
let self_type = self_type_for_unboxed_closure(ccx, closure_did);
let llfty = type_of_rust_fn(ccx, env, inputs.as_slice(), output, abi);
debug!("decl_rust_fn(input count={},type={})",
inputs.len(),
- ccx.tn.type_to_string(llfty));
+ ccx.tn().type_to_string(llfty));
let llfn = decl_fn(ccx, name, llvm::CCallConv, llfty, output);
let attrs = get_fn_llvm_attributes(ccx, fn_ty);
// Returns a pointer to the body for the box. The box may be an opaque
// box. The result will be casted to the type of body_t, if it is statically
// known.
-pub fn at_box_body(bcx: &Block, body_t: ty::t, boxptr: ValueRef) -> ValueRef {
+pub fn at_box_body(bcx: Block, body_t: ty::t, boxptr: ValueRef) -> ValueRef {
let _icx = push_ctxt("at_box_body");
let ccx = bcx.ccx();
let ty = Type::at_box(ccx, type_of(ccx, body_t));
GEPi(bcx, boxptr, [0u, abi::box_field_body])
}
-fn require_alloc_fn(bcx: &Block, info_ty: ty::t, it: LangItem) -> ast::DefId {
+fn require_alloc_fn(bcx: Block, info_ty: ty::t, it: LangItem) -> ast::DefId {
match bcx.tcx().lang_items.require(it) {
Ok(id) => id,
Err(s) => {
// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.
-pub fn malloc_raw_dyn<'a>(bcx: &'a Block<'a>,
- llty_ptr: Type,
- info_ty: ty::t,
- size: ValueRef,
- align: ValueRef)
- -> Result<'a> {
+pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ llty_ptr: Type,
+ info_ty: ty::t,
+ size: ValueRef,
+ align: ValueRef)
+ -> Result<'blk, 'tcx> {
let _icx = push_ctxt("malloc_raw_exchange");
// Allocate space:
Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
-pub fn malloc_raw_dyn_managed<'a>(
- bcx: &'a Block<'a>,
- t: ty::t,
- alloc_fn: LangItem,
- size: ValueRef)
- -> Result<'a> {
- let _icx = push_ctxt("malloc_raw_managed");
+pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t, alloc_fn: LangItem)
+ -> Result<'blk, 'tcx> {
+ let _icx = push_ctxt("malloc_raw_dyn_proc");
+ let ccx = bcx.ccx();
+
+ let langcall = require_alloc_fn(bcx, t, alloc_fn);
+
+ // Grab the TypeRef type of ptr_ty.
+ let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
+ let ptr_llty = type_of(ccx, ptr_ty);
+
+ let llty = type_of(bcx.ccx(), t);
+ let size = llsize_of(bcx.ccx(), llty);
+ let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty) as uint);
+
+ // Allocate space:
+ let drop_glue = glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t));
+ let r = callee::trans_lang_call(
+ bcx,
+ langcall,
+ [
+ PointerCast(bcx, drop_glue, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to()),
+ size,
+ llalign
+ ],
+ None);
+ Result::new(r.bcx, PointerCast(r.bcx, r.val, ptr_llty))
+}
+
+
+pub fn malloc_raw_dyn_managed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ alloc_fn: LangItem,
+ size: ValueRef)
+ -> Result<'blk, 'tcx> {
+ let _icx = push_ctxt("malloc_raw_dyn_managed");
let ccx = bcx.ccx();
let langcall = require_alloc_fn(bcx, t, alloc_fn);
// Type descriptor and type glue stuff
pub fn get_tydesc(ccx: &CrateContext, t: ty::t) -> Rc<tydesc_info> {
- match ccx.tydescs.borrow().find(&t) {
+ match ccx.tydescs().borrow().find(&t) {
Some(inf) => return inf.clone(),
_ => { }
}
- ccx.stats.n_static_tydescs.set(ccx.stats.n_static_tydescs.get() + 1u);
+ ccx.stats().n_static_tydescs.set(ccx.stats().n_static_tydescs.get() + 1u);
let inf = Rc::new(glue::declare_tydesc(ccx, t));
- ccx.tydescs.borrow_mut().insert(t, inf.clone());
+ ccx.tydescs().borrow_mut().insert(t, inf.clone());
inf
}
// Double-check that we never ask LLVM to declare the same symbol twice. It
// silently mangles such symbols, breaking our linkage model.
pub fn note_unique_llvm_symbol(ccx: &CrateContext, sym: String) {
- if ccx.all_llvm_symbols.borrow().contains(&sym) {
+ if ccx.all_llvm_symbols().borrow().contains(&sym) {
ccx.sess().bug(format!("duplicate LLVM symbol: {}", sym).as_slice());
}
- ccx.all_llvm_symbols.borrow_mut().insert(sym);
+ ccx.all_llvm_symbols().borrow_mut().insert(sym);
}
substs: &subst::Substs)
-> ValueRef {
let _icx = push_ctxt("trans_res_dtor");
- let did = if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(ccx, did)
- } else {
- did
- };
+ let did = inline::maybe_instantiate_inline(ccx, did);
if !substs.types.is_empty() {
assert_eq!(did.krate, ast::LOCAL_CRATE);
let dtor_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[glue::get_drop_glue_type(ccx, t)], ty::mk_nil());
get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name.as_slice(),
llvm::CCallConv,
llty,
// Used only for creating scalar comparison glue.
pub enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, }
-pub fn compare_scalar_types<'a>(
- cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- t: ty::t,
- op: ast::BinOp)
- -> Result<'a> {
+pub fn compare_scalar_types<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ t: ty::t,
+ op: ast::BinOp)
+ -> Result<'blk, 'tcx> {
let f = |a| Result::new(cx, compare_scalar_values(cx, lhs, rhs, a, op));
match ty::get(t).sty {
ty::ty_nil => f(nil_type),
- ty::ty_bool | ty::ty_ptr(_) |
- ty::ty_uint(_) | ty::ty_char => f(unsigned_int),
+ ty::ty_bool | ty::ty_uint(_) | ty::ty_char => f(unsigned_int),
+ ty::ty_ptr(mt) if ty::type_is_sized(cx.tcx(), mt.ty) => f(unsigned_int),
ty::ty_int(_) => f(signed_int),
ty::ty_float(_) => f(floating_point),
// Should never get here, because t is scalar.
// A helper function to do the actual comparison of scalar values.
-pub fn compare_scalar_values<'a>(
- cx: &'a Block<'a>,
- lhs: ValueRef,
- rhs: ValueRef,
- nt: scalar_type,
- op: ast::BinOp)
- -> ValueRef {
+pub fn compare_scalar_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ nt: scalar_type,
+ op: ast::BinOp)
+ -> ValueRef {
let _icx = push_ctxt("compare_scalar_values");
- fn die(cx: &Block) -> ! {
+ fn die(cx: Block) -> ! {
cx.sess().bug("compare_scalar_values: must be a comparison operator");
}
match nt {
}
pub fn compare_simd_types(
- cx: &Block,
+ cx: Block,
lhs: ValueRef,
rhs: ValueRef,
t: ty::t,
}
}
-pub type val_and_ty_fn<'r,'b> =
- |&'b Block<'b>, ValueRef, ty::t|: 'r -> &'b Block<'b>;
+pub type val_and_ty_fn<'a, 'blk, 'tcx> =
+ |Block<'blk, 'tcx>, ValueRef, ty::t|: 'a -> Block<'blk, 'tcx>;
// Iterates through the elements of a structural type.
-pub fn iter_structural_ty<'r,
- 'b>(
- cx: &'b Block<'b>,
- av: ValueRef,
- t: ty::t,
- f: val_and_ty_fn<'r,'b>)
- -> &'b Block<'b> {
+pub fn iter_structural_ty<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ av: ValueRef,
+ t: ty::t,
+ f: val_and_ty_fn<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("iter_structural_ty");
- fn iter_variant<'r,
- 'b>(
- cx: &'b Block<'b>,
- repr: &adt::Repr,
- av: ValueRef,
- variant: &ty::VariantInfo,
- substs: &subst::Substs,
- f: val_and_ty_fn<'r,'b>)
- -> &'b Block<'b> {
+ fn iter_variant<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ repr: &adt::Repr,
+ av: ValueRef,
+ variant: &ty::VariantInfo,
+ substs: &subst::Substs,
+ f: val_and_ty_fn<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx();
let mut cx = cx;
// comparison know not to proceed when the discriminants differ.
match adt::trans_switch(cx, &*repr, av) {
- (_match::single, None) => {
+ (_match::Single, None) => {
cx = iter_variant(cx, &*repr, av, &**variants.get(0),
substs, f);
}
- (_match::switch, Some(lldiscrim_a)) => {
+ (_match::Switch, Some(lldiscrim_a)) => {
cx = f(cx, lldiscrim_a, ty::mk_int());
let unr_cx = fcx.new_temp_block("enum-iter-unr");
Unreachable(unr_cx);
variant.disr_val.to_string().as_slice())
.as_slice());
match adt::trans_case(cx, &*repr, variant.disr_val) {
- _match::single_result(r) => {
+ _match::SingleResult(r) => {
AddCase(llswitch, r.val, variant_cx.llbb)
}
_ => ccx.sess().unimpl("value from adt::trans_case \
return cx;
}
-pub fn cast_shift_expr_rhs<'a>(
- cx: &'a Block<'a>,
+pub fn cast_shift_expr_rhs(cx: Block,
op: ast::BinOp,
lhs: ValueRef,
rhs: ValueRef)
}
}
-pub fn fail_if_zero_or_overflows<'a>(
- cx: &'a Block<'a>,
- span: Span,
- divrem: ast::BinOp,
- lhs: ValueRef,
- rhs: ValueRef,
- rhs_t: ty::t)
- -> &'a Block<'a> {
+pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
+ cx: Block<'blk, 'tcx>,
+ span: Span,
+ divrem: ast::BinOp,
+ lhs: ValueRef,
+ rhs: ValueRef,
+ rhs_t: ty::t)
+ -> Block<'blk, 'tcx> {
let (zero_text, overflow_text) = if divrem == ast::BiDiv {
("attempted to divide by zero",
"attempted to divide with overflow")
}
_ => {
let llty = type_of(ccx, t);
- get_extern_const(&mut *ccx.externs.borrow_mut(),
- ccx.llmod,
+ get_extern_const(&mut *ccx.externs().borrow_mut(),
+ ccx.llmod(),
name.as_slice(),
llty)
}
}
}
-pub fn invoke<'a>(
- bcx: &'a Block<'a>,
- llfn: ValueRef,
- llargs: Vec<ValueRef> ,
- fn_ty: ty::t,
- call_info: Option<NodeInfo>,
- // FIXME(15064) is_lang_item is a horrible hack, please remove it
- // at the soonest opportunity.
- is_lang_item: bool)
- -> (ValueRef, &'a Block<'a>) {
+pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ llfn: ValueRef,
+ llargs: Vec<ValueRef> ,
+ fn_ty: ty::t,
+ call_info: Option<NodeInfo>,
+ // FIXME(15064) is_lang_item is a horrible hack, please remove it
+ // at the soonest opportunity.
+ is_lang_item: bool)
+ -> (ValueRef, Block<'blk, 'tcx>) {
let _icx = push_ctxt("invoke_");
if bcx.unreachable.get() {
return (C_null(Type::i8(bcx.ccx())), bcx);
}
}
-pub fn need_invoke(bcx: &Block) -> bool {
+pub fn need_invoke(bcx: Block) -> bool {
if bcx.sess().no_landing_pads() {
return false;
}
bcx.fcx.needs_invoke()
}
-pub fn load_if_immediate(cx: &Block, v: ValueRef, t: ty::t) -> ValueRef {
+pub fn load_if_immediate(cx: Block, v: ValueRef, t: ty::t) -> ValueRef {
let _icx = push_ctxt("load_if_immediate");
if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
return v;
}
-pub fn load_ty(cx: &Block, ptr: ValueRef, t: ty::t) -> ValueRef {
+pub fn load_ty(cx: Block, ptr: ValueRef, t: ty::t) -> ValueRef {
/*!
* Helper for loading values from memory. Does the necessary conversion if
* the in-memory type differs from the type used for SSA values. Also
} else if ty::type_is_bool(t) {
Trunc(cx, LoadRangeAssert(cx, ptr, 0, 2, llvm::False), Type::i1(cx.ccx()))
} else if ty::type_is_char(t) {
- // a char is a unicode codepoint, and so takes values from 0
+ // a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
} else {
}
}
-pub fn store_ty(cx: &Block, v: ValueRef, dst: ValueRef, t: ty::t) {
+pub fn store_ty(cx: Block, v: ValueRef, dst: ValueRef, t: ty::t) {
/*!
* Helper for storing values in memory. Does the necessary conversion if
* the in-memory type differs from the type used for SSA values.
};
}
-pub fn ignore_lhs(_bcx: &Block, local: &ast::Local) -> bool {
+pub fn ignore_lhs(_bcx: Block, local: &ast::Local) -> bool {
match local.pat.node {
ast::PatWild(ast::PatWildSingle) => true, _ => false
}
}
-pub fn init_local<'a>(bcx: &'a Block<'a>, local: &ast::Local)
- -> &'a Block<'a> {
+pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local)
+ -> Block<'blk, 'tcx> {
debug!("init_local(bcx={}, local.id={:?})", bcx.to_str(), local.id);
let _indenter = indenter();
let _icx = push_ctxt("init_local");
_match::store_local(bcx, local)
}
-pub fn raw_block<'a>(
- fcx: &'a FunctionContext<'a>,
- is_lpad: bool,
- llbb: BasicBlockRef)
- -> &'a Block<'a> {
- common::Block::new(llbb, is_lpad, None, fcx)
+pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
+ is_lpad: bool,
+ llbb: BasicBlockRef)
+ -> Block<'blk, 'tcx> {
+ common::BlockS::new(llbb, is_lpad, None, fcx)
}
-pub fn with_cond<'a>(
- bcx: &'a Block<'a>,
- val: ValueRef,
- f: |&'a Block<'a>| -> &'a Block<'a>)
- -> &'a Block<'a> {
+pub fn with_cond<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ f: |Block<'blk, 'tcx>| -> Block<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("with_cond");
let fcx = bcx.fcx;
let next_cx = fcx.new_temp_block("next");
next_cx
}
-pub fn call_lifetime_start(cx: &Block, ptr: ValueRef) {
+pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
if cx.sess().opts.optimize == config::No {
return;
}
Call(cx, lifetime_start, [llsize, ptr], None);
}
-pub fn call_lifetime_end(cx: &Block, ptr: ValueRef) {
+pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
if cx.sess().opts.optimize == config::No {
return;
}
Call(cx, lifetime_end, [llsize, ptr], None);
}
-pub fn call_memcpy(cx: &Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
+pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
let _icx = push_ctxt("call_memcpy");
let ccx = cx.ccx();
let key = match ccx.sess().targ_cfg.arch {
let memcpy = ccx.get_intrinsic(&key);
let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
- let size = IntCast(cx, n_bytes, ccx.int_type);
+ let size = IntCast(cx, n_bytes, ccx.int_type());
let align = C_i32(ccx, align as i32);
let volatile = C_bool(ccx, false);
Call(cx, memcpy, [dst_ptr, src_ptr, size, align, volatile], None);
}
-pub fn memcpy_ty(bcx: &Block, dst: ValueRef, src: ValueRef, t: ty::t) {
+pub fn memcpy_ty(bcx: Block, dst: ValueRef, src: ValueRef, t: ty::t) {
let _icx = push_ctxt("memcpy_ty");
let ccx = bcx.ccx();
if ty::type_is_structural(t) {
}
}
-pub fn zero_mem(cx: &Block, llptr: ValueRef, t: ty::t) {
+pub fn zero_mem(cx: Block, llptr: ValueRef, t: ty::t) {
if cx.unreachable.get() { return; }
let _icx = push_ctxt("zero_mem");
let bcx = cx;
b.call(llintrinsicfn, [llptr, llzeroval, size, align, volatile], None);
}
-pub fn alloc_ty(bcx: &Block, t: ty::t, name: &str) -> ValueRef {
+pub fn alloc_ty(bcx: Block, t: ty::t, name: &str) -> ValueRef {
let _icx = push_ctxt("alloc_ty");
let ccx = bcx.ccx();
let ty = type_of::type_of(ccx, t);
return val;
}
-pub fn alloca(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
let p = alloca_no_lifetime(cx, ty, name);
call_lifetime_start(cx, p);
p
}
-pub fn alloca_no_lifetime(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef {
let _icx = push_ctxt("alloca");
if cx.unreachable.get() {
unsafe {
Alloca(cx, ty, name)
}
-pub fn alloca_zeroed(cx: &Block, ty: ty::t, name: &str) -> ValueRef {
+pub fn alloca_zeroed(cx: Block, ty: ty::t, name: &str) -> ValueRef {
let llty = type_of::type_of(cx.ccx(), ty);
if cx.unreachable.get() {
unsafe {
p
}
-pub fn arrayalloca(cx: &Block, ty: Type, v: ValueRef) -> ValueRef {
+pub fn arrayalloca(cx: Block, ty: Type, v: ValueRef) -> ValueRef {
let _icx = push_ctxt("arrayalloca");
if cx.unreachable.get() {
unsafe {
//
// Be warned! You must call `init_function` before doing anything with the
// returned function context.
-pub fn new_fn_ctxt<'a>(ccx: &'a CrateContext,
- llfndecl: ValueRef,
- id: ast::NodeId,
- has_env: bool,
- output_type: ty::t,
- param_substs: &'a param_substs,
- sp: Option<Span>,
- block_arena: &'a TypedArena<Block<'a>>)
- -> FunctionContext<'a> {
+pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
+ llfndecl: ValueRef,
+ id: ast::NodeId,
+ has_env: bool,
+ output_type: ty::t,
+ param_substs: &'a param_substs,
+ sp: Option<Span>,
+ block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
+ -> FunctionContext<'a, 'tcx> {
param_substs.validate();
debug!("new_fn_ctxt(path={}, id={}, param_substs={})",
if id == -1 {
"".to_string()
} else {
- ccx.tcx.map.path_to_string(id).to_string()
+ ccx.tcx().map.path_to_string(id).to_string()
},
id, param_substs.repr(ccx.tcx()));
/// Performs setup on a newly created function, creating the entry scope block
/// and allocating space for the return pointer.
-pub fn init_function<'a>(fcx: &'a FunctionContext<'a>,
- skip_retptr: bool,
- output_type: ty::t) -> &'a Block<'a> {
+pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
+ skip_retptr: bool,
+ output_type: ty::t) -> Block<'a, 'tcx> {
let entry_bcx = fcx.new_temp_block("entry-block");
// Use a dummy instruction as the insertion point for all allocas.
/// datums.
///
/// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
-fn create_datums_for_fn_args_under_call_abi<
- 'a>(
- mut bcx: &'a Block<'a>,
+fn create_datums_for_fn_args_under_call_abi(
+ mut bcx: Block,
arg_scope: cleanup::CustomScopeIndex,
arg_tys: &[ty::t])
-> Vec<RvalueDatum> {
result
}
-fn copy_args_to_allocas<'a>(fcx: &FunctionContext<'a>,
- arg_scope: cleanup::CustomScopeIndex,
- bcx: &'a Block<'a>,
- args: &[ast::Arg],
- arg_datums: Vec<RvalueDatum> )
- -> &'a Block<'a> {
+fn copy_args_to_allocas<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
+ arg_scope: cleanup::CustomScopeIndex,
+ bcx: Block<'blk, 'tcx>,
+ args: &[ast::Arg],
+ arg_datums: Vec<RvalueDatum> )
+ -> Block<'blk, 'tcx> {
debug!("copy_args_to_allocas");
let _icx = push_ctxt("copy_args_to_allocas");
bcx
}
-fn copy_unboxed_closure_args_to_allocas<'a>(
- mut bcx: &'a Block<'a>,
+fn copy_unboxed_closure_args_to_allocas<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
arg_scope: cleanup::CustomScopeIndex,
args: &[ast::Arg],
arg_datums: Vec<RvalueDatum>,
monomorphized_arg_types: &[ty::t])
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("copy_unboxed_closure_args_to_allocas");
let arg_scope_id = cleanup::CustomScope(arg_scope);
// Ties up the llstaticallocas -> llloadenv -> lltop edges,
// and builds the return block.
-pub fn finish_fn<'a>(fcx: &'a FunctionContext<'a>,
- last_bcx: &'a Block<'a>,
- retty: ty::t) {
+pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
+ last_bcx: Block<'blk, 'tcx>,
+ retty: ty::t) {
let _icx = push_ctxt("finish_fn");
// This shouldn't need to recompute the return type,
}
// Builds the return block for a function.
-pub fn build_return_block(fcx: &FunctionContext, ret_cx: &Block, retty: ty::t) {
+pub fn build_return_block(fcx: &FunctionContext, ret_cx: Block, retty: ty::t) {
if fcx.llretslotptr.get().is_none() ||
(!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
return RetVoid(ret_cx);
abi: Abi,
has_env: bool,
is_unboxed_closure: IsUnboxedClosureFlag,
- maybe_load_env: <'a>|&'a Block<'a>, ScopeId|
- -> &'a Block<'a>) {
- ccx.stats.n_closures.set(ccx.stats.n_closures.get() + 1);
+ maybe_load_env: <'blk, 'tcx> |Block<'blk, 'tcx>, ScopeId|
+ -> Block<'blk, 'tcx>) {
+ ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
let _icx = push_ctxt("trans_closure");
set_uwtable(llfndecl);
ty_to_string(ccx.tcx(), *monomorphized_arg_type));
}
debug!("trans_closure: function lltype: {}",
- bcx.fcx.ccx.tn.val_to_string(bcx.fcx.llfn));
+ bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
let arg_datums = if abi != RustCall {
create_datums_for_fn_args(&fcx,
 param_substs: &param_substs,
id: ast::NodeId,
attrs: &[ast::Attribute]) {
- let _s = StatRecorder::new(ccx, ccx.tcx.map.path_to_string(id).to_string());
+ let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
let _icx = push_ctxt("trans_fn");
let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
llfndecl);
}
-pub fn trans_named_tuple_constructor<'a>(mut bcx: &'a Block<'a>,
- ctor_ty: ty::t,
- disr: ty::Disr,
- args: callee::CallArgs,
- dest: expr::Dest) -> Result<'a> {
+pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ ctor_ty: ty::t,
+ disr: ty::Disr,
+ args: callee::CallArgs,
+ dest: expr::Dest) -> Result<'blk, 'tcx> {
let ccx = bcx.fcx.ccx;
- let tcx = &ccx.tcx;
+ let tcx = ccx.tcx();
let result_ty = match ty::get(ctor_ty).sty {
ty::ty_bare_fn(ref bft) => bft.sig.output,
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
- let levels = ccx.tcx.node_lint_levels.borrow();
+ let levels = ccx.tcx().node_lint_levels.borrow();
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCE);
let lvlsrc = match levels.find(&(id, lint_id)) {
None | Some(&(lint::Allow, _)) => return,
}
}
-pub struct TransItemVisitor<'a> {
- pub ccx: &'a CrateContext,
+pub struct TransItemVisitor<'a, 'tcx: 'a> {
+ pub ccx: &'a CrateContext<'a, 'tcx>,
}
-impl<'a> Visitor<()> for TransItemVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for TransItemVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _:()) {
trans_item(self.ccx, i);
}
}
+/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
+pub enum ValueOrigin {
+ /// The LLVM `Value` is in this context because the corresponding item was
+ /// assigned to the current compilation unit.
+ OriginalTranslation,
+ /// The `Value`'s corresponding item was assigned to some other compilation
+ /// unit, but the `Value` was translated in this context anyway because the
+ /// item is marked `#[inline]`.
+ InlinedCopy,
+}
+
+/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
+/// If the `llval` is the direct translation of a specific Rust item, `id`
+/// should be set to the `NodeId` of that item. (This mapping should be
+/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
+/// `None`.) `llval_origin` indicates whether `llval` is the translation of an
+/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
+/// assigned to a different compilation unit.
+pub fn update_linkage(ccx: &CrateContext,
+ llval: ValueRef,
+ id: Option<ast::NodeId>,
+ llval_origin: ValueOrigin) {
+ match llval_origin {
+ InlinedCopy => {
+ // `llval` is a translation of an item defined in a separate
+ // compilation unit. This only makes sense if there are at least
+ // two compilation units.
+ assert!(ccx.sess().opts.cg.codegen_units > 1);
+ // `llval` is a copy of something defined elsewhere, so use
+ // `AvailableExternallyLinkage` to avoid duplicating code in the
+ // output.
+ llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
+ return;
+ },
+ OriginalTranslation => {},
+ }
+
+ match id {
+ Some(id) if ccx.reachable().contains(&id) => {
+ llvm::SetLinkage(llval, llvm::ExternalLinkage);
+ },
+ _ => {
+ // `id` does not refer to an item in `ccx.reachable`.
+ if ccx.sess().opts.cg.codegen_units > 1 {
+ llvm::SetLinkage(llval, llvm::ExternalLinkage);
+ } else {
+ llvm::SetLinkage(llval, llvm::InternalLinkage);
+ }
+ },
+ }
+}
+
pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
let _icx = push_ctxt("trans_item");
+
+ let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
+
match item.node {
ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
if !generics.is_type_parameterized() {
- let llfn = get_item_val(ccx, item.id);
- if abi != Rust {
- foreign::trans_rust_fn_with_foreign_abi(ccx,
- &**decl,
- &**body,
- item.attrs.as_slice(),
- llfn,
- ¶m_substs::empty(),
- item.id,
- None);
- } else {
- trans_fn(ccx,
- &**decl,
- &**body,
- llfn,
- ¶m_substs::empty(),
- item.id,
- item.attrs.as_slice());
+ let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
+ // Ignore `trans_everywhere` for cross-crate inlined items
+ // (`from_external`). `trans_item` will be called once for each
+ // compilation unit that references the item, so it will still get
+ // translated everywhere it's needed.
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ let llfn = get_item_val(ccx, item.id);
+ if abi != Rust {
+ foreign::trans_rust_fn_with_foreign_abi(ccx,
+ &**decl,
+ &**body,
+ item.attrs.as_slice(),
+ llfn,
+ ¶m_substs::empty(),
+ item.id,
+ None);
+ } else {
+ trans_fn(ccx,
+ &**decl,
+ &**body,
+ llfn,
+ ¶m_substs::empty(),
+ item.id,
+ item.attrs.as_slice());
+ }
+ update_linkage(ccx,
+ llfn,
+ Some(item.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
}
}
item.id);
}
ast::ItemMod(ref m) => {
- trans_mod(ccx, m);
+ trans_mod(&ccx.rotate(), m);
}
ast::ItemEnum(ref enum_definition, _) => {
enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
// Recurse on the expression to catch items in blocks
let mut v = TransItemVisitor{ ccx: ccx };
v.visit_expr(&**expr, ());
- consts::trans_const(ccx, m, item.id);
+
+ let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
+ for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
+ consts::trans_const(ccx, m, item.id);
+
+ let g = get_item_val(ccx, item.id);
+ update_linkage(ccx,
+ g,
+ Some(item.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
+ }
+
// Do static_assert checking. It can't really be done much earlier
// because we need to get the value of the bool out of LLVM
if attr::contains_name(item.attrs.as_slice(), "static_assert") {
static");
}
- let v = ccx.const_values.borrow().get_copy(&item.id);
+ let v = ccx.const_values().borrow().get_copy(&item.id);
unsafe {
if !(llvm::LLVMConstIntGetZExtValue(v) != 0) {
ccx.sess().span_fatal(expr.span, "static assertion failed");
fn finish_register_fn(ccx: &CrateContext, sp: Span, sym: String, node_id: ast::NodeId,
llfn: ValueRef) {
- ccx.item_symbols.borrow_mut().insert(node_id, sym);
-
- if !ccx.reachable.contains(&node_id) {
- llvm::SetLinkage(llfn, llvm::InternalLinkage);
- }
+ ccx.item_symbols().borrow_mut().insert(node_id, sym);
// The stack exhaustion lang item shouldn't have a split stack because
// otherwise it would continue to be exhausted (bad), and both it and the
// eh_personality functions need to be externally linkable.
let def = ast_util::local_def(node_id);
- if ccx.tcx.lang_items.stack_exhausted() == Some(def) {
+ if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
unset_split_stack(llfn);
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
- if ccx.tcx.lang_items.eh_personality() == Some(def) {
+ if ccx.tcx().lang_items.eh_personality() == Some(def) {
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
}
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
ty::ty_bare_fn(ref f) => (f.sig.clone(), f.abi, false),
ty::ty_unboxed_closure(closure_did, _) => {
- let unboxed_closures = ccx.tcx.unboxed_closures.borrow();
+ let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let ref function_type = unboxed_closures.get(&closure_did)
.closure_type;
fn create_entry_fn(ccx: &CrateContext,
rust_main: ValueRef,
use_start_lang_item: bool) {
- let llfty = Type::func([ccx.int_type, Type::i8p(ccx).ptr_to()],
- &ccx.int_type);
+ let llfty = Type::func([ccx.int_type(), Type::i8p(ccx).ptr_to()],
+ &ccx.int_type());
let llfn = decl_cdecl_fn(ccx, "main", llfty, ty::mk_nil());
let llbb = "top".with_c_str(|buf| {
unsafe {
- llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, buf)
}
});
- let bld = ccx.builder.b;
+ let bld = ccx.raw_builder();
unsafe {
llvm::LLVMPositionBuilderAtEnd(bld, llbb);
let (start_fn, args) = if use_start_lang_item {
- let start_def_id = match ccx.tcx.lang_items.require(StartFnLangItem) {
+ let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
Ok(id) => id,
Err(s) => { ccx.sess().fatal(s.as_slice()); }
};
fn exported_name(ccx: &CrateContext, id: ast::NodeId,
ty: ty::t, attrs: &[ast::Attribute]) -> String {
+ match ccx.external_srcs().borrow().find(&id) {
+ Some(&did) => {
+ let sym = csearch::get_symbol(&ccx.sess().cstore, did);
+ debug!("found item {} in other crate...", sym);
+ return sym;
+ }
+ None => {}
+ }
+
match attr::first_attr_value_str_by_name(attrs, "export_name") {
// Use provided name
Some(name) => name.get().to_string(),
- _ => ccx.tcx.map.with_path(id, |mut path| {
+ _ => ccx.tcx().map.with_path(id, |mut path| {
if attr::contains_name(attrs, "no_mangle") {
// Don't mangle
path.last().unwrap().to_string()
pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
debug!("get_item_val(id=`{:?}`)", id);
- match ccx.item_vals.borrow().find_copy(&id) {
+ match ccx.item_vals().borrow().find_copy(&id) {
Some(v) => return v,
None => {}
}
- let mut foreign = false;
- let item = ccx.tcx.map.get(id);
+ let item = ccx.tcx().map.get(id);
let val = match item {
ast_map::NodeItem(i) => {
let ty = ty::node_id_to_type(ccx.tcx(), i.id);
// using the current crate's name/version
// information in the hash of the symbol
debug!("making {}", sym);
- let (sym, is_local) = {
- match ccx.external_srcs.borrow().find(&i.id) {
- Some(&did) => {
- debug!("but found in other crate...");
- (csearch::get_symbol(&ccx.sess().cstore,
- did), false)
- }
- None => (sym, true)
- }
- };
+ let is_local = !ccx.external_srcs().borrow().contains_key(&id);
// We need the translated value here, because for enums the
// LLVM type is not fully determined by the Rust type.
let (v, inlineable, _) = consts::const_expr(ccx, &**expr, is_local);
- ccx.const_values.borrow_mut().insert(id, v);
+ ccx.const_values().borrow_mut().insert(id, v);
let mut inlineable = inlineable;
unsafe {
let llty = llvm::LLVMTypeOf(v);
let g = sym.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty, buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty, buf)
});
- if !ccx.reachable.contains(&id) {
- llvm::SetLinkage(g, llvm::InternalLinkage);
- }
-
// Apply the `unnamed_addr` attribute if
// requested
if !ast_util::static_has_significant_address(
if !inlineable {
debug!("{} not inlined", sym);
- ccx.non_inlineable_statics.borrow_mut()
+ ccx.non_inlineable_statics().borrow_mut()
.insert(id);
}
- ccx.item_symbols.borrow_mut().insert(i.id, sym);
+ ccx.item_symbols().borrow_mut().insert(i.id, sym);
g
}
}
}
ast_map::NodeForeignItem(ni) => {
- foreign = true;
-
match ni.node {
ast::ForeignItemFn(..) => {
- let abi = ccx.tcx.map.get_foreign_abi(id);
+ let abi = ccx.tcx().map.get_foreign_abi(id);
let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
let name = foreign::link_name(&*ni);
foreign::register_foreign_item_fn(ccx, abi, ty,
};
assert!(args.len() != 0u);
let ty = ty::node_id_to_type(ccx.tcx(), id);
- let parent = ccx.tcx.map.get_parent(id);
- let enm = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let enm = ccx.tcx().map.expect_item(parent);
let sym = exported_name(ccx,
id,
ty,
}
Some(ctor_id) => ctor_id,
};
- let parent = ccx.tcx.map.get_parent(id);
- let struct_item = ccx.tcx.map.expect_item(parent);
+ let parent = ccx.tcx().map.get_parent(id);
+ let struct_item = ccx.tcx().map.expect_item(parent);
let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
let sym = exported_name(ccx,
id,
}
};
- // foreign items (extern fns and extern statics) don't have internal
- // linkage b/c that doesn't quite make sense. Otherwise items can
- // have internal linkage if they're not reachable.
- if !foreign && !ccx.reachable.contains(&id) {
- llvm::SetLinkage(val, llvm::InternalLinkage);
- }
+ // All LLVM globals and functions are initially created as external-linkage
+ // declarations. If `trans_item`/`trans_fn` later turns the declaration
+ // into a definition, it adjusts the linkage then (using `update_linkage`).
+ //
+ // The exception is foreign items, which have their linkage set inside the
+ // call to `foreign::register_*` above. We don't touch the linkage after
+ // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
+ // other item translation functions do).
- ccx.item_vals.borrow_mut().insert(id, val);
+ ccx.item_vals().borrow_mut().insert(id, val);
val
}
pub fn p2i(ccx: &CrateContext, v: ValueRef) -> ValueRef {
unsafe {
- return llvm::LLVMConstPtrToInt(v, ccx.int_type.to_ref());
+ return llvm::LLVMConstPtrToInt(v, ccx.int_type().to_ref());
}
}
-pub fn crate_ctxt_to_encode_parms<'r>(cx: &'r CrateContext, ie: encoder::EncodeInlinedItem<'r>)
-                                      -> encoder::EncodeParams<'r> {
-    encoder::EncodeParams {
-        diag: cx.sess().diagnostic(),
-        tcx: cx.tcx(),
-        reexports2: &cx.exp_map2,
-        item_symbols: &cx.item_symbols,
-        non_inlineable_statics: &cx.non_inlineable_statics,
-        link_meta: &cx.link_meta,
-        cstore: &cx.sess().cstore,
-        encode_inlined_item: ie,
-        reachable: &cx.reachable,
-    }
+/// Builds the `encoder::EncodeParams` used for metadata encoding, borrowing
+/// the relevant tables from the shared (cross-codegen-unit) crate context.
+pub fn crate_ctxt_to_encode_parms<'a, 'tcx>(cx: &'a SharedCrateContext<'tcx>,
+                                            ie: encoder::EncodeInlinedItem<'a>)
+                                            -> encoder::EncodeParams<'a, 'tcx> {
+    encoder::EncodeParams {
+        diag: cx.sess().diagnostic(),
+        tcx: cx.tcx(),
+        reexports2: cx.exp_map2(),
+        item_symbols: cx.item_symbols(),
+        non_inlineable_statics: cx.non_inlineable_statics(),
+        link_meta: cx.link_meta(),
+        cstore: &cx.sess().cstore,
+        encode_inlined_item: ie,
+        reachable: cx.reachable(),
+    }
}
-pub fn write_metadata(cx: &CrateContext, krate: &ast::Crate) -> Vec<u8> {
+pub fn write_metadata(cx: &SharedCrateContext, krate: &ast::Crate) -> Vec<u8> {
use flate;
let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
cx.sess().fatal("failed to compress metadata")
}
}.as_slice());
- let llmeta = C_bytes(cx, compressed.as_slice());
- let llconst = C_struct(cx, [llmeta], false);
+ let llmeta = C_bytes_in_context(cx.metadata_llcx(), compressed.as_slice());
+ let llconst = C_struct_in_context(cx.metadata_llcx(), [llmeta], false);
let name = format!("rust_metadata_{}_{}",
- cx.link_meta.crate_name,
- cx.link_meta.crate_hash);
+ cx.link_meta().crate_name,
+ cx.link_meta().crate_hash);
let llglobal = name.with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(cx.metadata_llmod, val_ty(llconst).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf)
}
});
unsafe {
return metadata;
}
+/// Find any symbols that are defined in one compilation unit, but not declared
+/// in any other compilation unit. Give these symbols internal linkage.
+fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<String>) {
+    use std::c_str::CString;
+
+    unsafe {
+        // Names of every symbol that some compilation unit merely *uses*
+        // (an external declaration or an available_externally copy).
+        let mut declared = HashSet::new();
+
+        // Helpers building iterators over a module's globals / functions.
+        let iter_globals = |llmod| {
+            ValueIter {
+                cur: llvm::LLVMGetFirstGlobal(llmod),
+                step: llvm::LLVMGetNextGlobal,
+            }
+        };
+
+        let iter_functions = |llmod| {
+            ValueIter {
+                cur: llvm::LLVMGetFirstFunction(llmod),
+                step: llvm::LLVMGetNextFunction,
+            }
+        };
+
+        // Collect all external declarations in all compilation units.
+        for ccx in cx.iter() {
+            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+                let linkage = llvm::LLVMGetLinkage(val);
+                // We only care about external declarations (not definitions)
+                // and available_externally definitions.
+                if !(linkage == llvm::ExternalLinkage as c_uint &&
+                     llvm::LLVMIsDeclaration(val) != 0) &&
+                   !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
+                    continue
+                }
+
+                let name = CString::new(llvm::LLVMGetValueName(val), false);
+                declared.insert(name);
+            }
+        }
+
+        // Examine each external definition. If the definition is not used in
+        // any other compilation unit, and is not reachable from other crates,
+        // then give it internal linkage.
+        for ccx in cx.iter() {
+            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
+                // We only care about external definitions.
+                if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
+                     llvm::LLVMIsDeclaration(val) == 0) {
+                    continue
+                }
+
+                let name = CString::new(llvm::LLVMGetValueName(val), false);
+                if !declared.contains(&name) &&
+                   !reachable.contains_equiv(&name.as_str().unwrap()) {
+                    llvm::SetLinkage(val, llvm::InternalLinkage);
+                }
+            }
+        }
+    }
+
+
+    // Iterator over an LLVM value list (globals or functions), stepping via
+    // the supplied LLVM-C "get next" function until the null sentinel.
+    struct ValueIter {
+        cur: ValueRef,
+        step: unsafe extern "C" fn(ValueRef) -> ValueRef,
+    }
+
+    impl Iterator<ValueRef> for ValueIter {
+        fn next(&mut self) -> Option<ValueRef> {
+            let old = self.cur;
+            if !old.is_null() {
+                self.cur = unsafe { (self.step)(old) };
+                Some(old)
+            } else {
+                None
+            }
+        }
+    }
+}
+
pub fn trans_crate(krate: ast::Crate,
analysis: CrateAnalysis) -> (ty::ctxt, CrateTranslation) {
let CrateAnalysis { ty_cx: tcx, exp_map2, reachable, name, .. } = analysis;
let link_meta = link::build_link_meta(&tcx.sess, &krate, name);
- // Append ".rs" to crate name as LLVM module identifier.
- //
- // LLVM code generator emits a ".file filename" directive
- // for ELF backends. Value of the "filename" is set as the
- // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
- // crashes if the module identifier is same as other symbols
- // such as a function name in the module.
- // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
- let mut llmod_id = link_meta.crate_name.clone();
- llmod_id.push_str(".rs");
-
- let ccx = CrateContext::new(llmod_id.as_slice(), tcx, exp_map2,
- Sha256::new(), link_meta, reachable);
-
- // First, verify intrinsics.
- intrinsic::check_intrinsics(&ccx);
-
- // Next, translate the module.
+ let codegen_units = tcx.sess.opts.cg.codegen_units;
+ let shared_ccx = SharedCrateContext::new(link_meta.crate_name.as_slice(),
+ codegen_units,
+ tcx,
+ exp_map2,
+ Sha256::new(),
+ link_meta.clone(),
+ reachable);
+
{
- let _icx = push_ctxt("text");
- trans_mod(&ccx, &krate.module);
+ let ccx = shared_ccx.get_ccx(0);
+
+ // First, verify intrinsics.
+ intrinsic::check_intrinsics(&ccx);
+
+ // Next, translate the module.
+ {
+ let _icx = push_ctxt("text");
+ trans_mod(&ccx, &krate.module);
+ }
}
- glue::emit_tydescs(&ccx);
- if ccx.sess().opts.debuginfo != NoDebugInfo {
- debuginfo::finalize(&ccx);
+ for ccx in shared_ccx.iter() {
+ glue::emit_tydescs(&ccx);
+ if ccx.sess().opts.debuginfo != NoDebugInfo {
+ debuginfo::finalize(&ccx);
+ }
}
// Translate the metadata.
- let metadata = write_metadata(&ccx, &krate);
- if ccx.sess().trans_stats() {
+ let metadata = write_metadata(&shared_ccx, &krate);
+
+ if shared_ccx.sess().trans_stats() {
+ let stats = shared_ccx.stats();
println!("--- trans stats ---");
- println!("n_static_tydescs: {}", ccx.stats.n_static_tydescs.get());
- println!("n_glues_created: {}", ccx.stats.n_glues_created.get());
- println!("n_null_glues: {}", ccx.stats.n_null_glues.get());
- println!("n_real_glues: {}", ccx.stats.n_real_glues.get());
-
- println!("n_fns: {}", ccx.stats.n_fns.get());
- println!("n_monos: {}", ccx.stats.n_monos.get());
- println!("n_inlines: {}", ccx.stats.n_inlines.get());
- println!("n_closures: {}", ccx.stats.n_closures.get());
+ println!("n_static_tydescs: {}", stats.n_static_tydescs.get());
+ println!("n_glues_created: {}", stats.n_glues_created.get());
+ println!("n_null_glues: {}", stats.n_null_glues.get());
+ println!("n_real_glues: {}", stats.n_real_glues.get());
+
+ println!("n_fns: {}", stats.n_fns.get());
+ println!("n_monos: {}", stats.n_monos.get());
+ println!("n_inlines: {}", stats.n_inlines.get());
+ println!("n_closures: {}", stats.n_closures.get());
println!("fn stats:");
- ccx.stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
+ stats.fn_stats.borrow_mut().sort_by(|&(_, _, insns_a), &(_, _, insns_b)| {
insns_b.cmp(&insns_a)
});
- for tuple in ccx.stats.fn_stats.borrow().iter() {
+ for tuple in stats.fn_stats.borrow().iter() {
match *tuple {
(ref name, ms, insns) => {
println!("{} insns, {} ms, {}", insns, ms, *name);
}
}
}
- if ccx.sess().count_llvm_insns() {
- for (k, v) in ccx.stats.llvm_insns.borrow().iter() {
+ if shared_ccx.sess().count_llvm_insns() {
+ for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
println!("{:7u} {}", *v, *k);
}
}
- let llcx = ccx.llcx;
- let link_meta = ccx.link_meta.clone();
- let llmod = ccx.llmod;
+ let modules = shared_ccx.iter()
+ .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
+ .collect();
- let mut reachable: Vec<String> = ccx.reachable.iter().filter_map(|id| {
- ccx.item_symbols.borrow().find(id).map(|s| s.to_string())
+ let mut reachable: Vec<String> = shared_ccx.reachable().iter().filter_map(|id| {
+ shared_ccx.item_symbols().borrow().find(id).map(|s| s.to_string())
}).collect();
// For the purposes of LTO, we add to the reachable set all of the upstream
// reachable extern fns. These functions are all part of the public ABI of
// the final product, so LTO needs to preserve them.
- ccx.sess().cstore.iter_crate_data(|cnum, _| {
- let syms = csearch::get_reachable_extern_fns(&ccx.sess().cstore, cnum);
+ shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
+ let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
reachable.extend(syms.move_iter().map(|did| {
- csearch::get_symbol(&ccx.sess().cstore, did)
+ csearch::get_symbol(&shared_ccx.sess().cstore, did)
}));
});
// referenced from rt/rust_try.ll
reachable.push("rust_eh_personality_catch".to_string());
- let metadata_module = ccx.metadata_llmod;
- let formats = ccx.tcx.dependency_formats.borrow().clone();
+ if codegen_units > 1 {
+ internalize_symbols(&shared_ccx, &reachable.iter().map(|x| x.clone()).collect());
+ }
+
+ let metadata_module = ModuleTranslation {
+ llcx: shared_ccx.metadata_llcx(),
+ llmod: shared_ccx.metadata_llmod(),
+ };
+ let formats = shared_ccx.tcx().dependency_formats.borrow().clone();
let no_builtins = attr::contains_name(krate.attrs.as_slice(), "no_builtins");
- (ccx.tcx, CrateTranslation {
- context: llcx,
- module: llmod,
- link: link_meta,
+ let translation = CrateTranslation {
+ modules: modules,
metadata_module: metadata_module,
+ link: link_meta,
metadata: metadata,
reachable: reachable,
crate_formats: formats,
no_builtins: no_builtins,
- })
+ };
+
+ (shared_ccx.take_tcx(), translation)
}
// except according to those terms.
#![allow(dead_code)] // FFI wrappers
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
use llvm;
use llvm::{CallConv, AtomicBinOp, AtomicOrdering, AsmDialect, AttrBuilder};
use libc::{c_uint, c_ulonglong, c_char};
-pub fn terminate(cx: &Block, _: &str) {
+// Marks `cx` as terminated. The string argument is ignored at runtime; only
+// `cx.to_str()` is logged via `debug!`.
+pub fn terminate(cx: Block, _: &str) {
    debug!("terminate({})", cx.to_str());
    cx.terminated.set(true);
}
-pub fn check_not_terminated(cx: &Block) {
+// Fails if a terminator has already been emitted for this block.
+pub fn check_not_terminated(cx: Block) {
    if cx.terminated.get() {
        fail!("already terminated!");
    }
}
-pub fn B<'a>(cx: &'a Block) -> Builder<'a> {
+pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> {
let b = cx.fcx.ccx.builder();
b.position_at_end(cx.llbb);
b
// for (fail/break/return statements, call to diverging functions, etc), and
// further instructions to the block should simply be ignored.
-pub fn RetVoid(cx: &Block) {
+// Emits `ret void` and marks the block terminated; no-op when the block is
+// already unreachable.
+pub fn RetVoid(cx: Block) {
    if cx.unreachable.get() { return; }
    check_not_terminated(cx);
    terminate(cx, "RetVoid");
    B(cx).ret_void();
}
-pub fn Ret(cx: &Block, v: ValueRef) {
+// Emits `ret v` and marks the block terminated; no-op when unreachable.
+pub fn Ret(cx: Block, v: ValueRef) {
    if cx.unreachable.get() { return; }
    check_not_terminated(cx);
    terminate(cx, "Ret");
    B(cx).ret(v);
}
-pub fn AggregateRet(cx: &Block, ret_vals: &[ValueRef]) {
+// Emits an aggregate return of `ret_vals`; no-op when unreachable.
+pub fn AggregateRet(cx: Block, ret_vals: &[ValueRef]) {
    if cx.unreachable.get() { return; }
    check_not_terminated(cx);
    terminate(cx, "AggregateRet");
    B(cx).aggregate_ret(ret_vals);
}
-pub fn Br(cx: &Block, dest: BasicBlockRef) {
+// Emits an unconditional branch to `dest`; no-op when unreachable.
+pub fn Br(cx: Block, dest: BasicBlockRef) {
    if cx.unreachable.get() { return; }
    check_not_terminated(cx);
    terminate(cx, "Br");
    B(cx).br(dest);
}
-pub fn CondBr(cx: &Block,
+pub fn CondBr(cx: Block,
if_: ValueRef,
then: BasicBlockRef,
else_: BasicBlockRef) {
B(cx).cond_br(if_, then, else_);
}
-pub fn Switch(cx: &Block, v: ValueRef, else_: BasicBlockRef, num_cases: uint)
+pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: uint)
-> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
check_not_terminated(cx);
}
}
-pub fn IndirectBr(cx: &Block, addr: ValueRef, num_dests: uint) {
+pub fn IndirectBr(cx: Block, addr: ValueRef, num_dests: uint) {
if cx.unreachable.get() { return; }
check_not_terminated(cx);
terminate(cx, "IndirectBr");
B(cx).indirect_br(addr, num_dests);
}
-pub fn Invoke(cx: &Block,
+pub fn Invoke(cx: Block,
fn_: ValueRef,
args: &[ValueRef],
then: BasicBlockRef,
B(cx).invoke(fn_, args, then, catch, attributes)
}
-pub fn Unreachable(cx: &Block) {
+pub fn Unreachable(cx: Block) {
if cx.unreachable.get() {
return
}
}
/* Arithmetic */
-pub fn Add(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Add(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).add(lhs, rhs)
}
-pub fn NSWAdd(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NSWAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nswadd(lhs, rhs)
}
-pub fn NUWAdd(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NUWAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nuwadd(lhs, rhs)
}
-pub fn FAdd(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FAdd(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fadd(lhs, rhs)
}
-pub fn Sub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Sub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).sub(lhs, rhs)
}
-pub fn NSWSub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NSWSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nswsub(lhs, rhs)
}
-pub fn NUWSub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NUWSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nuwsub(lhs, rhs)
}
-pub fn FSub(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FSub(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fsub(lhs, rhs)
}
-pub fn Mul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Mul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).mul(lhs, rhs)
}
-pub fn NSWMul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NSWMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nswmul(lhs, rhs)
}
-pub fn NUWMul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn NUWMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).nuwmul(lhs, rhs)
}
-pub fn FMul(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FMul(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fmul(lhs, rhs)
}
-pub fn UDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn UDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).udiv(lhs, rhs)
}
-pub fn SDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn SDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).sdiv(lhs, rhs)
}
-pub fn ExactSDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn ExactSDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).exactsdiv(lhs, rhs)
}
-pub fn FDiv(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FDiv(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).fdiv(lhs, rhs)
}
-pub fn URem(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn URem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).urem(lhs, rhs)
}
-pub fn SRem(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn SRem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).srem(lhs, rhs)
}
-pub fn FRem(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn FRem(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).frem(lhs, rhs)
}
-pub fn Shl(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Shl(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).shl(lhs, rhs)
}
-pub fn LShr(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn LShr(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).lshr(lhs, rhs)
}
-pub fn AShr(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn AShr(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).ashr(lhs, rhs)
}
-pub fn And(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn And(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).and(lhs, rhs)
}
-pub fn Or(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Or(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).or(lhs, rhs)
}
-pub fn Xor(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn Xor(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).xor(lhs, rhs)
}
-pub fn BinOp(cx: &Block, op: Opcode, lhs: ValueRef, rhs: ValueRef)
+pub fn BinOp(cx: Block, op: Opcode, lhs: ValueRef, rhs: ValueRef)
-> ValueRef {
if cx.unreachable.get() { return _Undef(lhs); }
B(cx).binop(op, lhs, rhs)
}
-pub fn Neg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn Neg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).neg(v)
}
-pub fn NSWNeg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn NSWNeg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).nswneg(v)
}
-pub fn NUWNeg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn NUWNeg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).nuwneg(v)
}
-pub fn FNeg(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn FNeg(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).fneg(v)
}
-pub fn Not(cx: &Block, v: ValueRef) -> ValueRef {
+pub fn Not(cx: Block, v: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(v); }
B(cx).not(v)
}
/* Memory */
-pub fn Malloc(cx: &Block, ty: Type) -> ValueRef {
+pub fn Malloc(cx: Block, ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
}
-pub fn ArrayMalloc(cx: &Block, ty: Type, val: ValueRef) -> ValueRef {
+pub fn ArrayMalloc(cx: Block, ty: Type, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
}
-pub fn Alloca(cx: &Block, ty: Type, name: &str) -> ValueRef {
+pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
AllocaFcx(cx.fcx, ty, name)
b.alloca(ty, name)
}
-pub fn ArrayAlloca(cx: &Block, ty: Type, val: ValueRef) -> ValueRef {
+pub fn ArrayAlloca(cx: Block, ty: Type, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); }
let b = cx.fcx.ccx.builder();
}
}
-pub fn Free(cx: &Block, pointer_val: ValueRef) {
+pub fn Free(cx: Block, pointer_val: ValueRef) {
if cx.unreachable.get() { return; }
B(cx).free(pointer_val)
}
-pub fn Load(cx: &Block, pointer_val: ValueRef) -> ValueRef {
+pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
if cx.unreachable.get() {
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
return llvm::LLVMGetUndef(eltty.to_ref());
}
}
}
-pub fn VolatileLoad(cx: &Block, pointer_val: ValueRef) -> ValueRef {
+pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn AtomicLoad(cx: &Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
+pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
if cx.unreachable.get() {
- return llvm::LLVMGetUndef(ccx.int_type.to_ref());
+ return llvm::LLVMGetUndef(ccx.int_type().to_ref());
}
B(cx).atomic_load(pointer_val, order)
}
}
-pub fn LoadRangeAssert(cx: &Block, pointer_val: ValueRef, lo: c_ulonglong,
+pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: c_ulonglong,
hi: c_ulonglong, signed: llvm::Bool) -> ValueRef {
if cx.unreachable.get() {
let ccx = cx.fcx.ccx;
let eltty = if ty.kind() == llvm::Array {
ty.element_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
unsafe {
llvm::LLVMGetUndef(eltty.to_ref())
}
}
-pub fn Store(cx: &Block, val: ValueRef, ptr: ValueRef) {
+pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) {
if cx.unreachable.get() { return; }
B(cx).store(val, ptr)
}
-pub fn VolatileStore(cx: &Block, val: ValueRef, ptr: ValueRef) {
+pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) {
if cx.unreachable.get() { return; }
B(cx).volatile_store(val, ptr)
}
-pub fn AtomicStore(cx: &Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
+pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
if cx.unreachable.get() { return; }
B(cx).atomic_store(val, ptr, order)
}
-pub fn GEP(cx: &Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
+pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
// Simple wrapper around GEP that takes an array of ints and wraps them
// in C_i32()
#[inline]
-pub fn GEPi(cx: &Block, base: ValueRef, ixs: &[uint]) -> ValueRef {
+pub fn GEPi(cx: Block, base: ValueRef, ixs: &[uint]) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
}
}
-pub fn InBoundsGEP(cx: &Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
+pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
}
}
-pub fn StructGEP(cx: &Block, pointer: ValueRef, idx: uint) -> ValueRef {
+pub fn StructGEP(cx: Block, pointer: ValueRef, idx: uint) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref());
}
}
-pub fn GlobalString(cx: &Block, _str: *const c_char) -> ValueRef {
+pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
}
-pub fn GlobalStringPtr(cx: &Block, _str: *const c_char) -> ValueRef {
+pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref());
}
/* Casts */
-pub fn Trunc(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).trunc(val, dest_ty)
}
}
-pub fn ZExt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).zext(val, dest_ty)
}
}
-pub fn SExt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).sext(val, dest_ty)
}
}
-pub fn FPToUI(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fptoui(val, dest_ty)
}
}
-pub fn FPToSI(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fptosi(val, dest_ty)
}
}
-pub fn UIToFP(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).uitofp(val, dest_ty)
}
}
-pub fn SIToFP(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).sitofp(val, dest_ty)
}
}
-pub fn FPTrunc(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fptrunc(val, dest_ty)
}
}
-pub fn FPExt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fpext(val, dest_ty)
}
}
-pub fn PtrToInt(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).ptrtoint(val, dest_ty)
}
}
-pub fn IntToPtr(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).inttoptr(val, dest_ty)
}
}
-pub fn BitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).bitcast(val, dest_ty)
}
}
-pub fn ZExtOrBitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).zext_or_bitcast(val, dest_ty)
}
}
-pub fn SExtOrBitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).sext_or_bitcast(val, dest_ty)
}
}
-pub fn TruncOrBitCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).trunc_or_bitcast(val, dest_ty)
}
}
-pub fn Cast(cx: &Block, op: Opcode, val: ValueRef, dest_ty: Type,
+pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type,
_: *const u8)
-> ValueRef {
unsafe {
}
}
-pub fn PointerCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).pointercast(val, dest_ty)
}
}
-pub fn IntCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).intcast(val, dest_ty)
}
}
-pub fn FPCast(cx: &Block, val: ValueRef, dest_ty: Type) -> ValueRef {
+pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); }
B(cx).fpcast(val, dest_ty)
/* Comparisons */
-pub fn ICmp(cx: &Block, op: IntPredicate, lhs: ValueRef, rhs: ValueRef)
+pub fn ICmp(cx: Block, op: IntPredicate, lhs: ValueRef, rhs: ValueRef)
-> ValueRef {
unsafe {
if cx.unreachable.get() {
}
}
-pub fn FCmp(cx: &Block, op: RealPredicate, lhs: ValueRef, rhs: ValueRef)
+pub fn FCmp(cx: Block, op: RealPredicate, lhs: ValueRef, rhs: ValueRef)
-> ValueRef {
unsafe {
if cx.unreachable.get() {
}
/* Miscellaneous instructions */
-pub fn EmptyPhi(cx: &Block, ty: Type) -> ValueRef {
+pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
B(cx).empty_phi(ty)
}
}
-pub fn Phi(cx: &Block, ty: Type, vals: &[ValueRef],
+pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef],
bbs: &[BasicBlockRef]) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
}
}
-pub fn _UndefReturn(cx: &Block, fn_: ValueRef) -> ValueRef {
+pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
let ty = val_ty(fn_);
let retty = if ty.kind() == llvm::Integer {
ty.return_type()
} else {
- ccx.int_type
+ ccx.int_type()
};
B(cx).count_insn("ret_undef");
llvm::LLVMGetUndef(retty.to_ref())
}
}
-pub fn add_span_comment(cx: &Block, sp: Span, text: &str) {
+pub fn add_span_comment(cx: Block, sp: Span, text: &str) {
B(cx).add_span_comment(sp, text)
}
-pub fn add_comment(cx: &Block, text: &str) {
+pub fn add_comment(cx: Block, text: &str) {
B(cx).add_comment(text)
}
-pub fn InlineAsmCall(cx: &Block, asm: *const c_char, cons: *const c_char,
+pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char,
inputs: &[ValueRef], output: Type,
volatile: bool, alignstack: bool,
dia: AsmDialect) -> ValueRef {
B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
}
-pub fn Call(cx: &Block, fn_: ValueRef, args: &[ValueRef],
+pub fn Call(cx: Block, fn_: ValueRef, args: &[ValueRef],
attributes: Option<AttrBuilder>) -> ValueRef {
if cx.unreachable.get() { return _UndefReturn(cx, fn_); }
B(cx).call(fn_, args, attributes)
}
-pub fn CallWithConv(cx: &Block, fn_: ValueRef, args: &[ValueRef], conv: CallConv,
+pub fn CallWithConv(cx: Block, fn_: ValueRef, args: &[ValueRef], conv: CallConv,
attributes: Option<AttrBuilder>) -> ValueRef {
if cx.unreachable.get() { return _UndefReturn(cx, fn_); }
B(cx).call_with_conv(fn_, args, conv, attributes)
}
-pub fn AtomicFence(cx: &Block, order: AtomicOrdering) {
+pub fn AtomicFence(cx: Block, order: AtomicOrdering) {
if cx.unreachable.get() { return; }
B(cx).atomic_fence(order)
}
-pub fn Select(cx: &Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
+pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
if cx.unreachable.get() { return _Undef(then); }
B(cx).select(if_, then, else_)
}
-pub fn VAArg(cx: &Block, list: ValueRef, ty: Type) -> ValueRef {
+pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef {
unsafe {
if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); }
B(cx).va_arg(list, ty)
}
}
-pub fn ExtractElement(cx: &Block, vec_val: ValueRef, index: ValueRef) -> ValueRef {
+pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn InsertElement(cx: &Block, vec_val: ValueRef, elt_val: ValueRef,
+pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef,
index: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
}
}
-pub fn ShuffleVector(cx: &Block, v1: ValueRef, v2: ValueRef,
+pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef,
mask: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
}
}
-pub fn VectorSplat(cx: &Block, num_elts: uint, elt_val: ValueRef) -> ValueRef {
+pub fn VectorSplat(cx: Block, num_elts: uint, elt_val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn ExtractValue(cx: &Block, agg_val: ValueRef, index: uint) -> ValueRef {
+pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: uint) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn InsertValue(cx: &Block, agg_val: ValueRef, elt_val: ValueRef, index: uint) -> ValueRef {
+pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: uint) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref());
}
}
-pub fn IsNull(cx: &Block, val: ValueRef) -> ValueRef {
+pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
}
}
-pub fn IsNotNull(cx: &Block, val: ValueRef) -> ValueRef {
+pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef {
unsafe {
if cx.unreachable.get() {
return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref());
}
}
-pub fn PtrDiff(cx: &Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
unsafe {
let ccx = cx.fcx.ccx;
- if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type.to_ref()); }
+ if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); }
B(cx).ptrdiff(lhs, rhs)
}
}
-pub fn Trap(cx: &Block) {
+pub fn Trap(cx: Block) {
if cx.unreachable.get() { return; }
B(cx).trap();
}
-pub fn LandingPad(cx: &Block, ty: Type, pers_fn: ValueRef,
+pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef,
num_clauses: uint) -> ValueRef {
check_not_terminated(cx);
assert!(!cx.unreachable.get());
B(cx).landing_pad(ty, pers_fn, num_clauses)
}
-pub fn SetCleanup(cx: &Block, landing_pad: ValueRef) {
+pub fn SetCleanup(cx: Block, landing_pad: ValueRef) {
B(cx).set_cleanup(landing_pad)
}
-pub fn Resume(cx: &Block, exn: ValueRef) -> ValueRef {
+pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef {
check_not_terminated(cx);
terminate(cx, "Resume");
B(cx).resume(exn)
}
// Atomic Operations
-pub fn AtomicCmpXchg(cx: &Block, dst: ValueRef,
+pub fn AtomicCmpXchg(cx: Block, dst: ValueRef,
cmp: ValueRef, src: ValueRef,
order: AtomicOrdering,
failure_order: AtomicOrdering) -> ValueRef {
B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order)
}
-pub fn AtomicRMW(cx: &Block, op: AtomicBinOp,
+pub fn AtomicRMW(cx: Block, op: AtomicBinOp,
dst: ValueRef, src: ValueRef,
order: AtomicOrdering) -> ValueRef {
B(cx).atomic_rmw(op, dst, src, order)
use std::string::String;
use syntax::codemap::Span;
-pub struct Builder<'a> {
+pub struct Builder<'a, 'tcx: 'a> {
pub llbuilder: BuilderRef,
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a, 'tcx>,
}
// This is a really awful way to get a zero-length c-string, but better (and a
&cnull as *const c_char
}
-impl<'a> Builder<'a> {
- pub fn new(ccx: &'a CrateContext) -> Builder<'a> {
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
Builder {
- llbuilder: ccx.builder.b,
+ llbuilder: ccx.raw_builder(),
ccx: ccx,
}
}
pub fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
- self.ccx.stats.n_llvm_insns.set(self.ccx
- .stats
+ self.ccx.stats().n_llvm_insns.set(self.ccx
+ .stats()
.n_llvm_insns
.get() + 1);
}
+ self.ccx.count_llvm_insn();
if self.ccx.sess().count_llvm_insns() {
base::with_insn_ctxt(|v| {
- let mut h = self.ccx.stats.llvm_insns.borrow_mut();
+ let mut h = self.ccx.stats().llvm_insns.borrow_mut();
// Build version of path with cycles removed.
self.count_insn("invoke");
debug!("Invoke {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
let v = [min, max];
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.ccx.llcx,
+ llvm::LLVMMDNodeInContext(self.ccx.llcx(),
v.as_ptr(), v.len() as c_uint));
}
pub fn store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store");
unsafe {
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store.volatile");
unsafe {
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
self.count_insn("store.atomic");
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
else { llvm::False };
let argtys = inputs.iter().map(|v| {
- debug!("Asm Input Type: {:?}", self.ccx.tn.val_to_string(*v));
+ debug!("Asm Input Type: {:?}", self.ccx.tn().val_to_string(*v));
val_ty(*v)
}).collect::<Vec<_>>();
- debug!("Asm Output Type: {:?}", self.ccx.tn.type_to_string(output));
+ debug!("Asm Output Type: {:?}", self.ccx.tn().type_to_string(output));
let fty = Type::func(argtys.as_slice(), &output);
unsafe {
let v = llvm::LLVMInlineAsm(
self.count_insn("call");
debug!("Call {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_uppercase_pattern_statics)]
+#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array};
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_uppercase_pattern_statics)]
+#![allow(non_uppercase_statics)]
use libc::c_uint;
use std::cmp;
let r = size % 32;
if r > 0 {
unsafe {
- args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx, r as c_uint)));
+ args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
}
}
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
-#![allow(non_uppercase_pattern_statics)]
+#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
TraitItem(MethodData)
}
-pub struct Callee<'a> {
- pub bcx: &'a Block<'a>,
+pub struct Callee<'blk, 'tcx: 'blk> {
+ pub bcx: Block<'blk, 'tcx>,
pub data: CalleeData,
}
-fn trans<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> Callee<'a> {
+fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("trans_callee");
debug!("callee::trans(expr={})", expr.repr(bcx.tcx()));
// any other expressions are closures:
return datum_callee(bcx, expr);
- fn datum_callee<'a>(bcx: &'a Block<'a>, expr: &ast::Expr) -> Callee<'a> {
+ fn datum_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &ast::Expr)
+ -> Callee<'blk, 'tcx> {
let DatumBlock {bcx: mut bcx, datum} = expr::trans(bcx, expr);
match ty::get(datum.ty).sty {
ty::ty_bare_fn(..) => {
}
}
- fn fn_callee<'a>(bcx: &'a Block<'a>, llfn: ValueRef) -> Callee<'a> {
+ fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, llfn: ValueRef)
+ -> Callee<'blk, 'tcx> {
return Callee {
bcx: bcx,
data: Fn(llfn),
};
}
- fn trans_def<'a>(bcx: &'a Block<'a>, def: def::Def, ref_expr: &ast::Expr)
- -> Callee<'a> {
+ fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: def::Def, ref_expr: &ast::Expr)
+ -> Callee<'blk, 'tcx> {
debug!("trans_def(def={}, ref_expr={})", def.repr(bcx.tcx()), ref_expr.repr(bcx.tcx()));
let expr_ty = node_id_type(bcx, ref_expr.id);
match def {
def::DefFn(did, _) if {
- let def_id = if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(bcx.ccx(), did)
- } else {
- did
- };
- match bcx.tcx().map.find(def_id.node) {
+ let maybe_def_id = inline::get_local_instance(bcx.ccx(), did);
+ let maybe_ast_node = maybe_def_id.and_then(|def_id| bcx.tcx().map
+ .find(def_id.node));
+ match maybe_ast_node {
Some(ast_map::NodeStructCtor(_)) => true,
_ => false
}
_ => false
} => {
let substs = node_id_substs(bcx, ExprId(ref_expr.id));
- let def_id = if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(bcx.ccx(), did)
- } else {
- did
- };
+ let def_id = inline::maybe_instantiate_inline(bcx.ccx(), did);
Callee { bcx: bcx, data: Intrinsic(def_id.node, substs) }
}
def::DefFn(did, _) |
}
}
-pub fn trans_fn_ref(bcx: &Block, def_id: ast::DefId, node: ExprOrMethodCall) -> ValueRef {
+pub fn trans_fn_ref(bcx: Block, def_id: ast::DefId, node: ExprOrMethodCall) -> ValueRef {
/*!
* Translates a reference (with id `ref_id`) to the fn/method
* with id `def_id` into a function pointer. This may require
trans_fn_ref_with_vtables(bcx, def_id, node, substs, vtables)
}
-fn trans_fn_ref_with_vtables_to_callee<'a>(bcx: &'a Block<'a>,
- def_id: ast::DefId,
- ref_id: ast::NodeId,
- substs: subst::Substs,
- vtables: typeck::vtable_res)
- -> Callee<'a> {
+fn trans_fn_ref_with_vtables_to_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ def_id: ast::DefId,
+ ref_id: ast::NodeId,
+ substs: subst::Substs,
+ vtables: typeck::vtable_res)
+ -> Callee<'blk, 'tcx> {
Callee {
bcx: bcx,
data: Fn(trans_fn_ref_with_vtables(bcx,
}
}
-fn resolve_default_method_vtables(bcx: &Block,
+fn resolve_default_method_vtables(bcx: Block,
impl_id: ast::DefId,
substs: &subst::Substs,
impl_vtables: typeck::vtable_res)
/// Translates the adapter that deconstructs a `Box<Trait>` object into
/// `Trait` so that a by-value self method can be called.
-pub fn trans_unboxing_shim(bcx: &Block,
+pub fn trans_unboxing_shim(bcx: Block,
llshimmedfn: ValueRef,
fty: &ty::BareFnTy,
method_id: ast::DefId,
}
pub fn trans_fn_ref_with_vtables(
- bcx: &Block, //
+ bcx: Block, //
def_id: ast::DefId, // def id of fn
node: ExprOrMethodCall, // node id of use of fn; may be zero if N/A
substs: subst::Substs, // values for fn's ty params
// Check whether this fn has an inlined copy and, if so, redirect
// def_id to the local id of the inlined copy.
- let def_id = {
- if def_id.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(ccx, def_id)
- } else {
- def_id
- }
- };
+ let def_id = inline::maybe_instantiate_inline(ccx, def_id);
// We must monomorphise if the fn has type parameters, is a default method,
// or is a named tuple constructor.
// ______________________________________________________________________
// Translating calls
-pub fn trans_call<'a>(
- in_cx: &'a Block<'a>,
- call_ex: &ast::Expr,
- f: &ast::Expr,
- args: CallArgs,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_call<'blk, 'tcx>(in_cx: Block<'blk, 'tcx>,
+ call_ex: &ast::Expr,
+ f: &ast::Expr,
+ args: CallArgs,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_call");
trans_call_inner(in_cx,
Some(common::expr_info(call_ex)),
Some(dest)).bcx
}
-pub fn trans_method_call<'a>(
- bcx: &'a Block<'a>,
- call_ex: &ast::Expr,
- rcvr: &ast::Expr,
- args: CallArgs,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_method_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ call_ex: &ast::Expr,
+ rcvr: &ast::Expr,
+ args: CallArgs,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_method_call");
debug!("trans_method_call(call_ex={})", call_ex.repr(bcx.tcx()));
let method_call = MethodCall::expr(call_ex.id);
Some(dest)).bcx
}
-pub fn trans_lang_call<'a>(
- bcx: &'a Block<'a>,
- did: ast::DefId,
- args: &[ValueRef],
- dest: Option<expr::Dest>)
- -> Result<'a> {
+pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ did: ast::DefId,
+ args: &[ValueRef],
+ dest: Option<expr::Dest>)
+ -> Result<'blk, 'tcx> {
let fty = if did.krate == ast::LOCAL_CRATE {
ty::node_id_to_type(bcx.tcx(), did.node)
} else {
dest)
}
-pub fn trans_call_inner<'a>(
- bcx: &'a Block<'a>,
- call_info: Option<NodeInfo>,
- callee_ty: ty::t,
- get_callee: |bcx: &'a Block<'a>,
- arg_cleanup_scope: cleanup::ScopeId|
- -> Callee<'a>,
- args: CallArgs,
- dest: Option<expr::Dest>)
- -> Result<'a> {
+pub fn trans_call_inner<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ call_info: Option<NodeInfo>,
+ callee_ty: ty::t,
+ get_callee: |bcx: Block<'blk, 'tcx>,
+ arg_cleanup_scope: cleanup::ScopeId|
+ -> Callee<'blk, 'tcx>,
+ args: CallArgs,
+ dest: Option<expr::Dest>)
+ -> Result<'blk, 'tcx> {
/*!
* This behemoth of a function translates function calls.
* Unfortunately, in order to generate more efficient LLVM
// The code below invokes the function, using either the Rust
// conventions (if it is a rust fn) or the native conventions
- // (otherwise). The important part is that, when all is sad
+ // (otherwise). The important part is that, when all is said
// and done, either the return value of the function will have been
// written in opt_llretslot (if it is Some) or `llresult` will be
// set appropriately (otherwise).
ArgOverloadedCall(&'a [Gc<ast::Expr>]),
}
-fn trans_args_under_call_abi<'a>(
- mut bcx: &'a Block<'a>,
+fn trans_args_under_call_abi<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
arg_exprs: &[Gc<ast::Expr>],
fn_ty: ty::t,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId,
ignore_self: bool)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
// Translate the `self` argument first.
let arg_tys = ty::ty_fn_args(fn_ty);
if !ignore_self {
bcx
}
-fn trans_overloaded_call_args<'a>(
- mut bcx: &'a Block<'a>,
+fn trans_overloaded_call_args<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
arg_exprs: &[Gc<ast::Expr>],
fn_ty: ty::t,
llargs: &mut Vec<ValueRef>,
arg_cleanup_scope: cleanup::ScopeId,
ignore_self: bool)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
// Translate the `self` argument first.
let arg_tys = ty::ty_fn_args(fn_ty);
if !ignore_self {
bcx
}
-pub fn trans_args<'a>(
- cx: &'a Block<'a>,
- args: CallArgs,
- fn_ty: ty::t,
- llargs: &mut Vec<ValueRef> ,
- arg_cleanup_scope: cleanup::ScopeId,
- ignore_self: bool,
- abi: synabi::Abi)
- -> &'a Block<'a> {
+pub fn trans_args<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ args: CallArgs,
+ fn_ty: ty::t,
+ llargs: &mut Vec<ValueRef> ,
+ arg_cleanup_scope: cleanup::ScopeId,
+ ignore_self: bool,
+ abi: synabi::Abi)
+ -> Block<'blk, 'tcx> {
debug!("trans_args(abi={})", abi);
let _icx = push_ctxt("trans_args");
DoAutorefArg(ast::NodeId)
}
-pub fn trans_arg_datum<'a>(
- bcx: &'a Block<'a>,
- formal_arg_ty: ty::t,
- arg_datum: Datum<Expr>,
- arg_cleanup_scope: cleanup::ScopeId,
- autoref_arg: AutorefArg)
- -> Result<'a> {
+pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ formal_arg_ty: ty::t,
+ arg_datum: Datum<Expr>,
+ arg_cleanup_scope: cleanup::ScopeId,
+ autoref_arg: AutorefArg)
+ -> Result<'blk, 'tcx> {
let _icx = push_ctxt("trans_arg_datum");
let mut bcx = bcx;
let ccx = bcx.ccx();
use syntax::ast;
use util::ppaux::Repr;
-pub struct CleanupScope<'a> {
+pub struct CleanupScope<'blk, 'tcx: 'blk> {
// The id of this cleanup scope. If the id is None,
// this is a *temporary scope* that is pushed during trans to
// cleanup miscellaneous garbage that trans may generate whose
// lifetime is a subset of some expression. See module doc for
// more details.
- kind: CleanupScopeKind<'a>,
+ kind: CleanupScopeKind<'blk, 'tcx>,
// Cleanups to run upon scope exit.
cleanups: Vec<CleanupObj>,
pub static EXIT_LOOP: uint = 1;
pub static EXIT_MAX: uint = 2;
-pub enum CleanupScopeKind<'a> {
+pub enum CleanupScopeKind<'blk, 'tcx: 'blk> {
CustomScopeKind,
AstScopeKind(ast::NodeId),
- LoopScopeKind(ast::NodeId, [&'a Block<'a>, ..EXIT_MAX])
+ LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>, ..EXIT_MAX])
}
#[deriving(PartialEq)]
pub trait Cleanup {
fn must_unwind(&self) -> bool;
fn clean_on_unwind(&self) -> bool;
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a>;
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx>;
}
pub type CleanupObj = Box<Cleanup+'static>;
CustomScope(CustomScopeIndex)
}
-impl<'a> CleanupMethods<'a> for FunctionContext<'a> {
+impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
fn push_ast_cleanup_scope(&self, id: ast::NodeId) {
/*!
* Invoked when we start to trans the code contained
*/
debug!("push_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
// FIXME(#2202) -- currently closure bodies have a parent
// region, which messes up the assertion below, since there
// this new AST scope had better be its immediate child.
let top_scope = self.top_ast_scope();
if top_scope.is_some() {
- assert_eq!(self.ccx.tcx.region_maps.opt_encl_scope(id), top_scope);
+ assert_eq!(self.ccx.tcx().region_maps.opt_encl_scope(id), top_scope);
}
self.push_scope(CleanupScope::new(AstScopeKind(id)));
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
- exits: [&'a Block<'a>, ..EXIT_MAX]) {
+ exits: [Block<'blk, 'tcx>, ..EXIT_MAX]) {
debug!("push_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(id));
+ self.ccx.tcx().map.node_to_string(id));
assert_eq!(Some(id), self.top_ast_scope());
self.push_scope(CleanupScope::new(LoopScopeKind(id, exits)));
}
fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
/*!
* Removes the cleanup scope for id `cleanup_scope`, which
* must be at the top of the cleanup stack, and generates the
*/
debug!("pop_and_trans_ast_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope)));
*/
debug!("pop_loop_cleanup_scope({})",
- self.ccx.tcx.map.node_to_string(cleanup_scope));
+ self.ccx.tcx().map.node_to_string(cleanup_scope));
assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope)));
}
fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
/*!
* Removes the top cleanup scope from the stack, which must be
* a temporary scope, and generates the code to do its
self.ccx.sess().bug("no loop scope found");
}
- fn normal_exit_block(&'a self,
+ fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: uint) -> BasicBlockRef {
/*!
self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit))
}
- fn return_exit_block(&'a self) -> BasicBlockRef {
+ fn return_exit_block(&'blk self) -> BasicBlockRef {
/*!
* Returns a block to branch to which will perform all pending
* cleanups and then return from this function
debug!("schedule_lifetime_end({:?}, val={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val));
+ self.ccx.tn().val_to_string(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
}
debug!("schedule_drop_mem({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_drop_and_zero_mem({:?}, val={}, ty={}, zero={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()),
true);
debug!("schedule_drop_immediate({:?}, val={}, ty={})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
ty.repr(self.ccx.tcx()));
self.schedule_clean(cleanup_scope, drop as CleanupObj);
debug!("schedule_free_value({:?}, val={}, heap={:?})",
cleanup_scope,
- self.ccx.tn.val_to_string(val),
+ self.ccx.tn().val_to_string(val),
+ heap);
+
+ self.schedule_clean(cleanup_scope, drop as CleanupObj);
+ }
+
+ fn schedule_free_slice(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap) {
+ /*!
+ * Schedules a call to free the slice behind `val`, using the given
+ * `size` and `align`. Note that this is a shallow operation.
+ */
+
+ let drop = box FreeSlice { ptr: val, size: size, align: align, heap: heap };
+
+ debug!("schedule_free_slice({:?}, val={}, heap={:?})",
+ cleanup_scope,
+ self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj);
self.ccx.sess().bug(
format!("no cleanup scope {} found",
- self.ccx.tcx.map.node_to_string(cleanup_scope)).as_slice());
+ self.ccx.tcx().map.node_to_string(cleanup_scope)).as_slice());
}
fn schedule_clean_in_custom_scope(&self,
self.scopes.borrow().iter().rev().any(|s| s.needs_invoke())
}
- fn get_landing_pad(&'a self) -> BasicBlockRef {
+ fn get_landing_pad(&'blk self) -> BasicBlockRef {
/*!
* Returns a basic block to branch to in the event of a failure.
* This block will run the failure cleanups and eventually
}
}
-impl<'a> CleanupHelperMethods<'a> for FunctionContext<'a> {
+impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
fn top_ast_scope(&self) -> Option<ast::NodeId> {
/*!
* Returns the id of the current top-most AST scope, if any.
}
fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
- bcx: &'a Block<'a>,
- scope: &CleanupScope) -> &'a Block<'a> {
+ bcx: Block<'blk, 'tcx>,
+ scope: &CleanupScope) -> Block<'blk, 'tcx> {
/*! Generates the cleanups for `scope` into `bcx` */
let mut bcx = bcx;
self.scopes.borrow().len()
}
- fn push_scope(&self, scope: CleanupScope<'a>) {
+ fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) {
self.scopes.borrow_mut().push(scope)
}
- fn pop_scope(&self) -> CleanupScope<'a> {
+ fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> {
debug!("popping cleanup scope {}, {} scopes remaining",
self.top_scope(|s| s.block_name("")),
self.scopes_len() - 1);
self.scopes.borrow_mut().pop().unwrap()
}
- fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R {
+ fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R {
f(self.scopes.borrow().last().unwrap())
}
- fn trans_cleanups_to_exit_scope(&'a self,
+ fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef {
/*!
while !popped_scopes.is_empty() {
let mut scope = popped_scopes.pop().unwrap();
- if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(*c, label))
+ if scope.cleanups.iter().any(|c| cleanup_is_suitable_for(&**c, label))
{
let name = scope.block_name("clean");
debug!("generating cleanups for {}", name);
None);
let mut bcx_out = bcx_in;
for cleanup in scope.cleanups.iter().rev() {
- if cleanup_is_suitable_for(*cleanup, label) {
+ if cleanup_is_suitable_for(&**cleanup, label) {
bcx_out = cleanup.trans(bcx_out);
}
}
prev_llbb
}
- fn get_or_create_landing_pad(&'a self) -> BasicBlockRef {
+ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
/*!
* Creates a landing pad for the top scope, if one does not
* exist. The landing pad will perform all cleanups necessary
let llpersonality = match pad_bcx.tcx().lang_items.eh_personality() {
Some(def_id) => callee::trans_fn_ref(pad_bcx, def_id, ExprId(0)),
None => {
- let mut personality = self.ccx.eh_personality.borrow_mut();
+ let mut personality = self.ccx.eh_personality().borrow_mut();
match *personality {
Some(llpersonality) => llpersonality,
None => {
}
}
-impl<'a> CleanupScope<'a> {
- fn new(kind: CleanupScopeKind<'a>) -> CleanupScope<'a> {
+impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> {
+ fn new(kind: CleanupScopeKind<'blk, 'tcx>) -> CleanupScope<'blk, 'tcx> {
CleanupScope {
kind: kind,
cleanups: vec!(),
}
}
-impl<'a> CleanupScopeKind<'a> {
+impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> {
fn is_temp(&self) -> bool {
match *self {
CustomScopeKind => true,
self.must_unwind
}
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
let bcx = if self.is_immediate {
glue::drop_ty_immediate(bcx, self.val, self.ty)
} else {
true
}
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
match self.heap {
HeapManaged => {
glue::trans_free(bcx, self.ptr)
}
}
+pub struct FreeSlice {
+ ptr: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap,
+}
+
+impl Cleanup for FreeSlice {
+ fn must_unwind(&self) -> bool {
+ true
+ }
+
+ fn clean_on_unwind(&self) -> bool {
+ true
+ }
+
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
+ match self.heap {
+ HeapManaged => {
+ glue::trans_free(bcx, self.ptr)
+ }
+ HeapExchange => {
+ glue::trans_exchange_free_dyn(bcx, self.ptr, self.size, self.align)
+ }
+ }
+ }
+}
+
pub struct LifetimeEnd {
ptr: ValueRef,
}
true
}
- fn trans<'a>(&self, bcx: &'a Block<'a>) -> &'a Block<'a> {
+ fn trans<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) -> Block<'blk, 'tcx> {
base::call_lifetime_end(bcx, self.ptr);
bcx
}
///////////////////////////////////////////////////////////////////////////
// These traits just exist to put the methods into this file.
-pub trait CleanupMethods<'a> {
+pub trait CleanupMethods<'blk, 'tcx> {
fn push_ast_cleanup_scope(&self, id: ast::NodeId);
fn push_loop_cleanup_scope(&self,
id: ast::NodeId,
- exits: [&'a Block<'a>, ..EXIT_MAX]);
+ exits: [Block<'blk, 'tcx>, ..EXIT_MAX]);
fn push_custom_cleanup_scope(&self) -> CustomScopeIndex;
fn pop_and_trans_ast_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
cleanup_scope: ast::NodeId)
- -> &'a Block<'a>;
+ -> Block<'blk, 'tcx>;
fn pop_loop_cleanup_scope(&self,
cleanup_scope: ast::NodeId);
fn pop_custom_cleanup_scope(&self,
custom_scope: CustomScopeIndex);
fn pop_and_trans_custom_cleanup_scope(&self,
- bcx: &'a Block<'a>,
+ bcx: Block<'blk, 'tcx>,
custom_scope: CustomScopeIndex)
- -> &'a Block<'a>;
+ -> Block<'blk, 'tcx>;
fn top_loop_scope(&self) -> ast::NodeId;
- fn normal_exit_block(&'a self,
+ fn normal_exit_block(&'blk self,
cleanup_scope: ast::NodeId,
exit: uint) -> BasicBlockRef;
- fn return_exit_block(&'a self) -> BasicBlockRef;
+ fn return_exit_block(&'blk self) -> BasicBlockRef;
fn schedule_lifetime_end(&self,
cleanup_scope: ScopeId,
val: ValueRef);
val: ValueRef,
heap: Heap,
content_ty: ty::t);
+ fn schedule_free_slice(&self,
+ cleanup_scope: ScopeId,
+ val: ValueRef,
+ size: ValueRef,
+ align: ValueRef,
+ heap: Heap);
fn schedule_clean(&self,
cleanup_scope: ScopeId,
cleanup: CleanupObj);
custom_scope: CustomScopeIndex,
cleanup: CleanupObj);
fn needs_invoke(&self) -> bool;
- fn get_landing_pad(&'a self) -> BasicBlockRef;
+ fn get_landing_pad(&'blk self) -> BasicBlockRef;
}
-trait CleanupHelperMethods<'a> {
+trait CleanupHelperMethods<'blk, 'tcx> {
fn top_ast_scope(&self) -> Option<ast::NodeId>;
fn top_nonempty_cleanup_scope(&self) -> Option<uint>;
fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool;
fn trans_scope_cleanups(&self,
- bcx: &'a Block<'a>,
- scope: &CleanupScope<'a>) -> &'a Block<'a>;
- fn trans_cleanups_to_exit_scope(&'a self,
+ bcx: Block<'blk, 'tcx>,
+ scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>;
+ fn trans_cleanups_to_exit_scope(&'blk self,
label: EarlyExitLabel)
-> BasicBlockRef;
- fn get_or_create_landing_pad(&'a self) -> BasicBlockRef;
+ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef;
fn scopes_len(&self) -> uint;
- fn push_scope(&self, scope: CleanupScope<'a>);
- fn pop_scope(&self) -> CleanupScope<'a>;
- fn top_scope<R>(&self, f: |&CleanupScope<'a>| -> R) -> R;
+ fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>);
+ fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>;
+ fn top_scope<R>(&self, f: |&CleanupScope<'blk, 'tcx>| -> R) -> R;
}
use middle::trans::datum::{Datum, DatumBlock, Expr, Lvalue, rvalue_scratch_datum};
use middle::trans::debuginfo;
use middle::trans::expr;
-use middle::trans::machine::llsize_of;
use middle::trans::type_of::*;
use middle::trans::type_::Type;
use middle::ty;
ty::mk_tup(tcx, vec!(ty::mk_uint(), ty::mk_nil_ptr(tcx), ptr, ptr, t))
}
-fn allocate_cbox<'a>(bcx: &'a Block<'a>,
- store: ty::TraitStore,
- cdata_ty: ty::t)
- -> Result<'a> {
+fn allocate_cbox<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ store: ty::TraitStore,
+ cdata_ty: ty::t)
+ -> Result<'blk, 'tcx> {
let _icx = push_ctxt("closure::allocate_cbox");
let tcx = bcx.tcx();
// Allocate and initialize the box:
+ let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
match store {
ty::UniqTraitStore => {
- let ty = type_of(bcx.ccx(), cdata_ty);
- let size = llsize_of(bcx.ccx(), ty);
- // we treat proc as @ here, which isn't ideal
- malloc_raw_dyn_managed(bcx, cdata_ty, ClosureExchangeMallocFnLangItem, size)
+ malloc_raw_dyn_proc(bcx, cbox_ty, ClosureExchangeMallocFnLangItem)
}
ty::RegionTraitStore(..) => {
- let cbox_ty = tuplify_box_ty(tcx, cdata_ty);
let llbox = alloc_ty(bcx, cbox_ty, "__closure");
Result::new(bcx, llbox)
}
}
}
-pub struct ClosureResult<'a> {
+pub struct ClosureResult<'blk, 'tcx: 'blk> {
llbox: ValueRef, // llvalue of ptr to closure
cdata_ty: ty::t, // type of the closure data
- bcx: &'a Block<'a> // final bcx
+ bcx: Block<'blk, 'tcx> // final bcx
}
// Given a block context and a list of tydescs and values to bind
// construct a closure out of them. If copying is true, it is a
// heap allocated closure that copies the upvars into environment.
// Otherwise, it is stack allocated and copies pointers to the upvars.
-pub fn store_environment<'a>(
- bcx: &'a Block<'a>,
- bound_values: Vec<EnvValue> ,
- store: ty::TraitStore)
- -> ClosureResult<'a> {
+pub fn store_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ bound_values: Vec<EnvValue> ,
+ store: ty::TraitStore)
+ -> ClosureResult<'blk, 'tcx> {
let _icx = push_ctxt("closure::store_environment");
let ccx = bcx.ccx();
let tcx = ccx.tcx();
// Given a context and a list of upvars, build a closure. This just
// collects the upvars and packages them up for store_environment.
-fn build_closure<'a>(bcx0: &'a Block<'a>,
- freevar_mode: freevars::CaptureMode,
- freevars: &Vec<freevars::freevar_entry>,
- store: ty::TraitStore)
- -> ClosureResult<'a>
-{
+fn build_closure<'blk, 'tcx>(bcx0: Block<'blk, 'tcx>,
+ freevar_mode: freevars::CaptureMode,
+ freevars: &Vec<freevars::freevar_entry>,
+ store: ty::TraitStore)
+ -> ClosureResult<'blk, 'tcx> {
let _icx = push_ctxt("closure::build_closure");
// If we need to, package up the iterator body to call
// Given an enclosing block context, a new function context, a closure type,
// and a list of upvars, generate code to load and populate the environment
// with the upvars and type descriptors.
-fn load_environment<'a>(bcx: &'a Block<'a>,
- cdata_ty: ty::t,
- freevars: &Vec<freevars::freevar_entry>,
- store: ty::TraitStore)
- -> &'a Block<'a> {
+fn load_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ cdata_ty: ty::t,
+ freevars: &Vec<freevars::freevar_entry>,
+ store: ty::TraitStore)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::load_environment");
// Don't bother to create the block if there's nothing to load
bcx
}
-fn load_unboxed_closure_environment<'a>(
- bcx: &'a Block<'a>,
+fn load_unboxed_closure_environment<'blk, 'tcx>(
+ bcx: Block<'blk, 'tcx>,
arg_scope_id: ScopeId,
freevars: &Vec<freevars::freevar_entry>,
closure_id: ast::DefId)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::load_environment");
if freevars.len() == 0 {
bcx
}
-fn fill_fn_pair(bcx: &Block, pair: ValueRef, llfn: ValueRef, llenvptr: ValueRef) {
+fn fill_fn_pair(bcx: Block, pair: ValueRef, llfn: ValueRef, llenvptr: ValueRef) {
Store(bcx, llfn, GEPi(bcx, pair, [0u, abi::fn_field_code]));
let llenvptr = PointerCast(bcx, llenvptr, Type::i8p(bcx.ccx()));
Store(bcx, llenvptr, GEPi(bcx, pair, [0u, abi::fn_field_box]));
}
-pub fn trans_expr_fn<'a>(
- bcx: &'a Block<'a>,
- store: ty::TraitStore,
- decl: &ast::FnDecl,
- body: &ast::Block,
- id: ast::NodeId,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_expr_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ store: ty::TraitStore,
+ decl: &ast::FnDecl,
+ body: &ast::Block,
+ id: ast::NodeId,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
/*!
*
* Translates the body of a closure expression.
pub fn get_or_create_declaration_if_unboxed_closure(ccx: &CrateContext,
closure_id: ast::DefId)
-> Option<ValueRef> {
- if !ccx.tcx.unboxed_closures.borrow().contains_key(&closure_id) {
+ if !ccx.tcx().unboxed_closures.borrow().contains_key(&closure_id) {
// Not an unboxed closure.
return None
}
- match ccx.unboxed_closure_vals.borrow().find(&closure_id) {
+ match ccx.unboxed_closure_vals().borrow().find(&closure_id) {
Some(llfn) => {
debug!("get_or_create_declaration_if_unboxed_closure(): found \
closure");
None => {}
}
- let function_type = ty::mk_unboxed_closure(&ccx.tcx,
+ let function_type = ty::mk_unboxed_closure(ccx.tcx(),
closure_id,
ty::ReStatic);
- let symbol = ccx.tcx.map.with_path(closure_id.node, |path| {
+ let symbol = ccx.tcx().map.with_path(closure_id.node, |path| {
mangle_internal_name_by_path_and_seq(path, "unboxed_closure")
});
debug!("get_or_create_declaration_if_unboxed_closure(): inserting new \
closure {} (type {})",
closure_id,
- ccx.tn.type_to_string(val_ty(llfn)));
- ccx.unboxed_closure_vals.borrow_mut().insert(closure_id, llfn);
+ ccx.tn().type_to_string(val_ty(llfn)));
+ ccx.unboxed_closure_vals().borrow_mut().insert(closure_id, llfn);
Some(llfn)
}
-pub fn trans_unboxed_closure<'a>(
- mut bcx: &'a Block<'a>,
+pub fn trans_unboxed_closure<'blk, 'tcx>(
+ mut bcx: Block<'blk, 'tcx>,
decl: &ast::FnDecl,
body: &ast::Block,
id: ast::NodeId,
dest: expr::Dest)
- -> &'a Block<'a> {
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("closure::trans_unboxed_closure");
debug!("trans_unboxed_closure()");
}
};
- match ccx.closure_bare_wrapper_cache.borrow().find(&fn_ptr) {
+ match ccx.closure_bare_wrapper_cache().borrow().find(&fn_ptr) {
Some(&llval) => return llval,
None => {}
}
decl_rust_fn(ccx, closure_ty, name.as_slice())
};
- ccx.closure_bare_wrapper_cache.borrow_mut().insert(fn_ptr, llfn);
+ ccx.closure_bare_wrapper_cache().borrow_mut().insert(fn_ptr, llfn);
// This is only used by statics inlined from a different crate.
if !is_local {
llfn
}
-pub fn make_closure_from_bare_fn<'a>(bcx: &'a Block<'a>,
- closure_ty: ty::t,
- def: def::Def,
- fn_ptr: ValueRef)
- -> DatumBlock<'a, Expr> {
+pub fn make_closure_from_bare_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ closure_ty: ty::t,
+ def: def::Def,
+ fn_ptr: ValueRef)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let scratch = rvalue_scratch_datum(bcx, closure_ty, "__adjust");
let wrapper = get_wrapper_for_bare_fn(bcx.ccx(), closure_ty, def, fn_ptr, true);
fill_fn_pair(bcx, scratch.val, wrapper, C_null(Type::i8p(bcx.ccx())));
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_camel_case_types, non_snake_case_functions)]
+#![allow(non_camel_case_types, non_snake_case)]
//! Code that is useful in various trans modules.
use driver::session::Session;
use llvm;
-use llvm::{ValueRef, BasicBlockRef, BuilderRef};
+use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef};
use llvm::{True, False, Bool};
use middle::def;
use middle::freevars;
ty::ty_struct(..) | ty::ty_enum(..) | ty::ty_tup(..) |
ty::ty_unboxed_closure(..) => {
let llty = sizing_type_of(ccx, ty);
- llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type)
+ llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type())
}
_ => type_is_zero_size(ccx, ty)
}
// Function context. Every LLVM function we create will have one of
// these.
-pub struct FunctionContext<'a> {
+pub struct FunctionContext<'a, 'tcx: 'a> {
// The ValueRef returned from a call to llvm::LLVMAddFunction; the
// address of the first instruction in the sequence of
// instructions for this function that will go in the .text
pub span: Option<Span>,
// The arena that blocks are allocated from.
- pub block_arena: &'a TypedArena<Block<'a>>,
+ pub block_arena: &'a TypedArena<BlockS<'a, 'tcx>>,
// This function's enclosing crate context.
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a, 'tcx>,
// Used and maintained by the debuginfo module.
pub debug_context: debuginfo::FunctionDebugContext,
// Cleanup scopes.
- pub scopes: RefCell<Vec<cleanup::CleanupScope<'a>> >,
+ pub scopes: RefCell<Vec<cleanup::CleanupScope<'a, 'tcx>>>,
}
-impl<'a> FunctionContext<'a> {
+impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
pub fn arg_pos(&self, arg: uint) -> uint {
let arg = self.env_arg_pos() + arg;
if self.llenv.is_some() {
self.llreturn.set(Some(unsafe {
"return".with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx, self.llfn, buf)
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, buf)
})
}))
}
self.llreturn.get().unwrap()
}
- pub fn get_ret_slot(&self, bcx: &Block, ty: ty::t, name: &str) -> ValueRef {
+ pub fn get_ret_slot(&self, bcx: Block, ty: ty::t, name: &str) -> ValueRef {
if self.needs_ret_allocas {
base::alloca_no_lifetime(bcx, type_of::type_of(bcx.ccx(), ty), name)
} else {
is_lpad: bool,
name: &str,
opt_node_id: Option<ast::NodeId>)
- -> &'a Block<'a> {
+ -> Block<'a, 'tcx> {
unsafe {
let llbb = name.with_c_str(|buf| {
- llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx,
+ llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(),
self.llfn,
buf)
});
- Block::new(llbb, is_lpad, opt_node_id, self)
+ BlockS::new(llbb, is_lpad, opt_node_id, self)
}
}
pub fn new_id_block(&'a self,
name: &str,
node_id: ast::NodeId)
- -> &'a Block<'a> {
+ -> Block<'a, 'tcx> {
self.new_block(false, name, Some(node_id))
}
pub fn new_temp_block(&'a self,
name: &str)
- -> &'a Block<'a> {
+ -> Block<'a, 'tcx> {
self.new_block(false, name, None)
}
pub fn join_blocks(&'a self,
id: ast::NodeId,
- in_cxs: &[&'a Block<'a>])
- -> &'a Block<'a> {
+ in_cxs: &[Block<'a, 'tcx>])
+ -> Block<'a, 'tcx> {
let out = self.new_id_block("join", id);
let mut reachable = false;
for bcx in in_cxs.iter() {
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
-pub struct Block<'a> {
+pub struct BlockS<'blk, 'tcx: 'blk> {
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
// block to the function pointed to by llfn. We insert
// The function context for the function to which this block is
// attached.
- pub fcx: &'a FunctionContext<'a>,
+ pub fcx: &'blk FunctionContext<'blk, 'tcx>,
}
-impl<'a> Block<'a> {
- pub fn new<'a>(
- llbb: BasicBlockRef,
+pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>;
+
+impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
+ pub fn new(llbb: BasicBlockRef,
is_lpad: bool,
opt_node_id: Option<ast::NodeId>,
- fcx: &'a FunctionContext<'a>)
- -> &'a Block<'a> {
- fcx.block_arena.alloc(Block {
+ fcx: &'blk FunctionContext<'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
+ fcx.block_arena.alloc(BlockS {
llbb: llbb,
terminated: Cell::new(false),
unreachable: Cell::new(false),
})
}
- pub fn ccx(&self) -> &'a CrateContext { self.fcx.ccx }
- pub fn tcx(&self) -> &'a ty::ctxt {
- &self.fcx.ccx.tcx
+ pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
+ self.fcx.ccx
+ }
+ pub fn tcx(&self) -> &'blk ty::ctxt<'tcx> {
+ self.fcx.ccx.tcx()
}
- pub fn sess(&self) -> &'a Session { self.fcx.ccx.sess() }
+ pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() }
pub fn ident(&self, ident: Ident) -> String {
token::get_ident(ident).get().to_string()
}
pub fn val_to_string(&self, val: ValueRef) -> String {
- self.ccx().tn.val_to_string(val)
+ self.ccx().tn().val_to_string(val)
}
pub fn llty_str(&self, ty: Type) -> String {
- self.ccx().tn.type_to_string(ty)
+ self.ccx().tn().type_to_string(ty)
}
pub fn ty_to_string(&self, t: ty::t) -> String {
}
pub fn to_str(&self) -> String {
- let blk: *const Block = self;
- format!("[block {}]", blk)
+ format!("[block {:p}]", self)
}
}
-impl<'a> mc::Typer for Block<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'blk, 'tcx> mc::Typer<'tcx> for BlockS<'blk, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx()
}
}
}
-pub struct Result<'a> {
- pub bcx: &'a Block<'a>,
+pub struct Result<'blk, 'tcx: 'blk> {
+ pub bcx: Block<'blk, 'tcx>,
pub val: ValueRef
}
-impl<'a> Result<'a> {
- pub fn new(bcx: &'a Block<'a>, val: ValueRef) -> Result<'a> {
+impl<'b, 'tcx> Result<'b, 'tcx> {
+ pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
Result {
bcx: bcx,
val: val,
}
pub fn C_int(ccx: &CrateContext, i: int) -> ValueRef {
- C_integral(ccx.int_type, i as u64, true)
+ C_integral(ccx.int_type(), i as u64, true)
}
pub fn C_uint(ccx: &CrateContext, i: uint) -> ValueRef {
- C_integral(ccx.int_type, i as u64, false)
+ C_integral(ccx.int_type(), i as u64, false)
}
pub fn C_u8(ccx: &CrateContext, i: uint) -> ValueRef {
// our boxed-and-length-annotated strings.
pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
unsafe {
- match cx.const_cstr_cache.borrow().find(&s) {
+ match cx.const_cstr_cache().borrow().find(&s) {
Some(&llval) => return llval,
None => ()
}
- let sc = llvm::LLVMConstStringInContext(cx.llcx,
+ let sc = llvm::LLVMConstStringInContext(cx.llcx(),
s.get().as_ptr() as *const c_char,
s.get().len() as c_uint,
!null_terminated as Bool);
let gsym = token::gensym("str");
let g = format!("str{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(sc).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(sc).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::SetLinkage(g, llvm::InternalLinkage);
- cx.const_cstr_cache.borrow_mut().insert(s, g);
+ cx.const_cstr_cache().borrow_mut().insert(s, g);
g
}
}
let len = s.get().len();
let cs = llvm::LLVMConstPointerCast(C_cstr(cx, s, false),
Type::i8p(cx).to_ref());
- C_named_struct(cx.tn.find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
+ C_named_struct(cx.tn().find_type("str_slice").unwrap(), [cs, C_uint(cx, len)])
}
}
let gsym = token::gensym("binary");
let g = format!("binary{}", gsym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(lldata).to_ref(), buf)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(lldata).to_ref(), buf)
});
llvm::LLVMSetInitializer(g, lldata);
llvm::LLVMSetGlobalConstant(g, True);
}
}
-pub fn C_struct(ccx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
+ C_struct_in_context(cx.llcx(), elts, packed)
+}
+
+pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
unsafe {
- llvm::LLVMConstStructInContext(ccx.llcx,
+ llvm::LLVMConstStructInContext(llcx,
elts.as_ptr(), elts.len() as c_uint,
packed as Bool)
}
}
}
-pub fn C_bytes(ccx: &CrateContext, bytes: &[u8]) -> ValueRef {
+pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
+ C_bytes_in_context(cx.llcx(), bytes)
+}
+
+pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
- return llvm::LLVMConstStringInContext(ccx.llcx, ptr, bytes.len() as c_uint, True);
+ return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
}
}
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={}, us={:?}, r={})",
- cx.tn.val_to_string(v), us, cx.tn.val_to_string(r));
+ cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
return r;
}
}
}
-pub fn monomorphize_type(bcx: &Block, t: ty::t) -> ty::t {
+pub fn monomorphize_type(bcx: &BlockS, t: ty::t) -> ty::t {
t.subst(bcx.tcx(), &bcx.fcx.param_substs.substs)
}
-pub fn node_id_type(bcx: &Block, id: ast::NodeId) -> ty::t {
+pub fn node_id_type(bcx: &BlockS, id: ast::NodeId) -> ty::t {
let tcx = bcx.tcx();
let t = ty::node_id_to_type(tcx, id);
monomorphize_type(bcx, t)
}
-pub fn expr_ty(bcx: &Block, ex: &ast::Expr) -> ty::t {
+pub fn expr_ty(bcx: Block, ex: &ast::Expr) -> ty::t {
node_id_type(bcx, ex.id)
}
-pub fn expr_ty_adjusted(bcx: &Block, ex: &ast::Expr) -> ty::t {
+pub fn expr_ty_adjusted(bcx: Block, ex: &ast::Expr) -> ty::t {
monomorphize_type(bcx, ty::expr_ty_adjusted(bcx.tcx(), ex))
}
MethodCall(typeck::MethodCall)
}
-pub fn node_id_substs(bcx: &Block,
+pub fn node_id_substs(bcx: Block,
node: ExprOrMethodCall)
-> subst::Substs {
let tcx = bcx.tcx();
substs.substp(tcx, bcx.fcx.param_substs)
}
-pub fn node_vtables(bcx: &Block, id: typeck::MethodCall)
+pub fn node_vtables(bcx: Block, id: typeck::MethodCall)
-> typeck::vtable_res {
bcx.tcx().vtable_map.borrow().find(&id).map(|vts| {
resolve_vtables_in_fn_ctxt(bcx.fcx, vts)
param_bounds.get(n_bound).clone()
}
-pub fn langcall(bcx: &Block,
+pub fn langcall(bcx: Block,
span: Option<Span>,
msg: &str,
li: LangItem)
pub fn const_ptrcast(cx: &CrateContext, a: ValueRef, t: Type) -> ValueRef {
unsafe {
let b = llvm::LLVMConstPointerCast(a, t.ptr_to().to_ref());
- assert!(cx.const_globals.borrow_mut().insert(b as int, a));
+ assert!(cx.const_globals().borrow_mut().insert(b as int, a));
b
}
}
pub fn const_addr_of(cx: &CrateContext, cv: ValueRef, mutbl: ast::Mutability) -> ValueRef {
unsafe {
let gv = "const".with_c_str(|name| {
- llvm::LLVMAddGlobal(cx.llmod, val_ty(cv).to_ref(), name)
+ llvm::LLVMAddGlobal(cx.llmod(), val_ty(cv).to_ref(), name)
});
llvm::LLVMSetInitializer(gv, cv);
llvm::LLVMSetGlobalConstant(gv,
}
fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef {
- let v = match cx.const_globals.borrow().find(&(v as int)) {
+ let v = match cx.const_globals().borrow().find(&(v as int)) {
Some(&v) => v,
None => v
};
}
}
None => {
- cx.sess().bug(format!("can't dereference const of type {}",
+ cx.sess().bug(format!("cannot dereference const of type {}",
ty_to_string(cx.tcx(), t)).as_slice())
}
}
pub fn get_const_val(cx: &CrateContext,
mut def_id: ast::DefId) -> (ValueRef, bool) {
- let contains_key = cx.const_values.borrow().contains_key(&def_id.node);
+ let contains_key = cx.const_values().borrow().contains_key(&def_id.node);
if !ast_util::is_local(def_id) || !contains_key {
if !ast_util::is_local(def_id) {
def_id = inline::maybe_instantiate_inline(cx, def_id);
}
- match cx.tcx.map.expect_item(def_id.node).node {
+ match cx.tcx().map.expect_item(def_id.node).node {
ast::ItemStatic(_, ast::MutImmutable, _) => {
trans_const(cx, ast::MutImmutable, def_id.node);
}
}
}
- (cx.const_values.borrow().get_copy(&def_id.node),
- !cx.non_inlineable_statics.borrow().contains(&def_id.node))
+ (cx.const_values().borrow().get_copy(&def_id.node),
+ !cx.non_inlineable_statics().borrow().contains(&def_id.node))
}
pub fn const_expr(cx: &CrateContext, e: &ast::Expr, is_local: bool) -> (ValueRef, bool, ty::t) {
let mut inlineable = inlineable;
let ety = ty::expr_ty(cx.tcx(), e);
let mut ety_adjusted = ty::expr_ty_adjusted(cx.tcx(), e);
- let opt_adj = cx.tcx.adjustments.borrow().find_copy(&e.id);
+ let opt_adj = cx.tcx().adjustments.borrow().find_copy(&e.id);
match opt_adj {
None => { }
Some(adj) => {
ty::AutoDerefRef(ref adj) => {
let mut ty = ety;
// Save the last autoderef in case we can avoid it.
- for _ in range(0, adj.autoderefs-1) {
- let (dv, dt) = const_deref(cx, llconst, ty, false);
- llconst = dv;
- ty = dt;
+ if adj.autoderefs > 0 {
+ for _ in range(0, adj.autoderefs-1) {
+ let (dv, dt) = const_deref(cx, llconst, ty, false);
+ llconst = dv;
+ ty = dt;
+ }
}
match adj.autoref {
}
Some(ref autoref) => {
match *autoref {
- ty::AutoUnsafe(_) |
+ ty::AutoUnsafe(_, None) |
ty::AutoPtr(ty::ReStatic, _, None) => {
// Don't copy data to do a deref+ref
// (i.e., skip the last auto-deref).
// work properly.
let (_, dt) = const_deref(cx, llconst, ty, false);
ty = dt;
+ } else {
+ llconst = const_addr_of(cx, llconst, ast::MutImmutable)
}
match ty::get(ty).sty {
(expr::cast_enum, expr::cast_integral) => {
let repr = adt::represent_type(cx, basety);
let discr = adt::const_get_discrim(cx, &*repr, v);
- let iv = C_integral(cx.int_type, discr, false);
+ let iv = C_integral(cx.int_type(), discr, false);
let ety_cast = expr::cast_type_kind(cx.tcx(), ety);
match ety_cast {
expr::cast_integral => {
let g = base::get_item_val(ccx, id);
// At this point, get_item_val has already translated the
// constant's initializer to determine its LLVM type.
- let v = ccx.const_values.borrow().get_copy(&id);
+ let v = ccx.const_values().borrow().get_copy(&id);
llvm::LLVMSetInitializer(g, v);
+
+ // `get_item_val` left `g` with external linkage, but we just set an
+ // initializer for it. But we don't know yet if `g` should really be
+ // defined in this compilation unit, so we set its linkage to
+ // `AvailableExternallyLinkage`. (It's still a definition, but acts
+ // like a declaration for most purposes.) If `g` really should be
+ // declared here, then `trans_item` will fix up the linkage later on.
+ llvm::SetLinkage(g, llvm::AvailableExternallyLinkage);
+
if m != ast::MutMutable {
llvm::LLVMSetGlobalConstant(g, True);
}
use driver::config::NoDebugInfo;
use driver::session::Session;
use llvm;
-use llvm::{ContextRef, ModuleRef, ValueRef};
+use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef};
use llvm::{TargetData};
use llvm::mk_target_data;
use metadata::common::LinkMeta;
pub fn_stats: RefCell<Vec<(String, uint, uint)> >,
}
-pub struct CrateContext {
- pub llmod: ModuleRef,
- pub llcx: ContextRef,
- pub metadata_llmod: ModuleRef,
- pub td: TargetData,
- pub tn: TypeNames,
- pub externs: RefCell<ExternMap>,
- pub item_vals: RefCell<NodeMap<ValueRef>>,
- pub exp_map2: resolve::ExportMap2,
- pub reachable: NodeSet,
- pub item_symbols: RefCell<NodeMap<String>>,
- pub link_meta: LinkMeta,
- pub drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
- pub tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
+/// The shared portion of a `CrateContext`. There is one `SharedCrateContext`
+/// per crate. The data here is shared between all compilation units of the
+/// crate, so it must not contain references to any LLVM data structures
+/// (aside from metadata-related ones).
+pub struct SharedCrateContext<'tcx> {
+ local_ccxs: Vec<LocalCrateContext>,
+
+ metadata_llmod: ModuleRef,
+ metadata_llcx: ContextRef,
+
+ exp_map2: resolve::ExportMap2,
+ reachable: NodeSet,
+ item_symbols: RefCell<NodeMap<String>>,
+ link_meta: LinkMeta,
+ /// A set of static items which cannot be inlined into other crates. This
+ /// will prevent IIItem() structures from being encoded into the metadata
+ /// that is generated
+ non_inlineable_statics: RefCell<NodeSet>,
+ symbol_hasher: RefCell<Sha256>,
+ tcx: ty::ctxt<'tcx>,
+ stats: Stats,
+
+ available_monomorphizations: RefCell<HashSet<String>>,
+ available_drop_glues: RefCell<HashMap<ty::t, String>>,
+ available_visit_glues: RefCell<HashMap<ty::t, String>>,
+}
+
+/// The local portion of a `CrateContext`. There is one `LocalCrateContext`
+/// per compilation unit. Each one has its own LLVM `ContextRef` so that
+/// several compilation units may be optimized in parallel. All other LLVM
+/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
+pub struct LocalCrateContext {
+ llmod: ModuleRef,
+ llcx: ContextRef,
+ td: TargetData,
+ tn: TypeNames,
+ externs: RefCell<ExternMap>,
+ item_vals: RefCell<NodeMap<ValueRef>>,
+ drop_glues: RefCell<HashMap<ty::t, ValueRef>>,
+ tydescs: RefCell<HashMap<ty::t, Rc<tydesc_info>>>,
/// Set when running emit_tydescs to enforce that no more tydescs are
/// created.
- pub finished_tydescs: Cell<bool>,
+ finished_tydescs: Cell<bool>,
/// Track mapping of external ids to local items imported for inlining
- pub external: RefCell<DefIdMap<Option<ast::NodeId>>>,
+ external: RefCell<DefIdMap<Option<ast::NodeId>>>,
/// Backwards version of the `external` map (inlined items to where they
/// came from)
- pub external_srcs: RefCell<NodeMap<ast::DefId>>,
- /// A set of static items which cannot be inlined into other crates. This
- /// will prevent in IIItem() structures from being encoded into the metadata
- /// that is generated
- pub non_inlineable_statics: RefCell<NodeSet>,
+ external_srcs: RefCell<NodeMap<ast::DefId>>,
/// Cache instances of monomorphized functions
- pub monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
- pub monomorphizing: RefCell<DefIdMap<uint>>,
+ monomorphized: RefCell<HashMap<MonoId, ValueRef>>,
+ monomorphizing: RefCell<DefIdMap<uint>>,
/// Cache generated vtables
- pub vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
+ vtables: RefCell<HashMap<(ty::t, MonoId), ValueRef>>,
/// Cache of constant strings,
- pub const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
+ const_cstr_cache: RefCell<HashMap<InternedString, ValueRef>>,
/// Reverse-direction for const ptrs cast from globals.
/// Key is an int, cast from a ValueRef holding a *T,
/// when we ptrcast, and we have to ptrcast during translation
/// of a [T] const because we form a slice, a [*T,int] pair, not
/// a pointer to an LLVM array type.
- pub const_globals: RefCell<HashMap<int, ValueRef>>,
+ const_globals: RefCell<HashMap<int, ValueRef>>,
/// Cache of emitted const values
- pub const_values: RefCell<NodeMap<ValueRef>>,
+ const_values: RefCell<NodeMap<ValueRef>>,
/// Cache of external const values
- pub extern_const_values: RefCell<DefIdMap<ValueRef>>,
+ extern_const_values: RefCell<DefIdMap<ValueRef>>,
- pub impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
+ impl_method_cache: RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>>,
/// Cache of closure wrappers for bare fn's.
- pub closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
-
- pub lltypes: RefCell<HashMap<ty::t, Type>>,
- pub llsizingtypes: RefCell<HashMap<ty::t, Type>>,
- pub adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
- pub symbol_hasher: RefCell<Sha256>,
- pub type_hashcodes: RefCell<HashMap<ty::t, String>>,
- pub all_llvm_symbols: RefCell<HashSet<String>>,
- pub tcx: ty::ctxt,
- pub stats: Stats,
- pub int_type: Type,
- pub opaque_vec_type: Type,
- pub builder: BuilderRef_res,
+ closure_bare_wrapper_cache: RefCell<HashMap<ValueRef, ValueRef>>,
+
+ lltypes: RefCell<HashMap<ty::t, Type>>,
+ llsizingtypes: RefCell<HashMap<ty::t, Type>>,
+ adt_reprs: RefCell<HashMap<ty::t, Rc<adt::Repr>>>,
+ type_hashcodes: RefCell<HashMap<ty::t, String>>,
+ all_llvm_symbols: RefCell<HashSet<String>>,
+ int_type: Type,
+ opaque_vec_type: Type,
+ builder: BuilderRef_res,
/// Holds the LLVM values for closure IDs.
- pub unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
+ unboxed_closure_vals: RefCell<DefIdMap<ValueRef>>,
- pub dbg_cx: Option<debuginfo::CrateDebugContext>,
+ dbg_cx: Option<debuginfo::CrateDebugContext>,
- pub eh_personality: RefCell<Option<ValueRef>>,
+ eh_personality: RefCell<Option<ValueRef>>,
intrinsics: RefCell<HashMap<&'static str, ValueRef>>,
+
+ /// Number of LLVM instructions translated into this `LocalCrateContext`.
+ /// This is used to perform some basic load-balancing to keep all LLVM
+ /// contexts around the same size.
+ n_llvm_insns: Cell<uint>,
+}
+
+pub struct CrateContext<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'tcx>,
+ local: &'a LocalCrateContext,
+ /// The index of `local` in `shared.local_ccxs`. This is used in
+ /// `maybe_iter(true)` to identify the original `LocalCrateContext`.
+ index: uint,
+}
+
+pub struct CrateContextIterator<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'tcx>,
+ index: uint,
+}
+
+impl<'a, 'tcx> Iterator<CrateContext<'a, 'tcx>> for CrateContextIterator<'a,'tcx> {
+ fn next(&mut self) -> Option<CrateContext<'a, 'tcx>> {
+ if self.index >= self.shared.local_ccxs.len() {
+ return None;
+ }
+
+ let index = self.index;
+ self.index += 1;
+
+ Some(CrateContext {
+ shared: self.shared,
+ local: &self.shared.local_ccxs[index],
+ index: index,
+ })
+ }
+}
+
+/// The iterator produced by `CrateContext::maybe_iter`.
+pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> {
+ shared: &'a SharedCrateContext<'tcx>,
+ index: uint,
+ single: bool,
+ origin: uint,
+}
+
+impl<'a, 'tcx> Iterator<(CrateContext<'a, 'tcx>, bool)> for CrateContextMaybeIterator<'a, 'tcx> {
+ fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> {
+ if self.index >= self.shared.local_ccxs.len() {
+ return None;
+ }
+
+ let index = self.index;
+ self.index += 1;
+ if self.single {
+ self.index = self.shared.local_ccxs.len();
+ }
+
+ let ccx = CrateContext {
+ shared: self.shared,
+ local: &self.shared.local_ccxs[index],
+ index: index,
+ };
+ Some((ccx, index == self.origin))
+ }
+}
+
+
+unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) {
+ let llcx = llvm::LLVMContextCreate();
+ let llmod = mod_name.with_c_str(|buf| {
+ llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
+ });
+ sess.targ_cfg
+ .target_strs
+ .data_layout
+ .as_slice()
+ .with_c_str(|buf| {
+ llvm::LLVMSetDataLayout(llmod, buf);
+ });
+ sess.targ_cfg
+ .target_strs
+ .target_triple
+ .as_slice()
+ .with_c_str(|buf| {
+ llvm::LLVMRustSetNormalizedTarget(llmod, buf);
+ });
+ (llcx, llmod)
}
-impl CrateContext {
- pub fn new(name: &str,
- tcx: ty::ctxt,
+impl<'tcx> SharedCrateContext<'tcx> {
+ pub fn new(crate_name: &str,
+ local_count: uint,
+ tcx: ty::ctxt<'tcx>,
emap2: resolve::ExportMap2,
symbol_hasher: Sha256,
link_meta: LinkMeta,
reachable: NodeSet)
- -> CrateContext {
+ -> SharedCrateContext<'tcx> {
+ let (metadata_llcx, metadata_llmod) = unsafe {
+ create_context_and_module(&tcx.sess, "metadata")
+ };
+
+ let mut shared_ccx = SharedCrateContext {
+ local_ccxs: Vec::with_capacity(local_count),
+ metadata_llmod: metadata_llmod,
+ metadata_llcx: metadata_llcx,
+ exp_map2: emap2,
+ reachable: reachable,
+ item_symbols: RefCell::new(NodeMap::new()),
+ link_meta: link_meta,
+ non_inlineable_statics: RefCell::new(NodeSet::new()),
+ symbol_hasher: RefCell::new(symbol_hasher),
+ tcx: tcx,
+ stats: Stats {
+ n_static_tydescs: Cell::new(0u),
+ n_glues_created: Cell::new(0u),
+ n_null_glues: Cell::new(0u),
+ n_real_glues: Cell::new(0u),
+ n_fns: Cell::new(0u),
+ n_monos: Cell::new(0u),
+ n_inlines: Cell::new(0u),
+ n_closures: Cell::new(0u),
+ n_llvm_insns: Cell::new(0u),
+ llvm_insns: RefCell::new(HashMap::new()),
+ fn_stats: RefCell::new(Vec::new()),
+ },
+ available_monomorphizations: RefCell::new(HashSet::new()),
+ available_drop_glues: RefCell::new(HashMap::new()),
+ available_visit_glues: RefCell::new(HashMap::new()),
+ };
+
+ for i in range(0, local_count) {
+ // Append ".rs" to crate name as LLVM module identifier.
+ //
+ // LLVM code generator emits a ".file filename" directive
+ // for ELF backends. Value of the "filename" is set as the
+ // LLVM module identifier. Due to a LLVM MC bug[1], LLVM
+ // crashes if the module identifier is same as other symbols
+ // such as a function name in the module.
+ // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
+ let llmod_id = format!("{}.{}.rs", crate_name, i);
+ let local_ccx = LocalCrateContext::new(&shared_ccx, llmod_id.as_slice());
+ shared_ccx.local_ccxs.push(local_ccx);
+ }
+
+ shared_ccx
+ }
+
+ pub fn iter<'a>(&'a self) -> CrateContextIterator<'a, 'tcx> {
+ CrateContextIterator {
+ shared: self,
+ index: 0,
+ }
+ }
+
+ pub fn get_ccx<'a>(&'a self, index: uint) -> CrateContext<'a, 'tcx> {
+ CrateContext {
+ shared: self,
+ local: &self.local_ccxs[index],
+ index: index,
+ }
+ }
+
+ fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a, 'tcx> {
+ let (local_ccx, index) =
+ self.local_ccxs
+ .iter()
+ .zip(range(0, self.local_ccxs.len()))
+ .min_by(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get())
+ .unwrap();
+ CrateContext {
+ shared: self,
+ local: local_ccx,
+ index: index,
+ }
+ }
+
+
+ pub fn metadata_llmod(&self) -> ModuleRef {
+ self.metadata_llmod
+ }
+
+ pub fn metadata_llcx(&self) -> ContextRef {
+ self.metadata_llcx
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.link_meta
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.non_inlineable_statics
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.symbol_hasher
+ }
+
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ &self.tcx
+ }
+
+ pub fn take_tcx(self) -> ty::ctxt<'tcx> {
+ self.tcx
+ }
+
+ pub fn sess<'a>(&'a self) -> &'a Session {
+ &self.tcx.sess
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.stats
+ }
+}
+
+impl LocalCrateContext {
+ fn new(shared: &SharedCrateContext,
+ name: &str)
+ -> LocalCrateContext {
unsafe {
- let llcx = llvm::LLVMContextCreate();
- let llmod = name.with_c_str(|buf| {
- llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
- });
- let metadata_llmod = format!("{}_metadata", name).with_c_str(|buf| {
- llvm::LLVMModuleCreateWithNameInContext(buf, llcx)
- });
- tcx.sess
- .targ_cfg
- .target_strs
- .data_layout
- .as_slice()
- .with_c_str(|buf| {
- llvm::LLVMSetDataLayout(llmod, buf);
- llvm::LLVMSetDataLayout(metadata_llmod, buf);
- });
- tcx.sess
- .targ_cfg
- .target_strs
- .target_triple
- .as_slice()
- .with_c_str(|buf| {
- llvm::LLVMRustSetNormalizedTarget(llmod, buf);
- llvm::LLVMRustSetNormalizedTarget(metadata_llmod, buf);
- });
-
- let td = mk_target_data(tcx.sess
- .targ_cfg
- .target_strs
- .data_layout
- .as_slice());
-
- let dbg_cx = if tcx.sess.opts.debuginfo != NoDebugInfo {
+ let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name);
+
+ let td = mk_target_data(shared.tcx
+ .sess
+ .targ_cfg
+ .target_strs
+ .data_layout
+ .as_slice());
+
+ let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
Some(debuginfo::CrateDebugContext::new(llmod))
} else {
None
};
- let mut ccx = CrateContext {
+ let mut local_ccx = LocalCrateContext {
llmod: llmod,
llcx: llcx,
- metadata_llmod: metadata_llmod,
td: td,
tn: TypeNames::new(),
externs: RefCell::new(HashMap::new()),
item_vals: RefCell::new(NodeMap::new()),
- exp_map2: emap2,
- reachable: reachable,
- item_symbols: RefCell::new(NodeMap::new()),
- link_meta: link_meta,
drop_glues: RefCell::new(HashMap::new()),
tydescs: RefCell::new(HashMap::new()),
finished_tydescs: Cell::new(false),
external: RefCell::new(DefIdMap::new()),
external_srcs: RefCell::new(NodeMap::new()),
- non_inlineable_statics: RefCell::new(NodeSet::new()),
monomorphized: RefCell::new(HashMap::new()),
monomorphizing: RefCell::new(DefIdMap::new()),
vtables: RefCell::new(HashMap::new()),
lltypes: RefCell::new(HashMap::new()),
llsizingtypes: RefCell::new(HashMap::new()),
adt_reprs: RefCell::new(HashMap::new()),
- symbol_hasher: RefCell::new(symbol_hasher),
type_hashcodes: RefCell::new(HashMap::new()),
all_llvm_symbols: RefCell::new(HashSet::new()),
- tcx: tcx,
- stats: Stats {
- n_static_tydescs: Cell::new(0u),
- n_glues_created: Cell::new(0u),
- n_null_glues: Cell::new(0u),
- n_real_glues: Cell::new(0u),
- n_fns: Cell::new(0u),
- n_monos: Cell::new(0u),
- n_inlines: Cell::new(0u),
- n_closures: Cell::new(0u),
- n_llvm_insns: Cell::new(0u),
- llvm_insns: RefCell::new(HashMap::new()),
- fn_stats: RefCell::new(Vec::new()),
- },
int_type: Type::from_ref(ptr::mut_null()),
opaque_vec_type: Type::from_ref(ptr::mut_null()),
builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
dbg_cx: dbg_cx,
eh_personality: RefCell::new(None),
intrinsics: RefCell::new(HashMap::new()),
+ n_llvm_insns: Cell::new(0u),
};
- ccx.int_type = Type::int(&ccx);
- ccx.opaque_vec_type = Type::opaque_vec(&ccx);
+ local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared));
+ local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared));
- let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
- str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type], false);
- ccx.tn.associate_type("str_slice", &str_slice_ty);
+ // Done mutating local_ccx directly. (The rest of the
+ // initialization goes through RefCell.)
+ {
+ let ccx = local_ccx.dummy_ccx(shared);
- ccx.tn.associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+ let mut str_slice_ty = Type::named_struct(&ccx, "str_slice");
+ str_slice_ty.set_struct_body([Type::i8p(&ccx), ccx.int_type()], false);
+ ccx.tn().associate_type("str_slice", &str_slice_ty);
- if ccx.sess().count_llvm_insns() {
- base::init_insn_ctxt()
+ ccx.tn().associate_type("tydesc", &Type::tydesc(&ccx, str_slice_ty));
+
+ if ccx.sess().count_llvm_insns() {
+ base::init_insn_ctxt()
+ }
}
- ccx
+ local_ccx
}
}
- pub fn tcx<'a>(&'a self) -> &'a ty::ctxt {
- &self.tcx
+ /// Create a dummy `CrateContext` from `self` and the provided
+ /// `SharedCrateContext`. This is somewhat dangerous because `self` may
+ /// not actually be an element of `shared.local_ccxs`, which can cause some
+ /// operations to `fail` unexpectedly.
+ ///
+ /// This is used in the `LocalCrateContext` constructor to allow calling
+ /// functions that expect a complete `CrateContext`, even before the local
+ /// portion is fully initialized and attached to the `SharedCrateContext`.
+ fn dummy_ccx<'a, 'tcx>(&'a self, shared: &'a SharedCrateContext<'tcx>)
+ -> CrateContext<'a, 'tcx> {
+ CrateContext {
+ shared: shared,
+ local: self,
+ index: -1 as uint,
+ }
+ }
+}
+
+impl<'b, 'tcx> CrateContext<'b, 'tcx> {
+ pub fn shared(&self) -> &'b SharedCrateContext<'tcx> {
+ self.shared
+ }
+
+ pub fn local(&self) -> &'b LocalCrateContext {
+ self.local
+ }
+
+
+ /// Get a (possibly) different `CrateContext` from the same
+ /// `SharedCrateContext`.
+ pub fn rotate(&self) -> CrateContext<'b, 'tcx> {
+ self.shared.get_smallest_ccx()
+ }
+
+ /// Either iterate over only `self`, or iterate over all `CrateContext`s in
+ /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)`
+ /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false`
+ /// otherwise. This method is useful for avoiding code duplication in
+ /// cases where it may or may not be necessary to translate code into every
+ /// context.
+ pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> {
+ CrateContextMaybeIterator {
+ shared: self.shared,
+ index: if iter_all { 0 } else { self.index },
+ single: !iter_all,
+ origin: self.index,
+ }
+ }
+
+
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
+ &self.shared.tcx
}
pub fn sess<'a>(&'a self) -> &'a Session {
- &self.tcx.sess
+ &self.shared.tcx.sess
}
- pub fn builder<'a>(&'a self) -> Builder<'a> {
+ pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> {
Builder::new(self)
}
+ pub fn raw_builder<'a>(&'a self) -> BuilderRef {
+ self.local.builder.b
+ }
+
pub fn tydesc_type(&self) -> Type {
- self.tn.find_type("tydesc").unwrap()
+ self.local.tn.find_type("tydesc").unwrap()
}
pub fn get_intrinsic(&self, key: & &'static str) -> ValueRef {
- match self.intrinsics.borrow().find_copy(key) {
+ match self.intrinsics().borrow().find_copy(key) {
Some(v) => return v,
_ => {}
}
let ref cfg = self.sess().targ_cfg;
cfg.os != abi::OsiOS || cfg.arch != abi::Arm
}
+
+
+ pub fn llmod(&self) -> ModuleRef {
+ self.local.llmod
+ }
+
+ pub fn llcx(&self) -> ContextRef {
+ self.local.llcx
+ }
+
+ pub fn td<'a>(&'a self) -> &'a TargetData {
+ &self.local.td
+ }
+
+ pub fn tn<'a>(&'a self) -> &'a TypeNames {
+ &self.local.tn
+ }
+
+ pub fn externs<'a>(&'a self) -> &'a RefCell<ExternMap> {
+ &self.local.externs
+ }
+
+ pub fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.local.item_vals
+ }
+
+ pub fn exp_map2<'a>(&'a self) -> &'a resolve::ExportMap2 {
+ &self.shared.exp_map2
+ }
+
+ pub fn reachable<'a>(&'a self) -> &'a NodeSet {
+ &self.shared.reachable
+ }
+
+ pub fn item_symbols<'a>(&'a self) -> &'a RefCell<NodeMap<String>> {
+ &self.shared.item_symbols
+ }
+
+ pub fn link_meta<'a>(&'a self) -> &'a LinkMeta {
+ &self.shared.link_meta
+ }
+
+ pub fn drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, ValueRef>> {
+ &self.local.drop_glues
+ }
+
+ pub fn tydescs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<tydesc_info>>> {
+ &self.local.tydescs
+ }
+
+ pub fn finished_tydescs<'a>(&'a self) -> &'a Cell<bool> {
+ &self.local.finished_tydescs
+ }
+
+ pub fn external<'a>(&'a self) -> &'a RefCell<DefIdMap<Option<ast::NodeId>>> {
+ &self.local.external
+ }
+
+ pub fn external_srcs<'a>(&'a self) -> &'a RefCell<NodeMap<ast::DefId>> {
+ &self.local.external_srcs
+ }
+
+ pub fn non_inlineable_statics<'a>(&'a self) -> &'a RefCell<NodeSet> {
+ &self.shared.non_inlineable_statics
+ }
+
+ pub fn monomorphized<'a>(&'a self) -> &'a RefCell<HashMap<MonoId, ValueRef>> {
+ &self.local.monomorphized
+ }
+
+ pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<uint>> {
+ &self.local.monomorphizing
+ }
+
+ pub fn vtables<'a>(&'a self) -> &'a RefCell<HashMap<(ty::t, MonoId), ValueRef>> {
+ &self.local.vtables
+ }
+
+ pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<HashMap<InternedString, ValueRef>> {
+ &self.local.const_cstr_cache
+ }
+
+ pub fn const_globals<'a>(&'a self) -> &'a RefCell<HashMap<int, ValueRef>> {
+ &self.local.const_globals
+ }
+
+ pub fn const_values<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
+ &self.local.const_values
+ }
+
+ pub fn extern_const_values<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.local.extern_const_values
+ }
+
+ pub fn impl_method_cache<'a>(&'a self)
+ -> &'a RefCell<HashMap<(ast::DefId, ast::Name), ast::DefId>> {
+ &self.local.impl_method_cache
+ }
+
+ pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell<HashMap<ValueRef, ValueRef>> {
+ &self.local.closure_bare_wrapper_cache
+ }
+
+ pub fn lltypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.local.lltypes
+ }
+
+ pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Type>> {
+ &self.local.llsizingtypes
+ }
+
+ pub fn adt_reprs<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, Rc<adt::Repr>>> {
+ &self.local.adt_reprs
+ }
+
+ pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell<Sha256> {
+ &self.shared.symbol_hasher
+ }
+
+ pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.local.type_hashcodes
+ }
+
+ pub fn all_llvm_symbols<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.local.all_llvm_symbols
+ }
+
+ pub fn stats<'a>(&'a self) -> &'a Stats {
+ &self.shared.stats
+ }
+
+ pub fn available_monomorphizations<'a>(&'a self) -> &'a RefCell<HashSet<String>> {
+ &self.shared.available_monomorphizations
+ }
+
+ pub fn available_drop_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.shared.available_drop_glues
+ }
+
+ pub fn available_visit_glues<'a>(&'a self) -> &'a RefCell<HashMap<ty::t, String>> {
+ &self.shared.available_visit_glues
+ }
+
+ pub fn int_type(&self) -> Type {
+ self.local.int_type
+ }
+
+ pub fn opaque_vec_type(&self) -> Type {
+ self.local.opaque_vec_type
+ }
+
+ pub fn unboxed_closure_vals<'a>(&'a self) -> &'a RefCell<DefIdMap<ValueRef>> {
+ &self.local.unboxed_closure_vals
+ }
+
+ pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext> {
+ &self.local.dbg_cx
+ }
+
+ pub fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> {
+ &self.local.eh_personality
+ }
+
+ fn intrinsics<'a>(&'a self) -> &'a RefCell<HashMap<&'static str, ValueRef>> {
+ &self.local.intrinsics
+ }
+
+ pub fn count_llvm_insn(&self) {
+ self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1);
+ }
}
fn declare_intrinsic(ccx: &CrateContext, key: & &'static str) -> Option<ValueRef> {
($name:expr fn() -> $ret:expr) => (
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name, Type::func([], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
);
if *key == $name {
let f = base::decl_cdecl_fn(ccx, $name,
Type::func([$($arg),*], &$ret), ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
let f = base::decl_cdecl_fn(ccx, stringify!($cname),
Type::func([$($arg),*], &$ret),
ty::mk_nil());
- ccx.intrinsics.borrow_mut().insert($name, f.clone());
+ ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f);
}
)
use std::gc::Gc;
-pub fn trans_stmt<'a>(cx: &'a Block<'a>,
- s: &ast::Stmt)
- -> &'a Block<'a> {
+pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+ s: &ast::Stmt)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_stmt");
let fcx = cx.fcx;
debug!("trans_stmt({})", s.repr(cx.tcx()));
return bcx;
}
-pub fn trans_stmt_semi<'a>(cx: &'a Block<'a>, e: &ast::Expr) -> &'a Block<'a> {
+pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &ast::Expr)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_stmt_semi");
let ty = expr_ty(cx, e);
if ty::type_needs_drop(cx.tcx(), ty) {
}
}
-pub fn trans_block<'a>(bcx: &'a Block<'a>,
- b: &ast::Block,
- mut dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ b: &ast::Block,
+ mut dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_block");
let fcx = bcx.fcx;
let mut bcx = bcx;
return bcx;
}
-pub fn trans_if<'a>(bcx: &'a Block<'a>,
- if_id: ast::NodeId,
- cond: &ast::Expr,
- thn: ast::P<ast::Block>,
- els: Option<Gc<ast::Expr>>,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ if_id: ast::NodeId,
+ cond: &ast::Expr,
+ thn: ast::P<ast::Block>,
+ els: Option<Gc<ast::Expr>>,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
debug!("trans_if(bcx={}, if_id={}, cond={}, thn={:?}, dest={})",
bcx.to_str(), if_id, bcx.expr_to_string(cond), thn.id,
dest.to_string(bcx.ccx()));
next_bcx
}
-pub fn trans_while<'a>(bcx: &'a Block<'a>,
- loop_id: ast::NodeId,
- cond: &ast::Expr,
- body: &ast::Block)
- -> &'a Block<'a> {
+pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ loop_id: ast::NodeId,
+ cond: &ast::Expr,
+ body: &ast::Block)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_while");
let fcx = bcx.fcx;
}
/// Translates a `for` loop.
-pub fn trans_for<'a>(
- mut bcx: &'a Block<'a>,
- loop_info: NodeInfo,
- pat: Gc<ast::Pat>,
- head: &ast::Expr,
- body: &ast::Block)
- -> &'a Block<'a> {
+pub fn trans_for<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ loop_info: NodeInfo,
+ pat: Gc<ast::Pat>,
+ head: &ast::Expr,
+ body: &ast::Block)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_for");
// bcx
next_bcx_in
}
-pub fn trans_loop<'a>(bcx:&'a Block<'a>,
- loop_id: ast::NodeId,
- body: &ast::Block)
- -> &'a Block<'a> {
+pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ loop_id: ast::NodeId,
+ body: &ast::Block)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_loop");
let fcx = bcx.fcx;
return next_bcx_in;
}
-pub fn trans_break_cont<'a>(bcx: &'a Block<'a>,
- expr_id: ast::NodeId,
- opt_label: Option<Ident>,
- exit: uint)
- -> &'a Block<'a> {
+pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId,
+ opt_label: Option<Ident>,
+ exit: uint)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_break_cont");
let fcx = bcx.fcx;
return bcx;
}
-pub fn trans_break<'a>(bcx: &'a Block<'a>,
- expr_id: ast::NodeId,
- label_opt: Option<Ident>)
- -> &'a Block<'a> {
+pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId,
+ label_opt: Option<Ident>)
+ -> Block<'blk, 'tcx> {
return trans_break_cont(bcx, expr_id, label_opt, cleanup::EXIT_BREAK);
}
-pub fn trans_cont<'a>(bcx: &'a Block<'a>,
- expr_id: ast::NodeId,
- label_opt: Option<Ident>)
- -> &'a Block<'a> {
+pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId,
+ label_opt: Option<Ident>)
+ -> Block<'blk, 'tcx> {
return trans_break_cont(bcx, expr_id, label_opt, cleanup::EXIT_LOOP);
}
-pub fn trans_ret<'a>(bcx: &'a Block<'a>,
- e: Option<Gc<ast::Expr>>)
- -> &'a Block<'a> {
+pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ e: Option<Gc<ast::Expr>>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_ret");
let fcx = bcx.fcx;
let mut bcx = bcx;
return bcx;
}
-pub fn trans_fail<'a>(
- bcx: &'a Block<'a>,
- sp: Span,
- fail_str: InternedString)
- -> &'a Block<'a> {
+pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ sp: Span,
+ fail_str: InternedString)
+ -> Block<'blk, 'tcx> {
let ccx = bcx.ccx();
let _icx = push_ctxt("trans_fail_value");
return bcx;
}
-pub fn trans_fail_bounds_check<'a>(
- bcx: &'a Block<'a>,
- sp: Span,
- index: ValueRef,
- len: ValueRef)
- -> &'a Block<'a> {
+pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ sp: Span,
+ index: ValueRef,
+ len: ValueRef)
+ -> Block<'blk, 'tcx> {
let ccx = bcx.ccx();
let _icx = push_ctxt("trans_fail_bounds_check");
pub kind: K,
}
-pub struct DatumBlock<'a, K> {
- pub bcx: &'a Block<'a>,
+pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
+ pub bcx: Block<'blk, 'tcx>,
pub datum: Datum<K>,
}
return Datum::new(val, ty, Rvalue::new(ByValue));
}
-pub fn immediate_rvalue_bcx<'a>(bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> DatumBlock<'a, Rvalue> {
+pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
return DatumBlock::new(bcx, immediate_rvalue(val, ty))
}
-pub fn lvalue_scratch_datum<'a, A>(bcx: &'a Block<'a>,
- ty: ty::t,
- name: &str,
- zero: bool,
- scope: cleanup::ScopeId,
- arg: A,
- populate: |A, &'a Block<'a>, ValueRef|
- -> &'a Block<'a>)
- -> DatumBlock<'a, Lvalue> {
+pub fn lvalue_scratch_datum<'blk, 'tcx, A>(bcx: Block<'blk, 'tcx>,
+ ty: ty::t,
+ name: &str,
+ zero: bool,
+ scope: cleanup::ScopeId,
+ arg: A,
+ populate: |A, Block<'blk, 'tcx>, ValueRef|
+ -> Block<'blk, 'tcx>)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
/*!
* Allocates temporary space on the stack using alloca() and
* returns a by-ref Datum pointing to it. The memory will be
DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue))
}
-pub fn rvalue_scratch_datum(bcx: &Block,
+pub fn rvalue_scratch_datum(bcx: Block,
ty: ty::t,
name: &str)
-> Datum<Rvalue> {
* Take appropriate action after the value in `datum` has been
* stored to a new location.
*/
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> &'a Block<'a>;
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> Block<'blk, 'tcx>;
/**
* True if this mode is a reference mode, meaning that the datum's
}
impl KindOps for Rvalue {
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- _val: ValueRef,
- _ty: ty::t)
- -> &'a Block<'a> {
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ _val: ValueRef,
+ _ty: ty::t)
+ -> Block<'blk, 'tcx> {
// No cleanup is scheduled for an rvalue, so we don't have
// to do anything after a move to cancel or duplicate it.
bcx
}
impl KindOps for Lvalue {
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> &'a Block<'a> {
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> Block<'blk, 'tcx> {
/*!
* If an lvalue is moved, we must zero out the memory in which
* it resides so as to cancel cleanup. If an @T lvalue is
}
impl KindOps for Expr {
- fn post_store<'a>(&self,
- bcx: &'a Block<'a>,
- val: ValueRef,
- ty: ty::t)
- -> &'a Block<'a> {
+ fn post_store<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ val: ValueRef,
+ ty: ty::t)
+ -> Block<'blk, 'tcx> {
match *self {
LvalueExpr => Lvalue.post_store(bcx, val, ty),
RvalueExpr(ref r) => r.post_store(bcx, val, ty),
self.val
}
- pub fn to_lvalue_datum_in_scope<'a>(self,
- bcx: &'a Block<'a>,
- name: &str,
- scope: cleanup::ScopeId)
- -> DatumBlock<'a, Lvalue> {
+ pub fn to_lvalue_datum_in_scope<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &str,
+ scope: cleanup::ScopeId)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
/*!
* Returns an lvalue datum (that is, a by ref datum with
* cleanup scheduled). If `self` is not already an lvalue,
}
}
- pub fn to_ref_datum<'a>(self, bcx: &'a Block<'a>) -> DatumBlock<'a, Rvalue> {
+ pub fn to_ref_datum<'blk, 'tcx>(self, bcx: Block<'blk, 'tcx>)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
let mut bcx = bcx;
match self.kind.mode {
ByRef => DatumBlock::new(bcx, self),
}
}
- pub fn to_appropriate_datum<'a>(self,
- bcx: &'a Block<'a>)
- -> DatumBlock<'a, Rvalue> {
+ pub fn to_appropriate_datum<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
match self.appropriate_rvalue_mode(bcx.ccx()) {
ByRef => {
self.to_ref_datum(bcx)
}
#[allow(dead_code)] // potentially useful
- pub fn assert_lvalue(self, bcx: &Block) -> Datum<Lvalue> {
+ pub fn assert_lvalue(self, bcx: Block) -> Datum<Lvalue> {
/*!
* Asserts that this datum *is* an lvalue and returns it.
*/
|_| bcx.sess().bug("assert_lvalue given rvalue"))
}
- pub fn assert_rvalue(self, bcx: &Block) -> Datum<Rvalue> {
+ pub fn assert_rvalue(self, bcx: Block) -> Datum<Rvalue> {
/*!
* Asserts that this datum *is* an lvalue and returns it.
*/
|r| r)
}
- pub fn store_to_dest<'a>(self,
- bcx: &'a Block<'a>,
- dest: expr::Dest,
- expr_id: ast::NodeId)
- -> &'a Block<'a> {
+ pub fn store_to_dest<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ dest: expr::Dest,
+ expr_id: ast::NodeId)
+ -> Block<'blk, 'tcx> {
match dest {
expr::Ignore => {
self.add_clean_if_rvalue(bcx, expr_id);
}
}
- pub fn add_clean_if_rvalue<'a>(self,
- bcx: &'a Block<'a>,
- expr_id: ast::NodeId) {
+ pub fn add_clean_if_rvalue<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ expr_id: ast::NodeId) {
/*!
* Arranges cleanup for `self` if it is an rvalue. Use when
* you are done working with a value that may need drop.
})
}
- pub fn clean<'a>(self,
- bcx: &'a Block<'a>,
- name: &'static str,
- expr_id: ast::NodeId)
- -> &'a Block<'a> {
+ pub fn clean<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &'static str,
+ expr_id: ast::NodeId)
+ -> Block<'blk, 'tcx> {
/*!
* Ensures that `self` will get cleaned up, if it is not an lvalue
* already.
self.to_lvalue_datum(bcx, name, expr_id).bcx
}
- pub fn to_lvalue_datum<'a>(self,
- bcx: &'a Block<'a>,
- name: &str,
- expr_id: ast::NodeId)
- -> DatumBlock<'a, Lvalue> {
+ pub fn to_lvalue_datum<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &str,
+ expr_id: ast::NodeId)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
+ debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx()));
+
assert!(ty::lltype_is_sized(bcx.tcx(), self.ty),
"Trying to convert unsized value to lval");
self.match_kind(
})
}
- pub fn to_rvalue_datum<'a>(self,
- bcx: &'a Block<'a>,
- name: &'static str)
- -> DatumBlock<'a, Rvalue> {
+ pub fn to_rvalue_datum<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ name: &'static str)
+ -> DatumBlock<'blk, 'tcx, Rvalue> {
/*!
* Ensures that we have an rvalue datum (that is, a datum with
* no cleanup scheduled).
// datum may also be unsized _without the size information_. It is the
// callers responsibility to package the result in some way to make a valid
// datum in that case (e.g., by making a fat pointer or opened pair).
- pub fn get_element<'a>(&self,
- bcx: &'a Block<'a>,
- ty: ty::t,
- gep: |ValueRef| -> ValueRef)
- -> Datum<Lvalue> {
+ pub fn get_element(&self, bcx: Block, ty: ty::t,
+ gep: |ValueRef| -> ValueRef)
+ -> Datum<Lvalue> {
let val = match ty::get(self.ty).sty {
_ if ty::type_is_sized(bcx.tcx(), self.ty) => gep(self.val),
ty::ty_open(_) => {
}
}
- pub fn get_vec_base_and_len<'a>(&self, bcx: &'a Block<'a>) -> (ValueRef, ValueRef) {
+ pub fn get_vec_base_and_len(&self, bcx: Block) -> (ValueRef, ValueRef) {
//! Converts a vector into the slice pair.
tvec::get_base_and_len(bcx, self.val, self.ty)
Datum { val: val, ty: ty, kind: kind.to_expr_kind() }
}
- pub fn store_to<'a>(self,
- bcx: &'a Block<'a>,
- dst: ValueRef)
- -> &'a Block<'a> {
+ pub fn store_to<'blk, 'tcx>(self,
+ bcx: Block<'blk, 'tcx>,
+ dst: ValueRef)
+ -> Block<'blk, 'tcx> {
/*!
* Moves or copies this value into a new home, as appropriate
* depending on the type of the datum. This method consumes
self.kind.post_store(bcx, self.val, self.ty)
}
- fn shallow_copy<'a>(&self,
- bcx: &'a Block<'a>,
- dst: ValueRef)
- -> &'a Block<'a> {
+ fn shallow_copy<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ dst: ValueRef)
+ -> Block<'blk, 'tcx> {
/*!
* Helper function that performs a shallow copy of this value
* into `dst`, which should be a pointer to a memory location
return bcx;
}
- pub fn shallow_copy_and_take<'a>(&self,
- bcx: &'a Block<'a>,
- dst: ValueRef)
- -> &'a Block<'a> {
+ pub fn shallow_copy_and_take<'blk, 'tcx>(&self,
+ bcx: Block<'blk, 'tcx>,
+ dst: ValueRef)
+ -> Block<'blk, 'tcx> {
/*!
* Copies the value into a new location and runs any necessary
* take glue on the new location. This function always
#[allow(dead_code)] // useful for debugging
pub fn to_string(&self, ccx: &CrateContext) -> String {
format!("Datum({}, {}, {:?})",
- ccx.tn.val_to_string(self.val),
+ ccx.tn().val_to_string(self.val),
ty_to_string(ccx.tcx(), self.ty),
self.kind)
}
appropriate_rvalue_mode(ccx, self.ty)
}
- pub fn to_llscalarish<'a>(self, bcx: &'a Block<'a>) -> ValueRef {
+ pub fn to_llscalarish(self, bcx: Block) -> ValueRef {
/*!
* Converts `self` into a by-value `ValueRef`. Consumes this
* datum (i.e., absolves you of responsibility to cleanup the
}
}
- pub fn to_llbool<'a>(self, bcx: &'a Block<'a>) -> ValueRef {
+ pub fn to_llbool(self, bcx: Block) -> ValueRef {
assert!(ty::type_is_bool(self.ty) || ty::type_is_bot(self.ty))
self.to_llscalarish(bcx)
}
}
-impl <'a, K> DatumBlock<'a, K> {
- pub fn new(bcx: &'a Block<'a>, datum: Datum<K>) -> DatumBlock<'a, K> {
+impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> {
+ pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<K>) -> DatumBlock<'blk, 'tcx, K> {
DatumBlock { bcx: bcx, datum: datum }
}
}
-impl<'a, K:KindOps> DatumBlock<'a, K> {
- pub fn to_expr_datumblock(self) -> DatumBlock<'a, Expr> {
+impl<'blk, 'tcx, K:KindOps> DatumBlock<'blk, 'tcx, K> {
+ pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> {
DatumBlock::new(self.bcx, self.datum.to_expr_datum())
}
}
-impl<'a> DatumBlock<'a, Expr> {
+impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> {
pub fn store_to_dest(self,
dest: expr::Dest,
- expr_id: ast::NodeId) -> &'a Block<'a> {
+ expr_id: ast::NodeId) -> Block<'blk, 'tcx> {
let DatumBlock { bcx, datum } = self;
datum.store_to_dest(bcx, dest, expr_id)
}
- pub fn to_llbool(self) -> Result<'a> {
+ pub fn to_llbool(self) -> Result<'blk, 'tcx> {
let DatumBlock { datum, bcx } = self;
Result::new(bcx, datum.to_llbool(bcx))
}
static UNKNOWN_FILE_METADATA: DIFile = (0 as DIFile);
static UNKNOWN_SCOPE_METADATA: DIScope = (0 as DIScope);
+static FLAGS_NONE: c_uint = 0;
+static FLAGS_ARTIFICAL: c_uint = llvm::debuginfo::FlagArtificial as c_uint;
+
//=-----------------------------------------------------------------------------
// Public Interface of debuginfo module
//=-----------------------------------------------------------------------------
// First, find out the 'real' def_id of the type. Items inlined from
// other crates have to be mapped back to their source.
let source_def_id = if def_id.krate == ast::LOCAL_CRATE {
- match cx.external_srcs.borrow().find_copy(&def_id.node) {
+ match cx.external_srcs().borrow().find_copy(&def_id.node) {
Some(source_def_id) => {
// The given def_id identifies the inlined copy of a
// type definition, let's take the source of the copy.
// Get the crate hash as first part of the identifier.
let crate_hash = if source_def_id.krate == ast::LOCAL_CRATE {
- cx.link_meta.crate_hash.clone()
+ cx.link_meta().crate_hash.clone()
} else {
cx.sess().cstore.get_crate_hash(source_def_id.krate)
};
/// Create any deferred debug metadata nodes
pub fn finalize(cx: &CrateContext) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
if cx.sess().targ_cfg.os == abi::OsMacos ||
cx.sess().targ_cfg.os == abi::OsiOS {
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 2));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 2));
} else {
- // FIXME(#13611) this is a kludge fix because the linux bots have
+ // FIXME(#13611) this is a kludge fix because the Linux bots have
// gdb 7.4 which doesn't understand dwarf4, we should
// do something more graceful here.
"Dwarf Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s, 3));
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s, 3));
}
// Prevent bitcode readers from deleting the debug info.
"Debug Info Version".with_c_str(
- |s| llvm::LLVMRustAddModuleFlag(cx.llmod, s,
+ |s| llvm::LLVMRustAddModuleFlag(cx.llmod(), s,
llvm::LLVMRustDebugMetadataVersion));
};
}
pub fn create_global_var_metadata(cx: &CrateContext,
node_id: ast::NodeId,
global: ValueRef) {
- if cx.dbg_cx.is_none() {
+ if cx.dbg_cx().is_none() {
return;
}
// crate should already contain debuginfo for it. More importantly, the
// global might not even exist in un-inlined form anywhere which would lead
// to a linker errors.
- if cx.external_srcs.borrow().contains_key(&node_id) {
+ if cx.external_srcs().borrow().contains_key(&node_id) {
return;
}
- let var_item = cx.tcx.map.get(node_id);
+ let var_item = cx.tcx().map.get(node_id);
let (ident, span) = match var_item {
ast_map::NodeItem(item) => {
/// Creates debug information for the given local variable.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_local_var_metadata(bcx: &Block, local: &ast::Local) {
+pub fn create_local_var_metadata(bcx: Block, local: &ast::Local) {
if fn_should_be_ignored(bcx.fcx) {
return;
}
let cx = bcx.ccx();
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, path1| {
let var_ident = path1.node;
/// Creates debug information for a variable captured in a closure.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_captured_var_metadata(bcx: &Block,
+pub fn create_captured_var_metadata(bcx: Block,
node_id: ast::NodeId,
env_data_type: ty::t,
env_pointer: ValueRef,
let cx = bcx.ccx();
- let ast_item = cx.tcx.map.find(node_id);
+ let ast_item = cx.tcx().map.find(node_id);
let variable_ident = match ast_item {
None => {
/// match-statement arm.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_match_binding_metadata(bcx: &Block,
+pub fn create_match_binding_metadata(bcx: Block,
variable_ident: ast::Ident,
binding: BindingInfo) {
if fn_should_be_ignored(bcx.fcx) {
let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span);
let aops = unsafe {
- [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type.to_ref())]
+ [llvm::LLVMDIBuilderCreateOpDeref(bcx.ccx().int_type().to_ref())]
};
// Regardless of the actual type (`T`) we're always passed the stack slot (alloca)
// for the binding. For ByRef bindings that's a `T*` but for ByMove bindings we
/// Creates debug information for the given function argument.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_argument_metadata(bcx: &Block, arg: &ast::Arg) {
+pub fn create_argument_metadata(bcx: Block, arg: &ast::Arg) {
if fn_should_be_ignored(bcx.fcx) {
return;
}
let fcx = bcx.fcx;
let cx = fcx.ccx;
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
let scope_metadata = bcx.fcx.debug_context.get_ref(cx, arg.pat.span).fn_metadata;
pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, path1| {
let empty_generics = ast_util::empty_generics();
- let fnitem = cx.tcx.map.get(fn_ast_id);
+ let fnitem = cx.tcx().map.get(fn_ast_id);
let (ident, fn_decl, generics, top_level_block, span, has_path) = match fnitem {
ast_map::NodeItem(ref item) => {
// externally visible or by being inlined into something externally visible).
// It might better to use the `exported_items` set from `driver::CrateAnalysis`
// in the future, but (atm) this set is not available in the translation pass.
- !cx.reachable.contains(&node_id)
+ !cx.reachable().contains(&node_id)
}
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray {
return unsafe {
llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32)
});
fn fallback_path(cx: &CrateContext) -> CString {
- cx.link_meta.crate_name.as_slice().to_c_str()
+ cx.link_meta().crate_name.as_slice().to_c_str()
}
}
-fn declare_local(bcx: &Block,
+fn declare_local(bcx: Block,
variable_ident: ast::Ident,
variable_type: ty::t,
scope_metadata: DIScope,
match scope_map.borrow().find_copy(&node_id) {
Some(scope_metadata) => scope_metadata,
None => {
- let node = fcx.ccx.tcx.map.get(node_id);
+ let node = fcx.ccx.tcx().map.get(node_id);
fcx.ccx.sess().span_bug(span,
format!("debuginfo: Could not find scope info for node {:?}",
llvm_type: Type,
type_metadata: DIType,
offset: MemberOffset,
+ flags: c_uint
}
// A factory for MemberDescriptions. It produces a list of member descriptions
llvm_type: type_of::type_of(cx, field.mt.ty),
type_metadata: type_metadata(cx, field.mt.ty, self.span),
offset: offset,
+ flags: FLAGS_NONE,
}
}).collect()
}
llvm_type: type_of::type_of(cx, component_type),
type_metadata: type_metadata(cx, component_type, self.span),
offset: ComputedMemberOffset,
+ flags: FLAGS_NONE,
}
}).collect()
}
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
+ flags: FLAGS_NONE
}
}).collect()
},
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
+ flags: FLAGS_NONE
}
]
}
llvm_type: non_null_llvm_type,
type_metadata: non_null_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
+ flags: FLAGS_NONE
};
let unique_type_id = debug_context(cx).type_map
llvm_type: artificial_struct_llvm_type,
type_metadata: artificial_struct_metadata,
offset: FixedMemberOffset { bytes: 0 },
+ flags: FLAGS_NONE
}
]
},
llvm_type: variant_llvm_type,
type_metadata: variant_type_metadata,
offset: FixedMemberOffset { bytes: 0 },
+ flags: FLAGS_NONE
}
]
},
_ => type_metadata(cx, ty, self.span)
},
offset: ComputedMemberOffset,
+ flags: if self.discriminant_type_metadata.is_some() && i == 0 {
+ FLAGS_ARTIFICAL
+ } else {
+ FLAGS_NONE
+ }
}
}).collect()
}
def_id: ast::DefId)
-> token::InternedString {
let name = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.get_path_elem(def_id.node).name()
+ cx.tcx().map.get_path_elem(def_id.node).name()
} else {
- csearch::get_item_path(&cx.tcx, def_id).last().unwrap().name()
+ csearch::get_item_path(cx.tcx(), def_id).last().unwrap().name()
};
token::get_name(name)
bytes_to_bits(member_size),
bytes_to_bits(member_align),
bytes_to_bits(member_offset),
- 0,
+ member_description.flags,
member_description.type_metadata)
}
})
llvm_type: *member_llvm_types.get(0),
type_metadata: type_metadata(cx, int_type, codemap::DUMMY_SP),
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL,
},
MemberDescription {
name: "drop_glue".to_string(),
llvm_type: *member_llvm_types.get(1),
type_metadata: nil_pointer_type_metadata,
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL,
},
MemberDescription {
name: "prev".to_string(),
llvm_type: *member_llvm_types.get(2),
type_metadata: nil_pointer_type_metadata,
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL,
},
MemberDescription {
name: "next".to_string(),
llvm_type: *member_llvm_types.get(3),
type_metadata: nil_pointer_type_metadata,
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL,
},
MemberDescription {
name: "val".to_string(),
llvm_type: *member_llvm_types.get(4),
type_metadata: content_type_metadata,
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL,
}
];
content_llvm_type: Type)
-> bool {
member_llvm_types.len() == 5 &&
- member_llvm_types[0] == cx.int_type &&
+ member_llvm_types[0] == cx.int_type() &&
member_llvm_types[1] == Type::generic_glue_fn(cx).ptr_to() &&
member_llvm_types[2] == Type::i8(cx).ptr_to() &&
member_llvm_types[3] == Type::i8(cx).ptr_to() &&
llvm_type: *member_llvm_types.get(0),
type_metadata: element_type_metadata,
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL
},
MemberDescription {
name: "length".to_string(),
llvm_type: *member_llvm_types.get(1),
type_metadata: type_metadata(cx, ty::mk_uint(), span),
offset: ComputedMemberOffset,
+ flags: FLAGS_ARTIFICAL
},
];
-> bool {
member_llvm_types.len() == 2 &&
member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
- member_llvm_types[1] == cx.int_type
+ member_llvm_types[1] == cx.int_type()
}
}
};
unsafe {
- llvm::LLVMSetCurrentDebugLocation(cx.builder.b, metadata_node);
+ llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node);
}
debug_context(cx).current_debug_location.set(debug_location);
#[inline]
fn debug_context<'a>(cx: &'a CrateContext) -> &'a CrateDebugContext {
- let debug_context: &'a CrateDebugContext = cx.dbg_cx.get_ref();
+ let debug_context: &'a CrateDebugContext = cx.dbg_cx().get_ref();
debug_context
}
#[inline]
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
fn DIB(cx: &CrateContext) -> DIBuilderRef {
- cx.dbg_cx.get_ref().builder
+ cx.dbg_cx().get_ref().builder
}
fn fn_should_be_ignored(fcx: &FunctionContext) -> bool {
}
fn assert_type_for_node_id(cx: &CrateContext, node_id: ast::NodeId, error_span: Span) {
- if !cx.tcx.node_types.borrow().contains_key(&(node_id as uint)) {
+ if !cx.tcx().node_types.borrow().contains_key(&(node_id as uint)) {
cx.sess().span_bug(error_span, "debuginfo: Could not find type for node id!");
}
}
-> (DIScope, Span) {
let containing_scope = namespace_for_item(cx, def_id).scope;
let definition_span = if def_id.krate == ast::LOCAL_CRATE {
- cx.tcx.map.span(def_id.node)
+ cx.tcx().map.span(def_id.node)
} else {
// For external items there is no span information
codemap::DUMMY_SP
fn_entry_block: &ast::Block,
fn_metadata: DISubprogram,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
struct ScopeStackEntry {
scope_metadata: DIScope,
scope_stack: &mut Vec<ScopeStackEntry> ,
scope_map: &mut HashMap<ast::NodeId, DIScope>) {
- let def_map = &cx.tcx.def_map;
+ let def_map = &cx.tcx().def_map;
// Unfortunately, we cannot just use pat_util::pat_bindings() or
// ast_util::walk_pat() here because we have to visit *all* nodes in
}
}
- ast::ExprWhile(ref cond_exp, ref loop_body) => {
+ ast::ExprWhile(ref cond_exp, ref loop_body, _) => {
walk_expr(cx, &**cond_exp, scope_stack, scope_map);
with_new_scope(cx,
}
fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str {
- cx.link_meta.crate_name.as_slice()
+ cx.link_meta().crate_name.as_slice()
}
fn namespace_for_item(cx: &CrateContext, def_id: ast::DefId) -> Rc<NamespaceTreeNode> {
impl Dest {
pub fn to_string(&self, ccx: &CrateContext) -> String {
match *self {
- SaveIn(v) => format!("SaveIn({})", ccx.tn.val_to_string(v)),
+ SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)),
Ignore => "Ignore".to_string()
}
}
}
-pub fn trans_into<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
+pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
/*!
* This function is equivalent to `trans(bcx, expr).store_to_dest(dest)`
* but it may generate better optimized LLVM code.
bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id)
}
-pub fn trans<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* Translates an expression, returning a datum (and new block)
* encapsulating the result. When possible, it is preferred to
return DatumBlock::new(bcx, datum);
}
-pub fn get_len(bcx: &Block, fat_ptr: ValueRef) -> ValueRef {
+pub fn get_len(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, [0u, abi::slice_elt_len])
}
-pub fn get_dataptr(bcx: &Block, fat_ptr: ValueRef) -> ValueRef {
+pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
GEPi(bcx, fat_ptr, [0u, abi::slice_elt_base])
}
-fn apply_adjustments<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* Helper for trans that apply adjustments from `expr` to `datum`,
* which should be the unadjusted translation of `expr`.
debug!("after adjustments, datum={}", datum.to_string(bcx.ccx()));
return DatumBlock::new(bcx, datum);
- fn apply_autoref<'a>(autoref: &ty::AutoRef,
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn apply_autoref<'blk, 'tcx>(autoref: &ty::AutoRef,
+ bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let mut datum = datum;
let datum = match autoref {
- &AutoUnsafe(..) => {
- debug!(" AutoUnsafe");
- unpack_datum!(bcx, ref_ptr(bcx, expr, datum))
- }
- &AutoPtr(_, _, ref a) => {
+ &AutoPtr(_, _, ref a) | &AutoUnsafe(_, ref a) => {
debug!(" AutoPtr");
match a {
&Some(box ref a) => datum = unpack_datum!(bcx,
DatumBlock::new(bcx, datum)
}
- fn ref_ptr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn ref_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
if !ty::type_is_sized(bcx.tcx(), datum.ty) {
debug!("Taking address of unsized type {}",
bcx.ty_to_string(datum.ty));
// into a type to be destructed. If we want to end up with a Box pointer,
// then mk_ty should make a Box pointer (T -> Box<T>), if we want a
// borrowed reference then it should be T -> &T.
- fn unsized_info<'a>(bcx: &'a Block<'a>,
- kind: &ty::UnsizeKind,
- id: ast::NodeId,
- unsized_ty: ty::t,
- mk_ty: |ty::t| -> ty::t) -> ValueRef {
+ fn unsized_info<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ kind: &ty::UnsizeKind,
+ id: ast::NodeId,
+ unsized_ty: ty::t,
+ mk_ty: |ty::t| -> ty::t) -> ValueRef {
match kind {
&ty::UnsizeLength(len) => C_uint(bcx.ccx(), len),
&ty::UnsizeStruct(box ref k, tp_index) => match ty::get(unsized_ty).sty {
}
}
- fn unsize_expr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- k: &ty::UnsizeKind)
- -> DatumBlock<'a, Expr> {
+ fn unsize_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ k: &ty::UnsizeKind)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let tcx = bcx.tcx();
let datum_ty = datum.ty;
let unsized_ty = ty::unsize_ty(tcx, datum_ty, k, expr.span);
into_fat_ptr(bcx, expr, datum, dest_ty, base, info)
}
- fn ref_fat_ptr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn ref_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let tcx = bcx.tcx();
let dest_ty = ty::close_type(tcx, datum.ty);
let base = |bcx, val| Load(bcx, get_dataptr(bcx, val));
into_fat_ptr(bcx, expr, datum, dest_ty, base, len)
}
- fn into_fat_ptr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- dest_ty: ty::t,
- base: |&'a Block<'a>, ValueRef| -> ValueRef,
- info: |&'a Block<'a>, ValueRef| -> ValueRef)
- -> DatumBlock<'a, Expr> {
+ fn into_fat_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ dest_ty: ty::t,
+ base: |Block<'blk, 'tcx>, ValueRef| -> ValueRef,
+ info: |Block<'blk, 'tcx>, ValueRef| -> ValueRef)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
// Arrange cleanup
DatumBlock::new(bcx, scratch.to_expr_datum())
}
- fn unsize_unique_vec<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- len: uint)
- -> DatumBlock<'a, Expr> {
+ fn unsize_unique_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ len: uint)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let tcx = bcx.tcx();
DatumBlock::new(bcx, scratch.to_expr_datum())
}
- fn unsize_unique_expr<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- k: &ty::UnsizeKind)
- -> DatumBlock<'a, Expr> {
+ fn unsize_unique_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ k: &ty::UnsizeKind)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let tcx = bcx.tcx();
DatumBlock::new(bcx, scratch.to_expr_datum())
}
- fn add_env<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>)
- -> DatumBlock<'a, Expr> {
+ fn add_env<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>)
+ -> DatumBlock<'blk, 'tcx, Expr> {
// This is not the most efficient thing possible; since closures
// are two words it'd be better if this were compiled in
// 'dest' mode, but I can't find a nice way to structure the
}
}
-pub fn trans_to_lvalue<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- name: &str)
- -> DatumBlock<'a, Lvalue> {
+pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ name: &str)
+ -> DatumBlock<'blk, 'tcx, Lvalue> {
/*!
* Translates an expression in "lvalue" mode -- meaning that it
* returns a reference to the memory that the expr represents.
return datum.to_lvalue_datum(bcx, name, expr.id);
}
-fn trans_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* A version of `trans` that ignores adjustments. You almost
* certainly do not want to call this directly.
}
};
- fn nil<'a>(bcx: &'a Block<'a>, ty: ty::t) -> DatumBlock<'a, Expr> {
+ fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let llval = C_undef(type_of::type_of(bcx.ccx(), ty));
let datum = immediate_rvalue(llval, ty);
DatumBlock::new(bcx, datum.to_expr_datum())
}
}
-fn trans_datum_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let fcx = bcx.fcx;
let _icx = push_ctxt("trans_datum_unadjusted");
}
}
-fn trans_rec_field<'a>(bcx: &'a Block<'a>,
- base: &ast::Expr,
- field: ast::Ident)
- -> DatumBlock<'a, Expr> {
+fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ base: &ast::Expr,
+ field: ast::Ident)
+ -> DatumBlock<'blk, 'tcx, Expr> {
//! Translates `base.field`.
let mut bcx = bcx;
})
}
-fn trans_index<'a>(bcx: &'a Block<'a>,
- index_expr: &ast::Expr,
- base: &ast::Expr,
- idx: &ast::Expr,
- method_call: MethodCall)
- -> DatumBlock<'a, Expr> {
+fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ index_expr: &ast::Expr,
+ base: &ast::Expr,
+ idx: &ast::Expr,
+ method_call: MethodCall)
+ -> DatumBlock<'blk, 'tcx, Expr> {
//! Translates `base[idx]`.
let _icx = push_ctxt("trans_index");
let mut bcx = bcx;
// Check for overloaded index.
- let method_ty = ccx.tcx
+ let method_ty = ccx.tcx()
.method_map
.borrow()
.find(&method_call)
let ix_size = machine::llbitsize_of_real(bcx.ccx(),
val_ty(ix_val));
let int_size = machine::llbitsize_of_real(bcx.ccx(),
- ccx.int_type);
+ ccx.int_type());
let ix_val = {
if ix_size < int_size {
if ty::type_is_signed(expr_ty(bcx, idx)) {
- SExt(bcx, ix_val, ccx.int_type)
- } else { ZExt(bcx, ix_val, ccx.int_type) }
+ SExt(bcx, ix_val, ccx.int_type())
+ } else { ZExt(bcx, ix_val, ccx.int_type()) }
} else if ix_size > int_size {
- Trunc(bcx, ix_val, ccx.int_type)
+ Trunc(bcx, ix_val, ccx.int_type())
} else {
ix_val
}
DatumBlock::new(bcx, elt_datum)
}
-fn trans_def<'a>(bcx: &'a Block<'a>,
- ref_expr: &ast::Expr,
- def: def::Def)
- -> DatumBlock<'a, Expr>
-{
+fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ ref_expr: &ast::Expr,
+ def: def::Def)
+ -> DatumBlock<'blk, 'tcx, Expr> {
//! Translates a reference to a path.
let _icx = push_ctxt("trans_def_lvalue");
trans_def_fn_unadjusted(bcx, ref_expr, def)
}
def::DefStatic(did, _) => {
+ // There are three things that may happen here:
+ // 1) If the static item is defined in this crate, it will be
+ // translated using `get_item_val`, and we return a pointer to
+ // the result.
+ // 2) If the static item is defined in another crate, but is
+ // marked inlineable, then it will be inlined into this crate
+ // and then translated with `get_item_val`. Again, we return a
+ // pointer to the result.
+ // 3) If the static item is defined in another crate and is not
+ // marked inlineable, then we add (or reuse) a declaration of
+ // an external global, and return a pointer to that.
let const_ty = expr_ty(bcx, ref_expr);
- fn get_did(ccx: &CrateContext, did: ast::DefId)
- -> ast::DefId {
- if did.krate != ast::LOCAL_CRATE {
- inline::maybe_instantiate_inline(ccx, did)
- } else {
- did
- }
- }
-
- fn get_val<'a>(bcx: &'a Block<'a>, did: ast::DefId, const_ty: ty::t)
- -> ValueRef {
+ fn get_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, did: ast::DefId, const_ty: ty::t)
+ -> ValueRef {
// For external constants, we don't inline.
if did.krate == ast::LOCAL_CRATE {
+ // Case 1 or 2. (The inlining in case 2 produces a new
+ // DefId in LOCAL_CRATE.)
+
// The LLVM global has the type of its initializer,
// which may not be equal to the enum's type for
// non-C-like enums.
let pty = type_of::type_of(bcx.ccx(), const_ty).ptr_to();
PointerCast(bcx, val, pty)
} else {
- match bcx.ccx().extern_const_values.borrow().find(&did) {
+ // Case 3.
+ match bcx.ccx().extern_const_values().borrow().find(&did) {
None => {} // Continue.
Some(llval) => {
return *llval;
&bcx.ccx().sess().cstore,
did);
let llval = symbol.as_slice().with_c_str(|buf| {
- llvm::LLVMAddGlobal(bcx.ccx().llmod,
+ llvm::LLVMAddGlobal(bcx.ccx().llmod(),
llty.to_ref(),
buf)
});
- bcx.ccx().extern_const_values.borrow_mut()
+ bcx.ccx().extern_const_values().borrow_mut()
.insert(did, llval);
llval
}
}
}
-
- let did = get_did(bcx.ccx(), did);
+ // The DefId produced by `maybe_instantiate_inline`
+ // may be in the LOCAL_CRATE or not.
+ let did = inline::maybe_instantiate_inline(bcx.ccx(), did);
let val = get_val(bcx, did, const_ty);
DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr))
}
}
}
-fn trans_rvalue_stmt_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr)
- -> &'a Block<'a> {
+fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr)
+ -> Block<'blk, 'tcx> {
let mut bcx = bcx;
let _icx = push_ctxt("trans_rvalue_stmt");
ast::ExprRet(ex) => {
controlflow::trans_ret(bcx, ex)
}
- ast::ExprWhile(ref cond, ref body) => {
+ ast::ExprWhile(ref cond, ref body, _) => {
controlflow::trans_while(bcx, expr.id, &**cond, &**body)
}
ast::ExprForLoop(ref pat, ref head, ref body, _) => {
}
}
-fn trans_rvalue_dps_unadjusted<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
+fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rvalue_dps_unadjusted");
let mut bcx = bcx;
let tcx = bcx.tcx();
}
}
-fn trans_def_dps_unadjusted<'a>(
- bcx: &'a Block<'a>,
- ref_expr: &ast::Expr,
- def: def::Def,
- dest: Dest)
- -> &'a Block<'a> {
+fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ ref_expr: &ast::Expr,
+ def: def::Def,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_def_dps_unadjusted");
let lldest = match dest {
}
}
-fn trans_def_fn_unadjusted<'a>(bcx: &'a Block<'a>,
- ref_expr: &ast::Expr,
- def: def::Def) -> DatumBlock<'a, Expr> {
+fn trans_def_fn_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ ref_expr: &ast::Expr,
+ def: def::Def)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_def_datum_unadjusted");
let llfn = match def {
DatumBlock::new(bcx, Datum::new(llfn, fn_ty, RvalueExpr(Rvalue::new(ByValue))))
}
-pub fn trans_local_var<'a>(bcx: &'a Block<'a>,
- def: def::Def)
- -> Datum<Lvalue> {
+pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ def: def::Def)
+ -> Datum<Lvalue> {
/*!
* Translates a reference to a local variable or argument.
* This always results in an lvalue datum.
}
};
- fn take_local<'a>(bcx: &'a Block<'a>,
- table: &NodeMap<Datum<Lvalue>>,
- nid: ast::NodeId)
- -> Datum<Lvalue> {
+ fn take_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ table: &NodeMap<Datum<Lvalue>>,
+ nid: ast::NodeId)
+ -> Datum<Lvalue> {
let datum = match table.find(&nid) {
Some(&v) => v,
None => {
}
}
-fn trans_struct<'a>(bcx: &'a Block<'a>,
- fields: &[ast::Field],
- base: Option<Gc<ast::Expr>>,
- expr_span: codemap::Span,
- id: ast::NodeId,
- dest: Dest) -> &'a Block<'a> {
+fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ fields: &[ast::Field],
+ base: Option<Gc<ast::Expr>>,
+ expr_span: codemap::Span,
+ id: ast::NodeId,
+ dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_rec");
let ty = node_id_type(bcx, id);
* - `optbase` contains information on the base struct (if any) from
* which remaining fields are copied; see comments on `StructBaseInfo`.
*/
-pub fn trans_adt<'a>(mut bcx: &'a Block<'a>,
- ty: ty::t,
- discr: ty::Disr,
- fields: &[(uint, Gc<ast::Expr>)],
- optbase: Option<StructBaseInfo>,
- dest: Dest) -> &'a Block<'a> {
+pub fn trans_adt<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ ty: ty::t,
+ discr: ty::Disr,
+ fields: &[(uint, Gc<ast::Expr>)],
+ optbase: Option<StructBaseInfo>,
+ dest: Dest) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_adt");
let fcx = bcx.fcx;
let repr = adt::represent_type(bcx.ccx(), ty);
}
}
- // Now, we just overwrite the fields we've explicity specified
+ // Now, we just overwrite the fields we've explicitly specified
for &(i, ref e) in fields.iter() {
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
let e_ty = expr_ty_adjusted(bcx, &**e);
}
-fn trans_immediate_lit<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- lit: ast::Lit)
- -> DatumBlock<'a, Expr> {
+fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ lit: ast::Lit)
+ -> DatumBlock<'blk, 'tcx, Expr> {
// must not be a string constant, that is a RvalueDpsExpr
let _icx = push_ctxt("trans_immediate_lit");
let ty = expr_ty(bcx, expr);
immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock()
}
-fn trans_unary<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- op: ast::UnOp,
- sub_expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ op: ast::UnOp,
+ sub_expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
let mut bcx = bcx;
let _icx = push_ctxt("trans_unary_datum");
// Otherwise, we should be in the RvalueDpsExpr path.
assert!(
op == ast::UnDeref ||
- !ccx.tcx.method_map.borrow().contains_key(&method_call));
+ !ccx.tcx().method_map.borrow().contains_key(&method_call));
let un_ty = expr_ty(bcx, expr);
}
}
-fn trans_uniq_expr<'a>(bcx: &'a Block<'a>,
- box_ty: ty::t,
- contents: &ast::Expr,
- contents_ty: ty::t)
- -> DatumBlock<'a, Expr> {
+fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ty: ty::t,
+ contents: &ast::Expr,
+ contents_ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_uniq_expr");
let fcx = bcx.fcx;
assert!(ty::type_is_sized(bcx.tcx(), contents_ty));
immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock()
}
-fn trans_managed_expr<'a>(bcx: &'a Block<'a>,
- box_ty: ty::t,
- contents: &ast::Expr,
- contents_ty: ty::t)
- -> DatumBlock<'a, Expr> {
+fn trans_managed_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ty: ty::t,
+ contents: &ast::Expr,
+ contents_ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_managed_expr");
let fcx = bcx.fcx;
let ty = type_of::type_of(bcx.ccx(), contents_ty);
immediate_rvalue_bcx(bcx, bx, box_ty).to_expr_datumblock()
}
-fn trans_addr_of<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- subexpr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ subexpr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_addr_of");
let mut bcx = bcx;
let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of"));
// Important to get types for both lhs and rhs, because one might be _|_
// and the other not.
-fn trans_eager_binop<'a>(
- bcx: &'a Block<'a>,
- binop_expr: &ast::Expr,
- binop_ty: ty::t,
- op: ast::BinOp,
- lhs_t: ty::t,
- lhs: ValueRef,
- rhs_t: ty::t,
- rhs: ValueRef)
- -> DatumBlock<'a, Expr> {
+fn trans_eager_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ binop_expr: &ast::Expr,
+ binop_ty: ty::t,
+ op: ast::BinOp,
+ lhs_t: ty::t,
+ lhs: ValueRef,
+ rhs_t: ty::t,
+ rhs: ValueRef)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_eager_binop");
let tcx = bcx.tcx();
lazy_or,
}
-fn trans_lazy_binop<'a>(
- bcx: &'a Block<'a>,
- binop_expr: &ast::Expr,
- op: lazy_binop_ty,
- a: &ast::Expr,
- b: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ binop_expr: &ast::Expr,
+ op: lazy_binop_ty,
+ a: &ast::Expr,
+ b: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_lazy_binop");
let binop_ty = expr_ty(bcx, binop_expr);
let fcx = bcx.fcx;
return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock();
}
-fn trans_binary<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- op: ast::BinOp,
- lhs: &ast::Expr,
- rhs: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ op: ast::BinOp,
+ lhs: &ast::Expr,
+ rhs: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_binary");
let ccx = bcx.ccx();
// if overloaded, would be RvalueDpsExpr
- assert!(!ccx.tcx.method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
+ assert!(!ccx.tcx().method_map.borrow().contains_key(&MethodCall::expr(expr.id)));
match op {
ast::BiAnd => {
}
}
-fn trans_overloaded_op<'a, 'b>(
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- method_call: MethodCall,
- lhs: Datum<Expr>,
- rhs: Option<(Datum<Expr>, ast::NodeId)>,
- dest: Option<Dest>)
- -> Result<'a> {
+fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ method_call: MethodCall,
+ lhs: Datum<Expr>,
+ rhs: Option<(Datum<Expr>, ast::NodeId)>,
+ dest: Option<Dest>)
+ -> Result<'blk, 'tcx> {
let method_ty = bcx.tcx().method_map.borrow().get(&method_call).ty;
callee::trans_call_inner(bcx,
Some(expr_info(expr)),
dest)
}
-fn trans_overloaded_call<'a>(
- mut bcx: &'a Block<'a>,
- expr: &ast::Expr,
- callee: Gc<ast::Expr>,
- args: &[Gc<ast::Expr>],
- dest: Option<Dest>)
- -> &'a Block<'a> {
+fn trans_overloaded_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ callee: Gc<ast::Expr>,
+ args: &[Gc<ast::Expr>],
+ dest: Option<Dest>)
+ -> Block<'blk, 'tcx> {
let method_call = MethodCall::expr(expr.id);
let method_type = bcx.tcx()
.method_map
bcx
}
-fn int_cast(bcx: &Block,
+fn int_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef,
}
}
-fn float_cast(bcx: &Block,
+fn float_cast(bcx: Block,
lldsttype: Type,
llsrctype: Type,
llsrc: ValueRef)
match ty::get(t).sty {
ty::ty_char => cast_integral,
ty::ty_float(..) => cast_float,
- ty::ty_ptr(..) => cast_pointer,
- ty::ty_rptr(_, mt) => {
+ ty::ty_rptr(_, mt) | ty::ty_ptr(mt) => {
if ty::type_is_sized(tcx, mt.ty) {
cast_pointer
} else {
}
}
-fn trans_imm_cast<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- id: ast::NodeId)
- -> DatumBlock<'a, Expr> {
+fn cast_is_noop(t_in: ty::t, t_out: ty::t) -> bool {
+ if ty::type_is_boxed(t_in) || ty::type_is_boxed(t_out) {
+ return false;
+ }
+
+ match (ty::deref(t_in, true), ty::deref(t_out, true)) {
+ (Some(ty::mt{ ty: t_in, .. }), Some(ty::mt{ ty: t_out, .. })) => {
+ t_in == t_out
+ }
+ _ => false
+ }
+}
+
+fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ id: ast::NodeId)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let _icx = push_ctxt("trans_cast");
let mut bcx = bcx;
let ccx = bcx.ccx();
// Convert the value to be cast into a ValueRef, either by-ref or
// by-value as appropriate given its type:
- let datum = unpack_datum!(bcx, trans(bcx, expr));
+ let mut datum = unpack_datum!(bcx, trans(bcx, expr));
+
+ if cast_is_noop(datum.ty, t_out) {
+ datum.ty = t_out;
+ return DatumBlock::new(bcx, datum);
+ }
+
let newval = match (k_in, k_out) {
(cast_integral, cast_integral) => {
let llexpr = datum.to_llscalarish(bcx);
return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock();
}
-fn trans_assign_op<'a>(
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- op: ast::BinOp,
- dst: &ast::Expr,
- src: Gc<ast::Expr>)
- -> &'a Block<'a> {
+fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ op: ast::BinOp,
+ dst: &ast::Expr,
+ src: Gc<ast::Expr>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_assign_op");
let mut bcx = bcx;
return result_datum.store_to(bcx, dst_datum.val);
}
-fn auto_ref<'a>(bcx: &'a Block<'a>,
- datum: Datum<Expr>,
- expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ datum: Datum<Expr>,
+ expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
// Ensure cleanup of `datum` if not already scheduled and obtain
DatumBlock::new(bcx, Datum::new(llref, ptr_ty, RvalueExpr(Rvalue::new(ByValue))))
}
-fn deref_multiple<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- times: uint)
- -> DatumBlock<'a, Expr> {
+fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ times: uint)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let mut bcx = bcx;
let mut datum = datum;
for i in range(0, times) {
DatumBlock { bcx: bcx, datum: datum }
}
-fn deref_once<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- method_call: MethodCall)
- -> DatumBlock<'a, Expr> {
+fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ method_call: MethodCall)
+ -> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx();
debug!("deref_once(expr={}, datum={}, method_call={})",
let mut bcx = bcx;
// Check for overloaded deref.
- let method_ty = ccx.tcx.method_map.borrow()
+ let method_ty = ccx.tcx().method_map.borrow()
.find(&method_call).map(|method| method.ty);
let datum = match method_ty {
Some(method_ty) => {
typeck::AutoDeref(_) => unpack_datum!(bcx, auto_ref(bcx, datum, expr)),
_ => datum
};
- let val = unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
- datum, None, None));
+
let ref_ty = ty::ty_fn_ret(monomorphize_type(bcx, method_ty));
- Datum::new(val, ref_ty, RvalueExpr(Rvalue::new(ByValue)))
+ let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref");
+
+ unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call,
+ datum, None, Some(SaveIn(scratch.val))));
+ scratch.to_expr_datum()
}
None => {
// Not overloaded. We already have a pointer we know how to deref.
if ty::type_is_sized(bcx.tcx(), content_ty) {
deref_owned_pointer(bcx, expr, datum, content_ty)
} else {
- // A fat pointer and an opened DST value have the same represenation
- // just different types.
- DatumBlock::new(bcx, Datum::new(datum.val,
- ty::mk_open(bcx.tcx(), content_ty),
- datum.kind))
+ // A fat pointer and an opened DST value have the same
+ // representation, just different types. Since there is no
+ // temporary for `*e` here (because it is unsized), we cannot
+ // emulate the sized object code path for running drop glue and
+ // free. Instead, we schedule cleanup for `e`, turning it into
+ // an lvalue.
+ let datum = unpack_datum!(
+ bcx, datum.to_lvalue_datum(bcx, "deref", expr.id));
+
+ let datum = Datum::new(datum.val, ty::mk_open(bcx.tcx(), content_ty), LvalueExpr);
+ DatumBlock::new(bcx, datum)
}
}
// just different types.
DatumBlock::new(bcx, Datum::new(datum.val,
ty::mk_open(bcx.tcx(), content_ty),
- datum.kind))
+ LvalueExpr))
}
}
return r;
- fn deref_owned_pointer<'a>(bcx: &'a Block<'a>,
- expr: &ast::Expr,
- datum: Datum<Expr>,
- content_ty: ty::t)
- -> DatumBlock<'a, Expr> {
+ fn deref_owned_pointer<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ datum: Datum<Expr>,
+ content_ty: ty::t)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* We microoptimize derefs of owned pointers a bit here.
* Basically, the idea is to make the deref of an rvalue
}
};
unsafe {
+ // Declare a symbol `foo` with the desired linkage.
let g1 = ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty2.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty2.to_ref(), buf)
});
llvm::SetLinkage(g1, linkage);
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(ident.get());
let g2 = real_name.with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
}
}
None => unsafe {
+ // Generate an external declaration.
ident.get().with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, llty.to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), llty.to_ref(), buf)
})
}
}
let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys);
let llfn = base::get_extern_fn(ccx,
- &mut *ccx.externs.borrow_mut(),
+ &mut *ccx.externs().borrow_mut(),
name,
cc,
llfn_ty,
llfn
}
-pub fn trans_native_call<'a>(
- bcx: &'a Block<'a>,
- callee_ty: ty::t,
- llfn: ValueRef,
- llretptr: ValueRef,
- llargs_rust: &[ValueRef],
- passed_arg_tys: Vec<ty::t> )
- -> &'a Block<'a> {
+pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ callee_ty: ty::t,
+ llfn: ValueRef,
+ llretptr: ValueRef,
+ llargs_rust: &[ValueRef],
+ passed_arg_tys: Vec<ty::t> )
+ -> Block<'blk, 'tcx> {
/*!
* Prepares a call to a native function. This requires adapting
* from the Rust argument passing rules to the native rules.
llfn={}, \
llretptr={})",
callee_ty.repr(tcx),
- ccx.tn.val_to_string(llfn),
- ccx.tn.val_to_string(llretptr));
+ ccx.tn().val_to_string(llfn),
+ ccx.tn().val_to_string(llretptr));
let (fn_abi, fn_sig) = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref fn_ty) => (fn_ty.abi, fn_ty.sig.clone()),
debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}",
i,
- ccx.tn.val_to_string(llarg_rust),
+ ccx.tn().val_to_string(llarg_rust),
rust_indirect,
- ccx.tn.type_to_string(arg_tys[i].ty));
+ ccx.tn().type_to_string(arg_tys[i].ty));
// Ensure that we always have the Rust value indirectly,
// because it makes bitcasting easier.
}
debug!("llarg_rust={} (after indirection)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Check whether we need to do any casting
match arg_tys[i].cast {
}
debug!("llarg_rust={} (after casting)",
- ccx.tn.val_to_string(llarg_rust));
+ ccx.tn().val_to_string(llarg_rust));
// Finally, load the value if needed for the foreign ABI
let foreign_indirect = arg_tys[i].is_indirect();
};
debug!("argument {}, llarg_foreign={}",
- i, ccx.tn.val_to_string(llarg_foreign));
+ i, ccx.tn().val_to_string(llarg_foreign));
// fill padding with undef value
match arg_tys[i].pad {
None => fn_type.ret_ty.ty
};
- debug!("llretptr={}", ccx.tn.val_to_string(llretptr));
- debug!("llforeign_retval={}", ccx.tn.val_to_string(llforeign_retval));
- debug!("llrust_ret_ty={}", ccx.tn.type_to_string(llrust_ret_ty));
- debug!("llforeign_ret_ty={}", ccx.tn.type_to_string(llforeign_ret_ty));
+ debug!("llretptr={}", ccx.tn().val_to_string(llretptr));
+ debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval));
+ debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty));
+ debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty));
if llrust_ret_ty == llforeign_ret_ty {
base::store_ty(bcx, llforeign_retval, llretptr, fn_sig.output)
register_foreign_item_fn(ccx, abi, ty,
lname.get().as_slice(),
Some(foreign_item.span));
+ // Unlike for other items, we shouldn't call
+ // `base::update_linkage` here. Foreign items have
+ // special linkage requirements, which are handled
+ // inside `foreign::register_*`.
}
}
}
_ => {}
}
- ccx.item_symbols.borrow_mut().insert(foreign_item.id,
+ ccx.item_symbols().borrow_mut().insert(foreign_item.id,
lname.get().to_string());
}
}
let llfn = base::decl_fn(ccx, name, cconv, llfn_ty, ty::mk_nil());
add_argument_attributes(&tys, llfn);
debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})",
- ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty);
add_argument_attributes(&tys, llfn);
debug!("register_rust_fn_with_foreign_abi(node_id={:?}, llfn_ty={}, llfn={})",
- node_id, ccx.tn.type_to_string(llfn_ty), ccx.tn.val_to_string(llfn));
+ node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn));
llfn
}
let t = ty::node_id_to_type(tcx, id).subst(
ccx.tcx(), ¶m_substs.substs);
- let ps = ccx.tcx.map.with_path(id, |path| {
+ let ps = ccx.tcx().map.with_path(id, |path| {
let abi = Some(ast_map::PathName(special_idents::clownshoe_abi.name));
link::mangle(path.chain(abi.move_iter()), hash)
});
_ => {
ccx.sess().bug(format!("build_rust_fn: extern fn {} has ty {}, \
expected a bare fn ty",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
t.repr(tcx)).as_slice());
}
};
debug!("build_rust_fn: path={} id={} t={}",
- ccx.tcx.map.path_to_string(id),
+ ccx.tcx().map.path_to_string(id),
id, t.repr(tcx));
let llfn = base::decl_internal_rust_fn(ccx, t, ps.as_slice());
let tcx = ccx.tcx();
debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={})",
- ccx.tn.val_to_string(llrustfn),
- ccx.tn.val_to_string(llwrapfn),
+ ccx.tn().val_to_string(llrustfn),
+ ccx.tn().val_to_string(llwrapfn),
t.repr(ccx.tcx()));
// Avoid all the Rust generation stuff and just generate raw
let the_block =
"the block".with_c_str(
- |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx, llwrapfn, s));
+ |s| llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn, s));
let builder = ccx.builder();
builder.position_at_end(the_block);
match foreign_outptr {
Some(llforeign_outptr) => {
debug!("out pointer, foreign={}",
- ccx.tn.val_to_string(llforeign_outptr));
+ ccx.tn().val_to_string(llforeign_outptr));
let llrust_retptr =
builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to());
debug!("out pointer, foreign={} (casted)",
- ccx.tn.val_to_string(llrust_retptr));
+ ccx.tn().val_to_string(llrust_retptr));
llrust_args.push(llrust_retptr);
return_alloca = None;
}
allocad={}, \
llrust_ret_ty={}, \
return_ty={}",
- ccx.tn.val_to_string(slot),
- ccx.tn.type_to_string(llrust_ret_ty),
+ ccx.tn().val_to_string(slot),
+ ccx.tn().type_to_string(llrust_ret_ty),
tys.fn_sig.output.repr(tcx));
llrust_args.push(slot);
return_alloca = Some(slot);
let mut llforeign_arg = get_param(llwrapfn, foreign_index);
debug!("llforeign_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llforeign_arg));
+ i, ccx.tn().val_to_string(llforeign_arg));
debug!("rust_indirect = {}, foreign_indirect = {}",
rust_indirect, foreign_indirect);
};
debug!("llrust_arg {}{}: {}", "#",
- i, ccx.tn.val_to_string(llrust_arg));
+ i, ccx.tn().val_to_string(llrust_arg));
llrust_args.push(llrust_arg);
}
// Perform the call itself
- debug!("calling llrustfn = {}, t = {}", ccx.tn.val_to_string(llrustfn), t.repr(ccx.tcx()));
+ debug!("calling llrustfn = {}, t = {}",
+ ccx.tn().val_to_string(llrustfn), t.repr(ccx.tcx()));
let attributes = base::get_fn_llvm_attributes(ccx, t);
let llrust_ret_val = builder.call(llrustfn, llrust_args.as_slice(), Some(attributes));
fn_ty={} -> {}, \
ret_def={}",
ty.repr(ccx.tcx()),
- ccx.tn.types_to_str(llsig.llarg_tys.as_slice()),
- ccx.tn.type_to_string(llsig.llret_ty),
- ccx.tn.types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
- ccx.tn.type_to_string(fn_ty.ret_ty.ty),
+ ccx.tn().types_to_str(llsig.llarg_tys.as_slice()),
+ ccx.tn().type_to_string(llsig.llret_ty),
+ ccx.tn().types_to_str(fn_ty.arg_tys.iter().map(|t| t.ty).collect::<Vec<_>>().as_slice()),
+ ccx.tn().type_to_string(fn_ty.ret_ty.ty),
ret_def);
ForeignTypes {
use syntax::ast;
use syntax::parse::token;
-pub fn trans_free<'a>(cx: &'a Block<'a>, v: ValueRef) -> &'a Block<'a> {
+pub fn trans_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_free");
callee::trans_lang_call(cx,
langcall(cx, None, "", FreeFnLangItem),
Some(expr::Ignore)).bcx
}
-fn trans_exchange_free_internal<'a>(cx: &'a Block<'a>, v: ValueRef, size: ValueRef,
- align: ValueRef) -> &'a Block<'a> {
+pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
+ size: ValueRef, align: ValueRef)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx,
Some(expr::Ignore)).bcx
}
-pub fn trans_exchange_free<'a>(cx: &'a Block<'a>, v: ValueRef, size: u64,
- align: u64) -> &'a Block<'a> {
- trans_exchange_free_internal(cx,
- v,
- C_uint(cx.ccx(), size as uint),
- C_uint(cx.ccx(), align as uint))
+pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef,
+ size: u64, align: u64) -> Block<'blk, 'tcx> {
+ trans_exchange_free_dyn(cx, v, C_uint(cx.ccx(), size as uint),
+ C_uint(cx.ccx(), align as uint))
}
-pub fn trans_exchange_free_ty<'a>(bcx: &'a Block<'a>, ptr: ValueRef,
- content_ty: ty::t) -> &'a Block<'a> {
+pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef,
+ content_ty: ty::t) -> Block<'blk, 'tcx> {
assert!(ty::type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
}
}
-pub fn take_ty<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+pub fn take_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("take_ty");
match ty::get(t).sty {
return ty::mk_i8();
}
match ty::get(t).sty {
- ty::ty_box(typ) if !ty::type_needs_drop(tcx, typ) =>
- ty::mk_box(tcx, ty::mk_i8()),
-
ty::ty_uniq(typ) if !ty::type_needs_drop(tcx, typ)
&& ty::type_is_sized(tcx, typ) => {
let llty = sizing_type_of(ccx, typ);
if llsize_of_alloc(ccx, llty) == 0 {
ty::mk_i8()
} else {
- ty::mk_uniq(tcx, ty::mk_i8())
+ t
}
}
_ => t
}
}
-pub fn drop_ty<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty(t={})", t.repr(bcx.tcx()));
let _icx = push_ctxt("drop_ty");
bcx
}
-pub fn drop_ty_immediate<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate");
let vp = alloca(bcx, type_of(bcx.ccx(), t), "");
Store(bcx, v, vp);
debug!("make drop glue for {}", ppaux::ty_to_string(ccx.tcx(), t));
let t = get_drop_glue_type(ccx, t);
debug!("drop glue type {}", ppaux::ty_to_string(ccx.tcx(), t));
- match ccx.drop_glues.borrow().find(&t) {
+ match ccx.drop_glues().borrow().find(&t) {
Some(&glue) => return glue,
_ => { }
}
};
let llfnty = Type::glue_fn(ccx, llty);
- let glue = declare_generic_glue(ccx, t, llfnty, "drop");
- ccx.drop_glues.borrow_mut().insert(t, glue);
+ let (glue, new_sym) = match ccx.available_drop_glues().borrow().find(&t) {
+ Some(old_sym) => {
+ let glue = decl_cdecl_fn(ccx, old_sym.as_slice(), llfnty, ty::mk_nil());
+ (glue, None)
+ },
+ None => {
+ let (sym, glue) = declare_generic_glue(ccx, t, llfnty, "drop");
+ (glue, Some(sym))
+ },
+ };
- make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
+ ccx.drop_glues().borrow_mut().insert(t, glue);
+
+ // To avoid infinite recursion, don't `make_drop_glue` until after we've
+ // added the entry to the `drop_glues` cache.
+ match new_sym {
+ Some(sym) => {
+ ccx.available_drop_glues().borrow_mut().insert(t, sym);
+ // We're creating a new drop glue, so also generate a body.
+ make_generic_glue(ccx, t, glue, make_drop_glue, "drop");
+ },
+ None => {},
+ }
glue
}
Some(visit_glue) => visit_glue,
None => {
debug!("+++ lazily_emit_tydesc_glue VISIT {}", ppaux::ty_to_string(ccx.tcx(), ti.ty));
- let glue_fn = declare_generic_glue(ccx, ti.ty, llfnty, "visit");
+
+ let (glue_fn, new_sym) = match ccx.available_visit_glues().borrow().find(&ti.ty) {
+ Some(old_sym) => {
+ let glue_fn = decl_cdecl_fn(ccx, old_sym.as_slice(), llfnty, ty::mk_nil());
+ (glue_fn, None)
+ },
+ None => {
+ let (sym, glue_fn) = declare_generic_glue(ccx, ti.ty, llfnty, "visit");
+ (glue_fn, Some(sym))
+ },
+ };
+
ti.visit_glue.set(Some(glue_fn));
- make_generic_glue(ccx, ti.ty, glue_fn, make_visit_glue, "visit");
+
+ match new_sym {
+ Some(sym) => {
+ ccx.available_visit_glues().borrow_mut().insert(ti.ty, sym);
+ make_generic_glue(ccx, ti.ty, glue_fn, make_visit_glue, "visit");
+ },
+ None => {},
+ }
+
debug!("--- lazily_emit_tydesc_glue VISIT {}", ppaux::ty_to_string(ccx.tcx(), ti.ty));
glue_fn
}
}
// See [Note-arg-mode]
-pub fn call_visit_glue(bcx: &Block, v: ValueRef, tydesc: ValueRef) {
+pub fn call_visit_glue(bcx: Block, v: ValueRef, tydesc: ValueRef) {
let _icx = push_ctxt("call_visit_glue");
// Select the glue function to call from the tydesc
Call(bcx, llfn, [llrawptr], None);
}
-fn make_visit_glue<'a>(bcx: &'a Block<'a>, v: ValueRef, t: ty::t)
- -> &'a Block<'a> {
+fn make_visit_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("make_visit_glue");
let mut bcx = bcx;
let (visitor_trait, object_ty) = match ty::visitor_object_ty(bcx.tcx(),
bcx
}
-fn trans_struct_drop_flag<'a>(mut bcx: &'a Block<'a>,
- t: ty::t,
- v0: ValueRef,
- dtor_did: ast::DefId,
- class_did: ast::DefId,
- substs: &subst::Substs)
- -> &'a Block<'a> {
+fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ v0: ValueRef,
+ dtor_did: ast::DefId,
+ class_did: ast::DefId,
+ substs: &subst::Substs)
+ -> Block<'blk, 'tcx> {
let repr = adt::represent_type(bcx.ccx(), t);
let struct_data = if ty::type_is_sized(bcx.tcx(), t) {
v0
})
}
-fn trans_struct_drop<'a>(bcx: &'a Block<'a>,
- t: ty::t,
- v0: ValueRef,
- dtor_did: ast::DefId,
- class_did: ast::DefId,
- substs: &subst::Substs)
- -> &'a Block<'a> {
+fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ v0: ValueRef,
+ dtor_did: ast::DefId,
+ class_did: ast::DefId,
+ substs: &subst::Substs)
+ -> Block<'blk, 'tcx> {
let repr = adt::represent_type(bcx.ccx(), t);
// Find and call the actual destructor
})
}
-fn size_and_align_of_dst<'a>(bcx: &'a Block<'a>, t :ty::t, info: ValueRef) -> (ValueRef, ValueRef) {
+fn size_and_align_of_dst(bcx: Block, t :ty::t, info: ValueRef) -> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}",
bcx.ty_to_string(t), bcx.val_to_string(info));
if ty::type_is_sized(bcx.tcx(), t) {
}
}
-fn make_drop_glue<'a>(bcx: &'a Block<'a>, v0: ValueRef, t: ty::t) -> &'a Block<'a> {
+fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, t: ty::t)
+ -> Block<'blk, 'tcx> {
// NB: v0 is an *alias* of type t here, not a direct value.
let _icx = push_ctxt("make_drop_glue");
match ty::get(t).sty {
ty::ty_uniq(content_ty) => {
match ty::get(content_ty).sty {
ty::ty_vec(ty, None) => {
- tvec::make_drop_glue_unboxed(bcx, v0, ty)
+ tvec::make_drop_glue_unboxed(bcx, v0, ty, true)
}
ty::ty_str => {
let unit_ty = ty::sequence_element_type(bcx.tcx(), t);
- tvec::make_drop_glue_unboxed(bcx, v0, unit_ty)
+ tvec::make_drop_glue_unboxed(bcx, v0, unit_ty, true)
}
ty::ty_trait(..) => {
let lluniquevalue = GEPi(bcx, v0, [0, abi::trt_field_box]);
let info = GEPi(bcx, v0, [0, abi::slice_elt_len]);
let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info);
- trans_exchange_free_internal(bcx, llbox, llsize, llalign)
+ trans_exchange_free_dyn(bcx, llbox, llsize, llalign)
})
}
_ => {
with_cond(bcx, IsNotNull(bcx, env), |bcx| {
let dtor_ptr = GEPi(bcx, env, [0u, abi::box_field_tydesc]);
let dtor = Load(bcx, dtor_ptr);
- let cdata = GEPi(bcx, env, [0u, abi::box_field_body]);
- Call(bcx, dtor, [PointerCast(bcx, cdata, Type::i8p(bcx.ccx()))], None);
-
- // Free the environment itself
- // FIXME: #13994: pass align and size here
- trans_exchange_free(bcx, env, 0, 8)
+ Call(bcx, dtor, [PointerCast(bcx, box_cell_v, Type::i8p(bcx.ccx()))], None);
+ bcx
})
}
ty::ty_trait(..) => {
None);
bcx
}
- ty::ty_vec(ty, None) => tvec::make_drop_glue_unboxed(bcx, v0, ty),
+ ty::ty_vec(ty, None) => tvec::make_drop_glue_unboxed(bcx, v0, ty, false),
_ => {
assert!(ty::type_is_sized(bcx.tcx(), t));
if ty::type_needs_drop(bcx.tcx(), t) &&
}
}
-fn decr_refcnt_maybe_free<'a>(bcx: &'a Block<'a>,
- box_ptr_ptr: ValueRef,
- t: ty::t) -> &'a Block<'a> {
+fn decr_refcnt_maybe_free<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ptr_ptr: ValueRef,
+ t: ty::t) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("decr_refcnt_maybe_free");
let fcx = bcx.fcx;
let ccx = bcx.ccx();
next_bcx
}
-fn incr_refcnt_of_boxed<'a>(bcx: &'a Block<'a>,
- box_ptr_ptr: ValueRef) -> &'a Block<'a> {
+fn incr_refcnt_of_boxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ box_ptr_ptr: ValueRef) -> Block<'blk, 'tcx> {
let _icx = push_ctxt("incr_refcnt_of_boxed");
let ccx = bcx.ccx();
let box_ptr = Load(bcx, box_ptr_ptr);
pub fn declare_tydesc(ccx: &CrateContext, t: ty::t) -> tydesc_info {
// If emit_tydescs already ran, then we shouldn't be creating any new
// tydescs.
- assert!(!ccx.finished_tydescs.get());
+ assert!(!ccx.finished_tydescs().get());
let llty = type_of(ccx, t);
debug!("+++ declare_tydesc {} {}", ppaux::ty_to_string(ccx.tcx(), t), name);
let gvar = name.as_slice().with_c_str(|buf| {
unsafe {
- llvm::LLVMAddGlobal(ccx.llmod, ccx.tydesc_type().to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), ccx.tydesc_type().to_ref(), buf)
}
});
note_unique_llvm_symbol(ccx, name);
}
fn declare_generic_glue(ccx: &CrateContext, t: ty::t, llfnty: Type,
- name: &str) -> ValueRef {
+ name: &str) -> (String, ValueRef) {
let _icx = push_ctxt("declare_generic_glue");
let fn_nm = mangle_internal_name_by_type_and_seq(
ccx,
t,
format!("glue_{}", name).as_slice());
let llfn = decl_cdecl_fn(ccx, fn_nm.as_slice(), llfnty, ty::mk_nil());
- note_unique_llvm_symbol(ccx, fn_nm);
- return llfn;
+ note_unique_llvm_symbol(ccx, fn_nm.clone());
+ return (fn_nm, llfn);
}
fn make_generic_glue(ccx: &CrateContext,
t: ty::t,
llfn: ValueRef,
- helper: <'a> |&'a Block<'a>, ValueRef, ty::t|
- -> &'a Block<'a>,
+ helper: <'blk, 'tcx> |Block<'blk, 'tcx>, ValueRef, ty::t|
+ -> Block<'blk, 'tcx>,
name: &str)
-> ValueRef {
let _icx = push_ctxt("make_generic_glue");
let bcx = init_function(&fcx, false, ty::mk_nil());
- llvm::SetLinkage(llfn, llvm::InternalLinkage);
- ccx.stats.n_glues_created.set(ccx.stats.n_glues_created.get() + 1u);
+ update_linkage(ccx, llfn, None, OriginalTranslation);
+
+ ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1u);
// All glue functions take values passed *by alias*; this is a
// requirement since in many contexts glue is invoked indirectly and
// the caller has no idea if it's dealing with something that can be
pub fn emit_tydescs(ccx: &CrateContext) {
let _icx = push_ctxt("emit_tydescs");
// As of this point, allow no more tydescs to be created.
- ccx.finished_tydescs.set(true);
+ ccx.finished_tydescs().set(true);
let glue_fn_ty = Type::generic_glue_fn(ccx).ptr_to();
- for (_, ti) in ccx.tydescs.borrow().iter() {
+ for (_, ti) in ccx.tydescs().borrow().iter() {
// Each of the glue functions needs to be cast to a generic type
// before being put into the tydesc because we only have a singleton
// tydesc type. Then we'll recast each function to its real type when
let drop_glue = unsafe {
llvm::LLVMConstPointerCast(get_drop_glue(ccx, ti.ty), glue_fn_ty.to_ref())
};
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() + 1);
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() + 1);
let visit_glue =
match ti.visit_glue.get() {
None => {
- ccx.stats.n_null_glues.set(ccx.stats.n_null_glues.get() +
+ ccx.stats().n_null_glues.set(ccx.stats().n_null_glues.get() +
1u);
C_null(glue_fn_ty)
}
Some(v) => {
unsafe {
- ccx.stats.n_real_glues.set(ccx.stats.n_real_glues.get() +
+ ccx.stats().n_real_glues.set(ccx.stats().n_real_glues.get() +
1);
llvm::LLVMConstPointerCast(v, glue_fn_ty.to_ref())
}
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use llvm::{AvailableExternallyLinkage, SetLinkage};
+use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage};
use metadata::csearch;
use middle::astencode;
use middle::trans::base::{push_ctxt, trans_item, get_item_val, trans_fn};
use syntax::ast_util::{local_def, PostExpansionMethod};
use syntax::ast_util;
-pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
- -> ast::DefId {
+fn instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId)
+ -> Option<ast::DefId> {
let _icx = push_ctxt("maybe_instantiate_inline");
- match ccx.external.borrow().find(&fn_id) {
+ match ccx.external().borrow().find(&fn_id) {
Some(&Some(node_id)) => {
// Already inline
debug!("maybe_instantiate_inline({}): already inline as node id {}",
ty::item_path_str(ccx.tcx(), fn_id), node_id);
- return local_def(node_id);
+ return Some(local_def(node_id));
}
Some(&None) => {
- return fn_id; // Not inlinable
+ return None; // Not inlinable
}
None => {
// Not seen yet
csearch::maybe_get_item_ast(
ccx.tcx(), fn_id,
|a,b,c,d| astencode::decode_inlined_item(a, b, c, d));
- return match csearch_result {
+
+ let inline_def = match csearch_result {
csearch::not_found => {
- ccx.external.borrow_mut().insert(fn_id, None);
- fn_id
+ ccx.external().borrow_mut().insert(fn_id, None);
+ return None;
}
csearch::found(ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
trans_item(ccx, &*item);
- // We're bringing an external global into this crate, but we don't
- // want to create two copies of the global. If we do this, then if
- // you take the address of the global in two separate crates you get
- // two different addresses. This is bad for things like conditions,
- // but it could possibly have other adverse side effects. We still
- // want to achieve the optimizations related to this global,
- // however, so we use the available_externally linkage which llvm
- // provides
- match item.node {
+ let linkage = match item.node {
+ ast::ItemFn(_, _, _, ref generics, _) => {
+ if generics.is_type_parameterized() {
+ // Generics have no symbol, so they can't be given any
+ // linkage.
+ None
+ } else {
+ if ccx.sess().opts.cg.codegen_units == 1 {
+ // We could use AvailableExternallyLinkage here,
+ // but InternalLinkage allows LLVM to optimize more
+ // aggressively (at the cost of sometimes
+ // duplicating code).
+ Some(InternalLinkage)
+ } else {
+ // With multiple compilation units, duplicated code
+ // is more of a problem. Also, `codegen_units > 1`
+ // means the user is okay with losing some
+ // performance.
+ Some(AvailableExternallyLinkage)
+ }
+ }
+ }
ast::ItemStatic(_, mutbl, _) => {
- let g = get_item_val(ccx, item.id);
- // see the comment in get_item_val() as to why this check is
- // performed here.
- if ast_util::static_has_significant_address(
- mutbl,
- item.attrs.as_slice()) {
- SetLinkage(g, AvailableExternallyLinkage);
+ if !ast_util::static_has_significant_address(mutbl, item.attrs.as_slice()) {
+ // Inlined static items use internal linkage when
+ // possible, so that LLVM will coalesce globals with
+ // identical initializers. (It only does this for
+ // globals with unnamed_addr and either internal or
+ // private linkage.)
+ Some(InternalLinkage)
+ } else {
+ // The address is significant, so we can't create an
+ // internal copy of the static. (The copy would have a
+ // different address from the original.)
+ Some(AvailableExternallyLinkage)
}
}
- _ => {}
+ _ => unreachable!(),
+ };
+
+ match linkage {
+ Some(linkage) => {
+ let g = get_item_val(ccx, item.id);
+ SetLinkage(g, linkage);
+ }
+ None => {}
}
local_def(item.id)
}
csearch::found(ast::IIForeign(item)) => {
- ccx.external.borrow_mut().insert(fn_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
local_def(item.id)
}
csearch::found_parent(parent_id, ast::IIItem(item)) => {
- ccx.external.borrow_mut().insert(parent_id, Some(item.id));
- ccx.external_srcs.borrow_mut().insert(item.id, parent_id);
+ ccx.external().borrow_mut().insert(parent_id, Some(item.id));
+ ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
let mut my_id = 0;
match item.node {
let vs_there = ty::enum_variants(ccx.tcx(), parent_id);
for (here, there) in vs_here.iter().zip(vs_there.iter()) {
if there.id == fn_id { my_id = here.id.node; }
- ccx.external.borrow_mut().insert(there.id, Some(here.id.node));
+ ccx.external().borrow_mut().insert(there.id, Some(here.id.node));
}
}
ast::ItemStruct(ref struct_def, _) => {
match struct_def.ctor_id {
None => {}
Some(ctor_id) => {
- ccx.external.borrow_mut().insert(fn_id, Some(ctor_id));
+ ccx.external().borrow_mut().insert(fn_id, Some(ctor_id));
my_id = ctor_id;
}
}
match impl_item {
ast::ProvidedInlinedTraitItem(mth) |
ast::RequiredInlinedTraitItem(mth) => {
- ccx.external.borrow_mut().insert(fn_id, Some(mth.id));
- ccx.external_srcs.borrow_mut().insert(mth.id, fn_id);
+ ccx.external().borrow_mut().insert(fn_id, Some(mth.id));
+ ccx.external_srcs().borrow_mut().insert(mth.id, fn_id);
- ccx.stats.n_inlines.set(ccx.stats.n_inlines.get() + 1);
+ ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
}
}
¶m_substs::empty(),
mth.id,
[]);
+ // Use InternalLinkage so LLVM can optimize more
+ // aggressively.
+ SetLinkage(llfn, InternalLinkage);
}
local_def(mth.id)
}
}
}
};
+
+ return Some(inline_def);
+}
+
+pub fn get_local_instance(ccx: &CrateContext, fn_id: ast::DefId)
+ -> Option<ast::DefId> {
+ if fn_id.krate == ast::LOCAL_CRATE {
+ Some(fn_id)
+ } else {
+ instantiate_inline(ccx, fn_id)
+ }
+}
+
+pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: ast::DefId) -> ast::DefId {
+ get_local_instance(ccx, fn_id).unwrap_or(fn_id)
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_uppercase_pattern_statics)]
+#![allow(non_uppercase_statics)]
use llvm;
use llvm::{SequentiallyConsistent, Acquire, Release, Xchg, ValueRef};
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
- for transmute_restriction in ccx.tcx
+ for transmute_restriction in ccx.tcx()
.transmute_restrictions
.borrow()
.iter() {
ccx.sess().abort_if_errors();
}
-pub fn trans_intrinsic_call<'a>(mut bcx: &'a Block<'a>, node: ast::NodeId,
- callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
- args: callee::CallArgs, dest: expr::Dest,
- substs: subst::Substs, call_info: NodeInfo) -> Result<'a> {
+pub fn trans_intrinsic_call<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, node: ast::NodeId,
+ callee_ty: ty::t, cleanup_scope: cleanup::CustomScopeIndex,
+ args: callee::CallArgs, dest: expr::Dest,
+ substs: subst::Substs, call_info: NodeInfo)
+ -> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let hash = ty::hash_crate_independent(
ccx.tcx(),
*substs.types.get(FnSpace, 0),
- &ccx.link_meta.crate_hash);
+ &ccx.link_meta().crate_hash);
// NB: This needs to be kept in lockstep with the TypeId struct in
// the intrinsic module
C_named_struct(llret_ty, [C_u64(ccx, hash)])
Result::new(bcx, llresult)
}
-fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool,
+fn copy_intrinsic(bcx: Block, allow_overlap: bool, volatile: bool,
tp_ty: ty::t, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let int_size = machine::llbitsize_of_real(ccx, ccx.int_type);
+ let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
let name = if allow_overlap {
if int_size == 32 {
"llvm.memmove.p0i8.p0i8.i32"
C_bool(ccx, volatile)], None)
}
-fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t,
+fn memset_intrinsic(bcx: Block, volatile: bool, tp_ty: ty::t,
dst: ValueRef, val: ValueRef, count: ValueRef) -> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
- let name = if machine::llbitsize_of_real(ccx, ccx.int_type) == 32 {
+ let name = if machine::llbitsize_of_real(ccx, ccx.int_type()) == 32 {
"llvm.memset.p0i8.i32"
} else {
"llvm.memset.p0i8.i64"
C_bool(ccx, volatile)], None)
}
-fn count_zeros_intrinsic(bcx: &Block, name: &'static str, val: ValueRef) -> ValueRef {
+fn count_zeros_intrinsic(bcx: Block, name: &'static str, val: ValueRef) -> ValueRef {
let y = C_bool(bcx.ccx(), false);
let llfn = bcx.ccx().get_intrinsic(&name);
Call(bcx, llfn, [val, y], None)
}
-fn with_overflow_intrinsic(bcx: &Block, name: &'static str, t: ty::t,
+fn with_overflow_intrinsic(bcx: Block, name: &'static str, t: ty::t,
a: ValueRef, b: ValueRef) -> ValueRef {
let llfn = bcx.ccx().get_intrinsic(&name);
impl LlvmRepr for Type {
fn llrepr(&self, ccx: &CrateContext) -> String {
- ccx.tn.type_to_string(*self)
+ ccx.tn().type_to_string(*self)
}
}
impl LlvmRepr for ValueRef {
fn llrepr(&self, ccx: &CrateContext) -> String {
- ccx.tn.val_to_string(*self)
+ ccx.tn().val_to_string(*self)
}
}
// Returns the number of bytes clobbered by a Store to this type.
pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMStoreSizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMStoreSizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABISizeOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABISizeOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// below.
pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- let nbits = llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64;
+ let nbits = llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64;
if nbits & 7 != 0 {
// Not an even number of bytes, spills into "next" byte.
1 + (nbits >> 3)
/// Returns the "real" size of the type in bits.
pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- llvm::LLVMSizeOfTypeInBits(cx.td.lltd, ty.to_ref()) as u64
+ llvm::LLVMSizeOfTypeInBits(cx.td().lltd, ty.to_ref()) as u64
}
}
// space to be consumed.
pub fn nonzero_llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
if llbitsize_of_real(cx, ty) == 0 {
- unsafe { llvm::LLVMConstInt(cx.int_type.to_ref(), 1, False) }
+ unsafe { llvm::LLVMConstInt(cx.int_type().to_ref(), 1, False) }
} else {
llsize_of(cx, ty)
}
// allocations inside a stack frame, which LLVM has a free hand in.
pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMPreferredAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMPreferredAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
// and similar ABI-mandated things.
pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> u64 {
unsafe {
- return llvm::LLVMABIAlignmentOfType(cx.td.lltd, ty.to_ref()) as u64;
+ return llvm::LLVMABIAlignmentOfType(cx.td().lltd, ty.to_ref()) as u64;
}
}
pub fn llalign_of(cx: &CrateContext, ty: Type) -> ValueRef {
unsafe {
return llvm::LLVMConstIntCast(
- llvm::LLVMAlignOf(ty.to_ref()), cx.int_type.to_ref(), False);
+ llvm::LLVMAlignOf(ty.to_ref()), cx.int_type().to_ref(), False);
}
}
pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: uint) -> u64 {
unsafe {
- return llvm::LLVMOffsetOfElement(cx.td.lltd, struct_ty.to_ref(), element as u32) as u64;
+ return llvm::LLVMOffsetOfElement(cx.td().lltd, struct_ty.to_ref(), element as u32) as u64;
}
}
use std::c_str::ToCStr;
use syntax::abi::{Rust, RustCall};
use syntax::parse::token;
-use syntax::{ast, ast_map, visit};
+use syntax::{ast, ast_map, attr, visit};
use syntax::ast_util::PostExpansionMethod;
// drop_glue pointer, size, align.
match *impl_item {
ast::MethodImplItem(method) => {
if method.pe_generics().ty_params.len() == 0u {
- let llfn = get_item_val(ccx, method.id);
- trans_fn(ccx,
- &*method.pe_fn_decl(),
- &*method.pe_body(),
- llfn,
- &param_substs::empty(),
- method.id,
- []);
+ let trans_everywhere = attr::requests_inline(method.attrs.as_slice());
+ for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
+ let llfn = get_item_val(ccx, method.id);
+ trans_fn(ccx,
+ &*method.pe_fn_decl(),
+ &*method.pe_body(),
+ llfn,
+ &param_substs::empty(),
+ method.id,
+ []);
+ update_linkage(ccx,
+ llfn,
+ Some(method.id),
+ if is_origin { OriginalTranslation } else { InlinedCopy });
+ }
}
let mut v = TransItemVisitor {
ccx: ccx,
}
}
-pub fn trans_method_callee<'a>(
- bcx: &'a Block<'a>,
- method_call: MethodCall,
- self_expr: Option<&ast::Expr>,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Callee<'a> {
+pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ method_call: MethodCall,
+ self_expr: Option<&ast::Expr>,
+ arg_cleanup_scope: cleanup::ScopeId)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_method_callee");
let (origin, method_ty) = match bcx.tcx().method_map
}
}
-pub fn trans_static_method_callee(bcx: &Block,
+pub fn trans_static_method_callee(bcx: Block,
method_id: ast::DefId,
trait_id: ast::DefId,
expr_id: ast::NodeId)
let vtable_key = MethodCall::expr(expr_id);
let vtbls = resolve_vtables_in_fn_ctxt(
bcx.fcx,
- ccx.tcx.vtable_map.borrow().get(&vtable_key));
+ ccx.tcx().vtable_map.borrow().get(&vtable_key));
match *vtbls.get_self().unwrap().get(0) {
typeck::vtable_static(impl_did, ref rcvr_substs, ref rcvr_origins) => {
fn method_with_name(ccx: &CrateContext, impl_id: ast::DefId, name: ast::Name)
-> ast::DefId {
- match ccx.impl_method_cache.borrow().find_copy(&(impl_id, name)) {
+ match ccx.impl_method_cache().borrow().find_copy(&(impl_id, name)) {
Some(m) => return m,
None => {}
}
- let impl_items = ccx.tcx.impl_items.borrow();
+ let impl_items = ccx.tcx().impl_items.borrow();
let impl_items =
impl_items.find(&impl_id)
.expect("could not find impl while translating");
.find(|&did| {
match *did {
ty::MethodTraitItemId(did) => {
- ty::impl_or_trait_item(&ccx.tcx,
+ ty::impl_or_trait_item(ccx.tcx(),
did).ident()
.name ==
name
}).expect("could not find method while \
translating");
- ccx.impl_method_cache.borrow_mut().insert((impl_id, name),
+ ccx.impl_method_cache().borrow_mut().insert((impl_id, name),
meth_did.def_id());
meth_did.def_id()
}
-fn trans_monomorphized_callee<'a>(
- bcx: &'a Block<'a>,
- method_call: MethodCall,
- trait_id: ast::DefId,
- n_method: uint,
- vtbl: typeck::vtable_origin)
- -> Callee<'a> {
+fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ method_call: MethodCall,
+ trait_id: ast::DefId,
+ n_method: uint,
+ vtbl: typeck::vtable_origin)
+ -> Callee<'blk, 'tcx> {
let _icx = push_ctxt("meth::trans_monomorphized_callee");
match vtbl {
typeck::vtable_static(impl_did, rcvr_substs, rcvr_origins) => {
}
}
-fn combine_impl_and_methods_tps(bcx: &Block,
+fn combine_impl_and_methods_tps(bcx: Block,
node: ExprOrMethodCall,
rcvr_substs: subst::Substs,
rcvr_origins: typeck::vtable_res)
(ty_substs, vtables)
}
-fn trans_trait_callee<'a>(bcx: &'a Block<'a>,
- method_ty: ty::t,
- n_method: uint,
- self_expr: &ast::Expr,
- arg_cleanup_scope: cleanup::ScopeId)
- -> Callee<'a> {
+fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ method_ty: ty::t,
+ n_method: uint,
+ self_expr: &ast::Expr,
+ arg_cleanup_scope: cleanup::ScopeId)
+ -> Callee<'blk, 'tcx> {
/*!
* Create a method callee where the method is coming from a trait
* object (e.g., Box<Trait> type). In this case, we must pull the fn
trans_trait_callee_from_llval(bcx, method_ty, n_method, llval)
}
-pub fn trans_trait_callee_from_llval<'a>(bcx: &'a Block<'a>,
- callee_ty: ty::t,
- n_method: uint,
- llpair: ValueRef)
- -> Callee<'a> {
+pub fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ callee_ty: ty::t,
+ n_method: uint,
+ llpair: ValueRef)
+ -> Callee<'blk, 'tcx> {
/*!
* Same as `trans_trait_callee()` above, except that it is given
* a by-ref pointer to the object pair.
/// Creates the self type and (fake) callee substitutions for an unboxed
/// closure with the given def ID. The static region and type parameters are
/// lies, but we're in trans so it doesn't matter.
-fn get_callee_substitutions_for_unboxed_closure(bcx: &Block,
+fn get_callee_substitutions_for_unboxed_closure(bcx: Block,
def_id: ast::DefId)
-> subst::Substs {
let self_ty = ty::mk_unboxed_closure(bcx.tcx(), def_id, ty::ReStatic);
/// Creates a returns a dynamic vtable for the given type and vtable origin.
/// This is used only for objects.
-fn get_vtable(bcx: &Block,
+fn get_vtable(bcx: Block,
self_ty: ty::t,
origins: typeck::vtable_param_res)
-> ValueRef
// Check the cache.
let hash_id = (self_ty, monomorphize::make_vtable_id(ccx, origins.get(0)));
- match ccx.vtables.borrow().find(&hash_id) {
+ match ccx.vtables().borrow().find(&hash_id) {
Some(&val) => { return val }
None => { }
}
let drop_glue = glue::get_drop_glue(ccx, self_ty);
let vtable = make_vtable(ccx, drop_glue, ll_size, ll_align, methods);
- ccx.vtables.borrow_mut().insert(hash_id, vtable);
+ ccx.vtables().borrow_mut().insert(hash_id, vtable);
vtable
}
let tbl = C_struct(ccx, components.as_slice(), false);
let sym = token::gensym("vtable");
let vt_gvar = format!("vtable{}", sym.uint()).with_c_str(|buf| {
- llvm::LLVMAddGlobal(ccx.llmod, val_ty(tbl).to_ref(), buf)
+ llvm::LLVMAddGlobal(ccx.llmod(), val_ty(tbl).to_ref(), buf)
});
llvm::LLVMSetInitializer(vt_gvar, tbl);
llvm::LLVMSetGlobalConstant(vt_gvar, llvm::True);
}
}
-fn emit_vtable_methods(bcx: &Block,
+fn emit_vtable_methods(bcx: Block,
impl_id: ast::DefId,
substs: subst::Substs,
vtables: typeck::vtable_res)
}).collect()
}
-pub fn vtable_ptr<'a>(bcx: &'a Block<'a>,
- id: ast::NodeId,
- self_ty: ty::t) -> ValueRef {
+pub fn vtable_ptr(bcx: Block,
+ id: ast::NodeId,
+ self_ty: ty::t) -> ValueRef {
let ccx = bcx.ccx();
let origins = {
- let vtable_map = ccx.tcx.vtable_map.borrow();
+ let vtable_map = ccx.tcx().vtable_map.borrow();
// This trait cast might be because of implicit coercion
- let adjs = ccx.tcx.adjustments.borrow();
+ let adjs = ccx.tcx().adjustments.borrow();
let adjust = adjs.find(&id);
let method_call = if adjust.is_some() && ty::adjust_is_object(adjust.unwrap()) {
MethodCall::autoobject(id)
get_vtable(bcx, self_ty, origins)
}
-pub fn trans_trait_cast<'a>(bcx: &'a Block<'a>,
- datum: Datum<Expr>,
- id: ast::NodeId,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_trait_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ datum: Datum<Expr>,
+ id: ast::NodeId,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
/*!
* Generates the code to convert from a pointer (`Box<T>`, `&T`, etc)
* into an object (`Box<Trait>`, `&Trait`, etc). This means creating a
use back::link::exported_name;
use driver::session;
use llvm::ValueRef;
+use llvm;
use middle::subst;
use middle::subst::Subst;
use middle::trans::base::{set_llvm_fn_attrs, set_inline_hint};
use syntax::ast;
use syntax::ast_map;
use syntax::ast_util::{local_def, PostExpansionMethod};
+use syntax::attr;
use std::hash::{sip, Hash};
pub fn monomorphic_fn(ccx: &CrateContext,
params: real_substs.types.clone()
};
- match ccx.monomorphized.borrow().find(&hash_id) {
+ match ccx.monomorphized().borrow().find(&hash_id) {
Some(&val) => {
debug!("leaving monomorphic fn {}",
ty::item_path_str(ccx.tcx(), fn_id));
let map_node = session::expect(
ccx.sess(),
- ccx.tcx.map.find(fn_id.node),
+ ccx.tcx().map.find(fn_id.node),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
match map_node {
ast_map::NodeForeignItem(_) => {
- if ccx.tcx.map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
+ if ccx.tcx().map.get_foreign_abi(fn_id.node) != abi::RustIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_id.node), true);
}
debug!("monomorphic_fn about to subst into {}", llitem_ty.repr(ccx.tcx()));
let mono_ty = llitem_ty.subst(ccx.tcx(), real_substs);
- ccx.stats.n_monos.set(ccx.stats.n_monos.get() + 1);
+ ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
let depth;
{
- let mut monomorphizing = ccx.monomorphizing.borrow_mut();
+ let mut monomorphizing = ccx.monomorphizing().borrow_mut();
depth = match monomorphizing.find(&fn_id) {
Some(&d) => d, None => 0
};
// recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() {
- ccx.sess().span_fatal(ccx.tcx.map.span(fn_id.node),
+ ccx.sess().span_fatal(ccx.tcx().map.span(fn_id.node),
"reached the recursion limit during monomorphization");
}
mono_ty.hash(&mut state);
hash = format!("h{}", state.result());
- ccx.tcx.map.with_path(fn_id.node, |path| {
+ ccx.tcx().map.with_path(fn_id.node, |path| {
exported_name(path, hash.as_slice())
})
};
decl_internal_rust_fn(ccx, mono_ty, s.as_slice())
};
- ccx.monomorphized.borrow_mut().insert(hash_id.take_unwrap(), lldecl);
+ ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl);
lldecl
};
+ let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
+ base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
+ set_llvm_fn_attrs(attrs, lldecl);
+
+ let is_first = !ccx.available_monomorphizations().borrow().contains(&s);
+ if is_first {
+ ccx.available_monomorphizations().borrow_mut().insert(s.clone());
+ }
+
+ let trans_everywhere = attr::requests_inline(attrs);
+ if trans_everywhere && !is_first {
+ llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
+ }
+
+ // If `true`, then `lldecl` should be given a function body.
+ // Otherwise, it should be left as a declaration of an external
+ // function, with no definition in the current compilation unit.
+ trans_everywhere || is_first
+ };
let lldecl = match map_node {
ast_map::NodeItem(i) => {
..
} => {
let d = mk_lldecl(abi);
- set_llvm_fn_attrs(i.attrs.as_slice(), d);
-
- if abi != abi::Rust {
- foreign::trans_rust_fn_with_foreign_abi(
- ccx, &**decl, &**body, [], d, &psubsts, fn_id.node,
- Some(hash.as_slice()));
- } else {
- trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
+ let needs_body = setup_lldecl(d, i.attrs.as_slice());
+ if needs_body {
+ if abi != abi::Rust {
+ foreign::trans_rust_fn_with_foreign_abi(
+ ccx, &**decl, &**body, [], d, &psubsts, fn_id.node,
+ Some(hash.as_slice()));
+ } else {
+ trans_fn(ccx, &**decl, &**body, d, &psubsts, fn_id.node, []);
+ }
}
d
}
}
ast_map::NodeVariant(v) => {
- let parent = ccx.tcx.map.get_parent(fn_id.node);
+ let parent = ccx.tcx().map.get_parent(fn_id.node);
let tvs = ty::enum_variants(ccx.tcx(), local_def(parent));
let this_tv = tvs.iter().find(|tv| { tv.id.node == fn_id.node}).unwrap();
let d = mk_lldecl(abi::Rust);
match *ii {
ast::MethodImplItem(mth) => {
let d = mk_lldecl(abi::Rust);
- set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx,
- &*mth.pe_fn_decl(),
- &*mth.pe_body(),
- d,
- &psubsts,
- mth.id,
- []);
+ let needs_body = setup_lldecl(d, mth.attrs.as_slice());
+ if needs_body {
+ trans_fn(ccx,
+ &*mth.pe_fn_decl(),
+ &*mth.pe_body(),
+ d,
+ &psubsts,
+ mth.id,
+ []);
+ }
d
}
}
match *method {
ast::ProvidedMethod(mth) => {
let d = mk_lldecl(abi::Rust);
- set_llvm_fn_attrs(mth.attrs.as_slice(), d);
- trans_fn(ccx, &*mth.pe_fn_decl(), &*mth.pe_body(), d,
- &psubsts, mth.id, []);
+ let needs_body = setup_lldecl(d, mth.attrs.as_slice());
+ if needs_body {
+ trans_fn(ccx, &*mth.pe_fn_decl(), &*mth.pe_body(), d,
+ &psubsts, mth.id, []);
+ }
d
}
_ => {
}
};
- ccx.monomorphizing.borrow_mut().insert(fn_id, depth);
+ ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ty::item_path_str(ccx.tcx(), fn_id));
(lldecl, true)
use syntax::parse::token::{InternedString, special_idents};
use syntax::parse::token;
-pub struct Reflector<'a, 'b> {
+pub struct Reflector<'a, 'blk, 'tcx: 'blk> {
visitor_val: ValueRef,
visitor_items: &'a [ty::ImplOrTraitItem],
- final_bcx: &'b Block<'b>,
+ final_bcx: Block<'blk, 'tcx>,
tydesc_ty: Type,
- bcx: &'b Block<'b>
+ bcx: Block<'blk, 'tcx>
}
-impl<'a, 'b> Reflector<'a, 'b> {
+impl<'a, 'blk, 'tcx> Reflector<'a, 'blk, 'tcx> {
pub fn c_uint(&mut self, u: uint) -> ValueRef {
C_uint(self.bcx.ccx(), u)
}
// Unfortunately we can't do anything here because at runtime we
// pass around the value by pointer (*u8). But unsized pointers are
// fat and so we can't just cast them to *u8 and back. So we have
- // to work with the pointer directly (see ty_rptr/ty_uniq).
+ // to work with the pointer directly (see ty_ptr/ty_rptr/ty_uniq).
fail!("Can't reflect unsized type")
}
// FIXME(15049) Reflection for unsized structs.
self.visit("box", extra.as_slice())
}
ty::ty_ptr(ref mt) => {
- let extra = self.c_mt(mt);
- self.visit("ptr", extra.as_slice())
+ match ty::get(mt.ty).sty {
+ ty::ty_vec(ty, None) => {
+ let extra = self.c_mt(&ty::mt{ty: ty, mutbl: mt.mutbl});
+ self.visit("evec_slice", extra.as_slice())
+ }
+ ty::ty_str => self.visit("estr_slice", &[]),
+ ty::ty_trait(..) => {
+ let extra = [
+ self.c_slice(token::intern_and_get_ident(
+ ty_to_string(tcx, t).as_slice()))
+ ];
+ self.visit("trait", extra);
+ }
+ _ => {
+ let extra = self.c_mt(mt);
+ self.visit("ptr", extra.as_slice())
+ }
+ }
}
ty::ty_uniq(typ) => {
match ty::get(typ).sty {
let sym = mangle_internal_name_by_path_and_seq(
ast_map::Values([].iter()).chain(None), "get_disr");
- let fn_ty = ty::mk_ctor_fn(&ccx.tcx, ast::DUMMY_NODE_ID,
+ let fn_ty = ty::mk_ctor_fn(ccx.tcx(), ast::DUMMY_NODE_ID,
[opaqueptrty], ty::mk_u64());
let llfdecl = decl_internal_rust_fn(ccx,
fn_ty,
}
// Emit a sequence of calls to visit_ty::visit_foo
-pub fn emit_calls_to_trait_visit_ty<'a>(
- bcx: &'a Block<'a>,
- t: ty::t,
- visitor_val: ValueRef,
- visitor_trait_id: DefId)
- -> &'a Block<'a> {
+pub fn emit_calls_to_trait_visit_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ t: ty::t,
+ visitor_val: ValueRef,
+ visitor_trait_id: DefId)
+ -> Block<'blk, 'tcx> {
let fcx = bcx.fcx;
let final = fcx.new_temp_block("final");
let tydesc_ty = ty::get_tydesc_ty(bcx.tcx()).unwrap();
use middle::trans::expr::{Dest, Ignore, SaveIn};
use middle::trans::expr;
use middle::trans::glue;
+use middle::trans::machine;
use middle::trans::machine::{nonzero_llsize_of, llsize_of_alloc};
use middle::trans::type_::Type;
use middle::trans::type_of;
use syntax::ast;
use syntax::parse::token::InternedString;
-fn get_len(bcx: &Block, vptr: ValueRef) -> ValueRef {
+fn get_len(bcx: Block, vptr: ValueRef) -> ValueRef {
let _icx = push_ctxt("tvec::get_lenl");
Load(bcx, expr::get_len(bcx, vptr))
}
-fn get_dataptr(bcx: &Block, vptr: ValueRef) -> ValueRef {
+fn get_dataptr(bcx: Block, vptr: ValueRef) -> ValueRef {
let _icx = push_ctxt("tvec::get_dataptr");
Load(bcx, expr::get_dataptr(bcx, vptr))
}
-pub fn pointer_add_byte(bcx: &Block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
+pub fn pointer_add_byte(bcx: Block, ptr: ValueRef, bytes: ValueRef) -> ValueRef {
let _icx = push_ctxt("tvec::pointer_add_byte");
let old_ty = val_ty(ptr);
let bptr = PointerCast(bcx, ptr, Type::i8p(bcx.ccx()));
return PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]), old_ty);
}
-pub fn make_drop_glue_unboxed<'a>(
- bcx: &'a Block<'a>,
- vptr: ValueRef,
- unit_ty: ty::t)
- -> &'a Block<'a> {
+pub fn make_drop_glue_unboxed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ vptr: ValueRef,
+ unit_ty: ty::t,
+ should_deallocate: bool)
+ -> Block<'blk, 'tcx> {
let not_null = IsNotNull(bcx, vptr);
with_cond(bcx, not_null, |bcx| {
+ let ccx = bcx.ccx();
let tcx = bcx.tcx();
let _icx = push_ctxt("tvec::make_drop_glue_unboxed");
- let len = get_len(bcx, vptr);
let dataptr = get_dataptr(bcx, vptr);
let bcx = if ty::type_needs_drop(tcx, unit_ty) {
+ let len = get_len(bcx, vptr);
iter_vec_raw(bcx, dataptr, unit_ty, len, glue::drop_ty)
} else {
bcx
};
- let not_null = IsNotNull(bcx, dataptr);
- with_cond(bcx, not_null, |bcx| {
- glue::trans_exchange_free(bcx, dataptr, 0, 8)
- })
+ if should_deallocate {
+ let not_null = IsNotNull(bcx, dataptr);
+ with_cond(bcx, not_null, |bcx| {
+ let llty = type_of::type_of(ccx, unit_ty);
+ let llsize = machine::llsize_of(ccx, llty);
+ let llalign = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
+ let size = Mul(bcx, llsize, get_len(bcx, vptr));
+ glue::trans_exchange_free_dyn(bcx, dataptr, size, llalign)
+ })
+ } else {
+ bcx
+ }
})
}
format!("VecTypes {{unit_ty={}, llunit_ty={}, \
llunit_size={}, llunit_alloc_size={}}}",
ty_to_string(ccx.tcx(), self.unit_ty),
- ccx.tn.type_to_string(self.llunit_ty),
- ccx.tn.val_to_string(self.llunit_size),
+ ccx.tn().type_to_string(self.llunit_ty),
+ ccx.tn().val_to_string(self.llunit_size),
self.llunit_alloc_size)
}
}
-pub fn trans_fixed_vstore<'a>(
- bcx: &'a Block<'a>,
- expr: &ast::Expr,
- dest: expr::Dest)
- -> &'a Block<'a> {
+pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ expr: &ast::Expr,
+ dest: expr::Dest)
+ -> Block<'blk, 'tcx> {
//!
//
// [...] allocates a fixed-size array and moves it around "by value".
};
}
-pub fn trans_slice_vec<'a>(bcx: &'a Block<'a>,
- slice_expr: &ast::Expr,
- content_expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ slice_expr: &ast::Expr,
+ content_expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* &[...] allocates memory on the stack and writes the values into it,
* returning the vector (the caller must make the reference). "..." is
immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock()
}
-pub fn trans_lit_str<'a>(
- bcx: &'a Block<'a>,
- lit_expr: &ast::Expr,
- str_lit: InternedString,
- dest: Dest)
- -> &'a Block<'a> {
+pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ lit_expr: &ast::Expr,
+ str_lit: InternedString,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
/*!
* Literal strings translate to slices into static memory. This is
* different from trans_slice_vstore() above because it doesn't need to copy
}
}
-pub fn trans_uniq_vec<'a>(bcx: &'a Block<'a>,
- uniq_expr: &ast::Expr,
- content_expr: &ast::Expr)
- -> DatumBlock<'a, Expr> {
+pub fn trans_uniq_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ uniq_expr: &ast::Expr,
+ content_expr: &ast::Expr)
+ -> DatumBlock<'blk, 'tcx, Expr> {
/*!
* Box<[...]> and "...".to_string() allocate boxes in the exchange heap and write
* the array elements into them.
debug!(" vt={}, count={:?}", vt.to_string(ccx), count);
let vec_ty = node_id_type(bcx, uniq_expr.id);
- let unit_sz = nonzero_llsize_of(ccx, type_of::type_of(ccx, vt.unit_ty));
+ let llty = type_of::type_of(ccx, vt.unit_ty);
+ let unit_sz = nonzero_llsize_of(ccx, llty);
let llcount = if count < 4u {
C_int(ccx, 4)
} else {
C_uint(ccx, count)
};
let alloc = Mul(bcx, llcount, unit_sz);
- let llty_ptr = type_of::type_of(ccx, vt.unit_ty).ptr_to();
- let align = C_uint(ccx, 8);
+ let llty_ptr = llty.ptr_to();
+ let align = C_uint(ccx, machine::llalign_of_min(ccx, llty) as uint);
let Result { bcx: bcx, val: dataptr } = malloc_raw_dyn(bcx,
llty_ptr,
vec_ty,
// Create a temporary scope lest execution should fail while
// constructing the vector.
let temp_scope = fcx.push_custom_cleanup_scope();
- // FIXME: #13994: the old `Box<[T]> will not support sized deallocation,
- // this is a placeholder
- fcx.schedule_free_value(cleanup::CustomScope(temp_scope),
- dataptr, cleanup::HeapExchange, vt.unit_ty);
- debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
- bcx.val_to_string(dataptr), count);
+ fcx.schedule_free_slice(cleanup::CustomScope(temp_scope),
+ dataptr, alloc, align, cleanup::HeapExchange);
- let bcx = write_content(bcx, &vt, uniq_expr,
- content_expr, SaveIn(dataptr));
+ debug!(" alloc_uniq_vec() returned dataptr={}, len={}",
+ bcx.val_to_string(dataptr), count);
+
+ let bcx = write_content(bcx, &vt, uniq_expr,
+ content_expr, SaveIn(dataptr));
fcx.pop_custom_cleanup_scope(temp_scope);
}
}
-pub fn write_content<'a>(
- bcx: &'a Block<'a>,
- vt: &VecTypes,
- vstore_expr: &ast::Expr,
- content_expr: &ast::Expr,
- dest: Dest)
- -> &'a Block<'a> {
+pub fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ vt: &VecTypes,
+ vstore_expr: &ast::Expr,
+ content_expr: &ast::Expr,
+ dest: Dest)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::write_content");
let fcx = bcx.fcx;
let mut bcx = bcx;
}
}
-pub fn vec_types_from_expr(bcx: &Block, vec_expr: &ast::Expr) -> VecTypes {
+pub fn vec_types_from_expr(bcx: Block, vec_expr: &ast::Expr) -> VecTypes {
let vec_ty = node_id_type(bcx, vec_expr.id);
vec_types(bcx, ty::sequence_element_type(bcx.tcx(), vec_ty))
}
-pub fn vec_types(bcx: &Block, unit_ty: ty::t) -> VecTypes {
+pub fn vec_types(bcx: Block, unit_ty: ty::t) -> VecTypes {
let ccx = bcx.ccx();
let llunit_ty = type_of::type_of(ccx, unit_ty);
let llunit_size = nonzero_llsize_of(ccx, llunit_ty);
}
}
-pub fn elements_required(bcx: &Block, content_expr: &ast::Expr) -> uint {
+pub fn elements_required(bcx: Block, content_expr: &ast::Expr) -> uint {
//! Figure out the number of elements we need to store this content
match content_expr.node {
}
}
-pub fn get_fixed_base_and_len(bcx: &Block,
+pub fn get_fixed_base_and_len(bcx: Block,
llval: ValueRef,
vec_length: uint)
-> (ValueRef, ValueRef) {
(base, len)
}
-fn get_slice_base_and_len(bcx: &Block,
+fn get_slice_base_and_len(bcx: Block,
llval: ValueRef)
-> (ValueRef, ValueRef) {
let base = Load(bcx, GEPi(bcx, llval, [0u, abi::slice_elt_base]));
(base, len)
}
-pub fn get_base_and_len(bcx: &Block,
+pub fn get_base_and_len(bcx: Block,
llval: ValueRef,
vec_ty: ty::t)
-> (ValueRef, ValueRef) {
}
}
-pub type iter_vec_block<'r,'b> =
- |&'b Block<'b>, ValueRef, ty::t|: 'r -> &'b Block<'b>;
-
-pub fn iter_vec_loop<'r,
- 'b>(
- bcx: &'b Block<'b>,
- data_ptr: ValueRef,
- vt: &VecTypes,
- count: ValueRef,
- f: iter_vec_block<'r,'b>)
- -> &'b Block<'b> {
+pub type iter_vec_block<'a, 'blk, 'tcx> =
+ |Block<'blk, 'tcx>, ValueRef, ty::t|: 'a -> Block<'blk, 'tcx>;
+
+pub fn iter_vec_loop<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ data_ptr: ValueRef,
+ vt: &VecTypes,
+ count: ValueRef,
+ f: iter_vec_block<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::iter_vec_loop");
let fcx = bcx.fcx;
let loop_counter = {
// i = 0
- let i = alloca(loop_bcx, bcx.ccx().int_type, "__i");
+ let i = alloca(loop_bcx, bcx.ccx().int_type(), "__i");
Store(loop_bcx, C_uint(bcx.ccx(), 0), i);
Br(loop_bcx, cond_bcx.llbb);
next_bcx
}
-pub fn iter_vec_raw<'r,
- 'b>(
- bcx: &'b Block<'b>,
- data_ptr: ValueRef,
- unit_ty: ty::t,
- len: ValueRef,
- f: iter_vec_block<'r,'b>)
- -> &'b Block<'b> {
+pub fn iter_vec_raw<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+ data_ptr: ValueRef,
+ unit_ty: ty::t,
+ len: ValueRef,
+ f: iter_vec_block<'a, 'blk, 'tcx>)
+ -> Block<'blk, 'tcx> {
let _icx = push_ctxt("tvec::iter_vec_raw");
let fcx = bcx.fcx;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_uppercase_pattern_statics)]
+#![allow(non_uppercase_statics)]
use llvm;
use llvm::{TypeRef, Bool, False, True, TypeKind, ValueRef};
}
pub fn void(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMVoidTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
}
pub fn nil(ccx: &CrateContext) -> Type {
}
pub fn metadata(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx()))
}
pub fn i1(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt1TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
}
pub fn i8(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt8TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
}
pub fn i16(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt16TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
}
pub fn i32(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt32TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
}
pub fn i64(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMInt64TypeInContext(ccx.llcx))
+ ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
}
pub fn f32(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMFloatTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
}
pub fn f64(ccx: &CrateContext) -> Type {
- ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx))
+ ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
}
pub fn bool(ccx: &CrateContext) -> Type {
}
pub fn int(ccx: &CrateContext) -> Type {
- match ccx.tcx.sess.targ_cfg.arch {
+ match ccx.tcx().sess.targ_cfg.arch {
X86 | Arm | Mips | Mipsel => Type::i32(ccx),
X86_64 => Type::i64(ccx)
}
pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
match t {
- ast::TyI => ccx.int_type,
+ ast::TyI => ccx.int_type(),
ast::TyI8 => Type::i8(ccx),
ast::TyI16 => Type::i16(ccx),
ast::TyI32 => Type::i32(ccx),
pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
match t {
- ast::TyU => ccx.int_type,
+ ast::TyU => ccx.int_type(),
ast::TyU8 => Type::i8(ccx),
ast::TyU16 => Type::i16(ccx),
ast::TyU32 => Type::i32(ccx),
pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
let els : &[TypeRef] = unsafe { mem::transmute(els) };
- ty!(llvm::LLVMStructTypeInContext(ccx.llcx, els.as_ptr(),
+ ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
els.len() as c_uint,
packed as Bool))
}
pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
- ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx, s)))
+ ty!(name.with_c_str(|s| llvm::LLVMStructCreateNamed(ccx.llcx(), s)))
}
pub fn empty_struct(ccx: &CrateContext) -> Type {
}
pub fn generic_glue_fn(cx: &CrateContext) -> Type {
- match cx.tn.find_type("glue_fn") {
+ match cx.tn().find_type("glue_fn") {
Some(ty) => return ty,
None => ()
}
let ty = Type::glue_fn(cx, Type::i8p(cx));
- cx.tn.associate_type("glue_fn", &ty);
+ cx.tn().associate_type("glue_fn", &ty);
ty
}
// The box pointed to by @T.
pub fn at_box(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, [
- ccx.int_type, Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
+ ccx.int_type(), Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to(),
Type::i8p(ccx), Type::i8p(ccx), ty
], false)
}
// recursive types. For example, enum types rely on this behavior.
pub fn sizing_type_of(cx: &CrateContext, t: ty::t) -> Type {
- match cx.llsizingtypes.borrow().find_copy(&t) {
+ match cx.llsizingtypes().borrow().find_copy(&t) {
Some(t) => return t,
None => ()
}
ty::ty_uint(t) => Type::uint_from_ty(cx, t),
ty::ty_float(t) => Type::float_from_ty(cx, t),
- ty::ty_box(..) |
- ty::ty_ptr(..) => Type::i8p(cx),
- ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) => {
+ ty::ty_box(..) => Type::i8p(cx),
+ ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) | ty::ty_ptr(ty::mt{ty, ..}) => {
if ty::type_is_sized(cx.tcx(), ty) {
Type::i8p(cx)
} else {
ty::ty_vec(_, None) | ty::ty_trait(..) | ty::ty_str => fail!("unreachable")
};
- cx.llsizingtypes.borrow_mut().insert(t, llsizingty);
+ cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
llsizingty
}
}
// Check the cache.
- match cx.lltypes.borrow().find(&t) {
+ match cx.lltypes().borrow().find(&t) {
Some(&llty) => return llty,
None => ()
}
t,
t_norm.repr(cx.tcx()),
t_norm,
- cx.tn.type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.tn().type_to_string(llty));
+ cx.lltypes().borrow_mut().insert(t, llty);
return llty;
}
ty::ty_box(typ) => {
Type::at_box(cx, type_of(cx, typ)).ptr_to()
}
- ty::ty_ptr(ref mt) => type_of(cx, mt.ty).ptr_to(),
- ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) => {
+ ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) | ty::ty_ptr(ty::mt{ty, ..}) => {
match ty::get(ty).sty {
ty::ty_str => {
// This means we get a nicer name in the output (str is always
// unsized).
- cx.tn.find_type("str_slice").unwrap()
+ cx.tn().find_type("str_slice").unwrap()
}
ty::ty_trait(..) => Type::opaque_trait(cx),
_ if !ty::type_is_sized(cx.tcx(), ty) => {
debug!("--> mapped t={} {:?} to llty={}",
t.repr(cx.tcx()),
t,
- cx.tn.type_to_string(llty));
+ cx.tn().type_to_string(llty));
- cx.lltypes.borrow_mut().insert(t, llty);
+ cx.lltypes().borrow_mut().insert(t, llty);
// If this was an enum or struct, fill in the type now.
match ty::get(t).sty {
/// This only performs a search for a trivially dominating store. The store
/// must be the only user of this value, and there must not be any conditional
/// branches between the store and the given block.
- pub fn get_dominating_store(self, bcx: &Block) -> Option<Value> {
+ pub fn get_dominating_store(self, bcx: Block) -> Option<Value> {
match self.get_single_user().and_then(|user| user.as_store_inst()) {
Some(store) => {
store.get_parent().and_then(|store_bb| {
use middle::subst;
use middle::ty;
use middle::typeck;
-use middle::typeck::MethodCall;
use middle::ty_fold;
use middle::ty_fold::{TypeFoldable,TypeFolder};
use middle;
use std::ops;
use std::rc::Rc;
use std::collections::{HashMap, HashSet};
+use arena::TypedArena;
use syntax::abi;
use syntax::ast::{CrateNum, DefId, FnStyle, Ident, ItemTrait, LOCAL_CRATE};
use syntax::ast::{MutImmutable, MutMutable, Name, NamedField, NodeId};
/// Convert from T to *T
/// Value to thin pointer
- AutoUnsafe(ast::Mutability),
+ /// The second field allows us to wrap other AutoRef adjustments.
+ AutoUnsafe(ast::Mutability, Option<Box<AutoRef>>),
}
// Ugly little helper function. The first bool in the returned tuple is true if
(b, u, Some(adj_r))
}
}
+ &AutoUnsafe(_, Some(box ref autoref)) => autoref_object_region(autoref),
_ => (false, false, None)
}
}
None => None
}
}
+ &AutoUnsafe(m, Some(box ref autoref)) => {
+ match type_of_autoref(cx, autoref) {
+ Some(t) => Some(mk_ptr(cx, mt {mutbl: m, ty: t})),
+ None => None
+ }
+ }
_ => None
}
}
/// The data structure to keep track of all the information that typechecker
/// generates so that it can be reused and doesn't have to be redone
/// later on.
-pub struct ctxt {
+pub struct ctxt<'tcx> {
+ /// The arena that types are allocated from.
+ type_arena: &'tcx TypedArena<t_box_>,
+
/// Specifically use a speedy hash algorithm for this hash map, it's used
/// quite often.
- pub interner: RefCell<FnvHashMap<intern_key, Box<t_box_>>>,
+ interner: RefCell<FnvHashMap<intern_key, &'tcx t_box_>>,
pub next_id: Cell<uint>,
pub sess: Session,
pub def_map: resolve::DefMap,
/// Substs here, possibly against intuition, *may* contain `ty_param`s.
/// That is, even after substitution it is possible that there are type
/// variables. This happens when the `ty_enum` corresponds to an enum
- /// definition and not a concerete use of it. To get the correct `ty_enum`
+ /// definition and not a concrete use of it. To get the correct `ty_enum`
/// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in
/// the `ast_ty_to_ty_cache`. This is probably true for `ty_struct` as
/// well.
}
}
-pub fn mk_ctxt(s: Session,
- dm: resolve::DefMap,
- named_region_map: resolve_lifetime::NamedRegionMap,
- map: ast_map::Map,
- freevars: freevars::freevar_map,
- capture_modes: freevars::CaptureModeMap,
- region_maps: middle::region::RegionMaps,
- lang_items: middle::lang_items::LanguageItems,
- stability: stability::Index)
- -> ctxt {
+pub fn mk_ctxt<'tcx>(s: Session,
+ type_arena: &'tcx TypedArena<t_box_>,
+ dm: resolve::DefMap,
+ named_region_map: resolve_lifetime::NamedRegionMap,
+ map: ast_map::Map,
+ freevars: freevars::freevar_map,
+ capture_modes: freevars::CaptureModeMap,
+ region_maps: middle::region::RegionMaps,
+ lang_items: middle::lang_items::LanguageItems,
+ stability: stability::Index) -> ctxt<'tcx> {
ctxt {
+ type_arena: type_arena,
+ interner: RefCell::new(FnvHashMap::new()),
named_region_map: named_region_map,
item_variance_map: RefCell::new(DefIdMap::new()),
variance_computed: Cell::new(false),
- interner: RefCell::new(FnvHashMap::new()),
next_id: Cell::new(primitives::LAST_PRIMITIVE_ID),
sess: s,
def_map: dm,
}
}
- let t = box t_box_ {
+ let t = cx.type_arena.alloc(t_box_ {
sty: st,
id: cx.next_id.get(),
flags: flags,
- };
+ });
let sty_ptr = &t.sty as *const sty;
fn type_is_slice(ty: t) -> bool {
match get(ty).sty {
- ty_rptr(_, mt) => match get(mt.ty).sty {
+ ty_ptr(mt) | ty_rptr(_, mt) => match get(mt.ty).sty {
ty_vec(_, None) | ty_str => true,
_ => false,
},
pub fn type_is_fat_ptr(cx: &ctxt, ty: t) -> bool {
match get(ty).sty {
- ty_rptr(_, mt{ty, ..}) | ty_uniq(ty) if !type_is_sized(cx, ty) => true,
+ ty_ptr(mt{ty, ..}) | ty_rptr(_, mt{ty, ..})
+ | ty_uniq(ty) if !type_is_sized(cx, ty) => true,
_ => false,
}
}
macro_rules! def_type_content_sets(
(mod $mname:ident { $($name:ident = $bits:expr),+ }) => {
+ #[allow(non_snake_case)]
mod $mname {
use middle::ty::TypeContents;
$(pub static $name: TypeContents = TypeContents { bits: $bits };)+
}
ty_trait(box ty::TyTrait { bounds, .. }) => {
- object_contents(cx, bounds) | TC::ReachesFfiUnsafe
+ object_contents(cx, bounds) | TC::ReachesFfiUnsafe | TC::Nonsized
}
ty_ptr(ref mt) => {
tc | TC::Managed
} else if Some(did) == cx.lang_items.no_copy_bound() {
tc | TC::OwnsAffine
- } else if Some(did) == cx.lang_items.no_share_bound() {
+ } else if Some(did) == cx.lang_items.no_sync_bound() {
tc | TC::ReachesNoSync
} else if Some(did) == cx.lang_items.unsafe_type() {
// FIXME(#13231): This shouldn't be needed after
pub fn type_is_trait(ty: t) -> bool {
match get(ty).sty {
- ty_uniq(ty) | ty_rptr(_, mt { ty, ..}) => match get(ty).sty {
+ ty_uniq(ty) | ty_rptr(_, mt { ty, ..}) | ty_ptr(mt { ty, ..}) => match get(ty).sty {
ty_trait(..) => true,
_ => false
},
-> ty::t {
/*! See `expr_ty_adjusted` */
+ match get(unadjusted_ty).sty {
+ ty_err => return unadjusted_ty,
+ _ => {}
+ }
+
return match adjustment {
Some(adjustment) => {
match *adjustment {
})
}
- AutoUnsafe(m) => {
- mk_ptr(cx, mt {ty: ty, mutbl: m})
+ AutoUnsafe(m, ref a) => {
+ let adjusted_ty = match a {
+ &Some(box ref a) => adjust_for_autoref(cx, span, ty, a),
+ &None => ty
+ };
+ mk_ptr(cx, mt {ty: adjusted_ty, mutbl: m})
}
AutoUnsize(ref k) => unsize_ty(cx, ty, k, span),
ty::AutoPtr(r, m, Some(ref a)) => ty::AutoPtr(f(r), m, Some(box a.map_region(f))),
ty::AutoUnsize(ref k) => ty::AutoUnsize(k.clone()),
ty::AutoUnsizeUniq(ref k) => ty::AutoUnsizeUniq(k.clone()),
- ty::AutoUnsafe(m) => ty::AutoUnsafe(m),
+ ty::AutoUnsafe(m, None) => ty::AutoUnsafe(m, None),
+ ty::AutoUnsafe(m, Some(ref a)) => ty::AutoUnsafe(m, Some(box a.map_region(f))),
}
}
}
-pub fn method_call_type_param_defs<T>(typer: &T,
- origin: typeck::MethodOrigin)
- -> VecPerParamSpace<TypeParameterDef>
- where T: mc::Typer {
+pub fn method_call_type_param_defs<'tcx, T>(typer: &T,
+ origin: typeck::MethodOrigin)
+ -> VecPerParamSpace<TypeParameterDef>
+ where T: mc::Typer<'tcx> {
match origin {
typeck::MethodStatic(did) => {
ty::lookup_item_type(typer.tcx(), did).generics.types.clone()
let u = TypeNormalizer(cx).fold_ty(t);
return u;
- struct TypeNormalizer<'a>(&'a ctxt);
+ struct TypeNormalizer<'a, 'tcx: 'a>(&'a ctxt<'tcx>);
- impl<'a> TypeFolder for TypeNormalizer<'a> {
- fn tcx<'a>(&'a self) -> &'a ctxt { let TypeNormalizer(c) = *self; c }
+ impl<'a, 'tcx> TypeFolder<'tcx> for TypeNormalizer<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ctxt<'tcx> { let TypeNormalizer(c) = *self; c }
fn fold_ty(&mut self, t: ty::t) -> ty::t {
match self.tcx().normalized_cache.borrow().find_copy(&t) {
}
}
-pub trait ExprTyProvider {
- fn expr_ty(&self, ex: &ast::Expr) -> t;
- fn ty_ctxt<'a>(&'a self) -> &'a ctxt;
-}
-
-impl ExprTyProvider for ctxt {
- fn expr_ty(&self, ex: &ast::Expr) -> t {
- expr_ty(self, ex)
- }
-
- fn ty_ctxt<'a>(&'a self) -> &'a ctxt {
- self
- }
-}
-
// Returns the repeat count for a repeating vector expression.
-pub fn eval_repeat_count<T: ExprTyProvider>(tcx: &T, count_expr: &ast::Expr) -> uint {
+pub fn eval_repeat_count(tcx: &ctxt, count_expr: &ast::Expr) -> uint {
match const_eval::eval_const_expr_partial(tcx, count_expr) {
Ok(ref const_val) => match *const_val {
const_eval::const_int(count) => if count < 0 {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found negative integer");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found negative integer");
+ 0
} else {
- return count as uint
+ count as uint
},
- const_eval::const_uint(count) => return count as uint,
+ const_eval::const_uint(count) => count as uint,
const_eval::const_float(count) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found float");
- return count as uint;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found float");
+ count as uint
}
const_eval::const_str(_) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found string");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found string");
+ 0
}
const_eval::const_bool(_) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found boolean");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found boolean");
+ 0
}
const_eval::const_binary(_) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found binary array");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found binary array");
+ 0
}
const_eval::const_nil => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected positive integer for \
- repeat count, found ()");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected positive integer for \
+ repeat count, found ()");
+ 0
}
},
Err(..) => {
- tcx.ty_ctxt().sess.span_err(count_expr.span,
- "expected constant integer for repeat count, \
- found variable");
- return 0;
+ tcx.sess.span_err(count_expr.span,
+ "expected constant integer for repeat count, \
+ found variable");
+ 0
}
}
}
}
}
-impl mc::Typer for ty::ctxt {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'tcx> mc::Typer<'tcx> for ty::ctxt<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self
}
/// The TypeFoldable trait is implemented for every type that can be folded.
/// Basically, every type that has a corresponding method in TypeFolder.
pub trait TypeFoldable {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Self;
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self;
}
/// The TypeFolder trait defines the actual *folding*. There is a
/// default implementation that does an "identity" fold. Within each
/// identity fold, it should invoke `foo.fold_with(self)` to fold each
/// sub-item.
-pub trait TypeFolder {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt;
+pub trait TypeFolder<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn fold_ty(&mut self, t: ty::t) -> ty::t {
super_fold_ty(self, t)
// needed.
impl<T:TypeFoldable> TypeFoldable for Option<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Option<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Option<T> {
self.as_ref().map(|t| t.fold_with(folder))
}
}
impl<T:TypeFoldable> TypeFoldable for Rc<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Rc<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Rc<T> {
Rc::new((**self).fold_with(folder))
}
}
impl<T:TypeFoldable> TypeFoldable for Vec<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Vec<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Vec<T> {
self.iter().map(|t| t.fold_with(folder)).collect()
}
}
impl<T:TypeFoldable> TypeFoldable for OwnedSlice<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> OwnedSlice<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> OwnedSlice<T> {
self.iter().map(|t| t.fold_with(folder)).collect()
}
}
impl<T:TypeFoldable> TypeFoldable for VecPerParamSpace<T> {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> VecPerParamSpace<T> {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> VecPerParamSpace<T> {
self.map(|t| t.fold_with(folder))
}
}
impl TypeFoldable for ty::TraitStore {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TraitStore {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TraitStore {
folder.fold_trait_store(*self)
}
}
impl TypeFoldable for ty::t {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::t {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::t {
folder.fold_ty(*self)
}
}
impl TypeFoldable for ty::BareFnTy {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::BareFnTy {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::BareFnTy {
folder.fold_bare_fn_ty(self)
}
}
impl TypeFoldable for ty::ClosureTy {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ClosureTy {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ClosureTy {
folder.fold_closure_ty(self)
}
}
impl TypeFoldable for ty::mt {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::mt {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::mt {
folder.fold_mt(self)
}
}
impl TypeFoldable for ty::FnSig {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::FnSig {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::FnSig {
folder.fold_sig(self)
}
}
impl TypeFoldable for ty::sty {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::sty {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::sty {
folder.fold_sty(self)
}
}
impl TypeFoldable for ty::TraitRef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TraitRef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TraitRef {
folder.fold_trait_ref(self)
}
}
impl TypeFoldable for ty::Region {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::Region {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::Region {
folder.fold_region(*self)
}
}
impl TypeFoldable for subst::Substs {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> subst::Substs {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> subst::Substs {
folder.fold_substs(self)
}
}
impl TypeFoldable for ty::ItemSubsts {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ItemSubsts {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ItemSubsts {
ty::ItemSubsts {
substs: self.substs.fold_with(folder),
}
}
impl TypeFoldable for ty::AutoRef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::AutoRef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::AutoRef {
folder.fold_autoref(self)
}
}
impl TypeFoldable for typeck::vtable_origin {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> typeck::vtable_origin {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> typeck::vtable_origin {
match *self {
typeck::vtable_static(def_id, ref substs, ref origins) => {
let r_substs = substs.fold_with(folder);
}
impl TypeFoldable for ty::BuiltinBounds {
- fn fold_with<F:TypeFolder>(&self, _folder: &mut F) -> ty::BuiltinBounds {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, _folder: &mut F) -> ty::BuiltinBounds {
*self
}
}
impl TypeFoldable for ty::ExistentialBounds {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ExistentialBounds {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ExistentialBounds {
folder.fold_existential_bounds(*self)
}
}
impl TypeFoldable for ty::ParamBounds {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ParamBounds {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::ParamBounds {
ty::ParamBounds {
opt_region_bound: self.opt_region_bound.fold_with(folder),
builtin_bounds: self.builtin_bounds.fold_with(folder),
}
impl TypeFoldable for ty::TypeParameterDef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TypeParameterDef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::TypeParameterDef {
ty::TypeParameterDef {
ident: self.ident,
def_id: self.def_id,
}
impl TypeFoldable for ty::RegionParameterDef {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::RegionParameterDef {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::RegionParameterDef {
ty::RegionParameterDef {
name: self.name,
def_id: self.def_id,
}
impl TypeFoldable for ty::Generics {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::Generics {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::Generics {
ty::Generics {
types: self.types.fold_with(folder),
regions: self.regions.fold_with(folder),
}
impl TypeFoldable for ty::UnsizeKind {
- fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::UnsizeKind {
+ fn fold_with<'tcx, F: TypeFolder<'tcx>>(&self, folder: &mut F) -> ty::UnsizeKind {
match *self {
ty::UnsizeLength(len) => ty::UnsizeLength(len),
ty::UnsizeStruct(box ref k, n) => ty::UnsizeStruct(box k.fold_with(folder), n),
// "super" routines: these are the default implementations for TypeFolder.
//
// They should invoke `foo.fold_with()` to do recursive folding.
-
-pub fn super_fold_ty<T:TypeFolder>(this: &mut T,
- t: ty::t)
- -> ty::t {
+pub fn super_fold_ty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ t: ty::t)
+ -> ty::t {
let sty = ty::get(t).sty.fold_with(this);
ty::mk_t(this.tcx(), sty)
}
-pub fn super_fold_substs<T:TypeFolder>(this: &mut T,
- substs: &subst::Substs)
- -> subst::Substs {
+pub fn super_fold_substs<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ substs: &subst::Substs)
+ -> subst::Substs {
let regions = match substs.regions {
subst::ErasedRegions => {
subst::ErasedRegions
types: substs.types.fold_with(this) }
}
-pub fn super_fold_sig<T:TypeFolder>(this: &mut T,
- sig: &ty::FnSig)
- -> ty::FnSig {
+pub fn super_fold_sig<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ sig: &ty::FnSig)
+ -> ty::FnSig {
ty::FnSig { binder_id: sig.binder_id,
inputs: sig.inputs.fold_with(this),
output: sig.output.fold_with(this),
variadic: sig.variadic }
}
-pub fn super_fold_bare_fn_ty<T:TypeFolder>(this: &mut T,
- fty: &ty::BareFnTy)
- -> ty::BareFnTy
+pub fn super_fold_bare_fn_ty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ fty: &ty::BareFnTy)
+ -> ty::BareFnTy
{
ty::BareFnTy { sig: fty.sig.fold_with(this),
abi: fty.abi,
fn_style: fty.fn_style }
}
-pub fn super_fold_closure_ty<T:TypeFolder>(this: &mut T,
- fty: &ty::ClosureTy)
- -> ty::ClosureTy
+pub fn super_fold_closure_ty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ fty: &ty::ClosureTy)
+ -> ty::ClosureTy
{
ty::ClosureTy {
store: fty.store.fold_with(this),
abi: fty.abi,
}
}
-
-pub fn super_fold_trait_ref<T:TypeFolder>(this: &mut T,
- t: &ty::TraitRef)
- -> ty::TraitRef {
+pub fn super_fold_trait_ref<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ t: &ty::TraitRef)
+ -> ty::TraitRef {
ty::TraitRef {
def_id: t.def_id,
substs: t.substs.fold_with(this),
}
}
-pub fn super_fold_mt<T:TypeFolder>(this: &mut T,
- mt: &ty::mt) -> ty::mt {
+pub fn super_fold_mt<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ mt: &ty::mt) -> ty::mt {
ty::mt {ty: mt.ty.fold_with(this),
mutbl: mt.mutbl}
}
-pub fn super_fold_sty<T:TypeFolder>(this: &mut T,
- sty: &ty::sty) -> ty::sty {
+pub fn super_fold_sty<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ sty: &ty::sty) -> ty::sty {
match *sty {
ty::ty_box(typ) => {
ty::ty_box(typ.fold_with(this))
}
}
-pub fn super_fold_trait_store<T:TypeFolder>(this: &mut T,
- trait_store: ty::TraitStore)
- -> ty::TraitStore {
+pub fn super_fold_trait_store<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ trait_store: ty::TraitStore)
+ -> ty::TraitStore {
match trait_store {
ty::UniqTraitStore => ty::UniqTraitStore,
ty::RegionTraitStore(r, m) => {
}
}
-pub fn super_fold_existential_bounds<T:TypeFolder>(this: &mut T,
- bounds: ty::ExistentialBounds)
- -> ty::ExistentialBounds {
+pub fn super_fold_existential_bounds<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ bounds: ty::ExistentialBounds)
+ -> ty::ExistentialBounds {
ty::ExistentialBounds {
region_bound: bounds.region_bound.fold_with(this),
builtin_bounds: bounds.builtin_bounds,
}
}
-pub fn super_fold_autoref<T:TypeFolder>(this: &mut T,
- autoref: &ty::AutoRef)
- -> ty::AutoRef
+pub fn super_fold_autoref<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ autoref: &ty::AutoRef)
+ -> ty::AutoRef
{
match *autoref {
ty::AutoPtr(r, m, None) => ty::AutoPtr(this.fold_region(r), m, None),
ty::AutoPtr(r, m, Some(ref a)) => {
ty::AutoPtr(this.fold_region(r), m, Some(box super_fold_autoref(this, &**a)))
}
- ty::AutoUnsafe(m) => ty::AutoUnsafe(m),
+ ty::AutoUnsafe(m, None) => ty::AutoUnsafe(m, None),
+ ty::AutoUnsafe(m, Some(ref a)) => {
+ ty::AutoUnsafe(m, Some(box super_fold_autoref(this, &**a)))
+ }
ty::AutoUnsize(ref k) => ty::AutoUnsize(k.fold_with(this)),
ty::AutoUnsizeUniq(ref k) => ty::AutoUnsizeUniq(k.fold_with(this)),
}
}
-pub fn super_fold_item_substs<T:TypeFolder>(this: &mut T,
- substs: ty::ItemSubsts)
- -> ty::ItemSubsts
+pub fn super_fold_item_substs<'tcx, T: TypeFolder<'tcx>>(this: &mut T,
+ substs: ty::ItemSubsts)
+ -> ty::ItemSubsts
{
ty::ItemSubsts {
substs: substs.substs.fold_with(this),
///////////////////////////////////////////////////////////////////////////
// Some sample folders
-pub struct BottomUpFolder<'a> {
- pub tcx: &'a ty::ctxt,
+pub struct BottomUpFolder<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
pub fldop: |ty::t|: 'a -> ty::t,
}
-impl<'a> TypeFolder for BottomUpFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> TypeFolder<'tcx> for BottomUpFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn fold_ty(&mut self, ty: ty::t) -> ty::t {
let t1 = super_fold_ty(self, ty);
/// (The distinction between "free" and "bound" is represented by
/// keeping track of each `FnSig` in the lexical context of the
/// current position of the fold.)
-pub struct RegionFolder<'a> {
- tcx: &'a ty::ctxt,
+pub struct RegionFolder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
fld_t: |ty::t|: 'a -> ty::t,
fld_r: |ty::Region|: 'a -> ty::Region,
within_binder_ids: Vec<ast::NodeId>,
}
-impl<'a> RegionFolder<'a> {
- pub fn general(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
+ pub fn general(tcx: &'a ty::ctxt<'tcx>,
fld_r: |ty::Region|: 'a -> ty::Region,
fld_t: |ty::t|: 'a -> ty::t)
- -> RegionFolder<'a> {
+ -> RegionFolder<'a, 'tcx> {
RegionFolder {
tcx: tcx,
fld_t: fld_t,
}
}
- pub fn regions(tcx: &'a ty::ctxt, fld_r: |ty::Region|: 'a -> ty::Region)
- -> RegionFolder<'a> {
+ pub fn regions(tcx: &'a ty::ctxt<'tcx>, fld_r: |ty::Region|: 'a -> ty::Region)
+ -> RegionFolder<'a, 'tcx> {
fn noop(t: ty::t) -> ty::t { t }
RegionFolder {
}
}
-impl<'a> TypeFolder for RegionFolder<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn fold_ty(&mut self, ty: ty::t) -> ty::t {
debug!("RegionFolder.fold_ty({})", ty.repr(self.tcx()));
use syntax::{ast, ast_util};
use syntax::codemap::Span;
-pub trait AstConv {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt;
+pub trait AstConv<'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>;
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype;
fn get_trait_def(&self, id: ast::DefId) -> Rc<ty::TraitDef>;
r
}
-pub fn opt_ast_region_to_region<AC:AstConv,RS:RegionScope>(
+pub fn opt_ast_region_to_region<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
default_span: Span,
r
}
-fn ast_path_substs<AC:AstConv,RS:RegionScope>(
+fn ast_path_substs<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
decl_generics: &ty::Generics,
substs
}
-pub fn ast_path_to_trait_ref<AC:AstConv,RS:RegionScope>(
+pub fn ast_path_to_trait_ref<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
trait_def_id: ast::DefId,
})
}
-pub fn ast_path_to_ty<AC:AstConv,RS:RegionScope>(
+pub fn ast_path_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
this: &AC,
rscope: &RS,
did: ast::DefId,
/// and/or region variables are substituted.
///
/// This is used when checking the constructor in struct literals.
-pub fn ast_path_to_ty_relaxed<AC:AstConv,
+pub fn ast_path_to_ty_relaxed<'tcx, AC: AstConv<'tcx>,
RS:RegionScope>(
this: &AC,
rscope: &RS,
/// Converts the given AST type to a built-in type. A "built-in type" is, at
/// present, either a core numeric type, a string, or `Box`.
-pub fn ast_ty_to_builtin_ty<AC:AstConv,
- RS:RegionScope>(
- this: &AC,
- rscope: &RS,
- ast_ty: &ast::Ty)
- -> Option<ty::t> {
+pub fn ast_ty_to_builtin_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
+ this: &AC,
+ rscope: &RS,
+ ast_ty: &ast::Ty)
+ -> Option<ty::t> {
match ast_ty_to_prim_ty(this.tcx(), ast_ty) {
Some(typ) => return Some(typ),
None => {}
}
}
-pub fn trait_ref_for_unboxed_function<AC:AstConv,
+pub fn trait_ref_for_unboxed_function<'tcx, AC: AstConv<'tcx>,
RS:RegionScope>(
this: &AC,
rscope: &RS,
// Handle `~`, `Box`, and `&` being able to mean strs and vecs.
// If a_seq_ty is a str or a vec, make it a str/vec.
// Also handle first-class trait types.
-fn mk_pointer<AC:AstConv,
- RS:RegionScope>(
- this: &AC,
- rscope: &RS,
- a_seq_ty: &ast::MutTy,
- ptr_ty: PointerTy,
- constr: |ty::t| -> ty::t)
- -> ty::t {
+fn mk_pointer<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
+ this: &AC,
+ rscope: &RS,
+ a_seq_ty: &ast::MutTy,
+ ptr_ty: PointerTy,
+ constr: |ty::t| -> ty::t)
+ -> ty::t {
let tcx = this.tcx();
debug!("mk_pointer(ptr_ty={})", ptr_ty);
// Parses the programmer's textual representation of a type into our
// internal notion of a type.
-pub fn ast_ty_to_ty<AC:AstConv, RS:RegionScope>(
- this: &AC, rscope: &RS, ast_ty: &ast::Ty) -> ty::t {
+pub fn ast_ty_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
+ this: &AC, rscope: &RS, ast_ty: &ast::Ty) -> ty::t {
let tcx = this.tcx();
return typ;
}
-pub fn ty_of_arg<AC: AstConv, RS: RegionScope>(this: &AC, rscope: &RS, a: &ast::Arg,
- expected_ty: Option<ty::t>) -> ty::t {
+pub fn ty_of_arg<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(this: &AC, rscope: &RS,
+ a: &ast::Arg,
+ expected_ty: Option<ty::t>)
+ -> ty::t {
match a.ty.node {
ast::TyInfer if expected_ty.is_some() => expected_ty.unwrap(),
ast::TyInfer => this.ty_infer(a.ty.span),
explicit_self: ast::ExplicitSelf,
}
-pub fn ty_of_method<AC:AstConv>(
+pub fn ty_of_method<'tcx, AC: AstConv<'tcx>>(
this: &AC,
id: ast::NodeId,
fn_style: ast::FnStyle,
(bare_fn_ty, optional_explicit_self_category.unwrap())
}
-pub fn ty_of_bare_fn<AC:AstConv>(this: &AC, id: ast::NodeId,
- fn_style: ast::FnStyle, abi: abi::Abi,
- decl: &ast::FnDecl) -> ty::BareFnTy {
+pub fn ty_of_bare_fn<'tcx, AC: AstConv<'tcx>>(this: &AC, id: ast::NodeId,
+ fn_style: ast::FnStyle, abi: abi::Abi,
+ decl: &ast::FnDecl) -> ty::BareFnTy {
let (bare_fn_ty, _) =
ty_of_method_or_bare_fn(this, id, fn_style, abi, None, decl);
bare_fn_ty
}
-fn ty_of_method_or_bare_fn<AC:AstConv>(
+fn ty_of_method_or_bare_fn<'tcx, AC: AstConv<'tcx>>(
this: &AC,
id: ast::NodeId,
fn_style: ast::FnStyle,
}, explicit_self_category_result)
}
-fn determine_explicit_self_category<AC:AstConv,
+fn determine_explicit_self_category<'tcx, AC: AstConv<'tcx>,
RS:RegionScope>(
this: &AC,
rscope: &RS,
}
}
-pub fn ty_of_closure<AC:AstConv>(
+pub fn ty_of_closure<'tcx, AC: AstConv<'tcx>>(
this: &AC,
id: ast::NodeId,
fn_style: ast::FnStyle,
}
}
-pub fn conv_existential_bounds<AC:AstConv, RS:RegionScope>(
+pub fn conv_existential_bounds<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
this: &AC,
rscope: &RS,
span: Span,
return Some(r);
}
-fn compute_region_bound<AC:AstConv, RS:RegionScope>(
+fn compute_region_bound<'tcx, AC: AstConv<'tcx>, RS:RegionScope>(
this: &AC,
rscope: &RS,
span: Span,
fcx.write_ty(expr.id, result_ty);
}
-pub struct pat_ctxt<'a> {
- pub fcx: &'a FnCtxt<'a>,
+pub struct pat_ctxt<'a, 'tcx: 'a> {
+ pub fcx: &'a FnCtxt<'a, 'tcx>,
pub map: PatIdMap,
}
IgnoreStaticMethods,
}
-pub fn lookup<'a>(
- fcx: &'a FnCtxt<'a>,
+pub fn lookup<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
// In a call `a.b::<X, Y, ...>(...)`:
expr: &ast::Expr, // The expression `a.b(...)`.
lcx.search(self_ty)
}
-pub fn lookup_in_trait<'a>(
- fcx: &'a FnCtxt<'a>,
+pub fn lookup_in_trait<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
// In a call `a.b::<X, Y, ...>(...)`:
span: Span, // The expression `a.b(...)`'s span.
}
}
-struct LookupContext<'a> {
- fcx: &'a FnCtxt<'a>,
+struct LookupContext<'a, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
span: Span,
// The receiver to the method call. Only `None` in the case of
RcvrMatchesIfSubtype(ty::t),
}
-impl<'a> LookupContext<'a> {
+impl<'a, 'tcx> LookupContext<'a, 'tcx> {
fn search(&self, self_ty: ty::t) -> Option<MethodCallee> {
let span = self.self_expr.map_or(self.span, |e| e.span);
let self_expr_id = self.self_expr.map(|e| e.id);
idx + 1u, ty::item_path_str(self.tcx(), did));
}
- fn infcx(&'a self) -> &'a infer::InferCtxt<'a> {
+ fn infcx(&'a self) -> &'a infer::InferCtxt<'a, 'tcx> {
&self.fcx.inh.infcx
}
- fn tcx(&self) -> &'a ty::ctxt {
+ fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.fcx.tcx()
}
use middle::subst::{Subst, Substs, VecPerParamSpace, ParamSpace};
use middle::ty::{FnSig, VariantInfo};
use middle::ty::{Polytype};
-use middle::ty::{Disr, ExprTyProvider, ParamTy, ParameterEnvironment};
+use middle::ty::{Disr, ParamTy, ParameterEnvironment};
use middle::ty;
use middle::ty_fold::TypeFolder;
use middle::typeck::astconv::AstConv;
/// Here, the function `foo()` and the closure passed to
/// `bar()` will each have their own `FnCtxt`, but they will
/// share the inherited fields.
-pub struct Inherited<'a> {
- infcx: infer::InferCtxt<'a>,
+pub struct Inherited<'a, 'tcx: 'a> {
+ infcx: infer::InferCtxt<'a, 'tcx>,
locals: RefCell<NodeMap<ty::t>>,
param_env: ty::ParameterEnvironment,
}
#[deriving(Clone)]
-pub struct FnCtxt<'a> {
+pub struct FnCtxt<'a, 'tcx: 'a> {
body_id: ast::NodeId,
// This flag is set to true if, during the writeback phase, we encounter
ps: RefCell<FnStyleState>,
- inh: &'a Inherited<'a>,
+ inh: &'a Inherited<'a, 'tcx>,
- ccx: &'a CrateCtxt<'a>,
+ ccx: &'a CrateCtxt<'a, 'tcx>,
}
-impl<'a> mem_categorization::Typer for FnCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'a, 'tcx> mem_categorization::Typer<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.ccx.tcx
}
fn node_ty(&self, id: ast::NodeId) -> McResult<ty::t> {
}
}
-impl<'a> Inherited<'a> {
- fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> Inherited<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
param_env: ty::ParameterEnvironment)
- -> Inherited<'a> {
+ -> Inherited<'a, 'tcx> {
Inherited {
infcx: infer::new_infer_ctxt(tcx),
locals: RefCell::new(NodeMap::new()),
}
// Used by check_const and check_enum_variants
-pub fn blank_fn_ctxt<'a>(
- ccx: &'a CrateCtxt<'a>,
- inh: &'a Inherited<'a>,
- rty: ty::t,
- body_id: ast::NodeId)
- -> FnCtxt<'a> {
+pub fn blank_fn_ctxt<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
+ inh: &'a Inherited<'a, 'tcx>,
+ rty: ty::t,
+ body_id: ast::NodeId)
+ -> FnCtxt<'a, 'tcx> {
FnCtxt {
body_id: body_id,
writeback_errors: Cell::new(false),
}
}
-fn static_inherited_fields<'a>(ccx: &'a CrateCtxt<'a>) -> Inherited<'a> {
+fn static_inherited_fields<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>)
+ -> Inherited<'a, 'tcx> {
// It's kind of a kludge to manufacture a fake function context
// and statement context, but we might as well do write the code only once
let param_env = ty::ParameterEnvironment {
Inherited::new(ccx.tcx, param_env)
}
-impl<'a> ExprTyProvider for FnCtxt<'a> {
- fn expr_ty(&self, ex: &ast::Expr) -> ty::t {
- self.expr_ty(ex)
- }
-
- fn ty_ctxt<'a>(&'a self) -> &'a ty::ctxt {
- self.ccx.tcx
- }
-}
-
-struct CheckTypeWellFormedVisitor<'a> { ccx: &'a CrateCtxt<'a> }
+struct CheckItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
+struct CheckTypeWellFormedVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> }
-impl<'a> Visitor<()> for CheckTypeWellFormedVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CheckTypeWellFormedVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
check_type_well_formed(self.ccx, i);
visit::walk_item(self, i, ());
}
}
-struct CheckItemTypesVisitor<'a> { ccx: &'a CrateCtxt<'a> }
-impl<'a> Visitor<()> for CheckItemTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CheckItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
check_item(self.ccx, i);
visit::walk_item(self, i, ());
}
}
-struct CheckItemSizedTypesVisitor<'a> { ccx: &'a CrateCtxt<'a> }
+struct CheckItemSizedTypesVisitor<'a, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'tcx>
+}
-impl<'a> Visitor<()> for CheckItemSizedTypesVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for CheckItemSizedTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
check_item_sized(self.ccx, i);
visit::walk_item(self, i, ());
}
}
-struct GatherLocalsVisitor<'a> {
- fcx: &'a FnCtxt<'a>
+struct GatherLocalsVisitor<'a, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'tcx>
}
-impl<'a> GatherLocalsVisitor<'a> {
+impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
fn assign(&mut self, nid: ast::NodeId, ty_opt: Option<ty::t>) {
match ty_opt {
None => {
}
}
-impl<'a> Visitor<()> for GatherLocalsVisitor<'a> {
+impl<'a, 'tcx> Visitor<()> for GatherLocalsVisitor<'a, 'tcx> {
// Add explicitly-declared locals.
fn visit_local(&mut self, local: &ast::Local, _: ()) {
let o_ty = match local.ty.node {
}
-fn check_fn<'a>(
- ccx: &'a CrateCtxt<'a>,
- fn_style: ast::FnStyle,
- fn_style_id: ast::NodeId,
- fn_sig: &ty::FnSig,
- decl: &ast::FnDecl,
- fn_id: ast::NodeId,
- body: &ast::Block,
- inherited: &'a Inherited<'a>)
- -> FnCtxt<'a>
-{
+fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>,
+ fn_style: ast::FnStyle,
+ fn_style_id: ast::NodeId,
+ fn_sig: &ty::FnSig,
+ decl: &ast::FnDecl,
+ fn_id: ast::NodeId,
+ body: &ast::Block,
+ inherited: &'a Inherited<'a, 'tcx>)
+ -> FnCtxt<'a, 'tcx> {
/*!
* Helper used by check_bare_fn and check_expr_fn. Does the
* grungy work of checking a function body and returns the
} else if ty::type_is_region_ptr(t_e) && ty::type_is_unsafe_ptr(t_1) {
fn types_compatible(fcx: &FnCtxt, sp: Span,
t1: ty::t, t2: ty::t) -> bool {
- if !ty::type_is_vec(t1) {
- // If the type being casted from is not a vector, this special
- // case does not apply.
- return false
+ match ty::get(t1).sty {
+ ty::ty_vec(_, Some(_)) => {}
+ _ => return false
}
if ty::type_needs_infer(t2) {
// This prevents this special case from going off when casting
// need to special-case obtaining an unsafe pointer
// from a region pointer to a vector.
- /* this cast is only allowed from &[T] to *T or
+ /* this cast is only allowed from &[T, ..n] to *T or
&T to *T. */
match (&ty::get(t_e).sty, &ty::get(t_1).sty) {
(&ty::ty_rptr(_, ty::mt { ty: mt1, mutbl: ast::MutImmutable }),
fcx.write_ty(id, t_1);
}
-impl<'a> AstConv for FnCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.ccx.tcx }
+impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.ccx.tcx }
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
ty::lookup_item_type(self.tcx(), id)
}
}
-impl<'a> FnCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.ccx.tcx }
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.ccx.tcx }
- pub fn infcx<'b>(&'b self) -> &'b infer::InferCtxt<'a> {
+ pub fn infcx<'b>(&'b self) -> &'b infer::InferCtxt<'a, 'tcx> {
&self.inh.infcx
}
self.ccx.tcx.sess.err_count() - self.err_count_on_creation
}
- pub fn vtable_context<'a>(&'a self) -> VtableContext<'a> {
+ pub fn vtable_context<'a>(&'a self) -> VtableContext<'a, 'tcx> {
VtableContext {
infcx: self.infcx(),
param_env: &self.inh.param_env,
}
}
-impl<'a> RegionScope for infer::InferCtxt<'a> {
+impl<'a, 'tcx> RegionScope for infer::InferCtxt<'a, 'tcx> {
fn default_region_bound(&self, span: Span) -> Option<ty::Region> {
Some(self.next_region_var(infer::MiscVariable(span)))
}
}
}
-impl<'a> FnCtxt<'a> {
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
pub fn tag(&self) -> String {
format!("{}", self as *const FnCtxt)
}
span_err!(fcx.tcx().sess, call_expression.span, E0056,
"overloaded calls are experimental");
span_note!(fcx.tcx().sess, call_expression.span,
- "add `#[feature(overloaded_calls)]` to \
+ "add `#![feature(overloaded_calls)]` to \
the crate attributes to enable");
}
}
};
+ let expr_type = fcx.expr_ty(&*iterator_expr);
let method = method::lookup_in_trait(fcx,
iterator_expr.span,
Some(&*iterator_expr),
token::intern("next"),
trait_did,
- fcx.expr_ty(&*iterator_expr),
+ expr_type,
[],
DontAutoderefReceiver,
IgnoreStaticMethods);
let method_type = match method {
Some(ref method) => method.ty,
None => {
- fcx.tcx().sess.span_err(iterator_expr.span,
- "`for` loop expression does not \
- implement the `Iterator` trait");
+ let true_expr_type = fcx.infcx().resolve_type_vars_if_possible(expr_type);
+
+ if !ty::type_is_error(true_expr_type) {
+ let ty_string = fcx.infcx().ty_to_string(true_expr_type);
+ fcx.tcx().sess.span_err(iterator_expr.span,
+ format!("`for` loop expression has type `{}` which does \
+ not implement the `Iterator` trait",
+ ty_string).as_slice());
+ }
ty::mk_err()
}
};
fcx.write_ty(id, enum_type);
}
+ fn check_struct_fields_on_error(fcx: &FnCtxt,
+ id: ast::NodeId,
+ fields: &[ast::Field],
+ base_expr: Option<Gc<ast::Expr>>) {
+ // Make sure to still write the types
+ // otherwise we might ICE
+ fcx.write_error(id);
+ for field in fields.iter() {
+ check_expr(fcx, &*field.expr);
+ }
+ match base_expr {
+ Some(ref base) => check_expr(fcx, &**base),
+ None => {}
+ }
+ }
+
type ExprCheckerWithTy = fn(&FnCtxt, &ast::Expr, ty::t);
let tcx = fcx.ccx.tcx;
ast::ExprAddrOf(mutbl, ref oprnd) => {
let expected = expected.only_has_type();
let hint = expected.map(fcx, |sty| {
- match *sty { ty::ty_rptr(_, ref mt) => ExpectHasType(mt.ty),
+ match *sty { ty::ty_rptr(_, ref mt) | ty::ty_ptr(ref mt) => ExpectHasType(mt.ty),
_ => NoExpectation }
});
let lvalue_pref = match mutbl {
check_then_else(fcx, &**cond, &**then_blk, opt_else_expr.clone(),
id, expr.span, expected);
}
- ast::ExprWhile(ref cond, ref body) => {
+ ast::ExprWhile(ref cond, ref body, _) => {
check_expr_has_type(fcx, &**cond, ty::mk_bool());
check_block_no_value(fcx, &**body);
let cond_ty = fcx.expr_ty(&**cond);
}
ast::ExprRepeat(ref element, ref count_expr) => {
check_expr_has_type(fcx, &**count_expr, ty::mk_uint());
- let count = ty::eval_repeat_count(fcx, &**count_expr);
+ let count = ty::eval_repeat_count(fcx.tcx(), &**count_expr);
let uty = match expected {
ExpectHasType(uty) => {
variant_id, fields.as_slice());
enum_id
}
+ Some(def::DefTrait(def_id)) => {
+ span_err!(tcx.sess, path.span, E0159,
+ "use of trait `{}` as a struct constructor",
+ pprust::path_to_string(path));
+ check_struct_fields_on_error(fcx,
+ id,
+ fields.as_slice(),
+ base_expr);
+ def_id
+ },
Some(def) => {
// Verify that this was actually a struct.
let typ = ty::lookup_item_type(fcx.ccx.tcx, def.def_id());
span_err!(tcx.sess, path.span, E0071,
"`{}` does not name a structure",
pprust::path_to_string(path));
-
- // Make sure to still write the types
- // otherwise we might ICE
- fcx.write_error(id);
- for field in fields.iter() {
- check_expr(fcx, &*field.expr);
- }
- match base_expr {
- Some(ref base) => check_expr(fcx, &**base),
- None => {}
- }
+ check_struct_fields_on_error(fcx,
+ id,
+ fields.as_slice(),
+ base_expr);
}
}
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
)
)
-pub struct Rcx<'a> {
- fcx: &'a FnCtxt<'a>,
+pub struct Rcx<'a, 'tcx: 'a> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
region_param_pairs: Vec<(ty::Region, ty::ParamTy)>,
}
}
-impl<'a> Rcx<'a> {
- pub fn new(fcx: &'a FnCtxt<'a>,
- initial_repeating_scope: ast::NodeId) -> Rcx<'a> {
+impl<'a, 'tcx> Rcx<'a, 'tcx> {
+ pub fn new(fcx: &'a FnCtxt<'a, 'tcx>,
+ initial_repeating_scope: ast::NodeId) -> Rcx<'a, 'tcx> {
Rcx { fcx: fcx,
repeating_scope: initial_repeating_scope,
region_param_pairs: Vec::new() }
}
- pub fn tcx(&self) -> &'a ty::ctxt {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.fcx.ccx.tcx
}
}
}
-impl<'fcx> mc::Typer for Rcx<'fcx> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'fcx, 'tcx> mc::Typer<'tcx> for Rcx<'fcx, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.fcx.ccx.tcx
}
}
}
-impl<'a> Visitor<()> for Rcx<'a> {
+impl<'a, 'tcx> Visitor<()> for Rcx<'a, 'tcx> {
// (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local,
// However, right now we run into an issue whereby some free
// regions are not properly related if they appear within the
rcx.set_repeating_scope(repeating_scope);
}
- ast::ExprWhile(ref cond, ref body) => {
+ ast::ExprWhile(ref cond, ref body, _) => {
let repeating_scope = rcx.set_repeating_scope(cond.id);
rcx.visit_expr(&**cond, ());
ty::BorrowKind::from_mutbl(m), expr_cmt);
}
- ty::AutoUnsafe(_) | ty::AutoUnsizeUniq(_) | ty::AutoUnsize(_) => {}
+ ty::AutoUnsafe(..) | ty::AutoUnsizeUniq(_) | ty::AutoUnsize(_) => {}
}
}
expr.repr(tcx), callee_scope);
let mc = mc::MemCategorizationContext::new(rcx);
let expr_cmt = ignore_err!(mc.cat_expr(expr));
- let region_min = ty::ReScope(callee_scope);
- link_region(rcx, expr.span, region_min, ty::ImmBorrow, expr_cmt);
+ let borrow_region = ty::ReScope(callee_scope);
+ link_region(rcx, expr.span, borrow_region, ty::ImmBorrow, expr_cmt);
}
fn link_region_from_node_type(rcx: &Rcx,
fn link_region(rcx: &Rcx,
span: Span,
- region_min: ty::Region,
- kind: ty::BorrowKind,
- cmt_borrowed: mc::cmt) {
+ borrow_region: ty::Region,
+ borrow_kind: ty::BorrowKind,
+ borrow_cmt: mc::cmt) {
/*!
- * Informs the inference engine that a borrow of `cmt`
- * must have the borrow kind `kind` and lifetime `region_min`.
- * If `cmt` is a deref of a region pointer with
- * lifetime `r_borrowed`, this will add the constraint that
- * `region_min <= r_borrowed`.
+ * Informs the inference engine that `borrow_cmt` is being
+ * borrowed with kind `borrow_kind` and lifetime `borrow_region`.
+ * In order to ensure borrowck is satisfied, this may create
+ * constraints between regions, as explained in
+ * `link_reborrowed_region()`.
*/
- // Iterate through all the things that must be live at least
- // for the lifetime `region_min` for the borrow to be valid:
- let mut cmt_borrowed = cmt_borrowed;
+ let mut borrow_cmt = borrow_cmt;
+ let mut borrow_kind = borrow_kind;
+
loop {
- debug!("link_region(region_min={}, kind={}, cmt_borrowed={})",
- region_min.repr(rcx.tcx()),
- kind.repr(rcx.tcx()),
- cmt_borrowed.repr(rcx.tcx()));
- match cmt_borrowed.cat.clone() {
- mc::cat_deref(base, _, mc::BorrowedPtr(_, r_borrowed)) |
- mc::cat_deref(base, _, mc::Implicit(_, r_borrowed)) => {
- // References to an upvar `x` are translated to
- // `*x`, since that is what happens in the
- // underlying machine. We detect such references
- // and treat them slightly differently, both to
- // offer better error messages and because we need
- // to infer the kind of borrow (mut, const, etc)
- // to use for each upvar.
- let cause = match base.cat {
- mc::cat_upvar(ref upvar_id, _) => {
- match rcx.fcx.inh.upvar_borrow_map.borrow_mut()
- .find_mut(upvar_id) {
- Some(upvar_borrow) => {
- debug!("link_region: {} <= {}",
- region_min.repr(rcx.tcx()),
- upvar_borrow.region.repr(rcx.tcx()));
- adjust_upvar_borrow_kind_for_loan(
- *upvar_id,
- upvar_borrow,
- kind);
- infer::ReborrowUpvar(span, *upvar_id)
- }
- None => {
- rcx.tcx().sess.span_bug(
- span,
- format!("Illegal upvar id: {}",
- upvar_id.repr(
- rcx.tcx())).as_slice());
- }
- }
+ debug!("link_region(borrow_region={}, borrow_kind={}, borrow_cmt={})",
+ borrow_region.repr(rcx.tcx()),
+ borrow_kind.repr(rcx.tcx()),
+ borrow_cmt.repr(rcx.tcx()));
+ match borrow_cmt.cat.clone() {
+ mc::cat_deref(ref_cmt, _,
+ mc::Implicit(ref_kind, ref_region)) |
+ mc::cat_deref(ref_cmt, _,
+ mc::BorrowedPtr(ref_kind, ref_region)) => {
+ match link_reborrowed_region(rcx, span,
+ borrow_region, borrow_kind,
+ ref_cmt, ref_region, ref_kind) {
+ Some((c, k)) => {
+ borrow_cmt = c;
+ borrow_kind = k;
}
-
- _ => {
- infer::Reborrow(span)
+ None => {
+ return;
}
- };
-
- debug!("link_region: {} <= {}",
- region_min.repr(rcx.tcx()),
- r_borrowed.repr(rcx.tcx()));
- rcx.fcx.mk_subr(cause, region_min, r_borrowed);
-
- if kind != ty::ImmBorrow {
- // If this is a mutable borrow, then the thing
- // being borrowed will have to be unique.
- // In user code, this means it must be an `&mut`
- // borrow, but for an upvar, we might opt
- // for an immutable-unique borrow.
- adjust_upvar_borrow_kind_for_unique(rcx, base);
}
-
- // Borrowing an `&mut` pointee for `region_min` is
- // only valid if the pointer resides in a unique
- // location which is itself valid for
- // `region_min`. We don't care about the unique
- // part, but we may need to influence the
- // inference to ensure that the location remains
- // valid.
- //
- // FIXME(#8624) fixing borrowck will require this
- // if m == ast::m_mutbl {
- // cmt_borrowed = cmt_base;
- // } else {
- // return;
- // }
- return;
}
+
mc::cat_discr(cmt_base, _) |
mc::cat_downcast(cmt_base) |
mc::cat_deref(cmt_base, _, mc::GcPtr(..)) |
mc::cat_deref(cmt_base, _, mc::OwnedPtr) |
mc::cat_interior(cmt_base, _) => {
- // Interior or owned data requires its base to be valid
- cmt_borrowed = cmt_base;
+ // Borrowing interior or owned data requires the base
+ // to be valid and borrowable in the same fashion.
+ borrow_cmt = cmt_base;
+ borrow_kind = borrow_kind;
}
+
mc::cat_deref(_, _, mc::UnsafePtr(..)) |
mc::cat_static_item |
mc::cat_copied_upvar(..) |
}
}
+fn link_reborrowed_region(rcx: &Rcx,
+ span: Span,
+ borrow_region: ty::Region,
+ borrow_kind: ty::BorrowKind,
+ ref_cmt: mc::cmt,
+ ref_region: ty::Region,
+ ref_kind: ty::BorrowKind)
+ -> Option<(mc::cmt, ty::BorrowKind)>
+{
+ /*!
+ * This is the most complicated case: the path being borrowed is
+ * itself the referent of a borrowed pointer. Let me give an
+ * example fragment of code to make clear(er) the situation:
+ *
+ * let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
+ * ...
+ * &'z *r // the reborrow has lifetime 'z
+ *
+ * Now, in this case, our primary job is to add the inference
+     * constraint that `'z <= 'a`. Given this setup, let's clarify the
+     * parameters (roughly) in terms of the example:
+ *
+ * A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
+ * borrow_region ^~ ref_region ^~
+ * borrow_kind ^~ ref_kind ^~
+ * ref_cmt ^
+ *
+ * Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
+ *
+ * Unfortunately, there are some complications beyond the simple
+ * scenario I just painted:
+ *
+ * 1. The reference `r` might in fact be a "by-ref" upvar. In that
+ * case, we have two jobs. First, we are inferring whether this reference
+ * should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
+ * adjust that based on this borrow (e.g., if this is an `&mut` borrow,
+ * then `r` must be an `&mut` reference). Second, whenever we link
+ * two regions (here, `'z <= 'a`), we supply a *cause*, and in this
+ * case we adjust the cause to indicate that the reference being
+ * "reborrowed" is itself an upvar. This provides a nicer error message
+ * should something go wrong.
+ *
+ * 2. There may in fact be more levels of reborrowing. In the
+ * example, I said the borrow was like `&'z *r`, but it might
+ * in fact be a borrow like `&'z **q` where `q` has type `&'a
+ * &'b mut T`. In that case, we want to ensure that `'z <= 'a`
+ * and `'z <= 'b`. This is explained more below.
+ *
+ * The return value of this function indicates whether we need to
+ * recurse and process `ref_cmt` (see case 2 above).
+ */
+
+ // Detect references to an upvar `x`:
+ let cause = match ref_cmt.cat {
+ mc::cat_upvar(ref upvar_id, _) => {
+ let mut upvar_borrow_map =
+ rcx.fcx.inh.upvar_borrow_map.borrow_mut();
+ match upvar_borrow_map.find_mut(upvar_id) {
+ Some(upvar_borrow) => {
+ // Adjust mutability that we infer for the upvar
+ // so it can accommodate being borrowed with
+                // mutability `borrow_kind`:
+ adjust_upvar_borrow_kind_for_loan(*upvar_id,
+ upvar_borrow,
+ borrow_kind);
+
+ infer::ReborrowUpvar(span, *upvar_id)
+ }
+ None => {
+ rcx.tcx().sess.span_bug(
+ span,
+ format!("Illegal upvar id: {}",
+ upvar_id.repr(
+ rcx.tcx())).as_slice());
+ }
+ }
+ }
+
+ _ => {
+ infer::Reborrow(span)
+ }
+ };
+
+ debug!("link_reborrowed_region: {} <= {}",
+ borrow_region.repr(rcx.tcx()),
+ ref_region.repr(rcx.tcx()));
+ rcx.fcx.mk_subr(cause, borrow_region, ref_region);
+
+ // Decide whether we need to recurse and link any regions within
+ // the `ref_cmt`. This is concerned for the case where the value
+ // being reborrowed is in fact a borrowed pointer found within
+ // another borrowed pointer. For example:
+ //
+ // let p: &'b &'a mut T = ...;
+ // ...
+ // &'z **p
+ //
+ // What makes this case particularly tricky is that, if the data
+ // being borrowed is a `&mut` or `&uniq` borrow, borrowck requires
+ // not only that `'z <= 'a`, (as before) but also `'z <= 'b`
+ // (otherwise the user might mutate through the `&mut T` reference
+ // after `'b` expires and invalidate the borrow we are looking at
+ // now).
+ //
+ // So let's re-examine our parameters in light of this more
+ // complicated (possible) scenario:
+ //
+ // A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T`
+ // borrow_region ^~ ref_region ^~
+ // borrow_kind ^~ ref_kind ^~
+ // ref_cmt ^~~
+ //
+ // (Note that since we have not examined `ref_cmt.cat`, we don't
+ // know whether this scenario has occurred; but I wanted to show
+ // how all the types get adjusted.)
+ match ref_kind {
+ ty::ImmBorrow => {
+ // The reference being reborrowed is a sharable ref of
+ // type `&'a T`. In this case, it doesn't matter where we
+ // *found* the `&T` pointer, the memory it references will
+ // be valid and immutable for `'a`. So we can stop here.
+ //
+ // (Note that the `borrow_kind` must also be ImmBorrow or
+            // else the user has borrowed imm memory as mut memory,
+ // which means they'll get an error downstream in borrowck
+ // anyhow.)
+ return None;
+ }
+
+ ty::MutBorrow | ty::UniqueImmBorrow => {
+ // The reference being reborrowed is either an `&mut T` or
+ // `&uniq T`. This is the case where recursion is needed.
+ //
+ // One interesting twist is that we can weaken the borrow
+ // kind when we recurse: to reborrow an `&mut` referent as
+ // mutable, borrowck requires a unique path to the `&mut`
+ // reference but not necessarily a *mutable* path.
+ let new_borrow_kind = match borrow_kind {
+ ty::ImmBorrow =>
+ ty::ImmBorrow,
+ ty::MutBorrow | ty::UniqueImmBorrow =>
+ ty::UniqueImmBorrow
+ };
+ return Some((ref_cmt, new_borrow_kind));
+ }
+ }
+}
+
fn adjust_borrow_kind_for_assignment_lhs(rcx: &Rcx,
lhs: &ast::Expr) {
/*!
fn adjust_upvar_borrow_kind_for_mut(rcx: &Rcx,
cmt: mc::cmt) {
+ /*!
+ * Indicates that `cmt` is being directly mutated (e.g., assigned
+ * to). If cmt contains any by-ref upvars, this implies that
+     * those upvars must be borrowed using an `&mut` borrow.
+ */
+
let mut cmt = cmt;
loop {
debug!("adjust_upvar_borrow_kind_for_mut(cmt={})",
RegionSubParamConstraint(Option<ty::t>, ty::Region, ty::ParamTy),
}
-struct Wf<'a> {
- tcx: &'a ty::ctxt,
+struct Wf<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
stack: Vec<(ty::Region, Option<ty::t>)>,
out: Vec<WfConstraint>,
}
wf.out
}
-impl<'a> Wf<'a> {
+impl<'a, 'tcx> Wf<'a, 'tcx> {
fn accumulate_from_ty(&mut self, ty: ty::t) {
debug!("Wf::accumulate_from_ty(ty={})",
ty.repr(self.tcx));
/// A vtable context includes an inference context, a parameter environment,
/// and a list of unboxed closure types.
-pub struct VtableContext<'a> {
- pub infcx: &'a infer::InferCtxt<'a>,
+pub struct VtableContext<'a, 'tcx: 'a> {
+ pub infcx: &'a infer::InferCtxt<'a, 'tcx>,
pub param_env: &'a ty::ParameterEnvironment,
pub unboxed_closures: &'a RefCell<DefIdMap<ty::UnboxedClosure>>,
}
-impl<'a> VtableContext<'a> {
- pub fn tcx(&self) -> &'a ty::ctxt { self.infcx.tcx }
+impl<'a, 'tcx> VtableContext<'a, 'tcx> {
+ pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.infcx.tcx }
}
fn lookup_vtables(vcx: &VtableContext,
let cx = fcx.ccx;
let check_object_cast = |src_ty: ty::t, target_ty: ty::t| {
+ debug!("check_object_cast {} to {}",
+ fcx.infcx().ty_to_string(src_ty),
+ fcx.infcx().ty_to_string(target_ty));
// Check that a cast is of correct types.
match (&ty::get(target_ty).sty, &ty::get(src_ty).sty) {
(&ty::ty_rptr(_, ty::mt{ty, mutbl}), &ty::ty_rptr(_, mt))
+ | (&ty::ty_ptr(ty::mt{ty, mutbl}), &ty::ty_rptr(_, mt))
if !mutability_allowed(mt.mutbl, mutbl) => {
match ty::get(ty).sty {
ty::ty_trait(..) => {
_ => {}
}
}
- (&ty::ty_uniq(..), &ty::ty_uniq(..) ) => {}
+ (&ty::ty_uniq(..), &ty::ty_uniq(..) )
+ | (&ty::ty_ptr(..), &ty::ty_ptr(..) )
+ | (&ty::ty_ptr(..), &ty::ty_rptr(..)) => {}
(&ty::ty_rptr(r_t, _), &ty::ty_rptr(r_s, _)) => {
infer::mk_subr(fcx.infcx(),
infer::RelateObjectBound(ex.span),
_ => {}
}
}
+ (&ty::ty_ptr(ty::mt{ty, ..}), _) => {
+ match ty::get(ty).sty {
+ ty::ty_trait(..) => {
+ span_err!(fcx.ccx.tcx.sess, ex.span, E0160,
+ "can only cast an *-pointer or &-pointer to an *-object, not a {}",
+ ty::ty_sort_string(fcx.tcx(), src_ty));
+ }
+ _ => {}
+ }
+ }
_ => {}
}
};
match autoref {
&ty::AutoUnsize(ref k) |
&ty::AutoUnsizeUniq(ref k) => trait_cast_types_unsize(fcx, k, src_ty, sp),
- &ty::AutoPtr(_, _, Some(box ref autoref)) => {
+ &ty::AutoPtr(_, _, Some(box ref autoref)) |
+ &ty::AutoUnsafe(_, Some(box ref autoref)) => {
trait_cast_types_autoref(fcx, autoref, src_ty, sp)
}
_ => None
&ty::AutoDerefRef(AutoDerefRef{autoref: Some(ref autoref), autoderefs}) => {
let mut derefed_type = src_ty;
for _ in range(0, autoderefs) {
- derefed_type = ty::deref(derefed_type, false).unwrap().ty;
+ derefed_type = ty::deref(derefed_type, true).unwrap().ty;
derefed_type = structurally_resolved_type(fcx, sp, derefed_type)
}
trait_cast_types_autoref(fcx, autoref, derefed_type, sp)
false)
}
-impl<'a, 'b> visit::Visitor<()> for &'a FnCtxt<'b> {
+impl<'a, 'b, 'tcx> visit::Visitor<()> for &'a FnCtxt<'b, 'tcx> {
fn visit_expr(&mut self, ex: &ast::Expr, _: ()) {
early_resolve_expr(ex, *self, false);
visit::walk_expr(self, ex, ());
// there, it applies a few ad-hoc checks that were not convenient to
// do elsewhere.
-struct WritebackCx<'cx> {
- fcx: &'cx FnCtxt<'cx>,
+struct WritebackCx<'cx, 'tcx: 'cx> {
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
}
-impl<'cx> WritebackCx<'cx> {
- fn new(fcx: &'cx FnCtxt) -> WritebackCx<'cx> {
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn new(fcx: &'cx FnCtxt<'cx, 'tcx>) -> WritebackCx<'cx, 'tcx> {
WritebackCx { fcx: fcx }
}
- fn tcx(&self) -> &'cx ty::ctxt {
+ fn tcx(&self) -> &'cx ty::ctxt<'tcx> {
self.fcx.tcx()
}
}
// below. In general, a function is made into a `visitor` if it must
// traffic in node-ids or update tables in the type context etc.
-impl<'cx> Visitor<()> for WritebackCx<'cx> {
+impl<'cx, 'tcx> Visitor<()> for WritebackCx<'cx, 'tcx> {
fn visit_item(&mut self, _: &ast::Item, _: ()) {
// Ignore items
}
}
}
-impl<'cx> WritebackCx<'cx> {
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
fn visit_upvar_borrow_map(&self) {
if self.fcx.writeback_errors.get() {
return;
// The Resolver. This is the type folding engine that detects
// unresolved types and so forth.
-struct Resolver<'cx> {
- tcx: &'cx ty::ctxt,
- infcx: &'cx infer::InferCtxt<'cx>,
+struct Resolver<'cx, 'tcx: 'cx> {
+ tcx: &'cx ty::ctxt<'tcx>,
+ infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason,
}
-impl<'cx> Resolver<'cx> {
- fn new(fcx: &'cx FnCtxt<'cx>,
+impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
+ fn new(fcx: &'cx FnCtxt<'cx, 'tcx>,
reason: ResolveReason)
- -> Resolver<'cx>
+ -> Resolver<'cx, 'tcx>
{
Resolver { infcx: fcx.infcx(),
tcx: fcx.tcx(),
reason: reason }
}
- fn from_infcx(infcx: &'cx infer::InferCtxt<'cx>,
+ fn from_infcx(infcx: &'cx infer::InferCtxt<'cx, 'tcx>,
writeback_errors: &'cx Cell<bool>,
reason: ResolveReason)
- -> Resolver<'cx>
+ -> Resolver<'cx, 'tcx>
{
Resolver { infcx: infcx,
tcx: infcx.tcx,
}
}
-impl<'cx> TypeFolder for Resolver<'cx> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.tcx
}
ty_unboxed_closure(def_id, _) => {
Some(def_id)
}
- ty_rptr(_, ty::mt {ty, ..}) | ty_uniq(ty) => match ty::get(ty).sty {
- ty_trait(box ty::TyTrait { def_id, .. }) => {
- Some(def_id)
- }
- _ => {
- fail!("get_base_type() returned a type that wasn't an \
- enum, struct, or trait");
+ ty_ptr(ty::mt {ty, ..}) |
+ ty_rptr(_, ty::mt {ty, ..}) |
+ ty_uniq(ty) => {
+ match ty::get(ty).sty {
+ ty_trait(box ty::TyTrait { def_id, .. }) => {
+ Some(def_id)
+ }
+ _ => {
+ fail!("get_base_type() returned a type that wasn't an \
+ enum, struct, or trait");
+ }
}
- },
+ }
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
}
}
-struct CoherenceChecker<'a> {
- crate_context: &'a CrateCtxt<'a>,
- inference_context: InferCtxt<'a>,
+struct CoherenceChecker<'a, 'tcx: 'a> {
+ crate_context: &'a CrateCtxt<'a, 'tcx>,
+ inference_context: InferCtxt<'a, 'tcx>,
}
-struct CoherenceCheckVisitor<'a> {
- cc: &'a CoherenceChecker<'a>
+struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for CoherenceCheckVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for CoherenceCheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item, _: ()) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
}
}
-struct PrivilegedScopeVisitor<'a> { cc: &'a CoherenceChecker<'a> }
+struct PrivilegedScopeVisitor<'a, 'tcx: 'a> {
+ cc: &'a CoherenceChecker<'a, 'tcx>
+}
-impl<'a> visit::Visitor<()> for PrivilegedScopeVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for PrivilegedScopeVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item, _: ()) {
match item.node {
}
}
-impl<'a> CoherenceChecker<'a> {
+impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// of type parameters and supertraits. This is information we need to
// know later when parsing field defs.
-struct CollectTraitDefVisitor<'a> {
- ccx: &'a CrateCtxt<'a>
+struct CollectTraitDefVisitor<'a, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for CollectTraitDefVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for CollectTraitDefVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
match i.node {
ast::ItemTrait(..) => {
///////////////////////////////////////////////////////////////////////////
// Second phase: collection proper.
-struct CollectItemTypesVisitor<'a> {
- ccx: &'a CrateCtxt<'a>
+struct CollectItemTypesVisitor<'a, 'tcx: 'a> {
+ ccx: &'a CrateCtxt<'a, 'tcx>
}
-impl<'a> visit::Visitor<()> for CollectItemTypesVisitor<'a> {
+impl<'a, 'tcx> visit::Visitor<()> for CollectItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &ast::Item, _: ()) {
convert(self.ccx, i);
visit::walk_item(self, i, ());
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t;
}
-impl<'a> ToTy for CrateCtxt<'a> {
+impl<'a, 'tcx> ToTy for CrateCtxt<'a, 'tcx> {
fn to_ty<RS:RegionScope>(&self, rs: &RS, ast_ty: &ast::Ty) -> ty::t {
ast_ty_to_ty(self, rs, ast_ty)
}
}
-impl<'a> AstConv for CrateCtxt<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }
+impl<'a, 'tcx> AstConv<'tcx> for CrateCtxt<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn get_item_ty(&self, id: ast::DefId) -> ty::Polytype {
if id.krate != ast::LOCAL_CRATE {
ast::SelfExplicit(ref ast_type, _) => {
let typ = crate_context.to_ty(rs, &**ast_type);
let base_type = match ty::get(typ).sty {
- ty::ty_rptr(_, tm) => tm.ty,
+ ty::ty_ptr(tm) | ty::ty_rptr(_, tm) => tm.ty,
ty::ty_uniq(typ) => typ,
_ => typ,
};
*/
use middle::subst;
-use middle::ty::{AutoPtr, AutoDerefRef, AutoUnsize};
+use middle::ty::{AutoPtr, AutoDerefRef, AutoUnsize, AutoUnsafe};
use middle::ty::{mt};
use middle::ty;
use middle::typeck::infer::{CoerceResult, resolve_type, Coercion};
// Note: Coerce is not actually a combiner, in that it does not
// conform to the same interface, though it performs a similar
// function.
-pub struct Coerce<'f>(pub CombineFields<'f>);
+pub struct Coerce<'f, 'tcx: 'f>(pub CombineFields<'f, 'tcx>);
-impl<'f> Coerce<'f> {
- pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> {
+impl<'f, 'tcx> Coerce<'f, 'tcx> {
+ pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f, 'tcx> {
let Coerce(ref v) = *self; v
}
// to `&[T]`. Doing it all at once makes the target code a bit more
// efficient and spares us from having to handle multiple coercions.
match ty::get(b).sty {
- ty::ty_rptr(_, mt_b) => {
+ ty::ty_ptr(mt_b) | ty::ty_rptr(_, mt_b) => {
match ty::get(mt_b.ty).sty {
ty::ty_vec(_, None) => {
let unsize_and_ref = self.unpack_actual_value(a, |sty_a| {
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match ty::get(b).sty {
+ ty::ty_ptr(mt_b) => {
+ match ty::get(mt_b.ty).sty {
+ ty::ty_str => {
+ return self.unpack_actual_value(a, |sty_a| {
+ self.coerce_unsafe_ptr(a, sty_a, b, ast::MutImmutable)
+ });
+ }
+
+ ty::ty_trait(..) => {
+ let result = self.unpack_actual_value(a, |sty_a| {
+ self.coerce_unsafe_object(a, sty_a, b, mt_b.mutbl)
+ });
+
+ match result {
+ Ok(t) => return Ok(t),
+ Err(..) => {}
+ }
+ }
+
+ _ => {
+ return self.unpack_actual_value(a, |sty_a| {
+ self.coerce_unsafe_ptr(a, sty_a, b, mt_b.mutbl)
+ });
+ }
+ };
+ }
+
ty::ty_rptr(_, mt_b) => {
match ty::get(mt_b.ty).sty {
ty::ty_str => {
});
}
- ty::ty_ptr(mt_b) => {
- return self.unpack_actual_value(a, |sty_a| {
- self.coerce_unsafe_ptr(a, sty_a, b, mt_b)
- });
- }
-
_ => {}
}
let a_borrowed = ty::mk_rptr(self.get_ref().infcx.tcx,
r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
- if_ok!(sub.tys(a_borrowed, b));
+ try!(sub.tys(a_borrowed, b));
Ok(Some(AutoDerefRef(AutoDerefRef {
autoderefs: 1,
let r_borrow = self.get_ref().infcx.next_region_var(coercion);
let unsized_ty = ty::mk_slice(self.get_ref().infcx.tcx, r_borrow,
mt {ty: t_a, mutbl: mutbl_b});
- if_ok!(self.get_ref().infcx.try(|| sub.tys(unsized_ty, b)));
+ try!(self.get_ref().infcx.try(|| sub.tys(unsized_ty, b)));
Ok(Some(AutoDerefRef(AutoDerefRef {
autoderefs: 0,
autoref: Some(ty::AutoPtr(r_borrow,
let sty_b = &ty::get(b).sty;
match (sty_a, sty_b) {
- (&ty::ty_uniq(_), &ty::ty_rptr(..)) => Err(ty::terr_mismatch),
(&ty::ty_rptr(_, ty::mt{ty: t_a, ..}), &ty::ty_rptr(_, mt_b)) => {
self.unpack_actual_value(t_a, |sty_a| {
match self.unsize_ty(sty_a, mt_b.ty) {
let ty = ty::mk_rptr(self.get_ref().infcx.tcx,
r_borrow,
ty::mt{ty: ty, mutbl: mt_b.mutbl});
- if_ok!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
+ try!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
AutoPtr(AutoUnsize({:?})))", kind);
Ok(Some(AutoDerefRef(AutoDerefRef {
}
})
}
+ (&ty::ty_rptr(_, ty::mt{ty: t_a, ..}), &ty::ty_ptr(mt_b)) => {
+ self.unpack_actual_value(t_a, |sty_a| {
+ match self.unsize_ty(sty_a, mt_b.ty) {
+ Some((ty, kind)) => {
+ let ty = ty::mk_ptr(self.get_ref().infcx.tcx,
+ ty::mt{ty: ty, mutbl: mt_b.mutbl});
+ try!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
+                            debug!("Success, coerced with AutoDerefRef(1, \
+                                    AutoUnsafe(AutoUnsize({:?})))", kind);
+ Ok(Some(AutoDerefRef(AutoDerefRef {
+ autoderefs: 1,
+ autoref: Some(ty::AutoUnsafe(mt_b.mutbl,
+ Some(box AutoUnsize(kind))))
+ })))
+ }
+ _ => Err(ty::terr_mismatch)
+ }
+ })
+ }
(&ty::ty_uniq(t_a), &ty::ty_uniq(t_b)) => {
self.unpack_actual_value(t_a, |sty_a| {
match self.unsize_ty(sty_a, t_b) {
Some((ty, kind)) => {
let ty = ty::mk_uniq(self.get_ref().infcx.tcx, ty);
- if_ok!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
+ try!(self.get_ref().infcx.try(|| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
AutoUnsizeUniq({:?}))", kind);
Ok(Some(AutoDerefRef(AutoDerefRef {
sty_a: &ty::sty,
ty_b: ty::t)
-> Option<(ty::t, ty::UnsizeKind)> {
- debug!("unsize_ty(sty_a={:?}", sty_a);
+ debug!("unsize_ty(sty_a={:?}, ty_b={})", sty_a, ty_b.repr(self.get_ref().infcx.tcx));
let tcx = self.get_ref().infcx.tcx;
b: ty::t,
b_mutbl: ast::Mutability) -> CoerceResult
{
+ let tcx = self.get_ref().infcx.tcx;
+
debug!("coerce_borrowed_object(a={}, sty_a={:?}, b={})",
- a.repr(self.get_ref().infcx.tcx), sty_a,
- b.repr(self.get_ref().infcx.tcx));
+ a.repr(tcx), sty_a,
+ b.repr(tcx));
- let tcx = self.get_ref().infcx.tcx;
let coercion = Coercion(self.get_ref().trace.clone());
let r_a = self.get_ref().infcx.next_region_var(coercion);
- let a_borrowed = match *sty_a {
- ty::ty_uniq(ty) | ty::ty_rptr(_, ty::mt{ty, ..}) => match ty::get(ty).sty {
+ self.coerce_object(a, sty_a, b,
+ |tr| ty::mk_rptr(tcx, r_a, ty::mt{ mutbl: b_mutbl, ty: tr }),
+ || AutoPtr(r_a, b_mutbl, None))
+ }
+
+ fn coerce_unsafe_object(&self,
+ a: ty::t,
+ sty_a: &ty::sty,
+ b: ty::t,
+ b_mutbl: ast::Mutability) -> CoerceResult
+ {
+ let tcx = self.get_ref().infcx.tcx;
+
+ debug!("coerce_unsafe_object(a={}, sty_a={:?}, b={})",
+ a.repr(tcx), sty_a,
+ b.repr(tcx));
+
+ self.coerce_object(a, sty_a, b,
+ |tr| ty::mk_ptr(tcx, ty::mt{ mutbl: b_mutbl, ty: tr }),
+ || AutoUnsafe(b_mutbl, None))
+ }
+
+ fn coerce_object(&self,
+ a: ty::t,
+ sty_a: &ty::sty,
+ b: ty::t,
+ mk_ty: |ty::t| -> ty::t,
+ mk_adjust: || -> ty::AutoRef) -> CoerceResult
+ {
+ let tcx = self.get_ref().infcx.tcx;
+
+ match *sty_a {
+ ty::ty_rptr(_, ty::mt{ty, ..}) => match ty::get(ty).sty {
ty::ty_trait(box ty::TyTrait {
def_id,
ref substs,
..
}) => {
let tr = ty::mk_trait(tcx, def_id, substs.clone(), bounds);
- ty::mk_rptr(tcx, r_a, ty::mt{ mutbl: b_mutbl, ty: tr })
+ try!(self.subtype(mk_ty(tr), b));
+ Ok(Some(AutoDerefRef(AutoDerefRef {
+ autoderefs: 1,
+ autoref: Some(mk_adjust())
+ })))
}
_ => {
- return self.subtype(a, b);
+ self.subtype(a, b)
}
},
_ => {
- return self.subtype(a, b);
+ self.subtype(a, b)
}
- };
-
- if_ok!(self.subtype(a_borrowed, b));
- Ok(Some(AutoDerefRef(AutoDerefRef {
- autoderefs: 1,
- autoref: Some(AutoPtr(r_a, b_mutbl, None))
- })))
+ }
}
pub fn coerce_borrowed_fn(&self,
sig: fn_ty_a.sig.clone(),
.. *fn_ty_b
});
- if_ok!(self.subtype(a_closure, b));
+ try!(self.subtype(a_closure, b));
Ok(Some(adj))
})
}
a: ty::t,
sty_a: &ty::sty,
b: ty::t,
- mt_b: ty::mt)
+ mutbl_b: ast::Mutability)
-> CoerceResult {
debug!("coerce_unsafe_ptr(a={}, sty_a={:?}, b={})",
a.repr(self.get_ref().infcx.tcx), sty_a,
}
};
- // check that the types which they point at are compatible
+ // Check that the types which they point at are compatible.
+ // Note that we don't adjust the mutability here. We cannot change
+ // the mutability and the kind of pointer in a single coercion.
let a_unsafe = ty::mk_ptr(self.get_ref().infcx.tcx, mt_a);
- if_ok!(self.subtype(a_unsafe, b));
+ try!(self.subtype(a_unsafe, b));
- // although references and unsafe ptrs have the same
+ // Although references and unsafe ptrs have the same
// representation, we still register an AutoDerefRef so that
- // regionck knows that the region for `a` must be valid here
+ // regionck knows that the region for `a` must be valid here.
Ok(Some(AutoDerefRef(AutoDerefRef {
autoderefs: 1,
- autoref: Some(ty::AutoUnsafe(mt_b.mutbl))
+ autoref: Some(ty::AutoUnsafe(mutbl_b, None))
})))
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ______________________________________________________________________
-// Type combining
+///////////////////////////////////////////////////////////////////////////
+// # Type combining
//
-// There are three type combiners: sub, lub, and glb. Each implements
-// the trait `Combine` and contains methods for combining two
-// instances of various things and yielding a new instance. These
-// combiner methods always yield a `result<T>`---failure is propagated
-// upward using `and_then()` methods. There is a lot of common code for
-// these operations, implemented as default methods on the `Combine`
-// trait.
+// There are four type combiners: equate, sub, lub, and glb. Each
+// implements the trait `Combine` and contains methods for combining
+// two instances of various things and yielding a new instance. These
+// combiner methods always yield a `Result<T>`. There is a lot of
+// common code for these operations, implemented as default methods on
+// the `Combine` trait.
//
-// In reality, the sub operation is rather different from lub/glb, but
-// they are combined into one trait to avoid duplication (they used to
-// be separate but there were many bugs because there were two copies
-// of most routines).
+// Each operation may have side-effects on the inference context,
+// though these can be unrolled using snapshots. On success, the
+// LUB/GLB operations return the appropriate bound. The Eq and Sub
+// operations generally return the first operand.
//
-// The differences are:
-//
-// - when making two things have a sub relationship, the order of the
-// arguments is significant (a <: b) and the return value of the
-// combine functions is largely irrelevant. The important thing is
-// whether the action succeeds or fails. If it succeeds, then side
-// effects have been committed into the type variables.
-//
-// - for GLB/LUB, the order of arguments is not significant (GLB(a,b) ==
-// GLB(b,a)) and the return value is important (it is the GLB). Of
-// course GLB/LUB may also have side effects.
-//
-// Contravariance
+// ## Contravariance
//
// When you are relating two things which have a contravariant
// relationship, you should use `contratys()` or `contraregions()`,
// rather than inversing the order of arguments! This is necessary
// because the order of arguments is not relevant for LUB and GLB. It
// is also useful to track which value is the "expected" value in
-// terms of error reporting, although we do not do that properly right
-// now.
+// terms of error reporting.
use middle::subst;
use middle::ty::{IntType, UintType};
use middle::ty::{BuiltinBounds};
use middle::ty;
-use middle::typeck::infer::{ToUres};
+use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::sub::Sub;
use middle::typeck::infer::unify::InferCtxtMethodsForSimplyUnifiableTypes;
-use middle::typeck::infer::{InferCtxt, cres, ures};
-use middle::typeck::infer::{TypeTrace};
-use util::common::indent;
+use middle::typeck::infer::{InferCtxt, cres};
+use middle::typeck::infer::{MiscVariable, TypeTrace};
+use middle::typeck::infer::type_variable::{RelationDir, EqTo,
+ SubtypeOf, SupertypeOf};
+use middle::ty_fold::{RegionFolder, TypeFoldable};
use util::ppaux::Repr;
use std::result;
use syntax::ast;
use syntax::abi;
-pub trait Combine {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a>;
+pub trait Combine<'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx>;
fn tag(&self) -> String;
fn a_is_expected(&self) -> bool;
fn trace(&self) -> TypeTrace;
- fn sub<'a>(&'a self) -> Sub<'a>;
- fn lub<'a>(&'a self) -> Lub<'a>;
- fn glb<'a>(&'a self) -> Glb<'a>;
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx>;
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx>;
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx>;
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx>;
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt>;
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t>;
try!(result::fold_(as_
.iter()
.zip(bs.iter())
- .map(|(a, b)| eq_tys(self, *a, *b))));
+ .map(|(a, b)| self.equate().tys(*a, *b))));
Ok(Vec::from_slice(as_))
}
for &space in subst::ParamSpace::all().iter() {
let a_tps = a_subst.types.get_slice(space);
let b_tps = b_subst.types.get_slice(space);
- let tps = if_ok!(self.tps(space, a_tps, b_tps));
+ let tps = try!(self.tps(space, a_tps, b_tps));
let a_regions = a_subst.regions().get_slice(space);
let b_regions = b_subst.regions().get_slice(space);
}
};
- let regions = if_ok!(relate_region_params(self,
- item_def_id,
- r_variances,
- a_regions,
- b_regions));
+ let regions = try!(relate_region_params(self,
+ item_def_id,
+ r_variances,
+ a_regions,
+ b_regions));
substs.types.replace(space, tps);
substs.mut_regions().replace(space, regions);
return Ok(substs);
- fn relate_region_params<C:Combine>(this: &C,
- item_def_id: ast::DefId,
- variances: &[ty::Variance],
- a_rs: &[ty::Region],
- b_rs: &[ty::Region])
- -> cres<Vec<ty::Region>>
- {
+ fn relate_region_params<'tcx, C: Combine<'tcx>>(this: &C,
+ item_def_id: ast::DefId,
+ variances: &[ty::Variance],
+ a_rs: &[ty::Region],
+ b_rs: &[ty::Region])
+ -> cres<Vec<ty::Region>> {
let tcx = this.infcx().tcx;
let num_region_params = variances.len();
let b_r = b_rs[i];
let variance = variances[i];
let r = match variance {
- ty::Invariant => {
- eq_regions(this, a_r, b_r)
- .and_then(|()| Ok(a_r))
- }
+ ty::Invariant => this.equate().regions(a_r, b_r),
ty::Covariant => this.regions(a_r, b_r),
ty::Contravariant => this.contraregions(a_r, b_r),
ty::Bivariant => Ok(a_r),
};
- rs.push(if_ok!(r));
+ rs.push(try!(r));
}
Ok(rs)
}
fn bare_fn_tys(&self, a: &ty::BareFnTy,
b: &ty::BareFnTy) -> cres<ty::BareFnTy> {
- let fn_style = if_ok!(self.fn_styles(a.fn_style, b.fn_style));
- let abi = if_ok!(self.abi(a.abi, b.abi));
- let sig = if_ok!(self.fn_sigs(&a.sig, &b.sig));
+ let fn_style = try!(self.fn_styles(a.fn_style, b.fn_style));
+ let abi = try!(self.abi(a.abi, b.abi));
+ let sig = try!(self.fn_sigs(&a.sig, &b.sig));
Ok(ty::BareFnTy {fn_style: fn_style,
abi: abi,
sig: sig})
let store = match (a.store, b.store) {
(ty::RegionTraitStore(a_r, a_m),
ty::RegionTraitStore(b_r, b_m)) if a_m == b_m => {
- let r = if_ok!(self.contraregions(a_r, b_r));
+ let r = try!(self.contraregions(a_r, b_r));
ty::RegionTraitStore(r, a_m)
}
return Err(ty::terr_sigil_mismatch(expected_found(self, a.store, b.store)))
}
};
- let fn_style = if_ok!(self.fn_styles(a.fn_style, b.fn_style));
- let onceness = if_ok!(self.oncenesses(a.onceness, b.onceness));
- let bounds = if_ok!(self.existential_bounds(a.bounds, b.bounds));
- let sig = if_ok!(self.fn_sigs(&a.sig, &b.sig));
- let abi = if_ok!(self.abi(a.abi, b.abi));
+ let fn_style = try!(self.fn_styles(a.fn_style, b.fn_style));
+ let onceness = try!(self.oncenesses(a.onceness, b.onceness));
+ let bounds = try!(self.existential_bounds(a.bounds, b.bounds));
+ let sig = try!(self.fn_sigs(&a.sig, &b.sig));
+ let abi = try!(self.abi(a.abi, b.abi));
Ok(ty::ClosureTy {
fn_style: fn_style,
onceness: onceness,
Err(ty::terr_traits(
expected_found(self, a.def_id, b.def_id)))
} else {
- let substs = if_ok!(self.substs(a.def_id, &a.substs, &b.substs));
+ let substs = try!(self.substs(a.def_id, &a.substs, &b.substs));
Ok(ty::TraitRef { def_id: a.def_id,
substs: substs })
}
}
#[deriving(Clone)]
-pub struct CombineFields<'a> {
- pub infcx: &'a InferCtxt<'a>,
+pub struct CombineFields<'a, 'tcx: 'a> {
+ pub infcx: &'a InferCtxt<'a, 'tcx>,
pub a_is_expected: bool,
pub trace: TypeTrace,
}
-pub fn expected_found<C:Combine,T>(
+pub fn expected_found<'tcx, C: Combine<'tcx>, T>(
this: &C, a: T, b: T) -> ty::expected_found<T> {
if this.a_is_expected() {
ty::expected_found {expected: a, found: b}
}
}
-pub fn eq_tys<C:Combine>(this: &C, a: ty::t, b: ty::t) -> ures {
- let suber = this.sub();
- this.infcx().try(|| {
- suber.tys(a, b).and_then(|_ok| suber.contratys(a, b)).to_ures()
- })
-}
-
-pub fn eq_regions<C:Combine>(this: &C, a: ty::Region, b: ty::Region)
- -> ures {
- debug!("eq_regions({}, {})",
- a.repr(this.infcx().tcx),
- b.repr(this.infcx().tcx));
- let sub = this.sub();
- indent(|| {
- this.infcx().try(|| {
- sub.regions(a, b).and_then(|_r| sub.contraregions(a, b))
- }).or_else(|e| {
- // substitute a better error, but use the regions
- // found in the original error
- match e {
- ty::terr_regions_does_not_outlive(a1, b1) =>
- Err(ty::terr_regions_not_same(a1, b1)),
- _ => Err(e)
- }
- }).to_ures()
- })
-}
+pub fn super_fn_sigs<'tcx, C: Combine<'tcx>>(this: &C,
+ a: &ty::FnSig,
+ b: &ty::FnSig)
+ -> cres<ty::FnSig> {
-pub fn super_fn_sigs<C:Combine>(this: &C, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
-
- fn argvecs<C:Combine>(this: &C, a_args: &[ty::t], b_args: &[ty::t]) -> cres<Vec<ty::t> > {
+ fn argvecs<'tcx, C: Combine<'tcx>>(this: &C,
+ a_args: &[ty::t],
+ b_args: &[ty::t])
+ -> cres<Vec<ty::t>> {
if a_args.len() == b_args.len() {
result::collect(a_args.iter().zip(b_args.iter())
.map(|(a, b)| this.args(*a, *b)))
return Err(ty::terr_variadic_mismatch(expected_found(this, a.variadic, b.variadic)));
}
- let inputs = if_ok!(argvecs(this,
+ let inputs = try!(argvecs(this,
a.inputs.as_slice(),
b.inputs.as_slice()));
- let output = if_ok!(this.tys(a.output, b.output));
+ let output = try!(this.tys(a.output, b.output));
Ok(FnSig {binder_id: a.binder_id,
inputs: inputs,
output: output,
variadic: a.variadic})
}
-pub fn super_tys<C:Combine>(this: &C, a: ty::t, b: ty::t) -> cres<ty::t> {
+pub fn super_tys<'tcx, C: Combine<'tcx>>(this: &C, a: ty::t, b: ty::t) -> cres<ty::t> {
// This is a horrible hack - historically, [T] was not treated as a type,
// so, for example, &T and &[U] should not unify. In fact the only thing
// &[U] should unify with is &[T]. We preserve that behaviour with this
// check.
- fn check_ptr_to_unsized<C:Combine>(this: &C,
- a: ty::t,
- b: ty::t,
- a_inner: ty::t,
- b_inner: ty::t,
- result: ty::t) -> cres<ty::t> {
+ fn check_ptr_to_unsized<'tcx, C: Combine<'tcx>>(this: &C,
+ a: ty::t,
+ b: ty::t,
+ a_inner: ty::t,
+ b_inner: ty::t,
+ result: ty::t) -> cres<ty::t> {
match (&ty::get(a_inner).sty, &ty::get(b_inner).sty) {
(&ty::ty_vec(_, None), &ty::ty_vec(_, None)) |
(&ty::ty_str, &ty::ty_str) |
// Relate integral variables to other types
(&ty::ty_infer(IntVar(a_id)), &ty::ty_infer(IntVar(b_id))) => {
- if_ok!(this.infcx().simple_vars(this.a_is_expected(),
+ try!(this.infcx().simple_vars(this.a_is_expected(),
a_id, b_id));
Ok(a)
}
// Relate floating-point variables to other types
(&ty::ty_infer(FloatVar(a_id)), &ty::ty_infer(FloatVar(b_id))) => {
- if_ok!(this.infcx().simple_vars(this.a_is_expected(),
- a_id, b_id));
+ try!(this.infcx().simple_vars(this.a_is_expected(), a_id, b_id));
Ok(a)
}
(&ty::ty_infer(FloatVar(v_id)), &ty::ty_float(v)) => {
(&ty::ty_bool, _) |
(&ty::ty_int(_), _) |
(&ty::ty_uint(_), _) |
- (&ty::ty_float(_), _) => {
+ (&ty::ty_float(_), _) |
+ (&ty::ty_err, _) => {
if ty::get(a).sty == ty::get(b).sty {
Ok(a)
} else {
(&ty::ty_enum(a_id, ref a_substs),
&ty::ty_enum(b_id, ref b_substs))
if a_id == b_id => {
- let substs = if_ok!(this.substs(a_id,
+ let substs = try!(this.substs(a_id,
a_substs,
b_substs));
Ok(ty::mk_enum(tcx, a_id, substs))
&ty::ty_trait(ref b_))
if a_.def_id == b_.def_id => {
debug!("Trying to match traits {:?} and {:?}", a, b);
- let substs = if_ok!(this.substs(a_.def_id, &a_.substs, &b_.substs));
- let bounds = if_ok!(this.existential_bounds(a_.bounds, b_.bounds));
+ let substs = try!(this.substs(a_.def_id, &a_.substs, &b_.substs));
+ let bounds = try!(this.existential_bounds(a_.bounds, b_.bounds));
Ok(ty::mk_trait(tcx,
a_.def_id,
substs.clone(),
(&ty::ty_struct(a_id, ref a_substs), &ty::ty_struct(b_id, ref b_substs))
if a_id == b_id => {
- let substs = if_ok!(this.substs(a_id, a_substs, b_substs));
+ let substs = try!(this.substs(a_id, a_substs, b_substs));
Ok(ty::mk_struct(tcx, a_id, substs))
}
(&ty::ty_unboxed_closure(a_id, a_region),
&ty::ty_unboxed_closure(b_id, b_region))
if a_id == b_id => {
- let region = if_ok!(this.regions(a_region, b_region));
+ // All ty_unboxed_closure types with the same id represent
+ // the (anonymous) type of the same closure expression. So
+ // all of their regions should be equated.
+ let region = try!(this.equate().regions(a_region, b_region));
Ok(ty::mk_unboxed_closure(tcx, a_id, region))
}
}
(&ty::ty_uniq(a_inner), &ty::ty_uniq(b_inner)) => {
- let typ = if_ok!(this.tys(a_inner, b_inner));
+ let typ = try!(this.tys(a_inner, b_inner));
check_ptr_to_unsized(this, a, b, a_inner, b_inner, ty::mk_uniq(tcx, typ))
}
(&ty::ty_ptr(ref a_mt), &ty::ty_ptr(ref b_mt)) => {
- let mt = if_ok!(this.mts(a_mt, b_mt));
+ let mt = try!(this.mts(a_mt, b_mt));
check_ptr_to_unsized(this, a, b, a_mt.ty, b_mt.ty, ty::mk_ptr(tcx, mt))
}
(&ty::ty_rptr(a_r, ref a_mt), &ty::ty_rptr(b_r, ref b_mt)) => {
- let r = if_ok!(this.contraregions(a_r, b_r));
+ let r = try!(this.contraregions(a_r, b_r));
// FIXME(14985) If we have mutable references to trait objects, we
// used to use covariant subtyping. I have preserved this behaviour,
// even though it is probably incorrect. So don't go down the usual
// path which would require invariance.
let mt = match (&ty::get(a_mt.ty).sty, &ty::get(b_mt.ty).sty) {
(&ty::ty_trait(..), &ty::ty_trait(..)) if a_mt.mutbl == b_mt.mutbl => {
- let ty = if_ok!(this.tys(a_mt.ty, b_mt.ty));
+ let ty = try!(this.tys(a_mt.ty, b_mt.ty));
ty::mt { ty: ty, mutbl: a_mt.mutbl }
}
- _ => if_ok!(this.mts(a_mt, b_mt))
+ _ => try!(this.mts(a_mt, b_mt))
};
check_ptr_to_unsized(this, a, b, a_mt.ty, b_mt.ty, ty::mk_rptr(tcx, r, mt))
}
_ => Err(ty::terr_sorts(expected_found(this, a, b)))
};
- fn unify_integral_variable<C:Combine>(
+ fn unify_integral_variable<'tcx, C: Combine<'tcx>>(
this: &C,
vid_is_expected: bool,
vid: ty::IntVid,
val: ty::IntVarValue) -> cres<ty::t>
{
- if_ok!(this.infcx().simple_var_t(vid_is_expected, vid, val));
+ try!(this.infcx().simple_var_t(vid_is_expected, vid, val));
match val {
IntType(v) => Ok(ty::mk_mach_int(v)),
UintType(v) => Ok(ty::mk_mach_uint(v))
}
}
- fn unify_float_variable<C:Combine>(
+ fn unify_float_variable<'tcx, C: Combine<'tcx>>(
this: &C,
vid_is_expected: bool,
vid: ty::FloatVid,
val: ast::FloatTy) -> cres<ty::t>
{
- if_ok!(this.infcx().simple_var_t(vid_is_expected, vid, val));
+ try!(this.infcx().simple_var_t(vid_is_expected, vid, val));
Ok(ty::mk_mach_float(val))
}
}
+
+impl<'f, 'tcx> CombineFields<'f, 'tcx> {
+ pub fn switch_expected(&self) -> CombineFields<'f, 'tcx> {
+ CombineFields {
+ a_is_expected: !self.a_is_expected,
+ ..(*self).clone()
+ }
+ }
+
+ fn equate(&self) -> Equate<'f, 'tcx> {
+ Equate((*self).clone())
+ }
+
+ fn sub(&self) -> Sub<'f, 'tcx> {
+ Sub((*self).clone())
+ }
+
+ pub fn instantiate(&self,
+ a_ty: ty::t,
+ dir: RelationDir,
+ b_vid: ty::TyVid)
+ -> cres<()>
+ {
+ let tcx = self.infcx.tcx;
+ let mut stack = Vec::new();
+ stack.push((a_ty, dir, b_vid));
+ loop {
+ // For each turn of the loop, we extract a tuple
+ //
+ // (a_ty, dir, b_vid)
+ //
+ // to relate. Here dir is either SubtypeOf or
+ // SupertypeOf. The idea is that we should ensure that
+ // the type `a_ty` is a subtype or supertype (respectively) of the
+ // type to which `b_vid` is bound.
+ //
+ // If `b_vid` has not yet been instantiated with a type
+ // (which is always true on the first iteration, but not
+ // necessarily true on later iterations), we will first
+ // instantiate `b_vid` with a *generalized* version of
+ // `a_ty`. Generalization introduces other inference
+ // variables wherever subtyping could occur (at time of
+ // this writing, this means replacing free regions with
+ // region variables).
+ let (a_ty, dir, b_vid) = match stack.pop() {
+ None => break,
+ Some(e) => e,
+ };
+
+ debug!("instantiate(a_ty={} dir={} b_vid={})",
+ a_ty.repr(tcx),
+ dir,
+ b_vid.repr(tcx));
+
+ // Check whether `vid` has been instantiated yet. If not,
+ // make a generalized form of `ty` and instantiate with
+ // that.
+ let b_ty = self.infcx.type_variables.borrow().probe(b_vid);
+ let b_ty = match b_ty {
+ Some(t) => t, // ...already instantiated.
+ None => { // ...not yet instantiated:
+ // Generalize type if necessary.
+ let generalized_ty = match dir {
+ EqTo => a_ty,
+ SupertypeOf | SubtypeOf => self.generalize(a_ty)
+ };
+ debug!("instantiate(a_ty={}, dir={}, \
+ b_vid={}, generalized_ty={})",
+ a_ty.repr(tcx), dir, b_vid.repr(tcx),
+ generalized_ty.repr(tcx));
+ self.infcx.type_variables
+ .borrow_mut()
+ .instantiate_and_push(
+ b_vid, generalized_ty, &mut stack);
+ generalized_ty
+ }
+ };
+
+ // The original triple was `(a_ty, dir, b_vid)` -- now we have
+ // resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`:
+ //
+ // FIXME(#16847): This code is non-ideal because all these subtype
+ // relations wind up attributed to the same spans. We need
+ // to associate causes/spans with each of the relations in
+ // the stack to get this right.
+ match dir {
+ EqTo => {
+ try!(self.equate().tys(a_ty, b_ty));
+ }
+
+ SubtypeOf => {
+ try!(self.sub().tys(a_ty, b_ty));
+ }
+
+ SupertypeOf => {
+ try!(self.sub().contratys(a_ty, b_ty));
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn generalize(&self, t: ty::t) -> ty::t {
+ // FIXME(#16847): This is non-ideal because we don't give a
+ // very descriptive origin for this region variable.
+
+ let infcx = self.infcx;
+ let span = self.trace.origin.span();
+ t.fold_with(
+ &mut RegionFolder::regions(
+ self.infcx.tcx,
+ |_| infcx.next_region_var(MiscVariable(span))))
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::ty::{BuiltinBounds};
+use middle::ty;
+use middle::ty::TyVar;
+use middle::typeck::infer::combine::*;
+use middle::typeck::infer::{cres};
+use middle::typeck::infer::glb::Glb;
+use middle::typeck::infer::InferCtxt;
+use middle::typeck::infer::lub::Lub;
+use middle::typeck::infer::sub::Sub;
+use middle::typeck::infer::{TypeTrace, Subtype};
+use middle::typeck::infer::type_variable::{EqTo};
+use util::ppaux::{Repr};
+
+use syntax::ast::{Onceness, FnStyle};
+
+pub struct Equate<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
+}
+
+#[allow(non_snake_case)]
+pub fn Equate<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Equate<'f, 'tcx> {
+ Equate { fields: cf }
+}
+
+impl<'f, 'tcx> Combine<'tcx> for Equate<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
+ fn tag(&self) -> String { "eq".to_string() }
+ fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+ fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
+
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
+
+ fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
+ self.tys(a, b)
+ }
+
+ fn contraregions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
+ self.regions(a, b)
+ }
+
+ fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
+ debug!("{}.regions({}, {})",
+ self.tag(),
+ a.repr(self.fields.infcx.tcx),
+ b.repr(self.fields.infcx.tcx));
+ self.infcx().region_vars.make_eqregion(Subtype(self.trace()), a, b);
+ Ok(a)
+ }
+
+ fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
+ debug!("mts({} <: {})",
+ a.repr(self.fields.infcx.tcx),
+ b.repr(self.fields.infcx.tcx));
+
+ if a.mutbl != b.mutbl { return Err(ty::terr_mutability); }
+ let t = try!(self.tys(a.ty, b.ty));
+ Ok(ty::mt { mutbl: a.mutbl, ty: t })
+ }
+
+ fn fn_styles(&self, a: FnStyle, b: FnStyle) -> cres<FnStyle> {
+ if a != b {
+ Err(ty::terr_fn_style_mismatch(expected_found(self, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+
+ fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<Onceness> {
+ if a != b {
+ Err(ty::terr_onceness_mismatch(expected_found(self, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+
+ fn builtin_bounds(&self,
+ a: BuiltinBounds,
+ b: BuiltinBounds)
+ -> cres<BuiltinBounds>
+ {
+        // Unlike subtyping (where more bounds may coerce to fewer
+        // bounds, e.g., fn:Copy() <: fn()), equality requires the
+        // two sets of builtin bounds to match exactly.
+ if a != b {
+ Err(ty::terr_builtin_bounds(expected_found(self, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+
+ fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
+ debug!("{}.tys({}, {})", self.tag(),
+ a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
+ if a == b { return Ok(a); }
+
+ let infcx = self.fields.infcx;
+ let a = infcx.type_variables.borrow().replace_if_possible(a);
+ let b = infcx.type_variables.borrow().replace_if_possible(b);
+ match (&ty::get(a).sty, &ty::get(b).sty) {
+ (&ty::ty_bot, &ty::ty_bot) => {
+ Ok(a)
+ }
+
+ (&ty::ty_bot, _) |
+ (_, &ty::ty_bot) => {
+ Err(ty::terr_sorts(expected_found(self, a, b)))
+ }
+
+ (&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
+ infcx.type_variables.borrow_mut().relate_vars(a_id, EqTo, b_id);
+ Ok(a)
+ }
+
+ (&ty::ty_infer(TyVar(a_id)), _) => {
+ try!(self.fields.instantiate(b, EqTo, a_id));
+ Ok(a)
+ }
+
+ (_, &ty::ty_infer(TyVar(b_id))) => {
+ try!(self.fields.instantiate(a, EqTo, b_id));
+ Ok(a)
+ }
+
+ _ => {
+ super_tys(self, a, b)
+ }
+ }
+ }
+
+ fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
+ try!(self.sub().fn_sigs(a, b));
+ self.sub().fn_sigs(b, a)
+ }
+}
span: codemap::Span);
}
-impl<'a> ErrorReporting for InferCtxt<'a> {
+impl<'a, 'tcx> ErrorReporting for InferCtxt<'a, 'tcx> {
fn report_region_errors(&self,
errors: &Vec<RegionResolutionError>) {
let p_errors = self.process_errors(errors);
sub,
"");
}
+ infer::ProcCapture(span, id) => {
+ self.tcx.sess.span_err(
+ span,
+ format!("captured variable `{}` must be 'static \
+ to be captured in a proc",
+ ty::local_var_name_str(self.tcx, id).get())
+ .as_slice());
+ note_and_explain_region(
+ self.tcx,
+ "captured variable is only valid for ",
+ sup,
+ "");
+ }
infer::IndexSlice(span) => {
self.tcx.sess.span_err(span,
"index of slice outside its lifetime");
region_names: &'a HashSet<ast::Name>
}
-struct Rebuilder<'a> {
- tcx: &'a ty::ctxt,
+struct Rebuilder<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
fn_decl: ast::P<ast::FnDecl>,
expl_self_opt: Option<ast::ExplicitSelf_>,
generics: &'a ast::Generics,
Kept
}
-impl<'a> Rebuilder<'a> {
- fn new(tcx: &'a ty::ctxt,
+impl<'a, 'tcx> Rebuilder<'a, 'tcx> {
+ fn new(tcx: &'a ty::ctxt<'tcx>,
fn_decl: ast::P<ast::FnDecl>,
expl_self_opt: Option<ast::ExplicitSelf_>,
generics: &'a ast::Generics,
same_regions: &'a [SameRegions],
life_giver: &'a LifeGiver)
- -> Rebuilder<'a> {
+ -> Rebuilder<'a, 'tcx> {
Rebuilder {
tcx: tcx,
fn_decl: fn_decl,
}
}
-impl<'a> ErrorReportingHelpers for InferCtxt<'a> {
+impl<'a, 'tcx> ErrorReportingHelpers for InferCtxt<'a, 'tcx> {
fn give_expl_lifetime_param(&self,
decl: &ast::FnDecl,
fn_style: ast::FnStyle,
bound_region_to_string(self.tcx, "lifetime parameter ", true, br))
}
infer::EarlyBoundRegion(_, name) => {
- format!(" for lifetime parameter `{}",
+ format!(" for lifetime parameter `{}`",
token::get_name(name).get())
}
infer::BoundRegionInCoherence(name) => {
- format!(" for lifetime parameter `{} in coherence check",
+ format!(" for lifetime parameter `{}` in coherence check",
token::get_name(name).get())
}
infer::UpvarRegion(ref upvar_id, _) => {
self.tcx,
id).get().to_string()).as_slice());
}
+ infer::ProcCapture(span, id) => {
+ self.tcx.sess.span_note(
+ span,
+ format!("...so that captured variable `{}` \
+ is 'static",
+ ty::local_var_name_str(
+ self.tcx,
+ id).get()).as_slice());
+ }
infer::IndexSlice(span) => {
self.tcx.sess.span_note(
span,
infer::AutoBorrow(span) => {
self.tcx.sess.span_note(
span,
- "...so that reference is valid \
- at the time of implicit borrow");
+ "...so that auto-reference is valid \
+ at the time of borrow");
}
infer::ExprTypeIsNotInScope(t, span) => {
self.tcx.sess.span_note(
use middle::ty::{BuiltinBounds};
use middle::ty::RegionVid;
use middle::ty;
-use middle::typeck::infer::then;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::lattice::*;
+use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::sub::Sub;
use middle::typeck::infer::{cres, InferCtxt};
use util::ppaux::mt_to_string;
use util::ppaux::Repr;
-pub struct Glb<'f>(pub CombineFields<'f>); // "greatest lower bound" (common subtype)
+/// "Greatest lower bound" (common subtype)
+pub struct Glb<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
+}
-impl<'f> Glb<'f> {
- pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Glb(ref v) = *self; v }
+#[allow(non_snake_case)]
+pub fn Glb<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Glb<'f, 'tcx> {
+ Glb { fields: cf }
}
-impl<'f> Combine for Glb<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.get_ref().infcx }
+impl<'f, 'tcx> Combine<'tcx> for Glb<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "glb".to_string() }
- fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
- fn trace(&self) -> TypeTrace { self.get_ref().trace.clone() }
+ fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+ fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.get_ref().clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.get_ref().clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.get_ref().clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
- let tcx = self.get_ref().infcx.tcx;
+ let tcx = self.fields.infcx.tcx;
debug!("{}.mts({}, {})",
self.tag(),
mt_to_string(tcx, b));
match (a.mutbl, b.mutbl) {
- // If one side or both is mut, then the GLB must use
- // the precise type from the mut side.
- (MutMutable, MutMutable) => {
- eq_tys(self, a.ty, b.ty).then(|| {
- Ok(ty::mt {ty: a.ty, mutbl: MutMutable})
- })
- }
-
- // If one side or both is immutable, we can use the GLB of
- // both sides but mutbl must be `MutImmutable`.
- (MutImmutable, MutImmutable) => {
- self.tys(a.ty, b.ty).and_then(|t| {
+ // If one side or both is mut, then the GLB must use
+ // the precise type from the mut side.
+ (MutMutable, MutMutable) => {
+ let t = try!(self.equate().tys(a.ty, b.ty));
+ Ok(ty::mt {ty: t, mutbl: MutMutable})
+ }
+
+ // If one side or both is immutable, we can use the GLB of
+ // both sides but mutbl must be `MutImmutable`.
+ (MutImmutable, MutImmutable) => {
+ let t = try!(self.tys(a.ty, b.ty));
Ok(ty::mt {ty: t, mutbl: MutImmutable})
- })
- }
-
- // There is no mutual subtype of these combinations.
- (MutMutable, MutImmutable) |
- (MutImmutable, MutMutable) => {
- Err(ty::terr_mutability)
- }
+ }
+
+ // There is no mutual subtype of these combinations.
+ (MutMutable, MutImmutable) |
+ (MutImmutable, MutMutable) => {
+ Err(ty::terr_mutability)
+ }
}
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({:?}, {:?})",
self.tag(),
- a.repr(self.get_ref().infcx.tcx),
- b.repr(self.get_ref().infcx.tcx));
+ a.repr(self.fields.infcx.tcx),
+ b.repr(self.fields.infcx.tcx));
- Ok(self.get_ref().infcx.region_vars.glb_regions(Subtype(self.trace()), a, b))
+ Ok(self.fields.infcx.region_vars.glb_regions(Subtype(self.trace()), a, b))
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
// please see the large comment in `region_inference.rs`.
debug!("{}.fn_sigs({:?}, {:?})",
- self.tag(), a.repr(self.get_ref().infcx.tcx), b.repr(self.get_ref().infcx.tcx));
+ self.tag(), a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
let _indenter = indenter();
// Make a mark so we can examine "all bindings that were
// created as part of this type comparison".
- let mark = self.get_ref().infcx.region_vars.mark();
+ let mark = self.fields.infcx.region_vars.mark();
// Instantiate each bound region with a fresh region variable.
let (a_with_fresh, a_map) =
- self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
+ self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), a);
let a_vars = var_ids(self, &a_map);
let (b_with_fresh, b_map) =
- self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
+ self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), b);
let b_vars = var_ids(self, &b_map);
// Collect constraints.
- let sig0 = if_ok!(super_fn_sigs(self, &a_with_fresh, &b_with_fresh));
- debug!("sig0 = {}", sig0.repr(self.get_ref().infcx.tcx));
+ let sig0 = try!(super_fn_sigs(self, &a_with_fresh, &b_with_fresh));
+ debug!("sig0 = {}", sig0.repr(self.fields.infcx.tcx));
// Generalize the regions appearing in fn_ty0 if possible
let new_vars =
- self.get_ref().infcx.region_vars.vars_created_since_mark(mark);
+ self.fields.infcx.region_vars.vars_created_since_mark(mark);
let sig1 =
fold_regions_in_sig(
- self.get_ref().infcx.tcx,
+ self.fields.infcx.tcx,
&sig0,
|r| {
generalize_region(self,
b_vars.as_slice(),
r)
});
- debug!("sig1 = {}", sig1.repr(self.get_ref().infcx.tcx));
+ debug!("sig1 = {}", sig1.repr(self.fields.infcx.tcx));
return Ok(sig1);
fn generalize_region(this: &Glb,
return r0;
}
- let tainted = this.get_ref().infcx.region_vars.tainted(mark, r0);
+ let tainted = this.fields.infcx.region_vars.tainted(mark, r0);
let mut a_r = None;
let mut b_r = None;
return ty::ReLateBound(new_binder_id, *a_br);
}
}
- this.get_ref().infcx.tcx.sess.span_bug(
- this.get_ref().trace.origin.span(),
+ this.fields.infcx.tcx.sess.span_bug(
+ this.fields.trace.origin.span(),
format!("could not find original bound region for {:?}",
r).as_slice())
}
fn fresh_bound_variable(this: &Glb, binder_id: NodeId) -> ty::Region {
- this.get_ref().infcx.region_vars.new_bound(binder_id)
+ this.fields.infcx.region_vars.new_bound(binder_id)
}
}
}
// except according to those terms.
/*!
- *
* # Lattice Variables
*
* This file contains generic code for operating on inference variables
use middle::ty::{RegionVid, TyVar};
use middle::ty;
-use middle::typeck::infer::{ToUres};
use middle::typeck::infer::*;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::lub::Lub;
-use middle::typeck::infer::unify::*;
-use middle::typeck::infer::sub::Sub;
use util::ppaux::Repr;
use std::collections::HashMap;
-trait LatticeValue : Clone + Repr + PartialEq {
- fn sub(cf: CombineFields, a: &Self, b: &Self) -> ures;
- fn lub(cf: CombineFields, a: &Self, b: &Self) -> cres<Self>;
- fn glb(cf: CombineFields, a: &Self, b: &Self) -> cres<Self>;
-}
-
-pub type LatticeOp<'a, T> =
- |cf: CombineFields, a: &T, b: &T|: 'a -> cres<T>;
-
-impl LatticeValue for ty::t {
- fn sub(cf: CombineFields, a: &ty::t, b: &ty::t) -> ures {
- Sub(cf).tys(*a, *b).to_ures()
- }
-
- fn lub(cf: CombineFields, a: &ty::t, b: &ty::t) -> cres<ty::t> {
- Lub(cf).tys(*a, *b)
- }
-
- fn glb(cf: CombineFields, a: &ty::t, b: &ty::t) -> cres<ty::t> {
- Glb(cf).tys(*a, *b)
- }
-}
-
-pub trait CombineFieldsLatticeMethods<T:LatticeValue, K:UnifyKey<Bounds<T>>> {
- /// make variable a subtype of variable
- fn var_sub_var(&self,
- a_id: K,
- b_id: K)
- -> ures;
-
- /// make variable a subtype of T
- fn var_sub_t(&self,
- a_id: K,
- b: T)
- -> ures;
-
- /// make T a subtype of variable
- fn t_sub_var(&self,
- a: T,
- b_id: K)
- -> ures;
-
- fn set_var_to_merged_bounds(&self,
- v_id: K,
- a: &Bounds<T>,
- b: &Bounds<T>,
- rank: uint)
- -> ures;
-}
-
-pub trait CombineFieldsLatticeMethods2<T:LatticeValue> {
- fn merge_bnd(&self,
- a: &Bound<T>,
- b: &Bound<T>,
- lattice_op: LatticeOp<T>)
- -> cres<Bound<T>>;
-
- fn bnds(&self, a: &Bound<T>, b: &Bound<T>) -> ures;
-}
-
-impl<'f,T:LatticeValue, K:UnifyKey<Bounds<T>>>
- CombineFieldsLatticeMethods<T,K> for CombineFields<'f>
-{
- fn var_sub_var(&self,
- a_id: K,
- b_id: K)
- -> ures
- {
- /*!
- * Make one variable a subtype of another variable. This is a
- * subtle and tricky process, as described in detail at the
- * top of infer.rs.
- */
-
- let tcx = self.infcx.tcx;
- let table = UnifyKey::unification_table(self.infcx);
-
- // Need to make sub_id a subtype of sup_id.
- let node_a = table.borrow_mut().get(tcx, a_id);
- let node_b = table.borrow_mut().get(tcx, b_id);
- let a_id = node_a.key.clone();
- let b_id = node_b.key.clone();
- let a_bounds = node_a.value.clone();
- let b_bounds = node_b.value.clone();
-
- debug!("vars({}={} <: {}={})",
- a_id, a_bounds.repr(tcx),
- b_id, b_bounds.repr(tcx));
-
- if a_id == b_id { return Ok(()); }
-
- // If both A's UB and B's LB have already been bound to types,
- // see if we can make those types subtypes.
- match (&a_bounds.ub, &b_bounds.lb) {
- (&Some(ref a_ub), &Some(ref b_lb)) => {
- let r = self.infcx.try(
- || LatticeValue::sub(self.clone(), a_ub, b_lb));
- match r {
- Ok(()) => {
- return Ok(());
- }
- Err(_) => { /*fallthrough */ }
- }
- }
- _ => { /*fallthrough*/ }
- }
-
- // Otherwise, we need to merge A and B so as to guarantee that
- // A remains a subtype of B. Actually, there are other options,
- // but that's the route we choose to take.
-
- let (new_root, new_rank) =
- table.borrow_mut().unify(tcx, &node_a, &node_b);
- self.set_var_to_merged_bounds(new_root,
- &a_bounds, &b_bounds,
- new_rank)
- }
-
- /// make variable a subtype of T
- fn var_sub_t(&self,
- a_id: K,
- b: T)
- -> ures
- {
- /*!
- * Make a variable (`a_id`) a subtype of the concrete type `b`.
- */
-
- let tcx = self.infcx.tcx;
- let table = UnifyKey::unification_table(self.infcx);
- let node_a = table.borrow_mut().get(tcx, a_id);
- let a_id = node_a.key.clone();
- let a_bounds = &node_a.value;
- let b_bounds = &Bounds { lb: None, ub: Some(b.clone()) };
-
- debug!("var_sub_t({}={} <: {})",
- a_id,
- a_bounds.repr(self.infcx.tcx),
- b.repr(self.infcx.tcx));
-
- self.set_var_to_merged_bounds(
- a_id, a_bounds, b_bounds, node_a.rank)
- }
-
- fn t_sub_var(&self,
- a: T,
- b_id: K)
- -> ures
- {
- /*!
- * Make a concrete type (`a`) a subtype of the variable `b_id`
- */
-
- let tcx = self.infcx.tcx;
- let table = UnifyKey::unification_table(self.infcx);
- let a_bounds = &Bounds { lb: Some(a.clone()), ub: None };
- let node_b = table.borrow_mut().get(tcx, b_id);
- let b_id = node_b.key.clone();
- let b_bounds = &node_b.value;
-
- debug!("t_sub_var({} <: {}={})",
- a.repr(self.infcx.tcx),
- b_id,
- b_bounds.repr(self.infcx.tcx));
-
- self.set_var_to_merged_bounds(
- b_id, a_bounds, b_bounds, node_b.rank)
- }
-
- fn set_var_to_merged_bounds(&self,
- v_id: K,
- a: &Bounds<T>,
- b: &Bounds<T>,
- rank: uint)
- -> ures
- {
- /*!
- * Updates the bounds for the variable `v_id` to be the intersection
- * of `a` and `b`. That is, the new bounds for `v_id` will be
- * a bounds c such that:
- * c.ub <: a.ub
- * c.ub <: b.ub
- * a.lb <: c.lb
- * b.lb <: c.lb
- * If this cannot be achieved, the result is failure.
- */
-
- // Think of the two diamonds, we want to find the
- // intersection. There are basically four possibilities (you
- // can swap A/B in these pictures):
- //
- // A A
- // / \ / \
- // / B \ / B \
- // / / \ \ / / \ \
- // * * * * * / * *
- // \ \ / / \ / /
- // \ B / / \ / /
- // \ / * \ /
- // A \ / A
- // B
-
- let tcx = self.infcx.tcx;
- let table = UnifyKey::unification_table(self.infcx);
-
- debug!("merge({},{},{})",
- v_id,
- a.repr(self.infcx.tcx),
- b.repr(self.infcx.tcx));
-
- // First, relate the lower/upper bounds of A and B.
- // Note that these relations *must* hold for us
- // to be able to merge A and B at all, and relating
- // them explicitly gives the type inferencer more
- // information and helps to produce tighter bounds
- // when necessary.
- let () = if_ok!(self.bnds(&a.lb, &b.ub));
- let () = if_ok!(self.bnds(&b.lb, &a.ub));
- let ub = if_ok!(self.merge_bnd(&a.ub, &b.ub, LatticeValue::glb));
- let lb = if_ok!(self.merge_bnd(&a.lb, &b.lb, LatticeValue::lub));
- let bounds = Bounds { lb: lb, ub: ub };
- debug!("merge({}): bounds={}",
- v_id,
- bounds.repr(self.infcx.tcx));
-
- // the new bounds must themselves
- // be relatable:
- let () = if_ok!(self.bnds(&bounds.lb, &bounds.ub));
- table.borrow_mut().set(tcx, v_id, Root(bounds, rank));
- Ok(())
- }
-}
-
-impl<'f,T:LatticeValue>
- CombineFieldsLatticeMethods2<T> for CombineFields<'f>
-{
- fn merge_bnd(&self,
- a: &Bound<T>,
- b: &Bound<T>,
- lattice_op: LatticeOp<T>)
- -> cres<Bound<T>>
- {
- /*!
- * Combines two bounds into a more general bound.
- */
-
- debug!("merge_bnd({},{})",
- a.repr(self.infcx.tcx),
- b.repr(self.infcx.tcx));
- match (a, b) {
- (&None, &None) => Ok(None),
- (&Some(_), &None) => Ok((*a).clone()),
- (&None, &Some(_)) => Ok((*b).clone()),
- (&Some(ref v_a), &Some(ref v_b)) => {
- lattice_op(self.clone(), v_a, v_b).and_then(|v| Ok(Some(v)))
- }
- }
- }
-
- fn bnds(&self,
- a: &Bound<T>,
- b: &Bound<T>)
- -> ures
- {
- debug!("bnds({} <: {})",
- a.repr(self.infcx.tcx),
- b.repr(self.infcx.tcx));
-
- match (a, b) {
- (&None, &None) |
- (&Some(_), &None) |
- (&None, &Some(_)) => {
- Ok(())
- }
- (&Some(ref t_a), &Some(ref t_b)) => {
- LatticeValue::sub(self.clone(), t_a, t_b)
- }
- }
- }
-}
-
-// ______________________________________________________________________
-// Lattice operations on variables
-//
-// This is common code used by both LUB and GLB to compute the LUB/GLB
-// for pairs of variables or for variables and values.
-
pub trait LatticeDir {
- fn combine_fields<'a>(&'a self) -> CombineFields<'a>;
- fn bnd<T:Clone>(&self, b: &Bounds<T>) -> Option<T>;
- fn with_bnd<T:Clone>(&self, b: &Bounds<T>, t: T) -> Bounds<T>;
-}
-
-pub trait TyLatticeDir {
+ // Relates the bottom type to `t` and returns LUB(t, _|_) or
+ // GLB(t, _|_) as appropriate.
fn ty_bot(&self, t: ty::t) -> cres<ty::t>;
-}
-impl<'f> LatticeDir for Lub<'f> {
- fn combine_fields<'a>(&'a self) -> CombineFields<'a> { self.get_ref().clone() }
- fn bnd<T:Clone>(&self, b: &Bounds<T>) -> Option<T> { b.ub.clone() }
- fn with_bnd<T:Clone>(&self, b: &Bounds<T>, t: T) -> Bounds<T> {
- Bounds { ub: Some(t), ..(*b).clone() }
- }
+ // Relates the type `v` to `a` and `b` such that `v` represents
+ // the LUB/GLB of `a` and `b` as appropriate.
+ fn relate_bound<'a>(&'a self, v: ty::t, a: ty::t, b: ty::t) -> cres<()>;
}
-impl<'f> TyLatticeDir for Lub<'f> {
+impl<'a, 'tcx> LatticeDir for Lub<'a, 'tcx> {
fn ty_bot(&self, t: ty::t) -> cres<ty::t> {
Ok(t)
}
-}
-impl<'f> LatticeDir for Glb<'f> {
- fn combine_fields<'a>(&'a self) -> CombineFields<'a> { self.get_ref().clone() }
- fn bnd<T:Clone>(&self, b: &Bounds<T>) -> Option<T> { b.lb.clone() }
- fn with_bnd<T:Clone>(&self, b: &Bounds<T>, t: T) -> Bounds<T> {
- Bounds { lb: Some(t), ..(*b).clone() }
+ fn relate_bound<'a>(&'a self, v: ty::t, a: ty::t, b: ty::t) -> cres<()> {
+ let sub = self.sub();
+ try!(sub.tys(a, v));
+ try!(sub.tys(b, v));
+ Ok(())
}
}
-impl<'f> TyLatticeDir for Glb<'f> {
- fn ty_bot(&self, _t: ty::t) -> cres<ty::t> {
+impl<'a, 'tcx> LatticeDir for Glb<'a, 'tcx> {
+ fn ty_bot(&self, _: ty::t) -> cres<ty::t> {
Ok(ty::mk_bot())
}
+
+ fn relate_bound<'a>(&'a self, v: ty::t, a: ty::t, b: ty::t) -> cres<()> {
+ let sub = self.sub();
+ try!(sub.tys(v, a));
+ try!(sub.tys(v, b));
+ Ok(())
+ }
}
-pub fn super_lattice_tys<L:LatticeDir+TyLatticeDir+Combine>(this: &L,
- a: ty::t,
- b: ty::t)
- -> cres<ty::t> {
+pub fn super_lattice_tys<'tcx, L:LatticeDir+Combine<'tcx>>(this: &L,
+ a: ty::t,
+ b: ty::t)
+ -> cres<ty::t>
+{
debug!("{}.lattice_tys({}, {})",
this.tag(),
a.repr(this.infcx().tcx),
return Ok(a);
}
- let tcx = this.infcx().tcx;
-
+ let infcx = this.infcx();
+ let a = infcx.type_variables.borrow().replace_if_possible(a);
+ let b = infcx.type_variables.borrow().replace_if_possible(b);
match (&ty::get(a).sty, &ty::get(b).sty) {
- (&ty::ty_bot, _) => { return this.ty_bot(b); }
- (_, &ty::ty_bot) => { return this.ty_bot(a); }
-
- (&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
- let r = if_ok!(lattice_vars(this, a_id, b_id,
- |x, y| this.tys(*x, *y)));
- return match r {
- VarResult(v) => Ok(ty::mk_var(tcx, v)),
- ValueResult(t) => Ok(t)
- };
- }
-
- (&ty::ty_infer(TyVar(a_id)), _) => {
- return lattice_var_and_t(this, a_id, &b,
- |x, y| this.tys(*x, *y));
- }
-
- (_, &ty::ty_infer(TyVar(b_id))) => {
- return lattice_var_and_t(this, b_id, &a,
- |x, y| this.tys(*x, *y));
+ (&ty::ty_bot, _) => { this.ty_bot(b) }
+ (_, &ty::ty_bot) => { this.ty_bot(a) }
+
+ (&ty::ty_infer(TyVar(..)), _) |
+ (_, &ty::ty_infer(TyVar(..))) => {
+ let v = infcx.next_ty_var();
+ try!(this.relate_bound(v, a, b));
+ Ok(v)
}
_ => {
- return super_tys(this, a, b);
- }
- }
-}
-
-pub type LatticeDirOp<'a, T> = |a: &T, b: &T|: 'a -> cres<T>;
-
-#[deriving(Clone)]
-pub enum LatticeVarResult<K,T> {
- VarResult(K),
- ValueResult(T)
-}
-
-/**
- * Computes the LUB or GLB of two bounded variables. These could be any
- * sort of variables, but in the comments on this function I'll assume
- * we are doing an LUB on two type variables.
- *
- * This computation can be done in one of two ways:
- *
- * - If both variables have an upper bound, we may just compute the
- * LUB of those bounds and return that, in which case we are
- * returning a type. This is indicated with a `ValueResult` return.
- *
- * - If the variables do not both have an upper bound, we will unify
- * the variables and return the unified variable, in which case the
- * result is a variable. This is indicated with a `VarResult`
- * return. */
-pub fn lattice_vars<L:LatticeDir+Combine,
- T:LatticeValue,
- K:UnifyKey<Bounds<T>>>(
- this: &L, // defines whether we want LUB or GLB
- a_vid: K, // first variable
- b_vid: K, // second variable
- lattice_dir_op: LatticeDirOp<T>) // LUB or GLB operation on types
- -> cres<LatticeVarResult<K,T>>
-{
- let tcx = this.infcx().tcx;
- let table = UnifyKey::unification_table(this.infcx());
-
- let node_a = table.borrow_mut().get(tcx, a_vid);
- let node_b = table.borrow_mut().get(tcx, b_vid);
- let a_vid = node_a.key.clone();
- let b_vid = node_b.key.clone();
- let a_bounds = &node_a.value;
- let b_bounds = &node_b.value;
-
- debug!("{}.lattice_vars({}={} <: {}={})",
- this.tag(),
- a_vid, a_bounds.repr(tcx),
- b_vid, b_bounds.repr(tcx));
-
- // Same variable: the easy case.
- if a_vid == b_vid {
- return Ok(VarResult(a_vid));
- }
-
- // If both A and B have an UB type, then we can just compute the
- // LUB of those types:
- let (a_bnd, b_bnd) = (this.bnd(a_bounds), this.bnd(b_bounds));
- match (a_bnd, b_bnd) {
- (Some(ref a_ty), Some(ref b_ty)) => {
- match this.infcx().try(|| lattice_dir_op(a_ty, b_ty) ) {
- Ok(t) => return Ok(ValueResult(t)),
- Err(_) => { /*fallthrough */ }
- }
- }
- _ => {/*fallthrough*/}
- }
-
- // Otherwise, we need to merge A and B into one variable. We can
- // then use either variable as an upper bound:
- let cf = this.combine_fields();
- let () = try!(cf.var_sub_var(a_vid.clone(), b_vid.clone()));
- Ok(VarResult(a_vid.clone()))
-}
-
-pub fn lattice_var_and_t<L:LatticeDir+Combine,
- T:LatticeValue,
- K:UnifyKey<Bounds<T>>>(
- this: &L,
- a_id: K,
- b: &T,
- lattice_dir_op: LatticeDirOp<T>)
- -> cres<T>
-{
- let tcx = this.infcx().tcx;
- let table = UnifyKey::unification_table(this.infcx());
-
- let node_a = table.borrow_mut().get(tcx, a_id);
- let a_id = node_a.key.clone();
- let a_bounds = &node_a.value;
-
- // The comments in this function are written for LUB, but they
- // apply equally well to GLB if you inverse upper/lower/sub/super/etc.
-
- debug!("{}.lattice_var_and_t({}={} <: {})",
- this.tag(),
- a_id,
- a_bounds.repr(this.infcx().tcx),
- b.repr(this.infcx().tcx));
-
- match this.bnd(a_bounds) {
- Some(ref a_bnd) => {
- // If a has an upper bound, return the LUB(a.ub, b)
- debug!("bnd=Some({})", a_bnd.repr(this.infcx().tcx));
- lattice_dir_op(a_bnd, b)
- }
- None => {
- // If a does not have an upper bound, make b the upper bound of a
- // and then return b.
- debug!("bnd=None");
- let a_bounds = this.with_bnd(a_bounds, (*b).clone());
- let () = try!(this.combine_fields().bnds(&a_bounds.lb,
- &a_bounds.ub));
- table.borrow_mut().set(tcx,
- a_id.clone(),
- Root(a_bounds.clone(), node_a.rank));
- Ok((*b).clone())
+ super_tys(this, a, b)
}
}
}
-// ___________________________________________________________________________
+///////////////////////////////////////////////////////////////////////////
// Random utility functions used by LUB/GLB when computing LUB/GLB of
// fn types
-pub fn var_ids<T:Combine>(this: &T,
- map: &HashMap<ty::BoundRegion, ty::Region>)
- -> Vec<RegionVid> {
+pub fn var_ids<'tcx, T: Combine<'tcx>>(this: &T,
+ map: &HashMap<ty::BoundRegion, ty::Region>)
+ -> Vec<RegionVid> {
map.iter().map(|(_, r)| match *r {
ty::ReInfer(ty::ReVar(r)) => { r }
r => {
use middle::ty::{BuiltinBounds};
use middle::ty::RegionVid;
use middle::ty;
-use middle::typeck::infer::then;
use middle::typeck::infer::combine::*;
+use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::lattice::*;
use middle::typeck::infer::sub::Sub;
use util::ppaux::mt_to_string;
use util::ppaux::Repr;
-pub struct Lub<'f>(pub CombineFields<'f>); // least-upper-bound: common supertype
+/// "Least upper bound" (common supertype)
+pub struct Lub<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
+}
-impl<'f> Lub<'f> {
- pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Lub(ref v) = *self; v }
+#[allow(non_snake_case)]
+pub fn Lub<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Lub<'f, 'tcx> {
+ Lub { fields: cf }
}
-impl<'f> Combine for Lub<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.get_ref().infcx }
+impl<'f, 'tcx> Combine<'tcx> for Lub<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "lub".to_string() }
- fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
- fn trace(&self) -> TypeTrace { self.get_ref().trace.clone() }
+ fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+ fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.get_ref().clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.get_ref().clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.get_ref().clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
- let tcx = self.get_ref().infcx.tcx;
+ let tcx = self.fields.infcx.tcx;
debug!("{}.mts({}, {})",
self.tag(),
let m = a.mutbl;
match m {
- MutImmutable => {
- self.tys(a.ty, b.ty).and_then(|t| Ok(ty::mt {ty: t, mutbl: m}) )
- }
-
- MutMutable => {
- self.get_ref().infcx.try(|| {
- eq_tys(self, a.ty, b.ty).then(|| {
- Ok(ty::mt {ty: a.ty, mutbl: m})
- })
- }).or_else(|e| Err(e))
- }
+ MutImmutable => {
+ let t = try!(self.tys(a.ty, b.ty));
+ Ok(ty::mt {ty: t, mutbl: m})
+ }
+
+ MutMutable => {
+ let t = try!(self.equate().tys(a.ty, b.ty));
+ Ok(ty::mt {ty: t, mutbl: m})
+ }
}
}
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
- a.repr(self.get_ref().infcx.tcx),
- b.repr(self.get_ref().infcx.tcx));
+ a.repr(self.fields.infcx.tcx),
+ b.repr(self.fields.infcx.tcx));
- Ok(self.get_ref().infcx.region_vars.lub_regions(Subtype(self.trace()), a, b))
+ Ok(self.fields.infcx.region_vars.lub_regions(Subtype(self.trace()), a, b))
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
// Make a mark so we can examine "all bindings that were
// created as part of this type comparison".
- let mark = self.get_ref().infcx.region_vars.mark();
+ let mark = self.fields.infcx.region_vars.mark();
// Instantiate each bound region with a fresh region variable.
let (a_with_fresh, a_map) =
- self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
+ self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), a);
let (b_with_fresh, _) =
- self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
+ self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), b);
// Collect constraints.
- let sig0 = if_ok!(super_fn_sigs(self, &a_with_fresh, &b_with_fresh));
- debug!("sig0 = {}", sig0.repr(self.get_ref().infcx.tcx));
+ let sig0 = try!(super_fn_sigs(self, &a_with_fresh, &b_with_fresh));
+ debug!("sig0 = {}", sig0.repr(self.fields.infcx.tcx));
// Generalize the regions appearing in sig0 if possible
let new_vars =
- self.get_ref().infcx.region_vars.vars_created_since_mark(mark);
+ self.fields.infcx.region_vars.vars_created_since_mark(mark);
let sig1 =
fold_regions_in_sig(
- self.get_ref().infcx.tcx,
+ self.fields.infcx.tcx,
&sig0,
|r| generalize_region(self, mark, new_vars.as_slice(),
sig0.binder_id, &a_map, r));
return r0;
}
- let tainted = this.get_ref().infcx.region_vars.tainted(mark, r0);
+ let tainted = this.fields.infcx.region_vars.tainted(mark, r0);
// Variables created during LUB computation which are
// *related* to regions that pre-date the LUB computation
}
}
- this.get_ref().infcx.tcx.sess.span_bug(
- this.get_ref().trace.origin.span(),
+ this.fields.infcx.tcx.sess.span_bug(
+ this.fields.trace.origin.span(),
format!("region {:?} is not associated with \
any bound region from A!",
r0).as_slice())
+++ /dev/null
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![macro_escape]
-
-macro_rules! if_ok(
- ($inp: expr) => (
- match $inp {
- Ok(v) => { v }
- Err(e) => { return Err(e); }
- }
- )
-)
use middle::ty_fold::TypeFolder;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::coercion::Coerce;
-use middle::typeck::infer::combine::{Combine, CombineFields, eq_tys};
-use middle::typeck::infer::region_inference::{RegionSnapshot};
-use middle::typeck::infer::region_inference::{RegionVarBindings};
+use middle::typeck::infer::combine::{Combine, CombineFields};
+use middle::typeck::infer::region_inference::{RegionVarBindings,
+ RegionSnapshot};
use middle::typeck::infer::resolve::{resolver};
+use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::sub::Sub;
use middle::typeck::infer::lub::Lub;
-use middle::typeck::infer::unify::{UnificationTable, Snapshot};
+use middle::typeck::infer::unify::{UnificationTable};
use middle::typeck::infer::error_reporting::ErrorReporting;
use std::cell::{RefCell};
use std::collections::HashMap;
use util::common::indent;
use util::ppaux::{bound_region_to_string, ty_to_string, trait_ref_to_string, Repr};
-pub mod doc;
-pub mod macros;
+pub mod coercion;
pub mod combine;
+pub mod doc;
+pub mod equate;
+pub mod error_reporting;
pub mod glb;
pub mod lattice;
pub mod lub;
pub mod region_inference;
pub mod resolve;
pub mod sub;
-pub mod unify;
-pub mod coercion;
-pub mod error_reporting;
pub mod test;
+pub mod type_variable;
+pub mod unify;
pub type Bound<T> = Option<T>;
pub type fres<T> = Result<T, fixup_err>; // "fixup result"
pub type CoerceResult = cres<Option<ty::AutoAdjustment>>;
-pub struct InferCtxt<'a> {
- pub tcx: &'a ty::ctxt,
+pub struct InferCtxt<'a, 'tcx: 'a> {
+ pub tcx: &'a ty::ctxt<'tcx>,
// We instantiate UnificationTable with bounds<ty::t> because the
// types that might instantiate a general type variable have an
// order, represented by its upper and lower bounds.
- type_unification_table:
- RefCell<UnificationTable<ty::TyVid, Bounds<ty::t>>>,
+ type_variables: RefCell<type_variable::TypeVariableTable>,
// Map from integral variable to the kind of integer it represents
int_unification_table:
// For region variables.
region_vars:
- RegionVarBindings<'a>,
+ RegionVarBindings<'a, 'tcx>,
}
/// Why did we require that the two types be related?
// Closure bound must not outlive captured free variables
FreeVariable(Span, ast::NodeId),
+ // Proc upvars must be 'static
+ ProcCapture(Span, ast::NodeId),
+
// Index into slice must be within its lifetime
IndexSlice(Span),
}
}
-pub fn new_infer_ctxt<'a>(tcx: &'a ty::ctxt) -> InferCtxt<'a> {
+pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>)
+ -> InferCtxt<'a, 'tcx> {
InferCtxt {
tcx: tcx,
- type_unification_table: RefCell::new(UnificationTable::new()),
+ type_variables: RefCell::new(type_variable::TypeVariableTable::new()),
int_unification_table: RefCell::new(UnificationTable::new()),
float_unification_table: RefCell::new(UnificationTable::new()),
region_vars: RegionVarBindings::new(tcx),
origin: origin,
values: Types(expected_found(a_is_expected, a, b))
};
- let suber = cx.sub(a_is_expected, trace);
- eq_tys(&suber, a, b)
+ try!(cx.equate(a_is_expected, trace).tys(a, b));
+ Ok(())
})
}
}
pub struct CombinedSnapshot {
- type_snapshot: Snapshot<ty::TyVid>,
- int_snapshot: Snapshot<ty::IntVid>,
- float_snapshot: Snapshot<ty::FloatVid>,
+ type_snapshot: type_variable::Snapshot,
+ int_snapshot: unify::Snapshot<ty::IntVid>,
+ float_snapshot: unify::Snapshot<ty::FloatVid>,
region_vars_snapshot: RegionSnapshot,
}
-impl<'a> InferCtxt<'a> {
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn combine_fields<'a>(&'a self, a_is_expected: bool, trace: TypeTrace)
- -> CombineFields<'a> {
+ -> CombineFields<'a, 'tcx> {
CombineFields {infcx: self,
a_is_expected: a_is_expected,
trace: trace}
}
- pub fn sub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Sub<'a> {
- Sub(self.combine_fields(a_is_expected, trace))
+ pub fn equate<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Equate<'a, 'tcx> {
+ Equate(self.combine_fields(a_is_expected, trace))
}
- pub fn lub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Lub<'a> {
- Lub(self.combine_fields(a_is_expected, trace))
+ pub fn sub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Sub<'a, 'tcx> {
+ Sub(self.combine_fields(a_is_expected, trace))
}
- pub fn in_snapshot(&self) -> bool {
- self.region_vars.in_snapshot()
+ pub fn lub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Lub<'a, 'tcx> {
+ Lub(self.combine_fields(a_is_expected, trace))
}
fn start_snapshot(&self) -> CombinedSnapshot {
CombinedSnapshot {
- type_snapshot: self.type_unification_table.borrow_mut().snapshot(),
+ type_snapshot: self.type_variables.borrow_mut().snapshot(),
int_snapshot: self.int_unification_table.borrow_mut().snapshot(),
float_snapshot: self.float_unification_table.borrow_mut().snapshot(),
region_vars_snapshot: self.region_vars.start_snapshot(),
float_snapshot,
region_vars_snapshot } = snapshot;
- self.type_unification_table
+ self.type_variables
.borrow_mut()
- .rollback_to(self.tcx, type_snapshot);
+ .rollback_to(type_snapshot);
self.int_unification_table
.borrow_mut()
- .rollback_to(self.tcx, int_snapshot);
+ .rollback_to(int_snapshot);
self.float_unification_table
.borrow_mut()
- .rollback_to(self.tcx, float_snapshot);
+ .rollback_to(float_snapshot);
self.region_vars
.rollback_to(region_vars_snapshot);
}
float_snapshot,
region_vars_snapshot } = snapshot;
- self.type_unification_table
+ self.type_variables
.borrow_mut()
.commit(type_snapshot);
self.int_unification_table
}
}
-impl<'a> InferCtxt<'a> {
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
pub fn next_ty_var_id(&self) -> TyVid {
- self.type_unification_table
+ self.type_variables
.borrow_mut()
- .new_key(Bounds { lb: None, ub: None })
+ .new_var()
}
pub fn next_ty_var(&self) -> ty::t {
InvokeClosure(a) => a,
DerefPointer(a) => a,
FreeVariable(a, _) => a,
+ ProcCapture(a, _) => a,
IndexSlice(a) => a,
RelateObjectBound(a) => a,
RelateProcBound(a, _, _) => a,
FreeVariable(a, b) => {
format!("FreeVariable({}, {})", a.repr(tcx), b)
}
+ ProcCapture(a, b) => {
+ format!("ProcCapture({}, {})", a.repr(tcx), b)
+ }
IndexSlice(a) => {
format!("IndexSlice({})", a.repr(tcx))
}
pub type CombineMap = HashMap<TwoRegions, RegionVid>;
-pub struct RegionVarBindings<'a> {
- tcx: &'a ty::ctxt,
+pub struct RegionVarBindings<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
var_origins: RefCell<Vec<RegionVariableOrigin>>,
// Constraints of the form `A <= B` introduced by the region
length: uint
}
-impl<'a> RegionVarBindings<'a> {
- pub fn new(tcx: &'a ty::ctxt) -> RegionVarBindings<'a> {
+impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
+ pub fn new(tcx: &'a ty::ctxt<'tcx>) -> RegionVarBindings<'a, 'tcx> {
RegionVarBindings {
tcx: tcx,
var_origins: RefCell::new(Vec::new()),
}
}
- pub fn in_snapshot(&self) -> bool {
+ fn in_snapshot(&self) -> bool {
self.undo_log.borrow().len() > 0
}
}
pub fn commit(&self, snapshot: RegionSnapshot) {
- debug!("RegionVarBindings: commit()");
+ debug!("RegionVarBindings: commit({})", snapshot.length);
assert!(self.undo_log.borrow().len() > snapshot.length);
assert!(*self.undo_log.borrow().get(snapshot.length) == OpenSnapshot);
}
}
+ pub fn make_eqregion(&self,
+ origin: SubregionOrigin,
+ sub: Region,
+ sup: Region) {
+ if sub != sup {
+ // Eventually, it would be nice to add direct support for
+ // equating regions.
+ self.make_subregion(origin.clone(), sub, sup);
+ self.make_subregion(origin, sup, sub);
+ }
+ }
+
pub fn make_subregion(&self,
origin: SubregionOrigin,
sub: Region,
*self.values.borrow_mut() = Some(v);
errors
}
-}
-impl<'a> RegionVarBindings<'a> {
fn is_subregion_of(&self, sub: Region, sup: Region) -> bool {
self.tcx.region_maps.is_subregion_of(sub, sup)
}
type RegionGraph = graph::Graph<(), Constraint>;
-impl<'a> RegionVarBindings<'a> {
+impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
fn infer_variable_values(&self,
errors: &mut Vec<RegionResolutionError>)
-> Vec<VarValue>
use middle::ty::{FloatVar, FloatVid, IntVar, IntVid, RegionVid, TyVar, TyVid};
-use middle::ty::{type_is_bot, IntType, UintType};
+use middle::ty::{IntType, UintType};
use middle::ty;
use middle::ty_fold;
-use middle::typeck::infer::{Bounds, cyclic_ty, fixup_err, fres, InferCtxt};
-use middle::typeck::infer::{unresolved_float_ty, unresolved_int_ty};
-use middle::typeck::infer::{unresolved_ty};
+use middle::typeck::infer::{cyclic_ty, fixup_err, fres, InferCtxt};
+use middle::typeck::infer::{unresolved_int_ty,unresolved_float_ty,unresolved_ty};
use syntax::codemap::Span;
use util::common::indent;
use util::ppaux::{Repr, ty_to_string};
pub static resolve_and_force_all_but_regions: uint =
(resolve_all | force_all) & not_regions;
-pub struct ResolveState<'a> {
- infcx: &'a InferCtxt<'a>,
+pub struct ResolveState<'a, 'tcx: 'a> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
modes: uint,
err: Option<fixup_err>,
v_seen: Vec<TyVid> ,
type_depth: uint,
}
-pub fn resolver<'a>(infcx: &'a InferCtxt,
- modes: uint,
- _: Option<Span>)
- -> ResolveState<'a> {
+pub fn resolver<'a, 'tcx>(infcx: &'a InferCtxt<'a, 'tcx>,
+ modes: uint,
+ _: Option<Span>)
+ -> ResolveState<'a, 'tcx> {
ResolveState {
infcx: infcx,
modes: modes,
}
}
-impl<'a> ty_fold::TypeFolder for ResolveState<'a> {
- fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+impl<'a, 'tcx> ty_fold::TypeFolder<'tcx> for ResolveState<'a, 'tcx> {
+ fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
self.infcx.tcx
}
}
}
-impl<'a> ResolveState<'a> {
+impl<'a, 'tcx> ResolveState<'a, 'tcx> {
pub fn should(&mut self, mode: uint) -> bool {
(self.modes & mode) == mode
}
assert!(self.v_seen.is_empty());
match self.err {
None => {
- debug!("Resolved to {} + {} (modes={:x})",
- ty_to_string(self.infcx.tcx, rty),
+ debug!("Resolved {} to {} (modes={:x})",
+ ty_to_string(self.infcx.tcx, typ),
ty_to_string(self.infcx.tcx, rty),
self.modes);
return Ok(rty);
// tend to carry more restrictions or higher
// perf. penalties, so it pays to know more.
- let node =
- self.infcx.type_unification_table.borrow_mut().get(tcx, vid);
- let t1 = match node.value {
- Bounds { ub:_, lb:Some(t) } if !type_is_bot(t) => {
- self.resolve_type(t)
- }
- Bounds { ub:Some(t), lb:_ } | Bounds { ub:_, lb:Some(t) } => {
- self.resolve_type(t)
- }
- Bounds { ub:None, lb:None } => {
- if self.should(force_tvar) {
- self.err = Some(unresolved_ty(vid));
+ let t1 = match self.infcx.type_variables.borrow().probe(vid) {
+ Some(t) => {
+ self.resolve_type(t)
+ }
+ None => {
+ if self.should(force_tvar) {
+ self.err = Some(unresolved_ty(vid));
+ }
+ ty::mk_var(tcx, vid)
}
- ty::mk_var(tcx, vid)
- }
};
self.v_seen.pop().unwrap();
return t1;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::combine::*;
use middle::typeck::infer::{cres, CresCompare};
+use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::glb::Glb;
use middle::typeck::infer::InferCtxt;
-use middle::typeck::infer::lattice::CombineFieldsLatticeMethods;
use middle::typeck::infer::lub::Lub;
-use middle::typeck::infer::then;
use middle::typeck::infer::{TypeTrace, Subtype};
+use middle::typeck::infer::type_variable::{SubtypeOf, SupertypeOf};
use util::common::{indenter};
use util::ppaux::{bound_region_to_string, Repr};
use syntax::ast::{Onceness, FnStyle, MutImmutable, MutMutable};
-pub struct Sub<'f>(pub CombineFields<'f>); // "subtype", "subregion" etc
-impl<'f> Sub<'f> {
- pub fn get_ref<'a>(&'a self) -> &'a CombineFields<'f> { let Sub(ref v) = *self; v }
+/// "Subtype" (and related relations such as "subregion")
+pub struct Sub<'f, 'tcx: 'f> {
+ fields: CombineFields<'f, 'tcx>
}
-impl<'f> Combine for Sub<'f> {
- fn infcx<'a>(&'a self) -> &'a InferCtxt<'a> { self.get_ref().infcx }
+#[allow(non_snake_case)]
+pub fn Sub<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Sub<'f, 'tcx> {
+ Sub { fields: cf }
+}
+
+impl<'f, 'tcx> Combine<'tcx> for Sub<'f, 'tcx> {
+ fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
fn tag(&self) -> String { "sub".to_string() }
- fn a_is_expected(&self) -> bool { self.get_ref().a_is_expected }
- fn trace(&self) -> TypeTrace { self.get_ref().trace.clone() }
+ fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
+ fn trace(&self) -> TypeTrace { self.fields.trace.clone() }
- fn sub<'a>(&'a self) -> Sub<'a> { Sub(self.get_ref().clone()) }
- fn lub<'a>(&'a self) -> Lub<'a> { Lub(self.get_ref().clone()) }
- fn glb<'a>(&'a self) -> Glb<'a> { Glb(self.get_ref().clone()) }
+ fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
+ fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
+ fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
+ fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }
fn contratys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
- let opp = CombineFields {
- a_is_expected: !self.get_ref().a_is_expected,
- ..self.get_ref().clone()
- };
- Sub(opp).tys(b, a)
+ Sub(self.fields.switch_expected()).tys(b, a)
}
fn contraregions(&self, a: ty::Region, b: ty::Region)
- -> cres<ty::Region> {
- let opp = CombineFields {
- a_is_expected: !self.get_ref().a_is_expected,
- ..self.get_ref().clone()
- };
- Sub(opp).regions(b, a)
- }
+ -> cres<ty::Region> {
+ let opp = CombineFields {
+ a_is_expected: !self.fields.a_is_expected,
+ ..self.fields.clone()
+ };
+ Sub(opp).regions(b, a)
+ }
fn regions(&self, a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
- a.repr(self.get_ref().infcx.tcx),
- b.repr(self.get_ref().infcx.tcx));
- self.get_ref().infcx.region_vars.make_subregion(Subtype(self.trace()), a, b);
+ a.repr(self.fields.infcx.tcx),
+ b.repr(self.fields.infcx.tcx));
+ self.fields.infcx.region_vars.make_subregion(Subtype(self.trace()), a, b);
Ok(a)
}
fn mts(&self, a: &ty::mt, b: &ty::mt) -> cres<ty::mt> {
debug!("mts({} <: {})",
- a.repr(self.get_ref().infcx.tcx),
- b.repr(self.get_ref().infcx.tcx));
+ a.repr(self.fields.infcx.tcx),
+ b.repr(self.fields.infcx.tcx));
if a.mutbl != b.mutbl {
return Err(ty::terr_mutability);
}
match b.mutbl {
- MutMutable => {
- // If supertype is mut, subtype must match exactly
- // (i.e., invariant if mut):
- eq_tys(self, a.ty, b.ty).then(|| Ok(*a))
- }
- MutImmutable => {
- // Otherwise we can be covariant:
- self.tys(a.ty, b.ty).and_then(|_t| Ok(*a) )
- }
+ MutMutable => {
+ // If supertype is mut, subtype must match exactly
+ // (i.e., invariant if mut):
+ try!(self.equate().tys(a.ty, b.ty));
+ }
+ MutImmutable => {
+ // Otherwise we can be covariant:
+ try!(self.tys(a.ty, b.ty));
+ }
}
+
+ Ok(*a) // return is meaningless in sub, just return *a
}
fn fn_styles(&self, a: FnStyle, b: FnStyle) -> cres<FnStyle> {
fn tys(&self, a: ty::t, b: ty::t) -> cres<ty::t> {
debug!("{}.tys({}, {})", self.tag(),
- a.repr(self.get_ref().infcx.tcx), b.repr(self.get_ref().infcx.tcx));
+ a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
if a == b { return Ok(a); }
- let _indenter = indenter();
+
+ let infcx = self.fields.infcx;
+ let a = infcx.type_variables.borrow().replace_if_possible(a);
+ let b = infcx.type_variables.borrow().replace_if_possible(b);
match (&ty::get(a).sty, &ty::get(b).sty) {
(&ty::ty_bot, _) => {
Ok(a)
}
(&ty::ty_infer(TyVar(a_id)), &ty::ty_infer(TyVar(b_id))) => {
- if_ok!(self.get_ref().var_sub_var(a_id, b_id));
+ infcx.type_variables
+ .borrow_mut()
+ .relate_vars(a_id, SubtypeOf, b_id);
Ok(a)
}
// The vec/str check here and below is so that we don't unify
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(&ty::ty_infer(TyVar(a_id)), _) => {
- if_ok!(self.get_ref().var_sub_t(a_id, b));
+ try!(self.fields
+ .switch_expected()
+ .instantiate(b, SupertypeOf, a_id));
Ok(a)
}
Err(ty::terr_sorts(expected_found(self, a, b)))
}
(_, &ty::ty_infer(TyVar(b_id))) => {
- if_ok!(self.get_ref().t_sub_var(a, b_id));
+ try!(self.fields.instantiate(a, SubtypeOf, b_id));
Ok(a)
}
fn fn_sigs(&self, a: &ty::FnSig, b: &ty::FnSig) -> cres<ty::FnSig> {
debug!("fn_sigs(a={}, b={})",
- a.repr(self.get_ref().infcx.tcx), b.repr(self.get_ref().infcx.tcx));
+ a.repr(self.fields.infcx.tcx), b.repr(self.fields.infcx.tcx));
let _indenter = indenter();
// Rather than checking the subtype relationship between `a` and `b`
// Make a mark so we can examine "all bindings that were
// created as part of this type comparison".
- let mark = self.get_ref().infcx.region_vars.mark();
+ let mark = self.fields.infcx.region_vars.mark();
// First, we instantiate each bound region in the subtype with a fresh
// region variable.
let (a_sig, _) =
- self.get_ref().infcx.replace_late_bound_regions_with_fresh_regions(
+ self.fields.infcx.replace_late_bound_regions_with_fresh_regions(
self.trace(), a);
// Second, we instantiate each bound region in the supertype with a
// fresh concrete region.
let (skol_map, b_sig) = {
- replace_late_bound_regions_in_fn_sig(self.get_ref().infcx.tcx, b, |br| {
- let skol = self.get_ref().infcx.region_vars.new_skolemized(br);
+ replace_late_bound_regions_in_fn_sig(self.fields.infcx.tcx, b, |br| {
+ let skol = self.fields.infcx.region_vars.new_skolemized(br);
debug!("Bound region {} skolemized to {:?}",
- bound_region_to_string(self.get_ref().infcx.tcx, "", false, br),
+ bound_region_to_string(self.fields.infcx.tcx, "", false, br),
skol);
skol
})
};
- debug!("a_sig={}", a_sig.repr(self.get_ref().infcx.tcx));
- debug!("b_sig={}", b_sig.repr(self.get_ref().infcx.tcx));
+ debug!("a_sig={}", a_sig.repr(self.fields.infcx.tcx));
+ debug!("b_sig={}", b_sig.repr(self.fields.infcx.tcx));
// Compare types now that bound regions have been replaced.
- let sig = if_ok!(super_fn_sigs(self, &a_sig, &b_sig));
+ let sig = try!(super_fn_sigs(self, &a_sig, &b_sig));
// Presuming type comparison succeeds, we need to check
// that the skolemized regions do not "leak".
let new_vars =
- self.get_ref().infcx.region_vars.vars_created_since_mark(mark);
+ self.fields.infcx.region_vars.vars_created_since_mark(mark);
for (&skol_br, &skol) in skol_map.iter() {
- let tainted = self.get_ref().infcx.region_vars.tainted(mark, skol);
+ let tainted = self.fields.infcx.region_vars.tainted(mark, skol);
for tainted_region in tainted.iter() {
// Each skolemized should only be relatable to itself
// or new variables:
if self.a_is_expected() {
debug!("Not as polymorphic!");
return Err(ty::terr_regions_insufficiently_polymorphic(
- skol_br, *tainted_region));
+ skol_br, *tainted_region));
} else {
debug!("Overly polymorphic!");
return Err(ty::terr_regions_overly_polymorphic(
- skol_br, *tainted_region));
+ skol_br, *tainted_region));
}
}
}
return Ok(sig);
}
-
}
+
use syntax::ast;
use util::ppaux::{ty_to_string, UserString};
-struct Env<'a> {
+use arena::TypedArena;
+
+struct Env<'a, 'tcx: 'a> {
krate: ast::Crate,
- tcx: &'a ty::ctxt,
- infcx: &'a infer::InferCtxt<'a>,
+ infcx: &'a infer::InferCtxt<'a, 'tcx>,
}
struct RH<'a> {
let named_region_map = resolve_lifetime::krate(&sess, &krate);
let region_map = region::resolve_crate(&sess, &krate);
let stability_index = stability::Index::build(&krate);
+ let type_arena = TypedArena::new();
let tcx = ty::mk_ctxt(sess,
+ &type_arena,
def_map,
named_region_map,
ast_map,
lang_items,
stability_index);
let infcx = infer::new_infer_ctxt(&tcx);
- let env = Env {krate: krate,
- tcx: &tcx,
- infcx: &infcx};
+ let env = Env {
+ krate: krate,
+ infcx: &infcx
+ };
body(env);
infcx.resolve_regions_and_report_errors();
assert_eq!(tcx.sess.err_count(), expected_err_count);
}
-impl<'a> Env<'a> {
+impl<'a, 'tcx> Env<'a, 'tcx> {
pub fn create_region_hierarchy(&self, rh: &RH) {
for child_rh in rh.sub.iter() {
self.create_region_hierarchy(child_rh);
- self.tcx.region_maps.record_encl_scope(child_rh.id, rh.id);
+ self.infcx.tcx.region_maps.record_encl_scope(child_rh.id, rh.id);
}
}
-> Option<ast::NodeId> {
assert!(idx < names.len());
for item in m.items.iter() {
- if item.ident.user_string(this.tcx) == names[idx] {
+ if item.ident.user_string(this.infcx.tcx) == names[idx] {
return search(this, &**item, idx+1, names);
}
}
match infer::mk_subty(self.infcx, true, infer::Misc(DUMMY_SP), a, b) {
Ok(_) => true,
Err(ref e) => fail!("Encountered error: {}",
- ty::type_err_to_str(self.tcx, e))
+ ty::type_err_to_str(self.infcx.tcx, e))
}
}
}
pub fn ty_to_string(&self, a: ty::t) -> String {
- ty_to_string(self.tcx, a)
+ ty_to_string(self.infcx.tcx, a)
}
pub fn t_fn(&self,
output_ty: ty::t)
-> ty::t
{
- ty::mk_ctor_fn(self.tcx, binder_id, input_tys, output_ty)
+ ty::mk_ctor_fn(self.infcx.tcx, binder_id, input_tys, output_ty)
}
pub fn t_int(&self) -> ty::t {
}
pub fn t_rptr_late_bound(&self, binder_id: ast::NodeId, id: uint) -> ty::t {
- ty::mk_imm_rptr(self.tcx, ty::ReLateBound(binder_id, ty::BrAnon(id)),
+ ty::mk_imm_rptr(self.infcx.tcx, ty::ReLateBound(binder_id, ty::BrAnon(id)),
self.t_int())
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> ty::t {
- ty::mk_imm_rptr(self.tcx, ty::ReScope(id), self.t_int())
+ ty::mk_imm_rptr(self.infcx.tcx, ty::ReScope(id), self.t_int())
}
pub fn t_rptr_free(&self, nid: ast::NodeId, id: uint) -> ty::t {
- ty::mk_imm_rptr(self.tcx,
+ ty::mk_imm_rptr(self.infcx.tcx,
ty::ReFree(ty::FreeRegion {scope_id: nid,
bound_region: ty::BrAnon(id)}),
self.t_int())
}
pub fn t_rptr_static(&self) -> ty::t {
- ty::mk_imm_rptr(self.tcx, ty::ReStatic, self.t_int())
+ ty::mk_imm_rptr(self.infcx.tcx, ty::ReStatic, self.t_int())
}
pub fn dummy_type_trace(&self) -> infer::TypeTrace {
}
}
- pub fn lub(&self) -> Lub<'a> {
+ pub fn lub(&self) -> Lub<'a, 'tcx> {
let trace = self.dummy_type_trace();
Lub(self.infcx.combine_fields(true, trace))
}
- pub fn glb(&self) -> Glb<'a> {
+ pub fn glb(&self) -> Glb<'a, 'tcx> {
let trace = self.dummy_type_trace();
Glb(self.infcx.combine_fields(true, trace))
}
match self.lub().tys(t1, t2) {
Ok(t) => t,
Err(ref e) => fail!("unexpected error computing LUB: {:?}",
- ty::type_err_to_str(self.tcx, e))
+ ty::type_err_to_str(self.infcx.tcx, e))
}
}
}
Err(ref e) => {
fail!("unexpected error in LUB: {}",
- ty::type_err_to_str(self.tcx, e))
+ ty::type_err_to_str(self.infcx.tcx, e))
}
}
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use middle::ty;
+use std::mem;
+use util::snapshot_vec as sv;
+
+/// Tracks type variables for inference: each variable is either
+/// `Known` (already instantiated to a type) or `Bounded` (related to
+/// other, as yet unknown, variables). All mutations go through a
+/// `SnapshotVec` so they can be rolled back.
+pub struct TypeVariableTable {
+ values: sv::SnapshotVec<TypeVariableData,UndoEntry,Delegate>,
+}
+
+// Per-variable storage; one entry per `TyVid`, indexed by `TyVid.index`.
+struct TypeVariableData {
+ value: TypeVariableValue
+}
+
+// State of a single type variable.
+enum TypeVariableValue {
+ // Variable has been instantiated to this type.
+ Known(ty::t),
+ // Variable is not yet known; holds its relations to other variables.
+ Bounded(Vec<Relation>),
+}
+
+/// Opaque token returned by `TypeVariableTable::snapshot`; must be
+/// consumed by exactly one of `rollback_to` or `commit`.
+pub struct Snapshot {
+ snapshot: sv::Snapshot
+}
+
+// Undo-log entries recorded so the delegate can reverse mutations.
+enum UndoEntry {
+ // The type of the var was specified.
+ SpecifyVar(ty::TyVid, Vec<Relation>),
+ // An edge was pushed onto both variables' relation lists.
+ Relate(ty::TyVid, ty::TyVid),
+}
+
+// Stateless delegate implementing the undo logic (see `reverse` below).
+struct Delegate;
+
+// An edge to another variable, tagged with the direction of the relation.
+type Relation = (RelationDir, ty::TyVid);
+
+/// Direction of a relation between two type variables (or between a
+/// variable and a concrete type).
+#[deriving(PartialEq,Show)]
+pub enum RelationDir {
+ SubtypeOf, SupertypeOf, EqTo
+}
+
+impl RelationDir {
+ // Flips the direction, as seen from the other endpoint of the edge:
+ // if `a SubtypeOf b` then `b SupertypeOf a`; `EqTo` is symmetric.
+ fn opposite(self) -> RelationDir {
+ match self {
+ SubtypeOf => SupertypeOf,
+ SupertypeOf => SubtypeOf,
+ EqTo => EqTo
+ }
+ }
+}
+
+impl TypeVariableTable {
+ // Creates an empty table with a fresh undo delegate.
+ pub fn new() -> TypeVariableTable {
+ TypeVariableTable { values: sv::SnapshotVec::new(Delegate) }
+ }
+
+ // Mutable access to `a`'s relation list; fails if `a` is already known
+ // (see the free fn `relations` at the bottom of this file).
+ fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
+ relations(self.values.get_mut(a.index))
+ }
+
+ pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) {
+ /*!
+ * Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
+ *
+ * Precondition: neither `a` nor `b` are known.
+ */
+
+ if a != b {
+ // The edge is stored on both endpoints (with the direction
+ // flipped on `b`), and a single undo entry covers both pushes.
+ self.relations(a).push((dir, b));
+ self.relations(b).push((dir.opposite(), a));
+ self.values.record(Relate(a, b));
+ }
+ }
+
+ pub fn instantiate_and_push(
+ &mut self,
+ vid: ty::TyVid,
+ ty: ty::t,
+ stack: &mut Vec<(ty::t, RelationDir, ty::TyVid)>)
+ {
+ /*!
+ * Instantiates `vid` with the type `ty` and then pushes an
+ * entry onto `stack` for each of the relations of `vid` to
+ * other variables. The relations will have the form `(ty,
+ * dir, vid1)` where `vid1` is some other variable id.
+ */
+
+ // Swap in `Known(ty)`, taking ownership of the old relation list.
+ let old_value = {
+ let value_ptr = &mut self.values.get_mut(vid.index).value;
+ mem::replace(value_ptr, Known(ty))
+ };
+
+ // Instantiating twice is a caller bug, not a recoverable state.
+ let relations = match old_value {
+ Bounded(b) => b,
+ Known(_) => fail!("Asked to instantiate variable that is \
+ already instantiated")
+ };
+
+ // Hand the pending obligations back to the caller to process.
+ for &(dir, vid) in relations.iter() {
+ stack.push((ty, dir, vid));
+ }
+
+ // The relations move into the undo log so a rollback can restore
+ // the variable to `Bounded(relations)` exactly as it was.
+ self.values.record(SpecifyVar(vid, relations));
+ }
+
+ // Allocates a fresh, unconstrained variable; its id is its slot index.
+ pub fn new_var(&mut self) -> ty::TyVid {
+ let index =
+ self.values.push(
+ TypeVariableData { value: Bounded(Vec::new()) });
+ ty::TyVid { index: index }
+ }
+
+ // Non-mutating query: `Some(t)` if `vid` has been instantiated,
+ // `None` while it is still only bounded.
+ pub fn probe(&self, vid: ty::TyVid) -> Option<ty::t> {
+ match self.values.get(vid.index).value {
+ Bounded(..) => None,
+ Known(t) => Some(t)
+ }
+ }
+
+ // If `t` is itself a type variable with a known value, returns that
+ // value (one level only — no recursive resolution); otherwise `t`.
+ pub fn replace_if_possible(&self, t: ty::t) -> ty::t {
+ match ty::get(t).sty {
+ ty::ty_infer(ty::TyVar(v)) => {
+ match self.probe(v) {
+ None => t,
+ Some(u) => u
+ }
+ }
+ _ => t,
+ }
+ }
+
+ // Snapshot discipline is LIFO: each snapshot must be committed or
+ // rolled back before any enclosing snapshot is resolved.
+ pub fn snapshot(&mut self) -> Snapshot {
+ Snapshot { snapshot: self.values.start_snapshot() }
+ }
+
+ // Undoes every change made since `s` was taken.
+ pub fn rollback_to(&mut self, s: Snapshot) {
+ self.values.rollback_to(s.snapshot);
+ }
+
+ // Keeps changes made since `s`; they remain undoable only if an
+ // outer snapshot is still open.
+ pub fn commit(&mut self, s: Snapshot) {
+ self.values.commit(s.snapshot);
+ }
+}
+
+// Reverses one logged action during a rollback. Must mirror exactly
+// what `relate_vars` / `instantiate_and_push` recorded, in reverse.
+impl sv::SnapshotVecDelegate<TypeVariableData,UndoEntry> for Delegate {
+ fn reverse(&mut self,
+ values: &mut Vec<TypeVariableData>,
+ action: UndoEntry) {
+ match action {
+ // Un-instantiate: restore the saved relation list.
+ SpecifyVar(vid, relations) => {
+ values.get_mut(vid.index).value = Bounded(relations);
+ }
+
+ // Un-relate: `relate_vars` pushed one edge onto each endpoint,
+ // so popping one entry from each list undoes it.
+ Relate(a, b) => {
+ relations(values.get_mut(a.index)).pop();
+ relations(values.get_mut(b.index)).pop();
+ }
+ }
+ }
+}
+
+// Projects the relation list out of a variable's data; calling this on
+// an already-instantiated (`Known`) variable is a caller bug.
+// NOTE(review): the panic message still says "var_sub_var", the name of
+// a method from the code this patch deletes — consider updating it.
+fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec<Relation> {
+ match v.value {
+ Known(_) => fail!("var_sub_var: variable is known"),
+ Bounded(ref mut relations) => relations
+ }
+}
+
use middle::ty::{expected_found, IntVarValue};
use middle::ty;
-use middle::typeck::infer::{Bounds, uok, ures};
+use middle::typeck::infer::{uok, ures};
use middle::typeck::infer::InferCtxt;
use std::cell::RefCell;
use std::fmt::Show;
-use std::mem;
use syntax::ast;
use util::ppaux::Repr;
+use util::snapshot_vec as sv;
/**
* This trait is implemented by any type that can serve as a type
* variable. We call such variables *unification keys*. For example,
- * this trait is implemented by `TyVid`, which represents normal
- * type variables, and `IntVid`, which represents integral variables.
+ * this trait is implemented by `IntVid`, which represents integral
+ * variables.
*
- * Each key type has an associated value type `V`. For example,
- * for `TyVid`, this is `Bounds<ty::t>`, representing a pair of
- * upper- and lower-bound types.
+ * Each key type has an associated value type `V`. For example, for
+ * `IntVid`, this is `Option<IntVarValue>`, representing some
+ * (possibly not yet known) sort of integer.
*
* Implementations of this trait are at the end of this file.
*/
}
/**
- * Trait for valid types that a type variable can be set to. Note
- * that this is typically not the end type that the value will
- * take on, but rather some wrapper: for example, for normal type
- * variables, the associated type is not `ty::t` but rather
- * `Bounds<ty::t>`.
+ * Trait for valid types that a type variable can be set to. Note that
+ * this is typically not the end type that the value will take on, but
+ * rather an `Option` wrapper (where `None` represents a variable
+ * whose value is not yet set).
*
* Implementations of this trait are at the end of this file.
*/
/**
* Indicates the current value of each key.
*/
- values: Vec<VarValue<K,V>>,
- /**
- * When a snapshot is active, logs each change made to the table
- * so that they can be unrolled.
- */
- undo_log: Vec<UndoLog<K,V>>,
+ values: sv::SnapshotVec<VarValue<K,V>,(),Delegate>,
}
/**
* made during the snapshot may either be *committed* or *rolled back*.
*/
pub struct Snapshot<K> {
- // Ensure that this snapshot is keyed to the table type.
- marker1: marker::CovariantType<K>,
-
- // Snapshots are tokens that should be created/consumed linearly.
- marker2: marker::NoCopy,
-
- // Length of the undo log at the time the snapshot was taken.
- length: uint,
-}
-
-#[deriving(PartialEq)]
-enum UndoLog<K,V> {
- /// Indicates where a snapshot started.
- OpenSnapshot,
-
- /// Indicates a snapshot that has been committed.
- CommittedSnapshot,
-
- /// New variable with given index was created.
- NewVar(uint),
-
- /// Variable with given index was changed *from* the given value.
- SetVar(uint, VarValue<K,V>),
+ // Link snapshot to the key type `K` of the table.
+ marker: marker::CovariantType<K>,
+ snapshot: sv::Snapshot,
}
/**
pub rank: uint,
}
+pub struct Delegate;
+
// We can't use V:LatticeValue, much as I would like to,
-// because frequently the pattern is that V=Bounds<U> for some
+// because frequently the pattern is that V=Option<U> for some
// other type parameter U, and we have no way to say
-// Bounds<U>:
+// Option<U>:LatticeValue.
impl<V:PartialEq+Clone+Repr,K:UnifyKey<V>> UnificationTable<K,V> {
pub fn new() -> UnificationTable<K,V> {
UnificationTable {
- values: Vec::new(),
- undo_log: Vec::new()
+ values: sv::SnapshotVec::new(Delegate),
}
}
- pub fn in_snapshot(&self) -> bool {
- /*! True if a snapshot has been started. */
-
- self.undo_log.len() > 0
- }
-
/**
* Starts a new snapshot. Each snapshot must be either
* rolled back or committed in a "LIFO" (stack) order.
*/
pub fn snapshot(&mut self) -> Snapshot<K> {
- let length = self.undo_log.len();
- debug!("{}: snapshot at length {}",
- UnifyKey::tag(None::<K>),
- length);
- self.undo_log.push(OpenSnapshot);
- Snapshot { length: length,
- marker1: marker::CovariantType,
- marker2: marker::NoCopy }
- }
-
- fn assert_open_snapshot(&self, snapshot: &Snapshot<K>) {
- // Or else there was a failure to follow a stack discipline:
- assert!(self.undo_log.len() > snapshot.length);
-
- // Invariant established by start_snapshot():
- assert!(*self.undo_log.get(snapshot.length) == OpenSnapshot);
+ Snapshot { marker: marker::CovariantType::<K>,
+ snapshot: self.values.start_snapshot() }
}
/**
* Reverses all changes since the last snapshot. Also
* removes any keys that have been created since then.
*/
- pub fn rollback_to(&mut self, tcx: &ty::ctxt, snapshot: Snapshot<K>) {
- debug!("{}: rollback_to({})",
- UnifyKey::tag(None::<K>),
- snapshot.length);
-
- self.assert_open_snapshot(&snapshot);
-
- while self.undo_log.len() > snapshot.length + 1 {
- match self.undo_log.pop().unwrap() {
- OpenSnapshot => {
- // This indicates a failure to obey the stack discipline.
- tcx.sess.bug("Cannot rollback an uncommitted snapshot");
- }
-
- CommittedSnapshot => {
- // This occurs when there are nested snapshots and
- // the inner is committed but outer is rolled back.
- }
-
- NewVar(i) => {
- assert!(self.values.len() == i);
- self.values.pop();
- }
-
- SetVar(i, v) => {
- *self.values.get_mut(i) = v;
- }
- }
- }
-
- let v = self.undo_log.pop().unwrap();
- assert!(v == OpenSnapshot);
- assert!(self.undo_log.len() == snapshot.length);
+ pub fn rollback_to(&mut self, snapshot: Snapshot<K>) {
+ debug!("{}: rollback_to()", UnifyKey::tag(None::<K>));
+ self.values.rollback_to(snapshot.snapshot);
}
/**
* can still be undone if there is a snapshot further out.
*/
pub fn commit(&mut self, snapshot: Snapshot<K>) {
- debug!("{}: commit({})",
- UnifyKey::tag(None::<K>),
- snapshot.length);
-
- self.assert_open_snapshot(&snapshot);
-
- if snapshot.length == 0 {
- // The root snapshot.
- self.undo_log.truncate(0);
- } else {
- *self.undo_log.get_mut(snapshot.length) = CommittedSnapshot;
- }
+ debug!("{}: commit()", UnifyKey::tag(None::<K>));
+ self.values.commit(snapshot.snapshot);
}
pub fn new_key(&mut self, value: V) -> K {
- let index = self.values.len();
-
- if self.in_snapshot() {
- self.undo_log.push(NewVar(index));
- }
-
- self.values.push(Root(value, 0));
+ let index = self.values.push(Root(value, 0));
let k = UnifyKey::from_index(index);
debug!("{}: created new key: {}",
UnifyKey::tag(None::<K>),
k
}
- fn swap_value(&mut self,
- index: uint,
- new_value: VarValue<K,V>)
- -> VarValue<K,V>
- {
- /*!
- * Primitive operation to swap a value in the var array.
- * Caller should update the undo log if we are in a snapshot.
- */
-
- let loc = self.values.get_mut(index);
- mem::replace(loc, new_value)
- }
-
pub fn get(&mut self, tcx: &ty::ctxt, vid: K) -> Node<K,V> {
/*!
* Find the root node for `vid`. This uses the standard
let node: Node<K,V> = self.get(tcx, redirect.clone());
if node.key != redirect {
// Path compression
- let old_value =
- self.swap_value(index, Redirect(node.key.clone()));
-
- // If we are in a snapshot, record this compression,
- // because it's possible that the unification which
- // caused it will be rolled back later.
- if self.in_snapshot() {
- self.undo_log.push(SetVar(index, old_value));
- }
+ self.values.set(index, Redirect(node.key.clone()));
}
node
}
*/
assert!(self.is_root(&key));
- assert!(self.in_snapshot());
debug!("Updating variable {} to {}",
key.repr(tcx),
new_value.repr(tcx));
- let index = key.index();
- let old_value = self.swap_value(index, new_value);
- self.undo_log.push(SetVar(index, old_value));
+ self.values.set(key.index(), new_value);
}
pub fn unify(&mut self,
}
}
+impl<K,V> sv::SnapshotVecDelegate<VarValue<K,V>,()> for Delegate {
+ fn reverse(&mut self, _: &mut Vec<VarValue<K,V>>, _: ()) {
+ fail!("Nothing to reverse");
+ }
+}
+
///////////////////////////////////////////////////////////////////////////
// Code to handle simple keys like ints, floats---anything that
// doesn't have a subtyping relationship we need to worry about.
pub fn err<V:SimplyUnifiable>(a_is_expected: bool,
a_t: V,
- b_t: V) -> ures {
+ b_t: V)
+ -> ures {
if a_is_expected {
Err(SimplyUnifiable::to_type_err(
ty::expected_found {expected: a_t, found: b_t}))
-> ures;
}
-impl<'tcx,V:SimplyUnifiable,K:UnifyKey<Option<V>>>
- InferCtxtMethodsForSimplyUnifiableTypes<V,K> for InferCtxt<'tcx>
+impl<'a,'tcx,V:SimplyUnifiable,K:UnifyKey<Option<V>>>
+ InferCtxtMethodsForSimplyUnifiableTypes<V,K> for InferCtxt<'a, 'tcx>
{
fn simple_vars(&self,
a_is_expected: bool,
///////////////////////////////////////////////////////////////////////////
-// General type keys
-
-impl UnifyKey<Bounds<ty::t>> for ty::TyVid {
- fn index(&self) -> uint { self.index }
-
- fn from_index(i: uint) -> ty::TyVid { ty::TyVid { index: i } }
-
- fn unification_table<'v>(infcx: &'v InferCtxt)
- -> &'v RefCell<UnificationTable<ty::TyVid, Bounds<ty::t>>>
- {
- return &infcx.type_unification_table;
- }
-
- fn tag(_: Option<ty::TyVid>) -> &'static str {
- "TyVid"
- }
-}
-
-impl UnifyValue for Bounds<ty::t> { }
-
// Integral type keys
impl UnifyKey<Option<IntVarValue>> for ty::IntVid {
pub type impl_vtable_map = RefCell<DefIdMap<vtable_res>>;
-pub struct CrateCtxt<'a> {
+pub struct CrateCtxt<'a, 'tcx: 'a> {
// A mapping from method call sites to traits that have that method.
trait_map: resolve::TraitMap,
- tcx: &'a ty::ctxt
+ tcx: &'a ty::ctxt<'tcx>
}
// Functions that write types into the node type table
* The first pass over the crate simply builds up the set of inferreds.
*/
-struct TermsContext<'a> {
- tcx: &'a ty::ctxt,
+struct TermsContext<'a, 'tcx: 'a> {
+ tcx: &'a ty::ctxt<'tcx>,
arena: &'a Arena,
empty_variances: Rc<ty::ItemVariances>,
term: VarianceTermPtr<'a>,
}
-fn determine_parameters_to_be_inferred<'a>(tcx: &'a ty::ctxt,
- arena: &'a mut Arena,
- krate: &ast::Crate)
- -> TermsContext<'a> {
+fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>,
+ arena: &'a mut Arena,
+ krate: &ast::Crate)
+ -> TermsContext<'a, 'tcx> {
let mut terms_cx = TermsContext {
tcx: tcx,
arena: arena,
terms_cx
}
-impl<'a> TermsContext<'a> {
+impl<'a, 'tcx> TermsContext<'a, 'tcx> {
fn add_inferred(&mut self,
item_id: ast::NodeId,
kind: ParamKind,
}
}
-impl<'a> Visitor<()> for TermsContext<'a> {
+impl<'a, 'tcx> Visitor<()> for TermsContext<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
debug!("add_inferreds for item {}", item.repr(self.tcx));
* We walk the set of items and, for each member, generate new constraints.
*/
-struct ConstraintContext<'a> {
- terms_cx: TermsContext<'a>,
+struct ConstraintContext<'a, 'tcx: 'a> {
+ terms_cx: TermsContext<'a, 'tcx>,
// These are the def-id of the std::kinds::marker::InvariantType,
// std::kinds::marker::InvariantLifetime, and so on. The arrays
variance: &'a VarianceTerm<'a>,
}
-fn add_constraints_from_crate<'a>(terms_cx: TermsContext<'a>,
- krate: &ast::Crate)
- -> ConstraintContext<'a> {
+fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>,
+ krate: &ast::Crate)
+ -> ConstraintContext<'a, 'tcx> {
let mut invariant_lang_items = [None, ..2];
let mut covariant_lang_items = [None, ..2];
let mut contravariant_lang_items = [None, ..2];
constraint_cx
}
-impl<'a> Visitor<()> for ConstraintContext<'a> {
+impl<'a, 'tcx> Visitor<()> for ConstraintContext<'a, 'tcx> {
fn visit_item(&mut self, item: &ast::Item, _: ()) {
let did = ast_util::local_def(item.id);
let tcx = self.terms_cx.tcx;
}
}
-impl<'a> ConstraintContext<'a> {
- fn tcx(&self) -> &'a ty::ctxt {
+impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
+ fn tcx(&self) -> &'a ty::ctxt<'tcx> {
self.terms_cx.tcx
}
* inferred is then written into the `variance_map` in the tcx.
*/
-struct SolveContext<'a> {
- terms_cx: TermsContext<'a>,
+struct SolveContext<'a, 'tcx: 'a> {
+ terms_cx: TermsContext<'a, 'tcx>,
constraints: Vec<Constraint<'a>> ,
// Maps from an InferredIndex to the inferred value for that variable.
solutions_cx.write();
}
-impl<'a> SolveContext<'a> {
+impl<'a, 'tcx> SolveContext<'a, 'tcx> {
fn solve(&mut self) {
// Propagate constraints until a fixed point is reached. Note
// that the maximum number of iterations is 2C where C is the
//! Used by plugin crates to tell `rustc` about the plugins they provide.
-use lint::LintPassObject;
+use lint::{LintPassObject, LintId, Lint};
use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT};
use syntax::ext::base::{IdentTT, LetSyntaxTT, ItemDecorator, ItemModifier, BasicMacroExpander};
use syntax::parse::token;
use syntax::ast;
+use std::collections::HashMap;
+
/// Structure used to register plugins.
///
/// A plugin registrar function takes an `&mut Registry` and should call
#[doc(hidden)]
pub lint_passes: Vec<LintPassObject>,
+
+ #[doc(hidden)]
+ pub lint_groups: HashMap<&'static str, Vec<LintId>>,
}
impl Registry {
krate_span: krate.span,
syntax_exts: vec!(),
lint_passes: vec!(),
+ lint_groups: HashMap::new(),
}
}
pub fn register_lint_pass(&mut self, lint_pass: LintPassObject) {
self.lint_passes.push(lint_pass);
}
+
+ /// Register a lint group.
+ pub fn register_lint_group(&mut self, name: &'static str, to: Vec<&'static Lint>) {
+ self.lint_groups.insert(name, to.move_iter().map(|x| LintId::of(x)).collect());
+ }
}
//! An efficient hash map for node IDs
+#![allow(non_snake_case)]
+
use std::collections::{HashMap, HashSet};
use std::hash::{Hasher, Hash, Writer};
use syntax::ast;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/*!
+ * A utility class for implementing "snapshottable" things; a
+ * snapshottable data structure permits you to take a snapshot (via
+ * `start_snapshot`) and then, after making some changes, elect either
+ * to rollback to the start of the snapshot or commit those changes.
+ *
+ * This vector is intended to be used as part of an abstraction, not
+ * serve as a complete abstraction on its own. As such, while it will
+ * roll back most changes on its own, it also supports a `get_mut`
+ * operation that gives you an arbitrary mutable pointer into the
+ * vector. To ensure that any changes you make with this pointer
+ * are rolled back, you must invoke `record` to record any changes you
+ * make and also supply a delegate capable of reversing those
+ * changes.
+ */
+
+use std::kinds::marker;
+use std::mem;
+
+/// One reversible action in the undo log. While a snapshot is open,
+/// every mutation of the vector pushes an entry here so that
+/// `rollback_to` can undo the actions in reverse order.
+#[deriving(PartialEq)]
+enum UndoLog<T,U> {
+ /// Indicates where a snapshot started.
+ OpenSnapshot,
+
+ /// Indicates a snapshot that has been committed.
+ CommittedSnapshot,
+
+ /// New variable with given index was created.
+ NewElem(uint),
+
+ /// Variable with given index was changed *from* the given value.
+ SetElem(uint, T),
+
+ /// Extensible set of actions
+ Other(U)
+}
+
+pub struct SnapshotVec<T,U,D> {
+ // The elements themselves.
+ values: Vec<T>,
+
+ // Reversible actions recorded since the outermost open snapshot;
+ // empty whenever no snapshot is active.
+ undo_log: Vec<UndoLog<T,U>>,
+
+ // User-supplied delegate that knows how to reverse `Other` actions.
+ delegate: D
+}
+
+/// Token handed out by `start_snapshot` and consumed by exactly one of
+/// `rollback_to` or `commit`.
+pub struct Snapshot {
+ // Snapshots are tokens that should be created/consumed linearly.
+ marker: marker::NoCopy,
+
+ // Length of the undo log at the time the snapshot was taken.
+ length: uint,
+}
+
+/// Implemented by clients so that custom (`Other`) actions recorded via
+/// `record` can be undone; `reverse` must restore `values` to the state
+/// it had before the given `action` was performed.
+pub trait SnapshotVecDelegate<T,U> {
+ fn reverse(&mut self, values: &mut Vec<T>, action: U);
+}
+
+impl<T,U,D:SnapshotVecDelegate<T,U>> SnapshotVec<T,U,D> {
+ /// Creates an empty vector with the given delegate and no open snapshot.
+ pub fn new(delegate: D) -> SnapshotVec<T,U,D> {
+ SnapshotVec {
+ values: Vec::new(),
+ undo_log: Vec::new(),
+ delegate: delegate
+ }
+ }
+
+ // True iff at least one snapshot is open; the undo log is only
+ // populated while a snapshot is active.
+ fn in_snapshot(&self) -> bool {
+ !self.undo_log.is_empty()
+ }
+
+ /// Records a custom action for later reversal by the delegate.
+ /// A no-op when no snapshot is open (there is nothing to roll back to).
+ pub fn record(&mut self, action: U) {
+ if self.in_snapshot() {
+ self.undo_log.push(Other(action));
+ }
+ }
+
+ /// Appends `elem` and returns its index, logging the creation so that
+ /// a rollback can pop it again.
+ pub fn push(&mut self, elem: T) -> uint {
+ let len = self.values.len();
+ self.values.push(elem);
+
+ if self.in_snapshot() {
+ self.undo_log.push(NewElem(len));
+ }
+
+ len
+ }
+
+ pub fn get<'a>(&'a self, index: uint) -> &'a T {
+ self.values.get(index)
+ }
+
+ pub fn get_mut<'a>(&'a mut self, index: uint) -> &'a mut T {
+ /*!
+ * Returns a mutable pointer into the vec; whatever changes
+ * you make here cannot be undone automatically, so you should
+ * be sure to call `record()` with some sort of suitable undo
+ * action.
+ */
+
+ self.values.get_mut(index)
+ }
+
+ pub fn set(&mut self, index: uint, new_elem: T) {
+ /*!
+ * Updates the element at the given index. The old value will
+ * be saved (and perhaps restored) if a snapshot is active.
+ */
+
+ let old_elem = mem::replace(self.values.get_mut(index), new_elem);
+ if self.in_snapshot() {
+ self.undo_log.push(SetElem(index, old_elem));
+ }
+ }
+
+ /// Opens a (possibly nested) snapshot. The returned token records the
+ /// undo-log position of the matching `OpenSnapshot` marker and must be
+ /// consumed by exactly one of `rollback_to` or `commit`.
+ pub fn start_snapshot(&mut self) -> Snapshot {
+ let length = self.undo_log.len();
+ self.undo_log.push(OpenSnapshot);
+ Snapshot { length: length,
+ marker: marker::NoCopy }
+ }
+
+ fn assert_open_snapshot(&self, snapshot: &Snapshot) {
+ // Or else there was a failure to follow a stack discipline:
+ assert!(self.undo_log.len() > snapshot.length);
+
+ // Invariant established by start_snapshot():
+ assert!(
+ match *self.undo_log.get(snapshot.length) {
+ OpenSnapshot => true,
+ _ => false
+ });
+ }
+
+ /// Undoes, in reverse order, every logged action taken since
+ /// `snapshot` was opened, then removes the snapshot's own marker.
+ pub fn rollback_to(&mut self, snapshot: Snapshot) {
+ debug!("rollback_to({})", snapshot.length);
+
+ self.assert_open_snapshot(&snapshot);
+
+ while self.undo_log.len() > snapshot.length + 1 {
+ match self.undo_log.pop().unwrap() {
+ OpenSnapshot => {
+ // This indicates a failure to obey the stack discipline.
+ fail!("Cannot rollback an uncommitted snapshot");
+ }
+
+ CommittedSnapshot => {
+ // This occurs when there are nested snapshots and
+ // the inner is committed but outer is rolled back.
+ }
+
+ NewElem(i) => {
+ self.values.pop();
+ assert!(self.values.len() == i);
+ }
+
+ SetElem(i, v) => {
+ *self.values.get_mut(i) = v;
+ }
+
+ Other(u) => {
+ self.delegate.reverse(&mut self.values, u);
+ }
+ }
+ }
+
+ // Pop the OpenSnapshot marker itself, restoring the log to its
+ // pre-snapshot length.
+ let v = self.undo_log.pop().unwrap();
+ assert!(match v { OpenSnapshot => true, _ => false });
+ assert!(self.undo_log.len() == snapshot.length);
+ }
+
+ /**
+ * Commits all changes since the last snapshot. Of course, they
+ * can still be undone if there is a snapshot further out.
+ */
+ pub fn commit(&mut self, snapshot: Snapshot) {
+ debug!("commit({})", snapshot.length);
+
+ self.assert_open_snapshot(&snapshot);
+
+ if snapshot.length == 0 {
+ // The root snapshot.
+ self.undo_log.truncate(0);
+ } else {
+ // A nested snapshot: leave the log entries in place for the
+ // enclosing snapshot, but mark this one as committed.
+ *self.undo_log.get_mut(snapshot.length) = CommittedSnapshot;
+ }
+ }
+}
}
abi::OsWindows => {
- // FIXME: Test this. Copied from linux (#2398)
+ // FIXME: Test this. Copied from Linux (#2398)
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-\
f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-\
s0:64:64-f80:128:128-n8:16:32:64-S128".to_string()
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(non_uppercase_pattern_statics)]
+#![allow(non_uppercase_statics)]
#![allow(non_camel_case_types)]
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
#![allow(dead_code)]
#![crate_name = "rustc_llvm"]
AD_Intel = 1
}
-#[deriving(PartialEq)]
+#[deriving(PartialEq, Clone)]
#[repr(C)]
pub enum CodeGenOptLevel {
CodeGenLevelNone = 0,
FlagObjcClassComplete = 1 << 9,
FlagObjectPointer = 1 << 10,
FlagVector = 1 << 11,
- FlagStaticMember = 1 << 12
+ FlagStaticMember = 1 << 12,
+ FlagIndirectVariable = 1 << 13,
+ FlagLValueReference = 1 << 14,
+ FlagRValueReference = 1 << 15
}
}
use rustc::middle::subst;
use rustc::middle::stability;
-use core;
+use core::DocContext;
use doctree;
use clean;
///
/// The returned value is `None` if the `id` could not be inlined, and `Some`
/// of a vector of items if it was successfully expanded.
-pub fn try_inline(id: ast::NodeId, into: Option<ast::Ident>)
+pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option<ast::Ident>)
-> Option<Vec<clean::Item>> {
- let cx = ::ctxtkey.get().unwrap();
- let tcx = match cx.maybe_typed {
- core::Typed(ref tycx) => tycx,
- core::NotTyped(_) => return None,
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return None,
};
let def = match tcx.def_map.borrow().find(&id) {
Some(def) => *def,
};
let did = def.def_id();
if ast_util::is_local(did) { return None }
- try_inline_def(&**cx, tcx, def).map(|vec| {
+ try_inline_def(cx, tcx, def).map(|vec| {
vec.move_iter().map(|mut item| {
match into {
Some(into) if item.name.is_some() => {
- item.name = Some(into.clean());
+ item.name = Some(into.clean(cx));
}
_ => {}
}
})
}
-fn try_inline_def(cx: &core::DocContext,
- tcx: &ty::ctxt,
+fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt,
def: def::Def) -> Option<Vec<clean::Item>> {
let mut ret = Vec::new();
let did = def.def_id();
let inner = match def {
def::DefTrait(did) => {
record_extern_fqn(cx, did, clean::TypeTrait);
- clean::TraitItem(build_external_trait(tcx, did))
+ clean::TraitItem(build_external_trait(cx, tcx, did))
}
def::DefFn(did, style) => {
// If this function is a tuple struct constructor, we just skip it
return None
}
record_extern_fqn(cx, did, clean::TypeFunction);
- clean::FunctionItem(build_external_function(tcx, did, style))
+ clean::FunctionItem(build_external_function(cx, tcx, did, style))
}
def::DefStruct(did) => {
record_extern_fqn(cx, did, clean::TypeStruct);
ret.extend(build_impls(cx, tcx, did).move_iter());
- clean::StructItem(build_struct(tcx, did))
+ clean::StructItem(build_struct(cx, tcx, did))
}
def::DefTy(did) => {
record_extern_fqn(cx, did, clean::TypeEnum);
ret.extend(build_impls(cx, tcx, did).move_iter());
- build_type(tcx, did)
+ build_type(cx, tcx, did)
}
// Assume that the enum type is reexported next to the variant, and
// variants don't show up in documentation specially.
}
def::DefStatic(did, mtbl) => {
record_extern_fqn(cx, did, clean::TypeStatic);
- clean::StaticItem(build_static(tcx, did, mtbl))
+ clean::StaticItem(build_static(cx, tcx, did, mtbl))
}
_ => return None,
};
let fqn = csearch::get_item_path(tcx, did);
- cx.inlined.borrow_mut().get_mut_ref().insert(did);
+ cx.inlined.borrow_mut().as_mut().unwrap().insert(did);
ret.push(clean::Item {
source: clean::Span::empty(),
name: Some(fqn.last().unwrap().to_string()),
- attrs: load_attrs(tcx, did),
+ attrs: load_attrs(cx, tcx, did),
inner: inner,
visibility: Some(ast::Public),
- stability: stability::lookup(tcx, did).clean(),
+ stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
Some(ret)
}
-pub fn load_attrs(tcx: &ty::ctxt, did: ast::DefId) -> Vec<clean::Attribute> {
+pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt,
+ did: ast::DefId) -> Vec<clean::Attribute> {
let mut attrs = Vec::new();
csearch::get_item_attrs(&tcx.sess.cstore, did, |v| {
attrs.extend(v.move_iter().map(|a| {
- a.clean()
+ a.clean(cx)
}));
});
attrs
///
/// These names are used later on by HTML rendering to generate things like
/// source links back to the original item.
-pub fn record_extern_fqn(cx: &core::DocContext,
- did: ast::DefId,
- kind: clean::TypeKind) {
- match cx.maybe_typed {
- core::Typed(ref tcx) => {
+pub fn record_extern_fqn(cx: &DocContext, did: ast::DefId, kind: clean::TypeKind) {
+ match cx.tcx_opt() {
+ Some(tcx) => {
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
- cx.external_paths.borrow_mut().get_mut_ref().insert(did, (fqn, kind));
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
}
- core::NotTyped(..) => {}
+ None => {}
}
}
-pub fn build_external_trait(tcx: &ty::ctxt, did: ast::DefId) -> clean::Trait {
+pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt,
+ did: ast::DefId) -> clean::Trait {
let def = ty::lookup_trait_def(tcx, did);
- let trait_items = ty::trait_items(tcx, did).clean();
+ let trait_items = ty::trait_items(tcx, did).clean(cx);
let provided = ty::provided_trait_methods(tcx, did);
let mut items = trait_items.move_iter().map(|trait_item| {
if provided.iter().any(|a| a.def_id == trait_item.def_id) {
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
- let bounds = trait_def.bounds.clean();
+ let bounds = trait_def.bounds.clean(cx);
clean::Trait {
- generics: (&def.generics, subst::TypeSpace).clean(),
+ generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
}
}
-fn build_external_function(tcx: &ty::ctxt,
+fn build_external_function(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
style: ast::FnStyle) -> clean::Function {
let t = ty::lookup_item_type(tcx, did);
clean::Function {
decl: match ty::get(t.ty).sty {
- ty::ty_bare_fn(ref f) => (did, &f.sig).clean(),
+ ty::ty_bare_fn(ref f) => (did, &f.sig).clean(cx),
_ => fail!("bad function"),
},
- generics: (&t.generics, subst::FnSpace).clean(),
+ generics: (&t.generics, subst::FnSpace).clean(cx),
fn_style: style,
}
}
-fn build_struct(tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
+fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::Struct {
use syntax::parse::token::special_idents::unnamed_field;
let t = ty::lookup_item_type(tcx, did);
[ref f, ..] if f.name == unnamed_field.name => doctree::Tuple,
_ => doctree::Plain,
},
- generics: (&t.generics, subst::TypeSpace).clean(),
- fields: fields.clean(),
+ generics: (&t.generics, subst::TypeSpace).clean(cx),
+ fields: fields.clean(cx),
fields_stripped: false,
}
}
-fn build_type(tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
+fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId) -> clean::ItemEnum {
let t = ty::lookup_item_type(tcx, did);
match ty::get(t.ty).sty {
ty::ty_enum(edid, _) if !csearch::is_typedef(&tcx.sess.cstore, did) => {
return clean::EnumItem(clean::Enum {
- generics: (&t.generics, subst::TypeSpace).clean(),
+ generics: (&t.generics, subst::TypeSpace).clean(cx),
variants_stripped: false,
- variants: ty::enum_variants(tcx, edid).clean(),
+ variants: ty::enum_variants(tcx, edid).clean(cx),
})
}
_ => {}
}
clean::TypedefItem(clean::Typedef {
- type_: t.ty.clean(),
- generics: (&t.generics, subst::TypeSpace).clean(),
+ type_: t.ty.clean(cx),
+ generics: (&t.generics, subst::TypeSpace).clean(cx),
})
}
-fn build_impls(cx: &core::DocContext,
- tcx: &ty::ctxt,
+fn build_impls(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Vec<clean::Item> {
ty::populate_implementations_for_type_if_necessary(tcx, did);
let mut impls = Vec::new();
populate_impls(cx, tcx, def, &mut impls)
});
- fn populate_impls(cx: &core::DocContext,
- tcx: &ty::ctxt,
+ fn populate_impls(cx: &DocContext, tcx: &ty::ctxt,
def: decoder::DefLike,
impls: &mut Vec<Option<clean::Item>>) {
match def {
impls.move_iter().filter_map(|a| a).collect()
}
-fn build_impl(cx: &core::DocContext,
- tcx: &ty::ctxt,
+fn build_impl(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> Option<clean::Item> {
- if !cx.inlined.borrow_mut().get_mut_ref().insert(did) {
+ if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) {
return None
}
// If this is an impl for a #[doc(hidden)] trait, be sure to not inline it.
match associated_trait {
Some(ref t) => {
- let trait_attrs = load_attrs(tcx, t.def_id);
+ let trait_attrs = load_attrs(cx, tcx, t.def_id);
if trait_attrs.iter().any(|a| is_doc_hidden(a)) {
return None
}
None => {}
}
- let attrs = load_attrs(tcx, did);
+ let attrs = load_attrs(cx, tcx, did);
let ty = ty::lookup_item_type(tcx, did);
let trait_items = csearch::get_impl_items(&tcx.sess.cstore, did)
.iter()
if method.vis != ast::Public && associated_trait.is_none() {
return None
}
- let mut item = method.clean();
+ let mut item = method.clean(cx);
item.inner = match item.inner.clone() {
clean::TyMethodItem(clean::TyMethod {
fn_style, decl, self_, generics
return Some(clean::Item {
inner: clean::ImplItem(clean::Impl {
derived: clean::detect_derived(attrs.as_slice()),
- trait_: associated_trait.clean().map(|bound| {
+ trait_: associated_trait.clean(cx).map(|bound| {
match bound {
clean::TraitBound(ty) => ty,
clean::RegionBound => unreachable!(),
}
}),
- for_: ty.ty.clean(),
- generics: (&ty.generics, subst::TypeSpace).clean(),
+ for_: ty.ty.clean(cx),
+ generics: (&ty.generics, subst::TypeSpace).clean(cx),
items: trait_items,
}),
source: clean::Span::empty(),
name: None,
attrs: attrs,
visibility: Some(ast::Inherited),
- stability: stability::lookup(tcx, did).clean(),
+ stability: stability::lookup(tcx, did).clean(cx),
def_id: did,
});
}
}
-fn build_module(cx: &core::DocContext, tcx: &ty::ctxt,
+fn build_module(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId) -> clean::Module {
let mut items = Vec::new();
fill_in(cx, tcx, did, &mut items);
// FIXME: this doesn't handle reexports inside the module itself.
// Should they be handled?
- fn fill_in(cx: &core::DocContext, tcx: &ty::ctxt, did: ast::DefId,
+ fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: ast::DefId,
items: &mut Vec<clean::Item>) {
csearch::each_child_of_item(&tcx.sess.cstore, did, |def, _, vis| {
match def {
}
}
-fn build_static(tcx: &ty::ctxt,
+fn build_static(cx: &DocContext, tcx: &ty::ctxt,
did: ast::DefId,
mutable: bool) -> clean::Static {
clean::Static {
- type_: ty::lookup_item_type(tcx, did).ty.clean(),
+ type_: ty::lookup_item_type(tcx, did).ty.clean(cx),
mutability: if mutable {clean::Mutable} else {clean::Immutable},
expr: "\n\n\n".to_string(), // trigger the "[definition]" links
}
use std::u32;
use std::gc::{Gc, GC};
-use core;
+use core::DocContext;
use doctree;
use visit_ast;
mod inline;
-// load the current DocContext from TLD
-fn get_cx() -> Gc<core::DocContext> {
- *super::ctxtkey.get().unwrap()
-}
-
-// extract the stability index for a node from TLD, if possible
-fn get_stability(def_id: ast::DefId) -> Option<Stability> {
- get_cx().tcx_opt().and_then(|tcx| stability::lookup(tcx, def_id))
- .map(|stab| stab.clean())
+// extract the stability index for a node from tcx, if possible
+fn get_stability(cx: &DocContext, def_id: ast::DefId) -> Option<Stability> {
+ cx.tcx_opt().and_then(|tcx| stability::lookup(tcx, def_id)).clean(cx)
}
+/// Conversion from an AST- or ty-level value into its cleaned rustdoc
+/// form; the `DocContext` is now threaded explicitly instead of being
+/// fetched from task-local data.
pub trait Clean<T> {
- fn clean(&self) -> T;
+ fn clean(&self, cx: &DocContext) -> T;
}
+// Structural impls: cleaning a container cleans each contained element,
+// threading the DocContext through unchanged.
impl<T: Clean<U>, U> Clean<Vec<U>> for Vec<T> {
- fn clean(&self) -> Vec<U> {
- self.iter().map(|x| x.clean()).collect()
+ fn clean(&self, cx: &DocContext) -> Vec<U> {
+ self.iter().map(|x| x.clean(cx)).collect()
}
}
impl<T: Clean<U>, U> Clean<VecPerParamSpace<U>> for VecPerParamSpace<T> {
- fn clean(&self) -> VecPerParamSpace<U> {
- self.map(|x| x.clean())
+ fn clean(&self, cx: &DocContext) -> VecPerParamSpace<U> {
+ self.map(|x| x.clean(cx))
}
}
impl<T: 'static + Clean<U>, U> Clean<U> for Gc<T> {
- fn clean(&self) -> U {
- (**self).clean()
+ fn clean(&self, cx: &DocContext) -> U {
+ (**self).clean(cx)
}
}
impl<T: Clean<U>, U> Clean<U> for Rc<T> {
- fn clean(&self) -> U {
- (**self).clean()
+ fn clean(&self, cx: &DocContext) -> U {
+ (**self).clean(cx)
}
}
impl<T: Clean<U>, U> Clean<Option<U>> for Option<T> {
- fn clean(&self) -> Option<U> {
+ fn clean(&self, cx: &DocContext) -> Option<U> {
match self {
&None => None,
- &Some(ref v) => Some(v.clean())
+ &Some(ref v) => Some(v.clean(cx))
}
}
}
impl<T: Clean<U>, U> Clean<Vec<U>> for syntax::owned_slice::OwnedSlice<T> {
- fn clean(&self) -> Vec<U> {
- self.iter().map(|x| x.clean()).collect()
+ fn clean(&self, cx: &DocContext) -> Vec<U> {
+ self.iter().map(|x| x.clean(cx)).collect()
}
}
pub primitives: Vec<Primitive>,
}
-impl<'a> Clean<Crate> for visit_ast::RustdocVisitor<'a> {
- fn clean(&self) -> Crate {
- let cx = get_cx();
-
+impl<'a, 'tcx> Clean<Crate> for visit_ast::RustdocVisitor<'a, 'tcx> {
+ fn clean(&self, cx: &DocContext) -> Crate {
let mut externs = Vec::new();
cx.sess().cstore.iter_crate_data(|n, meta| {
- externs.push((n, meta.clean()));
+ externs.push((n, meta.clean(cx)));
});
externs.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// Clean the crate, translating the entire libsyntax AST to one that is
// understood by rustdoc.
- let mut module = self.module.clean();
+ let mut module = self.module.clean(cx);
// Collect all inner modules which are tagged as implementations of
// primitives.
}
impl Clean<ExternalCrate> for cstore::crate_metadata {
- fn clean(&self) -> ExternalCrate {
+ fn clean(&self, cx: &DocContext) -> ExternalCrate {
let mut primitives = Vec::new();
- get_cx().tcx_opt().map(|tcx| {
+ cx.tcx_opt().map(|tcx| {
csearch::each_top_level_item_of_crate(&tcx.sess.cstore,
self.cnum,
|def, _, _| {
decoder::DlDef(def::DefMod(did)) => did,
_ => return
};
- let attrs = inline::load_attrs(tcx, did);
+ let attrs = inline::load_attrs(cx, tcx, did);
Primitive::find(attrs.as_slice()).map(|prim| primitives.push(prim));
})
});
ExternalCrate {
name: self.name.to_string(),
- attrs: decoder::get_crate_attributes(self.data()).clean(),
+ attrs: decoder::get_crate_attributes(self.data()).clean(cx),
primitives: primitives,
}
}
}
impl Clean<Item> for doctree::Module {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let name = if self.name.is_some() {
- self.name.unwrap().clean()
+ self.name.unwrap().clean(cx)
} else {
"".to_string()
};
let mut foreigns = Vec::new();
- for subforeigns in self.foreigns.clean().move_iter() {
+ for subforeigns in self.foreigns.clean(cx).move_iter() {
for foreign in subforeigns.move_iter() {
foreigns.push(foreign)
}
}
let items: Vec<Vec<Item> > = vec!(
- self.structs.clean(),
- self.enums.clean(),
- self.fns.clean(),
+ self.structs.clean(cx),
+ self.enums.clean(cx),
+ self.fns.clean(cx),
foreigns,
- self.mods.clean(),
- self.typedefs.clean(),
- self.statics.clean(),
- self.traits.clean(),
- self.impls.clean(),
- self.view_items.clean().move_iter()
+ self.mods.clean(cx),
+ self.typedefs.clean(cx),
+ self.statics.clean(cx),
+ self.traits.clean(cx),
+ self.impls.clean(cx),
+ self.view_items.clean(cx).move_iter()
.flat_map(|s| s.move_iter()).collect(),
- self.macros.clean(),
+ self.macros.clean(cx),
);
// determine if we should display the inner contents or
// the outer `mod` item for the source code.
let whence = {
- let ctxt = super::ctxtkey.get().unwrap();
- let cm = ctxt.sess().codemap();
+ let cm = cx.sess().codemap();
let outer = cm.lookup_char_pos(self.where_outer.lo);
let inner = cm.lookup_char_pos(self.where_inner.lo);
if outer.file.start_pos == inner.file.start_pos {
Item {
name: Some(name),
- attrs: self.attrs.clean(),
- source: whence.clean(),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ attrs: self.attrs.clean(cx),
+ source: whence.clean(cx),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: ModuleItem(Module {
is_crate: self.is_crate,
}
impl Clean<Attribute> for ast::MetaItem {
- fn clean(&self) -> Attribute {
+ fn clean(&self, cx: &DocContext) -> Attribute {
match self.node {
ast::MetaWord(ref s) => Word(s.get().to_string()),
ast::MetaList(ref s, ref l) => {
- List(s.get().to_string(), l.clean())
+ List(s.get().to_string(), l.clean(cx))
}
ast::MetaNameValue(ref s, ref v) => {
NameValue(s.get().to_string(), lit_to_string(v))
}
impl Clean<Attribute> for ast::Attribute {
- fn clean(&self) -> Attribute {
- self.desugar_doc().node.value.clean()
+ fn clean(&self, cx: &DocContext) -> Attribute {
+ self.desugar_doc().node.value.clean(cx)
}
}
}
impl Clean<TyParam> for ast::TyParam {
- fn clean(&self) -> TyParam {
+ fn clean(&self, cx: &DocContext) -> TyParam {
TyParam {
- name: self.ident.clean(),
+ name: self.ident.clean(cx),
did: ast::DefId { krate: ast::LOCAL_CRATE, node: self.id },
- bounds: self.bounds.clean(),
- default: self.default.clean()
+ bounds: self.bounds.clean(cx),
+ default: self.default.clean(cx)
}
}
}
impl Clean<TyParam> for ty::TypeParameterDef {
- fn clean(&self) -> TyParam {
- get_cx().external_typarams.borrow_mut().get_mut_ref()
- .insert(self.def_id, self.ident.clean());
+ fn clean(&self, cx: &DocContext) -> TyParam {
+ cx.external_typarams.borrow_mut().as_mut().unwrap()
+ .insert(self.def_id, self.ident.clean(cx));
TyParam {
- name: self.ident.clean(),
+ name: self.ident.clean(cx),
did: self.def_id,
- bounds: self.bounds.clean(),
- default: self.default.clean()
+ bounds: self.bounds.clean(cx),
+ default: self.default.clean(cx)
}
}
}
}
impl Clean<TyParamBound> for ast::TyParamBound {
- fn clean(&self) -> TyParamBound {
+ fn clean(&self, cx: &DocContext) -> TyParamBound {
match *self {
ast::RegionTyParamBound(_) => RegionBound,
ast::UnboxedFnTyParamBound(_) => {
// FIXME(pcwalton): Wrong.
RegionBound
}
- ast::TraitTyParamBound(ref t) => TraitBound(t.clean()),
+ ast::TraitTyParamBound(ref t) => TraitBound(t.clean(cx)),
}
}
}
impl Clean<Vec<TyParamBound>> for ty::ExistentialBounds {
- fn clean(&self) -> Vec<TyParamBound> {
+ fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
let mut vec = vec!(RegionBound);
for bb in self.builtin_bounds.iter() {
- vec.push(bb.clean());
+ vec.push(bb.clean(cx));
}
vec
}
}
-fn external_path(name: &str, substs: &subst::Substs) -> Path {
+fn external_path(cx: &DocContext, name: &str, substs: &subst::Substs) -> Path {
let lifetimes = substs.regions().get_slice(subst::TypeSpace)
.iter()
- .filter_map(|v| v.clean())
+ .filter_map(|v| v.clean(cx))
.collect();
let types = Vec::from_slice(substs.types.get_slice(subst::TypeSpace));
- let types = types.clean();
+ let types = types.clean(cx);
Path {
global: false,
segments: vec![PathSegment {
}
impl Clean<TyParamBound> for ty::BuiltinBound {
- fn clean(&self) -> TyParamBound {
- let cx = get_cx();
- let tcx = match cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return RegionBound,
+ fn clean(&self, cx: &DocContext) -> TyParamBound {
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return RegionBound,
};
let empty = subst::Substs::empty();
let (did, path) = match *self {
ty::BoundSend =>
(tcx.lang_items.send_trait().unwrap(),
- external_path("Send", &empty)),
+ external_path(cx, "Send", &empty)),
ty::BoundSized =>
(tcx.lang_items.sized_trait().unwrap(),
- external_path("Sized", &empty)),
+ external_path(cx, "Sized", &empty)),
ty::BoundCopy =>
(tcx.lang_items.copy_trait().unwrap(),
- external_path("Copy", &empty)),
+ external_path(cx, "Copy", &empty)),
ty::BoundSync =>
(tcx.lang_items.sync_trait().unwrap(),
- external_path("Sync", &empty)),
+ external_path(cx, "Sync", &empty)),
};
let fqn = csearch::get_item_path(tcx, did);
let fqn = fqn.move_iter().map(|i| i.to_string()).collect();
- cx.external_paths.borrow_mut().get_mut_ref().insert(did,
- (fqn, TypeTrait));
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(did,
+ (fqn, TypeTrait));
TraitBound(ResolvedPath {
path: path,
typarams: None,
}
impl Clean<TyParamBound> for ty::TraitRef {
- fn clean(&self) -> TyParamBound {
- let cx = get_cx();
- let tcx = match cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return RegionBound,
+ fn clean(&self, cx: &DocContext) -> TyParamBound {
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return RegionBound,
};
let fqn = csearch::get_item_path(tcx, self.def_id);
let fqn = fqn.move_iter().map(|i| i.to_string())
.collect::<Vec<String>>();
- let path = external_path(fqn.last().unwrap().as_slice(),
+ let path = external_path(cx, fqn.last().unwrap().as_slice(),
&self.substs);
- cx.external_paths.borrow_mut().get_mut_ref().insert(self.def_id,
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(self.def_id,
(fqn, TypeTrait));
TraitBound(ResolvedPath {
path: path,
}
impl Clean<Vec<TyParamBound>> for ty::ParamBounds {
- fn clean(&self) -> Vec<TyParamBound> {
+ fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
let mut v = Vec::new();
for b in self.builtin_bounds.iter() {
if b != ty::BoundSized {
- v.push(b.clean());
+ v.push(b.clean(cx));
}
}
for t in self.trait_bounds.iter() {
- v.push(t.clean());
+ v.push(t.clean(cx));
}
return v;
}
}
impl Clean<Option<Vec<TyParamBound>>> for subst::Substs {
- fn clean(&self) -> Option<Vec<TyParamBound>> {
+ fn clean(&self, cx: &DocContext) -> Option<Vec<TyParamBound>> {
let mut v = Vec::new();
v.extend(self.regions().iter().map(|_| RegionBound));
- v.extend(self.types.iter().map(|t| TraitBound(t.clean())));
+ v.extend(self.types.iter().map(|t| TraitBound(t.clean(cx))));
if v.len() > 0 {Some(v)} else {None}
}
}
}
+// These impls only stringify a lifetime name, so the DocContext
+// parameter is unused (`_`).
impl Clean<Lifetime> for ast::Lifetime {
- fn clean(&self) -> Lifetime {
+ fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(token::get_name(self.name).get().to_string())
}
}
impl Clean<Lifetime> for ast::LifetimeDef {
- fn clean(&self) -> Lifetime {
+ fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(token::get_name(self.lifetime.name).get().to_string())
}
}
impl Clean<Lifetime> for ty::RegionParameterDef {
- fn clean(&self) -> Lifetime {
+ fn clean(&self, _: &DocContext) -> Lifetime {
Lifetime(token::get_name(self.name).get().to_string())
}
}
impl Clean<Option<Lifetime>> for ty::Region {
- fn clean(&self) -> Option<Lifetime> {
+ fn clean(&self, cx: &DocContext) -> Option<Lifetime> {
match *self {
ty::ReStatic => Some(Lifetime("'static".to_string())),
ty::ReLateBound(_, ty::BrNamed(_, name)) =>
Some(Lifetime(token::get_name(name).get().to_string())),
- ty::ReEarlyBound(_, _, _, name) => Some(Lifetime(name.clean())),
+ ty::ReEarlyBound(_, _, _, name) => Some(Lifetime(name.clean(cx))),
ty::ReLateBound(..) |
ty::ReFree(..) |
}
impl Clean<Generics> for ast::Generics {
- fn clean(&self) -> Generics {
+ fn clean(&self, cx: &DocContext) -> Generics {
Generics {
- lifetimes: self.lifetimes.clean(),
- type_params: self.ty_params.clean(),
+ lifetimes: self.lifetimes.clean(cx),
+ type_params: self.ty_params.clean(cx),
}
}
}
impl<'a> Clean<Generics> for (&'a ty::Generics, subst::ParamSpace) {
- fn clean(&self) -> Generics {
+ fn clean(&self, cx: &DocContext) -> Generics {
let (me, space) = *self;
Generics {
- type_params: Vec::from_slice(me.types.get_slice(space)).clean(),
- lifetimes: Vec::from_slice(me.regions.get_slice(space)).clean(),
+ type_params: Vec::from_slice(me.types.get_slice(space)).clean(cx),
+ lifetimes: Vec::from_slice(me.regions.get_slice(space)).clean(cx),
}
}
}
}
impl Clean<Item> for ast::Method {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let all_inputs = &self.pe_fn_decl().inputs;
let inputs = match self.pe_explicit_self().node {
ast::SelfStatic => all_inputs.as_slice(),
};
let decl = FnDecl {
inputs: Arguments {
- values: inputs.iter().map(|x| x.clean()).collect(),
+ values: inputs.iter().map(|x| x.clean(cx)).collect(),
},
- output: (self.pe_fn_decl().output.clean()),
- cf: self.pe_fn_decl().cf.clean(),
+ output: (self.pe_fn_decl().output.clean(cx)),
+ cf: self.pe_fn_decl().cf.clean(cx),
attrs: Vec::new()
};
Item {
- name: Some(self.pe_ident().clean()),
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ name: Some(self.pe_ident().clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.pe_vis().clean(),
- stability: get_stability(ast_util::local_def(self.id)),
+ visibility: self.pe_vis().clean(cx),
+ stability: get_stability(cx, ast_util::local_def(self.id)),
inner: MethodItem(Method {
- generics: self.pe_generics().clean(),
- self_: self.pe_explicit_self().node.clean(),
+ generics: self.pe_generics().clean(cx),
+ self_: self.pe_explicit_self().node.clean(cx),
fn_style: self.pe_fn_style().clone(),
decl: decl,
}),
}
impl Clean<Item> for ast::TypeMethod {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let inputs = match self.explicit_self.node {
ast::SelfStatic => self.decl.inputs.as_slice(),
_ => self.decl.inputs.slice_from(1)
};
let decl = FnDecl {
inputs: Arguments {
- values: inputs.iter().map(|x| x.clean()).collect(),
+ values: inputs.iter().map(|x| x.clean(cx)).collect(),
},
- output: (self.decl.output.clean()),
- cf: self.decl.cf.clean(),
+ output: (self.decl.output.clean(cx)),
+ cf: self.decl.cf.clean(cx),
attrs: Vec::new()
};
Item {
- name: Some(self.ident.clean()),
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ name: Some(self.ident.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(self.id),
visibility: None,
- stability: get_stability(ast_util::local_def(self.id)),
+ stability: get_stability(cx, ast_util::local_def(self.id)),
inner: TyMethodItem(TyMethod {
fn_style: self.fn_style.clone(),
decl: decl,
- self_: self.explicit_self.node.clean(),
- generics: self.generics.clean(),
+ self_: self.explicit_self.node.clean(cx),
+ generics: self.generics.clean(cx),
}),
}
}
}
+// Maps the AST's explicit-self form onto rustdoc's SelfTy, cleaning any
+// embedded lifetime, mutability, or type with the threaded context.
impl Clean<SelfTy> for ast::ExplicitSelf_ {
- fn clean(&self) -> SelfTy {
+ fn clean(&self, cx: &DocContext) -> SelfTy {
match *self {
ast::SelfStatic => SelfStatic,
ast::SelfValue(_) => SelfValue,
ast::SelfRegion(lt, mt, _) => {
- SelfBorrowed(lt.clean(), mt.clean())
+ SelfBorrowed(lt.clean(cx), mt.clean(cx))
}
- ast::SelfExplicit(typ, _) => SelfExplicit(typ.clean()),
+ ast::SelfExplicit(typ, _) => SelfExplicit(typ.clean(cx)),
}
}
}
}
impl Clean<Item> for doctree::Function {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: FunctionItem(Function {
- decl: self.decl.clean(),
- generics: self.generics.clean(),
+ decl: self.decl.clean(cx),
+ generics: self.generics.clean(cx),
fn_style: self.fn_style,
}),
}
}
impl Clean<ClosureDecl> for ast::ClosureTy {
- fn clean(&self) -> ClosureDecl {
+ fn clean(&self, cx: &DocContext) -> ClosureDecl {
ClosureDecl {
- lifetimes: self.lifetimes.clean(),
- decl: self.decl.clean(),
+ lifetimes: self.lifetimes.clean(cx),
+ decl: self.decl.clean(cx),
onceness: self.onceness,
fn_style: self.fn_style,
- bounds: self.bounds.clean()
+ bounds: self.bounds.clean(cx)
}
}
}
}
impl Clean<FnDecl> for ast::FnDecl {
- fn clean(&self) -> FnDecl {
+ fn clean(&self, cx: &DocContext) -> FnDecl {
FnDecl {
inputs: Arguments {
- values: self.inputs.iter().map(|x| x.clean()).collect(),
+ values: self.inputs.clean(cx),
},
- output: (self.output.clean()),
- cf: self.cf.clean(),
+ output: self.output.clean(cx),
+ cf: self.cf.clean(cx),
attrs: Vec::new()
}
}
}
impl<'a> Clean<FnDecl> for (ast::DefId, &'a ty::FnSig) {
- fn clean(&self) -> FnDecl {
- let cx = get_cx();
+ fn clean(&self, cx: &DocContext) -> FnDecl {
let (did, sig) = *self;
let mut names = if did.node != 0 {
csearch::get_method_arg_names(&cx.tcx().sess.cstore, did).move_iter()
let _ = names.next();
}
FnDecl {
- output: sig.output.clean(),
+ output: sig.output.clean(cx),
cf: Return,
attrs: Vec::new(),
inputs: Arguments {
values: sig.inputs.iter().map(|t| {
Argument {
- type_: t.clean(),
+ type_: t.clean(cx),
id: 0,
name: names.next().unwrap_or("".to_string()),
}
}
impl Clean<Argument> for ast::Arg {
- fn clean(&self) -> Argument {
+ fn clean(&self, cx: &DocContext) -> Argument {
Argument {
name: name_from_pat(&*self.pat),
- type_: (self.ty.clean()),
+ type_: (self.ty.clean(cx)),
id: self.id
}
}
}
impl Clean<RetStyle> for ast::RetStyle {
- fn clean(&self) -> RetStyle {
+ fn clean(&self, _: &DocContext) -> RetStyle {
match *self {
ast::Return => Return,
ast::NoReturn => NoReturn
}
impl Clean<Item> for doctree::Trait {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: TraitItem(Trait {
- items: self.items.clean(),
- generics: self.generics.clean(),
- bounds: self.bounds.clean(),
+ items: self.items.clean(cx),
+ generics: self.generics.clean(cx),
+ bounds: self.bounds.clean(cx),
}),
}
}
}
impl Clean<Type> for ast::TraitRef {
- fn clean(&self) -> Type {
- resolve_type(self.path.clean(), None, self.ref_id)
+ fn clean(&self, cx: &DocContext) -> Type {
+ resolve_type(cx, self.path.clean(cx), None, self.ref_id)
}
}
}
impl Clean<TraitItem> for ast::TraitItem {
- fn clean(&self) -> TraitItem {
+ fn clean(&self, cx: &DocContext) -> TraitItem {
match self {
- &ast::RequiredMethod(ref t) => RequiredMethod(t.clean()),
- &ast::ProvidedMethod(ref t) => ProvidedMethod(t.clean()),
+ &ast::RequiredMethod(ref t) => RequiredMethod(t.clean(cx)),
+ &ast::ProvidedMethod(ref t) => ProvidedMethod(t.clean(cx)),
}
}
}
}
impl Clean<ImplItem> for ast::ImplItem {
- fn clean(&self) -> ImplItem {
+ fn clean(&self, cx: &DocContext) -> ImplItem {
match self {
- &ast::MethodImplItem(ref t) => MethodImplItem(t.clean()),
+ &ast::MethodImplItem(ref t) => MethodImplItem(t.clean(cx)),
}
}
}
impl Clean<Item> for ty::Method {
- fn clean(&self) -> Item {
- let cx = get_cx();
+ fn clean(&self, cx: &DocContext) -> Item {
let (self_, sig) = match self.explicit_self {
- ty::StaticExplicitSelfCategory => (ast::SelfStatic.clean(),
+ ty::StaticExplicitSelfCategory => (ast::SelfStatic.clean(cx),
self.fty.sig.clone()),
s => {
let sig = ty::FnSig {
ty::ByReferenceExplicitSelfCategory(..) => {
match ty::get(self.fty.sig.inputs[0]).sty {
ty::ty_rptr(r, mt) => {
- SelfBorrowed(r.clean(), mt.mutbl.clean())
+ SelfBorrowed(r.clean(cx), mt.mutbl.clean(cx))
}
_ => unreachable!(),
}
}
ty::ByBoxExplicitSelfCategory => {
- SelfExplicit(self.fty.sig.inputs[0].clean())
+ SelfExplicit(self.fty.sig.inputs[0].clean(cx))
}
ty::StaticExplicitSelfCategory => unreachable!(),
};
};
Item {
- name: Some(self.ident.clean()),
+ name: Some(self.ident.clean(cx)),
visibility: Some(ast::Inherited),
- stability: get_stability(self.def_id),
+ stability: get_stability(cx, self.def_id),
def_id: self.def_id,
- attrs: inline::load_attrs(cx.tcx(), self.def_id),
+ attrs: inline::load_attrs(cx, cx.tcx(), self.def_id),
source: Span::empty(),
inner: TyMethodItem(TyMethod {
fn_style: self.fty.fn_style,
- generics: (&self.generics, subst::FnSpace).clean(),
+ generics: (&self.generics, subst::FnSpace).clean(cx),
self_: self_,
- decl: (self.def_id, &sig).clean(),
+ decl: (self.def_id, &sig).clean(cx),
})
}
}
}
impl Clean<Item> for ty::ImplOrTraitItem {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
match *self {
- ty::MethodTraitItem(ref mti) => mti.clean(),
+ ty::MethodTraitItem(ref mti) => mti.clean(cx),
}
}
}
}
impl Clean<Type> for ast::Ty {
- fn clean(&self) -> Type {
+ fn clean(&self, cx: &DocContext) -> Type {
use syntax::ast::*;
match self.node {
TyNil => Primitive(Unit),
- TyPtr(ref m) => RawPointer(m.mutbl.clean(), box m.ty.clean()),
+ TyPtr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)),
TyRptr(ref l, ref m) =>
- BorrowedRef {lifetime: l.clean(), mutability: m.mutbl.clean(),
- type_: box m.ty.clean()},
- TyBox(ty) => Managed(box ty.clean()),
- TyUniq(ty) => Unique(box ty.clean()),
- TyVec(ty) => Vector(box ty.clean()),
- TyFixedLengthVec(ty, ref e) => FixedVector(box ty.clean(),
- e.span.to_src()),
- TyTup(ref tys) => Tuple(tys.iter().map(|x| x.clean()).collect()),
+ BorrowedRef {lifetime: l.clean(cx), mutability: m.mutbl.clean(cx),
+ type_: box m.ty.clean(cx)},
+ TyBox(ty) => Managed(box ty.clean(cx)),
+ TyUniq(ty) => Unique(box ty.clean(cx)),
+ TyVec(ty) => Vector(box ty.clean(cx)),
+ TyFixedLengthVec(ty, ref e) => FixedVector(box ty.clean(cx),
+ e.span.to_src(cx)),
+ TyTup(ref tys) => Tuple(tys.clean(cx)),
TyPath(ref p, ref tpbs, id) => {
- resolve_type(p.clean(),
- tpbs.clean().map(|x| x),
- id)
+ resolve_type(cx, p.clean(cx), tpbs.clean(cx), id)
}
- TyClosure(ref c) => Closure(box c.clean()),
- TyProc(ref c) => Proc(box c.clean()),
- TyBareFn(ref barefn) => BareFunction(box barefn.clean()),
- TyParen(ref ty) => ty.clean(),
+ TyClosure(ref c) => Closure(box c.clean(cx)),
+ TyProc(ref c) => Proc(box c.clean(cx)),
+ TyBareFn(ref barefn) => BareFunction(box barefn.clean(cx)),
+ TyParen(ref ty) => ty.clean(cx),
TyBot => Bottom,
ref x => fail!("Unimplemented type {:?}", x),
}
}
impl Clean<Type> for ty::t {
- fn clean(&self) -> Type {
+ fn clean(&self, cx: &DocContext) -> Type {
match ty::get(*self).sty {
ty::ty_bot => Bottom,
ty::ty_nil => Primitive(Unit),
ty::ty_float(ast::TyF64) => Primitive(F64),
ty::ty_str => Primitive(Str),
ty::ty_box(t) => {
- let gc_did = get_cx().tcx_opt().and_then(|tcx| {
+ let gc_did = cx.tcx_opt().and_then(|tcx| {
tcx.lang_items.gc()
});
- lang_struct(gc_did, t, "Gc", Managed)
+ lang_struct(cx, gc_did, t, "Gc", Managed)
}
ty::ty_uniq(t) => {
- let box_did = get_cx().tcx_opt().and_then(|tcx| {
+ let box_did = cx.tcx_opt().and_then(|tcx| {
tcx.lang_items.owned_box()
});
- lang_struct(box_did, t, "Box", Unique)
+ lang_struct(cx, box_did, t, "Box", Unique)
}
- ty::ty_vec(ty, None) => Vector(box ty.clean()),
- ty::ty_vec(ty, Some(i)) => FixedVector(box ty.clean(),
+ ty::ty_vec(ty, None) => Vector(box ty.clean(cx)),
+ ty::ty_vec(ty, Some(i)) => FixedVector(box ty.clean(cx),
format!("{}", i)),
- ty::ty_ptr(mt) => RawPointer(mt.mutbl.clean(), box mt.ty.clean()),
+ ty::ty_ptr(mt) => RawPointer(mt.mutbl.clean(cx), box mt.ty.clean(cx)),
ty::ty_rptr(r, mt) => BorrowedRef {
- lifetime: r.clean(),
- mutability: mt.mutbl.clean(),
- type_: box mt.ty.clean(),
+ lifetime: r.clean(cx),
+ mutability: mt.mutbl.clean(cx),
+ type_: box mt.ty.clean(cx),
},
ty::ty_bare_fn(ref fty) => BareFunction(box BareFunctionDecl {
fn_style: fty.fn_style,
generics: Generics {
lifetimes: Vec::new(), type_params: Vec::new()
},
- decl: (ast_util::local_def(0), &fty.sig).clean(),
+ decl: (ast_util::local_def(0), &fty.sig).clean(cx),
abi: fty.abi.to_string(),
}),
ty::ty_closure(ref fty) => {
let decl = box ClosureDecl {
lifetimes: Vec::new(), // FIXME: this looks wrong...
- decl: (ast_util::local_def(0), &fty.sig).clean(),
+ decl: (ast_util::local_def(0), &fty.sig).clean(cx),
onceness: fty.onceness,
fn_style: fty.fn_style,
- bounds: fty.bounds.clean(),
+ bounds: fty.bounds.clean(cx),
};
match fty.store {
ty::UniqTraitStore => Proc(decl),
ty::ty_struct(did, ref substs) |
ty::ty_enum(did, ref substs) |
ty::ty_trait(box ty::TyTrait { def_id: did, ref substs, .. }) => {
- let fqn = csearch::get_item_path(get_cx().tcx(), did);
+ let fqn = csearch::get_item_path(cx.tcx(), did);
let fqn: Vec<String> = fqn.move_iter().map(|i| {
i.to_string()
}).collect();
ty::ty_trait(..) => TypeTrait,
_ => TypeEnum,
};
- let path = external_path(fqn.last().unwrap().to_string().as_slice(),
+ let path = external_path(cx, fqn.last().unwrap().to_string().as_slice(),
substs);
- get_cx().external_paths.borrow_mut().get_mut_ref()
- .insert(did, (fqn, kind));
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind));
ResolvedPath {
path: path,
typarams: None,
did: did,
}
}
- ty::ty_tup(ref t) => Tuple(t.iter().map(|t| t.clean()).collect()),
+ ty::ty_tup(ref t) => Tuple(t.clean(cx)),
ty::ty_param(ref p) => {
if p.space == subst::SelfSpace {
}
impl Clean<Item> for ast::StructField {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let (name, vis) = match self.node.kind {
ast::NamedField(id, vis) => (Some(id), vis),
ast::UnnamedField(vis) => (None, vis)
};
Item {
- name: name.clean(),
- attrs: self.node.attrs.clean(),
- source: self.span.clean(),
+ name: name.clean(cx),
+ attrs: self.node.attrs.clean(cx),
+ source: self.span.clean(cx),
visibility: Some(vis),
- stability: get_stability(ast_util::local_def(self.node.id)),
+ stability: get_stability(cx, ast_util::local_def(self.node.id)),
def_id: ast_util::local_def(self.node.id),
- inner: StructFieldItem(TypedStructField(self.node.ty.clean())),
+ inner: StructFieldItem(TypedStructField(self.node.ty.clean(cx))),
}
}
}
impl Clean<Item> for ty::field_ty {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
use syntax::parse::token::special_idents::unnamed_field;
use rustc::metadata::csearch;
- let cx = get_cx();
- let attrs;
-
let attr_map = csearch::get_struct_field_attrs(&cx.tcx().sess.cstore, self.id);
- let name = if self.name == unnamed_field.name {
- attrs = None;
- None
+ let (name, attrs) = if self.name == unnamed_field.name {
+ (None, None)
} else {
- attrs = Some(attr_map.find(&self.id.node).unwrap());
- Some(self.name)
+ (Some(self.name), Some(attr_map.find(&self.id.node).unwrap()))
};
let ty = ty::lookup_item_type(cx.tcx(), self.id);
Item {
- name: name.clean(),
- attrs: attrs.unwrap_or(&Vec::new()).clean(),
+ name: name.clean(cx),
+ attrs: attrs.unwrap_or(&Vec::new()).clean(cx),
source: Span::empty(),
visibility: Some(self.vis),
- stability: get_stability(self.id),
+ stability: get_stability(cx, self.id),
def_id: self.id,
- inner: StructFieldItem(TypedStructField(ty.ty.clean())),
+ inner: StructFieldItem(TypedStructField(ty.ty.clean(cx))),
}
}
}
pub type Visibility = ast::Visibility;
impl Clean<Option<Visibility>> for ast::Visibility {
- fn clean(&self) -> Option<Visibility> {
+ fn clean(&self, _: &DocContext) -> Option<Visibility> {
Some(*self)
}
}
}
impl Clean<Item> for doctree::Struct {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: StructItem(Struct {
struct_type: self.struct_type,
- generics: self.generics.clean(),
- fields: self.fields.clean(),
+ generics: self.generics.clean(cx),
+ fields: self.fields.clean(cx),
fields_stripped: false,
}),
}
}
impl Clean<VariantStruct> for syntax::ast::StructDef {
- fn clean(&self) -> VariantStruct {
+ fn clean(&self, cx: &DocContext) -> VariantStruct {
VariantStruct {
struct_type: doctree::struct_type_from_def(self),
- fields: self.fields.clean(),
+ fields: self.fields.clean(cx),
fields_stripped: false,
}
}
}
impl Clean<Item> for doctree::Enum {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: EnumItem(Enum {
- variants: self.variants.clean(),
- generics: self.generics.clean(),
+ variants: self.variants.clean(cx),
+ generics: self.generics.clean(cx),
variants_stripped: false,
}),
}
}
impl Clean<Item> for doctree::Variant {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: VariantItem(Variant {
- kind: self.kind.clean(),
+ kind: self.kind.clean(cx),
}),
}
}
}
impl Clean<Item> for ty::VariantInfo {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
// use syntax::parse::token::special_idents::unnamed_field;
- let cx = get_cx();
let kind = match self.arg_names.as_ref().map(|s| s.as_slice()) {
None | Some([]) if self.args.len() == 0 => CLikeVariant,
None | Some([]) => {
- TupleVariant(self.args.iter().map(|t| t.clean()).collect())
+ TupleVariant(self.args.clean(cx))
}
Some(s) => {
StructVariant(VariantStruct {
fields: s.iter().zip(self.args.iter()).map(|(name, ty)| {
Item {
source: Span::empty(),
- name: Some(name.clean()),
+ name: Some(name.clean(cx)),
attrs: Vec::new(),
visibility: Some(ast::Public),
// FIXME: this is not accurate, we need an id for
// more infrastructure work before we can get
// at the needed information here.
def_id: self.id,
- stability: get_stability(self.id),
+ stability: get_stability(cx, self.id),
inner: StructFieldItem(
- TypedStructField(ty.clean())
+ TypedStructField(ty.clean(cx))
)
}
}).collect()
}
};
Item {
- name: Some(self.name.clean()),
- attrs: inline::load_attrs(cx.tcx(), self.id),
+ name: Some(self.name.clean(cx)),
+ attrs: inline::load_attrs(cx, cx.tcx(), self.id),
source: Span::empty(),
visibility: Some(ast::Public),
def_id: self.id,
inner: VariantItem(Variant { kind: kind }),
- stability: get_stability(self.id),
+ stability: get_stability(cx, self.id),
}
}
}
}
impl Clean<VariantKind> for ast::VariantKind {
- fn clean(&self) -> VariantKind {
+ fn clean(&self, cx: &DocContext) -> VariantKind {
match self {
&ast::TupleVariantKind(ref args) => {
if args.len() == 0 {
CLikeVariant
} else {
- TupleVariant(args.iter().map(|x| x.ty.clean()).collect())
+ TupleVariant(args.iter().map(|x| x.ty.clean(cx)).collect())
}
},
- &ast::StructVariantKind(ref sd) => StructVariant(sd.clean()),
+ &ast::StructVariantKind(ref sd) => StructVariant(sd.clean(cx)),
}
}
}
}
impl Clean<Span> for syntax::codemap::Span {
- fn clean(&self) -> Span {
- let ctxt = super::ctxtkey.get().unwrap();
- let cm = ctxt.sess().codemap();
+ fn clean(&self, cx: &DocContext) -> Span {
+ let cm = cx.sess().codemap();
let filename = cm.span_to_filename(*self);
let lo = cm.lookup_char_pos(self.lo);
let hi = cm.lookup_char_pos(self.hi);
}
impl Clean<Path> for ast::Path {
- fn clean(&self) -> Path {
+ fn clean(&self, cx: &DocContext) -> Path {
Path {
global: self.global,
- segments: self.segments.clean(),
+ segments: self.segments.clean(cx),
}
}
}
}
impl Clean<PathSegment> for ast::PathSegment {
- fn clean(&self) -> PathSegment {
+ fn clean(&self, cx: &DocContext) -> PathSegment {
PathSegment {
- name: self.identifier.clean(),
- lifetimes: self.lifetimes.clean(),
- types: self.types.clean(),
+ name: self.identifier.clean(cx),
+ lifetimes: self.lifetimes.clean(cx),
+ types: self.types.clean(cx),
}
}
}
}
impl Clean<String> for ast::Ident {
- fn clean(&self) -> String {
+ fn clean(&self, _: &DocContext) -> String {
token::get_ident(*self).get().to_string()
}
}
impl Clean<String> for ast::Name {
- fn clean(&self) -> String {
+ fn clean(&self, _: &DocContext) -> String {
token::get_name(*self).get().to_string()
}
}
}
impl Clean<Item> for doctree::Typedef {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id.clone()),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: TypedefItem(Typedef {
- type_: self.ty.clean(),
- generics: self.gen.clean(),
+ type_: self.ty.clean(cx),
+ generics: self.gen.clean(cx),
}),
}
}
}
impl Clean<BareFunctionDecl> for ast::BareFnTy {
- fn clean(&self) -> BareFunctionDecl {
+ fn clean(&self, cx: &DocContext) -> BareFunctionDecl {
BareFunctionDecl {
fn_style: self.fn_style,
generics: Generics {
- lifetimes: self.lifetimes.clean(),
+ lifetimes: self.lifetimes.clean(cx),
type_params: Vec::new(),
},
- decl: self.decl.clean(),
+ decl: self.decl.clean(cx),
abi: self.abi.to_string(),
}
}
}
impl Clean<Item> for doctree::Static {
- fn clean(&self) -> Item {
- debug!("claning static {}: {:?}", self.name.clean(), self);
+ fn clean(&self, cx: &DocContext) -> Item {
+        debug!("cleaning static {}: {:?}", self.name.clean(cx), self);
Item {
- name: Some(self.name.clean()),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ name: Some(self.name.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: StaticItem(Static {
- type_: self.type_.clean(),
- mutability: self.mutability.clean(),
- expr: self.expr.span.to_src(),
+ type_: self.type_.clean(cx),
+ mutability: self.mutability.clean(cx),
+ expr: self.expr.span.to_src(cx),
}),
}
}
}
impl Clean<Mutability> for ast::Mutability {
- fn clean(&self) -> Mutability {
+ fn clean(&self, _: &DocContext) -> Mutability {
match self {
&ast::MutMutable => Mutable,
&ast::MutImmutable => Immutable,
}
impl Clean<Item> for doctree::Impl {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
name: None,
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: self.stab.clean(),
+ visibility: self.vis.clean(cx),
+ stability: self.stab.clean(cx),
inner: ImplItem(Impl {
- generics: self.generics.clean(),
- trait_: self.trait_.clean(),
- for_: self.for_.clean(),
- items: self.items.clean().move_iter().map(|ti| {
+ generics: self.generics.clean(cx),
+ trait_: self.trait_.clean(cx),
+ for_: self.for_.clean(cx),
+ items: self.items.clean(cx).move_iter().map(|ti| {
match ti {
MethodImplItem(i) => i,
}
}
impl Clean<Vec<Item>> for ast::ViewItem {
- fn clean(&self) -> Vec<Item> {
+ fn clean(&self, cx: &DocContext) -> Vec<Item> {
// We consider inlining the documentation of `pub use` statements, but we
// forcefully don't inline if this is not public or if the
// #[doc(no_inline)] attribute is present.
let convert = |node: &ast::ViewItem_| {
Item {
name: None,
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(0),
- visibility: self.vis.clean(),
+ visibility: self.vis.clean(cx),
stability: None,
- inner: ViewItemItem(ViewItem { inner: node.clean() }),
+ inner: ViewItemItem(ViewItem { inner: node.clean(cx) }),
}
};
let mut ret = Vec::new();
// to keep any non-inlineable reexports so they can be
// listed in the documentation.
let remaining = list.iter().filter(|path| {
- match inline::try_inline(path.node.id(), None) {
+ match inline::try_inline(cx, path.node.id(), None) {
Some(items) => {
ret.extend(items.move_iter()); false
}
}
}
ast::ViewPathSimple(ident, _, id) => {
- match inline::try_inline(id, Some(ident)) {
+ match inline::try_inline(cx, id, Some(ident)) {
Some(items) => ret.extend(items.move_iter()),
None => ret.push(convert(&self.node)),
}
}
impl Clean<ViewItemInner> for ast::ViewItem_ {
- fn clean(&self) -> ViewItemInner {
+ fn clean(&self, cx: &DocContext) -> ViewItemInner {
match self {
&ast::ViewItemExternCrate(ref i, ref p, ref id) => {
let string = match *p {
None => None,
Some((ref x, _)) => Some(x.get().to_string()),
};
- ExternCrate(i.clean(), string, *id)
+ ExternCrate(i.clean(cx), string, *id)
}
&ast::ViewItemUse(ref vp) => {
- Import(vp.clean())
+ Import(vp.clean(cx))
}
}
}
}
impl Clean<ViewPath> for ast::ViewPath {
- fn clean(&self) -> ViewPath {
+ fn clean(&self, cx: &DocContext) -> ViewPath {
match self.node {
ast::ViewPathSimple(ref i, ref p, id) =>
- SimpleImport(i.clean(), resolve_use_source(p.clean(), id)),
+ SimpleImport(i.clean(cx), resolve_use_source(cx, p.clean(cx), id)),
ast::ViewPathGlob(ref p, id) =>
- GlobImport(resolve_use_source(p.clean(), id)),
+ GlobImport(resolve_use_source(cx, p.clean(cx), id)),
ast::ViewPathList(ref p, ref pl, id) => {
- ImportList(resolve_use_source(p.clean(), id),
- pl.clean())
+ ImportList(resolve_use_source(cx, p.clean(cx), id),
+ pl.clean(cx))
}
}
}
}
impl Clean<ViewListIdent> for ast::PathListItem {
- fn clean(&self) -> ViewListIdent {
+ fn clean(&self, cx: &DocContext) -> ViewListIdent {
match self.node {
ast::PathListIdent { id, name } => ViewListIdent {
- name: name.clean(),
- source: resolve_def(id)
+ name: name.clean(cx),
+ source: resolve_def(cx, id)
},
ast::PathListMod { id } => ViewListIdent {
name: "mod".to_string(),
- source: resolve_def(id)
+ source: resolve_def(cx, id)
}
}
}
}
impl Clean<Vec<Item>> for ast::ForeignMod {
- fn clean(&self) -> Vec<Item> {
- self.items.clean()
+ fn clean(&self, cx: &DocContext) -> Vec<Item> {
+ self.items.clean(cx)
}
}
impl Clean<Item> for ast::ForeignItem {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
let inner = match self.node {
ast::ForeignItemFn(ref decl, ref generics) => {
ForeignFunctionItem(Function {
- decl: decl.clean(),
- generics: generics.clean(),
+ decl: decl.clean(cx),
+ generics: generics.clean(cx),
fn_style: ast::UnsafeFn,
})
}
ast::ForeignItemStatic(ref ty, mutbl) => {
ForeignStaticItem(Static {
- type_: ty.clean(),
+ type_: ty.clean(cx),
mutability: if mutbl {Mutable} else {Immutable},
expr: "".to_string(),
})
}
};
Item {
- name: Some(self.ident.clean()),
- attrs: self.attrs.clean(),
- source: self.span.clean(),
+ name: Some(self.ident.clean(cx)),
+ attrs: self.attrs.clean(cx),
+ source: self.span.clean(cx),
def_id: ast_util::local_def(self.id),
- visibility: self.vis.clean(),
- stability: get_stability(ast_util::local_def(self.id)),
+ visibility: self.vis.clean(cx),
+ stability: get_stability(cx, ast_util::local_def(self.id)),
inner: inner,
}
}
// Utilities
trait ToSource {
- fn to_src(&self) -> String;
+ fn to_src(&self, cx: &DocContext) -> String;
}
impl ToSource for syntax::codemap::Span {
- fn to_src(&self) -> String {
- debug!("converting span {:?} to snippet", self.clean());
- let ctxt = super::ctxtkey.get().unwrap();
- let cm = ctxt.sess().codemap().clone();
- let sn = match cm.span_to_snippet(*self) {
+ fn to_src(&self, cx: &DocContext) -> String {
+ debug!("converting span {:?} to snippet", self.clean(cx));
+ let sn = match cx.sess().codemap().span_to_snippet(*self) {
Some(x) => x.to_string(),
None => "".to_string()
};
}
/// Given a Type, resolve it using the def_map
-fn resolve_type(path: Path, tpbs: Option<Vec<TyParamBound>>,
+fn resolve_type(cx: &DocContext, path: Path,
+ tpbs: Option<Vec<TyParamBound>>,
id: ast::NodeId) -> Type {
- let cx = get_cx();
- let tycx = match cx.maybe_typed {
- core::Typed(ref tycx) => tycx,
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
// If we're extracting tests, this return value doesn't matter.
- core::NotTyped(_) => return Primitive(Bool),
+ None => return Primitive(Bool),
};
debug!("searching for {:?} in defmap", id);
- let def = match tycx.def_map.borrow().find(&id) {
+ let def = match tcx.def_map.borrow().find(&id) {
Some(&k) => k,
None => fail!("unresolved id not in defmap")
};
ResolvedPath { path: path, typarams: tpbs, did: did }
}
-fn register_def(cx: &core::DocContext, def: def::Def) -> ast::DefId {
+fn register_def(cx: &DocContext, def: def::Def) -> ast::DefId {
let (did, kind) = match def {
def::DefFn(i, _) => (i, TypeFunction),
def::DefTy(i) => (i, TypeEnum),
_ => return def.def_id()
};
if ast_util::is_local(did) { return did }
- let tcx = match cx.maybe_typed {
- core::Typed(ref t) => t,
- core::NotTyped(_) => return did
+ let tcx = match cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return did
};
inline::record_extern_fqn(cx, did, kind);
match kind {
TypeTrait => {
- let t = inline::build_external_trait(tcx, did);
- cx.external_traits.borrow_mut().get_mut_ref().insert(did, t);
+ let t = inline::build_external_trait(cx, tcx, did);
+ cx.external_traits.borrow_mut().as_mut().unwrap().insert(did, t);
}
_ => {}
}
return did;
}
-fn resolve_use_source(path: Path, id: ast::NodeId) -> ImportSource {
+fn resolve_use_source(cx: &DocContext, path: Path, id: ast::NodeId) -> ImportSource {
ImportSource {
path: path,
- did: resolve_def(id),
+ did: resolve_def(cx, id),
}
}
-fn resolve_def(id: ast::NodeId) -> Option<ast::DefId> {
- get_cx().tcx_opt().and_then(|tcx| {
- tcx.def_map.borrow().find(&id).map(|&def| register_def(&*get_cx(), def))
+fn resolve_def(cx: &DocContext, id: ast::NodeId) -> Option<ast::DefId> {
+ cx.tcx_opt().and_then(|tcx| {
+ tcx.def_map.borrow().find(&id).map(|&def| register_def(cx, def))
})
}
}
impl Clean<Item> for doctree::Macro {
- fn clean(&self) -> Item {
+ fn clean(&self, cx: &DocContext) -> Item {
Item {
- name: Some(format!("{}!", self.name.clean())),
- attrs: self.attrs.clean(),
- source: self.whence.clean(),
- visibility: ast::Public.clean(),
- stability: self.stab.clean(),
+ name: Some(format!("{}!", self.name.clean(cx))),
+ attrs: self.attrs.clean(cx),
+ source: self.whence.clean(cx),
+ visibility: ast::Public.clean(cx),
+ stability: self.stab.clean(cx),
def_id: ast_util::local_def(self.id),
inner: MacroItem(Macro {
- source: self.whence.to_src(),
+ source: self.whence.to_src(cx),
}),
}
}
}
impl Clean<Stability> for attr::Stability {
- fn clean(&self) -> Stability {
+ fn clean(&self, _: &DocContext) -> Stability {
Stability {
level: self.level,
text: self.text.as_ref().map_or("".to_string(),
}
}
-fn lang_struct(did: Option<ast::DefId>, t: ty::t, name: &str,
+fn lang_struct(cx: &DocContext, did: Option<ast::DefId>,
+ t: ty::t, name: &str,
fallback: fn(Box<Type>) -> Type) -> Type {
let did = match did {
Some(did) => did,
- None => return fallback(box t.clean()),
+ None => return fallback(box t.clean(cx)),
};
- let fqn = csearch::get_item_path(get_cx().tcx(), did);
+ let fqn = csearch::get_item_path(cx.tcx(), did);
let fqn: Vec<String> = fqn.move_iter().map(|i| {
i.to_string()
}).collect();
- get_cx().external_paths.borrow_mut().get_mut_ref()
- .insert(did, (fqn, TypeStruct));
+ cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, TypeStruct));
ResolvedPath {
typarams: None,
did: did,
segments: vec![PathSegment {
name: name.to_string(),
lifetimes: vec![],
- types: vec![t.clean()],
+ types: vec![t.clean(cx)],
}],
},
}
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
use std::gc::GC;
use std::os;
use std::collections::{HashMap, HashSet};
+use arena::TypedArena;
use visit_ast::RustdocVisitor;
use clean;
use clean::Clean;
/// Are we generating documentation (`Typed`) or tests (`NotTyped`)?
-pub enum MaybeTyped {
- Typed(middle::ty::ctxt),
+pub enum MaybeTyped<'tcx> {
+ Typed(middle::ty::ctxt<'tcx>),
NotTyped(driver::session::Session)
}
pub type ExternalPaths = RefCell<Option<HashMap<ast::DefId,
(Vec<String>, clean::TypeKind)>>>;
-pub struct DocContext {
+pub struct DocContext<'tcx> {
pub krate: ast::Crate,
- pub maybe_typed: MaybeTyped,
+ pub maybe_typed: MaybeTyped<'tcx>,
pub src: Path,
pub external_paths: ExternalPaths,
pub external_traits: RefCell<Option<HashMap<ast::DefId, clean::Trait>>>,
pub populated_crate_impls: RefCell<HashSet<ast::CrateNum>>,
}
-impl DocContext {
+impl<'tcx> DocContext<'tcx> {
pub fn sess<'a>(&'a self) -> &'a driver::session::Session {
match self.maybe_typed {
Typed(ref tcx) => &tcx.sess,
}
}
- pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt> {
+ pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt<'tcx>> {
match self.maybe_typed {
Typed(ref tcx) => Some(tcx),
NotTyped(_) => None
}
}
- pub fn tcx<'a>(&'a self) -> &'a ty::ctxt {
+ pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> {
let tcx_opt = self.tcx_opt();
tcx_opt.expect("tcx not present")
}
pub type Externs = HashMap<String, Vec<String>>;
/// Parses, resolves, and typechecks the given crate
-fn get_ast_and_resolve(cpath: &Path, libs: HashSet<Path>, cfgs: Vec<String>,
- externs: Externs, triple: Option<String>)
- -> (DocContext, CrateAnalysis) {
+fn get_ast_and_resolve<'tcx>(cpath: &Path, libs: Vec<Path>, cfgs: Vec<String>,
+ externs: Externs, triple: Option<String>,
+ type_arena: &'tcx TypedArena<ty::t_box_>)
+ -> (DocContext<'tcx>, CrateAnalysis) {
use syntax::codemap::dummy_spanned;
use rustc::driver::driver::{FileInput,
phase_1_parse_input,
let driver::driver::CrateAnalysis {
exported_items, public_items, ty_cx, ..
- } = phase_3_run_analysis_passes(sess, &krate, ast_map, name);
+ } = phase_3_run_analysis_passes(sess, &krate, ast_map, type_arena, name);
debug!("crate: {:?}", krate);
(DocContext {
})
}
-pub fn run_core(libs: HashSet<Path>, cfgs: Vec<String>, externs: Externs,
+pub fn run_core(libs: Vec<Path>, cfgs: Vec<String>, externs: Externs,
path: &Path, triple: Option<String>)
-> (clean::Crate, CrateAnalysis) {
- let (ctxt, analysis) = get_ast_and_resolve(path, libs, cfgs, externs, triple);
- let ctxt = box(GC) ctxt;
- super::ctxtkey.replace(Some(ctxt));
+ let type_arena = TypedArena::new();
+ let (ctxt, analysis) = get_ast_and_resolve(path, libs, cfgs, externs,
+ triple, &type_arena);
let krate = {
- let mut v = RustdocVisitor::new(&*ctxt, Some(&analysis));
+ let mut v = RustdocVisitor::new(&ctxt, Some(&analysis));
v.visit(&ctxt.krate);
- v.clean()
+ v.clean(&ctxt)
};
let external_paths = ctxt.external_paths.borrow_mut().take();
static LOCKFILE_EXCLUSIVE_LOCK: libc::DWORD = 0x00000002;
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
extern "system" {
fn LockFileEx(hFile: libc::HANDLE,
dwFlags: libc::DWORD,
let public_items = public_items.unwrap_or(NodeSet::new());
let paths: HashMap<ast::DefId, (Vec<String>, ItemType)> =
analysis.as_ref().map(|a| {
- let paths = a.external_paths.borrow_mut().take_unwrap();
+ let paths = a.external_paths.borrow_mut().take().unwrap();
paths.move_iter().map(|(k, (v, t))| {
(k, (v, match t {
clean::TypeStruct => item_type::Struct,
}).unwrap_or(HashMap::new());
let mut cache = Cache {
impls: HashMap::new(),
- external_paths: paths.iter().map(|(&k, &(ref v, _))| (k, v.clone()))
+ external_paths: paths.iter().map(|(&k, v)| (k, v.ref0().clone()))
.collect(),
paths: paths,
implementors: HashMap::new(),
public_items: public_items,
orphan_methods: Vec::new(),
traits: analysis.as_ref().map(|a| {
- a.external_traits.borrow_mut().take_unwrap()
+ a.external_traits.borrow_mut().take().unwrap()
}).unwrap_or(HashMap::new()),
typarams: analysis.as_ref().map(|a| {
- a.external_typarams.borrow_mut().take_unwrap()
+ a.external_typarams.borrow_mut().take().unwrap()
}).unwrap_or(HashMap::new()),
inlined: analysis.as_ref().map(|a| {
- a.inlined.borrow_mut().take_unwrap()
+ a.inlined.borrow_mut().take().unwrap()
}).unwrap_or(HashSet::new()),
};
cache.stack.push(krate.name.clone());
v.push(Implementor {
def_id: item.def_id,
generics: i.generics.clone(),
- trait_: i.trait_.get_ref().clone(),
+ trait_: i.trait_.as_ref().unwrap().clone(),
for_: i.for_.clone(),
stability: item.stability.clone(),
});
// Index this method for searching later on
match item.name {
Some(ref s) => {
- let parent = match item.inner {
+ let (parent, is_method) = match item.inner {
clean::TyMethodItem(..) |
clean::StructFieldItem(..) |
clean::VariantItem(..) => {
- (Some(*self.parent_stack.last().unwrap()),
- Some(self.stack.slice_to(self.stack.len() - 1)))
+ ((Some(*self.parent_stack.last().unwrap()),
+ Some(self.stack.slice_to(self.stack.len() - 1))),
+ false)
}
clean::MethodItem(..) => {
if self.parent_stack.len() == 0 {
- (None, None)
+ ((None, None), false)
} else {
let last = self.parent_stack.last().unwrap();
let did = *last;
Some(..) => Some(self.stack.as_slice()),
None => None
};
- (Some(*last), path)
+ ((Some(*last), path), true)
}
}
- _ => (None, Some(self.stack.as_slice()))
+ _ => ((None, Some(self.stack.as_slice())), false)
};
let hidden_field = match item.inner {
clean::StructFieldItem(clean::HiddenStructField) => true,
_ => false
};
+
match parent {
- (parent, Some(path)) if !self.privmod && !hidden_field => {
+ (parent, Some(path)) if is_method || (!self.privmod && !hidden_field) => {
self.search_index.push(IndexItem {
ty: shortty(&item),
name: s.to_string(),
parent: parent,
});
}
- (Some(parent), None) if !self.privmod => {
+ (Some(parent), None) if is_method || (!self.privmod && !hidden_field) => {
if ast_util::is_local(parent) {
// We have a parent, but we don't know where they're
// defined yet. Wait for later to index this item.
// Keep track of the fully qualified path for this item.
let pushed = if item.name.is_some() {
- let n = item.name.get_ref();
+ let n = item.name.as_ref().unwrap();
if n.len() > 0 {
self.stack.push(n.to_string());
true
if title.len() > 0 {
title.push_str("::");
}
- title.push_str(it.name.get_ref().as_slice());
+ title.push_str(it.name.as_ref().unwrap().as_slice());
}
title.push_str(" - Rust");
let tyname = shortty(it).to_static_str();
// We have a huge number of calls to write, so try to alleviate some
// of the pain by using a buffered writer instead of invoking the
- // write sycall all the time.
+ // write syscall all the time.
let mut writer = BufferedWriter::new(w);
if !cx.render_redirect_pages {
try!(layout::render(&mut writer, &cx.layout, &page,
// modules are special because they add a namespace. We also need to
// recurse into the items of the module as well.
clean::ModuleItem(..) => {
- let name = item.name.get_ref().to_string();
+ let name = item.name.as_ref().unwrap().to_string();
let mut item = Some(item);
self.recurse(name, |this| {
- let item = item.take_unwrap();
+ let item = item.take().unwrap();
let dst = this.dst.join("index.html");
let dst = try!(File::create(&dst));
try!(render(dst, this, &item, false));
fn full_path(cx: &Context, item: &clean::Item) -> String {
let mut s = cx.current.connect("::");
s.push_str("::");
- s.push_str(item.name.get_ref().as_slice());
+ s.push_str(item.name.as_ref().unwrap().as_slice());
return s
}
try!(write!(w, " {{\n"));
for v in e.variants.iter() {
try!(write!(w, " "));
- let name = v.name.get_ref().as_slice();
+ let name = v.name.as_ref().unwrap().as_slice();
match v.inner {
clean::VariantItem(ref var) => {
match var.kind {
try!(write!(w, "<div class='block {}'><h2>{}</h2>", short, longty));
for item in items.iter() {
let curty = shortty(cur).to_static_str();
- let class = if cur.name.get_ref() == item &&
+ let class = if cur.name.as_ref().unwrap() == item &&
short == curty { "current" } else { "" };
try!(write!(w, "<a class='{ty} {class}' href='{href}{path}'>\
{name}</a>",
color: #333;
}
+.location a:first-child { font-weight: bold; }
+
.block {
padding: 0 10px;
margin-bottom: 14px;
}
.content .highlighted {
- cursor: pointer;
color: #000 !important;
background-color: #ccc;
}
-.content .highlighted a { color: #000 !important; }
+.content .highlighted a, .content .highlighted span { color: #000 !important; }
.content .highlighted.trait { background-color: #fece7e; }
.content .highlighted.mod { background-color: #afc6e4; }
.content .highlighted.enum { background-color: #b4d1b9; }
p a { color: #4e8bca; }
p a:hover { text-decoration: underline; }
-.content a.trait, .block a.current.trait { color: #ed9603; }
-.content a.mod, .block a.current.mod { color: #4d76ae; }
-.content a.enum, .block a.current.enum { color: #5e9766; }
-.content a.struct, .block a.current.struct { color: #e53700; }
-.content a.fn, .block a.current.fn { color: #8c6067; }
+.content span.trait, .block a.current.trait { color: #ed9603; }
+.content span.mod, .block a.current.mod { color: #4d76ae; }
+.content span.enum, .block a.current.enum { color: #5e9766; }
+.content span.struct, .block a.current.struct { color: #e53700; }
+.content span.fn, .block a.current.fn { color: #8c6067; }
.content .fnname { color: #8c6067; }
.search-input {
display: block;
}
+.search-results a {
+ display: block;
+}
+
+.content .search-results td:first-child { padding-right: 0; }
+.content .search-results td:first-child a { padding-right: 10px; }
+
#help {
background: #e9e9e9;
border-radius: 4px;
* A function to compute the Levenshtein distance between two strings
* Licensed under the Creative Commons Attribution-ShareAlike 3.0 Unported
* Full License can be found at http://creativecommons.org/licenses/by-sa/3.0/legalcode
- * This code is an unmodified version of the code written by Marco de Wit
+ * This code is an unmodified version of the code written by Marco de Wit
* and was found at http://stackoverflow.com/a/18514751/745719
*/
var levenshtein = (function() {
});
}
} else if (
- (lev_distance = levenshtein(searchWords[j], val)) <=
+ (lev_distance = levenshtein(searchWords[j], val)) <=
MAX_LEV_DISTANCE) {
if (typeFilter < 0 || typeFilter === searchIndex[j].ty) {
results.push({
function validateResult(name, path, keys, parent) {
for (var i=0; i < keys.length; ++i) {
// each check is for validation so we negate the conditions and invalidate
- if (!(
+ if (!(
// check for an exact name match
name.toLowerCase().indexOf(keys[i]) > -1 ||
// then an exact path match
path.toLowerCase().indexOf(keys[i]) > -1 ||
// next if there is a parent, check for exact parent match
- (parent !== undefined &&
+ (parent !== undefined &&
parent.name.toLowerCase().indexOf(keys[i]) > -1) ||
// lastly check to see if the name was a levenshtein match
- levenshtein(name.toLowerCase(), keys[i]) <=
+ levenshtein(name.toLowerCase(), keys[i]) <=
MAX_LEV_DISTANCE)) {
return false;
}
if (window.location.pathname == dst.pathname) {
$('#search').addClass('hidden');
$('#main').removeClass('hidden');
+ document.location.href = dst.href;
}
- document.location.href = dst.href;
}).on('mouseover', function() {
var $el = $(this);
clearTimeout(hoverTimeout);
shown = [];
results.forEach(function(item) {
- var name, type;
+ var name, type, href, displayPath;
if (shown.indexOf(item) !== -1) {
return;
name = item.name;
type = itemTypes[item.ty];
- output += '<tr class="' + type + ' result"><td>';
-
if (type === 'mod') {
- output += item.path +
- '::<a href="' + rootPath +
- item.path.replace(/::/g, '/') + '/' +
- name + '/index.html" class="' +
- type + '">' + name + '</a>';
+ displayPath = item.path + '::';
+ href = rootPath + item.path.replace(/::/g, '/') + '/' +
+ name + '/index.html';
} else if (type === 'static' || type === 'reexport') {
- output += item.path +
- '::<a href="' + rootPath +
- item.path.replace(/::/g, '/') +
- '/index.html" class="' + type +
- '">' + name + '</a>';
+ displayPath = item.path + '::';
+ href = rootPath + item.path.replace(/::/g, '/') +
+ '/index.html';
} else if (item.parent !== undefined) {
var myparent = item.parent;
var anchor = '#' + type + '.' + name;
- output += item.path + '::' + myparent.name +
- '::<a href="' + rootPath +
- item.path.replace(/::/g, '/') +
- '/' + itemTypes[myparent.ty] +
- '.' + myparent.name +
- '.html' + anchor +
- '" class="' + type +
- '">' + name + '</a>';
+ displayPath = item.path + '::' + myparent.name + '::';
+ href = rootPath + item.path.replace(/::/g, '/') +
+ '/' + itemTypes[myparent.ty] +
+ '.' + myparent.name +
+ '.html' + anchor;
} else {
- output += item.path +
- '::<a href="' + rootPath +
- item.path.replace(/::/g, '/') +
- '/' + type +
- '.' + name +
- '.html" class="' + type +
- '">' + name + '</a>';
+ displayPath = item.path + '::';
+ href = rootPath + item.path.replace(/::/g, '/') +
+ '/' + type + '.' + name + '.html';
}
- output += '</td><td><span class="desc">' + item.desc +
- '</span></td></tr>';
+ output += '<tr class="' + type + ' result"><td>' +
+ '<a href="' + href + '">' +
+ displayPath + '<span class="' + type + '">' +
+ name + '</span></a></td><td>' +
+ '<a href="' + href + '">' +
+ '<span class="desc">' + item.desc +
+ ' </span></a></td></tr>';
});
} else {
output += 'No results :( <a href="https://duckduckgo.com/?q=' +
});
$(function() {
- var toggle = "<a href='javascript:void(0)'"
- + "class='collapse-toggle'>[<span class='inner'>-</span>]</a>";
+ var toggle = $("<a/>", {'href': 'javascript:void(0)', 'class': 'collapse-toggle'})
+ .html("[<span class='inner'>-</span>]");
$(".method").each(function() {
if ($(this).next().is(".docblock")) {
- $(this).children().first().after(toggle);
+ $(this).children().first().after(toggle[0]);
}
});
- var mainToggle = $(toggle);
- mainToggle.append("<span class='toggle-label' style='display:none'>"
- + " Expand description</span></a>")
- var wrapper = $("<div class='toggle-wrapper'>");
- wrapper.append(mainToggle);
+ var mainToggle =
+ $(toggle).append(
+ $('<span/>', {'class': 'toggle-label'})
+ .css('display', 'none')
+ .html(' Expand description'));
+ var wrapper = $("<div class='toggle-wrapper'>").append(mainToggle);
$("#main > .docblock").before(wrapper);
});
#![feature(globs, struct_variant, managed_boxes, macro_rules, phase)]
+extern crate arena;
extern crate debug;
extern crate getopts;
extern crate libc;
extern crate rustc;
extern crate serialize;
extern crate syntax;
-extern crate testing = "test";
+extern crate "test" as testing;
extern crate time;
#[phase(plugin, link)] extern crate log;
use std::io;
use std::io::{File, MemWriter};
-use std::gc::Gc;
use std::collections::HashMap;
use serialize::{json, Decodable, Encodable};
use externalfiles::ExternalHtml;
"unindent-comments",
];
-local_data_key!(pub ctxtkey: Gc<core::DocContext>)
local_data_key!(pub analysiskey: core::CrateAnalysis)
type Output = (clean::Crate, Vec<plugins::PluginJson> );
info!("starting to run rustc");
let (mut krate, analysis) = std::task::try(proc() {
let cr = cr;
- core::run_core(libs.move_iter().collect(),
- cfgs,
- externs,
- &cr,
- triple)
+ core::run_core(libs, cfgs, externs, &cr, triple)
}).map_err(|boxed_any|format!("{:?}", boxed_any)).unwrap();
info!("finished with rustc");
analysiskey.replace(Some(analysis));
// Process all of the crate attributes, extracting plugin metadata along
// with the passes which we are supposed to run.
- match krate.module.get_ref().doc_list() {
+ match krate.module.as_ref().unwrap().doc_list() {
Some(nested) => {
for inner in nested.iter() {
match *inner {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::collections::HashSet;
use std::io;
use std::string::String;
}
/// Run any tests/code examples in the markdown file `input`.
-pub fn test(input: &str, libs: HashSet<Path>, externs: core::Externs,
+pub fn test(input: &str, libs: Vec<Path>, externs: core::Externs,
mut test_args: Vec<String>) -> int {
let input_str = load_or_return!(input, 1, 2);
//! This module crawls a `clean::Crate` and produces a summarization of the
//! stability levels within the crate. The summary contains the module
//! hierarchy, with item counts for every stability level per module. A parent
-//! module's count includes its childrens's.
+//! module's count includes its children's.
use std::ops::Add;
use std::num::Zero;
use std::collections::{HashSet, HashMap};
use testing;
-use rustc::back::link;
+use rustc::back::write;
use rustc::driver::config;
use rustc::driver::driver;
use rustc::driver::session;
pub fn run(input: &str,
cfgs: Vec<String>,
- libs: HashSet<Path>,
+ libs: Vec<Path>,
externs: core::Externs,
mut test_args: Vec<String>,
crate_name: Option<String>)
"rustdoc-test", None)
.expect("phase_2_configure_and_expand aborted in rustdoc!");
- let ctx = box(GC) core::DocContext {
+ let ctx = core::DocContext {
krate: krate,
maybe_typed: core::NotTyped(sess),
src: input_path,
inlined: RefCell::new(None),
populated_crate_impls: RefCell::new(HashSet::new()),
};
- super::ctxtkey.replace(Some(ctx));
- let mut v = RustdocVisitor::new(&*ctx, None);
+ let mut v = RustdocVisitor::new(&ctx, None);
v.visit(&ctx.krate);
- let mut krate = v.clean();
+ let mut krate = v.clean(&ctx);
match crate_name {
Some(name) => krate.name = name,
None => {}
0
}
-fn runtest(test: &str, cratename: &str, libs: HashSet<Path>, externs: core::Externs,
+fn runtest(test: &str, cratename: &str, libs: Vec<Path>, externs: core::Externs,
should_fail: bool, no_run: bool, as_test_harness: bool) {
// the test harness wants its own `main` & top level functions, so
// never wrap the test in `fn main() { ... }`
maybe_sysroot: Some(os::self_exe_path().unwrap().dir_path()),
addl_lib_search_paths: RefCell::new(libs),
crate_types: vec!(config::CrateTypeExecutable),
- output_types: vec!(link::OutputTypeExe),
+ output_types: vec!(write::OutputTypeExe),
no_trans: no_run,
externs: externs,
cg: config::CodegenOptions {
None,
span_diagnostic_handler);
- let outdir = TempDir::new("rustdoctest").expect("rustdoc needs a tempdir");
+ let outdir = TempDir::new("rustdoctest").ok().expect("rustdoc needs a tempdir");
let out = Some(outdir.path().clone());
let cfg = config::build_configuration(&sess);
let libdir = sess.target_filesearch().get_lib_path();
pub struct Collector {
pub tests: Vec<testing::TestDescAndFn>,
names: Vec<String>,
- libs: HashSet<Path>,
+ libs: Vec<Path>,
externs: core::Externs,
cnt: uint,
use_headers: bool,
}
impl Collector {
- pub fn new(cratename: String, libs: HashSet<Path>, externs: core::Externs,
+ pub fn new(cratename: String, libs: Vec<Path>, externs: core::Externs,
use_headers: bool) -> Collector {
Collector {
tests: Vec::new(),
// also, is there some reason that this doesn't use the 'visit'
// framework from syntax?
-pub struct RustdocVisitor<'a> {
+pub struct RustdocVisitor<'a, 'tcx: 'a> {
pub module: Module,
pub attrs: Vec<ast::Attribute>,
- pub cx: &'a core::DocContext,
+ pub cx: &'a core::DocContext<'tcx>,
pub analysis: Option<&'a core::CrateAnalysis>,
}
-impl<'a> RustdocVisitor<'a> {
- pub fn new<'b>(cx: &'b core::DocContext,
- analysis: Option<&'b core::CrateAnalysis>) -> RustdocVisitor<'b> {
+impl<'a, 'tcx> RustdocVisitor<'a, 'tcx> {
+ pub fn new(cx: &'a core::DocContext<'tcx>,
+ analysis: Option<&'a core::CrateAnalysis>) -> RustdocVisitor<'a, 'tcx> {
RustdocVisitor {
module: Module::new(None),
attrs: Vec::new(),
}
fn stability(&self, id: ast::NodeId) -> Option<attr::Stability> {
- let tcx = match self.cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return None
- };
- stability::lookup(tcx, ast_util::local_def(id))
+ self.cx.tcx_opt().and_then(|tcx| stability::lookup(tcx, ast_util::local_def(id)))
}
pub fn visit(&mut self, krate: &ast::Crate) {
fn resolve_id(&mut self, id: ast::NodeId, renamed: Option<ast::Ident>,
glob: bool, om: &mut Module, please_inline: bool) -> bool {
- let tcx = match self.cx.maybe_typed {
- core::Typed(ref tcx) => tcx,
- core::NotTyped(_) => return false
+ let tcx = match self.cx.tcx_opt() {
+ Some(tcx) => tcx,
+ None => return false
};
let def = (*tcx.def_map.borrow())[id].def_id();
if !ast_util::is_local(def) { return false }
//! the processes `argc` and `argv` arguments to be stored
//! in a globally-accessible location for use by the `os` module.
//!
-//! Only valid to call on linux. Mac and Windows use syscalls to
+//! Only valid to call on Linux. Mac and Windows use syscalls to
//! discover the command line arguments.
//!
//! FIXME #7756: Would be nice for this to not exist.
The other problem with translating Rust strings to C strings is that Rust
strings can validly contain a null-byte in the middle of the string (0 is a
-valid unicode codepoint). This means that not all Rust strings can actually be
+valid Unicode codepoint). This means that not all Rust strings can actually be
translated to C strings.
# Creation of a C string
data: UnsafeCell<T>,
}
-/// stage0 only
-#[cfg(stage0)]
-pub struct ExclusiveGuard<'a, T> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a mut T,
- _guard: mutex::LockGuard<'a>,
-}
-
/// An RAII guard returned via `lock`
-#[cfg(not(stage0))]
pub struct ExclusiveGuard<'a, T:'a> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
#![feature(macro_rules, phase, globs, thread_local, managed_boxes, asm)]
#![feature(linkage, lang_items, unsafe_destructor, default_type_params)]
#![feature(import_shadowing)]
-#![feature(issue_5723_bootstrap)]
#![no_std]
#![experimental]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
#[phase(plugin, link)] extern crate core;
extern crate alloc;
extern crate libc;
extern crate collections;
-#[cfg(test)] extern crate realrustrt = "rustrt";
+#[cfg(test)] extern crate "rustrt" as realrustrt;
#[cfg(test)] extern crate test;
#[cfg(test)] extern crate native;
//! Unwind library interface
#![allow(non_camel_case_types)]
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
#![allow(dead_code)] // these are just bindings
use libc;
///
/// The task-local data can be accessed through this value, and when this
/// structure is dropped it will return the borrow on the data.
-#[cfg(not(stage0))]
pub struct Ref<T:'static> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_marker: marker::NoSend
}
-/// stage0 only
-#[cfg(stage0)]
-pub struct Ref<T> {
- // FIXME #12808: strange names to try to avoid interfering with
- // field accesses of the contained type via Deref
- _inner: &'static TLDValueBox<T>,
- _marker: marker::NoSend
-}
-
fn key_to_key_value<T: 'static>(key: Key<T>) -> uint {
key as *const _ as uint
}
// Do nothing.
None
}
- (0, Some(newValue)) => {
+ (0, Some(new_value)) => {
// The current value is uninitialized and we're storing a new value.
unsafe {
- ptr::write(&mut (*value_box).value, newValue);
+ ptr::write(&mut (*value_box).value, new_value);
*(*value_box).refcount.get() = 1;
None
}
Some(ret)
}
}
- (1, Some(newValue)) => {
+ (1, Some(new_value)) => {
// We have an initialized value and we're replacing it.
let value_ref = unsafe { &mut (*value_box).value };
- let ret = mem::replace(value_ref, newValue);
+ let ret = mem::replace(value_ref, new_value);
// Refcount is already 1, leave it as that.
Some(ret)
}
// efficient sequence of instructions. This also involves dealing with fun
// stuff in object files and whatnot. Regardless, it turns out this causes
// trouble with green threads and lots of optimizations turned on. The
- // following case study was done on linux x86_64, but I would imagine that
+ // following case study was done on Linux x86_64, but I would imagine that
// other platforms are similar.
//
- // On linux, the instruction sequence for loading the tls pointer global
+ // On Linux, the instruction sequence for loading the tls pointer global
// looks like:
//
// mov %fs:0x0, %rax
libc::CloseHandle(block);
}
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
extern "system" {
fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES,
bManualReset: BOOL,
// function, and I would be saddened if more usage of the function
// crops up.
unsafe {
- let imp = self.imp.take_unwrap();
+ let imp = self.imp.take().unwrap();
let vtable = mem::transmute::<_, &raw::TraitObject>(&imp).vtable;
match imp.wrap().downcast::<T>() {
Ok(t) => Some(t),
pub fn spawn_sibling(mut self: Box<Task>,
opts: TaskOpts,
f: proc(): Send) {
- let ops = self.imp.take_unwrap();
+ let ops = self.imp.take().unwrap();
ops.spawn_sibling(self, opts, f)
}
pub fn deschedule(mut self: Box<Task>,
amt: uint,
f: |BlockedTask| -> ::core::result::Result<(), BlockedTask>) {
- let ops = self.imp.take_unwrap();
+ let ops = self.imp.take().unwrap();
ops.deschedule(amt, self, f)
}
/// current task can accept a change in scheduling. This function can only
/// be called on tasks that were previously blocked in `deschedule`.
pub fn reawaken(mut self: Box<Task>) {
- let ops = self.imp.take_unwrap();
+ let ops = self.imp.take().unwrap();
ops.reawaken(self);
}
/// eventually return, but possibly not immediately. This is used as an
/// opportunity to allow other tasks a chance to run.
pub fn yield_now(mut self: Box<Task>) {
- let ops = self.imp.take_unwrap();
+ let ops = self.imp.take().unwrap();
ops.yield_now(self);
}
/// Similar to `yield_now`, except that this function may immediately return
/// without yielding (depending on what the runtime decides to do).
pub fn maybe_yield(mut self: Box<Task>) {
- let ops = self.imp.take_unwrap();
+ let ops = self.imp.take().unwrap();
ops.maybe_yield(self);
}
/// stored in the task's runtime. This factory may not always be available,
/// which is why the return type is `Option`
pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
- self.imp.get_mut_ref().local_io()
+ self.imp.as_mut().unwrap().local_io()
}
/// Returns the stack bounds for this task in (lo, hi) format. The stack
/// bounds may not be known for all tasks, so the return value may be
/// `None`.
pub fn stack_bounds(&self) -> (uint, uint) {
- self.imp.get_ref().stack_bounds()
+ self.imp.as_ref().unwrap().stack_bounds()
}
/// Returns whether it is legal for this task to block the OS thread that it
/// is running on.
pub fn can_block(&self) -> bool {
- self.imp.get_ref().can_block()
+ self.imp.as_ref().unwrap().can_block()
}
/// Consume this task, flagging it as a candidate for destruction.
unsafe { imp::join(self.native) };
self.joined = true;
assert!(self.packet.is_some());
- self.packet.take_unwrap()
+ self.packet.take().unwrap()
}
}
SwitchToThread();
}
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
extern "system" {
fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
dwStackSize: SIZE_T,
}
#[cfg(windows)]
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
extern "system" {
fn TlsAlloc() -> DWORD;
fn TlsFree(dwTlsIndex: DWORD) -> BOOL;
use alloc::boxed::Box;
use collections::string::String;
+use collections::str::StrAllocating;
use collections::vec::Vec;
use core::any::Any;
use core::atomic;
#[cfg(windows, target_arch = "x86_64", not(test))]
#[doc(hidden)]
#[allow(visible_private_types)]
-#[allow(non_camel_case_types)]
+#[allow(non_camel_case_types, non_snake_case)]
pub mod eabi {
use libunwind as uw;
use libc::{c_void, c_int};
let mut v = Vec::new();
let _ = write!(&mut VecWriter { v: &mut v }, "{}", msg);
- begin_unwind_inner(box String::from_utf8(v).unwrap(), file_line)
+ let msg = box String::from_utf8_lossy(v.as_slice()).into_string();
+ begin_unwind_inner(msg, file_line)
}
/// This is the entry point of unwinding for fail!() and assert!().
n => {
let f: Callback = unsafe { mem::transmute(n) };
let (file, line) = *file_line;
- f(msg, file, line);
+ f(&*msg, file, line);
}
}
};
inner: Arc<UnsafeCell<Inner<T>>>,
}
-#[cfg(stage0)]
-pub struct Guard<'a, T> {
- access: &'a mut Access<T>,
- missile: Option<HomingMissile>,
-}
-
-#[cfg(not(stage0))]
pub struct Guard<'a, T:'static> {
access: &'a mut Access<T>,
missile: Option<HomingMissile>,
});
match cx.status {
- 0 => Ok(accum_addrinfo(cx.addrinfo.get_ref())),
+ 0 => Ok(accum_addrinfo(cx.addrinfo.as_ref().unwrap())),
n => Err(UvError(n))
}
}
// once
let MyCallback(ref mut s) = *self;
if s.is_some() {
- s.take_unwrap().send(1);
+ s.take().unwrap().send(1);
}
}
}
#[cfg(test)] extern crate green;
#[cfg(test)] extern crate debug;
-#[cfg(test)] extern crate realrustuv = "rustuv";
+#[cfg(test)] extern crate "rustuv" as realrustuv;
extern crate libc;
extern crate alloc;
fn wakeup(slot: &mut Option<BlockedTask>) {
assert!(slot.is_some());
- let _ = slot.take_unwrap().wake().map(|t| t.reawaken());
+ let _ = slot.take().unwrap().wake().map(|t| t.reawaken());
}
pub struct Request {
wait_until_woken_after(&mut cx.task, &loop_, || {
unsafe { uvll::set_data_for_uv_handle(handle, &mut cx) }
});
- match cx.result.take_unwrap() {
+ match cx.result.take().unwrap() {
(n, _) if n < 0 =>
Err(uv_error_to_io_error(UvError(n as c_int))),
(n, addr) => Ok((n as uint, addr.unwrap()))
// here.
let data = if guard.can_timeout {Some(Vec::from_slice(buf))} else {None};
let uv_buf = if guard.can_timeout {
- slice_to_uv_buf(data.get_ref().as_slice())
+ slice_to_uv_buf(data.as_ref().unwrap().as_slice())
} else {
slice_to_uv_buf(buf)
};
self.timer = Some(timer);
}
- let timer = self.timer.get_mut_ref();
+ let timer = self.timer.as_mut().unwrap();
timer.stop();
timer.start(timer_cb, ms, 0);
self.timeout_state = TimeoutPending;
// bytes.
let data = if may_timeout {Some(Vec::from_slice(buf))} else {None};
let uv_buf = if may_timeout {
- slice_to_uv_buf(data.get_ref().as_slice())
+ slice_to_uv_buf(data.as_ref().unwrap().as_slice())
} else {
slice_to_uv_buf(buf)
};
pub access: access::Access<T>,
}
-#[cfg(stage0)]
-pub struct Guard<'a, T> {
- state: &'a mut TimeoutState,
- pub access: access::Guard<'a, T>,
- pub can_timeout: bool,
-}
-
-#[cfg(not(stage0))]
pub struct Guard<'a, T:'static> {
state: &'a mut TimeoutState,
pub access: access::Guard<'a, T>,
self.timer = Some(timer);
}
- let timer = self.timer.get_mut_ref();
+ let timer = self.timer.as_mut().unwrap();
unsafe {
let cx = uvll::get_data_for_uv_handle(timer.handle);
let cx = cx as *mut TimerContext;
let _f = ForbidSwitch::new("timer callback can't switch");
let timer: &mut TimerWatcher = unsafe { UvHandle::from_uv_handle(&handle) };
- match timer.action.take_unwrap() {
+ match timer.action.take().unwrap() {
WakeTask => {
- let task = timer.blocker.take_unwrap();
+ let task = timer.blocker.take().unwrap();
let _ = task.wake().map(|t| t.reawaken());
}
CallOnce(mut cb) => { cb.call() }
// Lastly, after we've closed the pool of handles we pump the event loop
// one last time to run any closing callbacks to make sure the loop
// shuts down cleanly.
- let handle = self.uvio.handle_pool.get_ref().handle();
+ let handle = self.uvio.handle_pool.as_ref().unwrap().handle();
drop(self.uvio.handle_pool.take());
self.run();
// It's understood by the homing code that the "local id" is just the
// pointer of the local I/O factory cast to a uint.
let id: uint = unsafe { mem::transmute_copy(&self) };
- HomeHandle::new(id, &mut **self.handle_pool.get_mut_ref())
+ HomeHandle::new(id, &mut **self.handle_pool.as_mut().unwrap())
}
}
UnrecognizedHex => "invalid \\u escape (unrecognized hex)",
NotFourDigit => "invalid \\u escape (not four digits)",
NotUtf8 => "contents not utf-8",
- InvalidUnicodeCodePoint => "invalid unicode code point",
+ InvalidUnicodeCodePoint => "invalid Unicode code point",
LoneLeadingSurrogateInHexEscape => "lone leading surrogate in hex escape",
UnexpectedEndOfHexEscape => "unexpected end of hex escape",
}
> ToJson for ( $( $tyvar ),* , ) {
#[inline]
- #[allow(uppercase_variables)]
+ #[allow(non_snake_case)]
fn to_json(&self) -> Json {
match *self {
($(ref $tyvar),*,) => List(vec![$($tyvar.to_json()),*])
html_root_url = "http://doc.rust-lang.org/master/",
html_playground_url = "http://play.rust-lang.org/")]
#![feature(macro_rules, managed_boxes, default_type_params, phase)]
-#![feature(issue_5723_bootstrap)]
// test harness access
#[cfg(test)]
() => ();
( $($name:ident,)+ ) => (
impl<E, D:Decoder<E>,$($name:Decodable<D, E>),*> Decodable<D,E> for ($($name,)*) {
- #[allow(uppercase_variables)]
+ #[allow(non_snake_case)]
fn decode(d: &mut D) -> Result<($($name,)*), E> {
d.read_tuple(|d, amt| {
let mut i = 0;
}
}
impl<E, S:Encoder<E>,$($name:Encodable<S, E>),*> Encodable<S, E> for ($($name,)*) {
- #[allow(uppercase_variables)]
+ #[allow(non_snake_case)]
fn encode(&self, s: &mut S) -> Result<(), E> {
let ($(ref $name,)*) = *self;
let mut n = 0;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! The `bitflags!` macro generates a `struct` that holds a set of C-style
-//! bitmask flags. It is useful for creating typesafe wrappers for C APIs.
-//!
-//! The flags should only be defined for integer types, otherwise unexpected
-//! type errors may occur at compile time.
-//!
-//! # Example
-//!
-//! ~~~rust
-//! bitflags!(
-//! flags Flags: u32 {
-//! static FlagA = 0x00000001,
-//! static FlagB = 0x00000010,
-//! static FlagC = 0x00000100,
-//! static FlagABC = FlagA.bits
-//! | FlagB.bits
-//! | FlagC.bits
-//! }
-//! )
-//!
-//! fn main() {
-//! let e1 = FlagA | FlagC;
-//! let e2 = FlagB | FlagC;
-//! assert!((e1 | e2) == FlagABC); // union
-//! assert!((e1 & e2) == FlagC); // intersection
-//! assert!((e1 - e2) == FlagA); // set difference
-//! assert!(!e2 == FlagA); // set complement
-//! }
-//! ~~~
-//!
-//! The generated `struct`s can also be extended with type and trait implementations:
-//!
-//! ~~~rust
-//! use std::fmt;
-//!
-//! bitflags!(
-//! flags Flags: u32 {
-//! static FlagA = 0x00000001,
-//! static FlagB = 0x00000010
-//! }
-//! )
-//!
-//! impl Flags {
-//! pub fn clear(&mut self) {
-//! self.bits = 0; // The `bits` field can be accessed from within the
-//! // same module where the `bitflags!` macro was invoked.
-//! }
-//! }
-//!
-//! impl fmt::Show for Flags {
-//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-//! write!(f, "hi!")
-//! }
-//! }
-//!
-//! fn main() {
-//! let mut flags = FlagA | FlagB;
-//! flags.clear();
-//! assert!(flags.is_empty());
-//! assert_eq!(format!("{}", flags).as_slice(), "hi!");
-//! }
-//! ~~~
-//!
-//! # Attributes
-//!
-//! Attributes can be attached to the generated `struct` by placing them
-//! before the `flags` keyword.
-//!
-//! # Derived traits
-//!
-//! The `PartialEq` and `Clone` traits are automatically derived for the `struct` using
-//! the `deriving` attribute. Additional traits can be derived by providing an
-//! explicit `deriving` attribute on `flags`.
-//!
-//! # Operators
-//!
-//! The following operator traits are implemented for the generated `struct`:
-//!
-//! - `BitOr`: union
-//! - `BitAnd`: intersection
-//! - `Sub`: set difference
-//! - `Not`: set complement
-//!
-//! # Methods
-//!
-//! The following methods are defined for the generated `struct`:
-//!
-//! - `empty`: an empty set of flags
-//! - `all`: the set of all flags
-//! - `bits`: the raw value of the flags currently stored
-//! - `is_empty`: `true` if no flags are currently stored
-//! - `is_all`: `true` if all flags are currently set
-//! - `intersects`: `true` if there are flags common to both `self` and `other`
-//! - `contains`: `true` all of the flags in `other` are contained within `self`
-//! - `insert`: inserts the specified flags in-place
-//! - `remove`: removes the specified flags in-place
-
#![experimental]
#![macro_escape]
+//! A typesafe bitmask flag generator.
+
+/// The `bitflags!` macro generates a `struct` that holds a set of C-style
+/// bitmask flags. It is useful for creating typesafe wrappers for C APIs.
+///
+/// The flags should only be defined for integer types, otherwise unexpected
+/// type errors may occur at compile time.
+///
+/// # Example
+///
+/// ~~~rust
+/// bitflags! {
+/// flags Flags: u32 {
+/// static FlagA = 0x00000001,
+/// static FlagB = 0x00000010,
+/// static FlagC = 0x00000100,
+/// static FlagABC = FlagA.bits
+/// | FlagB.bits
+/// | FlagC.bits,
+/// }
+/// }
+///
+/// fn main() {
+/// let e1 = FlagA | FlagC;
+/// let e2 = FlagB | FlagC;
+/// assert!((e1 | e2) == FlagABC); // union
+/// assert!((e1 & e2) == FlagC); // intersection
+/// assert!((e1 - e2) == FlagA); // set difference
+/// assert!(!e2 == FlagA); // set complement
+/// }
+/// ~~~
+///
+/// The generated `struct`s can also be extended with type and trait implementations:
+///
+/// ~~~rust
+/// use std::fmt;
+///
+/// bitflags! {
+/// flags Flags: u32 {
+/// static FlagA = 0x00000001,
+/// static FlagB = 0x00000010,
+/// }
+/// }
+///
+/// impl Flags {
+/// pub fn clear(&mut self) {
+/// self.bits = 0; // The `bits` field can be accessed from within the
+/// // same module where the `bitflags!` macro was invoked.
+/// }
+/// }
+///
+/// impl fmt::Show for Flags {
+/// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+/// write!(f, "hi!")
+/// }
+/// }
+///
+/// fn main() {
+/// let mut flags = FlagA | FlagB;
+/// flags.clear();
+/// assert!(flags.is_empty());
+/// assert_eq!(format!("{}", flags).as_slice(), "hi!");
+/// }
+/// ~~~
+///
+/// # Attributes
+///
+/// Attributes can be attached to the generated `struct` by placing them
+/// before the `flags` keyword.
+///
+/// # Derived traits
+///
+/// The `PartialEq`, `Eq`, `Clone`, `PartialOrd`, `Ord` and `Hash` traits are
+/// automatically derived for the `struct` using the `deriving` attribute.
+/// Additional traits can be derived by providing an explicit `deriving`
+/// attribute on `flags`.
+///
+/// # Operators
+///
+/// The following operator traits are implemented for the generated `struct`:
+///
+/// - `BitOr`: union
+/// - `BitAnd`: intersection
+/// - `Sub`: set difference
+/// - `Not`: set complement
+///
+/// # Methods
+///
+/// The following methods are defined for the generated `struct`:
+///
+/// - `empty`: an empty set of flags
+/// - `all`: the set of all flags
+/// - `bits`: the raw value of the flags currently stored
+/// - `is_empty`: `true` if no flags are currently stored
+/// - `is_all`: `true` if all flags are currently set
+/// - `intersects`: `true` if there are flags common to both `self` and `other`
+/// - `contains`: `true` if all of the flags in `other` are contained within `self`
+/// - `insert`: inserts the specified flags in-place
+/// - `remove`: removes the specified flags in-place
#[macro_export]
-macro_rules! bitflags(
+macro_rules! bitflags {
($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
$($(#[$Flag_attr:meta])* static $Flag:ident = $value:expr),+
- }) => (
+ }) => {
#[deriving(PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
$(#[$attr])*
pub struct $BitFlags {
$BitFlags { bits: !self.bits } & $BitFlags::all()
}
}
- )
-)
+ };
+ ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
+ $($(#[$Flag_attr:meta])* static $Flag:ident = $value:expr),+,
+ }) => {
+ bitflags! {
+ $(#[$attr])*
+ flags $BitFlags: $T {
+ $($(#[$Flag_attr])* static $Flag = $value),+
+ }
+ }
+ };
+}
#[cfg(test)]
mod tests {
use option::{Some, None};
use ops::{BitOr, BitAnd, Sub, Not};
- bitflags!(
+ bitflags! {
+ #[doc = "> The first principle is that you must not fool yourself — and"]
+ #[doc = "> you are the easiest person to fool."]
+ #[doc = "> "]
+ #[doc = "> - Richard Feynman"]
flags Flags: u32 {
static FlagA = 0x00000001,
+ #[doc = "<pcwalton> macros are way better at generating code than trans is"]
static FlagB = 0x00000010,
static FlagC = 0x00000100,
+ #[doc = "* cmr bed"]
+ #[doc = "* strcat table"]
+ #[doc = "<strcat> wait what?"]
static FlagABC = FlagA.bits
| FlagB.bits
- | FlagC.bits
+ | FlagC.bits,
}
- )
+ }
#[test]
fn test_bits(){
+++ /dev/null
-// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-// ignore-lexer-test FIXME #15883
-
-//! Unordered containers, implemented as hash-tables (`HashSet` and `HashMap` types)
-
-use clone::Clone;
-use cmp::{max, Eq, Equiv, PartialEq};
-use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
-use default::Default;
-use fmt::Show;
-use fmt;
-use hash::{Hash, Hasher, RandomSipHasher};
-use iter::{Iterator, FilterMap, Chain, Repeat, Zip, Extendable};
-use iter::{range, range_inclusive, FromIterator};
-use iter;
-use mem::replace;
-use num;
-use option::{Some, None, Option};
-use result::{Ok, Err};
-use ops::Index;
-
-mod table {
- use clone::Clone;
- use cmp;
- use hash::{Hash, Hasher};
- use iter::range_step_inclusive;
- use iter::{Iterator, range};
- use kinds::marker;
- use mem::{min_align_of, size_of};
- use mem::{overwrite, transmute};
- use num::{CheckedMul, is_power_of_two};
- use ops::Drop;
- use option::{Some, None, Option};
- use ptr::RawPtr;
- use ptr::set_memory;
- use ptr;
- use rt::heap::{allocate, deallocate};
-
- static EMPTY_BUCKET: u64 = 0u64;
-
- /// The raw hashtable, providing safe-ish access to the unzipped and highly
- /// optimized arrays of hashes, keys, and values.
- ///
- /// This design uses less memory and is a lot faster than the naive
- /// `Vec<Option<u64, K, V>>`, because we don't pay for the overhead of an
- /// option on every element, and we get a generally more cache-aware design.
- ///
- /// Key invariants of this structure:
- ///
- /// - if hashes[i] == EMPTY_BUCKET, then keys[i] and vals[i] have
- /// 'undefined' contents. Don't read from them. This invariant is
- /// enforced outside this module with the `EmptyIndex`, `FullIndex`,
- /// and `SafeHash` types.
- ///
- /// - An `EmptyIndex` is only constructed for a bucket at an index with
- /// a hash of EMPTY_BUCKET.
- ///
- /// - A `FullIndex` is only constructed for a bucket at an index with a
- /// non-EMPTY_BUCKET hash.
- ///
- /// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
- /// around hashes of zero by changing them to 0x8000_0000_0000_0000,
- /// which will likely map to the same bucket, while not being confused
- /// with "empty".
- ///
- /// - All three "arrays represented by pointers" are the same length:
- /// `capacity`. This is set at creation and never changes. The arrays
- /// are unzipped to save space (we don't have to pay for the padding
- /// between odd sized elements, such as in a map from u64 to u8), and
- /// be more cache aware (scanning through 8 hashes brings in 2 cache
- /// lines, since they're all right beside each other).
- ///
- /// You can kind of think of this module/data structure as a safe wrapper
- /// around just the "table" part of the hashtable. It enforces some
- /// invariants at the type level and employs some performance trickery,
- /// but in general is just a tricked out `Vec<Option<u64, K, V>>`.
- ///
- /// FIXME(cgaebel):
- ///
- /// Feb 11, 2014: This hashtable was just implemented, and, hard as I tried,
- /// isn't yet totally safe. There's a "known exploit" that you can create
- /// multiple FullIndexes for a bucket, `take` one, and then still `take`
- /// the other causing undefined behavior. Currently, there's no story
- /// for how to protect against this statically. Therefore, there are asserts
- /// on `take`, `get`, `get_mut`, and `put` which check the bucket state.
- /// With time, and when we're confident this works correctly, they should
- /// be removed. Also, the bounds check in `peek` is especially painful,
- /// as that's called in the innermost loops of the hashtable and has the
- /// potential to be a major performance drain. Remove this too.
- ///
- /// Or, better than remove, only enable these checks for debug builds.
- /// There's currently no "debug-only" asserts in rust, so if you're reading
- /// this and going "what? of course there are debug-only asserts!", then
- /// please make this use them!
- #[unsafe_no_drop_flag]
- pub struct RawTable<K, V> {
- capacity: uint,
- size: uint,
- hashes: *mut u64,
- keys: *mut K,
- vals: *mut V,
- }
-
- /// Represents an index into a `RawTable` with no key or value in it.
- pub struct EmptyIndex {
- idx: int,
- nocopy: marker::NoCopy,
- }
-
- /// Represents an index into a `RawTable` with a key, value, and hash
- /// in it.
- pub struct FullIndex {
- idx: int,
- hash: SafeHash,
- nocopy: marker::NoCopy,
- }
-
- impl FullIndex {
- /// Since we get the hash for free whenever we check the bucket state,
- /// this function is provided for fast access, letting us avoid
- /// redundant trips back to the hashtable.
- #[inline(always)]
- pub fn hash(&self) -> SafeHash { self.hash }
-
- /// Same comment as with `hash`.
- #[inline(always)]
- pub fn raw_index(&self) -> uint { self.idx as uint }
- }
-
- /// Represents the state of a bucket: it can either have a key/value
- /// pair (be full) or not (be empty). You cannot `take` empty buckets,
- /// and you cannot `put` into full buckets.
- pub enum BucketState {
- Empty(EmptyIndex),
- Full(FullIndex),
- }
-
- /// A hash that is not zero, since we use a hash of zero to represent empty
- /// buckets.
- #[deriving(PartialEq)]
- pub struct SafeHash {
- hash: u64,
- }
-
- impl SafeHash {
- /// Peek at the hash value, which is guaranteed to be non-zero.
- #[inline(always)]
- pub fn inspect(&self) -> u64 { self.hash }
- }
-
- /// We need to remove hashes of 0. That's reserved for empty buckets.
- /// This function wraps up `hash_keyed` to be the only way outside this
- /// module to generate a SafeHash.
- pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
- match hasher.hash(t) {
- // This constant is exceedingly likely to hash to the same
- // bucket, but it won't be counted as empty!
- EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
- h => SafeHash { hash: h },
- }
- }
-
- fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
- assert!(is_power_of_two(target_alignment));
- (unrounded + target_alignment - 1) & !(target_alignment - 1)
- }
-
- #[test]
- fn test_rounding() {
- assert_eq!(round_up_to_next(0, 4), 0);
- assert_eq!(round_up_to_next(1, 4), 4);
- assert_eq!(round_up_to_next(2, 4), 4);
- assert_eq!(round_up_to_next(3, 4), 4);
- assert_eq!(round_up_to_next(4, 4), 4);
- assert_eq!(round_up_to_next(5, 4), 8);
- }
-
- // Returns a tuple of (minimum required malloc alignment, hash_offset,
- // key_offset, val_offset, array_size), from the start of a mallocated array.
- fn calculate_offsets(
- hash_size: uint, hash_align: uint,
- keys_size: uint, keys_align: uint,
- vals_size: uint, vals_align: uint) -> (uint, uint, uint, uint, uint) {
-
- let hash_offset = 0;
- let end_of_hashes = hash_offset + hash_size;
-
- let keys_offset = round_up_to_next(end_of_hashes, keys_align);
- let end_of_keys = keys_offset + keys_size;
-
- let vals_offset = round_up_to_next(end_of_keys, vals_align);
- let end_of_vals = vals_offset + vals_size;
-
- let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
-
- (min_align, hash_offset, keys_offset, vals_offset, end_of_vals)
- }
-
- #[test]
- fn test_offset_calculation() {
- assert_eq!(calculate_offsets(128, 8, 15, 1, 4, 4 ), (8, 0, 128, 144, 148));
- assert_eq!(calculate_offsets(3, 1, 2, 1, 1, 1 ), (1, 0, 3, 5, 6));
- assert_eq!(calculate_offsets(6, 2, 12, 4, 24, 8), (8, 0, 8, 24, 48));
- }
-
- impl<K, V> RawTable<K, V> {
-
- /// Does not initialize the buckets. The caller should ensure they,
- /// at the very least, set every hash to EMPTY_BUCKET.
- unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
- let hashes_size = capacity.checked_mul(&size_of::<u64>())
- .expect("capacity overflow");
- let keys_size = capacity.checked_mul(&size_of::< K >())
- .expect("capacity overflow");
- let vals_size = capacity.checked_mul(&size_of::< V >())
- .expect("capacity overflow");
-
- // Allocating hashmaps is a little tricky. We need to allocate three
- // arrays, but since we know their sizes and alignments up front,
- // we just allocate a single array, and then have the subarrays
- // point into it.
- //
- // This is great in theory, but in practice getting the alignment
- // right is a little subtle. Therefore, calculating offsets has been
- // factored out into a different function.
- let (malloc_alignment, hash_offset, keys_offset, vals_offset, size) =
- calculate_offsets(
- hashes_size, min_align_of::<u64>(),
- keys_size, min_align_of::< K >(),
- vals_size, min_align_of::< V >());
-
- let buffer = allocate(size, malloc_alignment);
-
- let hashes = buffer.offset(hash_offset as int) as *mut u64;
- let keys = buffer.offset(keys_offset as int) as *mut K;
- let vals = buffer.offset(vals_offset as int) as *mut V;
-
- RawTable {
- capacity: capacity,
- size: 0,
- hashes: hashes,
- keys: keys,
- vals: vals,
- }
- }
-
- /// Creates a new raw table from a given capacity. All buckets are
- /// initially empty.
- #[allow(experimental)]
- pub fn new(capacity: uint) -> RawTable<K, V> {
- unsafe {
- let ret = RawTable::new_uninitialized(capacity);
- set_memory(ret.hashes, 0u8, capacity);
- ret
- }
- }
-
- /// Reads a bucket at a given index, returning an enum indicating whether
- /// there's anything there or not. You need to match on this enum to get
- /// the appropriate types to pass on to most of the other functions in
- /// this module.
- pub fn peek(&self, index: uint) -> BucketState {
- debug_assert!(index < self.capacity);
-
- let idx = index as int;
- let hash = unsafe { *self.hashes.offset(idx) };
-
- let nocopy = marker::NoCopy;
-
- match hash {
- EMPTY_BUCKET =>
- Empty(EmptyIndex {
- idx: idx,
- nocopy: nocopy
- }),
- full_hash =>
- Full(FullIndex {
- idx: idx,
- hash: SafeHash { hash: full_hash },
- nocopy: nocopy,
- })
- }
- }
-
- /// Gets references to the key and value at a given index.
- pub fn read<'a>(&'a self, index: &FullIndex) -> (&'a K, &'a V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (&*self.keys.offset(idx), &*self.vals.offset(idx))
- }
- }
-
- /// Gets references to the key and value at a given index, with the
- /// value's reference being mutable.
- pub fn read_mut<'a>(&'a mut self, index: &FullIndex) -> (&'a K, &'a mut V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (&*self.keys.offset(idx), &mut *self.vals.offset(idx))
- }
- }
-
- /// Read everything, mutably.
- pub fn read_all_mut<'a>(&'a mut self, index: &FullIndex)
- -> (&'a mut SafeHash, &'a mut K, &'a mut V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
- (transmute(self.hashes.offset(idx)),
- &mut *self.keys.offset(idx), &mut *self.vals.offset(idx))
- }
- }
-
- /// Puts a key and value pair, along with the key's hash, into a given
- /// index in the hashtable. Note how the `EmptyIndex` is 'moved' into this
- /// function, because that slot will no longer be empty when we return!
- /// A FullIndex is returned for later use, pointing to the newly-filled
- /// slot in the hashtable.
- ///
- /// Use `make_hash` to construct a `SafeHash` to pass to this function.
- pub fn put(&mut self, index: EmptyIndex, hash: SafeHash, k: K, v: V) -> FullIndex {
- let idx = index.idx;
-
- unsafe {
- debug_assert_eq!(*self.hashes.offset(idx), EMPTY_BUCKET);
- *self.hashes.offset(idx) = hash.inspect();
- overwrite(&mut *self.keys.offset(idx), k);
- overwrite(&mut *self.vals.offset(idx), v);
- }
-
- self.size += 1;
-
- FullIndex { idx: idx, hash: hash, nocopy: marker::NoCopy }
- }
-
- /// Removes a key and value from the hashtable.
- ///
- /// This works similarly to `put`, building an `EmptyIndex` out of the
- /// taken FullIndex.
- pub fn take(&mut self, index: FullIndex) -> (EmptyIndex, K, V) {
- let idx = index.idx;
-
- unsafe {
- debug_assert!(*self.hashes.offset(idx) != EMPTY_BUCKET);
-
- *self.hashes.offset(idx) = EMPTY_BUCKET;
-
- // Drop the mutable constraint.
- let keys = self.keys as *const K;
- let vals = self.vals as *const V;
-
- let k = ptr::read(keys.offset(idx));
- let v = ptr::read(vals.offset(idx));
-
- self.size -= 1;
-
- (EmptyIndex { idx: idx, nocopy: marker::NoCopy }, k, v)
- }
- }
-
- /// The hashtable's capacity, similar to a vector's.
- pub fn capacity(&self) -> uint {
- self.capacity
- }
-
- /// The number of elements ever `put` in the hashtable, minus the number
- /// of elements ever `take`n.
- pub fn size(&self) -> uint {
- self.size
- }
-
- pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
- Entries { table: self, idx: 0, elems_seen: 0 }
- }
-
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
- MutEntries { table: self, idx: 0, elems_seen: 0 }
- }
-
- pub fn move_iter(self) -> MoveEntries<K, V> {
- MoveEntries { table: self, idx: 0 }
- }
- }
-
- // `read_all_mut` casts a `*u64` to a `*SafeHash`. Since we statically
- // ensure that a `FullIndex` points to an index with a non-zero hash,
- // and a `SafeHash` is just a `u64` with a different name, this is
- // safe.
- //
- // This test ensures that a `SafeHash` really IS the same size as a
- // `u64`. If you need to change the size of `SafeHash` (and
- // consequently made this test fail), `read_all_mut` needs to be
- // modified to no longer assume this.
- #[test]
- fn can_alias_safehash_as_u64() {
- assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
- }
-
- /// Note: stage0-specific version that lacks bound.
- #[cfg(stage0)]
- pub struct Entries<'a, K, V> {
- table: &'a RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over shared references to entries in a table.
- #[cfg(not(stage0))]
- pub struct Entries<'a, K:'a, V:'a> {
- table: &'a RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Note: stage0-specific version that lacks bound.
- #[cfg(stage0)]
- pub struct MutEntries<'a, K, V> {
- table: &'a mut RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over mutable references to entries in a table.
- #[cfg(not(stage0))]
- pub struct MutEntries<'a, K:'a, V:'a> {
- table: &'a mut RawTable<K, V>,
- idx: uint,
- elems_seen: uint,
- }
-
- /// Iterator over the entries in a table, consuming the table.
- pub struct MoveEntries<K, V> {
- table: RawTable<K, V>,
- idx: uint
- }
-
- impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
- fn next(&mut self) -> Option<(&'a K, &'a V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- Full(idx) => {
- self.elems_seen += 1;
- return Some(self.table.read(&idx));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size() - self.elems_seen;
- (size, Some(size))
- }
- }
-
- impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
- fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- // the transmute here fixes:
- // error: lifetime of `self` is too short to guarantee its contents
- // can be safely reborrowed
- Full(idx) => unsafe {
- self.elems_seen += 1;
- return Some(transmute(self.table.read_mut(&idx)));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size() - self.elems_seen;
- (size, Some(size))
- }
- }
-
- impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
- fn next(&mut self) -> Option<(SafeHash, K, V)> {
- while self.idx < self.table.capacity() {
- let i = self.idx;
- self.idx += 1;
-
- match self.table.peek(i) {
- Empty(_) => {},
- Full(idx) => {
- let h = idx.hash();
- let (_, k, v) = self.table.take(idx);
- return Some((h, k, v));
- }
- }
- }
-
- None
- }
-
- fn size_hint(&self) -> (uint, Option<uint>) {
- let size = self.table.size();
- (size, Some(size))
- }
- }
-
- impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
- fn clone(&self) -> RawTable<K, V> {
- unsafe {
- let mut new_ht = RawTable::new_uninitialized(self.capacity());
-
- for i in range(0, self.capacity()) {
- match self.peek(i) {
- Empty(_) => {
- *new_ht.hashes.offset(i as int) = EMPTY_BUCKET;
- },
- Full(idx) => {
- let hash = idx.hash().inspect();
- let (k, v) = self.read(&idx);
- *new_ht.hashes.offset(i as int) = hash;
- overwrite(&mut *new_ht.keys.offset(i as int), (*k).clone());
- overwrite(&mut *new_ht.vals.offset(i as int), (*v).clone());
- }
- }
- }
-
- new_ht.size = self.size();
-
- new_ht
- }
- }
- }
-
- #[unsafe_destructor]
- impl<K, V> Drop for RawTable<K, V> {
- fn drop(&mut self) {
- // This is in reverse because we're likely to have partially taken
- // some elements out with `.move_iter()` from the front.
- for i in range_step_inclusive(self.capacity as int - 1, 0, -1) {
- // Check if the size is 0, so we don't do a useless scan when
- // dropping empty tables such as on resize.
- if self.size == 0 { break }
-
- match self.peek(i as uint) {
- Empty(_) => {},
- Full(idx) => { self.take(idx); }
- }
- }
-
- assert_eq!(self.size, 0);
-
- if self.hashes.is_not_null() {
- let hashes_size = self.capacity * size_of::<u64>();
- let keys_size = self.capacity * size_of::<K>();
- let vals_size = self.capacity * size_of::<V>();
- let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
- keys_size, min_align_of::<K>(),
- vals_size, min_align_of::<V>());
-
- unsafe {
- deallocate(self.hashes as *mut u8, size, align);
- // Remember how everything was allocated out of one buffer
- // during initialization? We only need one call to free here.
- }
-
- self.hashes = RawPtr::null();
- }
- }
- }
-}
-
-static INITIAL_LOG2_CAP: uint = 5;
-static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
-
-/// The default behavior of HashMap implements a load factor of 90.9%.
-/// This behavior is characterized by the following conditions:
-///
-/// - if `size * 1.1 < cap < size * 4` then shouldn't resize
-/// - if `cap < minimum_capacity * 2` then shouldn't shrink
-#[deriving(Clone)]
-struct DefaultResizePolicy {
- /// Doubled minimal capacity. The capacity must never drop below
- /// the minimum capacity. (The check happens before the capacity
- /// is potentially halved.)
- minimum_capacity2: uint
-}
-
-impl DefaultResizePolicy {
- fn new(new_capacity: uint) -> DefaultResizePolicy {
- DefaultResizePolicy {
- minimum_capacity2: new_capacity << 1
- }
- }
-
- #[inline]
- fn capacity_range(&self, new_size: uint) -> (uint, uint) {
- ((new_size * 11) / 10, max(new_size << 3, self.minimum_capacity2))
- }
-
- #[inline]
- fn reserve(&mut self, new_capacity: uint) {
- self.minimum_capacity2 = new_capacity << 1;
- }
-}
-
-// The main performance trick in this hashmap is called Robin Hood Hashing.
-// It gains its excellent performance from one key invariant:
-//
-// If an insertion collides with an existing element, and that elements
-// "probe distance" (how far away the element is from its ideal location)
-// is higher than how far we've already probed, swap the elements.
-//
-// This massively lowers variance in probe distance, and allows us to get very
-// high load factors with good performance. The 90% load factor I use is rather
-// conservative.
-//
-// > Why a load factor of approximately 90%?
-//
-// In general, all the distances to initial buckets will converge on the mean.
-// At a load factor of α, the odds of finding the target bucket after k
-// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
-// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
-// this down to make the math easier on the CPU and avoid its FPU.
-// Since on average we start the probing in the middle of a cache line, this
-// strategy pulls in two cache lines of hashes on every lookup. I think that's
-// pretty good, but if you want to trade off some space, it could go down to one
-// cache line on average with an α of 0.84.
-//
-// > Wait, what? Where did you get 1-α^k from?
-//
-// On the first probe, your odds of a collision with an existing element is α.
-// The odds of doing this twice in a row is approximately α^2. For three times,
-// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
-// colliding after k tries is 1-α^k.
-//
-// Future Improvements (FIXME!)
-// ============================
-//
-// Allow the load factor to be changed dynamically and/or at initialization.
-//
-// Also, would it be possible for us to reuse storage when growing the
-// underlying table? This is exactly the use case for 'realloc', and may
-// be worth exploring.
-//
-// Future Optimizations (FIXME!)
-// =============================
-//
-// The paper cited below mentions an implementation which keeps track of the
-// distance-to-initial-bucket histogram. I'm suspicious of this approach because
-// it requires maintaining an internal map. If this map were replaced with a
-// hashmap, it would be faster, but now our data structure is self-referential
-// and blows up. Also, this allows very good first guesses, but array accesses
-// are no longer linear and in one direction, as we have now. There is also
-// memory and cache pressure that this map would entail that would be very
-// difficult to properly see in a microbenchmark.
-//
-// Another possible design choice that I made without any real reason is
-// parameterizing the raw table over keys and values. Technically, all we need
-// is the size and alignment of keys and values, and the code should be just as
-// efficient (well, we might need one for power-of-two size and one for not...).
-// This has the potential to reduce code bloat in rust executables, without
-// really losing anything except 4 words (key size, key alignment, val size,
-// val alignment) which can be passed in to every call of a `RawTable` function.
-// This would definitely be an avenue worth exploring if people start complaining
-// about the size of rust executables.
-//
-// There's also an "optimization" that has been omitted regarding how the
-// hashtable allocates. The vector type has set the expectation that a hashtable
-// which never has an element inserted should not allocate. I'm suspicious of
-// implementing this for hashtables, because supporting it has no performance
-// benefit over using an `Option<HashMap<K, V>>`, and is significantly more
-// complicated.
-
-/// A hash map implementation which uses linear probing with Robin
-/// Hood bucket stealing.
-///
-/// The hashes are all keyed by the task-local random number generator
-/// on creation by default. This means that the ordering of the keys is
-/// randomized, but makes the tables more resistant to
-/// denial-of-service attacks (Hash DoS). This behaviour can be
-/// overridden with one of the constructors.
-///
-/// It is required that the keys implement the `Eq` and `Hash` traits, although
-/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
-///
-/// Relevant papers/articles:
-///
-/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
-/// 2. Emmanuel Goossaert. ["Robin Hood
-/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
-/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
-/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
-///
-/// # Example
-///
-/// ```
-/// use std::collections::HashMap;
-///
-/// // type inference lets us omit an explicit type signature (which
-/// // would be `HashMap<&str, &str>` in this example).
-/// let mut book_reviews = HashMap::new();
-///
-/// // review some books.
-/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
-/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
-/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
-/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
-///
-/// // check for a specific one.
-/// if !book_reviews.contains_key(&("Les Misérables")) {
-/// println!("We've got {} reviews, but Les Misérables ain't one.",
-/// book_reviews.len());
-/// }
-///
-/// // oops, this review has a lot of spelling mistakes, let's delete it.
-/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
-///
-/// // look up the values associated with some keys.
-/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
-/// for book in to_find.iter() {
-/// match book_reviews.find(book) {
-/// Some(review) => println!("{}: {}", *book, *review),
-/// None => println!("{} is unreviewed.", *book)
-/// }
-/// }
-///
-/// // iterate over everything.
-/// for (book, review) in book_reviews.iter() {
-/// println!("{}: \"{}\"", *book, *review);
-/// }
-/// ```
-///
-/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
-/// We must also derive `PartialEq`.
-///
-/// ```
-/// use std::collections::HashMap;
-///
-/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
-/// }
-///
-/// let mut vikings = HashMap::new();
-///
-/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
-/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
-/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
-///
-/// // Use derived implementation to print the vikings.
-/// for (land, viking) in vikings.iter() {
-/// println!("{} at {}", viking, land);
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct HashMap<K, V, H = RandomSipHasher> {
- // All hashes are keyed on these values, to prevent hash collision attacks.
- hasher: H,
-
- table: table::RawTable<K, V>,
-
- // We keep this at the end since it might as well have tail padding.
- resize_policy: DefaultResizePolicy,
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
- // Probe the `idx`th bucket for a given hash, returning the index of the
- // target bucket.
- //
- // This exploits the power-of-two size of the hashtable. As long as this
- // is always true, we can use a bitmask of cap-1 to do modular arithmetic.
- //
- // Prefer using this with increasing values of `idx` rather than repeatedly
- // calling `probe_next`. This reduces data-dependencies between loops, which
- // can help the optimizer, and certainly won't hurt it. `probe_next` is
- // simply for convenience, and is no more efficient than `probe`.
- fn probe(&self, hash: &table::SafeHash, idx: uint) -> uint {
- let hash_mask = self.table.capacity() - 1;
-
- // So I heard a rumor that unsigned overflow is safe in rust..
- ((hash.inspect() as uint) + idx) & hash_mask
- }
-
- // Generate the next probe in a sequence. Prefer using 'probe' by itself,
- // but this can sometimes be useful.
- fn probe_next(&self, probe: uint) -> uint {
- let hash_mask = self.table.capacity() - 1;
- (probe + 1) & hash_mask
- }
-
- fn make_hash<X: Hash<S>>(&self, x: &X) -> table::SafeHash {
- table::make_hash(&self.hasher, x)
- }
-
- /// Get the distance of the bucket at the given index that it lies
- /// from its 'ideal' location.
- ///
- /// In the cited blog posts above, this is called the "distance to
- /// initial bucket", or DIB.
- fn bucket_distance(&self, index_of_elem: &table::FullIndex) -> uint {
- // where the hash of the element that happens to reside at
- // `index_of_elem` tried to place itself first.
- let first_probe_index = self.probe(&index_of_elem.hash(), 0);
-
- let raw_index = index_of_elem.raw_index();
-
- if first_probe_index <= raw_index {
- // probe just went forward
- raw_index - first_probe_index
- } else {
- // probe wrapped around the hashtable
- raw_index + (self.table.capacity() - first_probe_index)
- }
- }
-
- /// Search for a pre-hashed key.
- fn search_hashed_generic(&self, hash: &table::SafeHash, is_match: |&K| -> bool)
- -> Option<table::FullIndex> {
- for num_probes in range(0u, self.table.size()) {
- let probe = self.probe(hash, num_probes);
-
- let idx = match self.table.peek(probe) {
- table::Empty(_) => return None, // hit an empty bucket
- table::Full(idx) => idx
- };
-
- // We can finish the search early if we hit any bucket
- // with a lower distance to initial bucket than we've probed.
- if self.bucket_distance(&idx) < num_probes { return None }
-
- // If the hash doesn't match, it can't be this one..
- if *hash != idx.hash() { continue }
-
- let (k, _) = self.table.read(&idx);
-
- // If the key doesn't match, it can't be this one..
- if !is_match(k) { continue }
-
- return Some(idx);
- }
-
- return None
- }
-
- fn search_hashed(&self, hash: &table::SafeHash, k: &K) -> Option<table::FullIndex> {
- self.search_hashed_generic(hash, |k_| *k == *k_)
- }
-
- fn search_equiv<Q: Hash<S> + Equiv<K>>(&self, q: &Q) -> Option<table::FullIndex> {
- self.search_hashed_generic(&self.make_hash(q), |k| q.equiv(k))
- }
-
- /// Search for a key, yielding the index if it's found in the hashtable.
- /// If you already have the hash for the key lying around, use
- /// search_hashed.
- fn search(&self, k: &K) -> Option<table::FullIndex> {
- self.search_hashed(&self.make_hash(k), k)
- }
-
- fn pop_internal(&mut self, starting_index: table::FullIndex) -> Option<V> {
- let starting_probe = starting_index.raw_index();
-
- let ending_probe = {
- let mut probe = self.probe_next(starting_probe);
- for _ in range(0u, self.table.size()) {
- match self.table.peek(probe) {
- table::Empty(_) => {}, // empty bucket. this is the end of our shifting.
- table::Full(idx) => {
- // Bucket that isn't us, which has a non-zero probe distance.
- // This isn't the ending index, so keep searching.
- if self.bucket_distance(&idx) != 0 {
- probe = self.probe_next(probe);
- continue;
- }
-
- // if we do have a bucket_distance of zero, we're at the end
- // of what we need to shift.
- }
- }
- break;
- }
-
- probe
- };
-
- let (_, _, retval) = self.table.take(starting_index);
-
- let mut probe = starting_probe;
- let mut next_probe = self.probe_next(probe);
-
- // backwards-shift all the elements after our newly-deleted one.
- while next_probe != ending_probe {
- match self.table.peek(next_probe) {
- table::Empty(_) => {
- // nothing to shift in. just empty it out.
- match self.table.peek(probe) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
- },
- table::Full(next_idx) => {
- // something to shift. move it over!
- let next_hash = next_idx.hash();
- let (_, next_key, next_val) = self.table.take(next_idx);
- match self.table.peek(probe) {
- table::Empty(idx) => {
- self.table.put(idx, next_hash, next_key, next_val);
- },
- table::Full(idx) => {
- let (emptyidx, _, _) = self.table.take(idx);
- self.table.put(emptyidx, next_hash, next_key, next_val);
- }
- }
- }
- }
-
- probe = next_probe;
- next_probe = self.probe_next(next_probe);
- }
-
- // Done the backwards shift, but there's still an element left!
- // Empty it out.
- match self.table.peek(probe) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
-
- // Now we're done all our shifting. Return the value we grabbed
- // earlier.
- return Some(retval);
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
- /// Return the number of elements in the map.
- fn len(&self) -> uint { self.table.size() }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
- /// Clear the map, removing all key-value pairs. Keeps the allocated memory
- /// for reuse.
- fn clear(&mut self) {
- // Prevent reallocations from happening from now on. Makes it possible
- // for the map to be reused but has a downside: reserves permanently.
- self.resize_policy.reserve(self.table.size());
-
- for i in range(0, self.table.capacity()) {
- match self.table.peek(i) {
- table::Empty(_) => {},
- table::Full(idx) => { self.table.take(idx); }
- }
- }
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
- fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
- self.search(k).map(|idx| {
- let (_, v) = self.table.read(&idx);
- v
- })
- }
-
- fn contains_key(&self, k: &K) -> bool {
- self.search(k).is_some()
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
- fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
- match self.search(k) {
- None => None,
- Some(idx) => {
- let (_, v) = self.table.read_mut(&idx);
- Some(v)
- }
- }
- }
-
- fn swap(&mut self, k: K, v: V) -> Option<V> {
- let hash = self.make_hash(&k);
- let potential_new_size = self.table.size() + 1;
- self.make_some_room(potential_new_size);
-
- for dib in range_inclusive(0u, self.table.size()) {
- let probe = self.probe(&hash, dib);
-
- let idx = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Found a hole!
- self.table.put(idx, hash, k, v);
- return None;
- },
- table::Full(idx) => idx
- };
-
- if idx.hash() == hash {
- let (bucket_k, bucket_v) = self.table.read_mut(&idx);
- if k == *bucket_k {
- // Found an existing value.
- return Some(replace(bucket_v, v));
- }
- }
-
- let probe_dib = self.bucket_distance(&idx);
-
- if probe_dib < dib {
- // Found a luckier bucket. This implies that the key does not
- // already exist in the hashtable. Just do a robin hood
- // insertion, then.
- self.robin_hood(idx, probe_dib, hash, k, v);
- return None;
- }
- }
-
- // We really shouldn't be here.
- fail!("Internal HashMap error: Out of space.");
- }
-
- fn pop(&mut self, k: &K) -> Option<V> {
- if self.table.size() == 0 {
- return None
- }
-
- let potential_new_size = self.table.size() - 1;
- self.make_some_room(potential_new_size);
-
- let starting_index = match self.search(k) {
- Some(idx) => idx,
- None => return None,
- };
-
- self.pop_internal(starting_index)
- }
-
-}
-
-impl<K: Hash + Eq, V> HashMap<K, V, RandomSipHasher> {
- /// Create an empty HashMap.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::new();
- /// ```
- #[inline]
- pub fn new() -> HashMap<K, V, RandomSipHasher> {
- HashMap::with_capacity(INITIAL_CAPACITY)
- }
-
- /// Creates an empty hash map with the given initial capacity.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
- /// ```
- #[inline]
- pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomSipHasher> {
- let hasher = RandomSipHasher::new();
- HashMap::with_capacity_and_hasher(capacity, hasher)
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
- /// Creates an empty hashmap which will use the given hasher to hash keys.
- ///
- /// The creates map has the default initial capacity.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut map = HashMap::with_hasher(h);
- /// map.insert(1i, 2u);
- /// ```
- #[inline]
- pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
- HashMap::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
- }
-
- /// Create an empty HashMap with space for at least `capacity`
- /// elements, using `hasher` to hash the keys.
- ///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut map = HashMap::with_capacity_and_hasher(10, h);
- /// map.insert(1i, 2u);
- /// ```
- #[inline]
- pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
- let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
- HashMap {
- hasher: hasher,
- resize_policy: DefaultResizePolicy::new(cap),
- table: table::RawTable::new(cap),
- }
- }
-
- /// The hashtable will never try to shrink below this size. You can use
- /// this function to reduce reallocations if your hashtable frequently
- /// grows and shrinks by large amounts.
- ///
- /// This function has no effect on the operational semantics of the
- /// hashtable, only on performance.
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map: HashMap<&str, int> = HashMap::new();
- /// map.reserve(10);
- /// ```
- pub fn reserve(&mut self, new_minimum_capacity: uint) {
- let cap = num::next_power_of_two(
- max(INITIAL_CAPACITY, new_minimum_capacity));
-
- self.resize_policy.reserve(cap);
-
- if self.table.capacity() < cap {
- self.resize(cap);
- }
- }
-
- /// Resizes the internal vectors to a new capacity. It's your responsibility to:
- /// 1) Make sure the new capacity is enough for all the elements, accounting
- /// for the load factor.
- /// 2) Ensure new_capacity is a power of two.
- fn resize(&mut self, new_capacity: uint) {
- assert!(self.table.size() <= new_capacity);
- assert!(num::is_power_of_two(new_capacity));
-
- let old_table = replace(&mut self.table, table::RawTable::new(new_capacity));
- let old_size = old_table.size();
-
- for (h, k, v) in old_table.move_iter() {
- self.insert_hashed_nocheck(h, k, v);
- }
-
- assert_eq!(self.table.size(), old_size);
- }
-
- /// Performs any necessary resize operations, such that there's space for
- /// new_size elements.
- fn make_some_room(&mut self, new_size: uint) {
- let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
- let cap = self.table.capacity();
-
- // An invalid value shouldn't make us run out of space.
- debug_assert!(grow_at >= new_size);
-
- if cap <= grow_at {
- let new_capacity = cap << 1;
- self.resize(new_capacity);
- } else if shrink_at <= cap {
- let new_capacity = cap >> 1;
- self.resize(new_capacity);
- }
- }
-
- /// Perform robin hood bucket stealing at the given 'index'. You must
- /// also pass that probe's "distance to initial bucket" so we don't have
- /// to recalculate it, as well as the total number of probes already done
- /// so we have some sort of upper bound on the number of probes to do.
- ///
- /// 'hash', 'k', and 'v' are the elements to robin hood into the hashtable.
- fn robin_hood(&mut self, mut index: table::FullIndex, mut dib_param: uint,
- mut hash: table::SafeHash, mut k: K, mut v: V) {
- 'outer: loop {
- let (old_hash, old_key, old_val) = {
- let (old_hash_ref, old_key_ref, old_val_ref) =
- self.table.read_all_mut(&index);
-
- let old_hash = replace(old_hash_ref, hash);
- let old_key = replace(old_key_ref, k);
- let old_val = replace(old_val_ref, v);
-
- (old_hash, old_key, old_val)
- };
-
- let mut probe = self.probe_next(index.raw_index());
-
- for dib in range(dib_param + 1, self.table.size()) {
- let full_index = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Finally. A hole!
- self.table.put(idx, old_hash, old_key, old_val);
- return;
- },
- table::Full(idx) => idx
- };
-
- let probe_dib = self.bucket_distance(&full_index);
-
- // Robin hood! Steal the spot.
- if probe_dib < dib {
- index = full_index;
- dib_param = probe_dib;
- hash = old_hash;
- k = old_key;
- v = old_val;
- continue 'outer;
- }
-
- probe = self.probe_next(probe);
- }
-
- fail!("HashMap fatal error: 100% load factor?");
- }
- }
-
- /// Insert a pre-hashed key-value pair, without first checking
- /// that there's enough room in the buckets. Returns a reference to the
- /// newly insert value.
- ///
- /// If the key already exists, the hashtable will be returned untouched
- /// and a reference to the existing element will be returned.
- fn insert_hashed_nocheck<'a>(
- &'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
-
- for dib in range_inclusive(0u, self.table.size()) {
- let probe = self.probe(&hash, dib);
-
- let idx = match self.table.peek(probe) {
- table::Empty(idx) => {
- // Found a hole!
- let fullidx = self.table.put(idx, hash, k, v);
- let (_, val) = self.table.read_mut(&fullidx);
- return val;
- },
- table::Full(idx) => idx
- };
-
- if idx.hash() == hash {
- let (bucket_k, bucket_v) = self.table.read_mut(&idx);
- // FIXME #12147 the conditional return confuses
- // borrowck if we return bucket_v directly
- let bv: *mut V = bucket_v;
- if k == *bucket_k {
- // Key already exists. Get its reference.
- return unsafe {&mut *bv};
- }
- }
-
- let probe_dib = self.bucket_distance(&idx);
-
- if probe_dib < dib {
- // Found a luckier bucket than me. Better steal his spot.
- self.robin_hood(idx, probe_dib, hash, k, v);
-
- // Now that it's stolen, just read the value's pointer
- // right out of the table!
- match self.table.peek(probe) {
- table::Empty(_) => fail!("Just stole a spot, but now that spot's empty."),
- table::Full(idx) => {
- let (_, v) = self.table.read_mut(&idx);
- return v;
- }
- }
- }
- }
-
- // We really shouldn't be here.
- fail!("Internal HashMap error: Out of space.");
- }
-
- /// Inserts an element which has already been hashed, returning a reference
- /// to that element inside the hashtable. This is more efficient that using
- /// `insert`, since the key will not be rehashed.
- fn insert_hashed<'a>(&'a mut self, hash: table::SafeHash, k: K, v: V) -> &'a mut V {
- let potential_new_size = self.table.size() + 1;
- self.make_some_room(potential_new_size);
- self.insert_hashed_nocheck(hash, k, v)
- }
-
- /// Return the value corresponding to the key in the map, or insert
- /// and return the value if it doesn't exist.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 1i with key "a"
- /// assert_eq!(*map.find_or_insert("a", 1i), 1);
- ///
- /// // Find the existing key
- /// assert_eq!(*map.find_or_insert("a", -2), 1);
- /// ```
- pub fn find_or_insert<'a>(&'a mut self, k: K, v: V) -> &'a mut V {
- self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
- }
-
- /// Return the value corresponding to the key in the map, or create,
- /// insert, and return a new value if it doesn't exist.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 10 with key 2
- /// assert_eq!(*map.find_or_insert_with(2i, |&key| 5 * key as uint), 10u);
- ///
- /// // Find the existing key
- /// assert_eq!(*map.find_or_insert_with(2, |&key| key as uint), 10);
- /// ```
- pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
- -> &'a mut V {
- self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
- }
-
- /// Insert a key-value pair into the map if the key is not already present.
- /// Otherwise, modify the existing value for the key.
- /// Returns the new or modified value for the key.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- /// let mut map = HashMap::new();
- ///
- /// // Insert 2 with key "a"
- /// assert_eq!(*map.insert_or_update_with("a", 2u, |_key, val| *val = 3), 2);
- ///
- /// // Update and return the existing value
- /// assert_eq!(*map.insert_or_update_with("a", 9, |_key, val| *val = 7), 7);
- /// assert_eq!(map["a"], 7);
- /// ```
- pub fn insert_or_update_with<'a>(
- &'a mut self,
- k: K,
- v: V,
- f: |&K, &mut V|)
- -> &'a mut V {
- self.find_with_or_insert_with(k, v, |k, v, _a| f(k, v), |_k, a| a)
- }
-
- /// Modify and return the value corresponding to the key in the map, or
- /// insert and return a new value if it doesn't exist.
- ///
- /// This method allows for all insertion behaviours of a hashmap;
- /// see methods like
- /// [`insert`](../trait.MutableMap.html#tymethod.insert),
- /// [`find_or_insert`](#method.find_or_insert) and
- /// [`insert_or_update_with`](#method.insert_or_update_with)
- /// for less general and more friendly variations of this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// // map some strings to vectors of strings
- /// let mut map = HashMap::new();
- /// map.insert("a key", vec!["value"]);
- /// map.insert("z key", vec!["value"]);
- ///
- /// let new = vec!["a key", "b key", "z key"];
- ///
- /// for k in new.move_iter() {
- /// map.find_with_or_insert_with(
- /// k, "new value",
- /// // if the key does exist either prepend or append this
- /// // new value based on the first letter of the key.
- /// |key, already, new| {
- /// if key.as_slice().starts_with("z") {
- /// already.insert(0, new);
- /// } else {
- /// already.push(new);
- /// }
- /// },
- /// // if the key doesn't exist in the map yet, add it in
- /// // the obvious way.
- /// |_k, v| vec![v]);
- /// }
- ///
- /// assert_eq!(map.len(), 3);
- /// assert_eq!(map["a key"], vec!["value", "new value"]);
- /// assert_eq!(map["b key"], vec!["new value"]);
- /// assert_eq!(map["z key"], vec!["new value", "value"]);
- /// ```
- pub fn find_with_or_insert_with<'a, A>(&'a mut self,
- k: K,
- a: A,
- found: |&K, &mut V, A|,
- not_found: |&K, A| -> V)
- -> &'a mut V {
- let hash = self.make_hash(&k);
- match self.search_hashed(&hash, &k) {
- None => {
- let v = not_found(&k, a);
- self.insert_hashed(hash, k, v)
- },
- Some(idx) => {
- let (_, v_ref) = self.table.read_mut(&idx);
- found(&k, v_ref, a);
- v_ref
- }
- }
- }
-
- /// Retrieves a value for the given key.
- /// See [`find`](../trait.Map.html#tymethod.find) for a non-failing alternative.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// #![allow(deprecated)]
- ///
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// assert_eq!(map.get(&"a"), &1);
- /// ```
- #[deprecated = "prefer indexing instead, e.g., map[key]"]
- pub fn get<'a>(&'a self, k: &K) -> &'a V {
- match self.find(k) {
- Some(v) => v,
- None => fail!("no entry found for key")
- }
- }
-
- /// Retrieves a mutable value for the given key.
- /// See [`find_mut`](../trait.MutableMap.html#tymethod.find_mut) for a non-failing alternative.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// {
- /// // val will freeze map to prevent usage during its lifetime
- /// let val = map.get_mut(&"a");
- /// *val = 40;
- /// }
- /// assert_eq!(map["a"], 40);
- ///
- /// // A more direct way could be:
- /// *map.get_mut(&"a") = -2;
- /// assert_eq!(map["a"], -2);
- /// ```
- pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
- match self.find_mut(k) {
- Some(v) => v,
- None => fail!("no entry found for key")
- }
- }
-
- /// Return true if the map contains a value for the specified key,
- /// using equivalence.
- ///
- /// See [pop_equiv](#method.pop_equiv) for an extended example.
- pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
- self.search_equiv(key).is_some()
- }
-
- /// Return the value corresponding to the key in the map, using
- /// equivalence.
- ///
- /// See [pop_equiv](#method.pop_equiv) for an extended example.
- pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
- match self.search_equiv(k) {
- None => None,
- Some(idx) => {
- let (_, v_ref) = self.table.read(&idx);
- Some(v_ref)
- }
- }
- }
-
- /// Remove an equivalent key from the map, returning the value at the
- /// key if the key was previously in the map.
- ///
- /// # Example
- ///
- /// This is a slightly silly example where we define the number's parity as
- /// the equivalence class. It is important that the values hash the same,
- /// which is why we override `Hash`.
- ///
- /// ```
- /// use std::collections::HashMap;
- /// use std::hash::Hash;
- /// use std::hash::sip::SipState;
- ///
- /// #[deriving(Eq, PartialEq)]
- /// struct EvenOrOdd {
- /// num: uint
- /// };
- ///
- /// impl Hash for EvenOrOdd {
- /// fn hash(&self, state: &mut SipState) {
- /// let parity = self.num % 2;
- /// parity.hash(state);
- /// }
- /// }
- ///
- /// impl Equiv<EvenOrOdd> for EvenOrOdd {
- /// fn equiv(&self, other: &EvenOrOdd) -> bool {
- /// self.num % 2 == other.num % 2
- /// }
- /// }
- ///
- /// let mut map = HashMap::new();
- /// map.insert(EvenOrOdd { num: 3 }, "foo");
- ///
- /// assert!(map.contains_key_equiv(&EvenOrOdd { num: 1 }));
- /// assert!(!map.contains_key_equiv(&EvenOrOdd { num: 4 }));
- ///
- /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 5 }), Some(&"foo"));
- /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 2 }), None);
- ///
- /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 1 }), Some("foo"));
- /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 2 }), None);
- ///
- /// ```
- #[experimental]
- pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
- if self.table.size() == 0 {
- return None
- }
-
- let potential_new_size = self.table.size() - 1;
- self.make_some_room(potential_new_size);
-
- let starting_index = match self.search_equiv(k) {
- Some(idx) => idx,
- None => return None,
- };
-
- self.pop_internal(starting_index)
- }
-
- /// An iterator visiting all keys in arbitrary order.
- /// Iterator element type is `&'a K`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for key in map.keys() {
- /// println!("{}", key);
- /// }
- /// ```
- pub fn keys<'a>(&'a self) -> Keys<'a, K, V> {
- self.iter().map(|(k, _v)| k)
- }
-
- /// An iterator visiting all values in arbitrary order.
- /// Iterator element type is `&'a V`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for key in map.values() {
- /// println!("{}", key);
- /// }
- /// ```
- pub fn values<'a>(&'a self) -> Values<'a, K, V> {
- self.iter().map(|(_k, v)| v)
- }
-
- /// An iterator visiting all key-value pairs in arbitrary order.
- /// Iterator element type is `(&'a K, &'a V)`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// for (key, val) in map.iter() {
- /// println!("key: {} val: {}", key, val);
- /// }
- /// ```
- pub fn iter<'a>(&'a self) -> Entries<'a, K, V> {
- self.table.iter()
- }
-
- /// An iterator visiting all key-value pairs in arbitrary order,
- /// with mutable references to the values.
- /// Iterator element type is `(&'a K, &'a mut V)`.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// // Update all values
- /// for (_, val) in map.mut_iter() {
- /// *val *= 2;
- /// }
- ///
- /// for (key, val) in map.iter() {
- /// println!("key: {} val: {}", key, val);
- /// }
- /// ```
- pub fn mut_iter<'a>(&'a mut self) -> MutEntries<'a, K, V> {
- self.table.mut_iter()
- }
-
- /// Creates a consuming iterator, that is, one that moves each key-value
- /// pair out of the map in arbitrary order. The map cannot be used after
- /// calling this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map = HashMap::new();
- /// map.insert("a", 1i);
- /// map.insert("b", 2);
- /// map.insert("c", 3);
- ///
- /// // Not possible with .iter()
- /// let vec: Vec<(&str, int)> = map.move_iter().collect();
- /// ```
- pub fn move_iter(self) -> MoveEntries<K, V> {
- self.table.move_iter().map(|(_, k, v)| (k, v))
- }
-}
-
-impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
- /// Return a copy of the value corresponding to the key.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map: HashMap<uint, String> = HashMap::new();
- /// map.insert(1u, "foo".to_string());
- /// let s: String = map.find_copy(&1).unwrap();
- /// ```
- pub fn find_copy(&self, k: &K) -> Option<V> {
- self.find(k).map(|v| (*v).clone())
- }
-
- /// Return a copy of the value corresponding to the key.
- ///
- /// # Failure
- ///
- /// Fails if the key is not present.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashMap;
- ///
- /// let mut map: HashMap<uint, String> = HashMap::new();
- /// map.insert(1u, "foo".to_string());
- /// let s: String = map.get_copy(&1);
- /// ```
- pub fn get_copy(&self, k: &K) -> V {
- (*self.get(k)).clone()
- }
-}
-
-impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
- fn eq(&self, other: &HashMap<K, V, H>) -> bool {
- if self.len() != other.len() { return false; }
-
- self.iter()
- .all(|(key, value)| {
- match other.find(key) {
- None => false,
- Some(v) => *value == *v
- }
- })
- }
-}
-
-impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
-
-impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- try!(write!(f, "{{"));
-
- for (i, (k, v)) in self.iter().enumerate() {
- if i != 0 { try!(write!(f, ", ")); }
- try!(write!(f, "{}: {}", *k, *v));
- }
-
- write!(f, "}}")
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
- fn default() -> HashMap<K, V, H> {
- HashMap::with_hasher(Default::default())
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Index<K, V> for HashMap<K, V, H> {
- #[inline]
- fn index<'a>(&'a self, index: &K) -> &'a V {
- self.get(index)
- }
-}
-
-// FIXME(#12825) Indexing will always try IndexMut first and that causes issues.
-/*impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> ops::IndexMut<K, V> for HashMap<K, V, H> {
- #[inline]
- fn index_mut<'a>(&'a mut self, index: &K) -> &'a mut V {
- self.get_mut(index)
- }
-}*/
-
-/// HashMap iterator
-pub type Entries<'a, K, V> = table::Entries<'a, K, V>;
-
-/// HashMap mutable values iterator
-pub type MutEntries<'a, K, V> = table::MutEntries<'a, K, V>;
-
-/// HashMap move iterator
-pub type MoveEntries<K, V> =
- iter::Map<'static, (table::SafeHash, K, V), (K, V), table::MoveEntries<K, V>>;
-
-/// HashMap keys iterator
-pub type Keys<'a, K, V> =
- iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
-
-/// HashMap values iterator
-pub type Values<'a, K, V> =
- iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
- fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
- let (lower, _) = iter.size_hint();
- let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
- map.extend(iter);
- map
- }
-}
-
-impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
- fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
- for (k, v) in iter {
- self.insert(k, v);
- }
- }
-}
-
-/// HashSet iterator
-pub type SetItems<'a, K> =
- iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
-
-/// HashSet move iterator
-pub type SetMoveItems<K> =
- iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
-
-/// An implementation of a hash set using the underlying representation of a
-/// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
-/// requires that the elements implement the `Eq` and `Hash` traits.
-///
-/// # Example
-///
-/// ```
-/// use std::collections::HashSet;
-///
-/// // Type inference lets us omit an explicit type signature (which
-/// // would be `HashSet<&str>` in this example).
-/// let mut books = HashSet::new();
-///
-/// // Add some books.
-/// books.insert("A Dance With Dragons");
-/// books.insert("To Kill a Mockingbird");
-/// books.insert("The Odyssey");
-/// books.insert("The Great Gatsby");
-///
-/// // Check for a specific one.
-/// if !books.contains(&("The Winds of Winter")) {
-/// println!("We have {} books, but The Winds of Winter ain't one.",
-/// books.len());
-/// }
-///
-/// // Remove a book.
-/// books.remove(&"The Odyssey");
-///
-/// // Iterate over everything.
-/// for book in books.iter() {
-/// println!("{}", *book);
-/// }
-/// ```
-///
-/// The easiest way to use `HashSet` with a custom type is to derive
-/// `Eq` and `Hash`. We must also derive `PartialEq`, this will in the
-/// future be implied by `Eq`.
-///
-/// ```rust
-/// use std::collections::HashSet;
-///
-/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
-/// }
-///
-/// let mut vikings = HashSet::new();
-///
-/// vikings.insert(Viking { name: "Einar", power: 9u });
-/// vikings.insert(Viking { name: "Einar", power: 9u });
-/// vikings.insert(Viking { name: "Olaf", power: 4u });
-/// vikings.insert(Viking { name: "Harald", power: 8u });
-///
-/// // Use derived implementation to print the vikings.
-/// for x in vikings.iter() {
-/// println!("{}", x);
-/// }
-/// ```
-#[deriving(Clone)]
-pub struct HashSet<T, H = RandomSipHasher> {
- map: HashMap<T, (), H>
-}
-
-impl<T: Hash + Eq> HashSet<T, RandomSipHasher> {
- /// Create an empty HashSet.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::new();
- /// ```
- #[inline]
- pub fn new() -> HashSet<T, RandomSipHasher> {
- HashSet::with_capacity(INITIAL_CAPACITY)
- }
-
- /// Create an empty HashSet with space for at least `n` elements in
- /// the hash table.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::with_capacity(10);
- /// ```
- #[inline]
- pub fn with_capacity(capacity: uint) -> HashSet<T, RandomSipHasher> {
- HashSet { map: HashMap::with_capacity(capacity) }
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
- /// Creates a new empty hash set which will use the given hasher to hash
- /// keys.
- ///
- /// The hash set is also created with the default initial capacity.
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut set = HashSet::with_hasher(h);
- /// set.insert(2u);
- /// ```
- #[inline]
- pub fn with_hasher(hasher: H) -> HashSet<T, H> {
- HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
- }
-
- /// Create an empty HashSet with space for at least `capacity`
- /// elements in the hash table, using `hasher` to hash the keys.
- ///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
- ///
- /// # Example
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::sip::SipHasher;
- ///
- /// let h = SipHasher::new();
- /// let mut set = HashSet::with_capacity_and_hasher(10u, h);
- /// set.insert(1i);
- /// ```
- #[inline]
- pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
- HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
- }
-
- /// Reserve space for at least `n` elements in the hash table.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set: HashSet<int> = HashSet::new();
- /// set.reserve(10);
- /// ```
- pub fn reserve(&mut self, n: uint) {
- self.map.reserve(n)
- }
-
- /// Returns true if the hash set contains a value equivalent to the
- /// given query value.
- ///
- /// # Example
- ///
- /// This is a slightly silly example where we define the number's
- /// parity as the equivalence class. It is important that the
- /// values hash the same, which is why we implement `Hash`.
- ///
- /// ```rust
- /// use std::collections::HashSet;
- /// use std::hash::Hash;
- /// use std::hash::sip::SipState;
- ///
- /// #[deriving(Eq, PartialEq)]
- /// struct EvenOrOdd {
- /// num: uint
- /// };
- ///
- /// impl Hash for EvenOrOdd {
- /// fn hash(&self, state: &mut SipState) {
- /// let parity = self.num % 2;
- /// parity.hash(state);
- /// }
- /// }
- ///
- /// impl Equiv<EvenOrOdd> for EvenOrOdd {
- /// fn equiv(&self, other: &EvenOrOdd) -> bool {
- /// self.num % 2 == other.num % 2
- /// }
- /// }
- ///
- /// let mut set = HashSet::new();
- /// set.insert(EvenOrOdd { num: 3u });
- ///
- /// assert!(set.contains_equiv(&EvenOrOdd { num: 3u }));
- /// assert!(set.contains_equiv(&EvenOrOdd { num: 5u }));
- /// assert!(!set.contains_equiv(&EvenOrOdd { num: 4u }));
- /// assert!(!set.contains_equiv(&EvenOrOdd { num: 2u }));
- ///
- /// ```
- pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
- self.map.contains_key_equiv(value)
- }
-
- /// An iterator visiting all elements in arbitrary order.
- /// Iterator element type is &'a T.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set = HashSet::new();
- /// set.insert("a");
- /// set.insert("b");
- ///
- /// // Will print in an arbitrary order.
- /// for x in set.iter() {
- /// println!("{}", x);
- /// }
- /// ```
- pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
- self.map.keys()
- }
-
- /// Creates a consuming iterator, that is, one that moves each value out
- /// of the set in arbitrary order. The set cannot be used after calling
- /// this.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let mut set = HashSet::new();
- /// set.insert("a".to_string());
- /// set.insert("b".to_string());
- ///
- /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
- /// let v: Vec<String> = set.move_iter().collect();
- ///
- /// // Will print in an arbitrary order.
- /// for x in v.iter() {
- /// println!("{}", x);
- /// }
- /// ```
- pub fn move_iter(self) -> SetMoveItems<T> {
- self.map.move_iter().map(|(k, _)| k)
- }
-
- /// Visit the values representing the difference.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Can be seen as `a - b`.
- /// for x in a.difference(&b) {
- /// println!("{}", x); // Print 1
- /// }
- ///
- /// let diff: HashSet<int> = a.difference(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [1i].iter().map(|&x| x).collect());
- ///
- /// // Note that difference is not symmetric,
- /// // and `b - a` means something else:
- /// let diff: HashSet<int> = b.difference(&a).map(|&x| x).collect();
- /// assert_eq!(diff, [4i].iter().map(|&x| x).collect());
- /// ```
- pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
- Repeat::new(other).zip(self.iter())
- .filter_map(|(other, elt)| {
- if !other.contains(elt) { Some(elt) } else { None }
- })
- }
-
- /// Visit the values representing the symmetric difference.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 1, 4 in arbitrary order.
- /// for x in a.symmetric_difference(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff1: HashSet<int> = a.symmetric_difference(&b).map(|&x| x).collect();
- /// let diff2: HashSet<int> = b.symmetric_difference(&a).map(|&x| x).collect();
- ///
- /// assert_eq!(diff1, diff2);
- /// assert_eq!(diff1, [1i, 4].iter().map(|&x| x).collect());
- /// ```
- pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
- -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
- self.difference(other).chain(other.difference(self))
- }
-
- /// Visit the values representing the intersection.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 2, 3 in arbitrary order.
- /// for x in a.intersection(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff: HashSet<int> = a.intersection(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [2i, 3].iter().map(|&x| x).collect());
- /// ```
- pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
- -> SetAlgebraItems<'a, T, H> {
- Repeat::new(other).zip(self.iter())
- .filter_map(|(other, elt)| {
- if other.contains(elt) { Some(elt) } else { None }
- })
- }
-
- /// Visit the values representing the union.
- ///
- /// # Example
- ///
- /// ```
- /// use std::collections::HashSet;
- /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
- /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order.
- /// for x in a.union(&b) {
- /// println!("{}", x);
- /// }
- ///
- /// let diff: HashSet<int> = a.union(&b).map(|&x| x).collect();
- /// assert_eq!(diff, [1i, 2, 3, 4].iter().map(|&x| x).collect());
- /// ```
- pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
- -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
- self.iter().chain(other.difference(self))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
- fn eq(&self, other: &HashSet<T, H>) -> bool {
- if self.len() != other.len() { return false; }
-
- self.iter().all(|key| other.contains(key))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
- fn len(&self) -> uint { self.map.len() }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
- fn clear(&mut self) { self.map.clear() }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
- fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
-
- fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
- self.iter().all(|v| !other.contains(v))
- }
-
- fn is_subset(&self, other: &HashSet<T, H>) -> bool {
- self.iter().all(|v| other.contains(v))
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
- fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
-
- fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
-}
-
-
-impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- try!(write!(f, "{{"));
-
- for (i, x) in self.iter().enumerate() {
- if i != 0 { try!(write!(f, ", ")); }
- try!(write!(f, "{}", *x));
- }
-
- write!(f, "}}")
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
- fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
- let (lower, _) = iter.size_hint();
- let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
- set.extend(iter);
- set
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
- fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
- for k in iter {
- self.insert(k);
- }
- }
-}
-
-impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
- fn default() -> HashSet<T, H> {
- HashSet::with_hasher(Default::default())
- }
-}
-
-// `Repeat` is used to feed the filter closure an explicit capture
-// of a reference to the other set
-/// Set operations iterator
-pub type SetAlgebraItems<'a, T, H> =
- FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
- Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
-
-#[cfg(test)]
-mod test_map {
- use prelude::*;
-
- use super::HashMap;
- use cmp::Equiv;
- use hash;
- use iter::{Iterator,range_inclusive,range_step_inclusive};
- use cell::RefCell;
-
- struct KindaIntLike(int);
-
- impl Equiv<int> for KindaIntLike {
- fn equiv(&self, other: &int) -> bool {
- let KindaIntLike(this) = *self;
- this == *other
- }
- }
- impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
- fn hash(&self, state: &mut S) {
- let KindaIntLike(this) = *self;
- this.hash(state)
- }
- }
-
- #[test]
- fn test_create_capacity_zero() {
- let mut m = HashMap::with_capacity(0);
-
- assert!(m.insert(1i, 1i));
-
- assert!(m.contains_key(&1));
- assert!(!m.contains_key(&0));
- }
-
- #[test]
- fn test_insert() {
- let mut m = HashMap::new();
- assert_eq!(m.len(), 0);
- assert!(m.insert(1i, 2i));
- assert_eq!(m.len(), 1);
- assert!(m.insert(2i, 4i));
- assert_eq!(m.len(), 2);
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&2).unwrap(), 4);
- }
-
- local_data_key!(drop_vector: RefCell<Vec<int>>)
-
- #[deriving(Hash, PartialEq, Eq)]
- struct Dropable {
- k: uint
- }
-
-
- impl Dropable {
- fn new(k: uint) -> Dropable {
- let v = drop_vector.get().unwrap();
- v.borrow_mut().as_mut_slice()[k] += 1;
-
- Dropable { k: k }
- }
- }
-
- impl Drop for Dropable {
- fn drop(&mut self) {
- let v = drop_vector.get().unwrap();
- v.borrow_mut().as_mut_slice()[self.k] -= 1;
- }
- }
-
- #[test]
- fn test_drops() {
- drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
-
- {
- let mut m = HashMap::new();
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- }
- drop(v);
-
- for i in range(0u, 100) {
- let d1 = Dropable::new(i);
- let d2 = Dropable::new(i+100);
- m.insert(d1, d2);
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 1);
- }
- drop(v);
-
- for i in range(0u, 50) {
- let k = Dropable::new(i);
- let v = m.pop(&k);
-
- assert!(v.is_some());
-
- let v = drop_vector.get().unwrap();
- assert_eq!(v.borrow().as_slice()[i], 1);
- assert_eq!(v.borrow().as_slice()[i+100], 1);
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 50) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- assert_eq!(v.borrow().as_slice()[i+100], 0);
- }
-
- for i in range(50u, 100) {
- assert_eq!(v.borrow().as_slice()[i], 1);
- assert_eq!(v.borrow().as_slice()[i+100], 1);
- }
- }
-
- let v = drop_vector.get().unwrap();
- for i in range(0u, 200) {
- assert_eq!(v.borrow().as_slice()[i], 0);
- }
- }
-
- #[test]
- fn test_empty_pop() {
- let mut m: HashMap<int, bool> = HashMap::new();
- assert_eq!(m.pop(&0), None);
- }
-
- #[test]
- fn test_lots_of_insertions() {
- let mut m = HashMap::new();
-
- // Try this a few times to make sure we never screw up the hashmap's
- // internal state.
- for _ in range(0i, 10) {
- assert!(m.is_empty());
-
- for i in range_inclusive(1i, 1000) {
- assert!(m.insert(i, i));
-
- for j in range_inclusive(1, i) {
- let r = m.find(&j);
- assert_eq!(r, Some(&j));
- }
-
- for j in range_inclusive(i+1, 1000) {
- let r = m.find(&j);
- assert_eq!(r, None);
- }
- }
-
- for i in range_inclusive(1001i, 2000) {
- assert!(!m.contains_key(&i));
- }
-
- // remove forwards
- for i in range_inclusive(1i, 1000) {
- assert!(m.remove(&i));
-
- for j in range_inclusive(1, i) {
- assert!(!m.contains_key(&j));
- }
-
- for j in range_inclusive(i+1, 1000) {
- assert!(m.contains_key(&j));
- }
- }
-
- for i in range_inclusive(1i, 1000) {
- assert!(!m.contains_key(&i));
- }
-
- for i in range_inclusive(1i, 1000) {
- assert!(m.insert(i, i));
- }
-
- // remove backwards
- for i in range_step_inclusive(1000i, 1, -1) {
- assert!(m.remove(&i));
-
- for j in range_inclusive(i, 1000) {
- assert!(!m.contains_key(&j));
- }
-
- for j in range_inclusive(1, i-1) {
- assert!(m.contains_key(&j));
- }
- }
- }
- }
-
- #[test]
- fn test_find_mut() {
- let mut m = HashMap::new();
- assert!(m.insert(1i, 12i));
- assert!(m.insert(2i, 8i));
- assert!(m.insert(5i, 14i));
- let new = 100;
- match m.find_mut(&5) {
- None => fail!(), Some(x) => *x = new
- }
- assert_eq!(m.find(&5), Some(&new));
- }
-
- #[test]
- fn test_insert_overwrite() {
- let mut m = HashMap::new();
- assert!(m.insert(1i, 2i));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert!(!m.insert(1i, 3i));
- assert_eq!(*m.find(&1).unwrap(), 3);
- }
-
- #[test]
- fn test_insert_conflicts() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert!(m.insert(5i, 3i));
- assert!(m.insert(9i, 4i));
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert_eq!(*m.find(&1).unwrap(), 2);
- }
-
- #[test]
- fn test_conflict_remove() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert!(m.insert(5, 3));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert!(m.insert(9, 4));
- assert_eq!(*m.find(&1).unwrap(), 2);
- assert_eq!(*m.find(&5).unwrap(), 3);
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert!(m.remove(&1));
- assert_eq!(*m.find(&9).unwrap(), 4);
- assert_eq!(*m.find(&5).unwrap(), 3);
- }
-
- #[test]
- fn test_is_empty() {
- let mut m = HashMap::with_capacity(4);
- assert!(m.insert(1i, 2i));
- assert!(!m.is_empty());
- assert!(m.remove(&1));
- assert!(m.is_empty());
- }
-
- #[test]
- fn test_pop() {
- let mut m = HashMap::new();
- m.insert(1i, 2i);
- assert_eq!(m.pop(&1), Some(2));
- assert_eq!(m.pop(&1), None);
- }
-
- #[test]
- #[allow(experimental)]
- fn test_pop_equiv() {
- let mut m = HashMap::new();
- m.insert(1i, 2i);
- assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
- assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
- }
-
- #[test]
- fn test_swap() {
- let mut m = HashMap::new();
- assert_eq!(m.swap(1i, 2i), None);
- assert_eq!(m.swap(1i, 3i), Some(2));
- assert_eq!(m.swap(1i, 4i), Some(3));
- }
-
- #[test]
- fn test_move_iter() {
- let hm = {
- let mut hm = HashMap::new();
-
- hm.insert('a', 1i);
- hm.insert('b', 2i);
-
- hm
- };
-
- let v = hm.move_iter().collect::<Vec<(char, int)>>();
- assert!([('a', 1), ('b', 2)] == v.as_slice() || [('b', 2), ('a', 1)] == v.as_slice());
- }
-
- #[test]
- fn test_iterate() {
- let mut m = HashMap::with_capacity(4);
- for i in range(0u, 32) {
- assert!(m.insert(i, i*2));
- }
- assert_eq!(m.len(), 32);
-
- let mut observed: u32 = 0;
-
- for (k, v) in m.iter() {
- assert_eq!(*v, *k * 2);
- observed |= 1 << *k;
- }
- assert_eq!(observed, 0xFFFF_FFFF);
- }
-
- #[test]
- fn test_keys() {
- let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
- let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
- assert_eq!(keys.len(), 3);
- assert!(keys.contains(&1));
- assert!(keys.contains(&2));
- assert!(keys.contains(&3));
- }
-
- #[test]
- fn test_values() {
- let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
- let map = vec.move_iter().collect::<HashMap<int, char>>();
- let values = map.values().map(|&v| v).collect::<Vec<char>>();
- assert_eq!(values.len(), 3);
- assert!(values.contains(&'a'));
- assert!(values.contains(&'b'));
- assert!(values.contains(&'c'));
- }
-
- #[test]
- fn test_find() {
- let mut m = HashMap::new();
- assert!(m.find(&1i).is_none());
- m.insert(1i, 2i);
- match m.find(&1) {
- None => fail!(),
- Some(v) => assert_eq!(*v, 2)
- }
- }
-
- #[test]
- fn test_eq() {
- let mut m1 = HashMap::new();
- m1.insert(1i, 2i);
- m1.insert(2i, 3i);
- m1.insert(3i, 4i);
-
- let mut m2 = HashMap::new();
- m2.insert(1i, 2i);
- m2.insert(2i, 3i);
-
- assert!(m1 != m2);
-
- m2.insert(3i, 4i);
-
- assert_eq!(m1, m2);
- }
-
- #[test]
- fn test_show() {
- let mut map: HashMap<int, int> = HashMap::new();
- let empty: HashMap<int, int> = HashMap::new();
-
- map.insert(1i, 2i);
- map.insert(3i, 4i);
-
- let map_str = format!("{}", map);
-
- assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
- assert_eq!(format!("{}", empty), "{}".to_string());
- }
-
- #[test]
- fn test_expand() {
- let mut m = HashMap::new();
-
- assert_eq!(m.len(), 0);
- assert!(m.is_empty());
-
- let mut i = 0u;
- let old_cap = m.table.capacity();
- while old_cap == m.table.capacity() {
- m.insert(i, i);
- i += 1;
- }
-
- assert_eq!(m.len(), i);
- assert!(!m.is_empty());
- }
-
- #[test]
- fn test_resize_policy() {
- let mut m = HashMap::new();
-
- assert_eq!(m.len(), 0);
- assert!(m.is_empty());
-
- let initial_cap = m.table.capacity();
- m.reserve(initial_cap * 2);
- let cap = m.table.capacity();
-
- assert_eq!(cap, initial_cap * 2);
-
- let mut i = 0u;
- for _ in range(0, cap * 3 / 4) {
- m.insert(i, i);
- i += 1;
- }
-
- assert_eq!(m.len(), i);
- assert_eq!(m.table.capacity(), cap);
-
- for _ in range(0, cap / 4) {
- m.insert(i, i);
- i += 1;
- }
-
- let new_cap = m.table.capacity();
- assert_eq!(new_cap, cap * 2);
-
- for _ in range(0, cap / 2) {
- i -= 1;
- m.remove(&i);
- assert_eq!(m.table.capacity(), new_cap);
- }
-
- for _ in range(0, cap / 2 - 1) {
- i -= 1;
- m.remove(&i);
- }
-
- assert_eq!(m.table.capacity(), cap);
- assert_eq!(m.len(), i);
- assert!(!m.is_empty());
- }
-
- #[test]
- fn test_find_equiv() {
- let mut m = HashMap::new();
-
- let (foo, bar, baz) = (1i,2i,3i);
- m.insert("foo".to_string(), foo);
- m.insert("bar".to_string(), bar);
- m.insert("baz".to_string(), baz);
-
-
- assert_eq!(m.find_equiv(&("foo")), Some(&foo));
- assert_eq!(m.find_equiv(&("bar")), Some(&bar));
- assert_eq!(m.find_equiv(&("baz")), Some(&baz));
-
- assert_eq!(m.find_equiv(&("qux")), None);
- }
-
- #[test]
- fn test_from_iter() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- for &(k, v) in xs.iter() {
- assert_eq!(map.find(&k), Some(&v));
- }
- }
-
- #[test]
- fn test_size_hint() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- let mut iter = map.iter();
-
- for _ in iter.by_ref().take(3) {}
-
- assert_eq!(iter.size_hint(), (3, Some(3)));
- }
-
- #[test]
- fn test_mut_size_hint() {
- let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
-
- let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
-
- let mut iter = map.mut_iter();
-
- for _ in iter.by_ref().take(3) {}
-
- assert_eq!(iter.size_hint(), (3, Some(3)));
- }
-
- #[test]
- fn test_index() {
- let mut map: HashMap<int, int> = HashMap::new();
-
- map.insert(1, 2);
- map.insert(2, 1);
- map.insert(3, 4);
-
- assert_eq!(map[2], 1);
- }
-
- #[test]
- #[should_fail]
- fn test_index_nonexistent() {
- let mut map: HashMap<int, int> = HashMap::new();
-
- map.insert(1, 2);
- map.insert(2, 1);
- map.insert(3, 4);
-
- map[4];
- }
-}
-
-#[cfg(test)]
-mod test_set {
- use prelude::*;
-
- use super::HashSet;
- use slice::ImmutablePartialEqSlice;
- use collections::Collection;
-
- #[test]
- fn test_disjoint() {
- let mut xs = HashSet::new();
- let mut ys = HashSet::new();
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(xs.insert(5i));
- assert!(ys.insert(11i));
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(xs.insert(7));
- assert!(xs.insert(19));
- assert!(xs.insert(4));
- assert!(ys.insert(2));
- assert!(ys.insert(-11));
- assert!(xs.is_disjoint(&ys));
- assert!(ys.is_disjoint(&xs));
- assert!(ys.insert(7));
- assert!(!xs.is_disjoint(&ys));
- assert!(!ys.is_disjoint(&xs));
- }
-
- #[test]
- fn test_subset_and_superset() {
- let mut a = HashSet::new();
- assert!(a.insert(0i));
- assert!(a.insert(5));
- assert!(a.insert(11));
- assert!(a.insert(7));
-
- let mut b = HashSet::new();
- assert!(b.insert(0i));
- assert!(b.insert(7));
- assert!(b.insert(19));
- assert!(b.insert(250));
- assert!(b.insert(11));
- assert!(b.insert(200));
-
- assert!(!a.is_subset(&b));
- assert!(!a.is_superset(&b));
- assert!(!b.is_subset(&a));
- assert!(!b.is_superset(&a));
-
- assert!(b.insert(5));
-
- assert!(a.is_subset(&b));
- assert!(!a.is_superset(&b));
- assert!(!b.is_subset(&a));
- assert!(b.is_superset(&a));
- }
-
- #[test]
- fn test_iterate() {
- let mut a = HashSet::new();
- for i in range(0u, 32) {
- assert!(a.insert(i));
- }
- let mut observed: u32 = 0;
- for k in a.iter() {
- observed |= 1 << *k;
- }
- assert_eq!(observed, 0xFFFF_FFFF);
- }
-
- #[test]
- fn test_intersection() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(11i));
- assert!(a.insert(1));
- assert!(a.insert(3));
- assert!(a.insert(77));
- assert!(a.insert(103));
- assert!(a.insert(5));
- assert!(a.insert(-5));
-
- assert!(b.insert(2i));
- assert!(b.insert(11));
- assert!(b.insert(77));
- assert!(b.insert(-9));
- assert!(b.insert(-42));
- assert!(b.insert(5));
- assert!(b.insert(3));
-
- let mut i = 0;
- let expected = [3, 5, 11, 77];
- for x in a.intersection(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_difference() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
-
- assert!(b.insert(3i));
- assert!(b.insert(9));
-
- let mut i = 0;
- let expected = [1, 5, 11];
- for x in a.difference(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_symmetric_difference() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
-
- assert!(b.insert(-2i));
- assert!(b.insert(3));
- assert!(b.insert(9));
- assert!(b.insert(14));
- assert!(b.insert(22));
-
- let mut i = 0;
- let expected = [-2, 1, 5, 11, 14, 22];
- for x in a.symmetric_difference(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_union() {
- let mut a = HashSet::new();
- let mut b = HashSet::new();
-
- assert!(a.insert(1i));
- assert!(a.insert(3));
- assert!(a.insert(5));
- assert!(a.insert(9));
- assert!(a.insert(11));
- assert!(a.insert(16));
- assert!(a.insert(19));
- assert!(a.insert(24));
-
- assert!(b.insert(-2i));
- assert!(b.insert(1));
- assert!(b.insert(5));
- assert!(b.insert(9));
- assert!(b.insert(13));
- assert!(b.insert(19));
-
- let mut i = 0;
- let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
- for x in a.union(&b) {
- assert!(expected.contains(x));
- i += 1
- }
- assert_eq!(i, expected.len());
- }
-
- #[test]
- fn test_from_iter() {
- let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
-
- let set: HashSet<int> = xs.iter().map(|&x| x).collect();
-
- for x in xs.iter() {
- assert!(set.contains(x));
- }
- }
-
- #[test]
- fn test_move_iter() {
- let hs = {
- let mut hs = HashSet::new();
-
- hs.insert('a');
- hs.insert('b');
-
- hs
- };
-
- let v = hs.move_iter().collect::<Vec<char>>();
- assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
- }
-
- #[test]
- fn test_eq() {
- // These constants once happened to expose a bug in insert().
- // I'm keeping them around to prevent a regression.
- let mut s1 = HashSet::new();
-
- s1.insert(1i);
- s1.insert(2);
- s1.insert(3);
-
- let mut s2 = HashSet::new();
-
- s2.insert(1i);
- s2.insert(2);
-
- assert!(s1 != s2);
-
- s2.insert(3);
-
- assert_eq!(s1, s2);
- }
-
- #[test]
- fn test_show() {
- let mut set: HashSet<int> = HashSet::new();
- let empty: HashSet<int> = HashSet::new();
-
- set.insert(1i);
- set.insert(2);
-
- let set_str = format!("{}", set);
-
- assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
- assert_eq!(format!("{}", empty), "{}".to_string());
- }
-}
-
-#[cfg(test)]
-mod bench {
- extern crate test;
- use prelude::*;
-
- use self::test::Bencher;
- use iter::{range_inclusive};
-
- #[bench]
- fn new_drop(b : &mut Bencher) {
- use super::HashMap;
-
- b.iter(|| {
- let m : HashMap<int, int> = HashMap::new();
- assert_eq!(m.len(), 0);
- })
- }
-
- #[bench]
- fn new_insert_drop(b : &mut Bencher) {
- use super::HashMap;
-
- b.iter(|| {
- let mut m = HashMap::new();
- m.insert(0i, 0i);
- assert_eq!(m.len(), 1);
- })
- }
-
- #[bench]
- fn insert(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1001;
-
- b.iter(|| {
- m.insert(k, k);
- k += 1;
- });
- }
-
- #[bench]
- fn find_existing(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- b.iter(|| {
- for i in range_inclusive(1i, 1000) {
- m.contains_key(&i);
- }
- });
- }
-
- #[bench]
- fn find_nonexisting(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- b.iter(|| {
- for i in range_inclusive(1001i, 2000) {
- m.contains_key(&i);
- }
- });
- }
-
- #[bench]
- fn hashmap_as_queue(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1i;
-
- b.iter(|| {
- m.pop(&k);
- m.insert(k + 1000, k + 1000);
- k += 1;
- });
- }
-
- #[bench]
- fn find_pop_insert(b: &mut Bencher) {
- use super::HashMap;
-
- let mut m = HashMap::new();
-
- for i in range_inclusive(1i, 1000) {
- m.insert(i, i);
- }
-
- let mut k = 1i;
-
- b.iter(|| {
- m.find(&(k + 400));
- m.find(&(k + 2000));
- m.pop(&k);
- m.insert(k + 1000, k + 1000);
- k += 1;
- })
- }
-}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![cfg(test)]
+
+extern crate test;
+use prelude::*;
+
+use self::test::Bencher;
+use iter::{range_inclusive};
+
+#[bench]
+fn new_drop(b : &mut Bencher) {
+ use super::HashMap;
+
+ b.iter(|| {
+ let m : HashMap<int, int> = HashMap::new();
+ assert_eq!(m.len(), 0);
+ })
+}
+
+#[bench]
+fn new_insert_drop(b : &mut Bencher) {
+ use super::HashMap;
+
+ b.iter(|| {
+ let mut m = HashMap::new();
+ m.insert(0i, 0i);
+ assert_eq!(m.len(), 1);
+ })
+}
+
+#[bench]
+fn grow_by_insertion(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1001;
+
+ b.iter(|| {
+ m.insert(k, k);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_existing(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in range_inclusive(1i, 1000) {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn find_nonexisting(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ b.iter(|| {
+ for i in range_inclusive(1001i, 2000) {
+ m.contains_key(&i);
+ }
+ });
+}
+
+#[bench]
+fn hashmap_as_queue(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1i;
+
+ b.iter(|| {
+ m.pop(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ });
+}
+
+#[bench]
+fn find_pop_insert(b: &mut Bencher) {
+ use super::HashMap;
+
+ let mut m = HashMap::new();
+
+ for i in range_inclusive(1i, 1000) {
+ m.insert(i, i);
+ }
+
+ let mut k = 1i;
+
+ b.iter(|| {
+ m.find(&(k + 400));
+ m.find(&(k + 2000));
+ m.pop(&k);
+ m.insert(k + 1000, k + 1000);
+ k += 1;
+ })
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp::{max, Eq, Equiv, PartialEq};
+use collections::{Collection, Mutable, MutableSet, Map, MutableMap};
+use default::Default;
+use fmt::Show;
+use fmt;
+use hash::{Hash, Hasher, RandomSipHasher};
+use iter::{Iterator, FromIterator, Extendable};
+use iter;
+use mem::replace;
+use num;
+use ops::{Deref, DerefMut};
+use option::{Some, None, Option};
+use result::{Ok, Err};
+use ops::Index;
+
+use super::table;
+use super::table::{
+ Bucket,
+ Empty,
+ Full,
+ FullBucket,
+ FullBucketImm,
+ FullBucketMut,
+ RawTable,
+ SafeHash
+};
+
+static INITIAL_LOG2_CAP: uint = 5;
+pub static INITIAL_CAPACITY: uint = 1 << INITIAL_LOG2_CAP; // 2^5
+
+/// The default behavior of HashMap implements a load factor of 90.9%.
+/// This behavior is characterized by the following conditions:
+///
+/// - if size > 0.909 * capacity: grow
+/// - if size < 0.25 * capacity: shrink (if this won't bring capacity lower
+/// than the minimum)
+#[deriving(Clone)]
+struct DefaultResizePolicy {
+ /// Doubled minimal capacity. The capacity must never drop below
+ /// the minimum capacity. (The check happens before the capacity
+ /// is potentially halved.)
+ // NOTE(review): stored pre-doubled (`reserve` writes `new_capacity << 1`)
+ // so `capacity_range` can use it directly as the shrink threshold.
+ minimum_capacity2: uint
+}
+
+impl DefaultResizePolicy {
+ // Build a policy whose table must never shrink below `new_capacity`.
+ fn new(new_capacity: uint) -> DefaultResizePolicy {
+ DefaultResizePolicy {
+ minimum_capacity2: new_capacity << 1
+ }
+ }
+
+ // Returns `(grow_at, shrink_at)` thresholds for a table holding
+ // `new_size` elements; the caller compares the current capacity
+ // against them (see `make_some_room`).
+ #[inline]
+ fn capacity_range(&self, new_size: uint) -> (uint, uint) {
+ // Here, we are rephrasing the logic by specifying the ranges:
+ //
+ // - if `size * 1.1 < cap < size * 4`: don't resize
+ // - if `cap < minimum_capacity * 2`: don't shrink
+ // - otherwise, resize accordingly
+ ((new_size * 11) / 10, max(new_size << 2, self.minimum_capacity2))
+ }
+
+ // Raise the shrink floor to `new_capacity` (stored doubled).
+ #[inline]
+ fn reserve(&mut self, new_capacity: uint) {
+ self.minimum_capacity2 = new_capacity << 1;
+ }
+}
+
+// The main performance trick in this hashmap is called Robin Hood Hashing.
+// It gains its excellent performance from one essential operation:
+//
+// If an insertion collides with an existing element, and that element's
+// "probe distance" (how far away the element is from its ideal location)
+// is higher than how far we've already probed, swap the elements.
+//
+// This massively lowers variance in probe distance, and allows us to get very
+// high load factors with good performance. The 90% load factor I use is rather
+// conservative.
+//
+// > Why a load factor of approximately 90%?
+//
+// In general, all the distances to initial buckets will converge on the mean.
+// At a load factor of α, the odds of finding the target bucket after k
+// probes is approximately 1-α^k. If we set this equal to 50% (since we converge
+// on the mean) and set k=8 (64-byte cache line / 8-byte hash), α=0.92. I round
+// this down to make the math easier on the CPU and avoid its FPU.
+// Since on average we start the probing in the middle of a cache line, this
+// strategy pulls in two cache lines of hashes on every lookup. I think that's
+// pretty good, but if you want to trade off some space, it could go down to one
+// cache line on average with an α of 0.84.
+//
+// > Wait, what? Where did you get 1-α^k from?
+//
+// On the first probe, your odds of a collision with an existing element is α.
+// The odds of doing this twice in a row is approximately α^2. For three times,
+// α^3, etc. Therefore, the odds of colliding k times is α^k. The odds of NOT
+// colliding after k tries is 1-α^k.
+//
+// The paper from 1986 cited below mentions an implementation which keeps track
+// of the distance-to-initial-bucket histogram. This approach is not suitable
+// for modern architectures because it requires maintaining an internal data
+// structure. This allows very good first guesses, but we are most concerned
+// with guessing entire cache lines, not individual indexes. Furthermore, array
+// accesses are no longer linear and in one direction, as we have now. There
+// is also memory and cache pressure that this would entail that would be very
+// difficult to properly see in a microbenchmark.
+//
+// Future Improvements (FIXME!)
+// ============================
+//
+// Allow the load factor to be changed dynamically and/or at initialization.
+//
+// Also, would it be possible for us to reuse storage when growing the
+// underlying table? This is exactly the use case for 'realloc', and may
+// be worth exploring.
+//
+// Future Optimizations (FIXME!)
+// =============================
+//
+// Another possible design choice that I made without any real reason is
+// parameterizing the raw table over keys and values. Technically, all we need
+// is the size and alignment of keys and values, and the code should be just as
+// efficient (well, we might need one for power-of-two size and one for not...).
+// This has the potential to reduce code bloat in rust executables, without
+// really losing anything except 4 words (key size, key alignment, val size,
+// val alignment) which can be passed in to every call of a `RawTable` function.
+// This would definitely be an avenue worth exploring if people start complaining
+// about the size of rust executables.
+//
+// Annotate exceedingly likely branches in `table::make_hash`
+// and `search_hashed_generic` to reduce instruction cache pressure
+// and mispredictions once it becomes possible (blocked on issue #11092).
+//
+// Shrinking the table could simply reallocate in place after moving buckets
+// to the first half.
+//
+// The growth algorithm (fragment of the Proof of Correctness)
+// --------------------
+//
+// The growth algorithm is basically a fast path of the naive reinsertion-
+// during-resize algorithm. Other paths should never be taken.
+//
+// Consider growing a robin hood hashtable of capacity n. Normally, we do this
+// by allocating a new table of capacity `2n`, and then individually reinsert
+// each element in the old table into the new one. This guarantees that the
+// new table is a valid robin hood hashtable with all the desired statistical
+// properties. Remark that the order we reinsert the elements in should not
+// matter. For simplicity and efficiency, we will consider only linear
+// reinsertions, which consist of reinserting all elements in the old table
+// into the new one by increasing order of index. However we will not be
+// starting our reinsertions from index 0 in general. If we start from index
+// i, for the purpose of reinsertion we will consider all elements with real
+// index j < i to have virtual index n + j.
+//
+// Our hash generation scheme consists of generating a 64-bit hash and
+// truncating the most significant bits. When moving to the new table, we
+// simply introduce a new bit to the front of the hash. Therefore, if an
+// elements has ideal index i in the old table, it can have one of two ideal
+// locations in the new table. If the new bit is 0, then the new ideal index
+// is i. If the new bit is 1, then the new ideal index is n + i. Intuitively,
+// we are producing two independent tables of size n, and for each element we
+// independently choose which table to insert it into with equal probability.
+// However, rather than wrapping around themselves on overflowing their
+// indexes, the first table overflows into the second, and the second into
+// the first. Visually, our new table will look something like:
+//
+// [yy_xxx_xxxx_xxx|xx_yyy_yyyy_yyy]
+//
+// Where x's are elements inserted into the first table, y's are elements
+// inserted into the second, and _'s are empty sections. We now define a few
+// key concepts that we will use later. Note that this is a very abstract
+// perspective of the table. A real resized table would be at least half
+// empty.
+//
+// Theorem: A linear robin hood reinsertion from the first ideal element
+// produces identical results to a linear naive reinsertion from the same
+// element.
+//
+// FIXME(Gankro, pczarn): review the proof and put it all in a separate doc.rs
+
+/// A hash map implementation which uses linear probing with Robin
+/// Hood bucket stealing.
+///
+/// The hashes are all keyed by the task-local random number generator
+/// on creation by default. This means that the ordering of the keys is
+/// randomized, but makes the tables more resistant to
+/// denial-of-service attacks (Hash DoS). This behaviour can be
+/// overridden with one of the constructors.
+///
+/// It is required that the keys implement the `Eq` and `Hash` traits, although
+/// this can frequently be achieved by using `#[deriving(Eq, Hash)]`.
+///
+/// Relevant papers/articles:
+///
+/// 1. Pedro Celis. ["Robin Hood Hashing"](https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf)
+/// 2. Emmanuel Goossaert. ["Robin Hood
+/// hashing"](http://codecapsule.com/2013/11/11/robin-hood-hashing/)
+/// 3. Emmanuel Goossaert. ["Robin Hood hashing: backward shift
+/// deletion"](http://codecapsule.com/2013/11/17/robin-hood-hashing-backward-shift-deletion/)
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `HashMap<&str, &str>` in this example).
+/// let mut book_reviews = HashMap::new();
+///
+/// // review some books.
+/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.");
+/// book_reviews.insert("Grimms' Fairy Tales", "Masterpiece.");
+/// book_reviews.insert("Pride and Prejudice", "Very enjoyable.");
+/// book_reviews.insert("The Adventures of Sherlock Holmes", "Eye lyked it alot.");
+///
+/// // check for a specific one.
+/// if !book_reviews.contains_key(&("Les Misérables")) {
+/// println!("We've got {} reviews, but Les Misérables ain't one.",
+/// book_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// book_reviews.remove(&("The Adventures of Sherlock Holmes"));
+///
+/// // look up the values associated with some keys.
+/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"];
+/// for book in to_find.iter() {
+/// match book_reviews.find(book) {
+/// Some(review) => println!("{}: {}", *book, *review),
+/// None => println!("{} is unreviewed.", *book)
+/// }
+/// }
+///
+/// // iterate over everything.
+/// for (book, review) in book_reviews.iter() {
+/// println!("{}: \"{}\"", *book, *review);
+/// }
+/// ```
+///
+/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
+/// We must also derive `PartialEq`.
+///
+/// ```
+/// use std::collections::HashMap;
+///
+/// #[deriving(Hash, Eq, PartialEq, Show)]
+/// struct Viking<'a> {
+/// name: &'a str,
+/// power: uint,
+/// }
+///
+/// let mut vikings = HashMap::new();
+///
+/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
+/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
+/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
+///
+/// // Use derived implementation to print the vikings.
+/// for (land, viking) in vikings.iter() {
+/// println!("{} at {}", viking, land);
+/// }
+/// ```
+#[deriving(Clone)]
+pub struct HashMap<K, V, H = RandomSipHasher> {
+ // All hashes are keyed on these values, to prevent hash collision attacks.
+ hasher: H,
+
+ // The actual storage: an open-addressed table of (hash, key, value)
+ // buckets, probed linearly with Robin Hood stealing (see module notes).
+ table: RawTable<K, V>,
+
+ // We keep this at the end since it might as well have tail padding.
+ resize_policy: DefaultResizePolicy,
+}
+
+/// Search for a pre-hashed key.
+///
+/// Walks the probe sequence starting at `hash`'s ideal bucket. Returns
+/// `FoundExisting` for the first full bucket whose hash matches and whose
+/// key satisfies `is_match`; returns `TableRef` (handing the table
+/// reference back) once an empty bucket or a closer-to-home element
+/// proves the key absent.
+fn search_hashed_generic<K, V, M: Deref<RawTable<K, V>>>(table: M,
+ hash: &SafeHash,
+ is_match: |&K| -> bool)
+ -> SearchResult<K, V, M> {
+ let size = table.size();
+ let mut probe = Bucket::new(table, hash);
+ let ib = probe.index();
+
+ // Probe at most `size` buckets past the ideal one; beyond that every
+ // element would have been visited.
+ while probe.index() != ib + size {
+ let full = match probe.peek() {
+ Empty(b) => return TableRef(b.into_table()), // hit an empty bucket
+ Full(b) => b
+ };
+
+ if full.distance() + ib < full.index() {
+ // We can finish the search early if we hit any bucket
+ // with a lower distance to initial bucket than we've probed.
+ return TableRef(full.into_table());
+ }
+
+ // If the hash doesn't match, it can't be this one..
+ if *hash == full.hash() {
+ let matched = {
+ let (k, _) = full.read();
+ is_match(k)
+ };
+
+ // If the key doesn't match, it can't be this one..
+ if matched {
+ return FoundExisting(full);
+ }
+ }
+
+ probe = full.next();
+ }
+
+ TableRef(probe.into_table())
+}
+
+/// Search for a pre-hashed key by exact `Eq` comparison; thin wrapper
+/// over `search_hashed_generic`.
+fn search_hashed<K: Eq, V, M: Deref<RawTable<K, V>>>(table: M, hash: &SafeHash, k: &K)
+ -> SearchResult<K, V, M> {
+ search_hashed_generic(table, hash, |k_| *k == *k_)
+}
+
+/// Remove the entry at `starting_bucket` and return its value, repairing
+/// the cluster by backward-shift deletion: each following displaced entry
+/// is shifted one bucket toward its ideal slot until an entry with
+/// distance 0 (or an empty bucket) ends the cluster.
+fn pop_internal<K, V>(starting_bucket: FullBucketMut<K, V>) -> V {
+ let (empty, _k, retval) = starting_bucket.take();
+ // No gap to fill means we removed the last entry of its cluster.
+ let mut gap = match empty.gap_peek() {
+ Some(b) => b,
+ None => return retval
+ };
+
+ while gap.full().distance() != 0 {
+ gap = match gap.shift() {
+ Some(b) => b,
+ None => break
+ };
+ }
+
+ // Now we've done all our shifting. Return the value we grabbed earlier.
+ return retval;
+}
+
+/// Perform robin hood bucket stealing at the given `bucket`. You must
+/// also pass the position of that bucket's initial bucket so we don't have
+/// to recalculate it.
+///
+/// `hash`, `k`, and `v` are the elements to "robin hood" into the hashtable.
+fn robin_hood<'a, K: 'a, V: 'a>(mut bucket: FullBucketMut<'a, K, V>,
+ mut ib: uint,
+ mut hash: SafeHash,
+ mut k: K,
+ mut v: V)
+ -> &'a mut V {
+ let starting_index = bucket.index();
+ let size = {
+ let table = bucket.table(); // FIXME "lifetime too short".
+ table.size()
+ };
+ // There can be at most `size - dib` buckets to displace, because
+ // in the worst case, there are `size` elements and we already are
+ // `distance` buckets away from the initial one.
+ let idx_end = starting_index + size - bucket.distance();
+
+ loop {
+ // Deposit the incoming element here; carry the displaced one onward.
+ let (old_hash, old_key, old_val) = bucket.replace(hash, k, v);
+ loop {
+ let probe = bucket.next();
+ assert!(probe.index() != idx_end);
+
+ let full_bucket = match probe.peek() {
+ table::Empty(bucket) => {
+ // Found a hole!
+ let b = bucket.put(old_hash, old_key, old_val);
+ // Now that it's stolen, just read the value's pointer
+ // right out of the table!
+ let (_, v) = Bucket::at_index(b.into_table(), starting_index).peek()
+ .expect_full()
+ .into_mut_refs();
+ return v;
+ },
+ table::Full(bucket) => bucket
+ };
+
+ let probe_ib = full_bucket.index() - full_bucket.distance();
+
+ bucket = full_bucket;
+
+ // Robin hood! Steal the spot.
+ if ib < probe_ib {
+ ib = probe_ib;
+ hash = old_hash;
+ k = old_key;
+ v = old_val;
+ break;
+ }
+ }
+ }
+}
+
+/// A result that works like Option<FullBucket<..>> but preserves
+/// the reference that grants us access to the table in any case.
+// `M` is the (shared or mutable) reference type that grants table access.
+enum SearchResult<K, V, M> {
+ // This is an entry that holds the given key:
+ FoundExisting(FullBucket<K, V, M>),
+
+ // There was no such entry. The reference is given back:
+ TableRef(M)
+}
+
+impl<K, V, M> SearchResult<K, V, M> {
+ // Drop the table reference, keeping only the bucket if the key was found.
+ fn into_option(self) -> Option<FullBucket<K, V, M>> {
+ match self {
+ FoundExisting(bucket) => Some(bucket),
+ TableRef(_) => None
+ }
+ }
+}
+
+/// A newtyped mutable reference to the hashmap that allows e.g. Deref to be
+/// implemented without making changes to the visible interface of HashMap.
+/// Used internally because it's accepted by the search functions above.
+struct MapMutRef<'a, K: 'a, V: 'a, H: 'a> {
+ // The wrapped map; Deref/DerefMut below expose only its raw table.
+ map_ref: &'a mut HashMap<K, V, H>
+}
+
+// Lets `MapMutRef` stand in for `&RawTable` in the search functions.
+impl<'a, K, V, H> Deref<RawTable<K, V>> for MapMutRef<'a, K, V, H> {
+ fn deref(&self) -> &RawTable<K, V> {
+ &self.map_ref.table
+ }
+}
+
+// Mutable counterpart of the Deref impl above.
+impl<'a, K, V, H> DerefMut<RawTable<K, V>> for MapMutRef<'a, K, V, H> {
+ fn deref_mut(&mut self) -> &mut RawTable<K, V> {
+ &mut self.map_ref.table
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
+ // Hash `x` with this map's keyed hasher.
+ fn make_hash<X: Hash<S>>(&self, x: &X) -> SafeHash {
+ table::make_hash(&self.hasher, x)
+ }
+
+ // Immutable lookup by an `Equiv` key that hashes like `K`.
+ fn search_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, q: &Q)
+ -> Option<FullBucketImm<'a, K, V>> {
+ let hash = self.make_hash(q);
+ search_hashed_generic(&self.table, &hash, |k| q.equiv(k)).into_option()
+ }
+
+ // Mutable variant of `search_equiv`.
+ fn search_equiv_mut<'a, Q: Hash<S> + Equiv<K>>(&'a mut self, q: &Q)
+ -> Option<FullBucketMut<'a, K, V>> {
+ let hash = self.make_hash(q);
+ search_hashed_generic(&mut self.table, &hash, |k| q.equiv(k)).into_option()
+ }
+
+ /// Search for a key, yielding the index if it's found in the hashtable.
+ /// If you already have the hash for the key lying around, use
+ /// search_hashed.
+ fn search<'a>(&'a self, k: &K) -> Option<FullBucketImm<'a, K, V>> {
+ let hash = self.make_hash(k);
+ search_hashed(&self.table, &hash, k).into_option()
+ }
+
+ // Mutable variant of `search`.
+ fn search_mut<'a>(&'a mut self, k: &K) -> Option<FullBucketMut<'a, K, V>> {
+ let hash = self.make_hash(k);
+ search_hashed(&mut self.table, &hash, k).into_option()
+ }
+
+ // The caller should ensure that invariants by Robin Hood Hashing hold.
+ // Used only by `resize`, which reinserts elements already in probe order,
+ // so this takes the first empty bucket without any displacement logic.
+ fn insert_hashed_ordered(&mut self, hash: SafeHash, k: K, v: V) {
+ let cap = self.table.capacity();
+ let mut buckets = Bucket::new(&mut self.table, &hash);
+ let ib = buckets.index();
+
+ while buckets.index() != ib + cap {
+ // We don't need to compare hashes for value swap.
+ // Not even DIBs for Robin Hood.
+ buckets = match buckets.peek() {
+ Empty(empty) => {
+ empty.put(hash, k, v);
+ return;
+ }
+ Full(b) => b.into_bucket()
+ };
+ buckets.next();
+ }
+ fail!("Internal HashMap error: Out of space.");
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Collection for HashMap<K, V, H> {
+ /// Return the number of elements in the map.
+ // Delegates to the raw table's element count.
+ fn len(&self) -> uint { self.table.size() }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Mutable for HashMap<K, V, H> {
+ /// Clear the map, removing all key-value pairs. Keeps the allocated memory
+ /// for reuse.
+ fn clear(&mut self) {
+ // Prevent reallocations from happening from now on. Makes it possible
+ // for the map to be reused but has a downside: reserves permanently.
+ self.resize_policy.reserve(self.table.size());
+
+ // Walk every bucket once, dropping each full entry in place.
+ let cap = self.table.capacity();
+ let mut buckets = Bucket::first(&mut self.table);
+
+ while buckets.index() != cap {
+ buckets = match buckets.peek() {
+ Empty(b) => b.next(),
+ Full(full) => {
+ let (b, _, _) = full.take();
+ b.next()
+ }
+ };
+ }
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Map<K, V> for HashMap<K, V, H> {
+ /// Return a reference to the value corresponding to the key, if present.
+ fn find<'a>(&'a self, k: &K) -> Option<&'a V> {
+ self.search(k).map(|bucket| {
+ let (_, v) = bucket.into_refs();
+ v
+ })
+ }
+
+ /// Return true if the map contains a value for the specified key.
+ fn contains_key(&self, k: &K) -> bool {
+ self.search(k).is_some()
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> MutableMap<K, V> for HashMap<K, V, H> {
+ /// Return a mutable reference to the value corresponding to the key,
+ /// if present.
+ fn find_mut<'a>(&'a mut self, k: &K) -> Option<&'a mut V> {
+ match self.search_mut(k) {
+ Some(bucket) => {
+ let (_, v) = bucket.into_mut_refs();
+ Some(v)
+ }
+ _ => None
+ }
+ }
+
+ /// Insert a key-value pair, returning the value previously stored
+ /// under that key, if any.
+ fn swap(&mut self, k: K, v: V) -> Option<V> {
+ let hash = self.make_hash(&k);
+ // Grow (if needed) before inserting so the probe loop can't run out.
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+
+ let mut retval = None;
+ self.insert_or_replace_with(hash, k, v, |_, val_ref, val| {
+ retval = Some(replace(val_ref, val));
+ });
+ retval
+ }
+
+
+ /// Remove a key from the map, returning its value if it was present.
+ fn pop(&mut self, k: &K) -> Option<V> {
+ if self.table.size() == 0 {
+ return None
+ }
+
+ // Possibly shrink before searching; the removal itself uses
+ // backward-shift deletion (`pop_internal`).
+ let potential_new_size = self.table.size() - 1;
+ self.make_some_room(potential_new_size);
+
+ self.search_mut(k).map(|bucket| {
+ pop_internal(bucket)
+ })
+ }
+}
+
+impl<K: Hash + Eq, V> HashMap<K, V, RandomSipHasher> {
+ /// Create an empty HashMap.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::new();
+ /// ```
+ #[inline]
+ pub fn new() -> HashMap<K, V, RandomSipHasher> {
+ let hasher = RandomSipHasher::new();
+ HashMap::with_hasher(hasher)
+ }
+
+ /// Creates an empty hash map with the given initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::with_capacity(10);
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: uint) -> HashMap<K, V, RandomSipHasher> {
+ let hasher = RandomSipHasher::new();
+ HashMap::with_capacity_and_hasher(capacity, hasher)
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> HashMap<K, V, H> {
+ /// Creates an empty hashmap which will use the given hasher to hash keys.
+ ///
+ /// The created map has the default initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut map = HashMap::with_hasher(h);
+ /// map.insert(1i, 2u);
+ /// ```
+ #[inline]
+ pub fn with_hasher(hasher: H) -> HashMap<K, V, H> {
+ HashMap {
+ hasher: hasher,
+ resize_policy: DefaultResizePolicy::new(INITIAL_CAPACITY),
+ // Allocation is deferred until the first insert forces a resize.
+ table: RawTable::new(0),
+ }
+ }
+
+ /// Create an empty HashMap with space for at least `capacity`
+ /// elements, using `hasher` to hash the keys.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow HashMaps to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut map = HashMap::with_capacity_and_hasher(10, h);
+ /// map.insert(1i, 2u);
+ /// ```
+ #[inline]
+ pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashMap<K, V, H> {
+ // Round up to a power of two, never below the default minimum.
+ let cap = num::next_power_of_two(max(INITIAL_CAPACITY, capacity));
+ HashMap {
+ hasher: hasher,
+ resize_policy: DefaultResizePolicy::new(cap),
+ table: RawTable::new(cap),
+ }
+ }
+
+ /// The hashtable will never try to shrink below this size. You can use
+ /// this function to reduce reallocations if your hashtable frequently
+ /// grows and shrinks by large amounts.
+ ///
+ /// This function has no effect on the operational semantics of the
+ /// hashtable, only on performance.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map: HashMap<&str, int> = HashMap::new();
+ /// map.reserve(10);
+ /// ```
+ pub fn reserve(&mut self, new_minimum_capacity: uint) {
+ let cap = num::next_power_of_two(
+ max(INITIAL_CAPACITY, new_minimum_capacity));
+
+ // Record the new floor, then grow immediately if we're below it.
+ self.resize_policy.reserve(cap);
+
+ if self.table.capacity() < cap {
+ self.resize(cap);
+ }
+ }
+
+ /// Resizes the internal vectors to a new capacity. It's your responsibility to:
+ /// 1) Make sure the new capacity is enough for all the elements, accounting
+ /// for the load factor.
+ /// 2) Ensure new_capacity is a power of two.
+ fn resize(&mut self, new_capacity: uint) {
+ assert!(self.table.size() <= new_capacity);
+ assert!(num::is_power_of_two(new_capacity));
+
+ let mut old_table = replace(&mut self.table, RawTable::new(new_capacity));
+ let old_size = old_table.size();
+
+ // Nothing to move for an empty table.
+ if old_table.capacity() == 0 || old_table.size() == 0 {
+ return;
+ }
+
+ if new_capacity < old_table.capacity() {
+ // Shrink the table. Naive algorithm for resizing:
+ for (h, k, v) in old_table.move_iter() {
+ self.insert_hashed_nocheck(h, k, v);
+ }
+ } else {
+ // Grow the table.
+ // Specialization of the other branch.
+ let mut bucket = Bucket::first(&mut old_table);
+
+ // "So a few of the first shall be last: for many be called,
+ // but few chosen."
+ //
+ // We'll most likely encounter a few buckets at the beginning that
+ // have their initial buckets near the end of the table. They were
+ // placed at the beginning as the probe wrapped around the table
+ // during insertion. We must skip forward to a bucket that won't
+ // get reinserted too early and won't unfairly steal others' spots.
+ // This eliminates the need for robin hood.
+ loop {
+ bucket = match bucket.peek() {
+ Full(full) => {
+ if full.distance() == 0 {
+ // This bucket occupies its ideal spot.
+ // It indicates the start of another "cluster".
+ bucket = full.into_bucket();
+ break;
+ }
+ // Leaving this bucket in the last cluster for later.
+ full.into_bucket()
+ }
+ Empty(b) => {
+ // Encountered a hole between clusters.
+ b.into_bucket()
+ }
+ };
+ bucket.next();
+ }
+
+ // This is how the buckets might be laid out in memory:
+ // ($ marks an initialized bucket)
+ // ________________
+ // |$$$_$$$$$$_$$$$$|
+ //
+ // But we've skipped the entire initial cluster of buckets
+ // and will continue iteration in this order:
+ // ________________
+ // |$$$$$$_$$$$$
+ // ^ wrap around once end is reached
+ // ________________
+ // $$$_____________|
+ // ^ exit once table.size == 0
+ loop {
+ bucket = match bucket.peek() {
+ Full(bucket) => {
+ let h = bucket.hash();
+ let (b, k, v) = bucket.take();
+ self.insert_hashed_ordered(h, k, v);
+ {
+ let t = b.table(); // FIXME "lifetime too short".
+ if t.size() == 0 { break }
+ };
+ b.into_bucket()
+ }
+ Empty(b) => b.into_bucket()
+ };
+ bucket.next();
+ }
+ }
+
+ assert_eq!(self.table.size(), old_size);
+ }
+
+ /// Performs any necessary resize operations, such that there's space for
+ /// new_size elements.
+ fn make_some_room(&mut self, new_size: uint) {
+ let (grow_at, shrink_at) = self.resize_policy.capacity_range(new_size);
+ let cap = self.table.capacity();
+
+ // An invalid value shouldn't make us run out of space.
+ debug_assert!(grow_at >= new_size);
+
+ if cap <= grow_at {
+ // Double, or jump straight to the default size for a fresh table.
+ let new_capacity = max(cap << 1, INITIAL_CAPACITY);
+ self.resize(new_capacity);
+ } else if shrink_at <= cap {
+ let new_capacity = cap >> 1;
+ self.resize(new_capacity);
+ }
+ }
+
+ /// Insert a pre-hashed key-value pair, without first checking
+ /// that there's enough room in the buckets. Returns a reference to the
+ /// newly inserted value.
+ ///
+ /// If the key already exists, the hashtable will be returned untouched
+ /// and a reference to the existing element will be returned.
+ fn insert_hashed_nocheck(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
+ // Empty `found_existing` callback: keep the old value on collision.
+ self.insert_or_replace_with(hash, k, v, |_, _, _| ())
+ }
+
+ // Core insertion routine: probe for `k`; on a match invoke
+ // `found_existing` (which decides what to do with the incoming `v`),
+ // otherwise insert via an empty bucket or Robin Hood displacement.
+ // Caller must have already ensured there is room (`make_some_room`).
+ fn insert_or_replace_with<'a>(&'a mut self,
+ hash: SafeHash,
+ k: K,
+ v: V,
+ found_existing: |&mut K, &mut V, V|)
+ -> &'a mut V {
+ // Worst case, we'll find one empty bucket among `size + 1` buckets.
+ let size = self.table.size();
+ let mut probe = Bucket::new(&mut self.table, &hash);
+ let ib = probe.index();
+
+ loop {
+ let mut bucket = match probe.peek() {
+ Empty(bucket) => {
+ // Found a hole!
+ let bucket = bucket.put(hash, k, v);
+ let (_, val) = bucket.into_mut_refs();
+ return val;
+ },
+ Full(bucket) => bucket
+ };
+
+ if bucket.hash() == hash {
+ let found_match = {
+ let (bucket_k, _) = bucket.read_mut();
+ k == *bucket_k
+ };
+ if found_match {
+ let (bucket_k, bucket_v) = bucket.into_mut_refs();
+ debug_assert!(k == *bucket_k);
+ // Key already exists. Get its reference.
+ found_existing(bucket_k, bucket_v, v);
+ return bucket_v;
+ }
+ }
+
+ let robin_ib = bucket.index() as int - bucket.distance() as int;
+
+ if (ib as int) < robin_ib {
+ // Found a luckier bucket than me. Better steal his spot.
+ return robin_hood(bucket, robin_ib as uint, hash, k, v);
+ }
+
+ probe = bucket.next();
+ assert!(probe.index() != ib + size + 1);
+ }
+ }
+
+ /// Inserts an element which has already been hashed, returning a reference
+ /// to that element inside the hashtable. This is more efficient than using
+ /// `insert`, since the key will not be rehashed.
+ fn insert_hashed(&mut self, hash: SafeHash, k: K, v: V) -> &mut V {
+ // Unlike `insert_hashed_nocheck`, this does make room first.
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+ self.insert_hashed_nocheck(hash, k, v)
+ }
+
+ /// Return the value corresponding to the key in the map, or insert
+ /// and return the value if it doesn't exist.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 1i with key "a"
+ /// assert_eq!(*map.find_or_insert("a", 1i), 1);
+ ///
+ /// // Find the existing key
+ /// assert_eq!(*map.find_or_insert("a", -2), 1);
+ /// ```
+ pub fn find_or_insert(&mut self, k: K, v: V) -> &mut V {
+ // `v` rides along as the `not_found` payload, so it is only
+ // consumed when the key is actually absent.
+ self.find_with_or_insert_with(k, v, |_k, _v, _a| (), |_k, a| a)
+ }
+
+ /// Return the value corresponding to the key in the map, or create,
+ /// insert, and return a new value if it doesn't exist.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 10 with key 2
+ /// assert_eq!(*map.find_or_insert_with(2i, |&key| 5 * key as uint), 10u);
+ ///
+ /// // Find the existing key
+ /// assert_eq!(*map.find_or_insert_with(2, |&key| key as uint), 10);
+ /// ```
+ pub fn find_or_insert_with<'a>(&'a mut self, k: K, f: |&K| -> V)
+ -> &'a mut V {
+ // `f` is only invoked (via `not_found`) when the key is absent.
+ self.find_with_or_insert_with(k, (), |_k, _v, _a| (), |k, _a| f(k))
+ }
+
+ /// Insert a key-value pair into the map if the key is not already present.
+ /// Otherwise, modify the existing value for the key.
+ /// Returns the new or modified value for the key.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// let mut map = HashMap::new();
+ ///
+ /// // Insert 2 with key "a"
+ /// assert_eq!(*map.insert_or_update_with("a", 2u, |_key, val| *val = 3), 2);
+ ///
+ /// // Update and return the existing value
+ /// assert_eq!(*map.insert_or_update_with("a", 9, |_key, val| *val = 7), 7);
+ /// assert_eq!(map["a"], 7);
+ /// ```
+ pub fn insert_or_update_with<'a>(
+ &'a mut self,
+ k: K,
+ v: V,
+ f: |&K, &mut V|)
+ -> &'a mut V {
+ // Make room up front; then `f` runs only when the key already exists
+ // (the incoming `v` is dropped in that case).
+ let potential_new_size = self.table.size() + 1;
+ self.make_some_room(potential_new_size);
+
+ let hash = self.make_hash(&k);
+ self.insert_or_replace_with(hash, k, v, |kref, vref, _v| f(kref, vref))
+ }
+
+ /// Modify and return the value corresponding to the key in the map, or
+ /// insert and return a new value if it doesn't exist.
+ ///
+ /// This method allows for all insertion behaviours of a hashmap;
+ /// see methods like
+ /// [`insert`](../trait.MutableMap.html#tymethod.insert),
+ /// [`find_or_insert`](#method.find_or_insert) and
+ /// [`insert_or_update_with`](#method.insert_or_update_with)
+ /// for less general and more friendly variations of this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// // map some strings to vectors of strings
+ /// let mut map = HashMap::new();
+ /// map.insert("a key", vec!["value"]);
+ /// map.insert("z key", vec!["value"]);
+ ///
+ /// let new = vec!["a key", "b key", "z key"];
+ ///
+ /// for k in new.move_iter() {
+ /// map.find_with_or_insert_with(
+ /// k, "new value",
+ /// // if the key does exist either prepend or append this
+ /// // new value based on the first letter of the key.
+ /// |key, already, new| {
+ /// if key.as_slice().starts_with("z") {
+ /// already.insert(0, new);
+ /// } else {
+ /// already.push(new);
+ /// }
+ /// },
+ /// // if the key doesn't exist in the map yet, add it in
+ /// // the obvious way.
+ /// |_k, v| vec![v]);
+ /// }
+ ///
+ /// assert_eq!(map.len(), 3);
+ /// assert_eq!(map["a key"], vec!["value", "new value"]);
+ /// assert_eq!(map["b key"], vec!["new value"]);
+ /// assert_eq!(map["z key"], vec!["new value", "value"]);
+ /// ```
+ pub fn find_with_or_insert_with<'a, A>(&'a mut self,
+ k: K,
+ a: A,
+ found: |&K, &mut V, A|,
+ not_found: |&K, A| -> V)
+ -> &'a mut V
+ {
+ let hash = self.make_hash(&k);
+ // Wrap `self` so the search can hand the mutable reference back to
+ // us (as `TableRef`) when the key turns out to be absent.
+ let this = MapMutRef { map_ref: self };
+
+ match search_hashed(this, &hash, &k) {
+ FoundExisting(bucket) => {
+ let (_, v_ref) = bucket.into_mut_refs();
+ found(&k, v_ref, a);
+ v_ref
+ }
+ TableRef(this) => {
+ let v = not_found(&k, a);
+ this.map_ref.insert_hashed(hash, k, v)
+ }
+ }
+ }
+
+ /// Retrieves a value for the given key.
+ /// See [`find`](../trait.Map.html#tymethod.find) for a non-failing alternative.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// #![allow(deprecated)]
+ ///
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// assert_eq!(map.get(&"a"), &1);
+ /// ```
+ #[deprecated = "prefer indexing instead, e.g., map[key]"]
+ pub fn get<'a>(&'a self, k: &K) -> &'a V {
+ match self.find(k) {
+ Some(v) => v,
+ // Failing rather than returning Option is this method's contract.
+ None => fail!("no entry found for key")
+ }
+ }
+
+ /// Retrieves a mutable value for the given key.
+ /// See [`find_mut`](../trait.MutableMap.html#tymethod.find_mut) for a non-failing alternative.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// {
+ /// // val will freeze map to prevent usage during its lifetime
+ /// let val = map.get_mut(&"a");
+ /// *val = 40;
+ /// }
+ /// assert_eq!(map["a"], 40);
+ ///
+ /// // A more direct way could be:
+ /// *map.get_mut(&"a") = -2;
+ /// assert_eq!(map["a"], -2);
+ /// ```
+ pub fn get_mut<'a>(&'a mut self, k: &K) -> &'a mut V {
+ match self.find_mut(k) {
+ Some(v) => v,
+ // Failing rather than returning Option is this method's contract.
+ None => fail!("no entry found for key")
+ }
+ }
+
+ /// Return true if the map contains a value for the specified key,
+ /// using equivalence.
+ ///
+ /// See [pop_equiv](#method.pop_equiv) for an extended example.
+ pub fn contains_key_equiv<Q: Hash<S> + Equiv<K>>(&self, key: &Q) -> bool {
+ self.search_equiv(key).is_some()
+ }
+
+ /// Return the value corresponding to the key in the map, using
+ /// equivalence.
+ ///
+ /// See [pop_equiv](#method.pop_equiv) for an extended example.
+ pub fn find_equiv<'a, Q: Hash<S> + Equiv<K>>(&'a self, k: &Q) -> Option<&'a V> {
+ match self.search_equiv(k) {
+ None => None,
+ Some(bucket) => {
+ let (_, v_ref) = bucket.into_refs();
+ Some(v_ref)
+ }
+ }
+ }
+
+ /// Remove an equivalent key from the map, returning the value at the
+ /// key if the key was previously in the map.
+ ///
+ /// # Example
+ ///
+ /// This is a slightly silly example where we define the number's
+ /// parity as the equivalence class. It is important that the
+ /// values hash the same, which is why we implement `Hash`.
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ /// use std::hash::Hash;
+ /// use std::hash::sip::SipState;
+ ///
+ /// #[deriving(Eq, PartialEq)]
+ /// struct EvenOrOdd {
+ /// num: uint
+ /// };
+ ///
+ /// impl Hash for EvenOrOdd {
+ /// fn hash(&self, state: &mut SipState) {
+ /// let parity = self.num % 2;
+ /// parity.hash(state);
+ /// }
+ /// }
+ ///
+ /// impl Equiv<EvenOrOdd> for EvenOrOdd {
+ /// fn equiv(&self, other: &EvenOrOdd) -> bool {
+ /// self.num % 2 == other.num % 2
+ /// }
+ /// }
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert(EvenOrOdd { num: 3 }, "foo");
+ ///
+ /// assert!(map.contains_key_equiv(&EvenOrOdd { num: 1 }));
+ /// assert!(!map.contains_key_equiv(&EvenOrOdd { num: 4 }));
+ ///
+ /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 5 }), Some(&"foo"));
+ /// assert_eq!(map.find_equiv(&EvenOrOdd { num: 2 }), None);
+ ///
+ /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 1 }), Some("foo"));
+ /// assert_eq!(map.pop_equiv(&EvenOrOdd { num: 2 }), None);
+ ///
+ /// ```
+ #[experimental]
+ pub fn pop_equiv<Q:Hash<S> + Equiv<K>>(&mut self, k: &Q) -> Option<V> {
+ if self.table.size() == 0 {
+ return None
+ }
+
+ let potential_new_size = self.table.size() - 1;
+ self.make_some_room(potential_new_size);
+
+ match self.search_equiv_mut(k) {
+ Some(bucket) => {
+ Some(pop_internal(bucket))
+ }
+ _ => None
+ }
+ }
+
+ /// An iterator visiting all keys in arbitrary order.
+ /// Iterator element type is `&'a K`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for key in map.keys() {
+ /// println!("{}", key);
+ /// }
+ /// ```
+ pub fn keys(&self) -> Keys<K, V> {
+ self.iter().map(|(k, _v)| k)
+ }
+
+ /// An iterator visiting all values in arbitrary order.
+ /// Iterator element type is `&'a V`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for val in map.values() {
+ /// println!("{}", val);
+ /// }
+ /// ```
+ pub fn values(&self) -> Values<K, V> {
+ self.iter().map(|(_k, v)| v)
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order.
+ /// Iterator element type is `(&'a K, &'a V)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {} val: {}", key, val);
+ /// }
+ /// ```
+ pub fn iter(&self) -> Entries<K, V> {
+ Entries { inner: self.table.iter() }
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order,
+ /// with mutable references to the values.
+ /// Iterator element type is `(&'a K, &'a mut V)`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// // Update all values
+ /// for (_, val) in map.mut_iter() {
+ /// *val *= 2;
+ /// }
+ ///
+ /// for (key, val) in map.iter() {
+ /// println!("key: {} val: {}", key, val);
+ /// }
+ /// ```
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ MutEntries { inner: self.table.mut_iter() }
+ }
+
+ /// Creates a consuming iterator, that is, one that moves each key-value
+ /// pair out of the map in arbitrary order. The map cannot be used after
+ /// calling this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1i);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// // Not possible with .iter()
+ /// let vec: Vec<(&str, int)> = map.move_iter().collect();
+ /// ```
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ MoveEntries {
+ inner: self.table.move_iter().map(|(_, k, v)| (k, v))
+ }
+ }
+}
+
+impl<K: Eq + Hash<S>, V: Clone, S, H: Hasher<S>> HashMap<K, V, H> {
+ /// Return a copy of the value corresponding to the key.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<uint, String> = HashMap::new();
+ /// map.insert(1u, "foo".to_string());
+ /// let s: String = map.find_copy(&1).unwrap();
+ /// ```
+ pub fn find_copy(&self, k: &K) -> Option<V> {
+ self.find(k).map(|v| (*v).clone())
+ }
+
+ /// Return a copy of the value corresponding to the key.
+ ///
+ /// # Failure
+ ///
+ /// Fails if the key is not present.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashMap;
+ ///
+ /// let mut map: HashMap<uint, String> = HashMap::new();
+ /// map.insert(1u, "foo".to_string());
+ /// let s: String = map.get_copy(&1);
+ /// ```
+ pub fn get_copy(&self, k: &K) -> V {
+ (*self.get(k)).clone()
+ }
+}
+
+impl<K: Eq + Hash<S>, V: PartialEq, S, H: Hasher<S>> PartialEq for HashMap<K, V, H> {
+ fn eq(&self, other: &HashMap<K, V, H>) -> bool {
+ if self.len() != other.len() { return false; }
+
+ self.iter().all(|(key, value)|
+ other.find(key).map_or(false, |v| *value == *v)
+ )
+ }
+}
+
+impl<K: Eq + Hash<S>, V: Eq, S, H: Hasher<S>> Eq for HashMap<K, V, H> {}
+
+impl<K: Eq + Hash<S> + Show, V: Show, S, H: Hasher<S>> Show for HashMap<K, V, H> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "{{"));
+
+ for (i, (k, v)) in self.iter().enumerate() {
+ if i != 0 { try!(write!(f, ", ")); }
+ try!(write!(f, "{}: {}", *k, *v));
+ }
+
+ write!(f, "}}")
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Default for HashMap<K, V, H> {
+ fn default() -> HashMap<K, V, H> {
+ HashMap::with_hasher(Default::default())
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> Index<K, V> for HashMap<K, V, H> {
+ #[inline]
+ fn index<'a>(&'a self, index: &K) -> &'a V {
+ self.get(index)
+ }
+}
+
+// FIXME(#12825) Indexing will always try IndexMut first and that causes issues.
+/*impl<K: Eq + Hash<S>, V, S, H: Hasher<S>> ops::IndexMut<K, V> for HashMap<K, V, H> {
+ #[inline]
+ fn index_mut<'a>(&'a mut self, index: &K) -> &'a mut V {
+ self.get_mut(index)
+ }
+}*/
+
+/// HashMap iterator
+pub struct Entries<'a, K: 'a, V: 'a> {
+ inner: table::Entries<'a, K, V>
+}
+
+/// HashMap mutable values iterator
+pub struct MutEntries<'a, K: 'a, V: 'a> {
+ inner: table::MutEntries<'a, K, V>
+}
+
+/// HashMap move iterator
+pub struct MoveEntries<K, V> {
+ inner: iter::Map<'static, (SafeHash, K, V), (K, V), table::MoveEntries<K, V>>
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<K, V> Iterator<(K, V)> for MoveEntries<K, V> {
+ #[inline]
+ fn next(&mut self) -> Option<(K, V)> {
+ self.inner.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.inner.size_hint()
+ }
+}
+
+/// HashMap keys iterator
+pub type Keys<'a, K, V> =
+ iter::Map<'static, (&'a K, &'a V), &'a K, Entries<'a, K, V>>;
+
+/// HashMap values iterator
+pub type Values<'a, K, V> =
+ iter::Map<'static, (&'a K, &'a V), &'a V, Entries<'a, K, V>>;
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> FromIterator<(K, V)> for HashMap<K, V, H> {
+ fn from_iter<T: Iterator<(K, V)>>(iter: T) -> HashMap<K, V, H> {
+ let (lower, _) = iter.size_hint();
+ let mut map = HashMap::with_capacity_and_hasher(lower, Default::default());
+ map.extend(iter);
+ map
+ }
+}
+
+impl<K: Eq + Hash<S>, V, S, H: Hasher<S> + Default> Extendable<(K, V)> for HashMap<K, V, H> {
+ fn extend<T: Iterator<(K, V)>>(&mut self, mut iter: T) {
+ for (k, v) in iter {
+ self.insert(k, v);
+ }
+ }
+}
+
+#[cfg(test)]
+mod test_map {
+ use prelude::*;
+
+ use super::HashMap;
+ use cmp::Equiv;
+ use hash;
+ use iter::{Iterator,range_inclusive,range_step_inclusive};
+ use cell::RefCell;
+
+ struct KindaIntLike(int);
+
+ impl Equiv<int> for KindaIntLike {
+ fn equiv(&self, other: &int) -> bool {
+ let KindaIntLike(this) = *self;
+ this == *other
+ }
+ }
+ impl<S: hash::Writer> hash::Hash<S> for KindaIntLike {
+ fn hash(&self, state: &mut S) {
+ let KindaIntLike(this) = *self;
+ this.hash(state)
+ }
+ }
+
+ #[test]
+ fn test_create_capacity_zero() {
+ let mut m = HashMap::with_capacity(0);
+
+ assert!(m.insert(1i, 1i));
+
+ assert!(m.contains_key(&1));
+ assert!(!m.contains_key(&0));
+ }
+
+ #[test]
+ fn test_insert() {
+ let mut m = HashMap::new();
+ assert_eq!(m.len(), 0);
+ assert!(m.insert(1i, 2i));
+ assert_eq!(m.len(), 1);
+ assert!(m.insert(2i, 4i));
+ assert_eq!(m.len(), 2);
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&2).unwrap(), 4);
+ }
+
+ local_data_key!(drop_vector: RefCell<Vec<int>>)
+
+ #[deriving(Hash, PartialEq, Eq)]
+ struct Dropable {
+ k: uint
+ }
+
+ impl Dropable {
+ fn new(k: uint) -> Dropable {
+ let v = drop_vector.get().unwrap();
+ v.borrow_mut().as_mut_slice()[k] += 1;
+
+ Dropable { k: k }
+ }
+ }
+
+ impl Drop for Dropable {
+ fn drop(&mut self) {
+ let v = drop_vector.get().unwrap();
+ v.borrow_mut().as_mut_slice()[self.k] -= 1;
+ }
+ }
+
+ impl Clone for Dropable {
+ fn clone(&self) -> Dropable {
+ Dropable::new(self.k)
+ }
+ }
+
+ #[test]
+ fn test_drops() {
+ drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
+
+ {
+ let mut m = HashMap::new();
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ drop(v);
+
+ for i in range(0u, 100) {
+ let d1 = Dropable::new(i);
+ let d2 = Dropable::new(i+100);
+ m.insert(d1, d2);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ for i in range(0u, 50) {
+ let k = Dropable::new(i);
+ let v = m.pop(&k);
+
+ assert!(v.is_some());
+
+ let v = drop_vector.get().unwrap();
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ assert_eq!(v.borrow().as_slice()[i+100], 1);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 50) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ assert_eq!(v.borrow().as_slice()[i+100], 0);
+ }
+
+ for i in range(50u, 100) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ assert_eq!(v.borrow().as_slice()[i+100], 1);
+ }
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ }
+
+ #[test]
+ fn test_move_iter_drops() {
+ drop_vector.replace(Some(RefCell::new(Vec::from_elem(200, 0i))));
+
+ let hm = {
+ let mut hm = HashMap::new();
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ drop(v);
+
+ for i in range(0u, 100) {
+ let d1 = Dropable::new(i);
+ let d2 = Dropable::new(i+100);
+ hm.insert(d1, d2);
+ }
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ hm
+ };
+
+ // By the way, ensure that cloning doesn't screw up the dropping.
+ drop(hm.clone());
+
+ {
+ let mut half = hm.move_iter().take(50);
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 1);
+ }
+ drop(v);
+
+ for _ in half {}
+
+ let v = drop_vector.get().unwrap();
+ let nk = range(0u, 100).filter(|&i| {
+ v.borrow().as_slice()[i] == 1
+ }).count();
+
+ let nv = range(0u, 100).filter(|&i| {
+ v.borrow().as_slice()[i+100] == 1
+ }).count();
+
+ assert_eq!(nk, 50);
+ assert_eq!(nv, 50);
+ };
+
+ let v = drop_vector.get().unwrap();
+ for i in range(0u, 200) {
+ assert_eq!(v.borrow().as_slice()[i], 0);
+ }
+ }
+
+ #[test]
+ fn test_empty_pop() {
+ let mut m: HashMap<int, bool> = HashMap::new();
+ assert_eq!(m.pop(&0), None);
+ }
+
+ #[test]
+ fn test_lots_of_insertions() {
+ let mut m = HashMap::new();
+
+ // Try this a few times to make sure we never screw up the hashmap's
+ // internal state.
+ for _ in range(0i, 10) {
+ assert!(m.is_empty());
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.insert(i, i));
+
+ for j in range_inclusive(1, i) {
+ let r = m.find(&j);
+ assert_eq!(r, Some(&j));
+ }
+
+ for j in range_inclusive(i+1, 1000) {
+ let r = m.find(&j);
+ assert_eq!(r, None);
+ }
+ }
+
+ for i in range_inclusive(1001i, 2000) {
+ assert!(!m.contains_key(&i));
+ }
+
+ // remove forwards
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.remove(&i));
+
+ for j in range_inclusive(1, i) {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in range_inclusive(i+1, 1000) {
+ assert!(m.contains_key(&j));
+ }
+ }
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(!m.contains_key(&i));
+ }
+
+ for i in range_inclusive(1i, 1000) {
+ assert!(m.insert(i, i));
+ }
+
+ // remove backwards
+ for i in range_step_inclusive(1000i, 1, -1) {
+ assert!(m.remove(&i));
+
+ for j in range_inclusive(i, 1000) {
+ assert!(!m.contains_key(&j));
+ }
+
+ for j in range_inclusive(1, i-1) {
+ assert!(m.contains_key(&j));
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_find_mut() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1i, 12i));
+ assert!(m.insert(2i, 8i));
+ assert!(m.insert(5i, 14i));
+ let new = 100;
+ match m.find_mut(&5) {
+ None => fail!(), Some(x) => *x = new
+ }
+ assert_eq!(m.find(&5), Some(&new));
+ }
+
+ #[test]
+ fn test_insert_overwrite() {
+ let mut m = HashMap::new();
+ assert!(m.insert(1i, 2i));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert!(!m.insert(1i, 3i));
+ assert_eq!(*m.find(&1).unwrap(), 3);
+ }
+
+ #[test]
+ fn test_insert_conflicts() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert!(m.insert(5i, 3i));
+ assert!(m.insert(9i, 4i));
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ }
+
+ #[test]
+ fn test_update_with() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+
+ for i in range(1i, 1000) {
+ assert_eq!(
+ i + 2,
+ *m.insert_or_update_with(i + 1, i + 2, |_k, _v| {
+ fail!("Key not yet present");
+ })
+ );
+ assert_eq!(
+ i + 1,
+ *m.insert_or_update_with(i, i + 3, |k, v| {
+ assert_eq!(*k, i);
+ assert_eq!(*v, i + 1);
+ })
+ );
+ }
+ }
+
+ #[test]
+ fn test_conflict_remove() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert!(m.insert(5, 3));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert!(m.insert(9, 4));
+ assert_eq!(*m.find(&1).unwrap(), 2);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert!(m.remove(&1));
+ assert_eq!(*m.find(&9).unwrap(), 4);
+ assert_eq!(*m.find(&5).unwrap(), 3);
+ }
+
+ #[test]
+ fn test_is_empty() {
+ let mut m = HashMap::with_capacity(4);
+ assert!(m.insert(1i, 2i));
+ assert!(!m.is_empty());
+ assert!(m.remove(&1));
+ assert!(m.is_empty());
+ }
+
+ #[test]
+ fn test_pop() {
+ let mut m = HashMap::new();
+ m.insert(1i, 2i);
+ assert_eq!(m.pop(&1), Some(2));
+ assert_eq!(m.pop(&1), None);
+ }
+
+ #[test]
+ #[allow(experimental)]
+ fn test_pop_equiv() {
+ let mut m = HashMap::new();
+ m.insert(1i, 2i);
+ assert_eq!(m.pop_equiv(&KindaIntLike(1)), Some(2));
+ assert_eq!(m.pop_equiv(&KindaIntLike(1)), None);
+ }
+
+ #[test]
+ fn test_swap() {
+ let mut m = HashMap::new();
+ assert_eq!(m.swap(1i, 2i), None);
+ assert_eq!(m.swap(1i, 3i), Some(2));
+ assert_eq!(m.swap(1i, 4i), Some(3));
+ }
+
+ #[test]
+ fn test_iterate() {
+ let mut m = HashMap::with_capacity(4);
+ for i in range(0u, 32) {
+ assert!(m.insert(i, i*2));
+ }
+ assert_eq!(m.len(), 32);
+
+ let mut observed: u32 = 0;
+
+ for (k, v) in m.iter() {
+ assert_eq!(*v, *k * 2);
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_keys() {
+ let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
+ let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let keys = map.keys().map(|&k| k).collect::<Vec<int>>();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn test_values() {
+ let vec = vec![(1i, 'a'), (2i, 'b'), (3i, 'c')];
+ let map = vec.move_iter().collect::<HashMap<int, char>>();
+ let values = map.values().map(|&v| v).collect::<Vec<char>>();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ fn test_find() {
+ let mut m = HashMap::new();
+ assert!(m.find(&1i).is_none());
+ m.insert(1i, 2i);
+ match m.find(&1) {
+ None => fail!(),
+ Some(v) => assert_eq!(*v, 2)
+ }
+ }
+
+ #[test]
+ fn test_find_copy() {
+ let mut m = HashMap::new();
+ assert!(m.find(&1i).is_none());
+
+ for i in range(1i, 10000) {
+ m.insert(i, i + 7);
+ match m.find_copy(&i) {
+ None => fail!(),
+ Some(v) => assert_eq!(v, i + 7)
+ }
+ for j in range(1i, i/100) {
+ match m.find_copy(&j) {
+ None => fail!(),
+ Some(v) => assert_eq!(v, j + 7)
+ }
+ }
+ }
+ }
+
+ #[test]
+ fn test_eq() {
+ let mut m1 = HashMap::new();
+ m1.insert(1i, 2i);
+ m1.insert(2i, 3i);
+ m1.insert(3i, 4i);
+
+ let mut m2 = HashMap::new();
+ m2.insert(1i, 2i);
+ m2.insert(2i, 3i);
+
+ assert!(m1 != m2);
+
+ m2.insert(3i, 4i);
+
+ assert_eq!(m1, m2);
+ }
+
+ #[test]
+ fn test_show() {
+ let mut map: HashMap<int, int> = HashMap::new();
+ let empty: HashMap<int, int> = HashMap::new();
+
+ map.insert(1i, 2i);
+ map.insert(3i, 4i);
+
+ let map_str = format!("{}", map);
+
+ assert!(map_str == "{1: 2, 3: 4}".to_string() || map_str == "{3: 4, 1: 2}".to_string());
+ assert_eq!(format!("{}", empty), "{}".to_string());
+ }
+
+ #[test]
+ fn test_expand() {
+ let mut m = HashMap::new();
+
+ assert_eq!(m.len(), 0);
+ assert!(m.is_empty());
+
+ let mut i = 0u;
+ let old_cap = m.table.capacity();
+ while old_cap == m.table.capacity() {
+ m.insert(i, i);
+ i += 1;
+ }
+
+ assert_eq!(m.len(), i);
+ assert!(!m.is_empty());
+ }
+
+ #[test]
+ fn test_resize_policy() {
+ let mut m = HashMap::new();
+
+ assert_eq!(m.len(), 0);
+ assert_eq!(m.table.capacity(), 0);
+ assert!(m.is_empty());
+
+ m.insert(0, 0);
+ m.remove(&0);
+ assert!(m.is_empty());
+ let initial_cap = m.table.capacity();
+ m.reserve(initial_cap * 2);
+ let cap = m.table.capacity();
+
+ assert_eq!(cap, initial_cap * 2);
+
+ let mut i = 0u;
+ for _ in range(0, cap * 3 / 4) {
+ m.insert(i, i);
+ i += 1;
+ }
+ // three quarters full
+
+ assert_eq!(m.len(), i);
+ assert_eq!(m.table.capacity(), cap);
+
+ for _ in range(0, cap / 4) {
+ m.insert(i, i);
+ i += 1;
+ }
+ // half full
+
+ let new_cap = m.table.capacity();
+ assert_eq!(new_cap, cap * 2);
+
+ for _ in range(0, cap / 2 - 1) {
+ i -= 1;
+ m.remove(&i);
+ assert_eq!(m.table.capacity(), new_cap);
+ }
+ // A little more than one quarter full.
+ // Shrinking starts as we remove more elements:
+ for _ in range(0, cap / 2 - 1) {
+ i -= 1;
+ m.remove(&i);
+ }
+
+ assert_eq!(m.len(), i);
+ assert!(!m.is_empty());
+ assert_eq!(m.table.capacity(), cap);
+ }
+
+ #[test]
+ fn test_find_equiv() {
+ let mut m = HashMap::new();
+
+ let (foo, bar, baz) = (1i,2i,3i);
+ m.insert("foo".to_string(), foo);
+ m.insert("bar".to_string(), bar);
+ m.insert("baz".to_string(), baz);
+
+
+ assert_eq!(m.find_equiv(&("foo")), Some(&foo));
+ assert_eq!(m.find_equiv(&("bar")), Some(&bar));
+ assert_eq!(m.find_equiv(&("baz")), Some(&baz));
+
+ assert_eq!(m.find_equiv(&("qux")), None);
+ }
+
+ #[test]
+ fn test_from_iter() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ for &(k, v) in xs.iter() {
+ assert_eq!(map.find(&k), Some(&v));
+ }
+ }
+
+ #[test]
+ fn test_size_hint() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ let mut iter = map.iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+ }
+
+ #[test]
+ fn test_mut_size_hint() {
+ let xs = [(1i, 1i), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
+
+ let mut map: HashMap<int, int> = xs.iter().map(|&x| x).collect();
+
+ let mut iter = map.mut_iter();
+
+ for _ in iter.by_ref().take(3) {}
+
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+ }
+
+ #[test]
+ fn test_index() {
+ let mut map: HashMap<int, int> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ assert_eq!(map[2], 1);
+ }
+
+ #[test]
+ #[should_fail]
+ fn test_index_nonexistent() {
+ let mut map: HashMap<int, int> = HashMap::new();
+
+ map.insert(1, 2);
+ map.insert(2, 1);
+ map.insert(3, 4);
+
+ map[4];
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Unordered containers, implemented as hash-tables
+
+pub use self::map::HashMap;
+pub use self::map::Entries;
+pub use self::map::MutEntries;
+pub use self::map::MoveEntries;
+pub use self::map::Keys;
+pub use self::map::Values;
+pub use self::map::INITIAL_CAPACITY;
+pub use self::set::HashSet;
+pub use self::set::SetItems;
+pub use self::set::SetMoveItems;
+pub use self::set::SetAlgebraItems;
+
+mod bench;
+mod map;
+mod set;
+mod table;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp::{Eq, Equiv, PartialEq};
+use collections::{Collection, Mutable, Set, MutableSet, Map, MutableMap};
+use default::Default;
+use fmt::Show;
+use fmt;
+use hash::{Hash, Hasher, RandomSipHasher};
+use iter::{Iterator, FromIterator, FilterMap, Chain, Repeat, Zip, Extendable};
+use iter;
+use option::{Some, None};
+use result::{Ok, Err};
+
+use super::{HashMap, Entries, MoveEntries, INITIAL_CAPACITY};
+
+
+// Future Optimization (FIXME!)
+// =============================
+//
+// Iteration over zero sized values is a noop. There is no need
+// for `bucket.val` in the case of HashSet. I suppose we would need HKT
+// to get rid of it properly.
+
+/// An implementation of a hash set using the underlying representation of a
+/// HashMap where the value is (). As with the `HashMap` type, a `HashSet`
+/// requires that the elements implement the `Eq` and `Hash` traits.
+///
+/// # Example
+///
+/// ```
+/// use std::collections::HashSet;
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `HashSet<&str>` in this example).
+/// let mut books = HashSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons");
+/// books.insert("To Kill a Mockingbird");
+/// books.insert("The Odyssey");
+/// books.insert("The Great Gatsby");
+///
+/// // Check for a specific one.
+/// if !books.contains(&("The Winds of Winter")) {
+/// println!("We have {} books, but The Winds of Winter ain't one.",
+/// books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove(&"The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in books.iter() {
+/// println!("{}", *book);
+/// }
+/// ```
+///
+/// The easiest way to use `HashSet` with a custom type is to derive
+/// `Eq` and `Hash`. We must also derive `PartialEq`; this will in the
+/// future be implied by `Eq`.
+///
+/// ```
+/// use std::collections::HashSet;
+/// #[deriving(Hash, Eq, PartialEq, Show)]
+/// struct Viking<'a> {
+/// name: &'a str,
+/// power: uint,
+/// }
+///
+/// let mut vikings = HashSet::new();
+///
+/// vikings.insert(Viking { name: "Einar", power: 9u });
+/// vikings.insert(Viking { name: "Einar", power: 9u });
+/// vikings.insert(Viking { name: "Olaf", power: 4u });
+/// vikings.insert(Viking { name: "Harald", power: 8u });
+///
+/// // Use derived implementation to print the vikings.
+/// for x in vikings.iter() {
+/// println!("{}", x);
+/// }
+/// ```
+#[deriving(Clone)]
+pub struct HashSet<T, H = RandomSipHasher> {
+ map: HashMap<T, (), H>
+}
+
+impl<T: Hash + Eq> HashSet<T, RandomSipHasher> {
+ /// Create an empty HashSet.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::new();
+ /// ```
+ #[inline]
+ pub fn new() -> HashSet<T, RandomSipHasher> {
+ HashSet::with_capacity(INITIAL_CAPACITY)
+ }
+
+ /// Create an empty HashSet with space for at least `n` elements in
+ /// the hash table.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::with_capacity(10);
+ /// ```
+ #[inline]
+ pub fn with_capacity(capacity: uint) -> HashSet<T, RandomSipHasher> {
+ HashSet { map: HashMap::with_capacity(capacity) }
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> HashSet<T, H> {
+ /// Creates a new empty hash set which will use the given hasher to hash
+ /// keys.
+ ///
+ /// The hash set is also created with the default initial capacity.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut set = HashSet::with_hasher(h);
+ /// set.insert(2u);
+ /// ```
+ #[inline]
+ pub fn with_hasher(hasher: H) -> HashSet<T, H> {
+ HashSet::with_capacity_and_hasher(INITIAL_CAPACITY, hasher)
+ }
+
+ /// Create an empty HashSet with space for at least `capacity`
+ /// elements in the hash table, using `hasher` to hash the keys.
+ ///
+ /// Warning: `hasher` is normally randomly generated, and
+ /// is designed to allow `HashSet`s to be resistant to attacks that
+ /// cause many collisions and very poor performance. Setting it
+ /// manually using this function can expose a DoS attack vector.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::sip::SipHasher;
+ ///
+ /// let h = SipHasher::new();
+ /// let mut set = HashSet::with_capacity_and_hasher(10u, h);
+ /// set.insert(1i);
+ /// ```
+ #[inline]
+ pub fn with_capacity_and_hasher(capacity: uint, hasher: H) -> HashSet<T, H> {
+ HashSet { map: HashMap::with_capacity_and_hasher(capacity, hasher) }
+ }
+
+ /// Reserve space for at least `n` elements in the hash table.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set: HashSet<int> = HashSet::new();
+ /// set.reserve(10);
+ /// ```
+ pub fn reserve(&mut self, n: uint) {
+ self.map.reserve(n)
+ }
+
+ /// Returns true if the hash set contains a value equivalent to the
+ /// given query value.
+ ///
+ /// # Example
+ ///
+ /// This is a slightly silly example where we define the number's
+ /// parity as the equivalence class. It is important that the
+ /// values hash the same, which is why we implement `Hash`.
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// use std::hash::Hash;
+ /// use std::hash::sip::SipState;
+ ///
+ /// #[deriving(Eq, PartialEq)]
+ /// struct EvenOrOdd {
+ /// num: uint
+ /// };
+ ///
+ /// impl Hash for EvenOrOdd {
+ /// fn hash(&self, state: &mut SipState) {
+ /// let parity = self.num % 2;
+ /// parity.hash(state);
+ /// }
+ /// }
+ ///
+ /// impl Equiv<EvenOrOdd> for EvenOrOdd {
+ /// fn equiv(&self, other: &EvenOrOdd) -> bool {
+ /// self.num % 2 == other.num % 2
+ /// }
+ /// }
+ ///
+ /// let mut set = HashSet::new();
+ /// set.insert(EvenOrOdd { num: 3u });
+ ///
+ /// assert!(set.contains_equiv(&EvenOrOdd { num: 3u }));
+ /// assert!(set.contains_equiv(&EvenOrOdd { num: 5u }));
+ /// assert!(!set.contains_equiv(&EvenOrOdd { num: 4u }));
+ /// assert!(!set.contains_equiv(&EvenOrOdd { num: 2u }));
+ ///
+ /// ```
+ pub fn contains_equiv<Q: Hash<S> + Equiv<T>>(&self, value: &Q) -> bool {
+ self.map.contains_key_equiv(value)
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+ /// Iterator element type is `&'a T`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a");
+ /// set.insert("b");
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in set.iter() {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ pub fn iter<'a>(&'a self) -> SetItems<'a, T> {
+ self.map.keys()
+ }
+
+ /// Creates a consuming iterator, that is, one that moves each value out
+ /// of the set in arbitrary order. The set cannot be used after calling
+ /// this.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let mut set = HashSet::new();
+ /// set.insert("a".to_string());
+ /// set.insert("b".to_string());
+ ///
+ /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
+ /// let v: Vec<String> = set.move_iter().collect();
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in v.iter() {
+ /// println!("{}", x);
+ /// }
+ /// ```
+ pub fn move_iter(self) -> SetMoveItems<T> {
+ self.map.move_iter().map(|(k, _)| k)
+ }
+
+ /// Visit the values representing the difference.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Can be seen as `a - b`.
+ /// for x in a.difference(&b) {
+ /// println!("{}", x); // Print 1
+ /// }
+ ///
+ /// let diff: HashSet<int> = a.difference(&b).map(|&x| x).collect();
+ /// assert_eq!(diff, [1i].iter().map(|&x| x).collect());
+ ///
+ /// // Note that difference is not symmetric,
+ /// // and `b - a` means something else:
+ /// let diff: HashSet<int> = b.difference(&a).map(|&x| x).collect();
+ /// assert_eq!(diff, [4i].iter().map(|&x| x).collect());
+ /// ```
+ pub fn difference<'a>(&'a self, other: &'a HashSet<T, H>) -> SetAlgebraItems<'a, T, H> {
+ Repeat::new(other).zip(self.iter())
+ .filter_map(|(other, elt)| {
+ if !other.contains(elt) { Some(elt) } else { None }
+ })
+ }
+
+ /// Visit the values representing the symmetric difference.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Print 1, 4 in arbitrary order.
+ /// for x in a.symmetric_difference(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let diff1: HashSet<int> = a.symmetric_difference(&b).map(|&x| x).collect();
+ /// let diff2: HashSet<int> = b.symmetric_difference(&a).map(|&x| x).collect();
+ ///
+ /// assert_eq!(diff1, diff2);
+ /// assert_eq!(diff1, [1i, 4].iter().map(|&x| x).collect());
+ /// ```
+ pub fn symmetric_difference<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> Chain<SetAlgebraItems<'a, T, H>, SetAlgebraItems<'a, T, H>> {
+ self.difference(other).chain(other.difference(self))
+ }
+
+ /// Visit the values representing the intersection.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Print 2, 3 in arbitrary order.
+ /// for x in a.intersection(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let diff: HashSet<int> = a.intersection(&b).map(|&x| x).collect();
+ /// assert_eq!(diff, [2i, 3].iter().map(|&x| x).collect());
+ /// ```
+ pub fn intersection<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> SetAlgebraItems<'a, T, H> {
+ Repeat::new(other).zip(self.iter())
+ .filter_map(|(other, elt)| {
+ if other.contains(elt) { Some(elt) } else { None }
+ })
+ }
+
+ /// Visit the values representing the union.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::collections::HashSet;
+ /// let a: HashSet<int> = [1i, 2, 3].iter().map(|&x| x).collect();
+ /// let b: HashSet<int> = [4i, 2, 3, 4].iter().map(|&x| x).collect();
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order.
+ /// for x in a.union(&b) {
+ /// println!("{}", x);
+ /// }
+ ///
+ /// let diff: HashSet<int> = a.union(&b).map(|&x| x).collect();
+ /// assert_eq!(diff, [1i, 2, 3, 4].iter().map(|&x| x).collect());
+ /// ```
+ pub fn union<'a>(&'a self, other: &'a HashSet<T, H>)
+ -> Chain<SetItems<'a, T>, SetAlgebraItems<'a, T, H>> {
+ self.iter().chain(other.difference(self))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> PartialEq for HashSet<T, H> {
+ fn eq(&self, other: &HashSet<T, H>) -> bool {
+ if self.len() != other.len() { return false; }
+
+ self.iter().all(|key| other.contains(key))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Eq for HashSet<T, H> {}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Collection for HashSet<T, H> {
+ fn len(&self) -> uint { self.map.len() }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Mutable for HashSet<T, H> {
+ fn clear(&mut self) { self.map.clear() }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> Set<T> for HashSet<T, H> {
+ fn contains(&self, value: &T) -> bool { self.map.contains_key(value) }
+
+ fn is_disjoint(&self, other: &HashSet<T, H>) -> bool {
+ self.iter().all(|v| !other.contains(v))
+ }
+
+ fn is_subset(&self, other: &HashSet<T, H>) -> bool {
+ self.iter().all(|v| other.contains(v))
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S>> MutableSet<T> for HashSet<T, H> {
+ fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()) }
+
+ fn remove(&mut self, value: &T) -> bool { self.map.remove(value) }
+}
+
+impl<T: Eq + Hash<S> + fmt::Show, S, H: Hasher<S>> fmt::Show for HashSet<T, H> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ try!(write!(f, "{{"));
+
+ for (i, x) in self.iter().enumerate() {
+ if i != 0 { try!(write!(f, ", ")); }
+ try!(write!(f, "{}", *x));
+ }
+
+ write!(f, "}}")
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> FromIterator<T> for HashSet<T, H> {
+ fn from_iter<I: Iterator<T>>(iter: I) -> HashSet<T, H> {
+ let (lower, _) = iter.size_hint();
+ let mut set = HashSet::with_capacity_and_hasher(lower, Default::default());
+ set.extend(iter);
+ set
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Extendable<T> for HashSet<T, H> {
+ fn extend<I: Iterator<T>>(&mut self, mut iter: I) {
+ for k in iter {
+ self.insert(k);
+ }
+ }
+}
+
+impl<T: Eq + Hash<S>, S, H: Hasher<S> + Default> Default for HashSet<T, H> {
+ fn default() -> HashSet<T, H> {
+ HashSet::with_hasher(Default::default())
+ }
+}
+
+/// HashSet iterator
+pub type SetItems<'a, K> =
+ iter::Map<'static, (&'a K, &'a ()), &'a K, Entries<'a, K, ()>>;
+
+/// HashSet move iterator
+pub type SetMoveItems<K> =
+ iter::Map<'static, (K, ()), K, MoveEntries<K, ()>>;
+
+// `Repeat` is used to feed the filter closure an explicit capture
+// of a reference to the other set
+/// Set operations iterator
+pub type SetAlgebraItems<'a, T, H> =
+ FilterMap<'static, (&'a HashSet<T, H>, &'a T), &'a T,
+ Zip<Repeat<&'a HashSet<T, H>>, SetItems<'a, T>>>;
+
+#[cfg(test)]
+mod test_set {
+ use prelude::*;
+
+ use super::HashSet;
+ use slice::ImmutablePartialEqSlice;
+ use collections::Collection;
+
+ #[test]
+ fn test_disjoint() {
+ let mut xs = HashSet::new();
+ let mut ys = HashSet::new();
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(5i));
+ assert!(ys.insert(11i));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(xs.insert(7));
+ assert!(xs.insert(19));
+ assert!(xs.insert(4));
+ assert!(ys.insert(2));
+ assert!(ys.insert(-11));
+ assert!(xs.is_disjoint(&ys));
+ assert!(ys.is_disjoint(&xs));
+ assert!(ys.insert(7));
+ assert!(!xs.is_disjoint(&ys));
+ assert!(!ys.is_disjoint(&xs));
+ }
+
+ #[test]
+ fn test_subset_and_superset() {
+ let mut a = HashSet::new();
+ assert!(a.insert(0i));
+ assert!(a.insert(5));
+ assert!(a.insert(11));
+ assert!(a.insert(7));
+
+ let mut b = HashSet::new();
+ assert!(b.insert(0i));
+ assert!(b.insert(7));
+ assert!(b.insert(19));
+ assert!(b.insert(250));
+ assert!(b.insert(11));
+ assert!(b.insert(200));
+
+ assert!(!a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(!b.is_superset(&a));
+
+ assert!(b.insert(5));
+
+ assert!(a.is_subset(&b));
+ assert!(!a.is_superset(&b));
+ assert!(!b.is_subset(&a));
+ assert!(b.is_superset(&a));
+ }
+
+ #[test]
+ fn test_iterate() {
+ let mut a = HashSet::new();
+ for i in range(0u, 32) {
+ assert!(a.insert(i));
+ }
+ let mut observed: u32 = 0;
+ for k in a.iter() {
+ observed |= 1 << *k;
+ }
+ assert_eq!(observed, 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_intersection() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(11i));
+ assert!(a.insert(1));
+ assert!(a.insert(3));
+ assert!(a.insert(77));
+ assert!(a.insert(103));
+ assert!(a.insert(5));
+ assert!(a.insert(-5));
+
+ assert!(b.insert(2i));
+ assert!(b.insert(11));
+ assert!(b.insert(77));
+ assert!(b.insert(-9));
+ assert!(b.insert(-42));
+ assert!(b.insert(5));
+ assert!(b.insert(3));
+
+ let mut i = 0;
+ let expected = [3, 5, 11, 77];
+ for x in a.intersection(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(3i));
+ assert!(b.insert(9));
+
+ let mut i = 0;
+ let expected = [1, 5, 11];
+ for x in a.difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_symmetric_difference() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+
+ assert!(b.insert(-2i));
+ assert!(b.insert(3));
+ assert!(b.insert(9));
+ assert!(b.insert(14));
+ assert!(b.insert(22));
+
+ let mut i = 0;
+ let expected = [-2, 1, 5, 11, 14, 22];
+ for x in a.symmetric_difference(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_union() {
+ let mut a = HashSet::new();
+ let mut b = HashSet::new();
+
+ assert!(a.insert(1i));
+ assert!(a.insert(3));
+ assert!(a.insert(5));
+ assert!(a.insert(9));
+ assert!(a.insert(11));
+ assert!(a.insert(16));
+ assert!(a.insert(19));
+ assert!(a.insert(24));
+
+ assert!(b.insert(-2i));
+ assert!(b.insert(1));
+ assert!(b.insert(5));
+ assert!(b.insert(9));
+ assert!(b.insert(13));
+ assert!(b.insert(19));
+
+ let mut i = 0;
+ let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
+ for x in a.union(&b) {
+ assert!(expected.contains(x));
+ i += 1
+ }
+ assert_eq!(i, expected.len());
+ }
+
+ #[test]
+ fn test_from_iter() {
+ let xs = [1i, 2, 3, 4, 5, 6, 7, 8, 9];
+
+ let set: HashSet<int> = xs.iter().map(|&x| x).collect();
+
+ for x in xs.iter() {
+ assert!(set.contains(x));
+ }
+ }
+
+ #[test]
+ fn test_move_iter() {
+ let hs = {
+ let mut hs = HashSet::new();
+
+ hs.insert('a');
+ hs.insert('b');
+
+ hs
+ };
+
+ let v = hs.move_iter().collect::<Vec<char>>();
+ assert!(['a', 'b'] == v.as_slice() || ['b', 'a'] == v.as_slice());
+ }
+
+ #[test]
+ fn test_eq() {
+ // These constants once happened to expose a bug in insert().
+ // I'm keeping them around to prevent a regression.
+ let mut s1 = HashSet::new();
+
+ s1.insert(1i);
+ s1.insert(2);
+ s1.insert(3);
+
+ let mut s2 = HashSet::new();
+
+ s2.insert(1i);
+ s2.insert(2);
+
+ assert!(s1 != s2);
+
+ s2.insert(3);
+
+ assert_eq!(s1, s2);
+ }
+
+ #[test]
+ fn test_show() {
+ let mut set: HashSet<int> = HashSet::new();
+ let empty: HashSet<int> = HashSet::new();
+
+ set.insert(1i);
+ set.insert(2);
+
+ let set_str = format!("{}", set);
+
+ assert!(set_str == "{1, 2}".to_string() || set_str == "{2, 1}".to_string());
+ assert_eq!(format!("{}", empty), "{}".to_string());
+ }
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15883
+
+use clone::Clone;
+use cmp;
+use hash::{Hash, Hasher};
+use iter::{Iterator, count};
+use kinds::marker;
+use mem::{min_align_of, size_of};
+use mem;
+use num::{CheckedAdd, CheckedMul, is_power_of_two};
+use ops::{Deref, DerefMut, Drop};
+use option::{Some, None, Option};
+use ptr::{RawPtr, copy_nonoverlapping_memory, zero_memory};
+use ptr;
+use rt::heap::{allocate, deallocate};
+
+static EMPTY_BUCKET: u64 = 0u64;
+
+/// The raw hashtable, providing safe-ish access to the unzipped and highly
+/// optimized arrays of hashes, keys, and values.
+///
+/// This design uses less memory and is a lot faster than the naive
+/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
+/// option on every element, and we get a generally more cache-aware design.
+///
+/// Essential invariants of this structure:
+///
+/// - if t.hashes[i] == EMPTY_BUCKET, then `Bucket::at_index(&t, i).raw`
+/// points to 'undefined' contents. Don't read from it. This invariant is
+/// enforced outside this module with the `EmptyBucket`, `FullBucket`,
+/// and `SafeHash` types.
+///
+/// - An `EmptyBucket` is only constructed at an index with
+/// a hash of EMPTY_BUCKET.
+///
+/// - A `FullBucket` is only constructed at an index with a
+/// non-EMPTY_BUCKET hash.
+///
+/// - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
+/// around hashes of zero by changing them to 0x8000_0000_0000_0000,
+/// which will likely map to the same bucket, while not being confused
+/// with "empty".
+///
+/// - All three "arrays represented by pointers" are the same length:
+/// `capacity`. This is set at creation and never changes. The arrays
+/// are unzipped to save space (we don't have to pay for the padding
+/// between odd sized elements, such as in a map from u64 to u8), and
+/// to be more cache aware (scanning through 8 hashes brings in at most
+/// 2 cache lines, since they're all right beside each other).
+///
+/// You can kind of think of this module/data structure as a safe wrapper
+/// around just the "table" part of the hashtable. It enforces some
+/// invariants at the type level and employs some performance trickery,
+/// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
+#[unsafe_no_drop_flag]
+pub struct RawTable<K, V> {
+ capacity: uint,
+ size: uint,
+ hashes: *mut u64,
+ // Because K/V do not appear directly in any of the types in the struct,
+ // inform rustc that in fact instances of K and V are reachable from here.
+ marker: marker::CovariantType<(K,V)>,
+}
+
+struct RawBucket<K, V> {
+ hash: *mut u64,
+ key: *mut K,
+ val: *mut V
+}
+
+pub struct Bucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub struct EmptyBucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub struct FullBucket<K, V, M> {
+ raw: RawBucket<K, V>,
+ idx: uint,
+ table: M
+}
+
+pub type EmptyBucketImm<'table, K, V> = EmptyBucket<K, V, &'table RawTable<K, V>>;
+pub type FullBucketImm<'table, K, V> = FullBucket<K, V, &'table RawTable<K, V>>;
+
+pub type EmptyBucketMut<'table, K, V> = EmptyBucket<K, V, &'table mut RawTable<K, V>>;
+pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;
+
+pub enum BucketState<K, V, M> {
+ Empty(EmptyBucket<K, V, M>),
+ Full(FullBucket<K, V, M>),
+}
+
+// A GapThenFull encapsulates the state of two consecutive buckets at once.
+// The first bucket, called the gap, is known to be empty.
+// The second bucket is full.
+struct GapThenFull<K, V, M> {
+ gap: EmptyBucket<K, V, ()>,
+ full: FullBucket<K, V, M>,
+}
+
+/// A hash that is not zero, since we use a hash of zero to represent empty
+/// buckets.
+#[deriving(PartialEq)]
+pub struct SafeHash {
+ hash: u64,
+}
+
+impl SafeHash {
+ /// Peek at the hash value, which is guaranteed to be non-zero.
+ #[inline(always)]
+ pub fn inspect(&self) -> u64 { self.hash }
+}
+
+/// We need to remove hashes of 0. That's reserved for empty buckets.
+/// This function wraps up `hash_keyed` to be the only way outside this
+/// module to generate a SafeHash.
+pub fn make_hash<T: Hash<S>, S, H: Hasher<S>>(hasher: &H, t: &T) -> SafeHash {
+ match hasher.hash(t) {
+ // This constant is exceedingly likely to hash to the same
+ // bucket, but it won't be counted as empty! Just so we can maintain
+ // our precious uniform distribution of initial indexes.
+ EMPTY_BUCKET => SafeHash { hash: 0x8000_0000_0000_0000 },
+ h => SafeHash { hash: h },
+ }
+}
+
+// `replace` casts a `*u64` to a `*SafeHash`. Since we statically
+// ensure that a `FullBucket` points to an index with a non-zero hash,
+// and a `SafeHash` is just a `u64` with a different name, this is
+// safe.
+//
+// This test ensures that a `SafeHash` really IS the same size as a
+// `u64`. If you need to change the size of `SafeHash` (and
+// consequently make this test fail), `replace` needs to be
+// modified to no longer assume this.
+#[test]
+fn can_alias_safehash_as_u64() {
+ assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
+}
+
+impl<K, V> RawBucket<K, V> {
+ unsafe fn offset(self, count: int) -> RawBucket<K, V> {
+ RawBucket {
+ hash: self.hash.offset(count),
+ key: self.key.offset(count),
+ val: self.val.offset(count),
+ }
+ }
+}
+
+// For parameterizing over mutability.
+impl<'t, K, V> Deref<RawTable<K, V>> for &'t RawTable<K, V> {
+ fn deref(&self) -> &RawTable<K, V> {
+ &**self
+ }
+}
+
+impl<'t, K, V> Deref<RawTable<K, V>> for &'t mut RawTable<K, V> {
+ fn deref(&self) -> &RawTable<K,V> {
+ &**self
+ }
+}
+
+impl<'t, K, V> DerefMut<RawTable<K, V>> for &'t mut RawTable<K, V> {
+ fn deref_mut(&mut self) -> &mut RawTable<K,V> {
+ &mut **self
+ }
+}
+
+// Buckets hold references to the table.
+impl<K, V, M> FullBucket<K, V, M> {
+ /// Borrow a reference to the table.
+ pub fn table(&self) -> &M {
+ &self.table
+ }
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+ /// Get the raw index.
+ pub fn index(&self) -> uint {
+ self.idx
+ }
+}
+
+impl<K, V, M> EmptyBucket<K, V, M> {
+ /// Borrow a reference to the table.
+ pub fn table(&self) -> &M {
+ &self.table
+ }
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+}
+
+impl<K, V, M> Bucket<K, V, M> {
+ /// Move out the reference to the table.
+ pub fn into_table(self) -> M {
+ self.table
+ }
+ /// Get the raw index.
+ pub fn index(&self) -> uint {
+ self.idx
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> Bucket<K, V, M> {
+ pub fn new(table: M, hash: &SafeHash) -> Bucket<K, V, M> {
+ Bucket::at_index(table, hash.inspect() as uint)
+ }
+
+ pub fn at_index(table: M, ib_index: uint) -> Bucket<K, V, M> {
+ let ib_index = ib_index & (table.capacity() - 1);
+ Bucket {
+ raw: unsafe {
+ table.first_bucket_raw().offset(ib_index as int)
+ },
+ idx: ib_index,
+ table: table
+ }
+ }
+
+ pub fn first(table: M) -> Bucket<K, V, M> {
+ Bucket {
+ raw: table.first_bucket_raw(),
+ idx: 0,
+ table: table
+ }
+ }
+
+ /// Reads a bucket at a given index, returning an enum indicating whether
+ /// it's initialized or not. You need to match on this enum to get
+ /// the appropriate types to call most of the other functions in
+ /// this module.
+ pub fn peek(self) -> BucketState<K, V, M> {
+ match unsafe { *self.raw.hash } {
+ EMPTY_BUCKET =>
+ Empty(EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }),
+ _ =>
+ Full(FullBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ })
+ }
+ }
+
+ /// Modifies the bucket pointer in place to make it point to the next slot.
+ pub fn next(&mut self) {
+ // Branchless bucket iteration step.
+ // As we reach the end of the table...
+ // We take the current idx: 0111111b
+ // Xor it by its increment: ^ 1000000b
+ // ------------
+ // 1111111b
+ // Then AND with the capacity: & 1000000b
+ // ------------
+ // to get the backwards offset: 1000000b
+ // ... and it's zero at all other times.
+ let maybe_wraparound_dist = (self.idx ^ (self.idx + 1)) & self.table.capacity();
+ // Finally, we obtain the offset 1 or the offset -cap + 1.
+ let dist = 1i - (maybe_wraparound_dist as int);
+
+ self.idx += 1;
+
+ unsafe {
+ self.raw = self.raw.offset(dist);
+ }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> EmptyBucket<K, V, M> {
+ #[inline]
+ pub fn next(self) -> Bucket<K, V, M> {
+ let mut bucket = self.into_bucket();
+ bucket.next();
+ bucket
+ }
+
+ #[inline]
+ pub fn into_bucket(self) -> Bucket<K, V, M> {
+ Bucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }
+ }
+
+ pub fn gap_peek(self) -> Option<GapThenFull<K, V, M>> {
+ let gap = EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: ()
+ };
+
+ match self.next().peek() {
+ Full(bucket) => {
+ Some(GapThenFull {
+ gap: gap,
+ full: bucket
+ })
+ }
+ Empty(..) => None
+ }
+ }
+}
+
+impl<K, V, M: DerefMut<RawTable<K, V>>> EmptyBucket<K, V, M> {
+ /// Puts given key and value pair, along with the key's hash,
+ /// into this bucket in the hashtable. Note how `self` is 'moved' into
+ /// this function, because this slot will no longer be empty when
+ /// we return! A `FullBucket` is returned for later use, pointing to
+ /// the newly-filled slot in the hashtable.
+ ///
+ /// Use `make_hash` to construct a `SafeHash` to pass to this function.
+ pub fn put(mut self, hash: SafeHash, key: K, value: V)
+ -> FullBucket<K, V, M> {
+ unsafe {
+ *self.raw.hash = hash.inspect();
+ ptr::write(self.raw.key, key);
+ ptr::write(self.raw.val, value);
+ }
+
+ self.table.size += 1;
+
+ FullBucket { raw: self.raw, idx: self.idx, table: self.table }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> FullBucket<K, V, M> {
+ #[inline]
+ pub fn next(self) -> Bucket<K, V, M> {
+ let mut bucket = self.into_bucket();
+ bucket.next();
+ bucket
+ }
+
+ #[inline]
+ pub fn into_bucket(self) -> Bucket<K, V, M> {
+ Bucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ }
+ }
+
+ /// Get the distance between this bucket and the 'ideal' location
+ /// as determined by the key's hash stored in it.
+ ///
+ /// In the cited blog posts above, this is called the "distance to
+ /// initial bucket", or DIB. Also known as "probe count".
+ pub fn distance(&self) -> uint {
+ // Calculates the distance one has to travel when going from
+ // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
+ // if the destination is not reached before the end of the table.
+ (self.idx - self.hash().inspect() as uint) & (self.table.capacity() - 1)
+ }
+
+ #[inline]
+ pub fn hash(&self) -> SafeHash {
+ unsafe {
+ SafeHash {
+ hash: *self.raw.hash
+ }
+ }
+ }
+
+    /// Gets references to this bucket's key and value.
+ pub fn read(&self) -> (&K, &V) {
+ unsafe {
+ (&*self.raw.key,
+ &*self.raw.val)
+ }
+ }
+}
+
+impl<K, V, M: DerefMut<RawTable<K, V>>> FullBucket<K, V, M> {
+ /// Removes this bucket's key and value from the hashtable.
+ ///
+ /// This works similarly to `put`, building an `EmptyBucket` out of the
+ /// taken bucket.
+ pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
+ let key = self.raw.key as *const K;
+ let val = self.raw.val as *const V;
+
+ self.table.size -= 1;
+
+ unsafe {
+ *self.raw.hash = EMPTY_BUCKET;
+ (
+ EmptyBucket {
+ raw: self.raw,
+ idx: self.idx,
+ table: self.table
+ },
+ ptr::read(key),
+ ptr::read(val)
+ )
+ }
+ }
+
+ pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
+ unsafe {
+ let old_hash = ptr::replace(self.raw.hash as *mut SafeHash, h);
+ let old_key = ptr::replace(self.raw.key, k);
+ let old_val = ptr::replace(self.raw.val, v);
+
+ (old_hash, old_key, old_val)
+ }
+ }
+
+    /// Gets mutable references to this bucket's key and value.
+ pub fn read_mut(&mut self) -> (&mut K, &mut V) {
+ unsafe {
+ (&mut *self.raw.key,
+ &mut *self.raw.val)
+ }
+ }
+}
+
+impl<'t, K, V, M: Deref<RawTable<K, V>> + 't> FullBucket<K, V, M> {
+ /// Exchange a bucket state for immutable references into the table.
+ /// Because the underlying reference to the table is also consumed,
+ /// no further changes to the structure of the table are possible;
+ /// in exchange for this, the returned references have a longer lifetime
+ /// than the references returned by `read()`.
+ pub fn into_refs(self) -> (&'t K, &'t V) {
+ unsafe {
+ (&*self.raw.key,
+ &*self.raw.val)
+ }
+ }
+}
+
+impl<'t, K, V, M: DerefMut<RawTable<K, V>> + 't> FullBucket<K, V, M> {
+ /// This works similarly to `into_refs`, exchanging a bucket state
+ /// for mutable references into the table.
+ pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
+ unsafe {
+ (&mut *self.raw.key,
+ &mut *self.raw.val)
+ }
+ }
+}
+
+impl<K, V, M> BucketState<K, V, M> {
+ // For convenience.
+ pub fn expect_full(self) -> FullBucket<K, V, M> {
+ match self {
+ Full(full) => full,
+ Empty(..) => fail!("Expected full bucket")
+ }
+ }
+}
+
+impl<K, V, M: Deref<RawTable<K, V>>> GapThenFull<K, V, M> {
+ #[inline]
+ pub fn full(&self) -> &FullBucket<K, V, M> {
+ &self.full
+ }
+
+ pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
+ unsafe {
+ *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, EMPTY_BUCKET);
+ copy_nonoverlapping_memory(self.gap.raw.key, self.full.raw.key as *const K, 1);
+ copy_nonoverlapping_memory(self.gap.raw.val, self.full.raw.val as *const V, 1);
+ }
+
+ let FullBucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
+
+ match self.full.next().peek() {
+ Full(bucket) => {
+ self.gap.raw = prev_raw;
+ self.gap.idx = prev_idx;
+
+ self.full = bucket;
+
+ Some(self)
+ }
+ Empty(..) => None
+ }
+ }
+}
+
+
+/// Rounds up to a multiple of a power of two. Returns the closest multiple
+/// of `target_alignment` that is higher than or equal to `unrounded`.
+///
+/// # Failure
+///
+/// Fails if `target_alignment` is not a power of two.
+fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
+ assert!(is_power_of_two(target_alignment));
+ (unrounded + target_alignment - 1) & !(target_alignment - 1)
+}
+
+#[test]
+fn test_rounding() {
+ assert_eq!(round_up_to_next(0, 4), 0);
+ assert_eq!(round_up_to_next(1, 4), 4);
+ assert_eq!(round_up_to_next(2, 4), 4);
+ assert_eq!(round_up_to_next(3, 4), 4);
+ assert_eq!(round_up_to_next(4, 4), 4);
+ assert_eq!(round_up_to_next(5, 4), 8);
+}
+
+// Returns a tuple of (key_offset, val_offset),
+// from the start of a mallocated array.
+fn calculate_offsets(hashes_size: uint,
+ keys_size: uint, keys_align: uint,
+ vals_align: uint)
+ -> (uint, uint) {
+ let keys_offset = round_up_to_next(hashes_size, keys_align);
+ let end_of_keys = keys_offset + keys_size;
+
+ let vals_offset = round_up_to_next(end_of_keys, vals_align);
+
+ (keys_offset, vals_offset)
+}
+
+// Returns a tuple of (minimum required malloc alignment, hash_offset,
+// array_size), from the start of a mallocated array.
+fn calculate_allocation(hash_size: uint, hash_align: uint,
+ keys_size: uint, keys_align: uint,
+ vals_size: uint, vals_align: uint)
+ -> (uint, uint, uint) {
+ let hash_offset = 0;
+ let (_, vals_offset) = calculate_offsets(hash_size,
+ keys_size, keys_align,
+ vals_align);
+ let end_of_vals = vals_offset + vals_size;
+
+ let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
+
+ (min_align, hash_offset, end_of_vals)
+}
+
+#[test]
+fn test_offset_calculation() {
+ assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
+ assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
+ assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
+ assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
+ assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
+ assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
+}
+
+impl<K, V> RawTable<K, V> {
+ /// Does not initialize the buckets. The caller should ensure they,
+ /// at the very least, set every hash to EMPTY_BUCKET.
+ unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
+ if capacity == 0 {
+ return RawTable {
+ size: 0,
+ capacity: 0,
+ hashes: 0 as *mut u64,
+ marker: marker::CovariantType,
+ };
+ }
+ // No need for `checked_mul` before a more restrictive check performed
+ // later in this method.
+ let hashes_size = capacity * size_of::<u64>();
+ let keys_size = capacity * size_of::< K >();
+ let vals_size = capacity * size_of::< V >();
+
+ // Allocating hashmaps is a little tricky. We need to allocate three
+ // arrays, but since we know their sizes and alignments up front,
+ // we just allocate a single array, and then have the subarrays
+ // point into it.
+ //
+ // This is great in theory, but in practice getting the alignment
+ // right is a little subtle. Therefore, calculating offsets has been
+ // factored out into a different function.
+ let (malloc_alignment, hash_offset, size) =
+ calculate_allocation(
+ hashes_size, min_align_of::<u64>(),
+ keys_size, min_align_of::< K >(),
+ vals_size, min_align_of::< V >());
+
+ // One check for overflow that covers calculation and rounding of size.
+ let size_of_bucket = size_of::<u64>().checked_add(&size_of::<K>()).unwrap()
+ .checked_add(&size_of::<V>()).unwrap();
+ assert!(size >= capacity.checked_mul(&size_of_bucket)
+ .expect("capacity overflow"),
+ "capacity overflow");
+
+ let buffer = allocate(size, malloc_alignment);
+
+ let hashes = buffer.offset(hash_offset as int) as *mut u64;
+
+ RawTable {
+ capacity: capacity,
+ size: 0,
+ hashes: hashes,
+ marker: marker::CovariantType,
+ }
+ }
+
+ fn first_bucket_raw(&self) -> RawBucket<K, V> {
+ let hashes_size = self.capacity * size_of::<u64>();
+ let keys_size = self.capacity * size_of::<K>();
+
+ let buffer = self.hashes as *mut u8;
+ let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
+ keys_size, min_align_of::<K>(),
+ min_align_of::<V>());
+
+ unsafe {
+ RawBucket {
+ hash: self.hashes,
+ key: buffer.offset(keys_offset as int) as *mut K,
+ val: buffer.offset(vals_offset as int) as *mut V
+ }
+ }
+ }
+
+ /// Creates a new raw table from a given capacity. All buckets are
+ /// initially empty.
+ #[allow(experimental)]
+ pub fn new(capacity: uint) -> RawTable<K, V> {
+ unsafe {
+ let ret = RawTable::new_uninitialized(capacity);
+ zero_memory(ret.hashes, capacity);
+ ret
+ }
+ }
+
+ /// The hashtable's capacity, similar to a vector's.
+ pub fn capacity(&self) -> uint {
+ self.capacity
+ }
+
+ /// The number of elements ever `put` in the hashtable, minus the number
+ /// of elements ever `take`n.
+ pub fn size(&self) -> uint {
+ self.size
+ }
+
+ fn raw_buckets(&self) -> RawBuckets<K, V> {
+ RawBuckets {
+ raw: self.first_bucket_raw(),
+ hashes_end: unsafe {
+ self.hashes.offset(self.capacity as int)
+ }
+ }
+ }
+
+ pub fn iter(&self) -> Entries<K, V> {
+ Entries {
+ iter: self.raw_buckets(),
+ elems_left: self.size(),
+ }
+ }
+
+ pub fn mut_iter(&mut self) -> MutEntries<K, V> {
+ MutEntries {
+ iter: self.raw_buckets(),
+ elems_left: self.size(),
+ }
+ }
+
+ pub fn move_iter(self) -> MoveEntries<K, V> {
+ MoveEntries {
+ iter: self.raw_buckets(),
+ table: self,
+ }
+ }
+
+ /// Returns an iterator that copies out each entry. Used while the table
+ /// is being dropped.
+ unsafe fn rev_move_buckets(&mut self) -> RevMoveBuckets<K, V> {
+ let raw_bucket = self.first_bucket_raw();
+ RevMoveBuckets {
+ raw: raw_bucket.offset(self.capacity as int),
+ hashes_end: raw_bucket.hash,
+ elems_left: self.size
+ }
+ }
+}
+
+/// A raw iterator. The basis for some other iterators in this module. Although
+/// this interface is safe, it's not used outside this module.
+struct RawBuckets<'a, K, V> {
+ raw: RawBucket<K, V>,
+ hashes_end: *mut u64
+}
+
+impl<'a, K, V> Iterator<RawBucket<K, V>> for RawBuckets<'a, K, V> {
+ fn next(&mut self) -> Option<RawBucket<K, V>> {
+ while self.raw.hash != self.hashes_end {
+ unsafe {
+ // We are swapping out the pointer to a bucket and replacing
+ // it with the pointer to the next one.
+ let prev = ptr::replace(&mut self.raw, self.raw.offset(1));
+ if *prev.hash != EMPTY_BUCKET {
+ return Some(prev);
+ }
+ }
+ }
+
+ None
+ }
+}
+
+/// An iterator that moves out buckets in reverse order. It leaves the table
+/// in an inconsistent state and should only be used for dropping
+/// the table's remaining entries. It's used in the implementation of Drop.
+struct RevMoveBuckets<'a, K, V> {
+ raw: RawBucket<K, V>,
+ hashes_end: *mut u64,
+ elems_left: uint
+}
+
+impl<'a, K, V> Iterator<(K, V)> for RevMoveBuckets<'a, K, V> {
+ fn next(&mut self) -> Option<(K, V)> {
+ if self.elems_left == 0 {
+ return None;
+ }
+
+ loop {
+ debug_assert!(self.raw.hash != self.hashes_end);
+
+ unsafe {
+ self.raw = self.raw.offset(-1);
+
+ if *self.raw.hash != EMPTY_BUCKET {
+ self.elems_left -= 1;
+ return Some((
+ ptr::read(self.raw.key as *const K),
+ ptr::read(self.raw.val as *const V)
+ ));
+ }
+ }
+ }
+ }
+}
+
+/// Iterator over shared references to entries in a table.
+pub struct Entries<'a, K: 'a, V: 'a> {
+ iter: RawBuckets<'a, K, V>,
+ elems_left: uint,
+}
+
+/// Iterator over mutable references to entries in a table.
+pub struct MutEntries<'a, K: 'a, V: 'a> {
+ iter: RawBuckets<'a, K, V>,
+ elems_left: uint,
+}
+
+/// Iterator over the entries in a table, consuming the table.
+pub struct MoveEntries<K, V> {
+ table: RawTable<K, V>,
+ iter: RawBuckets<'static, K, V>
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.iter.next().map(|bucket| {
+ self.elems_left -= 1;
+ unsafe {
+ (&*bucket.key,
+ &*bucket.val)
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ (self.elems_left, Some(self.elems_left))
+ }
+}
+
+impl<'a, K, V> Iterator<(&'a K, &'a mut V)> for MutEntries<'a, K, V> {
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.iter.next().map(|bucket| {
+ self.elems_left -= 1;
+ unsafe {
+ (&*bucket.key,
+ &mut *bucket.val)
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ (self.elems_left, Some(self.elems_left))
+ }
+}
+
+impl<K, V> Iterator<(SafeHash, K, V)> for MoveEntries<K, V> {
+ fn next(&mut self) -> Option<(SafeHash, K, V)> {
+ self.iter.next().map(|bucket| {
+ self.table.size -= 1;
+ unsafe {
+ (
+ SafeHash {
+ hash: *bucket.hash,
+ },
+ ptr::read(bucket.key as *const K),
+ ptr::read(bucket.val as *const V)
+ )
+ }
+ })
+ }
+
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ let size = self.table.size();
+ (size, Some(size))
+ }
+}
+
+impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
+ fn clone(&self) -> RawTable<K, V> {
+ unsafe {
+ let mut new_ht = RawTable::new_uninitialized(self.capacity());
+
+ {
+ let cap = self.capacity();
+ let mut new_buckets = Bucket::first(&mut new_ht);
+ let mut buckets = Bucket::first(self);
+ while buckets.index() != cap {
+ match buckets.peek() {
+ Full(full) => {
+ let (h, k, v) = {
+ let (k, v) = full.read();
+ (full.hash(), k.clone(), v.clone())
+ };
+ *new_buckets.raw.hash = h.inspect();
+ mem::overwrite(new_buckets.raw.key, k);
+ mem::overwrite(new_buckets.raw.val, v);
+ }
+ Empty(..) => {
+ *new_buckets.raw.hash = EMPTY_BUCKET;
+ }
+ }
+ new_buckets.next();
+ buckets.next();
+ }
+ };
+
+ new_ht.size = self.size();
+
+ new_ht
+ }
+ }
+}
+
+#[unsafe_destructor]
+impl<K, V> Drop for RawTable<K, V> {
+ fn drop(&mut self) {
+ if self.hashes.is_null() {
+ return;
+ }
+ // This is done in reverse because we've likely partially taken
+ // some elements out with `.move_iter()` from the front.
+ // Check if the size is 0, so we don't do a useless scan when
+ // dropping empty tables such as on resize.
+ // Also avoid double drop of elements that have been already moved out.
+ unsafe {
+ for _ in self.rev_move_buckets() {}
+ }
+
+ let hashes_size = self.capacity * size_of::<u64>();
+ let keys_size = self.capacity * size_of::<K>();
+ let vals_size = self.capacity * size_of::<V>();
+ let (align, _, size) = calculate_allocation(hashes_size, min_align_of::<u64>(),
+ keys_size, min_align_of::<K>(),
+ vals_size, min_align_of::<V>());
+
+ unsafe {
+ deallocate(self.hashes as *mut u8, size, align);
+ // Remember how everything was allocated out of one buffer
+ // during initialization? We only need one call to free here.
+ }
+ }
+}
FreeLibrary(handle as *mut libc::c_void); ()
}
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
extern "system" {
fn SetLastError(error: libc::size_t);
fn LoadLibraryW(name: *const libc::c_void) -> *mut libc::c_void;
"task '{}' failed at '{}', {}:{}\n",
n, msg, file, line);
if backtrace::log_enabled() {
- let _ = backtrace::write(stderr);
+ let _ = backtrace::write(&mut *stderr);
}
local_stderr.replace(Some(stderr));
}
format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type]
fill := character
-align := '<' | '>'
+align := '<' | '^' | '>'
sign := '+' | '-'
width := count
precision := count | '*'
are specified by `fill`, and the alignment can be one of two options:
* `<` - the argument is left-aligned in `width` columns
+* `^` - the argument is center-aligned in `width` columns
* `>` - the argument is right-aligned in `width` columns
### Sign/#/0
pub fn unwrap(mut self) -> W {
// FIXME(#12628): is failing the right thing to do if flushing fails?
self.flush_buf().unwrap();
- self.inner.take_unwrap()
+ self.inner.take().unwrap()
}
}
///
/// Any error other than `EndOfFile` that is produced by the underlying Reader
/// is returned by the iterator and should be handled by the caller.
-#[cfg(stage0)]
-pub struct Bytes<'r, T> {
- reader: &'r mut T,
-}
-
-/// An iterator that reads a single byte on each iteration,
-/// until `.read_byte()` returns `EndOfFile`.
-///
-/// # Notes about the Iteration Protocol
-///
-/// The `Bytes` may yield `None` and thus terminate
-/// an iteration, but continue to yield elements if iteration
-/// is attempted again.
-///
-/// # Error
-///
-/// Any error other than `EndOfFile` that is produced by the underlying Reader
-/// is returned by the iterator and should be handled by the caller.
-#[cfg(not(stage0))]
pub struct Bytes<'r, T:'r> {
reader: &'r mut T,
}
error!(result, "couldn't recursively mkdir");
error!(result, "couldn't create directory");
- error!(result, "mode=FilePermission { bits: 448 }");
+ error!(result, "mode=0700");
error!(result, format!("path={}", file.display()));
})
}
}
-impl Reader for Box<Reader+'static> {
+impl<'a> Reader for Box<Reader+'a> {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> { self.read(buf) }
}
})
}
-/// Note: stage0-specific version that lacks bound.
-#[cfg(stage0)]
-pub struct RefReader<'a, R> {
- /// The underlying reader which this is referencing
- inner: &'a mut R
-}
-
/// A `RefReader` is a struct implementing `Reader` which contains a reference
/// to another reader. This is often useful when composing streams.
///
///
/// # }
/// ```
-#[cfg(not(stage0))]
pub struct RefReader<'a, R:'a> {
/// The underlying reader which this is referencing
inner: &'a mut R
///
/// This function will return any I/O error reported while formatting.
fn write_fmt(&mut self, fmt: &fmt::Arguments) -> IoResult<()> {
- // Note: stage0-specific version that lacks bound.
- #[cfg(stage0)]
- struct Adaptor<'a, T> {
- inner: &'a mut T,
- error: IoResult<()>,
- }
-
// Create a shim which translates a Writer to a FormatWriter and saves
// off I/O errors. instead of discarding them
- #[cfg(not(stage0))]
struct Adaptor<'a, T:'a> {
inner: &'a mut T,
error: IoResult<()>,
}
}
-impl Writer for Box<Writer+'static> {
+impl<'a> Writer for Box<Writer+'a> {
#[inline]
fn write(&mut self, buf: &[u8]) -> IoResult<()> { self.write(buf) }
/// println!("input processed: {}", output.unwrap());
/// # }
/// ```
-#[cfg(stage0)]
-pub struct RefWriter<'a, W> {
- /// The underlying writer which this is referencing
- inner: &'a mut W
-}
-
-/// A `RefWriter` is a struct implementing `Writer` which contains a reference
-/// to another writer. This is often useful when composing streams.
-///
-/// # Example
-///
-/// ```
-/// # fn main() {}
-/// # fn process_input<R: Reader>(r: R) {}
-/// # fn foo () {
-/// use std::io::util::TeeReader;
-/// use std::io::{stdin, MemWriter};
-///
-/// let mut output = MemWriter::new();
-///
-/// {
-/// // Don't give ownership of 'output' to the 'tee'. Instead we keep a
-/// // handle to it in the outer scope
-/// let mut tee = TeeReader::new(stdin(), output.by_ref());
-/// process_input(tee);
-/// }
-///
-/// println!("input processed: {}", output.unwrap());
-/// # }
-/// ```
-#[cfg(not(stage0))]
pub struct RefWriter<'a, W:'a> {
/// The underlying writer which this is referencing
inner: &'a mut W
///
/// Any error other than `EndOfFile` that is produced by the underlying Reader
/// is returned by the iterator and should be handled by the caller.
-#[cfg(stage0)]
-pub struct Lines<'r, T> {
- buffer: &'r mut T,
-}
-
-/// An iterator that reads a line on each iteration,
-/// until `.read_line()` encounters `EndOfFile`.
-///
-/// # Notes about the Iteration Protocol
-///
-/// The `Lines` may yield `None` and thus terminate
-/// an iteration, but continue to yield elements if iteration
-/// is attempted again.
-///
-/// # Error
-///
-/// Any error other than `EndOfFile` that is produced by the underlying Reader
-/// is returned by the iterator and should be handled by the caller.
-#[cfg(not(stage0))]
pub struct Lines<'r, T:'r> {
buffer: &'r mut T,
}
///
/// Any error other than `EndOfFile` that is produced by the underlying Reader
/// is returned by the iterator and should be handled by the caller.
-#[cfg(stage0)]
-pub struct Chars<'r, T> {
- buffer: &'r mut T
-}
-
-/// An iterator that reads a utf8-encoded character on each iteration,
-/// until `.read_char()` encounters `EndOfFile`.
-///
-/// # Notes about the Iteration Protocol
-///
-/// The `Chars` may yield `None` and thus terminate
-/// an iteration, but continue to yield elements if iteration
-/// is attempted again.
-///
-/// # Error
-///
-/// Any error other than `EndOfFile` that is produced by the underlying Reader
-/// is returned by the iterator and should be handled by the caller.
-#[cfg(not(stage0))]
pub struct Chars<'r, T:'r> {
buffer: &'r mut T
}
fn consume(&mut self, amt: uint);
/// Reads the next line of input, interpreted as a sequence of UTF-8
- /// encoded unicode codepoints. If a newline is encountered, then the
+ /// encoded Unicode codepoints. If a newline is encountered, then the
/// newline is contained in the returned string.
///
/// # Example
}
}
-/// Note: stage0-specific version that lacks bound on A.
-#[cfg(stage0)]
-pub struct IncomingConnections<'a, A> {
- inc: &'a mut A,
-}
-
/// An infinite iterator over incoming connection attempts.
/// Calling `next` will block the task until a connection is attempted.
///
/// `Some`. The `Some` contains the `IoResult` representing whether the
/// connection attempt was successful. A successful connection will be wrapped
/// in `Ok`. A failed connection is represented as an `Err`.
-#[cfg(not(stage0))]
pub struct IncomingConnections<'a, A:'a> {
inc: &'a mut A,
}
pub gen: u64,
}
-bitflags!(
- #[doc="A set of permissions for a file or directory is represented
-by a set of flags which are or'd together."]
- #[deriving(Show)]
+bitflags! {
+ #[doc = "A set of permissions for a file or directory is represented"]
+ #[doc = "by a set of flags which are or'd together."]
flags FilePermission: u32 {
static UserRead = 0o400,
static UserWrite = 0o200,
static GroupRWX = GroupRead.bits | GroupWrite.bits | GroupExecute.bits,
static OtherRWX = OtherRead.bits | OtherWrite.bits | OtherExecute.bits,
- #[doc="Permissions for user owned files, equivalent to 0644 on
-unix-like systems."]
+ #[doc = "Permissions for user owned files, equivalent to 0644 on"]
+ #[doc = "unix-like systems."]
static UserFile = UserRead.bits | UserWrite.bits | GroupRead.bits | OtherRead.bits,
- #[doc="Permissions for user owned directories, equivalent to 0755 on
-unix-like systems."]
+ #[doc = "Permissions for user owned directories, equivalent to 0755 on"]
+ #[doc = "unix-like systems."]
static UserDir = UserRWX.bits | GroupRead.bits | GroupExecute.bits |
OtherRead.bits | OtherExecute.bits,
- #[doc="Permissions for user owned executables, equivalent to 0755
-on unix-like systems."]
+ #[doc = "Permissions for user owned executables, equivalent to 0755"]
+ #[doc = "on unix-like systems."]
static UserExec = UserDir.bits,
- #[doc="All possible permissions enabled."]
- static AllPermissions = UserRWX.bits | GroupRWX.bits | OtherRWX.bits
+ #[doc = "All possible permissions enabled."]
+ static AllPermissions = UserRWX.bits | GroupRWX.bits | OtherRWX.bits,
}
-)
+}
impl Default for FilePermission {
#[inline]
fn default() -> FilePermission { FilePermission::empty() }
}
+impl fmt::Show for FilePermission {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ formatter.fill = '0';
+ formatter.width = Some(4);
+ (&self.bits as &fmt::Octal).fmt(formatter)
+ }
+}
+
#[cfg(test)]
mod tests {
use super::{IoResult, Reader, MemReader, NoProgress, InvalidInput};
let mut r = MemReader::new(Vec::from_slice(b"hello, world!"));
assert_eq!(r.push_at_least(5, 1, &mut buf).unwrap_err().kind, InvalidInput);
}
+
+ #[test]
+ fn test_show() {
+ use super::*;
+
+ assert_eq!(format!("{}", UserRead), "0400".to_string());
+ assert_eq!(format!("{}", UserFile), "0644".to_string());
+ assert_eq!(format!("{}", UserExec), "0755".to_string());
+ assert_eq!(format!("{}", UserRWX), "0700".to_string());
+ assert_eq!(format!("{}", GroupRWX), "0070".to_string());
+ assert_eq!(format!("{}", OtherRWX), "0007".to_string());
+ assert_eq!(format!("{}", AllPermissions), "0777".to_string());
+ assert_eq!(format!("{}", UserRead | UserWrite | OtherWrite), "0602".to_string());
+ }
}
/// the specified duration.
///
/// This is the same as the `connect` method, except that if the timeout
- /// specified (in milliseconds) elapses before a connection is made an error
- /// will be returned. The error's kind will be `TimedOut`.
+ /// specified elapses before a connection is made an error will be
+ /// returned. The error's kind will be `TimedOut`.
///
/// Note that the `addr` argument may one day be split into a separate host
/// and port, similar to the API seen in `connect`.
/// Connect to a pipe named by `path`, timing out if the specified number of
/// milliseconds.
///
- /// This function is similar to `connect`, except that if `timeout_ms`
+ /// This function is similar to `connect`, except that if `timeout`
/// elapses the function will return an error of kind `TimedOut`.
///
/// If a `timeout` with zero or negative duration is specified then
/// Err(e) => fail!("failed to execute child: {}", e),
/// };
///
-/// let contents = child.stdout.get_mut_ref().read_to_end();
+/// let contents = child.stdout.as_mut().unwrap().read_to_end();
/// assert!(child.wait().unwrap().success());
/// ```
pub struct Process {
/// Err(e) => fail!("failed to execute process: {}", e),
/// };
///
-/// let output = process.stdout.get_mut_ref().read_to_end();
+/// let output = process.stdout.as_mut().unwrap().read_to_end();
/// ```
#[deriving(Clone)]
pub struct Command {
let mut my_stdout = local_stdout.replace(None).unwrap_or_else(|| {
box stdout() as Box<Writer + Send>
});
- let result = f(my_stdout);
+ let result = f(&mut *my_stdout);
local_stdout.replace(Some(my_stdout));
result
} else {
// sizes. For an example, see #14940. For this reason, chunk the output
// buffer on windows, but on unix we can just write the whole buffer all
// at once.
- let max_size = if cfg!(windows) {64 * 1024} else {uint::MAX};
+ //
+ // For some other references, it appears that this problem has been
+ // encountered by others [1] [2]. We choose the number 8KB just because
+ // libuv does the same.
+ //
+ // [1]: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232
+ // [2]: http://www.mail-archive.com/log4net-dev@logging.apache.org/msg00661.html
+ let max_size = if cfg!(windows) {8192} else {uint::MAX};
for chunk in buf.chunks(max_size) {
try!(match self.inner {
TTY(ref mut tty) => tty.write(chunk),
use io::{fs, IoResult};
use io;
-use iter::range;
use libc;
use ops::Drop;
use option::{Option, None, Some};
/// will have the suffix `suffix`. The directory will be automatically
/// deleted once the returned wrapper is destroyed.
///
- /// If no directory can be created, None is returned.
- pub fn new_in(tmpdir: &Path, suffix: &str) -> Option<TempDir> {
+ /// If no directory can be created, `Err` is returned.
+ pub fn new_in(tmpdir: &Path, suffix: &str) -> IoResult<TempDir> {
if !tmpdir.is_absolute() {
return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
}
static mut CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
- for _ in range(0u, 1000) {
+ let mut attempts = 0u;
+ loop {
let filename =
format!("rs-{}-{}-{}",
unsafe { libc::getpid() },
suffix);
let p = tmpdir.join(filename);
match fs::mkdir(&p, io::UserRWX) {
- Err(..) => {}
- Ok(()) => return Some(TempDir { path: Some(p), disarmed: false })
+ Err(error) => {
+ if attempts >= 1000 {
+ return Err(error)
+ }
+ attempts += 1;
+ }
+ Ok(()) => return Ok(TempDir { path: Some(p), disarmed: false })
}
}
- None
}
/// Attempts to make a temporary directory inside of `os::tmpdir()` whose
/// name will have the suffix `suffix`. The directory will be automatically
/// deleted once the returned wrapper is destroyed.
///
- /// If no directory can be created, None is returned.
- pub fn new(suffix: &str) -> Option<TempDir> {
+ /// If no directory can be created, `Err` is returned.
+ pub fn new(suffix: &str) -> IoResult<TempDir> {
TempDir::new_in(&os::tmpdir(), suffix)
}
/// temporary directory is prevented.
pub fn unwrap(self) -> Path {
let mut tmpdir = self;
- tmpdir.path.take_unwrap()
+ tmpdir.path.take().unwrap()
}
/// Access the wrapped `std::path::Path` to the temporary directory.
#![feature(macro_rules, globs, managed_boxes, linkage)]
#![feature(default_type_params, phase, lang_items, unsafe_destructor)]
#![feature(import_shadowing)]
-#![feature(issue_5723_bootstrap)]
// Don't link to std. We are std.
#![no_std]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
#![allow(deprecated)]
#![deny(missing_doc)]
extern crate alloc;
extern crate unicode;
extern crate core;
-extern crate core_collections = "collections";
-extern crate core_rand = "rand";
-extern crate core_sync = "sync";
+extern crate "collections" as core_collections;
+extern crate "rand" as core_rand;
+extern crate "sync" as core_sync;
extern crate libc;
extern crate rustrt;
// Make std testable by not duplicating lang items. See #2912
-#[cfg(test)] extern crate realstd = "std";
+#[cfg(test)] extern crate "std" as realstd;
#[cfg(test)] pub use realstd::kinds;
#[cfg(test)] pub use realstd::ops;
#[cfg(test)] pub use realstd::cmp;
pub use fmt; // used for any formatting strings
pub use io; // used for println!()
pub use local_data; // used for local_data_key!()
- pub use option; // used for bitflags!()
+ pub use option; // used for bitflags!{}
pub use rt; // used for fail!()
pub use vec; // used for vec![]
#![experimental]
#![allow(missing_doc)]
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
use clone::Clone;
use collections::{Collection, MutableSeq};
unsafe fn push_unchecked<T: BytesContainer>(&mut self, path: T);
}
-/// Note: stage0-specific version that lacks bound.
-#[cfg(stage0)]
-pub struct Display<'a, P> {
- path: &'a P,
- filename: bool
-}
-
/// Helper struct for printing paths with format!()
-#[cfg(not(stage0))]
pub struct Display<'a, P:'a> {
path: &'a P,
filename: bool
/// Returns the path as a possibly-owned string.
///
/// If the path is not UTF-8, invalid sequences will be replaced with the
- /// unicode replacement char. This involves allocation.
+ /// Unicode replacement char. This involves allocation.
#[inline]
pub fn as_maybe_owned(&self) -> MaybeOwned<'a> {
String::from_utf8_lossy(if self.filename {
static CRYPT_VERIFYCONTEXT: DWORD = 0xF0000000;
static NTE_BAD_SIGNATURE: DWORD = 0x80090006;
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
extern "system" {
fn CryptAcquireContextA(phProv: *mut HCRYPTPROV,
pszContainer: LPCSTR,
"$RP$" => ")",
"$C$" => ",",
- // in theory we can demangle any unicode code point, but
+ // in theory we can demangle any Unicode code point, but
// for simplicity we just catch the common ones.
"$x20" => " ",
"$x27" => "'",
//
// An additionally oddity in this function is that we initialize the
// filename via self_exe_name() to pass to libbacktrace. It turns out
- // that on linux libbacktrace seamlessly gets the filename of the
+ // that on Linux libbacktrace seamlessly gets the filename of the
// current executable, but this fails on freebsd. by always providing
// it, we make sure that libbacktrace never has a reason to not look up
// the symbols. The libbacktrace API also states that the filename must
/// iOS doesn't use all of them it but adding more
/// platform-specific configs pollutes the code too much
#[allow(non_camel_case_types)]
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
#[allow(dead_code)]
mod uw {
use libc;
(val & !1) as libc::uintptr_t
}
- // This function also doesn't exist on android or arm/linux, so make it
+ // This function also doesn't exist on Android or ARM/Linux, so make it
// a no-op
#[cfg(target_os = "android")]
#[cfg(target_os = "linux", target_arch = "arm")]
/// copy of that function in my mingw install (maybe it was broken?). Instead,
/// this takes the route of using StackWalk64 in order to walk the stack.
#[cfg(windows)]
-#[allow(dead_code, uppercase_variables)]
+#[allow(dead_code, non_snake_case)]
mod imp {
use c_str::CString;
use core_collections::Collection;
use str::StrSlice;
use dynamic_lib::DynamicLibrary;
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
extern "system" {
fn GetCurrentProcess() -> libc::HANDLE;
fn GetCurrentThread() -> libc::HANDLE;
static IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
static IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
- #[cfg(stage0)]
- #[packed]
- struct SYMBOL_INFO {
- SizeOfStruct: libc::c_ulong,
- TypeIndex: libc::c_ulong,
- Reserved: [u64, ..2],
- Index: libc::c_ulong,
- Size: libc::c_ulong,
- ModBase: u64,
- Flags: libc::c_ulong,
- Value: u64,
- Address: u64,
- Register: libc::c_ulong,
- Scope: libc::c_ulong,
- Tag: libc::c_ulong,
- NameLen: libc::c_ulong,
- MaxNameLen: libc::c_ulong,
- // note that windows has this as 1, but it basically just means that
- // the name is inline at the end of the struct. For us, we just bump
- // the struct size up to MAX_SYM_NAME.
- Name: [libc::c_char, ..MAX_SYM_NAME],
- }
-
- #[cfg(not(stage0))]
#[repr(C, packed)]
struct SYMBOL_INFO {
SizeOfStruct: libc::c_ulong,
// LLVM implements the `frem` instruction as a call to `fmod`, which lives in
// libm. Hence, we must explicitly link to it.
//
-// On linux librt and libdl are indirect dependencies via rustrt,
+// On Linux, librt and libdl are indirect dependencies via rustrt,
// and binutils 2.22+ won't add them automatically
#[cfg(target_os = "linux")]
#[link(name = "dl")]
#![experimental]
-use {fmt, i32};
+use {fmt, i64};
use ops::{Add, Sub, Mul, Div, Neg};
use option::{Option, Some, None};
use num;
use num::{CheckedAdd, CheckedMul};
use result::{Result, Ok, Err};
-
-/// `Duration`'s `days` component should have no more than this value.
-static MIN_DAYS: i32 = i32::MIN;
-/// `Duration`'s `days` component should have no less than this value.
-static MAX_DAYS: i32 = i32::MAX;
-
+/// The number of nanoseconds in a microsecond.
+static NANOS_PER_MICRO: i32 = 1000;
+/// The number of nanoseconds in a millisecond.
+static NANOS_PER_MILLI: i32 = 1000_000;
/// The number of nanoseconds in seconds.
static NANOS_PER_SEC: i32 = 1_000_000_000;
+/// The number of microseconds per second.
+static MICROS_PER_SEC: i64 = 1000_000;
+/// The number of milliseconds per second.
+static MILLIS_PER_SEC: i64 = 1000;
+/// The number of seconds in a minute.
+static SECS_PER_MINUTE: i64 = 60;
+/// The number of seconds in an hour.
+static SECS_PER_HOUR: i64 = 3600;
/// The number of (non-leap) seconds in days.
-static SECS_PER_DAY: i32 = 86400;
+static SECS_PER_DAY: i64 = 86400;
+/// The number of (non-leap) seconds in a week.
+static SECS_PER_WEEK: i64 = 604800;
macro_rules! try_opt(
($e:expr) => (match $e { Some(v) => v, None => return None })
)
-// FIXME #16466: This could be represented as (i64 seconds, u32 nanos)
/// ISO 8601 time duration with nanosecond precision.
/// This also allows for the negative duration; see individual methods for details.
#[deriving(Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Duration {
- days: i32,
- secs: u32, // Always < SECS_PER_DAY
- nanos: u32, // Always < NANOS_PR_SECOND
+ secs: i64,
+ nanos: i32, // Always 0 <= nanos < NANOS_PER_SEC
}
-/// The minimum possible `Duration`.
-pub static MIN: Duration = Duration { days: MIN_DAYS, secs: 0, nanos: 0 };
-/// The maximum possible `Duration`.
-pub static MAX: Duration = Duration { days: MAX_DAYS, secs: SECS_PER_DAY as u32 - 1,
- nanos: NANOS_PER_SEC as u32 - 1 };
+/// The minimum possible `Duration`: `i64::MIN` milliseconds.
+pub static MIN: Duration = Duration {
+ secs: i64::MIN / MILLIS_PER_SEC - 1,
+ nanos: NANOS_PER_SEC + (i64::MIN % MILLIS_PER_SEC) as i32 * NANOS_PER_MILLI
+};
+
+/// The maximum possible `Duration`: `i64::MAX` milliseconds.
+pub static MAX: Duration = Duration {
+ secs: i64::MAX / MILLIS_PER_SEC,
+ nanos: (i64::MAX % MILLIS_PER_SEC) as i32 * NANOS_PER_MILLI
+};
impl Duration {
/// Makes a new `Duration` with given number of weeks.
- /// Equivalent to `Duration::new(weeks * 7, 0, 0)` with overflow checks.
- ///
+ /// Equivalent to `Duration::seconds(weeks * 7 * 24 * 60 * 60)` with overflow checks.
/// Fails when the duration is out of bounds.
#[inline]
- pub fn weeks(weeks: i32) -> Duration {
- let days = weeks.checked_mul(&7).expect("Duration::weeks out of bounds");
- Duration::days(days)
+ pub fn weeks(weeks: i64) -> Duration {
+ let secs = weeks.checked_mul(&SECS_PER_WEEK).expect("Duration::weeks out of bounds");
+ Duration::seconds(secs)
}
/// Makes a new `Duration` with given number of days.
- /// Equivalent to `Duration::new(days, 0, 0)`.
+ /// Equivalent to `Duration::seconds(days * 24 * 60 * 60)` with overflow checks.
+ /// Fails when the duration is out of bounds.
#[inline]
- pub fn days(days: i32) -> Duration {
- Duration { days: days, secs: 0, nanos: 0 }
+ pub fn days(days: i64) -> Duration {
+ let secs = days.checked_mul(&SECS_PER_DAY).expect("Duration::days out of bounds");
+ Duration::seconds(secs)
}
/// Makes a new `Duration` with given number of hours.
- /// Equivalent to `Duration::new(0, hours * 3600, 0)` with overflow checks.
+ /// Equivalent to `Duration::seconds(hours * 60 * 60)` with overflow checks.
+ /// Fails when the duration is out of bounds.
#[inline]
- pub fn hours(hours: i32) -> Duration {
- let (days, hours) = div_mod_floor(hours, (SECS_PER_DAY / 3600));
- let secs = hours * 3600;
- Duration { secs: secs as u32, ..Duration::days(days) }
+ pub fn hours(hours: i64) -> Duration {
+ let secs = hours.checked_mul(&SECS_PER_HOUR).expect("Duration::hours out of bounds");
+ Duration::seconds(secs)
}
/// Makes a new `Duration` with given number of minutes.
- /// Equivalent to `Duration::new(0, mins * 60, 0)` with overflow checks.
+ /// Equivalent to `Duration::seconds(minutes * 60)` with overflow checks.
+ /// Fails when the duration is out of bounds.
#[inline]
- pub fn minutes(mins: i32) -> Duration {
- let (days, mins) = div_mod_floor(mins, (SECS_PER_DAY / 60));
- let secs = mins * 60;
- Duration { secs: secs as u32, ..Duration::days(days) }
+ pub fn minutes(minutes: i64) -> Duration {
+ let secs = minutes.checked_mul(&SECS_PER_MINUTE).expect("Duration::minutes out of bounds");
+ Duration::seconds(secs)
}
/// Makes a new `Duration` with given number of seconds.
- /// Equivalent to `Duration::new(0, secs, 0)`.
+ /// Fails when the duration is more than `i64::MAX` milliseconds
+ /// or less than `i64::MIN` milliseconds.
#[inline]
- pub fn seconds(secs: i32) -> Duration {
- let (days, secs) = div_mod_floor(secs, SECS_PER_DAY);
- Duration { secs: secs as u32, ..Duration::days(days) }
+ pub fn seconds(seconds: i64) -> Duration {
+ let d = Duration { secs: seconds, nanos: 0 };
+ if d < MIN || d > MAX {
+ fail!("Duration::seconds out of bounds");
+ }
+ d
}
/// Makes a new `Duration` with given number of milliseconds.
- /// Equivalent to `Duration::new(0, 0, millis * 1_000_000)` with overflow checks.
#[inline]
- pub fn milliseconds(millis: i32) -> Duration {
- let (secs, millis) = div_mod_floor(millis, (NANOS_PER_SEC / 1_000_000));
- let nanos = millis * 1_000_000;
- Duration { nanos: nanos as u32, ..Duration::seconds(secs) }
+ pub fn milliseconds(milliseconds: i64) -> Duration {
+ let (secs, millis) = div_mod_floor_64(milliseconds, MILLIS_PER_SEC);
+ let nanos = millis as i32 * NANOS_PER_MILLI;
+ Duration { secs: secs, nanos: nanos }
}
/// Makes a new `Duration` with given number of microseconds.
- /// Equivalent to `Duration::new(0, 0, micros * 1_000)` with overflow checks.
#[inline]
- pub fn microseconds(micros: i32) -> Duration {
- let (secs, micros) = div_mod_floor(micros, (NANOS_PER_SEC / 1_000));
- let nanos = micros * 1_000;
- Duration { nanos: nanos as u32, ..Duration::seconds(secs) }
+ pub fn microseconds(microseconds: i64) -> Duration {
+ let (secs, micros) = div_mod_floor_64(microseconds, MICROS_PER_SEC);
+ let nanos = micros as i32 * NANOS_PER_MICRO;
+ Duration { secs: secs, nanos: nanos }
}
/// Makes a new `Duration` with given number of nanoseconds.
- /// Equivalent to `Duration::new(0, 0, nanos)`.
- #[inline]
- pub fn nanoseconds(nanos: i32) -> Duration {
- let (secs, nanos) = div_mod_floor(nanos, NANOS_PER_SEC);
- Duration { nanos: nanos as u32, ..Duration::seconds(secs) }
- }
-
- /// Returns a tuple of the number of days, (non-leap) seconds and
- /// nanoseconds in the duration. Note that the number of seconds
- /// and nanoseconds are always positive, so that for example
- /// `-Duration::seconds(3)` has -1 days and 86,397 seconds.
#[inline]
- fn to_tuple_64(&self) -> (i64, u32, u32) {
- (self.days as i64, self.secs, self.nanos)
- }
-
- /// Negates the duration and returns a tuple like `to_tuple`.
- /// This does not overflow and thus is internally used for several methods.
- fn to_negated_tuple_64(&self) -> (i64, u32, u32) {
- let mut days = -(self.days as i64);
- let mut secs = -(self.secs as i32);
- let mut nanos = -(self.nanos as i32);
- if nanos < 0 {
- nanos += NANOS_PER_SEC;
- secs -= 1;
- }
- if secs < 0 {
- secs += SECS_PER_DAY;
- days -= 1;
- }
- (days, secs as u32, nanos as u32)
+ pub fn nanoseconds(nanos: i64) -> Duration {
+ let (secs, nanos) = div_mod_floor_64(nanos, NANOS_PER_SEC as i64);
+ Duration { secs: secs, nanos: nanos as i32 }
}
/// Returns the total number of whole weeks in the duration.
#[inline]
- pub fn num_weeks(&self) -> i32 {
+ pub fn num_weeks(&self) -> i64 {
self.num_days() / 7
}
/// Returns the total number of whole days in the duration.
- pub fn num_days(&self) -> i32 {
- if self.days < 0 {
- let negated = -*self;
- -negated.days
- } else {
- self.days
- }
+ pub fn num_days(&self) -> i64 {
+ self.num_seconds() / SECS_PER_DAY
}
/// Returns the total number of whole hours in the duration.
#[inline]
pub fn num_hours(&self) -> i64 {
- self.num_seconds() / 3600
+ self.num_seconds() / SECS_PER_HOUR
}
/// Returns the total number of whole minutes in the duration.
#[inline]
pub fn num_minutes(&self) -> i64 {
- self.num_seconds() / 60
+ self.num_seconds() / SECS_PER_MINUTE
}
/// Returns the total number of whole seconds in the duration.
pub fn num_seconds(&self) -> i64 {
- // cannot overflow, 2^32 * 86400 < 2^64
- fn secs((days, secs, _): (i64, u32, u32)) -> i64 {
- days as i64 * SECS_PER_DAY as i64 + secs as i64
+ // If secs is negative, nanos should be subtracted from the duration.
+ if self.secs < 0 && self.nanos > 0 {
+ self.secs + 1
+ } else {
+ self.secs
}
- if self.days < 0 {-secs(self.to_negated_tuple_64())} else {secs(self.to_tuple_64())}
}
- /// Returns the total number of whole milliseconds in the duration.
- pub fn num_milliseconds(&self) -> i64 {
- // cannot overflow, 2^32 * 86400 * 1000 < 2^64
- fn millis((days, secs, nanos): (i64, u32, u32)) -> i64 {
- static MILLIS_PER_SEC: i64 = 1_000;
- static NANOS_PER_MILLI: i64 = 1_000_000;
- (days as i64 * MILLIS_PER_SEC * SECS_PER_DAY as i64 +
- secs as i64 * MILLIS_PER_SEC +
- nanos as i64 / NANOS_PER_MILLI)
+ /// Returns the number of nanoseconds such that
+ /// `nanos_mod_sec() + num_seconds() * NANOS_PER_SEC` is the total number of
+ /// nanoseconds in the duration.
+ fn nanos_mod_sec(&self) -> i32 {
+ if self.secs < 0 && self.nanos > 0 {
+ self.nanos - NANOS_PER_SEC
+ } else {
+ self.nanos
}
- if self.days < 0 {-millis(self.to_negated_tuple_64())} else {millis(self.to_tuple_64())}
+ }
+
+ /// Returns the total number of whole milliseconds in the duration.
+ pub fn num_milliseconds(&self) -> i64 {
+ // A proper Duration will not overflow, because MIN and MAX are defined
+ // such that the range is exactly i64 milliseconds.
+ let secs_part = self.num_seconds() * MILLIS_PER_SEC;
+ let nanos_part = self.nanos_mod_sec() / NANOS_PER_MILLI;
+ secs_part + nanos_part as i64
}
/// Returns the total number of whole microseconds in the duration,
- /// or `None` on the overflow (exceeding 2^63 microseconds in either directions).
+ /// or `None` on overflow (exceeding 2^63 microseconds in either direction).
pub fn num_microseconds(&self) -> Option<i64> {
- fn micros((days, secs, nanos): (i64, u32, u32)) -> Option<i64> {
- static MICROS_PER_SEC: i64 = 1_000_000;
- static MICROS_PER_DAY: i64 = MICROS_PER_SEC * SECS_PER_DAY as i64;
- static NANOS_PER_MICRO: i64 = 1_000;
- let nmicros = try_opt!((days as i64).checked_mul(&MICROS_PER_DAY));
- let nmicros = try_opt!(nmicros.checked_add(&(secs as i64 * MICROS_PER_SEC)));
- let nmicros = try_opt!(nmicros.checked_add(&(nanos as i64 / NANOS_PER_MICRO as i64)));
- Some(nmicros)
- }
- if self.days < 0 {
- // the final negation won't overflow since we start with positive numbers.
- micros(self.to_negated_tuple_64()).map(|micros| -micros)
- } else {
- micros(self.to_tuple_64())
- }
+ let secs_part = try_opt!(self.num_seconds().checked_mul(&MICROS_PER_SEC));
+ let nanos_part = self.nanos_mod_sec() / NANOS_PER_MICRO;
+ secs_part.checked_add(&(nanos_part as i64))
}
/// Returns the total number of whole nanoseconds in the duration,
- /// or `None` on the overflow (exceeding 2^63 nanoseconds in either directions).
+ /// or `None` on overflow (exceeding 2^63 nanoseconds in either direction).
pub fn num_nanoseconds(&self) -> Option<i64> {
- fn nanos((days, secs, nanos): (i64, u32, u32)) -> Option<i64> {
- static NANOS_PER_DAY: i64 = NANOS_PER_SEC as i64 * SECS_PER_DAY as i64;
- let nnanos = try_opt!((days as i64).checked_mul(&NANOS_PER_DAY));
- let nnanos = try_opt!(nnanos.checked_add(&(secs as i64 * NANOS_PER_SEC as i64)));
- let nnanos = try_opt!(nnanos.checked_add(&(nanos as i64)));
- Some(nnanos)
- }
- if self.days < 0 {
- // the final negation won't overflow since we start with positive numbers.
- nanos(self.to_negated_tuple_64()).map(|micros| -micros)
- } else {
- nanos(self.to_tuple_64())
- }
+ let secs_part = try_opt!(self.num_seconds().checked_mul(&(NANOS_PER_SEC as i64)));
+ let nanos_part = self.nanos_mod_sec();
+ secs_part.checked_add(&(nanos_part as i64))
}
}
impl num::Zero for Duration {
#[inline]
fn zero() -> Duration {
- Duration { days: 0, secs: 0, nanos: 0 }
+ Duration { secs: 0, nanos: 0 }
}
#[inline]
fn is_zero(&self) -> bool {
- self.days == 0 && self.secs == 0 && self.nanos == 0
+ self.secs == 0 && self.nanos == 0
}
}
impl Neg<Duration> for Duration {
#[inline]
fn neg(&self) -> Duration {
- let (days, secs, nanos) = self.to_negated_tuple_64();
- Duration { days: days as i32, secs: secs, nanos: nanos } // FIXME can overflow
+ if self.nanos == 0 {
+ Duration { secs: -self.secs, nanos: 0 }
+ } else {
+ Duration { secs: -self.secs - 1, nanos: NANOS_PER_SEC - self.nanos }
+ }
}
}
impl Add<Duration,Duration> for Duration {
fn add(&self, rhs: &Duration) -> Duration {
- let mut days = self.days + rhs.days;
let mut secs = self.secs + rhs.secs;
let mut nanos = self.nanos + rhs.nanos;
- if nanos >= NANOS_PER_SEC as u32 {
- nanos -= NANOS_PER_SEC as u32;
+ if nanos >= NANOS_PER_SEC {
+ nanos -= NANOS_PER_SEC;
secs += 1;
}
- if secs >= SECS_PER_DAY as u32 {
- secs -= SECS_PER_DAY as u32;
- days += 1;
- }
- Duration { days: days, secs: secs, nanos: nanos }
+ Duration { secs: secs, nanos: nanos }
}
}
impl num::CheckedAdd for Duration {
fn checked_add(&self, rhs: &Duration) -> Option<Duration> {
- let mut days = try_opt!(self.days.checked_add(&rhs.days));
- let mut secs = self.secs + rhs.secs;
+ let mut secs = try_opt!(self.secs.checked_add(&rhs.secs));
let mut nanos = self.nanos + rhs.nanos;
- if nanos >= NANOS_PER_SEC as u32 {
- nanos -= NANOS_PER_SEC as u32;
- secs += 1;
- }
- if secs >= SECS_PER_DAY as u32 {
- secs -= SECS_PER_DAY as u32;
- days = try_opt!(days.checked_add(&1));
+ if nanos >= NANOS_PER_SEC {
+ nanos -= NANOS_PER_SEC;
+ secs = try_opt!(secs.checked_add(&1));
}
- Some(Duration { days: days, secs: secs, nanos: nanos })
+ let d = Duration { secs: secs, nanos: nanos };
+ // Even if d is within the bounds of i64 seconds,
+ // it might still overflow i64 milliseconds.
+ if d < MIN || d > MAX { None } else { Some(d) }
}
}
impl Sub<Duration,Duration> for Duration {
fn sub(&self, rhs: &Duration) -> Duration {
- let mut days = self.days - rhs.days;
- let mut secs = self.secs as i32 - rhs.secs as i32;
- let mut nanos = self.nanos as i32 - rhs.nanos as i32;
+ let mut secs = self.secs - rhs.secs;
+ let mut nanos = self.nanos - rhs.nanos;
if nanos < 0 {
nanos += NANOS_PER_SEC;
secs -= 1;
}
- if secs < 0 {
- secs += SECS_PER_DAY;
- days -= 1;
- }
- Duration { days: days, secs: secs as u32, nanos: nanos as u32 }
+ Duration { secs: secs, nanos: nanos }
}
}
impl num::CheckedSub for Duration {
fn checked_sub(&self, rhs: &Duration) -> Option<Duration> {
- let mut days = try_opt!(self.days.checked_sub(&rhs.days));
- let mut secs = self.secs as i32 - rhs.secs as i32;
- let mut nanos = self.nanos as i32 - rhs.nanos as i32;
+ let mut secs = try_opt!(self.secs.checked_sub(&rhs.secs));
+ let mut nanos = self.nanos - rhs.nanos;
if nanos < 0 {
nanos += NANOS_PER_SEC;
- secs -= 1;
- }
- if secs < 0 {
- secs += SECS_PER_DAY;
- days = try_opt!(days.checked_sub(&1));
+ secs = try_opt!(secs.checked_sub(&1));
}
- Some(Duration { days: days, secs: secs as u32, nanos: nanos as u32 })
+ let d = Duration { secs: secs, nanos: nanos };
+ // Even if d is within the bounds of i64 seconds,
+ // it might still overflow i64 milliseconds.
+ if d < MIN || d > MAX { None } else { Some(d) }
}
}
impl Mul<i32,Duration> for Duration {
fn mul(&self, rhs: &i32) -> Duration {
- /// Given `0 <= y < limit <= 2^30`,
- /// returns `(h,l)` such that `x * y = h * limit + l` where `0 <= l < limit`.
- fn mul_i64_u32_limit(x: i64, y: u32, limit: u32) -> (i64,u32) {
- let y = y as i64;
- let limit = limit as i64;
- let (xh, xl) = div_mod_floor_64(x, limit);
- let (h, l) = (xh * y, xl * y);
- let (h_, l) = div_rem_64(l, limit);
- (h + h_, l as u32)
- }
-
- let rhs = *rhs as i64;
- let (secs1, nanos) = mul_i64_u32_limit(rhs, self.nanos, NANOS_PER_SEC as u32);
- let (days1, secs1) = div_mod_floor_64(secs1, (SECS_PER_DAY as i64));
- let (days2, secs2) = mul_i64_u32_limit(rhs, self.secs, SECS_PER_DAY as u32);
- let mut days = self.days as i64 * rhs + days1 + days2;
- let mut secs = secs1 as u32 + secs2;
- if secs >= SECS_PER_DAY as u32 {
- secs -= 1;
- days += 1;
- }
- Duration { days: days as i32, secs: secs, nanos: nanos }
+ // Multiply nanoseconds as i64, because it cannot overflow that way.
+ let total_nanos = self.nanos as i64 * *rhs as i64;
+ let (extra_secs, nanos) = div_mod_floor_64(total_nanos, NANOS_PER_SEC as i64);
+ let secs = self.secs * *rhs as i64 + extra_secs;
+ Duration { secs: secs, nanos: nanos as i32 }
}
}
impl Div<i32,Duration> for Duration {
fn div(&self, rhs: &i32) -> Duration {
- let (rhs, days, secs, nanos) = if *rhs < 0 {
- let (days, secs, nanos) = self.to_negated_tuple_64();
- (-(*rhs as i64), days, secs as i64, nanos as i64)
- } else {
- (*rhs as i64, self.days as i64, self.secs as i64, self.nanos as i64)
- };
-
- let (days, carry) = div_mod_floor_64(days, rhs);
- let secs = secs + carry * SECS_PER_DAY as i64;
- let (secs, carry) = div_mod_floor_64(secs, rhs);
- let nanos = nanos + carry * NANOS_PER_SEC as i64;
- let nanos = nanos / rhs;
- Duration { days: days as i32, secs: secs as u32, nanos: nanos as u32 }
+ let mut secs = self.secs / *rhs as i64;
+ let carry = self.secs - secs * *rhs as i64;
+ let extra_nanos = carry * NANOS_PER_SEC as i64 / *rhs as i64;
+ let mut nanos = self.nanos / *rhs + extra_nanos as i32;
+ if nanos >= NANOS_PER_SEC {
+ nanos -= NANOS_PER_SEC;
+ secs += 1;
+ }
+ if nanos < 0 {
+ nanos += NANOS_PER_SEC;
+ secs -= 1;
+ }
+ Duration { secs: secs, nanos: nanos }
}
}
impl fmt::Show for Duration {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- let hasdate = self.days != 0;
- let hastime = (self.secs != 0 || self.nanos != 0) || !hasdate;
+ let days = self.num_days();
+ let secs = self.secs - days * SECS_PER_DAY;
+ let hasdate = days != 0;
+ let hastime = (secs != 0 || self.nanos != 0) || !hasdate;
try!(write!(f, "P"));
if hasdate {
// technically speaking the negative part is not the valid ISO 8601,
// but we need to print it anyway.
- try!(write!(f, "{}D", self.days));
+ try!(write!(f, "{}D", days));
}
if hastime {
if self.nanos == 0 {
- try!(write!(f, "T{}S", self.secs));
- } else if self.nanos % 1_000_000 == 0 {
- try!(write!(f, "T{}.{:03}S", self.secs, self.nanos / 1_000_000));
- } else if self.nanos % 1_000 == 0 {
- try!(write!(f, "T{}.{:06}S", self.secs, self.nanos / 1_000));
+ try!(write!(f, "T{}S", secs));
+ } else if self.nanos % NANOS_PER_MILLI == 0 {
+ try!(write!(f, "T{}.{:03}S", secs, self.nanos / NANOS_PER_MILLI));
+ } else if self.nanos % NANOS_PER_MICRO == 0 {
+ try!(write!(f, "T{}.{:06}S", secs, self.nanos / NANOS_PER_MICRO));
} else {
- try!(write!(f, "T{}.{:09}S", self.secs, self.nanos));
+ try!(write!(f, "T{}.{:09}S", secs, self.nanos));
}
}
Ok(())
}
// Copied from libnum
-#[inline]
-fn div_mod_floor(this: i32, other: i32) -> (i32, i32) {
- (div_floor(this, other), mod_floor(this, other))
-}
-
-#[inline]
-fn div_floor(this: i32, other: i32) -> i32 {
- match div_rem(this, other) {
- (d, r) if (r > 0 && other < 0)
- || (r < 0 && other > 0) => d - 1,
- (d, _) => d,
- }
-}
-
-#[inline]
-fn mod_floor(this: i32, other: i32) -> i32 {
- match this % other {
- r if (r > 0 && other < 0)
- || (r < 0 && other > 0) => r + other,
- r => r,
- }
-}
-
-#[inline]
-fn div_rem(this: i32, other: i32) -> (i32, i32) {
- (this / other, this % other)
-}
-
#[inline]
fn div_mod_floor_64(this: i64, other: i64) -> (i64, i64) {
(div_floor_64(this, other), mod_floor_64(this, other))
#[cfg(test)]
mod tests {
- use super::{Duration, MIN_DAYS, MAX_DAYS, MIN, MAX};
+ use super::{Duration, MIN, MAX};
use {i32, i64};
use num::{Zero, CheckedAdd, CheckedSub};
use option::{Some, None};
Duration::days(1) + Duration::seconds(3));
assert_eq!(Duration::days(10) - Duration::seconds(1000), Duration::seconds(863000));
assert_eq!(Duration::days(10) - Duration::seconds(1000000), Duration::seconds(-136000));
- assert_eq!(Duration::days(2) + Duration::seconds(86399) + Duration::nanoseconds(1234567890),
+ assert_eq!(Duration::days(2) + Duration::seconds(86399) +
+ Duration::nanoseconds(1234567890),
Duration::days(3) + Duration::nanoseconds(234567890));
assert_eq!(-Duration::days(3), Duration::days(-3));
assert_eq!(-(Duration::days(3) + Duration::seconds(70)),
assert_eq!(Duration::seconds(86401).num_days(), 1);
assert_eq!(Duration::seconds(-86399).num_days(), 0);
assert_eq!(Duration::seconds(-86401).num_days(), -1);
- assert_eq!(Duration::days(i32::MAX).num_days(), i32::MAX);
- assert_eq!(Duration::days(i32::MIN).num_days(), i32::MIN);
- assert_eq!(MAX.num_days(), MAX_DAYS);
- assert_eq!(MIN.num_days(), MIN_DAYS);
+ assert_eq!(Duration::days(i32::MAX as i64).num_days(), i32::MAX as i64);
+ assert_eq!(Duration::days(i32::MIN as i64).num_days(), i32::MIN as i64);
}
#[test]
assert_eq!(Duration::milliseconds(1001).num_seconds(), 1);
assert_eq!(Duration::milliseconds(-999).num_seconds(), 0);
assert_eq!(Duration::milliseconds(-1001).num_seconds(), -1);
- assert_eq!(Duration::seconds(i32::MAX).num_seconds(), i32::MAX as i64);
- assert_eq!(Duration::seconds(i32::MIN).num_seconds(), i32::MIN as i64);
- assert_eq!(MAX.num_seconds(), (MAX_DAYS as i64 + 1) * 86400 - 1);
- assert_eq!(MIN.num_seconds(), MIN_DAYS as i64 * 86400);
}
#[test]
assert_eq!(Duration::microseconds(1001).num_milliseconds(), 1);
assert_eq!(Duration::microseconds(-999).num_milliseconds(), 0);
assert_eq!(Duration::microseconds(-1001).num_milliseconds(), -1);
- assert_eq!(Duration::milliseconds(i32::MAX).num_milliseconds(), i32::MAX as i64);
- assert_eq!(Duration::milliseconds(i32::MIN).num_milliseconds(), i32::MIN as i64);
- assert_eq!(MAX.num_milliseconds(), (MAX_DAYS as i64 + 1) * 86400_000 - 1);
- assert_eq!(MIN.num_milliseconds(), MIN_DAYS as i64 * 86400_000);
+ assert_eq!(Duration::milliseconds(i64::MAX).num_milliseconds(), i64::MAX);
+ assert_eq!(Duration::milliseconds(i64::MIN).num_milliseconds(), i64::MIN);
+ assert_eq!(MAX.num_milliseconds(), i64::MAX);
+ assert_eq!(MIN.num_milliseconds(), i64::MIN);
}
#[test]
assert_eq!(Duration::nanoseconds(1001).num_microseconds(), Some(1));
assert_eq!(Duration::nanoseconds(-999).num_microseconds(), Some(0));
assert_eq!(Duration::nanoseconds(-1001).num_microseconds(), Some(-1));
- assert_eq!(Duration::microseconds(i32::MAX).num_microseconds(), Some(i32::MAX as i64));
- assert_eq!(Duration::microseconds(i32::MIN).num_microseconds(), Some(i32::MIN as i64));
+ assert_eq!(Duration::microseconds(i64::MAX).num_microseconds(), Some(i64::MAX));
+ assert_eq!(Duration::microseconds(i64::MIN).num_microseconds(), Some(i64::MIN));
assert_eq!(MAX.num_microseconds(), None);
assert_eq!(MIN.num_microseconds(), None);
// overflow checks
static MICROS_PER_DAY: i64 = 86400_000_000;
- assert_eq!(Duration::days((i64::MAX / MICROS_PER_DAY) as i32).num_microseconds(),
+ assert_eq!(Duration::days(i64::MAX / MICROS_PER_DAY).num_microseconds(),
Some(i64::MAX / MICROS_PER_DAY * MICROS_PER_DAY));
- assert_eq!(Duration::days((i64::MIN / MICROS_PER_DAY) as i32).num_microseconds(),
+ assert_eq!(Duration::days(i64::MIN / MICROS_PER_DAY).num_microseconds(),
Some(i64::MIN / MICROS_PER_DAY * MICROS_PER_DAY));
- assert_eq!(Duration::days((i64::MAX / MICROS_PER_DAY + 1) as i32).num_microseconds(), None);
- assert_eq!(Duration::days((i64::MIN / MICROS_PER_DAY - 1) as i32).num_microseconds(), None);
+ assert_eq!(Duration::days(i64::MAX / MICROS_PER_DAY + 1).num_microseconds(), None);
+ assert_eq!(Duration::days(i64::MIN / MICROS_PER_DAY - 1).num_microseconds(), None);
}
#[test]
assert_eq!(d.num_nanoseconds(), Some(0));
assert_eq!(Duration::nanoseconds(1).num_nanoseconds(), Some(1));
assert_eq!(Duration::nanoseconds(-1).num_nanoseconds(), Some(-1));
- assert_eq!(Duration::nanoseconds(i32::MAX).num_nanoseconds(), Some(i32::MAX as i64));
- assert_eq!(Duration::nanoseconds(i32::MIN).num_nanoseconds(), Some(i32::MIN as i64));
+ assert_eq!(Duration::nanoseconds(i64::MAX).num_nanoseconds(), Some(i64::MAX));
+ assert_eq!(Duration::nanoseconds(i64::MIN).num_nanoseconds(), Some(i64::MIN));
assert_eq!(MAX.num_nanoseconds(), None);
assert_eq!(MIN.num_nanoseconds(), None);
// overflow checks
static NANOS_PER_DAY: i64 = 86400_000_000_000;
- assert_eq!(Duration::days((i64::MAX / NANOS_PER_DAY) as i32).num_nanoseconds(),
+ assert_eq!(Duration::days(i64::MAX / NANOS_PER_DAY).num_nanoseconds(),
Some(i64::MAX / NANOS_PER_DAY * NANOS_PER_DAY));
- assert_eq!(Duration::days((i64::MIN / NANOS_PER_DAY) as i32).num_nanoseconds(),
+ assert_eq!(Duration::days(i64::MIN / NANOS_PER_DAY).num_nanoseconds(),
Some(i64::MIN / NANOS_PER_DAY * NANOS_PER_DAY));
- assert_eq!(Duration::days((i64::MAX / NANOS_PER_DAY + 1) as i32).num_nanoseconds(), None);
- assert_eq!(Duration::days((i64::MIN / NANOS_PER_DAY - 1) as i32).num_nanoseconds(), None);
+ assert_eq!(Duration::days(i64::MAX / NANOS_PER_DAY + 1).num_nanoseconds(), None);
+ assert_eq!(Duration::days(i64::MIN / NANOS_PER_DAY - 1).num_nanoseconds(), None);
}
#[test]
fn test_duration_checked_ops() {
- assert_eq!(Duration::days(MAX_DAYS).checked_add(&Duration::seconds(86399)),
- Some(Duration::days(MAX_DAYS - 1) + Duration::seconds(86400+86399)));
- assert!(Duration::days(MAX_DAYS).checked_add(&Duration::seconds(86400)).is_none());
+ assert_eq!(Duration::milliseconds(i64::MAX - 1).checked_add(&Duration::microseconds(999)),
+ Some(Duration::milliseconds(i64::MAX - 2) + Duration::microseconds(1999)));
+ assert!(Duration::milliseconds(i64::MAX).checked_add(&Duration::microseconds(1000))
+ .is_none());
- assert_eq!(Duration::days(MIN_DAYS).checked_sub(&Duration::seconds(0)),
- Some(Duration::days(MIN_DAYS)));
- assert!(Duration::days(MIN_DAYS).checked_sub(&Duration::seconds(1)).is_none());
+ assert_eq!(Duration::milliseconds(i64::MIN).checked_sub(&Duration::milliseconds(0)),
+ Some(Duration::milliseconds(i64::MIN)));
+ assert!(Duration::milliseconds(i64::MIN).checked_sub(&Duration::milliseconds(1))
+ .is_none());
}
#[test]
Duration::seconds(10) - Duration::nanoseconds(10));
assert_eq!((Duration::nanoseconds(1) + Duration::seconds(1) + Duration::days(1)) * 3,
Duration::nanoseconds(3) + Duration::seconds(3) + Duration::days(3));
+ assert_eq!(Duration::milliseconds(1500) * -2, Duration::seconds(-3));
+ assert_eq!(Duration::milliseconds(-1500) * 2, Duration::seconds(-3));
}
#[test]
assert_eq!(Duration::nanoseconds(123_456_789) / -1, -Duration::nanoseconds(123_456_789));
assert_eq!(-Duration::nanoseconds(123_456_789) / -1, Duration::nanoseconds(123_456_789));
assert_eq!(-Duration::nanoseconds(123_456_789) / 1, -Duration::nanoseconds(123_456_789));
+ assert_eq!(Duration::seconds(1) / 3, Duration::nanoseconds(333_333_333));
+ assert_eq!(Duration::seconds(4) / 3, Duration::nanoseconds(1_333_333_333));
+ assert_eq!(Duration::seconds(-1) / 2, Duration::milliseconds(-500));
+ assert_eq!(Duration::seconds(1) / -2, Duration::milliseconds(-500));
+ assert_eq!(Duration::seconds(-1) / -2, Duration::milliseconds(500));
+ assert_eq!(Duration::seconds(-4) / 3, Duration::nanoseconds(-1_333_333_333));
+ assert_eq!(Duration::seconds(-4) / -3, Duration::nanoseconds(1_333_333_333));
}
#[test]
/// whenever `next` is called, waiting for a new message, and `None` will be
/// returned when the corresponding channel has hung up.
#[unstable]
-#[cfg(not(stage0))]
pub struct Messages<'a, T:'a> {
rx: &'a Receiver<T>
}
-/// Stage0 only
-#[cfg(stage0)]
-#[unstable]
-pub struct Messages<'a, T> {
- rx: &'a Receiver<T>
-}
-
/// The sending-half of Rust's asynchronous channel type. This half can only be
/// owned by one task, but it can be cloned to send to other tasks.
#[unstable]
// Couldn't send the data, the port hung up first. Return the data
// back up the stack.
DISCONNECTED => {
- Err(self.data.take_unwrap())
+ Err(self.data.take().unwrap())
}
// Not possible, these are one-use channels
// There's data on the channel, so make sure we destroy it promptly.
// This is why not using an arc is a little difficult (need the box
// to stay valid while we take the data).
- DATA => { self.data.take_unwrap(); }
+ DATA => { self.data.take().unwrap(); }
// We're the only ones that can block on this port
_ => unreachable!()
/// A handle to a receiver which is currently a member of a `Select` set of
/// receivers. This handle is used to keep the receiver in the set as well as
/// interact with the underlying receiver.
-#[cfg(not(stage0))]
pub struct Handle<'rx, T:'rx> {
/// The ID of this handle, used to compare against the return value of
/// `Select::wait()`
rx: &'rx Receiver<T>,
}
-/// Stage0 only
-#[cfg(stage0)]
-pub struct Handle<'rx, T> {
- /// The ID of this handle, used to compare against the return value of
- /// `Select::wait()`
- id: uint,
- selector: &'rx Select,
- next: *mut Handle<'static, ()>,
- prev: *mut Handle<'static, ()>,
- added: bool,
- packet: &'rx Packet+'rx,
-
- // due to our fun transmutes, we be sure to place this at the end. (nothing
- // previous relies on T)
- rx: &'rx Receiver<T>,
-}
-
struct Packets { cur: *mut Handle<'static, ()> }
#[doc(hidden)]
let waiter = match mem::replace(&mut state.blocker, NoneBlocked) {
NoneBlocked => None,
BlockedSender(task) => {
- *state.canceled.take_unwrap() = true;
+ *state.canceled.take().unwrap() = true;
Some(task)
}
BlockedReceiver(..) => unreachable!(),
let start = self.start;
self.size -= 1;
self.start = (self.start + 1) % self.buf.len();
- self.buf.get_mut(start).take_unwrap()
+ self.buf.get_mut(start).take().unwrap()
}
fn size(&self) -> uint { self.size }
}
unsafe {
(*node).next = 0 as *mut Node;
- Some((*node).task.take_unwrap())
+ Some((*node).task.take().unwrap())
}
}
}
#![feature(phase, globs, macro_rules, unsafe_destructor)]
#![feature(import_shadowing)]
-#![feature(issue_5723_bootstrap)]
#![deny(missing_doc)]
#![no_std]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
#[phase(plugin, link)] extern crate core;
extern crate alloc;
extern crate collections;
/// An guard which is created by locking a mutex. Through this guard the
/// underlying data can be accessed.
-#[cfg(not(stage0))]
pub struct MutexGuard<'a, T:'a> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
pub cond: Condvar<'a>,
}
-/// stage0 only
-#[cfg(stage0)]
-pub struct MutexGuard<'a, T> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a mut T,
- /// Inner condition variable connected to the locked mutex that this guard
- /// was created from. This can be used for atomic-unlock-and-deschedule.
- pub cond: Condvar<'a>,
-}
-
impl<T: Send> Mutex<T> {
/// Creates a new mutex to protect the user-supplied data.
pub fn new(user_data: T) -> Mutex<T> {
/// A guard which is created by locking an rwlock in write mode. Through this
/// guard the underlying data can be accessed.
-#[cfg(not(stage0))]
pub struct RWLockWriteGuard<'a, T:'a> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
pub cond: Condvar<'a>,
}
-/// stage0 only
-#[cfg(stage0)]
-pub struct RWLockWriteGuard<'a, T> {
- // FIXME #12808: strange name to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a mut T,
- /// Inner condition variable that can be used to sleep on the write mode of
- /// this rwlock.
- pub cond: Condvar<'a>,
-}
-
/// A guard which is created by locking an rwlock in read mode. Through this
/// guard the underlying data can be accessed.
-#[cfg(not(stage0))]
pub struct RWLockReadGuard<'a, T:'a> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
_guard: raw::RWLockReadGuard<'a>,
}
-/// Stage0 only
-#[cfg(stage0)]
-pub struct RWLockReadGuard<'a, T> {
- // FIXME #12808: strange names to try to avoid interfering with
- // field accesses of the contained type via Deref
- _data: &'a T,
- _guard: raw::RWLockReadGuard<'a>,
-}
-
impl<T: Send + Sync> RWLock<T> {
/// Create a reader/writer lock with the supplied data.
pub fn new(user_data: T) -> RWLock<T> {
*self.tail.get() = next;
assert!((*tail).value.is_none());
assert!((*next).value.is_some());
- let ret = (*next).value.take_unwrap();
+ let ret = (*next).value.take().unwrap();
let _: Box<Node<T>> = mem::transmute(tail);
return Data(ret);
}
}
#[must_use]
-#[cfg(stage0)]
-struct SemGuard<'a, Q> {
- sem: &'a Sem<Q>,
-}
-
-#[must_use]
-#[cfg(not(stage0))]
struct SemGuard<'a, Q:'a> {
sem: &'a Sem<Q>,
}
// signaller already sent -- I mean 'unconditionally' in contrast
// with acquire().)
(|| {
- let _ = wait_end.take_unwrap().recv();
+ let _ = wait_end.take().unwrap().recv();
}).finally(|| {
// Reacquire the condvar.
match self.order {
condvar_id,
"cond.signal_on()",
|| {
- queue.take_unwrap().broadcast()
+ queue.take().unwrap().broadcast()
})
}
}
}
}
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
#[test]
fn lookup_Rust() {
let abi = lookup("Rust");
// FIXME(eddyb) #10676 use Rc<T> in the future.
pub type P<T> = Gc<T>;
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
/// Construct a P<T> from a T value.
pub fn P<T: 'static>(value: T) -> P<T> {
box(GC) value
ExprLit(Gc<Lit>),
ExprCast(Gc<Expr>, P<Ty>),
ExprIf(Gc<Expr>, P<Block>, Option<Gc<Expr>>),
- ExprWhile(Gc<Expr>, P<Block>),
+ // FIXME #6993: change to Option<Name> ... or not, if these are hygienic.
+ ExprWhile(Gc<Expr>, P<Block>, Option<Ident>),
// FIXME #6993: change to Option<Name> ... or not, if these are hygienic.
ExprForLoop(Gc<Pat>, Gc<Expr>, P<Block>, Option<Ident>),
// Conditionless loop (can be exited with break, cont, or ret)
}
}
-#[cfg(stage0)]
-#[deriving(Clone)]
-pub struct Values<'a, T>(pub slice::Items<'a, T>);
-
// HACK(eddyb) move this into libstd (value wrapper for slice::Items).
-#[cfg(not(stage0))]
#[deriving(Clone)]
pub struct Values<'a, T:'a>(pub slice::Items<'a, T>);
}
}
-#[cfg(stage0)]
-pub struct NodesMatchingSuffix<'a, S> {
- map: &'a Map,
- item_name: &'a S,
- in_which: &'a [S],
- idx: NodeId,
-}
-
-#[cfg(not(stage0))]
pub struct NodesMatchingSuffix<'a, S:'a> {
map: &'a Map,
item_name: &'a S,
/// A visitor that applies its operation to all of the node IDs
/// in a visitable thing.
-#[cfg(stage0)]
-pub struct IdVisitor<'a, O> {
- pub operation: &'a O,
- pub pass_through_items: bool,
- pub visited_outermost: bool,
-}
-
-#[cfg(not(stage0))]
pub struct IdVisitor<'a, O:'a> {
pub operation: &'a O,
pub pass_through_items: bool,
InlineNever,
}
-/// True if something like #[inline] is found in the list of attrs.
+/// Determine what `#[inline]` attribute is present in `attrs`, if any.
pub fn find_inline_attr(attrs: &[Attribute]) -> InlineAttr {
// FIXME (#2809)---validate the usage of #[inline] and #[inline]
attrs.iter().fold(InlineNone, |ia,attr| {
})
}
+/// True if `#[inline]` or `#[inline(always)]` is present in `attrs`.
+pub fn requests_inline(attrs: &[Attribute]) -> bool {
+ match find_inline_attr(attrs) {
+ InlineHint | InlineAlways => true,
+ InlineNone | InlineNever => false,
+ }
+}
+
/// Tests if any `cfg(...)` meta items in `metas` match `cfg`. e.g.
///
/// test_cfg(`[foo="a", bar]`, `[cfg(foo), cfg(bar)]`) == true
}
}
-#[deriving(PartialEq, Show)]
+#[deriving(Eq, Hash, PartialEq, Show)]
pub enum IntType {
SignedInt(ast::IntTy),
UnsignedInt(ast::UintTy)
}
}
-#[deriving(PartialEq)]
+#[deriving(PartialEq, Clone)]
pub enum Level {
Bug,
Fatal,
fn make_expr(&self) -> Option<Gc<ast::Expr>> {
Some(self.e)
}
+ fn make_pat(&self) -> Option<Gc<ast::Pat>> {
+ match self.e.node {
+ ast::ExprLit(_) => Some(box(GC) ast::Pat {
+ id: ast::DUMMY_NODE_ID,
+ node: ast::PatLit(self.e),
+ span: self.e.span
+ }),
+ _ => None
+ }
+ }
}
/// A convenience type for macros that return a single pattern.
pub struct MacPat {
ty: P<ast::Ty>,
lifetime: Option<ast::Lifetime>,
mutbl: ast::Mutability) -> P<ast::Ty>;
+ fn ty_ptr(&self, span: Span,
+ ty: P<ast::Ty>,
+ mutbl: ast::Mutability) -> P<ast::Ty>;
fn ty_uniq(&self, span: Span, ty: P<ast::Ty>) -> P<ast::Ty>;
fn ty_option(&self, ty: P<ast::Ty>) -> P<ast::Ty>;
ast::TyRptr(lifetime, self.ty_mt(ty, mutbl)))
}
+ fn ty_ptr(&self,
+ span: Span,
+ ty: P<ast::Ty>,
+ mutbl: ast::Mutability)
+ -> P<ast::Ty> {
+ self.ty(span,
+ ast::TyPtr(self.ty_mt(ty, mutbl)))
+ }
fn ty_uniq(&self, span: Span, ty: P<ast::Ty>) -> P<ast::Ty> {
self.ty(span, ast::TyUniq(ty))
}
pub enum PtrTy<'a> {
/// &'lifetime mut
Borrowed(Option<&'a str>, ast::Mutability),
+ /// *mut
+ Raw(ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
}
}
-/// A type. Supports pointers (except for *), Self, and literals
+/// A type. Supports pointers, Self, and literals
#[deriving(Clone)]
pub enum Ty<'a> {
Self,
let lt = mk_lifetime(cx, span, lt);
cx.ty_rptr(span, raw_ty, lt, mutbl)
}
+ Raw(mutbl) => cx.ty_ptr(span, raw_ty, mutbl)
}
}
Literal(ref p) => { p.to_ty(cx, span, self_ty, self_generics) }
let lt = lt.map(|s| cx.lifetime(span, cx.ident_of(s).name));
ast::SelfRegion(lt, mutbl, special_idents::self_)
}
+ Raw(_) => cx.span_bug(span, "attempted to use *self in deriving definition")
});
let self_expr = cx.expr_deref(span, self_path);
(self_expr, self_ty)
}
}
+ ast::ExprWhile(cond, body, opt_ident) => {
+ let cond = fld.fold_expr(cond);
+ let (body, opt_ident) = expand_loop_block(body, opt_ident, fld);
+ fld.cx.expr(e.span, ast::ExprWhile(cond, body, opt_ident))
+ }
+
ast::ExprLoop(loop_block, opt_ident) => {
let (loop_block, opt_ident) = expand_loop_block(loop_block, opt_ident, fld);
fld.cx.expr(e.span, ast::ExprLoop(loop_block, opt_ident))
parse::AlignRight => {
self.ecx.path_global(sp, self.rtpath("AlignRight"))
}
+ parse::AlignCenter => {
+ self.ecx.path_global(sp, self.rtpath("AlignCenter"))
+ }
parse::AlignUnknown => {
self.ecx.path_global(sp, self.rtpath("AlignUnknown"))
}
// Right now there is a bug such that for the expression:
// foo(bar(&1))
// the lifetime of `1` doesn't outlast the call to `bar`, so it's not
- // vald for the call to `foo`. To work around this all arguments to the
+ // valid for the call to `foo`. To work around this all arguments to the
// format! string are shoved into locals. Furthermore, we shove the address
// of each variable because we don't want to move out of the arguments
// passed to this function.
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
folder.fold_block(tr),
fl.map(|x| folder.fold_expr(x)))
}
- ExprWhile(cond, body) => {
- ExprWhile(folder.fold_expr(cond), folder.fold_block(body))
+ ExprWhile(cond, body, opt_ident) => {
+ ExprWhile(folder.fold_expr(cond),
+ folder.fold_block(body),
+ opt_ident.map(|i| folder.fold_ident(i)))
}
- ExprForLoop(pat, iter, body, ref maybe_ident) => {
+ ExprForLoop(pat, iter, body, ref opt_ident) => {
ExprForLoop(folder.fold_pat(pat),
folder.fold_expr(iter),
folder.fold_block(body),
- maybe_ident.map(|i| folder.fold_ident(i)))
+ opt_ident.map(|i| folder.fold_ident(i)))
}
ExprLoop(body, opt_ident) => {
ExprLoop(folder.fold_block(body),
- opt_ident.map(|x| folder.fold_ident(x)))
+ opt_ident.map(|i| folder.fold_ident(i)))
}
ExprMatch(expr, ref arms) => {
ExprMatch(folder.fold_expr(expr),
#![feature(macro_rules, globs, managed_boxes, default_type_params, phase)]
#![feature(quote, struct_variant, unsafe_destructor, import_shadowing)]
-#![feature(issue_5723_bootstrap)]
#![allow(deprecated)]
-// NOTE(stage0, pcwalton): Remove after snapshot.
-#![allow(unknown_features)]
-
extern crate fmt_macros;
extern crate debug;
#[phase(plugin, link)] extern crate log;
if len == 0 {
OwnedSlice::empty()
} else {
+ // drop excess capacity to avoid breaking sized deallocation
+ v.shrink_to_fit();
+
let p = v.as_mut_ptr();
// we own the allocation now
- unsafe {mem::forget(v)}
+ unsafe { mem::forget(v) }
OwnedSlice { data: p, len: len }
}
self.bump();
valid &= self.scan_char_or_byte(ch_start, ch, /* ascii_only = */ false, '"');
}
- // adjust for the ACSII " at the start of the literal
+ // adjust for the ASCII " at the start of the literal
let id = if valid { self.name_from(start_bpos + BytePos(1)) }
else { token::intern("??") };
self.bump();
mut rdr: Box<Reader+'a>)
-> Parser<'a>
{
- let tok0 = real_token(rdr);
+ let tok0 = real_token(&mut *rdr);
let span = tok0.sp;
let placeholder = TokenAndSpan {
tok: token::UNDERSCORE,
let token_str = self.this_token_to_string();
let span = self.span;
self.span_err(span,
- format!("found `{}` in ident position",
+ format!("expected identifier, found keyword `{}`",
token_str).as_slice());
}
}
None
};
let next = if self.buffer_start == self.buffer_end {
- real_token(self.reader)
+ real_token(&mut *self.reader)
} else {
// Avoid token copies with `replace`.
let buffer_start = self.buffer_start as uint;
-> R {
let dist = distance as int;
while self.buffer_length() < dist {
- self.buffer[self.buffer_end as uint] = real_token(self.reader);
+ self.buffer[self.buffer_end as uint] = real_token(&mut *self.reader);
self.buffer_end = (self.buffer_end + 1) & 3;
}
f(&self.buffer[((self.buffer_start + dist - 1) & 3) as uint].tok)
} else if self.token == token::TILDE {
// OWNED POINTER
self.bump();
- let span = self.last_span;
+ let last_span = self.last_span;
match self.token {
- token::IDENT(ref ident, _)
- if "str" == token::get_ident(*ident).get() => {
- // This is OK (for now).
- }
- token::LBRACKET => {} // Also OK.
- _ => self.obsolete(span, ObsoleteOwnedType)
+ token::LBRACKET => self.obsolete(last_span, ObsoleteOwnedVector),
+ _ => self.obsolete(last_span, ObsoleteOwnedType)
}
TyUniq(self.parse_ty(false))
} else if self.token == token::BINOP(token::STAR) {
return self.parse_for_expr(None);
}
if self.eat_keyword(keywords::While) {
- return self.parse_while_expr();
+ return self.parse_while_expr(None);
}
if Parser::token_is_lifetime(&self.token) {
let lifetime = self.get_lifetime();
self.bump();
self.expect(&token::COLON);
+ if self.eat_keyword(keywords::While) {
+ return self.parse_while_expr(Some(lifetime))
+ }
if self.eat_keyword(keywords::For) {
return self.parse_for_expr(Some(lifetime))
}
if self.eat_keyword(keywords::Loop) {
return self.parse_loop_expr(Some(lifetime))
}
- self.fatal("expected `for` or `loop` after a label")
+ self.fatal("expected `while`, `for`, or `loop` after a label")
}
if self.eat_keyword(keywords::Loop) {
return self.parse_loop_expr(None);
}
token::TILDE => {
self.bump();
- let span = self.last_span;
+ let last_span = self.last_span;
match self.token {
- token::LIT_STR(_) => {
- // This is OK (for now).
- }
- token::LBRACKET => {} // Also OK.
- _ => self.obsolete(span, ObsoleteOwnedExpr)
+ token::LBRACKET => self.obsolete(last_span, ObsoleteOwnedVector),
+ _ => self.obsolete(last_span, ObsoleteOwnedExpr)
}
let e = self.parse_prefix_expr();
self.mk_expr(lo, hi, ExprForLoop(pat, expr, loop_block, opt_ident))
}
- pub fn parse_while_expr(&mut self) -> Gc<Expr> {
+ pub fn parse_while_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> {
let lo = self.last_span.lo;
let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL);
let body = self.parse_block();
let hi = body.span.hi;
- return self.mk_expr(lo, hi, ExprWhile(cond, body));
+ return self.mk_expr(lo, hi, ExprWhile(cond, body, opt_ident));
}
pub fn parse_loop_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> {
token::IDENT(..) => {
let the_ident = self.parse_ident();
self.expect_one_of(&[], &[token::EQ, token::SEMI]);
- // NOTE - #16689 change this to a warning once
- // the 'as' support is in stage0
let path = if self.token == token::EQ {
self.bump();
- Some(self.parse_str())
+ let path = self.parse_str();
+ let span = self.span;
+ self.span_warn(span,
+ format!("this extern crate syntax is deprecated. \
+ Use: extern crate \"{}\" as {};",
+ path.ref0().get(), the_ident.as_str() ).as_slice()
+ );
+ Some(path)
} else {None};
self.expect(&token::SEMI);
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
use std::mem;
pub enum AnnNode<'a> {
+ NodeIdent(&'a ast::Ident),
+ NodeName(&'a ast::Name),
NodeBlock(&'a ast::Block),
NodeItem(&'a ast::Item),
NodeExpr(&'a ast::Expr),
// FIXME (Issue #16472): the thing_to_string_impls macro should go away
// after we revise the syntax::ext::quote::ToToken impls to go directly
-// to token-trees instea of thing -> string -> token-trees.
+// to token-trees instead of thing -> string -> token-trees.
macro_rules! thing_to_string_impls {
($to_string:ident) => {
ast::ExprIf(ref test, ref blk, elseopt) => {
try!(self.print_if(&**test, &**blk, elseopt, false));
}
- ast::ExprWhile(ref test, ref blk) => {
+ ast::ExprWhile(ref test, ref blk, opt_ident) => {
+ for ident in opt_ident.iter() {
+ try!(self.print_ident(*ident));
+ try!(self.word_space(":"));
+ }
try!(self.head("while"));
try!(self.print_expr(&**test));
try!(space(&mut self.s));
pub fn print_ident(&mut self, ident: ast::Ident) -> IoResult<()> {
if self.encode_idents_with_hygiene {
let encoded = ident.encode_with_hygiene();
- word(&mut self.s, encoded.as_slice())
+ try!(word(&mut self.s, encoded.as_slice()))
} else {
- word(&mut self.s, token::get_ident(ident).get())
+ try!(word(&mut self.s, token::get_ident(ident).get()))
}
+ self.ann.post(self, NodeIdent(&ident))
}
pub fn print_name(&mut self, name: ast::Name) -> IoResult<()> {
- word(&mut self.s, token::get_name(name).get())
+ try!(word(&mut self.s, token::get_name(name).get()));
+ self.ann.post(self, NodeName(&name))
}
pub fn print_for_decl(&mut self, loc: &ast::Local,
/// Does the given string match the pattern? whitespace in the first string
/// may be deleted or replaced with other whitespace to match the pattern.
-/// this function is unicode-ignorant; fortunately, the careful design of
+/// this function is Unicode-ignorant; fortunately, the careful design of
/// UTF-8 mitigates this ignorance. In particular, this function only collapses
-/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode
+/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate Unicode
/// chars. Unsurprisingly, it doesn't do NKF-normalization(?).
pub fn matches_codepattern(a : &str, b : &str) -> bool {
let mut idx_a = 0;
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
visitor.visit_block(&**if_block, env.clone());
walk_expr_opt(visitor, optional_else, env.clone())
}
- ExprWhile(ref subexpression, ref block) => {
+ ExprWhile(ref subexpression, ref block, _) => {
visitor.visit_expr(&**subexpression, env.clone());
visitor.visit_block(&**block, env.clone())
}
background: color::Color,
}
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
#[link(name = "kernel32")]
extern "system" {
fn SetConsoleTextAttribute(handle: libc::HANDLE, attr: libc::WORD) -> libc::BOOL;
#[test]
pub fn ratchet_test() {
- let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
+ let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
let pth = dpth.path().join("ratchet.json");
let mut m1 = MetricMap::new();
use std::io::BufReader;
use std::num;
use std::string::String;
+use std::time::Duration;
static NSEC_PER_SEC: i32 = 1_000_000_000_i32;
}
}
-impl Add<Timespec, Timespec> for Timespec {
- fn add(&self, other: &Timespec) -> Timespec {
- let mut sec = self.sec + other.sec;
- let mut nsec = self.nsec + other.nsec;
+impl Add<Duration, Timespec> for Timespec {
+ fn add(&self, other: &Duration) -> Timespec {
+ let d_sec = other.num_seconds();
+ // It is safe to unwrap the nanoseconds, because there cannot be
+ // more than one second left, which fits in i64 and in i32.
+ let d_nsec = (other - Duration::seconds(d_sec))
+ .num_nanoseconds().unwrap() as i32;
+ let mut sec = self.sec + d_sec;
+ let mut nsec = self.nsec + d_nsec;
if nsec >= NSEC_PER_SEC {
nsec -= NSEC_PER_SEC;
sec += 1;
+ } else if nsec < 0 {
+ nsec += NSEC_PER_SEC;
+ sec -= 1;
}
Timespec::new(sec, nsec)
}
}
-impl Sub<Timespec, Timespec> for Timespec {
- fn sub(&self, other: &Timespec) -> Timespec {
- let mut sec = self.sec - other.sec;
- let mut nsec = self.nsec - other.nsec;
- if nsec < 0 {
- nsec += NSEC_PER_SEC;
- sec -= 1;
- }
- Timespec::new(sec, nsec)
+impl Sub<Timespec, Duration> for Timespec {
+ fn sub(&self, other: &Timespec) -> Duration {
+ let sec = self.sec - other.sec;
+ let nsec = self.nsec - other.nsec;
+ Duration::seconds(sec) + Duration::nanoseconds(nsec as i64)
}
}
/// Holds a calendar date and time broken down into its components (year, month, day, and so on),
/// also called a broken-down time value.
+// FIXME: use c_int instead of i32?
+#[repr(C)]
#[deriving(Clone, PartialEq, Eq, Show)]
pub struct Tm {
/// Seconds after the minute - [0, 60]
use std::f64;
use std::result::{Err, Ok};
+ use std::time::Duration;
use self::test::Bencher;
#[cfg(windows)]
fn test_timespec_add() {
let a = Timespec::new(1, 2);
- let b = Timespec::new(2, 3);
+ let b = Duration::seconds(2) + Duration::nanoseconds(3);
let c = a + b;
assert_eq!(c.sec, 3);
assert_eq!(c.nsec, 5);
let p = Timespec::new(1, super::NSEC_PER_SEC - 2);
- let q = Timespec::new(2, 2);
+ let q = Duration::seconds(2) + Duration::nanoseconds(2);
let r = p + q;
assert_eq!(r.sec, 4);
assert_eq!(r.nsec, 0);
let u = Timespec::new(1, super::NSEC_PER_SEC - 2);
- let v = Timespec::new(2, 3);
+ let v = Duration::seconds(2) + Duration::nanoseconds(3);
let w = u + v;
assert_eq!(w.sec, 4);
assert_eq!(w.nsec, 1);
+
+ let k = Timespec::new(1, 0);
+ let l = Duration::nanoseconds(-1);
+ let m = k + l;
+ assert_eq!(m.sec, 0);
+ assert_eq!(m.nsec, 999_999_999);
}
fn test_timespec_sub() {
let a = Timespec::new(2, 3);
let b = Timespec::new(1, 2);
let c = a - b;
- assert_eq!(c.sec, 1);
- assert_eq!(c.nsec, 1);
+ assert_eq!(c.num_nanoseconds(), Some(super::NSEC_PER_SEC as i64 + 1));
let p = Timespec::new(2, 0);
let q = Timespec::new(1, 2);
let r = p - q;
- assert_eq!(r.sec, 0);
- assert_eq!(r.nsec, super::NSEC_PER_SEC - 2);
+ assert_eq!(r.num_nanoseconds(), Some(super::NSEC_PER_SEC as i64 - 2));
let u = Timespec::new(1, 2);
let v = Timespec::new(2, 3);
let w = u - v;
- assert_eq!(w.sec, -2);
- assert_eq!(w.nsec, super::NSEC_PER_SEC - 1);
+ assert_eq!(w.num_nanoseconds(), Some(-super::NSEC_PER_SEC as i64 - 1));
}
#[test]
// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly
-#![allow(missing_doc, non_uppercase_statics, non_snake_case_functions)]
+#![allow(missing_doc, non_uppercase_statics, non_snake_case)]
fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
use core::cmp::{Equal, Less, Greater};
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
pub fn is_XID_start(c: char) -> bool { derived_property::XID_Start(c) }
/// Returns whether the specified `char` satisfies the 'XID_Continue' Unicode property
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
-#[allow(non_snake_case_functions)]
+#[allow(non_snake_case)]
pub fn is_XID_continue(c: char) -> bool { derived_property::XID_Continue(c) }
///
/// Convert a char to its uppercase equivalent
///
/// The case-folding performed is the common or simple mapping:
-/// it maps one unicode codepoint (one char in Rust) to its uppercase equivalent according
+/// it maps one Unicode codepoint (one char in Rust) to its uppercase equivalent according
/// to the Unicode database at ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
/// The additional SpecialCasing.txt is not considered here, as it expands to multiple
/// codepoints in some cases.
/// 'XID_Start' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to ID_Start but modified for closure under NFKx.
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
fn is_XID_start(&self) -> bool;
/// Returns whether the specified `char` satisfies the 'XID_Continue'
/// 'XID_Continue' is a Unicode Derived Property specified in
/// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
/// mostly similar to 'ID_Continue' but modified for closure under NFKx.
- #[allow(non_snake_case_functions)]
+ #[allow(non_snake_case)]
fn is_XID_continue(&self) -> bool;
/// Converts a character to its uppercase equivalent.
///
/// The case-folding performed is the common or simple mapping: it maps
- /// one unicode codepoint (one character in Rust) to its uppercase
+ /// one Unicode codepoint (one character in Rust) to its uppercase
/// equivalent according to the Unicode database [1]. The additional
/// `SpecialCasing.txt` is not considered here, as it expands to multiple
/// codepoints in some cases.
// looking up each character twice.
cat = match self.cat {
None => gr::grapheme_category(ch),
- _ => self.cat.take_unwrap()
+ _ => self.cat.take().unwrap()
};
if match cat {
// cached category, if any
cat = match self.catb {
None => gr::grapheme_category(ch),
- _ => self.catb.take_unwrap()
+ _ => self.catb.take().unwrap()
};
// a matching state machine that runs *backwards* across an input string
}
#endif
-size_t
-#if defined(__WIN32__)
-rust_list_dir_wfd_size() {
- return sizeof(WIN32_FIND_DATAW);
-}
-#else
-rust_list_dir_wfd_size() {
- return 0;
-}
-#endif
-
-void*
-#if defined(__WIN32__)
-rust_list_dir_wfd_fp_buf(WIN32_FIND_DATAW* wfd) {
- if(wfd == NULL) {
- return 0;
- }
- else {
- return wfd->cFileName;
- }
-}
-#else
-rust_list_dir_wfd_fp_buf(void* wfd) {
- return 0;
-}
-#endif
-
typedef struct {
int32_t tm_sec;
int32_t tm_min;
+S 2014-09-05 67b97ab
+ freebsd-x86_64 5ed208394cb2a378ddfaa005b6298d2f142ad47f
+ linux-i386 d90866947bfa09738cf8540d17a8eedc70988fcc
+ linux-x86_64 52955b8f7a3b1bf664345060f421101979ced9f2
+ macos-i386 2a38d39afa94ad6d274464ee4e82b1b98c2b3a11
+ macos-x86_64 51df6e27c7d0776f83023e30a976525934ddb93f
+ winnt-i386 3b0bc6d5c1435f22a3782ae25acd19bc27b2cff4
+
+S 2014-08-29 6025926
+ freebsd-x86_64 285330b798eefcc929fc94c9d0604b6172ce3309
+ linux-i386 5b57ab2dc32952dc78551a955f3c1746b2d915a3
+ linux-x86_64 2624aeac3fe1b2359b61c1109e4708680e237ca5
+ macos-i386 deffce32408b023bcda84f6ce338ca3de02f406b
+ macos-x86_64 8ef7351e34fc1583570d752d223ddc6eb68ef27b
+ winnt-i386 c2dfa9a358de8cc554007addbc09e3e43d49aec6
+
S 2014-08-17 a86d9ad
freebsd-x86_64 f49e0c8e64c8a60dc47df9965974d2a98ef70b34
linux-i386 8f2c5f6a1b6504ee63de73c7a53aee1e4b07bca5
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// force-host
+
+#![feature(phase, plugin_registrar)]
+
+extern crate syntax;
+
+// Load rustc as a plugin to get macros
+#[phase(plugin, link)]
+extern crate rustc;
+
+use syntax::ast;
+use syntax::parse::token;
+use rustc::lint::{Context, LintPass, LintPassObject, LintArray};
+use rustc::plugin::Registry;
+
+declare_lint!(TEST_LINT, Warn,
+ "Warn about items named 'lintme'")
+
+declare_lint!(PLEASE_LINT, Warn,
+ "Warn about items named 'pleaselintme'")
+
+struct Pass;
+
+impl LintPass for Pass {
+ fn get_lints(&self) -> LintArray {
+ lint_array!(TEST_LINT, PLEASE_LINT)
+ }
+
+ fn check_item(&mut self, cx: &Context, it: &ast::Item) {
+ let name = token::get_ident(it.ident);
+ if name.get() == "lintme" {
+ cx.span_lint(TEST_LINT, it.span, "item is named 'lintme'");
+ } else if name.get() == "pleaselintme" {
+ cx.span_lint(PLEASE_LINT, it.span, "item is named 'pleaselintme'");
+ }
+ }
+}
+
+#[plugin_registrar]
+pub fn plugin_registrar(reg: &mut Registry) {
+ reg.register_lint_pass(box Pass as LintPassObject);
+ reg.register_lint_group("lint_me", vec![TEST_LINT, PLEASE_LINT]);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[no_mangle]
+pub extern "C" fn foo() -> uint {
+ 1234
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline]
+pub fn cci_fn() -> uint {
+ 1200
+}
+
+#[inline]
+pub static CCI_STATIC: uint = 34;
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3 --crate-type=rlib,dylib
+
+pub mod a {
+ pub fn one() -> uint {
+ 1
+ }
+}
+
+pub mod b {
+ pub fn two() -> uint {
+ 2
+ }
+}
+
+pub mod c {
+ use a::one;
+ use b::two;
+ pub fn three() -> uint {
+ one() + two()
+ }
+}
// Send/Receive lots of messages.
for j in range(0u, count) {
//println!("task %?, iter %?", i, j);
- let num_chan2 = num_chan.take_unwrap();
- let num_port2 = num_port.take_unwrap();
+ let num_chan2 = num_chan.take().unwrap();
+ let num_port2 = num_port.take().unwrap();
send(&num_chan2, i * j);
num_chan = Some(num_chan2);
let _n = recv(&num_port2);
// Send/Receive lots of messages.
for j in range(0u, count) {
//println!("task %?, iter %?", i, j);
- let num_chan2 = num_chan.take_unwrap();
- let num_port2 = num_port.take_unwrap();
+ let num_chan2 = num_chan.take().unwrap();
+ let num_port2 = num_port.take().unwrap();
send(&num_chan2, i * j);
num_chan = Some(num_chan2);
let _n = recv(&num_port2);
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
-use std::cmp::max;
+use std::{cmp, iter, mem};
+use std::sync::Future;
-fn fact(n: uint) -> uint {
- range(1, n + 1).fold(1, |accu, i| accu * i)
+fn rotate(x: &mut [i32]) {
+ let mut prev = x[0];
+ for place in x.mut_iter().rev() {
+ prev = mem::replace(place, prev)
+ }
}
-fn fannkuch(n: uint, i: uint) -> (int, int) {
- let mut perm = Vec::from_fn(n, |e| ((n + e - i) % n + 1) as i32);
- let mut tperm = perm.clone();
- let mut count = Vec::from_elem(n, 0u);
- let mut perm_count = 0i;
- let mut checksum = 0;
+fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
+ for i in range(1, perm.len()) {
+ rotate(perm.mut_slice_to(i + 1));
+ let count_i = &mut count[i];
+ if *count_i >= i as i32 {
+ *count_i = 0;
+ } else {
+ *count_i += 1;
+ break
+ }
+ }
+}
+
+struct P {
+ p: [i32, .. 16],
+}
+
+struct Perm {
+ cnt: [i32, .. 16],
+ fact: [u32, .. 16],
+ n: u32,
+ permcount: u32,
+ perm: P,
+}
+
+impl Perm {
+ fn new(n: u32) -> Perm {
+ let mut fact = [1, .. 16];
+ for i in range(1, n as uint + 1) {
+ fact[i] = fact[i - 1] * i as u32;
+ }
+ Perm {
+ cnt: [0, .. 16],
+ fact: fact,
+ n: n,
+ permcount: 0,
+ perm: P { p: [0, .. 16 ] }
+ }
+ }
+
+ fn get(&mut self, mut idx: i32) -> P {
+ let mut pp = [0u8, .. 16];
+ self.permcount = idx as u32;
+ for (i, place) in self.perm.p.mut_iter().enumerate() {
+ *place = i as i32 + 1;
+ }
- for countdown in range(1, fact(n - 1) + 1).rev() {
- for i in range(1, n) {
- let perm0 = *perm.get(0);
- for j in range(0, i) {
- *perm.get_mut(j) = *perm.get(j + 1);
+ for i in range(1, self.n as uint).rev() {
+ let d = idx / self.fact[i] as i32;
+ self.cnt[i] = d;
+ idx %= self.fact[i] as i32;
+ for (place, val) in pp.mut_iter().zip(self.perm.p.slice_to(i + 1).iter()) {
+ *place = (*val) as u8
}
- *perm.get_mut(i) = perm0;
-
- let count_i = count.get_mut(i);
- if *count_i >= i {
- *count_i = 0;
- } else {
- *count_i += 1;
- break;
+
+ let d = d as uint;
+ for j in range(0, i + 1) {
+ self.perm.p[j] = if j + d <= i {pp[j + d]} else {pp[j+d-i-1]} as i32;
}
}
- tperm.clone_from(&perm);
- let mut flips_count = 0;
- loop {
- let k = *tperm.get(0);
- if k == 1 { break; }
- tperm.mut_slice_to(k as uint).reverse();
- flips_count += 1;
+ self.perm
+ }
+
+ fn count(&self) -> u32 { self.permcount }
+ fn max(&self) -> u32 { self.fact[self.n as uint] }
+
+ fn next(&mut self) -> P {
+ next_permutation(self.perm.p, self.cnt);
+ self.permcount += 1;
+
+ self.perm
+ }
+}
+
+
+fn reverse(tperm: &mut [i32], mut k: uint) {
+ tperm.mut_slice_to(k).reverse()
+}
+
+fn work(mut perm: Perm, n: uint, max: uint) -> (i32, i32) {
+ let mut checksum = 0;
+ let mut maxflips = 0;
+
+ let mut p = perm.get(n as i32);
+
+ while perm.count() < max as u32 {
+ let mut flips = 0;
+
+ while p.p[0] != 1 {
+ let k = p.p[0] as uint;
+ reverse(p.p, k);
+ flips += 1;
}
- perm_count = max(perm_count, flips_count);
- checksum += if countdown & 1 == 1 {flips_count} else {-flips_count}
+
+ checksum += if perm.count() % 2 == 0 {flips} else {-flips};
+ maxflips = cmp::max(maxflips, flips);
+
+ p = perm.next();
}
- (checksum, perm_count)
+
+ (checksum, maxflips)
}
-fn main() {
- let n = std::os::args().as_slice()
- .get(1)
- .and_then(|arg| from_str(arg.as_slice()))
- .unwrap_or(2u);
-
- let (tx, rx) = channel();
- for i in range(0, n) {
- let tx = tx.clone();
- spawn(proc() tx.send(fannkuch(n, i)));
+fn fannkuch(n: i32) -> (i32, i32) {
+ let perm = Perm::new(n as u32);
+
+ let N = 4;
+ let mut futures = vec![];
+ let k = perm.max() / N;
+
+ for (i, j) in range(0, N).zip(iter::count(0, k)) {
+ let max = cmp::min(j+k, perm.max());
+
+ futures.push(Future::spawn(proc() {
+ work(perm, j as uint, max as uint)
+ }))
}
- drop(tx);
let mut checksum = 0;
- let mut perm = 0;
- for (cur_cks, cur_perm) in rx.iter() {
- checksum += cur_cks;
- perm = max(perm, cur_perm);
+ let mut maxflips = 0;
+ for fut in futures.mut_iter() {
+ let (cs, mf) = fut.get();
+ checksum += cs;
+ maxflips = cmp::max(maxflips, mf);
}
- println!("{}\nPfannkuchen({}) = {}", checksum, n, perm);
+ (checksum, maxflips)
+}
+
+fn main() {
+ let n = std::os::args().as_slice()
+ .get(1)
+ .and_then(|arg| from_str(arg.as_slice()))
+ .unwrap_or(2i32);
+
+ let (checksum, maxflips) = fannkuch(n);
+ println!("{}\nPfannkuchen({}) = {}", checksum, n, maxflips);
}
// no-pretty-expanded FIXME #15189
#![feature(phase)]
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
#[phase(plugin)] extern crate green;
use std::from_str::FromStr;
// ignore-pretty very bad with line comments
-#![allow(non_snake_case_functions)]
+#![allow(non_snake_case)]
use std::io;
use std::io::stdio::StdReader;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:lint_group_plugin_test.rs
+// ignore-stage1
+// compile-flags: -D lint-me
+
+#![feature(phase)]
+
+#[phase(plugin)]
+extern crate lint_group_plugin_test;
+
+fn lintme() { } //~ ERROR item is named 'lintme'
+
+fn pleaselintme() { } //~ ERROR item is named 'pleaselintme'
+
+pub fn main() {
+ lintme();
+ pleaselintme();
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-tidy-linelength
+
+use std::cell::RefCell;
+
+trait Trait {}
+
+pub fn main() {
+ let x: Vec<Trait + Sized> = Vec::new();
+ //~^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+ //~^^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+ //~^^^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+ let x: Vec<Box<RefCell<Trait + Sized>>> = Vec::new();
+ //~^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+ //~^^ ERROR instantiating a type parameter with an incompatible type `Trait+Sized`, which does not fulfill `Sized`
+}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-fn false() { } //~ ERROR found `false` in ident position
+fn false() { } //~ ERROR expected identifier, found keyword `false`
fn main() { }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-fn true() { } //~ ERROR found `true` in ident position
+fn true() { } //~ ERROR expected identifier, found keyword `true`
fn main() { }
let c1 = || set(&mut *x);
//~^ ERROR cannot borrow
let c2 = || set(&mut *x);
- //~^ ERROR closure requires unique access to `x`
- //~^^ ERROR cannot borrow
+ //~^ ERROR cannot borrow
}
fn main() {
fn copy_borrowed_ptr<'a,'b>(p: &'a mut S<'b>) -> S<'b> {
S { pointer: &mut *p.pointer }
- //~^ ERROR lifetime of `p` is too short to guarantee its contents can be safely reborrowed
+ //~^ ERROR cannot infer
}
fn main() {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that cross-borrowing (implicitly converting from `Box<T>` to `&T`) is
+// forbidden when `T` is a trait.
+
+struct Foo;
+trait Trait {}
+impl Trait for Foo {}
+
+pub fn main() {
+ let x: Box<Trait> = box Foo;
+ let _y: &Trait = x; //~ ERROR mismatched types: expected `&Trait`, found `Box<Trait>`
+}
+
let f5: &mut Fat<ToBar> = &mut Fat { f1: 5, f2: "some str", ptr: Bar1 {f :42} };
let z: Box<ToBar> = box Bar1 {f: 36};
f5.ptr = *z; //~ ERROR dynamically sized type on lhs of assignment
+ //~^ ERROR E0161
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test implicit coercions involving DSTs and raw pointers.
+
+struct S;
+trait T {}
+impl T for S {}
+
+struct Foo<Sized? T> {
+ f: T
+}
+
+pub fn main() {
+ // Test that we cannot convert from *-ptr to &-ptr
+ let x: *const S = &S;
+ let y: &S = x; //~ ERROR mismatched types
+ let y: &T = x; //~ ERROR mismatched types
+
+ // Test that we cannot convert from *-ptr to &-ptr (mut version)
+ let x: *mut S = &mut S;
+ let y: &S = x; //~ ERROR mismatched types
+ let y: &T = x; //~ ERROR mismatched types
+
+ // Test that we cannot convert an immutable ptr to a mutable one using *-ptrs
+ let x: &mut T = &S; //~ ERROR types differ in mutability
+ let x: *mut T = &S; //~ ERROR types differ in mutability
+ let x: *mut S = &S;
+ //~^ ERROR mismatched types
+
+ // The below four sets of tests test that we cannot implicitly deref a *-ptr
+ // during a coercion.
+ let x: *const S = &S;
+ let y: *const T = x; //~ ERROR mismatched types
+
+ let x: *mut S = &mut S;
+ let y: *mut T = x; //~ ERROR mismatched types
+
+ let x: *const Foo<S> = &Foo {f: S};
+ let y: *const Foo<T> = x; //~ ERROR mismatched types
+
+ let x: *mut Foo<S> = &mut Foo {f: S};
+ let y: *mut Foo<T> = x; //~ ERROR mismatched types
+}
let g: &Fat<[int]> = &f;
let h: &Fat<Fat<[int]>> = &Fat { ptr: *g };
//~^ ERROR trying to initialise a dynamically sized struct
+ //~^^ ERROR E0161
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that dynamically sized rvalues are forbidden
+
+pub fn main() {
+ let _x: Box<str> = box *"hello world";
+ //~^ ERROR E0161
+
+ let array: &[int] = &[1, 2, 3];
+ let _x: Box<[int]> = box *array;
+ //~^ ERROR E0161
+}
x: 1,
y: 2,
};
- for x in bogus { //~ ERROR does not implement the `Iterator` trait
+ for x in bogus { //~ ERROR has type `MyStruct` which does not implement the `Iterator` trait
drop(x);
}
}
-
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub fn main() {
+ let x = () + (); //~ ERROR binary operation
+
+ // this shouldn't have a flow-on error:
+ for _ in x {}
+}
#![feature = "foo"] //~ ERROR: malformed feature
#![feature(test_removed_feature)] //~ ERROR: feature has been removed
-#![feature(test_accepted_feature)] //~ WARNING: feature has added
+#![feature(test_accepted_feature)] //~ WARNING: feature has been added
// except according to those terms.
fn test<'x>(x: &'x int) {
- drop::< <'z>|&'z int| -> &'z int>(|z| {
+ drop::< <'z>|&'z int| -> &'z int >(|z| {
x
//~^ ERROR cannot infer an appropriate lifetime
});
let x = [1,2];
let y = match x {
[] => None,
-//~^ ERROR expected `[<generic integer #1>, .. 2]`, found a fixed vector pattern of size 0
+//~^ ERROR expected `[<generic integer #0>, .. 2]`, found a fixed vector pattern of size 0
[a,_] => Some(a)
};
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(warnings)]
+
+extern {
+ pub fn foo(x: (int)); //~ ERROR found rust type `int` in foreign module
+}
+
+fn main() {
+}
fn main() {
let Slice { data: data, len: len } = "foo";
- //~^ ERROR mismatched types: expected `&'static str`, found a structure pattern
+ //~^ ERROR mismatched types: expected `&str`, found a structure pattern
}
// aux-build:issue-16725.rs
-extern crate foo = "issue-16725";
+extern crate "issue-16725" as foo;
fn main() {
unsafe { foo::bar(); }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub fn main() {
+ let x = [1, 2, 3];
+ //~^ ERROR cannot determine a type for this local variable: cannot determine the type of this
+ let y = x.as_slice();
+}
}
fn main() {
["hi"].bind(|x| [x] );
- //~^ ERROR type `[&'static str, .. 1]` does not implement any method in scope named `bind`
+ //~^ ERROR type `[&str, .. 1]` does not implement any method in scope named `bind`
}
fn new_struct(r: A+'static) -> Struct {
//~^ ERROR variable `r` has dynamically sized type
Struct { r: r } //~ ERROR trying to initialise a dynamically sized struct
+ //~^ ERROR E0161
+ //~^^ ERROR E0161
}
trait Curve {}
// except according to those terms.
fn main() {
- let super: int; //~ ERROR found `super` in ident position
+ let super: int; //~ ERROR expected identifier, found keyword `super`
}
// except according to those terms.
pub mod break {
- //~^ ERROR found `break` in ident position
+ //~^ ERROR expected identifier, found keyword `break`
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that Copy bounds inherited by trait are checked.
+
+use std::any::Any;
+use std::any::AnyRefExt;
+
+trait Foo : Copy {
+}
+
+impl<T:Copy> Foo for T {
+}
+
+fn take_param<T:Foo>(foo: &T) { }
+
+fn main() {
+ let x = box 3i;
+ take_param(&x); //~ ERROR does not fulfill `Copy`
+
+ let y = &x;
+ let z = &x as &Foo; //~ ERROR does not fulfill `Copy`
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test which object types are considered sendable. This test
+// is broken into two parts because some errors occur in distinct
+// phases in the compiler. See kindck-send-object2.rs as well!
+
+fn assert_send<T:Send>() { }
+trait Dummy { }
+
+// careful with object types, who knows what they close over...
+fn test51<'a>() {
+ assert_send::<&'a Dummy>(); //~ ERROR does not fulfill the required lifetime
+}
+fn test52<'a>() {
+ assert_send::<&'a Dummy+Send>(); //~ ERROR does not fulfill the required lifetime
+}
+
+// ...unless they are properly bounded
+fn test60() {
+ assert_send::<&'static Dummy+Send>();
+}
+fn test61() {
+ assert_send::<Box<Dummy+Send>>();
+}
+
+// closure and object types can have lifetime bounds which make
+// them not ok
+fn test_70<'a>() {
+ assert_send::<proc():'a>(); //~ ERROR does not fulfill the required lifetime
+}
+
+fn test_71<'a>() {
+ assert_send::<Box<Dummy+'a>>(); //~ ERROR does not fulfill the required lifetime
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Continue kindck-send-object1.rs.
+
+fn assert_send<T:Send>() { }
+trait Dummy { }
+
+fn test50() {
+ assert_send::<&'static Dummy>(); //~ ERROR does not fulfill `Send`
+}
+
+fn test53() {
+ assert_send::<Box<Dummy>>(); //~ ERROR does not fulfill `Send`
+}
+
+// ...unless they are properly bounded
+fn test60() {
+ assert_send::<&'static Dummy+Send>();
+}
+fn test61() {
+ assert_send::<Box<Dummy+Send>>();
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test which of the builtin types are considered sendable.
+
+fn assert_send<T:Send>() { }
+
+// owned content are ok
+fn test30() { assert_send::<Box<int>>(); }
+fn test31() { assert_send::<String>(); }
+fn test32() { assert_send::<Vec<int> >(); }
+
+// but not if they own a bad thing
+fn test40<'a>(_: &'a int) {
+ assert_send::<Box<&'a int>>(); //~ ERROR does not fulfill the required lifetime
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that borrowed pointers are not sendable unless 'static.
+
+fn assert_send<T:Send>() { }
+
+// lifetime pointers with 'static lifetime are ok
+fn test01() { assert_send::<&'static int>(); }
+fn test02() { assert_send::<&'static str>(); }
+fn test03() { assert_send::<&'static [int]>(); }
+
+// whether or not they are mutable
+fn test10() { assert_send::<&'static mut int>(); }
+
+// otherwise lifetime pointers are not ok
+fn test20<'a>(_: &'a int) {
+ assert_send::<&'a int>(); //~ ERROR does not fulfill the required lifetime
+}
+fn test21<'a>(_: &'a int) {
+ assert_send::<&'a str>(); //~ ERROR does not fulfill the required lifetime
+}
+fn test22<'a>(_: &'a int) {
+ assert_send::<&'a [int]>(); //~ ERROR does not fulfill the required lifetime
+}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn assert_send<T:Send>() { }
+
+// unsafe ptrs are ok unless they point at unsendable things
+fn test70() {
+ assert_send::<*mut int>();
+}
+fn test71<'a>() {
+ assert_send::<*mut &'a int>(); //~ ERROR does not fulfill the required lifetime
+}
+
+fn main() {
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(bad_style)]
+//~^ NOTE lint level defined here
+#![allow(dead_code)]
+
+fn CamelCase() {} //~ ERROR function `CamelCase` should have a snake case name
+
+#[allow(bad_style)]
+mod test {
+ fn CamelCase() {}
+
+ #[forbid(bad_style)]
+ //~^ NOTE lint level defined here
+ //~^^ NOTE lint level defined here
+ mod bad {
+ fn CamelCase() {} //~ ERROR function `CamelCase` should have a snake case name
+
+ static bad: int = 1; //~ ERROR static constant `bad` should have an uppercase name
+ }
+
+ mod warn {
+ #![warn(bad_style)]
+ //~^ NOTE lint level defined here
+
+ fn CamelCase() {} //~ WARN function `CamelCase` should have a snake case name
+
+ struct snake_case; //~ WARN type `snake_case` should have a camel case name
+ }
+}
+
+fn main() {}
trait foo6 { //~ ERROR trait `foo6` should have a camel case name such as `Foo6`
}
+fn f<ty>(_: ty) {} //~ ERROR type parameter `ty` should have a camel case name such as `Ty`
+
#[repr(C)]
struct foo7 {
bar: int,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![deny(non_snake_case_functions)]
+#![deny(non_snake_case)]
#![allow(dead_code)]
struct Foo;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(non_snake_case)]
+#![allow(dead_code)]
+
+fn f<'FooBar>( //~ ERROR lifetime `'FooBar` should have a snake case name such as `'foo_bar`
+ _: &'FooBar ()
+) {}
+
+fn main() { }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![deny(non_snake_case)]
+#![allow(dead_code)]
+
+mod FooBar { //~ ERROR module `FooBar` should have a snake case name such as `foo_bar`
+ pub struct S;
+}
+
+fn f(_: FooBar::S) { }
+
+fn main() { }
// ignore-tidy-linelength
#![allow(dead_code)]
-#![deny(uppercase_variables)]
+#![deny(non_snake_case)]
use std::io::File;
use std::io::IoError;
struct Something {
- X: uint //~ ERROR structure field names should start with a lowercase character
+ X: uint //~ ERROR structure field `X` should have a snake case name such as `x`
}
-fn test(Xx: uint) { //~ ERROR variable names should start with a lowercase character
+fn test(Xx: uint) { //~ ERROR variable `Xx` should have a snake case name such as `xx`
println!("{}", Xx);
}
fn main() {
- let Test: uint = 0; //~ ERROR variable names should start with a lowercase character
+ let Test: uint = 0; //~ ERROR variable `Test` should have a snake case name such as `test`
println!("{}", Test);
let mut f = File::open(&Path::new("something.txt"));
match f.read(buff) {
Ok(cnt) => println!("read this many bytes: {}", cnt),
Err(IoError{ kind: EndOfFile, .. }) => println!("Got end of file: {}", EndOfFile.to_string()),
- //~^ ERROR variable names should start with a lowercase character
+ //~^ ERROR variable `EndOfFile` should have a snake case name such as `end_of_file`
}
test(1);
// Issue #7526: lowercase static constants in patterns look like bindings
#![allow(dead_code)]
-#![deny(non_uppercase_pattern_statics)]
+#![deny(non_uppercase_statics)]
+#[allow(non_uppercase_statics)]
pub static a : int = 97;
fn f() {
}
mod m {
+ #[allow(non_uppercase_statics)]
pub static aha : int = 7;
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that ~ pointers give an obsolescence message.
+
+fn foo(x: ~int) {} //~ ERROR obsolete syntax: `~` notation for owned pointers
+fn bar(x: ~str) {} //~ ERROR obsolete syntax: `~` notation for owned pointers
+fn baz(x: ~[int]) {} //~ ERROR obsolete syntax: `~[T]` is no longer a type
+
+fn main() {
+ let x = ~4i; //~ ERROR obsolete syntax: `~` notation for owned pointer allocation
+ let y = ~"hello"; //~ ERROR obsolete syntax: `~` notation for owned pointer allocation
+ let z = ~[1i, 2, 3]; //~ ERROR obsolete syntax: `~[T]` is no longer a type
+}
// Check explicit region bounds on methods in the cross crate case.
-extern crate lib = "regions-bounded-method-type-parameters-cross-crate-lib";
+extern crate "regions-bounded-method-type-parameters-cross-crate-lib" as lib;
use lib::Inv;
use lib::MaybeOwned;
fn a_fn1<'a,'b>(e: an_enum<'a>) -> an_enum<'b> {
return e; //~ ERROR mismatched types: expected `an_enum<'b>`, found `an_enum<'a>`
- //~^ ERROR cannot infer
}
fn a_fn3<'a,'b>(e: a_class<'a>) -> a_class<'b> {
return e; //~ ERROR mismatched types: expected `a_class<'b>`, found `a_class<'a>`
- //~^ ERROR cannot infer
}
fn main() { }
impl<'a, T> X for B<'a, T> {}
fn f<'a, T, U>(v: Box<A<T>+'static>) -> Box<X+'static> {
- box B(v) as Box<X>
+ box B(&*v) as Box<X>
}
fn g<'a, T: 'static>(v: Box<A<T>>) -> Box<X+'static> {
- box B(v) as Box<X> //~ ERROR cannot infer
+ box B(&*v) as Box<X> //~ ERROR cannot infer
}
fn h<'a, T, U>(v: Box<A<U>+'static>) -> Box<X+'static> {
- box B(v) as Box<X>
+ box B(&*v) as Box<X>
}
fn i<'a, T, U>(v: Box<A<U>>) -> Box<X+'static> {
- box B(v) as Box<X> //~ ERROR cannot infer
+ box B(&*v) as Box<X> //~ ERROR cannot infer
}
fn main() {}
fn main() {
let mut x = None;
- //~^ ERROR lifetime of variable does not enclose its declaration
- //~^^ ERROR type of expression contains references that are not valid during the expression
with_int(|y| x = Some(y));
+ //~^ ERROR cannot infer
}
}
fn main() {
- let mut x: Option<&int> = None; //~ ERROR cannot infer
- with_int(|y| x = Some(y));
+ let mut x: Option<&int> = None;
+ with_int(|y| x = Some(y)); //~ ERROR cannot infer
}
}
fn return_it() -> int {
- with(|o| o) //~ ERROR cannot infer an appropriate lifetime
+ with(|o| o) //~ ERROR cannot infer
}
fn main() {
fn take1<'a>(p: parameterized1) -> parameterized1<'a> { p }
//~^ ERROR mismatched types
-//~^^ ERROR cannot infer
fn take3(p: not_parameterized1) -> not_parameterized1 { p }
fn take4(p: not_parameterized2) -> not_parameterized2 { p }
// covariant with respect to its parameter 'a.
let _: Contravariant<'long> = c; //~ ERROR mismatched types
- //~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
// contravariant with respect to its parameter 'a.
let _: Covariant<'short> = c; //~ ERROR mismatched types
- //~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
}
fn take_direct<'a,'b>(p: direct<'a>) -> direct<'b> { p } //~ ERROR mismatched types
-//~^ ERROR cannot infer
fn take_indirect1(p: indirect1) -> indirect1 { p }
fn take_indirect2<'a,'b>(p: indirect2<'a>) -> indirect2<'b> { p } //~ ERROR mismatched types
-//~^ ERROR cannot infer
fn main() {}
}
trait set_f<'a> {
- fn set_f_ok(&self, b: Gc<b<'a>>);
- fn set_f_bad(&self, b: Gc<b>);
+ fn set_f_ok(&mut self, b: Gc<b<'a>>);
+ fn set_f_bad(&mut self, b: Gc<b>);
}
impl<'a> set_f<'a> for c<'a> {
- fn set_f_ok(&self, b: Gc<b<'a>>) {
+ fn set_f_ok(&mut self, b: Gc<b<'a>>) {
self.f = b;
}
- fn set_f_bad(&self, b: Gc<b>) {
+ fn set_f_bad(&mut self, b: Gc<b>) {
self.f = b; //~ ERROR mismatched types: expected `Gc<Gc<&'a int>>`, found `Gc<Gc<&int>>`
- //~^ ERROR cannot infer
}
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that, when a variable of type `&T` is captured inside a proc,
+// we correctly infer/require that its lifetime is 'static.
+
+fn foo(_p: proc():'static) { }
+
+static i: int = 3;
+
+fn capture_local() {
+ let x = 3i;
+ let y = &x; //~ ERROR `x` does not live long enough
+ foo(proc() {
+ let _a = *y;
+ });
+}
+
+fn capture_static() {
+ // Legal because &i can have static lifetime:
+ let y = &i;
+ foo(proc() {
+ let _a = *y;
+ });
+}
+
+fn main() { }
// Issue #8624. Test for reborrowing with 3 levels, not just two.
fn copy_borrowed_ptr<'a, 'b, 'c>(p: &'a mut &'b mut &'c mut int) -> &'b mut int {
- &mut ***p //~ ERROR lifetime of `p` is too short to guarantee its contents
+ &mut ***p //~ ERROR cannot infer
}
fn main() {
// for `'a` (which must be a sublifetime of `'b`).
fn copy_borrowed_ptr<'a, 'b>(p: &'a mut &'b mut int) -> &'b mut int {
- &mut **p //~ ERROR lifetime of `p` is too short
+ &mut **p //~ ERROR cannot infer
}
fn main() {
fn return_it<'a>() -> &'a int {
with(|o| o)
//~^ ERROR cannot infer
- //~^^ ERROR not valid during the expression
- //~^^^ ERROR not valid at this point
}
fn main() {
fn return_it<'a>() -> &'a int {
with(|o| o)
//~^ ERROR cannot infer
- //~^^ ERROR not valid during the expression
- //~^^^ ERROR not valid at this point
}
fn main() {
// covariant with respect to its parameter 'a.
let _: S<'long, 'long> = c; //~ ERROR mismatched types
- //~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
// covariant with respect to its parameter 'a.
let _: Contravariant<'long> = c; //~ ERROR mismatched types
- //~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
// contravariant with respect to its parameter 'a.
let _: Covariant<'short> = c; //~ ERROR mismatched types
- //~^ ERROR cannot infer an appropriate lifetime
}
fn main() {}
struct s {
let foo: (),
- //~^ ERROR found `let` in ident position
+ //~^ ERROR expected identifier, found keyword `let`
//~^^ ERROR expected `:`, found `foo`
}
fn f() {
let v = [mut 1, 2, 3, 4];
- //~^ ERROR found `mut` in ident position
+ //~^ ERROR expected identifier, found keyword `mut`
//~^^ ERROR expected `]`, found `1`
}
// except according to those terms.
type v = [mut int];
- //~^ ERROR found `mut` in ident position
+ //~^ ERROR expected identifier, found keyword `mut`
//~^^ ERROR expected `]`, found `int`
fn f() {
let a_box = box mut 42;
- //~^ ERROR found `mut` in ident position
+ //~^ ERROR expected identifier, found keyword `mut`
//~^^ ERROR expected `;`, found `42`
}
// except according to those terms.
type mut_box = Box<mut int>;
- //~^ ERROR found `mut` in ident position
+ //~^ ERROR expected identifier, found keyword `mut`
//~^^ ERROR expected `,`, found `int`
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::slice as std; //~ ERROR import conflicts with imported crate
+use std::slice as std; //~ ERROR import `std` conflicts with imported crate
fn main() {
}
// except according to those terms.
use std::mem::transmute;
-//~^ ERROR import conflicts with value in this module
+//~^ ERROR import `transmute` conflicts with value in this module
fn transmute() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::slice::Items;
+//~^ ERROR import `Items` conflicts with type in this module
+
+struct Items;
+
+fn main() {
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Make sure we give a sane error message when the user requests LTO with a
+// library built with -C codegen-units > 1.
+
+// aux-build:sepcomp_lib.rs
+// compile-flags: -Z lto
+// error-pattern:missing compressed bytecode
+// no-prefer-dynamic
+
+extern crate sepcomp_lib;
+use sepcomp_lib::a::one;
+use sepcomp_lib::b::two;
+use sepcomp_lib::c::three;
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(two(), 2);
+ assert_eq!(three(), 3);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait TraitNotAStruct { }
+
+fn main() {
+ TraitNotAStruct{ value: 0 };
+ //~^ ERROR: use of trait `TraitNotAStruct` as a struct constructor [E0159]
+}
+
//~^ ERROR failed to find an implementation
//~^^ ERROR instantiating a type parameter with an incompatible type
-impl<T> Foo<T> {
+impl<T> Foo<T> { //~ ERROR failed to find an implementation
+//~^ ERROR instantiating a type parameter with an incompatible type
fn uhoh() {}
}
fn f<X>() {}
pub fn main() {
- f<type>(); //~ ERROR found `type` in ident position
+ f<type>(); //~ ERROR expected identifier, found keyword `type`
}
// Test some tuples.
fn f9<Sized? X>(x1: Box<S<X>>, x2: Box<E<X>>) {
- f5(&(*x1, 34i)); //~ERROR instantiating a type parameter with an incompatible type `(S<X>,int)`,
- f5(&(32i, *x2)); //~ERROR instantiating a type parameter with an incompatible type `(int,E<X>)`,
+ f5(&(*x1, 34i)); //~ERROR E0161
+ //~^ ERROR instantiating a type parameter with an incompatible type
+ f5(&(32i, *x2)); //~ERROR E0161
+ //~^ ERROR instantiating a type parameter with an incompatible type
}
// I would like these to fail eventually.
trait T2<Z> {
}
impl<Sized? X> T2<X> for S3<X> { //ERROR instantiating a type parameter with an incompatible type `X
+*/
// impl - struct
trait T3<Sized? Z> {
}
struct S4<Y>;
-impl<Sized? X> T3<X> for S4<X> { //ERROR instantiating a type parameter with an incompatible type `X
+impl<Sized? X> T3<X> for S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
}
-*/
+impl<Sized? X> S4<X> { //~ ERROR instantiating a type parameter with an incompatible type
+}
+
pub fn main() {
}
let y: X = *x1; //~ERROR variable `y` has dynamically sized type `X`
let y = *x2; //~ERROR variable `y` has dynamically sized type `X`
let (y, z) = (*x3, 4i); //~ERROR variable `y` has dynamically sized type `X`
+ //~^ ERROR E0161
}
fn f4<Sized? X: T>(x1: Box<X>, x2: Box<X>, x3: Box<X>) {
let y: X = *x1; //~ERROR variable `y` has dynamically sized type `X`
let y = *x2; //~ERROR variable `y` has dynamically sized type `X`
let (y, z) = (*x3, 4i); //~ERROR variable `y` has dynamically sized type `X`
+ //~^ ERROR E0161
}
fn g1<Sized? X>(x: X) {} //~ERROR variable `x` has dynamically sized type `X`
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test uses only GDB Python API features which should be available in
+// older versions of GDB too. A more extensive test can be found in
+// gdb-pretty-struct-and-enums.rs
+
+// ignore-test FIXME(#16919)
+// ignore-tidy-linelength
+// ignore-lldb
+// ignore-android: FIXME(#10381)
+// compile-flags:-g
+// gdb-use-pretty-printer
+
+// The following line doesn't actually have anything to do with pretty printing;
+// it just tells GDB to print values on one line:
+// gdb-command: set print pretty off
+
+// gdb-command: rbreak zzz
+// gdb-command: run
+// gdb-command: finish
+
+// gdb-command: print regular_struct
+// gdb-check:$1 = RegularStruct = {the_first_field = 101, the_second_field = 102.5, the_third_field = false}
+
+// gdb-command: print empty_struct
+// gdb-check:$2 = EmptyStruct
+
+// gdb-command: print c_style_enum1
+// gdb-check:$3 = CStyleEnumVar1
+
+// gdb-command: print c_style_enum2
+// gdb-check:$4 = CStyleEnumVar2
+
+// gdb-command: print c_style_enum3
+// gdb-check:$5 = CStyleEnumVar3
+
+struct RegularStruct {
+ the_first_field: int,
+ the_second_field: f64,
+ the_third_field: bool,
+}
+
+struct EmptyStruct;
+
+enum CStyleEnum {
+ CStyleEnumVar1,
+ CStyleEnumVar2,
+ CStyleEnumVar3,
+}
+
+fn main() {
+
+ let regular_struct = RegularStruct {
+ the_first_field: 101,
+ the_second_field: 102.5,
+ the_third_field: false
+ };
+
+ let empty_struct = EmptyStruct;
+
+ let c_style_enum1 = CStyleEnumVar1;
+ let c_style_enum2 = CStyleEnumVar2;
+ let c_style_enum3 = CStyleEnumVar3;
+
+ zzz();
+}
+
+fn zzz() { () }
--- /dev/null
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-test FIXME(#16919)
+// ignore-tidy-linelength
+// ignore-lldb
+// ignore-android: FIXME(#10381)
+// compile-flags:-g
+// gdb-use-pretty-printer
+
+// This test uses some GDB Python API features (e.g. accessing anonymous fields)
+// which are only available in newer GDB version. The following directive will
+// cause the test runner to ignore this test if an older GDB version is used:
+// min-gdb-version 7.7
+
+// The following line doesn't actually have anything to do with pretty printing;
+// it just tells GDB to print values on one line:
+// gdb-command: set print pretty off
+
+// gdb-command: rbreak zzz
+// gdb-command: run
+// gdb-command: finish
+
+// gdb-command: print regular_struct
+// gdb-check:$1 = RegularStruct = {the_first_field = 101, the_second_field = 102.5, the_third_field = false, the_fourth_field = "I'm so pretty, oh so pretty..."}
+
+// gdb-command: print tuple
+// gdb-check:$2 = {true, 103, "blub"}
+
+// gdb-command: print tuple_struct
+// gdb-check:$3 = TupleStruct = {-104.5, 105}
+
+// gdb-command: print empty_struct
+// gdb-check:$4 = EmptyStruct
+
+// gdb-command: print c_style_enum1
+// gdb-check:$5 = CStyleEnumVar1
+
+// gdb-command: print c_style_enum2
+// gdb-check:$6 = CStyleEnumVar2
+
+// gdb-command: print c_style_enum3
+// gdb-check:$7 = CStyleEnumVar3
+
+// gdb-command: print mixed_enum_c_style_var
+// gdb-check:$8 = MixedEnumCStyleVar
+
+// gdb-command: print mixed_enum_tuple_var
+// gdb-check:$9 = MixedEnumTupleVar = {106, 107, false}
+
+// gdb-command: print mixed_enum_struct_var
+// gdb-check:$10 = MixedEnumStructVar = {field1 = 108.5, field2 = 109}
+
+// gdb-command: print some
+// gdb-check:$11 = Some = {110}
+
+// gdb-command: print none
+// gdb-check:$12 = None
+
+// gdb-command: print nested_variant1
+// gdb-check:$13 = NestedVariant1 = {NestedStruct = {regular_struct = RegularStruct = {the_first_field = 111, the_second_field = 112.5, the_third_field = true, the_fourth_field = "NestedStructString1"}, tuple_struct = TupleStruct = {113.5, 114}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar2, mixed_enum = MixedEnumTupleVar = {115, 116, false}}}
+
+// gdb-command: print nested_variant2
+// gdb-check:$14 = NestedVariant2 = {abc = NestedStruct = {regular_struct = RegularStruct = {the_first_field = 117, the_second_field = 118.5, the_third_field = false, the_fourth_field = "NestedStructString10"}, tuple_struct = TupleStruct = {119.5, 120}, empty_struct = EmptyStruct, c_style_enum = CStyleEnumVar3, mixed_enum = MixedEnumStructVar = {field1 = 121.5, field2 = -122}}}
+
+#![feature(struct_variant)]
+
+struct RegularStruct {
+ the_first_field: int,
+ the_second_field: f64,
+ the_third_field: bool,
+ the_fourth_field: &'static str,
+}
+
+struct TupleStruct(f64, i16);
+
+struct EmptyStruct;
+
+enum CStyleEnum {
+ CStyleEnumVar1,
+ CStyleEnumVar2,
+ CStyleEnumVar3,
+}
+
+enum MixedEnum {
+ MixedEnumCStyleVar,
+ MixedEnumTupleVar(u32, u16, bool),
+ MixedEnumStructVar { field1: f64, field2: i32 }
+}
+
+struct NestedStruct {
+ regular_struct: RegularStruct,
+ tuple_struct: TupleStruct,
+ empty_struct: EmptyStruct,
+ c_style_enum: CStyleEnum,
+ mixed_enum: MixedEnum,
+}
+
+enum NestedEnum {
+ NestedVariant1(NestedStruct),
+ NestedVariant2 { abc: NestedStruct }
+}
+
+fn main() {
+
+ let regular_struct = RegularStruct {
+ the_first_field: 101,
+ the_second_field: 102.5,
+ the_third_field: false,
+ the_fourth_field: "I'm so pretty, oh so pretty..."
+ };
+
+ let tuple = ( true, 103u32, "blub" );
+
+ let tuple_struct = TupleStruct(-104.5, 105);
+
+ let empty_struct = EmptyStruct;
+
+ let c_style_enum1 = CStyleEnumVar1;
+ let c_style_enum2 = CStyleEnumVar2;
+ let c_style_enum3 = CStyleEnumVar3;
+
+ let mixed_enum_c_style_var = MixedEnumCStyleVar;
+ let mixed_enum_tuple_var = MixedEnumTupleVar(106, 107, false);
+ let mixed_enum_struct_var = MixedEnumStructVar { field1: 108.5, field2: 109 };
+
+ let some = Some(110u);
+ let none: Option<int> = None;
+
+ let nested_variant1 = NestedVariant1(
+ NestedStruct {
+ regular_struct: RegularStruct {
+ the_first_field: 111,
+ the_second_field: 112.5,
+ the_third_field: true,
+ the_fourth_field: "NestedStructString1",
+ },
+ tuple_struct: TupleStruct(113.5, 114),
+ empty_struct: EmptyStruct,
+ c_style_enum: CStyleEnumVar2,
+ mixed_enum: MixedEnumTupleVar(115, 116, false)
+ }
+ );
+
+ let nested_variant2 = NestedVariant2 {
+ abc: NestedStruct {
+ regular_struct: RegularStruct {
+ the_first_field: 117,
+ the_second_field: 118.5,
+ the_third_field: false,
+ the_fourth_field: "NestedStructString10",
+ },
+ tuple_struct: TupleStruct(119.5, 120),
+ empty_struct: EmptyStruct,
+ c_style_enum: CStyleEnumVar3,
+ mixed_enum: MixedEnumStructVar {
+ field1: 121.5,
+ field2: -122
+ }
+ }
+ };
+
+ zzz();
+}
+
+fn zzz() { () }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test bounds checking for DST raw slices
+// error-pattern:index out of bounds
+
+fn main() {
+ let a: *const [_] = &[1i, 2, 3];
+ unsafe {
+ let _b = (*a)[3];
+ }
+}
--- /dev/null
+
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Previously failed formatting invalid utf8.
+// cc #16877
+
+// error-pattern:failed at 'hello�'
+
+struct Foo;
+impl std::fmt::Show for Foo {
+ fn fmt(&self, fmtr:&mut std::fmt::Formatter) -> std::fmt::Result {
+ // Purge invalid utf8: 0xff
+ fmtr.write(&[104, 101, 108, 108, 111, 0xff])
+ }
+}
+fn main() {
+ fail!("{}", Foo)
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// error-pattern:capacity overflow
+
+use std::collections::hashmap::HashMap;
+use std::uint;
+use std::mem::size_of;
+
+fn main() {
+ let threshold = uint::MAX / size_of::<(u64, u64, u64)>();
+ let mut h = HashMap::<u64, u64>::with_capacity(threshold + 100);
+ h.insert(0, 0);
+}
--- /dev/null
+-include ../tools.mk
+
+# Verifies that the -L arguments given to the linker are in the same order
+# as the -L arguments on the rustc command line.
+
+CORRECT_DIR=$(TMPDIR)/correct
+WRONG_DIR=$(TMPDIR)/wrong
+
+all: $(TMPDIR)/libcorrect.a $(TMPDIR)/libwrong.a
+ mkdir -p $(CORRECT_DIR) $(WRONG_DIR)
+ mv $(TMPDIR)/libcorrect.a $(CORRECT_DIR)/libfoo.a
+ mv $(TMPDIR)/libwrong.a $(WRONG_DIR)/libfoo.a
+ $(RUSTC) main.rs -o $(TMPDIR)/should_succeed -L $(CORRECT_DIR) -L $(WRONG_DIR)
+ $(call RUN,should_succeed)
+ $(RUSTC) main.rs -o $(TMPDIR)/should_fail -L $(WRONG_DIR) -L $(CORRECT_DIR)
+ $(call FAIL,should_fail)
+
--- /dev/null
+int should_return_one() { return 1; }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate libc;
+
+#[link(name="foo")]
+extern {
+ fn should_return_one() -> libc::c_int;
+}
+
+fn main() {
+ let result = unsafe {
+ should_return_one()
+ };
+
+ if result != 1 {
+ std::os::set_exit_status(255);
+ }
+}
--- /dev/null
+int should_return_one() { return 0; }
$(call REMOVE_RLIBS,bar)
$(call REMOVE_DYLIBS,bar)
rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
+ # Check that $(TMPDIR) is empty.
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=bin
rm $(TMPDIR)/$(call BIN,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=asm,ir,bc,obj,link
rm $(TMPDIR)/bar.ll
rm $(TMPDIR)/bar.bc
rm $(TMPDIR)/bar.s
rm $(TMPDIR)/bar.o
rm $(TMPDIR)/$(call BIN,bar)
- $(RUSTC) foo.rs --emit=asm,ir,bc,obj,link --crate-type=staticlib
- rm $(TMPDIR)/bar.ll
- rm $(TMPDIR)/bar.s
- rm $(TMPDIR)/bar.o
- rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=asm -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=bc -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=ir -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=obj -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --emit=link -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=rlib -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=dylib -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo) # FIXME 13794
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=staticlib -o $(TMPDIR)/foo
rm $(TMPDIR)/foo
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
$(RUSTC) foo.rs --crate-type=bin -o $(TMPDIR)/foo
rm $(TMPDIR)/$(call BIN,foo)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
+
+ $(RUSTC) foo.rs --emit=asm,ir,bc,obj,link --crate-type=staticlib
+ rm $(TMPDIR)/bar.ll
+ rm $(TMPDIR)/bar.s
+ rm $(TMPDIR)/bar.o
+ rm $(TMPDIR)/$(call STATICLIB_GLOB,bar)
mv $(TMPDIR)/bar.bc $(TMPDIR)/foo.bc
+ # Don't check that the $(TMPDIR) is empty - we left `foo.bc` for later
+ # comparison.
+
$(RUSTC) foo.rs --emit=bc,link --crate-type=rlib
cmp $(TMPDIR)/foo.bc $(TMPDIR)/bar.bc
rm $(TMPDIR)/bar.bc
rm $(TMPDIR)/foo.bc
$(call REMOVE_RLIBS,bar)
+ [ "$$(ls -1 $(TMPDIR) | wc -l)" -eq "0" ]
--- /dev/null
+-include ../tools.mk
+
+REPLACEMENT := s/[0-9][0-9]*\#[0-9][0-9]*/$(shell date)/g
+
+all:
+ $(RUSTC) -o $(TMPDIR)/input.out --pretty expanded,hygiene input.rs
+
+ # the name/ctxt numbers are very internals-dependent and thus
+ # change relatively frequently, and testing for their exact values
+	# will fail annoyingly, so we just check their positions
+ # (using a non-constant replacement like this will make it less
+ # likely the compiler matches whatever other dummy value we
+ # choose).
+ #
+ # (These need to be out-of-place because OSX/BSD & GNU sed
+ # differ.)
+ sed "$(REPLACEMENT)" input.pp.rs > $(TMPDIR)/input.pp.rs
+ sed "$(REPLACEMENT)" $(TMPDIR)/input.out > $(TMPDIR)/input.out.replaced
+
+ diff -u $(TMPDIR)/input.out.replaced $(TMPDIR)/input.pp.rs
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(macro_rules)]
+// minimal junk
+#![no_std]
+
+
+fn bar /* 62#0 */() { let x /* 59#2 */ = 1; y /* 61#4 */ + x /* 59#5 */ }
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(macro_rules)]
+// minimal junk
+#![no_std]
+
+macro_rules! foo {
+ ($x: ident) => { y + $x }
+}
+
+fn bar() {
+ let x = 1;
+ foo!(x)
+}
--- /dev/null
+-include ../tools.mk
+
+# FIXME ignore windows
+ifndef IS_WINDOWS
+
+source=index.rs
+
+all:
+ $(HOST_RPATH_ENV) $(RUSTDOC) -w html -o $(TMPDIR)/doc $(source)
+ cp $(source) $(TMPDIR)
+ cp verify.sh $(TMPDIR)
+ $(call RUN,verify.sh) $(TMPDIR)
+
+else
+all:
+
+endif
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_name = "rustdoc_test"]
+
+// In: Foo
+pub use private::Foo;
+
+mod private {
+ pub struct Foo;
+ impl Foo {
+ // In: test_method
+ pub fn test_method() {}
+ // Out: priv_method
+ fn priv_method() {}
+ }
+
+ pub trait PrivateTrait {
+ // Out: priv_method
+ fn trait_method() {}
+ }
+}
--- /dev/null
+#!/bin/sh
+
+source="$1/index.rs"
+index="$1/doc/search-index.js"
+
+if ! [ -e $index ]
+then
+ echo "Could not find the search index (looked for $index)"
+ exit 1
+fi
+
+ins=$(grep -o 'In: .*' $source | sed 's/In: \(.*\)/\1/g')
+outs=$(grep -o 'Out: .*' $source | sed 's/Out: \(.*\)/\1/g')
+
+for p in $ins
+do
+ if ! grep -q $p $index
+ then
+ echo "'$p' was erroneously excluded from search index."
+ exit 1
+ fi
+done
+
+for p in $outs
+do
+ if grep -q $p $index
+ then
+ echo "'$p' was erroneously included in search index."
+ exit 1
+ fi
+done
+
+exit 0
--- /dev/null
+-include ../tools.mk
+
+# Check that cross-crate inlined items are inlined in all compilation units
+# that refer to them, and not in any other compilation units.
+
+all:
+ $(RUSTC) cci_lib.rs
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ .*cci_fn)" -eq "2" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c CCI_STATIC.*=.*constant)" -eq "2" ]
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "rlib"]
+
+#[inline]
+pub fn cci_fn() -> uint {
+ 1234
+}
+
+#[inline]
+pub static CCI_STATIC: uint = 2345;
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern crate cci_lib;
+use cci_lib::{cci_fn, CCI_STATIC};
+
+fn call1() -> uint {
+ cci_fn() + CCI_STATIC
+}
+
+mod a {
+ use cci_lib::cci_fn;
+ pub fn call2() -> uint {
+ cci_fn()
+ }
+}
+
+mod b {
+ use cci_lib::CCI_STATIC;
+ pub fn call3() -> uint {
+ CCI_STATIC
+ }
+}
+
+fn main() {
+ call1();
+ a::call2();
+ b::call3();
+}
--- /dev/null
+-include ../tools.mk
+
+# Test that #[inline(always)] functions still get inlined across compilation
+# unit boundaries. Compilation should produce three IR files, with each one
+# containing a definition of the inlined function. Also, the non-#[inline]
+# function should be defined in only one compilation unit.
+
+all:
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ i32\ .*inlined)" -eq "1" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ available_externally\ i32\ .*inlined)" -eq "2" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ i32\ .*normal)" -eq "1" ]
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c declare\ i32\ .*normal)" -eq "2" ]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[inline]
+fn inlined() -> u32 {
+ 1234
+}
+
+fn normal() -> u32 {
+ 2345
+}
+
+mod a {
+ pub fn f() -> u32 {
+ ::inlined() + ::normal()
+ }
+}
+
+mod b {
+ pub fn f() -> u32 {
+ ::inlined() + ::normal()
+ }
+}
+
+fn main() {
+ a::f();
+ b::f();
+}
--- /dev/null
+-include ../tools.mk
+
+# Test that separate compilation actually puts code into separate compilation
+# units. `foo.rs` defines `magic_fn` in three different modules, which should
+# wind up in three different compilation units.
+
+all:
+ $(RUSTC) foo.rs --emit=ir -C codegen-units=3
+ [ "$$(cat "$(TMPDIR)"/foo.?.ll | grep -c define\ .*magic_fn)" -eq "3" ]
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn magic_fn() -> uint {
+ 1234
+}
+
+mod a {
+ pub fn magic_fn() -> uint {
+ 2345
+ }
+}
+
+mod b {
+ pub fn magic_fn() -> uint {
+ 3456
+ }
+}
+
+fn main() { }
fn random_char() -> char {
let mut rng = task_rng();
- // a subset of the XID_start unicode table (ensuring that the
+ // a subset of the XID_start Unicode table (ensuring that the
// compiler doesn't fail with an "unrecognised token" error)
let (lo, hi): (u32, u32) = match rng.gen_range(1u32, 4u32 + 1) {
1 => (0x41, 0x5a),
fn random_char() -> char {
let mut rng = task_rng();
- // a subset of the XID_start unicode table (ensuring that the
+ // a subset of the XID_start Unicode table (ensuring that the
// compiler doesn't fail with an "unrecognised token" error)
let (lo, hi): (u32, u32) = match rng.gen_range(1u32, 4u32 + 1) {
1 => (0x41, 0x5a),
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:lint_group_plugin_test.rs
+// ignore-stage1
+// ignore-pretty
+
+#![feature(phase)]
+
+#[phase(plugin)]
+extern crate lint_group_plugin_test;
+
+fn lintme() { } //~ WARNING item is named 'lintme'
+fn pleaselintme() { } //~ WARNING item is named 'pleaselintme'
+
+#[allow(lint_me)]
+pub fn main() {
+ fn lintme() { }
+
+ fn pleaselintme() { }
+}
pub fn main() {
{
- let _x: &Trait = box Foo as Box<Trait>;
+ let _x: &Trait = &*(box Foo as Box<Trait>);
}
unsafe {
assert!(DROP_RAN);
concat!(1, 2i, 3u, 4f32, 4.0, 'a', true, ()),
"12344.0atrue"
);
+
+ assert!(match "12344.0atrue" {
+ concat!(1, 2i, 3u, 4f32, 4.0, 'a', true, ()) => true,
+ _ => false
+ })
}
static x : [int, ..4] = [1,2,3,4];
static y : &'static [int] = &[1,2,3,4];
+static z : &'static [int, ..4] = &[1,2,3,4];
+static zz : &'static [int] = [1,2,3,4];
pub fn main() {
println!("{:?}", x[1]);
println!("{:?}", y[1]);
+ println!("{:?}", z[1]);
+ println!("{:?}", zz[1]);
assert_eq!(x[1], 2);
assert_eq!(x[3], 4);
assert_eq!(x[3], y[3]);
+ assert_eq!(z[1], 2);
+ assert_eq!(z[3], 4);
+ assert_eq!(z[3], y[3]);
+ assert_eq!(zz[1], 2);
+ assert_eq!(zz[3], 4);
+ assert_eq!(zz[3], y[3]);
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test coercions involving DST and/or raw pointers
+
+struct S;
+trait T {}
+impl T for S {}
+
+pub fn main() {
+ let x: &T = &S;
+ // Test we can convert from &-ptr to *-ptr of trait objects
+ let x: *const T = &S;
+
+ // Test we can convert from &-ptr to *-ptr of struct pointer (not DST)
+ let x: *const S = &S;
+
+ // As above, but mut
+ let x: &mut T = &mut S;
+ let x: *mut T = &mut S;
+
+ let x: *mut S = &mut S;
+
+    // Test we can change the mutability from mut to const.
+ let x: &T = &mut S;
+ let x: *const T = &mut S;
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that a custom deref with a fat pointer return type does not ICE
+
+pub struct Arr {
+ ptr: Box<[uint]>
+}
+
+impl Deref<[uint]> for Arr {
+ fn deref(&self) -> &[uint] {
+ fail!();
+ }
+}
+
+impl DerefMut<[uint]> for Arr {
+ fn deref_mut(&mut self) -> &mut [uint] {
+ &mut *self.ptr
+ }
+}
+
+pub fn foo(arr: &mut Arr) {
+ assert!(arr.len() == 3);
+ let x: &mut [uint] = &mut **arr;
+ assert!(x[0] == 1);
+ assert!(x[1] == 2);
+ assert!(x[2] == 3);
+}
+
+fn main() {
+ let mut a = Arr { ptr: box [1, 2, 3] };
+ foo(&mut a);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test that a custom deref with a fat pointer return type does not ICE
+
+pub struct Arr {
+ ptr: Box<[uint]>
+}
+
+impl Deref<[uint]> for Arr {
+ fn deref(&self) -> &[uint] {
+ &*self.ptr
+ }
+}
+
+pub fn foo(arr: &Arr) {
+ assert!(arr.len() == 3);
+ let x: &[uint] = &**arr;
+ assert!(x[0] == 1);
+ assert!(x[1] == 2);
+ assert!(x[2] == 3);
+}
+
+fn main() {
+ let a = Arr { ptr: box [1, 2, 3] };
+ foo(&a);
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test DST raw pointers
+
+trait Trait {
+ fn foo(&self) -> int;
+}
+
+struct A {
+ f: int
+}
+impl Trait for A {
+ fn foo(&self) -> int {
+ self.f
+ }
+}
+
+struct Foo<Sized? T> {
+ f: T
+}
+
+pub fn main() {
+ // raw trait object
+ let x = A { f: 42 };
+ let z: *const Trait = &x;
+ let r = unsafe {
+ (&*z).foo()
+ };
+ assert!(r == 42);
+
+ // raw DST struct
+ let p = Foo {f: A { f: 42 }};
+ let o: *const Foo<Trait> = &p;
+ let r = unsafe {
+ (&*o).f.foo()
+ };
+ assert!(r == 42);
+
+ // raw slice
+ let a: *const [_] = &[1i, 2, 3];
+ unsafe {
+ let b = (*a)[2];
+ assert!(b == 3);
+ let len = (*a).len();
+ assert!(len == 3);
+ }
+
+ // raw slice with explicit cast
+ let a = &[1i, 2, 3] as *const [_];
+ unsafe {
+ let b = (*a)[2];
+ assert!(b == 3);
+ let len = (*a).len();
+ assert!(len == 3);
+ }
+
+ // raw DST struct with slice
+ let c: *const Foo<[_]> = &Foo {f: [1i, 2, 3]};
+ unsafe {
+ let b = (&*c).f[0];
+ assert!(b == 1);
+ let len = (&*c).f.len();
+ assert!(len == 3);
+ }
+
+ // all of the above with *mut
+ let mut x = A { f: 42 };
+ let z: *mut Trait = &mut x;
+ let r = unsafe {
+ (&*z).foo()
+ };
+ assert!(r == 42);
+
+ let mut p = Foo {f: A { f: 42 }};
+ let o: *mut Foo<Trait> = &mut p;
+ let r = unsafe {
+ (&*o).f.foo()
+ };
+ assert!(r == 42);
+
+ let a: *mut [_] = &mut [1i, 2, 3];
+ unsafe {
+ let b = (*a)[2];
+ assert!(b == 3);
+ let len = (*a).len();
+ assert!(len == 3);
+ }
+
+ let a = &mut [1i, 2, 3] as *mut [_];
+ unsafe {
+ let b = (*a)[2];
+ assert!(b == 3);
+ let len = (*a).len();
+ assert!(len == 3);
+ }
+
+ let c: *mut Foo<[_]> = &mut Foo {f: [1i, 2, 3]};
+ unsafe {
+ let b = (&*c).f[0];
+ assert!(b == 1);
+ let len = (&*c).f.len();
+ assert!(len == 3);
+ }
+}
\ No newline at end of file
}
}
+macro_rules! while_true {
+ ($e: expr) => {
+ // $e shouldn't be able to interact with this 'x
+ 'x: while 1i + 1 == 2 { $e }
+ }
+}
+
macro_rules! run_once {
($e: expr) => {
// ditto
};
assert_eq!(k, 1i);
+ let l: int = {
+ 'x: for _ in range(0i, 1) {
+ // ditto
+ while_true!(break 'x);
+ i += 1;
+ }
+ i + 1
+ };
+ assert_eq!(l, 1i);
+
let n: int = {
'x: for _ in range(0i, 1) {
// ditto
}
}
+macro_rules! while_x {
+ ($e: expr) => {
+ // ditto
+ 'x: while 1i + 1 == 2 { $e }
+ }
+}
+
pub fn main() {
'x: for _ in range(0i, 1) {
// this 'x should refer to the outer loop, lexically
fail!("break doesn't act hygienically inside infinite loop");
}
+ 'x: while 1i + 1 == 2 {
+ while_x!(break 'x);
+ fail!("break doesn't act hygienically inside infinite while loop");
+ }
+
'x: for _ in range(0i, 1) {
// ditto
run_once!(continue 'x);
t!(format!("{:4s}", "a"), "a ");
t!(format!("{:>4s}", "a"), " a");
t!(format!("{:<4s}", "a"), "a ");
+ t!(format!("{:^5s}", "a"), " a ");
+ t!(format!("{:^5s}", "aa"), " aa ");
+ t!(format!("{:^4s}", "a"), " a ");
+ t!(format!("{:^4s}", "aa"), " aa ");
t!(format!("{:.4s}", "a"), "a");
t!(format!("{:4.4s}", "a"), "a ");
t!(format!("{:4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:<4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:>4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
+ t!(format!("{:^4.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:>10.4s}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
t!(format!("{:2.4s}", "aaaaa"), "aaaa");
t!(format!("{:2.4s}", "aaaa"), "aaaa");
// aux-build:issue-15562.rs
-extern crate i = "issue-15562";
+extern crate "issue-15562" as i;
pub fn main() {
extern {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:--test
+
+// This verifies that the test generation doesn't crash when we have
+// no tests - for more information, see PR #16892.
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags:--test
+// ignore-pretty turns out the pretty-printer doesn't handle gensym'd things...
+
+#![feature(globs)]
+
+mod test {
+ use super::*;
+
+ #[test]
+ fn test(){}
+}
// aux-build:issue-16643.rs
-extern crate i = "issue-16643";
+extern crate "issue-16643" as i;
pub fn main() {
i::TreeBuilder::<uint>.process_token();
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+ let x: (int, &[int]) = (2i, &[1i, 2i]);
+ assert_eq!(match x {
+ (0, [_, _]) => 0,
+ (1, _) => 1,
+ (2, [_, _]) => 2,
+ (2, _) => 3,
+ _ => 4
+ }, 2i);
+}
let s: Box<S> = box S { s: 5 };
print_s(&*s);
let t: Box<T> = s as Box<T>;
- print_t(t);
+ print_t(&*t);
}
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
break 'bar;
}
}
+
+ 'foobar: while 1i + 1 == 2 {
+ loop {
+ break 'foobar;
+ }
+ }
}
// around this problem locally by renaming the constant in the `use`
// form to an uppercase identifier that placates the lint.
-#![deny(non_uppercase_pattern_statics)]
+#![deny(non_uppercase_statics)]
pub static A : int = 97;
}
mod m {
+ #[allow(non_uppercase_statics)]
pub static aha : int = 7;
}
fn g(x: Box<Trait>) {
x.printme();
- let y: &Trait = x;
+ let y: &Trait = &*x;
y.printme();
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-test: this has weird linking problems on linux, and it probably needs a
+// ignore-test: this has weird linking problems on Linux, and it probably needs a
// solution along the lines of disabling segmented stacks and/or the
// stack checks.
// aux-build:no_std_crate.rs
extern crate no_std_crate;
-// This is an unfortunate thing to have to do on linux :(
+// This is an unfortunate thing to have to do on Linux :(
#[cfg(target_os = "linux")]
#[doc(hidden)]
pub mod linkhack {
// except according to those terms.
// #11303, #11040:
-// This would previously crash on i686 linux due to abi differences
+// This would previously crash on i686 Linux due to abi differences
// between returning an Option<T> and T, where T is a non nullable
// pointer.
// If we have an enum with two variants such that one is zero sized
fn visit_char(&mut self) -> bool { true }
fn visit_estr_slice(&mut self) -> bool { true }
- // NOTE: remove after snapshot
- #[cfg(stage0)]
- fn visit_estr_fixed(&mut self,
- _sz: uint, _sz2: uint,
- _align: uint) -> bool { true }
fn visit_box(&mut self, _mtbl: uint, _inner: *const TyDesc) -> bool { true }
fn visit_uniq(&mut self, _mtbl: uint, _inner: *const TyDesc) -> bool { true }
assert_eq!(field_invoke2(&s2), 3);
let m : Box<Trait> = make_val();
- assert_eq!(object_invoke1(m), (4,5));
- assert_eq!(object_invoke2(m), 5);
+ assert_eq!(object_invoke1(&*m), (4,5));
+ assert_eq!(object_invoke2(&*m), 5);
// The RefMakerTrait above is pretty strange (i.e. it is strange
// to consume a value of type T and return a &T). Easiest thing
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Test an edge case in region inference: the lifetime of the borrow
+// of `*x` must be extended to at least 'a.
+
+fn foo<'a,'b>(x: &'a &'b mut int) -> &'a int {
+ let y = &*x; // should be inferred to have type &'a &'b mut int...
+
+ // ...because if we inferred, say, &'x &'b mut int where 'x <= 'a,
+ // this reborrow would be illegal:
+ &**y
+}
+
+pub fn main() {
+ /* Just want to know that it compiles. */
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Check that the 'static bound on a proc influences lifetimes of
+// region variables contained within (otherwise, region inference will
+// give `x` a very short lifetime).
+
+static i: uint = 3;
+fn foo(_: proc():'static) {}
+fn read(_: uint) { }
+pub fn main() {
+ let x = &i;
+ foo(proc() {
+ read(*x);
+ });
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This is an example where the older inference algorithm failed. The
+// specifics of why it failed are somewhat, but not entirely, tailored
+// to the algorithm. Ultimately the problem is that when computing the
+// mutual supertype of both sides of the `if` it would be faced with a
+// choice of tightening bounds or unifying variables and it took the
+// wrong path. The new algorithm avoids this problem and hence this
+// example typechecks correctly.
+
+enum ScopeChain<'a> {
+ Link(Scope<'a>),
+ End
+}
+
+type Scope<'a> = &'a ScopeChain<'a>;
+
+struct OuterContext;
+
+struct Context<'a> {
+ foo: &'a OuterContext
+}
+
+impl<'a> Context<'a> {
+ fn foo(&mut self, scope: Scope) {
+ let link = if 1i < 2 {
+ let l = Link(scope);
+ self.take_scope(&l);
+ l
+ } else {
+ Link(scope)
+ };
+ self.take_scope(&link);
+ }
+
+ fn take_scope(&mut self, x: Scope) {
+ }
+}
+
+fn main() { }
unsafe {
static U_RWX: i32 = (libc::S_IRUSR | libc::S_IWUSR | libc::S_IXUSR) as i32;
- let tmpdir = TempDir::new("rename_directory").expect("rename_directory failed");
+ let tmpdir = TempDir::new("rename_directory").ok().expect("rename_directory failed");
let tmpdir = tmpdir.path();
let old_path = tmpdir.join_many(["foo", "bar", "baz"]);
fs::mkdir_recursive(&old_path, io::UserRWX);
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+// aux-build:sepcomp_cci_lib.rs
+
+// Test accessing cross-crate inlined items from multiple compilation units.
+
+extern crate sepcomp_cci_lib;
+use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+
+fn call1() -> uint {
+ cci_fn() + CCI_STATIC
+}
+
+mod a {
+ use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+ pub fn call2() -> uint {
+ cci_fn() + CCI_STATIC
+ }
+}
+
+mod b {
+ use sepcomp_cci_lib::{cci_fn, CCI_STATIC};
+ pub fn call3() -> uint {
+ cci_fn() + CCI_STATIC
+ }
+}
+
+fn main() {
+ assert_eq!(call1(), 1234);
+ assert_eq!(a::call2(), 1234);
+ assert_eq!(b::call3(), 1234);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+// aux-build:sepcomp-extern-lib.rs
+
+// Test accessing external items from multiple compilation units.
+
+#[link(name = "sepcomp-extern-lib")]
+extern {
+ #[allow(ctypes)]
+ fn foo() -> uint;
+}
+
+fn call1() -> uint {
+ unsafe { foo() }
+}
+
+mod a {
+ pub fn call2() -> uint {
+ unsafe { ::foo() }
+ }
+}
+
+mod b {
+ pub fn call3() -> uint {
+ unsafe { ::foo() }
+ }
+}
+
+fn main() {
+ assert_eq!(call1(), 1234);
+ assert_eq!(a::call2(), 1234);
+ assert_eq!(b::call3(), 1234);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test references to items that haven't been translated yet.
+
+// Generate some code in the first compilation unit before declaring any
+// modules. This ensures that the first module doesn't go into the same
+// compilation unit as the top-level module.
+fn pad() -> uint { 0 }
+
+mod b {
+ pub fn three() -> uint {
+ ::one() + ::a::two()
+ }
+}
+
+mod a {
+ pub fn two() -> uint {
+ ::one() + ::one()
+ }
+}
+
+fn one() -> uint {
+ 1
+}
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(a::two(), 2);
+ assert_eq!(b::three(), 3);
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test basic separate compilation functionality. The functions should be able
+// to call each other even though they will be placed in different compilation
+// units.
+
+// Generate some code in the first compilation unit before declaring any
+// modules. This ensures that the first module doesn't go into the same
+// compilation unit as the top-level module.
+fn one() -> uint { 1 }
+
+mod a {
+ pub fn two() -> uint {
+ ::one() + ::one()
+ }
+}
+
+mod b {
+ pub fn three() -> uint {
+ ::one() + ::a::two()
+ }
+}
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(a::two(), 2);
+ assert_eq!(b::three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// aux-build:sepcomp_lib.rs
+
+// Test linking against a library built with -C codegen-units > 1
+
+extern crate sepcomp_lib;
+use sepcomp_lib::a::one;
+use sepcomp_lib::b::two;
+use sepcomp_lib::c::three;
+
+fn main() {
+ assert_eq!(one(), 1);
+ assert_eq!(two(), 2);
+ assert_eq!(three(), 3);
+}
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test references to static items across compilation units.
+
+fn pad() -> uint { 0 }
+
+static ONE: uint = 1;
+
+mod b {
+ // Separate compilation always switches to the LLVM module with the fewest
+ // instructions. Make sure we have some instructions in this module so
+ // that `a` and `b` don't go into the same compilation unit.
+ fn pad() -> uint { 0 }
+
+ pub static THREE: uint = ::ONE + ::a::TWO;
+}
+
+mod a {
+ fn pad() -> uint { 0 }
+
+ pub static TWO: uint = ::ONE + ::ONE;
+}
+
+fn main() {
+ assert_eq!(ONE, 1);
+ assert_eq!(a::TWO, 2);
+ assert_eq!(b::THREE, 3);
+}
+
--- /dev/null
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -C codegen-units=3
+
+// Test unwinding through multiple compilation units.
+
+// According to acrichto, in the distant past `ld -r` (which is used during
+// linking when codegen-units > 1) was known to produce object files with
+// damaged unwinding tables. This may be related to GNU binutils bug #6893
+// ("Partial linking results in corrupt .eh_frame_hdr"), but I'm not certain.
+// In any case, this test should let us know if enabling parallel codegen ever
+// breaks unwinding.
+
+fn pad() -> uint { 0 }
+
+mod a {
+ pub fn f() {
+ fail!();
+ }
+}
+
+mod b {
+ pub fn g() {
+ ::a::f();
+ }
+}
+
+fn main() {
+ std::task::try(proc() { ::b::g() }).unwrap_err();
+}
[1] == (42 as u8)); // '*'
// The Windows tests are wrapped in an extra module for some reason
assert!((m1::m2::where_am_i().as_slice().ends_with("m1::m2")));
+
+ assert!(match (47, "( 2 * 3 ) + 5") {
+ (line!(), stringify!((2*3) + 5)) => true,
+ _ => false
+ })
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-linux #7340 fails on 32-bit linux
+// ignore-linux #7340 fails on 32-bit Linux
// ignore-macos #7340 fails on 32-bit macos
use std::mem;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-linux #7340 fails on 32-bit linux
+// ignore-linux #7340 fails on 32-bit Linux
// ignore-macos #7340 fails on 32-bit macos
use std::mem;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-linux #7340 fails on 32-bit linux
+// ignore-linux #7340 fails on 32-bit Linux
// ignore-macos #7340 fails on 32-bit macos
use std::mem;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-macos osx really doesn't like cycling through large numbers of
+// sockets as calls to connect() will start returning EADDRNOTAVAIL
+// quite quickly and it takes a few seconds for the sockets to get
+// recycled.
+
#![feature(phase)]
#[phase(plugin)]
use native::NativeTaskBuilder;
static N: uint = 8;
-static M: uint = 100;
+static M: uint = 20;
green_start!(main)
let mut a = l.listen().unwrap();
let cnt = Arc::new(atomic::AtomicUint::new(0));
- let (tx, rx) = channel();
+ let (srv_tx, srv_rx) = channel();
+ let (cli_tx, cli_rx) = channel();
for _ in range(0, N) {
let a = a.clone();
let cnt = cnt.clone();
- let tx = tx.clone();
+ let srv_tx = srv_tx.clone();
spawn(proc() {
let mut a = a;
- let mut mycnt = 0u;
loop {
match a.accept() {
Ok(..) => {
- mycnt += 1;
if cnt.fetch_add(1, atomic::SeqCst) == N * M - 1 {
break
}
Err(e) => fail!("{}", e),
}
}
- assert!(mycnt > 0);
- tx.send(());
+ srv_tx.send(());
});
}
for _ in range(0, N) {
- let tx = tx.clone();
+ let cli_tx = cli_tx.clone();
spawn(proc() {
for _ in range(0, M) {
let _s = TcpStream::connect(addr.ip.to_string().as_slice(),
addr.port).unwrap();
}
- tx.send(());
+ cli_tx.send(());
});
}
+ drop((cli_tx, srv_tx));
// wait for senders
- assert_eq!(rx.iter().take(N).count(), N);
+ if cli_rx.iter().take(N).count() != N {
+ a.close_accept().unwrap();
+ fail!("clients failed");
+ }
// wait for one acceptor to die
- let _ = rx.recv();
+ let _ = srv_rx.recv();
// Notify other receivers should die
a.close_accept().unwrap();
// wait for receivers
- assert_eq!(rx.iter().take(N - 1).count(), N - 1);
+ assert_eq!(srv_rx.iter().take(N - 1).count(), N - 1);
// Everything should have been accepted.
assert_eq!(cnt.load(atomic::SeqCst), N * M);
pub fn test_rmdir_recursive_ok() {
let rwx = io::UserRWX;
- let tmpdir = TempDir::new("test").expect("test_rmdir_recursive_ok: \
- couldn't create temp dir");
+ let tmpdir = TempDir::new("test").ok().expect("test_rmdir_recursive_ok: \
+ couldn't create temp dir");
let tmpdir = tmpdir.path();
let root = tmpdir.join("foo");
}
fn in_tmpdir(f: ||) {
- let tmpdir = TempDir::new("test").expect("can't make tmpdir");
+ let tmpdir = TempDir::new("test").ok().expect("can't make tmpdir");
assert!(os::change_dir(tmpdir.path()));
f();
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+
+pub fn main() {
+ let mut i = 100i;
+ 'w: while 1i + 1 == 2 {
+ i -= 1;
+ if i == 95 {
+ break 'w;
+ fail!("Should have broken out of loop");
+ }
+ }
+ assert_eq!(i, 95);
+}