$(filter-out rustc_borrowck, \
$(filter-out rustc_resolve, \
$(filter-out rustc_driver, \
- $(filter-out syntax, $(CRATES))))))))
+ $(filter-out log, \
+ $(filter-out regex, \
+ $(filter-out regex_macros, \
+ $(filter-out getopts, \
+ $(filter-out time, \
+ $(filter-out syntax, $(CRATES)))))))))))))
COMPILER_DOC_CRATES := rustc rustc_trans rustc_borrowck rustc_resolve \
rustc_typeck rustc_driver syntax
$$(RSINPUTS_$(1)) \
$$(RUSTDOC_EXE) \
$$(foreach dep,$$(RUST_DEPS_$(1)), \
- $$(TLIB2_T_$(CFG_BUILD)_H_$(CFG_BUILD))/stamp.$$(dep) \
+ $$(TLIB2_T_$(CFG_BUILD)_H_$(CFG_BUILD))/stamp.$$(dep)) \
+ $$(foreach dep,$$(filter $$(DOC_CRATES), $$(RUST_DEPS_$(1))), \
doc/$$(dep)/)
else
LIB_DOC_DEP_$(1) = $$(CRATEFILE_$(1)) $$(RSINPUTS_$(1))
$(Q)mkdir -p $(BG)
$(BG)RustLexer.class: $(BG) $(SG)RustLexer.g4
- $(Q)$(CFG_ANTLR4) -o $(B)grammar $(SG)RustLexer.g4
+ $(Q)$(CFG_ANTLR4) -o $(BG) $(SG)RustLexer.g4
$(Q)$(CFG_JAVAC) -d $(BG) $(BG)RustLexer.java
check-build-lexer-verifier: $(BG)verify
TEST_LOG_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).log
TEST_OK_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).ok
-# If we're sharding the testsuite between parallel testers,
-# pass this argument along to the compiletest and crate test
-# invocations.
-ifdef TEST_SHARD
- CTEST_TESTARGS += --test-shard=$(TEST_SHARD)
- CRATE_TEST_EXTRA_ARGS += --test-shard=$(TEST_SHARD)
-endif
-
define DEF_TARGET_COMMANDS
ifdef CFG_UNIXY_$(1)
-% The Rust Tasks and Communication Guide
+% The Rust Threads and Communication Guide
-**NOTE** This guide is badly out of date an needs to be rewritten.
+**NOTE** This guide is badly out of date and needs to be rewritten.
# Introduction
relates to the Rust type system, and introduce the fundamental library
abstractions for constructing concurrent programs.
-Tasks provide failure isolation and recovery. When a fatal error occurs in Rust
+Threads provide failure isolation and recovery. When a fatal error occurs in Rust
code as a result of an explicit call to `panic!()`, an assertion failure, or
-another invalid operation, the runtime system destroys the entire task. Unlike
+another invalid operation, the runtime system destroys the entire thread. Unlike
in languages such as Java and C++, there is no way to `catch` an exception.
-Instead, tasks may monitor each other to see if they panic.
+Instead, threads may monitor each other to see if they panic.
-Tasks use Rust's type system to provide strong memory safety guarantees. In
-particular, the type system guarantees that tasks cannot induce a data race
+Threads use Rust's type system to provide strong memory safety guarantees. In
+particular, the type system guarantees that threads cannot induce a data race
from shared mutable state.
# Basics
-At its simplest, creating a task is a matter of calling the `spawn` function
-with a closure argument. `spawn` executes the closure in the new task.
+At its simplest, creating a thread is a matter of calling the `spawn` function
+with a closure argument. `spawn` executes the closure in the new thread.
```{rust,ignore}
-# use std::task::spawn;
+# use std::thread::spawn;
-// Print something profound in a different task using a named function
-fn print_message() { println!("I am running in a different task!"); }
+// Print something profound in a different thread using a named function
+fn print_message() { println!("I am running in a different thread!"); }
spawn(print_message);
// Alternatively, use a `move ||` expression instead of a named function.
// `||` expressions evaluate to an unnamed closure. The `move` keyword
// indicates that the closure should take ownership of any variables it
// touches.
-spawn(move || println!("I am also running in a different task!"));
+spawn(move || println!("I am also running in a different thread!"));
```
-In Rust, a task is not a concept that appears in the language semantics.
+In Rust, a thread is not a concept that appears in the language semantics.
Instead, Rust's type system provides all the tools necessary to implement safe
concurrency: particularly, ownership. The language leaves the implementation
details to the standard library.
closure is limited to capturing `Send`-able data from its environment
(that is, data which is deeply owned). Limiting the closure to `Send`
ensures that `spawn` can safely move the entire closure and all its
-associated state into an entirely different task for execution.
+associated state into an entirely different thread for execution.
```{rust,ignore}
-# use std::task::spawn;
-# fn generate_task_number() -> int { 0 }
+# use std::thread::spawn;
+# fn generate_thread_number() -> int { 0 }
// Generate some state locally
-let child_task_number = generate_task_number();
+let child_thread_number = generate_thread_number();
spawn(move || {
- // Capture it in the remote task. The `move` keyword indicates
- // that this closure should move `child_task_number` into its
+ // Capture it in the remote thread. The `move` keyword indicates
+ // that this closure should move `child_thread_number` into its
// environment, rather than capturing a reference into the
// enclosing stack frame.
- println!("I am child number {}", child_task_number);
+ println!("I am child number {}", child_thread_number);
});
```
## Communication
-Now that we have spawned a new task, it would be nice if we could communicate
+Now that we have spawned a new thread, it would be nice if we could communicate
with it. For this, we use *channels*. A channel is simply a pair of endpoints:
one for sending messages and another for receiving messages.
example of calculating two results concurrently:
```{rust,ignore}
-# use std::task::spawn;
+# use std::thread::spawn;
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
```
-The child task will use the sender to send data to the parent task, which will
+The child thread will use the sender to send data to the parent thread, which will
wait to receive the data on the receiver. The next statement spawns the child
-task.
+thread.
```{rust,ignore}
-# use std::task::spawn;
+# use std::thread::spawn;
# fn some_expensive_computation() -> int { 42 }
# let (tx, rx) = channel();
spawn(move || {
});
```
-Notice that the creation of the task closure transfers `tx` to the child task
+Notice that the creation of the thread closure transfers `tx` to the child thread
implicitly: the closure captures `tx` in its environment. Both `Sender` and
-`Receiver` are sendable types and may be captured into tasks or otherwise
-transferred between them. In the example, the child task runs an expensive
+`Receiver` are sendable types and may be captured into threads or otherwise
+transferred between them. In the example, the child thread runs an expensive
computation, then sends the result over the captured channel.
Finally, the parent continues with some other expensive computation, then waits
communication between a single sender and a single receiver, but multiple
senders cannot use a single `Sender` value, and multiple receivers cannot use a
single `Receiver` value. What if our example needed to compute multiple
-results across a number of tasks? The following program is ill-typed:
+results across a number of threads? The following program is ill-typed:
```{rust,ignore}
# fn some_expensive_computation() -> int { 42 }
let (tx, rx) = channel();
for init_val in range(0u, 3) {
- // Create a new channel handle to distribute to the child task
+ // Create a new channel handle to distribute to the child thread
let child_tx = tx.clone();
spawn(move || {
child_tx.send(some_expensive_computation(init_val));
```
Cloning a `Sender` produces a new handle to the same channel, allowing multiple
-tasks to send data to a single receiver. It upgrades the channel internally in
+threads to send data to a single receiver. It upgrades the channel internally in
order to allow this functionality, which means that channels that are not
cloned can avoid the overhead required to handle multiple senders. But this
fact has no bearing on the channel's usage: the upgrade is transparent.
reference, written with multiple streams, it might look like the example below.
```{rust,ignore}
-# use std::task::spawn;
+# use std::thread::spawn;
-// Create a vector of ports, one for each child task
+// Create a vector of ports, one for each child thread
let rxs = Vec::from_fn(3, |init_val| {
let (tx, rx) = channel();
spawn(move || {
## Sharing without copying: Arc
-To share data between tasks, a first approach would be to only use channel as
+To share data between threads, a first approach would be to only use channel as
we have seen previously. A copy of the data to share would then be made for
-each task. In some cases, this would add up to a significant amount of wasted
+each thread. In some cases, this would add up to a significant amount of wasted
memory and would require copying the same data more than necessary.
To tackle this issue, one can use an Atomically Reference Counted wrapper
(`Arc`) as implemented in the `sync` library of Rust. With an Arc, the data
-will no longer be copied for each task. The Arc acts as a reference to the
+will no longer be copied for each thread. The Arc acts as a reference to the
shared data and only this reference is shared and cloned.
Here is a small example showing how to use Arcs. We wish to run concurrently
-several computations on a single large vector of floats. Each task needs the
+several computations on a single large vector of floats. Each thread needs the
full vector to perform its duty.
```{rust,ignore}
let numbers_arc = Arc::new(numbers);
for num in range(1u, 10) {
- let task_numbers = numbers_arc.clone();
+ let thread_numbers = numbers_arc.clone();
spawn(move || {
- println!("{}-norm = {}", num, pnorm(task_numbers.as_slice(), num));
+ println!("{}-norm = {}", num, pnorm(thread_numbers.as_slice(), num));
});
}
}
# }
```
-and a clone is captured for each task via a procedure. This only copies
-the wrapper and not its contents. Within the task's procedure, the captured
+and a clone is captured for each thread via a procedure. This only copies
+the wrapper and not its contents. Within the thread's procedure, the captured
Arc reference can be used as a shared reference to the underlying vector as
if it were local.
# let numbers=Vec::from_fn(1000000, |_| rand::random::<f64>());
# let numbers_arc = Arc::new(numbers);
# let num = 4;
-let task_numbers = numbers_arc.clone();
+let thread_numbers = numbers_arc.clone();
spawn(move || {
- // Capture task_numbers and use it as if it was the underlying vector
- println!("{}-norm = {}", num, pnorm(task_numbers.as_slice(), num));
+ // Capture thread_numbers and use it as if it was the underlying vector
+ println!("{}-norm = {}", num, pnorm(thread_numbers.as_slice(), num));
});
# }
```
-# Handling task panics
+# Handling thread panics
Rust has a built-in mechanism for raising exceptions. The `panic!()` macro
(which can also be written with an error string as an argument: `panic!(
~reason)`) and the `assert!` construct (which effectively calls `panic!()` if a
-boolean expression is false) are both ways to raise exceptions. When a task
-raises an exception, the task unwinds its stack—running destructors and
+boolean expression is false) are both ways to raise exceptions. When a thread
+raises an exception, the thread unwinds its stack—running destructors and
freeing memory along the way—and then exits. Unlike exceptions in C++,
-exceptions in Rust are unrecoverable within a single task: once a task panics,
+exceptions in Rust are unrecoverable within a single thread: once a thread panics,
there is no way to "catch" the exception.
-While it isn't possible for a task to recover from panicking, tasks may notify
+While it isn't possible for a thread to recover from panicking, threads may notify
each other if they panic. The simplest way of handling a panic is with the
`try` function, which is similar to `spawn`, but immediately blocks and waits
-for the child task to finish. `try` returns a value of type
+for the child thread to finish. `try` returns a value of type
`Result<T, Box<Any + Send>>`. `Result` is an `enum` type with two variants:
`Ok` and `Err`. In this case, because the type arguments to `Result` are `int`
and `()`, callers can pattern-match on a result to check whether it's an `Ok`
Unlike `spawn`, the function spawned using `try` may return a value, which
`try` will dutifully propagate back to the caller in a [`Result`] enum. If the
-child task terminates successfully, `try` will return an `Ok` result; if the
-child task panics, `try` will return an `Error` result.
+child thread terminates successfully, `try` will return an `Ok` result; if the
+child thread panics, `try` will return an `Error` result.
[`Result`]: std/result/index.html
-> *Note:* A panicked task does not currently produce a useful error
+> *Note:* A panicked thread does not currently produce a useful error
> value (`try` always returns `Err(())`). In the
-> future, it may be possible for tasks to intercept the value passed to
+> future, it may be possible for threads to intercept the value passed to
> `panic!()`.
But not all panics are created equal. In some cases you might need to abort
indicates an unrecoverable logic error); in other cases you might want to
contain the panic at a certain boundary (perhaps a small piece of input from
the outside world, which you happen to be processing in parallel, is malformed
-such that the processing task cannot proceed).
+such that the processing thread cannot proceed).
Giving it a type will compile, though:
-```{ignore}
+```{rust}
let x: int;
```
These two will not be equal, even if they have the same values:
-```{rust,ignore}
+```{rust}
+# struct Color(int, int, int);
+# struct Point(int, int, int);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
```
let mut m = [1i, 2i, 3i]; // mut m: [int, ..3]
```
-You can create an array with a given number of elements, all initialized to the
-same value, with `[val, ..N]` syntax. The compiler ensures that arrays are
-always initialized.
-
There's a shorthand for initializing each element of an array to the same
value. In this example, each element of `a` will be initialized to `0i`:
Cargo gets this information from your environment. If it's not correct, go ahead
and fix that.
-Finally, Cargo generated a hello, world for us. Check out `src/main.rs`:
+Finally, Cargo generated a "Hello, world!" for us. Check out `src/main.rs`:
```{rust}
fn main() {
failures:
---- foo stdout ----
- task 'foo' failed at 'assertion failed: false', /home/you/projects/testing/tests/lib.rs:3
+ thread 'foo' failed at 'assertion failed: false', /home/you/projects/testing/tests/lib.rs:3
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured
-task '<main>' failed at 'Some tests failed', /home/you/src/rust/src/libtest/lib.rs:243
+thread '<main>' failed at 'Some tests failed', /home/you/src/rust/src/libtest/lib.rs:243
```
Lots of output! Let's break this down:
failures:
---- foo stdout ----
- task 'foo' failed at 'assertion failed: false', /home/you/projects/testing/tests/lib.rs:3
+ thread 'foo' failed at 'assertion failed: false', /home/you/projects/testing/tests/lib.rs:3
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured
-task '<main>' failed at 'Some tests failed', /home/you/src/rust/src/libtest/lib.rs:243
+thread '<main>' failed at 'Some tests failed', /home/you/src/rust/src/libtest/lib.rs:243
```
After all the tests run, Rust will show us any output from our failed tests.
it uses. Ordinary closures, in contrast, just create a reference into
the enclosing stack frame. Moving closures are most useful with Rust's
concurrency features, and so we'll just leave it at this for
-now. We'll talk about them more in the "Tasks" section of the guide.
+now. We'll talk about them more in the "Threads" section of the guide.
## Accepting closures as arguments
We've seen this before. We make a closure that takes an integer, and returns
its square.
-```{rust,ignore}
+```{rust}
+# fn twice(x: int, f: |int| -> int) -> int { f(x) + f(x) }
+# let square = |x: int| { x * x };
twice(5i, square); // evaluates to 50
```
hence 'statically dispatched'. The downside is that we have two copies of
the same function, so our binary is a little bit larger.
-# Tasks
-
-**NOTE**: this section is currently out of date and will be rewritten soon.
+# Threads
Concurrency and parallelism are topics that are of increasing interest to a
broad subsection of software developers. Modern computers are often multi-core,
issues that programmers have with concurrency. Many concurrency errors that are
runtime errors in other languages are compile-time errors in Rust.
-Rust's concurrency primitive is called a **task**. Tasks are similar to
-threads, and do not share memory in an unsafe manner, preferring message
-passing to communicate. It's worth noting that tasks are implemented as a
-library, and not part of the language. This means that in the future, other
-concurrency libraries can be written for Rust to help in specific scenarios.
-Here's an example of creating a task:
+Rust's concurrency primitive is called a **thread**. It's worth noting that
+threads are implemented as a library, and not part of the language. This means
+that in the future, other concurrency libraries can be written for Rust to help
+in specific scenarios. Here's an example of creating a thread:
```{rust,ignore}
spawn(move || {
- println!("Hello from a task!");
+ println!("Hello from a thread!");
});
```
The `spawn` function takes a closure as an argument, and runs that
-closure in a new task. Typically, you will want to use a moving
+closure in a new thread. Typically, you will want to use a moving
closure, so that the closure takes ownership of any variables that it
touches. This implies that those variables are not usable from the
-parent task after the child task is spawned:
+parent thread after the child thread is spawned:
```{rust,ignore}
let mut x = vec![1i, 2i, 3i];
other languages would let us do this, but it's not safe to do
so. Rust's borrow checker catches the error.
-If tasks were only able to capture these values, they wouldn't be very useful.
-Luckily, tasks can communicate with each other through **channel**s. Channels
+If threads were only able to capture these values, they wouldn't be very useful.
+Luckily, threads can communicate with each other through **channel**s. Channels
work like this:
```{rust,ignore}
let (tx, rx) = channel();
spawn(move || {
- tx.send("Hello from a task!".to_string());
+ tx.send("Hello from a thread!".to_string());
});
let message = rx.recv();
method blocks until it gets a message. There's a similar method, `.try_recv()`,
which returns an `Result<T, TryRecvError>` and does not block.
-If you want to send messages to the task as well, create two channels!
+If you want to send messages to the thread as well, create two channels!
```{rust,ignore}
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(move || {
- tx1.send("Hello from a task!".to_string());
+ tx1.send("Hello from a thread!".to_string());
let message = rx2.recv();
println!("{}", message);
});
tx2.send("Goodbye from main!".to_string());
```
-The closure has one sending end and one receiving end, and the main
-task has one of each as well. Now they can talk back and forth in
-whatever way they wish.
+The closure has one sending end and one receiving end, and the main thread has
+one of each as well. Now they can talk back and forth in whatever way they
+wish.
Notice as well that because `Sender` and `Receiver` are generic, while you can
pass any kind of information through the channel, the ends are strongly typed.
## Success and failure
-Tasks don't always succeed, they can also panic. A task that wishes to panic
+Threads don't always succeed, they can also panic. A thread that wishes to panic
can call the `panic!` macro, passing a message:
```{rust,ignore}
});
```
-If a task panics, it is not possible for it to recover. However, it can
-notify other tasks that it has panicked. We can do this with `task::try`:
+If a thread panics, it is not possible for it to recover. However, it can
notify other threads that it has panicked. We can do this with `thread::try`:
```{rust,ignore}
-use std::task;
+use std::thread;
use std::rand;
-let result = task::try(move || {
+let result = thread::try(move || {
if rand::random() {
println!("OK");
} else {
});
```
-This task will randomly panic or succeed. `task::try` returns a `Result`
+This thread will randomly panic or succeed. `thread::try` returns a `Result`
type, so we can handle the response like any other computation that may
fail.
* [Strings](guide-strings.html)
* [Pointers](guide-pointers.html)
* [Crates and modules](guide-crates.html)
-* [Tasks and Communication](guide-tasks.html)
+* [Threads and Communication](guide-threads.html)
* [Error Handling](guide-error-handling.html)
* [Foreign Function Interface](guide-ffi.html)
* [Writing Unsafe and Low-Level Code](guide-unsafe.html)
for i in range(0u, 3u) {
let number = numbers.clone();
Thread::spawn(move || {
- let mut array = number.lock();
+ let mut array = number.lock().unwrap();
(*array)[i] += 1;
// Load the `vec` module from `vec.rs`
mod vec;
-mod task {
- // Load the `local_data` module from `task/local_data.rs`
+mod thread {
+ // Load the `local_data` module from `thread/local_data.rs`
mod local_data;
}
```
influenced with the `path` attribute.
```{.ignore}
-#[path = "task_files"]
-mod task {
- // Load the `local_data` module from `task_files/tls.rs`
+#[path = "thread_files"]
+mod thread {
+ // Load the `local_data` module from `thread_files/tls.rs`
#[path = "tls.rs"]
mod local_data;
}
Unsafe blocks are used to wrap foreign libraries, make direct use of hardware
or implement features not directly present in the language. For example, Rust
provides the language features necessary to implement memory-safe concurrency
-in the language but the implementation of tasks and message passing is in the
+in the language but the implementation of threads and message passing is in the
standard library.
Rust's type system is a conservative approximation of the dynamic safety
hard to run into, and this is obviously a very large source of race conditions
or other bugs. For this reason, an `unsafe` block is required when either
reading or writing a mutable static variable. Care should be taken to ensure
-that modifications to a mutable static are safe with respect to other tasks
+that modifications to a mutable static are safe with respect to other threads
running in the same process.
Mutable statics are still very useful, however. They can be used with C
* `drop`
: Have destructors.
* `send`
- : Able to be sent across task boundaries.
+ : Able to be sent across thread boundaries.
* `sized`
: Has a size known at compile time.
* `sync`
- : Able to be safely shared between tasks when aliased.
+ : Able to be safely shared between threads when aliased.
#### Operators
LLVM's implementation which works in concert with the kernel
loader and dynamic linker. This is not necessarily available
on all platforms, and usage of it is discouraged (rust
- focuses more on task-local data instead of thread-local
+ focuses more on library-based thread-local data instead of LLVM's
data).
* `trace_macros` - Allows use of the `trace_macros` macro, which is a nasty
be assigned to.
Indices are zero-based, and may be of any integral type. Vector access is
-bounds-checked at run-time. When the check fails, it will put the task in a
+bounds-checked at run-time. When the check fails, it will put the thread in a
_panicked state_.
```{should-fail}
components of the type. The kinds are:
* `Send`
- : Types of this kind can be safely sent between tasks.
+ : Types of this kind can be safely sent between threads.
This kind includes scalars, boxes, procs, and
structural types containing only other owned types.
All `Send` types are `'static`.
# Memory and concurrency models
-Rust has a memory model centered around concurrently-executing _tasks_. Thus
+Rust has a memory model centered around concurrently-executing _threads_. Thus
its memory model and its concurrency model are best discussed simultaneously,
as parts of each only make sense when considered from the perspective of the
other.
When reading about the memory model, keep in mind that it is partitioned in
-order to support tasks; and when reading about tasks, keep in mind that their
+order to support threads; and when reading about threads, keep in mind that their
isolation and communication mechanisms are only possible due to the ownership
and lifetime semantics of the memory model.
## Memory model
A Rust program's memory consists of a static set of *items*, a set of
-[tasks](#tasks) each with its own *stack*, and a *heap*. Immutable portions of
-the heap may be shared between tasks, mutable portions may not.
+[threads](#threads) each with its own *stack*, and a *heap*. Immutable portions of
+the heap may be shared between threads, mutable portions may not.
Allocations in the stack consist of *slots*, and allocations in the heap
consist of *boxes*.
value calculated at compile-time and stored uniquely in the memory image of the
rust process. Items are neither dynamically allocated nor freed.
-A task's _stack_ consists of activation frames automatically allocated on entry
-to each function as the task executes. A stack allocation is reclaimed when
+A thread's _stack_ consists of activation frames automatically allocated on entry
+to each function as the thread executes. A stack allocation is reclaimed when
control leaves the frame containing it.
The _heap_ is a general term that describes boxes. The lifetime of an
### Memory ownership
-A task owns all memory it can *safely* reach through local variables, as well
+A thread owns all memory it can *safely* reach through local variables, as well
as boxes and references.
-When a task sends a value that has the `Send` trait to another task, it loses
+When a thread sends a value that has the `Send` trait to another thread, it loses
ownership of the value sent and can no longer refer to it. This is statically
guaranteed by the combined use of "move semantics", and the compiler-checked
_meaning_ of the `Send` trait: it is only instantiated for (transitively)
When a stack frame is exited, its local allocations are all released, and its
references to boxes are dropped.
-When a task finishes, its stack is necessarily empty and it therefore has no
+When a thread finishes, its stack is necessarily empty and it therefore has no
references to any boxes; the remainder of its heap is immediately freed.
### Memory slots
-A task's stack contains slots.
+A thread's stack contains slots.
A _slot_ is a component of a stack frame, either a function parameter, a
[temporary](#lvalues,-rvalues-and-temporaries), or a local variable.
// attempting to use `x` will result in an error here
```
-## Tasks
+## Threads
-An executing Rust program consists of a tree of tasks. A Rust _task_ consists
-of an entry function, a stack, a set of outgoing communication channels and
-incoming communication ports, and ownership of some portion of the heap of a
-single operating-system process.
+Rust's primary concurrency mechanism is called a **thread**.
-### Communication between tasks
+### Communication between threads
-Rust tasks are isolated and generally unable to interfere with one another's
+Rust threads are isolated and generally unable to interfere with one another's
memory directly, except through [`unsafe` code](#unsafe-functions). All
-contact between tasks is mediated by safe forms of ownership transfer, and data
+contact between threads is mediated by safe forms of ownership transfer, and data
races on memory are prohibited by the type system.
-When you wish to send data between tasks, the values are restricted to the
+When you wish to send data between threads, the values are restricted to the
[`Send` type-kind](#type-kinds). Restricting communication interfaces to this
-kind ensures that no references move between tasks. Thus access to an entire
+kind ensures that no references move between threads. Thus access to an entire
data structure can be mediated through its owning "root" value; no further
locking or copying is required to avoid data races within the substructure of
such a value.
-### Task lifecycle
+### Thread lifecycle
-The _lifecycle_ of a task consists of a finite set of states and events that
-cause transitions between the states. The lifecycle states of a task are:
+The _lifecycle_ of a thread consists of a finite set of states and events that
+cause transitions between the states. The lifecycle states of a thread are:
* running
* blocked
* panicked
* dead
-A task begins its lifecycle — once it has been spawned — in the
+A thread begins its lifecycle — once it has been spawned — in the
*running* state. In this state it executes the statements of its entry
function, and any functions called by the entry function.
-A task may transition from the *running* state to the *blocked* state any time
+A thread may transition from the *running* state to the *blocked* state any time
it makes a blocking communication call. When the call can be completed —
when a message arrives at a sender, or a buffer opens to receive a message
-— then the blocked task will unblock and transition back to *running*.
+— then the blocked thread will unblock and transition back to *running*.
-A task may transition to the *panicked* state at any time, due being killed by
+A thread may transition to the *panicked* state at any time, due to being killed by
some external event or internally, from the evaluation of a `panic!()` macro.
-Once *panicking*, a task unwinds its stack and transitions to the *dead* state.
-Unwinding the stack of a task is done by the task itself, on its own control
+Once *panicking*, a thread unwinds its stack and transitions to the *dead* state.
+Unwinding the stack of a thread is done by the thread itself, on its own control
stack. If a value with a destructor is freed during unwinding, the code for the
-destructor is run, also on the task's control stack. Running the destructor
+destructor is run, also on the thread's control stack. Running the destructor
code causes a temporary transition to a *running* state, and allows the
-destructor code to cause any subsequent state transitions. The original task
+destructor code to cause any subsequent state transitions. The original thread
of unwinding and panicking thereby may suspend temporarily, and may involve
(recursive) unwinding of the stack of a failed destructor. Nonetheless, the
outermost unwinding activity will continue until the stack is unwound and the
-task transitions to the *dead* state. There is no way to "recover" from task
-panics. Once a task has temporarily suspended its unwinding in the *panicking*
+thread transitions to the *dead* state. There is no way to "recover" from thread
+panics. Once a thread has temporarily suspended its unwinding in the *panicking*
state, a panic occurring from within this destructor results in *hard* panic.
A hard panic currently results in the process aborting.
-A task in the *dead* state cannot transition to other states; it exists only to
-have its termination status inspected by other tasks, and/or to await
+A thread in the *dead* state cannot transition to other states; it exists only to
+have its termination status inspected by other threads, and/or to await
reclamation when the last reference to it drops.
# Runtime services, linkage and debugging
The Rust _runtime_ is a relatively compact collection of Rust code that
-provides fundamental services and datatypes to all Rust tasks at run-time. It
+provides fundamental services and datatypes to all Rust threads at run-time. It
is smaller and simpler than many modern language runtimes. It is tightly
-integrated into the language's execution model of memory, tasks, communication
+integrated into the language's execution model of memory, threads, communication
and logging.
### Memory allocation
needed. The default implementation of the service-provider interface consists
of the C runtime functions `malloc` and `free`.
-The runtime memory-management system, in turn, supplies Rust tasks with
+The runtime memory-management system, in turn, supplies Rust threads with
facilities for allocating releasing stacks, as well as allocating and freeing
heap data.
The runtime provides C and Rust code to assist with various built-in types,
such as arrays, strings, and the low level communication system (ports,
-channels, tasks).
+channels, threads).
Support for other built-in types such as simple types, tuples and enums is
open-coded by the Rust compiler.
-### Task scheduling and communication
+### Thread scheduling and communication
-The runtime provides code to manage inter-task communication. This includes
-the system of task-lifecycle state transitions depending on the contents of
+The runtime provides code to manage inter-thread communication. This includes
+the system of thread-lifecycle state transitions depending on the contents of
queues, as well as code to copy values between queues and their recipients and
to serialize values for transmission over operating-system inter-process
communication facilities.
<context attribute="CharEscape" lineEndContext="#pop" name="CharEscape">
<AnyChar String="nrt\'"" attribute="CharEscape" context="#pop"/>
<RegExpr String="x[0-9a-fA-F]{2}" attribute="CharEscape" context="#pop"/>
+ <RegExpr String="u\{[0-9a-fA-F]{1,6}\}" attribute="CharEscape" context="#pop"/>
<RegExpr String="u[0-9a-fA-F]{4}" attribute="CharEscape" context="#pop"/>
<RegExpr String="U[0-9a-fA-F]{8}" attribute="CharEscape" context="#pop"/>
<RegExpr String="." attribute="Error" context="#pop"/>
<Detect2Chars char="*" char1="/" attribute="Comment" context="#pop" endRegion="Comment"/>
</context>
</contexts>
- <itemDatas>
+ <itemDatas>
<itemData name="Normal Text" defStyleNum="dsNormal"/>
<itemData name="Keyword" defStyleNum="dsKeyword" color="#770088" bold="1"/>
<itemData name="Self" defStyleNum="dsKeyword" color="#FF0000" bold="1"/>
done
}
+create_tmp_dir() {
+ local TMP_DIR=./rustup-tmp-install
+
+ rm -Rf "${TMP_DIR}"
+ need_ok "failed to remove temporary installation directory"
+
+ mkdir -p "${TMP_DIR}"
+    need_ok "failed to create temporary installation directory"
+
+ echo $TMP_DIR
+}
+
probe_need CFG_CURL curl
probe_need CFG_TAR tar
probe_need CFG_FILE file
CFG_INSTALL_FLAGS="${CFG_INSTALL_FLAGS} --prefix=${CFG_PREFIX}"
fi
-CFG_TMP_DIR="./rustup-tmp-install"
+CFG_TMP_DIR=$(mktemp -d 2>/dev/null \
+ || mktemp -d -t 'rustup-tmp-install' 2>/dev/null \
+ || create_tmp_dir)
RUST_URL="https://static.rust-lang.org/dist"
RUST_PACKAGE_NAME=rust-nightly
msg "Downloading ${remote_tarball} to ${local_tarball}"
- mkdir -p "${CFG_TMP_DIR}"
- need_ok "failed to create create download directory"
-
"${CFG_CURL}" -f -o "${local_tarball}" "${remote_tarball}"
if [ $? -ne 0 ]
then
"SHL" => token::BinOp(token::Shl),
"LBRACE" => token::OpenDelim(token::Brace),
"RARROW" => token::RArrow,
- "LIT_STR" => token::Literal(token::Str_(Name(0))),
+ "LIT_STR" => token::Literal(token::Str_(Name(0)), None),
"DOTDOT" => token::DotDot,
"MOD_SEP" => token::ModSep,
"DOTDOTDOT" => token::DotDotDot,
"ANDAND" => token::AndAnd,
"AT" => token::At,
"LBRACKET" => token::OpenDelim(token::Bracket),
- "LIT_STR_RAW" => token::Literal(token::StrRaw(Name(0), 0)),
+ "LIT_STR_RAW" => token::Literal(token::StrRaw(Name(0), 0), None),
"RPAREN" => token::CloseDelim(token::Paren),
"SLASH" => token::BinOp(token::Slash),
"COMMA" => token::Comma,
"TILDE" => token::Tilde,
"IDENT" => id(),
"PLUS" => token::BinOp(token::Plus),
- "LIT_CHAR" => token::Literal(token::Char(Name(0))),
- "LIT_BYTE" => token::Literal(token::Byte(Name(0))),
+ "LIT_CHAR" => token::Literal(token::Char(Name(0)), None),
+ "LIT_BYTE" => token::Literal(token::Byte(Name(0)), None),
"EQ" => token::Eq,
"RBRACKET" => token::CloseDelim(token::Bracket),
"COMMENT" => token::Comment,
"BINOP" => token::BinOp(token::Plus),
"POUND" => token::Pound,
"OROR" => token::OrOr,
- "LIT_INTEGER" => token::Literal(token::Integer(Name(0))),
+ "LIT_INTEGER" => token::Literal(token::Integer(Name(0)), None),
"BINOPEQ" => token::BinOpEq(token::Plus),
- "LIT_FLOAT" => token::Literal(token::Float(Name(0))),
+ "LIT_FLOAT" => token::Literal(token::Float(Name(0)), None),
"WHITESPACE" => token::Whitespace,
"UNDERSCORE" => token::Underscore,
"MINUS" => token::BinOp(token::Minus),
"OR" => token::BinOp(token::Or),
"GT" => token::Gt,
"LE" => token::Le,
- "LIT_BINARY" => token::Literal(token::Binary(Name(0))),
- "LIT_BINARY_RAW" => token::Literal(token::BinaryRaw(Name(0), 0)),
+ "LIT_BINARY" => token::Literal(token::Binary(Name(0)), None),
+ "LIT_BINARY_RAW" => token::Literal(token::BinaryRaw(Name(0), 0), None),
_ => continue,
};
token::BinOp(..) => token::BinOp(str_to_binop(content)),
token::BinOpEq(..) => token::BinOpEq(str_to_binop(content.slice_to(
content.len() - 1))),
- token::Literal(token::Str_(..)) => token::Literal(token::Str_(fix(content))),
- token::Literal(token::StrRaw(..)) => token::Literal(token::StrRaw(fix(content),
- count(content))),
- token::Literal(token::Char(..)) => token::Literal(token::Char(fixchar(content))),
- token::Literal(token::Byte(..)) => token::Literal(token::Byte(fixchar(content))),
+ token::Literal(token::Str_(..), n) => token::Literal(token::Str_(fix(content)), n),
+ token::Literal(token::StrRaw(..), n) => token::Literal(token::StrRaw(fix(content),
+ count(content)), n),
+ token::Literal(token::Char(..), n) => token::Literal(token::Char(fixchar(content)), n),
+ token::Literal(token::Byte(..), n) => token::Literal(token::Byte(fixchar(content)), n),
token::DocComment(..) => token::DocComment(nm),
- token::Literal(token::Integer(..)) => token::Literal(token::Integer(nm)),
- token::Literal(token::Float(..)) => token::Literal(token::Float(nm)),
- token::Literal(token::Binary(..)) => token::Literal(token::Binary(nm)),
- token::Literal(token::BinaryRaw(..)) => token::Literal(token::BinaryRaw(fix(content),
- count(content))),
+ token::Literal(token::Integer(..), n) => token::Literal(token::Integer(nm), n),
+ token::Literal(token::Float(..), n) => token::Literal(token::Float(nm), n),
+ token::Literal(token::Binary(..), n) => token::Literal(token::Binary(nm), n),
+ token::Literal(token::BinaryRaw(..), n) => token::Literal(token::BinaryRaw(fix(content),
+ count(content)), n),
token::Ident(..) => token::Ident(ast::Ident { name: nm, ctxt: 0 },
token::ModName),
token::Lifetime(..) => token::Lifetime(ast::Ident { name: nm, ctxt: 0 }),
};
let sp = syntax::codemap::Span {
- lo: syntax::codemap::BytePos(from_str::<u32>(start).unwrap() - offset),
- hi: syntax::codemap::BytePos(from_str::<u32>(end).unwrap() + 1),
+ lo: syntax::codemap::BytePos(start.parse::<u32>().unwrap() - offset),
+ hi: syntax::codemap::BytePos(end.parse::<u32>().unwrap() + 1),
expn_id: syntax::codemap::NO_EXPANSION
};
let token_map = parse_token_list(token_file.read_to_string().unwrap().as_slice());
let mut stdin = std::io::stdin();
- let mut antlr_tokens = stdin.lines().map(|l| parse_antlr_token(l.unwrap().as_slice().trim(),
+ let mut lock = stdin.lock();
+ let lines = lock.lines();
+ let mut antlr_tokens = lines.map(|l| parse_antlr_token(l.unwrap().as_slice().trim(),
&token_map));
let code = File::open(&Path::new(args[1].as_slice())).unwrap().read_to_string().unwrap();
ref c => assert!(c == &antlr_tok.tok, "{} is not {}", rustc_tok, antlr_tok)
}
)
- )
+ );
matches!(
- token::Literal(token::Byte(..)),
- token::Literal(token::Char(..)),
- token::Literal(token::Integer(..)),
- token::Literal(token::Float(..)),
- token::Literal(token::Str_(..)),
- token::Literal(token::StrRaw(..)),
- token::Literal(token::Binary(..)),
- token::Literal(token::BinaryRaw(..)),
+ token::Literal(token::Byte(..), _),
+ token::Literal(token::Char(..), _),
+ token::Literal(token::Integer(..), _),
+ token::Literal(token::Float(..), _),
+ token::Literal(token::Str_(..), _),
+ token::Literal(token::StrRaw(..), _),
+ token::Literal(token::Binary(..), _),
+ token::Literal(token::BinaryRaw(..), _),
token::Ident(..),
token::Lifetime(..),
token::Interpolated(..),
//! let five = five.clone();
//!
//! Thread::spawn(move || {
-//! let mut number = five.lock();
+//! let mut number = five.lock().unwrap();
//!
//! *number += 1;
//!
use core::kinds::{Sync, Send};
use core::mem::{min_align_of, size_of, drop};
use core::mem;
+use core::nonzero::NonZero;
use core::ops::{Drop, Deref};
use core::option::Option;
use core::option::Option::{Some, None};
-use core::ptr::RawPtr;
-use core::ptr;
+use core::ptr::{mod, PtrExt};
use heap::deallocate;
/// An atomically reference counted wrapper for shared state.
pub struct Arc<T> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
- _ptr: *mut ArcInner<T>,
+ _ptr: NonZero<*mut ArcInner<T>>,
}
unsafe impl<T: Sync + Send> Send for Arc<T> { }
pub struct Weak<T> {
// FIXME #12808: strange name to try to avoid interfering with
// field accesses of the contained type via Deref
- _ptr: *mut ArcInner<T>,
+ _ptr: NonZero<*mut ArcInner<T>>,
}
unsafe impl<T: Sync + Send> Send for Weak<T> { }
weak: atomic::AtomicUint::new(1),
data: data,
};
- Arc { _ptr: unsafe { mem::transmute(x) } }
+ Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } }
}
/// Downgrades the `Arc<T>` to a `Weak<T>` reference.
// pointer is valid. Furthermore, we know that the `ArcInner` structure itself is `Sync`
// because the inner data is `Sync` as well, so we're ok loaning out an immutable pointer
// to these contents.
- unsafe { &*self._ptr }
+ unsafe { &**self._ptr }
}
}
// pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
// this point, and we required the Arc itself to be `mut`, so we're returning the only
// possible reference to the inner data.
- let inner = unsafe { &mut *self._ptr };
+ let inner = unsafe { &mut **self._ptr };
&mut inner.data
}
}
fn drop(&mut self) {
// This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
// it is guaranteed to be zeroed after the first if it's run more than once)
- if self._ptr.is_null() { return }
+ let ptr = *self._ptr;
+ if ptr.is_null() { return }
// Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
// unless we are going to delete the object. This same logic applies to the below
if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
atomic::fence(atomic::Acquire);
- unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
+ unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
#[inline]
fn inner(&self) -> &ArcInner<T> {
// See comments above for why this is "safe"
- unsafe { &*self._ptr }
+ unsafe { &**self._ptr }
}
}
/// } // implicit drop
/// ```
fn drop(&mut self) {
+ let ptr = *self._ptr;
+
// see comments above for why this check is here
- if self._ptr.is_null() { return }
+ if ptr.is_null() { return }
// If we find out that we were the last weak pointer, then its time to deallocate the data
// entirely. See the discussion in Arc::drop() about the memory orderings
if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
atomic::fence(atomic::Acquire);
- unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
+ unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
min_align_of::<ArcInner<T>>()) }
}
}
let a = Arc::new(Cycle { x: Mutex::new(None) });
let b = a.clone().downgrade();
- *a.x.lock() = Some(b);
+ *a.x.lock().unwrap() = Some(b);
// hopefully we don't double-free (or leak)...
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use core::ptr::RawPtr;
+use core::ptr::PtrExt;
// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
mod test {
extern crate test;
use self::test::Bencher;
- use core::ptr::RawPtr;
+ use core::ptr::PtrExt;
use heap;
#[test]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Task-local reference-counted boxes (the `Rc<T>` type).
+//! Thread-local reference-counted boxes (the `Rc<T>` type).
//!
//! The `Rc<T>` type provides shared ownership of an immutable value. Destruction is deterministic,
//! and will occur as soon as the last owner is gone. It is marked as non-sendable because it
use core::hash::{mod, Hash};
use core::kinds::marker;
use core::mem::{transmute, min_align_of, size_of, forget};
+use core::nonzero::NonZero;
use core::ops::{Deref, Drop};
use core::option::Option;
use core::option::Option::{Some, None};
-use core::ptr;
-use core::ptr::RawPtr;
+use core::ptr::{mod, PtrExt};
use core::result::Result;
use core::result::Result::{Ok, Err};
pub struct Rc<T> {
// FIXME #12808: strange names to try to avoid interfering with field accesses of the contained
// type via Deref
- _ptr: *mut RcBox<T>,
+ _ptr: NonZero<*mut RcBox<T>>,
_nosend: marker::NoSend,
_noshare: marker::NoSync
}
// there is an implicit weak pointer owned by all the strong pointers, which
// ensures that the weak destructor never frees the allocation while the strong
// destructor is running, even if the weak pointer is stored inside the strong one.
- _ptr: transmute(box RcBox {
+ _ptr: NonZero::new(transmute(box RcBox {
value: value,
strong: Cell::new(1),
weak: Cell::new(1)
- }),
+ })),
_nosend: marker::NoSend,
_noshare: marker::NoSync
}
let val = ptr::read(&*rc); // copy the contained object
// destruct the box and skip our Drop
// we can ignore the refcounts because we know we're unique
- deallocate(rc._ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>());
forget(rc);
Ok(val)
#[experimental]
pub fn get_mut<'a, T>(rc: &'a mut Rc<T>) -> Option<&'a mut T> {
if is_unique(rc) {
- let inner = unsafe { &mut *rc._ptr };
+ let inner = unsafe { &mut **rc._ptr };
Some(&mut inner.value)
} else {
None
// pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
// this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only
// possible reference to the inner value.
- let inner = unsafe { &mut *self._ptr };
+ let inner = unsafe { &mut **self._ptr };
&mut inner.value
}
}
/// ```
fn drop(&mut self) {
unsafe {
- if !self._ptr.is_null() {
+ let ptr = *self._ptr;
+ if !ptr.is_null() {
self.dec_strong();
if self.strong() == 0 {
ptr::read(&**self); // destroy the contained object
self.dec_weak();
if self.weak() == 0 {
- deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
pub struct Weak<T> {
// FIXME #12808: strange names to try to avoid interfering with
// field accesses of the contained type via Deref
- _ptr: *mut RcBox<T>,
+ _ptr: NonZero<*mut RcBox<T>>,
_nosend: marker::NoSend,
_noshare: marker::NoSync
}
/// ```
fn drop(&mut self) {
unsafe {
- if !self._ptr.is_null() {
+ let ptr = *self._ptr;
+ if !ptr.is_null() {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all the strong pointers
// have disappeared.
if self.weak() == 0 {
- deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
+ deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
min_align_of::<RcBox<T>>())
}
}
impl<T> RcBoxPtr<T> for Rc<T> {
#[inline(always)]
- fn inner(&self) -> &RcBox<T> { unsafe { &(*self._ptr) } }
+ fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } }
}
impl<T> RcBoxPtr<T> for Weak<T> {
#[inline(always)]
- fn inner(&self) -> &RcBox<T> { unsafe { &(*self._ptr) } }
+ fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } }
}
#[cfg(test)]
/// Convert the `Rawlink` into an Option value
fn resolve_immut<'a>(&self) -> Option<&'a T> {
unsafe {
- self.p.as_ref()
+ mem::transmute(self.p.as_ref())
}
}
impl<'a, A> ExactSizeIterator<&'a mut A> for IterMut<'a, A> {}
/// Allows mutating a `DList` while iterating.
+#[deprecated = "Trait is deprecated, use inherent methods on the iterator instead"]
pub trait ListInsertion<A> {
/// Inserts `elt` just after to the element most recently returned by
/// `.next()`
}
}
-impl<'a, A> ListInsertion<A> for IterMut<'a, A> {
+impl<'a, A> IterMut<'a, A> {
+ /// Inserts `elt` just after the element most recently returned by `.next()`.
+ /// The inserted element does not appear in the iteration.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::collections::DList;
+ ///
+ /// let mut list: DList<int> = vec![1, 3, 4].into_iter().collect();
+ ///
+ /// {
+ /// let mut it = list.iter_mut();
+ /// assert_eq!(it.next().unwrap(), &1);
+ /// // insert `2` after `1`
+ /// it.insert_next(2);
+ /// }
+ /// {
+ /// let vec: Vec<int> = list.into_iter().collect();
+ /// assert_eq!(vec, vec![1i, 2, 3, 4]);
+ /// }
+ /// ```
#[inline]
- fn insert_next(&mut self, elt: A) {
+ pub fn insert_next(&mut self, elt: A) {
self.insert_next_node(box Node::new(elt))
}
+ /// Provides a reference to the next element, without changing the iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::collections::DList;
+ ///
+ /// let mut list: DList<int> = vec![1, 2, 3].into_iter().collect();
+ ///
+ /// let mut it = list.iter_mut();
+ /// assert_eq!(it.next().unwrap(), &1);
+ /// assert_eq!(it.peek_next().unwrap(), &2);
+ /// // We just peeked at 2, so it was not consumed from the iterator.
+ /// assert_eq!(it.next().unwrap(), &2);
+ /// ```
#[inline]
- fn peek_next(&mut self) -> Option<&mut A> {
+ pub fn peek_next(&mut self) -> Option<&mut A> {
if self.nelem == 0 {
return None
}
use test::Bencher;
use test;
- use super::{DList, Node, ListInsertion};
+ use super::{DList, Node};
pub fn check_links<T>(list: &DList<T>) {
let mut len = 0u;
if contiguous {
let (empty, buf) = buf.split_at_mut(0);
- (buf[mut tail..head], empty)
+ (buf.slice_mut(tail, head), empty)
} else {
let (mid, right) = buf.split_at_mut(tail);
let (left, _) = mid.split_at_mut(head);
use core::kinds::Sized;
use core::mem::size_of;
use core::mem;
-use core::ops::FnMut;
+use core::ops::{FnMut,SliceMut};
use core::prelude::{Clone, Greater, Iterator, IteratorExt, Less, None, Option};
-use core::prelude::{Ord, Ordering, RawPtr, Some, range};
+use core::prelude::{Ord, Ordering, PtrExt, Some, range};
use core::ptr;
use core::slice as core_slice;
use self::Direction::*;
#[inline]
fn move_from(&mut self, mut src: Vec<T>, start: uint, end: uint) -> uint {
- for (a, b) in self.iter_mut().zip(src[mut start..end].iter_mut()) {
+ for (a, b) in self.iter_mut().zip(src.slice_mut(start, end).iter_mut()) {
mem::swap(a, b);
}
cmp::min(self.len(), end-start)
#[unstable = "trait is unstable"]
impl<T> BorrowFromMut<Vec<T>> for [T] {
- fn borrow_from_mut(owned: &mut Vec<T>) -> &mut [T] { owned[mut] }
+ fn borrow_from_mut(owned: &mut Vec<T>) -> &mut [T] { owned.as_mut_slice_() }
}
#[unstable = "trait is unstable"]
assert!(a == [7i,2,3,4]);
let mut a = [1i,2,3,4,5];
let b = vec![5i,6,7,8,9,0];
- assert_eq!(a[mut 2..4].move_from(b,1,6), 2);
+ assert_eq!(a.slice_mut(2, 4).move_from(b,1,6), 2);
assert!(a == [1i,2,6,7,5]);
}
#[test]
fn test_reverse_part() {
let mut values = [1i,2,3,4,5];
- values[mut 1..4].reverse();
+ values.slice_mut(1, 4).reverse();
assert!(values == [1,4,3,2,5]);
}
fn test_bytes_set_memory() {
use slice::bytes::MutableByteVector;
let mut values = [1u8,2,3,4,5];
- values[mut 0..5].set_memory(0xAB);
+ values.slice_mut(0, 5).set_memory(0xAB);
assert!(values == [0xAB, 0xAB, 0xAB, 0xAB, 0xAB]);
- values[mut 2..4].set_memory(0xFF);
+ values.slice_mut(2, 4).set_memory(0xFF);
assert!(values == [0xAB, 0xAB, 0xFF, 0xFF, 0xAB]);
}
pub use core::str::{from_utf8, CharEq, Chars, CharIndices};
pub use core::str::{Bytes, CharSplits, is_utf8};
-pub use core::str::{CharSplitsN, Lines, LinesAny, MatchIndices, StrSplits};
+pub use core::str::{CharSplitsN, Lines, LinesAny, MatchIndices, StrSplits, SplitStr};
pub use core::str::{CharRange};
pub use core::str::{FromStr, from_str, Utf8Error};
pub use core::str::Str;
pub use core::str::{from_utf8_unchecked, from_c_str};
pub use unicode::str::{Words, Graphemes, GraphemeIndices};
+pub use core::str::{Split, SplitTerminator};
+pub use core::str::{SplitN, RSplitN};
// FIXME(conventions): ensure bit/char conventions are followed by str's API
/// // not found, so no change.
/// assert_eq!(s.replace("cookie monster", "little lamb"), s);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
+ #[stable]
fn replace(&self, from: &str, to: &str) -> String {
let mut result = String::new();
let mut last_end = 0;
}
}
- /// Returns true if one string contains another
+ /// Returns true if a string contains a string pattern.
///
/// # Arguments
///
- /// - needle - The string to look for
+ /// - pat - The string pattern to look for
///
/// # Example
///
/// ```rust
/// assert!("bananas".contains("nana"));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn contains(&self, needle: &str) -> bool {
- core_str::StrExt::contains(self[], needle)
+ #[stable]
+ fn contains(&self, pat: &str) -> bool {
+ core_str::StrExt::contains(self[], pat)
}
- /// Returns true if a string contains a char.
+ /// Returns true if a string contains a char pattern.
///
/// # Arguments
///
- /// - needle - The char to look for
+ /// - pat - The char pattern to look for
///
/// # Example
///
/// ```rust
/// assert!("hello".contains_char('e'));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn contains_char(&self, needle: char) -> bool {
- core_str::StrExt::contains_char(self[], needle)
+ #[unstable = "might get removed in favour of a more generic contains()"]
+ fn contains_char<P: CharEq>(&self, pat: P) -> bool {
+ core_str::StrExt::contains_char(self[], pat)
}
/// An iterator over the characters of `self`. Note, this iterates
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`.
+ /// matched by the pattern `pat`.
///
/// # Example
///
/// let v: Vec<&str> = "".split('X').collect();
/// assert_eq!(v, vec![""]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
- core_str::StrExt::split(self[], sep)
+ #[stable]
+ fn split<P: CharEq>(&self, pat: P) -> Split<P> {
+ core_str::StrExt::split(self[], pat)
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`, restricted to splitting at most `count`
+ /// matched by the pattern `pat`, restricted to splitting at most `count`
/// times.
///
/// # Example
/// let v: Vec<&str> = "".splitn(1, 'X').collect();
/// assert_eq!(v, vec![""]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn splitn<Sep: CharEq>(&self, count: uint, sep: Sep) -> CharSplitsN<Sep> {
- core_str::StrExt::splitn(self[], count, sep)
+ #[stable]
+ fn splitn<P: CharEq>(&self, count: uint, pat: P) -> SplitN<P> {
+ core_str::StrExt::splitn(self[], count, pat)
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`.
+ /// matched by the pattern `pat`.
///
/// Equivalent to `split`, except that the trailing substring
/// is skipped if empty (terminator semantics).
/// let v: Vec<&str> = "lionXXtigerXleopard".split('X').rev().collect();
/// assert_eq!(v, vec!["leopard", "tiger", "", "lion"]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn split_terminator<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
- core_str::StrExt::split_terminator(self[], sep)
+ #[unstable = "might get removed"]
+ fn split_terminator<P: CharEq>(&self, pat: P) -> SplitTerminator<P> {
+ core_str::StrExt::split_terminator(self[], pat)
}
/// An iterator over substrings of `self`, separated by characters
- /// matched by `sep`, starting from the end of the string.
+ /// matched by the pattern `pat`, starting from the end of the string.
/// Restricted to splitting at most `count` times.
///
/// # Example
/// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(2, 'X').collect();
/// assert_eq!(v, vec!["leopard", "tiger", "lionX"]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn rsplitn<Sep: CharEq>(&self, count: uint, sep: Sep) -> CharSplitsN<Sep> {
- core_str::StrExt::rsplitn(self[], count, sep)
+ #[stable]
+ fn rsplitn<P: CharEq>(&self, count: uint, pat: P) -> RSplitN<P> {
+ core_str::StrExt::rsplitn(self[], count, pat)
}
/// An iterator over the start and end indices of the disjoint
- /// matches of `sep` within `self`.
+ /// matches of the pattern `pat` within `self`.
///
/// That is, each returned value `(start, end)` satisfies
/// `self.slice(start, end) == sep`. For matches of `sep` within
/// let v: Vec<(uint, uint)> = "ababa".match_indices("aba").collect();
/// assert_eq!(v, vec![(0, 3)]); // only the first `aba`
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
- core_str::StrExt::match_indices(self[], sep)
+ #[unstable = "might have its iterator type changed"]
+ fn match_indices<'a>(&'a self, pat: &'a str) -> MatchIndices<'a> {
+ core_str::StrExt::match_indices(self[], pat)
}
- /// An iterator over the substrings of `self` separated by `sep`.
+    /// An iterator over the substrings of `self` separated by the pattern `pat`.
///
/// # Example
///
/// let v: Vec<&str> = "1abcabc2".split_str("abc").collect();
/// assert_eq!(v, vec!["1", "", "2"]);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn split_str<'a>(&'a self, s: &'a str) -> StrSplits<'a> {
- core_str::StrExt::split_str(self[], s)
+ #[unstable = "might get removed in the future in favor of a more generic split()"]
+ fn split_str<'a>(&'a self, pat: &'a str) -> StrSplits<'a> {
+ core_str::StrExt::split_str(self[], pat)
}
/// An iterator over the lines of a string (subsequences separated
core_str::StrExt::slice_unchecked(self[], begin, end)
}
- /// Returns true if `needle` is a prefix of the string.
+ /// Returns true if the pattern `pat` is a prefix of the string.
///
/// # Example
///
/// ```rust
/// assert!("banana".starts_with("ba"));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn starts_with(&self, needle: &str) -> bool {
- core_str::StrExt::starts_with(self[], needle)
+ #[stable]
+ fn starts_with(&self, pat: &str) -> bool {
+ core_str::StrExt::starts_with(self[], pat)
}
- /// Returns true if `needle` is a suffix of the string.
+ /// Returns true if the pattern `pat` is a suffix of the string.
///
/// # Example
///
/// ```rust
/// assert!("banana".ends_with("nana"));
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn ends_with(&self, needle: &str) -> bool {
- core_str::StrExt::ends_with(self[], needle)
+ #[stable]
+ fn ends_with(&self, pat: &str) -> bool {
+ core_str::StrExt::ends_with(self[], pat)
}
- /// Returns a string with characters that match `to_trim` removed from the left and the right.
+ /// Returns a string with all pre- and suffixes that match
+ /// the pattern `pat` repeatedly removed.
///
/// # Arguments
///
- /// * to_trim - a character matcher
+ /// * pat - a string pattern
///
/// # Example
///
/// ```rust
- /// assert_eq!("11foo1bar11".trim_chars('1'), "foo1bar");
+ /// assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
/// let x: &[_] = &['1', '2'];
- /// assert_eq!("12foo1bar12".trim_chars(x), "foo1bar");
- /// assert_eq!("123foo1bar123".trim_chars(|&: c: char| c.is_numeric()), "foo1bar");
+ /// assert_eq!("12foo1bar12".trim_matches(x), "foo1bar");
+ /// assert_eq!("123foo1bar123".trim_matches(|&: c: char| c.is_numeric()), "foo1bar");
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn trim_chars<C: CharEq>(&self, to_trim: C) -> &str {
- core_str::StrExt::trim_chars(self[], to_trim)
+ #[stable]
+ fn trim_matches<P: CharEq>(&self, pat: P) -> &str {
+ core_str::StrExt::trim_matches(self[], pat)
+ }
+
+ /// Deprecated: replaced by `trim_matches`.
+ #[deprecated = "Replaced by `trim_matches`"]
+ fn trim_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str {
+ self.trim_matches(to_trim)
}
- /// Returns a string with leading `chars_to_trim` removed.
+ /// Returns a string with all prefixes that match
+ /// the pattern `pat` repeatedly removed.
///
/// # Arguments
///
- /// * to_trim - a character matcher
+ /// * pat - a string pattern
///
/// # Example
///
/// ```rust
- /// assert_eq!("11foo1bar11".trim_left_chars('1'), "foo1bar11");
+ /// assert_eq!("11foo1bar11".trim_left_matches('1'), "foo1bar11");
/// let x: &[_] = &['1', '2'];
- /// assert_eq!("12foo1bar12".trim_left_chars(x), "foo1bar12");
- /// assert_eq!("123foo1bar123".trim_left_chars(|&: c: char| c.is_numeric()), "foo1bar123");
+ /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12");
+ /// assert_eq!("123foo1bar123".trim_left_matches(|&: c: char| c.is_numeric()), "foo1bar123");
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn trim_left_chars<C: CharEq>(&self, to_trim: C) -> &str {
- core_str::StrExt::trim_left_chars(self[], to_trim)
+ #[stable]
+ fn trim_left_matches<P: CharEq>(&self, pat: P) -> &str {
+ core_str::StrExt::trim_left_matches(self[], pat)
+ }
+
+ /// Deprecated: replaced by `trim_left_matches`.
+ #[deprecated = "Replaced by `trim_left_matches`"]
+ fn trim_left_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str {
+ self.trim_left_matches(to_trim)
}
- /// Returns a string with trailing `chars_to_trim` removed.
+ /// Returns a string with all suffixes that match
+ /// the pattern `pat` repeatedly removed.
///
/// # Arguments
///
- /// * to_trim - a character matcher
+ /// * pat - a string pattern
///
/// # Example
///
/// ```rust
- /// assert_eq!("11foo1bar11".trim_right_chars('1'), "11foo1bar");
+ /// assert_eq!("11foo1bar11".trim_right_matches('1'), "11foo1bar");
/// let x: &[_] = &['1', '2'];
- /// assert_eq!("12foo1bar12".trim_right_chars(x), "12foo1bar");
- /// assert_eq!("123foo1bar123".trim_right_chars(|&: c: char| c.is_numeric()), "123foo1bar");
+ /// assert_eq!("12foo1bar12".trim_right_matches(x), "12foo1bar");
+ /// assert_eq!("123foo1bar123".trim_right_matches(|&: c: char| c.is_numeric()), "123foo1bar");
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn trim_right_chars<C: CharEq>(&self, to_trim: C) -> &str {
- core_str::StrExt::trim_right_chars(self[], to_trim)
+ #[stable]
+ fn trim_right_matches<P: CharEq>(&self, pat: P) -> &str {
+ core_str::StrExt::trim_right_matches(self[], pat)
+ }
+
+ /// Deprecated: replaced by `trim_right_matches`.
+ #[deprecated = "Replaced by `trim_right_matches`"]
+ fn trim_right_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str {
+ self.trim_right_matches(to_trim)
}
/// Check that `index`-th byte lies at the start and/or end of a
}
/// Returns the byte index of the first character of `self` that
- /// matches `search`.
+ /// matches the pattern `pat`.
///
/// # Return value
///
/// let x: &[_] = &['1', '2'];
/// assert_eq!(s.find(x), None);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn find<C: CharEq>(&self, search: C) -> Option<uint> {
- core_str::StrExt::find(self[], search)
+ #[stable]
+ fn find<P: CharEq>(&self, pat: P) -> Option<uint> {
+ core_str::StrExt::find(self[], pat)
}
/// Returns the byte index of the last character of `self` that
- /// matches `search`.
+ /// matches the pattern `pat`.
///
/// # Return value
///
/// let x: &[_] = &['1', '2'];
/// assert_eq!(s.rfind(x), None);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
- fn rfind<C: CharEq>(&self, search: C) -> Option<uint> {
- core_str::StrExt::rfind(self[], search)
+ #[stable]
+ fn rfind<P: CharEq>(&self, pat: P) -> Option<uint> {
+ core_str::StrExt::rfind(self[], pat)
}
/// Returns the byte index of the first matching substring
/// assert_eq!(s.find_str("老虎 L"), Some(6));
/// assert_eq!(s.find_str("muffin man"), None);
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
+ #[unstable = "might get removed in favor of a more generic find in the future"]
fn find_str(&self, needle: &str) -> Option<uint> {
core_str::StrExt::find_str(self[], needle)
}
/// assert!(string.subslice_offset(lines[1]) == 2); // &"b"
/// assert!(string.subslice_offset(lines[2]) == 4); // &"c"
/// ```
- #[unstable = "awaiting pattern/matcher stabilization"]
+ #[unstable = "awaiting convention about comparability of arbitrary slices"]
fn subslice_offset(&self, inner: &str) -> uint {
core_str::StrExt::subslice_offset(self[], inner)
}
#[cfg(test)]
mod tests {
- use std::iter::AdditiveIterator;
- use std::iter::range;
- use std::default::Default;
- use std::char::Char;
- use std::clone::Clone;
- use std::cmp::{Ord, PartialOrd, Equiv};
- use std::cmp::Ordering::{Equal, Greater, Less};
- use std::option::Option::{mod, Some, None};
- use std::result::Result::{Ok, Err};
- use std::ptr::RawPtr;
- use std::iter::{Iterator, IteratorExt, DoubleEndedIteratorExt};
+ use prelude::*;
- use super::*;
+ use core::default::Default;
+ use core::iter::AdditiveIterator;
+ use super::{eq_slice, from_utf8, is_utf8, is_utf16, raw};
+ use super::truncate_utf16_at_nul;
use super::MaybeOwned::{Owned, Slice};
use std::slice::{AsSlice, SliceExt};
use string::{String, ToString};
use core::iter::repeat;
use core::kinds::marker::{ContravariantLifetime, InvariantType};
use core::mem;
+use core::nonzero::NonZero;
use core::num::{Int, UnsignedInt};
use core::ops;
-use core::ptr::{mod, Unique};
+use core::ptr;
use core::raw::Slice as RawSlice;
use core::uint;
#[unsafe_no_drop_flag]
#[stable]
pub struct Vec<T> {
- ptr: Unique<T>,
+ ptr: NonZero<*mut T>,
len: uint,
cap: uint,
}
+unsafe impl<T: Send> Send for Vec<T> { }
+unsafe impl<T: Sync> Sync for Vec<T> { }
+
/// A clone-on-write vector
pub type CowVec<'a, T> = Cow<'a, Vec<T>, [T]>;
// non-null value which is fine since we never call deallocate on the ptr
// if cap is 0. The reason for this is because the pointer of a slice
// being NULL would break the null pointer optimization for enums.
- Vec { ptr: Unique(EMPTY as *mut T), len: 0, cap: 0 }
+ Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: 0 }
}
/// Constructs a new, empty `Vec<T>` with the specified capacity.
#[stable]
pub fn with_capacity(capacity: uint) -> Vec<T> {
if mem::size_of::<T>() == 0 {
- Vec { ptr: Unique(EMPTY as *mut T), len: 0, cap: uint::MAX }
+ Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: uint::MAX }
} else if capacity == 0 {
Vec::new()
} else {
.expect("capacity overflow");
let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) };
if ptr.is_null() { ::alloc::oom() }
- Vec { ptr: Unique(ptr as *mut T), len: 0, cap: capacity }
+ Vec { ptr: unsafe { NonZero::new(ptr as *mut T) }, len: 0, cap: capacity }
}
}
#[unstable = "needs finalization"]
pub unsafe fn from_raw_parts(ptr: *mut T, length: uint,
capacity: uint) -> Vec<T> {
- Vec { ptr: Unique(ptr), len: length, cap: capacity }
+ Vec { ptr: NonZero::new(ptr), len: length, cap: capacity }
}
/// Creates a vector by copying the elements from a raw pointer.
if self.len == 0 {
if self.cap != 0 {
unsafe {
- dealloc(self.ptr.0, self.cap)
+ dealloc(*self.ptr, self.cap)
}
self.cap = 0;
}
unsafe {
// Overflow check is unnecessary as the vector is already at
// least this large.
- self.ptr = Unique(reallocate(self.ptr.0 as *mut u8,
- self.cap * mem::size_of::<T>(),
- self.len * mem::size_of::<T>(),
- mem::min_align_of::<T>()) as *mut T);
- if self.ptr.0.is_null() { ::alloc::oom() }
+ let ptr = reallocate(*self.ptr as *mut u8,
+ self.cap * mem::size_of::<T>(),
+ self.len * mem::size_of::<T>(),
+ mem::min_align_of::<T>()) as *mut T;
+ if ptr.is_null() { ::alloc::oom() }
+ self.ptr = NonZero::new(ptr);
}
self.cap = self.len;
}
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] {
unsafe {
mem::transmute(RawSlice {
- data: self.ptr.0 as *const T,
+ data: *self.ptr as *const T,
len: self.len,
})
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn into_iter(self) -> IntoIter<T> {
unsafe {
- let ptr = self.ptr.0;
+ let ptr = *self.ptr;
let cap = self.cap;
- let begin = self.ptr.0 as *const T;
+ let begin = ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
(ptr as uint + self.len()) as *const T
} else {
let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
if old_size > size { panic!("capacity overflow") }
unsafe {
- self.ptr = Unique(alloc_or_realloc(self.ptr.0, old_size, size));
- if self.ptr.0.is_null() { ::alloc::oom() }
+ let ptr = alloc_or_realloc(*self.ptr, old_size, size);
+ if ptr.is_null() { ::alloc::oom() }
+ self.ptr = NonZero::new(ptr);
}
self.cap = max(self.cap, 2) * 2;
}
unsafe {
- let end = self.ptr.0.offset(self.len as int);
+ let end = (*self.ptr).offset(self.len as int);
ptr::write(&mut *end, value);
self.len += 1;
}
#[unstable = "matches collection reform specification, waiting for dust to settle"]
pub fn drain<'a>(&'a mut self) -> Drain<'a, T> {
unsafe {
- let begin = self.ptr.0 as *const T;
+ let begin = *self.ptr as *const T;
let end = if mem::size_of::<T>() == 0 {
- (self.ptr.0 as uint + self.len()) as *const T
+ (*self.ptr as uint + self.len()) as *const T
} else {
- self.ptr.0.offset(self.len() as int) as *const T
+ (*self.ptr).offset(self.len() as int) as *const T
};
self.set_len(0);
Drain {
let size = capacity.checked_mul(mem::size_of::<T>())
.expect("capacity overflow");
unsafe {
- self.ptr = Unique(alloc_or_realloc(self.ptr.0,
- self.cap * mem::size_of::<T>(),
- size));
- if self.ptr.0.is_null() { ::alloc::oom() }
+ let ptr = alloc_or_realloc(*self.ptr, self.cap * mem::size_of::<T>(), size);
+ if ptr.is_null() { ::alloc::oom() }
+ self.ptr = NonZero::new(ptr);
}
self.cap = capacity;
}
fn as_slice<'a>(&'a self) -> &'a [T] {
unsafe {
mem::transmute(RawSlice {
- data: self.ptr.0 as *const T,
+ data: *self.ptr as *const T,
len: self.len
})
}
for x in self.iter() {
ptr::read(x);
}
- dealloc(self.ptr.0, self.cap)
+ dealloc(*self.ptr, self.cap)
}
}
}
for _x in self { }
let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self;
mem::forget(self);
- Vec { ptr: Unique(allocation), cap: cap, len: 0 }
+ Vec { ptr: NonZero::new(allocation), cap: cap, len: 0 }
}
}
/// but can be overridden to reuse the resources of `a` to avoid unnecessary
/// allocations.
#[inline(always)]
- #[unstable = "this function rarely unused"]
+ #[unstable = "this function is rarely used"]
fn clone_from(&mut self, source: &Self) {
*self = source.clone()
}
_ => ()
}
- buf[mut ..end].reverse();
+ buf.slice_to_mut(end).reverse();
// Remember start of the fractional digits.
// Points one beyond end of buf if none get generated,
impl<'a> fmt::FormatWriter for Filler<'a> {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
- slice::bytes::copy_memory(self.buf[mut *self.end..],
+ slice::bytes::copy_memory(self.buf.slice_from_mut(*self.end),
bytes);
*self.end += bytes.len();
Ok(())
pub mod intrinsics;
pub mod mem;
+pub mod nonzero;
pub mod ptr;
/* Core language traits */
);
}
-/// Runtime assertion, only without `--cfg ndebug`
-#[macro_export]
-macro_rules! debug_assert {
- ($(a:tt)*) => ({
- if cfg!(not(ndebug)) {
- assert!($($a)*);
- }
- })
-}
-
/// Runtime assertion for equality, for details see std::macros
#[macro_export]
macro_rules! assert_eq {
})
}
-/// Runtime assertion, disableable at compile time
+/// Runtime assertion, disableable at compile time with `--cfg ndebug`
#[macro_export]
macro_rules! debug_assert {
($($arg:tt)*) => (if cfg!(not(ndebug)) { assert!($($arg)*); })
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Exposes the NonZero lang item which provides optimization hints.
+
+use ops::Deref;
+
+/// Unsafe trait to indicate what types are usable with the NonZero struct
+pub unsafe trait Zeroable {}
+
+unsafe impl<T> Zeroable for *const T {}
+unsafe impl<T> Zeroable for *mut T {}
+unsafe impl Zeroable for int {}
+unsafe impl Zeroable for uint {}
+unsafe impl Zeroable for i8 {}
+unsafe impl Zeroable for u8 {}
+unsafe impl Zeroable for i16 {}
+unsafe impl Zeroable for u16 {}
+unsafe impl Zeroable for i32 {}
+unsafe impl Zeroable for u32 {}
+unsafe impl Zeroable for i64 {}
+unsafe impl Zeroable for u64 {}
+
+/// A wrapper type for raw pointers and integers that will never be
+/// NULL or 0, which might allow certain optimizations.
+#[lang="non_zero"]
+#[deriving(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Show)]
+#[experimental]
+pub struct NonZero<T: Zeroable>(T);
+
+impl<T: Zeroable> NonZero<T> {
+ /// Create an instance of NonZero with the provided value.
+    /// You must ensure that the value is actually "non-zero".
+ #[inline(always)]
+ pub unsafe fn new(inner: T) -> NonZero<T> {
+ NonZero(inner)
+ }
+}
+
+impl<T: Zeroable> Deref<T> for NonZero<T> {
+ #[inline]
+ fn deref<'a>(&'a self) -> &'a T {
+ let NonZero(ref inner) = *self;
+ inner
+ }
+}
}
}
+/// A range which is only bounded above.
+#[deriving(Copy)]
+#[lang="range_to"]
+pub struct RangeTo<Idx> {
+ /// The upper bound of the range (exclusive).
+ pub end: Idx,
+}
+
/// The `Deref` trait is used to specify the functionality of dereferencing
/// operations like `*v`.
pub use num::{ToPrimitive, FromPrimitive};
pub use option::Option;
pub use option::Option::{Some, None};
-pub use ptr::RawPtr;
+pub use ptr::{PtrExt, MutPtrExt};
pub use result::Result;
pub use result::Result::{Ok, Err};
pub use str::{Str, StrExt};
//! typically limited to a few patterns.
//!
//! Use the [`null` function](fn.null.html) to create null pointers,
-//! the [`is_null`](trait.RawPtr.html#tymethod.is_null)
-//! and [`is_not_null`](trait.RawPtr.html#method.is_not_null)
-//! methods of the [`RawPtr` trait](trait.RawPtr.html) to check for null.
-//! The `RawPtr` trait is imported by the prelude, so `is_null` etc.
-//! work everywhere. The `RawPtr` also defines the `offset` method,
+//! the [`is_null`](trait.PtrExt.html#tymethod.is_null)
+//! methods of the [`PtrExt` trait](trait.PtrExt.html) to check for null.
+//! The `PtrExt` trait is imported by the prelude, so `is_null` etc.
+//! work everywhere. The `PtrExt` also defines the `offset` method,
//! for pointer math.
//!
//! # Common ways to create unsafe pointers
//! but C APIs hand out a lot of pointers generally, so are a common source
//! of unsafe pointers in Rust.
+#![stable]
+
use mem;
use clone::Clone;
use intrinsics;
-use option::Option;
-use option::Option::{Some, None};
+use option::Option::{mod, Some, None};
use kinds::{Send, Sync};
use cmp::{PartialEq, Eq, Ord, PartialOrd, Equiv};
-use cmp::Ordering;
-use cmp::Ordering::{Less, Equal, Greater};
+use cmp::Ordering::{mod, Less, Equal, Greater};
// FIXME #19649: instrinsic docs don't render, so these have no docs :(
#[experimental = "uncertain about naming and semantics"]
pub use intrinsics::set_memory;
+
/// Creates a null raw pointer.
///
/// # Examples
/// assert!(p.is_null());
/// ```
#[inline]
-#[unstable = "may need a different name after pending changes to pointer types"]
+#[stable]
pub fn null<T>() -> *const T { 0 as *const T }
/// Creates a null mutable raw pointer.
/// assert!(p.is_null());
/// ```
#[inline]
-#[unstable = "may need a different name after pending changes to pointer types"]
+#[stable]
pub fn null_mut<T>() -> *mut T { 0 as *mut T }
-/// Zeroes out `count * size_of::<T>` bytes of memory at `dst`. `count` may be `0`.
+/// Zeroes out `count * size_of::<T>` bytes of memory at `dst`. `count` may be
+/// `0`.
///
/// # Safety
///
-/// Beyond accepting a raw pointer, this is unsafe because it will not drop the contents of `dst`,
-/// and may be used to create invalid instances of `T`.
+/// Beyond accepting a raw pointer, this is unsafe because it will not drop the
+/// contents of `dst`, and may be used to create invalid instances of `T`.
#[inline]
-#[experimental = "uncertain about naming and semantics"]
-#[allow(experimental)]
+#[unstable = "may play a larger role in std::ptr future extensions"]
pub unsafe fn zero_memory<T>(dst: *mut T, count: uint) {
set_memory(dst, 0, count);
}
/// Swaps the values at two mutable locations of the same type, without
-/// deinitialising either. They may overlap, unlike `mem::swap` which is otherwise
-/// equivalent.
+/// deinitialising either. They may overlap, unlike `mem::swap` which is
+/// otherwise equivalent.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
#[inline]
-#[unstable]
+#[stable]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninitialized();
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
#[inline]
-#[unstable]
+#[stable]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(mem::transmute(dest), &mut src); // cannot overlap
src
/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
#[inline(always)]
-#[unstable]
+#[stable]
pub unsafe fn read<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping_memory(&mut tmp, src, 1);
///
/// This is unsafe for the same reasons that `read` is unsafe.
#[inline(always)]
-#[experimental]
-#[allow(experimental)]
+#[unstable = "may play a larger role in std::ptr future extensions"]
pub unsafe fn read_and_zero<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
tmp
}
-/// Overwrites a memory location with the given value without reading or dropping
-/// the old value.
+/// Overwrites a memory location with the given value without reading or
+/// dropping the old value.
///
/// # Safety
///
/// not drop the contents of `dst`. This could leak allocations or resources,
/// so care must be taken not to overwrite an object that should be dropped.
///
-/// This is appropriate for initializing uninitialized memory, or overwritting memory
-/// that has previously been `read` from.
+/// This is appropriate for initializing uninitialized memory, or overwriting
+/// memory that has previously been `read` from.
#[inline]
-#[unstable]
+#[stable]
pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
/// Methods on raw pointers
-pub trait RawPtr<T> {
- /// Returns a null raw pointer.
+#[stable]
+pub trait PtrExt<T> {
+ /// Returns the null pointer.
+ #[deprecated = "call ptr::null instead"]
fn null() -> Self;
/// Returns true if the pointer is null.
- fn is_null(&self) -> bool;
+ #[stable]
+ fn is_null(self) -> bool;
- /// Returns true if the pointer is not null.
- fn is_not_null(&self) -> bool { !self.is_null() }
+ /// Returns true if the pointer is not equal to the null pointer.
+ #[deprecated = "use !p.is_null() instead"]
+ fn is_not_null(self) -> bool { !self.is_null() }
- /// Returns the address of the pointer.
- fn to_uint(&self) -> uint;
+    /// Returns the address of the pointer.
+ #[deprecated = "use `as uint` instead"]
+ fn to_uint(self) -> uint;
- /// Returns `None` if the pointer is null, or else returns a reference to the
- /// value wrapped in `Some`.
+ /// Returns `None` if the pointer is null, or else returns a reference to
+ /// the value wrapped in `Some`.
///
/// # Safety
///
- /// While this method and its mutable counterpart are useful for null-safety,
- /// it is important to note that this is still an unsafe operation because
- /// the returned value could be pointing to invalid memory.
+ /// While this method and its mutable counterpart are useful for
+ /// null-safety, it is important to note that this is still an unsafe
+ /// operation because the returned value could be pointing to invalid
+ /// memory.
+ #[unstable = "Option is not clearly the right return type, and we may want \
+ to tie the return lifetime to a borrow of the raw pointer"]
unsafe fn as_ref<'a>(&self) -> Option<&'a T>;
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
///
/// # Safety
///
- /// The offset must be in-bounds of the object, or one-byte-past-the-end. Otherwise
- /// `offset` invokes Undefined Behaviour, regardless of whether the pointer is used.
+ /// The offset must be in-bounds of the object, or one-byte-past-the-end.
+ /// Otherwise `offset` invokes Undefined Behaviour, regardless of whether
+ /// the pointer is used.
+ #[stable]
unsafe fn offset(self, count: int) -> Self;
}
/// Methods on mutable raw pointers
-pub trait RawMutPtr<T>{
- /// Returns `None` if the pointer is null, or else returns a mutable reference
- /// to the value wrapped in `Some`.
+#[stable]
+pub trait MutPtrExt<T>{
+ /// Returns `None` if the pointer is null, or else returns a mutable
+ /// reference to the value wrapped in `Some`.
///
/// # Safety
///
/// As with `as_ref`, this is unsafe because it cannot verify the validity
/// of the returned pointer.
+ #[unstable = "Option is not clearly the right return type, and we may want \
+ to tie the return lifetime to a borrow of the raw pointer"]
unsafe fn as_mut<'a>(&self) -> Option<&'a mut T>;
}
-impl<T> RawPtr<T> for *const T {
+#[stable]
+impl<T> PtrExt<T> for *const T {
#[inline]
+ #[deprecated = "call ptr::null instead"]
fn null() -> *const T { null() }
#[inline]
- fn is_null(&self) -> bool { *self == RawPtr::null() }
+ #[stable]
+ fn is_null(self) -> bool { self as uint == 0 }
#[inline]
- fn to_uint(&self) -> uint { *self as uint }
+ #[deprecated = "use `as uint` instead"]
+ fn to_uint(self) -> uint { self as uint }
#[inline]
+ #[stable]
unsafe fn offset(self, count: int) -> *const T {
intrinsics::offset(self, count)
}
#[inline]
+ #[unstable = "return value does not necessarily convey all possible \
+ information"]
unsafe fn as_ref<'a>(&self) -> Option<&'a T> {
if self.is_null() {
None
}
}
-impl<T> RawPtr<T> for *mut T {
+#[stable]
+impl<T> PtrExt<T> for *mut T {
#[inline]
+ #[deprecated = "call ptr::null instead"]
fn null() -> *mut T { null_mut() }
#[inline]
- fn is_null(&self) -> bool { *self == RawPtr::null() }
+ #[stable]
+ fn is_null(self) -> bool { self as uint == 0 }
#[inline]
- fn to_uint(&self) -> uint { *self as uint }
+ #[deprecated = "use `as uint` instead"]
+ fn to_uint(self) -> uint { self as uint }
#[inline]
+ #[stable]
unsafe fn offset(self, count: int) -> *mut T {
intrinsics::offset(self as *const T, count) as *mut T
}
#[inline]
+ #[unstable = "return value does not necessarily convey all possible \
+ information"]
unsafe fn as_ref<'a>(&self) -> Option<&'a T> {
if self.is_null() {
None
}
}
-impl<T> RawMutPtr<T> for *mut T {
+#[stable]
+impl<T> MutPtrExt<T> for *mut T {
#[inline]
+ #[unstable = "return value does not necessarily convey all possible \
+ information"]
unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> {
if self.is_null() {
None
/// raw `*mut T` (which conveys no particular ownership semantics).
/// Useful for building abstractions like `Vec<T>` or `Box<T>`, which
/// internally use raw pointers to manage the memory that they own.
+#[unstable = "recently added to this module"]
pub struct Unique<T>(pub *mut T);
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
+#[unstable = "recently added to this module"]
unsafe impl<T:Send> Send for Unique<T> { }
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
+#[unstable = "recently added to this module"]
unsafe impl<T:Sync> Sync for Unique<T> { }
impl<T> Unique<T> {
/// Returns a null Unique.
+ #[unstable = "recently added to this module"]
pub fn null() -> Unique<T> {
- Unique(RawPtr::null())
+ Unique(null_mut())
}
/// Return an (unsafe) pointer into the memory owned by `self`.
+ #[unstable = "recently added to this module"]
pub unsafe fn offset(self, offset: int) -> *mut T {
- (self.0 as *const T).offset(offset) as *mut T
+ self.0.offset(offset)
}
}
/// Convert from `Result<T, E>` to `Option<E>`
///
- /// Converts `self` into an `Option<T>`, consuming `self`,
+ /// Converts `self` into an `Option<E>`, consuming `self`,
/// and discarding the value, if any.
///
/// # Example
use option::Option;
use option::Option::{None, Some};
use ptr;
-use ptr::RawPtr;
+use ptr::PtrExt;
use mem;
use mem::size_of;
use kinds::{Sized, marker};
fn as_mut_slice(&mut self) -> &mut [T] { self }
fn slice_mut(&mut self, start: uint, end: uint) -> &mut [T] {
- self[mut start..end]
+ ops::SliceMut::slice_or_fail_mut(self, &start, &end)
}
#[inline]
fn slice_from_mut(&mut self, start: uint) -> &mut [T] {
- self[mut start..]
+ ops::SliceMut::slice_from_or_fail_mut(self, &start)
}
#[inline]
fn slice_to_mut(&mut self, end: uint) -> &mut [T] {
- self[mut ..end]
+ ops::SliceMut::slice_to_or_fail_mut(self, &end)
}
#[inline]
fn split_at_mut(&mut self, mid: uint) -> (&mut [T], &mut [T]) {
unsafe {
let self2: &mut [T] = mem::transmute_copy(&self);
- (self[mut ..mid], self2[mut mid..])
+
+ (ops::SliceMut::slice_to_or_fail_mut(self, &mid),
+ ops::SliceMut::slice_from_or_fail_mut(self2, &mid))
}
}
#[inline]
fn tail_mut(&mut self) -> &mut [T] {
- let len = self.len();
- self[mut 1..len]
+ self.slice_from_mut(1)
}
#[inline]
fn init_mut(&mut self) -> &mut [T] {
let len = self.len();
- self[mut 0..len - 1]
+ self.slice_to_mut(len-1)
}
#[inline]
self.swap(j, i-1);
// Step 4: Reverse the (previously) weakly decreasing part
- self[mut i..].reverse();
+ self.slice_from_mut(i).reverse();
true
}
}
// Step 2: Reverse the weakly increasing part
- self[mut i..].reverse();
+ self.slice_from_mut(i).reverse();
// Step 3: Find the rightmost element equal to or bigger than the pivot (i-1)
let mut j = self.len() - 1;
Some(idx) => {
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
- self.v = tail[mut 1..];
+ self.v = tail.slice_from_mut(1);
Some(head)
}
}
let tmp = mem::replace(&mut self.v, &mut []);
let (head, tail) = tmp.split_at_mut(idx);
self.v = head;
- Some(tail[mut 1..])
+ Some(tail.slice_from_mut(1))
}
}
}
#[deprecated]
pub mod raw {
use mem::transmute;
- use ptr::RawPtr;
+ use ptr::PtrExt;
use raw::Slice;
use ops::FnOnce;
use option::Option;
+++ /dev/null
-// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-//
-// ignore-lexer-test FIXME #15679
-
-//! String manipulation
-//!
-//! For more details, see std::str
-
-#![doc(primitive = "str")]
-
-use self::Searcher::{Naive, TwoWay, TwoWayLong};
-
-use clone::Clone;
-use cmp::{mod, Eq};
-use default::Default;
-use iter::range;
-use iter::{DoubleEndedIteratorExt, ExactSizeIterator};
-use iter::{Map, Iterator, IteratorExt, DoubleEndedIterator};
-use kinds::Sized;
-use mem;
-use num::Int;
-use ops::{Fn, FnMut};
-use option::Option::{mod, None, Some};
-use ptr::RawPtr;
-use raw::{Repr, Slice};
-use result::Result::{mod, Ok, Err};
-use slice::{mod, SliceExt};
-use uint;
-
-/// A trait to abstract the idea of creating a new instance of a type from a
-/// string.
-// FIXME(#17307): there should be an `E` associated type for a `Result` return
-#[unstable = "will return a Result once associated types are working"]
-pub trait FromStr {
- /// Parses a string `s` to return an optional value of this type. If the
- /// string is ill-formatted, the None is returned.
- fn from_str(s: &str) -> Option<Self>;
-}
-
-/// A utility function that just calls FromStr::from_str
-#[deprecated = "call the .parse() method on the string instead"]
-pub fn from_str<A: FromStr>(s: &str) -> Option<A> {
- FromStr::from_str(s)
-}
-
-impl FromStr for bool {
- /// Parse a `bool` from a string.
- ///
- /// Yields an `Option<bool>`, because `s` may or may not actually be parseable.
- ///
- /// # Examples
- ///
- /// ```rust
- /// assert_eq!("true".parse(), Some(true));
- /// assert_eq!("false".parse(), Some(false));
- /// assert_eq!("not even a boolean".parse::<bool>(), None);
- /// ```
- #[inline]
- fn from_str(s: &str) -> Option<bool> {
- match s {
- "true" => Some(true),
- "false" => Some(false),
- _ => None,
- }
- }
-}
-
-/*
-Section: Creating a string
-*/
-
-/// Errors which can occur when attempting to interpret a byte slice as a `str`.
-#[deriving(Copy, Eq, PartialEq, Clone)]
-pub enum Utf8Error {
- /// An invalid byte was detected at the byte offset given.
- ///
- /// The offset is guaranteed to be in bounds of the slice in question, and
- /// the byte at the specified offset was the first invalid byte in the
- /// sequence detected.
- InvalidByte(uint),
-
- /// The byte slice was invalid because more bytes were needed but no more
- /// bytes were available.
- TooShort,
-}
-
-/// Converts a slice of bytes to a string slice without performing any
-/// allocations.
-///
-/// Once the slice has been validated as utf-8, it is transmuted in-place and
-/// returned as a '&str' instead of a '&[u8]'
-///
-/// # Failure
-///
-/// Returns `Err` if the slice is not utf-8 with a description as to why the
-/// provided slice is not utf-8.
-pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
- try!(run_utf8_validation_iterator(&mut v.iter()));
- Ok(unsafe { from_utf8_unchecked(v) })
-}
-
-/// Converts a slice of bytes to a string slice without checking
-/// that the string contains valid UTF-8.
-#[stable]
-pub unsafe fn from_utf8_unchecked<'a>(v: &'a [u8]) -> &'a str {
- mem::transmute(v)
-}
-
-/// Constructs a static string slice from a given raw pointer.
-///
-/// This function will read memory starting at `s` until it finds a 0, and then
-/// transmute the memory up to that point as a string slice, returning the
-/// corresponding `&'static str` value.
-///
-/// This function is unsafe because the caller must ensure the C string itself
-/// has the static lifetime and that the memory `s` is valid up to and including
-/// the first null byte.
-///
-/// # Panics
-///
-/// This function will panic if the string pointed to by `s` is not valid UTF-8.
-#[unstable = "may change location based on the outcome of the c_str module"]
-pub unsafe fn from_c_str(s: *const i8) -> &'static str {
- let s = s as *const u8;
- let mut len = 0u;
- while *s.offset(len as int) != 0 {
- len += 1u;
- }
- let v: &'static [u8] = ::mem::transmute(Slice { data: s, len: len });
- from_utf8(v).ok().expect("from_c_str passed invalid utf-8 data")
-}
-
-/// Something that can be used to compare against a character
-#[unstable = "definition may change as pattern-related methods are stabilized"]
-pub trait CharEq {
- /// Determine if the splitter should split at the given character
- fn matches(&mut self, char) -> bool;
- /// Indicate if this is only concerned about ASCII characters,
- /// which can allow for a faster implementation.
- fn only_ascii(&self) -> bool;
-}
-
-impl CharEq for char {
- #[inline]
- fn matches(&mut self, c: char) -> bool { *self == c }
-
- #[inline]
- fn only_ascii(&self) -> bool { (*self as uint) < 128 }
-}
-
-impl<F> CharEq for F where F: FnMut(char) -> bool {
- #[inline]
- fn matches(&mut self, c: char) -> bool { (*self)(c) }
-
- #[inline]
- fn only_ascii(&self) -> bool { false }
-}
-
-impl<'a> CharEq for &'a [char] {
- #[inline]
- fn matches(&mut self, c: char) -> bool {
- self.iter().any(|&mut m| m.matches(c))
- }
-
- #[inline]
- fn only_ascii(&self) -> bool {
- self.iter().all(|m| m.only_ascii())
- }
-}
-
-/*
-Section: Iterators
-*/
-
-/// Iterator for the char (representing *Unicode Scalar Values*) of a string
-///
-/// Created with the method `.chars()`.
-#[deriving(Clone, Copy)]
-pub struct Chars<'a> {
- iter: slice::Iter<'a, u8>
-}
-
-// Return the initial codepoint accumulator for the first byte.
-// The first byte is special, only want bottom 5 bits for width 2, 4 bits
-// for width 3, and 3 bits for width 4
-macro_rules! utf8_first_byte {
- ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as u32)
-}
-
-// return the value of $ch updated with continuation byte $byte
-macro_rules! utf8_acc_cont_byte {
- ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & CONT_MASK) as u32)
-}
-
-macro_rules! utf8_is_cont_byte {
- ($byte:expr) => (($byte & !CONT_MASK) == TAG_CONT_U8)
-}
-
-#[inline]
-fn unwrap_or_0(opt: Option<&u8>) -> u8 {
- match opt {
- Some(&byte) => byte,
- None => 0,
- }
-}
-
-impl<'a> Iterator<char> for Chars<'a> {
- #[inline]
- fn next(&mut self) -> Option<char> {
- // Decode UTF-8, using the valid UTF-8 invariant
- let x = match self.iter.next() {
- None => return None,
- Some(&next_byte) if next_byte < 128 => return Some(next_byte as char),
- Some(&next_byte) => next_byte,
- };
-
- // Multibyte case follows
- // Decode from a byte combination out of: [[[x y] z] w]
- // NOTE: Performance is sensitive to the exact formulation here
- let init = utf8_first_byte!(x, 2);
- let y = unwrap_or_0(self.iter.next());
- let mut ch = utf8_acc_cont_byte!(init, y);
- if x >= 0xE0 {
- // [[x y z] w] case
- // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
- let z = unwrap_or_0(self.iter.next());
- let y_z = utf8_acc_cont_byte!((y & CONT_MASK) as u32, z);
- ch = init << 12 | y_z;
- if x >= 0xF0 {
- // [x y z w] case
- // use only the lower 3 bits of `init`
- let w = unwrap_or_0(self.iter.next());
- ch = (init & 7) << 18 | utf8_acc_cont_byte!(y_z, w);
- }
- }
-
- // str invariant says `ch` is a valid Unicode Scalar Value
- unsafe {
- Some(mem::transmute(ch))
- }
- }
-
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) {
- let (len, _) = self.iter.size_hint();
- (len.saturating_add(3) / 4, Some(len))
- }
-}
-
-impl<'a> DoubleEndedIterator<char> for Chars<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<char> {
- let w = match self.iter.next_back() {
- None => return None,
- Some(&back_byte) if back_byte < 128 => return Some(back_byte as char),
- Some(&back_byte) => back_byte,
- };
-
- // Multibyte case follows
- // Decode from a byte combination out of: [x [y [z w]]]
- let mut ch;
- let z = unwrap_or_0(self.iter.next_back());
- ch = utf8_first_byte!(z, 2);
- if utf8_is_cont_byte!(z) {
- let y = unwrap_or_0(self.iter.next_back());
- ch = utf8_first_byte!(y, 3);
- if utf8_is_cont_byte!(y) {
- let x = unwrap_or_0(self.iter.next_back());
- ch = utf8_first_byte!(x, 4);
- ch = utf8_acc_cont_byte!(ch, y);
- }
- ch = utf8_acc_cont_byte!(ch, z);
- }
- ch = utf8_acc_cont_byte!(ch, w);
-
- // str invariant says `ch` is a valid Unicode Scalar Value
- unsafe {
- Some(mem::transmute(ch))
- }
- }
-}
-
-/// External iterator for a string's characters and their byte offsets.
-/// Use with the `std::iter` module.
-#[deriving(Clone)]
-pub struct CharIndices<'a> {
- front_offset: uint,
- iter: Chars<'a>,
-}
-
-impl<'a> Iterator<(uint, char)> for CharIndices<'a> {
- #[inline]
- fn next(&mut self) -> Option<(uint, char)> {
- let (pre_len, _) = self.iter.iter.size_hint();
- match self.iter.next() {
- None => None,
- Some(ch) => {
- let index = self.front_offset;
- let (len, _) = self.iter.iter.size_hint();
- self.front_offset += pre_len - len;
- Some((index, ch))
- }
- }
- }
-
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) {
- self.iter.size_hint()
- }
-}
-
-impl<'a> DoubleEndedIterator<(uint, char)> for CharIndices<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<(uint, char)> {
- match self.iter.next_back() {
- None => None,
- Some(ch) => {
- let (len, _) = self.iter.iter.size_hint();
- let index = self.front_offset + len;
- Some((index, ch))
- }
- }
- }
-}
-
-/// External iterator for a string's bytes.
-/// Use with the `std::iter` module.
-#[stable]
-#[deriving(Clone)]
-pub struct Bytes<'a> {
- inner: Map<&'a u8, u8, slice::Iter<'a, u8>, BytesFn>,
-}
-
-/// A temporary new type wrapper that ensures that the `Bytes` iterator
-/// is cloneable.
-#[deriving(Copy)]
-struct BytesFn(fn(&u8) -> u8);
-
-impl<'a> Fn(&'a u8) -> u8 for BytesFn {
- extern "rust-call" fn call(&self, (ptr,): (&'a u8,)) -> u8 {
- (self.0)(ptr)
- }
-}
-
-impl Clone for BytesFn {
- fn clone(&self) -> BytesFn { *self }
-}
-
-/// An iterator over the substrings of a string, separated by `sep`.
-#[deriving(Clone)]
-pub struct CharSplits<'a, Sep> {
- /// The slice remaining to be iterated
- string: &'a str,
- sep: Sep,
- /// Whether an empty string at the end is allowed
- allow_trailing_empty: bool,
- only_ascii: bool,
- finished: bool,
-}
-
-/// An iterator over the substrings of a string, separated by `sep`,
-/// splitting at most `count` times.
-#[deriving(Clone)]
-pub struct CharSplitsN<'a, Sep> {
- iter: CharSplits<'a, Sep>,
- /// The number of splits remaining
- count: uint,
- invert: bool,
-}
-
-/// An iterator over the lines of a string, separated by `\n`.
-#[stable]
-pub struct Lines<'a> {
- inner: CharSplits<'a, char>,
-}
-
-/// An iterator over the lines of a string, separated by either `\n` or (`\r\n`).
-#[stable]
-pub struct LinesAny<'a> {
- inner: Map<&'a str, &'a str, Lines<'a>, fn(&str) -> &str>,
-}
-
-impl<'a, Sep> CharSplits<'a, Sep> {
- #[inline]
- fn get_end(&mut self) -> Option<&'a str> {
- if !self.finished && (self.allow_trailing_empty || self.string.len() > 0) {
- self.finished = true;
- Some(self.string)
- } else {
- None
- }
- }
-}
-
-impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplits<'a, Sep> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> {
- if self.finished { return None }
-
- let mut next_split = None;
- if self.only_ascii {
- for (idx, byte) in self.string.bytes().enumerate() {
- if self.sep.matches(byte as char) && byte < 128u8 {
- next_split = Some((idx, idx + 1));
- break;
- }
- }
- } else {
- for (idx, ch) in self.string.char_indices() {
- if self.sep.matches(ch) {
- next_split = Some((idx, self.string.char_range_at(idx).next));
- break;
- }
- }
- }
- match next_split {
- Some((a, b)) => unsafe {
- let elt = self.string.slice_unchecked(0, a);
- self.string = self.string.slice_unchecked(b, self.string.len());
- Some(elt)
- },
- None => self.get_end(),
- }
- }
-}
-
-impl<'a, Sep: CharEq> DoubleEndedIterator<&'a str>
-for CharSplits<'a, Sep> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a str> {
- if self.finished { return None }
-
- if !self.allow_trailing_empty {
- self.allow_trailing_empty = true;
- match self.next_back() {
- Some(elt) if !elt.is_empty() => return Some(elt),
- _ => if self.finished { return None }
- }
- }
- let len = self.string.len();
- let mut next_split = None;
-
- if self.only_ascii {
- for (idx, byte) in self.string.bytes().enumerate().rev() {
- if self.sep.matches(byte as char) && byte < 128u8 {
- next_split = Some((idx, idx + 1));
- break;
- }
- }
- } else {
- for (idx, ch) in self.string.char_indices().rev() {
- if self.sep.matches(ch) {
- next_split = Some((idx, self.string.char_range_at(idx).next));
- break;
- }
- }
- }
- match next_split {
- Some((a, b)) => unsafe {
- let elt = self.string.slice_unchecked(b, len);
- self.string = self.string.slice_unchecked(0, a);
- Some(elt)
- },
- None => { self.finished = true; Some(self.string) }
- }
- }
-}
-
-impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplitsN<'a, Sep> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> {
- if self.count != 0 {
- self.count -= 1;
- if self.invert { self.iter.next_back() } else { self.iter.next() }
- } else {
- self.iter.get_end()
- }
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using naive search
-#[deriving(Clone)]
-struct NaiveSearcher {
- position: uint
-}
-
-impl NaiveSearcher {
- fn new() -> NaiveSearcher {
- NaiveSearcher { position: 0 }
- }
-
- fn next(&mut self, haystack: &[u8], needle: &[u8]) -> Option<(uint, uint)> {
- while self.position + needle.len() <= haystack.len() {
- if haystack[self.position .. self.position + needle.len()] == needle {
- let match_pos = self.position;
- self.position += needle.len(); // add 1 for all matches
- return Some((match_pos, match_pos + needle.len()));
- } else {
- self.position += 1;
- }
- }
- None
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using two-way search
-#[deriving(Clone)]
-struct TwoWaySearcher {
- // constants
- crit_pos: uint,
- period: uint,
- byteset: u64,
-
- // variables
- position: uint,
- memory: uint
-}
-
-/*
- This is the Two-Way search algorithm, which was introduced in the paper:
- Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
-
- Here's some background information.
-
- A *word* is a string of symbols. The *length* of a word should be a familiar
- notion, and here we denote it for any word x by |x|.
- (We also allow for the possibility of the *empty word*, a word of length zero).
-
- If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
- *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
- For example, both 1 and 2 are periods for the string "aa". As another example,
- the only period of the string "abcd" is 4.
-
- We denote by period(x) the *smallest* period of x (provided that x is non-empty).
- This is always well-defined since every non-empty word x has at least one period,
- |x|. We sometimes call this *the period* of x.
-
- If u, v and x are words such that x = uv, where uv is the concatenation of u and
- v, then we say that (u, v) is a *factorization* of x.
-
- Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
- that both of the following hold
-
- - either w is a suffix of u or u is a suffix of w
- - either w is a prefix of v or v is a prefix of w
-
- then w is said to be a *repetition* for the factorization (u, v).
-
- Just to unpack this, there are four possibilities here. Let w = "abc". Then we
- might have:
-
- - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
- - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
- - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
- - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
-
- Note that the word vu is a repetition for any factorization (u,v) of x = uv,
- so every factorization has at least one repetition.
-
- If x is a string and (u, v) is a factorization for x, then a *local period* for
- (u, v) is an integer r such that there is some word w such that |w| = r and w is
- a repetition for (u, v).
-
- We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
- call this *the local period* of (u, v). Provided that x = uv is non-empty, this
- is well-defined (because each non-empty word has at least one factorization, as
- noted above).
-
- It can be proven that the following is an equivalent definition of a local period
- for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
- all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
- defined. (i.e. i > 0 and i + r < |x|).
-
- Using the above reformulation, it is easy to prove that
-
- 1 <= local_period(u, v) <= period(uv)
-
- A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
- *critical factorization*.
-
- The algorithm hinges on the following theorem, which is stated without proof:
-
- **Critical Factorization Theorem** Any word x has at least one critical
- factorization (u, v) such that |u| < period(x).
-
- The purpose of maximal_suffix is to find such a critical factorization.
-
-*/
-impl TwoWaySearcher {
- fn new(needle: &[u8]) -> TwoWaySearcher {
- let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
- let (crit_pos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
-
- let crit_pos;
- let period;
- if crit_pos1 > crit_pos2 {
- crit_pos = crit_pos1;
- period = period1;
- } else {
- crit_pos = crit_pos2;
- period = period2;
- }
-
- // This isn't in the original algorithm, as far as I'm aware.
- let byteset = needle.iter()
- .fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
-
- // A particularly readable explanation of what's going on here can be found
- // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
- // see the code for "Algorithm CP" on p. 323.
- //
- // What's going on is we have some critical factorization (u, v) of the
- // needle, and we want to determine whether u is a suffix of
- // v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
- // "Algorithm CP2", which is optimized for when the period of the needle
- // is large.
- if needle[..crit_pos] == needle[period.. period + crit_pos] {
- TwoWaySearcher {
- crit_pos: crit_pos,
- period: period,
- byteset: byteset,
-
- position: 0,
- memory: 0
- }
- } else {
- TwoWaySearcher {
- crit_pos: crit_pos,
- period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
- byteset: byteset,
-
- position: 0,
- memory: uint::MAX // Dummy value to signify that the period is long
- }
- }
- }
-
- // One of the main ideas of Two-Way is that we factorize the needle into
- // two halves, (u, v), and begin trying to find v in the haystack by scanning
- // left to right. If v matches, we try to match u by scanning right to left.
- // How far we can jump when we encounter a mismatch is all based on the fact
- // that (u, v) is a critical factorization for the needle.
- #[inline]
- fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
- 'search: loop {
- // Check that we have room to search in
- if self.position + needle.len() > haystack.len() {
- return None;
- }
-
- // Quickly skip by large portions unrelated to our substring
- if (self.byteset >>
- ((haystack[self.position + needle.len() - 1] & 0x3f)
- as uint)) & 1 == 0 {
- self.position += needle.len();
- if !long_period {
- self.memory = 0;
- }
- continue 'search;
- }
-
- // See if the right part of the needle matches
- let start = if long_period { self.crit_pos }
- else { cmp::max(self.crit_pos, self.memory) };
- for i in range(start, needle.len()) {
- if needle[i] != haystack[self.position + i] {
- self.position += i - self.crit_pos + 1;
- if !long_period {
- self.memory = 0;
- }
- continue 'search;
- }
- }
-
- // See if the left part of the needle matches
- let start = if long_period { 0 } else { self.memory };
- for i in range(start, self.crit_pos).rev() {
- if needle[i] != haystack[self.position + i] {
- self.position += self.period;
- if !long_period {
- self.memory = needle.len() - self.period;
- }
- continue 'search;
- }
- }
-
- // We have found a match!
- let match_pos = self.position;
- self.position += needle.len(); // add self.period for all matches
- if !long_period {
- self.memory = 0; // set to needle.len() - self.period for all matches
- }
- return Some((match_pos, match_pos + needle.len()));
- }
- }
-
- // Computes a critical factorization (u, v) of `arr`.
- // Specifically, returns (i, p), where i is the starting index of v in some
- // critical factorization (u, v) and p = period(v)
- #[inline]
- fn maximal_suffix(arr: &[u8], reversed: bool) -> (uint, uint) {
- let mut left = -1; // Corresponds to i in the paper
- let mut right = 0; // Corresponds to j in the paper
- let mut offset = 1; // Corresponds to k in the paper
- let mut period = 1; // Corresponds to p in the paper
-
- while right + offset < arr.len() {
- let a;
- let b;
- if reversed {
- a = arr[left + offset];
- b = arr[right + offset];
- } else {
- a = arr[right + offset];
- b = arr[left + offset];
- }
- if a < b {
- // Suffix is smaller, period is entire prefix so far.
- right += offset;
- offset = 1;
- period = right - left;
- } else if a == b {
- // Advance through repetition of the current period.
- if offset == period {
- right += offset;
- offset = 1;
- } else {
- offset += 1;
- }
- } else {
- // Suffix is larger, start over from current location.
- left = right;
- right += 1;
- offset = 1;
- period = 1;
- }
- }
- (left + 1, period)
- }
-}
-
-/// The internal state of an iterator that searches for matches of a substring
-/// within a larger string using a dynamically chosen search algorithm
-#[deriving(Clone)]
-enum Searcher {
- Naive(NaiveSearcher),
- TwoWay(TwoWaySearcher),
- TwoWayLong(TwoWaySearcher)
-}
-
-impl Searcher {
- fn new(haystack: &[u8], needle: &[u8]) -> Searcher {
- // FIXME: Tune this.
- // FIXME(#16715): This unsigned integer addition will probably not
- // overflow because that would mean that the memory almost solely
- // consists of the needle. Needs #16715 to be formally fixed.
- if needle.len() + 20 > haystack.len() {
- Naive(NaiveSearcher::new())
- } else {
- let searcher = TwoWaySearcher::new(needle);
- if searcher.memory == uint::MAX { // If the period is long
- TwoWayLong(searcher)
- } else {
- TwoWay(searcher)
- }
- }
- }
-}
-
-/// An iterator over the start and end indices of the matches of a
-/// substring within a larger string
-#[deriving(Clone)]
-pub struct MatchIndices<'a> {
- // constants
- haystack: &'a str,
- needle: &'a str,
- searcher: Searcher
-}
-
-/// An iterator over the substrings of a string separated by a given
-/// search string
-#[deriving(Clone)]
-pub struct StrSplits<'a> {
- it: MatchIndices<'a>,
- last_end: uint,
- finished: bool
-}
-
-impl<'a> Iterator<(uint, uint)> for MatchIndices<'a> {
- #[inline]
- fn next(&mut self) -> Option<(uint, uint)> {
- match self.searcher {
- Naive(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes()),
- TwoWay(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), false),
- TwoWayLong(ref mut searcher)
- => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), true)
- }
- }
-}
-
-impl<'a> Iterator<&'a str> for StrSplits<'a> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> {
- if self.finished { return None; }
-
- match self.it.next() {
- Some((from, to)) => {
- let ret = Some(self.it.haystack.slice(self.last_end, from));
- self.last_end = to;
- ret
- }
- None => {
- self.finished = true;
- Some(self.it.haystack.slice(self.last_end, self.it.haystack.len()))
- }
- }
- }
-}
-
-/*
-Section: Comparing strings
-*/
-
-// share the implementation of the lang-item vs. non-lang-item
-// eq_slice.
-/// NOTE: This function is (ab)used in rustc::middle::trans::_match
-/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
-#[inline]
-fn eq_slice_(a: &str, b: &str) -> bool {
- #[allow(improper_ctypes)]
- extern { fn memcmp(s1: *const i8, s2: *const i8, n: uint) -> i32; }
- a.len() == b.len() && unsafe {
- memcmp(a.as_ptr() as *const i8,
- b.as_ptr() as *const i8,
- a.len()) == 0
- }
-}
-
-/// Bytewise slice equality
-/// NOTE: This function is (ab)used in rustc::middle::trans::_match
-/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
-#[lang="str_eq"]
-#[inline]
-fn eq_slice(a: &str, b: &str) -> bool {
- eq_slice_(a, b)
-}
-
-/*
-Section: Misc
-*/
-
-/// Walk through `iter` checking that it's a valid UTF-8 sequence,
-/// returning `true` in that case, or, if it is invalid, `false` with
-/// `iter` reset such that it is pointing at the first byte in the
-/// invalid sequence.
-#[inline(always)]
-fn run_utf8_validation_iterator(iter: &mut slice::Iter<u8>)
- -> Result<(), Utf8Error> {
- let whole = iter.as_slice();
- loop {
- // save the current thing we're pointing at.
- let old = *iter;
-
- // restore the iterator we had at the start of this codepoint.
- macro_rules! err (() => { {
- *iter = old;
- return Err(Utf8Error::InvalidByte(whole.len() - iter.as_slice().len()))
- } });
- macro_rules! next ( () => {
- match iter.next() {
- Some(a) => *a,
- // we needed data, but there was none: error!
- None => return Err(Utf8Error::TooShort),
- }
- });
-
- let first = match iter.next() {
- Some(&b) => b,
- // we're at the end of the iterator and a codepoint
- // boundary at the same time, so this string is valid.
- None => return Ok(())
- };
-
- // ASCII characters are always valid, so only large
- // bytes need more examination.
- if first >= 128 {
- let w = UTF8_CHAR_WIDTH[first as uint] as uint;
- let second = next!();
- // 2-byte encoding is for codepoints \u{0080} to \u{07ff}
- // first C2 80 last DF BF
- // 3-byte encoding is for codepoints \u{0800} to \u{ffff}
- // first E0 A0 80 last EF BF BF
- // excluding surrogates codepoints \u{d800} to \u{dfff}
- // ED A0 80 to ED BF BF
- // 4-byte encoding is for codepoints \u{1000}0 to \u{10ff}ff
- // first F0 90 80 80 last F4 8F BF BF
- //
- // Use the UTF-8 syntax from the RFC
- //
- // https://tools.ietf.org/html/rfc3629
- // UTF8-1 = %x00-7F
- // UTF8-2 = %xC2-DF UTF8-tail
- // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
- // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
- // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
- // %xF4 %x80-8F 2( UTF8-tail )
- match w {
- 2 => if second & !CONT_MASK != TAG_CONT_U8 {err!()},
- 3 => {
- match (first, second, next!() & !CONT_MASK) {
- (0xE0 , 0xA0 ... 0xBF, TAG_CONT_U8) |
- (0xE1 ... 0xEC, 0x80 ... 0xBF, TAG_CONT_U8) |
- (0xED , 0x80 ... 0x9F, TAG_CONT_U8) |
- (0xEE ... 0xEF, 0x80 ... 0xBF, TAG_CONT_U8) => {}
- _ => err!()
- }
- }
- 4 => {
- match (first, second, next!() & !CONT_MASK, next!() & !CONT_MASK) {
- (0xF0 , 0x90 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
- (0xF1 ... 0xF3, 0x80 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
- (0xF4 , 0x80 ... 0x8F, TAG_CONT_U8, TAG_CONT_U8) => {}
- _ => err!()
- }
- }
- _ => err!()
- }
- }
- }
-}
-
-/// Determines if a vector of bytes contains valid UTF-8.
-#[deprecated = "call from_utf8 instead"]
-pub fn is_utf8(v: &[u8]) -> bool {
- run_utf8_validation_iterator(&mut v.iter()).is_ok()
-}
-
-/// Deprecated function
-#[deprecated = "this function will be removed"]
-pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
- match v.iter().position(|c| *c == 0) {
- // don't include the 0
- Some(i) => v[..i],
- None => v
- }
-}
-
-// https://tools.ietf.org/html/rfc3629
-static UTF8_CHAR_WIDTH: [u8, ..256] = [
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
-0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
-0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
-2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
-3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
-4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
-];
-
-/// Given a first byte, determine how many bytes are in this UTF-8 character
-#[inline]
-#[deprecated = "this function has moved to libunicode"]
-pub fn utf8_char_width(b: u8) -> uint {
- return UTF8_CHAR_WIDTH[b as uint] as uint;
-}
-
-/// Struct that contains a `char` and the index of the first byte of
-/// the next `char` in a string. This can be used as a data structure
-/// for iterating over the UTF-8 bytes of a string.
-#[deriving(Copy)]
-#[unstable = "naming is uncertain with container conventions"]
-pub struct CharRange {
- /// Current `char`
- pub ch: char,
- /// Index of the first byte of the next `char`
- pub next: uint,
-}
-
-/// Mask of the value bits of a continuation byte
-const CONT_MASK: u8 = 0b0011_1111u8;
-/// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte
-const TAG_CONT_U8: u8 = 0b1000_0000u8;
-
-/// Unsafe operations
-#[deprecated]
-pub mod raw {
- use ptr::RawPtr;
- use raw::Slice;
- use slice::SliceExt;
- use str::StrExt;
-
- /// Converts a slice of bytes to a string slice without checking
- /// that the string contains valid UTF-8.
- #[deprecated = "renamed to str::from_utf8_unchecked"]
- pub unsafe fn from_utf8<'a>(v: &'a [u8]) -> &'a str {
- super::from_utf8_unchecked(v)
- }
-
- /// Form a slice from a C string. Unsafe because the caller must ensure the
- /// C string has the static lifetime, or else the return value may be
- /// invalidated later.
- #[deprecated = "renamed to str::from_c_str"]
- pub unsafe fn c_str_to_static_slice(s: *const i8) -> &'static str {
- let s = s as *const u8;
- let mut curr = s;
- let mut len = 0u;
- while *curr != 0u8 {
- len += 1u;
- curr = s.offset(len as int);
- }
- let v = Slice { data: s, len: len };
- super::from_utf8(::mem::transmute(v)).unwrap()
- }
-
- /// Takes a bytewise (not UTF-8) slice from a string.
- ///
- /// Returns the substring from [`begin`..`end`).
- ///
- /// # Panics
- ///
- /// If begin is greater than end.
- /// If end is greater than the length of the string.
- #[inline]
- #[deprecated = "call the slice_unchecked method instead"]
- pub unsafe fn slice_bytes<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
- assert!(begin <= end);
- assert!(end <= s.len());
- s.slice_unchecked(begin, end)
- }
-
- /// Takes a bytewise (not UTF-8) slice from a string.
- ///
- /// Returns the substring from [`begin`..`end`).
- ///
- /// Caller must check slice boundaries!
- #[inline]
- #[deprecated = "this has moved to a method on `str` directly"]
- pub unsafe fn slice_unchecked<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
- s.slice_unchecked(begin, end)
- }
-}
-
-/*
-Section: Trait implementations
-*/
-
-#[allow(missing_docs)]
-pub mod traits {
- use cmp::{Ordering, Ord, PartialEq, PartialOrd, Equiv, Eq};
- use cmp::Ordering::{Less, Equal, Greater};
- use iter::IteratorExt;
- use option::Option;
- use option::Option::Some;
- use ops;
- use str::{Str, StrExt, eq_slice};
-
- impl Ord for str {
- #[inline]
- fn cmp(&self, other: &str) -> Ordering {
- for (s_b, o_b) in self.bytes().zip(other.bytes()) {
- match s_b.cmp(&o_b) {
- Greater => return Greater,
- Less => return Less,
- Equal => ()
- }
- }
-
- self.len().cmp(&other.len())
- }
- }
-
- impl PartialEq for str {
- #[inline]
- fn eq(&self, other: &str) -> bool {
- eq_slice(self, other)
- }
- #[inline]
- fn ne(&self, other: &str) -> bool { !(*self).eq(other) }
- }
-
- impl Eq for str {}
-
- impl PartialOrd for str {
- #[inline]
- fn partial_cmp(&self, other: &str) -> Option<Ordering> {
- Some(self.cmp(other))
- }
- }
-
- #[allow(deprecated)]
- #[deprecated = "Use overloaded `core::cmp::PartialEq`"]
- impl<S: Str> Equiv<S> for str {
- #[inline]
- fn equiv(&self, other: &S) -> bool { eq_slice(self, other.as_slice()) }
- }
-
- impl ops::Slice<uint, str> for str {
- #[inline]
- fn as_slice_<'a>(&'a self) -> &'a str {
- self
- }
-
- #[inline]
- fn slice_from_or_fail<'a>(&'a self, from: &uint) -> &'a str {
- self.slice_from(*from)
- }
-
- #[inline]
- fn slice_to_or_fail<'a>(&'a self, to: &uint) -> &'a str {
- self.slice_to(*to)
- }
-
- #[inline]
- fn slice_or_fail<'a>(&'a self, from: &uint, to: &uint) -> &'a str {
- self.slice(*from, *to)
- }
- }
-}
-
-/// Any string that can be represented as a slice
-#[unstable = "Instead of taking this bound generically, this trait will be \
- replaced with one of slicing syntax, deref coercions, or \
- a more generic conversion trait"]
-pub trait Str for Sized? {
- /// Work with `self` as a slice.
- fn as_slice<'a>(&'a self) -> &'a str;
-}
-
-#[allow(deprecated)]
-impl Str for str {
- #[inline]
- fn as_slice<'a>(&'a self) -> &'a str { self }
-}
-
-#[allow(deprecated)]
-impl<'a, Sized? S> Str for &'a S where S: Str {
- #[inline]
- fn as_slice(&self) -> &str { Str::as_slice(*self) }
-}
-
-/// Methods for string slices
-#[allow(missing_docs)]
-pub trait StrExt for Sized? {
- // NB there are no docs here are they're all located on the StrExt trait in
- // libcollections, not here.
-
- fn contains(&self, needle: &str) -> bool;
- fn contains_char(&self, needle: char) -> bool;
- fn chars<'a>(&'a self) -> Chars<'a>;
- fn bytes<'a>(&'a self) -> Bytes<'a>;
- fn char_indices<'a>(&'a self) -> CharIndices<'a>;
- fn split<'a, Sep: CharEq>(&'a self, sep: Sep) -> CharSplits<'a, Sep>;
- fn splitn<'a, Sep: CharEq>(&'a self, count: uint, sep: Sep) -> CharSplitsN<'a, Sep>;
- fn split_terminator<'a, Sep: CharEq>(&'a self, sep: Sep) -> CharSplits<'a, Sep>;
- fn rsplitn<'a, Sep: CharEq>(&'a self, count: uint, sep: Sep) -> CharSplitsN<'a, Sep>;
- fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a>;
- fn split_str<'a>(&'a self, &'a str) -> StrSplits<'a>;
- fn lines<'a>(&'a self) -> Lines<'a>;
- fn lines_any<'a>(&'a self) -> LinesAny<'a>;
- fn char_len(&self) -> uint;
- fn slice<'a>(&'a self, begin: uint, end: uint) -> &'a str;
- fn slice_from<'a>(&'a self, begin: uint) -> &'a str;
- fn slice_to<'a>(&'a self, end: uint) -> &'a str;
- fn slice_chars<'a>(&'a self, begin: uint, end: uint) -> &'a str;
- unsafe fn slice_unchecked<'a>(&'a self, begin: uint, end: uint) -> &'a str;
- fn starts_with(&self, needle: &str) -> bool;
- fn ends_with(&self, needle: &str) -> bool;
- fn trim_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
- fn trim_left_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
- fn trim_right_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
- fn is_char_boundary(&self, index: uint) -> bool;
- fn char_range_at(&self, start: uint) -> CharRange;
- fn char_range_at_reverse(&self, start: uint) -> CharRange;
- fn char_at(&self, i: uint) -> char;
- fn char_at_reverse(&self, i: uint) -> char;
- fn as_bytes<'a>(&'a self) -> &'a [u8];
- fn find<C: CharEq>(&self, search: C) -> Option<uint>;
- fn rfind<C: CharEq>(&self, search: C) -> Option<uint>;
- fn find_str(&self, &str) -> Option<uint>;
- fn slice_shift_char<'a>(&'a self) -> Option<(char, &'a str)>;
- fn subslice_offset(&self, inner: &str) -> uint;
- fn as_ptr(&self) -> *const u8;
- fn len(&self) -> uint;
- fn is_empty(&self) -> bool;
-}
-
-#[inline(never)]
-fn slice_error_fail(s: &str, begin: uint, end: uint) -> ! {
- assert!(begin <= end);
- panic!("index {} and/or {} in `{}` do not lie on character boundary",
- begin, end, s);
-}
-
-impl StrExt for str {
- #[inline]
- fn contains(&self, needle: &str) -> bool {
- self.find_str(needle).is_some()
- }
-
- #[inline]
- fn contains_char(&self, needle: char) -> bool {
- self.find(needle).is_some()
- }
-
- #[inline]
- fn chars(&self) -> Chars {
- Chars{iter: self.as_bytes().iter()}
- }
-
- #[inline]
- fn bytes(&self) -> Bytes {
- fn deref(&x: &u8) -> u8 { x }
-
- Bytes { inner: self.as_bytes().iter().map(BytesFn(deref)) }
- }
-
- #[inline]
- fn char_indices(&self) -> CharIndices {
- CharIndices { front_offset: 0, iter: self.chars() }
- }
-
- #[inline]
- fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
- CharSplits {
- string: self,
- only_ascii: sep.only_ascii(),
- sep: sep,
- allow_trailing_empty: true,
- finished: false,
- }
- }
-
- #[inline]
- fn splitn<Sep: CharEq>(&self, count: uint, sep: Sep)
- -> CharSplitsN<Sep> {
- CharSplitsN {
- iter: self.split(sep),
- count: count,
- invert: false,
- }
- }
-
- #[inline]
- fn split_terminator<Sep: CharEq>(&self, sep: Sep)
- -> CharSplits<Sep> {
- CharSplits {
- allow_trailing_empty: false,
- ..self.split(sep)
- }
- }
-
- #[inline]
- fn rsplitn<Sep: CharEq>(&self, count: uint, sep: Sep)
- -> CharSplitsN<Sep> {
- CharSplitsN {
- iter: self.split(sep),
- count: count,
- invert: true,
- }
- }
-
- #[inline]
- fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
- assert!(!sep.is_empty());
- MatchIndices {
- haystack: self,
- needle: sep,
- searcher: Searcher::new(self.as_bytes(), sep.as_bytes())
- }
- }
-
- #[inline]
- fn split_str<'a>(&'a self, sep: &'a str) -> StrSplits<'a> {
- StrSplits {
- it: self.match_indices(sep),
- last_end: 0,
- finished: false
- }
- }
-
- #[inline]
- fn lines(&self) -> Lines {
- Lines { inner: self.split_terminator('\n') }
- }
-
- fn lines_any(&self) -> LinesAny {
- fn f(line: &str) -> &str {
- let l = line.len();
- if l > 0 && line.as_bytes()[l - 1] == b'\r' { line.slice(0, l - 1) }
- else { line }
- }
-
- let f: fn(&str) -> &str = f; // coerce to fn pointer
- LinesAny { inner: self.lines().map(f) }
- }
-
- #[inline]
- fn char_len(&self) -> uint { self.chars().count() }
-
- #[inline]
- fn slice(&self, begin: uint, end: uint) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if begin <= end &&
- self.is_char_boundary(begin) &&
- self.is_char_boundary(end) {
- unsafe { self.slice_unchecked(begin, end) }
- } else {
- slice_error_fail(self, begin, end)
- }
- }
-
- #[inline]
- fn slice_from(&self, begin: uint) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(begin) {
- unsafe { self.slice_unchecked(begin, self.len()) }
- } else {
- slice_error_fail(self, begin, self.len())
- }
- }
-
- #[inline]
- fn slice_to(&self, end: uint) -> &str {
- // is_char_boundary checks that the index is in [0, .len()]
- if self.is_char_boundary(end) {
- unsafe { self.slice_unchecked(0, end) }
- } else {
- slice_error_fail(self, 0, end)
- }
- }
-
- fn slice_chars(&self, begin: uint, end: uint) -> &str {
- assert!(begin <= end);
- let mut count = 0;
- let mut begin_byte = None;
- let mut end_byte = None;
-
- // This could be even more efficient by not decoding,
- // only finding the char boundaries
- for (idx, _) in self.char_indices() {
- if count == begin { begin_byte = Some(idx); }
- if count == end { end_byte = Some(idx); break; }
- count += 1;
- }
- if begin_byte.is_none() && count == begin { begin_byte = Some(self.len()) }
- if end_byte.is_none() && count == end { end_byte = Some(self.len()) }
-
- match (begin_byte, end_byte) {
- (None, _) => panic!("slice_chars: `begin` is beyond end of string"),
- (_, None) => panic!("slice_chars: `end` is beyond end of string"),
- (Some(a), Some(b)) => unsafe { self.slice_unchecked(a, b) }
- }
- }
-
- #[inline]
- unsafe fn slice_unchecked(&self, begin: uint, end: uint) -> &str {
- mem::transmute(Slice {
- data: self.as_ptr().offset(begin as int),
- len: end - begin,
- })
- }
-
- #[inline]
- fn starts_with(&self, needle: &str) -> bool {
- let n = needle.len();
- self.len() >= n && needle.as_bytes() == self.as_bytes()[..n]
- }
-
- #[inline]
- fn ends_with(&self, needle: &str) -> bool {
- let (m, n) = (self.len(), needle.len());
- m >= n && needle.as_bytes() == self.as_bytes()[m-n..]
- }
-
- #[inline]
- fn trim_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
- let cur = match self.find(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(i) => unsafe { self.slice_unchecked(i, self.len()) }
- };
- match cur.rfind(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(i) => {
- let right = cur.char_range_at(i).next;
- unsafe { cur.slice_unchecked(0, right) }
- }
- }
- }
-
- #[inline]
- fn trim_left_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
- match self.find(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(first) => unsafe { self.slice_unchecked(first, self.len()) }
- }
- }
-
- #[inline]
- fn trim_right_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
- match self.rfind(|&mut: c: char| !to_trim.matches(c)) {
- None => "",
- Some(last) => {
- let next = self.char_range_at(last).next;
- unsafe { self.slice_unchecked(0u, next) }
- }
- }
- }
-
- #[inline]
- fn is_char_boundary(&self, index: uint) -> bool {
- if index == self.len() { return true; }
- match self.as_bytes().get(index) {
- None => false,
- Some(&b) => b < 128u8 || b >= 192u8,
- }
- }
-
- #[inline]
- fn char_range_at(&self, i: uint) -> CharRange {
- if self.as_bytes()[i] < 128u8 {
- return CharRange {ch: self.as_bytes()[i] as char, next: i + 1 };
- }
-
- // Multibyte case is a fn to allow char_range_at to inline cleanly
- fn multibyte_char_range_at(s: &str, i: uint) -> CharRange {
- let mut val = s.as_bytes()[i] as u32;
- let w = UTF8_CHAR_WIDTH[val as uint] as uint;
- assert!((w != 0));
-
- val = utf8_first_byte!(val, w);
- val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
-
- return CharRange {ch: unsafe { mem::transmute(val) }, next: i + w};
- }
-
- return multibyte_char_range_at(self, i);
- }
-
- #[inline]
- fn char_range_at_reverse(&self, start: uint) -> CharRange {
- let mut prev = start;
-
- prev = prev.saturating_sub(1);
- if self.as_bytes()[prev] < 128 {
- return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
- }
-
- // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
- fn multibyte_char_range_at_reverse(s: &str, mut i: uint) -> CharRange {
- // while there is a previous byte == 10......
- while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
- i -= 1u;
- }
-
- let mut val = s.as_bytes()[i] as u32;
- let w = UTF8_CHAR_WIDTH[val as uint] as uint;
- assert!((w != 0));
-
- val = utf8_first_byte!(val, w);
- val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
- if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
- if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
-
- return CharRange {ch: unsafe { mem::transmute(val) }, next: i};
- }
-
- return multibyte_char_range_at_reverse(self, prev);
- }
-
- #[inline]
- fn char_at(&self, i: uint) -> char {
- self.char_range_at(i).ch
- }
-
- #[inline]
- fn char_at_reverse(&self, i: uint) -> char {
- self.char_range_at_reverse(i).ch
- }
-
- #[inline]
- fn as_bytes(&self) -> &[u8] {
- unsafe { mem::transmute(self) }
- }
-
- fn find<C: CharEq>(&self, mut search: C) -> Option<uint> {
- if search.only_ascii() {
- self.bytes().position(|b| search.matches(b as char))
- } else {
- for (index, c) in self.char_indices() {
- if search.matches(c) { return Some(index); }
- }
- None
- }
- }
-
- fn rfind<C: CharEq>(&self, mut search: C) -> Option<uint> {
- if search.only_ascii() {
- self.bytes().rposition(|b| search.matches(b as char))
- } else {
- for (index, c) in self.char_indices().rev() {
- if search.matches(c) { return Some(index); }
- }
- None
- }
- }
-
- fn find_str(&self, needle: &str) -> Option<uint> {
- if needle.is_empty() {
- Some(0)
- } else {
- self.match_indices(needle)
- .next()
- .map(|(start, _end)| start)
- }
- }
-
- #[inline]
- fn slice_shift_char(&self) -> Option<(char, &str)> {
- if self.is_empty() {
- None
- } else {
- let CharRange {ch, next} = self.char_range_at(0u);
- let next_s = unsafe { self.slice_unchecked(next, self.len()) };
- Some((ch, next_s))
- }
- }
-
- fn subslice_offset(&self, inner: &str) -> uint {
- let a_start = self.as_ptr() as uint;
- let a_end = a_start + self.len();
- let b_start = inner.as_ptr() as uint;
- let b_end = b_start + inner.len();
-
- assert!(a_start <= b_start);
- assert!(b_end <= a_end);
- b_start - a_start
- }
-
- #[inline]
- fn as_ptr(&self) -> *const u8 {
- self.repr().data
- }
-
- #[inline]
- fn len(&self) -> uint { self.repr().len }
-
- #[inline]
- fn is_empty(&self) -> bool { self.len() == 0 }
-}
-
-#[stable]
-impl<'a> Default for &'a str {
- #[stable]
- fn default() -> &'a str { "" }
-}
-
-impl<'a> Iterator<&'a str> for Lines<'a> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> { self.inner.next() }
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
-}
-impl<'a> DoubleEndedIterator<&'a str> for Lines<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
-}
-impl<'a> Iterator<&'a str> for LinesAny<'a> {
- #[inline]
- fn next(&mut self) -> Option<&'a str> { self.inner.next() }
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
-}
-impl<'a> DoubleEndedIterator<&'a str> for LinesAny<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
-}
-impl<'a> Iterator<u8> for Bytes<'a> {
- #[inline]
- fn next(&mut self) -> Option<u8> { self.inner.next() }
- #[inline]
- fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
-}
-impl<'a> DoubleEndedIterator<u8> for Bytes<'a> {
- #[inline]
- fn next_back(&mut self) -> Option<u8> { self.inner.next_back() }
-}
-impl<'a> ExactSizeIterator<u8> for Bytes<'a> {}
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//
+// ignore-lexer-test FIXME #15679
+
+//! String manipulation
+//!
+//! For more details, see std::str
+
+#![doc(primitive = "str")]
+
+use self::Searcher::{Naive, TwoWay, TwoWayLong};
+
+use cmp::{mod, Eq};
+use default::Default;
+use iter::range;
+use iter::{DoubleEndedIteratorExt, ExactSizeIterator};
+use iter::{Map, Iterator, IteratorExt, DoubleEndedIterator};
+use kinds::Sized;
+use mem;
+use num::Int;
+use ops::{Fn, FnMut};
+use option::Option::{mod, None, Some};
+use ptr::PtrExt;
+use raw::{Repr, Slice};
+use result::Result::{mod, Ok, Err};
+use slice::{mod, SliceExt};
+use uint;
+
+macro_rules! delegate_iter {
+ (exact $te:ty in $ti:ty) => {
+ delegate_iter!{$te in $ti}
+ impl<'a> ExactSizeIterator<$te> for $ti {
+ #[inline]
+ fn rposition<P>(&mut self, predicate: P) -> Option<uint> where P: FnMut($te) -> bool{
+ self.0.rposition(predicate)
+ }
+ #[inline]
+ fn len(&self) -> uint {
+ self.0.len()
+ }
+ }
+ };
+ ($te:ty in $ti:ty) => {
+ impl<'a> Iterator<$te> for $ti {
+ #[inline]
+ fn next(&mut self) -> Option<$te> {
+ self.0.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.0.size_hint()
+ }
+ }
+ impl<'a> DoubleEndedIterator<$te> for $ti {
+ #[inline]
+ fn next_back(&mut self) -> Option<$te> {
+ self.0.next_back()
+ }
+ }
+ };
+ (pattern $te:ty in $ti:ty) => {
+ impl<'a, P: CharEq> Iterator<$te> for $ti {
+ #[inline]
+ fn next(&mut self) -> Option<$te> {
+ self.0.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.0.size_hint()
+ }
+ }
+ impl<'a, P: CharEq> DoubleEndedIterator<$te> for $ti {
+ #[inline]
+ fn next_back(&mut self) -> Option<$te> {
+ self.0.next_back()
+ }
+ }
+ };
+ (pattern forward $te:ty in $ti:ty) => {
+ impl<'a, P: CharEq> Iterator<$te> for $ti {
+ #[inline]
+ fn next(&mut self) -> Option<$te> {
+ self.0.next()
+ }
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.0.size_hint()
+ }
+ }
+ }
+}
+
+/// A trait to abstract the idea of creating a new instance of a type from a
+/// string.
+// FIXME(#17307): there should be an `E` associated type for a `Result` return
+#[unstable = "will return a Result once associated types are working"]
+pub trait FromStr {
+ /// Parses a string `s` to return an optional value of this type. If the
+ /// string is ill-formatted, the None is returned.
+ fn from_str(s: &str) -> Option<Self>;
+}
+
+/// A utility function that just calls FromStr::from_str
+#[deprecated = "call the .parse() method on the string instead"]
+pub fn from_str<A: FromStr>(s: &str) -> Option<A> {
+ FromStr::from_str(s)
+}
+
+impl FromStr for bool {
+ /// Parse a `bool` from a string.
+ ///
+ /// Yields an `Option<bool>`, because `s` may or may not actually be parseable.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// assert_eq!("true".parse(), Some(true));
+ /// assert_eq!("false".parse(), Some(false));
+ /// assert_eq!("not even a boolean".parse::<bool>(), None);
+ /// ```
+ #[inline]
+ fn from_str(s: &str) -> Option<bool> {
+ match s {
+ "true" => Some(true),
+ "false" => Some(false),
+ _ => None,
+ }
+ }
+}
+
+/*
+Section: Creating a string
+*/
+
+/// Errors which can occur when attempting to interpret a byte slice as a `str`.
+#[deriving(Copy, Eq, PartialEq, Clone)]
+pub enum Utf8Error {
+ /// An invalid byte was detected at the byte offset given.
+ ///
+ /// The offset is guaranteed to be in bounds of the slice in question, and
+ /// the byte at the specified offset was the first invalid byte in the
+ /// sequence detected.
+ InvalidByte(uint),
+
+ /// The byte slice was invalid because more bytes were needed but no more
+ /// bytes were available.
+ TooShort,
+}
+
+/// Converts a slice of bytes to a string slice without performing any
+/// allocations.
+///
+/// Once the slice has been validated as utf-8, it is transmuted in-place and
+/// returned as a '&str' instead of a '&[u8]'
+///
+/// # Failure
+///
+/// Returns `Err` if the slice is not utf-8 with a description as to why the
+/// provided slice is not utf-8.
+pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
+ try!(run_utf8_validation_iterator(&mut v.iter()));
+ Ok(unsafe { from_utf8_unchecked(v) })
+}
+
+/// Converts a slice of bytes to a string slice without checking
+/// that the string contains valid UTF-8.
+#[stable]
+pub unsafe fn from_utf8_unchecked<'a>(v: &'a [u8]) -> &'a str {
+ mem::transmute(v)
+}
+
+/// Constructs a static string slice from a given raw pointer.
+///
+/// This function will read memory starting at `s` until it finds a 0, and then
+/// transmute the memory up to that point as a string slice, returning the
+/// corresponding `&'static str` value.
+///
+/// This function is unsafe because the caller must ensure the C string itself
+/// has the static lifetime and that the memory `s` is valid up to and including
+/// the first null byte.
+///
+/// # Panics
+///
+/// This function will panic if the string pointed to by `s` is not valid UTF-8.
+#[unstable = "may change location based on the outcome of the c_str module"]
+pub unsafe fn from_c_str(s: *const i8) -> &'static str {
+ let s = s as *const u8;
+ let mut len = 0u;
+ while *s.offset(len as int) != 0 {
+ len += 1u;
+ }
+ let v: &'static [u8] = ::mem::transmute(Slice { data: s, len: len });
+ from_utf8(v).ok().expect("from_c_str passed invalid utf-8 data")
+}
+
+/// Something that can be used to compare against a character
+#[unstable = "definition may change as pattern-related methods are stabilized"]
+pub trait CharEq {
+ /// Determine if the splitter should split at the given character
+ fn matches(&mut self, char) -> bool;
+ /// Indicate if this is only concerned about ASCII characters,
+ /// which can allow for a faster implementation.
+ fn only_ascii(&self) -> bool;
+}
+
+impl CharEq for char {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool { *self == c }
+
+ #[inline]
+ fn only_ascii(&self) -> bool { (*self as uint) < 128 }
+}
+
+impl<F> CharEq for F where F: FnMut(char) -> bool {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool { (*self)(c) }
+
+ #[inline]
+ fn only_ascii(&self) -> bool { false }
+}
+
+impl<'a> CharEq for &'a [char] {
+ #[inline]
+ fn matches(&mut self, c: char) -> bool {
+ self.iter().any(|&mut m| m.matches(c))
+ }
+
+ #[inline]
+ fn only_ascii(&self) -> bool {
+ self.iter().all(|m| m.only_ascii())
+ }
+}
+
+/*
+Section: Iterators
+*/
+
+/// Iterator for the char (representing *Unicode Scalar Values*) of a string
+///
+/// Created with the method `.chars()`.
+#[deriving(Clone, Copy)]
+pub struct Chars<'a> {
+ iter: slice::Iter<'a, u8>
+}
+
+// Return the initial codepoint accumulator for the first byte.
+// The first byte is special, only want bottom 5 bits for width 2, 4 bits
+// for width 3, and 3 bits for width 4
+macro_rules! utf8_first_byte {
+ ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as u32)
+}
+
+// return the value of $ch updated with continuation byte $byte
+macro_rules! utf8_acc_cont_byte {
+ ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & CONT_MASK) as u32)
+}
+
+macro_rules! utf8_is_cont_byte {
+ ($byte:expr) => (($byte & !CONT_MASK) == TAG_CONT_U8)
+}
+
+#[inline]
+fn unwrap_or_0(opt: Option<&u8>) -> u8 {
+ match opt {
+ Some(&byte) => byte,
+ None => 0,
+ }
+}
+
+impl<'a> Iterator<char> for Chars<'a> {
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ // Decode UTF-8, using the valid UTF-8 invariant
+ let x = match self.iter.next() {
+ None => return None,
+ Some(&next_byte) if next_byte < 128 => return Some(next_byte as char),
+ Some(&next_byte) => next_byte,
+ };
+
+ // Multibyte case follows
+ // Decode from a byte combination out of: [[[x y] z] w]
+ // NOTE: Performance is sensitive to the exact formulation here
+ let init = utf8_first_byte!(x, 2);
+ let y = unwrap_or_0(self.iter.next());
+ let mut ch = utf8_acc_cont_byte!(init, y);
+ if x >= 0xE0 {
+ // [[x y z] w] case
+ // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
+ let z = unwrap_or_0(self.iter.next());
+ let y_z = utf8_acc_cont_byte!((y & CONT_MASK) as u32, z);
+ ch = init << 12 | y_z;
+ if x >= 0xF0 {
+ // [x y z w] case
+ // use only the lower 3 bits of `init`
+ let w = unwrap_or_0(self.iter.next());
+ ch = (init & 7) << 18 | utf8_acc_cont_byte!(y_z, w);
+ }
+ }
+
+ // str invariant says `ch` is a valid Unicode Scalar Value
+ unsafe {
+ Some(mem::transmute(ch))
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ let (len, _) = self.iter.size_hint();
+ (len.saturating_add(3) / 4, Some(len))
+ }
+}
+
+impl<'a> DoubleEndedIterator<char> for Chars<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ let w = match self.iter.next_back() {
+ None => return None,
+ Some(&back_byte) if back_byte < 128 => return Some(back_byte as char),
+ Some(&back_byte) => back_byte,
+ };
+
+ // Multibyte case follows
+ // Decode from a byte combination out of: [x [y [z w]]]
+ let mut ch;
+ let z = unwrap_or_0(self.iter.next_back());
+ ch = utf8_first_byte!(z, 2);
+ if utf8_is_cont_byte!(z) {
+ let y = unwrap_or_0(self.iter.next_back());
+ ch = utf8_first_byte!(y, 3);
+ if utf8_is_cont_byte!(y) {
+ let x = unwrap_or_0(self.iter.next_back());
+ ch = utf8_first_byte!(x, 4);
+ ch = utf8_acc_cont_byte!(ch, y);
+ }
+ ch = utf8_acc_cont_byte!(ch, z);
+ }
+ ch = utf8_acc_cont_byte!(ch, w);
+
+ // str invariant says `ch` is a valid Unicode Scalar Value
+ unsafe {
+ Some(mem::transmute(ch))
+ }
+ }
+}
+
+/// External iterator for a string's characters and their byte offsets.
+/// Use with the `std::iter` module.
+#[deriving(Clone)]
+pub struct CharIndices<'a> {
+ front_offset: uint,
+ iter: Chars<'a>,
+}
+
+impl<'a> Iterator<(uint, char)> for CharIndices<'a> {
+ #[inline]
+ fn next(&mut self) -> Option<(uint, char)> {
+ let (pre_len, _) = self.iter.iter.size_hint();
+ match self.iter.next() {
+ None => None,
+ Some(ch) => {
+ let index = self.front_offset;
+ let (len, _) = self.iter.iter.size_hint();
+ self.front_offset += pre_len - len;
+ Some((index, ch))
+ }
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (uint, Option<uint>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<'a> DoubleEndedIterator<(uint, char)> for CharIndices<'a> {
+ #[inline]
+ fn next_back(&mut self) -> Option<(uint, char)> {
+ match self.iter.next_back() {
+ None => None,
+ Some(ch) => {
+ let (len, _) = self.iter.iter.size_hint();
+ let index = self.front_offset + len;
+ Some((index, ch))
+ }
+ }
+ }
+}
+
+/// External iterator for a string's bytes.
+/// Use with the `std::iter` module.
+///
+/// Created with `StrExt::bytes`
+#[stable]
+#[deriving(Clone)]
+pub struct Bytes<'a>(Map<&'a u8, u8, slice::Iter<'a, u8>, BytesDeref>);
+delegate_iter!{exact u8 in Bytes<'a>}
+
+/// A temporary fn new type that ensures that the `Bytes` iterator
+/// is cloneable.
+#[deriving(Copy, Clone)]
+struct BytesDeref;
+
+impl<'a> Fn(&'a u8) -> u8 for BytesDeref {
+ #[inline]
+ extern "rust-call" fn call(&self, (ptr,): (&'a u8,)) -> u8 {
+ *ptr
+ }
+}
+
+/// An iterator over the substrings of a string, separated by `sep`.
+#[deriving(Clone)]
+#[deprecated = "Type is now named `Split` or `SplitTerminator`"]
+pub struct CharSplits<'a, Sep> {
+ /// The slice remaining to be iterated
+ string: &'a str,
+ sep: Sep,
+ /// Whether an empty string at the end is allowed
+ allow_trailing_empty: bool,
+ only_ascii: bool,
+ finished: bool,
+}
+
+/// An iterator over the substrings of a string, separated by `sep`,
+/// splitting at most `count` times.
+#[deriving(Clone)]
+#[deprecated = "Type is now named `SplitN` or `RSplitN`"]
+pub struct CharSplitsN<'a, Sep> {
+ iter: CharSplits<'a, Sep>,
+ /// The number of splits remaining
+ count: uint,
+ invert: bool,
+}
+
+/// An iterator over the lines of a string, separated by `\n`.
+#[stable]
+pub struct Lines<'a> {
+ inner: CharSplits<'a, char>,
+}
+
+/// An iterator over the lines of a string, separated by either `\n` or (`\r\n`).
+#[stable]
+pub struct LinesAny<'a> {
+ inner: Map<&'a str, &'a str, Lines<'a>, fn(&str) -> &str>,
+}
+
+impl<'a, Sep> CharSplits<'a, Sep> {
+ #[inline]
+ fn get_end(&mut self) -> Option<&'a str> {
+ if !self.finished && (self.allow_trailing_empty || self.string.len() > 0) {
+ self.finished = true;
+ Some(self.string)
+ } else {
+ None
+ }
+ }
+}
+
+impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplits<'a, Sep> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ if self.finished { return None }
+
+ let mut next_split = None;
+ if self.only_ascii {
+ for (idx, byte) in self.string.bytes().enumerate() {
+ if self.sep.matches(byte as char) && byte < 128u8 {
+ next_split = Some((idx, idx + 1));
+ break;
+ }
+ }
+ } else {
+ for (idx, ch) in self.string.char_indices() {
+ if self.sep.matches(ch) {
+ next_split = Some((idx, self.string.char_range_at(idx).next));
+ break;
+ }
+ }
+ }
+ match next_split {
+ Some((a, b)) => unsafe {
+ let elt = self.string.slice_unchecked(0, a);
+ self.string = self.string.slice_unchecked(b, self.string.len());
+ Some(elt)
+ },
+ None => self.get_end(),
+ }
+ }
+}
+
+impl<'a, Sep: CharEq> DoubleEndedIterator<&'a str>
+for CharSplits<'a, Sep> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a str> {
+ if self.finished { return None }
+
+ if !self.allow_trailing_empty {
+ self.allow_trailing_empty = true;
+ match self.next_back() {
+ Some(elt) if !elt.is_empty() => return Some(elt),
+ _ => if self.finished { return None }
+ }
+ }
+ let len = self.string.len();
+ let mut next_split = None;
+
+ if self.only_ascii {
+ for (idx, byte) in self.string.bytes().enumerate().rev() {
+ if self.sep.matches(byte as char) && byte < 128u8 {
+ next_split = Some((idx, idx + 1));
+ break;
+ }
+ }
+ } else {
+ for (idx, ch) in self.string.char_indices().rev() {
+ if self.sep.matches(ch) {
+ next_split = Some((idx, self.string.char_range_at(idx).next));
+ break;
+ }
+ }
+ }
+ match next_split {
+ Some((a, b)) => unsafe {
+ let elt = self.string.slice_unchecked(b, len);
+ self.string = self.string.slice_unchecked(0, a);
+ Some(elt)
+ },
+ None => { self.finished = true; Some(self.string) }
+ }
+ }
+}
+
+impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplitsN<'a, Sep> {
+ #[inline]
+ fn next(&mut self) -> Option<&'a str> {
+ if self.count != 0 {
+ self.count -= 1;
+ if self.invert { self.iter.next_back() } else { self.iter.next() }
+ } else {
+ self.iter.get_end()
+ }
+ }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using naive search
+#[deriving(Clone)]
+struct NaiveSearcher {
+ position: uint
+}
+
+impl NaiveSearcher {
+ fn new() -> NaiveSearcher {
+ NaiveSearcher { position: 0 }
+ }
+
+ fn next(&mut self, haystack: &[u8], needle: &[u8]) -> Option<(uint, uint)> {
+ while self.position + needle.len() <= haystack.len() {
+ if haystack[self.position .. self.position + needle.len()] == needle {
+ let match_pos = self.position;
+ self.position += needle.len(); // add 1 for all matches
+ return Some((match_pos, match_pos + needle.len()));
+ } else {
+ self.position += 1;
+ }
+ }
+ None
+ }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using two-way search
+#[deriving(Clone)]
+struct TwoWaySearcher {
+ // constants
+ crit_pos: uint,
+ period: uint,
+ byteset: u64,
+
+ // variables
+ position: uint,
+ memory: uint
+}
+
+/*
+ This is the Two-Way search algorithm, which was introduced in the paper:
+ Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
+
+ Here's some background information.
+
+ A *word* is a string of symbols. The *length* of a word should be a familiar
+ notion, and here we denote it for any word x by |x|.
+ (We also allow for the possibility of the *empty word*, a word of length zero).
+
+ If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
+ *period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
+ For example, both 1 and 2 are periods for the string "aa". As another example,
+ the only period of the string "abcd" is 4.
+
+ We denote by period(x) the *smallest* period of x (provided that x is non-empty).
+ This is always well-defined since every non-empty word x has at least one period,
+ |x|. We sometimes call this *the period* of x.
+
+ If u, v and x are words such that x = uv, where uv is the concatenation of u and
+ v, then we say that (u, v) is a *factorization* of x.
+
+ Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
+ that both of the following hold
+
+ - either w is a suffix of u or u is a suffix of w
+ - either w is a prefix of v or v is a prefix of w
+
+ then w is said to be a *repetition* for the factorization (u, v).
+
+ Just to unpack this, there are four possibilities here. Let w = "abc". Then we
+ might have:
+
+ - w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
+ - w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
+ - u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
+ - u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
+
+ Note that the word vu is a repetition for any factorization (u,v) of x = uv,
+ so every factorization has at least one repetition.
+
+ If x is a string and (u, v) is a factorization for x, then a *local period* for
+ (u, v) is an integer r such that there is some word w such that |w| = r and w is
+ a repetition for (u, v).
+
+ We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
+ call this *the local period* of (u, v). Provided that x = uv is non-empty, this
+ is well-defined (because each non-empty word has at least one factorization, as
+ noted above).
+
+ It can be proven that the following is an equivalent definition of a local period
+ for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
+ all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
+ defined. (i.e. i >= 0 and i + r < |x|).
+
+ Using the above reformulation, it is easy to prove that
+
+ 1 <= local_period(u, v) <= period(uv)
+
+ A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
+ *critical factorization*.
+
+ The algorithm hinges on the following theorem, which is stated without proof:
+
+ **Critical Factorization Theorem** Any word x has at least one critical
+ factorization (u, v) such that |u| < period(x).
+
+ The purpose of maximal_suffix is to find such a critical factorization.
+
+*/
+impl TwoWaySearcher {
+    // Preprocesses `needle`: computes a critical factorization (crit_pos) and
+    // the needle's period, plus a 64-bit byte fingerprint used to skip
+    // haystack windows quickly during the search.
+    fn new(needle: &[u8]) -> TwoWaySearcher {
+        // Run the maximal-suffix computation under both byte orderings; the
+        // larger of the two critical positions is the one guaranteed by the
+        // Critical Factorization Theorem (see the comment block above).
+        let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
+        let (crit_pos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
+
+        let crit_pos;
+        let period;
+        if crit_pos1 > crit_pos2 {
+            crit_pos = crit_pos1;
+            period = period1;
+        } else {
+            crit_pos = crit_pos2;
+            period = period2;
+        }
+
+        // This isn't in the original algorithm, as far as I'm aware.
+        // Bit i of `byteset` is set iff some needle byte has low 6 bits == i;
+        // a window whose last byte misses this set cannot contain the needle.
+        let byteset = needle.iter()
+                            .fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
+
+        // A particularly readable explanation of what's going on here can be found
+        // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
+        // see the code for "Algorithm CP" on p. 323.
+        //
+        // What's going on is we have some critical factorization (u, v) of the
+        // needle, and we want to determine whether u is a suffix of
+        // v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
+        // "Algorithm CP2", which is optimized for when the period of the needle
+        // is large.
+        if needle[..crit_pos] == needle[period.. period + crit_pos] {
+            TwoWaySearcher {
+                crit_pos: crit_pos,
+                period: period,
+                byteset: byteset,
+
+                position: 0,
+                memory: 0
+            }
+        } else {
+            TwoWaySearcher {
+                crit_pos: crit_pos,
+                // Not the true period, but a lower bound that is valid for
+                // the long-period variant of the search loop below.
+                period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
+                byteset: byteset,
+
+                position: 0,
+                memory: uint::MAX // Dummy value to signify that the period is long
+            }
+        }
+    }
+
+    // One of the main ideas of Two-Way is that we factorize the needle into
+    // two halves, (u, v), and begin trying to find v in the haystack by scanning
+    // left to right. If v matches, we try to match u by scanning right to left.
+    // How far we can jump when we encounter a mismatch is all based on the fact
+    // that (u, v) is a critical factorization for the needle.
+    //
+    // Returns the (start, end) byte range of the next match at or after
+    // `self.position`, or None when the haystack is exhausted. `long_period`
+    // selects between Algorithm CP1 (false) and CP2 (true); it must agree
+    // with how `self` was constructed (see `Searcher::new`).
+    #[inline]
+    fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
+        'search: loop {
+            // Check that we have room to search in
+            if self.position + needle.len() > haystack.len() {
+                return None;
+            }
+
+            // Quickly skip by large portions unrelated to our substring
+            if (self.byteset >>
+                    ((haystack[self.position + needle.len() - 1] & 0x3f)
+                     as uint)) & 1 == 0 {
+                self.position += needle.len();
+                if !long_period {
+                    self.memory = 0;
+                }
+                continue 'search;
+            }
+
+            // See if the right part of the needle matches
+            // (`memory` remembers a prefix already known to match, so CP1
+            // can skip re-checking it.)
+            let start = if long_period { self.crit_pos }
+                        else { cmp::max(self.crit_pos, self.memory) };
+            for i in range(start, needle.len()) {
+                if needle[i] != haystack[self.position + i] {
+                    self.position += i - self.crit_pos + 1;
+                    if !long_period {
+                        self.memory = 0;
+                    }
+                    continue 'search;
+                }
+            }
+
+            // See if the left part of the needle matches
+            let start = if long_period { 0 } else { self.memory };
+            for i in range(start, self.crit_pos).rev() {
+                if needle[i] != haystack[self.position + i] {
+                    self.position += self.period;
+                    if !long_period {
+                        self.memory = needle.len() - self.period;
+                    }
+                    continue 'search;
+                }
+            }
+
+            // We have found a match!
+            let match_pos = self.position;
+            self.position += needle.len(); // add self.period for all matches
+            if !long_period {
+                self.memory = 0; // set to needle.len() - self.period for all matches
+            }
+            return Some((match_pos, match_pos + needle.len()));
+        }
+    }
+
+    // Computes a critical factorization (u, v) of `arr`.
+    // Specifically, returns (i, p), where i is the starting index of v in some
+    // critical factorization (u, v) and p = period(v)
+    //
+    // `reversed` flips the byte comparison, yielding the maximal suffix under
+    // the reversed alphabet ordering (both orderings are needed by `new`).
+    #[inline]
+    fn maximal_suffix(arr: &[u8], reversed: bool) -> (uint, uint) {
+        // NOTE(review): `left` is unsigned; the -1 relies on wrapping
+        // arithmetic so that `left + offset` starts at index 0, mirroring
+        // the paper's i = -1.
+        let mut left = -1; // Corresponds to i in the paper
+        let mut right = 0; // Corresponds to j in the paper
+        let mut offset = 1; // Corresponds to k in the paper
+        let mut period = 1; // Corresponds to p in the paper
+
+        while right + offset < arr.len() {
+            let a;
+            let b;
+            if reversed {
+                a = arr[left + offset];
+                b = arr[right + offset];
+            } else {
+                a = arr[right + offset];
+                b = arr[left + offset];
+            }
+            if a < b {
+                // Suffix is smaller, period is entire prefix so far.
+                right += offset;
+                offset = 1;
+                period = right - left;
+            } else if a == b {
+                // Advance through repetition of the current period.
+                if offset == period {
+                    right += offset;
+                    offset = 1;
+                } else {
+                    offset += 1;
+                }
+            } else {
+                // Suffix is larger, start over from current location.
+                left = right;
+                right += 1;
+                offset = 1;
+                period = 1;
+            }
+        }
+        (left + 1, period)
+    }
+}
+
+/// The internal state of an iterator that searches for matches of a substring
+/// within a larger string using a dynamically chosen search algorithm
+#[deriving(Clone)]
+enum Searcher {
+    /// Used when the haystack is barely longer than the needle (see `Searcher::new`).
+    Naive(NaiveSearcher),
+    /// Two-Way algorithm, short-period variant (Algorithm CP1).
+    TwoWay(TwoWaySearcher),
+    /// Two-Way algorithm, long-period variant (Algorithm CP2; memory == uint::MAX).
+    TwoWayLong(TwoWaySearcher)
+}
+
+impl Searcher {
+    /// Picks a search algorithm for the (haystack, needle) pair: naive
+    /// scanning when the haystack is at most ~20 bytes longer than the
+    /// needle (preprocessing would not pay off), otherwise Two-Way, with
+    /// the variant chosen by the needle's period length.
+    fn new(haystack: &[u8], needle: &[u8]) -> Searcher {
+        // FIXME: Tune this.
+        // FIXME(#16715): This unsigned integer addition will probably not
+        // overflow because that would mean that the memory almost solely
+        // consists of the needle. Needs #16715 to be formally fixed.
+        if needle.len() + 20 > haystack.len() {
+            Naive(NaiveSearcher::new())
+        } else {
+            let searcher = TwoWaySearcher::new(needle);
+            if searcher.memory == uint::MAX { // If the period is long
+                TwoWayLong(searcher)
+            } else {
+                TwoWay(searcher)
+            }
+        }
+    }
+}
+
+/// An iterator over the start and end indices of the matches of a
+/// substring within a larger string
+#[deriving(Clone)]
+pub struct MatchIndices<'a> {
+    // constants
+    /// The string being searched.
+    haystack: &'a str,
+    /// The substring being searched for.
+    needle: &'a str,
+    /// Mutable search state (algorithm chosen by `Searcher::new`).
+    searcher: Searcher
+}
+
+/// An iterator over the substrings of a string separated by a given
+/// search string
+#[deriving(Clone)]
+#[unstable = "Type might get removed"]
+pub struct SplitStr<'a> {
+    /// Underlying match iterator supplying separator positions.
+    it: MatchIndices<'a>,
+    /// Byte index just past the previous separator (start of next segment).
+    last_end: uint,
+    /// Set once the final (tail) segment has been yielded.
+    finished: bool
+}
+
+/// Deprecated
+#[deprecated = "Type is now named `SplitStr`"]
+pub type StrSplits<'a> = SplitStr<'a>;
+
+impl<'a> Iterator<(uint, uint)> for MatchIndices<'a> {
+    /// Yields the `(start, end)` byte range of the next non-overlapping
+    /// match, dispatching on the algorithm selected at construction.
+    #[inline]
+    fn next(&mut self) -> Option<(uint, uint)> {
+        match self.searcher {
+            Naive(ref mut searcher)
+                => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes()),
+            TwoWay(ref mut searcher)
+                => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), false),
+            TwoWayLong(ref mut searcher)
+                => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), true)
+        }
+    }
+}
+
+impl<'a> Iterator<&'a str> for SplitStr<'a> {
+    /// Yields the segment before each separator match; once matches are
+    /// exhausted, yields the tail after the last separator exactly once.
+    #[inline]
+    fn next(&mut self) -> Option<&'a str> {
+        if self.finished { return None; }
+
+        match self.it.next() {
+            Some((from, to)) => {
+                let ret = Some(self.it.haystack.slice(self.last_end, from));
+                self.last_end = to;
+                ret
+            }
+            None => {
+                // No more separators: emit the remainder and stop.
+                self.finished = true;
+                Some(self.it.haystack.slice(self.last_end, self.it.haystack.len()))
+            }
+        }
+    }
+}
+
+
+/*
+Section: Comparing strings
+*/
+
+// share the implementation of the lang-item vs. non-lang-item
+// eq_slice.
+/// NOTE: This function is (ab)used in rustc::middle::trans::_match
+/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
+#[inline]
+fn eq_slice_(a: &str, b: &str) -> bool {
+    #[allow(improper_ctypes)]
+    extern { fn memcmp(s1: *const i8, s2: *const i8, n: uint) -> i32; }
+    // memcmp is only reached when the lengths are equal, so reading
+    // `a.len()` bytes from both pointers is in-bounds.
+    a.len() == b.len() && unsafe {
+        memcmp(a.as_ptr() as *const i8,
+               b.as_ptr() as *const i8,
+               a.len()) == 0
+    }
+}
+
+/// Bytewise slice equality
+/// NOTE: This function is (ab)used in rustc::middle::trans::_match
+/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
+// Lang-item wrapper: the compiler emits calls to "str_eq" for string
+// pattern matches; it simply forwards to the shared implementation.
+#[lang="str_eq"]
+#[inline]
+fn eq_slice(a: &str, b: &str) -> bool {
+    eq_slice_(a, b)
+}
+
+/*
+Section: Misc
+*/
+
+/// Walk through `iter` checking that it's a valid UTF-8 sequence,
+/// returning `Ok(())` in that case, or, if it is invalid, an `Err` with
+/// `iter` reset such that it is pointing at the first byte in the
+/// invalid sequence.
+#[inline(always)]
+fn run_utf8_validation_iterator(iter: &mut slice::Iter<u8>)
+                                -> Result<(), Utf8Error> {
+    let whole = iter.as_slice();
+    loop {
+        // save the current thing we're pointing at.
+        let old = *iter;
+
+        // restore the iterator we had at the start of this codepoint.
+        macro_rules! err (() => { {
+            *iter = old;
+            return Err(Utf8Error::InvalidByte(whole.len() - iter.as_slice().len()))
+        } });
+        macro_rules! next ( () => {
+            match iter.next() {
+                Some(a) => *a,
+                // we needed data, but there was none: error!
+                None => return Err(Utf8Error::TooShort),
+            }
+        });
+
+        let first = match iter.next() {
+            Some(&b) => b,
+            // we're at the end of the iterator and a codepoint
+            // boundary at the same time, so this string is valid.
+            None => return Ok(())
+        };
+
+        // ASCII characters are always valid, so only large
+        // bytes need more examination.
+        if first >= 128 {
+            let w = UTF8_CHAR_WIDTH[first as uint] as uint;
+            let second = next!();
+            // 2-byte encoding is for codepoints  \u{0080} to  \u{07ff}
+            //        first  C2 80        last DF BF
+            // 3-byte encoding is for codepoints  \u{0800} to  \u{ffff}
+            //        first  E0 A0 80     last EF BF BF
+            //   excluding surrogates codepoints  \u{d800} to  \u{dfff}
+            //               ED A0 80 to       ED BF BF
+            // 4-byte encoding is for codepoints \u{10000} to \u{10ffff}
+            //        first  F0 90 80 80  last F4 8F BF BF
+            //
+            // Use the UTF-8 syntax from the RFC
+            //
+            // https://tools.ietf.org/html/rfc3629
+            // UTF8-1      = %x00-7F
+            // UTF8-2      = %xC2-DF UTF8-tail
+            // UTF8-3      = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
+            //               %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
+            // UTF8-4      = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
+            //               %xF4 %x80-8F 2( UTF8-tail )
+            match w {
+                2 => if second & !CONT_MASK != TAG_CONT_U8 {err!()},
+                3 => {
+                    match (first, second, next!() & !CONT_MASK) {
+                        (0xE0         , 0xA0 ... 0xBF, TAG_CONT_U8) |
+                        (0xE1 ... 0xEC, 0x80 ... 0xBF, TAG_CONT_U8) |
+                        (0xED         , 0x80 ... 0x9F, TAG_CONT_U8) |
+                        (0xEE ... 0xEF, 0x80 ... 0xBF, TAG_CONT_U8) => {}
+                        _ => err!()
+                    }
+                }
+                4 => {
+                    match (first, second, next!() & !CONT_MASK, next!() & !CONT_MASK) {
+                        (0xF0         , 0x90 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
+                        (0xF1 ... 0xF3, 0x80 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
+                        (0xF4         , 0x80 ... 0x8F, TAG_CONT_U8, TAG_CONT_U8) => {}
+                        _ => err!()
+                    }
+                }
+                _ => err!()
+            }
+        }
+    }
+}
+
+/// Determines if a vector of bytes contains valid UTF-8.
+// Thin wrapper over the validation iterator, discarding the error detail.
+#[deprecated = "call from_utf8 instead"]
+pub fn is_utf8(v: &[u8]) -> bool {
+    run_utf8_validation_iterator(&mut v.iter()).is_ok()
+}
+
+/// Deprecated function
+// Returns the prefix of `v` up to (but excluding) the first 0u16, or all
+// of `v` if it contains no NUL.
+#[deprecated = "this function will be removed"]
+pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
+    match v.iter().position(|c| *c == 0) {
+        // don't include the 0
+        Some(i) => v[..i],
+        None => v
+    }
+}
+
+// https://tools.ietf.org/html/rfc3629
+// Total length (in bytes) of a UTF-8 sequence, indexed by its first byte;
+// 0 marks bytes that cannot begin a sequence (continuation bytes 0x80-0xBF,
+// the invalid leads 0xC0/0xC1, and 0xF5-0xFF).
+static UTF8_CHAR_WIDTH: [u8, ..256] = [
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
+0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
+4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
+];
+
+/// Given a first byte, determine how many bytes are in this UTF-8 character
+// Returns 0 for bytes that cannot start a sequence (see UTF8_CHAR_WIDTH).
+#[inline]
+#[deprecated = "this function has moved to libunicode"]
+pub fn utf8_char_width(b: u8) -> uint {
+    return UTF8_CHAR_WIDTH[b as uint] as uint;
+}
+
+/// Struct that contains a `char` and the index of the first byte of
+/// the next `char` in a string. This can be used as a data structure
+/// for iterating over the UTF-8 bytes of a string.
+// Produced by `char_range_at` / `char_range_at_reverse` below.
+#[deriving(Copy)]
+#[unstable = "naming is uncertain with container conventions"]
+pub struct CharRange {
+    /// Current `char`
+    pub ch: char,
+    /// Index of the first byte of the next `char`
+    pub next: uint,
+}
+
+/// Mask of the value bits of a continuation byte
+// A continuation byte has the form 0b10xx_xxxx; `b & !CONT_MASK == TAG_CONT_U8`
+// tests for that shape, and `b & CONT_MASK` extracts its 6 payload bits.
+const CONT_MASK: u8 = 0b0011_1111u8;
+/// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte
+const TAG_CONT_U8: u8 = 0b1000_0000u8;
+
+/// Unsafe operations
+// Deprecated grab-bag of unchecked string constructors/slicers; each item
+// forwards to (or predates) a method that has moved elsewhere.
+#[deprecated]
+pub mod raw {
+    use ptr::PtrExt;
+    use raw::Slice;
+    use slice::SliceExt;
+    use str::StrExt;
+
+    /// Converts a slice of bytes to a string slice without checking
+    /// that the string contains valid UTF-8.
+    #[deprecated = "renamed to str::from_utf8_unchecked"]
+    pub unsafe fn from_utf8<'a>(v: &'a [u8]) -> &'a str {
+        super::from_utf8_unchecked(v)
+    }
+
+    /// Form a slice from a C string. Unsafe because the caller must ensure the
+    /// C string has the static lifetime, or else the return value may be
+    /// invalidated later.
+    #[deprecated = "renamed to str::from_c_str"]
+    pub unsafe fn c_str_to_static_slice(s: *const i8) -> &'static str {
+        let s = s as *const u8;
+        let mut curr = s;
+        let mut len = 0u;
+        // Scan forward to the NUL terminator to find the length; the caller
+        // must guarantee the string is NUL-terminated and lives forever.
+        while *curr != 0u8 {
+            len += 1u;
+            curr = s.offset(len as int);
+        }
+        let v = Slice { data: s, len: len };
+        // Panics (via unwrap) if the bytes are not valid UTF-8.
+        super::from_utf8(::mem::transmute(v)).unwrap()
+    }
+
+    /// Takes a bytewise (not UTF-8) slice from a string.
+    ///
+    /// Returns the substring from [`begin`..`end`).
+    ///
+    /// # Panics
+    ///
+    /// If begin is greater than end.
+    /// If end is greater than the length of the string.
+    #[inline]
+    #[deprecated = "call the slice_unchecked method instead"]
+    pub unsafe fn slice_bytes<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
+        // Bounds are asserted, but char-boundary validity is NOT checked.
+        assert!(begin <= end);
+        assert!(end <= s.len());
+        s.slice_unchecked(begin, end)
+    }
+
+    /// Takes a bytewise (not UTF-8) slice from a string.
+    ///
+    /// Returns the substring from [`begin`..`end`).
+    ///
+    /// Caller must check slice boundaries!
+    #[inline]
+    #[deprecated = "this has moved to a method on `str` directly"]
+    pub unsafe fn slice_unchecked<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
+        s.slice_unchecked(begin, end)
+    }
+}
+
+/*
+Section: Trait implementations
+*/
+
+#[allow(missing_docs)]
+// Comparison and slicing trait implementations for `str`.
+pub mod traits {
+    use cmp::{Ordering, Ord, PartialEq, PartialOrd, Equiv, Eq};
+    use cmp::Ordering::{Less, Equal, Greater};
+    use iter::IteratorExt;
+    use option::Option;
+    use option::Option::Some;
+    use ops;
+    use str::{Str, StrExt, eq_slice};
+
+    impl Ord for str {
+        // Byte-wise comparison; if one string is a prefix of the other,
+        // the shorter one orders first.
+        #[inline]
+        fn cmp(&self, other: &str) -> Ordering {
+            for (s_b, o_b) in self.bytes().zip(other.bytes()) {
+                match s_b.cmp(&o_b) {
+                    Greater => return Greater,
+                    Less => return Less,
+                    Equal => ()
+                }
+            }
+
+            self.len().cmp(&other.len())
+        }
+    }
+
+    impl PartialEq for str {
+        #[inline]
+        fn eq(&self, other: &str) -> bool {
+            eq_slice(self, other)
+        }
+        #[inline]
+        fn ne(&self, other: &str) -> bool { !(*self).eq(other) }
+    }
+
+    impl Eq for str {}
+
+    impl PartialOrd for str {
+        // Total order: always Some, delegating to Ord.
+        #[inline]
+        fn partial_cmp(&self, other: &str) -> Option<Ordering> {
+            Some(self.cmp(other))
+        }
+    }
+
+    #[allow(deprecated)]
+    #[deprecated = "Use overloaded `core::cmp::PartialEq`"]
+    impl<S: Str> Equiv<S> for str {
+        #[inline]
+        fn equiv(&self, other: &S) -> bool { eq_slice(self, other.as_slice()) }
+    }
+
+    // Old-style slicing-syntax support (`s[]`, `s[a..]`, `s[..b]`, `s[a..b]`);
+    // panics on out-of-bounds or non-char-boundary indices via the StrExt
+    // slicing methods it delegates to.
+    impl ops::Slice<uint, str> for str {
+        #[inline]
+        fn as_slice_<'a>(&'a self) -> &'a str {
+            self
+        }
+
+        #[inline]
+        fn slice_from_or_fail<'a>(&'a self, from: &uint) -> &'a str {
+            self.slice_from(*from)
+        }
+
+        #[inline]
+        fn slice_to_or_fail<'a>(&'a self, to: &uint) -> &'a str {
+            self.slice_to(*to)
+        }
+
+        #[inline]
+        fn slice_or_fail<'a>(&'a self, from: &uint, to: &uint) -> &'a str {
+            self.slice(*from, *to)
+        }
+    }
+}
+
+/// Any string that can be represented as a slice
+#[unstable = "Instead of taking this bound generically, this trait will be \
+              replaced with one of slicing syntax, deref coercions, or \
+              a more generic conversion trait"]
+pub trait Str for Sized? {
+    /// Work with `self` as a slice.
+    fn as_slice<'a>(&'a self) -> &'a str;
+}
+
+#[allow(deprecated)]
+impl Str for str {
+    // Identity: a &str is already a slice.
+    #[inline]
+    fn as_slice<'a>(&'a self) -> &'a str { self }
+}
+
+#[allow(deprecated)]
+// Blanket impl: any reference to a Str is itself Str, by one more deref.
+impl<'a, Sized? S> Str for &'a S where S: Str {
+    #[inline]
+    fn as_slice(&self) -> &str { Str::as_slice(*self) }
+}
+
+/// Return type of `StrExt::split`
+// Newtype wrappers around the deprecated CharSplits/CharSplitsN state;
+// delegate_iter! forwards the Iterator impls to the inner type.
+#[deriving(Clone)]
+#[stable]
+pub struct Split<'a, P>(CharSplits<'a, P>);
+delegate_iter!{pattern &'a str in Split<'a, P>}
+
+/// Return type of `StrExt::split_terminator`
+#[deriving(Clone)]
+#[unstable = "might get removed in favour of a constructor method on Split"]
+pub struct SplitTerminator<'a, P>(CharSplits<'a, P>);
+delegate_iter!{pattern &'a str in SplitTerminator<'a, P>}
+
+/// Return type of `StrExt::splitn`
+#[deriving(Clone)]
+#[stable]
+pub struct SplitN<'a, P>(CharSplitsN<'a, P>);
+delegate_iter!{pattern forward &'a str in SplitN<'a, P>}
+
+/// Return type of `StrExt::rsplitn`
+#[deriving(Clone)]
+#[stable]
+pub struct RSplitN<'a, P>(CharSplitsN<'a, P>);
+delegate_iter!{pattern forward &'a str in RSplitN<'a, P>}
+
+/// Methods for string slices
+#[allow(missing_docs)]
+pub trait StrExt for Sized? {
+    // NB there are no docs here as they're all located on the StrExt trait in
+    // libcollections, not here.
+
+    fn contains(&self, pat: &str) -> bool;
+    fn contains_char<P: CharEq>(&self, pat: P) -> bool;
+    fn chars<'a>(&'a self) -> Chars<'a>;
+    fn bytes<'a>(&'a self) -> Bytes<'a>;
+    fn char_indices<'a>(&'a self) -> CharIndices<'a>;
+    fn split<'a, P: CharEq>(&'a self, pat: P) -> Split<'a, P>;
+    fn splitn<'a, P: CharEq>(&'a self, count: uint, pat: P) -> SplitN<'a, P>;
+    fn split_terminator<'a, P: CharEq>(&'a self, pat: P) -> SplitTerminator<'a, P>;
+    fn rsplitn<'a, P: CharEq>(&'a self, count: uint, pat: P) -> RSplitN<'a, P>;
+    fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a>;
+    fn split_str<'a>(&'a self, pat: &'a str) -> SplitStr<'a>;
+    fn lines<'a>(&'a self) -> Lines<'a>;
+    fn lines_any<'a>(&'a self) -> LinesAny<'a>;
+    fn char_len(&self) -> uint;
+    fn slice<'a>(&'a self, begin: uint, end: uint) -> &'a str;
+    fn slice_from<'a>(&'a self, begin: uint) -> &'a str;
+    fn slice_to<'a>(&'a self, end: uint) -> &'a str;
+    fn slice_chars<'a>(&'a self, begin: uint, end: uint) -> &'a str;
+    unsafe fn slice_unchecked<'a>(&'a self, begin: uint, end: uint) -> &'a str;
+    fn starts_with(&self, pat: &str) -> bool;
+    fn ends_with(&self, pat: &str) -> bool;
+    fn trim_matches<'a, P: CharEq>(&'a self, pat: P) -> &'a str;
+    fn trim_left_matches<'a, P: CharEq>(&'a self, pat: P) -> &'a str;
+    fn trim_right_matches<'a, P: CharEq>(&'a self, pat: P) -> &'a str;
+    fn is_char_boundary(&self, index: uint) -> bool;
+    fn char_range_at(&self, start: uint) -> CharRange;
+    fn char_range_at_reverse(&self, start: uint) -> CharRange;
+    fn char_at(&self, i: uint) -> char;
+    fn char_at_reverse(&self, i: uint) -> char;
+    fn as_bytes<'a>(&'a self) -> &'a [u8];
+    fn find<P: CharEq>(&self, pat: P) -> Option<uint>;
+    fn rfind<P: CharEq>(&self, pat: P) -> Option<uint>;
+    fn find_str(&self, pat: &str) -> Option<uint>;
+    fn slice_shift_char<'a>(&'a self) -> Option<(char, &'a str)>;
+    fn subslice_offset(&self, inner: &str) -> uint;
+    fn as_ptr(&self) -> *const u8;
+    fn len(&self) -> uint;
+    fn is_empty(&self) -> bool;
+}
+
+// Cold panic path shared by the slicing methods: called when requested
+// indices are out of bounds or not on char boundaries. Never returns.
+#[inline(never)]
+fn slice_error_fail(s: &str, begin: uint, end: uint) -> ! {
+    assert!(begin <= end);
+    panic!("index {} and/or {} in `{}` do not lie on character boundary",
+          begin, end, s);
+}
+
+// Method docs live on the StrExt trait in libcollections; comments here only
+// note implementation details.
+impl StrExt for str {
+    #[inline]
+    fn contains(&self, needle: &str) -> bool {
+        self.find_str(needle).is_some()
+    }
+
+    #[inline]
+    fn contains_char<P: CharEq>(&self, pat: P) -> bool {
+        self.find(pat).is_some()
+    }
+
+    #[inline]
+    fn chars(&self) -> Chars {
+        Chars{iter: self.as_bytes().iter()}
+    }
+
+    #[inline]
+    fn bytes(&self) -> Bytes {
+        Bytes(self.as_bytes().iter().map(BytesDeref))
+    }
+
+    #[inline]
+    fn char_indices(&self) -> CharIndices {
+        CharIndices { front_offset: 0, iter: self.chars() }
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplits
+    fn split<P: CharEq>(&self, pat: P) -> Split<P> {
+        Split(CharSplits {
+            string: self,
+            only_ascii: pat.only_ascii(),
+            sep: pat,
+            allow_trailing_empty: true,
+            finished: false,
+        })
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplitsN
+    fn splitn<P: CharEq>(&self, count: uint, pat: P) -> SplitN<P> {
+        SplitN(CharSplitsN {
+            iter: self.split(pat).0,
+            count: count,
+            invert: false,
+        })
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplits
+    fn split_terminator<P: CharEq>(&self, pat: P) -> SplitTerminator<P> {
+        // Same as split() but a trailing separator yields no empty tail.
+        SplitTerminator(CharSplits {
+            allow_trailing_empty: false,
+            ..self.split(pat).0
+        })
+    }
+
+    #[inline]
+    #[allow(deprecated)] // For using CharSplitsN
+    fn rsplitn<P: CharEq>(&self, count: uint, pat: P) -> RSplitN<P> {
+        RSplitN(CharSplitsN {
+            iter: self.split(pat).0,
+            count: count,
+            invert: true,
+        })
+    }
+
+    #[inline]
+    fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
+        // An empty separator would match everywhere; forbidden.
+        assert!(!sep.is_empty());
+        MatchIndices {
+            haystack: self,
+            needle: sep,
+            searcher: Searcher::new(self.as_bytes(), sep.as_bytes())
+        }
+    }
+
+    #[inline]
+    fn split_str<'a>(&'a self, sep: &'a str) -> SplitStr<'a> {
+        SplitStr {
+            it: self.match_indices(sep),
+            last_end: 0,
+            finished: false
+        }
+    }
+
+    #[inline]
+    fn lines(&self) -> Lines {
+        Lines { inner: self.split_terminator('\n').0 }
+    }
+
+    fn lines_any(&self) -> LinesAny {
+        // Like lines(), but also strips a trailing '\r' from each line so
+        // CRLF-terminated text splits cleanly.
+        fn f(line: &str) -> &str {
+            let l = line.len();
+            if l > 0 && line.as_bytes()[l - 1] == b'\r' { line.slice(0, l - 1) }
+            else { line }
+        }
+
+        let f: fn(&str) -> &str = f; // coerce to fn pointer
+        LinesAny { inner: self.lines().map(f) }
+    }
+
+    #[inline]
+    fn char_len(&self) -> uint { self.chars().count() }
+
+    #[inline]
+    fn slice(&self, begin: uint, end: uint) -> &str {
+        // is_char_boundary checks that the index is in [0, .len()]
+        if begin <= end &&
+           self.is_char_boundary(begin) &&
+           self.is_char_boundary(end) {
+            unsafe { self.slice_unchecked(begin, end) }
+        } else {
+            slice_error_fail(self, begin, end)
+        }
+    }
+
+    #[inline]
+    fn slice_from(&self, begin: uint) -> &str {
+        // is_char_boundary checks that the index is in [0, .len()]
+        if self.is_char_boundary(begin) {
+            unsafe { self.slice_unchecked(begin, self.len()) }
+        } else {
+            slice_error_fail(self, begin, self.len())
+        }
+    }
+
+    #[inline]
+    fn slice_to(&self, end: uint) -> &str {
+        // is_char_boundary checks that the index is in [0, .len()]
+        if self.is_char_boundary(end) {
+            unsafe { self.slice_unchecked(0, end) }
+        } else {
+            slice_error_fail(self, 0, end)
+        }
+    }
+
+    fn slice_chars(&self, begin: uint, end: uint) -> &str {
+        assert!(begin <= end);
+        let mut count = 0;
+        let mut begin_byte = None;
+        let mut end_byte = None;
+
+        // This could be even more efficient by not decoding,
+        // only finding the char boundaries
+        for (idx, _) in self.char_indices() {
+            if count == begin { begin_byte = Some(idx); }
+            if count == end { end_byte = Some(idx); break; }
+            count += 1;
+        }
+        // A char index equal to char_len() maps to the byte length.
+        if begin_byte.is_none() && count == begin { begin_byte = Some(self.len()) }
+        if end_byte.is_none() && count == end { end_byte = Some(self.len()) }
+
+        match (begin_byte, end_byte) {
+            (None, _) => panic!("slice_chars: `begin` is beyond end of string"),
+            (_, None) => panic!("slice_chars: `end` is beyond end of string"),
+            (Some(a), Some(b)) => unsafe { self.slice_unchecked(a, b) }
+        }
+    }
+
+    #[inline]
+    unsafe fn slice_unchecked(&self, begin: uint, end: uint) -> &str {
+        // Caller guarantees begin <= end <= len and both on char boundaries.
+        mem::transmute(Slice {
+            data: self.as_ptr().offset(begin as int),
+            len: end - begin,
+        })
+    }
+
+    #[inline]
+    fn starts_with(&self, needle: &str) -> bool {
+        let n = needle.len();
+        self.len() >= n && needle.as_bytes() == self.as_bytes()[..n]
+    }
+
+    #[inline]
+    fn ends_with(&self, needle: &str) -> bool {
+        let (m, n) = (self.len(), needle.len());
+        m >= n && needle.as_bytes() == self.as_bytes()[m-n..]
+    }
+
+    #[inline]
+    fn trim_matches<P: CharEq>(&self, mut pat: P) -> &str {
+        // Trim the left side first, then the right side of the remainder.
+        let cur = match self.find(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(i) => unsafe { self.slice_unchecked(i, self.len()) }
+        };
+        match cur.rfind(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(i) => {
+                let right = cur.char_range_at(i).next;
+                unsafe { cur.slice_unchecked(0, right) }
+            }
+        }
+    }
+
+    #[inline]
+    fn trim_left_matches<P: CharEq>(&self, mut pat: P) -> &str {
+        match self.find(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(first) => unsafe { self.slice_unchecked(first, self.len()) }
+        }
+    }
+
+    #[inline]
+    fn trim_right_matches<P: CharEq>(&self, mut pat: P) -> &str {
+        match self.rfind(|&mut: c: char| !pat.matches(c)) {
+            None => "",
+            Some(last) => {
+                let next = self.char_range_at(last).next;
+                unsafe { self.slice_unchecked(0u, next) }
+            }
+        }
+    }
+
+    #[inline]
+    fn is_char_boundary(&self, index: uint) -> bool {
+        if index == self.len() { return true; }
+        match self.as_bytes().get(index) {
+            None => false,
+            // Not a boundary only for continuation bytes 0x80..0xBF.
+            Some(&b) => b < 128u8 || b >= 192u8,
+        }
+    }
+
+    #[inline]
+    fn char_range_at(&self, i: uint) -> CharRange {
+        if self.as_bytes()[i] < 128u8 {
+            return CharRange {ch: self.as_bytes()[i] as char, next: i + 1 };
+        }
+
+        // Multibyte case is a fn to allow char_range_at to inline cleanly
+        fn multibyte_char_range_at(s: &str, i: uint) -> CharRange {
+            let mut val = s.as_bytes()[i] as u32;
+            let w = UTF8_CHAR_WIDTH[val as uint] as uint;
+            assert!((w != 0));
+
+            val = utf8_first_byte!(val, w);
+            val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
+            if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
+            if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
+
+            return CharRange {ch: unsafe { mem::transmute(val) }, next: i + w};
+        }
+
+        return multibyte_char_range_at(self, i);
+    }
+
+    #[inline]
+    fn char_range_at_reverse(&self, start: uint) -> CharRange {
+        let mut prev = start;
+
+        // Step back one byte; saturating_sub keeps prev at 0 for start == 0.
+        prev = prev.saturating_sub(1);
+        if self.as_bytes()[prev] < 128 {
+            return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
+        }
+
+        // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
+        fn multibyte_char_range_at_reverse(s: &str, mut i: uint) -> CharRange {
+            // while there is a previous byte == 10......
+            while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
+                i -= 1u;
+            }
+
+            let mut val = s.as_bytes()[i] as u32;
+            let w = UTF8_CHAR_WIDTH[val as uint] as uint;
+            assert!((w != 0));
+
+            val = utf8_first_byte!(val, w);
+            val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
+            if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
+            if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
+
+            return CharRange {ch: unsafe { mem::transmute(val) }, next: i};
+        }
+
+        return multibyte_char_range_at_reverse(self, prev);
+    }
+
+    #[inline]
+    fn char_at(&self, i: uint) -> char {
+        self.char_range_at(i).ch
+    }
+
+    #[inline]
+    fn char_at_reverse(&self, i: uint) -> char {
+        self.char_range_at_reverse(i).ch
+    }
+
+    #[inline]
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { mem::transmute(self) }
+    }
+
+    fn find<P: CharEq>(&self, mut pat: P) -> Option<uint> {
+        // ASCII-only patterns can scan bytes directly; otherwise decode chars.
+        if pat.only_ascii() {
+            self.bytes().position(|b| pat.matches(b as char))
+        } else {
+            for (index, c) in self.char_indices() {
+                if pat.matches(c) { return Some(index); }
+            }
+            None
+        }
+    }
+
+    fn rfind<P: CharEq>(&self, mut pat: P) -> Option<uint> {
+        if pat.only_ascii() {
+            self.bytes().rposition(|b| pat.matches(b as char))
+        } else {
+            for (index, c) in self.char_indices().rev() {
+                if pat.matches(c) { return Some(index); }
+            }
+            None
+        }
+    }
+
+    fn find_str(&self, needle: &str) -> Option<uint> {
+        // The empty needle trivially matches at 0 (match_indices forbids it).
+        if needle.is_empty() {
+            Some(0)
+        } else {
+            self.match_indices(needle)
+                .next()
+                .map(|(start, _end)| start)
+        }
+    }
+
+    #[inline]
+    fn slice_shift_char(&self) -> Option<(char, &str)> {
+        if self.is_empty() {
+            None
+        } else {
+            let CharRange {ch, next} = self.char_range_at(0u);
+            let next_s = unsafe { self.slice_unchecked(next, self.len()) };
+            Some((ch, next_s))
+        }
+    }
+
+    fn subslice_offset(&self, inner: &str) -> uint {
+        // Pointer arithmetic: asserts that `inner` lies within `self`'s
+        // memory range, then returns the byte offset of its start.
+        let a_start = self.as_ptr() as uint;
+        let a_end = a_start + self.len();
+        let b_start = inner.as_ptr() as uint;
+        let b_end = b_start + inner.len();
+
+        assert!(a_start <= b_start);
+        assert!(b_end <= a_end);
+        b_start - a_start
+    }
+
+    #[inline]
+    fn as_ptr(&self) -> *const u8 {
+        self.repr().data
+    }
+
+    #[inline]
+    fn len(&self) -> uint { self.repr().len }
+
+    #[inline]
+    fn is_empty(&self) -> bool { self.len() == 0 }
+}
+
+#[stable]
+impl<'a> Default for &'a str {
+    /// The default string slice is the empty string.
+    #[stable]
+    fn default() -> &'a str { "" }
+}
+
+// Lines and LinesAny are thin wrappers: all four impls simply forward to
+// the wrapped inner iterator.
+impl<'a> Iterator<&'a str> for Lines<'a> {
+    #[inline]
+    fn next(&mut self) -> Option<&'a str> { self.inner.next() }
+    #[inline]
+    fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+}
+impl<'a> DoubleEndedIterator<&'a str> for Lines<'a> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
+}
+impl<'a> Iterator<&'a str> for LinesAny<'a> {
+    #[inline]
+    fn next(&mut self) -> Option<&'a str> { self.inner.next() }
+    #[inline]
+    fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
+}
+impl<'a> DoubleEndedIterator<&'a str> for LinesAny<'a> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() }
+}
mod hash;
mod iter;
mod mem;
+mod nonzero;
mod num;
mod ops;
mod option;
--- /dev/null
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use core::nonzero::NonZero;
+use core::option::Option;
+use core::option::Option::{Some, None};
+use std::mem::size_of;
+
+#[test]
+fn test_create_nonzero_instance() {
+    // Construction alone must not panic; NonZero::new is unsafe because the
+    // caller promises the value is non-zero.
+    let _a = unsafe {
+        NonZero::new(21i)
+    };
+}
+
+#[test]
+fn test_size_nonzero_in_option() {
+    // The non-zero niche lets Option<NonZero<T>> use the zero bit pattern
+    // for None, so wrapping in Option must not grow the type.
+    assert_eq!(size_of::<NonZero<u32>>(), size_of::<Option<NonZero<u32>>>());
+}
+
+#[test]
+fn test_match_on_nonzero_option() {
+    // Matching must still see Some even though None shares the zero
+    // representation.
+    let a = Some(unsafe {
+        NonZero::new(42i)
+    });
+    match a {
+        Some(val) => assert_eq!(*val, 42),
+        None => panic!("unexpected None while matching on Some(NonZero(_))")
+    }
+
+    match unsafe { Some(NonZero::new(43i)) } {
+        Some(val) => assert_eq!(*val, 43),
+        None => panic!("unexpected None while matching on Some(NonZero(_))")
+    }
+}
+
+// The remaining tests exercise matching on Option around other types that
+// use pointer-based niche optimizations (Vec, String, Rc, Arc).
+#[test]
+fn test_match_option_empty_vec() {
+    let a: Option<Vec<int>> = Some(vec![]);
+    match a {
+        None => panic!("unexpected None while matching on Some(vec![])"),
+        _ => {}
+    }
+}
+
+#[test]
+fn test_match_option_vec() {
+    let a = Some(vec![1i, 2, 3, 4]);
+    match a {
+        Some(v) => assert_eq!(v, vec![1i, 2, 3, 4]),
+        None => panic!("unexpected None while matching on Some(vec![1, 2, 3, 4])")
+    }
+}
+
+#[test]
+fn test_match_option_rc() {
+    use std::rc::Rc;
+
+    let five = Rc::new(5i);
+    match Some(five) {
+        Some(r) => assert_eq!(*r, 5i),
+        None => panic!("unexpected None while matching on Some(Rc::new(5))")
+    }
+}
+
+#[test]
+fn test_match_option_arc() {
+    use std::sync::Arc;
+
+    let five = Arc::new(5i);
+    match Some(five) {
+        Some(a) => assert_eq!(*a, 5i),
+        None => panic!("unexpected None while matching on Some(Arc::new(5))")
+    }
+}
+
+#[test]
+fn test_match_option_empty_string() {
+    let a = Some(String::new());
+    match a {
+        None => panic!("unexpected None while matching on Some(String::new())"),
+        _ => {}
+    }
+}
+
+#[test]
+fn test_match_option_string() {
+    let five = "Five".into_string();
+    match Some(five) {
+        Some(s) => assert_eq!(s, "Five"),
+        None => panic!("unexpected None while matching on Some(String { ... })")
+    }
+}
// except according to those terms.
use test::Bencher;
-use core::ops::{Range, FullRange, RangeFrom};
+use core::ops::{Range, FullRange, RangeFrom, RangeTo};
// Overhead of dtors
assert!(count == 10);
}
+#[test]
+fn test_range_to() {
+ // Not much to test.
+ let _ = RangeTo { end: 42u };
+}
+
#[test]
fn test_full_range() {
// Not much to test.
fn test_input(g: LabelledGraph) -> IoResult<String> {
let mut writer = Vec::new();
render(&g, &mut writer).unwrap();
- (&mut writer[]).read_to_string()
+ (&mut writer.as_slice()).read_to_string()
}
// All of the tests use raw-strings as the format for the expected outputs,
edge(1, 3, ";"), edge(2, 3, ";" )));
render(&g, &mut writer).unwrap();
- let r = (&mut writer[]).read_to_string();
+ let r = (&mut writer.as_slice()).read_to_string();
assert_eq!(r.unwrap(),
r#"digraph syntax_tree {
// Do the necessary writes
if left.len() > 0 {
- slice::bytes::copy_memory(self.buf[mut self.pos..], left);
+ slice::bytes::copy_memory(self.buf.slice_from_mut(self.pos), left);
}
if right.len() > 0 {
self.buf.push_all(right);
fn is_camel_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
if ident.get().is_empty() { return true; }
- let ident = ident.get().trim_chars('_');
+ let ident = ident.get().trim_matches('_');
// start with a non-lowercase letter rather than non-uppercase
// ones (some scripts don't have a concept of upper/lowercase)
fn is_snake_case(ident: ast::Ident) -> bool {
let ident = token::get_ident(ident);
if ident.get().is_empty() { return true; }
- let ident = ident.get().trim_left_chars('\'');
- let ident = ident.trim_chars('_');
+ let ident = ident.get().trim_left_matches('\'');
+ let ident = ident.trim_matches('_');
let mut allow_underscore = true;
ident.chars().all(|c| {
}
ty::mk_struct(cx.tcx,
ast_util::local_def(item.id),
- Substs::empty())
+ cx.tcx.mk_substs(Substs::empty()))
}
ast::ItemEnum(_, ref ast_generics) => {
if ast_generics.is_parameterized() {
}
ty::mk_enum(cx.tcx,
ast_util::local_def(item.id),
- Substs::empty())
+ cx.tcx.mk_substs(Substs::empty()))
}
_ => return,
};
if self.is_internal(cx, item.span) { return }
match item.node {
- ast::ItemTrait(_, _, _, ref supertraits, _) => {
+ ast::ItemTrait(_, _, ref supertraits, _) => {
for t in supertraits.iter() {
- if let ast::TraitTyParamBound(ref t) = *t {
+ if let ast::TraitTyParamBound(ref t, _) = *t {
let id = ty::trait_ref_to_def_id(cx.tcx, &t.trait_ref);
self.lint(cx, id, t.trait_ref.path.span);
}
let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as uint);
let doc = reader::get_doc(rp_doc, tag_region_param_def_index);
- let index = reader::doc_as_u64(doc) as uint;
+ let index = reader::doc_as_u64(doc) as u32;
let mut bounds = Vec::new();
reader::tagged_docs(rp_doc, tag_items_data_region, |p| {
}
}
}
- ast::ItemTrait(_, _, _, _, ref ms) => {
+ ast::ItemTrait(_, _, _, ref ms) => {
add_to_index(item, rbml_w, index);
rbml_w.start_tag(tag_items_data_item);
encode_def_id(rbml_w, def_id);
let types =
parse_vec_per_param_space(st, |st| parse_ty(st, |x,y| conv(x,y)));
- return subst::Substs { types: types,
- regions: regions };
+ subst::Substs { types: types,
+ regions: regions }
}
fn parse_region_substs(st: &mut PState, conv: conv_did) -> subst::RegionSubsts {
fn parse_bound_region(st: &mut PState, conv: conv_did) -> ty::BoundRegion {
match next(st) {
'a' => {
- let id = parse_uint(st);
+ let id = parse_u32(st);
assert_eq!(next(st), '|');
ty::BrAnon(id)
}
ty::BrNamed(def, ident.name)
}
'f' => {
- let id = parse_uint(st);
+ let id = parse_u32(st);
assert_eq!(next(st), '|');
ty::BrFresh(id)
}
match next(st) {
'b' => {
assert_eq!(next(st), '[');
- let id = ty::DebruijnIndex::new(parse_uint(st));
+ let id = ty::DebruijnIndex::new(parse_u32(st));
assert_eq!(next(st), '|');
let br = parse_bound_region(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
assert_eq!(next(st), '|');
let space = parse_param_space(st);
assert_eq!(next(st), '|');
- let index = parse_uint(st);
+ let index = parse_u32(st);
assert_eq!(next(st), '|');
let nm = token::str_to_ident(parse_str(st, ']')[]);
ty::ReEarlyBound(node_id, space, index, nm.name)
-> ty::TraitRef<'tcx> {
let def = parse_def(st, NominalType, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
- ty::TraitRef {def_id: def, substs: substs}
+ ty::TraitRef {def_id: def, substs: st.tcx.mk_substs(substs)}
}
fn parse_ty<'a, 'tcx>(st: &mut PState<'a, 'tcx>, conv: conv_did) -> Ty<'tcx> {
let def = parse_def(st, NominalType, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
- return ty::mk_enum(st.tcx, def, substs);
+ return ty::mk_enum(st.tcx, def, st.tcx.mk_substs(substs));
}
'x' => {
assert_eq!(next(st), '[');
'p' => {
let did = parse_def(st, TypeParameter, |x,y| conv(x,y));
debug!("parsed ty_param: did={}", did);
- let index = parse_uint(st);
+ let index = parse_u32(st);
assert_eq!(next(st), '|');
let space = parse_param_space(st);
assert_eq!(next(st), '|');
'&' => {
let r = parse_region(st, |x,y| conv(x,y));
let mt = parse_mt(st, |x,y| conv(x,y));
- return ty::mk_rptr(st.tcx, r, mt);
+ return ty::mk_rptr(st.tcx, st.tcx.mk_region(r), mt);
}
'V' => {
let t = parse_ty(st, |x,y| conv(x,y));
}
'F' => {
let def_id = parse_def(st, NominalType, |x,y| conv(x,y));
- return ty::mk_bare_fn(st.tcx, Some(def_id), parse_bare_fn_ty(st, |x,y| conv(x,y)));
+ return ty::mk_bare_fn(st.tcx, Some(def_id),
+ st.tcx.mk_bare_fn(parse_bare_fn_ty(st, |x,y| conv(x,y))));
}
'G' => {
- return ty::mk_bare_fn(st.tcx, None, parse_bare_fn_ty(st, |x,y| conv(x,y)));
+ return ty::mk_bare_fn(st.tcx, None,
+ st.tcx.mk_bare_fn(parse_bare_fn_ty(st, |x,y| conv(x,y))));
}
'#' => {
let pos = parse_hex(st);
let did = parse_def(st, NominalType, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
- return ty::mk_struct(st.tcx, did, substs);
+ return ty::mk_struct(st.tcx, did, st.tcx.mk_substs(substs));
}
'k' => {
assert_eq!(next(st), '[');
let region = parse_region(st, |x,y| conv(x,y));
let substs = parse_substs(st, |x,y| conv(x,y));
assert_eq!(next(st), ']');
- return ty::mk_unboxed_closure(st.tcx, did, region, substs);
+ return ty::mk_unboxed_closure(st.tcx, did,
+ st.tcx.mk_region(region), st.tcx.mk_substs(substs));
}
'e' => {
return ty::mk_err();
};
}
+fn parse_u32(st: &mut PState) -> u32 {
+ let n = parse_uint(st);
+ let m = n as u32;
+ assert_eq!(m as uint, n);
+ m
+}
+
fn parse_param_space(st: &mut PState) -> subst::ParamSpace {
subst::ParamSpace::from_uint(parse_uint(st))
}
let def_id = parse_def(st, NominalType, |x,y| conv(x,y));
let space = parse_param_space(st);
assert_eq!(next(st), '|');
- let index = parse_uint(st);
+ let index = parse_u32(st);
assert_eq!(next(st), '|');
let associated_with = parse_opt(st, |st| {
parse_def(st, NominalType, |x,y| conv(x,y))
ast::TyF64 => mywrite!(w, "MF"),
}
}
- ty::ty_enum(def, ref substs) => {
+ ty::ty_enum(def, substs) => {
mywrite!(w, "t[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
ty::ty_ptr(mt) => { mywrite!(w, "*"); enc_mt(w, cx, mt); }
ty::ty_rptr(r, mt) => {
mywrite!(w, "&");
- enc_region(w, cx, r);
+ enc_region(w, cx, *r);
enc_mt(w, cx, mt);
}
ty::ty_vec(t, sz) => {
mywrite!(w, "f");
enc_closure_ty(w, cx, &**f);
}
- ty::ty_bare_fn(Some(def_id), ref f) => {
+ ty::ty_bare_fn(Some(def_id), f) => {
mywrite!(w, "F");
mywrite!(w, "{}|", (cx.ds)(def_id));
enc_bare_fn_ty(w, cx, f);
}
- ty::ty_bare_fn(None, ref f) => {
+ ty::ty_bare_fn(None, f) => {
mywrite!(w, "G");
enc_bare_fn_ty(w, cx, f);
}
ty::ty_param(ParamTy {space, idx: id, def_id: did}) => {
mywrite!(w, "p{}|{}|{}|", (cx.ds)(did), id, space.to_uint())
}
- ty::ty_struct(def, ref substs) => {
+ ty::ty_struct(def, substs) => {
mywrite!(w, "a[{}|", (cx.ds)(def));
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
- ty::ty_unboxed_closure(def, region, ref substs) => {
+ ty::ty_unboxed_closure(def, region, substs) => {
mywrite!(w, "k[{}|", (cx.ds)(def));
- enc_region(w, cx, region);
+ enc_region(w, cx, *region);
enc_substs(w, cx, substs);
mywrite!(w, "]");
}
pub fn enc_trait_ref<'a, 'tcx>(w: &mut SeekableMemWriter, cx: &ctxt<'a, 'tcx>,
s: &ty::TraitRef<'tcx>) {
mywrite!(w, "{}|", (cx.ds)(s.def_id));
- enc_substs(w, cx, &s.substs);
+ enc_substs(w, cx, s.substs);
}
pub fn enc_trait_store(w: &mut SeekableMemWriter, cx: &ctxt, s: ty::TraitStore) {
self.call(expr, pred, &**l, Some(&**r).into_iter())
}
- ast::ExprSlice(ref base, ref start, ref end, _) => {
- self.call(expr,
- pred,
- &**base,
- start.iter().chain(end.iter()).map(|x| &**x))
- }
-
ast::ExprRange(ref start, ref end) => {
- let fields = Some(&**start).into_iter()
+ let fields = start.as_ref().map(|e| &**e).into_iter()
.chain(end.as_ref().map(|e| &**e).into_iter());
self.straightline(expr, pred, fields)
}
DefAssociatedPath(TyParamProvenance, ast::Ident),
DefTrait(ast::DefId),
DefPrimTy(ast::PrimTy),
- DefTyParam(ParamSpace, ast::DefId, uint),
+ DefTyParam(ParamSpace, ast::DefId, u32),
DefUse(ast::DefId),
DefUpvar(ast::NodeId, // id of closed over local
ast::NodeId, // expr node that creates the closure
}
ast::ExprIndex(ref lhs, ref rhs) => { // lhs[rhs]
- if !self.walk_overloaded_operator(expr, &**lhs, vec![&**rhs], PassArgs::ByRef) {
- self.select_from_expr(&**lhs);
- self.consume_expr(&**rhs);
+ match rhs.node {
+ ast::ExprRange(ref start, ref end) => {
+ // Hacked slicing syntax (KILLME).
+ let args = match (start, end) {
+ (&Some(ref e1), &Some(ref e2)) => vec![&**e1, &**e2],
+ (&Some(ref e), &None) => vec![&**e],
+ (&None, &Some(ref e)) => vec![&**e],
+ (&None, &None) => Vec::new()
+ };
+ let overloaded =
+ self.walk_overloaded_operator(expr, &**lhs, args, PassArgs::ByRef);
+ assert!(overloaded);
+ }
+ _ => {
+ if !self.walk_overloaded_operator(expr,
+ &**lhs,
+ vec![&**rhs],
+ PassArgs::ByRef) {
+ self.select_from_expr(&**lhs);
+ self.consume_expr(&**rhs);
+ }
+ }
}
}
- ast::ExprSlice(ref base, ref start, ref end, _) => { // base[start..end]
- let args = match (start, end) {
- (&Some(ref e1), &Some(ref e2)) => vec![&**e1, &**e2],
- (&Some(ref e), &None) => vec![&**e],
- (&None, &Some(ref e)) => vec![&**e],
- (&None, &None) => Vec::new()
- };
- let overloaded =
- self.walk_overloaded_operator(expr, &**base, args, PassArgs::ByRef);
- assert!(overloaded);
- }
-
ast::ExprRange(ref start, ref end) => {
- self.consume_expr(&**start);
+ start.as_ref().map(|e| self.consume_expr(&**e));
end.as_ref().map(|e| self.consume_expr(&**e));
}
// Select just those fields of the `with`
// expression that will actually be used
let with_fields = match with_cmt.ty.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
ty::struct_fields(self.tcx(), did, substs)
}
_ => {
};
let bk = ty::BorrowKind::from_mutbl(m);
self.delegate.borrow(expr.id, expr.span, cmt,
- r, bk, AutoRef);
+ *r, bk, AutoRef);
}
}
}
Copy
}
}
-
self.unpack_actual_value(a, |a| {
match a.sty {
- ty::ty_bare_fn(Some(a_def_id), ref a_f) => {
+ ty::ty_bare_fn(Some(a_def_id), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
};
let a_borrowed = ty::mk_rptr(self.tcx(),
- r_borrow,
+ self.tcx().mk_region(r_borrow),
mt {ty: inner_ty, mutbl: mutbl_b});
try!(sub.tys(a_borrowed, b));
let coercion = Coercion(self.get_ref().trace.clone());
let r_borrow = self.get_ref().infcx.next_region_var(coercion);
let ty = ty::mk_rptr(self.tcx(),
- r_borrow,
+ self.tcx().mk_region(r_borrow),
ty::mt{ty: ty, mutbl: mt_b.mutbl});
try!(self.get_ref().infcx.try(|_| sub.tys(ty, b)));
debug!("Success, coerced with AutoDerefRef(1, \
bounds: bounds },
ty_a)))
}
- (&ty::ty_struct(did_a, ref substs_a), &ty::ty_struct(did_b, ref substs_b))
+ (&ty::ty_struct(did_a, substs_a), &ty::ty_struct(did_b, substs_b))
if did_a == did_b => {
debug!("unsizing a struct");
// Try unsizing each type param in turn to see if we end up with ty_b.
// Check that the whole types match.
let mut new_substs = substs_a.clone();
new_substs.types.get_mut_slice(subst::TypeSpace)[i] = new_tp;
- let ty = ty::mk_struct(tcx, did_a, new_substs);
+ let ty = ty::mk_struct(tcx, did_a, tcx.mk_substs(new_substs));
if self.get_ref().infcx.try(|_| sub.tys(ty, ty_b)).is_err() {
debug!("Unsized type parameter '{}', but still \
could not match types {} and {}",
let r_a = self.get_ref().infcx.next_region_var(coercion);
self.coerce_object(a, b, b_mutbl,
- |tr| ty::mk_rptr(tcx, r_a, ty::mt{ mutbl: b_mutbl, ty: tr }),
+ |tr| ty::mk_rptr(tcx, tcx.mk_region(r_a),
+ ty::mt{ mutbl: b_mutbl, ty: tr }),
|| AutoPtr(r_a, b_mutbl, None))
}
b.repr(self.tcx()));
match a.sty {
- ty::ty_bare_fn(Some(a_def_id), ref f) => {
+ ty::ty_bare_fn(Some(a_def_id), f) => {
self.coerce_from_fn_item(a, a_def_id, f, b)
}
_ => {
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
fn_def_id_a: ast::DefId,
- fn_ty_a: &ty::BareFnTy<'tcx>,
+ fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
/*!
Ok(Some(adj))
}
ty::ty_bare_fn(None, _) => {
- let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, (*fn_ty_a).clone());
+ let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
try!(self.subtype(a_fn_pointer, b));
Ok(Some(ty::AdjustReifyFnPointer(fn_def_id_a)))
}
if a.def_id != b.def_id {
Err(ty::terr_traits(expected_found(self, a.def_id, b.def_id)))
} else {
- let substs = try!(self.substs(a.def_id, &a.substs, &b.substs));
- Ok(ty::TraitRef { def_id: a.def_id, substs: substs })
+ let substs = try!(self.substs(a.def_id, a.substs, b.substs));
+ Ok(ty::TraitRef { def_id: a.def_id, substs: self.tcx().mk_substs(substs) })
}
}
Ok(a)
}
- (&ty::ty_enum(a_id, ref a_substs),
- &ty::ty_enum(b_id, ref b_substs))
+ (&ty::ty_enum(a_id, a_substs),
+ &ty::ty_enum(b_id, b_substs))
if a_id == b_id => {
let substs = try!(this.substs(a_id,
a_substs,
b_substs));
- Ok(ty::mk_enum(tcx, a_id, substs))
+ Ok(ty::mk_enum(tcx, a_id, tcx.mk_substs(substs)))
}
(&ty::ty_trait(ref a_),
Ok(ty::mk_trait(tcx, principal, bounds))
}
- (&ty::ty_struct(a_id, ref a_substs), &ty::ty_struct(b_id, ref b_substs))
+ (&ty::ty_struct(a_id, a_substs), &ty::ty_struct(b_id, b_substs))
if a_id == b_id => {
let substs = try!(this.substs(a_id, a_substs, b_substs));
- Ok(ty::mk_struct(tcx, a_id, substs))
+ Ok(ty::mk_struct(tcx, a_id, tcx.mk_substs(substs)))
}
- (&ty::ty_unboxed_closure(a_id, a_region, ref a_substs),
- &ty::ty_unboxed_closure(b_id, b_region, ref b_substs))
+ (&ty::ty_unboxed_closure(a_id, a_region, a_substs),
+ &ty::ty_unboxed_closure(b_id, b_region, b_substs))
if a_id == b_id => {
// All ty_unboxed_closure types with the same id represent
// the (anonymous) type of the same closure expression. So
// all of their regions should be equated.
- let region = try!(this.equate().regions(a_region, b_region));
+ let region = try!(this.equate().regions(*a_region, *b_region));
let substs = try!(this.substs_variances(None, a_substs, b_substs));
- Ok(ty::mk_unboxed_closure(tcx, a_id, region, substs))
+ Ok(ty::mk_unboxed_closure(tcx, a_id, tcx.mk_region(region), tcx.mk_substs(substs)))
}
(&ty::ty_uniq(a_inner), &ty::ty_uniq(b_inner)) => {
}
(&ty::ty_rptr(a_r, ref a_mt), &ty::ty_rptr(b_r, ref b_mt)) => {
- let r = try!(this.contraregions(a_r, b_r));
+ let r = try!(this.contraregions(*a_r, *b_r));
// FIXME(14985) If we have mutable references to trait objects, we
// used to use covariant subtyping. I have preserved this behaviour,
// even though it is probably incorrect. So don't go down the usual
}
_ => try!(this.mts(a_mt, b_mt))
};
- Ok(ty::mk_rptr(tcx, r, mt))
+ Ok(ty::mk_rptr(tcx, tcx.mk_region(r), mt))
}
(&ty::ty_vec(a_t, Some(sz_a)), &ty::ty_vec(b_t, Some(sz_b))) => {
}
}
- (&ty::ty_bare_fn(a_opt_def_id, ref a_fty), &ty::ty_bare_fn(b_opt_def_id, ref b_fty))
+ (&ty::ty_bare_fn(a_opt_def_id, a_fty), &ty::ty_bare_fn(b_opt_def_id, b_fty))
if a_opt_def_id == b_opt_def_id =>
{
let fty = try!(this.bare_fn_tys(a_fty, b_fty));
- Ok(ty::mk_bare_fn(tcx, a_opt_def_id, fty))
+ Ok(ty::mk_bare_fn(tcx, a_opt_def_id, tcx.mk_bare_fn(fty)))
}
(&ty::ty_closure(ref a_fty), &ty::ty_closure(ref b_fty)) => {
self.infcx.next_region_var(MiscVariable(self.span))
}
}
-
-
struct RebuildPathInfo<'a> {
path: &'a ast::Path,
// indexes to insert lifetime on path.lifetimes
- indexes: Vec<uint>,
+ indexes: Vec<u32>,
// number of lifetimes we expect to see on the type referred by `path`
// (e.g., expected=1 for struct Foo<'a>)
- expected: uint,
- anon_nums: &'a HashSet<uint>,
+ expected: u32,
+ anon_nums: &'a HashSet<u32>,
region_names: &'a HashSet<ast::Name>
}
generics: &'a ast::Generics,
same_regions: &'a [SameRegions],
life_giver: &'a LifeGiver,
- cur_anon: Cell<uint>,
- inserted_anons: RefCell<HashSet<uint>>,
+ cur_anon: Cell<u32>,
+ inserted_anons: RefCell<HashSet<u32>>,
}
enum FreshOrKept {
}
fn extract_anon_nums_and_names(&self, same_regions: &SameRegions)
- -> (HashSet<uint>, HashSet<ast::Name>) {
+ -> (HashSet<u32>, HashSet<ast::Name>) {
let mut anon_nums = HashSet::new();
let mut region_names = HashSet::new();
for br in same_regions.regions.iter() {
all_region_names
}
- fn inc_cur_anon(&self, n: uint) {
+ fn inc_cur_anon(&self, n: u32) {
let anon = self.cur_anon.get();
self.cur_anon.set(anon+n);
}
self.cur_anon.set(anon);
}
- fn inc_and_offset_cur_anon(&self, n: uint) {
+ fn inc_and_offset_cur_anon(&self, n: u32) {
self.inc_cur_anon(n);
self.offset_cur_anon();
}
- fn track_anon(&self, anon: uint) {
+ fn track_anon(&self, anon: u32) {
self.inserted_anons.borrow_mut().insert(anon);
}
ident: ty_param.ident,
id: ty_param.id,
bounds: bounds,
- unbound: ty_param.unbound.clone(),
default: ty_param.default.clone(),
span: ty_param.span,
}
// be passing down a map.
ast::RegionTyParamBound(lt)
}
- &ast::TraitTyParamBound(ref poly_tr) => {
+ &ast::TraitTyParamBound(ref poly_tr, modifier) => {
let tr = &poly_tr.trait_ref;
let last_seg = tr.path.segments.last().unwrap();
let mut insert = Vec::new();
let lifetimes = last_seg.parameters.lifetimes();
for (i, lt) in lifetimes.iter().enumerate() {
if region_names.contains(<.name) {
- insert.push(i);
+ insert.push(i as u32);
}
}
let rebuild_info = RebuildPathInfo {
path: &tr.path,
indexes: insert,
- expected: lifetimes.len(),
+ expected: lifetimes.len() as u32,
anon_nums: &HashSet::new(),
region_names: region_names
};
path: new_path,
ref_id: tr.ref_id,
}
- })
+ }, modifier)
}
}
})
fn rebuild_expl_self(&self,
expl_self_opt: Option<ast::ExplicitSelf_>,
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>)
-> Option<ast::ExplicitSelf_> {
match expl_self_opt {
fn rebuild_args_ty(&self,
inputs: &[ast::Arg],
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>)
-> Vec<ast::Arg> {
let mut new_inputs = Vec::new();
fn rebuild_output(&self, ty: &ast::FunctionRetTy,
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>) -> ast::FunctionRetTy {
match *ty {
ast::Return(ref ret_ty) => ast::Return(
fn rebuild_arg_ty_or_output(&self,
ty: &ast::Ty,
lifetime: ast::Lifetime,
- anon_nums: &HashSet<uint>,
+ anon_nums: &HashSet<u32>,
region_names: &HashSet<ast::Name>)
-> P<ast::Ty> {
let mut new_ty = P(ty.clone());
let generics = ty::lookup_item_type(self.tcx, did).generics;
let expected =
- generics.regions.len(subst::TypeSpace);
+ generics.regions.len(subst::TypeSpace) as u32;
let lifetimes =
path.segments.last().unwrap().parameters.lifetimes();
let mut insert = Vec::new();
for (i, a) in range(anon,
anon+expected).enumerate() {
if anon_nums.contains(&a) {
- insert.push(i);
+ insert.push(i as u32);
}
self.track_anon(a);
}
} else {
for (i, lt) in lifetimes.iter().enumerate() {
if region_names.contains(<.name) {
- insert.push(i);
+ insert.push(i as u32);
}
}
}
}
} else {
for (i, lt) in data.lifetimes.iter().enumerate() {
- if indexes.contains(&i) {
+ if indexes.contains(&(i as u32)) {
new_lts.push(lifetime);
} else {
new_lts.push(*lt);
pub struct TypeFreshener<'a, 'tcx:'a> {
infcx: &'a InferCtxt<'a, 'tcx>,
- freshen_count: uint,
+ freshen_count: u32,
freshen_map: hash_map::HashMap<ty::InferTy, Ty<'tcx>>,
}
key: ty::InferTy,
freshener: F)
-> Ty<'tcx> where
- F: FnOnce(uint) -> ty::InferTy,
+ F: FnOnce(u32) -> ty::InferTy,
{
match opt_ty {
Some(ty) => { return ty.fold_with(self); }
use util::ppaux::Repr;
use std::cell::{Cell, RefCell};
-use std::uint;
+use std::u32;
use syntax::ast;
mod doc;
lubs: RefCell<CombineMap>,
glbs: RefCell<CombineMap>,
- skolemization_count: Cell<uint>,
- bound_count: Cell<uint>,
+ skolemization_count: Cell<u32>,
+ bound_count: Cell<u32>,
// The undo log records actions that might later be undone.
//
#[allow(missing_copy_implementations)]
pub struct RegionSnapshot {
length: uint,
- skolemization_count: uint,
+ skolemization_count: u32,
}
impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> {
AddVar(vid) => {
let mut var_origins = self.var_origins.borrow_mut();
var_origins.pop().unwrap();
- assert_eq!(var_origins.len(), vid.index);
+ assert_eq!(var_origins.len(), vid.index as uint);
}
AddConstraint(ref constraint) => {
self.constraints.borrow_mut().remove(constraint);
self.skolemization_count.set(snapshot.skolemization_count);
}
- pub fn num_vars(&self) -> uint {
- self.var_origins.borrow().len()
+ pub fn num_vars(&self) -> u32 {
+ let len = self.var_origins.borrow().len();
+ // enforce no overflow
+ assert!(len as u32 as uint == len);
+ len as u32
}
pub fn new_region_var(&self, origin: RegionVariableOrigin<'tcx>) -> RegionVid {
match *self.values.borrow() {
None => {
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[rid.index].span(),
+ (*self.var_origins.borrow())[rid.index as uint].span(),
"attempt to resolve region variable before values have \
been computed!")
}
(ReInfer(ReVar(v_id)), _) | (_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[v_id.index].span(),
+ (*self.var_origins.borrow())[v_id.index as uint].span(),
format!("lub_concrete_regions invoked with \
non-concrete regions: {}, {}",
a,
(ReInfer(ReVar(v_id)), _) |
(_, ReInfer(ReVar(v_id))) => {
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[v_id.index].span(),
+ (*self.var_origins.borrow())[v_id.index as uint].span(),
format!("glb_concrete_regions invoked with \
non-concrete regions: {}, {}",
a,
}
fn construct_var_data(&self) -> Vec<VarData> {
- Vec::from_fn(self.num_vars(), |_| {
+ Vec::from_fn(self.num_vars() as uint, |_| {
VarData {
// All nodes are initially classified as contracting; during
// the expansion phase, we will shift the classification for
.repr(self.tcx));
match *constraint {
ConstrainRegSubVar(a_region, b_vid) => {
- let b_data = &mut var_data[b_vid.index];
+ let b_data = &mut var_data[b_vid.index as uint];
self.expand_node(a_region, b_vid, b_data)
}
ConstrainVarSubVar(a_vid, b_vid) => {
- match var_data[a_vid.index].value {
+ match var_data[a_vid.index as uint].value {
NoValue | ErrorValue => false,
Value(a_region) => {
- let b_node = &mut var_data[b_vid.index];
+ let b_node = &mut var_data[b_vid.index as uint];
self.expand_node(a_region, b_vid, b_node)
}
}
false
}
ConstrainVarSubVar(a_vid, b_vid) => {
- match var_data[b_vid.index].value {
+ match var_data[b_vid.index as uint].value {
NoValue | ErrorValue => false,
Value(b_region) => {
- let a_data = &mut var_data[a_vid.index];
+ let a_data = &mut var_data[a_vid.index as uint];
self.contract_node(a_vid, a_data, b_region)
}
}
}
ConstrainVarSubReg(a_vid, b_region) => {
- let a_data = &mut var_data[a_vid.index];
+ let a_data = &mut var_data[a_vid.index as uint];
self.contract_node(a_vid, a_data, b_region)
}
}
// idea is to report errors that derive from independent
// regions of the graph, but not those that derive from
// overlapping locations.
- let mut dup_vec = Vec::from_elem(self.num_vars(), uint::MAX);
+ let mut dup_vec = Vec::from_elem(self.num_vars() as uint, u32::MAX);
let mut opt_graph = None;
- for idx in range(0u, self.num_vars()) {
+ for idx in range(0u, self.num_vars() as uint) {
match var_data[idx].value {
Value(_) => {
/* Inference successful */
}
let graph = opt_graph.as_ref().unwrap();
- let node_vid = RegionVid { index: idx };
+ let node_vid = RegionVid { index: idx as u32 };
match var_data[idx].classification {
Expanding => {
self.collect_error_for_expanding_node(
}
}
- Vec::from_fn(self.num_vars(), |idx| var_data[idx].value)
+ Vec::from_fn(self.num_vars() as uint, |idx| var_data[idx].value)
}
fn construct_graph(&self) -> RegionGraph {
let constraints = self.constraints.borrow();
let num_edges = constraints.len();
- let mut graph = graph::Graph::with_capacity(num_vars + 1,
+ let mut graph = graph::Graph::with_capacity(num_vars as uint + 1,
num_edges);
- for _ in range(0u, num_vars) {
+ for _ in range(0, num_vars) {
graph.add_node(());
}
let dummy_idx = graph.add_node(());
for (constraint, _) in constraints.iter() {
match *constraint {
ConstrainVarSubVar(a_id, b_id) => {
- graph.add_edge(NodeIndex(a_id.index),
- NodeIndex(b_id.index),
+ graph.add_edge(NodeIndex(a_id.index as uint),
+ NodeIndex(b_id.index as uint),
*constraint);
}
ConstrainRegSubVar(_, b_id) => {
graph.add_edge(dummy_idx,
- NodeIndex(b_id.index),
+ NodeIndex(b_id.index as uint),
*constraint);
}
ConstrainVarSubReg(a_id, _) => {
- graph.add_edge(NodeIndex(a_id.index),
+ graph.add_edge(NodeIndex(a_id.index as uint),
dummy_idx,
*constraint);
}
&self,
graph: &RegionGraph,
var_data: &[VarData],
- dup_vec: &mut [uint],
+ dup_vec: &mut [u32],
node_idx: RegionVid,
errors: &mut Vec<RegionResolutionError<'tcx>>)
{
if !self.is_subregion_of(lower_bound.region,
upper_bound.region) {
errors.push(SubSupConflict(
- (*self.var_origins.borrow())[node_idx.index].clone(),
+ (*self.var_origins.borrow())[node_idx.index as uint].clone(),
lower_bound.origin.clone(),
lower_bound.region,
upper_bound.origin.clone(),
}
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[node_idx.index].span(),
+ (*self.var_origins.borrow())[node_idx.index as uint].span(),
format!("collect_error_for_expanding_node() could not find error \
for var {}, lower_bounds={}, upper_bounds={}",
node_idx,
&self,
graph: &RegionGraph,
var_data: &[VarData],
- dup_vec: &mut [uint],
+ dup_vec: &mut [u32],
node_idx: RegionVid,
errors: &mut Vec<RegionResolutionError<'tcx>>)
{
Ok(_) => {}
Err(_) => {
errors.push(SupSupConflict(
- (*self.var_origins.borrow())[node_idx.index].clone(),
+ (*self.var_origins.borrow())[node_idx.index as uint].clone(),
upper_bound_1.origin.clone(),
upper_bound_1.region,
upper_bound_2.origin.clone(),
}
self.tcx.sess.span_bug(
- (*self.var_origins.borrow())[node_idx.index].span(),
+ (*self.var_origins.borrow())[node_idx.index as uint].span(),
format!("collect_error_for_contracting_node() could not find error \
for var {}, upper_bounds={}",
node_idx,
var_data: &[VarData],
orig_node_idx: RegionVid,
dir: Direction,
- dup_vec: &mut [uint])
+ dup_vec: &mut [u32])
-> (Vec<RegionAndOrigin<'tcx>>, bool) {
struct WalkState<'tcx> {
set: FnvHashSet<RegionVid>,
while !state.stack.is_empty() {
let node_idx = state.stack.pop().unwrap();
- let classification = var_data[node_idx.index].classification;
+ let classification = var_data[node_idx.index as uint].classification;
// check whether we've visited this node on some previous walk
- if dup_vec[node_idx.index] == uint::MAX {
- dup_vec[node_idx.index] = orig_node_idx.index;
- } else if dup_vec[node_idx.index] != orig_node_idx.index {
+ if dup_vec[node_idx.index as uint] == u32::MAX {
+ dup_vec[node_idx.index as uint] = orig_node_idx.index;
+ } else if dup_vec[node_idx.index as uint] != orig_node_idx.index {
state.dup_found = true;
}
dir: Direction) {
debug!("process_edges(source_vid={}, dir={})", source_vid, dir);
- let source_node_index = NodeIndex(source_vid.index);
+ let source_node_index = NodeIndex(source_vid.index as uint);
graph.each_adjacent_edge(source_node_index, dir, |_, edge| {
match edge.data {
ConstrainVarSubVar(from_vid, to_vid) => {
}
fn lookup(values: &Vec<VarValue>, rid: ty::RegionVid) -> ty::Region {
- match values[rid.index] {
+ match values[rid.index as uint] {
Value(r) => r,
NoValue => ReEmpty, // No constraints, return ty::ReEmpty
ErrorValue => ReStatic, // Previously reported error.
use middle::ty::{mod, Ty};
use std::cmp::min;
use std::mem;
-use std::uint;
+use std::u32;
use util::snapshot_vec as sv;
pub struct TypeVariableTable<'tcx> {
}
fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec<Relation> {
- relations(self.values.get_mut(a.index))
+ relations(self.values.get_mut(a.index as uint))
}
pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool {
- self.values.get(vid.index).diverging
+ self.values.get(vid.index as uint).diverging
}
/// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`.
stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>)
{
let old_value = {
- let value_ptr = &mut self.values.get_mut(vid.index).value;
+ let value_ptr = &mut self.values.get_mut(vid.index as uint).value;
mem::replace(value_ptr, Known(ty))
};
value: Bounded(vec![]),
diverging: diverging
});
- ty::TyVid { index: index }
+ ty::TyVid { index: index as u32 }
}
pub fn probe(&self, vid: ty::TyVid) -> Option<Ty<'tcx>> {
- match self.values.get(vid.index).value {
+ match self.values.get(vid.index as uint).value {
Bounded(..) => None,
Known(t) => Some(t)
}
* unified `V1` with `T1`, this function would return `{T0}`.
*/
- let mut new_elem_threshold = uint::MAX;
+ let mut new_elem_threshold = u32::MAX;
let mut escaping_types = Vec::new();
let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot);
debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len());
// always be the first one we see). Note that this
// action must precede those variables being
// specified.
- new_elem_threshold = min(new_elem_threshold, index);
+ new_elem_threshold = min(new_elem_threshold, index as u32);
debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold);
}
action: UndoEntry) {
match action {
SpecifyVar(vid, relations) => {
- values[vid.index].value = Bounded(relations);
+ values[vid.index as uint].value = Bounded(relations);
}
Relate(a, b) => {
- relations(&mut (*values)[a.index]).pop();
- relations(&mut (*values)[b.index]).pop();
+ relations(&mut (*values)[a.index as uint]).pop();
+ relations(&mut (*values)[b.index as uint]).pop();
}
}
}
Bounded(ref mut relations) => relations
}
}
-
// Integral type keys
impl<'tcx> UnifyKey<'tcx, Option<IntVarValue>> for ty::IntVid {
- fn index(&self) -> uint { self.index }
+ fn index(&self) -> uint { self.index as uint }
- fn from_index(i: uint) -> ty::IntVid { ty::IntVid { index: i } }
+ fn from_index(i: uint) -> ty::IntVid { ty::IntVid { index: i as u32 } }
fn unification_table<'v>(infcx: &'v InferCtxt)
-> &'v RefCell<UnificationTable<ty::IntVid, Option<IntVarValue>>>
// Floating point type keys
impl<'tcx> UnifyKey<'tcx, Option<ast::FloatTy>> for ty::FloatVid {
- fn index(&self) -> uint { self.index }
+ fn index(&self) -> uint { self.index as uint }
- fn from_index(i: uint) -> ty::FloatVid { ty::FloatVid { index: i } }
+ fn from_index(i: uint) -> ty::FloatVid { ty::FloatVid { index: i as u32 } }
fn unification_table<'v>(infcx: &'v InferCtxt)
-> &'v RefCell<UnificationTable<ty::FloatVid, Option<ast::FloatTy>>>
// No need to continue; we now know the result.
false
}
- ty::ty_enum(did, ref substs) => {
+ ty::ty_enum(did, substs) => {
for enum_variant in (*ty::enum_variants(tcx, did)).iter() {
for argument_type in enum_variant.args.iter() {
let argument_type = argument_type.subst(tcx, substs);
// Don't traverse substitutions.
false
}
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
for field in ty::struct_fields(tcx, did, substs).iter() {
result = result ||
type_size_is_affected_by_type_parameters(tcx,
SliceMutTraitLangItem, "slice_mut", slice_mut_trait;
RangeStructLangItem, "range", range_struct;
RangeFromStructLangItem, "range_from", range_from_struct;
+ RangeToStructLangItem, "range_to", range_to_struct;
FullRangeStructLangItem, "full_range", full_range_struct;
UnsafeTypeLangItem, "unsafe", unsafe_type;
NoSyncItem, "no_sync_bound", no_sync_bound;
ManagedItem, "managed_bound", managed_bound;
+ NonZeroItem, "non_zero", non_zero;
+
IteratorItem, "iterator", iterator;
StackExhaustedLangItem, "stack_exhausted", stack_exhausted;
ast::ExprBlock(..) | ast::ExprAssign(..) | ast::ExprAssignOp(..) |
ast::ExprMac(..) | ast::ExprStruct(..) | ast::ExprRepeat(..) |
ast::ExprParen(..) | ast::ExprInlineAsm(..) | ast::ExprBox(..) |
- ast::ExprSlice(..) | ast::ExprRange(..) => {
+ ast::ExprRange(..) => {
visit::walk_expr(ir, expr);
}
}
self.propagate_through_expr(&**l, r_succ)
}
- ast::ExprSlice(ref e1, ref e2, ref e3, _) => {
- let succ = e3.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
- let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
- self.propagate_through_expr(&**e1, succ)
- }
-
ast::ExprRange(ref e1, ref e2) => {
let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ));
- self.propagate_through_expr(&**e1, succ)
+ e1.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ))
}
ast::ExprBox(None, ref e) |
ast::ExprBlock(..) | ast::ExprMac(..) | ast::ExprAddrOf(..) |
ast::ExprStruct(..) | ast::ExprRepeat(..) | ast::ExprParen(..) |
ast::ExprClosure(..) | ast::ExprPath(..) | ast::ExprBox(..) |
- ast::ExprSlice(..) | ast::ExprRange(..) => {
+ ast::ExprRange(..) => {
visit::walk_expr(this, expr);
}
ast::ExprIfLet(..) => {
ty::ty_rptr(r, mt) => {
let kind = ty::BorrowKind::from_mutbl(mt.mutbl);
- Some(deref_ptr(BorrowedPtr(kind, r)))
+ Some(deref_ptr(BorrowedPtr(kind, *r)))
}
ty::ty_closure(box ty::ClosureTy {
self.cat_tup_field(expr, base_cmt, idx.node, expr_ty)
}
- ast::ExprIndex(ref base, _) => {
- let method_call = ty::MethodCall::expr(expr.id());
- match self.typer.node_method_ty(method_call) {
- Some(method_ty) => {
- // If this is an index implemented by a method call, then it will
- // include an implicit deref of the result.
- let ret_ty = ty::ty_fn_ret(method_ty).unwrap();
- self.cat_deref(expr,
- self.cat_rvalue_node(expr.id(),
- expr.span(),
- ret_ty), 1, true)
+ ast::ExprIndex(ref base, ref idx) => {
+ match idx.node {
+ ast::ExprRange(..) => {
+ // Slicing syntax special case (KILLME).
+ self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)
}
- None => {
- let base_cmt = self.cat_expr(&**base);
- self.cat_index(expr, base_cmt)
+ _ => {
+ let method_call = ty::MethodCall::expr(expr.id());
+ match self.typer.node_method_ty(method_call) {
+ Some(method_ty) => {
+ // If this is an index implemented by a method call, then it will
+ // include an implicit deref of the result.
+ let ret_ty = ty::ty_fn_ret(method_ty).unwrap();
+ self.cat_deref(expr,
+ self.cat_rvalue_node(expr.id(),
+ expr.span(),
+ ret_ty), 1, true)
+ }
+ None => {
+ self.cat_index(expr, self.cat_expr(&**base))
+ }
+ }
}
}
}
ast::ExprAddrOf(..) | ast::ExprCall(..) |
ast::ExprAssign(..) | ast::ExprAssignOp(..) |
ast::ExprClosure(..) | ast::ExprRet(..) |
- ast::ExprUnary(..) | ast::ExprSlice(..) | ast::ExprRange(..) |
+ ast::ExprUnary(..) | ast::ExprRange(..) |
ast::ExprMethodCall(..) | ast::ExprCast(..) |
ast::ExprVec(..) | ast::ExprTup(..) | ast::ExprIf(..) |
ast::ExprBinary(..) | ast::ExprWhile(..) |
-> (ast::Mutability, ty::Region) {
match slice_ty.sty {
ty::ty_rptr(r, ref mt) => match mt.ty.sty {
- ty::ty_vec(_, None) => (mt.mutbl, r),
+ ty::ty_vec(_, None) => (mt.mutbl, *r),
_ => vec_slice_info(tcx, pat, mt.ty),
},
// method to the root. In this case, if the trait is private, then
// parent all the methods to the trait to indicate that they're
// private.
- ast::ItemTrait(_, _, _, _, ref methods) if item.vis != ast::Public => {
+ ast::ItemTrait(_, _, _, ref methods) if item.vis != ast::Public => {
for m in methods.iter() {
match *m {
ast::ProvidedMethod(ref m) => {
// Default methods on traits are all public so long as the trait
// is public
- ast::ItemTrait(_, _, _, _, ref methods) if public_first => {
+ ast::ItemTrait(_, _, _, ref methods) if public_first => {
for method in methods.iter() {
match *method {
ast::ProvidedMethod(ref m) => {
}
}
- ast::ItemTrait(_, _, _, _, ref methods) => {
+ ast::ItemTrait(_, _, _, ref methods) => {
for m in methods.iter() {
match *m {
ast::ProvidedMethod(ref m) => {
ast::ItemStruct(ref def, _) => check_struct(&**def),
- ast::ItemTrait(_, _, _, _, ref methods) => {
+ ast::ItemTrait(_, _, _, ref methods) => {
for m in methods.iter() {
match *m {
ast::RequiredMethod(..) => {}
fn check_ty_param_bound(&self,
ty_param_bound: &ast::TyParamBound) {
- if let ast::TraitTyParamBound(ref trait_ref) = *ty_param_bound {
+ if let ast::TraitTyParamBound(ref trait_ref, _) = *ty_param_bound {
if !self.tcx.sess.features.borrow().visible_private_types &&
self.path_is_private_type(trait_ref.trait_ref.ref_id) {
let span = trait_ref.trait_ref.path.span;
// namespace (the contents have their own privacies).
ast::ItemForeignMod(_) => {}
- ast::ItemTrait(_, _, _, ref bounds, _) => {
+ ast::ItemTrait(_, _, ref bounds, _) => {
if !self.trait_is_public(item.id) {
return
}
let prev_cx = visitor.cx;
visitor.cx.parent = Some(expr.id);
+
{
let region_maps = &mut visitor.region_maps;
let terminating = |id| {
pub enum DefRegion {
DefStaticRegion,
DefEarlyBoundRegion(/* space */ subst::ParamSpace,
- /* index */ uint,
+ /* index */ u32,
/* lifetime decl */ ast::NodeId),
DefLateBoundRegion(ty::DebruijnIndex,
/* lifetime decl */ ast::NodeId),
ast::ItemTy(_, ref generics) |
ast::ItemEnum(_, ref generics) |
ast::ItemStruct(_, ref generics) |
- ast::ItemTrait(_, ref generics, _, _, _) |
+ ast::ItemTrait(_, ref generics, _, _) |
ast::ItemImpl(_, ref generics, _, _, _) => {
// These kinds of items have only early bound lifetime parameters.
let lifetimes = &generics.lifetimes;
}
}
- fn visit_poly_trait_ref(&mut self, trait_ref: &ast::PolyTraitRef) {
+ fn visit_poly_trait_ref(&mut self, trait_ref:
+ &ast::PolyTraitRef,
+ _modifier: &ast::TraitBoundModifier) {
debug!("visit_poly_trait_ref trait_ref={}", trait_ref);
self.with(LateScope(&trait_ref.bound_lifetimes, self.scope), |old_scope, this| {
fn search_lifetimes<'a>(lifetimes: &'a Vec<ast::LifetimeDef>,
lifetime_ref: &ast::Lifetime)
- -> Option<(uint, &'a ast::Lifetime)> {
+ -> Option<(u32, &'a ast::Lifetime)> {
for (i, lifetime_decl) in lifetimes.iter().enumerate() {
if lifetime_decl.lifetime.name == lifetime_ref.name {
- return Some((i, &lifetime_decl.lifetime));
+ return Some((i as u32, &lifetime_decl.lifetime));
}
}
return None;
}
pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> {
- *self.types.get(ty_param_def.space, ty_param_def.index)
+ *self.types.get(ty_param_def.space, ty_param_def.index as uint)
}
- pub fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ pub fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.types.iter().any(|&t| ty::type_escapes_depth(t, depth)) || {
match self.regions {
ErasedRegions =>
ty_stack_depth: uint,
// Number of region binders we have passed through while doing the substitution
- region_binders_passed: uint,
+ region_binders_passed: u32,
}
impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
match self.substs.regions {
ErasedRegions => ty::ReStatic,
NonerasedRegions(ref regions) =>
- match regions.opt_get(space, i) {
+ match regions.opt_get(space, i as uint) {
Some(&r) => {
self.shift_region_through_binders(r)
}
impl<'a,'tcx> SubstFolder<'a,'tcx> {
fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
// Look up the type in the substitutions. It really should be in there.
- let opt_ty = self.substs.types.opt_get(p.space, p.idx);
+ let opt_ty = self.substs.types.opt_get(p.space, p.idx as uint);
let ty = match opt_ty {
Some(t) => *t,
None => {
}
// provide an impl, but only for suitable `fn` pointers
- ty::ty_bare_fn(_, ty::BareFnTy {
+ ty::ty_bare_fn(_, &ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
} else {
// Recursively check all supertraits to find out if any further
// bounds are required and thus we must fulfill.
- let tmp_tr = data.principal_trait_ref_with_self_ty(ty::mk_err());
+ let tmp_tr = data.principal_trait_ref_with_self_ty(self.tcx(),
+ ty::mk_err());
for tr in util::supertraits(self.tcx(), tmp_tr) {
let td = ty::lookup_trait_def(self.tcx(), tr.def_id());
Ok(If(tys.clone()))
}
- ty::ty_unboxed_closure(def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(def_id, _, substs) => {
// FIXME -- This case is tricky. In the case of by-ref
// closures particularly, we need the results of
// inference to decide how to reflect the type of each
}
}
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
let types: Vec<Ty> =
ty::struct_fields(self.tcx(), def_id, substs)
.iter()
nominal(self, bound, def_id, types)
}
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) => {
let types: Vec<Ty> =
ty::substd_enum_variants(self.tcx(), def_id, substs)
.iter()
let self_ty = self.infcx.shallow_resolve(obligation.self_ty());
let sig = match self_ty.sty {
- ty::ty_bare_fn(_, ty::BareFnTy {
+ ty::ty_bare_fn(_, &ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
ref sig
self_ty);
let trait_ref = Rc::new(ty::Binder(ty::TraitRef {
def_id: obligation.trait_ref.def_id(),
- substs: substs,
+ substs: self.tcx().mk_substs(substs),
}));
try!(self.confirm_poly_trait_refs(obligation.cause.clone(),
obligation.self_ty());
let trait_ref = Rc::new(ty::Binder(ty::TraitRef {
def_id: obligation.trait_ref.def_id(),
- substs: substs,
+ substs: self.tcx().mk_substs(substs),
}));
debug!("confirm_unboxed_closure_candidate(closure_def_id={}, trait_ref={})",
-
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
Ok(def_id) => {
Ok(Rc::new(ty::Binder(ty::TraitRef {
def_id: def_id,
- substs: Substs::empty().with_self_ty(param_ty)
+ substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty))
})))
}
Err(e) => {
},
&AutoPtr(r, m, Some(box ref autoref)) => {
match type_of_autoref(cx, autoref) {
- Some(ty) => Some(mk_rptr(cx, r, mt {mutbl: m, ty: ty})),
+ Some(ty) => Some(mk_rptr(cx, cx.mk_region(r), mt {mutbl: m, ty: ty})),
None => None
}
}
pub id: ast::NodeId,
}
+/// Internal storage
+///
+/// The arenas backing the type context: `mk_t` and the `ctxt` interning
+/// methods (`mk_substs`, `mk_bare_fn`, `mk_region`) allocate into these
+/// and hand out `&'tcx` references, so interned values live exactly as
+/// long as the context itself and are never freed individually.
+pub struct CtxtArenas<'tcx> {
+ type_: TypedArena<TyS<'tcx>>,
+ substs: TypedArena<Substs<'tcx>>,
+ bare_fn: TypedArena<BareFnTy<'tcx>>,
+ region: TypedArena<Region>,
+}
+
+impl<'tcx> CtxtArenas<'tcx> {
+ /// Creates a fresh set of empty arenas.
+ pub fn new() -> CtxtArenas<'tcx> {
+ CtxtArenas {
+ type_: TypedArena::new(),
+ substs: TypedArena::new(),
+ bare_fn: TypedArena::new(),
+ region: TypedArena::new(),
+ }
+ }
+}
+
/// The data structure to keep track of all the information that typechecker
/// generates so that it can be reused and doesn't have to be redone
/// later on.
pub struct ctxt<'tcx> {
- /// The arena that types are allocated from.
- type_arena: &'tcx TypedArena<TyS<'tcx>>,
+ /// The arenas that types etc are allocated from.
+ arenas: &'tcx CtxtArenas<'tcx>,
/// Specifically use a speedy hash algorithm for this hash map, it's used
/// quite often.
// FIXME(eddyb) use a FnvHashSet<InternedTy<'tcx>> when equivalent keys can
// queried from a HashSet.
interner: RefCell<FnvHashMap<InternedTy<'tcx>, Ty<'tcx>>>,
+ // FIXME as above, use a hashset if equivalent elements can be queried.
+ substs_interner: RefCell<FnvHashMap<&'tcx Substs<'tcx>, &'tcx Substs<'tcx>>>,
+ bare_fn_interner: RefCell<FnvHashMap<&'tcx BareFnTy<'tcx>, &'tcx BareFnTy<'tcx>>>,
+ region_interner: RefCell<FnvHashMap<&'tcx Region, &'tcx Region>>,
+
pub sess: Session,
pub def_map: DefMap,
}
}
+// Debug-statistics helper: for each listed `sty` variant, counts how many
+// interned types of that variant exist and what share of them contain
+// region/type inference variables (per the HAS_RE_INFER / HAS_TY_INFER
+// flags), then prints a table. Used by `ctxt::print_debug_stats`.
+macro_rules! sty_debug_print {
+ ($ctxt: expr, $($variant: ident),*) => {{
+ // curious inner module to allow variant names to be used as
+ // variable names.
+ mod inner {
+ use middle::ty;
+ // Per-variant counters (also reused for the grand total).
+ #[deriving(Copy)]
+ struct DebugStat {
+ total: uint,
+ region_infer: uint,
+ ty_infer: uint,
+ both_infer: uint,
+ }
+
+ pub fn go(tcx: &ty::ctxt) {
+ let mut total = DebugStat {
+ total: 0,
+ region_infer: 0, ty_infer: 0, both_infer: 0,
+ };
+ // One zeroed counter per listed variant (DebugStat is Copy and
+ // `total` is still all zeros here).
+ $(let mut $variant = total;)*
+
+
+ // Walk every interned type; primitive/scalar types and ty_err
+ // are skipped via the `continue` arms below.
+ for (_, t) in tcx.interner.borrow().iter() {
+ let variant = match t.sty {
+ ty::ty_bool | ty::ty_char | ty::ty_int(..) | ty::ty_uint(..) |
+ ty::ty_float(..) | ty::ty_str => continue,
+ ty::ty_err => /* unimportant */ continue,
+ $(ty::$variant(..) => &mut $variant,)*
+ };
+ let region = t.flags.intersects(ty::HAS_RE_INFER);
+ let ty = t.flags.intersects(ty::HAS_TY_INFER);
+
+ variant.total += 1;
+ total.total += 1;
+ if region { total.region_infer += 1; variant.region_infer += 1 }
+ if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+ if region && ty { total.both_infer += 1; variant.both_infer += 1 }
+ }
+ // Header row, one percentage row per variant, then the total row.
+ println!("Ty interner total ty region both");
+ $(println!(" {:18}: {uses:6} {usespc:4.1}%, \
+{ty:4.1}% {region:5.1}% {both:4.1}%",
+ stringify!($variant),
+ uses = $variant.total,
+ usespc = $variant.total as f64 * 100.0 / total.total as f64,
+ ty = $variant.ty_infer as f64 * 100.0 / total.total as f64,
+ region = $variant.region_infer as f64 * 100.0 / total.total as f64,
+ both = $variant.both_infer as f64 * 100.0 / total.total as f64);
+ )*
+ println!(" total {uses:6} \
+{ty:4.1}% {region:5.1}% {both:4.1}%",
+ uses = total.total,
+ ty = total.ty_infer as f64 * 100.0 / total.total as f64,
+ region = total.region_infer as f64 * 100.0 / total.total as f64,
+ both = total.both_infer as f64 * 100.0 / total.total as f64)
+ }
+ }
+
+ inner::go($ctxt)
+ }}
+}
+
+impl<'tcx> ctxt<'tcx> {
+ /// Prints interner statistics to stdout: a per-`sty`-variant breakdown
+ /// of the type interner (via `sty_debug_print!`), followed by the sizes
+ /// of the substs, bare-fn and region interner tables. Debugging aid;
+ /// its only effect is the printed output.
+ pub fn print_debug_stats(&self) {
+ sty_debug_print!(
+ self,
+ ty_enum, ty_uniq, ty_vec, ty_ptr, ty_rptr, ty_bare_fn, ty_closure, ty_trait,
+ ty_struct, ty_unboxed_closure, ty_tup, ty_param, ty_open, ty_infer);
+
+ println!("Substs interner: #{}", self.substs_interner.borrow().len());
+ println!("BareFnTy interner: #{}", self.bare_fn_interner.borrow().len());
+ println!("Region interner: #{}", self.region_interner.borrow().len());
+ }
+}
+
#[deriving(Show)]
pub struct TyS<'tcx> {
pub sty: sty<'tcx>,
pub flags: TypeFlags,
// the maximal depth of any bound regions appearing in this type.
- region_depth: uint,
+ region_depth: u32,
}
impl fmt::Show for TypeFlags {
type_escapes_depth(ty, 0)
}
-pub fn type_escapes_depth(ty: Ty, depth: uint) -> bool {
+pub fn type_escapes_depth(ty: Ty, depth: u32) -> bool {
ty.region_depth > depth
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash, Show)]
pub struct ParamTy {
pub space: subst::ParamSpace,
- pub idx: uint,
+ pub idx: u32,
pub def_id: DefId
}
pub struct DebruijnIndex {
// We maintain the invariant that this is never 0. So 1 indicates
// the innermost binder. To ensure this, create with `DebruijnIndex::new`.
- pub depth: uint,
+ pub depth: u32,
}
/// Representation of regions:
// parameters are substituted.
ReEarlyBound(/* param id */ ast::NodeId,
subst::ParamSpace,
- /*index*/ uint,
+ /*index*/ u32,
ast::Name),
// Region bound in a function scope, which will be substituted when the
}
}
- pub fn escapes_depth(&self, depth: uint) -> bool {
+ pub fn escapes_depth(&self, depth: u32) -> bool {
match *self {
ty::ReLateBound(debruijn, _) => debruijn.depth > depth,
_ => false,
RustcEncodable, RustcDecodable, Show, Copy)]
pub enum BoundRegion {
/// An anonymous region parameter for a given fn (&T)
- BrAnon(uint),
+ BrAnon(u32),
/// Named region parameters for functions (a in &'a T)
///
BrNamed(ast::DefId, ast::Name),
/// Fresh bound identifiers created during GLB computations.
- BrFresh(uint),
+ BrFresh(u32),
// Anonymous region for the implicit env pointer parameter
// to a closure
/// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in
/// the `ast_ty_to_ty_cache`. This is probably true for `ty_struct` as
/// well.`
- ty_enum(DefId, Substs<'tcx>),
+ ty_enum(DefId, &'tcx Substs<'tcx>),
ty_uniq(Ty<'tcx>),
ty_str,
ty_vec(Ty<'tcx>, Option<uint>), // Second field is length.
ty_ptr(mt<'tcx>),
- ty_rptr(Region, mt<'tcx>),
+ ty_rptr(&'tcx Region, mt<'tcx>),
// If the def-id is Some(_), then this is the type of a specific
// fn item. Otherwise, if None(_), it a fn pointer type.
- ty_bare_fn(Option<DefId>, BareFnTy<'tcx>),
+ ty_bare_fn(Option<DefId>, &'tcx BareFnTy<'tcx>),
ty_closure(Box<ClosureTy<'tcx>>),
ty_trait(Box<TyTrait<'tcx>>),
- ty_struct(DefId, Substs<'tcx>),
+ ty_struct(DefId, &'tcx Substs<'tcx>),
- ty_unboxed_closure(DefId, Region, Substs<'tcx>),
+ ty_unboxed_closure(DefId, &'tcx Region, &'tcx Substs<'tcx>),
ty_tup(Vec<Ty<'tcx>>),
/// we convert the principal trait-ref into a normal trait-ref,
/// you must give *some* self-type. A common choice is `mk_err()`
/// or some skolemized type.
- pub fn principal_trait_ref_with_self_ty(&self, self_ty: Ty<'tcx>)
+ pub fn principal_trait_ref_with_self_ty(&self,
+ tcx: &ctxt<'tcx>, self_ty: Ty<'tcx>)
-> Rc<ty::PolyTraitRef<'tcx>>
{
Rc::new(ty::Binder(ty::TraitRef {
def_id: self.principal.def_id(),
- substs: self.principal.substs().with_self_ty(self_ty),
+ substs: tcx.mk_substs(self.principal.substs().with_self_ty(self_ty)),
}))
}
}
#[deriving(Clone, PartialEq, Eq, Hash, Show)]
pub struct TraitRef<'tcx> {
pub def_id: DefId,
- pub substs: Substs<'tcx>,
+ pub substs: &'tcx Substs<'tcx>,
}
pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>;
self.0.def_id
}
- pub fn substs(&self) -> &Substs<'tcx> {
- &self.0.substs
+ pub fn substs(&self) -> &'tcx Substs<'tcx> {
+ self.0.substs
}
pub fn input_types(&self) -> &[Ty<'tcx>] {
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
pub struct TyVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
pub struct IntVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
pub struct FloatVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)]
pub struct RegionVid {
- pub index: uint
+ pub index: u32
}
#[deriving(Clone, Copy, PartialEq, Eq, Hash)]
/// A `FreshTy` is one that is generated as a replacement for an
/// unbound type variable. This is convenient for caching etc. See
/// `middle::infer::freshen` for more details.
- FreshTy(uint),
+ FreshTy(u32),
// FIXME -- once integral fallback is impl'd, we should remove
// this type. It's only needed to prevent spurious errors for
// integers whose type winds up never being constrained.
- FreshIntTy(uint),
+ FreshIntTy(u32),
}
#[deriving(Clone, RustcEncodable, RustcDecodable, Eq, Hash, Show, Copy)]
pub enum InferRegion {
ReVar(RegionVid),
- ReSkolemized(uint, BoundRegion)
+ ReSkolemized(u32, BoundRegion)
}
impl cmp::PartialEq for InferRegion {
pub name: ast::Name,
pub def_id: ast::DefId,
pub space: subst::ParamSpace,
- pub index: uint,
+ pub index: u32,
pub associated_with: Option<ast::DefId>,
pub bounds: ParamBounds<'tcx>,
pub default: Option<Ty<'tcx>>,
pub name: ast::Name,
pub def_id: ast::DefId,
pub space: subst::ParamSpace,
- pub index: uint,
+ pub index: u32,
pub bounds: Vec<ty::Region>,
}
}
impl<'tcx> TraitRef<'tcx> {
- pub fn new(def_id: ast::DefId, substs: Substs<'tcx>) -> TraitRef<'tcx> {
+ pub fn new(def_id: ast::DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> {
TraitRef { def_id: def_id, substs: substs }
}
}
pub fn mk_ctxt<'tcx>(s: Session,
- type_arena: &'tcx TypedArena<TyS<'tcx>>,
+ arenas: &'tcx CtxtArenas<'tcx>,
dm: DefMap,
named_region_map: resolve_lifetime::NamedRegionMap,
map: ast_map::Map<'tcx>,
lang_items: middle::lang_items::LanguageItems,
stability: stability::Index) -> ctxt<'tcx> {
ctxt {
- type_arena: type_arena,
+ arenas: arenas,
interner: RefCell::new(FnvHashMap::new()),
+ substs_interner: RefCell::new(FnvHashMap::new()),
+ bare_fn_interner: RefCell::new(FnvHashMap::new()),
+ region_interner: RefCell::new(FnvHashMap::new()),
named_region_map: named_region_map,
item_variance_map: RefCell::new(DefIdMap::new()),
variance_computed: Cell::new(false),
// Type constructors
+// Interning constructors: each deduplicates structurally-equal values and
+// returns an arena-backed reference tied to the 'tcx lifetime, so equal
+// inputs always yield the same pointer.
+impl<'tcx> ctxt<'tcx> {
+ /// Interns `substs`, returning a reference valid for the lifetime of
+ /// the type context.
+ pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> {
+ if let Some(substs) = self.substs_interner.borrow().get(&substs) {
+ return *substs;
+ }
+
+ // Not interned yet: move the value into the arena and record the
+ // reference (the shared borrow above has already ended).
+ let substs = self.arenas.substs.alloc(substs);
+ self.substs_interner.borrow_mut().insert(substs, substs);
+ substs
+ }
+
+ /// Interns a bare-fn type descriptor; same scheme as `mk_substs`.
+ pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> {
+ if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) {
+ return *bare_fn;
+ }
+
+ let bare_fn = self.arenas.bare_fn.alloc(bare_fn);
+ self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn);
+ bare_fn
+ }
+
+ /// Interns a region; same scheme as `mk_substs`.
+ pub fn mk_region(&self, region: Region) -> &'tcx Region {
+ // FIX: this hunk read `.get(®ion)` — a mis-encoding of `&region`
+ // (an `&reg;` HTML-entity artifact) that would not parse. Restored
+ // the intended borrow of the lookup key.
+ if let Some(region) = self.region_interner.borrow().get(&region) {
+ return *region;
+ }
+
+ let region = self.arenas.region.alloc(region);
+ self.region_interner.borrow_mut().insert(region, region);
+ region
+ }
+}
+
// Interns a type/name combination, stores the resulting box in cx.interner,
// and returns the box as cast to an unsafe ptr (see comments for Ty above).
pub fn mk_t<'tcx>(cx: &ctxt<'tcx>, st: sty<'tcx>) -> Ty<'tcx> {
let flags = FlagComputation::for_sty(&st);
- let ty = cx.type_arena.alloc(TyS {
+ let ty = cx.arenas.type_.alloc(TyS {
sty: st,
flags: flags.flags,
region_depth: flags.depth,
flags: TypeFlags,
// maximum depth of any bound region that we have seen thus far
- depth: uint,
+ depth: u32,
}
impl FlagComputation {
self.flags = self.flags | flags;
}
- fn add_depth(&mut self, depth: uint) {
+ fn add_depth(&mut self, depth: u32) {
if depth > self.depth {
self.depth = depth;
}
}
}
- &ty_unboxed_closure(_, ref region, ref substs) => {
+ &ty_unboxed_closure(_, region, substs) => {
self.add_region(*region);
self.add_substs(substs);
}
self.add_flags(HAS_TY_INFER)
}
- &ty_enum(_, ref substs) | &ty_struct(_, ref substs) => {
+ &ty_enum(_, substs) | &ty_struct(_, substs) => {
self.add_substs(substs);
}
}
&ty_rptr(r, ref m) => {
- self.add_region(r);
+ self.add_region(*r);
self.add_ty(m.ty);
}
mk_t(cx, ty_str)
}
-pub fn mk_str_slice<'tcx>(cx: &ctxt<'tcx>, r: Region, m: ast::Mutability) -> Ty<'tcx> {
+pub fn mk_str_slice<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, m: ast::Mutability) -> Ty<'tcx> {
mk_rptr(cx, r,
mt {
ty: mk_t(cx, ty_str),
})
}
-pub fn mk_enum<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId, substs: Substs<'tcx>) -> Ty<'tcx> {
+pub fn mk_enum<'tcx>(cx: &ctxt<'tcx>, did: ast::DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
-// take a copy of substs so that we own the vectors inside
+// `substs` is interned in the tcx arena: we store the borrowed reference
+// directly, no copy is taken.
mk_t(cx, ty_enum(did, substs))
}
pub fn mk_ptr<'tcx>(cx: &ctxt<'tcx>, tm: mt<'tcx>) -> Ty<'tcx> { mk_t(cx, ty_ptr(tm)) }
-pub fn mk_rptr<'tcx>(cx: &ctxt<'tcx>, r: Region, tm: mt<'tcx>) -> Ty<'tcx> {
+pub fn mk_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, tm: mt<'tcx>) -> Ty<'tcx> {
mk_t(cx, ty_rptr(r, tm))
}
-pub fn mk_mut_rptr<'tcx>(cx: &ctxt<'tcx>, r: Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+pub fn mk_mut_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
mk_rptr(cx, r, mt {ty: ty, mutbl: ast::MutMutable})
}
-pub fn mk_imm_rptr<'tcx>(cx: &ctxt<'tcx>, r: Region, ty: Ty<'tcx>) -> Ty<'tcx> {
+pub fn mk_imm_rptr<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> {
mk_rptr(cx, r, mt {ty: ty, mutbl: ast::MutImmutable})
}
mk_t(cx, ty_vec(ty, sz))
}
-pub fn mk_slice<'tcx>(cx: &ctxt<'tcx>, r: Region, tm: mt<'tcx>) -> Ty<'tcx> {
+pub fn mk_slice<'tcx>(cx: &ctxt<'tcx>, r: &'tcx Region, tm: mt<'tcx>) -> Ty<'tcx> {
mk_rptr(cx, r,
mt {
ty: mk_vec(cx, tm.ty, None),
pub fn mk_bare_fn<'tcx>(cx: &ctxt<'tcx>,
opt_def_id: Option<ast::DefId>,
- fty: BareFnTy<'tcx>) -> Ty<'tcx> {
+ fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> {
mk_t(cx, ty_bare_fn(opt_def_id, fty))
}
let input_args = input_tys.iter().map(|ty| *ty).collect();
mk_bare_fn(cx,
Some(def_id),
- BareFnTy {
+ cx.mk_bare_fn(BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(FnSig {
output: ty::FnConverging(output),
variadic: false
})
- })
+ }))
}
}
pub fn mk_struct<'tcx>(cx: &ctxt<'tcx>, struct_id: ast::DefId,
- substs: Substs<'tcx>) -> Ty<'tcx> {
+ substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
-// take a copy of substs so that we own the vectors inside
+// `substs` is interned in the tcx arena: we store the borrowed reference
+// directly, no copy is taken.
mk_t(cx, ty_struct(struct_id, substs))
}
pub fn mk_unboxed_closure<'tcx>(cx: &ctxt<'tcx>, closure_id: ast::DefId,
- region: Region, substs: Substs<'tcx>)
+ region: &'tcx Region, substs: &'tcx Substs<'tcx>)
-> Ty<'tcx> {
mk_t(cx, ty_unboxed_closure(closure_id, region, substs))
}
}
pub fn mk_param<'tcx>(cx: &ctxt<'tcx>, space: subst::ParamSpace,
- n: uint, k: DefId) -> Ty<'tcx> {
+ n: u32, k: DefId) -> Ty<'tcx> {
mk_t(cx, ty_param(ParamTy { space: space, idx: n, def_id: k }))
}
impl ParamTy {
pub fn new(space: subst::ParamSpace,
- index: uint,
+ index: u32,
def_id: ast::DefId)
-> ParamTy {
ParamTy { space: space, idx: index, def_id: def_id }
pub fn simd_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let fields = lookup_struct_fields(cx, did);
lookup_field_type(cx, did, fields[0].id, substs)
}
ty_bool | ty_int(_) | ty_uint(_) |
ty_float(_) | ty_tup(_) | ty_ptr(_) => false,
- ty_enum(did, ref substs) =>
+ ty_enum(did, substs) =>
enum_variants(cx, did).iter().any(|v|
v.args.iter().any(|aty| {
let t = aty.subst(cx, substs);
ty_rptr(r, ref mt) => {
TC::ReachesFfiUnsafe | match mt.ty.sty {
- ty_str => borrowed_contents(r, ast::MutImmutable),
- ty_vec(..) => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(r, mt.mutbl)),
- _ => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(r, mt.mutbl)),
+ ty_str => borrowed_contents(*r, ast::MutImmutable),
+ ty_vec(..) => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(*r,
+ mt.mutbl)),
+ _ => tc_ty(cx, mt.ty, cache).reference(borrowed_contents(*r, mt.mutbl)),
}
}
}
ty_str => TC::Nonsized,
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let flds = struct_fields(cx, did, substs);
let mut res =
TypeContents::union(flds[],
apply_lang_items(cx, did, res)
}
- ty_unboxed_closure(did, r, ref substs) => {
+ ty_unboxed_closure(did, r, substs) => {
// FIXME(#14449): `borrowed_contents` below assumes `&mut`
// unboxed closure.
let upvars = unboxed_closure_upvars(cx, did, substs);
TypeContents::union(upvars.as_slice(),
|f| tc_ty(cx, f.ty, cache))
- | borrowed_contents(r, MutMutable)
+ | borrowed_contents(*r, MutMutable)
}
ty_tup(ref tys) => {
|ty| tc_ty(cx, *ty, cache))
}
- ty_enum(did, ref substs) => {
+ ty_enum(did, substs) => {
let variants = substd_enum_variants(cx, did, substs);
let mut res =
TypeContents::union(variants[], |variant| {
false
}
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
seen.push(did);
let fields = struct_fields(cx, did, substs);
let r = fields.iter().any(|f| type_requires(cx, seen, r_ty, f.mt.ty));
r
}
- ty_unboxed_closure(did, _, ref substs) => {
+ ty_unboxed_closure(did, _, substs) => {
let upvars = unboxed_closure_upvars(cx, did, substs);
upvars.iter().any(|f| type_requires(cx, seen, r_ty, f.ty))
}
false
}
- ty_enum(did, ref substs) => {
+ ty_enum(did, substs) => {
seen.push(did);
let vs = enum_variants(cx, did);
let r = !vs.is_empty() && vs.iter().all(|variant| {
ty_vec(ty, Some(_)) => {
is_type_structurally_recursive(cx, sp, seen, ty)
}
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let fields = struct_fields(cx, did, substs);
find_nonrepresentable(cx, sp, seen, fields.iter().map(|f| f.mt.ty))
}
- ty_enum(did, ref substs) => {
+ ty_enum(did, substs) => {
let vs = enum_variants(cx, did);
let iter = vs.iter()
.flat_map(|variant| { variant.args.iter() })
find_nonrepresentable(cx, sp, seen, iter)
}
- ty_unboxed_closure(did, _, ref substs) => {
+ ty_unboxed_closure(did, _, substs) => {
let upvars = unboxed_closure_upvars(cx, did, substs);
find_nonrepresentable(cx, sp, seen, upvars.iter().map(|f| f.ty))
}
pub fn unsized_part_of_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
ty_str | ty_trait(..) | ty_vec(..) => ty,
- ty_struct(def_id, ref substs) => {
+ ty_struct(def_id, substs) => {
let unsized_fields: Vec<_> = struct_fields(cx, def_id, substs).iter()
.map(|f| f.mt.ty).filter(|ty| !type_is_sized(cx, *ty)).collect();
// Exactly one of the fields must be unsized.
pub fn close_type<'tcx>(cx: &ctxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
match ty.sty {
- ty_open(ty) => mk_rptr(cx, ReStatic, mt {ty: ty, mutbl:ast::MutImmutable}),
+ ty_open(ty) => mk_rptr(cx, cx.mk_region(ReStatic), mt {ty: ty, mutbl:ast::MutImmutable}),
_ => cx.sess.bug(format!("Trying to close a non-open type {}",
ty_to_string(cx, ty))[])
}
(&ty_tup(ref v), None) => v.get(i).map(|&t| t),
- (&ty_struct(def_id, ref substs), None) => lookup_struct_fields(cx, def_id)
+ (&ty_struct(def_id, substs), None) => lookup_struct_fields(cx, def_id)
.get(i)
.map(|&t|lookup_item_type(cx, t.id).ty.subst(cx, substs)),
- (&ty_enum(def_id, ref substs), Some(variant_def_id)) => {
+ (&ty_enum(def_id, substs), Some(variant_def_id)) => {
let variant_info = enum_variant_with_id(cx, def_id, variant_def_id);
variant_info.args.get(i).map(|t|t.subst(cx, substs))
}
- (&ty_enum(def_id, ref substs), None) => {
+ (&ty_enum(def_id, substs), None) => {
assert!(enum_is_univariant(cx, def_id));
let enum_variants = enum_variants(cx, def_id);
let variant_info = &(*enum_variants)[0];
variant: Option<ast::DefId>) -> Option<Ty<'tcx>> {
match (&ty.sty, variant) {
- (&ty_struct(def_id, ref substs), None) => {
+ (&ty_struct(def_id, substs), None) => {
let r = lookup_struct_fields(cx, def_id);
r.iter().find(|f| f.name == n)
.map(|&f| lookup_field_type(cx, def_id, f.id, substs))
}
- (&ty_enum(def_id, ref substs), Some(variant_def_id)) => {
+ (&ty_enum(def_id, substs), Some(variant_def_id)) => {
let variant_info = enum_variant_with_id(cx, def_id, variant_def_id);
variant_info.arg_names.as_ref()
.expect("must have struct enum variant if accessing a named fields")
span: Span,
ty: Ty) -> Region {
match ty.sty {
- ty_rptr(r, _) => r,
+ ty_rptr(r, _) => *r,
ref s => {
tcx.sess.span_bug(
span,
AdjustReifyFnPointer(_) => {
match unadjusted_ty.sty {
- ty::ty_bare_fn(Some(_), ref b) => {
- ty::mk_bare_fn(cx, None, (*b).clone())
+ ty::ty_bare_fn(Some(_), b) => {
+ ty::mk_bare_fn(cx, None, b)
}
ref b => {
cx.sess.bug(
&Some(box ref a) => adjust_ty_for_autoref(cx, span, ty, Some(a)),
&None => ty
};
- mk_rptr(cx, r, mt {
+ mk_rptr(cx, cx.mk_region(r), mt {
ty: adjusted_ty,
mutbl: m
})
ty_to_string(cx, ty))[])
},
&UnsizeStruct(box ref k, tp_index) => match ty.sty {
- ty_struct(did, ref substs) => {
+ ty_struct(did, substs) => {
let ty_substs = substs.types.get_slice(subst::TypeSpace);
let new_ty = unsize_ty(cx, ty_substs[tp_index], k, span);
let mut unsized_substs = substs.clone();
unsized_substs.types.get_mut_slice(subst::TypeSpace)[tp_index] = new_ty;
- mk_struct(cx, did, unsized_substs)
+ mk_struct(cx, did, cx.mk_substs(unsized_substs))
}
_ => cx.sess.span_bug(span,
format!("UnsizeStruct with bad sty: {}",
// the index method invoked for `a[i]` always yields an `&T`
ast::ExprIndex(..) => LvalueExpr,
- // the slice method invoked for `a[..]` always yields an `&T`
- ast::ExprSlice(..) => LvalueExpr,
-
// `for` loops are statements
ast::ExprForLoop(..) => RvalueStmtExpr,
ast::ExprUnary(ast::UnDeref, _) |
ast::ExprField(..) |
ast::ExprTupField(..) |
- ast::ExprIndex(..) |
- ast::ExprSlice(..) => {
+ ast::ExprIndex(..) => {
LvalueExpr
}
match cx.map.find(id.node) {
Some(ast_map::NodeItem(item)) => {
match item.node {
- ItemTrait(_, _, _, _, ref ms) => {
+ ItemTrait(_, _, _, ref ms) => {
let (_, p) =
ast_util::split_trait_methods(ms[]);
p.iter()
-> uint {
for type_parameter_def in trait_def.generics.types.iter() {
if type_parameter_def.def_id == associated_type_id {
- return type_parameter_def.index
+ return type_parameter_def.index as uint
}
}
cx.sess.bug("couldn't find associated type parameter index")
trait_def.bounds.trait_bounds
.iter()
.map(|bound_trait_ref| {
+ let substs = tcx.mk_substs(bound_trait_ref.substs().subst(tcx, trait_ref.substs()));
ty::Binder(
ty::TraitRef::new(bound_trait_ref.def_id(),
- bound_trait_ref.substs().subst(tcx, trait_ref.substs())))
+ substs))
})
.map(|bound_trait_ref| Rc::new(bound_trait_ref))
.collect();
var_id: freevar_def_id.node,
closure_expr_id: closure_id.node
}].clone();
- freevar_ty = mk_rptr(tcx, borrow.region, ty::mt {
+ freevar_ty = mk_rptr(tcx, tcx.mk_region(borrow.region), ty::mt {
ty: freevar_ty,
mutbl: borrow.kind.to_mutbl_lossy()
});
let opt_trait_ref = opt_principal.map_or(Vec::new(), |principal| {
let substs = principal.substs().with_self_ty(open_ty);
- vec!(Rc::new(ty::Binder(ty::TraitRef::new(principal.def_id(), substs))))
+ vec!(Rc::new(ty::Binder(ty::TraitRef::new(principal.def_id(), tcx.mk_substs(substs)))))
});
let param_bounds = ty::ParamBounds {
}
ty_rptr(r, m) => {
byte!(13);
- region(state, r);
+ region(state, *r);
mt(state, m);
}
ty_bare_fn(opt_def_id, ref b) => {
ty_unboxed_closure(d, r, _) => {
byte!(24);
did(state, d);
- region(state, r);
+ region(state, *r);
}
}
true
space,
def.repr(tcx),
i);
- let ty = ty::mk_param(tcx, space, i, def.def_id);
+ let ty = ty::mk_param(tcx, space, i as u32, def.def_id);
types.push(space, ty);
}
}
walk_ty(ty, |ty| {
match ty.sty {
ty_rptr(region, _) => {
- accumulator.push(region)
+ accumulator.push(*region)
}
ty_trait(ref t) => {
accumulator.push_all(t.principal.substs().regions().as_slice());
}
- ty_enum(_, ref substs) |
- ty_struct(_, ref substs) => {
+ ty_enum(_, substs) |
+ ty_struct(_, substs) => {
accum_substs(accumulator, substs);
}
ty_closure(ref closure_ty) => {
UniqTraitStore => {}
}
}
- ty_unboxed_closure(_, ref region, ref substs) => {
+ ty_unboxed_closure(_, region, substs) => {
accumulator.push(*region);
accum_substs(accumulator, substs);
}
}
impl DebruijnIndex {
- pub fn new(depth: uint) -> DebruijnIndex {
+ pub fn new(depth: u32) -> DebruijnIndex {
assert!(depth > 0);
DebruijnIndex { depth: depth }
}
- pub fn shifted(&self, amount: uint) -> DebruijnIndex {
+ pub fn shifted(&self, amount: u32) -> DebruijnIndex {
DebruijnIndex { depth: self.depth + amount }
}
}
param_env: &ParameterEnvironment<'tcx>)
-> Result<(),CopyImplementationError> {
match self_type.sty {
- ty::ty_struct(struct_did, ref substs) => {
+ ty::ty_struct(struct_did, substs) => {
let fields = ty::struct_fields(tcx, struct_did, substs);
for field in fields.iter() {
if type_moves_by_default(tcx, field.mt.ty, param_env) {
}
}
}
- ty::ty_enum(enum_did, ref substs) => {
+ ty::ty_enum(enum_did, substs) => {
let enum_variants = ty::enum_variants(tcx, enum_did);
for variant in enum_variants.iter() {
for variant_arg_type in variant.args.iter() {
self.has_regions_escaping_depth(0)
}
- fn has_regions_escaping_depth(&self, depth: uint) -> bool;
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool;
}
impl<'tcx> RegionEscape for Ty<'tcx> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
ty::type_escapes_depth(*self, depth)
}
}
impl RegionEscape for Region {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.escapes_depth(depth)
}
}
impl<'tcx> RegionEscape for TraitRef<'tcx> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.substs.types.iter().any(|t| t.has_regions_escaping_depth(depth)) &&
self.substs.regions().iter().any(|t| t.has_regions_escaping_depth(depth))
}
}
impl<'tcx,T:RegionEscape> RegionEscape for Binder<T> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth + 1)
}
}
impl<'tcx> RegionEscape for EquatePredicate<'tcx> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth) || self.1.has_regions_escaping_depth(depth)
}
}
impl<T:RegionEscape,U:RegionEscape> RegionEscape for OutlivesPredicate<T,U> {
- fn has_regions_escaping_depth(&self, depth: uint) -> bool {
+ fn has_regions_escaping_depth(&self, depth: u32) -> bool {
self.0.has_regions_escaping_depth(depth) || self.1.has_regions_escaping_depth(depth)
}
}
-
ty::ty_open(typ.fold_with(this))
}
ty::ty_enum(tid, ref substs) => {
- ty::ty_enum(tid, substs.fold_with(this))
+ let substs = substs.fold_with(this);
+ ty::ty_enum(tid, this.tcx().mk_substs(substs))
}
ty::ty_trait(box ty::TyTrait { ref principal, bounds }) => {
ty::ty_trait(box ty::TyTrait {
ty::ty_tup(ts.fold_with(this))
}
ty::ty_bare_fn(opt_def_id, ref f) => {
- ty::ty_bare_fn(opt_def_id, f.fold_with(this))
+ let bfn = f.fold_with(this);
+ ty::ty_bare_fn(opt_def_id, this.tcx().mk_bare_fn(bfn))
}
ty::ty_closure(ref f) => {
ty::ty_closure(box f.fold_with(this))
}
ty::ty_rptr(r, ref tm) => {
- ty::ty_rptr(r.fold_with(this), tm.fold_with(this))
+ let r = r.fold_with(this);
+ ty::ty_rptr(this.tcx().mk_region(r), tm.fold_with(this))
}
ty::ty_struct(did, ref substs) => {
- ty::ty_struct(did, substs.fold_with(this))
+ let substs = substs.fold_with(this);
+ ty::ty_struct(did, this.tcx().mk_substs(substs))
}
ty::ty_unboxed_closure(did, ref region, ref substs) => {
- ty::ty_unboxed_closure(did, region.fold_with(this), substs.fold_with(this))
+ let r = region.fold_with(this);
+ let s = substs.fold_with(this);
+ ty::ty_unboxed_closure(did, this.tcx().mk_region(r), this.tcx().mk_substs(s))
}
ty::ty_bool | ty::ty_char | ty::ty_str |
ty::ty_int(_) | ty::ty_uint(_) | ty::ty_float(_) |
t: &ty::TraitRef<'tcx>)
-> ty::TraitRef<'tcx>
{
+ let substs = t.substs.fold_with(this);
ty::TraitRef {
def_id: t.def_id,
- substs: t.substs.fold_with(this),
+ substs: this.tcx().mk_substs(substs),
}
}
pub struct RegionFolder<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>,
- current_depth: uint,
- fld_r: &'a mut (FnMut(ty::Region, uint) -> ty::Region + 'a),
+ current_depth: u32,
+ fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a),
}
impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
pub fn new<F>(tcx: &'a ty::ctxt<'tcx>, fld_r: &'a mut F) -> RegionFolder<'a, 'tcx>
- where F : FnMut(ty::Region, uint) -> ty::Region
+ where F : FnMut(ty::Region, u32) -> ty::Region
{
RegionFolder {
tcx: tcx,
value: &T,
mut f: F)
-> T
- where F : FnMut(ty::Region, uint) -> ty::Region,
+ where F : FnMut(ty::Region, u32) -> ty::Region,
T : TypeFoldable<'tcx>,
{
value.fold_with(&mut RegionFolder::new(tcx, &mut f))
// regions. See comment on `shift_regions_through_binders` method in
// `subst.rs` for more details.
-pub fn shift_region(region: ty::Region, amount: uint) -> ty::Region {
+pub fn shift_region(region: ty::Region, amount: u32) -> ty::Region {
match region {
ty::ReLateBound(debruijn, br) => {
ty::ReLateBound(debruijn.shifted(amount), br)
}
pub fn shift_regions<'tcx, T:TypeFoldable<'tcx>+Repr<'tcx>>(tcx: &ty::ctxt<'tcx>,
- amount: uint, value: &T) -> T {
+ amount: u32, value: &T) -> T {
debug!("shift_regions(value={}, amount={})",
value.repr(tcx), amount);
shift_region(region, amount)
}))
}
-
PARSE_ONLY,
NO_TRANS,
NO_ANALYSIS,
- UNSTABLE_OPTIONS
+ UNSTABLE_OPTIONS,
+ PRINT_ENUM_SIZES
]
0
}
("no-analysis", "Parse and expand the source, but run no analysis and",
NO_TRANS),
("unstable-options", "Adds unstable command line options to rustc interface",
- UNSTABLE_OPTIONS)]
+ UNSTABLE_OPTIONS),
+ ("print-enum-sizes", "Print the size of enums and their variants", PRINT_ENUM_SIZES),
+ ]
}
#[deriving(Clone)]
pub fn show_span(&self) -> bool {
self.debugging_opt(config::SHOW_SPAN)
}
+ pub fn print_enum_sizes(&self) -> bool {
+ self.debugging_opt(config::PRINT_ENUM_SIZES)
+ }
pub fn sysroot<'a>(&'a self) -> &'a Path {
match self.opts.maybe_sysroot {
Some (ref sysroot) => sysroot,
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
emitter.emit(None, msg, None, diagnostic::Warning);
}
-
}, ty_to_string(cx, tm.ty))
}
ty_rptr(r, ref tm) => {
- let mut buf = region_ptr_to_string(cx, r);
+ let mut buf = region_ptr_to_string(cx, *r);
buf.push_str(mt_to_string(cx, tm)[]);
buf
}
param_ty.user_string(cx)
}
}
- ty_enum(did, ref substs) | ty_struct(did, ref substs) => {
+ ty_enum(did, substs) | ty_struct(did, substs) => {
let base = ty::item_path_str(cx, did);
let generics = ty::lookup_item_type(cx, did).generics;
parameterized(cx, base.as_slice(), substs, &generics, did)
bound_str)
}
ty_str => "str".to_string(),
- ty_unboxed_closure(ref did, _, ref substs) => {
+ ty_unboxed_closure(ref did, _, substs) => {
let unboxed_closures = cx.unboxed_closures.borrow();
unboxed_closures.get(did).map(|cl| {
closure_to_string(cx, &cl.closure_type.subst(cx, substs))
let trait_def = ty::lookup_trait_def(tcx, self.def_id);
format!("TraitRef({}, {})",
self.substs.self_ty().repr(tcx),
- parameterized(tcx, base.as_slice(), &self.substs, &trait_def.generics, self.def_id))
+ parameterized(tcx, base.as_slice(), self.substs, &trait_def.generics, self.def_id))
}
}
fn user_string(&self, tcx: &ctxt<'tcx>) -> String {
let path_str = ty::item_path_str(tcx, self.def_id);
let trait_def = ty::lookup_trait_def(tcx, self.def_id);
- parameterized(tcx, path_str.as_slice(), &self.substs,
+ parameterized(tcx, path_str.as_slice(), self.substs,
&trait_def.generics, self.def_id)
}
}
let buffer_remaining = size - self.buffer_idx;
if input.len() >= buffer_remaining {
copy_memory(
- self.buffer[mut self.buffer_idx..size],
+ self.buffer.slice_mut(self.buffer_idx, size),
input[..buffer_remaining]);
self.buffer_idx = 0;
func(&self.buffer);
i += buffer_remaining;
} else {
copy_memory(
- self.buffer[mut self.buffer_idx..self.buffer_idx + input.len()],
+ self.buffer.slice_mut(self.buffer_idx, self.buffer_idx + input.len()),
input);
self.buffer_idx += input.len();
return;
// be empty.
let input_remaining = input.len() - i;
copy_memory(
- self.buffer[mut ..input_remaining],
+ self.buffer.slice_to_mut(input_remaining),
input[i..]);
self.buffer_idx += input_remaining;
}
fn zero_until(&mut self, idx: uint) {
assert!(idx >= self.buffer_idx);
- self.buffer[mut self.buffer_idx..idx].set_memory(0);
+ self.buffer.slice_mut(self.buffer_idx, idx).set_memory(0);
self.buffer_idx = idx;
}
fn next<'s>(&'s mut self, len: uint) -> &'s mut [u8] {
self.buffer_idx += len;
- return self.buffer[mut self.buffer_idx - len..self.buffer_idx];
+ return self.buffer.slice_mut(self.buffer_idx - len, self.buffer_idx);
}
fn full_buffer<'s>(&'s mut self) -> &'s [u8] {
)
);
- read_u32v_be(w[mut 0..16], data);
+ read_u32v_be(w.slice_mut(0, 16), data);
// Putting the message schedule inside the same loop as the round calculations allows for
// the compiler to generate better code.
fn result(&mut self, out: &mut [u8]) {
self.engine.finish();
- write_u32_be(out[mut 0..4], self.engine.state.h0);
- write_u32_be(out[mut 4..8], self.engine.state.h1);
- write_u32_be(out[mut 8..12], self.engine.state.h2);
- write_u32_be(out[mut 12..16], self.engine.state.h3);
- write_u32_be(out[mut 16..20], self.engine.state.h4);
- write_u32_be(out[mut 20..24], self.engine.state.h5);
- write_u32_be(out[mut 24..28], self.engine.state.h6);
- write_u32_be(out[mut 28..32], self.engine.state.h7);
+ write_u32_be(out.slice_mut(0, 4), self.engine.state.h0);
+ write_u32_be(out.slice_mut(4, 8), self.engine.state.h1);
+ write_u32_be(out.slice_mut(8, 12), self.engine.state.h2);
+ write_u32_be(out.slice_mut(12, 16), self.engine.state.h3);
+ write_u32_be(out.slice_mut(16, 20), self.engine.state.h4);
+ write_u32_be(out.slice_mut(20, 24), self.engine.state.h5);
+ write_u32_be(out.slice_mut(24, 28), self.engine.state.h6);
+ write_u32_be(out.slice_mut(28, 32), self.engine.state.h7);
}
fn reset(&mut self) {
SawExprAssign,
SawExprAssignOp(ast::BinOp),
SawExprIndex,
- SawExprSlice,
SawExprRange,
SawExprPath,
SawExprAddrOf(ast::Mutability),
ExprField(_, id) => SawExprField(content(id.node)),
ExprTupField(_, id) => SawExprTupField(id.node),
ExprIndex(..) => SawExprIndex,
- ExprSlice(..) => SawExprSlice,
ExprRange(..) => SawExprRange,
ExprPath(..) => SawExprPath,
ExprAddrOf(m, _) => SawExprAddrOf(m),
}
}
- (&ty::ty_enum(enum_def_id, ref substs), ref enum_variant_info) => {
+ (&ty::ty_enum(enum_def_id, substs), ref enum_variant_info) => {
let variant_info = {
let mut variants = ty::substd_enum_variants(tcx, enum_def_id, substs);
match *enum_variant_info {
use std::io;
use std::io::fs;
use std::os;
-use arena::TypedArena;
use syntax::ast;
use syntax::ast_map;
use syntax::attr;
if stop_after_phase_2(&sess) { return; }
- let type_arena = TypedArena::new();
- let analysis = phase_3_run_analysis_passes(sess, ast_map, &type_arena, id);
+ let arenas = ty::CtxtArenas::new();
+ let analysis = phase_3_run_analysis_passes(sess, ast_map, &arenas, id);
phase_save_analysis(&analysis.ty_cx.sess, analysis.ty_cx.map.krate(), &analysis, outdir);
+
+ if log_enabled!(::log::INFO) {
+ println!("Pre-trans");
+ analysis.ty_cx.print_debug_stats();
+ }
+
if stop_after_phase_3(&analysis.ty_cx.sess) { return; }
let (tcx, trans) = phase_4_translate_to_llvm(analysis);
+ if log_enabled!(::log::INFO) {
+ println!("Post-trans");
+ tcx.print_debug_stats();
+ }
+
// Discard interned strings as they are no longer required.
token::get_ident_interner().clear();
/// structures carrying the results of the analysis.
pub fn phase_3_run_analysis_passes<'tcx>(sess: Session,
ast_map: ast_map::Map<'tcx>,
- type_arena: &'tcx TypedArena<ty::TyS<'tcx>>,
+ arenas: &'tcx ty::CtxtArenas<'tcx>,
name: String) -> ty::CrateAnalysis<'tcx> {
let time_passes = sess.time_passes();
let krate = ast_map.krate();
middle::check_static_recursion::check_crate(&sess, krate, &def_map, &ast_map));
let ty_cx = ty::mk_ctxt(sess,
- type_arena,
+ arenas,
def_map,
named_region_map,
ast_map,
match cfg.spawn(move || { std::io::stdio::set_stderr(box w); f() }).join() {
Ok(()) => { /* fallthrough */ }
Err(value) => {
- // Task panicked without emitting a fatal diagnostic
+ // Thread panicked without emitting a fatal diagnostic
if !value.is::<diagnostic::FatalError>() {
let mut emitter = diagnostic::EmitterWriter::stderr(diagnostic::Auto, None);
use std::io::{mod, MemReader};
use std::option;
use std::str::FromStr;
-use arena::TypedArena;
#[deriving(Copy, PartialEq, Show)]
pub enum PpSourceMode {
fn call_with_pp_support<'tcx, A, B, F>(&self,
sess: Session,
ast_map: Option<ast_map::Map<'tcx>>,
- type_arena: &'tcx TypedArena<ty::TyS<'tcx>>,
+ arenas: &'tcx ty::CtxtArenas<'tcx>,
id: String,
payload: B,
f: F) -> A where
}
PpmTyped => {
let ast_map = ast_map.expect("--pretty=typed missing ast_map");
- let analysis = driver::phase_3_run_analysis_passes(sess, ast_map,
- type_arena, id);
+ let analysis = driver::phase_3_run_analysis_passes(sess, ast_map, arenas, id);
let annotation = TypedAnnotation { analysis: analysis };
f(&annotation, payload)
}
};
let mut forest = ast_map::Forest::new(krate);
- let type_arena = TypedArena::new();
+ let arenas = ty::CtxtArenas::new();
let (krate, ast_map) = if compute_ast_map {
let map = driver::assign_node_ids_and_map(&sess, &mut forest);
match (ppm, opt_uii) {
(PpmSource(s), None) =>
s.call_with_pp_support(
- sess, ast_map, &type_arena, id, out, |annotation, out| {
+ sess, ast_map, &arenas, id, out, |annotation, out| {
debug!("pretty printing source code {}", s);
let sess = annotation.sess();
pprust::print_crate(sess.codemap(),
(PpmSource(s), Some(uii)) =>
s.call_with_pp_support(
- sess, ast_map, &type_arena, id, (out,uii), |annotation, (out,uii)| {
+ sess, ast_map, &arenas, id, (out,uii), |annotation, (out,uii)| {
debug!("pretty printing source code {}", s);
let sess = annotation.sess();
let ast_map = annotation.ast_map()
match code {
Some(code) => {
let variants = gather_flowgraph_variants(&sess);
- let analysis = driver::phase_3_run_analysis_passes(sess, ast_map,
- &type_arena, id);
+ let analysis = driver::phase_3_run_analysis_passes(sess, ast_map, &arenas, id);
print_flowgraph(variants, analysis, code, out)
}
None => {
let named_region_map = resolve_lifetime::krate(&sess, krate, &def_map);
let region_map = region::resolve_crate(&sess, krate);
let stability_index = stability::Index::build(krate);
- let type_arena = TypedArena::new();
+ let arenas = ty::CtxtArenas::new();
let tcx = ty::mk_ctxt(sess,
- &type_arena,
+ &arenas,
def_map,
named_region_map,
ast_map,
let input_args = input_tys.iter().map(|ty| *ty).collect();
ty::mk_bare_fn(self.infcx.tcx,
None,
- ty::BareFnTy {
+ self.infcx.tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(output_ty),
variadic: false
})
- })
+ }))
}
pub fn t_nil(&self) -> Ty<'tcx> {
})
}
- pub fn t_param(&self, space: subst::ParamSpace, index: uint) -> Ty<'tcx> {
+ pub fn t_param(&self, space: subst::ParamSpace, index: u32) -> Ty<'tcx> {
ty::mk_param(self.infcx.tcx, space, index, ast_util::local_def(ast::DUMMY_NODE_ID))
}
pub fn re_early_bound(&self,
space: subst::ParamSpace,
- index: uint,
+ index: u32,
name: &'static str)
-> ty::Region
{
ty::ReEarlyBound(ast::DUMMY_NODE_ID, space, index, name)
}
- pub fn re_late_bound_with_debruijn(&self, id: uint, debruijn: ty::DebruijnIndex) -> ty::Region {
+ pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex) -> ty::Region {
ty::ReLateBound(debruijn, ty::BrAnon(id))
}
pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, r, ty::mk_int())
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r), ty::mk_int())
}
- pub fn t_rptr_late_bound(&self, id: uint) -> Ty<'tcx> {
+ pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> {
+ let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1));
ty::mk_imm_rptr(self.infcx.tcx,
- self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1)),
+ self.infcx.tcx.mk_region(r),
ty::mk_int())
}
pub fn t_rptr_late_bound_with_debruijn(&self,
- id: uint,
+ id: u32,
debruijn: ty::DebruijnIndex)
-> Ty<'tcx> {
+ let r = self.re_late_bound_with_debruijn(id, debruijn);
ty::mk_imm_rptr(self.infcx.tcx,
- self.re_late_bound_with_debruijn(id, debruijn),
+ self.infcx.tcx.mk_region(r),
ty::mk_int())
}
pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, ty::ReScope(CodeExtent::from_node_id(id)), ty::mk_int())
+ let r = ty::ReScope(CodeExtent::from_node_id(id));
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r), ty::mk_int())
}
- pub fn re_free(&self, nid: ast::NodeId, id: uint) -> ty::Region {
+ pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region {
ty::ReFree(ty::FreeRegion { scope: CodeExtent::from_node_id(nid),
bound_region: ty::BrAnon(id)})
}
- pub fn t_rptr_free(&self, nid: ast::NodeId, id: uint) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, self.re_free(nid, id), ty::mk_int())
+ pub fn t_rptr_free(&self, nid: ast::NodeId, id: u32) -> Ty<'tcx> {
+ let r = self.re_free(nid, id);
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(r), ty::mk_int())
}
pub fn t_rptr_static(&self) -> Ty<'tcx> {
- ty::mk_imm_rptr(self.infcx.tcx, ty::ReStatic, ty::mk_int())
+ ty::mk_imm_rptr(self.infcx.tcx, self.infcx.tcx.mk_region(ty::ReStatic), ty::mk_int())
}
pub fn dummy_type_trace(&self) -> infer::TypeTrace<'tcx> {
assert_eq!(t_substituted, t_expected);
})
}
-
ItemImpl(_, _, Some(_), _, _) => parent,
- ItemTrait(_, _, _, _, ref items) => {
+ ItemTrait(_, _, _, ref items) => {
let name_bindings =
self.add_child(name,
parent.clone(),
}
}
+ /// Searches the current set of local scopes and
+ /// applies translations for closures.
fn search_ribs(&self,
ribs: &[Rib],
name: Name,
None
}
+ /// Searches the current set of local scopes for labels.
+    /// Stops at the first non-normal rib, i.e. at a function or closure boundary.
+ fn search_label(&self, name: Name) -> Option<DefLike> {
+ for rib in self.label_ribs.iter().rev() {
+ match rib.kind {
+ NormalRibKind => {
+ // Continue
+ }
+ _ => {
+ // Do not resolve labels across function boundary
+ return None
+ }
+ }
+ let result = rib.bindings.get(&name).cloned();
+ if result.is_some() {
+ return result
+ }
+ }
+ None
+ }
+
fn resolve_crate(&mut self, krate: &ast::Crate) {
debug!("(resolving crate) starting");
impl_items[]);
}
- ItemTrait(_, ref generics, ref unbound, ref bounds, ref trait_items) => {
+ ItemTrait(_, ref generics, ref bounds, ref trait_items) => {
// Create a new rib for the self type.
let mut self_type_rib = Rib::new(ItemRibKind);
this.resolve_type_parameter_bounds(item.id, bounds,
TraitDerivation);
- match *unbound {
- Some(ref tpb) => {
- this.resolve_trait_reference(item.id, tpb, TraitDerivation);
- }
- None => {}
- }
-
for trait_item in (*trait_items).iter() {
// Create a new rib for the trait_item-specific type
// parameters.
let def_like = DlDef(DefTyParam(space,
local_def(type_parameter.id),
- index));
+ index as u32));
// Associate this type parameter with
// the item that bound it
self.record_def(type_parameter.id,
self.resolve_type_parameter_bound(type_parameter.id, bound,
TraitBoundingTypeParameter);
}
- match &type_parameter.unbound {
- &Some(ref unbound) =>
- self.resolve_trait_reference(
- type_parameter.id, unbound, TraitBoundingTypeParameter),
- &None => {}
- }
match type_parameter.default {
Some(ref ty) => self.resolve_type(&**ty),
None => {}
type_parameter_bound: &TyParamBound,
reference_type: TraitReferenceType) {
match *type_parameter_bound {
- TraitTyParamBound(ref tref) => {
+ TraitTyParamBound(ref tref, _) => {
self.resolve_poly_trait_reference(id, tref, reference_type)
}
RegionTyParamBound(..) => {}
ExprBreak(Some(label)) | ExprAgain(Some(label)) => {
let renamed = mtwt::resolve(label);
- match self.search_ribs(self.label_ribs[],
- renamed, expr.span) {
+ match self.search_label(renamed) {
None => {
self.resolve_error(
expr.span,
}
fn dump(&mut self, handler: &Handler) {
- let mut buffer = self.buffer.lock();
+ let mut buffer = self.buffer.lock().unwrap();
for diag in buffer.iter() {
match diag.code {
Some(ref code) => {
msg: &str, code: Option<&str>, lvl: Level) {
assert!(cmsp.is_none(), "SharedEmitter doesn't support spans");
- self.buffer.lock().push(Diagnostic {
+ self.buffer.lock().unwrap().push(Diagnostic {
msg: msg.to_string(),
code: code.map(|s| s.to_string()),
lvl: lvl,
loop {
// Avoid holding the lock for the entire duration of the match.
- let maybe_work = work_items_arc.lock().pop();
+ let maybe_work = work_items_arc.lock().unwrap().pop();
match maybe_work {
Some(work) => {
execute_work_item(&cgcx, work);
// super-traits
for super_bound in trait_refs.iter() {
let trait_ref = match *super_bound {
- ast::TraitTyParamBound(ref trait_ref) => {
+ ast::TraitTyParamBound(ref trait_ref, _) => {
trait_ref
}
ast::RegionTyParamBound(..) => {
&**typ,
impl_items)
}
- ast::ItemTrait(_, ref generics, _, ref trait_refs, ref methods) =>
+ ast::ItemTrait(_, ref generics, ref trait_refs, ref methods) =>
self.process_trait(item, generics, trait_refs, methods),
ast::ItemMod(ref m) => self.process_mod(item, m),
ast::ItemTy(ref ty, ref ty_params) => {
fn visit_generics(&mut self, generics: &ast::Generics) {
for param in generics.ty_params.iter() {
for bound in param.bounds.iter() {
- if let ast::TraitTyParamBound(ref trait_ref) = *bound {
+ if let ast::TraitTyParamBound(ref trait_ref, _) = *bound {
self.process_trait_ref(&trait_ref.trait_ref, None);
}
}
let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right);
let slice_len = Sub(bcx, len, slice_len_offset);
let slice_ty = ty::mk_slice(bcx.tcx(),
- ty::ReStatic,
+ bcx.tcx().mk_region(ty::ReStatic),
ty::mt {ty: vt.unit_ty, mutbl: ast::MutImmutable});
let scratch = rvalue_scratch_datum(bcx, slice_ty, "");
Store(bcx, slice_begin,
ty::ty_uint(ast::TyU8) => {
// NOTE: cast &[u8] to &str and abuse the str_eq lang item,
// which calls memcmp().
- let t = ty::mk_str_slice(cx.tcx(), ty::ReStatic, ast::MutImmutable);
+ let t = ty::mk_str_slice(cx.tcx(),
+ cx.tcx().mk_region(ty::ReStatic),
+ ast::MutImmutable);
let lhs = BitCast(cx, lhs, type_of::type_of(cx.ccx(), t).ptr_to());
let rhs = BitCast(cx, rhs, type_of::type_of(cx.ccx(), t).ptr_to());
compare_str(cx, lhs, rhs, rhs_t)
#![allow(unsigned_negation)]
-pub use self::PointerField::*;
pub use self::Repr::*;
use std::num::Int;
use std::rc::Rc;
use llvm::{ValueRef, True, IntEQ, IntNE};
-use back::abi;
+use back::abi::FAT_PTR_ADDR;
use middle::subst;
use middle::subst::Subst;
use trans::_match;
type Hint = attr::ReprAttr;
-
/// Representations.
#[deriving(Eq, PartialEq, Show)]
pub enum Repr<'tcx> {
nullfields: Vec<Ty<'tcx>>
},
/// Two cases distinguished by a nullable pointer: the case with discriminant
- /// `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th
+ /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
/// field is known to be nonnull due to its type; if that field is null, then
/// it represents the other case, which is inhabited by at most one value
/// (and all other fields are undefined/unused).
StructWrappedNullablePointer {
nonnull: Struct<'tcx>,
nndiscr: Disr,
- ptrfield: PointerField,
+ discrfield: DiscrField,
nullfields: Vec<Ty<'tcx>>,
}
}
ty::ty_tup(ref elems) => {
Univariant(mk_struct(cx, elems[], false, t), false)
}
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
let fields = ty::lookup_struct_fields(cx.tcx(), def_id);
let mut ftys = fields.iter().map(|field| {
ty::lookup_field_type(cx.tcx(), def_id, field.id, substs)
Univariant(mk_struct(cx, ftys[], packed, t), dtor)
}
- ty::ty_unboxed_closure(def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(def_id, _, substs) => {
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id, substs);
let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
Univariant(mk_struct(cx, upvar_types[], false, t), false)
}
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) => {
let cases = get_cases(cx.tcx(), def_id, substs);
let hint = *ty::lookup_repr_hints(cx.tcx(), def_id)[].get(0)
.unwrap_or(&attr::ReprAny);
let st = mk_struct(cx, cases[discr].tys[],
false, t);
match cases[discr].find_ptr(cx) {
- Some(ThinPointer(_)) if st.fields.len() == 1 => {
+ Some(ref df) if df.len() == 1 && st.fields.len() == 1 => {
return RawNullablePointer {
nndiscr: discr as Disr,
nnty: st.fields[0],
nullfields: cases[1 - discr].tys.clone()
};
}
- Some(ptrfield) => {
+ Some(mut discrfield) => {
+ discrfield.push(0);
+ discrfield.reverse();
return StructWrappedNullablePointer {
nndiscr: discr as Disr,
nonnull: st,
- ptrfield: ptrfield,
+ discrfield: discrfield,
nullfields: cases[1 - discr].tys.clone()
};
}
- None => { }
+ None => {}
}
}
discr += 1;
tys: Vec<Ty<'tcx>>
}
+/// The (GEP) indices to follow, in order, to reach the discriminant field.
+pub type DiscrField = Vec<uint>;
+
+fn find_discr_field_candidate<'tcx>(tcx: &ty::ctxt<'tcx>,
+ ty: Ty<'tcx>,
+ mut path: DiscrField) -> Option<DiscrField> {
+ match ty.sty {
+        // Fat &T/&mut T/Box<T>, i.e. T is [T], str, or Trait
+ ty::ty_rptr(_, ty::mt { ty, .. }) | ty::ty_uniq(ty) if !ty::type_is_sized(tcx, ty) => {
+ path.push(FAT_PTR_ADDR);
+ Some(path)
+ },
-#[deriving(Copy, Eq, PartialEq, Show)]
-pub enum PointerField {
- ThinPointer(uint),
- FatPointer(uint)
-}
+ // Regular thin pointer: &T/&mut T/Box<T>
+ ty::ty_rptr(..) | ty::ty_uniq(..) => Some(path),
-impl<'tcx> Case<'tcx> {
- fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>)
- -> bool {
- mk_struct(cx, self.tys[], false, scapegoat).size == 0
- }
+ // Functions are just pointers
+ ty::ty_bare_fn(..) => Some(path),
- fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<PointerField> {
- for (i, &ty) in self.tys.iter().enumerate() {
- match ty.sty {
- // &T/&mut T/Box<T> could either be a thin or fat pointer depending on T
- ty::ty_rptr(_, ty::mt { ty, .. }) | ty::ty_uniq(ty) => match ty.sty {
- // &[T] and &str are a pointer and length pair
- ty::ty_vec(_, None) | ty::ty_str => return Some(FatPointer(i)),
+ // Closures are a pair of pointers: the code and environment
+ ty::ty_closure(..) => {
+ path.push(FAT_PTR_ADDR);
+ Some(path)
+ },
+
+ // Is this the NonZero lang item wrapping a pointer or integer type?
+ ty::ty_struct(did, substs) if Some(did) == tcx.lang_items.non_zero() => {
+ let nonzero_fields = ty::lookup_struct_fields(tcx, did);
+ assert_eq!(nonzero_fields.len(), 1);
+ let nonzero_field = ty::lookup_field_type(tcx, did, nonzero_fields[0].id, substs);
+ match nonzero_field.sty {
+ ty::ty_ptr(..) | ty::ty_int(..) | ty::ty_uint(..) => {
+ path.push(0);
+ Some(path)
+ },
+ _ => None
+ }
+ },
- // &Trait is a pair of pointers: the actual object and a vtable
- ty::ty_trait(..) => return Some(FatPointer(i)),
+ // Perhaps one of the fields of this struct is non-zero
+ // let's recurse and find out
+ ty::ty_struct(def_id, substs) => {
+ let fields = ty::lookup_struct_fields(tcx, def_id);
+ for (j, field) in fields.iter().enumerate() {
+ let field_ty = ty::lookup_field_type(tcx, def_id, field.id, substs);
+ if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) {
+ fpath.push(j);
+ return Some(fpath);
+ }
+ }
+ None
+ },
- ty::ty_struct(..) if !ty::type_is_sized(cx.tcx(), ty) => {
- return Some(FatPointer(i))
- }
+ // Can we use one of the fields in this tuple?
+ ty::ty_tup(ref tys) => {
+ for (j, &ty) in tys.iter().enumerate() {
+ if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) {
+ fpath.push(j);
+ return Some(fpath);
+ }
+ }
+ None
+ },
- // Any other &T is just a pointer
- _ => return Some(ThinPointer(i))
- },
+ // Is this a fixed-size array of something non-zero
+ // with at least one element?
+ ty::ty_vec(ety, Some(d)) if d > 0 => {
+ if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) {
+ vpath.push(0);
+ Some(vpath)
+ } else {
+ None
+ }
+ },
- // Functions are just pointers
- ty::ty_bare_fn(..) => return Some(ThinPointer(i)),
+ // Anything else is not a pointer
+ _ => None
+ }
+}
- // Closures are a pair of pointers: the code and environment
- ty::ty_closure(..) => return Some(FatPointer(i)),
+impl<'tcx> Case<'tcx> {
+ fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool {
+ mk_struct(cx, self.tys[], false, scapegoat).size == 0
+ }
- // Anything else is not a pointer
- _ => continue
+ fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<DiscrField> {
+ for (i, &ty) in self.tys.iter().enumerate() {
+ if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) {
+ path.push(i);
+ return Some(path);
}
}
-
None
}
}
val = ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty));
signed = false;
}
- StructWrappedNullablePointer { nndiscr, ptrfield, .. } => {
- val = struct_wrapped_nullable_bitdiscr(bcx, nndiscr, ptrfield, scrutinee);
+ StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
+ val = struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee);
signed = false;
}
}
}
}
-fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, ptrfield: PointerField,
+fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField,
scrutinee: ValueRef) -> ValueRef {
- let llptrptr = match ptrfield {
- ThinPointer(field) => GEPi(bcx, scrutinee, &[0, field]),
- FatPointer(field) => GEPi(bcx, scrutinee, &[0, field, abi::FAT_PTR_ADDR])
- };
+ let llptrptr = GEPi(bcx, scrutinee, discrfield[]);
let llptr = Load(bcx, llptrptr);
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)))
Store(bcx, C_null(llptrty), val)
}
}
- StructWrappedNullablePointer { ref nonnull, nndiscr, ptrfield, .. } => {
+ StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
if discr != nndiscr {
- let (llptrptr, llptrty) = match ptrfield {
- ThinPointer(field) =>
- (GEPi(bcx, val, &[0, field]),
- type_of::type_of(bcx.ccx(), nonnull.fields[field])),
- FatPointer(field) => {
- let v = GEPi(bcx, val, &[0, field, abi::FAT_PTR_ADDR]);
- (v, val_ty(v).element_type())
- }
- };
+ let llptrptr = GEPi(bcx, val, discrfield[]);
+ let llptrty = val_ty(llptrptr).element_type();
Store(bcx, C_null(llptrty), llptrptr)
}
}
false)
} else {
let vals = nonnull.fields.iter().map(|&ty| {
- // Always use null even if it's not the `ptrfield`th
+ // Always use null even if it's not the `discrfield`th
// field; see #8506.
C_null(type_of::sizing_type_of(ccx, ty))
}).collect::<Vec<ValueRef>>();
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
-/// Get the discriminant of a constant value. (Not currently used.)
-pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef)
- -> Disr {
+/// Get the discriminant of a constant value.
+pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr {
match *r {
CEnum(ity, _, _) => {
match ity {
}
}
Univariant(..) => 0,
- RawNullablePointer { nndiscr, .. } => {
- if is_null(val) {
- /* subtraction as uint is ok because nndiscr is either 0 or 1 */
- (1 - nndiscr) as Disr
- } else {
- nndiscr
- }
- }
- StructWrappedNullablePointer { nndiscr, ptrfield, .. } => {
- let (idx, sub_idx) = match ptrfield {
- ThinPointer(field) => (field, None),
- FatPointer(field) => (field, Some(abi::FAT_PTR_ADDR))
- };
- if is_null(const_struct_field(ccx, val, idx, sub_idx)) {
- /* subtraction as uint is ok because nndiscr is either 0 or 1 */
- (1 - nndiscr) as Disr
- } else {
- nndiscr
- }
+ RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
+ ccx.sess().bug("const discrim access of non c-like enum")
}
}
}
_discr: Disr, ix: uint) -> ValueRef {
match *r {
CEnum(..) => ccx.sess().bug("element access in C-like enum const"),
- Univariant(..) => const_struct_field(ccx, val, ix, None),
- General(..) => const_struct_field(ccx, val, ix + 1, None),
+ Univariant(..) => const_struct_field(ccx, val, ix),
+ General(..) => const_struct_field(ccx, val, ix + 1),
RawNullablePointer { .. } => {
assert_eq!(ix, 0);
val
- }
- StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix, None)
+ },
+ StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix)
}
}
/// Extract field of struct-like const, skipping our alignment padding.
-fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: uint, sub_idx: Option<uint>)
- -> ValueRef {
+fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: uint) -> ValueRef {
// Get the ix-th non-undef element of the struct.
let mut real_ix = 0; // actual position in the struct
let mut ix = ix; // logical index relative to real_ix
let mut field;
loop {
loop {
- field = match sub_idx {
- Some(si) => const_get_elt(ccx, val, &[real_ix, si as u32]),
- None => const_get_elt(ccx, val, &[real_ix])
- };
+ field = const_get_elt(ccx, val, &[real_ix]);
if !is_undef(field) {
break;
}
let unboxed_closure = &(*unboxed_closures)[closure_id];
match unboxed_closure.kind {
ty::FnUnboxedClosureKind => {
- ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, fn_ty)
+ ty::mk_imm_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
ty::FnMutUnboxedClosureKind => {
- ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, fn_ty)
+ ty::mk_mut_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
}
ty::FnOnceUnboxedClosureKind => fn_ty
}
ty::ty_closure(ref f) => {
(f.sig.0.inputs.clone(), f.sig.0.output, f.abi, Some(Type::i8p(ccx)))
}
- ty::ty_unboxed_closure(closure_did, _, ref substs) => {
+ ty::ty_unboxed_closure(closure_did, _, substs) => {
let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let unboxed_closure = &(*unboxed_closures)[closure_did];
let function_type = unboxed_closure.closure_type.clone();
assert_eq!(did.krate, ast::LOCAL_CRATE);
// Since we're in trans we don't care for any region parameters
- let ref substs = subst::Substs::erased(substs.types.clone());
+ let substs = subst::Substs::erased(substs.types.clone());
- let (val, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);
+ let (val, _) = monomorphize::monomorphic_fn(ccx, did, &substs, None);
val
} else if did.krate == ast::LOCAL_CRATE {
}
})
}
- ty::ty_unboxed_closure(def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(def_id, _, substs) => {
let repr = adt::represent_type(cx.ccx(), t);
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id, substs);
for (i, upvar) in upvars.iter().enumerate() {
cx = f(cx, llfld_a, *arg);
}
}
- ty::ty_enum(tid, ref substs) => {
+ ty::ty_enum(tid, substs) => {
let fcx = cx.fcx;
let ccx = fcx.ccx;
llfn: ValueRef,
llargs: &[ValueRef],
fn_ty: Ty<'tcx>,
- call_info: Option<NodeInfo>,
- // FIXME(15064) is_lang_item is a horrible hack, please remove it
- // at the soonest opportunity.
- is_lang_item: bool)
+ call_info: Option<NodeInfo>)
-> (ValueRef, Block<'blk, 'tcx>) {
let _icx = push_ctxt("invoke_");
if bcx.unreachable.get() {
return (C_null(Type::i8(bcx.ccx())), bcx);
}
- // FIXME(15064) Lang item methods may (in the reflect case) not have proper
- // types, so doing an attribute lookup will fail.
- let attributes = if is_lang_item {
- llvm::AttrBuilder::new()
- } else {
- get_fn_llvm_attributes(bcx.ccx(), fn_ty)
- };
+ let attributes = get_fn_llvm_attributes(bcx.ccx(), fn_ty);
match bcx.opt_node_id {
None => {
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
+ let print_info = ccx.sess().print_enum_sizes();
+
let levels = ccx.tcx().node_lint_levels.borrow();
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
- let lvlsrc = match levels.get(&(id, lint_id)) {
- None | Some(&(lint::Allow, _)) => return,
- Some(&lvlsrc) => lvlsrc,
- };
+ let lvlsrc = levels.get(&(id, lint_id));
+ let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);
- let avar = adt::represent_type(ccx, ty::node_id_to_type(ccx.tcx(), id));
+ if is_allow && !print_info {
+ // we're not interested in anything here
+ return
+ }
+
+ let ty = ty::node_id_to_type(ccx.tcx(), id);
+ let avar = adt::represent_type(ccx, ty);
match *avar {
adt::General(_, ref variants, _) => {
for var in variants.iter() {
}
);
+ if print_info {
+ let llty = type_of::sizing_type_of(ccx, ty);
+
+ let sess = &ccx.tcx().sess;
+ sess.span_note(sp, &*format!("total size: {} bytes", llsize_of_real(ccx, llty)));
+ match *avar {
+ adt::General(..) => {
+ for (i, var) in enum_def.variants.iter().enumerate() {
+ ccx.tcx().sess.span_note(var.span,
+ &*format!("variant data: {} bytes", sizes[i]));
+ }
+ }
+ _ => {}
+ }
+ }
+
// we only warn if the largest variant is at least thrice as large as
// the second-largest.
- if largest > slargest * 3 && slargest > 0 {
+ if !is_allow && largest > slargest * 3 && slargest > 0 {
// Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
// pass for the latter already ran.
lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
- lvlsrc, Some(sp),
+ *lvlsrc.unwrap(), Some(sp),
format!("enum variant is more than three times larger \
({} bytes) than the next largest (ignoring padding)",
largest)[]);
ast::ItemMod(ref m) => {
trans_mod(&ccx.rotate(), m);
}
- ast::ItemEnum(ref enum_definition, _) => {
- enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
+ ast::ItemEnum(ref enum_definition, ref gens) => {
+ if gens.ty_params.is_empty() {
+ // sizes only make sense for non-generic types
+
+ enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
+ }
}
ast::ItemConst(_, ref expr) => {
// Recurse on the expression to catch items in blocks
let (fn_sig, abi, has_env) = match fn_ty.sty {
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
ty::ty_bare_fn(_, ref f) => (f.sig.clone(), f.abi, false),
- ty::ty_unboxed_closure(closure_did, _, ref substs) => {
+ ty::ty_unboxed_closure(closure_did, _, substs) => {
let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
let ref function_type = (*unboxed_closures)[closure_did]
.closure_type;
attrs.arg(idx, llvm::ReadOnlyAttribute);
}
- if let ReLateBound(_, BrAnon(_)) = b {
+ if let ReLateBound(_, BrAnon(_)) = *b {
attrs.arg(idx, llvm::NoCaptureAttribute);
}
}
// When a reference in an argument has no named lifetime, it's impossible for that
// reference to escape this function (returned or stored beyond the call by a closure).
- ty::ty_rptr(ReLateBound(_, BrAnon(_)), mt) => {
+ ty::ty_rptr(&ReLateBound(_, BrAnon(_)), mt) => {
let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
attrs.arg(idx, llvm::NoCaptureAttribute)
.arg(idx, llvm::DereferenceableAttribute(llsz));
debug!("Store {} -> {}",
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
- assert!(self.llbuilder.is_not_null());
+ assert!(!self.llbuilder.is_null());
self.count_insn("store");
unsafe {
llvm::LLVMBuildStore(self.llbuilder, val, ptr);
debug!("Store {} -> {}",
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
- assert!(self.llbuilder.is_not_null());
+ assert!(!self.llbuilder.is_null());
self.count_insn("store.volatile");
unsafe {
let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
bare_fn_ty.repr(tcx));
// This is an impl of `Fn` trait, so receiver is `&self`.
- let bare_fn_ty_ref = ty::mk_imm_rptr(tcx, ty::ReStatic, bare_fn_ty);
+ let bare_fn_ty_ref = ty::mk_imm_rptr(tcx, tcx.mk_region(ty::ReStatic), bare_fn_ty);
// Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`,
// which is the fn pointer, and `args`, which is the arguments tuple.
let (opt_def_id, input_tys, output_ty) =
match bare_fn_ty.sty {
ty::ty_bare_fn(opt_def_id,
- ty::BareFnTy { unsafety: ast::Unsafety::Normal,
+ &ty::BareFnTy { unsafety: ast::Unsafety::Normal,
abi: synabi::Rust,
sig: ty::Binder(ty::FnSig { inputs: ref input_tys,
output: output_ty,
let tuple_input_ty = ty::mk_tup(tcx, input_tys.to_vec());
let tuple_fn_ty = ty::mk_bare_fn(tcx,
opt_def_id,
- ty::BareFnTy { unsafety: ast::Unsafety::Normal,
- abi: synabi::RustCall,
- sig: ty::Binder(ty::FnSig {
- inputs: vec![bare_fn_ty_ref,
- tuple_input_ty],
- output: output_ty,
- variadic: false
- })});
+ tcx.mk_bare_fn(ty::BareFnTy {
+ unsafety: ast::Unsafety::Normal,
+ abi: synabi::RustCall,
+ sig: ty::Binder(ty::FnSig {
+ inputs: vec![bare_fn_ty_ref,
+ tuple_input_ty],
+ output: output_ty,
+ variadic: false
+ })}));
debug!("tuple_fn_ty: {}", tuple_fn_ty.repr(tcx));
//
llfn,
llargs[],
callee_ty,
- call_info,
- dest.is_none());
+ call_info);
bcx = b;
llresult = llret;
use trans::common::{Block, FunctionContext, ExprId, NodeInfo};
use trans::debuginfo;
use trans::glue;
-use middle::region;
+// Temporary due to slicing syntax hacks (KILLME)
+//use middle::region;
use trans::type_::Type;
use middle::ty::{mod, Ty};
use std::fmt;
// excluding id's that correspond to closure bodies only). For
// now we just say that if there is already an AST scope on the stack,
// this new AST scope had better be its immediate child.
- let top_scope = self.top_ast_scope();
+ // Temporarily removed due to slicing syntax hacks (KILLME).
+ /*let top_scope = self.top_ast_scope();
if top_scope.is_some() {
assert_eq!(self.ccx
.tcx()
.opt_encl_scope(region::CodeExtent::from_node_id(debug_loc.id))
.map(|s|s.node_id()),
top_scope);
- }
+ }*/
self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id),
Some(debug_loc)));
fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> bool {
match ty.sty {
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
let fields = ty::struct_fields(ccx.tcx(), def_id, substs);
fields.len() == 1 &&
fields[0].name ==
}
}
+#[allow(dead_code)] // potentially useful
pub fn is_null(val: ValueRef) -> bool {
unsafe {
llvm::LLVMIsNull(val) != False
ty::ty_float(_) => {
push_debuginfo_type_name(cx, type_, false, &mut unique_type_id);
},
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) => {
unique_type_id.push_str("enum ");
from_def_id_and_substs(self, cx, def_id, substs, &mut unique_type_id);
},
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
unique_type_id.push_str("struct ");
from_def_id_and_substs(self, cx, def_id, substs, &mut unique_type_id);
},
trait_data.principal.substs(),
&mut unique_type_id);
},
- ty::ty_bare_fn(_, ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+ ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
unique_type_id.push_str("unsafe ");
}
closure_ty.clone(),
&mut unique_type_id);
},
- ty::ty_unboxed_closure(ref def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(ref def_id, _, substs) => {
let closure_ty = cx.tcx().unboxed_closures.borrow()
.get(def_id).unwrap().closure_type.subst(cx.tcx(), substs);
self.get_unique_type_id_of_closure_type(cx,
},
adt::StructWrappedNullablePointer { nonnull: ref struct_def,
nndiscr,
- ptrfield, ..} => {
+ ref discrfield, ..} => {
// Create a description of the non-null variant
let (variant_type_metadata, variant_llvm_type, member_description_factory) =
describe_enum_variant(cx,
self.enum_type,
struct_def,
&*(*self.variants)[nndiscr as uint],
- OptimizedDiscriminant(ptrfield),
+ OptimizedDiscriminant,
self.containing_scope,
self.span);
// member's name.
let null_variant_index = (1 - nndiscr) as uint;
let null_variant_name = token::get_name((*self.variants)[null_variant_index].name);
- let discrfield = match ptrfield {
- adt::ThinPointer(field) => format!("{}", field),
- adt::FatPointer(field) => format!("{}", field)
- };
+ let discrfield = discrfield.iter()
+ .skip(1)
+ .map(|x| x.to_string())
+ .collect::<Vec<_>>().connect("$");
let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
discrfield,
null_variant_name);
#[deriving(Copy)]
enum EnumDiscriminantInfo {
RegularDiscriminant(DIType),
- OptimizedDiscriminant(adt::PointerField),
+ OptimizedDiscriminant,
NoDiscriminant
}
ty::ty_closure(ref closurety) => {
subroutine_type_metadata(cx, unique_type_id, &closurety.sig, usage_site_span)
}
- ty::ty_unboxed_closure(ref def_id, _, ref substs) => {
+ ty::ty_unboxed_closure(ref def_id, _, substs) => {
let sig = cx.tcx().unboxed_closures.borrow()
.get(def_id).unwrap().closure_type.sig.subst(cx.tcx(), substs);
subroutine_type_metadata(cx, unique_type_id, &sig, usage_site_span)
}
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) => {
prepare_struct_metadata(cx,
t,
def_id,
}
ast::ExprAssignOp(_, ref lhs, ref rhs) |
- ast::ExprIndex(ref lhs, ref rhs) |
+ ast::ExprIndex(ref lhs, ref rhs) |
ast::ExprBinary(_, ref lhs, ref rhs) => {
walk_expr(cx, &**lhs, scope_stack, scope_map);
walk_expr(cx, &**rhs, scope_stack, scope_map);
}
- ast::ExprSlice(ref base, ref start, ref end, _) => {
- walk_expr(cx, &**base, scope_stack, scope_map);
- start.as_ref().map(|x| walk_expr(cx, &**x, scope_stack, scope_map));
- end.as_ref().map(|x| walk_expr(cx, &**x, scope_stack, scope_map));
- }
-
ast::ExprRange(ref start, ref end) => {
- walk_expr(cx, &**start, scope_stack, scope_map);
+ start.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
end.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map));
}
ty::ty_uint(ast::TyU64) => output.push_str("u64"),
ty::ty_float(ast::TyF32) => output.push_str("f32"),
ty::ty_float(ast::TyF64) => output.push_str("f64"),
- ty::ty_struct(def_id, ref substs) |
- ty::ty_enum(def_id, ref substs) => {
+ ty::ty_struct(def_id, substs) |
+ ty::ty_enum(def_id, substs) => {
push_item_name(cx, def_id, qualified, output);
push_type_params(cx, substs, output);
},
push_item_name(cx, trait_data.principal.def_id(), false, output);
push_type_params(cx, trait_data.principal.substs(), output);
},
- ty::ty_bare_fn(_, ty::BareFnTy{ unsafety, abi, ref sig } ) => {
+ ty::ty_bare_fn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => {
if unsafety == ast::Unsafety::Unsafe {
output.push_str("unsafe ");
}
let substs = principal.substs().with_self_ty(unadjusted_ty).erase_regions();
let trait_ref =
Rc::new(ty::Binder(ty::TraitRef { def_id: principal.def_id(),
- substs: substs }));
+ substs: bcx.tcx().mk_substs(substs) }));
let trait_ref = trait_ref.subst(bcx.tcx(), bcx.fcx.param_substs);
let box_ty = mk_ty(unadjusted_ty);
PointerCast(bcx,
expr.id,
datum_ty,
|t| ty::mk_rptr(tcx,
- ty::ReStatic,
+ tcx.mk_region(ty::ReStatic),
ty::mt{
ty: t,
mutbl: ast::MutImmutable
trans_rec_tup_field(bcx, &**base, idx.node)
}
ast::ExprIndex(ref base, ref idx) => {
- trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
- }
- ast::ExprSlice(ref base, ref start, ref end, _) => {
- let _icx = push_ctxt("trans_slice");
- let ccx = bcx.ccx();
-
- let method_call = MethodCall::expr(expr.id);
- let method_ty = ccx.tcx()
- .method_map
- .borrow()
- .get(&method_call)
- .map(|method| method.ty);
- let base_datum = unpack_datum!(bcx, trans(bcx, &**base));
-
- let mut args = vec![];
- start.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
- end.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
-
- let result_ty = ty::ty_fn_ret(monomorphize_type(bcx, method_ty.unwrap())).unwrap();
- let scratch = rvalue_scratch_datum(bcx, result_ty, "trans_slice");
-
- unpack_result!(bcx,
- trans_overloaded_op(bcx,
- expr,
- method_call,
- base_datum,
- args,
- Some(SaveIn(scratch.val)),
- true));
- DatumBlock::new(bcx, scratch.to_expr_datum())
+ match idx.node {
+ ast::ExprRange(ref start, ref end) => {
+ // Special case for slicing syntax (KILLME).
+ let _icx = push_ctxt("trans_slice");
+ let ccx = bcx.ccx();
+
+ let method_call = MethodCall::expr(expr.id);
+ let method_ty = ccx.tcx()
+ .method_map
+ .borrow()
+ .get(&method_call)
+ .map(|method| method.ty);
+ let base_datum = unpack_datum!(bcx, trans(bcx, &**base));
+
+ let mut args = vec![];
+ start.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
+ end.as_ref().map(|e| args.push((unpack_datum!(bcx, trans(bcx, &**e)), e.id)));
+
+ let result_ty = ty::ty_fn_ret(monomorphize_type(bcx,
+ method_ty.unwrap())).unwrap();
+ let scratch = rvalue_scratch_datum(bcx, result_ty, "trans_slice");
+
+ unpack_result!(bcx,
+ trans_overloaded_op(bcx,
+ expr,
+ method_call,
+ base_datum,
+ args,
+ Some(SaveIn(scratch.val)),
+ true));
+ DatumBlock::new(bcx, scratch.to_expr_datum())
+ }
+ _ => trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id))
+ }
}
ast::ExprBox(_, ref contents) => {
// Special case for `Box<T>`
}
// A range just desugars into a struct.
- let (did, fields) = match end {
- &Some(ref end) => {
+ // Note that the type of the start and end may not be the same, but
+ // they should only differ in their lifetime, which should not matter
+ // in trans.
+ let (did, fields, ty_params) = match (start, end) {
+ (&Some(ref start), &Some(ref end)) => {
// Desugar to Range
- let fields = vec!(make_field("start", start.clone()),
- make_field("end", end.clone()));
- (tcx.lang_items.range_struct(), fields)
+ let fields = vec![make_field("start", start.clone()),
+ make_field("end", end.clone())];
+ (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)])
}
- &None => {
+ (&Some(ref start), &None) => {
// Desugar to RangeFrom
- let fields = vec!(make_field("start", start.clone()));
- (tcx.lang_items.range_from_struct(), fields)
+ let fields = vec![make_field("start", start.clone())];
+ (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)])
+ }
+ (&None, &Some(ref end)) => {
+ // Desugar to RangeTo
+ let fields = vec![make_field("end", end.clone())];
+ (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)])
+ }
+ _ => {
+ // Desugar to FullRange
+ (tcx.lang_items.full_range_struct(), vec![], vec![])
}
};
if let Some(did) = did {
- let substs = Substs::new_type(vec![node_id_type(bcx, start.id)], vec![]);
+ let substs = Substs::new_type(ty_params, vec![]);
trans_struct(bcx,
fields.as_slice(),
None,
expr.span,
expr.id,
- ty::mk_struct(tcx, did, substs),
+ ty::mk_struct(tcx, did, tcx.mk_substs(substs)),
dest)
} else {
tcx.sess.span_bug(expr.span,
F: FnOnce(ty::Disr, &[ty::field<'tcx>]) -> R,
{
match ty.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
op(0, struct_fields(tcx, did, substs)[])
}
op(0, tup_fields(v[])[])
}
- ty::ty_enum(_, ref substs) => {
+ ty::ty_enum(_, substs) => {
// We want the *variant* ID here, not the enum ID.
match node_id_opt {
None => {
// Compute final type. Note that we are loose with the region and
// mutability, since those things don't matter in trans.
let referent_ty = lv_datum.ty;
- let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), ty::ReStatic, referent_ty);
+ let ptr_ty = ty::mk_imm_rptr(bcx.tcx(), bcx.tcx().mk_region(ty::ReStatic), referent_ty);
// Get the pointer.
let llref = lv_datum.to_llref();
class_did,
&[get_drop_glue_type(bcx.ccx(), t)],
ty::mk_nil(bcx.tcx()));
- let (_, variant_cx) = invoke(variant_cx, dtor_addr, args[], dtor_ty, None, false);
+ let (_, variant_cx) = invoke(variant_cx, dtor_addr, args[], dtor_ty, None);
variant_cx.fcx.pop_and_trans_custom_cleanup_scope(variant_cx, field_scope);
variant_cx
return (size, align);
}
match t.sty {
- ty::ty_struct(id, ref substs) => {
+ ty::ty_struct(id, substs) => {
let ccx = bcx.ccx();
// First get the size of all statically known fields.
// Don't use type_of::sizing_type_of because that expects t to be sized.
}
}
}
- ty::ty_struct(did, ref substs) | ty::ty_enum(did, ref substs) => {
+ ty::ty_struct(did, substs) | ty::ty_enum(did, substs) => {
let tcx = bcx.tcx();
match ty::ty_dtor(tcx, did) {
ty::TraitDtor(dtor, true) => {
Vec::new()));
debug!("trait_substs={}", trait_substs.repr(bcx.tcx()));
let trait_ref = Rc::new(ty::Binder(ty::TraitRef { def_id: trait_id,
- substs: trait_substs }));
+ substs: bcx.tcx().mk_substs(trait_substs) }));
let vtbl = fulfill_obligation(bcx.ccx(),
DUMMY_SP,
trait_ref);
m.repr(tcx),
substs.repr(tcx));
if m.generics.has_type_params(subst::FnSpace) ||
- ty::type_has_self(ty::mk_bare_fn(tcx, None, m.fty.clone()))
+ ty::type_has_self(ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(m.fty.clone())))
{
debug!("(making impl vtable) method has self or type \
params: {}",
macro_rules! opt_val { ($e:expr) => (
unsafe {
match $e {
- p if p.is_not_null() => Some(Value(p)),
+ p if !p.is_null() => Some(Value(p)),
_ => None
}
}
pub fn get_parent(self) -> Option<BasicBlock> {
unsafe {
match llvm::LLVMGetInstructionParent(self.get()) {
- p if p.is_not_null() => Some(BasicBlock(p)),
+ p if !p.is_null() => Some(BasicBlock(p)),
_ => None
}
}
pub fn get_first_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetFirstUse(self.get()) {
- u if u.is_not_null() => Some(Use(u)),
+ u if !u.is_null() => Some(Use(u)),
_ => None
}
}
/// Tests if this value is a terminator instruction
pub fn is_a_terminator_inst(self) -> bool {
unsafe {
- llvm::LLVMIsATerminatorInst(self.get()).is_not_null()
+ !llvm::LLVMIsATerminatorInst(self.get()).is_null()
}
}
}
pub fn get_next_use(self) -> Option<Use> {
unsafe {
match llvm::LLVMGetNextUse(self.get()) {
- u if u.is_not_null() => Some(Use(u)),
+ u if !u.is_null() => Some(Use(u)),
_ => None
}
}
regions,
assoc_bindings);
- ty::TraitRef::new(trait_def_id, substs)
+ ty::TraitRef::new(trait_def_id, this.tcx().mk_substs(substs))
}
pub fn ast_path_to_ty<'tcx, AC: AstConv<'tcx>, RS: RegionScope>(
let r = opt_ast_region_to_region(this, rscope, ast_ty.span, region);
debug!("ty_rptr r={}", r.repr(this.tcx()));
let t = ast_ty_to_ty(this, rscope, &*mt.ty);
- ty::mk_rptr(tcx, r, ty::mt {ty: t, mutbl: mt.mutbl})
+ ty::mk_rptr(tcx, tcx.mk_region(r), ty::mt {ty: t, mutbl: mt.mutbl})
}
ast::TyTup(ref fields) => {
let flds = fields.iter()
tcx.sess.span_err(ast_ty.span,
"variadic function must have C calling convention");
}
- ty::mk_bare_fn(tcx, None, ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl))
+ let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl);
+ ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(bare_fn))
}
ast::TyClosure(ref f) => {
// Use corresponding trait store to figure out default bounds
}
ty::ByReferenceExplicitSelfCategory(region, mutability) => {
(Some(ty::mk_rptr(this.tcx(),
- region,
+ this.tcx().mk_region(region),
ty::mt {
ty: self_info.untransformed_self_ty,
mutbl: mutability
ty::ByValueExplicitSelfCategory
} else {
match explicit_type.sty {
- ty::ty_rptr(r, mt) => ty::ByReferenceExplicitSelfCategory(r, mt.mutbl),
+ ty::ty_rptr(r, mt) => ty::ByReferenceExplicitSelfCategory(*r, mt.mutbl),
ty::ty_uniq(_) => ty::ByBoxExplicitSelfCategory,
_ => ty::ByValueExplicitSelfCategory,
}
let mut trait_def_ids = DefIdMap::new();
for ast_bound in ast_bounds.iter() {
match *ast_bound {
- ast::TraitTyParamBound(ref b) => {
+ ast::TraitTyParamBound(ref b, ast::TraitBoundModifier::None) => {
match ::lookup_def_tcx(tcx, b.trait_ref.path.span, b.trait_ref.ref_id) {
def::DefTrait(trait_did) => {
match trait_def_ids.get(&trait_did) {
}
trait_bounds.push(b);
}
+ ast::TraitTyParamBound(_, ast::TraitBoundModifier::Maybe) => {}
ast::RegionTyParamBound(ref l) => {
region_bounds.push(l);
}
// and T is the expected type
let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
let mt = ty::mt { ty: expected, mutbl: mutbl };
- let region_ty = ty::mk_rptr(tcx, region_var, mt);
+ let region_ty = ty::mk_rptr(tcx, tcx.mk_region(region_var), mt);
demand::eqtype(fcx, pat.span, region_ty, typ);
}
// otherwise the type of x is the expected type T
let mt = ty::mt { ty: inner_ty, mutbl: mutbl };
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- let rptr_ty = ty::mk_rptr(tcx, region, mt);
+ let rptr_ty = ty::mk_rptr(tcx, tcx.mk_region(region), mt);
if check_dereferencable(pcx, pat.span, expected, &**inner) {
demand::suptype(fcx, pat.span, expected, rptr_ty);
})),
_ => {
let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span));
- ty::mk_slice(tcx, region, ty::mt {
+ ty::mk_slice(tcx, tcx.mk_region(region), ty::mt {
ty: inner_ty,
mutbl: ty::deref(expected_ty, true)
.map_or(ast::MutImmutable, |mt| mt.mutbl)
let mutbl = ty::deref(expected_ty, true)
.map_or(ast::MutImmutable, |mt| mt.mutbl);
- let slice_ty = ty::mk_slice(tcx, region, ty::mt {
+ let slice_ty = ty::mk_slice(tcx, tcx.mk_region(region), ty::mt {
ty: inner_ty,
mutbl: mutbl
});
let real_path_ty = fcx.node_ty(pat.id);
let (arg_tys, kind_name) = match real_path_ty.sty {
- ty::ty_enum(enum_def_id, ref expected_substs)
+ ty::ty_enum(enum_def_id, expected_substs)
if def == def::DefVariant(enum_def_id, def.def_id(), false) => {
let variant = ty::enum_variant_with_id(tcx, enum_def_id, def.def_id());
(variant.args.iter().map(|t| t.subst(tcx, expected_substs)).collect::<Vec<_>>(),
"variant")
}
- ty::ty_struct(struct_def_id, ref expected_substs) => {
+ ty::ty_struct(struct_def_id, expected_substs) => {
let struct_fields = ty::struct_fields(tcx, struct_def_id, expected_substs);
(struct_fields.iter().map(|field| field.mt.ty).collect::<Vec<_>>(),
"struct")
let closure_type = ty::mk_unboxed_closure(fcx.ccx.tcx,
expr_def_id,
- region,
- fcx.inh.param_env.free_substs.clone());
+ fcx.ccx.tcx.mk_region(region),
+ fcx.ccx.tcx.mk_substs(
+ fcx.inh.param_env.free_substs.clone()));
fcx.write_ty(expr.id, closure_type);
self.add_obligations(&pick, &method_bounds_substs, &method_bounds);
// Create the final `MethodCallee`.
- let fty = ty::mk_bare_fn(self.tcx(), None, ty::BareFnTy {
+ let fty = ty::mk_bare_fn(self.tcx(), None, self.tcx().mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(method_sig),
unsafety: pick.method_ty.fty.unsafety,
abi: pick.method_ty.fty.abi.clone(),
- });
+ }));
let callee = MethodCallee {
origin: method_origin,
ty: fty,
// been ruled out when we deemed the trait to be
// "object safe".
let original_poly_trait_ref =
- data.principal_trait_ref_with_self_ty(object_ty);
+ data.principal_trait_ref_with_self_ty(this.tcx(), object_ty);
let upcast_poly_trait_ref =
this.upcast(original_poly_trait_ref.clone(), trait_def_id);
let upcast_trait_ref =
self.infcx().next_ty_var());
let trait_ref =
- Rc::new(ty::TraitRef::new(trait_def_id, substs.clone()));
+ Rc::new(ty::TraitRef::new(trait_def_id, self.tcx().mk_substs(substs.clone())));
let origin = MethodTypeParam(MethodParam { trait_ref: trait_ref,
method_num: method_num });
(substs, origin)
ast::ExprParen(ref expr) |
ast::ExprField(ref expr, _) |
ast::ExprTupField(ref expr, _) |
- ast::ExprSlice(ref expr, _, _, _) |
ast::ExprIndex(ref expr, _) |
ast::ExprUnary(ast::UnDeref, ref expr) => exprs.push(&**expr),
_ => break,
// Construct a trait-reference `self_ty : Trait<input_tys>`
let substs = subst::Substs::new_trait(input_types, Vec::new(), assoc_types, self_ty);
- let trait_ref = Rc::new(ty::TraitRef::new(trait_def_id, substs));
+ let trait_ref = Rc::new(ty::TraitRef::new(trait_def_id, fcx.tcx().mk_substs(substs)));
// Construct an obligation
let poly_trait_ref = Rc::new(ty::Binder((*trait_ref).clone()));
// Substitute the trait parameters into the method type and
// instantiate late-bound regions to get the actual method type.
let ref bare_fn_ty = method_ty.fty;
- let fn_sig = bare_fn_ty.sig.subst(tcx, &trait_ref.substs);
+ let fn_sig = bare_fn_ty.sig.subst(tcx, trait_ref.substs);
let fn_sig = fcx.infcx().replace_late_bound_regions_with_fresh_var(span,
infer::FnCall,
&fn_sig).0;
let transformed_self_ty = fn_sig.inputs[0];
- let fty = ty::mk_bare_fn(tcx, None, ty::BareFnTy {
+ let fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(ty::BareFnTy {
sig: ty::Binder(fn_sig),
unsafety: bare_fn_ty.unsafety,
abi: bare_fn_ty.abi.clone(),
- });
+ }));
debug!("lookup_in_trait_adjusted: matched method fty={} obligation={}",
fty.repr(fcx.tcx()),
//
// Note that as the method comes from a trait, it should not have
// any late-bound regions appearing in its bounds.
- let method_bounds = method_ty.generics.to_bounds(fcx.tcx(), &trait_ref.substs);
+ let method_bounds = method_ty.generics.to_bounds(fcx.tcx(), trait_ref.substs);
assert!(!method_bounds.has_escaping_regions());
fcx.add_obligations_for_parameters(
traits::ObligationCause::misc(span, fcx.body_id),
span,
ty::AdjustDerefRef(ty::AutoDerefRef {
autoderefs: autoderefs,
- autoref: Some(ty::AutoPtr(region, mutbl, autoref))
+ autoref: Some(ty::AutoPtr(*region, mutbl, autoref))
}));
}
.find(|m| m.name() == method_name)
.and_then(|item| item.as_opt_method())
}
-
// a substitution that replaces `Self` with the object type
// itself. Hence, a `&self` method will wind up with an
// argument type like `&Trait`.
- let trait_ref = data.principal_trait_ref_with_self_ty(self_ty);
+ let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx(), self_ty);
self.elaborate_bounds(&[trait_ref.clone()], false, |this, new_trait_ref, m, method_num| {
let vtable_index =
get_method_index(tcx, &*new_trait_ref, trait_ref.clone(), method_num);
// Determine the receiver type that the method itself expects.
let xform_self_ty =
- self.xform_self_ty(&method, &impl_trait_ref.substs);
+ self.xform_self_ty(&method, impl_trait_ref.substs);
debug!("xform_self_ty={}", xform_self_ty.repr(self.tcx()));
let tcx = self.tcx();
self.search_mutabilities(
|m| AutoRef(m, box step.adjustment.clone()),
- |m,r| ty::mk_rptr(tcx, r, ty::mt {ty:step.self_ty, mutbl:m}))
+ |m,r| ty::mk_rptr(tcx, tcx.mk_region(r), ty::mt {ty:step.self_ty, mutbl:m}))
}
fn search_mutabilities<F, G>(&mut self,
}
}
- ast::ItemTrait(_, _, _, _, ref trait_methods) => {
+ ast::ItemTrait(_, _, _, ref trait_methods) => {
let trait_def = ty::lookup_trait_def(ccx.tcx, local_def(it.id));
for trait_method in trait_methods.iter() {
match *trait_method {
}
// Compute skolemized form of impl and trait method tys.
- let impl_fty = ty::mk_bare_fn(tcx, None, impl_m.fty.clone());
+ let impl_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(impl_m.fty.clone()));
let impl_fty = impl_fty.subst(tcx, &impl_to_skol_substs);
- let trait_fty = ty::mk_bare_fn(tcx, None, trait_m.fty.clone());
+ let trait_fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(trait_m.fty.clone()));
let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs);
// Check the impl method type IM is a subtype of the trait method
}
}
-#[deriving(Copy, Show)]
+#[deriving(Copy, Show,PartialEq,Eq)]
pub enum LvaluePreference {
PreferMutLvalue,
NoPreference
}
}
-/// Autoderefs `base_expr`, looking for a `Slice` impl. If it finds one, installs the relevant
-/// method info and returns the result type (else None).
-fn try_overloaded_slice<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
- method_call: MethodCall,
- expr: &ast::Expr,
- base_expr: &ast::Expr,
- base_ty: Ty<'tcx>,
- start_expr: &Option<P<ast::Expr>>,
- end_expr: &Option<P<ast::Expr>>,
- mutbl: ast::Mutability)
- -> Option<Ty<'tcx>> // return type is result of slice
-{
- let lvalue_pref = match mutbl {
- ast::MutMutable => PreferMutLvalue,
- ast::MutImmutable => NoPreference
- };
-
- let opt_method_ty =
- autoderef_for_index(fcx, base_expr, base_ty, lvalue_pref, |adjusted_ty, autoderefref| {
- try_overloaded_slice_step(fcx, method_call, expr, base_expr,
- adjusted_ty, autoderefref, mutbl,
- start_expr, end_expr)
- });
-
- // Regardless of whether the lookup succeeds, check the method arguments
- // so that we have *some* type for each argument.
- let method_ty_or_err = opt_method_ty.unwrap_or(ty::mk_err());
-
- let mut args = vec![];
- start_expr.as_ref().map(|x| args.push(x));
- end_expr.as_ref().map(|x| args.push(x));
-
- check_method_argument_types(fcx,
- expr.span,
- method_ty_or_err,
- expr,
- args.as_slice(),
- AutorefArgs::Yes,
- DontTupleArguments);
-
- opt_method_ty.map(|method_ty| {
- let result_ty = ty::ty_fn_ret(method_ty);
- match result_ty {
- ty::FnConverging(result_ty) => result_ty,
- ty::FnDiverging => {
- fcx.tcx().sess.span_bug(expr.span,
- "slice trait does not define a `!` return")
- }
- }
- })
-}
/// Checks for a `Slice` (or `SliceMut`) impl at the relevant level of autoderef. If it finds one,
/// installs method info and returns type of method (else None).
base_expr: &ast::Expr,
base_ty: Ty<'tcx>, // autoderef'd type
autoderefref: ty::AutoDerefRef<'tcx>,
- mutbl: ast::Mutability,
+ lvalue_pref: LvaluePreference,
start_expr: &Option<P<ast::Expr>>,
end_expr: &Option<P<ast::Expr>>)
- // result type is type of method being called
- -> Option<Ty<'tcx>>
+ -> Option<(Ty<'tcx>, /* index type */
+ Ty<'tcx>)> /* return type */
{
- let method = if mutbl == ast::MutMutable {
- // Try `SliceMut` first, if preferred.
- match fcx.tcx().lang_items.slice_mut_trait() {
- Some(trait_did) => {
- let method_name = match (start_expr, end_expr) {
- (&Some(_), &Some(_)) => "slice_or_fail_mut",
- (&Some(_), &None) => "slice_from_or_fail_mut",
- (&None, &Some(_)) => "slice_to_or_fail_mut",
- (&None, &None) => "as_mut_slice_",
- };
+ let input_ty = fcx.infcx().next_ty_var();
+ let return_ty = fcx.infcx().next_ty_var();
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(&*base_expr),
- token::intern(method_name),
- trait_did,
- autoderefref,
- base_ty,
- None)
+ let method = match lvalue_pref {
+ PreferMutLvalue => {
+ // Try `SliceMut` first, if preferred.
+ match fcx.tcx().lang_items.slice_mut_trait() {
+ Some(trait_did) => {
+ let method_name = match (start_expr, end_expr) {
+ (&Some(_), &Some(_)) => "slice_or_fail_mut",
+ (&Some(_), &None) => "slice_from_or_fail_mut",
+ (&None, &Some(_)) => "slice_to_or_fail_mut",
+ (&None, &None) => "as_mut_slice_",
+ };
+
+ method::lookup_in_trait_adjusted(fcx,
+ expr.span,
+ Some(&*base_expr),
+ token::intern(method_name),
+ trait_did,
+ autoderefref,
+ base_ty,
+ Some(vec![input_ty, return_ty]))
+ }
+ _ => None,
}
- _ => None,
}
- } else {
- // Otherwise, fall back to `Slice`.
- // FIXME(#17293) this will not coerce base_expr, so we miss the Slice
- // trait for `&mut [T]`.
- match fcx.tcx().lang_items.slice_trait() {
- Some(trait_did) => {
- let method_name = match (start_expr, end_expr) {
- (&Some(_), &Some(_)) => "slice_or_fail",
- (&Some(_), &None) => "slice_from_or_fail",
- (&None, &Some(_)) => "slice_to_or_fail",
- (&None, &None) => "as_slice_",
- };
+ NoPreference => {
+ // Otherwise, fall back to `Slice`.
+ match fcx.tcx().lang_items.slice_trait() {
+ Some(trait_did) => {
+ let method_name = match (start_expr, end_expr) {
+ (&Some(_), &Some(_)) => "slice_or_fail",
+ (&Some(_), &None) => "slice_from_or_fail",
+ (&None, &Some(_)) => "slice_to_or_fail",
+ (&None, &None) => "as_slice_",
+ };
- method::lookup_in_trait_adjusted(fcx,
- expr.span,
- Some(&*base_expr),
- token::intern(method_name),
- trait_did,
- autoderefref,
- base_ty,
- None)
+ method::lookup_in_trait_adjusted(fcx,
+ expr.span,
+ Some(&*base_expr),
+ token::intern(method_name),
+ trait_did,
+ autoderefref,
+ base_ty,
+ Some(vec![input_ty, return_ty]))
+ }
+ _ => None,
}
- _ => None,
}
};
// If some lookup succeeded, install method in table
method.map(|method| {
- let ty = method.ty;
- fcx.inh.method_map.borrow_mut().insert(method_call, method);
- ty
+ let method_ty = method.ty;
+ make_overloaded_lvalue_return_type(fcx, Some(method_call), Some(method));
+
+ let result_ty = ty::ty_fn_ret(method_ty);
+ let result_ty = match result_ty {
+ ty::FnConverging(result_ty) => result_ty,
+ ty::FnDiverging => {
+ fcx.tcx().sess.span_bug(expr.span,
+ "slice trait does not define a `!` return")
+ }
+ };
+
+ (input_ty, result_ty)
})
}
let tcx = fcx.ccx.tcx;
match lit.node {
- ast::LitStr(..) => ty::mk_str_slice(tcx, ty::ReStatic, ast::MutImmutable),
+ ast::LitStr(..) => ty::mk_str_slice(tcx, tcx.mk_region(ty::ReStatic), ast::MutImmutable),
ast::LitBinary(..) => {
- ty::mk_slice(tcx, ty::ReStatic, ty::mt{ ty: ty::mk_u8(), mutbl: ast::MutImmutable })
+ ty::mk_slice(tcx, tcx.mk_region(ty::ReStatic),
+ ty::mt{ ty: ty::mk_u8(), mutbl: ast::MutImmutable })
}
ast::LitByte(_) => ty::mk_u8(),
ast::LitChar(_) => ty::mk_char(),
});
let fn_sig = match fn_ty.sty {
- ty::ty_bare_fn(_, ty::BareFnTy {ref sig, ..}) |
+ ty::ty_bare_fn(_, &ty::BareFnTy {ref sig, ..}) |
ty::ty_closure(box ty::ClosureTy {ref sig, ..}) => sig,
_ => {
fcx.type_error_message(call_expr.span, |actual| {
let (adj_ty, adjustment) = match lhs_ty.sty {
ty::ty_rptr(r_in, mt) => {
let r_adj = fcx.infcx().next_region_var(infer::Autoref(lhs.span));
- fcx.mk_subr(infer::Reborrow(lhs.span), r_adj, r_in);
- let adjusted_ty = ty::mk_rptr(fcx.tcx(), r_adj, mt);
+ fcx.mk_subr(infer::Reborrow(lhs.span), r_adj, *r_in);
+ let adjusted_ty = ty::mk_rptr(fcx.tcx(), fcx.tcx().mk_region(r_adj), mt);
let autoptr = ty::AutoPtr(r_adj, mt.mutbl, None);
let adjustment = ty::AutoDerefRef { autoderefs: 1, autoref: Some(autoptr) };
(adjusted_ty, adjustment)
let (_, autoderefs, field_ty) =
autoderef(fcx, expr.span, expr_t, Some(base.id), lvalue_pref, |base_t, _| {
match base_t.sty {
- ty::ty_struct(base_id, ref substs) => {
+ ty::ty_struct(base_id, substs) => {
debug!("struct named {}", ppaux::ty_to_string(tcx, base_t));
let fields = ty::lookup_struct_fields(tcx, base_id);
lookup_field_ty(tcx, base_id, fields[],
let (_, autoderefs, field_ty) =
autoderef(fcx, expr.span, expr_t, Some(base.id), lvalue_pref, |base_t, _| {
match base_t.sty {
- ty::ty_struct(base_id, ref substs) => {
+ ty::ty_struct(base_id, substs) => {
tuple_like = ty::is_tuple_struct(tcx, base_id);
if tuple_like {
debug!("tuple struct named {}", ppaux::ty_to_string(tcx, base_t));
span: Span,
class_id: ast::DefId,
node_id: ast::NodeId,
- substitutions: subst::Substs<'tcx>,
+ substitutions: &'tcx subst::Substs<'tcx>,
field_types: &[ty::field_ty],
ast_fields: &[ast::Field],
check_completeness: bool,
Some((field_id, false)) => {
expected_field_type =
ty::lookup_field_type(
- tcx, class_id, field_id, &substitutions);
+ tcx, class_id, field_id, substitutions);
class_field_map.insert(
field.ident.node.name, (field_id, true));
fields_found += 1;
span,
class_id,
id,
- struct_substs,
+ fcx.ccx.tcx.mk_substs(struct_substs),
class_fields[],
fields,
base_expr.is_none(),
span,
variant_id,
id,
- substitutions,
+ fcx.ccx.tcx.mk_substs(substitutions),
variant_fields[],
fields,
true,
Some(mt) => mt.ty,
None => {
let is_newtype = match oprnd_t.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
let fields = ty::struct_fields(fcx.tcx(), did, substs);
fields.len() == 1
&& fields[0].name ==
// `'static`!
let region = fcx.infcx().next_region_var(
infer::AddrOfSlice(expr.span));
- ty::mk_rptr(tcx, region, tm)
+ ty::mk_rptr(tcx, tcx.mk_region(region), tm)
}
_ => {
let region = fcx.infcx().next_region_var(infer::AddrOfRegion(expr.span));
- ty::mk_rptr(tcx, region, tm)
+ ty::mk_rptr(tcx, tcx.mk_region(region), tm)
}
}
};
}
ast::ExprIndex(ref base, ref idx) => {
check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
- check_expr(fcx, &**idx);
let base_t = fcx.expr_ty(&**base);
- let idx_t = fcx.expr_ty(&**idx);
if ty::type_is_error(base_t) {
fcx.write_ty(id, base_t);
- } else if ty::type_is_error(idx_t) {
- fcx.write_ty(id, idx_t);
} else {
- let base_t = structurally_resolved_type(fcx, expr.span, base_t);
-
- let result =
- autoderef_for_index(fcx, &**base, base_t, lvalue_pref, |adj_ty, adj| {
- try_index_step(fcx,
- MethodCall::expr(expr.id),
- expr,
- &**base,
- adj_ty,
- adj,
- lvalue_pref)
- });
-
- match result {
- Some((index_ty, element_ty)) => {
- check_expr_has_type(fcx, &**idx, index_ty);
- fcx.write_ty(id, element_ty);
- }
- _ => {
- check_expr_has_type(fcx, &**idx, ty::mk_err());
- fcx.type_error_message(
- expr.span,
- |actual| {
- format!("cannot index a value of type `{}`",
- actual)
- },
- base_t,
- None);
- fcx.write_ty(id, ty::mk_err())
+ match idx.node {
+ ast::ExprRange(ref start, ref end) => {
+ // A slice, rather than an index. Special cased for now (KILLME).
+ let base_t = structurally_resolved_type(fcx, expr.span, base_t);
+
+ let result =
+ autoderef_for_index(fcx, &**base, base_t, lvalue_pref, |adj_ty, adj| {
+ try_overloaded_slice_step(fcx,
+ MethodCall::expr(expr.id),
+ expr,
+ &**base,
+ adj_ty,
+ adj,
+ lvalue_pref,
+ start,
+ end)
+ });
+
+ let mut args = vec![];
+ start.as_ref().map(|x| args.push(x));
+ end.as_ref().map(|x| args.push(x));
+
+ match result {
+ Some((index_ty, element_ty)) => {
+ for a in args.iter() {
+ check_expr_has_type(fcx, &***a, index_ty);
+ }
+ fcx.write_ty(idx.id, element_ty);
+ fcx.write_ty(id, element_ty)
+ }
+ _ => {
+ for a in args.iter() {
+ check_expr(fcx, &***a);
+ }
+ fcx.type_error_message(expr.span,
+ |actual| {
+ format!("cannot take a slice of a value with type `{}`",
+ actual)
+ },
+ base_t,
+ None);
+ fcx.write_ty(idx.id, ty::mk_err());
+ fcx.write_ty(id, ty::mk_err())
+ }
}
- }
- }
- }
- ast::ExprSlice(ref base, ref start, ref end, mutbl) => {
- check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref);
- let raw_base_t = fcx.expr_ty(&**base);
-
- let mut some_err = false;
- if ty::type_is_error(raw_base_t) {
- fcx.write_ty(id, raw_base_t);
- some_err = true;
- }
-
- {
- let check_slice_idx = |e: &ast::Expr| {
- check_expr(fcx, e);
- let e_t = fcx.expr_ty(e);
- if ty::type_is_error(e_t) {
- fcx.write_ty(e.id, e_t);
- some_err = true;
- }
- };
- start.as_ref().map(|e| check_slice_idx(&**e));
- end.as_ref().map(|e| check_slice_idx(&**e));
- }
-
- if !some_err {
- let base_t = structurally_resolved_type(fcx,
- expr.span,
- raw_base_t);
- let method_call = MethodCall::expr(expr.id);
- match try_overloaded_slice(fcx,
- method_call,
- expr,
- &**base,
- base_t,
- start,
- end,
- mutbl) {
- Some(ty) => fcx.write_ty(id, ty),
- None => {
- fcx.type_error_message(expr.span,
- |actual| {
- format!("cannot take a {}slice of a value with type `{}`",
- if mutbl == ast::MutMutable {
- "mutable "
- } else {
- ""
- },
- actual)
- },
- base_t,
- None);
- fcx.write_ty(id, ty::mk_err())
+ }
+ _ => {
+ check_expr(fcx, &**idx);
+ let idx_t = fcx.expr_ty(&**idx);
+ if ty::type_is_error(idx_t) {
+ fcx.write_ty(id, idx_t);
+ } else {
+ let base_t = structurally_resolved_type(fcx, expr.span, base_t);
+
+ let result =
+ autoderef_for_index(fcx, &**base, base_t, lvalue_pref, |adj_ty, adj| {
+ try_index_step(fcx,
+ MethodCall::expr(expr.id),
+ expr,
+ &**base,
+ adj_ty,
+ adj,
+ lvalue_pref)
+ });
+
+ match result {
+ Some((index_ty, element_ty)) => {
+ check_expr_has_type(fcx, &**idx, index_ty);
+ fcx.write_ty(id, element_ty);
+ }
+ _ => {
+ check_expr_has_type(fcx, &**idx, ty::mk_err());
+ fcx.type_error_message(
+ expr.span,
+ |actual| {
+ format!("cannot index a value of type `{}`",
+ actual)
+ },
+ base_t,
+ None);
+ fcx.write_ty(id, ty::mk_err())
+ }
+ }
}
+ }
}
}
}
ast::ExprRange(ref start, ref end) => {
- check_expr(fcx, &**start);
- let t_start = fcx.expr_ty(&**start);
-
- let idx_type = if let &Some(ref e) = end {
+ let t_start = start.as_ref().map(|e| {
check_expr(fcx, &**e);
- let t_end = fcx.expr_ty(&**e);
- if ty::type_is_error(t_end) {
- ty::mk_err()
- } else if t_start == ty::mk_err() {
- ty::mk_err()
- } else {
- infer::common_supertype(fcx.infcx(),
- infer::RangeExpression(expr.span),
- true,
- t_start,
- t_end)
+ fcx.expr_ty(&**e)
+ });
+ let t_end = end.as_ref().map(|e| {
+ check_expr(fcx, &**e);
+ fcx.expr_ty(&**e)
+ });
+
+ let idx_type = match (t_start, t_end) {
+ (Some(ty), None) | (None, Some(ty)) => Some(ty),
+ (Some(t_start), Some(t_end))
+ if ty::type_is_error(t_start) || ty::type_is_error(t_end) => {
+ Some(ty::mk_err())
}
- } else {
- t_start
+ (Some(t_start), Some(t_end)) => {
+ Some(infer::common_supertype(fcx.infcx(),
+ infer::RangeExpression(expr.span),
+ true,
+ t_start,
+ t_end))
+ }
+ _ => None
};
// Note that we don't check the type of start/end satisfy any
// bounds because right the range structs do not have any. If we add
// some bounds, then we'll need to check `t_start` against them here.
- let range_type = if idx_type == ty::mk_err() {
- ty::mk_err()
- } else {
- // Find the did from the appropriate lang item.
- let did = if end.is_some() {
- // Range
- tcx.lang_items.range_struct()
- } else {
- // RangeFrom
- tcx.lang_items.range_from_struct()
- };
-
- if let Some(did) = did {
- let polytype = ty::lookup_item_type(tcx, did);
- let substs = Substs::new_type(vec![idx_type], vec![]);
- let bounds = polytype.generics.to_bounds(tcx, &substs);
- fcx.add_obligations_for_parameters(
- traits::ObligationCause::new(expr.span,
- fcx.body_id,
- traits::ItemObligation(did)),
- &bounds);
-
- ty::mk_struct(tcx, did, substs)
- } else {
+ let range_type = match idx_type {
+ Some(idx_type) if ty::type_is_error(idx_type) => {
ty::mk_err()
}
+ Some(idx_type) => {
+ // Find the did from the appropriate lang item.
+ let did = match (start, end) {
+ (&Some(_), &Some(_)) => tcx.lang_items.range_struct(),
+ (&Some(_), &None) => tcx.lang_items.range_from_struct(),
+ (&None, &Some(_)) => tcx.lang_items.range_to_struct(),
+ (&None, &None) => {
+ tcx.sess.span_bug(expr.span, "full range should be dealt with above")
+ }
+ };
+
+ if let Some(did) = did {
+ let polytype = ty::lookup_item_type(tcx, did);
+ let substs = Substs::new_type(vec![idx_type], vec![]);
+ let bounds = polytype.generics.to_bounds(tcx, &substs);
+ fcx.add_obligations_for_parameters(
+ traits::ObligationCause::new(expr.span,
+ fcx.body_id,
+ traits::ItemObligation(did)),
+ &bounds);
+
+ ty::mk_struct(tcx, did, tcx.mk_substs(substs))
+ } else {
+ tcx.sess.span_err(expr.span, "No lang item for range syntax");
+ ty::mk_err()
+ }
+ }
+ None => {
+ // Neither start nor end => FullRange
+ if let Some(did) = tcx.lang_items.full_range_struct() {
+ let substs = Substs::new_type(vec![], vec![]);
+ ty::mk_struct(tcx, did, tcx.mk_substs(substs))
+ } else {
+ tcx.sess.span_err(expr.span, "No lang item for range syntax");
+ ty::mk_err()
+ }
+ }
};
+
fcx.write_ty(id, range_type);
}
return;
}
match t.sty {
- ty::ty_struct(did, ref substs) => {
+ ty::ty_struct(did, substs) => {
let fields = ty::lookup_struct_fields(tcx, did);
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
match t.sty {
ty::ty_param(ParamTy {idx, ..}) => {
debug!("Found use of ty param num {}", idx);
- tps_used[idx] = true;
+ tps_used[idx as uint] = true;
}
_ => ()
}
}
pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
- fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: uint) -> Ty<'tcx> {
+ fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: u32) -> Ty<'tcx> {
ty::mk_param(ccx.tcx, subst::FnSpace, n, local_def(0))
}
"breakpoint" => (0, Vec::new(), ty::mk_nil(tcx)),
"size_of" |
"pref_align_of" | "min_align_of" => (1u, Vec::new(), ty::mk_uint()),
- "init" => (1u, Vec::new(), param(ccx, 0u)),
- "uninit" => (1u, Vec::new(), param(ccx, 0u)),
+ "init" => (1u, Vec::new(), param(ccx, 0)),
+ "uninit" => (1u, Vec::new(), param(ccx, 0)),
"forget" => (1u, vec!( param(ccx, 0) ), ty::mk_nil(tcx)),
"transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)),
"move_val_init" => {
(1u,
vec!(
- ty::mk_mut_rptr(tcx, ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(0)),
+ ty::mk_mut_rptr(tcx,
+ tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1),
+ ty::BrAnon(0))),
param(ccx, 0)),
- param(ccx, 0u)
+ param(ccx, 0)
),
ty::mk_nil(tcx))
}
Ok(did) => (1u,
Vec::new(),
ty::mk_struct(ccx.tcx, did,
- subst::Substs::empty())),
+ ccx.tcx.mk_substs(subst::Substs::empty()))),
Err(msg) => {
tcx.sess.span_fatal(it.span, msg[]);
}
};
(n_tps, inputs, ty::FnConverging(output))
};
- let fty = ty::mk_bare_fn(tcx, None, ty::BareFnTy {
+ let fty = ty::mk_bare_fn(tcx, None, tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Unsafe,
abi: abi::RustIntrinsic,
sig: ty::Binder(FnSig {
output: output,
variadic: false,
}),
- });
+ }));
let i_ty = ty::lookup_item_type(ccx.tcx, local_def(it.id));
let i_n_tps = i_ty.generics.types.len(subst::FnSpace);
if i_n_tps != n_tps {
});
}
}
-
};
if let ty::ty_rptr(r_ptr, _) = base_ty.sty {
mk_subregion_due_to_dereference(
- rcx, expr.span, ty::ReScope(CodeExtent::from_node_id(expr.id)), r_ptr);
+ rcx, expr.span, ty::ReScope(CodeExtent::from_node_id(expr.id)), *r_ptr);
}
visit::walk_expr(rcx, expr);
/*From:*/ (&ty::ty_rptr(from_r, ref from_mt),
/*To: */ &ty::ty_rptr(to_r, ref to_mt)) => {
// Target cannot outlive source, naturally.
- rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), to_r, from_r);
+ rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), *to_r, *from_r);
walk_cast(rcx, cast_expr, from_mt.ty, to_mt.ty);
}
// Variables being referenced must be constrained and registered
// in the upvar borrow map
constrain_free_variables_in_by_ref_closure(
- rcx, region, expr, freevars);
+ rcx, *region, expr, freevars);
}
})
}
}
ty::ty_unboxed_closure(_, region, _) => {
ty::with_freevars(tcx, expr.id, |freevars| {
- let bounds = ty::region_existential_bound(region);
+ let bounds = ty::region_existential_bound(*region);
ensure_free_variable_types_outlive_closure_bound(rcx, bounds, expr, freevars);
})
}
let var_ty = match rcx.fcx.inh.upvar_borrow_map.borrow().get(&upvar_id) {
Some(upvar_borrow) => {
ty::mk_rptr(rcx.tcx(),
- upvar_borrow.region,
+ rcx.tcx().mk_region(upvar_borrow.region),
ty::mt { mutbl: upvar_borrow.kind.to_mutbl_lossy(),
ty: raw_var_ty })
}
{
let mc = mc::MemCategorizationContext::new(rcx.fcx);
let self_cmt = mc.cat_expr_autoderefd(deref_expr, i);
- link_region(rcx, deref_expr.span, r,
+ link_region(rcx, deref_expr.span, *r,
ty::BorrowKind::from_mutbl(m), self_cmt);
}
if let ty::ty_rptr(r_ptr, _) = derefd_ty.sty {
mk_subregion_due_to_dereference(rcx, deref_expr.span,
- r_deref_expr, r_ptr);
+ r_deref_expr, *r_ptr);
}
match ty::deref(derefd_ty, true) {
match mt.ty.sty {
ty::ty_vec(_, None) | ty::ty_str => {
rcx.fcx.mk_subr(infer::IndexSlice(index_expr.span),
- r_index_expr, r_ptr);
+ r_index_expr, *r_ptr);
}
_ => {}
}
// captured by reference it must also outlive the
// region bound on the closure, but this is explicitly
// handled by logic in regionck.
- self.push_region_constraint_from_top(region);
+ self.push_region_constraint_from_top(*region);
}
ty::ty_trait(ref t) => {
self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds)
}
- ty::ty_enum(def_id, ref substs) |
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) |
+ ty::ty_struct(def_id, substs) => {
self.accumulate_from_adt(ty, def_id, substs)
}
}
ty::ty_rptr(r_b, mt) => {
- self.accumulate_from_rptr(ty, r_b, mt.ty);
+ self.accumulate_from_rptr(ty, *r_b, mt.ty);
}
ty::ty_param(p) => {
// Ensure that if &'a T is cast to &'b Trait, then 'b <= 'a
infer::mk_subr(fcx.infcx(),
infer::RelateObjectBound(source_expr.span),
- target_region,
- referent_region);
+ *target_region,
+ *referent_region);
check_object_safety(fcx.tcx(), object_trait, source_expr.span);
}
object_trait: &ty::TyTrait<'tcx>,
span: Span)
{
- let object_trait_ref = object_trait.principal_trait_ref_with_self_ty(ty::mk_err());
+ let object_trait_ref = object_trait.principal_trait_ref_with_self_ty(tcx, ty::mk_err());
for tr in traits::supertraits(tcx, object_trait_ref) {
check_object_safety_inner(tcx, &*tr, span);
}
// Create the obligation for casting from T to Trait.
let object_trait_ref =
- object_trait.principal_trait_ref_with_self_ty(referent_ty);
+ object_trait.principal_trait_ref_with_self_ty(fcx.tcx(), referent_ty);
let object_obligation =
Obligation::new(
ObligationCause::new(span,
pub fn check_trait_ref(&mut self, trait_ref: &ty::TraitRef<'tcx>) {
let trait_def = ty::lookup_trait_def(self.fcx.tcx(), trait_ref.def_id);
- let bounds = trait_def.generics.to_bounds(self.tcx(), &trait_ref.substs);
+ let bounds = trait_def.generics.to_bounds(self.tcx(), trait_ref.substs);
self.fcx.add_obligations_for_parameters(
traits::ObligationCause::new(
self.span,
}
match t.sty{
- ty::ty_struct(type_id, ref substs) |
- ty::ty_enum(type_id, ref substs) => {
+ ty::ty_struct(type_id, substs) |
+ ty::ty_enum(type_id, substs) => {
let polytype = ty::lookup_item_type(self.fcx.tcx(), type_id);
if self.binding_count == 0 {
self.fold_substs(substs);
}
- ty::ty_bare_fn(_, ty::BareFnTy{sig: ref fn_sig, ..}) |
+ ty::ty_bare_fn(_, &ty::BareFnTy{sig: ref fn_sig, ..}) |
ty::ty_closure(box ty::ClosureTy{sig: ref fn_sig, ..}) => {
self.binding_count += 1;
// impl, plus its own.
let new_polytype = ty::Polytype {
generics: new_method_ty.generics.clone(),
- ty: ty::mk_bare_fn(tcx, Some(new_did), new_method_ty.fty.clone())
+ ty: ty::mk_bare_fn(tcx, Some(new_did),
+ tcx.mk_bare_fn(new_method_ty.fty.clone()))
};
debug!("new_polytype={}", new_polytype.repr(tcx));
trait_def: &ty::TraitDef<'tcx>) {
let tcx = ccx.tcx;
if let ast_map::NodeItem(item) = tcx.map.get(trait_id) {
- if let ast::ItemTrait(_, _, _, _, ref trait_items) = item.node {
+ if let ast::ItemTrait(_, _, _, ref trait_items) = item.node {
// For each method, construct a suitable ty::Method and
// store it into the `tcx.impl_or_trait_items` table:
for trait_item in trait_items.iter() {
m.def_id,
Polytype {
generics: m.generics.clone(),
- ty: ty::mk_bare_fn(ccx.tcx, Some(m.def_id), m.fty.clone()) });
+ ty: ty::mk_bare_fn(ccx.tcx, Some(m.def_id), ccx.tcx.mk_bare_fn(m.fty.clone())) });
}
fn ty_method_of_trait_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
untransformed_rcvr_ty,
rcvr_ty_generics,
rcvr_visibility));
- let fty = ty::mk_bare_fn(tcx, Some(m_def_id), mty.fty.clone());
+ let fty = ty::mk_bare_fn(tcx, Some(m_def_id), tcx.mk_bare_fn(mty.fty.clone()));
debug!("method {} (id {}) has type {}",
m.pe_ident().repr(tcx),
m.id,
ast::RegionTyParamBound(..) => { }
}
}
-
- match ty_param.unbound {
- Some(_) => { warn = true; }
- None => { }
- }
}
if warn {
generics: &ty::Generics)
-> bool {
if let ty::ty_param(param_ty) = ty.sty {
- let type_parameter = generics.types.get(param_ty.space, param_ty.idx);
+ let type_parameter = generics.types.get(param_ty.space, param_ty.idx as uint);
for trait_bound in type_parameter.bounds.trait_bounds.iter() {
if trait_bound.def_id() == trait_id {
return true
AllowEqConstraints::DontAllow);
}
},
- ast::ItemTrait(_, _, _, _, ref trait_methods) => {
+ ast::ItemTrait(_, _, _, ref trait_methods) => {
let trait_def = trait_def_of_item(ccx, it);
debug!("trait_def: ident={} trait_def={}",
tcx.struct_fields.borrow_mut().insert(local_def(id), Rc::new(field_tys));
let substs = mk_item_substs(ccx, &pty.generics);
- let selfty = ty::mk_struct(tcx, local_def(id), substs);
+ let selfty = ty::mk_struct(tcx, local_def(id), tcx.mk_substs(substs));
// If this struct is enum-like or tuple-like, create the type of its
// constructor.
return def.clone();
}
- let (unsafety, generics, unbound, bounds, items) = match it.node {
+ let (unsafety, generics, bounds, items) = match it.node {
ast::ItemTrait(unsafety,
ref generics,
- ref unbound,
ref supertraits,
ref items) => {
- (unsafety, generics, unbound, supertraits, items.as_slice())
+ (unsafety, generics, supertraits, items.as_slice())
}
ref s => {
tcx.sess.span_bug(
}
};
- let substs = mk_trait_substs(ccx, it.id, generics, items);
+ let substs = ccx.tcx.mk_substs(mk_trait_substs(ccx, it.id, generics, items));
let ty_generics = ty_generics_for_trait(ccx,
it.id,
- &substs,
+ substs,
generics,
items);
token::SELF_KEYWORD_NAME,
self_param_ty,
bounds.as_slice(),
- unbound,
it.span);
let substs = mk_item_substs(ccx, &ty_generics);
bounds: bounds,
trait_ref: Rc::new(ty::TraitRef {
def_id: def_id,
- substs: substs
+ substs: ccx.tcx.mk_substs(substs)
})
});
tcx.trait_defs.borrow_mut().insert(def_id, trait_def.clone());
.enumerate()
.map(|(i, def)| ty::ReEarlyBound(def.lifetime.id,
subst::TypeSpace,
- i,
+ i as u32,
def.lifetime.name))
.collect();
.iter()
.enumerate()
.map(|(i, def)| ty::mk_param(ccx.tcx, subst::TypeSpace,
- i, local_def(def.id)))
+ i as u32, local_def(def.id)))
.collect();
// ...and also create generics synthesized from the associated types.
};
let pty = Polytype {
generics: ty_generics,
- ty: ty::mk_bare_fn(ccx.tcx, Some(local_def(it.id)), tofd)
+ ty: ty::mk_bare_fn(ccx.tcx, Some(local_def(it.id)), ccx.tcx.mk_bare_fn(tofd))
};
debug!("type of {} (id {}) is {}",
token::get_ident(it.ident),
generics,
DontCreateTypeParametersForAssociatedTypes);
let substs = mk_item_substs(ccx, &ty_generics);
- let t = ty::mk_enum(tcx, local_def(it.id), substs);
+ let t = ty::mk_enum(tcx, local_def(it.id), tcx.mk_substs(substs));
let pty = Polytype {
generics: ty_generics,
ty: t
generics,
DontCreateTypeParametersForAssociatedTypes);
let substs = mk_item_substs(ccx, &ty_generics);
- let t = ty::mk_struct(tcx, local_def(it.id), substs);
+ let t = ty::mk_struct(tcx, local_def(it.id), tcx.mk_substs(substs));
let pty = Polytype {
generics: ty_generics,
ty: t
fn ty_generics_for_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>,
trait_id: ast::NodeId,
- substs: &subst::Substs<'tcx>,
+ substs: &'tcx subst::Substs<'tcx>,
ast_generics: &ast::Generics,
items: &[ast::TraitItem])
-> ty::Generics<'tcx>
ccx,
subst::AssocSpace,
&associated_type.ty_param,
- generics.types.len(subst::AssocSpace),
+ generics.types.len(subst::AssocSpace) as u32,
Some(local_def(trait_id)));
ccx.tcx.ty_param_defs.borrow_mut().insert(associated_type.ty_param.id,
def.clone());
let self_trait_ref =
Rc::new(ty::Binder(ty::TraitRef { def_id: local_def(trait_id),
- substs: (*substs).clone() }));
+ substs: substs }));
let def = ty::TypeParameterDef {
space: subst::SelfSpace,
create_type_parameters_for_associated_types)
}
-// Add the Sized bound, unless the type parameter is marked as `Sized?`.
+// Add the Sized bound, unless the type parameter is marked as `?Sized`.
fn add_unsized_bound<'tcx,AC>(this: &AC,
- unbound: &Option<ast::TraitRef>,
bounds: &mut ty::BuiltinBounds,
- desc: &str,
+ ast_bounds: &[ast::TyParamBound],
span: Span)
where AC: AstConv<'tcx> {
+ // Try to find an unbound in bounds.
+ let mut unbound = None;
+ for ab in ast_bounds.iter() {
+ if let &ast::TraitTyParamBound(ref ptr, ast::TraitBoundModifier::Maybe) = ab {
+ if unbound.is_none() {
+ assert!(ptr.bound_lifetimes.is_empty());
+ unbound = Some(ptr.trait_ref.clone());
+ } else {
+ this.tcx().sess.span_err(span, "type parameter has more than one relaxed default \
+ bound, only one is supported");
+ }
+ }
+ }
+
let kind_id = this.tcx().lang_items.require(SizedTraitLangItem);
match unbound {
- &Some(ref tpb) => {
+ Some(ref tpb) => {
// FIXME(#8559) currently requires the unbound to be built-in.
let trait_def_id = ty::trait_ref_to_def_id(this.tcx(), tpb);
match kind_id {
Ok(kind_id) if trait_def_id != kind_id => {
this.tcx().sess.span_warn(span,
- format!("default bound relaxed \
- for a {}, but this \
- does nothing because \
- the given bound is not \
- a default. \
- Only `Sized?` is \
- supported",
- desc)[]);
+ "default bound relaxed for a type parameter, but \
+ this does nothing because the given bound is not \
+ a default. Only `?Sized` is supported");
ty::try_add_builtin_trait(this.tcx(),
kind_id,
bounds);
ty::try_add_builtin_trait(this.tcx(), kind_id.unwrap(), bounds);
}
// No lang item for Sized, so we can't add it as a bound.
- &None => {}
+ None => {}
}
}
.collect();
let def = ty::RegionParameterDef { name: l.lifetime.name,
space: space,
- index: i,
+ index: i as u32,
def_id: local_def(l.lifetime.id),
bounds: bounds };
debug!("ty_generics: def for region param: {}", def);
let def = get_or_create_type_parameter_def(&gcx,
space,
param,
- i,
+ i as u32,
None);
debug!("ty_generics: def for type param: {}, {}",
def.repr(this.tcx()),
.get_slice(space)
.iter() {
assert!(result.types.get_slice(space).len() ==
- associated_type_param.index);
+ associated_type_param.index as uint);
debug!("ty_generics: def for associated type: {}, {}",
associated_type_param.repr(this.tcx()),
space);
for bound in bound_pred.bounds.iter() {
match bound {
- &ast::TyParamBound::TraitTyParamBound(ref poly_trait_ref) => {
+ &ast::TyParamBound::TraitTyParamBound(ref poly_trait_ref, _) => {
let trait_ref = astconv::instantiate_poly_trait_ref(
this,
&ExplicitRscope,
for bound in param.bounds.iter() {
// In the above example, `ast_trait_ref` is `Iterator`.
let ast_trait_ref = match *bound {
- ast::TraitTyParamBound(ref r) => r,
+ ast::TraitTyParamBound(ref r, _) => r,
ast::RegionTyParamBound(..) => { continue; }
};
name: associated_type_def.name,
def_id: associated_type_def.def_id,
space: space,
- index: types.len() + index,
+ index: types.len() as u32 + index,
bounds: ty::ParamBounds {
builtin_bounds: associated_type_def.bounds.builtin_bounds,
fn get_or_create_type_parameter_def<'tcx,AC>(this: &AC,
space: subst::ParamSpace,
param: &ast::TyParam,
- index: uint,
+ index: u32,
associated_with: Option<ast::DefId>)
-> ty::TypeParameterDef<'tcx>
where AC: AstConv<'tcx>
param.ident.name,
param_ty,
param.bounds[],
- ¶m.unbound,
param.span);
let default = match param.default {
None => None,
name_of_bounded_thing: ast::Name,
param_ty: ty::ParamTy,
ast_bounds: &[ast::TyParamBound],
- unbound: &Option<ast::TraitRef>,
span: Span)
-> ty::ParamBounds<'tcx>
where AC: AstConv<'tcx> {
param_ty,
ast_bounds);
-
add_unsized_bound(this,
- unbound,
&mut param_bounds.builtin_bounds,
- "type parameter",
+ ast_bounds,
span);
check_bounds_compatible(this.tcx(),
let t_fn = ty::mk_bare_fn(
ccx.tcx,
None,
- ty::BareFnTy {
+ ccx.tcx.mk_bare_fn(ty::BareFnTy {
abi: abi,
unsafety: ast::Unsafety::Unsafe,
sig: ty::Binder(ty::FnSig {inputs: input_tys,
- output: output,
- variadic: decl.variadic}),
- });
+ output: output,
+ variadic: decl.variadic})
+ }));
let pty = Polytype {
generics: ty_generics_for_fn_or_method,
ty: t_fn
}
_ => ()
}
- let se_ty = ty::mk_bare_fn(tcx, Some(local_def(main_id)), ty::BareFnTy {
+ let se_ty = ty::mk_bare_fn(tcx, Some(local_def(main_id)), tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(ty::mk_nil(tcx)),
variadic: false
})
- });
+ }));
require_same_types(tcx, None, false, main_span, main_t, se_ty,
|| {
_ => ()
}
- let se_ty = ty::mk_bare_fn(tcx, Some(local_def(start_id)), ty::BareFnTy {
+ let se_ty = ty::mk_bare_fn(tcx, Some(local_def(start_id)), tcx.mk_bare_fn(ty::BareFnTy {
unsafety: ast::Unsafety::Normal,
abi: abi::Rust,
sig: ty::Binder(ty::FnSig {
output: ty::FnConverging(ty::mk_int()),
variadic: false
}),
- });
+ }));
require_same_types(tcx, None, false, start_span, start_t, se_ty,
|| {
/// A scope in which we generate anonymous, late-bound regions for
/// omitted regions. This occurs in function signatures.
pub struct BindingRscope {
- anon_bindings: Cell<uint>,
+ anon_bindings: Cell<u32>,
}
impl BindingRscope {
match item.node {
ast::ItemEnum(_, ref generics) |
ast::ItemStruct(_, ref generics) |
- ast::ItemTrait(_, ref generics, _, _, _) => {
+ ast::ItemTrait(_, ref generics, _, _) => {
for (i, p) in generics.lifetimes.iter().enumerate() {
let id = p.lifetime.id;
self.add_inferred(item.id, RegionParam, TypeSpace, i, id);
ty::ty_rptr(region, ref mt) => {
let contra = self.contravariant(variance);
- self.add_constraints_from_region(region, contra);
+ self.add_constraints_from_region(*region, contra);
self.add_constraints_from_mt(mt, variance);
}
}
}
- ty::ty_enum(def_id, ref substs) |
- ty::ty_struct(def_id, ref substs) => {
+ ty::ty_enum(def_id, substs) |
+ ty::ty_struct(def_id, substs) => {
let item_type = ty::lookup_item_type(self.tcx(), def_id);
let generics = &item_type.generics;
}
}
- ty::ty_bare_fn(_, ty::BareFnTy { ref sig, .. }) |
+ ty::ty_bare_fn(_, &ty::BareFnTy { ref sig, .. }) |
ty::ty_closure(box ty::ClosureTy {
ref sig,
store: ty::UniqTraitStore,
for p in type_param_defs.iter() {
let variance_decl =
self.declared_variance(p.def_id, def_id, TypeParam,
- p.space, p.index);
+ p.space, p.index as uint);
let variance_i = self.xform(variance, variance_decl);
- let substs_ty = *substs.types.get(p.space, p.index);
+ let substs_ty = *substs.types.get(p.space, p.index as uint);
self.add_constraints_from_ty(substs_ty, variance_i);
}
for p in region_param_defs.iter() {
let variance_decl =
self.declared_variance(p.def_id, def_id,
- RegionParam, p.space, p.index);
+ RegionParam, p.space, p.index as uint);
let variance_i = self.xform(variance, variance_decl);
- let substs_r = *substs.regions().get(p.space, p.index);
+ let substs_r = *substs.regions().get(p.space, p.index as uint);
self.add_constraints_from_region(substs_r, variance_i);
}
}
}
});
let trait_def = ty::lookup_trait_def(tcx, did);
- let (bounds, default_unbound) = trait_def.bounds.clean(cx);
+ let bounds = trait_def.bounds.clean(cx);
clean::Trait {
unsafety: def.unsafety,
generics: (&def.generics, subst::TypeSpace).clean(cx),
items: items.collect(),
bounds: bounds,
- default_unbound: default_unbound
}
}
derived: clean::detect_derived(attrs.as_slice()),
trait_: associated_trait.clean(cx).map(|bound| {
match bound {
- clean::TraitBound(polyt) => polyt.trait_,
+ clean::TraitBound(polyt, _) => polyt.trait_,
clean::RegionBound(..) => unreachable!(),
}
}),
pub did: ast::DefId,
pub bounds: Vec<TyParamBound>,
pub default: Option<Type>,
- /// An optional default bound on the parameter which is unbound, like `Sized?`
- pub default_unbound: Option<Type>
}
impl Clean<TyParam> for ast::TyParam {
did: ast::DefId { krate: ast::LOCAL_CRATE, node: self.id },
bounds: self.bounds.clean(cx),
default: self.default.clean(cx),
- default_unbound: self.unbound.clean(cx)
}
}
}
fn clean(&self, cx: &DocContext) -> TyParam {
cx.external_typarams.borrow_mut().as_mut().unwrap()
.insert(self.def_id, self.name.clean(cx));
- let (bounds, default_unbound) = self.bounds.clean(cx);
+ let bounds = self.bounds.clean(cx);
TyParam {
name: self.name.clean(cx),
did: self.def_id,
bounds: bounds,
default: self.default.clean(cx),
- default_unbound: default_unbound
}
}
}
#[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq)]
pub enum TyParamBound {
RegionBound(Lifetime),
- TraitBound(PolyTrait)
+ TraitBound(PolyTrait, ast::TraitBoundModifier)
}
impl Clean<TyParamBound> for ast::TyParamBound {
fn clean(&self, cx: &DocContext) -> TyParamBound {
match *self {
ast::RegionTyParamBound(lt) => RegionBound(lt.clean(cx)),
- ast::TraitTyParamBound(ref t) => TraitBound(t.clean(cx)),
+ ast::TraitTyParamBound(ref t, modifier) => TraitBound(t.clean(cx), modifier),
}
}
}
did: did,
},
lifetimes: vec![]
- })
+ }, ast::TraitBoundModifier::None)
}
}
let fqn = fqn.into_iter().map(|i| i.to_string())
.collect::<Vec<String>>();
let path = external_path(cx, fqn.last().unwrap().as_slice(),
- Some(self.def_id), &self.substs);
+ Some(self.def_id), self.substs);
cx.external_paths.borrow_mut().as_mut().unwrap().insert(self.def_id,
(fqn, TypeTrait));
if let sty::ty_tup(ref ts) = ty_s.sty {
for &ty_s in ts.iter() {
if let sty::ty_rptr(ref reg, _) = ty_s.sty {
- if let &Region::ReLateBound(_, _) = reg {
+ if let &Region::ReLateBound(_, _) = *reg {
debug!(" hit an ReLateBound {}", reg);
if let Some(lt) = reg.clean(cx) {
late_bounds.push(lt)
TraitBound(PolyTrait {
trait_: ResolvedPath { path: path, typarams: None, did: self.def_id, },
lifetimes: late_bounds
- })
+ }, ast::TraitBoundModifier::None)
}
}
-// Returns (bounds, default_unbound)
-impl<'tcx> Clean<(Vec<TyParamBound>, Option<Type>)> for ty::ParamBounds<'tcx> {
- fn clean(&self, cx: &DocContext) -> (Vec<TyParamBound>, Option<Type>) {
+impl<'tcx> Clean<Vec<TyParamBound>> for ty::ParamBounds<'tcx> {
+ fn clean(&self, cx: &DocContext) -> Vec<TyParamBound> {
let mut v = Vec::new();
- let mut has_sized_bound = false;
- for b in self.builtin_bounds.iter() {
- if b != ty::BoundSized {
- v.push(b.clean(cx));
- } else {
- has_sized_bound = true;
- }
- }
for t in self.trait_bounds.iter() {
v.push(t.clean(cx));
}
for r in self.region_bounds.iter().filter_map(|r| r.clean(cx)) {
v.push(RegionBound(r));
}
- if has_sized_bound {
- (v, None)
- } else {
- let ty = match ty::BoundSized.clean(cx) {
- TraitBound(polyt) => polyt.trait_,
- _ => unreachable!()
- };
- (v, Some(ty))
- }
+ v
}
}
v.extend(self.types.iter().map(|t| TraitBound(PolyTrait {
trait_: t.clean(cx),
lifetimes: vec![]
- })));
+ }, ast::TraitBoundModifier::None)));
if v.len() > 0 {Some(v)} else {None}
}
}
pub items: Vec<TraitMethod>,
pub generics: Generics,
pub bounds: Vec<TyParamBound>,
- /// An optional default bound not required for `Self`, like `Sized?`
- pub default_unbound: Option<Type>
}
impl Clean<Item> for doctree::Trait {
items: self.items.clean(cx),
generics: self.generics.clean(cx),
bounds: self.bounds.clean(cx),
- default_unbound: self.default_unbound.clean(cx)
}),
}
}
ty::RegionTraitStore(..) => Closure(decl),
}
}
- ty::ty_struct(did, ref substs) |
- ty::ty_enum(did, ref substs) |
+ ty::ty_struct(did, substs) |
+ ty::ty_enum(did, substs) |
ty::ty_trait(box ty::TyTrait {
- principal: ty::Binder(ty::TraitRef { def_id: did, ref substs }),
+ principal: ty::Binder(ty::TraitRef { def_id: did, substs }),
.. }) =>
{
let fqn = csearch::get_item_path(cx.tcx(), did);
},
bounds: vec![],
default: None,
- default_unbound: None
}),
visibility: None,
def_id: self.def_id,
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
-use arena::TypedArena;
use visit_ast::RustdocVisitor;
use clean;
let mut forest = ast_map::Forest::new(krate);
let ast_map = driver::assign_node_ids_and_map(&sess, &mut forest);
- let type_arena = TypedArena::new();
+ let arenas = ty::CtxtArenas::new();
let ty::CrateAnalysis {
exported_items, public_items, ty_cx, ..
- } = driver::phase_3_run_analysis_passes(sess, ast_map, &type_arena, name);
+ } = driver::phase_3_run_analysis_passes(sess, ast_map, &arenas, name);
let ctxt = DocContext {
krate: ty_cx.map.krate(),
pub whence: Span,
pub vis: ast::Visibility,
pub stab: Option<attr::Stability>,
- pub default_unbound: Option<ast::TraitRef> // FIXME(tomjakubowski)
}
pub struct Impl {
if i > 0 {
try!(f.write(", ".as_bytes()))
}
- if let Some(ref unbound) = tp.default_unbound {
- try!(write!(f, "{}? ", unbound));
- };
try!(f.write(tp.name.as_bytes()));
if tp.bounds.len() > 0 {
clean::RegionBound(ref lt) => {
write!(f, "{}", *lt)
}
- clean::TraitBound(ref ty) => {
- write!(f, "{}", *ty)
+ clean::TraitBound(ref ty, modifier) => {
+ let modifier_str = match modifier {
+ ast::TraitBoundModifier::None => "",
+ ast::TraitBoundModifier::Maybe => "?",
+ };
+ write!(f, "{}{}", modifier_str, *ty)
}
}
}
for bound in decl.bounds.iter() {
match *bound {
clean::RegionBound(..) => {}
- clean::TraitBound(ref t) => {
+ clean::TraitBound(ref t, modifier) => {
if ret.len() == 0 {
ret.push_str(": ");
} else {
ret.push_str(" + ");
}
+ if modifier == ast::TraitBoundModifier::Maybe {
+ ret.push_str("?");
+ }
ret.push_str(format!("{}",
*t).as_slice());
}
fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item,
t: &clean::Trait) -> fmt::Result {
let mut bounds = String::new();
- if let Some(ref ty) = t.default_unbound {
- bounds.push_str(format!(" for {}?", ty).as_slice());
- }
if t.bounds.len() > 0 {
if bounds.len() > 0 {
bounds.push(' ');
};
om.constants.push(s);
},
- ast::ItemTrait(unsafety, ref gen, ref def_ub, ref b, ref items) => {
+ ast::ItemTrait(unsafety, ref gen, ref b, ref items) => {
let t = Trait {
unsafety: unsafety,
name: name,
whence: item.span,
vis: item.vis,
stab: self.stability(item.id),
- default_unbound: def_ub.clone()
};
om.traits.push(t);
},
//! * `String`: equivalent to rust's `String`
//! * `Array`: equivalent to rust's `Vec<T>`, but also allowing objects of different types in the
//! same array
-//! * `Object`: equivalent to rust's `Treemap<String, json::Json>`
+//! * `Object`: equivalent to rust's `BTreeMap<String, json::Json>`
//! * `Null`
//!
//! An object is a series of string keys mapping to values, in `"key": value` format.
use ops::{Drop, FnOnce};
use option::Option;
use option::Option::{Some, None};
-use ptr::RawPtr;
+use ptr::PtrExt;
use ptr;
use raw;
use slice::AsSlice;
/// }
/// ```
///
-/// The easiest way to use `HashMap` with a custom type is to derive `Eq` and `Hash`.
+/// The easiest way to use `HashMap` with a custom type as key is to derive `Eq` and `Hash`.
/// We must also derive `PartialEq`.
///
/// ```
/// use std::collections::HashMap;
///
/// #[deriving(Hash, Eq, PartialEq, Show)]
-/// struct Viking<'a> {
-/// name: &'a str,
-/// power: uint,
+/// struct Viking {
+/// name: String,
+/// country: String,
/// }
///
+/// impl Viking {
+/// /// Create a new Viking.
+/// pub fn new(name: &str, country: &str) -> Viking {
+/// Viking { name: name.to_string(), country: country.to_string() }
+/// }
+/// }
+///
+/// // Use a HashMap to store the vikings' health points.
/// let mut vikings = HashMap::new();
///
-/// vikings.insert("Norway", Viking { name: "Einar", power: 9u });
-/// vikings.insert("Denmark", Viking { name: "Olaf", power: 4u });
-/// vikings.insert("Iceland", Viking { name: "Harald", power: 8u });
+/// vikings.insert(Viking::new("Einar", "Norway"), 25u);
+/// vikings.insert(Viking::new("Olaf", "Denmark"), 24u);
+/// vikings.insert(Viking::new("Harald", "Iceland"), 12u);
///
-/// // Use derived implementation to print the vikings.
-/// for (land, viking) in vikings.iter() {
-/// println!("{} at {}", viking, land);
+/// // Use derived implementation to print the status of the vikings.
+/// for (viking, health) in vikings.iter() {
+/// println!("{} has {} hp", viking, health);
/// }
/// ```
#[deriving(Clone)]
/// }
/// ```
#[unstable = "matches collection reform specification, waiting for dust to settle"]
- pub fn iter(&self) -> Entries<K, V> {
- Entries { inner: self.table.iter() }
+ pub fn iter(&self) -> Iter<K, V> {
+ Iter { inner: self.table.iter() }
}
/// An iterator visiting all key-value pairs in arbitrary order,
}
/// HashMap iterator
-pub struct Entries<'a, K: 'a, V: 'a> {
- inner: table::Entries<'a, K, V>
+pub struct Iter<'a, K: 'a, V: 'a> {
+ inner: table::Iter<'a, K, V>
}
/// HashMap mutable values iterator
/// HashMap keys iterator
pub struct Keys<'a, K: 'a, V: 'a> {
- inner: Map<(&'a K, &'a V), &'a K, Entries<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
+ inner: Map<(&'a K, &'a V), &'a K, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a K>
}
/// HashMap values iterator
pub struct Values<'a, K: 'a, V: 'a> {
- inner: Map<(&'a K, &'a V), &'a V, Entries<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
+ inner: Map<(&'a K, &'a V), &'a V, Iter<'a, K, V>, fn((&'a K, &'a V)) -> &'a V>
}
/// HashMap drain iterator
NoElem(EmptyBucket<K, V, M>),
}
-impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Iter<'a, K, V> {
#[inline] fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next() }
#[inline] fn size_hint(&self) -> (uint, Option<uint>) { self.inner.size_hint() }
}
use ops::{Deref, DerefMut, Drop};
use option::Option;
use option::Option::{Some, None};
-use ptr::{Unique, RawPtr, copy_nonoverlapping_memory, zero_memory};
+use ptr::{Unique, PtrExt, copy_nonoverlapping_memory, zero_memory};
use ptr;
use rt::heap::{allocate, deallocate};
}
}
- pub fn iter(&self) -> Entries<K, V> {
- Entries {
+ pub fn iter(&self) -> Iter<K, V> {
+ Iter {
iter: self.raw_buckets(),
elems_left: self.size(),
}
}
/// Iterator over shared references to entries in a table.
-pub struct Entries<'a, K: 'a, V: 'a> {
+pub struct Iter<'a, K: 'a, V: 'a> {
iter: RawBuckets<'a, K, V>,
elems_left: uint,
}
iter: RawBuckets<'static, K, V>,
}
-impl<'a, K, V> Iterator<(&'a K, &'a V)> for Entries<'a, K, V> {
+impl<'a, K, V> Iterator<(&'a K, &'a V)> for Iter<'a, K, V> {
fn next(&mut self) -> Option<(&'a K, &'a V)> {
self.iter.next().map(|bucket| {
self.elems_left -= 1;
// and that could cause problems on platforms where it is
// represented by opaque data structure
pub fn postinit_lock(&self) -> MutexGuard<()> {
- self.select_lock.lock()
+ self.select_lock.lock().unwrap()
}
// This function is used at the creation of a shared packet to inherit a
// about looking at and dealing with to_wake. Once we have acquired the
// lock, we are guaranteed that inherit_blocker is done.
{
- let _guard = self.select_lock.lock();
+ let _guard = self.select_lock.lock().unwrap();
}
// Like the stream implementation, we want to make sure that the count
NoneBlocked => {}
_ => unreachable!(),
}
- drop(guard); // unlock
- wait_token.wait(); // block
- lock.lock() // relock
+ drop(guard); // unlock
+ wait_token.wait(); // block
+ lock.lock().unwrap() // relock
}
/// Wakes up a thread, dropping the lock at the correct time
fn acquire_send_slot(&self) -> MutexGuard<State<T>> {
let mut node = Node { token: None, next: 0 as *mut Node };
loop {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// are we ready to go?
if guard.disconnected || guard.buf.size() < guard.buf.cap() {
return guard;
}
pub fn try_send(&self, t: T) -> Result<(), super::TrySendError<T>> {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected {
Err(super::RecvDisconnected(t))
} else if guard.buf.size() == guard.buf.cap() {
// When reading this, remember that there can only ever be one receiver at
// time.
pub fn recv(&self) -> Result<T, ()> {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// Wait for the buffer to have something in it. No need for a while loop
// because we're the only receiver.
}
pub fn try_recv(&self) -> Result<T, Failure> {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// Easy cases first
if guard.disconnected { return Err(Disconnected) }
}
// Not much to do other than wake up a receiver if one's there
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
match mem::replace(&mut guard.blocker, NoneBlocked) {
}
pub fn drop_port(&self) {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected { return }
guard.disconnected = true;
// If Ok, the value is whether this port has data, if Err, then the upgraded
// port needs to be checked instead of this one.
pub fn can_recv(&self) -> bool {
- let guard = self.lock.lock();
+ let guard = self.lock.lock().unwrap();
guard.disconnected || guard.buf.size() > 0
}
// Attempts to start selection on this port. This can either succeed or fail
// because there is data waiting.
pub fn start_selection(&self, token: SignalToken) -> StartResult {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
if guard.disconnected || guard.buf.size() > 0 {
Abort
} else {
//
// The return value indicates whether there's data on this port.
pub fn abort_selection(&self) -> bool {
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
match mem::replace(&mut guard.blocker, NoneBlocked) {
NoneBlocked => true,
BlockedSender(token) => {
impl<T: Send> Drop for Packet<T> {
fn drop(&mut self) {
assert_eq!(self.channels.load(atomic::SeqCst), 0);
- let mut guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
assert!(guard.queue.dequeue().is_none());
assert!(guard.canceled.is_none());
}
impl<R: Reader> Buffer for BufferedReader<R> {
fn fill_buf<'a>(&'a mut self) -> IoResult<&'a [u8]> {
if self.pos == self.cap {
- self.cap = try!(self.inner.read(self.buf[mut]));
+ self.cap = try!(self.inner.read(self.buf.as_mut_slice()));
self.pos = 0;
}
Ok(self.buf[self.pos..self.cap])
if buf.len() > self.buf.len() {
self.inner.as_mut().unwrap().write(buf)
} else {
- let dst = self.buf[mut self.pos..];
+ let dst = self.buf.slice_from_mut(self.pos);
slice::bytes::copy_memory(dst, buf);
self.pos += buf.len();
Ok(())
loop {
let count = match self.fill_buf().ok() {
Some(src) => {
- let dst = buf[mut num_read..];
+ let dst = buf.slice_from_mut(num_read);
let count = cmp::min(src.len(), dst.len());
bytes::copy_memory(dst, src[..count]);
count
use ops::FnOnce;
use option::Option;
use option::Option::{Some, None};
-use ptr::RawPtr;
+use ptr::PtrExt;
use result::Result::{Ok, Err};
use slice::{SliceExt, AsSlice};
{
let mut read_stream = File::open_mode(filename, Open, Read);
{
- let read_buf = read_mem[mut 0..4];
+ let read_buf = read_mem.slice_mut(0, 4);
check!(read_stream.read(read_buf));
}
{
- let read_buf = read_mem[mut 4..8];
+ let read_buf = read_mem.slice_mut(4, 8);
check!(read_stream.read(read_buf));
}
}
check!(rmdir_recursive(dir));
}
+ #[test]
+ fn mkdir_path_already_exists_error() {
+ use io::{IoError, PathAlreadyExists};
+
+ let tmpdir = tmpdir();
+ let dir = &tmpdir.join("mkdir_error_twice");
+ check!(mkdir(dir, io::USER_RWX));
+ match mkdir(dir, io::USER_RWX) {
+ Err(IoError{kind:PathAlreadyExists,..}) => (),
+ _ => assert!(false)
+ };
+ }
+
#[test]
fn recursive_mkdir() {
let tmpdir = tmpdir();
let write_len = min(buf.len(), self.buf.len() - self.pos);
{
let input = self.buf[self.pos.. self.pos + write_len];
- let output = buf[mut ..write_len];
+ let output = buf.slice_to_mut(write_len);
assert_eq!(input.len(), output.len());
slice::bytes::copy_memory(output, input);
}
let write_len = min(buf.len(), self.len());
{
let input = self[..write_len];
- let output = buf[mut ..write_len];
+ let output = buf.slice_to_mut(write_len);
slice::bytes::copy_memory(output, input);
}
impl<'a> Writer for BufWriter<'a> {
#[inline]
fn write(&mut self, src: &[u8]) -> IoResult<()> {
- let dst = self.buf[mut self.pos..];
+ let dst = self.buf.slice_from_mut(self.pos);
let dst_len = dst.len();
if dst_len == 0 {
let write_len = min(buf.len(), self.buf.len() - self.pos);
{
let input = self.buf[self.pos.. self.pos + write_len];
- let output = buf[mut ..write_len];
+ let output = buf.slice_to_mut(write_len);
assert_eq!(input.len(), output.len());
slice::bytes::copy_memory(output, input);
}
assert!(r.read_at_least(buf.len(), &mut buf).is_ok());
let b: &[_] = &[1, 2, 3];
assert_eq!(buf, b);
- assert!(r.read_at_least(0, buf[mut ..0]).is_ok());
+ assert!(r.read_at_least(0, buf.slice_to_mut(0)).is_ok());
assert_eq!(buf, b);
assert!(r.read_at_least(buf.len(), &mut buf).is_ok());
let b: &[_] = &[4, 5, 6];
while read < min {
let mut zeroes = 0;
loop {
- match self.read(buf[mut read..]) {
+ match self.read(buf.slice_from_mut(read)) {
Ok(0) => {
zeroes += 1;
if zeroes >= NO_PROGRESS_LIMIT {
// API yet. If so, it should be a method on Vec.
unsafe fn slice_vec_capacity<'a, T>(v: &'a mut Vec<T>, start: uint, end: uint) -> &'a mut [T] {
use raw::Slice;
- use ptr::RawPtr;
+ use ptr::PtrExt;
assert!(start <= end);
assert!(end <= v.capacity());
#[inline]
fn write_char(&mut self, c: char) -> IoResult<()> {
let mut buf = [0u8, ..4];
- let n = c.encode_utf8(buf[mut]).unwrap_or(0);
+ let n = c.encode_utf8(buf.as_mut_slice()).unwrap_or(0);
self.write(buf[..n])
}
{
let mut start = 1;
while start < width {
- match try!(self.read(buf[mut start..width])) {
+ match try!(self.read(buf.slice_mut(start, width))) {
n if n == width - start => break,
n if n < width - start => { start += n; }
_ => return Err(standard_error(InvalidInput)),
/// A mode specifies how a file should be opened or created. These modes are
/// passed to `File::open_mode` and are used to control where the file is
/// positioned when it is initially opened.
-#[deriving(Copy)]
+#[deriving(Copy, Clone, PartialEq, Eq)]
pub enum FileMode {
/// Opens a file positioned at the beginning.
Open,
/// Access permissions with which the file should be opened. `File`s
/// opened with `Read` will return an error if written to.
-#[deriving(Copy)]
+#[deriving(Copy, Clone, PartialEq, Eq)]
pub enum FileAccess {
/// Read-only access, requests to write will result in an error
Read,
assert!(head.len() + tail.len() <= 8);
let mut gs = [0u16, ..8];
gs.clone_from_slice(head);
- gs[mut 8 - tail.len() .. 8].clone_from_slice(tail);
+ gs.slice_mut(8 - tail.len(), 8).clone_from_slice(tail);
Ipv6Addr(gs[0], gs[1], gs[2], gs[3], gs[4], gs[5], gs[6], gs[7])
}
/// match socket.recv_from(&mut buf) {
/// Ok((amt, src)) => {
/// // Send a reply to the socket we received data from
-/// let buf = buf[mut ..amt];
+/// let buf = buf.slice_to_mut(amt);
/// buf.reverse();
/// socket.send_to(buf, src);
/// }
/// ```
pub fn lock<'a>(&'a mut self) -> StdinReaderGuard<'a> {
StdinReaderGuard {
- inner: self.inner.lock()
+ inner: self.inner.lock().unwrap()
}
}
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_line(&mut self) -> IoResult<String> {
- self.inner.lock().0.read_line()
+ self.inner.lock().unwrap().0.read_line()
}
/// Like `Buffer::read_until`.
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_until(&mut self, byte: u8) -> IoResult<Vec<u8>> {
- self.inner.lock().0.read_until(byte)
+ self.inner.lock().unwrap().0.read_until(byte)
}
/// Like `Buffer::read_char`.
/// The read is performed atomically - concurrent read calls in other
/// threads will not interleave with this one.
pub fn read_char(&mut self) -> IoResult<char> {
- self.inner.lock().0.read_char()
+ self.inner.lock().unwrap().0.read_char()
}
}
impl Reader for StdinReader {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint> {
- self.inner.lock().0.read(buf)
+ self.inner.lock().unwrap().0.read(buf)
}
// We have to manually delegate all of these because the default impls call
// incur the costs of repeated locking).
fn read_at_least(&mut self, min: uint, buf: &mut [u8]) -> IoResult<uint> {
- self.inner.lock().0.read_at_least(min, buf)
+ self.inner.lock().unwrap().0.read_at_least(min, buf)
}
fn push_at_least(&mut self, min: uint, len: uint, buf: &mut Vec<u8>) -> IoResult<uint> {
- self.inner.lock().0.push_at_least(min, len, buf)
+ self.inner.lock().unwrap().0.push_at_least(min, len, buf)
}
fn read_to_end(&mut self) -> IoResult<Vec<u8>> {
- self.inner.lock().0.read_to_end()
+ self.inner.lock().unwrap().0.read_to_end()
}
fn read_le_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
- self.inner.lock().0.read_le_uint_n(nbytes)
+ self.inner.lock().unwrap().0.read_le_uint_n(nbytes)
}
fn read_be_uint_n(&mut self, nbytes: uint) -> IoResult<u64> {
- self.inner.lock().0.read_be_uint_n(nbytes)
+ self.inner.lock().unwrap().0.read_be_uint_n(nbytes)
}
}
}
let len = cmp::min(self.limit, buf.len());
- let res = self.inner.read(buf[mut ..len]);
+ let res = self.inner.read(buf.slice_to_mut(len));
match res {
Ok(len) => self.limit -= len,
_ => {}
use option::Option::{Some, None};
use path::{Path, GenericPath, BytesContainer};
use sys;
-use ptr::RawPtr;
+use sys::os as os_imp;
+use ptr::PtrExt;
use ptr;
use result::Result;
use result::Result::{Err, Ok};
use option::Option;
use option::Option::{Some, None};
use slice::SliceExt;
-use str::{CharSplits, FromStr, StrVector, StrExt};
+use str::{SplitTerminator, FromStr, StrVector, StrExt};
use string::{String, ToString};
use unicode::char::UnicodeChar;
use vec::Vec;
/// Each component is yielded as Option<&str> for compatibility with PosixPath, but
/// every component in WindowsPath is guaranteed to be Some.
pub type StrComponents<'a> =
- Map<&'a str, Option<&'a str>, CharSplits<'a, char>, fn(&'a str) -> Option<&'a str>>;
+ Map<&'a str, Option<&'a str>, SplitTerminator<'a, char>, fn(&'a str) -> Option<&'a str>>;
/// Iterator that yields successive components of a Path as &[u8]
pub type Components<'a> =
#[doc(no_inline)] pub use option::Option;
#[doc(no_inline)] pub use option::Option::{Some, None};
#[doc(no_inline)] pub use path::{GenericPath, Path, PosixPath, WindowsPath};
-#[doc(no_inline)] pub use ptr::{RawPtr, RawMutPtr};
+#[doc(no_inline)] pub use ptr::{PtrExt, MutPtrExt};
#[doc(no_inline)] pub use result::Result;
#[doc(no_inline)] pub use result::Result::{Ok, Err};
#[doc(no_inline)] pub use io::{Buffer, Writer, Reader, Seek, BufferPrelude};
//! See the `distributions` submodule for sampling random numbers from
//! distributions like normal and exponential.
//!
-//! # Task-local RNG
+//! # Thread-local RNG
//!
//! There is built-in support for a RNG associated with each task stored
//! in task-local storage. This RNG can be accessed via `task_rng`, or
let mut read = 0;
let len = v.len();
while read < len {
- let result = getrandom(v[mut read..]);
+ let result = getrandom(v.slice_from_mut(read));
if result == -1 {
let err = errno() as libc::c_int;
if err == libc::EINTR {
mod at_exit_imp;
mod libunwind;
-/// The default error code of the rust runtime if the main task panics instead
+/// The default error code of the rust runtime if the main thread panics instead
/// of exiting cleanly.
pub const DEFAULT_ERROR_CODE: int = 101;
///
/// The procedure passed to this function will be executed as part of the
/// runtime cleanup phase. For normal rust programs, this means that it will run
-/// after all other tasks have exited.
+/// after all other threads have exited.
///
-/// The procedure is *not* executed with a local `Task` available to it, so
+/// The procedure is *not* executed with a local `Thread` available to it, so
/// primitives like logging, I/O, channels, spawning, etc, are *not* available.
/// This is meant for "bare bones" usage to clean up runtime details, this is
/// not meant as a general-purpose "let's clean everything up" function.
use rt::unwind;
use rt::unwind::Unwinder;
-/// State associated with Rust tasks.
+/// State associated with Rust threads
///
/// This structure is currently undergoing major changes, and is
/// likely to be move/be merged with a `Thread` structure.
awoken: bool, // used to prevent spurious wakeups
// This field holds the known bounds of the stack in (lo, hi) form. Not all
- // native tasks necessarily know their precise bounds, hence this is
+ // native threads necessarily know their precise bounds, hence this is
// optional.
stack_bounds: (uint, uint),
stack_guard: uint
}
-// Once a task has entered the `Armed` state it must be destroyed via `drop`,
+// Once a thread has entered the `Armed` state it must be destroyed via `drop`,
// and no other method. This state is used to track this transition.
#[deriving(PartialEq)]
enum TaskState {
}
pub struct TaskOpts {
- /// Invoke this procedure with the result of the task when it finishes.
+ /// Invoke this procedure with the result of the thread when it finishes.
pub on_exit: Option<Thunk<Result>>,
- /// A name for the task-to-be, for identification in panic messages
+ /// A name for the thread-to-be, for identification in panic messages
pub name: Option<SendStr>,
- /// The size of the stack for the spawned task
+ /// The size of the stack for the spawned thread
pub stack_size: Option<uint>,
}
-/// Indicates the manner in which a task exited.
+/// Indicates the manner in which a thread exited.
///
-/// A task that completes without panicking is considered to exit successfully.
+/// A thread that completes without panicking is considered to exit successfully.
///
/// If you wish for this result's delivery to block until all
-/// children tasks complete, recommend using a result future.
+/// children threads complete, recommend using a result future.
pub type Result = ::core::result::Result<(), Box<Any + Send>>;
-/// A handle to a blocked task. Usually this means having the Box<Task>
-/// pointer by ownership, but if the task is killable, a killer can steal it
+/// A handle to a blocked thread. Usually this means having the Box<Task>
+/// pointer by ownership, but if the thread is killable, a killer can steal it
/// at any time.
pub enum BlockedTask {
Owned(Box<Task>),
Shared(Arc<AtomicUint>),
}
-/// Per-task state related to task death, killing, panic, etc.
+/// Per-thread state related to thread death, killing, panic, etc.
pub struct Death {
pub on_exit: Option<Thunk<Result>>,
}
}
impl Task {
- /// Creates a new uninitialized task.
+ /// Creates a new uninitialized thread.
pub fn new(stack_bounds: Option<(uint, uint)>, stack_guard: Option<uint>) -> Task {
Task {
unwinder: Unwinder::new(),
})
}
- /// Consumes ownership of a task, runs some code, and returns the task back.
+ /// Consumes ownership of a thread, runs some code, and returns the thread back.
///
/// This function can be used as an emulated "try/catch" to interoperate
/// with the rust runtime at the outermost boundary. It is not possible to
/// use this function in a nested fashion (a try/catch inside of another
/// try/catch). Invoking this function is quite cheap.
///
- /// If the closure `f` succeeds, then the returned task can be used again
+ /// If the closure `f` succeeds, then the returned thread can be used again
/// for another invocation of `run`. If the closure `f` panics then `self`
/// will be internally destroyed along with all of the other associated
- /// resources of this task. The `on_exit` callback is invoked with the
+ /// resources of this thread. The `on_exit` callback is invoked with the
/// cause of panic (not returned here). This can be discovered by querying
/// `is_destroyed()`.
///
/// guaranteed to return if it panics. Care should be taken to ensure that
/// stack references made by `f` are handled appropriately.
///
- /// It is invalid to call this function with a task that has been previously
+ /// It is invalid to call this function with a thread that has been previously
/// destroyed via a failed call to `run`.
pub fn run(mut self: Box<Task>, f: ||) -> Box<Task> {
- assert!(!self.is_destroyed(), "cannot re-use a destroyed task");
+ assert!(!self.is_destroyed(), "cannot re-use a destroyed thread");
// First, make sure that no one else is in TLS. This does not allow
// recursive invocations of run(). If there's no one else, then
// relinquish ownership of ourselves back into TLS.
if Local::exists(None::<Task>) {
- panic!("cannot run a task recursively inside another");
+ panic!("cannot run a thread recursively inside another");
}
self.state = Armed;
Local::put(self);
// There are two primary reasons that general try/catch is unsafe. The
// first is that we do not support nested try/catch. The above check for
- // an existing task in TLS is sufficient for this invariant to be
+ // an existing thread in TLS is sufficient for this invariant to be
// upheld. The second is that unwinding while unwinding is not defined.
- // We take care of that by having an 'unwinding' flag in the task
+ // We take care of that by having an 'unwinding' flag in the thread
// itself. For these reasons, this unsafety should be ok.
let result = unsafe { unwind::try(f) };
- // After running the closure given return the task back out if it ran
- // successfully, or clean up the task if it panicked.
+ // After running the closure given return the thread back out if it ran
+ // successfully, or clean up the thread if it panicked.
let task: Box<Task> = Local::take();
match result {
Ok(()) => task,
}
}
- /// Destroy all associated resources of this task.
+ /// Destroy all associated resources of this thread.
///
- /// This function will perform any necessary clean up to prepare the task
+ /// This function will perform any necessary clean up to prepare the thread
/// for destruction. It is required that this is called before a `Task`
/// falls out of scope.
///
- /// The returned task cannot be used for running any more code, but it may
+ /// The returned thread cannot be used for running any more code, but it may
/// be used to extract the runtime as necessary.
pub fn destroy(self: Box<Task>) -> Box<Task> {
if self.is_destroyed() {
}
}
- /// Cleans up a task, processing the result of the task as appropriate.
+ /// Cleans up a thread, processing the result of the thread as appropriate.
///
- /// This function consumes ownership of the task, deallocating it once it's
+ /// This function consumes ownership of the thread, deallocating it once it's
/// done being processed. It is assumed that TLD and the local heap have
/// already been destroyed and/or annihilated.
fn cleanup(mut self: Box<Task>, result: Result) -> Box<Task> {
// After taking care of the data above, we need to transmit the result
- // of this task.
+ // of this thread.
let what_to_do = self.death.on_exit.take();
Local::put(self);
// if this panics, this will also likely abort the runtime.
//
// This closure is currently limited to a channel send via the
- // standard library's task interface, but this needs
+ // standard library's thread interface, but this needs
// reconsideration to whether it's a reasonable thing to let a
- // task to do or not.
+            // thread do or not.
match what_to_do {
Some(f) => { f.invoke(result) }
None => { drop(result) }
}
- // Now that we're done, we remove the task from TLS and flag it for
+ // Now that we're done, we remove the thread from TLS and flag it for
// destruction.
let mut task: Box<Task> = Local::take();
task.state = Destroyed;
/// Queries whether this can be destroyed or not.
pub fn is_destroyed(&self) -> bool { self.state == Destroyed }
- /// Deschedules the current task, invoking `f` `amt` times. It is not
+ /// Deschedules the current thread, invoking `f` `amt` times. It is not
/// recommended to use this function directly, but rather communication
/// primitives in `std::comm` should be used.
//
// shared state. Additionally, all of the violations are protected with a
// mutex, so in theory there are no races.
//
- // The first thing we need to do is to get a pointer to the task's internal
- // mutex. This address will not be changing (because the task is allocated
- // on the heap). We must have this handle separately because the task will
+ // The first thing we need to do is to get a pointer to the thread's internal
+ // mutex. This address will not be changing (because the thread is allocated
+ // on the heap). We must have this handle separately because the thread will
// have its ownership transferred to the given closure. We're guaranteed,
// however, that this memory will remain valid because *this* is the current
- // task's execution thread.
+        // thread of execution.
//
- // The next weird part is where ownership of the task actually goes. We
+ // The next weird part is where ownership of the thread actually goes. We
// relinquish it to the `f` blocking function, but upon returning this
- // function needs to replace the task back in TLS. There is no communication
- // from the wakeup thread back to this thread about the task pointer, and
- // there's really no need to. In order to get around this, we cast the task
+ // function needs to replace the thread back in TLS. There is no communication
+ // from the wakeup thread back to this thread about the thread pointer, and
+ // there's really no need to. In order to get around this, we cast the thread
// to a `uint` which is then used at the end of this function to cast back
// to a `Box<Task>` object. Naturally, this looks like it violates
// ownership semantics in that there may be two `Box<Task>` objects.
//
// The fun part is that the wakeup half of this implementation knows to
- // "forget" the task on the other end. This means that the awakening half of
+ // "forget" the thread on the other end. This means that the awakening half of
// things silently relinquishes ownership back to this thread, but not in a
- // way that the compiler can understand. The task's memory is always valid
- // for both tasks because these operations are all done inside of a mutex.
+ // way that the compiler can understand. The thread's memory is always valid
+ // for both threads because these operations are all done inside of a mutex.
//
// You'll also find that if blocking fails (the `f` function hands the
// BlockedTask back to us), we will `mem::forget` the handles. The
- // reasoning for this is the same logic as above in that the task silently
+ // reasoning for this is the same logic as above in that the thread silently
// transfers ownership via the `uint`, not through normal compiler
// semantics.
//
let guard = (*me).lock.lock();
(*me).awoken = false;
- // Apply the given closure to all of the "selectable tasks",
+ // Apply the given closure to all of the "selectable threads",
// bailing on the first one that produces an error. Note that
// care must be taken such that when an error is occurred, we
- // may not own the task, so we may still have to wait for the
- // task to become available. In other words, if task.wake()
+ // may not own the thread, so we may still have to wait for the
+ // thread to become available. In other words, if thread.wake()
// returns `None`, then someone else has ownership and we must
// wait for their signal.
match iter.map(f).filter_map(|a| a.err()).next() {
guard.wait();
}
}
- // put the task back in TLS, and everything is as it once was.
+ // put the thread back in TLS, and everything is as it once was.
Local::put(mem::transmute(me));
}
}
- /// Wakes up a previously blocked task. This function can only be
- /// called on tasks that were previously blocked in `deschedule`.
+ /// Wakes up a previously blocked thread. This function can only be
+ /// called on threads that were previously blocked in `deschedule`.
//
- // See the comments on `deschedule` for why the task is forgotten here, and
+ // See the comments on `deschedule` for why the thread is forgotten here, and
// why it's valid to do so.
pub fn reawaken(mut self: Box<Task>) {
unsafe {
}
}
- /// Yields control of this task to another task. This function will
+ /// Yields control of this thread to another thread. This function will
/// eventually return, but possibly not immediately. This is used as an
- /// opportunity to allow other tasks a chance to run.
+ /// opportunity to allow other threads a chance to run.
pub fn yield_now() {
Thread::yield_now();
}
- /// Returns the stack bounds for this task in (lo, hi) format. The stack
- /// bounds may not be known for all tasks, so the return value may be
+ /// Returns the stack bounds for this thread in (lo, hi) format. The stack
+ /// bounds may not be known for all threads, so the return value may be
/// `None`.
pub fn stack_bounds(&self) -> (uint, uint) {
self.stack_bounds
}
- /// Returns the stack guard for this task, if known.
+ /// Returns the stack guard for this thread, if known.
pub fn stack_guard(&self) -> Option<uint> {
if self.stack_guard != 0 {
Some(self.stack_guard)
}
}
- /// Consume this task, flagging it as a candidate for destruction.
+ /// Consume this thread, flagging it as a candidate for destruction.
///
- /// This function is required to be invoked to destroy a task. A task
+ /// This function is required to be invoked to destroy a thread. A thread
/// destroyed through a normal drop will abort.
pub fn drop(mut self) {
self.state = Destroyed;
impl Drop for Task {
fn drop(&mut self) {
- rtdebug!("called drop for a task: {}", self as *mut Task as uint);
+ rtdebug!("called drop for a thread: {}", self as *mut Task as uint);
rtassert!(self.state != Armed);
}
}
}
impl BlockedTask {
- /// Returns Some if the task was successfully woken; None if already killed.
+ /// Returns Some if the thread was successfully woken; None if already killed.
pub fn wake(self) -> Option<Box<Task>> {
match self {
Owned(task) => Some(task),
}
}
- /// Reawakens this task if ownership is acquired. If finer-grained control
+ /// Reawakens this thread if ownership is acquired. If finer-grained control
/// is desired, use `wake` instead.
pub fn reawaken(self) {
self.wake().map(|t| t.reawaken());
#[cfg(not(test))] pub fn trash(self) { }
#[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); }
- /// Create a blocked task, unless the task was already killed.
+ /// Create a blocked thread, unless the thread was already killed.
pub fn block(task: Box<Task>) -> BlockedTask {
Owned(task)
}
- /// Converts one blocked task handle to a list of many handles to the same.
+ /// Converts one blocked thread handle to a list of many handles to the same.
pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTasks> {
let arc = match self {
Owned(task) => {
drop(Task::new(None, None));
}
- // Task blocking tests
+ // Thread blocking tests
#[test]
fn block_and_wake() {
pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: uint);
-// Variables used for invoking callbacks when a task starts to unwind.
+// Variables used for invoking callbacks when a thread starts to unwind.
//
// For more information, see below.
const MAX_CALLBACKS: uint = 16;
///
/// * This is not safe to call in a nested fashion. The unwinding
/// interface for Rust is designed to have at most one try/catch block per
-/// task, not multiple. No runtime checking is currently performed to uphold
+/// thread, not multiple. No runtime checking is currently performed to uphold
/// this invariant, so this function is not safe. A nested try/catch block
/// may result in corruption of the outer try/catch block's state, especially
-/// if this is used within a task itself.
+/// if this is used within a thread itself.
///
-/// * It is not sound to trigger unwinding while already unwinding. Rust tasks
+/// * It is not sound to trigger unwinding while already unwinding. Rust threads
/// have runtime checks in place to ensure this invariant, but it is not
-/// guaranteed that a rust task is in place when invoking this function.
+/// guaranteed that a rust thread is in place when invoking this function.
/// Unwinding twice can lead to resource leaks where some destructors are not
/// run.
pub unsafe fn try<F: FnOnce()>(f: F) -> Result<(), Box<Any + Send>> {
// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
//
// This is pretty close to Rust's exception handling approach, except that Rust
-// does have a single "catch-all" handler at the bottom of each task's stack.
+// does have a single "catch-all" handler at the bottom of each thread's stack.
// So we have two versions of the personality routine:
// - rust_eh_personality, used by all cleanup landing pads, which never catches,
// so the behavior of __gcc_personality_v0 is perfectly adequate there, and
// Currently this means that panic!() on OOM will invoke this code path,
// but then again we're not really ready for panic on OOM anyway. If
// we do start doing this, then we should propagate this allocation to
- // be performed in the parent of this task instead of the task that's
+ // be performed in the parent of this thread instead of the thread that's
// panicking.
// see below for why we do the `Any` coercion here.
static INIT: Once = ONCE_INIT;
INIT.doit(|| unsafe { register(failure::on_fail); });
- // First, invoke call the user-defined callbacks triggered on task panic.
+    // First, invoke the user-defined callbacks triggered on thread panic.
//
// By the time that we see a callback has been registered (by reading
// MAX_CALLBACKS), the actual callback itself may have not been stored yet,
// If a thread panics while it's already unwinding then we
// have limited options. Currently our preference is to
// just abort. In the future we may consider resuming
- // unwinding or otherwise exiting the task cleanly.
+ // unwinding or otherwise exiting the thread cleanly.
rterrln!("thread panicked while panicking. aborting.");
unsafe { intrinsics::abort() }
}
rust_panic(msg);
}
-/// Register a callback to be invoked when a task unwinds.
+/// Register a callback to be invoked when a thread unwinds.
///
/// This is an unsafe and experimental API which allows for an arbitrary
-/// callback to be invoked when a task panics. This callback is invoked on both
+/// callback to be invoked when a thread panics. This callback is invoked on both
/// the initial unwinding and a double unwinding if one occurs. Additionally,
/// the local `Task` will be in place for the duration of the callback, and
/// the callback must ensure that it remains in place once the callback returns.
}
impl<'a> FormatWriter for BufWriter<'a> {
fn write(&mut self, bytes: &[u8]) -> fmt::Result {
- let left = self.buf[mut self.pos..];
+ let left = self.buf.slice_from_mut(self.pos);
let to_write = bytes[..cmp::min(bytes.len(), left.len())];
slice::bytes::copy_memory(left, to_write);
self.pos += to_write.len();
/// Barriers are re-usable after all threads have rendezvoused once, and can
/// be used continuously.
pub fn wait(&self) {
- let mut lock = self.lock.lock();
+ let mut lock = self.lock.lock().unwrap();
let local_gen = lock.generation_id;
lock.count += 1;
if lock.count < self.num_threads {
// http://en.wikipedia.org/wiki/Spurious_wakeup
while local_gen == lock.generation_id &&
lock.count < self.num_threads {
- self.cvar.wait(&lock);
+ lock = self.cvar.wait(lock).unwrap();
}
} else {
lock.count = 0;
use prelude::*;
use sync::atomic::{mod, AtomicUint};
-use sync::{mutex, StaticMutexGuard};
+use sync::poison::{mod, LockResult};
use sys_common::condvar as sys;
use sys_common::mutex as sys_mutex;
use time::Duration;
+use sync::{mutex, MutexGuard};
/// A Condition Variable
///
/// // Inside of our lock, spawn a new thread, and then wait for it to start
/// Thread::spawn(move|| {
/// let &(ref lock, ref cvar) = &*pair2;
-/// let mut started = lock.lock();
+/// let mut started = lock.lock().unwrap();
/// *started = true;
/// cvar.notify_one();
/// }).detach();
///
/// // wait for the thread to start up
/// let &(ref lock, ref cvar) = &*pair;
-/// let started = lock.lock();
+/// let mut started = lock.lock().unwrap();
/// while !*started {
-/// cvar.wait(&started);
+/// started = cvar.wait(started).unwrap();
/// }
/// ```
+#[stable]
pub struct Condvar { inner: Box<StaticCondvar> }
unsafe impl Send for Condvar {}
///
/// static CVAR: StaticCondvar = CONDVAR_INIT;
/// ```
+#[unstable = "may be merged with Condvar in the future"]
pub struct StaticCondvar {
inner: sys::Condvar,
mutex: AtomicUint,
unsafe impl Sync for StaticCondvar {}
/// Constant initializer for a statically allocated condition variable.
+#[unstable = "may be merged with Condvar in the future"]
pub const CONDVAR_INIT: StaticCondvar = StaticCondvar {
inner: sys::CONDVAR_INIT,
mutex: atomic::INIT_ATOMIC_UINT,
};
-/// A trait for vaules which can be passed to the waiting methods of condition
-/// variables. This is implemented by the mutex guards in this module.
-///
-/// Note that this trait should likely not be implemented manually unless you
-/// really know what you're doing.
-pub trait AsMutexGuard {
- #[allow(missing_docs)]
- unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard;
-}
-
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
/// notified.
+ #[stable]
pub fn new() -> Condvar {
Condvar {
inner: box StaticCondvar {
/// notification.
///
/// This function will atomically unlock the mutex specified (represented by
- /// `guard`) and block the current thread. This means that any calls to
- /// `notify_*()` which happen logically after the mutex is unlocked are
+    /// `guard`) and block the current thread. This means that any calls
+ /// to `notify_*()` which happen logically after the mutex is unlocked are
/// candidates to wake this thread up. When this function call returns, the
/// lock specified will have been re-acquired.
///
/// the predicate must always be checked each time this function returns to
/// protect against spurious wakeups.
///
+ /// # Failure
+ ///
+ /// This function will return an error if the mutex being waited on is
+ /// poisoned when this thread re-acquires the lock. For more information,
+ /// see information about poisoning on the Mutex type.
+ ///
/// # Panics
///
/// This function will `panic!()` if it is used with more than one mutex
/// over time. Each condition variable is dynamically bound to exactly one
/// mutex to ensure defined behavior across platforms. If this functionality
/// is not desired, then unsafe primitives in `sys` are provided.
- pub fn wait<T: AsMutexGuard>(&self, mutex_guard: &T) {
+ #[stable]
+ pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>)
+ -> LockResult<MutexGuard<'a, T>> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
- me.inner.wait(mutex_guard)
+ me.inner.wait(guard)
}
}
// provide. There are also additional concerns about the unix-specific
// implementation which may need to be addressed.
#[allow(dead_code)]
- fn wait_timeout<T: AsMutexGuard>(&self, mutex_guard: &T,
- dur: Duration) -> bool {
+ fn wait_timeout<'a, T>(&self, guard: MutexGuard<'a, T>, dur: Duration)
+ -> LockResult<(MutexGuard<'a, T>, bool)> {
unsafe {
let me: &'static Condvar = &*(self as *const _);
- me.inner.wait_timeout(mutex_guard, dur)
+ me.inner.wait_timeout(guard, dur)
}
}
/// `notify_one` are not buffered in any way.
///
/// To wake up all threads, see `notify_one()`.
+ #[stable]
pub fn notify_one(&self) { unsafe { self.inner.inner.notify_one() } }
/// Wake up all blocked threads on this condvar.
/// way.
///
/// To wake up only one thread, see `notify_one()`.
+ #[stable]
pub fn notify_all(&self) { unsafe { self.inner.inner.notify_all() } }
}
/// notification.
///
/// See `Condvar::wait`.
- pub fn wait<T: AsMutexGuard>(&'static self, mutex_guard: &T) {
- unsafe {
- let lock = mutex_guard.as_mutex_guard();
- let sys = mutex::guard_lock(lock);
- self.verify(sys);
- self.inner.wait(sys);
- (*mutex::guard_poison(lock)).check("mutex");
+ #[unstable = "may be merged with Condvar in the future"]
+ pub fn wait<'a, T>(&'static self, guard: MutexGuard<'a, T>)
+ -> LockResult<MutexGuard<'a, T>> {
+ let poisoned = unsafe {
+ let lock = mutex::guard_lock(&guard);
+ self.verify(lock);
+ self.inner.wait(lock);
+ mutex::guard_poison(&guard).get()
+ };
+ if poisoned {
+ Err(poison::new_poison_error(guard))
+ } else {
+ Ok(guard)
}
}
///
/// See `Condvar::wait_timeout`.
#[allow(dead_code)] // may want to stabilize this later, see wait_timeout above
- fn wait_timeout<T: AsMutexGuard>(&'static self, mutex_guard: &T,
- dur: Duration) -> bool {
- unsafe {
- let lock = mutex_guard.as_mutex_guard();
- let sys = mutex::guard_lock(lock);
- self.verify(sys);
- let ret = self.inner.wait_timeout(sys, dur);
- (*mutex::guard_poison(lock)).check("mutex");
- return ret;
+ fn wait_timeout<'a, T>(&'static self, guard: MutexGuard<'a, T>, dur: Duration)
+ -> LockResult<(MutexGuard<'a, T>, bool)> {
+ let (poisoned, success) = unsafe {
+ let lock = mutex::guard_lock(&guard);
+ self.verify(lock);
+ let success = self.inner.wait_timeout(lock, dur);
+ (mutex::guard_poison(&guard).get(), success)
+ };
+ if poisoned {
+ Err(poison::new_poison_error((guard, success)))
+ } else {
+ Ok((guard, success))
}
}
/// Wake up one blocked thread on this condvar.
///
/// See `Condvar::notify_one`.
+ #[unstable = "may be merged with Condvar in the future"]
pub fn notify_one(&'static self) { unsafe { self.inner.notify_one() } }
/// Wake up all blocked threads on this condvar.
///
/// See `Condvar::notify_all`.
+ #[unstable = "may be merged with Condvar in the future"]
pub fn notify_all(&'static self) { unsafe { self.inner.notify_all() } }
/// Deallocate all resources associated with this static condvar.
/// active users of the condvar, and this also doesn't prevent any future
/// users of the condvar. This method is required to be called to not leak
/// memory on all platforms.
+ #[unstable = "may be merged with Condvar in the future"]
pub unsafe fn destroy(&'static self) {
self.inner.destroy()
}
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
- let g = M.lock();
+ let g = M.lock().unwrap();
spawn(move|| {
- let _g = M.lock();
+ let _g = M.lock().unwrap();
C.notify_one();
});
- C.wait(&g);
+ let g = C.wait(g).unwrap();
drop(g);
unsafe { C.destroy(); M.destroy(); }
}
let tx = tx.clone();
spawn(move|| {
let &(ref lock, ref cond) = &*data;
- let mut cnt = lock.lock();
+ let mut cnt = lock.lock().unwrap();
*cnt += 1;
if *cnt == N {
tx.send(());
}
while *cnt != 0 {
- cond.wait(&cnt);
+ cnt = cond.wait(cnt).unwrap();
}
tx.send(());
});
let &(ref lock, ref cond) = &*data;
rx.recv();
- let mut cnt = lock.lock();
+ let mut cnt = lock.lock().unwrap();
*cnt = 0;
cond.notify_all();
drop(cnt);
static C: StaticCondvar = CONDVAR_INIT;
static M: StaticMutex = MUTEX_INIT;
- let g = M.lock();
- assert!(!C.wait_timeout(&g, Duration::nanoseconds(1000)));
+ let g = M.lock().unwrap();
+ let (g, success) = C.wait_timeout(g, Duration::nanoseconds(1000)).unwrap();
+ assert!(!success);
spawn(move|| {
- let _g = M.lock();
+ let _g = M.lock().unwrap();
C.notify_one();
});
- assert!(C.wait_timeout(&g, Duration::days(1)));
+ let (g, success) = C.wait_timeout(g, Duration::days(1)).unwrap();
+ assert!(success);
drop(g);
unsafe { C.destroy(); M.destroy(); }
}
static M2: StaticMutex = MUTEX_INIT;
static C: StaticCondvar = CONDVAR_INIT;
- let g = M1.lock();
+ let mut g = M1.lock().unwrap();
spawn(move|| {
- let _g = M1.lock();
+ let _g = M1.lock().unwrap();
C.notify_one();
});
- C.wait(&g);
+ g = C.wait(g).unwrap();
drop(g);
- C.wait(&M2.lock());
+ C.wait(M2.lock().unwrap()).unwrap();
}
}
pub use alloc::arc::{Arc, Weak};
-pub use self::mutex::{Mutex, MutexGuard, StaticMutex, StaticMutexGuard, MUTEX_INIT};
+pub use self::mutex::{Mutex, MutexGuard, StaticMutex};
+pub use self::mutex::MUTEX_INIT;
pub use self::rwlock::{RWLock, StaticRWLock, RWLOCK_INIT};
pub use self::rwlock::{RWLockReadGuard, RWLockWriteGuard};
-pub use self::rwlock::{StaticRWLockReadGuard, StaticRWLockWriteGuard};
-pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT, AsMutexGuard};
+pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT};
pub use self::once::{Once, ONCE_INIT};
pub use self::semaphore::{Semaphore, SemaphoreGuard};
pub use self::barrier::Barrier;
+pub use self::poison::{PoisonError, TryLockError, TryLockResult, LockResult};
pub use self::future::Future;
pub use self::task_pool::TaskPool;
use cell::UnsafeCell;
use kinds::marker;
-use sync::{poison, AsMutexGuard};
+use sync::poison::{mod, TryLockError, TryLockResult, LockResult};
use sys_common::mutex as sys;
/// A mutual exclusion primitive useful for protecting shared data
///
/// # Poisoning
///
-/// In order to prevent access to otherwise invalid data, each mutex will
-/// propagate any panics which occur while the lock is held. Once a thread has
-/// panicked while holding the lock, then all other threads will immediately
-/// panic as well once they hold the lock.
+/// The mutexes in this module implement a strategy called "poisoning" where a
+/// mutex is considered poisoned whenever a thread panics while holding the
+/// lock. Once a mutex is poisoned, all other tasks are unable to access the
+/// data by default as it is likely tainted (some invariant is not being
+/// upheld).
///
-/// # Example
+/// For a mutex, this means that the `lock` and `try_lock` methods return a
+/// `Result` which indicates whether a mutex has been poisoned or not. Most
+/// usage of a mutex will simply `unwrap()` these results, propagating panics
+/// among threads to ensure that a possibly invalid invariant is not witnessed.
+///
+/// A poisoned mutex, however, does not prevent all access to the underlying
+/// data. The `PoisonError` type has an `into_guard` method which will return
+/// the guard that would have otherwise been returned on a successful lock. This
+/// allows access to the data, despite the lock being poisoned.
+///
+/// # Examples
///
/// ```rust
/// use std::sync::{Arc, Mutex};
/// let (tx, rx) = channel();
/// for _ in range(0u, 10) {
/// let (data, tx) = (data.clone(), tx.clone());
-/// Thread::spawn(move|| {
+/// Thread::spawn(move || {
/// // The shared static can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
/// // which can access the shared state when the lock is held.
-/// let mut data = data.lock();
+/// //
+/// // We unwrap() the return value to assert that we are not expecting
+///         // threads to ever fail while holding the lock.
+/// let mut data = data.lock().unwrap();
/// *data += 1;
/// if *data == N {
/// tx.send(());
///
/// rx.recv();
/// ```
+///
+/// To recover from a poisoned mutex:
+///
+/// ```rust
+/// use std::sync::{Arc, Mutex};
+/// use std::thread::Thread;
+///
+/// let lock = Arc::new(Mutex::new(0u));
+/// let lock2 = lock.clone();
+///
+/// let _ = Thread::spawn(move || -> () {
+/// // This thread will acquire the mutex first, unwrapping the result of
+/// // `lock` because the lock has not been poisoned.
+/// let _lock = lock2.lock().unwrap();
+///
+///     // This panic while holding the lock (`_lock` is in scope) will poison
+/// // the mutex.
+/// panic!();
+/// }).join();
+///
+/// // The lock is poisoned by this point, but the returned result can be
+/// // pattern matched on to return the underlying guard on both branches.
+/// let mut guard = match lock.lock() {
+/// Ok(guard) => guard,
+/// Err(poisoned) => poisoned.into_guard(),
+/// };
+///
+/// *guard += 1;
+/// ```
+#[stable]
pub struct Mutex<T> {
// Note that this static mutex is in a *box*, not inlined into the struct
// itself. Once a native mutex has been used once, its address can never
/// static LOCK: StaticMutex = MUTEX_INIT;
///
/// {
-/// let _g = LOCK.lock();
+/// let _g = LOCK.lock().unwrap();
/// // do some productive work
/// }
/// // lock is unlocked here.
/// ```
+#[unstable = "may be merged with Mutex in the future"]
pub struct StaticMutex {
lock: sys::Mutex,
- poison: UnsafeCell<poison::Flag>,
+ poison: poison::Flag,
}
unsafe impl Sync for StaticMutex {}
/// The data protected by the mutex can be access through this guard via its
/// Deref and DerefMut implementations
#[must_use]
+#[stable]
pub struct MutexGuard<'a, T: 'a> {
// funny underscores due to how Deref/DerefMut currently work (they
// disregard field privacy).
- __lock: &'a Mutex<T>,
- __guard: StaticMutexGuard,
-}
-
-/// An RAII implementation of a "scoped lock" of a static mutex. When this
-/// structure is dropped (falls out of scope), the lock will be unlocked.
-#[must_use]
-pub struct StaticMutexGuard {
- lock: &'static sys::Mutex,
- marker: marker::NoSend,
- poison: poison::Guard<'static>,
+ __lock: &'a StaticMutex,
+ __data: &'a UnsafeCell<T>,
+ __poison: poison::Guard,
+ __marker: marker::NoSend,
}
/// Static initialization of a mutex. This constant can be used to initialize
/// other mutex constants.
+#[unstable = "may be merged with Mutex in the future"]
pub const MUTEX_INIT: StaticMutex = StaticMutex {
lock: sys::MUTEX_INIT,
- poison: UnsafeCell { value: poison::Flag { failed: false } },
+ poison: poison::FLAG_INIT,
};
impl<T: Send> Mutex<T> {
/// Creates a new mutex in an unlocked state ready for use.
+ #[stable]
pub fn new(t: T) -> Mutex<T> {
Mutex {
inner: box MUTEX_INIT,
/// held. An RAII guard is returned to allow scoped unlock of the lock. When
/// the guard goes out of scope, the mutex will be unlocked.
///
- /// # Panics
+ /// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
- /// this call will immediately panic once the mutex is acquired.
- pub fn lock(&self) -> MutexGuard<T> {
- unsafe {
- let lock: &'static StaticMutex = &*(&*self.inner as *const _);
- MutexGuard::new(self, lock.lock())
- }
+ /// this call will return an error once the mutex is acquired.
+ #[stable]
+ pub fn lock(&self) -> LockResult<MutexGuard<T>> {
+ unsafe { self.inner.lock.lock() }
+ MutexGuard::new(&*self.inner, &self.data)
}
/// Attempts to acquire this lock.
///
/// This function does not block.
///
- /// # Panics
+ /// # Failure
///
/// If another user of this mutex panicked while holding the mutex, then
- /// this call will immediately panic if the mutex would otherwise be
+ /// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn try_lock(&self) -> Option<MutexGuard<T>> {
- unsafe {
- let lock: &'static StaticMutex = &*(&*self.inner as *const _);
- lock.try_lock().map(|guard| {
- MutexGuard::new(self, guard)
- })
+ #[stable]
+ pub fn try_lock(&self) -> TryLockResult<MutexGuard<T>> {
+ if unsafe { self.inner.lock.try_lock() } {
+ Ok(try!(MutexGuard::new(&*self.inner, &self.data)))
+ } else {
+ Err(TryLockError::WouldBlock)
}
}
}
}
}
+static DUMMY: UnsafeCell<()> = UnsafeCell { value: () };
+
impl StaticMutex {
/// Acquires this lock, see `Mutex::lock`
- pub fn lock(&'static self) -> StaticMutexGuard {
+ #[inline]
+ #[unstable = "may be merged with Mutex in the future"]
+ pub fn lock(&'static self) -> LockResult<MutexGuard<()>> {
unsafe { self.lock.lock() }
- StaticMutexGuard::new(self)
+ MutexGuard::new(self, &DUMMY)
}
/// Attempts to grab this lock, see `Mutex::try_lock`
- pub fn try_lock(&'static self) -> Option<StaticMutexGuard> {
+ #[inline]
+ #[unstable = "may be merged with Mutex in the future"]
+ pub fn try_lock(&'static self) -> TryLockResult<MutexGuard<()>> {
if unsafe { self.lock.try_lock() } {
- Some(StaticMutexGuard::new(self))
+ Ok(try!(MutexGuard::new(self, &DUMMY)))
} else {
- None
+ Err(TryLockError::WouldBlock)
}
}
/// *all* platforms. It may be the case that some platforms do not leak
/// memory if this method is not called, but this is not guaranteed to be
/// true on all platforms.
+ #[unstable = "may be merged with Mutex in the future"]
pub unsafe fn destroy(&'static self) {
self.lock.destroy()
}
}
impl<'mutex, T> MutexGuard<'mutex, T> {
- fn new(lock: &Mutex<T>, guard: StaticMutexGuard) -> MutexGuard<T> {
- MutexGuard { __lock: lock, __guard: guard }
+ fn new(lock: &'mutex StaticMutex, data: &'mutex UnsafeCell<T>)
+ -> LockResult<MutexGuard<'mutex, T>> {
+ poison::map_result(lock.poison.borrow(), |guard| {
+ MutexGuard {
+ __lock: lock,
+ __data: data,
+ __poison: guard,
+ __marker: marker::NoSend,
+ }
+ })
}
}
-impl<'mutex, T> AsMutexGuard for MutexGuard<'mutex, T> {
- unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard { &self.__guard }
-}
-
impl<'mutex, T> Deref<T> for MutexGuard<'mutex, T> {
- fn deref<'a>(&'a self) -> &'a T { unsafe { &*self.__lock.data.get() } }
+ fn deref<'a>(&'a self) -> &'a T {
+ unsafe { &*self.__data.get() }
+ }
}
impl<'mutex, T> DerefMut<T> for MutexGuard<'mutex, T> {
fn deref_mut<'a>(&'a mut self) -> &'a mut T {
- unsafe { &mut *self.__lock.data.get() }
+ unsafe { &mut *self.__data.get() }
}
}
-impl StaticMutexGuard {
- fn new(lock: &'static StaticMutex) -> StaticMutexGuard {
+#[unsafe_destructor]
+impl<'a, T> Drop for MutexGuard<'a, T> {
+ #[inline]
+ fn drop(&mut self) {
unsafe {
- let guard = StaticMutexGuard {
- lock: &lock.lock,
- marker: marker::NoSend,
- poison: (*lock.poison.get()).borrow(),
- };
- guard.poison.check("mutex");
- return guard;
+ self.__lock.poison.done(&self.__poison);
+ self.__lock.lock.unlock();
}
}
}
-pub fn guard_lock(guard: &StaticMutexGuard) -> &sys::Mutex { guard.lock }
-pub fn guard_poison(guard: &StaticMutexGuard) -> &poison::Guard {
- &guard.poison
+pub fn guard_lock<'a, T>(guard: &MutexGuard<'a, T>) -> &'a sys::Mutex {
+ &guard.__lock.lock
}
-impl AsMutexGuard for StaticMutexGuard {
- unsafe fn as_mutex_guard(&self) -> &StaticMutexGuard { self }
-}
-
-#[unsafe_destructor]
-impl Drop for StaticMutexGuard {
- fn drop(&mut self) {
- unsafe {
- self.poison.done();
- self.lock.unlock();
- }
- }
+pub fn guard_poison<'a, T>(guard: &MutexGuard<'a, T>) -> &'a poison::Flag {
+ &guard.__lock.poison
}
#[cfg(test)]
#[test]
fn smoke() {
let m = Mutex::new(());
- drop(m.lock());
- drop(m.lock());
+ drop(m.lock().unwrap());
+ drop(m.lock().unwrap());
}
#[test]
fn smoke_static() {
static M: StaticMutex = MUTEX_INIT;
unsafe {
- drop(M.lock());
- drop(M.lock());
+ drop(M.lock().unwrap());
+ drop(M.lock().unwrap());
M.destroy();
}
}
fn inc() {
for _ in range(0, J) {
unsafe {
- let _g = M.lock();
+ let _g = M.lock().unwrap();
CNT += 1;
}
}
#[test]
fn try_lock() {
let m = Mutex::new(());
- assert!(m.try_lock().is_some());
+ *m.try_lock().unwrap() = ();
}
#[test]
// wait until parent gets in
rx.recv();
let &(ref lock, ref cvar) = &*packet2.0;
- let mut lock = lock.lock();
+ let mut lock = lock.lock().unwrap();
*lock = true;
cvar.notify_one();
});
let &(ref lock, ref cvar) = &*packet.0;
- let lock = lock.lock();
+ let mut lock = lock.lock().unwrap();
tx.send(());
assert!(!*lock);
while !*lock {
- cvar.wait(&lock);
+ lock = cvar.wait(lock).unwrap();
}
}
#[test]
- #[should_fail]
fn test_arc_condvar_poison() {
let packet = Packet(Arc::new((Mutex::new(1i), Condvar::new())));
let packet2 = Packet(packet.0.clone());
spawn(move|| {
rx.recv();
let &(ref lock, ref cvar) = &*packet2.0;
- let _g = lock.lock();
+ let _g = lock.lock().unwrap();
cvar.notify_one();
// Parent should fail when it wakes up.
panic!();
});
let &(ref lock, ref cvar) = &*packet.0;
- let lock = lock.lock();
+ let mut lock = lock.lock().unwrap();
tx.send(());
while *lock == 1 {
- cvar.wait(&lock);
+ match cvar.wait(lock) {
+ Ok(l) => {
+ lock = l;
+ assert_eq!(*lock, 1);
+ }
+ Err(..) => break,
+ }
}
}
#[test]
- #[should_fail]
fn test_mutex_arc_poison() {
let arc = Arc::new(Mutex::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.lock();
+ Thread::spawn(move|| {
+ let lock = arc2.lock().unwrap();
assert_eq!(*lock, 2);
}).join();
- let lock = arc.lock();
- assert_eq!(*lock, 1);
+ assert!(arc.lock().is_err());
}
#[test]
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
spawn(move|| {
- let lock = arc2.lock();
- let lock2 = lock.deref().lock();
+ let lock = arc2.lock().unwrap();
+ let lock2 = lock.deref().lock().unwrap();
assert_eq!(*lock2, 1);
tx.send(());
});
}
impl Drop for Unwinder {
fn drop(&mut self) {
- *self.i.lock() += 1;
+ *self.i.lock().unwrap() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
- let lock = arc.lock();
+ let lock = arc.lock().unwrap();
assert_eq!(*lock, 2);
}
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use prelude::*;
+
+use cell::UnsafeCell;
+use error::FromError;
+use fmt;
use thread::Thread;
-pub struct Flag { pub failed: bool }
+pub struct Flag { failed: UnsafeCell<bool> }
+pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } };
impl Flag {
- pub fn borrow(&mut self) -> Guard {
- Guard { flag: &mut self.failed, panicking: Thread::panicking() }
+ #[inline]
+ pub fn borrow(&self) -> LockResult<Guard> {
+ let ret = Guard { panicking: Thread::panicking() };
+ if unsafe { *self.failed.get() } {
+ Err(new_poison_error(ret))
+ } else {
+ Ok(ret)
+ }
+ }
+
+ #[inline]
+ pub fn done(&self, guard: &Guard) {
+ if !guard.panicking && Thread::panicking() {
+ unsafe { *self.failed.get() = true; }
+ }
+ }
+
+ #[inline]
+ pub fn get(&self) -> bool {
+ unsafe { *self.failed.get() }
}
}
-pub struct Guard<'a> {
- flag: &'a mut bool,
+#[allow(missing_copy_implementations)]
+pub struct Guard {
panicking: bool,
}
-impl<'a> Guard<'a> {
- pub fn check(&self, name: &str) {
- if *self.flag {
- panic!("poisoned {} - another task failed inside", name);
- }
+/// A type of error which can be returned whenever a lock is acquired.
+///
+/// Both Mutexes and RWLocks are poisoned whenever a task fails while the lock
+/// is held. The precise semantics for when a lock is poisoned is documented on
+/// each lock, but once a lock is poisoned then all future acquisitions will
+/// return this error.
+#[stable]
+pub struct PoisonError<T> {
+ guard: T,
+}
+
+/// An enumeration of possible errors which can occur while calling the
+/// `try_lock` method.
+#[stable]
+pub enum TryLockError<T> {
+ /// The lock could not be acquired because another task failed while holding
+ /// the lock.
+ #[stable]
+ Poisoned(PoisonError<T>),
+ /// The lock could not be acquired at this time because the operation would
+ /// otherwise block.
+ #[stable]
+ WouldBlock,
+}
+
+/// A type alias for the result of a lock method which can be poisoned.
+///
+/// The `Ok` variant of this result indicates that the primitive was not
+/// poisoned, and the `Guard` is contained within. The `Err` variant indicates
+/// that the primitive was poisoned. Note that the `Err` variant *also* carries
+/// the associated guard, and it can be acquired through the `into_inner`
+/// method.
+#[stable]
+pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
+
+/// A type alias for the result of a nonblocking locking method.
+///
+/// For more information, see `LockResult`. A `TryLockResult` doesn't
+/// necessarily hold the associated guard in the `Err` type as the lock may not
+/// have been acquired for other reasons.
+#[stable]
+pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
+
+impl<T> fmt::Show for PoisonError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ "poisoned lock: another task failed inside".fmt(f)
+ }
+}
+
+impl<T> PoisonError<T> {
+ /// Consumes this error indicating that a lock is poisoned, returning the
+ /// underlying guard to allow access regardless.
+ #[stable]
+ pub fn into_guard(self) -> T { self.guard }
+}
+
+impl<T> FromError<PoisonError<T>> for TryLockError<T> {
+ fn from_error(err: PoisonError<T>) -> TryLockError<T> {
+ TryLockError::Poisoned(err)
}
+}
- pub fn done(&mut self) {
- if !self.panicking && Thread::panicking() {
- *self.flag = true;
+impl<T> fmt::Show for TryLockError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ TryLockError::Poisoned(ref p) => p.fmt(f),
+ TryLockError::WouldBlock => {
+ "try_lock failed because the operation would block".fmt(f)
+ }
}
}
}
+
+pub fn new_poison_error<T>(guard: T) -> PoisonError<T> {
+ PoisonError { guard: guard }
+}
+
+pub fn map_result<T, U, F>(result: LockResult<T>, f: F)
+ -> LockResult<U>
+ where F: FnOnce(T) -> U {
+ match result {
+ Ok(t) => Ok(f(t)),
+ Err(PoisonError { guard }) => Err(new_poison_error(f(guard)))
+ }
+}
use prelude::*;
-use kinds::marker;
use cell::UnsafeCell;
+use kinds::marker;
+use sync::poison::{mod, LockResult, TryLockError, TryLockResult};
use sys_common::rwlock as sys;
-use sync::poison;
/// A reader-writer lock
///
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contained of the lock.
///
+/// # Poisoning
+///
/// RWLocks, like Mutexes, will become poisoned on panics. Note, however, that
/// an RWLock may only be poisoned if a panic occurs while it is locked
/// exclusively (write mode). If a panic occurs in any reader, then the lock
/// will not be poisoned.
///
-/// # Example
+/// # Examples
///
/// ```
/// use std::sync::RWLock;
///
/// // many reader locks can be held at once
/// {
-/// let r1 = lock.read();
-/// let r2 = lock.read();
+/// let r1 = lock.read().unwrap();
+/// let r2 = lock.read().unwrap();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
-/// let mut w = lock.write();
+/// let mut w = lock.write().unwrap();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
+#[stable]
pub struct RWLock<T> {
inner: Box<StaticRWLock>,
data: UnsafeCell<T>,
/// static LOCK: StaticRWLock = RWLOCK_INIT;
///
/// {
-/// let _g = LOCK.read();
+/// let _g = LOCK.read().unwrap();
/// // ... shared read access
/// }
/// {
-/// let _g = LOCK.write();
+/// let _g = LOCK.write().unwrap();
/// // ... exclusive write access
/// }
/// unsafe { LOCK.destroy() } // free all resources
/// ```
+#[unstable = "may be merged with RWLock in the future"]
pub struct StaticRWLock {
- inner: sys::RWLock,
- poison: UnsafeCell<poison::Flag>,
+ lock: sys::RWLock,
+ poison: poison::Flag,
}
unsafe impl Send for StaticRWLock {}
unsafe impl Sync for StaticRWLock {}
/// Constant initialization for a statically-initialized rwlock.
+#[unstable = "may be merged with RWLock in the future"]
pub const RWLOCK_INIT: StaticRWLock = StaticRWLock {
- inner: sys::RWLOCK_INIT,
- poison: UnsafeCell { value: poison::Flag { failed: false } },
+ lock: sys::RWLOCK_INIT,
+ poison: poison::FLAG_INIT,
};
/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use]
+#[stable]
pub struct RWLockReadGuard<'a, T: 'a> {
- __lock: &'a RWLock<T>,
- __guard: StaticRWLockReadGuard,
+ __lock: &'a StaticRWLock,
+ __data: &'a UnsafeCell<T>,
+ __marker: marker::NoSend,
}
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use]
+#[stable]
pub struct RWLockWriteGuard<'a, T: 'a> {
- __lock: &'a RWLock<T>,
- __guard: StaticRWLockWriteGuard,
-}
-
-/// RAII structure used to release the shared read access of a lock when
-/// dropped.
-#[must_use]
-pub struct StaticRWLockReadGuard {
- lock: &'static sys::RWLock,
- marker: marker::NoSend,
-}
-
-/// RAII structure used to release the exclusive write access of a lock when
-/// dropped.
-#[must_use]
-pub struct StaticRWLockWriteGuard {
- lock: &'static sys::RWLock,
- marker: marker::NoSend,
- poison: poison::Guard<'static>,
+ __lock: &'a StaticRWLock,
+ __data: &'a UnsafeCell<T>,
+ __poison: poison::Guard,
+ __marker: marker::NoSend,
}
impl<T: Send + Sync> RWLock<T> {
/// Creates a new instance of an RWLock which is unlocked and read to go.
+ #[stable]
pub fn new(t: T) -> RWLock<T> {
RWLock { inner: box RWLOCK_INIT, data: UnsafeCell::new(t) }
}
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. The
- /// panic will occur immediately after the lock has been acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock.
+ /// The failure will occur immediately after the lock has been acquired.
#[inline]
- pub fn read(&self) -> RWLockReadGuard<T> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- RWLockReadGuard::new(self, lock.read())
- }
+ #[stable]
+ pub fn read(&self) -> LockResult<RWLockReadGuard<T>> {
+ unsafe { self.inner.lock.read() }
+ RWLockReadGuard::new(&*self.inner, &self.data)
}
/// Attempt to acquire this lock with shared read access.
/// guarantees with respect to the ordering of whether contentious readers
/// or writers will acquire the lock first.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. A
- /// panic will only occur if the lock is acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock. An
+ /// error will only be returned if the lock would have otherwise been
+ /// acquired.
#[inline]
- pub fn try_read(&self) -> Option<RWLockReadGuard<T>> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- lock.try_read().map(|guard| {
- RWLockReadGuard::new(self, guard)
- })
+ #[stable]
+ pub fn try_read(&self) -> TryLockResult<RWLockReadGuard<T>> {
+ if unsafe { self.inner.lock.try_read() } {
+ Ok(try!(RWLockReadGuard::new(&*self.inner, &self.data)))
+ } else {
+ Err(TryLockError::WouldBlock)
}
}
/// Returns an RAII guard which will drop the write access of this rwlock
/// when dropped.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. The
- /// panic will occur when the lock is acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock.
+ /// An error will be returned when the lock is acquired.
#[inline]
- pub fn write(&self) -> RWLockWriteGuard<T> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- RWLockWriteGuard::new(self, lock.write())
- }
+ #[stable]
+ pub fn write(&self) -> LockResult<RWLockWriteGuard<T>> {
+ unsafe { self.inner.lock.write() }
+ RWLockWriteGuard::new(&*self.inner, &self.data)
}
/// Attempt to lock this rwlock with exclusive write access.
/// to `write` would otherwise block. If successful, an RAII guard is
/// returned.
///
- /// # Panics
+ /// # Failure
///
- /// This function will panic if the RWLock is poisoned. An RWLock is
- /// poisoned whenever a writer panics while holding an exclusive lock. A
- /// panic will only occur if the lock is acquired.
+ /// This function will return an error if the RWLock is poisoned. An RWLock
+ /// is poisoned whenever a writer panics while holding an exclusive lock. An
+ /// error will only be returned if the lock would have otherwise been
+ /// acquired.
#[inline]
- pub fn try_write(&self) -> Option<RWLockWriteGuard<T>> {
- unsafe {
- let lock: &'static StaticRWLock = &*(&*self.inner as *const _);
- lock.try_write().map(|guard| {
- RWLockWriteGuard::new(self, guard)
- })
+ #[stable]
+ pub fn try_write(&self) -> TryLockResult<RWLockWriteGuard<T>> {
+        if unsafe { self.inner.lock.try_write() } {
+ Ok(try!(RWLockWriteGuard::new(&*self.inner, &self.data)))
+ } else {
+ Err(TryLockError::WouldBlock)
}
}
}
#[unsafe_destructor]
impl<T> Drop for RWLock<T> {
fn drop(&mut self) {
- unsafe { self.inner.inner.destroy() }
+ unsafe { self.inner.lock.destroy() }
}
}
+static DUMMY: UnsafeCell<()> = UnsafeCell { value: () };
+
impl StaticRWLock {
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
/// See `RWLock::read`.
#[inline]
- pub fn read(&'static self) -> StaticRWLockReadGuard {
- unsafe { self.inner.read() }
- StaticRWLockReadGuard::new(self)
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn read(&'static self) -> LockResult<RWLockReadGuard<'static, ()>> {
+ unsafe { self.lock.read() }
+ RWLockReadGuard::new(self, &DUMMY)
}
/// Attempt to acquire this lock with shared read access.
///
/// See `RWLock::try_read`.
#[inline]
- pub fn try_read(&'static self) -> Option<StaticRWLockReadGuard> {
- if unsafe { self.inner.try_read() } {
- Some(StaticRWLockReadGuard::new(self))
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn try_read(&'static self)
+ -> TryLockResult<RWLockReadGuard<'static, ()>> {
+ if unsafe { self.lock.try_read() } {
+ Ok(try!(RWLockReadGuard::new(self, &DUMMY)))
} else {
- None
+ Err(TryLockError::WouldBlock)
}
}
///
/// See `RWLock::write`.
#[inline]
- pub fn write(&'static self) -> StaticRWLockWriteGuard {
- unsafe { self.inner.write() }
- StaticRWLockWriteGuard::new(self)
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn write(&'static self) -> LockResult<RWLockWriteGuard<'static, ()>> {
+ unsafe { self.lock.write() }
+ RWLockWriteGuard::new(self, &DUMMY)
}
/// Attempt to lock this rwlock with exclusive write access.
///
/// See `RWLock::try_write`.
#[inline]
- pub fn try_write(&'static self) -> Option<StaticRWLockWriteGuard> {
- if unsafe { self.inner.try_write() } {
- Some(StaticRWLockWriteGuard::new(self))
+ #[unstable = "may be merged with RWLock in the future"]
+ pub fn try_write(&'static self)
+ -> TryLockResult<RWLockWriteGuard<'static, ()>> {
+ if unsafe { self.lock.try_write() } {
+ Ok(try!(RWLockWriteGuard::new(self, &DUMMY)))
} else {
- None
+ Err(TryLockError::WouldBlock)
}
}
/// active users of the lock, and this also doesn't prevent any future users
/// of this lock. This method is required to be called to not leak memory on
/// all platforms.
+ #[unstable = "may be merged with RWLock in the future"]
pub unsafe fn destroy(&'static self) {
- self.inner.destroy()
+ self.lock.destroy()
}
}
impl<'rwlock, T> RWLockReadGuard<'rwlock, T> {
- fn new(lock: &RWLock<T>, guard: StaticRWLockReadGuard)
- -> RWLockReadGuard<T> {
- RWLockReadGuard { __lock: lock, __guard: guard }
+ fn new(lock: &'rwlock StaticRWLock, data: &'rwlock UnsafeCell<T>)
+ -> LockResult<RWLockReadGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.borrow(), |_| {
+ RWLockReadGuard {
+ __lock: lock,
+ __data: data,
+ __marker: marker::NoSend,
+ }
+ })
}
}
impl<'rwlock, T> RWLockWriteGuard<'rwlock, T> {
- fn new(lock: &RWLock<T>, guard: StaticRWLockWriteGuard)
- -> RWLockWriteGuard<T> {
- RWLockWriteGuard { __lock: lock, __guard: guard }
+ fn new(lock: &'rwlock StaticRWLock, data: &'rwlock UnsafeCell<T>)
+ -> LockResult<RWLockWriteGuard<'rwlock, T>> {
+ poison::map_result(lock.poison.borrow(), |guard| {
+ RWLockWriteGuard {
+ __lock: lock,
+ __data: data,
+ __poison: guard,
+ __marker: marker::NoSend,
+ }
+ })
}
}
impl<'rwlock, T> Deref<T> for RWLockReadGuard<'rwlock, T> {
- fn deref(&self) -> &T { unsafe { &*self.__lock.data.get() } }
+ fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}
impl<'rwlock, T> Deref<T> for RWLockWriteGuard<'rwlock, T> {
- fn deref(&self) -> &T { unsafe { &*self.__lock.data.get() } }
+ fn deref(&self) -> &T { unsafe { &*self.__data.get() } }
}
impl<'rwlock, T> DerefMut<T> for RWLockWriteGuard<'rwlock, T> {
- fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.__lock.data.get() } }
-}
-
-impl StaticRWLockReadGuard {
- fn new(lock: &'static StaticRWLock) -> StaticRWLockReadGuard {
- let guard = StaticRWLockReadGuard {
- lock: &lock.inner,
- marker: marker::NoSend,
- };
- unsafe { (*lock.poison.get()).borrow().check("rwlock"); }
- return guard;
- }
-}
-impl StaticRWLockWriteGuard {
- fn new(lock: &'static StaticRWLock) -> StaticRWLockWriteGuard {
- unsafe {
- let guard = StaticRWLockWriteGuard {
- lock: &lock.inner,
- marker: marker::NoSend,
- poison: (*lock.poison.get()).borrow(),
- };
- guard.poison.check("rwlock");
- return guard;
- }
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.__data.get() }
}
}
#[unsafe_destructor]
-impl Drop for StaticRWLockReadGuard {
+impl<'a, T> Drop for RWLockReadGuard<'a, T> {
fn drop(&mut self) {
- unsafe { self.lock.read_unlock(); }
+ unsafe { self.__lock.lock.read_unlock(); }
}
}
#[unsafe_destructor]
-impl Drop for StaticRWLockWriteGuard {
+impl<'a, T> Drop for RWLockWriteGuard<'a, T> {
fn drop(&mut self) {
- self.poison.done();
- unsafe { self.lock.write_unlock(); }
+ self.__lock.poison.done(&self.__poison);
+ unsafe { self.__lock.lock.write_unlock(); }
}
}
#[test]
fn smoke() {
let l = RWLock::new(());
- drop(l.read());
- drop(l.write());
- drop((l.read(), l.read()));
- drop(l.write());
+ drop(l.read().unwrap());
+ drop(l.write().unwrap());
+ drop((l.read().unwrap(), l.read().unwrap()));
+ drop(l.write().unwrap());
}
#[test]
fn static_smoke() {
static R: StaticRWLock = RWLOCK_INIT;
- drop(R.read());
- drop(R.write());
- drop((R.read(), R.read()));
- drop(R.write());
+ drop(R.read().unwrap());
+ drop(R.write().unwrap());
+ drop((R.read().unwrap(), R.read().unwrap()));
+ drop(R.write().unwrap());
unsafe { R.destroy(); }
}
let mut rng = rand::task_rng();
for _ in range(0, M) {
if rng.gen_weighted_bool(N) {
- drop(R.write());
+ drop(R.write().unwrap());
} else {
- drop(R.read());
+ drop(R.read().unwrap());
}
}
drop(tx);
}
#[test]
- #[should_fail]
fn test_rw_arc_poison_wr() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.write();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.write().unwrap();
+ panic!();
}).join();
- let lock = arc.read();
- assert_eq!(*lock, 1);
+ assert!(arc.read().is_err());
}
#[test]
- #[should_fail]
fn test_rw_arc_poison_ww() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.write();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.write().unwrap();
+ panic!();
}).join();
- let lock = arc.write();
- assert_eq!(*lock, 1);
+ assert!(arc.write().is_err());
}
#[test]
fn test_rw_arc_no_poison_rr() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.read();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.read().unwrap();
+ panic!();
}).join();
- let lock = arc.read();
+ let lock = arc.read().unwrap();
assert_eq!(*lock, 1);
}
#[test]
fn test_rw_arc_no_poison_rw() {
let arc = Arc::new(RWLock::new(1i));
let arc2 = arc.clone();
- let _ = Thread::spawn(move|| {
- let lock = arc2.read();
- assert_eq!(*lock, 2);
+ let _: Result<uint, _> = Thread::spawn(move|| {
+ let _lock = arc2.read().unwrap();
+ panic!()
}).join();
- let lock = arc.write();
+ let lock = arc.write().unwrap();
assert_eq!(*lock, 1);
}
let (tx, rx) = channel();
Thread::spawn(move|| {
- let mut lock = arc2.write();
+ let mut lock = arc2.write().unwrap();
for _ in range(0u, 10) {
let tmp = *lock;
*lock = -1;
for _ in range(0u, 5) {
let arc3 = arc.clone();
children.push(Thread::spawn(move|| {
- let lock = arc3.read();
+ let lock = arc3.read().unwrap();
assert!(*lock >= 0);
}));
}
// Wait for writer to finish
rx.recv();
- let lock = arc.read();
+ let lock = arc.read().unwrap();
assert_eq!(*lock, 10);
}
}
impl Drop for Unwinder {
fn drop(&mut self) {
- let mut lock = self.i.write();
+ let mut lock = self.i.write().unwrap();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
- let lock = arc.read();
+ let lock = arc.read().unwrap();
assert_eq!(*lock, 2);
}
}
/// This method will block until the internal count of the semaphore is at
/// least 1.
pub fn acquire(&self) {
- let mut count = self.lock.lock();
+ let mut count = self.lock.lock().unwrap();
while *count <= 0 {
- self.cvar.wait(&count);
+ count = self.cvar.wait(count).unwrap();
}
*count -= 1;
}
/// This will increment the number of resources in this semaphore by 1 and
/// will notify any pending waiters in `acquire` or `access` if necessary.
pub fn release(&self) {
- *self.lock.lock() += 1;
+ *self.lock.lock().unwrap() += 1;
self.cvar.notify_one();
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! Abstraction of a task pool for basic parallelism.
+//! Abstraction of a thread pool for basic parallelism.
use core::prelude::*;
}
}
-/// A task pool used to execute functions in parallel.
+/// A thread pool used to execute functions in parallel.
///
-/// Spawns `n` worker tasks and replenishes the pool if any worker tasks
+/// Spawns `n` worker threads and replenishes the pool if any worker threads
/// panic.
///
/// # Example
/// assert_eq!(rx.iter().take(8u).sum(), 8u);
/// ```
pub struct TaskPool {
- // How the taskpool communicates with subtasks.
+ // How the threadpool communicates with subthreads.
//
- // This is the only such Sender, so when it is dropped all subtasks will
+ // This is the only such Sender, so when it is dropped all subthreads will
// quit.
jobs: Sender<Thunk>
}
impl TaskPool {
- /// Spawns a new task pool with `tasks` tasks.
+ /// Spawns a new thread pool with `threads` threads.
///
/// # Panics
///
- /// This function will panic if `tasks` is 0.
- pub fn new(tasks: uint) -> TaskPool {
- assert!(tasks >= 1);
+ /// This function will panic if `threads` is 0.
+ pub fn new(threads: uint) -> TaskPool {
+ assert!(threads >= 1);
let (tx, rx) = channel::<Thunk>();
let rx = Arc::new(Mutex::new(rx));
- // Taskpool tasks.
- for _ in range(0, tasks) {
+ // Threadpool threads
+ for _ in range(0, threads) {
spawn_in_pool(rx.clone());
}
TaskPool { jobs: tx }
}
- /// Executes the function `job` on a task in the pool.
+ /// Executes the function `job` on a thread in the pool.
pub fn execute<F>(&self, job: F)
where F : FnOnce(), F : Send
{
fn spawn_in_pool(jobs: Arc<Mutex<Receiver<Thunk>>>) {
Thread::spawn(move |:| {
- // Will spawn a new task on panic unless it is cancelled.
+ // Will spawn a new thread on panic unless it is cancelled.
let sentinel = Sentinel::new(&jobs);
loop {
let message = {
// Only lock jobs for the time it takes
// to get a job, not run it.
- let lock = jobs.lock();
+ let lock = jobs.lock().unwrap();
lock.recv_opt()
};
let pool = TaskPool::new(TEST_TASKS);
- // Panic all the existing tasks.
+ // Panic all the existing threads.
for _ in range(0, TEST_TASKS) {
pool.execute(move|| -> () { panic!() });
}
- // Ensure new tasks were spawned to compensate.
+ // Ensure new threads were spawned to compensate.
let (tx, rx) = channel();
for _ in range(0, TEST_TASKS) {
let tx = tx.clone();
let pool = TaskPool::new(TEST_TASKS);
let waiter = Arc::new(Barrier::new(TEST_TASKS + 1));
- // Panic all the existing tasks in a bit.
+ // Panic all the existing threads in a bit.
for _ in range(0, TEST_TASKS) {
let waiter = waiter.clone();
pool.execute(move|| {
// in theory we can demangle any Unicode code point, but
// for simplicity we just catch the common ones.
- "$x20" => " ",
- "$x27" => "'",
- "$x5b" => "[",
- "$x5d" => "]"
+ "$u{20}" => " ",
+ "$u{27}" => "'",
+ "$u{5b}" => "[",
+ "$u{5d}" => "]"
)
} else {
let idx = match rest.find('$') {
F: FnOnce() -> T,
{
unsafe {
- let _guard = self.lock.lock();
+ let _guard = self.lock.lock().unwrap();
if !*self.initialized.get() {
let (tx, rx) = channel();
*self.chan.get() = mem::transmute(box tx);
let t = f();
Thread::spawn(move |:| {
helper(receive.0, rx, t);
- let _g = self.lock.lock();
+ let _g = self.lock.lock().unwrap();
*self.shutdown.get() = true;
self.cond.notify_one()
}).detach();
/// This is only valid if the worker thread has previously booted
pub fn send(&'static self, msg: M) {
unsafe {
- let _guard = self.lock.lock();
+ let _guard = self.lock.lock().unwrap();
// Must send and *then* signal to ensure that the child receives the
// message. Otherwise it could wake up and go to sleep before we
// Shut down, but make sure this is done inside our lock to ensure
// that we'll always receive the exit signal when the thread
// returns.
- let guard = self.lock.lock();
+ let mut guard = self.lock.lock().unwrap();
// Close the channel by destroying it
let chan: Box<Sender<M>> = mem::transmute(*self.chan.get());
// Wait for the child to exit
while !*self.shutdown.get() {
- self.cond.wait(&guard);
+ guard = self.cond.wait(guard).unwrap();
}
drop(guard);
// Collect all the results we found
let mut addrs = Vec::new();
let mut rp = res;
- while rp.is_not_null() {
+ while !rp.is_null() {
unsafe {
let addr = try!(sockaddr_to_addr(mem::transmute((*rp).ai_addr),
(*rp).ai_addrlen as uint));
use iter::{Iterator, IteratorExt};
use os;
use path::GenericPath;
- use ptr::RawPtr;
+ use ptr::PtrExt;
use ptr;
use slice::SliceExt;
"file descriptor is not a TTY"),
libc::ETIMEDOUT => (io::TimedOut, "operation timed out"),
libc::ECANCELED => (io::TimedOut, "operation aborted"),
+ libc::consts::os::posix88::EEXIST =>
+ (io::PathAlreadyExists, "path already exists"),
// These two constants can have the same value on some systems,
// but different values on others, so we can't use a match
"invalid handle provided to function"),
libc::ERROR_NOTHING_TO_TERMINATE =>
(io::InvalidInput, "no process to kill"),
+ libc::ERROR_ALREADY_EXISTS =>
+ (io::PathAlreadyExists, "path already exists"),
// libuv maps this error code to EISDIR. we do too. if it is found
// to be incorrect, we can add in some more machinery to only
/// Spawn a new joinable thread, returning a `JoinGuard` for it.
///
- /// The join guard can be used to explicitly join the child thead (via
+ /// The join guard can be used to explicitly join the child thread (via
/// `join`), returning `Result<T>`, or it will implicitly join the child
/// upon being dropped. To detach the child, allowing it to outlive the
/// current thread, use `detach`. See the module documentation for additional details.
}
/// Determines whether the current thread is panicking.
+ #[inline]
pub fn panicking() -> bool {
unwind::panicking()
}
// or futuxes, and in either case may allow spurious wakeups.
pub fn park() {
let thread = Thread::current();
- let mut guard = thread.inner.lock.lock();
+ let mut guard = thread.inner.lock.lock().unwrap();
while !*guard {
- thread.inner.cvar.wait(&guard);
+ guard = thread.inner.cvar.wait(guard).unwrap();
}
*guard = false;
}
///
/// See the module doc for more detail.
pub fn unpark(&self) {
- let mut guard = self.inner.lock.lock();
+ let mut guard = self.inner.lock.lock().unwrap();
if !*guard {
*guard = true;
self.inner.cvar.notify_one();
unsafe {
let slot = slot.get().expect("cannot access a TLS value during or \
after it is destroyed");
- if (*slot.get()).is_none() {
- *slot.get() = Some((self.init)());
- }
- f((*slot.get()).as_ref().unwrap())
+ f(match *slot.get() {
+ Some(ref inner) => inner,
+ None => self.init(slot),
+ })
}
}
+ unsafe fn init(&self, slot: &UnsafeCell<Option<T>>) -> &T {
+ *slot.get() = Some((self.init)());
+ (*slot.get()).as_ref().unwrap()
+ }
+
/// Test this TLS key to determine whether its value has been destroyed for
/// the current thread or not.
///
/// detects Copy, Send and Sync.
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
pub enum TyParamBound {
- TraitTyParamBound(PolyTraitRef),
+ TraitTyParamBound(PolyTraitRef, TraitBoundModifier),
RegionTyParamBound(Lifetime)
}
+/// A modifier on a bound, currently this is only used for `?Sized`, where the
+/// modifier is `Maybe`. Negative bounds should also be handled here.
+#[deriving(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
+pub enum TraitBoundModifier {
+ None,
+ Maybe,
+}
+
pub type TyParamBounds = OwnedSlice<TyParamBound>;
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
pub ident: Ident,
pub id: NodeId,
pub bounds: TyParamBounds,
- pub unbound: Option<TraitRef>,
pub default: Option<P<Ty>>,
pub span: Span
}
/// Expr with trailing semi-colon (may have any type):
StmtSemi(P<Expr>, NodeId),
- StmtMac(Mac, MacStmtStyle),
+ StmtMac(P<Mac>, MacStmtStyle),
}
#[deriving(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show)]
ExprField(P<Expr>, SpannedIdent),
ExprTupField(P<Expr>, Spanned<uint>),
ExprIndex(P<Expr>, P<Expr>),
- ExprSlice(P<Expr>, Option<P<Expr>>, Option<P<Expr>>, Mutability),
- ExprRange(P<Expr>, Option<P<Expr>>),
+ ExprRange(Option<P<Expr>>, Option<P<Expr>>),
/// Variable reference, possibly containing `::` and/or
/// type parameters, e.g. foo::bar::<baz>
pub bound_lifetimes: Vec<LifetimeDef>,
/// The `Foo<&'a T>` in `<'a> Foo<&'a T>`
- pub trait_ref: TraitRef
+ pub trait_ref: TraitRef,
}
#[deriving(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
/// Represents a Trait Declaration
ItemTrait(Unsafety,
Generics,
- Option<TraitRef>, // (optional) default bound not required for Self.
- // Currently, only Sized makes sense here.
TyParamBounds,
Vec<TraitItem>),
ItemImpl(Unsafety,
None => {}
}
}
- ItemTrait(_, _, _, ref bounds, ref trait_items) => {
+ ItemTrait(_, _, ref bounds, ref trait_items) => {
for b in bounds.iter() {
- if let TraitTyParamBound(ref t) = *b {
+ if let TraitTyParamBound(ref t, TraitBoundModifier::None) = *b {
self.insert(t.trait_ref.ref_id, NodeItem(i));
}
}
.collect();
ast::ItemImpl(u, a, b, c, impl_items)
}
- ast::ItemTrait(u, a, b, c, methods) => {
+ ast::ItemTrait(u, a, b, methods) => {
let methods = methods.into_iter()
.filter(|m| trait_method_in_cfg(cx, m))
.collect();
- ast::ItemTrait(u, a, b, c, methods)
+ ast::ItemTrait(u, a, b, methods)
}
ast::ItemStruct(def, generics) => {
ast::ItemStruct(fold_struct(cx, def), generics)
span: Span,
id: ast::Ident,
bounds: OwnedSlice<ast::TyParamBound>,
- unbound: Option<ast::TraitRef>,
default: Option<P<ast::Ty>>) -> ast::TyParam;
fn trait_ref(&self, path: ast::Path) -> ast::TraitRef;
span: Span,
id: ast::Ident,
bounds: OwnedSlice<ast::TyParamBound>,
- unbound: Option<ast::TraitRef>,
default: Option<P<ast::Ty>>) -> ast::TyParam {
ast::TyParam {
ident: id,
id: ast::DUMMY_NODE_ID,
bounds: bounds,
- unbound: unbound,
default: default,
span: span
}
}
fn typarambound(&self, path: ast::Path) -> ast::TyParamBound {
- ast::TraitTyParamBound(self.poly_trait_ref(path))
+ ast::TraitTyParamBound(self.poly_trait_ref(path), ast::TraitBoundModifier::None)
}
fn lifetime(&self, span: Span, name: ast::Name) -> ast::Lifetime {
additional_bounds: Vec::new(),
generics: LifetimeBounds {
lifetimes: Vec::new(),
- bounds: vec!(("__D", None, vec!(Path::new_(
+ bounds: vec!(("__D", vec!(Path::new_(
vec!(krate, "Decoder"), None,
vec!(box Literal(Path::new_local("__E"))), true))),
- ("__E", None, vec!()))
+ ("__E", vec!()))
},
methods: vec!(
MethodDef {
additional_bounds: Vec::new(),
generics: LifetimeBounds {
lifetimes: Vec::new(),
- bounds: vec!(("__S", None, vec!(Path::new_(
+ bounds: vec!(("__S", vec!(Path::new_(
vec!(krate, "Encoder"), None,
vec!(box Literal(Path::new_local("__E"))), true))),
- ("__E", None, vec!()))
+ ("__E", vec!()))
},
methods: vec!(
MethodDef {
cx.typaram(self.span,
ty_param.ident,
OwnedSlice::from_vec(bounds),
- ty_param.unbound.clone(),
None)
}));
}
-fn mk_ty_param(cx: &ExtCtxt, span: Span, name: &str,
- bounds: &[Path], unbound: Option<ast::TraitRef>,
- self_ident: Ident, self_generics: &Generics) -> ast::TyParam {
+fn mk_ty_param(cx: &ExtCtxt,
+ span: Span,
+ name: &str,
+ bounds: &[Path],
+ self_ident: Ident,
+ self_generics: &Generics)
+ -> ast::TyParam {
let bounds =
bounds.iter().map(|b| {
let path = b.to_path(cx, span, self_ident, self_generics);
cx.typarambound(path)
}).collect();
- cx.typaram(span, cx.ident_of(name), bounds, unbound, None)
+ cx.typaram(span, cx.ident_of(name), bounds, None)
}
fn mk_generics(lifetimes: Vec<ast::LifetimeDef>, ty_params: Vec<ast::TyParam>)
#[deriving(Clone)]
pub struct LifetimeBounds<'a> {
pub lifetimes: Vec<(&'a str, Vec<&'a str>)>,
- pub bounds: Vec<(&'a str, Option<ast::TraitRef>, Vec<Path<'a>>)>,
+ pub bounds: Vec<(&'a str, Vec<Path<'a>>)>,
}
impl<'a> LifetimeBounds<'a> {
}).collect();
let ty_params = self.bounds.iter().map(|t| {
match t {
- &(ref name, ref unbound, ref bounds) => {
+ &(ref name, ref bounds) => {
mk_ty_param(cx,
span,
*name,
bounds.as_slice(),
- unbound.clone(),
self_ty,
self_generics)
}
vec!(box Literal(Path::new_local("__S"))), true),
LifetimeBounds {
lifetimes: Vec::new(),
- bounds: vec!(("__S", None,
+ bounds: vec!(("__S",
vec!(Path::new(vec!("std", "hash", "Writer"))))),
},
Path::new_local("__S"))
generics: LifetimeBounds {
lifetimes: Vec::new(),
bounds: vec!(("R",
- None,
vec!( Path::new(vec!("std", "rand", "Rng")) )))
},
explicit_self: None,
StmtMac(mac, style) => (mac, style),
_ => return expand_non_macro_stmt(s, fld)
};
- let expanded_stmt = match expand_mac_invoc(mac, s.span,
+ let expanded_stmt = match expand_mac_invoc(mac.and_then(|m| m), s.span,
|r| r.make_stmt(),
mark_stmt, fld) {
Some(stmt) => stmt,
fn visit_expr(&mut self, e: &ast::Expr) {
match e.node {
- ast::ExprSlice(..) => {
+ ast::ExprRange(..) => {
self.gate_feature("slicing_syntax",
e.span,
- "slicing syntax is experimental");
+ "range syntax is experimental");
}
_ => {}
}
-> TyParamBound
where T: Folder {
match tpb {
- TraitTyParamBound(ty) => TraitTyParamBound(fld.fold_poly_trait_ref(ty)),
+ TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier),
RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)),
}
}
pub fn noop_fold_ty_param<T: Folder>(tp: TyParam, fld: &mut T) -> TyParam {
- let TyParam {id, ident, bounds, unbound, default, span} = tp;
+ let TyParam {id, ident, bounds, default, span} = tp;
TyParam {
id: fld.new_id(id),
ident: ident,
bounds: fld.fold_bounds(bounds),
- unbound: unbound.map(|x| fld.fold_trait_ref(x)),
default: default.map(|x| fld.fold_ty(x)),
span: span
}
folder.fold_ty(ty),
new_impl_items)
}
- ItemTrait(unsafety, generics, unbound, bounds, methods) => {
+ ItemTrait(unsafety, generics, bounds, methods) => {
let bounds = folder.fold_bounds(bounds);
let methods = methods.into_iter().flat_map(|method| {
let r = match method {
}).collect();
ItemTrait(unsafety,
folder.fold_generics(generics),
- unbound,
bounds,
methods)
}
ExprIndex(el, er) => {
ExprIndex(folder.fold_expr(el), folder.fold_expr(er))
}
- ExprSlice(e, e1, e2, m) => {
- ExprSlice(folder.fold_expr(e),
- e1.map(|x| folder.fold_expr(x)),
- e2.map(|x| folder.fold_expr(x)),
- m)
- }
ExprRange(e1, e2) => {
- ExprRange(folder.fold_expr(e1),
+ ExprRange(e1.map(|x| folder.fold_expr(x)),
e2.map(|x| folder.fold_expr(x)))
}
ExprPath(pth) => ExprPath(folder.fold_path(pth)),
}))
}
StmtMac(mac, semi) => SmallVector::one(P(Spanned {
- node: StmtMac(folder.fold_mac(mac), semi),
+ node: StmtMac(mac.map(|m| folder.fold_mac(m)), semi),
span: span
}))
}
use abi;
use ast::{AssociatedType, BareFnTy, ClosureTy};
-use ast::{RegionTyParamBound, TraitTyParamBound};
+use ast::{RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
use ast::{ProvidedMethod, Public, Unsafety};
use ast::{Mod, BiAdd, Arg, Arm, Attribute, BindByRef, BindByValue};
use ast::{BiBitAnd, BiBitOr, BiBitXor, BiRem, Block};
use ast::{Expr, Expr_, ExprAddrOf, ExprMatch, ExprAgain};
use ast::{ExprAssign, ExprAssignOp, ExprBinary, ExprBlock, ExprBox};
use ast::{ExprBreak, ExprCall, ExprCast};
-use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex, ExprSlice};
+use ast::{ExprField, ExprTupField, ExprClosure, ExprIf, ExprIfLet, ExprIndex};
use ast::{ExprLit, ExprLoop, ExprMac, ExprRange};
use ast::{ExprMethodCall, ExprParen, ExprPath};
use ast::{ExprRepeat, ExprRet, ExprStruct, ExprTup, ExprUnary};
use ast::{Visibility, WhereClause};
use ast;
use ast_util::{mod, as_prec, ident_to_path, operator_prec};
-use codemap::{mod, Span, BytePos, Spanned, spanned, mk_sp};
+use codemap::{mod, Span, BytePos, Spanned, spanned, mk_sp, DUMMY_SP};
use diagnostic;
use ext::tt::macro_parser;
use parse;
LifetimeAndTypesWithColons,
}
+/// How to parse a bound, whether to allow bound modifiers such as `?`.
+#[deriving(Copy, PartialEq)]
+pub enum BoundParsingMode {
+ Bare,
+ Modified,
+}
+
enum ItemOrViewItem {
/// Indicates a failure to parse any kind of item. The attributes are
/// returned.
let poly_trait_ref = ast::PolyTraitRef { bound_lifetimes: lifetime_defs,
trait_ref: trait_ref };
let other_bounds = if self.eat(&token::BinOp(token::Plus)) {
- self.parse_ty_param_bounds()
+ self.parse_ty_param_bounds(BoundParsingMode::Bare)
} else {
OwnedSlice::empty()
};
let all_bounds =
- Some(TraitTyParamBound(poly_trait_ref)).into_iter()
+ Some(TraitTyParamBound(poly_trait_ref, TraitBoundModifier::None)).into_iter()
.chain(other_bounds.into_vec().into_iter())
.collect();
ast::TyPolyTraitRef(all_bounds)
// To be helpful, parse the proc as ever
let _ = self.parse_legacy_lifetime_defs(lifetime_defs);
let _ = self.parse_fn_args(false, false);
- let _ = self.parse_colon_then_ty_param_bounds();
+ let _ = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare);
let _ = self.parse_ret_ty();
self.obsolete(proc_span, ObsoleteProcType);
inputs
};
- let bounds = self.parse_colon_then_ty_param_bounds();
+ let bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare);
let output = self.parse_ret_ty();
let decl = P(FnDecl {
return lhs;
}
- let bounds = self.parse_ty_param_bounds();
+ let bounds = self.parse_ty_param_bounds(BoundParsingMode::Bare);
// In type grammar, `+` is treated like a binary operator,
// and hence both L and R side are required.
expr: P<Expr>,
start: Option<P<Expr>>,
end: Option<P<Expr>>,
- mutbl: Mutability)
+ _mutbl: Mutability)
-> ast::Expr_ {
- ExprSlice(expr, start, end, mutbl)
+ // FIXME: we could give more accurate span info here.
+ let (lo, hi) = match (&start, &end) {
+ (&Some(ref s), &Some(ref e)) => (s.span.lo, e.span.hi),
+ (&Some(ref s), &None) => (s.span.lo, s.span.hi),
+ (&None, &Some(ref e)) => (e.span.lo, e.span.hi),
+ (&None, &None) => (DUMMY_SP.lo, DUMMY_SP.hi),
+ };
+ ExprIndex(expr, self.mk_expr(lo, hi, ExprRange(start, end)))
}
pub fn mk_range(&mut self,
start: P<Expr>,
end: Option<P<Expr>>)
-> ast::Expr_ {
- ExprRange(start, end)
+ ExprRange(Some(start), end)
}
pub fn mk_field(&mut self, expr: P<Expr>, ident: ast::SpannedIdent) -> ast::Expr_ {
if id.name == token::special_idents::invalid.name {
P(spanned(lo,
hi,
- StmtMac(spanned(lo,
+ StmtMac(P(spanned(lo,
hi,
- MacInvocTT(pth, tts, EMPTY_CTXT)),
+ MacInvocTT(pth, tts, EMPTY_CTXT))),
style)))
} else {
// if it has a special ident, it's definitely an item
_ => {
let e = self.mk_mac_expr(span.lo,
span.hi,
- macro.node);
+ macro.and_then(|m| m.node));
let e =
self.parse_dot_or_call_expr_with(e);
self.handle_expression_like_statement(
expr = Some(
self.mk_mac_expr(span.lo,
span.hi,
- m.node));
+ m.and_then(|x| x.node)));
}
_ => {
stmts.push(P(Spanned {
// Parses a sequence of bounds if a `:` is found,
// otherwise returns empty list.
- fn parse_colon_then_ty_param_bounds(&mut self)
+ fn parse_colon_then_ty_param_bounds(&mut self,
+ mode: BoundParsingMode)
-> OwnedSlice<TyParamBound>
{
if !self.eat(&token::Colon) {
OwnedSlice::empty()
} else {
- self.parse_ty_param_bounds()
+ self.parse_ty_param_bounds(mode)
}
}
// where boundseq = ( polybound + boundseq ) | polybound
// and polybound = ( 'for' '<' 'region '>' )? bound
// and bound = 'region | trait_ref
- // NB: The None/Some distinction is important for issue #7264.
- fn parse_ty_param_bounds(&mut self)
+ fn parse_ty_param_bounds(&mut self,
+ mode: BoundParsingMode)
-> OwnedSlice<TyParamBound>
{
let mut result = vec!();
loop {
+ let question_span = self.span;
+ let ate_question = self.eat(&token::Question);
match self.token {
token::Lifetime(lifetime) => {
+ if ate_question {
+ self.span_err(question_span,
+ "`?` may only modify trait bounds, not lifetime bounds");
+ }
result.push(RegionTyParamBound(ast::Lifetime {
id: ast::DUMMY_NODE_ID,
span: self.span,
}
token::ModSep | token::Ident(..) => {
let poly_trait_ref = self.parse_poly_trait_ref();
- result.push(TraitTyParamBound(poly_trait_ref))
+ let modifier = if ate_question {
+ if mode == BoundParsingMode::Modified {
+ TraitBoundModifier::Maybe
+ } else {
+ self.span_err(question_span,
+ "unexpected `?`");
+ TraitBoundModifier::None
+ }
+ } else {
+ TraitBoundModifier::None
+ };
+ result.push(TraitTyParamBound(poly_trait_ref, modifier))
}
_ => break,
}
}
}
- /// Matches typaram = (unbound`?`)? IDENT optbounds ( EQ ty )?
+ /// Matches typaram = (unbound `?`)? IDENT (`?` unbound)? optbounds ( EQ ty )?
fn parse_ty_param(&mut self) -> TyParam {
// This is a bit hacky. Currently we are only interested in a single
// unbound, and it may only be `Sized`. To avoid backtracking and other
// complications, we parse an ident, then check for `?`. If we find it,
// we use the ident as the unbound, otherwise, we use it as the name of
- // type param.
+ // type param. Even worse, for now, we need to check for `?` before or
+ // after the bound.
let mut span = self.span;
let mut ident = self.parse_ident();
let mut unbound = None;
ident = self.parse_ident();
}
- let bounds = self.parse_colon_then_ty_param_bounds();
+ let mut bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Modified);
+ if let Some(unbound) = unbound {
+ let mut bounds_as_vec = bounds.into_vec();
+ bounds_as_vec.push(TraitTyParamBound(PolyTraitRef { bound_lifetimes: vec![],
+ trait_ref: unbound },
+ TraitBoundModifier::Maybe));
+ bounds = OwnedSlice::from_vec(bounds_as_vec);
+ };
let default = if self.check(&token::Eq) {
self.bump();
ident: ident,
id: ast::DUMMY_NODE_ID,
bounds: bounds,
- unbound: unbound,
default: default,
span: span,
}
let bounded_ty = self.parse_ty();
if self.eat(&token::Colon) {
- let bounds = self.parse_ty_param_bounds();
+ let bounds = self.parse_ty_param_bounds(BoundParsingMode::Bare);
let hi = self.span.hi;
let span = mk_sp(lo, hi);
fn parse_item_trait(&mut self, unsafety: Unsafety) -> ItemInfo {
let ident = self.parse_ident();
let mut tps = self.parse_generics();
- let sized = self.parse_for_sized();
+ let unbound = self.parse_for_sized();
// Parse supertrait bounds.
- let bounds = self.parse_colon_then_ty_param_bounds();
+ let mut bounds = self.parse_colon_then_ty_param_bounds(BoundParsingMode::Bare);
+
+ if let Some(unbound) = unbound {
+ let mut bounds_as_vec = bounds.into_vec();
+ bounds_as_vec.push(TraitTyParamBound(PolyTraitRef { bound_lifetimes: vec![],
+ trait_ref: unbound },
+ TraitBoundModifier::Maybe));
+ bounds = OwnedSlice::from_vec(bounds_as_vec);
+ };
self.parse_where_clause(&mut tps);
let meths = self.parse_trait_items();
- (ident, ItemTrait(unsafety, tps, sized, bounds, meths), None)
+ (ident, ItemTrait(unsafety, tps, bounds, meths), None)
}
fn parse_impl_items(&mut self) -> (Vec<ImplItem>, Vec<Attribute>) {
}
fn parse_for_sized(&mut self) -> Option<ast::TraitRef> {
+ // FIXME, this should really use TraitBoundModifier, but it will get
+ // re-jigged shortly in any case, so leaving the hacky version for now.
if self.eat_keyword(keywords::For) {
let span = self.span;
+ let mut ate_question = false;
+ if self.eat(&token::Question) {
+ ate_question = true;
+ }
let ident = self.parse_ident();
- if !self.eat(&token::Question) {
+ if self.eat(&token::Question) {
+ if ate_question {
+ self.span_err(span,
+ "unexpected `?`");
+ }
+ ate_question = true;
+ }
+ if !ate_question {
self.span_err(span,
- "expected 'Sized?' after `for` in trait item");
+ "expected `?Sized` after `for` in trait item");
return None;
}
let tref = Parser::trait_ref_from_ident(ident, span);
use abi;
use ast::{mod, FnUnboxedClosureKind, FnMutUnboxedClosureKind};
use ast::{FnOnceUnboxedClosureKind};
-use ast::{MethodImplItem, RegionTyParamBound, TraitTyParamBound};
+use ast::{MethodImplItem, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier};
use ast::{RequiredMethod, ProvidedMethod, TypeImplItem, TypeTraitItem};
use ast::{UnboxedClosureKind};
use ast_util;
}
try!(self.bclose(item.span));
}
- ast::ItemTrait(unsafety, ref generics, ref unbound, ref bounds, ref methods) => {
+ ast::ItemTrait(unsafety, ref generics, ref bounds, ref methods) => {
try!(self.head(""));
try!(self.print_visibility(item.vis));
try!(self.print_unsafety(unsafety));
try!(self.word_nbsp("trait"));
try!(self.print_ident(item.ident));
try!(self.print_generics(generics));
- if let &Some(ref tref) = unbound {
- try!(space(&mut self.s));
- try!(self.word_space("for"));
- try!(self.print_trait_ref(tref));
- try!(word(&mut self.s, "?"));
+ let bounds: Vec<_> = bounds.iter().map(|b| b.clone()).collect();
+ let mut real_bounds = Vec::with_capacity(bounds.len());
+ for b in bounds.into_iter() {
+ if let TraitTyParamBound(ref ptr, ast::TraitBoundModifier::Maybe) = b {
+ try!(space(&mut self.s));
+ try!(self.word_space("for ?"));
+ try!(self.print_trait_ref(&ptr.trait_ref));
+ } else {
+ real_bounds.push(b);
+ }
}
- try!(self.print_bounds(":", bounds[]));
+ try!(self.print_bounds(":", real_bounds[]));
try!(self.print_where_clause(generics));
try!(word(&mut self.s, " "));
try!(self.bopen());
ast::MacStmtWithBraces => token::Brace,
_ => token::Paren
};
- try!(self.print_mac(mac, delim));
+ try!(self.print_mac(&**mac, delim));
match style {
ast::MacStmtWithBraces => {}
_ => try!(word(&mut self.s, ";")),
try!(self.print_expr(&**index));
try!(word(&mut self.s, "]"));
}
- ast::ExprSlice(ref e, ref start, ref end, ref mutbl) => {
- try!(self.print_expr(&**e));
- try!(word(&mut self.s, "["));
- if mutbl == &ast::MutMutable {
- try!(word(&mut self.s, "mut"));
- if start.is_some() || end.is_some() {
- try!(space(&mut self.s));
- }
- }
+ ast::ExprRange(ref start, ref end) => {
if let &Some(ref e) = start {
try!(self.print_expr(&**e));
}
if let &Some(ref e) = end {
try!(self.print_expr(&**e));
}
- try!(word(&mut self.s, "]"));
- }
- ast::ExprRange(ref start, ref end) => {
- try!(self.print_expr(&**start));
- try!(word(&mut self.s, ".."));
- if let &Some(ref e) = end {
- try!(self.print_expr(&**e));
- }
}
ast::ExprPath(ref path) => try!(self.print_path(path, true)),
ast::ExprBreak(opt_ident) => {
}
try!(match *bound {
- TraitTyParamBound(ref tref) => {
+ TraitTyParamBound(ref tref, TraitBoundModifier::None) => {
+ self.print_poly_trait_ref(tref)
+ }
+ TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => {
+ try!(word(&mut self.s, "?"));
self.print_poly_trait_ref(tref)
}
RegionTyParamBound(ref lt) => {
}
pub fn print_ty_param(&mut self, param: &ast::TyParam) -> IoResult<()> {
- if let Some(ref tref) = param.unbound {
- try!(self.print_trait_ref(tref));
- try!(self.word_space("?"));
- }
try!(self.print_ident(param.ident));
try!(self.print_bounds(":", param.bounds[]));
match param.default {
fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) {
walk_ty_param_bound(self, bounds)
}
- fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef) {
- walk_poly_trait_ref(self, t)
+ fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: &'v TraitBoundModifier) {
+ walk_poly_trait_ref(self, t, m)
}
fn visit_struct_def(&mut self, s: &'v StructDef, _: Ident, _: &'v Generics, _: NodeId) {
walk_struct_def(self, s)
/// Like with walk_method_helper this doesn't correspond to a method
/// in Visitor, and so it gets a _helper suffix.
pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V,
- trait_ref: &'v PolyTraitRef)
+ trait_ref: &'v PolyTraitRef,
+ _modifier: &'v TraitBoundModifier)
where V: Visitor<'v>
{
walk_lifetime_decls_helper(visitor, &trait_ref.bound_lifetimes);
generics,
item.id)
}
- ItemTrait(_, ref generics, _, ref bounds, ref methods) => {
+ ItemTrait(_, ref generics, ref bounds, ref methods) => {
visitor.visit_generics(generics);
walk_ty_param_bounds_helper(visitor, bounds);
for method in methods.iter() {
pub fn walk_ty_param_bound<'v, V: Visitor<'v>>(visitor: &mut V,
bound: &'v TyParamBound) {
match *bound {
- TraitTyParamBound(ref typ) => {
- visitor.visit_poly_trait_ref(typ);
+ TraitTyParamBound(ref typ, ref modifier) => {
+ visitor.visit_poly_trait_ref(typ, modifier);
}
RegionTyParamBound(ref lifetime) => {
visitor.visit_lifetime_bound(lifetime);
StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => {
visitor.visit_expr(&**expression)
}
- StmtMac(ref macro, _) => visitor.visit_mac(macro),
+ StmtMac(ref macro, _) => visitor.visit_mac(&**macro),
}
}
visitor.visit_expr(&**main_expression);
visitor.visit_expr(&**index_expression)
}
- ExprSlice(ref main_expression, ref start, ref end, _) => {
- visitor.visit_expr(&**main_expression);
- walk_expr_opt(visitor, start);
- walk_expr_opt(visitor, end)
- }
ExprRange(ref start, ref end) => {
- visitor.visit_expr(&**start);
+ walk_expr_opt(visitor, start);
walk_expr_opt(visitor, end)
}
ExprPath(ref path) => {
use core::mem;
use core::num::Int;
use core::slice;
-use core::str::CharSplits;
+use core::str::Split;
use u_char::UnicodeChar;
use tables::grapheme::GraphemeCat;
/// An iterator over the words of a string, separated by a sequence of whitespace
-/// FIXME: This should be opaque
#[stable]
pub struct Words<'a> {
- inner: Filter<&'a str, CharSplits<'a, fn(char) -> bool>, fn(&&str) -> bool>,
+ inner: Filter<&'a str, Split<'a, fn(char) -> bool>, fn(&&str) -> bool>,
}
/// Methods for Unicode string slices
#[inline]
fn trim_left(&self) -> &str {
- self.trim_left_chars(|&: c: char| c.is_whitespace())
+ self.trim_left_matches(|&: c: char| c.is_whitespace())
}
#[inline]
fn trim_right(&self) -> &str {
- self.trim_right_chars(|&: c: char| c.is_whitespace())
+ self.trim_right_matches(|&: c: char| c.is_whitespace())
}
}
let mut buf = [0u16, ..2];
self.chars.next().map(|ch| {
- let n = ch.encode_utf16(buf[mut]).unwrap_or(0);
+ let n = ch.encode_utf16(buf.as_mut_slice()).unwrap_or(0);
if n == 2 { self.extra = buf[1]; }
buf[0]
})
fn send(p: &pipe, msg: uint) {
let &(ref lock, ref cond) = &**p;
- let mut arr = lock.lock();
+ let mut arr = lock.lock().unwrap();
arr.push(msg);
cond.notify_one();
}
fn recv(p: &pipe) -> uint {
let &(ref lock, ref cond) = &**p;
- let mut arr = lock.lock();
+ let mut arr = lock.lock().unwrap();
while arr.is_empty() {
- cond.wait(&arr);
+ arr = cond.wait(arr).unwrap();
}
arr.pop().unwrap()
}
fn next_permutation(perm: &mut [i32], count: &mut [i32]) {
for i in range(1, perm.len()) {
- rotate(perm[mut ..i + 1]);
+ rotate(perm.slice_to_mut(i + 1));
let count_i = &mut count[i];
if *count_i >= i as i32 {
*count_i = 0;
fn reverse(tperm: &mut [i32], mut k: uint) {
- tperm[mut ..k].reverse()
+ tperm.slice_to_mut(k).reverse()
}
fn work(mut perm: Perm, n: uint, max: uint) -> (i32, i32) {
copy_memory(buf.as_mut_slice(), alu);
let buf_len = buf.len();
- copy_memory(buf[mut alu_len..buf_len],
+ copy_memory(buf.slice_mut(alu_len, buf_len),
alu[..LINE_LEN]);
let mut pos = 0;
fn main() {
let mut data = read_to_end(&mut stdin_raw()).unwrap();
let tables = &Tables::new();
- parallel(mut_dna_seqs(data[mut]), |&: seq| reverse_complement(seq, tables));
+ parallel(mut_dna_seqs(data.as_mut_slice()), |&: seq| reverse_complement(seq, tables));
stdout_raw().write(data.as_mut_slice()).unwrap();
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod foo {
+ pub use self::bar::X;
+ use self::bar::X;
+ //~^ ERROR a value named `X` has already been imported in this module
+ //~| ERROR a type named `X` has already been imported in this module
+
+ mod bar {
+ pub struct X;
+ }
+}
+
+fn main() {
+ let _ = foo::X;
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Foo {}
+
+fn foo<T: Foo + Foo>() {} //~ ERROR `Foo` already appears in the list of bounds
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait FromStructReader<'a> { }
+trait ResponseHook {
+ fn get<'a, T: FromStructReader<'a>>(&'a self);
+}
+fn foo(res : Box<ResponseHook>) { res.get } //~ ERROR attempted to take value of method
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![crate_type = "lib"]
+
+enum NodeContents<'a> {
+ Children(Vec<Node<'a>>),
+}
+
+impl<'a> Drop for NodeContents<'a> {
+ //~^ ERROR cannot implement a destructor on a structure with type parameters
+ fn drop( &mut self ) {
+ }
+}
+
+struct Node<'a> {
+ contents: NodeContents<'a>,
+}
+
+impl<'a> Node<'a> {
+ fn noName(contents: NodeContents<'a>) -> Node<'a> {
+ Node{ contents: contents,}
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct AutoBuilder<'a> {
+ context: &'a int
+}
+
+impl<'a> Drop for AutoBuilder<'a> {
+ //~^ ERROR cannot implement a destructor on a structure with type parameters
+ fn drop(&mut self) {
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Deserializer<'a> { }
+
+trait Deserializable {
+ fn deserialize_token<'a, D: Deserializer<'a>>(D, &'a str) -> Self;
+}
+
+impl<'a, T: Deserializable> Deserializable for &'a str {
+ //~^ ERROR unable to infer enough type information
+ fn deserialize_token<D: Deserializer<'a>>(_x: D, _y: &'a str) -> &'a str {
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+trait Node {
+ fn zomg();
+}
+
+trait Graph<N: Node> {
+ fn nodes<'a, I: Iterator<&'a N>>(&'a self) -> I;
+}
+
+impl<N: Node> Graph<N> for Vec<N> {
+ fn nodes<'a, I: Iterator<&'a N>>(&self) -> I {
+ self.iter() //~ ERROR mismatched types
+ }
+}
+
+struct Stuff;
+
+impl Node for Stuff {
+ fn zomg() {
+ println!("zomg");
+ }
+}
+
+fn iterate<N: Node, G: Graph<N>>(graph: &G) {
+ for node in graph.iter() { //~ ERROR does not implement any method in scope named
+ node.zomg();
+ }
+}
+
+pub fn main() {
+ let graph = Vec::new();
+
+ graph.push(Stuff);
+
+ iterate(graph); //~ ERROR mismatched types
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+extern {
+ pub static symbol: ();
+}
+static CRASH: () = symbol; //~ cannot refer to other statics by value
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub struct Lexer<'a> {
+ input: &'a str,
+}
+
+impl<'a> Lexer<'a> {
+ pub fn new(input: &'a str) -> Lexer<'a> {
+ Lexer { input: input }
+ }
+}
+
+struct Parser<'a> {
+ lexer: &'a mut Lexer<'a>,
+}
+
+impl<'a> Parser<'a> {
+ pub fn new(lexer: &'a mut Lexer) -> Parser<'a> {
+ Parser { lexer: lexer }
+ //~^ ERROR cannot infer an appropriate lifetime for lifetime parameter
+ }
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod Y {
+ type X = uint;
+ extern {
+ static x: *const uint;
+ }
+ fn foo(value: *const X) -> *const X {
+ value
+ }
+}
+
+static foo: *const Y::X = Y::foo(Y::x as *const Y::X);
+//~^ ERROR cannot refer to other statics by value
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt::{Show, Formatter, Error};
+use std::collections::HashMap;
+
+trait HasInventory {
+ fn getInventory<'s>(&'s self) -> &'s mut Inventory;
+ fn addToInventory(&self, item: &Item);
+ fn removeFromInventory(&self, itemName: &str) -> bool;
+}
+
+trait TraversesWorld {
+ fn attemptTraverse(&self, room: &Room, directionStr: &str) -> Result<&Room, &str> {
+ let direction = str_to_direction(directionStr);
+ let maybe_room = room.direction_to_room.find(&direction);
+ //~^ ERROR cannot infer an appropriate lifetime for autoref due to conflicting requirements
+ match maybe_room {
+ Some(entry) => Ok(entry),
+ _ => Err("Direction does not exist in room.")
+ }
+ }
+}
+
+
+#[deriving(Show, Eq, PartialEq, Hash)]
+enum RoomDirection {
+ West,
+ East,
+ North,
+ South,
+ Up,
+ Down,
+ In,
+ Out,
+
+ None
+}
+
+struct Room {
+ description: String,
+ items: Vec<Item>,
+ direction_to_room: HashMap<RoomDirection, Room>,
+}
+
+impl Room {
+ fn new(description: &'static str) -> Room {
+ Room {
+ description: description.to_string(),
+ items: Vec::new(),
+ direction_to_room: HashMap::new()
+ }
+ }
+
+ fn add_direction(&mut self, direction: RoomDirection, room: Room) {
+ self.direction_to_room.insert(direction, room);
+ }
+}
+
+struct Item {
+ name: String,
+}
+
+struct Inventory {
+ items: Vec<Item>,
+}
+
+impl Inventory {
+ fn new() -> Inventory {
+ Inventory {
+ items: Vec::new()
+ }
+ }
+}
+
+struct Player {
+ name: String,
+ inventory: Inventory,
+}
+
+impl Player {
+ fn new(name: &'static str) -> Player {
+ Player {
+ name: name.to_string(),
+ inventory: Inventory::new()
+ }
+ }
+}
+
+impl TraversesWorld for Player {
+}
+
+impl Show for Player {
+ fn fmt(&self, formatter: &mut Formatter) -> Result<(), Error> {
+ formatter.write_str("Player{ name:");
+ formatter.write_str(self.name.as_slice());
+ formatter.write_str(" }");
+ Ok(())
+ }
+}
+
+fn str_to_direction(to_parse: &str) -> RoomDirection {
+ match to_parse {
+ "w" | "west" => RoomDirection::West,
+ "e" | "east" => RoomDirection::East,
+ "n" | "north" => RoomDirection::North,
+ "s" | "south" => RoomDirection::South,
+ "in" => RoomDirection::In,
+ "out" => RoomDirection::Out,
+ "up" => RoomDirection::Up,
+ "down" => RoomDirection::Down,
+ _ => None //~ ERROR mismatched types
+ }
+}
+
+fn main() {
+ let mut player = Player::new("Test player");
+ let mut room = Room::new("A test room");
+ println!("Made a player: {}", player);
+ println!("Direction parse: {}", str_to_direction("east"));
+ match player.attemptTraverse(&room, "west") {
+ Ok(_) => println!("Was able to move west"),
+ Err(msg) => println!("Not able to move west: {}", msg)
+ };
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<'a> {
+ data: &'a[u8],
+}
+
+impl <'a> Foo<'a>{
+ fn bar(self: &mut Foo) {
+ //~^ mismatched types: expected `Foo<'a>`, found `Foo<'_>` (lifetime mismatch)
+ //~| mismatched types: expected `Foo<'a>`, found `Foo<'_>` (lifetime mismatch)
+ }
+}
+
+fn main() {}
-// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// ignore-test
-
-use std::io::ReaderUtil;
-use std::io::Reader;
-
-fn bar(r:@ReaderUtil) -> String { r.read_line() }
+trait Foo {}
+impl Foo for u8 {}
fn main() {
- let r : @Reader = io::stdin();
- let _m = bar(r as @ReaderUtil);
+ let r: Box<Foo> = box 5;
+ let _m: Box<Foo> = r as Box<Foo>;
+ //~^ ERROR `core::kinds::Sized` is not implemented for the type `Foo`
+ //~| ERROR `Foo` is not implemented for the type `Foo`
}
// Regresion test for issue 7364
static boxed: Box<RefCell<int>> = box RefCell::new(0);
//~^ ERROR statics are not allowed to have custom pointers
-//~^^ ERROR: the trait `core::kinds::Sync` is not implemented for the type
-//~^^^ ERROR: the trait `core::kinds::Sync` is not implemented for the type
-//~^^^^ ERROR: the trait `core::kinds::Sync` is not implemented for the type
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
+//~| ERROR: the trait `core::kinds::Sync` is not implemented for the type
fn main() { }
// except according to those terms.
// Test range syntax - type errors.
+#![feature(slicing_syntax)]
pub fn main() {
// Mixed types.
// except according to those terms.
// Test range syntax - borrow errors.
+#![feature(slicing_syntax)]
pub fn main() {
let r = {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn f() {
+ 'l: loop {
+ fn g() {
+ loop {
+ break 'l; //~ ERROR use of undeclared label
+ }
+ }
+ }
+}
+
+fn main() {}
x[Foo..]; //~ ERROR cannot take a slice of a value with type `Foo`
x[..Foo]; //~ ERROR cannot take a slice of a value with type `Foo`
x[Foo..Foo]; //~ ERROR cannot take a slice of a value with type `Foo`
- x[mut]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
- x[mut Foo..]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
- x[mut ..Foo]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
- x[mut Foo..Foo]; //~ ERROR cannot take a mutable slice of a value with type `Foo`
}
fn main() {
let x: &[int] = &[1, 2, 3, 4, 5];
// Can't mutably slice an immutable slice
- let y = x[mut 2..4]; //~ ERROR cannot borrow
+ let slice: &mut [int] = &mut [0, 1];
+ x[2..4] = slice; //~ ERROR cannot borrow
}
let x: &[int] = &[1, 2, 3, 4, 5];
// Immutable slices are not mutable.
let y: &mut[_] = x[2..4]; //~ ERROR cannot borrow immutable dereference of `&`-pointer as mutabl
-
- let x: &mut [int] = &mut [1, 2, 3, 4, 5];
- // Can't borrow mutably twice
- let y = x[mut 1..2];
- let y = x[mut 4..5]; //~ERROR cannot borrow
}
}
}
-struct Foo<Sized? T> {
+struct Foo<T: ?Sized> {
f: T
}
// As dst-struct.rs, but the unsized field is the only field in the struct.
-struct Fat<Sized? T> {
+struct Fat<T: ?Sized> {
ptr: T
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Fat<Sized? T> {
+struct Fat<T: ?Sized> {
f1: int,
f2: &'static str,
ptr: T
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-struct Fat<Sized? T> {
+struct Fat<T: ?Sized> {
f1: int,
f2: &'static str,
ptr: T
// except according to those terms.
+extern crate core;
+
+use core::nonzero::NonZero;
use std::mem::size_of;
+use std::rc::Rc;
+use std::sync::Arc;
trait Trait {}
// Pointers - Box<T>
assert_eq!(size_of::<Box<int>>(), size_of::<Option<Box<int>>>());
-
// The optimization can't apply to raw pointers
assert!(size_of::<Option<*const int>>() != size_of::<*const int>());
assert!(Some(0 as *const int).is_some()); // Can't collapse None to null
+ struct Foo {
+ _a: Box<int>
+ }
+ struct Bar(Box<int>);
+
+ // Should apply through structs
+ assert_eq!(size_of::<Foo>(), size_of::<Option<Foo>>());
+ assert_eq!(size_of::<Bar>(), size_of::<Option<Bar>>());
+ // and tuples
+ assert_eq!(size_of::<(u8, Box<int>)>(), size_of::<Option<(u8, Box<int>)>>());
+ // and fixed-size arrays
+ assert_eq!(size_of::<[Box<int>, ..1]>(), size_of::<Option<[Box<int>, ..1]>>());
+
+ // Should apply to NonZero
+ assert_eq!(size_of::<NonZero<uint>>(), size_of::<Option<NonZero<uint>>>());
+ assert_eq!(size_of::<NonZero<*mut i8>>(), size_of::<Option<NonZero<*mut i8>>>());
+
+ // Should apply to types that use NonZero internally
+ assert_eq!(size_of::<Vec<int>>(), size_of::<Option<Vec<int>>>());
+ assert_eq!(size_of::<Arc<int>>(), size_of::<Option<Arc<int>>>());
+ assert_eq!(size_of::<Rc<int>>(), size_of::<Option<Rc<int>>>());
+
+ // Should apply to types that have NonZero transitively
+ assert_eq!(size_of::<String>(), size_of::<Option<String>>());
+
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo(_: &[&str]) {}
+
+fn bad(a: &str, b: &str) {
+ foo(&[a, b]);
+}
+
+fn good(a: &str, b: &str) {
+ foo(&[a.as_slice(), b.as_slice()]);
+}
+
+fn main() {}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(unboxed_closures)]
+use std::ops::Fn;
+
+struct Foo<T>(T);
+
+impl<T: Copy> Fn<(), T> for Foo<T> {
+ extern "rust-call" fn call(&self, _: ()) -> T {
+ match *self {
+ Foo(t) => t
+ }
+ }
+}
+
+fn main() {
+ let t: u8 = 1;
+ println!("{}", Foo(t)());
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn foo<'r>() {
+ let maybe_value_ref: Option<&'r u8> = None;
+
+ let _ = maybe_value_ref.map(|& ref v| v);
+ let _ = maybe_value_ref.map(|& ref v| -> &'r u8 {v});
+ let _ = maybe_value_ref.map(|& ref v: &'r u8| -> &'r u8 {v});
+ let _ = maybe_value_ref.map(|& ref v: &'r u8| {v});
+}
+
+fn main() {
+ foo();
+}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<'a> {
+ listener: ||: 'a
+}
+
+impl<'a> Foo<'a> {
+ fn new(listener: ||: 'a) -> Foo<'a> {
+ Foo { listener: listener }
+ }
+}
+
+fn main() {
+ let a = Foo::new(|| {});
+}
impl TraitWithSend for IndirectBlah {}
impl IndirectTraitWithSend for IndirectBlah {}
-fn test_trait<Sized? T: Send>() { println!("got here!") }
+fn test_trait<T: Send + ?Sized>() { println!("got here!") }
fn main() {
test_trait::<TraitWithSend>();
// Test that astconv doesn't forget about mutability of &mut str
fn main() {
- fn foo<Sized? T>(_: &mut T) {}
+ fn foo<T: ?Sized>(_: &mut T) {}
let _f: fn(&mut str) = foo;
}
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+pub trait Borrow<Sized? Borrowed> {
+ fn borrow(&self) -> &Borrowed;
+}
+
+impl<T: Sized> Borrow<T> for T {
+ fn borrow(&self) -> &T { self }
+}
+
+trait Foo {
+ fn foo(&self, other: &Self);
+}
+
+fn bar<K, Q>(k: &K, q: &Q) where K: Borrow<Q>, Q: Foo {
+ q.foo(k.borrow())
+}
+
+struct MyTree<K>;
+
+impl<K> MyTree<K> {
+ // This caused a failure in #18906
+ fn bar<Q>(k: &K, q: &Q) where K: Borrow<Q>, Q: Foo {
+ q.foo(k.borrow())
+ }
+}
+
+fn main() {}
use std::sync::Mutex;
pub fn main() {
- unsafe {
- let x = Some(Mutex::new(true));
- match x {
- Some(ref z) if *z.lock() => {
- assert!(*z.lock());
- },
- _ => panic!()
- }
+ let x = Some(Mutex::new(true));
+ match x {
+ Some(ref z) if *z.lock().unwrap() => {
+ assert!(*z.lock().unwrap());
+ },
+ _ => panic!()
}
}
use std::kinds::Sized;
// Note: this must be generic for the problem to show up
-trait Foo<A> for Sized? {
+trait Foo<A> for ?Sized {
fn foo(&self);
}
// Test range syntax.
+#![feature(slicing_syntax)]
+
fn foo() -> int { 42 }
pub fn main() {
--- /dev/null
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+mod base {
+ pub trait HasNew<T> {
+ fn new() -> T;
+ }
+
+ pub struct Foo {
+ dummy: (),
+ }
+
+ impl HasNew<Foo> for Foo {
+ fn new() -> Foo {
+ Foo { dummy: () }
+ }
+ }
+
+ pub struct Bar {
+ dummy: (),
+ }
+
+ impl HasNew<Bar> for Bar {
+ fn new() -> Bar {
+ Bar { dummy: () }
+ }
+ }
+}
+
+pub fn main() {
+ let _f: base::Foo = base::HasNew::new();
+ let _b: base::Bar = base::HasNew::new();
+}
//
// ignore-lexer-test FIXME #15879
-// Test syntax checks for `Sized?` syntax.
+// Test syntax checks for `?Sized` syntax.
-trait T1 for Sized? {}
-pub trait T2 for Sized? {}
-trait T3<X: T1> for Sized?: T2 {}
-trait T4<Sized? X> {}
-trait T5<Sized? X, Y> {}
-trait T6<Y, Sized? X> {}
-trait T7<Sized? X, Sized? Y> {}
-trait T8<Sized? X: T2> {}
-struct S1<Sized? X>;
-enum E<Sized? X> {}
-impl <Sized? X> T1 for S1<X> {}
-fn f<Sized? X>() {}
-type TT<Sized? T> = T;
+trait T1 for ?Sized {}
+pub trait T2 for ?Sized {}
+trait T3<X: T1> for ?Sized: T2 {}
+trait T4<X: ?Sized> {}
+trait T5<X: ?Sized, Y> {}
+trait T6<Y, X: ?Sized> {}
+trait T7<X: ?Sized, Y: ?Sized> {}
+trait T8<X: ?Sized+T2> {}
+trait T9<X: T2 + ?Sized> {}
+struct S1<X: ?Sized>;
+enum E<X: ?Sized> {}
+impl <X: ?Sized> T1 for S1<X> {}
+fn f<X: ?Sized>() {}
+type TT<T: ?Sized> = T;
pub fn main() {
}
// Test sized-ness checking in substitution.
// Unbounded.
-fn f1<Sized? X>(x: &X) {
+fn f1<X: ?Sized>(x: &X) {
f1::<X>(x);
}
fn f2<X>(x: &X) {
}
// Bounded.
-trait T for Sized? {}
-fn f3<Sized? X: T>(x: &X) {
+trait T for ?Sized {}
+fn f3<X: T+?Sized>(x: &X) {
f3::<X>(x);
}
fn f4<X: T>(x: &X) {
}
// Self type.
-trait T2 for Sized? {
+trait T2 for ?Sized {
fn f() -> Box<Self>;
}
struct S;
box S
}
}
-fn f5<Sized? X: T2>(x: &X) {
+fn f5<X: ?Sized+T2>(x: &X) {
let _: Box<X> = T2::f();
}
fn f6<X: T2>(x: &X) {
let _: Box<X> = T2::f();
}
-trait T3 for Sized? {
+trait T3 for ?Sized {
fn f() -> Box<Self>;
}
impl T3 for S {
box S
}
}
-fn f7<Sized? X: T3>(x: &X) {
+fn f7<X: ?Sized+T3>(x: &X) {
// This is valid, but the unsized bound on X is irrelevant because any type
// which implements T3 must have statically known size.
let _: Box<X> = T3::f();
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
}
-trait T5<Sized? X> {
+trait T5<X: ?Sized> {
// not an error (for now)
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
}
-trait T7<Sized? X: T> {
+trait T7<X: ?Sized+T> {
// not an error (for now)
fn m1(x: &T4<X>);
fn m2(x: &T5<X>);
}
// The last field in a struct or variant may be unsized
-struct S2<Sized? X> {
+struct S2<X: ?Sized> {
f: X,
}
-struct S3<Sized? X> {
+struct S3<X: ?Sized> {
f1: int,
f2: X,
}
-enum E<Sized? X> {
+enum E<X: ?Sized> {
V1(X),
V2{x: X},
V3(int, X),
fn f(p: &mut Point) { p.z = 13; }
pub fn main() {
- unsafe {
- let x = Some(Mutex::new(true));
- match x {
- Some(ref z) if *z.lock() => {
- assert!(*z.lock());
- },
- _ => panic!()
- }
+ let x = Some(Mutex::new(true));
+ match x {
+ Some(ref z) if *z.lock().unwrap() => {
+ assert!(*z.lock().unwrap());
+ },
+ _ => panic!()
}
}