Erik Rose <erik@mozilla.com>
Evan McClanahan <evan@evanmcc.com>
Francisco Souza <f@souza.cc>
+Gabriel <g2p.code@gmail.com>
Gareth Daniel Smith <garethdanielsmith@gmail.com>
Glenn Willen <gwillen@nerdnet.org>
Gonçalo Cabrita <_@gmcabrita.com>
Haitao Li <lihaitao@gmail.com>
Ian D. Bollinger <ian.bollinger@gmail.com>
Ivano Coppola <rgbfirefox@gmail.com>
+Jacob Harris Cryer Kragh <jhckragh@gmail.com>
Jacob Parker <j3parker@csclub.uwaterloo.ca>
Jason Orendorff <jorendorff@mozilla.com>
Jed Davis <jld@panix.com>
Lennart Kudling
Lindsey Kuper <lindsey@rockstargirl.org>
Luca Bruno <lucab@debian.org>
+Luqman Aden <laden@csclub.uwaterloo.ca>
Magnus Auvinen <magnus.auvinen@gmail.com>
+Mahmut Bulut <mahmutbulut0@gmail.com>
Margaret Meyerhofer <mmeyerho@andrew.cmu.edu>
Marijn Haverbeke <marijnh@gmail.com>
Matt Brubeck <mbrubeck@limpet.net>
# version-string calculation
CFG_GIT_DIR := $(CFG_SRC_DIR).git
-CFG_RELEASE = 0.4
+CFG_RELEASE = 0.5
CFG_VERSION = $(CFG_RELEASE)
ifneq ($(wildcard $(CFG_GIT)),)
endif
endif
-ifdef CFG_DISABLE_VALGRIND
- $(info cfg: disabling valgrind (CFG_DISABLE_VALGRIND))
+ifdef CFG_ENABLE_VALGRIND
+ $(info cfg: enabling valgrind (CFG_ENABLE_VALGRIND))
+else
CFG_VALGRIND :=
endif
ifdef CFG_BAD_VALGRIND
## Installation
-The Rust compiler is slightly unusual in that it is written in Rust and
-therefore must be built by a precompiled "snapshot" version of itself (made in
-an earlier state of development). As such, source builds require that:
+The Rust compiler currently must be built from a [tarball], unless you
+are on Windows, in which case using the [installer][win-exe] is
+recommended.
-* You are connected to the internet, to fetch snapshots.
+Since the Rust compiler is written in Rust, it must be built by
+a precompiled "snapshot" version of itself (made in an earlier state
+of development). As such, source builds require a connection to
+the Internet, to fetch snapshots, and an OS that can execute the
+available snapshot binaries.
-* You can at least execute snapshot binaries of one of the forms we offer
- them in. Currently we build and test snapshots on:
+Snapshot binaries are currently built and tested on several platforms:
- * Windows (7, server 2008 r2) x86 only
- * Linux 2.6.x (various distributions) x86 and x86-64
- * OSX 10.6 ("Snow Leopard") or 10.7 ("Lion") x86 and x86-64
+* Windows (7, Server 2008 R2), x86 only
+* Linux (various distributions), x86 and x86-64
+* OSX 10.6 ("Snow Leopard") or greater, x86 and x86-64
-You may find other platforms work, but these are our "tier 1" supported build
-environments that are most likely to work. Further platforms will be added to
-the list in the future via cross-compilation.
+You may find that other platforms work, but these are our "tier 1"
+supported build environments that are most likely to work.
-To build from source you will also need the following prerequisite packages:
+> ***Note:*** Windows users should read the detailed
+> [getting started][wiki-start] notes on the wiki. Even when using
+> the binary installer the Windows build requires a MinGW installation,
+> the precise details of which are not discussed here.
+
+To build from source you will also need the following prerequisite
+packages:
* g++ 4.4 or clang++ 3.x
-* python 2.6 or later
+* python 2.6 or later (but not 3.x)
* perl 5.0 or later
* gnu make 3.81 or later
* curl
-Assuming you're on a relatively modern Linux/OSX system and have met the
-prerequisites, something along these lines should work:
+Assuming you're on a relatively modern *nix system and have met the
+prerequisites, something along these lines should work.
+ $ wget http://dl.rust-lang.org/dist/rust-0.4.tar.gz
$ tar -xzf rust-0.4.tar.gz
$ cd rust-0.4
$ ./configure
$ make && make install
-When complete, make install will place the following programs into
-/usr/local/bin:
-
-* rustc, the Rust compiler
-* rustdoc, the API-documentation tool
-* cargo, the Rust package manager
+You may need to use `sudo make install` if you do not normally have
+permission to modify the destination directory. The install locations
+can be adjusted by passing a `--prefix` argument to
+`configure`. Various other options are also supported; pass `--help`
+for more information on them.
-In addition to a manual page under /usr/local/share/man and a set of host and
-target libraries under /usr/local/lib/rustc.
+When complete, `make install` will place several programs into
+`/usr/local/bin`: `rustc`, the Rust compiler; `rustdoc`, the
+API-documentation tool; and `cargo`, the Rust package manager.
-The install locations can be adjusted by passing a --prefix argument to
-configure. Various other options are also supported, pass --help for more
-information on them.
+[wiki-start]: https://github.com/mozilla/rust/wiki/Note-getting-started-developing-Rust
+[tarball]: http://dl.rust-lang.org/dist/rust-0.4.tar.gz
+[win-exe]: http://dl.rust-lang.org/dist/rust-0.4-install.exe
## License
Version 0.4 (October 2012)
--------------------------
- * ~1500 changes, numerous bugfixes
+ * ~2000 changes, numerous bugfixes
* Syntax
* All keywords are now strict and may not be used as identifiers anywhere
need_cmd mkdir
need_cmd printf
need_cmd cut
+need_cmd head
need_cmd grep
need_cmd xargs
need_cmd cp
err "unknown CPU type: $CFG_CPUTYPE"
esac
+# Detect 64 bit linux systems with 32 bit userland and force 32 bit compilation
+if [ $CFG_OSTYPE = unknown-linux-gnu -a $CFG_CPUTYPE = x86_64 ]
+then
+ file -L "$SHELL" | grep -q "x86[_-]64"
+ if [ $? != 0 ]; then
+ CFG_CPUTYPE=i686
+ fi
+fi
+
+
DEFAULT_HOST_TRIPLE="${CFG_CPUTYPE}-${CFG_OSTYPE}"
CFG_SRC_DIR="$(cd $(dirname $0) && pwd)/"
fi
opt sharedstd 1 "build libstd as a shared library"
-opt valgrind 1 "run tests with valgrind (memcheck by default)"
+opt valgrind 0 "run tests with valgrind (memcheck by default)"
opt helgrind 0 "run tests with helgrind instead of memcheck"
opt docs 1 "build documentation"
opt optimize 1 "build optimized rust code"
if [ ! -z "$CFG_PANDOC" ]
then
- PV=$(pandoc --version | awk '/^pandoc/ {print $2}')
- if [ "$PV" \< "1.8" ]
+ PANDOC_VER_LINE=$(pandoc --version | grep '^pandoc ')
+ PANDOC_VER=${PANDOC_VER_LINE#pandoc }
+ PV_MAJOR_MINOR=${PANDOC_VER%.[0-9]*}
+ PV_MAJOR=${PV_MAJOR_MINOR%%[.][0-9]*}
+ PV_MINOR=${PV_MAJOR_MINOR#[0-9]*[.]}
+ PV_MINOR=${PV_MINOR%%[.][0-9]*}
+ if [ "$PV_MAJOR" -lt "1" ] || [ "$PV_MINOR" -lt "8" ]
then
- step_msg "pandoc $PV is too old. disabling"
- BAD_PANDOC=1
+ step_msg "pandoc $PV_MAJOR.$PV_MINOR is too old. disabling"
+ BAD_PANDOC=1
fi
fi
need_ok "git failed"
msg "git: submodule foreach sync"
- "${CFG_GIT}" submodule --quiet foreach --recursive git submodule sync
+ "${CFG_GIT}" submodule --quiet foreach --recursive 'if test -e .gitmodules; then git submodule sync; fi'
need_ok "git failed"
msg "git: submodule foreach update"
"as": "op", "true": "atom", "false": "atom", "assert": "op", "check": "op",
"claim": "op", "extern": "ignore", "unsafe": "ignore", "import": "else-style",
"export": "else-style", "copy": "op", "log": "op",
- "use": "op", "self": "atom"
+ "use": "op", "self": "atom", "pub": "atom", "priv": "atom"
};
var typeKeywords = function() {
var keywords = {"fn": "fn"};
the language: possible features that can be combined or omitted. We aim to
keep the size and complexity of the language under control.
-> **Note:** This manual is very out of date. The best source of Rust
-> documentation is currently the tutorial.
-
> **Note:** The grammar for Rust given in this document is rough and
> very incomplete; only a modest number of sections have accompanying grammar
> rules. Formalizing the grammar accepted by the Rust parser is ongoing work,
~~~~~~~~ {.ebnf .gram}
comment : block_comment | line_comment ;
block_comment : "/*" block_comment_body * "*/" ;
-block_comment_body : block_comment | non_star * | '*' non_slash ;
+block_comment_body : non_star * | '*' non_slash ;
line_comment : "//" non_eol * ;
~~~~~~~~
-Comments in Rust code follow the general C++ style of line and block-comment
-forms, with proper nesting of block-comment delimiters. Comments are
-interpreted as a form of whitespace.
+Comments in Rust code follow the general C++ style of line and block-comment forms,
+with no nesting of block-comment delimiters.
+
+Line comments beginning with _three_ slashes (`///`),
+and block comments beginning with a repeated asterisk in the block-open sequence (`/**`),
+are interpreted as a special syntax for `doc` [attributes](#attributes).
+That is, they are equivalent to writing `#[doc "..."]` around the comment's text.
+
+Non-doc comments are interpreted as a form of whitespace.
## Whitespace
12E+99_f64; // type f64
~~~~
-##### Nil and boolean literals
+##### Unit and boolean literals
-The _nil value_, the only value of the type by the same name, is
-written as `()`. The two values of the boolean type are written `true`
-and `false`.
+The _unit value_, the only value of the type that has the same name, is written as `()`.
+The two values of the boolean type are written `true` and `false`.
### Symbols
## Macros
-User-defined syntax extensions are called "macros", and they can be defined
-with the `macro_rules!` syntax extension. User-defined macros can currently
-only be invoked in expression position.
+~~~~~~~~ {.ebnf .gram}
-~~~~ {.ebnf .gram}
expr_macro_rules : "macro_rules" '!' ident '(' macro_rule * ')'
macro_rule : '(' matcher * ')' "=>" '(' transcriber * ')' ';'
matcher : '(' matcher * ')' | '[' matcher * ']'
| '$' '(' transcriber * ')' sep_token? [ '*' | '+' ]
| non_special_token
-~~~~
+~~~~~~~~
+
+User-defined syntax extensions are called "macros", and they can be defined
+with the `macro_rules!` syntax extension. User-defined macros can currently
+only be invoked in expression position.
+
(A `sep_token` is any token other than `*` and `+`. A `non_special_token` is
any token other than a delimiter or `$`.)
Macro invocations are looked up by name, and each macro rule is tried in turn;
-the first successful match is transcribed. The matching and transcribing
-processes are close cousins, and will be described together:
+the first successful match is transcribed. The matching and transcription
+processes are closely related, and will be described together:
### Macro By Example
-Everything that does not begin with a `$` is matched and transcirbed
-literally, including delimiters. For parsing reasons, they must be matched,
-but they are otherwise not special.
+The macro expander matches and transcribes every token that does not begin with a `$` literally, including delimiters.
+For parsing reasons, delimiters must be balanced, but they are otherwise not special.
In the matcher, `$` _name_ `:` _designator_ matches the nonterminal in the
Rust syntax named by _designator_. Valid designators are `item`, `block`,
macro rules. In the transcriber, the designator is already known, and so only
the name of a matched nonterminal comes after the dollar sign.
-In bothe the matcher and transcriber, the Kleene star-like operator,
-consisting of `$` and parens, optionally followed by a separator token,
-followed by `*` or `+`, indicates repetition. (`*` means zero or more
-repetitions, `+` means at least one repetition. The parens are not matched or
-transcribed). On the matcher side, a name is bound to _all_ of the names it
+In both the matcher and transcriber, the Kleene star-like operator indicates repetition.
+The Kleene star operator consists of `$` and parens, optionally followed by a separator token, followed by `*` or `+`.
+`*` means zero or more repetitions, `+` means at least one repetition.
+The parens are not matched or transcribed.
+On the matcher side, a name is bound to _all_ of the names it
matches, in a structure that mimics the structure of the repetition
encountered on a successful match. The job of the transcriber is to sort that
structure out.
1. The parser will always parse as much as possible. If it attempts to match
`$i:expr [ , ]` against `8 [ , ]`, it will attempt to parse `i` as an array
index operation and fail. Adding a separator can solve this problem.
-2. The parser must have eliminated all ambiguity by the time it reaches a
-`$` _name_ `:` _designator_. This most often affects them when they occur in
-the beginning of, or immediately after, a `$(...)*`; requiring a distinctive
-token in front can solve the problem.
+2. The parser must have eliminated all ambiguity by the time it reaches a `$` _name_ `:` _designator_.
+This requirement most often affects name-designator pairs when they occur at the beginning of, or immediately after, a `$(...)*`; requiring a distinctive token in front can solve the problem.
## Syntax extensions useful for the macro author
* `log_syntax!` : print out the arguments at compile time
-* `trace_macros!` : supply `true` or `false` to enable or disable printing
-of the macro expansion process.
-* `ident_to_str!` : turns the identifier argument into a string literal
-* `concat_idents!` : creates a new identifier by concatenating its arguments
+* `trace_macros!` : supply `true` or `false` to enable or disable printing of the macro expansion process.
+* `ident_to_str!` : turn the identifier argument into a string literal
+* `concat_idents!` : create a new identifier by concatenating the arguments
# Crates and source files
-Rust is a *compiled* language. Its semantics are divided along a
-*phase distinction* between compile-time and run-time. Those semantic
-rules that have a *static interpretation* govern the success or failure
-of compilation. A program that fails to compile due to violation of a
-compile-time rule has no defined semantics at run-time; the compiler should
-halt with an error report, and produce no executable artifact.
+Rust is a *compiled* language.
+Its semantics obey a *phase distinction* between compile-time and run-time.
+Those semantic rules that have a *static interpretation* govern the success or failure of compilation.
+We refer to these rules as "static semantics".
+Semantic rules called "dynamic semantics" govern the behavior of programs at run-time.
+A program that fails to compile due to violation of a compile-time rule has no defined dynamic semantics; the compiler should halt with an error report, and produce no executable artifact.
-The compilation model centres on artifacts called _crates_. Each compilation
-is directed towards a single crate in source form, and if successful,
-produces a single crate in binary form: either an executable or a library.
+The compilation model centres on artifacts called _crates_.
+Each compilation processes a single crate in source form, and if successful, produces a single crate in binary form: either an executable or a library.
-A _crate_ is a unit of compilation and linking, as well as versioning,
-distribution and runtime loading. A crate contains a _tree_ of nested
-[module](#modules) scopes. The top level of this tree is a module that is
-anonymous -- from the point of view of paths within the module -- and any item
-within a crate has a canonical [module path](#paths) denoting its location
-within the crate's module tree.
+A _crate_ is a unit of compilation and linking, as well as versioning, distribution and runtime loading.
+A crate contains a _tree_ of nested [module](#modules) scopes.
+The top level of this tree is a module that is anonymous (from the point of view of paths within the module) and any item within a crate has a canonical [module path](#paths) denoting its location within the crate's module tree.
Crates are provided to the Rust compiler through two kinds of file:
- _crate files_, that end in `.rc` and each define a `crate`.
- _source files_, that end in `.rs` and each define a `module`.
-The Rust compiler is always invoked with a single input file, and always
-produces a single output crate.
+> **Note:** The functionality of crate files will be merged into source files in future versions of Rust.
+> The separate processing of crate files, both their grammar and file extension, will be removed.
+
+The Rust compiler is always invoked with a single crate file as input, and always produces a single output crate.
When the Rust compiler is invoked with a crate file, it reads the _explicit_
definition of the crate it's compiling from that file, and populates the
crate with modules derived from all the source files referenced by the
crate, reading and processing all the referenced modules at once.
-When the Rust compiler is invoked with a source file, it creates an
-_implicit_ crate and treats the source file as though it was referenced as
-the sole module populating this implicit crate. The module name is derived
-from the source file name, with the `.rs` extension removed.
+When the Rust compiler is invoked with a source file, it creates an _implicit_ crate and treats the source file as if it is the sole module populating this implicit crate.
+The module name is derived from the source file name, with the `.rs` extension removed.
## Crate files
directory associated with a `dir_directive` module can either be explicit,
or if omitted, is implicitly the same name as the module.
-A `source_directive` references a source file, either explicitly or
-implicitly by combining the module name with the file extension `.rs`. The
-module contained in that source file is bound to the module path formed by
-the `dir_directive` modules containing the `source_directive`.
+A `source_directive` references a source file, either explicitly or implicitly, by combining the module name with the file extension `.rs`.
+The module contained in that source file is bound to the module path formed by the `dir_directive` modules containing the `source_directive`.
## Source files
from outside the source file: either by an explicit `source_directive` in
a referencing crate file, or by the filename of the source file itself.
-A source file that contains a `main` function can be compiled to an
-executable. If a `main` function is present, it must have no [type parameters](#type-parameters)
-and no [constraints](#constraints). Its return type must be [`nil`](#primitive-types) and it must either have no arguments, or a single argument of type `[~str]`.
+A source file that contains a `main` function can be compiled to an executable.
+If a `main` function is present, its return type must be [`unit`](#primitive-types) and it must take no arguments.
# Items and attributes
-A crate is a collection of [items](#items), each of which may have some number
-of [attributes](#attributes) attached to it.
+Crates contain [items](#items),
+each of which may have some number of [attributes](#attributes) attached to it.
## Items
~~~~~~~~ {.ebnf .gram}
item : mod_item | fn_item | type_item | enum_item
- | res_item | trait_item | impl_item | foreign_mod_item ;
+ | const_item | trait_item | impl_item | foreign_mod_item ;
~~~~~~~~
An _item_ is a component of a crate; some module items can be defined in crate
* [modules](#modules)
* [functions](#functions)
* [type definitions](#type-definitions)
+ * [structures](#structures)
* [enumerations](#enumerations)
- * [resources](#resources)
+ * [constants](#constants)
* [traits](#traits)
* [implementations](#implementations)
as if the item was declared outside the scope -- it is still a static item --
except that the item's *path name* within the module namespace is qualified by
the name of the enclosing item, or is private to the enclosing item (in the
-case of functions). The exact locations in which sub-items may be declared is
-given by the grammar.
+case of functions).
+The grammar specifies the exact locations in which sub-item declarations may appear.
### Type Parameters
-All items except modules may be *parametrized* by type. Type parameters are
+All items except modules may be *parameterized* by type. Type parameters are
given as a comma-separated list of identifiers enclosed in angle brackets
-(`<...>`), after the name of the item and before its definition. The type
-parameters of an item are considered "part of the name", not the type of the
-item; in order to refer to the type-parametrized item, a referencing
-[path](#paths) must in general provide type arguments as a list of
-comma-separated types enclosed within angle brackets. In practice, the
-type-inference system can usually infer such argument types from
-context. There are no general type-parametric types, only type-parametric
-items.
-
+(`<...>`), after the name of the item and before its definition.
+The type parameters of an item are considered "part of the name", not part of the type of the item.
+A referencing [path](#paths) must (in principle) provide type arguments as a list of comma-separated types enclosed within angle brackets, in order to refer to the type-parameterized item.
+In practice, the type-inference system can usually infer such argument types from context.
+There are no general type-parametric types, only type-parametric items.
+That is, Rust has no notion of type abstraction: there are no first-class "forall" types.
### Modules
view_item : extern_mod_decl | use_decl ;
~~~~~~~~
-A view item manages the namespace of a module; it does not define new items
-but simply changes the visibility of other items. There are several kinds of
-view item:
+A view item manages the namespace of a module.
+View items do not define new items, but rather, simply change other items' visibility.
+There are several kinds of view item:
* [`extern mod` declarations](#extern-mod-declarations)
* [`use` declarations](#use-declarations)
crate when it was compiled. If no `link_attrs` are provided, a default `name`
attribute is assumed, equal to the `ident` given in the `use_decl`.
-Two examples of `extern mod` declarations:
+Three examples of `extern mod` declarations:
~~~~~~~~{.xfail-test}
extern mod pcre (uuid = "54aba0f8-a7b1-4beb-92f1-4cf625264841");
~~~~~~~~
A _use declaration_ creates one or more local name bindings synonymous
-with some other [path](#paths). Usually an use declaration is used to
+with some other [path](#paths). Usually a `use` declaration is used to
shorten the path required to refer to a module item.
*Note*: unlike many languages, Rust's `use` declarations do *not* declare
log(info, Some(1.0));
// Equivalent to 'log(core::info,
- // core::str::to_upper(core::str::slice(~"foo", 0u, 1u)));'
- log(info, to_upper(slice(~"foo", 0u, 1u)));
+ // core::str::to_upper(core::str::slice("foo", 0, 1)));'
+ log(info, to_upper(slice("foo", 0, 1)));
}
~~~~
### Functions
-A _function item_ defines a sequence of [statements](#statements) and an
-optional final [expression](#expressions) associated with a name and a set of
-parameters. Functions are declared with the keyword `fn`. Functions declare a
-set of *input* [*slots*](#memory-slots) as parameters, through which the
-caller passes arguments into the function, and an *output*
-[*slot*](#memory-slots) through which the function passes results back to
-the caller.
+A _function item_ defines a sequence of [statements](#statements) and an optional final [expression](#expressions), along with a name and a set of parameters.
+Functions are declared with the keyword `fn`.
+Functions declare a set of *input* [*slots*](#memory-slots) as parameters, through which the caller passes arguments into the function, and an *output* [*slot*](#memory-slots) through which the function passes results back to the caller.
A function may also be copied into a first class *value*, in which case the
value has the corresponding [*function type*](#function-types), and can be
}
~~~~
-#### Diverging functions
-A special kind of function can be declared with a `!` character where the
-output slot type would normally be. For example:
+#### Generic functions
-~~~~
-fn my_err(s: ~str) -> ! {
- log(info, s);
- fail;
+A _generic function_ allows one or more _parameterized types_ to
+appear in its signature. Each type parameter must be explicitly
+declared, in an angle-bracket-enclosed, comma-separated list following
+the function name.
+
+~~~~ {.xfail-test}
+fn iter<T>(seq: &[T], f: fn(T)) {
+ for seq.each |elt| { f(elt); }
+}
+fn map<T, U>(seq: &[T], f: fn(T) -> U) -> ~[U] {
+ let mut acc = ~[];
+ for seq.each |elt| { acc.push(f(elt)); }
+ acc
}
~~~~
-We call such functions "diverging" because they never return a value to the
-caller. Every control path in a diverging function must end with a
-[`fail`](#fail-expressions) or a call to another diverging function on every
-control path. The `!` annotation does *not* denote a type. Rather, the result
-type of a diverging function is a special type called $\bot$ ("bottom") that
-unifies with any type. Rust has no syntax for $\bot$.
+Inside the function signature and body, the name of the type parameter
+can be used as a type name.
-It might be necessary to declare a diverging function because as mentioned
-previously, the typechecker checks that every control path in a function ends
-with a [`return`](#return-expressions) or diverging expression. So, if `my_err`
-were declared without the `!` annotation, the following code would not
-typecheck:
+When a generic function is referenced, its type is instantiated based
+on the context of the reference. For example, calling the `iter`
+function defined above on `[1, 2]` will instantiate type parameter `T`
+with `int`, and require the closure parameter to have type
+`fn(int)`.
-~~~~
-# fn my_err(s: ~str) -> ! { fail }
+Since a parameter type is opaque to the generic function, the set of
+operations that can be performed on it is limited. Values of parameter
+type can always be moved, but they can only be copied when the
+parameter is given a [`Copy` bound](#type-kinds).
-fn f(i: int) -> int {
- if i == 42 {
- return 42;
- }
- else {
- my_err(~"Bad number!");
- }
-}
~~~~
+fn id<T: Copy>(x: T) -> T { x }
+~~~~
+
+Similarly, [trait](#traits) bounds can be specified for type
+parameters to allow methods with that trait to be called on values
+of that type.
+
+
+#### Unsafe functions
+
+Unsafe functions are those containing unsafe operations that are not contained in an [`unsafe` block](#unsafe-blocks).
+Such a function must be prefixed with the keyword `unsafe`.
+
+Unsafe operations are those that potentially violate the memory-safety guarantees of Rust's static semantics.
+Specifically, the following operations are considered unsafe:
+
+ - Dereferencing a [raw pointer](#pointer-types).
+ - Casting a [raw pointer](#pointer-types) to a safe pointer type.
+ - Breaking the [purity-checking rules](#pure-functions) in a `pure` function.
+ - Calling an unsafe function.
+
+##### Unsafe blocks
+
+A block of code can also be prefixed with the `unsafe` keyword, to permit a sequence of unsafe operations in an otherwise-safe function.
+This facility exists because the static semantics of Rust are a necessary approximation of the dynamic semantics.
+When a programmer has sufficient conviction that a sequence of unsafe operations is actually safe, they can encapsulate that sequence (taken as a whole) within an `unsafe` block. The compiler will consider uses of such code "safe" in the surrounding context.
-The typechecker would complain that `f` doesn't return a value in the
-`else` branch. Adding the `!` annotation on `my_err` would
-express that `f` requires no explicit `return`, as if it returns
-control to the caller, it returns a value (true because it never returns
-control).
#### Pure functions
A pure function declaration is identical to a function declaration, except that
it is declared with the additional keyword `pure`. In addition, the typechecker
checks the body of a pure function with a restricted set of typechecking rules.
-A pure function
-
-* may not contain an assignment or self-call expression; and
-* may only call other pure functions, not general functions.
+A pure function may only modify data owned by its own stack frame.
+So, a pure function may modify a local variable allocated on the stack, but not a mutable reference that it takes as an argument.
+A pure function may only call other pure functions, not general functions.
An example of a pure function:
These purity-checking rules approximate the concept of referential transparency:
that a call-expression could be rewritten with the literal-expression of its return value, without changing the meaning of the program.
Since they are an approximation, sometimes these rules are *too* restrictive.
-Rust allows programmers to violate these rules using [`unsafe` blocks](#unsafe-blocks).
+Rust allows programmers to violate these rules using [`unsafe` blocks](#unsafe-blocks), which we already saw.
As with any `unsafe` block, those that violate static purity transfer the burden of safety-proof from the compiler to the programmer.
Programmers should exercise caution when breaking such rules.
-An example of a pure function that uses an unsafe block:
+For more details on purity, see [the borrowed pointer tutorial][borrow].
-~~~~ {.xfail-test}
-# use std::list::*;
+[borrow]: tutorial-borrowed-ptr.html
-fn pure_foldl<T, U: Copy>(ls: List<T>, u: U, f: fn(&&T, &&U) -> U) -> U {
- match ls {
- Nil => u,
- Cons(hd, tl) => f(hd, pure_foldl(*tl, f(hd, u), f))
- }
-}
-
-pure fn pure_length<T>(ls: List<T>) -> uint {
- fn count<T>(_t: T, &&u: uint) -> uint { u + 1u }
- unsafe {
- pure_foldl(ls, 0u, count)
- }
-}
-~~~~
-
-Despite its name, `pure_foldl` is a `fn`, not a `pure fn`, because there is no
-way in Rust to specify that the higher-order function argument `f` is a pure
-function. So, to use `foldl` in a pure list length function that a pure function
-could then use, we must use an `unsafe` block wrapped around the call to
-`pure_foldl` in the definition of `pure_length`.
-
-#### Generic functions
+#### Diverging functions
-A _generic function_ allows one or more _parameterized types_ to
-appear in its signature. Each type parameter must be explicitly
-declared, in an angle-bracket-enclosed, comma-separated list following
-the function name.
+A special kind of function can be declared with a `!` character where the
+output slot type would normally be. For example:
-~~~~ {.xfail-test}
-fn iter<T>(seq: ~[T], f: fn(T)) {
- for seq.each |elt| { f(elt); }
-}
-fn map<T, U>(seq: ~[T], f: fn(T) -> U) -> ~[U] {
- let mut acc = ~[];
- for seq.each |elt| { acc.push(f(elt)); }
- acc
+~~~~
+fn my_err(s: &str) -> ! {
+ log(info, s);
+ fail;
}
~~~~
-Inside the function signature and body, the name of the type parameter
-can be used as a type name.
-
-When a generic function is referenced, its type is instantiated based
-on the context of the reference. For example, calling the `iter`
-function defined above on `[1, 2]` will instantiate type parameter `T`
-with `int`, and require the closure parameter to have type
-`fn(int)`.
+We call such functions "diverging" because they never return a value to the
+caller. Every control path in a diverging function must end with a
+[`fail`](#fail-expressions) or a call to another diverging function.
+The `!` annotation does *not* denote a type. Rather, the result
+type of a diverging function is a special type called $\bot$ ("bottom") that
+unifies with any type. Rust has no syntax for $\bot$.
-Since a parameter type is opaque to the generic function, the set of
-operations that can be performed on it is limited. Values of parameter
-type can always be moved, but they can only be copied when the
-parameter is given a [`copy` bound](#type-kinds).
+It might be necessary to declare a diverging function because as mentioned
+previously, the typechecker checks that every control path in a function ends
+with a [`return`](#return-expressions) or diverging expression. So, if `my_err`
+were declared without the `!` annotation, the following code would not
+typecheck:
~~~~
-fn id<T: Copy>(x: T) -> T { x }
-~~~~
-
-Similarly, [trait](#traits) bounds can be specified for type
-parameters to allow methods with that trait to be called on values
-of that type.
-
-#### Unsafe functions
-
-Unsafe functions are those containing unsafe operations that are not contained in an [`unsafe` block](#unsafe-blocks).
+# fn my_err(s: &str) -> ! { fail }
-Unsafe operations are those that potentially violate the memory-safety guarantees of Rust's static semantics.
-Specifically, the following operations are considered unsafe:
-
- - Dereferencing a [raw pointer](#pointer-types)
- - Casting a [raw pointer](#pointer-types) to a safe pointer type
- - Breaking the [purity-checking rules](#pure-functions)
- - Calling an unsafe function
-
-##### Unsafe blocks
+fn f(i: int) -> int {
+ if i == 42 {
+ return 42;
+ }
+ else {
+ my_err("Bad number!");
+ }
+}
+~~~~
-A block of code can also be prefixed with the `unsafe` keyword,
-to permit a sequence of unsafe operations in an otherwise-safe function.
-This facility exists because the static semantics of a Rust are a necessary approximation of the dynamic semantics.
-When a programmer has sufficient conviction that a sequence of unsafe operations is actually safe,
-they can encapsulate that sequence (taken as a whole) within an `unsafe` block.
-The compiler will consider uses of such code "safe", to the surrounding context.
+This will not compile without the `!` annotation on `my_err`,
+since the `else` branch of the conditional in `f` does not return an `int`,
+as required by the signature of `f`.
+Adding the `!` annotation to `my_err` informs the typechecker that,
+should control ever enter `my_err`, no further type judgments about `f` need to hold,
+since control will never resume in any context that relies on those judgments.
+Thus the return type on `f` only needs to reflect the `if` branch of the conditional.
#### Extern functions
* Whether the value represents textual or numerical information.
* Whether the value represents integral or floating-point information.
* The sequence of memory operations required to access the value.
-* The *kind* of the type (pinned, unique or shared).
+* The [kind](#type-kinds) of the type.
-For example, the type `{x: u8, y: u8`} defines the set of immutable values
-that are composite records, each containing two unsigned 8-bit integers
-accessed through the components `x` and `y`, and laid out in memory with the
-`x` component preceding the `y` component.
+For example, the type `(u8, u8)` defines the set of immutable values that are composite pairs,
+each containing two unsigned 8-bit integers accessed by pattern-matching and laid out in memory with the first component preceding the second.
-### Enumerations
+### Structures
-An _enumeration item_ simultaneously declares a new nominal
-[enumerated type](#enumerated-types) as well as a set of *constructors* that
-can be used to create or pattern-match values of the corresponding enumerated
-type. Note that `enum` previously was referred to as a `tag`, however this
-definition has been deprecated. While `tag` is no longer used, the two are
-synonymous.
+A _structure_ is a nominal [structure type](#structure-types) defined with the keyword `struct`.
-The constructors of an `enum` type may be recursive: that is, each constructor
-may take an argument that refers, directly or indirectly, to the enumerated
-type the constructor is a member of. Such recursion has restrictions:
+An example of a `struct` item and its use:
-* Recursive types can be introduced only through `enum` constructors.
-* A recursive `enum` item must have at least one non-recursive constructor (in
- order to give the recursion a basis case).
-* The recursive argument of recursive `enum` constructors must be [*box*
- values](#box-types) (in order to bound the in-memory size of the
- constructor).
-* Recursive type definitions can cross module boundaries, but not module
- *visibility* boundaries or crate boundaries (in order to simplify the
- module system).
+~~~~
+struct Point {x: int, y: int}
+let p = Point {x: 10, y: 11};
+let px: int = p.x;
+~~~~
+
+### Enumerations
+An _enumeration_ is a simultaneous definition of a nominal [enumerated type](#enumerated-types) as well as a set of *constructors*
+that can be used to create or pattern-match values of the corresponding enumerated type.
+
+Enumerations are declared with the keyword `enum`.
An example of an `enum` item and its use:
~~~~
-enum animal {
- dog,
- cat
+enum Animal {
+ Dog,
+ Cat
}
-let mut a: animal = dog;
-a = cat;
+let mut a: Animal = Dog;
+a = Cat;
~~~~
-An example of a *recursive* `enum` item and its use:
+### Constants
-~~~~
-enum list<T> {
- nil,
- cons(T, @list<T>)
-}
+~~~~~~~~ {.ebnf .gram}
+const_item : "const" ident ':' type '=' expr ';' ;
+~~~~~~~~
-let a: list<int> = cons(7, @cons(13, @nil));
-~~~~
+A *constant* is a named value stored in read-only memory in a crate.
+The value bound to a constant is evaluated at compile time.
+Constants are declared with the `const` keyword.
+A constant item must have an expression giving its definition.
+The definition expression of a constant is limited to expression forms that can be evaluated at compile time.
### Traits
-A _trait item_ describes a set of method types. [_implementation
-items_](#implementations) can be used to provide implementations of
-those methods for a specific type.
+A _trait_ describes a set of method types.
+
+Traits can include default implementations of methods,
+written in terms of some unknown [`self` type](#self-types);
+the `self` type may either be completely unspecified,
+or constrained by some other [trait type](#trait-types).
+
+Traits are implemented for specific types through separate [implementations](#implementations).
~~~~
-# type surface = int;
-# type bounding_box = int;
+# type Surface = int;
+# type BoundingBox = int;
-trait shape {
- fn draw(surface);
- fn bounding_box() -> bounding_box;
+trait Shape {
+ fn draw(Surface);
+ fn bounding_box() -> BoundingBox;
}
~~~~
-This defines a trait with two methods. All values that have
-[implementations](#implementations) of this trait in scope can
-have their `draw` and `bounding_box` methods called, using
-`value.bounding_box()` [syntax](#field-expressions).
+This defines a trait with two methods.
+All values that have [implementations](#implementations) of this trait in scope can have their `draw` and `bounding_box` methods called,
+using `value.bounding_box()` [syntax](#method-call-expressions).
Type parameters can be specified for a trait to make it generic.
-These appear after the name, using the same syntax used in [generic
-functions](#generic-functions).
+These appear after the trait name, using the same syntax used in [generic functions](#generic-functions).
~~~~
-trait seq<T> {
+trait Seq<T> {
fn len() -> uint;
fn elt_at(n: uint) -> T;
fn iter(fn(T));
}
~~~~
-Generic functions may use traits as bounds on their type
-parameters. This will have two effects: only types that have the trait
-may instantiate the parameter, and within the
-generic function, the methods of the trait can be called on values
-that have the parameter's type. For example:
+Generic functions may use traits as _bounds_ on their type parameters.
+This will have two effects: only types that have the trait may instantiate the parameter,
+and within the generic function,
+the methods of the trait can be called on values that have the parameter's type.
+For example:
~~~~
-# type surface = int;
-# trait shape { fn draw(surface); }
+# type Surface = int;
+# trait Shape { fn draw(Surface); }
-fn draw_twice<T: shape>(surface: surface, sh: T) {
+fn draw_twice<T: Shape>(surface: Surface, sh: T) {
sh.draw(surface);
sh.draw(surface);
}
~~~~
-Trait items also define a type with the same name as the
-trait. Values of this type are created by
-[casting](#type-cast-expressions) values (of a type for which an
-implementation of the given trait is in scope) to the trait
-type.
+Traits also define a [type](#trait-types) with the same name as the trait.
+Values of this type are created by [casting](#type-cast-expressions) pointer values
+(pointing to a type for which an implementation of the given trait is in scope)
+to pointers to the trait name, used as a type.
~~~~
-# trait shape { }
-# impl int: shape { }
+# trait Shape { }
+# impl int: Shape { }
# let mycircle = 0;
-let myshape: shape = mycircle as shape;
+let myshape: Shape = @mycircle as @Shape;
~~~~
-The resulting value is a reference-counted box containing the value
-that was cast along with information that identify the methods of the
-implementation that was used. Values with a trait type can always
-have methods from their trait called on them, and can be used to
-instantiate type parameters that are bounded by their trait.
+The resulting value is a managed box containing the value that was cast,
+along with information that identifies the methods of the implementation that was used.
+Values with a trait type can have [methods called](#method-call-expressions) on them,
+for any method in the trait,
+and can be used to instantiate type parameters that are bounded by the trait.
### Implementations
-An _implementation item_ provides an implementation of a
-[trait](#traits) for a type.
+An _implementation_ is an item that implements a [trait](#traits) for a specific type.
+
+Implementations are defined with the keyword `impl`.
~~~~
-# type point = {x: float, y: float};
-# type surface = int;
-# type bounding_box = {x: float, y: float, width: float, height: float};
-# trait shape { fn draw(surface); fn bounding_box() -> bounding_box; }
-# fn do_draw_circle(s: surface, c: circle) { }
+# type Point = {x: float, y: float};
+# type Surface = int;
+# type BoundingBox = {x: float, y: float, width: float, height: float};
+# trait Shape { fn draw(Surface); fn bounding_box() -> BoundingBox; }
+# fn do_draw_circle(s: Surface, c: Circle) { }
-type circle = {radius: float, center: point};
+type Circle = {radius: float, center: Point};
-impl circle: shape {
- fn draw(s: surface) { do_draw_circle(s, self); }
- fn bounding_box() -> bounding_box {
+impl Circle: Shape {
+ fn draw(s: Surface) { do_draw_circle(s, self); }
+ fn bounding_box() -> BoundingBox {
let r = self.radius;
{x: self.center.x - r, y: self.center.y - r,
width: 2.0 * r, height: 2.0 * r}
}
~~~~
-It is possible to define an implementation without referring to a
-trait. The methods in such an implementation can only be used
-statically (as direct calls on the values of the type that the
-implementation targets). In such an implementation, the type after the colon is omitted,
-and the name is mandatory. Such implementations are
-limited to nominal types (enums, structs) and the implementation must
-appear in the same module or a sub-module as the receiver type.
+It is possible to define an implementation without referring to a trait.
+The methods in such an implementation can only be used statically
+(as direct calls on the values of the type that the implementation targets).
+In such an implementation, the type after the colon is omitted.
+Such implementations are limited to nominal types (enums, structs),
+and the implementation must appear in the same module or a sub-module as the `self` type.
-_When_ a trait is specified, all methods declared as part of the
-trait must be present, with matching types and type parameter
-counts, in the implementation.
+When a trait _is_ specified in an `impl`,
+all methods declared as part of the trait must be implemented,
+with matching types and type parameter counts.
-An implementation can take type parameters, which can be different
-from the type parameters taken by the trait it implements. They
-are written after the name of the implementation, or if that is not
-specified, after the `impl` keyword.
+An implementation can take type parameters,
+which can be different from the type parameters taken by the trait it implements.
+Implementation parameters are written after the `impl` keyword.
~~~~
-# trait seq<T> { }
+# trait Seq<T> { }
-impl<T> ~[T]: seq<T> {
+impl<T> ~[T]: Seq<T> {
...
}
-impl u32: seq<bool> {
+impl u32: Seq<bool> {
/* Treat the integer as a sequence of bits */
}
~~~~
Foreign modules form the basis for Rust's foreign function interface. A
foreign module describes functions in external, non-Rust
-libraries. Functions within foreign modules are declared the same as other
-Rust functions, with the exception that they may not have a body and are
-instead terminated by a semi-colon.
+libraries.
+Functions within foreign modules are declared in the same way as other Rust functions,
+with the exception that they may not have a body and are instead terminated by a semicolon.
~~~
# use libc::{c_char, FILE};
}
~~~
-Functions within foreign modules may be called by Rust code as it would any
-normal function and the Rust compiler will automatically translate between
-the Rust ABI and the foreign ABI.
+Functions within foreign modules may be called by Rust code, just like functions defined in Rust.
+The Rust compiler automatically translates between the Rust ABI and the foreign ABI.
The name of the foreign module has special meaning to the Rust compiler in
that it will treat the module name as the name of a library to link to,
modules.
By default foreign modules assume that the library they are calling use the
-standard C "cdecl" ABI. Other ABI's may be specified using the `abi`
+standard C "cdecl" ABI. Other ABIs may be specified using the `abi`
attribute as in
~~~{.xfail-test}
~~~
The `link_name` attribute allows the default library naming behavior to
-be overriden by explicitly specifying the name of the library.
+be overridden by explicitly specifying the name of the library.
~~~{.xfail-test}
#[link_name = "crypto"]
extern mod mycrypto { }
~~~
-The `nolink` attribute tells the Rust compiler not to perform any linking
-for the foreign module. This is particularly useful for creating foreign
+The `nolink` attribute tells the Rust compiler not to do any linking for the foreign module.
+This is particularly useful for creating foreign
modules for libc, which tends to not follow standard library naming
conventions and is linked to all Rust programs anyway.
Static entities in Rust -- crates, modules and items -- may have _attributes_
applied to them. ^[Attributes in Rust are modeled on Attributes in ECMA-335,
-C#] An attribute is a general, free-form piece of metadata that is interpreted
-according to name, convention, and language and compiler version. Attributes
-may appear as any of:
+C#]
+An attribute is a general, free-form metadatum that is interpreted according to name, convention, and language and compiler version.
+Attributes may appear as any of
* A single identifier, the attribute name
* An identifier followed by the equals sign '=' and a literal, providing a key/value pair
fn add(x: int, y: int) { x + y }
~~~~~~~~
-In future versions of Rust, user-provided extensions to the compiler will be
-able to interpret attributes. When this facility is provided, the compiler
-will distinguish will be made between language-reserved and user-available
-attributes.
+> **Note:** In future versions of Rust, user-provided extensions to the compiler will be able to interpret attributes.
+> When this facility is provided, the compiler will distinguish between language-reserved and user-available attributes.
At present, only the Rust compiler interprets attributes, so all attribute
names are effectively reserved. Some significant attributes include:
## Statements
A _statement_ is a component of a block, which is in turn a component of an
-outer [expression](#expressions) or [function](#functions). When a function is
-spawned into a [task](#tasks), the task *executes* statements in an order
-determined by the body of the enclosing function. Each statement causes the
-task to perform certain actions.
+outer [expression](#expressions) or [function](#functions).
Rust has two kinds of statement:
[declaration statements](#declaration-statements) and
### Declaration statements
-A _declaration statement_ is one that introduces a *name* into the enclosing
-statement block. The declared name may denote a new slot or a new item.
+A _declaration statement_ is one that introduces one or more *names* into the enclosing statement block.
+The declared names may denote new slots or new items.
#### Item declarations
An _item declaration statement_ has a syntactic form identical to an
[item](#items) declaration within a module. Declaring an item -- a function,
-enumeration, type, resource, trait, implementation or module -- locally
+enumeration, type, constant, trait, implementation or module -- locally
within a statement block is simply a way of restricting its scope to a narrow
region containing all of its uses; it is otherwise identical in meaning to
declaring the item outside the statement block.
init : [ '=' | '<-' ] expr ;
~~~~~~~~
-
-A _slot declaration_ has one of two forms:
-
-* `let` `pattern` `optional-init`;
-* `let` `pattern` : `type` `optional-init`;
-
-Where `type` is a type expression, `pattern` is an irrefutable pattern (often
-just the name of a single slot), and `optional-init` is an optional
-initializer. If present, the initializer consists of either an assignment
-operator (`=`) or move operator (`<-`), followed by an expression.
-
-Both forms introduce a new slot into the enclosing block scope. The new slot
-is visible from the point of declaration until the end of the enclosing block
-scope.
-
-The former form, with no type annotation, causes the compiler to infer the
-static type of the slot through unification with the types of values assigned
-to the slot in the remaining code in the block scope. Inference only occurs on
-frame-local variable, not argument slots. Function signatures must
-always declare types for all argument slots.
-
+A _slot declaration_ introduces a new set of slots, given by a pattern.
+The pattern may be followed by a type annotation, and/or an initializer expression.
+When no type annotation is given, the compiler will infer the type,
+or signal an error if insufficient type information is available for definite inference.
+Any slots introduced by a slot declaration are visible from the point of declaration until the end of the enclosing block scope.
### Expression statements
Likewise within each expression, sub-expressions may occur in _lvalue context_ or _rvalue context_.
The evaluation of an expression depends both on its own category and the context it occurs within.
-Path, field and index expressions are lvalues.
+[Path](#path-expressions), [field](#field-expressions) and [index](#index-expressions) expressions are lvalues.
All other expressions are rvalues.
-The left operand of an assignment expression and the operand of the borrow operator are lvalue contexts.
+The left operand of an [assignment](#assignment-expressions),
+[binary move](#binary-move-expressions) or
+[compound-assignment](#compound-assignment-expressions) expression is an lvalue context,
+as is the single operand of a unary [borrow](#unary-operator-expressions),
+or [move](#unary-move-expressions) expression,
+and _both_ operands of a [swap](#swap-expressions) expression.
All other expression contexts are rvalue contexts.
When an lvalue is evaluated in an _lvalue context_, it denotes a memory location;
A _literal expression_ consists of one of the [literal](#literals)
forms described earlier. It directly describes a number, character,
-string, boolean value, or the nil value.
+string, boolean value, or the unit value.
~~~~~~~~ {.literals}
-(); // nil type
-~"hello"; // string type
+(); // unit type
+"hello"; // string type
'5'; // character type
5; // integer type
~~~~~~~~
+### Path expressions
+
+A [path](#paths) used as an expression context denotes either a local variable or an item.
+Path expressions are [lvalues](#lvalues-rvalues-and-temporaries).
+
### Tuple expressions
Tuples are written by enclosing two or more comma-separated
~~~~~~~~ {.tuple}
(0f, 4.5f);
-(~"a", 4u, true);
+("a", 4u, true);
~~~~~~~~
### Record expressions
~~~~~~~~{.ebnf .gram}
rec_expr : '{' ident ':' expr
[ ',' ident ':' expr ] *
- [ "with" expr ] '}'
+ [ ".." expr ] '}'
~~~~~~~~
A [_record_](#record-types) _expression_ is one or more comma-separated
~~~~
{x: 10f, y: 20f};
-{name: ~"Joe", age: 35u, score: 100_000};
-{ident: ~"X", mut count: 0u};
+{name: "Joe", age: 35u, score: 100_000};
+{ident: "X", mut count: 0u};
~~~~
The order of the fields in a record expression is significant, and
{y: 0, z: 10, .. base};
~~~~
+### Method-call expressions
+
+~~~~~~~~{.ebnf .gram}
+method_call_expr : expr '.' ident paren_expr_list ;
+~~~~~~~~
+
+A _method call_ consists of an expression followed by a single dot, an identifier, and a parenthesized expression-list.
+Method calls are resolved to methods on specific traits,
+either statically dispatching to a method if the exact `self`-type of the left-hand-side is known,
+or dynamically dispatching if the left-hand-side expression is an indirect [trait type](#trait-types).
+
+
### Field expressions
~~~~~~~~{.ebnf .gram}
-field_expr : expr '.' expr
+field_expr : expr '.' ident
~~~~~~~~
-A dot can be used to access a field in a record.
+A _field expression_ consists of an expression followed by a single dot and an identifier,
+when not immediately followed by a parenthesized expression-list (the latter is a [method call expression](#method-call-expressions)).
+A field expression denotes a field of a [structure](#structure-types) or [record](#record-types).
~~~~~~~~ {.field}
myrecord.myfield;
{a: 10, b: 20}.a;
~~~~~~~~
-A field access on a record is an _lval_ referring to the value of that
-field. When the field is mutable, it can be
-[assigned](#assignment-expressions) to.
+A field access on a record is an [lvalue](#lvalues-rvalues-and-temporaries) referring to the value of that field.
+When the field is mutable, it can be [assigned](#assignment-expressions) to.
-When the type of the expression to the left of the dot is a boxed
-record, it is automatically derferenced to make the field access
-possible.
+When the type of the expression to the left of the dot is a pointer to a record or structure,
+it is automatically dereferenced to make the field access possible.
-Field access syntax is overloaded for [trait method](#traits)
-access. When no matching field is found, or the expression to the left
-of the dot is not a (boxed) record, an
-[implementation](#implementations) that matches this type and the
-given method name is looked up instead, and the result of the
-expression is this method, with its _self_ argument bound to the
-expression on the left of the dot.
### Vector expressions
When no mutability is specified, the vector is immutable.
~~~~
-~[1, 2, 3, 4];
-~[~"a", ~"b", ~"c", ~"d"];
-~[mut 0u8, 0u8, 0u8, 0u8];
+[1, 2, 3, 4];
+["a", "b", "c", "d"];
+[mut 0u8, 0u8, 0u8, 0u8];
~~~~
### Index expressions
[Vector](#vector-types)-typed expressions can be indexed by writing a
square-bracket-enclosed expression (the index) after them. When the
-vector is mutable, the resulting _lval_ can be assigned to.
+vector is mutable, the resulting [lvalue](#lvalues-rvalues-and-temporaries) can be assigned to.
Indices are zero-based, and may be of any integral type. Vector access
is bounds-checked at run-time. When the check fails, it will put the
~~~~
# do task::spawn_unlinked {
-(~[1, 2, 3, 4])[0];
-(~[mut 'x', 'y'])[1] = 'z';
-(~[~"a", ~"b"])[10]; // fails
+([1, 2, 3, 4])[0];
+([mut 'x', 'y'])[1] = 'z';
+(["a", "b"])[10]; // fails
# }
~~~~
### Unary operator expressions
-Rust defines five unary operators. They are all written as prefix
-operators, before the expression they apply to.
+Rust defines six symbolic unary operators,
+in addition to the unary [copy](#unary-copy-expressions) and [move](#unary-move-expressions) operators.
+They are all written as prefix operators, before the expression they apply to.
`-`
: Negation. May only be applied to numeric types.
`*`
- : Dereference. When applied to a [box](#box-types) or
- [resource](#resources) type, it accesses the inner value. For
- mutable boxes, the resulting _lval_ can be assigned to. For
- [enums](#enumerated-types) that have only a single variant,
- containing a single parameter, the dereference operator accesses
- this parameter.
+ : Dereference. When applied to a [pointer](#pointer-types) it denotes the pointed-to location.
+ For pointers to mutable locations, the resulting [lvalue](#lvalues-rvalues-and-temporaries) can be assigned to.
+ For [enums](#enumerated-types) that have only a single variant, containing a single parameter,
+ the dereference operator accesses this parameter.
`!`
: Logical negation. On the boolean type, this flips between `true` and
`false`. On integer types, this inverts the individual bits in the
two's complement representation of the value.
`@` and `~`
- : [Boxing](#box-types) operators. Allocate a box to hold the value
- they are applied to, and store the value in it. `@` creates a
- shared, reference-counted box, whereas `~` creates a unique box.
+ : [Boxing](#pointer-types) operators. Allocate a box to hold the value they are applied to,
+ and store the value in it. `@` creates a managed box, whereas `~` creates an owned box.
+`&`
+ : Borrow operator. Returns a borrowed pointer, pointing to its operand.
+ The operand of a borrowed pointer is statically proven to outlive the resulting pointer.
+ If the borrow-checker cannot prove this, it is a compilation error.
### Binary operator expressions
#### Arithmetic operators
-Binary arithmetic expressions require both their operands to be of the
-same type, and can be applied only to numeric types, with the
-exception of `+`, which acts both as addition operator on numbers and
-as concatenate operator on vectors and strings.
+Binary arithmetic expressions are syntactic sugar for calls to built-in traits,
+defined in the `core::ops` module of the `core` library.
+This means that arithmetic operators can be overridden for user-defined types.
+The default meaning of the operators on standard types is given here.
`+`
: Addition and vector/string concatenation.
+ Calls the `add` method on the `core::ops::Add` trait.
`-`
: Subtraction.
+ Calls the `sub` method on the `core::ops::Sub` trait.
`*`
: Multiplication.
+ Calls the `mul` method on the `core::ops::Mul` trait.
`/`
: Division.
+ Calls the `div` method on the `core::ops::Div` trait.
`%`
- : Remainder.
+ : Modulo (a.k.a. "remainder").
+ Calls the `modulo` method on the `core::ops::Modulo` trait.
#### Bitwise operators
-Bitwise operators apply only to integer types, and perform their
-operation on the bits of the two's complement representation of the
-values.
+Bitwise operators are, like the [arithmetic operators](#arithmetic-operators),
+syntactic sugar for calls to built-in traits.
+This means that bitwise operators can be overridden for user-defined types.
+The default meaning of the operators on standard types is given here.
`&`
: And.
+ Calls the `bitand` method on the `core::ops::BitAnd` trait.
`|`
: Inclusive or.
+ Calls the `bitor` method on the `core::ops::BitOr` trait.
`^`
: Exclusive or.
+ Calls the `bitxor` method on the `core::ops::BitXor` trait.
`<<`
: Logical left shift.
+ Calls the `shl` method on the `core::ops::Shl` trait.
`>>`
: Logical right shift.
-`>>>`
- : Arithmetic right shift.
+ Calls the `shr` method on the `core::ops::Shr` trait.
#### Lazy boolean operators
#### Comparison operators
+Comparison operators are, like the [arithmetic operators](#arithmetic-operators),
+and [bitwise operators](#bitwise-operators),
+syntactic sugar for calls to built-in traits.
+This means that comparison operators can be overridden for user-defined types.
+The default meaning of the operators on standard types is given here.
+
`==`
: Equal to.
+ Calls the `eq` method on the `core::cmp::Eq` trait.
`!=`
: Unequal to.
+ Calls the `ne` method on the `core::cmp::Eq` trait.
`<`
: Less than.
+ Calls the `lt` method on the `core::cmp::Ord` trait.
`>`
: Greater than.
+ Calls the `gt` method on the `core::cmp::Ord` trait.
`<=`
: Less than or equal.
+ Calls the `le` method on the `core::cmp::Ord` trait.
`>=`
: Greater than or equal.
+ Calls the `ge` method on the `core::cmp::Ord` trait.
-The binary comparison operators can be applied to any two operands of
-the same type, and produce a boolean value.
-
-*TODO* details on how types are descended during comparison.
#### Type cast expressions
An example of an `as` expression:
~~~~
-# fn sum(v: ~[float]) -> float { 0.0 }
-# fn len(v: ~[float]) -> int { 0 }
+# fn sum(v: &[float]) -> float { 0.0 }
+# fn len(v: &[float]) -> int { 0 }
-fn avg(v: ~[float]) -> float {
+fn avg(v: &[float]) -> float {
let sum: float = sum(v);
let sz: float = len(v) as float;
return sum / sz;
}
~~~~
-A cast is a *trivial cast* iff the type of the casted expression and the
-target type are identical after replacing all occurrences of `int`, `uint`,
-`float` with their machine type equivalents of the target architecture in both
-types.
-
#### Binary move expressions
-A _binary move expression_ consists of an *lval* followed by a left-pointing
-arrow (`<-`) and an *rval* expression.
+A _binary move expression_ consists of an [lvalue](#lvalues-rvalues-and-temporaries) followed by a left-pointing
+arrow (`<-`) and an [rvalue](#lvalues-rvalues-and-temporaries) expression.
-Evaluating a move expression causes, as a side effect, the *rval* to be
-*moved* into the *lval*. If the *rval* was itself an *lval*, it must be a
-local variable, as it will be de-initialized in the process.
+Evaluating a move expression causes, as a side effect,
+the rvalue to be *moved* into the lvalue.
+If the rvalue was itself an lvalue, it must be a local variable,
+as it will be de-initialized in the process.
-Evaluating a move expression does not change reference counts, nor does it
-cause a deep copy of any unique structure pointed to by the moved
-*rval*. Instead, the move expression represents an indivisible *transfer of
-ownership* from the right-hand-side to the left-hand-side of the
-expression. No allocation or destruction is entailed.
+Evaluating a move expression does not change reference counts,
+nor does it cause a deep copy of any owned structure pointed to by the moved rvalue.
+Instead, the move expression represents an indivisible *transfer of ownership*
+from the right-hand-side to the left-hand-side of the expression.
+No allocation or destruction is entailed.
An example of three different move expressions:
~~~~~~~~
-# let mut x = ~[mut 0];
-# let a = ~[mut 0];
+# let mut x = &[mut 0];
+# let a = &[mut 0];
# let b = 0;
# let y = {mut z: 0};
# let c = 0;
#### Swap expressions
-A _swap expression_ consists of an *lval* followed by a bi-directional arrow
-(`<->`) and another *lval* expression.
+A _swap expression_ consists of an [lvalue](#lvalues-rvalues-and-temporaries) followed by a bi-directional arrow (`<->`) and another [lvalue](#lvalues-rvalues-and-temporaries).
-Evaluating a swap expression causes, as a side effect, the values held in the
-left-hand-side and right-hand-side *lvals* to be exchanged indivisibly.
+Evaluating a swap expression causes, as a side effect, the values held in the left-hand-side and right-hand-side [lvalues](#lvalues-rvalues-and-temporaries) to be exchanged indivisibly.
-Evaluating a swap expression neither changes reference counts nor deeply
-copies any unique structure pointed to by the moved
-*rval*. Instead, the swap expression represents an indivisible *exchange of
-ownership* between the right-hand-side and the left-hand-side of the
-expression. No allocation or destruction is entailed.
+Evaluating a swap expression neither changes reference counts,
+nor deeply copies any owned structure pointed to by the moved [rvalue](#lvalues-rvalues-and-temporaries).
+Instead, the swap expression represents an indivisible *exchange of ownership*,
+between the right-hand-side and the left-hand-side of the expression.
+No allocation or destruction is entailed.
An example of three different swap expressions:
~~~~~~~~
-# let mut x = ~[mut 0];
-# let mut a = ~[mut 0];
+# let mut x = &[mut 0];
+# let mut a = &[mut 0];
# let i = 0;
# let y = {mut z: 0};
# let b = {mut c: 0};
#### Assignment expressions
-An _assignment expression_ consists of an *lval* expression followed by an
-equals sign (`=`) and an *rval* expression.
+An _assignment expression_ consists of an [lvalue](#lvalues-rvalues-and-temporaries) expression followed by an
+equals sign (`=`) and an [rvalue](#lvalues-rvalues-and-temporaries) expression.
Evaluating an assignment expression is equivalent to evaluating a [binary move
expression](#binary-move-expressions) applied to a [unary copy
#### Compound assignment expressions
-The `+`, `-`, `*`, `/`, `%`, `&`, `|`, `^`, `<<`, `>>`, and `>>>`
+The `+`, `-`, `*`, `/`, `%`, `&`, `|`, `^`, `<<`, and `>>`
operators may be composed with the `=` operator. The expression `lval
OP= val` is equivalent to `lval = lval OP val`. For example, `x = x +
1` may be written as `x += 1`.
-Any such expression always has the [`nil`](#primitive-types) type.
+Any such expression always has the [`unit`](#primitive-types) type.
#### Operator precedence
* / %
as
+ -
-<< >> >>>
+<< >>
&
^
|
copies the resulting value, allocating any memory necessary to hold the new
copy.
-[Shared boxes](#box-types) (type `@`) are, as usual, shallow-copied, as they
-may be cyclic. [Unique boxes](#box-types), [vectors](#vector-types) and
-similar unique types are deep-copied.
+[Managed boxes](#pointer-types) (type `@`) are, as usual, shallow-copied,
+as are raw and borrowed pointers.
+[Owned boxes](#pointer-types), [owned vectors](#vector-types) and similar owned types are deep-copied.
-Since the binary [assignment operator](#assignment-expressions) `=` performs a
-copy implicitly, the unary copy operator is typically only used to cause an
-argument to a function to be copied and passed by value.
+Since the binary [assignment operator](#assignment-expressions) `=` performs a copy implicitly,
+the unary copy operator is typically only used to cause an argument to a function to be copied and passed by value.
An example of a copy expression:
assert v[0] == 1; // Original was not modified
~~~~
+### Unary move expressions
+
+~~~~~~~~{.ebnf .gram}
+move_expr : "move" expr ;
+~~~~~~~~
+
+A _unary move expression_ is similar to a [unary copy](#unary-copy-expressions) expression,
+except that it can only be applied to an [lvalue](#lvalues-rvalues-and-temporaries),
+and it performs a _move_ on its operand, rather than a copy.
+That is, the memory location denoted by its operand is de-initialized after evaluation,
+and the resulting value is a shallow copy of the operand,
+even if the operand is an [owning type](#type-kinds).
+
+
### Call expressions
~~~~~~~~ {.abnf .gram}
let x: int = add(1, 2);
~~~~
-### Shared function expressions
+### Lambda expressions
-*TODO*.
+~~~~~~~~ {.abnf .gram}
+ident_list : [ ident [ ',' ident ]* ] ? ;
+lambda_expr : '|' ident_list '|' expr ;
+~~~~~~~~
-### Unique function expressions
+A _lambda expression_ (a.k.a. "anonymous function expression") defines a function and denotes it as a value,
+in a single expression.
+Lambda expressions are written by prepending a list of identifiers, surrounded by pipe symbols (`|`),
+to an expression.
-*TODO*.
+A lambda expression denotes a function mapping parameters to the expression to the right of the `ident_list`.
+The identifiers in the `ident_list` are the parameters to the function, with types inferred from context.
+
+Lambda expressions are most useful when passing functions as arguments to other functions,
+as an abbreviation for defining and capturing a separate function.
+
+Significantly, lambda expressions _capture their environment_,
+which regular [function definitions](#functions) do not.
+
+The exact type of capture depends on the [function type](#function-types) inferred for the lambda expression;
+in the simplest and least-expensive form, the environment is captured by reference,
+effectively borrowing pointers to all outer variables referenced inside the function.
+Other forms of capture include making copies of captured variables,
+and moving values from the environment into the lambda expression's captured environment.
+
+An example of a lambda expression:
+
+~~~~
+fn ten_times(f: fn(int)) {
+ let mut i = 0;
+ while i < 10 {
+ f(i);
+ i += 1;
+ }
+}
+
+ten_times(|j| io::println(fmt!("hello, %d", j)));
+
+~~~~
### While loops
~~~~~~~~{.ebnf .gram}
-while_expr : "while" expr '{' block '}'
- | "do" '{' block '}' "while" expr ;
+while_expr : "while" expr '{' block '}' ;
~~~~~~~~
A `while` loop begins by evaluating the boolean loop conditional expression.
let mut i = 0;
while i < 10 {
- io::println(~"hello\n");
+ io::println("hello\n");
i = i + 1;
}
~~~~
A `loop` expression is only permitted in the body of a loop.
+### Do expressions
+
+~~~~~~~~{.ebnf .gram}
+do_expr : "do" expr [ '|' ident_list '|' ] ? '{' block '}' ;
+~~~~~~~~
+
+A _do expression_ provides a more-familiar block-syntax for a [lambda expression](#lambda-expressions),
+including a special translation of [return expressions](#return-expressions) inside the supplied block.
+
+The optional `ident_list` and `block` provided in a `do` expression are parsed as though they constitute a lambda expression;
+if the `ident_list` is missing, an empty `ident_list` is implied.
+
+The lambda expression is then provided as a _trailing argument_
+to the outermost [call](#call-expressions) or [method call](#method-call-expressions) expression
+in the `expr` following `do`.
+If the `expr` is a [path expression](#path-expressions), it is parsed as though it is a call expression.
+If the `expr` is a [field expression](#field-expressions), it is parsed as though it is a method call expression.
+
+In this example, both calls to `f` are equivalent:
+
+~~~~
+# fn f(f: fn(int)) { }
+# fn g(i: int) { }
+
+f(|j| g(j));
+
+do f |j| {
+ g(j);
+}
+~~~~
+
+
### For expressions
~~~~~~~~{.ebnf .gram}
-for_expr : "for" pat "in" expr '{' block '}' ;
+for_expr : "for" expr [ '|' ident_list '|' ] ? '{' block '}' ;
~~~~~~~~
-A _for loop_ is controlled by a vector or string. The for loop bounds-checks
-the underlying sequence *once* when initiating the loop, then repeatedly
-executes the loop body with the loop variable referencing the successive
-elements of the underlying sequence, one iteration per sequence element.
+A _for expression_ is similar to a [`do` expression](#do-expressions),
+in that it provides a special block-form of lambda expression,
+suited to passing the `block` function to a higher-order function implementing a loop.
+
+Like a `do` expression, a `return` expression inside a `for` expression is rewritten,
+to access a local flag that causes an early return in the caller.
+
+Additionally, any occurrence of a [return expression](#return-expressions)
+inside the `block` of a `for` expression is rewritten
+as a reference to an (anonymous) flag set in the caller's environment,
+which is checked on return from the `expr` and, if set,
+causes a corresponding return from the caller.
+In this way, the meaning of `return` statements in language built-in control blocks is preserved,
+if they are rewritten using lambda functions and `do` expressions as abstractions.
+
+Like `return` expressions, any [`break`](#break-expressions) and [`loop`](#loop-expressions) expressions
+are rewritten inside `for` expressions, with a combination of local flag variables,
+and early boolean-valued returns from the `block` function,
+such that the meaning of `break` and `loop` is preserved in a primitive loop
+when rewritten as a `for` loop controlled by a higher order function.
An example a for loop:
# fn bar(f: foo) { }
# let a = 0, b = 0, c = 0;
-let v: ~[foo] = ~[a, b, c];
+let v: &[foo] = &[a, b, c];
for v.each |e| {
bar(*e);
~~~~~~~~{.ebnf .gram}
match_expr : "match" expr '{' match_arm [ '|' match_arm ] * '}' ;
-match_arm : match_pat '=>' expr_or_blockish ;
+match_arm : match_pat '=>' [ expr "," | '{' block '}' ] ;
-match_pat : pat [ "to" pat ] ? [ "if" expr ] ;
+match_pat : pat [ ".." pat ] ? [ "if" expr ] ;
~~~~~~~~
A `match` expression branches on a *pattern*. The exact form of matching that
occurs depends on the pattern. Patterns consist of some combination of
-literals, destructured enum constructors, records and tuples, variable binding
+literals, destructured enum constructors, structures, records and tuples, variable binding
specifications, wildcards (`*`), and placeholders (`_`). A `match` expression has a *head
expression*, which is the value to compare to the patterns. The type of the
patterns must equal the type of the head expression.
variant. For example:
~~~~
-enum list<X> { nil, cons(X, @list<X>) }
+enum List<X> { Nil, Cons(X, @List<X>) }
-let x: list<int> = cons(10, @cons(11, @nil));
+let x: List<int> = Cons(10, @Cons(11, @Nil));
match x {
- cons(_, @nil) => fail ~"singleton list",
- cons(*) => return,
- nil => fail ~"empty list"
+ Cons(_, @Nil) => fail ~"singleton list",
+ Cons(*) => return,
+ Nil => fail ~"empty list"
}
~~~~
-The first pattern matches lists constructed by applying `cons` to any head value, and a
-tail value of `@nil`. The second pattern matches `any` list constructed with `cons`,
+The first pattern matches lists constructed by applying `Cons` to any head value, and a
+tail value of `@Nil`. The second pattern matches _any_ list constructed with `Cons`,
ignoring the values of its arguments. The difference between `_` and `*` is that the pattern `C(_)` is only type-correct if
`C` has exactly one argument, while the pattern `C(*)` is type-correct for any enum variant `C`, regardless of how many arguments `C` has.
# fn process_pair(a: int, b: int) { }
# fn process_ten() { }
-enum list<X> { nil, cons(X, @list<X>) }
+enum List<X> { Nil, Cons(X, @List<X>) }
-let x: list<int> = cons(10, @cons(11, @nil));
+let x: List<int> = Cons(10, @Cons(11, @Nil));
match x {
- cons(a, @cons(b, _)) => {
+ Cons(a, @Cons(b, _)) => {
process_pair(a,b);
}
- cons(10, _) => {
+ Cons(10, _) => {
process_ten();
}
- nil => {
+ Nil => {
return;
}
_ => {
}
~~~~
-Records can also be pattern-matched and their fields bound to variables.
+Records and structures can also be pattern-matched and their fields bound to variables.
When matching fields of a record, the fields being matched are specified
first, then a placeholder (`_`) represents the remaining fields.
# let x = 2;
let message = match x {
- 0 | 1 => ~"not many",
- 2 .. 9 => ~"a few",
- _ => ~"lots"
+ 0 | 1 => "not many",
+ 2 .. 9 => "a few",
+ _ => "lots"
};
~~~~
Evaluating a `fail` expression causes a task to enter the *failing* state. In
the *failing* state, a task unwinds its stack, destroying all frames and
-freeing all resources until it reaches its entry frame, at which point it
+running all destructors until it reaches its entry frame, at which point it
halts execution in the *dead* state.
-### Note expressions
-
-~~~~~~~~{.ebnf .gram}
-note_expr : "note" expr ;
-~~~~~~~~
-
-**Note: Note expressions are not yet supported by the compiler.**
-
-A `note` expression has no effect during normal execution. The purpose of a
-`note` expression is to provide additional diagnostic information to the
-logging subsystem during task failure. See [log
-expressions](#log-expressions). Using `note` expressions, normal diagnostic
-logging can be kept relatively sparse, while still providing verbose
-diagnostic "back-traces" when a task fails.
-
-When a task is failing, control frames *unwind* from the innermost frame to
-the outermost, and from the innermost lexical block within an unwinding frame
-to the outermost. When unwinding a lexical block, the runtime processes all
-the `note` expressions in the block sequentially, from the first expression of
-the block to the last. During processing, a `note` expression has equivalent
-meaning to a `log` expression: it causes the runtime to append the argument of
-the `note` to the internal logging diagnostic buffer.
-
-An example of a `note` expression:
-
-~~~~{.xfail-test}
-fn read_file_lines(path: ~str) -> ~[~str] {
- note path;
- let r: [~str];
- let f: file = open_read(path);
- lines(f) |s| {
- r += ~[s];
- }
- return r;
-}
-~~~~
-
-In this example, if the task fails while attempting to open or read a file,
-the runtime will log the path name that was being read. If the function
-completes normally, the runtime will not log the path.
-
-A value that is marked by a `note` expression is *not* copied aside
-when control passes through the `note`. In other words, if a `note`
-expression notes a particular `lval`, and code after the `note`
-mutates that slot, and then a subsequent failure occurs, the *mutated*
-value will be logged during unwinding, *not* the original value that was
-denoted by the `lval` at the moment control passed through the `note`
-expression.
### Return expressions
~~~~~~~~
Return expressions are denoted with the keyword `return`. Evaluating a `return`
-expression^[A `return` expression is analogous to a `return` expression
-in the C family.] moves its argument into the output slot of the current
+expression moves its argument into the output slot of the current
function, destroys the current function activation frame, and transfers
control to the caller frame.
logging level:
~~~~
-# let filename = ~"bulbasaur";
+# let filename = "bulbasaur";
// Full version, logging a value.
log(core::error, ~"file not found: " + filename);
assert_expr : "assert" expr ;
~~~~~~~~
-An `assert` expression is similar to a `check` expression, except
-the condition may be any boolean-typed expression, and the compiler makes no
-use of the knowledge that the condition holds if the program continues to
-execute after the `assert`.
+> **Note:** In future versions of Rust, `assert` will be changed from a full expression to a macro.
+An `assert` expression causes the program to fail if its `expr` argument evaluates to `false`.
+The failure carries a string representation of the false expression.
# Type system
## Types
-Every slot and value in a Rust program has a type. The _type_ of a *value*
-defines the interpretation of the memory holding it. The type of a *slot* may
-also include [constraints](#constraints).
+Every slot, item and value in a Rust program has a type. The _type_ of a *value*
+defines the interpretation of the memory holding it.
Built-in types and type-constructors are tightly integrated into the language,
in nontrivial ways that are not possible to emulate in user-defined
-types. User-defined types have limited capabilities. In addition, every
-built-in type or type-constructor name is reserved as a *keyword* in Rust;
-they cannot be used as user-defined identifiers in any context.
+types. User-defined types have limited capabilities.
### Primitive types
The primitive types are the following:
-* The "nil" type `()`, having the single "nil" value `()`.^[The "nil" value
- `()` is *not* a sentinel "null pointer" value for reference slots; the "nil"
- type is the implicit return type from functions otherwise lacking a return
- type, and can be used in other contexts (such as message-sending or
- type-parametric code) as a zero-size type.]
+* The "unit" type `()`, having the single "unit" value `()` (occasionally called "nil").
+ ^[The "unit" value `()` is *not* a sentinel "null pointer" value for reference slots; the "unit" type is the implicit return type from functions otherwise lacking a return type, and can be used in other contexts (such as message-sending or type-parametric code) as a zero-size type.]
* The boolean type `bool` with values `true` and `false`.
* The machine types.
* The machine-dependent integer and floating-point types.
* The unsigned word types `u8`, `u16`, `u32` and `u64`, with values drawn from
- the integer intervals $[0, 2^8 - 1]$, $[0, 2^16 - 1]$, $[0, 2^32 - 1]$ and
- $[0, 2^64 - 1]$ respectively.
+ the integer intervals $[0, 2^8 - 1]$, $[0, 2^{16} - 1]$, $[0, 2^{32} - 1]$ and
+ $[0, 2^{64} - 1]$ respectively.
* The signed two's complement word types `i8`, `i16`, `i32` and `i64`, with
values drawn from the integer intervals $[-(2^7), 2^7 - 1]$,
- $[-(2^15), 2^15 - 1]$, $[-(2^31), 2^31 - 1]$, $[-(2^63), 2^63 - 1]$
+ $[-(2^{15}), 2^{15} - 1]$, $[-(2^{31}), 2^{31} - 1]$, $[-(2^{63}), 2^{63} - 1]$
respectively.
* The IEEE 754-2008 `binary32` and `binary64` floating-point types: `f32` and
### Textual types
-The types `char` and `~str` hold textual data.
+The types `char` and `str` hold textual data.
A value of type `char` is a Unicode character, represented as a 32-bit
unsigned word holding a UCS-4 codepoint.
-A value of type `~str` is a Unicode string, represented as a vector of 8-bit
+A value of type `str` is a Unicode string, represented as a vector of 8-bit
unsigned bytes holding a sequence of UTF-8 codepoints.
+Since `str` is of indefinite size, it is not a _first class_ type,
+but can only be instantiated through a pointer type,
+such as `&str`, `@str` or `~str`.
-### Record types
-
-The record type-constructor forms a new heterogeneous product of values.^[The
-record type-constructor is analogous to the `struct` type-constructor in the
-Algol/C family, the *record* types of the ML family, or the *structure* types
-of the Lisp family.] Fields of a record type are accessed by name and are
-arranged in memory in the order specified by the record type.
-
-An example of a record type and its use:
-
-~~~~
-type point = {x: int, y: int};
-let p: point = {x: 10, y: 11};
-let px: int = p.x;
-~~~~
-
### Tuple types
The tuple type-constructor forms a new heterogeneous product of values similar
An example of a tuple type and its use:
~~~~
-type pair = (int,~str);
-let p: pair = (10,~"hello");
+type Pair = (int,&str);
+let p: Pair = (10,"hello");
let (a, b) = p;
-assert b != ~"world";
+assert b != "world";
~~~~
+
### Vector types
-The vector type-constructor represents a homogeneous array of values of a
-given type. A vector has a fixed size. The kind of a vector type depends on
-the kind of its member type, as with other simple structural types.
+The vector type-constructor represents a homogeneous array of values of a given type.
+A vector has a fixed size.
+A vector type can be accompanied by a _definite_ size, written with a trailing asterisk and integer literal, such as `[int * 10]`.
+Such a definite-sized vector can be treated as a first class type since its size is known statically.
+A vector without such a size is said to be of _indefinite_ size,
+and is therefore not a _first class_ type,
+and can only be instantiated through a pointer type,
+such as `&[T]`, `@[T]` or `~[T]`.
+The kind of a vector type depends on the kind of its member type, as with other simple structural types.
An example of a vector type and its use:
~~~~
-let v: ~[int] = ~[7, 5, 3];
+let v: &[int] = &[7, 5, 3];
let i: int = v[2];
assert (i == 3);
~~~~
-Vectors always *allocate* a storage region sufficient to store the first power
-of two worth of elements greater than or equal to the size of the vector. This
-behaviour supports idiomatic in-place "growth" of a mutable slot holding a
-vector:
+All accessible elements of a vector are always initialized, and access to a vector is always bounds-checked.
+
+
+### Structure types
+
+A `struct` *type* is a heterogeneous product of other types, called the *fields* of the type.
+^[`struct` types are analogous to `struct` types in C,
+the *record* types of the ML family,
+or the *structure* types of the Lisp family.]
+
+New instances of a `struct` can be constructed with a [struct expression](#struct-expressions).
+
+The memory order of fields in a `struct` is given by the item defining it.
+Fields may be given in any order in a corresponding struct *expression*;
+the resulting `struct` value will always be laid out in memory in the order specified by the corresponding *item*.
+
+The fields of a `struct` may be qualified by [visibility modifiers](#visibility-modifiers),
+to restrict access to implementation-private data in a structure.
+### Enumerated types
+
+An *enumerated type* is a nominal, heterogeneous disjoint union type,
+denoted by the name of an [`enum` item](#enumerations).
+^[The `enum` type is analogous to a `data` constructor declaration in ML,
+or a *pick ADT* in Limbo.]
+
+An [`enum` item](#enumerations) declares both the type and a number of *variant constructors*,
+each of which is independently named and takes an optional tuple of arguments.
+
+New instances of an `enum` can be constructed by calling one of the variant constructors,
+in a [call expression](#call-expressions).
+
+Any `enum` value consumes as much memory as the largest variant constructor for its corresponding `enum` type.
+
+Enum types cannot be denoted *structurally* as types,
+but must be denoted by named reference to an [`enum` item](#enumerations).
+
+
+### Recursive types
+
+Nominal types -- [enumerations](#enumerated-types) and [structures](#structure-types) -- may be recursive.
+That is, each `enum` constructor or `struct` field may refer, directly or indirectly, to the enclosing `enum` or `struct` type itself.
+Such recursion has restrictions:
+
+* Recursive types must include a nominal type in the recursion
+ (not mere [type definitions](#type-definitions),
+ or other structural types such as [vectors](#vector-types) or [tuples](#tuple-types)).
+* A recursive `enum` item must have at least one non-recursive constructor
+ (in order to give the recursion a basis case).
+* The size of a recursive type must be finite;
+ in other words the recursive fields of the type must be [pointer types](#pointer-types).
+* Recursive type definitions can cross module boundaries, but not module *visibility* boundaries,
+ or crate boundaries (in order to simplify the module system and type checker).
+
+An example of a *recursive* type and its use:
+
~~~~
-let mut v: ~[int] = ~[1, 2, 3];
-v += ~[4, 5, 6];
+enum List<T> {
+ Nil,
+ Cons(T, @List<T>)
+}
+
+let a: List<int> = Cons(7, @Cons(13, @Nil));
~~~~
-Normal vector concatenation causes the allocation of a fresh vector to hold
-the result; in this case, however, the slot holding the vector recycles the
-underlying storage in-place (since the reference-count of the underlying
-storage is equal to 1).
-All accessible elements of a vector are always initialized, and access to a
-vector is always bounds-checked.
+### Record types
+> **Note:** Records are not nominal types, thus do not directly support recursion, visibility control,
+> out-of-order field initialization, or coherent trait implementation.
+> Records are therefore deprecated and will be removed in future versions of Rust.
+> [Structure types](#structure-types) should be used instead.
-### Enumerated types
+The record type-constructor forms a new heterogeneous product of values.
+Fields of a record type are accessed by name and are arranged in memory in the order specified by the record type.
+
+An example of a record type and its use:
-An *enumerated type* is a nominal, heterogeneous disjoint union type.^[The
-`enum` type is analogous to a `data` constructor declaration in ML or a *pick
-ADT* in Limbo.] An [`enum` *item*](#enumerations) consists of a number of
-*constructors*, each of which is independently named and takes an optional
-tuple of arguments.
+~~~~
+type Point = {x: int, y: int};
+let p: Point = {x: 10, y: 11};
+let px: int = p.x;
+~~~~
-Enumerated types cannot be denoted *structurally* as types, but must be
-denoted by named reference to an [*enumeration* item](#enumerations).
### Pointer types
they exist to support interoperability with foreign code,
and writing performance-critical or low-level functions.
+
### Function types
The function type-constructor `fn` forms new function types. A function type
-consists of a sequence of input slots, an optional set of
-[input constraints](#constraints) and an output slot.
+consists of a set of function-type modifiers (`pure`, `unsafe`, `extern`, etc.),
+a sequence of input slots and an output slot.
An example of a `fn` type:
let mut x = add(5,7);
-type binop = fn(int,int) -> int;
-let bo: binop = add;
+type Binop = fn(int,int) -> int;
+let bo: Binop = add;
x = bo(5,7);
~~~~~~~~
as the trait. For a trait `T`, cast expressions introduce values of type `T`:
~~~~~~~~
-trait printable {
+trait Printable {
fn to_str() -> ~str;
}
-impl ~str: printable {
- fn to_str() -> ~str { self }
+impl int: Printable {
+ fn to_str() -> ~str { int::to_str(self, 10) }
}
-fn print(a: printable) {
+fn print(a: @Printable) {
io::println(a.to_str());
}
fn main() {
- print(~"meow" as printable);
+ print(@10 as @Printable);
}
~~~~~~~~
-In this example, the trait `printable` occurs as a type in both the type signature of
+In this example, the trait `Printable` occurs as a type in both the type signature of
`print`, and the cast expression in `main`.
-### Struct types
-
-Every struct item defines a type.
-
### Type parameters
Within the body of an item that has type parameter declarations, the names of its type parameters are types:
~~~~~~~
-fn map<A: Copy, B: Copy>(f: fn(A) -> B, xs: ~[A]) -> ~[B] {
+fn map<A: Copy, B: Copy>(f: fn(A) -> B, xs: &[A]) -> ~[B] {
if xs.len() == 0 { return ~[]; }
let first: B = f(xs[0]);
let rest: ~[B] = map(f, xs.slice(1, xs.len()));
Here, `first` has type `B`, referring to `map`'s `B` type parameter; and `rest` has
type `~[B]`, a vector type with element type `B`.
-### Self type
+### Self types
The special type `self` has a meaning within methods inside an
impl item. It refers to the type of the implicit `self` argument. For
example, in:
~~~~~~
-trait printable {
+trait Printable {
fn to_str() -> ~str;
}
-impl ~str: printable {
+impl ~str: Printable {
fn to_str() -> ~str { self }
}
~~~~~~
-`self` refers to the value of type `str` that is the receiver for a
+`self` refers to the value of type `~str` that is the receiver for a
call to the method `to_str`.
## Type kinds
-Types in Rust are categorized into three kinds, based on whether they
-allow copying of their values, and sending to different tasks. The
-kinds are:
+Types in Rust are categorized into kinds, based on various properties of the components of the type.
+The kinds are:
-Sendable
- : Values with a sendable type can be safely sent to another task.
- This kind includes scalars, unique pointers, unique closures, and
+`Const`
+ : Types of this kind are deeply immutable;
+ they contain no mutable memory locations directly or indirectly via pointers.
+`Send`
+ : Types of this kind can be safely sent between tasks.
+ This kind includes scalars, owning pointers, owned closures, and
structural types containing only other sendable types.
-Copyable
+`Owned`
+ : Types of this kind do not contain any borrowed pointers;
+ this can be a useful guarantee for code that breaks borrowing assumptions using [`unsafe` operations](#unsafe-functions).
+`Copy`
: This kind includes all types that can be copied. All types with
- sendable kind are copyable, as are shared boxes, shared closures,
+ sendable kind are copyable, as are managed boxes, managed closures,
trait types, and structural types built out of these.
-Noncopyable
- : [Resource](#resources) types, and every type that includes a
- resource without storing it in a shared box, may not be copied.
- Types of sendable or copyable type can always be used in places
- where a noncopyable type is expected, so in effect this kind
- includes all types.
-
-These form a hierarchy. The noncopyable kind is the widest, including
-all types in the language. The copyable kind is a subset of that, and
-the sendable kind is a subset of the copyable kind.
-
-Any operation that causes a value to be copied requires the type of
-that value to be of copyable kind. Type parameter types are assumed to
-be noncopyable, unless one of the special bounds `send` or `copy` is
-declared for it. For example, this is not a valid program:
+_Default_
+ : Types with destructors, closure environments,
+ and various other _non-first-class_ types,
+ are not copyable at all.
+ Such types can usually only be accessed through pointers,
+ or in some cases, moved between mutable locations.
+
+Kinds can be supplied as _bounds_ on type parameters, like traits,
+in which case the parameter is constrained to types satisfying that kind.
+
+By default, type parameters do not carry any assumed kind-bounds at all.
+
+Any operation that causes a value to be copied requires the type of that value to be of copyable kind,
+so the `Copy` bound is frequently required on function type parameters.
+For example, this is not a valid program:
~~~~{.xfail-test}
fn box<T>(x: T) -> @T { @x }
~~~~
-Putting `x` into a shared box involves copying, and the `T` parameter
-is assumed to be noncopyable. To change that, a bound is declared:
+Putting `x` into a managed box involves copying, and the `T` parameter has the default (non-copyable) kind.
+To change that, a bound is declared:
~~~~
fn box<T: Copy>(x: T) -> @T { @x }
when control leaves the frame containing it.
The _heap_ is a general term that describes two separate sets of boxes:
-shared boxes -- which may be subject to garbage collection -- and unique
+managed boxes -- which may be subject to garbage collection -- and owned
boxes. The lifetime of an allocation in the heap depends on the lifetime of
the box values pointing to it. Since box values may themselves be passed in
and out of frames, or stored in the heap, heap allocations may outlive the
never including managed or borrowed pointers.
When a stack frame is exited, its local allocations are all released, and its
-references to boxes (both shared and owned) are dropped.
+references to boxes (both managed and owned) are dropped.
-A shared box may (in the case of a recursive, mutable shared type) be cyclic;
-in this case the release of memory inside the shared structure may be deferred
+A managed box may (in the case of a recursive, mutable managed type) be cyclic;
+in this case the release of memory inside the managed structure may be deferred
until task-local garbage collection can reclaim it. Code can ensure no such
-delayed deallocation occurs by restricting itself to unique boxes and similar
-unshared kinds of data.
+delayed deallocation occurs by restricting itself to owned boxes and similar
+unmanaged kinds of data.
When a task finishes, its stack is necessarily empty and it therefore has no
references to any boxes; the remainder of its heap is immediately freed.
A task's stack contains slots.
-A _slot_ is a component of a stack frame. A slot is either a *local variable*
-or a *reference*.
+A _slot_ is a component of a stack frame, either a function parameter,
+a [temporary](#lvalues-rvalues-and-temporaries), or a local variable.
A _local variable_ (or *stack-local* allocation) holds a value directly,
allocated within the stack's memory. The value is a part of the stack frame.
-A _reference_ references a value outside the frame. It may refer to a
-value allocated in another frame *or* a boxed value in the heap. The
-reference-formation rules ensure that the referent will outlive the reference.
-
Local variables are immutable unless declared with `let mut`. The
`mut` keyword applies to all local variables declared within that
declaration (so `let mut x, y` declares two mutable variables, `x` and
local variables. Local variables can be used only after they have been
initialized; this is enforced by the compiler.
-References are created for function arguments. If the compiler can not prove
-that the referred-to value will outlive the reference, it will try to set
-aside a copy of that value to refer to. If this is not semantically safe (for
-example, if the referred-to value contains mutable fields), it will reject the
-program. If the compiler deems copying the value expensive, it will warn.
-
-A function with an argument of type `&mut T`, for some type `T`, can write to
-the slot that its argument refers to. An example of such a function is:
-
-~~~~~~~~
-fn incr(i: &mut int) {
- *i = *i + 1;
-}
-~~~~~~~~
### Memory boxes
A _box_ is a reference to a heap allocation holding another value. There
-are two kinds of boxes: *shared boxes* and *unique boxes*.
+are two kinds of boxes: *managed boxes* and *owned boxes*.
-A _shared box_ type or value is constructed by the prefix *at* sigil `@`.
+A _managed box_ type or value is constructed by the prefix *at* sigil `@`.
-A _unique box_ type or value is constructed by the prefix *tilde* sigil `~`.
+An _owned box_ type or value is constructed by the prefix *tilde* sigil `~`.
-Multiple shared box values can point to the same heap allocation; copying a
-shared box value makes a shallow copy of the pointer (optionally incrementing
-a reference count, if the shared box is implemented through
+Multiple managed box values can point to the same heap allocation; copying a
+managed box value makes a shallow copy of the pointer (optionally incrementing
+a reference count, if the managed box is implemented through
reference-counting).
-Unique box values exist in 1:1 correspondence with their heap allocation;
-copying a unique box value makes a deep copy of the heap allocation and
+Owned box values exist in 1:1 correspondence with their heap allocation;
+copying an owned box value makes a deep copy of the heap allocation and
produces a pointer to the new allocation.
-An example of constructing one shared box type and value, and one unique box
+An example of constructing one managed box type and value, and one owned box
type and value:
~~~~~~~~
Inter-task communication and co-ordination facilities are provided in the standard library.
These include:
+
- synchronous and asynchronous communication channels with various communication topologies
- read-only and read-write shared variables with various safe mutual exclusion patterns
- simple locks and semaphores
A call to `core::task::spawn`, passing a 0-argument function as its single
argument, causes the runtime to construct a new task executing the passed
function. The passed function is referred to as the _entry function_ for
-the spawned task, and any captured environment is carries is moved from the
+the spawned task, and any captured environment it carries is moved from the
spawning task to the spawned task before the spawned task begins execution.
-The result of a `spawn` call is a `core::task::task` value.
+The result of a `spawn` call is a `core::task::Task` value.
An example of a `spawn` call:
let s = comm::recv(po);
~~~~~~~~
+> **Note:** this communication system will be replaced by a higher-performance system called "pipes",
+> in future versions of Rust.
+
# Runtime services, linkage and debugging
-The Rust _runtime_ is a relatively compact collection of C and Rust code
+The Rust _runtime_ is a relatively compact collection of C++ and Rust code
that provides fundamental services and datatypes to all Rust tasks at
run-time. It is smaller and simpler than many modern language runtimes. It is
tightly integrated into the language's execution model of memory, tasks,
communication and logging.
+> **Note:** The runtime library will merge with the `core` library in future versions of Rust.
### Memory allocation
* The stack-growth implementation of Go.
* The structural algebraic types and compilation manager of SML.
* The attribute and assembly systems of C#.
-* The deterministic destructor system of C++.
+* The references and deterministic destructor system of C++.
+* The memory region systems of the ML Kit and Cyclone.
* The typeclass system of Haskell.
* The lexical identifier rule of Python.
* The block syntax of Ruby.
# Introduction
-Borrowed pointers are one of the more flexible and powerful tools
-available in Rust. A borrowed pointer can be used to point anywhere:
-into the managed and exchange heaps, into the stack, and even into the
-interior of another data structure. With regard to flexibility, it is
-comparable to a C pointer or C++ reference. However, unlike C and C++,
-the Rust compiler includes special checks that ensure that borrowed
-pointers are being used safely. Another advantage of borrowed pointers
-is that they are invisible to the garbage collector, so working with
-borrowed pointers helps keep things efficient.
-
-Despite the fact that they are completely safe, at runtime, a borrowed
-pointer is “just a pointer”. They introduce zero overhead. All safety
-checks are done at compilation time.
+Borrowed pointers are one of the more flexible and powerful tools available in
+Rust. A borrowed pointer can point anywhere: into the managed or exchange
+heap, into the stack, and even into the interior of another data structure. A
+borrowed pointer is as flexible as a C pointer or C++ reference. However,
+unlike C and C++ compilers, the Rust compiler includes special static checks
+that ensure that programs use borrowed pointers safely. Another advantage of
+borrowed pointers is that they are invisible to the garbage collector, so
+working with borrowed pointers helps reduce the overhead of automatic memory
+management.
+
+Despite their complete safety, a borrowed pointer's representation at runtime
+is the same as that of an ordinary pointer in a C program. They introduce zero
+overhead. The compiler does all safety checks at compile time.
Although borrowed pointers have rather elaborate theoretical
underpinnings (region pointers), the core concepts will be familiar to
-anyone who worked with C or C++. Therefore, the best way to explain
+anyone who has worked with C or C++. Therefore, the best way to explain
how they are used—and their limitations—is probably just to work
through several examples.
# By example
-Borrowed pointers are called borrowed because they are only valid for
-a limit duration. Borrowed pointers never claim any kind of ownership
-over the data that they point at: instead, they are used for cases
-where you like to make use of data for a short time.
+Borrowed pointers are called *borrowed* because they are only valid for
+a limited duration. Borrowed pointers never claim any kind of ownership
+over the data that they point to: instead, they are used for cases
+where you would like to use data for a short time.
As an example, consider a simple struct type `Point`:
struct Point {x: float, y: float}
~~~
-We can use this simple definition to allocate points in many ways. For
+We can use this simple definition to allocate points in many different ways. For
example, in this code, each of these three local variables contains a
point, but allocated in a different place:
let unique_box : ~Point = ~Point {x: 7.0, y: 9.0};
~~~
-Suppose we wanted to write a procedure that computed the distance
-between any two points, no matter where they were stored. For example,
-we might like to compute the distance between `on_the_stack` and
-`shared_box`, or between `shared_box` and `unique_box`. One option is
-to define a function that takes two arguments of type point—that is,
-it takes the points by value. But this will cause the points to be
-copied when we call the function. For points, this is probably not so
-bad, but often copies are expensive or, worse, if there are mutable
-fields, they can change the semantics of your program. So we’d like to
-define a function that takes the points by pointer. We can use
-borrowed pointers to do this:
+Suppose we wanted to write a procedure that computed the distance between any
+two points, no matter where they were stored. For example, we might like to
+compute the distance between `on_the_stack` and `shared_box`, or between
+`shared_box` and `unique_box`. One option is to define a function that takes
+two arguments of type `Point`—that is, it takes the points by value. But if
+we define it this way, calling the function will cause the points to be
+copied. For points, this is probably not so bad, but often copies are
+expensive. Worse, if the data type contains mutable fields, copying can change
+the semantics of your program in unexpected ways. So we'd like to define a
+function that takes the points by pointer. We can use borrowed pointers to do
+this:
~~~
# struct Point {x: float, y: float}
compute_distance(shared_box, unique_box);
~~~
-Here the `&` operator is used to take the address of the variable
+Here, the `&` operator takes the address of the variable
`on_the_stack`; this is because `on_the_stack` has the type `Point`
(that is, a struct value) and we have to take its address to get a
value. We also call this _borrowing_ the local variable
-`on_the_stack`, because we are created an alias: that is, another
-route to the same data.
-
-In the case of the boxes `shared_box` and `unique_box`, however, no
-explicit action is necessary. The compiler will automatically convert
-a box like `@Point` or `~Point` to a borrowed pointer like
-`&Point`. This is another form of borrowing; in this case, the
-contents of the shared/unique box is being lent out.
-
-Whenever a value is borrowed, there are some limitations on what you
-can do with the original. For example, if the contents of a variable
-have been lent out, you cannot send that variable to another task, nor
-will you be permitted to take actions that might cause the borrowed
-value to be freed or to change its type (I’ll get into what kinds of
-actions those are shortly). This rule should make intuitive sense: you
-must wait for a borrowed value to be returned (that is, for the
-borrowed pointer to go out of scope) before you can make full use of
-it again.
+`on_the_stack`, because we have created an alias: that is, another
+name for the same data.
+
+In contrast, we can pass the boxes `shared_box` and `unique_box` to
+`compute_distance` directly. The compiler automatically converts a box like
+`@Point` or `~Point` to a borrowed pointer like `&Point`. This is another form
+of borrowing: in this case, the caller lends the contents of the shared or
+unique box to the callee.
+
+Whenever a caller lends data to a callee, there are some limitations on what
+the caller can do with the original. For example, if the contents of a
+variable have been lent out, you cannot send that variable to another task. In
+addition, the compiler will reject any code that might cause the borrowed
+value to be freed or overwrite its component fields with values of different
+types (I'll get into what kinds of actions those are shortly). This rule
+should make intuitive sense: you must wait for a borrower to return the value
+that you lent it (that is, wait for the borrowed pointer to go out of scope)
+before you can make full use of it again.
# Other uses for the & operator
let on_the_stack: Point = Point {x: 3.0, y: 4.0};
~~~
-This results in a by-value variable. As a consequence, we had to
-explicitly take the address of `on_the_stack` to get a borrowed
-pointer. Sometimes however it is more convenient to move the &
-operator into the definition of `on_the_stack`:
+This declaration means that code can only pass `Point` by value to other
+functions. As a consequence, we had to explicitly take the address of
+`on_the_stack` to get a borrowed pointer. Sometimes however it is more
+convenient to move the & operator into the definition of `on_the_stack`:
~~~
# struct Point {x: float, y: float}
~~~
Applying `&` to an rvalue (non-assignable location) is just a convenient
-shorthand for creating a temporary and taking its address:
+shorthand for creating a temporary and taking its address. A more verbose
+way to write the same code is:
~~~
# struct Point {x: float, y: float}
# Taking the address of fields
As in C, the `&` operator is not limited to taking the address of
-local variables. It can also be used to take the address of fields or
+local variables. It can also take the address of fields or
individual array elements. For example, consider this type definition
for `rectangle`:
struct Rectangle {origin: Point, size: Size}
~~~
-Now again I can define rectangles in a few different ways:
+Now, as before, we can define rectangles in a few different ways:
~~~
# struct Point {x: float, y: float}
size: Size {w: 3f, h: 4f}};
~~~
-In each case I can use the `&` operator to extact out individual
-subcomponents. For example, I could write:
+In each case, we can extract out individual subcomponents with the `&`
+operator. For example, I could write:
~~~
# struct Point {x: float, y: float} // as before
~~~
which would borrow the field `origin` from the rectangle on the stack
-from the managed box and then compute the distance between them.
+as well as from the managed box, and then compute the distance between them.
# Borrowing managed boxes and rooting
-We’ve seen a few examples so far where heap boxes (both managed and
-unique) are borrowed. Up till this point, we’ve glossed over issues of
+We’ve seen a few examples so far of borrowing heap boxes, both managed
+and unique. Up till this point, we’ve glossed over issues of
safety. As stated in the introduction, at runtime a borrowed pointer
-is simply a pointer, nothing more. Therefore, if we wish to avoid the
-issues that C has with dangling pointers (and we do!), a compile-time
-safety check is required.
-
-The basis for the check is the notion of _lifetimes_. A lifetime is
-basically a static approximation of the period in which the pointer is
-valid: it always corresponds to some expression or block within the
-program. Within that expression, the pointer can be used freely, but
-if the pointer somehow leaks outside of that expression, the compiler
-will report an error. We’ll be discussing lifetimes more in the
-examples to come, and a more thorough introduction is also available.
-
-When a borrowed pointer is created, the compiler must ensure that it
-will remain valid for its entire lifetime. Sometimes this is
-relatively easy, such as when taking the address of a local variable
-or a field that is stored on the stack:
+is simply a pointer, nothing more. Therefore, avoiding C's problems
+with dangling pointers requires a compile-time safety check.
+
+The basis for the check is the notion of _lifetimes_. A lifetime is a
+static approximation of the span of execution during which the pointer
+is valid: it always corresponds to some expression or block within the
+program. Code inside that expression can use the pointer without
+restrictions. But if the pointer escapes from that expression (for
+example, if the expression contains an assignment expression that
+assigns the pointer to a mutable field of a data structure with a
+broader scope than the pointer itself), the compiler reports an
+error. We'll be discussing lifetimes more in the examples to come, and
+a more thorough introduction is also available.
+
+When the `&` operator creates a borrowed pointer, the compiler must
+ensure that the pointer remains valid for its entire
+lifetime. Sometimes this is relatively easy, such as when taking the
+address of a local variable or a field that is stored on the stack:
~~~
struct X { f: int }
} // -+
~~~
-Here, the lifetime of the borrowed pointer is simply L, the remainder
-of the function body. No extra work is required to ensure that `x.f`
-will not be freed. This is true even if `x` is mutated.
+Here, the lifetime of the borrowed pointer `y` is simply L, the
+remainder of the function body. The compiler need not do any other
+work to prove that code will not free `x.f`. This is true even if the
+code mutates `x`.
-The situation gets more complex when borrowing data that resides in
-heap boxes:
+The situation gets more complex when borrowing data inside heap boxes:
~~~
# struct X { f: int }
} // -+
~~~
-In this example, the value `x` is in fact a heap box, and `y` is
-therefore a pointer into that heap box. Again the lifetime of `y` will
-be L, the remainder of the function body. But there is a crucial
-difference: suppose `x` were reassigned during the lifetime L? If
-we’re not careful, that could mean that the managed box would become
-unrooted and therefore be subject to garbage collection
+In this example, the value `x` is a heap box, and `y` is therefore a
+pointer into that heap box. Again the lifetime of `y` is L, the
+remainder of the function body. But there is a crucial difference:
+suppose `x` were to be reassigned during the lifetime L? If the
+compiler isn't careful, the managed box could become *unrooted*, and
+would therefore be subject to garbage collection. A heap box that is
+unrooted is one such that no pointer values in the heap point to
+it. It would violate memory safety for the box that was originally
+assigned to `x` to be garbage-collected, since a non-heap
+pointer---`y`---still points into it.
-> ***Note:***In our current implementation, the garbage collector is
-> implemented using reference counting and cycle detection.
+> ***Note:*** Our current implementation of the garbage collector
+> uses reference counting and cycle detection.
-For this reason, whenever the interior of a managed box stored in a
-mutable location is borrowed, the compiler will insert a temporary
-that ensures that the managed box remains live for the entire
-lifetime. So, the above example would be compiled as:
+For this reason, whenever an `&` expression borrows the interior of a
+managed box stored in a mutable location, the compiler inserts a
+temporary that ensures that the managed box remains live for the
+entire lifetime. So, the above example would be compiled as if it were
+written
~~~
# struct X { f: int }
The previous example demonstrated *rooting*, the process by which the
compiler ensures that managed boxes remain live for the duration of a
-borrow. Unfortunately, rooting does not work if the data being
-borrowed is a unique box, as it is not possible to have two references
-to a unique box.
+borrow. Unfortunately, rooting does not work for borrows of unique
+boxes, because it is not possible to have two references to a unique
+box.
For unique boxes, therefore, the compiler will only allow a borrow *if
the compiler can guarantee that the unique box will not be reassigned
~~~
Here, as before, the interior of the variable `x` is being borrowed
-and `x` is declared as mutable. However, the compiler can clearly see
-that `x` is not assigned anywhere in the lifetime L of the variable
+and `x` is declared as mutable. However, the compiler can prove that
+`x` is not assigned anywhere in the lifetime L of the variable
`y`. Therefore, it accepts the function, even though `x` is mutable
and in fact is mutated later in the function.
-It may not be clear why we are so concerned about the variable which
-was borrowed being mutated. The reason is that unique boxes are freed
-_as soon as their owning reference is changed or goes out of
+It may not be clear why we are so concerned about mutating a borrowed
+variable. The reason is that the runtime system frees any unique box
+_as soon as its owning reference changes or goes out of
scope_. Therefore, a program like this is illegal (and would be
rejected by the compiler):
Here you can see that the variable `y` still points at the old box,
which has been freed.
-In fact, the compiler can apply this same kind of reasoning can be
-applied to any memory which is _(uniquely) owned by the stack
-frame_. So we could modify the previous example to introduce
-additional unique pointers and structs, and the compiler will still be
-able to detect possible mutations:
+In fact, the compiler can apply the same kind of reasoning to any
+memory that is _(uniquely) owned by the stack frame_. So we could
+modify the previous example to introduce additional unique pointers
+and structs, and the compiler will still be able to detect possible
+mutations:
~~~ {.xfail-test}
fn example3() -> int {
In this case, two errors are reported, one when the variable `x` is
modified and another when `x.f` is modified. Either modification would
-cause the pointer `y` to be invalidated.
+invalidate the pointer `y`.
-Things get tricker when the unique box is not uniquely owned by the
-stack frame (or when the compiler doesn’t know who the owner
-is). Consider a program like this:
+Things get trickier when the unique box is not uniquely owned by the
+stack frame, or when there is no way for the compiler to determine the
+box's owner. Consider a program like this:
~~~
struct R { g: int }
+------+
~~~
-In this case, the owning reference to the value being borrowed is in
-fact `x.f`. Moreover, `x.f` is both mutable and aliasable. Aliasable
-means that it is possible that there are other pointers to that same
-managed box, so even if the compiler were to prevent `x.f` from being
-mutated, the field might still be changed through some alias of
-`x`. Therefore, to be safe, the compiler only accepts pure actions
-during the lifetime of `y`. We’ll have a final example on purity but
-inn unique fields, as in the following example:
+In this case, the owning reference to the value being borrowed is
+`x.f`. Moreover, `x.f` is both mutable and *aliasable*. Aliasable
+means that there may be other pointers to that same managed box, so
+even if the compiler were to prove an absence of mutations to `x.f`,
+code could mutate `x.f` indirectly by changing an alias of
+`x`. Therefore, to be safe, the compiler only accepts *pure* actions
+during the lifetime of `y`. We define what "pure" means in the section
+on [purity](#purity).
Besides ensuring purity, the only way to borrow the interior of a
-unique found in aliasable memory is to ensure that it is stored within
-unique fields, as in the following example:
+unique box found in aliasable memory is to ensure that the borrowed field
+itself is also unique, as in the following example:
~~~
struct R { g: int }
cannot be changed and hence the unique box `g` will remain valid.
If you do have a unique box in a mutable field, and you wish to borrow
-it, one option is to use the swap operator to bring that unique box
+it, one option is to use the swap operator to move that unique box
onto your stack:
~~~
Of course, this has the side effect of modifying your managed box for
the duration of the borrow, so it only works when you know that you
-won’t be accessing that same box for the duration of the loan. Note
-also that sometimes it is necessary to introduce additional blocks to
-constrain the scope of the loan. In this example, the borrowed
-pointer `y` would still be in scope when you moved the value `v` back
-into `x.f`, and hence moving `v` would be considered illegal. You
-cannot move values if they are outstanding loans which are still
-valid. By introducing the block, the scope of `y` is restricted and so
-the move is legal.
+won't be accessing that same box for the duration of the loan. Also,
+it is sometimes necessary to introduce additional blocks to constrain
+the scope of the loan. In this example, the borrowed pointer `y`
+would still be in scope when you moved the value `v` back into `x.f`,
+and hence moving `v` would be considered illegal. You cannot move
+values if they are the targets of valid outstanding loans. Introducing
+the block restricts the scope of `y`, making the move legal.
# Borrowing and enums
-The previous example showed that borrowing unique boxes found in
-aliasable, mutable memory is not permitted, so as to prevent pointers
-into freed memory. There is one other case where the compiler must be
-very careful to ensure that pointers remain valid: pointers into the
-interior of an enum.
+The previous example showed that the type system forbids any borrowing
+of unique boxes found in aliasable, mutable memory. This restriction
+prevents pointers from pointing into freed memory. There is one other
+case where the compiler must be very careful to ensure that pointers
+remain valid: pointers into the interior of an `enum`.
As an example, let’s look at the following `shape` type that can
represent both rectangles and circles:
}
~~~
-Now I might write a function to compute the area of a shape. This
-function takes a borrowed pointer to a shape to avoid the need of
-copying them.
+Now we might write a function to compute the area of a shape. This
+function takes a borrowed pointer to a shape, to avoid the need for
+copying.
~~~
# struct Point {x: float, y: float}; // as before
}
~~~
-The first case matches against circles. Here the radius is extracted
-from the shape variant and used to compute the area of the circle
-(Like any up-to-date engineer, we use the [tau circle constant][tau]
-and not that dreadfully outdated notion of pi).
+The first case matches against circles. Here, the pattern extracts the
+radius from the shape variant and the action uses it to compute the
+area of the circle. (Like any up-to-date engineer, we use the [tau
+circle constant][tau] and not that dreadfully outdated notion of pi).
[tau]: http://www.math.utah.edu/~palais/pi.html
The second match is more interesting. Here we match against a
-rectangle and extract its size: but rather than copy the `size` struct,
-we use a by-reference binding to create a pointer to it. In other
-words, a pattern binding like `ref size` in fact creates a pointer of
-type `&size` into the _interior of the enum_.
+rectangle and extract its size: but rather than copy the `size`
+struct, we use a by-reference binding to create a pointer to it. In
+other words, a pattern binding like `ref size` binds the name `size`
+to a pointer of type `&size` into the _interior of the enum_.
-To make this more clear, let’s look at a diagram of how things are
-laid out in memory in the case where `shape` points at a rectangle:
+To make this more clear, let's look at a diagram of memory layout in
+the case where `shape` points at a rectangle:
~~~ {.notrust}
Stack Memory
Perhaps you can see where the danger lies: if the shape were somehow
to be reassigned, perhaps to a circle, then although the memory used
to store that shape value would still be valid, _it would have a
-different type_! This is shown in the following diagram, depicting what
-the state of memory would be if shape were overwritten with a circle:
+different type_! The following diagram shows what memory would look
+like if code overwrote `shape` with a circle:
~~~ {.notrust}
Stack Memory
+---------------+
~~~
-As you can see, the `size` pointer would not be pointing at a `float` and
-not a struct. This is not good.
+As you can see, the `size` pointer would be pointing at a `float`
+instead of a struct. This is not good: dereferencing the second field
+of a `float` as if it were a struct with two fields would be a memory
+safety violation.
So, in fact, for every `ref` binding, the compiler will impose the
same rules as the ones we saw for borrowing the interior of a unique
-box: it must be able to guarantee that the enum will not be
-overwritten for the duration of the borrow. In fact, the example I
-gave earlier would be considered safe. This is because the shape
-pointer has type `&Shape`, which means “borrowed pointer to immutable
-memory containing a shape”. If however the type of that pointer were
-`&const Shape` or `&mut Shape`, then the ref binding would not be
-permitted. Just as with unique boxes, the compiler will permit ref
-bindings into data owned by the stack frame even if it is mutable, but
-otherwise it requires that the data reside in immutable memory.
+box: it must be able to guarantee that the `enum` will not be
+overwritten for the duration of the borrow. In fact, the compiler
+would accept the example we gave earlier. The example is safe because
+the shape pointer has type `&Shape`, which means "borrowed pointer to
+immutable memory containing a `shape`". If, however, the type of that
+pointer were `&const Shape` or `&mut Shape`, then the ref binding
+would be ill-typed. Just as with unique boxes, the compiler will
+permit `ref` bindings into data owned by the stack frame even if the
+data are mutable, but otherwise it requires that the data reside in
+immutable memory.
> ***Note:*** Right now, pattern bindings not explicitly annotated
> with `ref` or `copy` use a special mode of "implicit by reference".
# Returning borrowed pointers
-So far, all of the examples we’ve looked at use borrowed pointers in a
-“downward” direction. That is, the borrowed pointer is created and
-then used during the method or code block which created it. It is also
-possible to return borrowed pointers to the caller, but as we'll see
-this requires some explicit annotation.
+So far, all of the examples we've looked at use borrowed pointers in a
+“downward” direction. That is, a method or code block creates a
+borrowed pointer, then uses it within the same scope. It is also
+possible to return borrowed pointers as the result of a function, but
+as we'll see, doing so requires some explicit annotation.
For example, we could write a subroutine like this:
fn get_x(p: &r/Point) -> &r/float { &p.x }
~~~
-Here, the function `get_x()` returns a pointer into the structure it was
-given. The type of the parameter (`&r/Point`) and return type (`&r/float`) both
-make use of a new syntactic form that we have not seen so far. Here the identifier `r`
-serves as an explicit name for the lifetime of the pointer. So in effect
-this function is declaring that it takes in a pointer with lifetime `r` and returns
-a pointer with that same lifetime.
+Here, the function `get_x()` returns a pointer into the structure it
+was given. The type of the parameter (`&r/Point`) and return type
+(`&r/float`) both use a new syntactic form that we have not seen so
+far. Here the identifier `r` names the lifetime of the pointer
+explicitly. So in effect, this function declares that it takes a
+pointer with lifetime `r` and returns a pointer with that same
+lifetime.
In general, it is only possible to return borrowed pointers if they
-are derived from a borrowed pointer which was given as input to the
-procedure. In that case, they will always have the same lifetime as
-one of the parameters; named lifetimes are used to indicate which
-parameter that is.
+are derived from a parameter to the procedure. In that case, the
+pointer result will always have the same lifetime as one of the
+parameters; named lifetimes indicate which parameter that
+is.
-In the examples before, function parameter types did not include a
-lifetime name. In this case, the compiler simply creates a new,
-anonymous name, meaning that the parameter is assumed to have a
-distinct lifetime from all other parameters.
+In the previous examples, function parameter types did not include a
+lifetime name. In those examples, the compiler simply creates a fresh
+name for the lifetime automatically: that is, the lifetime name is
+guaranteed to refer to a distinct lifetime from the lifetimes of all
+other parameters.
Named lifetimes that appear in function signatures are conceptually
the same as the other lifetimes we've seen before, but they are a bit
by the caller to `get_x()`, just as the value for the parameter `p` is
defined by that caller.
-In any case, whatever the lifetime `r` is, the pointer produced by
-`&p.x` always has the same lifetime as `p` itself, as a pointer to a
+In any case, whatever the lifetime `r` is, the pointer produced by
+`&p.x` always has the same lifetime as `p` itself: a pointer to a
field of a struct is valid as long as the struct is valid. Therefore,
-the compiler is satisfied with the function `get_x()`.
+the compiler accepts the function `get_x()`.
-To drill in this point, let’s look at a variation on the example, this
-time one which does not compile:
+To emphasize this point, let’s look at a variation on the example, this
+time one that does not compile:
~~~ {.xfail-test}
struct Point {x: float, y: float}
Here, the function `get_x_sh()` takes a managed box as input and
returns a borrowed pointer. As before, the lifetime of the borrowed
pointer that will be returned is a parameter (specified by the
-caller). That means that effectively `get_x_sh()` is promising to
-return a borrowed pointer that is valid for as long as the caller
-would like: this is subtly different from the first example, which
-promised to return a pointer that was valid for as long as the pointer
-it was given.
+caller). That means that `get_x_sh()` promises to return a borrowed
+pointer that is valid for as long as the caller would like: this is
+subtly different from the first example, which promised to return a
+pointer that was valid for as long as its pointer argument was valid.
Within `get_x_sh()`, we see the expression `&p.x` which takes the
-address of a field of a managed box. This implies that the compiler
-must guarantee that, so long as the resulting pointer is valid, the
-managed box will not be reclaimed by the garbage collector. But recall
-that `get_x_sh()` also promised to return a pointer that was valid for
-as long as the caller wanted it to be. Clearly, `get_x_sh()` is not in
-a position to make both of these guarantees; in fact, it cannot
-guarantee that the pointer will remain valid at all once it returns,
-as the parameter `p` may or may not be live in the caller. Therefore,
-the compiler will report an error here.
+address of a field of a managed box. The presence of this expression
+implies that the compiler must guarantee that, so long as the
+resulting pointer is valid, the managed box will not be reclaimed by
+the garbage collector. But recall that `get_x_sh()` also promised to
+return a pointer that was valid for as long as the caller wanted it to
+be. Clearly, `get_x_sh()` is not in a position to make both of these
+guarantees; in fact, it cannot guarantee that the pointer will remain
+valid at all once it returns, as the parameter `p` may or may not be
+live in the caller. Therefore, the compiler will report an error here.
In general, if you borrow a managed (or unique) box to create a
borrowed pointer, the pointer will only be valid within the function
# Named lifetimes
-Let's look at named lifetimes in more detail. In effect, the use of
-named lifetimes allows you to group parameters by lifetime. For
-example, consider this function:
+Let's look at named lifetimes in more detail. Named lifetimes allow
+for grouping of parameters by lifetime. For example, consider this
+function:
~~~
# struct Point {x: float, y: float}; // as before
}
~~~
-Here you can see the lifetime of shape is now being called `tmp`. The
-parameters `a`, `b`, and the return value are all given the lifetime
-`r`. However, since the lifetime `tmp` is not returned, it would be shorter
-to just omit the named lifetime for `shape` altogether:
+Here you can see that `shape`'s lifetime is now named `tmp`. The
+parameters `a`, `b`, and the return value all have the lifetime `r`.
+However, since the lifetime `tmp` is not returned, it would be more
+concise to just omit the named lifetime for `shape` altogether:
~~~
# struct Point {x: float, y: float}; // as before
# Purity
As mentioned before, the Rust compiler offers a kind of escape hatch
-that permits borrowing of any data, but only if the actions that occur
+that permits borrowing of any data, as long as the actions that occur
during the lifetime of the borrow are pure. Pure actions are those
-which only modify data owned by the current stack frame. The compiler
+that only modify data owned by the current stack frame. The compiler
can therefore permit arbitrary pointers into the heap, secure in the
knowledge that no pure action will ever cause them to become
invalidated (the compiler must still track data on the stack which is
-borrowed and enforce those rules normally, of course).
-
-Let’s revisit a previous example and show how purity can affect the
-compiler’s result. Here is `example5a()`, which borrows the interior of
-a unique box found in an aliasable, mutable location, only now we’ve
+borrowed and enforce those rules normally, of course). A pure function
+in Rust is referentially transparent: it returns the same results
+given the same (observably equivalent) inputs. That is because while
+pure functions are allowed to modify data, they may only modify
+*stack-local* data, which cannot be observed outside the scope of the
+function itself. (Using an `unsafe` block invalidates this guarantee.)
+
+Let’s revisit a previous example and show how purity can affect
+typechecking. Here is `example5a()`, which borrows the interior of a
+unique box found in an aliasable, mutable location, only now we’ve
replaced the `...` with some specific code:
~~~
}
~~~
-The new code simply returns an incremented version of `y`. This clearly
-doesn’t do mutate anything in the heap, so the compiler is satisfied.
+The new code simply returns an incremented version of `y`. This code
+clearly doesn't mutate the heap, so the compiler is satisfied.
But suppose we wanted to pull the increment code into a helper, like
this:
~~~
But now the compiler will report an error again. The reason is that it
-only considers one function at a time (like most type checkers), and
-so it does not know that `add_one()` only takes pure actions. We can
+only considers one function at a time (like most typecheckers), and
+so it does not know that `add_one()` consists of pure code. We can
help the compiler by labeling `add_one()` as pure:
~~~
# Conclusion
-So there you have it. A (relatively) brief tour of borrowed pointer
-system. For more details, I refer to the (yet to be written) reference
+So there you have it: a (relatively) brief tour of the borrowed pointer
+system. For more details, we refer to the (yet to be written) reference
document on borrowed pointers, which will explain the full notation
and give more examples.
# Introduction
-One of Rust's aims, as a system programming language, is to
+Because Rust is a systems programming language, one of its goals is to
interoperate well with C code.
-We'll start with an example. It's a bit bigger than usual, and
-contains a number of new concepts. We'll go over it one piece at a
-time.
-
-This is a program that uses OpenSSL's `SHA1` function to compute the
-hash of its first command-line argument, which it then converts to a
-hexadecimal string and prints to standard output. If you have the
-OpenSSL libraries installed, it should 'just work'.
+We'll start with an example, which is a bit bigger than usual. We'll
+go over it one piece at a time. This is a program that uses OpenSSL's
+`SHA1` function to compute the hash of its first command-line
+argument, which it then converts to a hexadecimal string and prints to
+standard output. If you have the OpenSSL libraries installed, it
+should compile and run without any extra effort.
~~~~ {.xfail-test}
extern mod std;
let bytes = str::to_bytes(data);
let hash = crypto::SHA1(vec::raw::to_ptr(bytes),
vec::len(bytes) as c_uint, ptr::null());
- return as_hex(vec::raw::from_buf(hash, 20u));
+ return as_hex(vec::from_buf(hash, 20));
}
fn main(args: ~[~str]) {
# Foreign modules
-Before we can call `SHA1`, we have to declare it. That is what this
-part of the program is responsible for:
+Before we can call the `SHA1` function defined in the OpenSSL library, we have
+to declare it. That is what this part of the program does:
~~~~ {.xfail-test}
extern mod crypto {
- fn SHA1(src: *u8, sz: uint, out: *u8) -> *u8;
-}
+ fn SHA1(src: *u8, sz: uint, out: *u8) -> *u8;
+}
~~~~
-An `extern` module declaration containing function signatures introduces
-the functions listed as _foreign functions_, that are implemented in some
-other language (usually C) and accessed through Rust's foreign function
-interface (FFI). An extern module like this is called a foreign module, and
-implicitly tells the compiler to link with a library with the same name as
-the module, and that it will find the foreign functions in that library.
+An `extern` module declaration containing function signatures introduces the
+functions listed as _foreign functions_. Foreign functions differ from regular
+Rust functions in that they are implemented in some other language (usually C)
+and called through Rust's foreign function interface (FFI). An extern module
+like this is called a foreign module, and implicitly tells the compiler to
+link with a library that contains the listed foreign functions, and has the
+same name as the module.
-In this case, it'll change the name `crypto` to a shared library name
-in a platform-specific way (`libcrypto.so` on Linux, for example), and
-link that in. If you want the module to have a different name from the
-actual library, you can use the `"link_name"` attribute, like:
+In this case, the Rust compiler changes the name `crypto` to a shared library
+name in a platform-specific way (`libcrypto.so` on Linux, for example),
+searches for the shared library with that name, and links the library into the
+program. If you want the module to have a different name from the actual
+library, you can use the `"link_name"` attribute, like:
~~~~ {.xfail-test}
#[link_name = "crypto"]
# Foreign calling conventions
-Most foreign code will be C code, which usually uses the `cdecl` calling
+Most foreign code is C code, which usually uses the `cdecl` calling
convention, so that is what Rust uses by default when calling foreign
functions. Some foreign functions, most notably the Windows API, use other
-calling conventions, so Rust provides a way to hint to the compiler which
-is expected by using the `"abi"` attribute:
+calling conventions. Rust provides the `"abi"` attribute as a way to hint to
+the compiler which calling convention to use:
~~~~
#[cfg(target_os = "win32")]
}
~~~~
-The `"abi"` attribute applies to a foreign module (it can not be applied
+The `"abi"` attribute applies to a foreign module (it cannot be applied
to a single function within a module), and must be either `"cdecl"`
-or `"stdcall"`. Other conventions may be defined in the future.
+or `"stdcall"`. We may extend the compiler in the future to support other
+calling conventions.
# Unsafe pointers
-The foreign `SHA1` function is declared to take three arguments, and
-return a pointer.
+The foreign `SHA1` function takes three arguments, and returns a pointer.
~~~~ {.xfail-test}
# extern mod crypto {
When declaring the argument types to a foreign function, the Rust
compiler has no way to check whether your declaration is correct, so
you have to be careful. If you get the number or types of the
-arguments wrong, you're likely to get a segmentation fault. Or,
+arguments wrong, you're likely to cause a segmentation fault. Or,
probably even worse, your code will work on one platform, but break on
another.
-In this case, `SHA1` is defined as taking two `unsigned char*`
-arguments and one `unsigned long`. The rust equivalents are `*u8`
+In this case, we declare that `SHA1` takes two `unsigned char*`
+arguments and one `unsigned long`. The Rust equivalents are `*u8`
unsafe pointers and an `uint` (which, like `unsigned long`, is a
machine-word-sized type).
-Unsafe pointers can be created through various functions in the
-standard lib, usually with `unsafe` somewhere in their name. You can
-dereference an unsafe pointer with `*` operator, but use
-caution—unlike Rust's other pointer types, unsafe pointers are
-completely unmanaged, so they might point at invalid memory, or be
-null pointers.
+The standard library provides various functions to create unsafe pointers,
+such as those in `core::cast`. Most of these functions have `unsafe` in their
+name. You can dereference an unsafe pointer with the `*` operator, but use
+caution: unlike Rust's other pointer types, unsafe pointers are completely
+unmanaged, so they might point at invalid memory, or be null pointers.
# Unsafe blocks
let bytes = str::to_bytes(data);
let hash = crypto::SHA1(vec::raw::to_ptr(bytes),
vec::len(bytes), ptr::null());
- return as_hex(vec::raw::from_buf(hash, 20u));
+ return as_hex(vec::from_buf(hash, 20));
}
}
~~~~
-Firstly, what does the `unsafe` keyword at the top of the function
+First, what does the `unsafe` keyword at the top of the function
mean? `unsafe` is a block modifier—it declares the block following it
to be known to be unsafe.
unsafe fn kaboom() { ~"I'm harmless!"; }
~~~~
-This function can only be called from an unsafe block or another
-unsafe function.
+This function can only be called from an `unsafe` block or another
+`unsafe` function.
# Pointer fiddling
let bytes = str::to_bytes(data);
let hash = crypto::SHA1(vec::raw::to_ptr(bytes),
vec::len(bytes), ptr::null());
-return as_hex(vec::raw::from_buf(hash, 20u));
+return as_hex(vec::from_buf(hash, 20));
# }
# }
~~~~
-The `str::to_bytes` function is perfectly safe: it converts a string to
-a `[u8]`. This byte array is then fed to `vec::raw::to_ptr`, which
+The `str::to_bytes` function is perfectly safe: it converts a string to a
+`~[u8]`. The program then feeds this byte array to `vec::raw::to_ptr`, which
returns an unsafe pointer to its contents.
-This pointer will become invalid as soon as the vector it points into
-is cleaned up, so you should be very careful how you use it. In this
-case, the local variable `bytes` outlives the pointer, so we're good.
+This pointer will become invalid at the end of the scope in which the vector
+it points to (`bytes`) is valid, so you should be very careful how you use
+it. In this case, the local variable `bytes` outlives the pointer, so we're
+good.
Passing a null pointer as the third argument to `SHA1` makes it use a
static buffer, and thus save us the effort of allocating memory
-ourselves. `ptr::null` is a generic function that will return an
-unsafe null pointer of the correct type (Rust generics are awesome
-like that—they can take the right form depending on the type that they
-are expected to return).
-
-Finally, `vec::raw::from_buf` builds up a new `[u8]` from the
-unsafe pointer that was returned by `SHA1`. SHA1 digests are always
-twenty bytes long, so we can pass `20u` for the length of the new
+ourselves. `ptr::null` is a generic function that, in this case, returns an
+unsafe null pointer of type `*u8`. (Rust generics are awesome
+like that: they can take the right form depending on the type that they
+are expected to return.)
+
+Finally, `vec::from_buf` builds up a new `~[u8]` from the
+unsafe pointer that `SHA1` returned. SHA1 digests are always
+twenty bytes long, so we can pass `20` for the length of the new
vector.
# Passing structures
C functions often take pointers to structs as arguments. Since Rust
-structs are binary-compatible with C structs, Rust programs can call
+`struct`s are binary-compatible with C structs, Rust programs can call
such functions directly.
This program uses the POSIX function `gettimeofday` to get a
The `#[nolink]` attribute indicates that there's no foreign library to
link in. The standard C library is already linked with Rust programs.
-A `timeval`, in C, is a struct with two 32-bit integers. Thus, we
-define a struct type with the same contents, and declare
-`gettimeofday` to take a pointer to such a struct.
+In C, a `timeval` is a struct with two 32-bit integer fields. Thus, we
+define a `struct` type with the same contents, and declare
+`gettimeofday` to take a pointer to such a `struct`.
-The second argument to `gettimeofday` (the time zone) is not used by
-this program, so it simply declares it to be a pointer to the nil
-type. Since all null pointers have the same representation regardless of
-their referent type, this is safe.
+This program does not use the second argument to `gettimeofday` (the time
+ zone), so the `extern mod` declaration for it simply declares this argument
+ to be a pointer to the unit type (written `()`). Since all null pointers have
+ the same representation regardless of their referent type, this is safe.
# Introduction
-Functions are the programmer's primary tool of abstraction, but there are
-cases in which they are insufficient, because the programmer wants to
-abstract over concepts not represented as values. Consider the following
-example:
+Functions are the primary tool that programmers can use to build abstractions.
+Sometimes, however, programmers want to abstract over things that are not
+runtime values. Macros provide a syntactic abstraction. For an example of how this
+can be useful, consider the following two code fragments, which both pattern-match
+on their input and return early in one case, and do nothing otherwise:
~~~~
# enum t { special_a(uint), special_b(uint) };
# }
~~~~
-This code could become tiresome if repeated many times. However, there is
-no reasonable function that could be written to solve this problem. In such a
-case, it's possible to define a macro to solve the problem. Macros are
+This code could become tiresome if repeated many times. However, there is no
+straightforward way to rewrite it without the repeated code, using functions
+alone. There is a solution, though: defining a macro to solve the problem. Macros are
lightweight custom syntax extensions, themselves defined using the
-`macro_rules!` syntax extension:
+`macro_rules!` syntax extension. The following `early_return` macro captures
+the pattern in the above code:
~~~~
# enum t { special_a(uint), special_b(uint) };
# }
~~~~
-Macros are defined in pattern-matching style:
+Macros are defined in pattern-matching style: in the above example, the text
+`($inp:expr $sp:ident)` that appears on the left-hand side of the `=>` is the
+*macro invocation syntax*, a pattern denoting how to write a call to the
+macro. The text on the right-hand side of the `=>`, beginning with `match
+$inp`, is the *macro transcription syntax*: what the macro expands to.
# Invocation syntax
-On the left-hand-side of the `=>` is the macro invocation syntax. It is
-free-form, excepting the following rules:
+The macro invocation syntax specifies the syntax for the arguments to the
+macro. It appears on the left-hand side of the `=>` in a macro definition. It
+conforms to the following rules:
-1. It must be surrounded in parentheses.
+1. It must be surrounded by parentheses.
2. `$` has special meaning.
3. The `()`s, `[]`s, and `{}`s it contains must balance. For example, `([)` is
forbidden.
+Otherwise, the invocation syntax is free-form.
+
To take as an argument a fragment of Rust code, write `$` followed by a name
- (for use on the right-hand side), followed by a `:`, followed by the sort of
-fragment to match (the most common ones are `ident`, `expr`, `ty`, `pat`, and
-`block`). Anything not preceded by a `$` is taken literally. The standard
+ (for use on the right-hand side), followed by a `:`, followed by a *fragment
+ specifier*. The fragment specifier denotes the sort of fragment to match. The
+ most common fragment specifiers are:
+
+* `ident` (an identifier, referring to a variable or item. Examples: `f`, `x`,
+ `foo`.)
+* `expr` (an expression. Examples: `2 + 2`; `if true { 1 } else { 2 }`;
+ `f(42)`.)
+* `ty` (a type. Examples: `int`, `~[(char, ~str)]`, `&T`.)
+* `pat` (a pattern, usually appearing in a `match` or on the left-hand side of
+ a declaration. Examples: `Some(t)`; `(17, 'a')`; `_`.)
+* `block` (a sequence of actions. Example: `{ log(error, "hi"); return 12; }`)
+
+The parser interprets any token that's not preceded by a `$` literally. Rust's usual
rules of tokenization apply,
-So `($x:ident => (($e:expr)))`, though excessively fancy, would create a macro
-that could be invoked like `my_macro!(i=>(( 2+2 )))`.
+So `($x:ident -> (($e:expr)))`, though excessively fancy, would designate a macro
+that could be invoked like: `my_macro!(i->(( 2+2 )))`.
# Transcription syntax
The right-hand side of the `=>` follows the same rules as the left-hand side,
-except that `$` need only be followed by the name of the syntactic fragment
-to transcribe.
+except that a `$` need only be followed by the name of the syntactic fragment
+to transcribe into the macro expansion; its type need not be repeated.
-The right-hand side must be surrounded by delimiters of some kind, and must be
-an expression; currently, user-defined macros can only be invoked in
-expression position (even though `macro_rules!` itself can be in item
-position).
+The right-hand side must be enclosed by delimiters, and must be
+an expression. Currently, invocations of user-defined macros can only appear in a context
+where the Rust grammar requires an expression, even though `macro_rules!` itself can appear
+in a context where the grammar requires an item.
# Multiplicity
## Invocation
-Going back to the motivating example, suppose that we wanted each invocation
-of `early_return` to potentially accept multiple "special" identifiers. The
-syntax `$(...)*` accepts zero or more occurrences of its contents, much like
-the Kleene star operator in regular expressions. It also supports a separator
-token (a comma-separated list could be written `$(...),*`), and `+` instead of
-`*` to mean "at least one".
+Going back to the motivating example, recall that `early_return` expanded into
+a `match` that would `return` if the `match`'s scrutinee matched the
+"special case" identifier provided as the second argument to `early_return`,
+and do nothing otherwise. Now suppose that we wanted to write a
+version of `early_return` that could handle a variable number of "special"
+cases.
+
+The syntax `$(...)*` on the left-hand side of the `=>` in a macro definition
+accepts zero or more occurrences of its contents. It works much
+like the `*` operator in regular expressions. It also supports a
+separator token (a comma-separated list could be written `$(...),*`), and `+`
+instead of `*` to mean "at least one".
~~~~
# enum t { special_a(uint),special_b(uint),special_c(uint),special_d(uint)};
### Transcription
As the above example demonstrates, `$(...)*` is also valid on the right-hand
-side of a macro definition. The behavior of Kleene star in transcription,
-especially in cases where multiple stars are nested, and multiple different
+side of a macro definition. The behavior of `*` in transcription,
+especially in cases where multiple `*`s are nested, and multiple different
names are involved, can seem somewhat magical and unintuitive at first. The
system that interprets them is called "Macro By Example". The two rules to
keep in mind are (1) the behavior of `$(...)*` is to walk through one "layer"
of repetitions for all of the `$name`s it contains in lockstep, and (2) each
`$name` must be under at least as many `$(...)*`s as it was matched against.
-If it is under more, it'll will be repeated, as appropriate.
+If it is under more, it'll be repeated, as appropriate.
## Parsing limitations
-The parser used by the macro system is reasonably powerful, but the parsing of
-Rust syntax is restricted in two ways:
-1. The parser will always parse as much as possible. For example, if the comma
-were omitted from the syntax of `early_return!` above, `input_1 [` would've
-been interpreted as the beginning of an array index. In fact, invoking the
-macro would have been impossible.
-2. The parser must have eliminated all ambiguity by the time it reaches a
-`$name:fragment_specifier`. This most often affects them when they occur in
-the beginning of, or immediately after, a `$(...)*`; requiring a distinctive
-token in front can solve the problem.
+For technical reasons, there are two limitations to the treatment of syntax
+fragments by the macro parser:
+
+1. The parser will always parse as much as possible of a Rust syntactic
+fragment. For example, if the comma were omitted from the syntax of
+`early_return!` above, `input_1 [` would've been interpreted as the beginning
+of an array index. In fact, invoking the macro would have been impossible.
+2. The parser must have eliminated all ambiguity by the time it reaches a
+`$name:fragment_specifier` declaration. This limitation can result in parse
+errors when declarations occur at the beginning of, or immediately after,
+a `$(...)*`. For example, the grammar `$($t:ty)* $e:expr` will always fail to
+parse because the parser would be forced to choose between parsing `t` and
+parsing `e`. Changing the invocation syntax to require a distinctive token in
+front can solve the problem. In the above example, `$(T $t:ty)* E $e:expr`
+solves the problem.
## A final note
Macros, as currently implemented, are not for the faint of heart. Even
-ordinary syntax errors can be more difficult to debug when they occur inside
-a macro, and errors caused by parse problems in generated code can be very
+ordinary syntax errors can be more difficult to debug when they occur inside a
+macro, and errors caused by parse problems in generated code can be very
tricky. Invoking the `log_syntax!` macro can help elucidate intermediate
-states, using `trace_macros!(true)` will automatically print those
-intermediate states out, and using `--pretty expanded` as an argument to the
-compiler will show the result of expansion.
-
-
+states, invoking `trace_macros!(true)` will automatically print those
+intermediate states out, and passing the flag `--pretty expanded` as a
+command-line argument to the compiler will show the result of expansion.
# Introduction
-The Rust language is designed from the ground up to support pervasive
+The designers of Rust built the language from the ground up to support pervasive
and safe concurrency through lightweight, memory-isolated tasks and
message passing.
-Rust tasks are not the same as traditional threads - they are what are
-often referred to as _green threads_, cooperatively scheduled by the
-Rust runtime onto a small number of operating system threads. Being
-significantly cheaper to create than traditional threads, Rust can
-create hundreds of thousands of concurrent tasks on a typical 32-bit
-system.
-
-Tasks provide failure isolation and recovery. When an exception occurs
-in rust code (either by calling `fail` explicitly or by otherwise performing
-an invalid operation) the entire task is destroyed - there is no way
-to `catch` an exception as in other languages. Instead tasks may monitor
-each other to detect when failure has occurred.
-
-Rust tasks have dynamically sized stacks. When a task is first created
-it starts off with a small amount of stack (currently in the low
-thousands of bytes, depending on platform) and more stack is acquired as
-needed. A Rust task will never run off the end of the stack as is
-possible in many other languages, but they do have a stack budget, and
-if a Rust task exceeds its stack budget then it will fail safely.
-
-Tasks make use of Rust's type system to provide strong memory safety
-guarantees, disallowing shared mutable state. Communication between
-tasks is facilitated by the transfer of _owned_ data through the
-global _exchange heap_.
-
-This tutorial will explain the basics of tasks and communication in Rust,
-explore some typical patterns in concurrent Rust code, and finally
-discuss some of the more exotic synchronization types in the standard
+Rust tasks are not the same as traditional threads: rather, they are more like
+_green threads_. The Rust runtime system schedules tasks cooperatively onto a
+small number of operating system threads. Because tasks are significantly
+cheaper to create than traditional threads, Rust can create hundreds of
+thousands of concurrent tasks on a typical 32-bit system.
+
+Tasks provide failure isolation and recovery. When an exception occurs in Rust
+code (as a result of an explicit call to `fail`, an assertion failure, or
+another invalid operation), the runtime system destroys the entire
+task. Unlike in languages such as Java and C++, there is no way to `catch` an
+exception. Instead, tasks may monitor each other for failure.
+
+Rust tasks have dynamically sized stacks. A task begins its life with a small
+amount of stack space (currently in the low thousands of bytes, depending on
+platform), and acquires more stack as needed. Unlike in languages such as C, a
+Rust task cannot run off the end of the stack. However, tasks do have a stack
+budget. If a Rust task exceeds its stack budget, then it will fail safely:
+with a checked exception.
+
+Tasks use Rust's type system to provide strong memory safety guarantees. In
+particular, the type system guarantees that tasks cannot share mutable state
+with each other. Tasks communicate with each other by transferring _owned_
+data through the global _exchange heap_.
+
+This tutorial explains the basics of tasks and communication in Rust,
+explores some typical patterns in concurrent Rust code, and finally
+discusses some of the more unusual synchronization types in the standard
library.
+> ***Warning:*** This tutorial is incomplete
+
## A note about the libraries
While Rust's type system provides the building blocks needed for safe
in the core and standard libraries, which are still under development
and do not always present a consistent interface.
-In particular, there are currently two independent modules that provide
-a message passing interface to Rust code: `core::comm` and `core::pipes`.
-`core::comm` is an older, less efficient system that is being phased out
-in favor of `pipes`. At some point the existing `core::comm` API will
-be removed and the user-facing portions of `core::pipes` will be moved
-to `core::comm`. In this tutorial we will discuss `pipes` and ignore
-the `comm` API.
+In particular, there are currently two independent modules that provide a
+message passing interface to Rust code: `core::comm` and `core::pipes`.
+`core::comm` is an older, less efficient system that is being phased out in
+favor of `pipes`. At some point, we will remove the existing `core::comm` API
+and move the user-facing portions of `core::pipes` to `core::comm`. In this
+tutorial, we discuss `pipes` and ignore the `comm` API.
For your reference, these are the standard modules involved in Rust
-concurrency at the moment.
+concurrency at this writing.
* [`core::task`] - All code relating to tasks and task scheduling
* [`core::comm`] - The deprecated message passing API
* [`core::pipes`] - The new message passing infrastructure and API
* [`std::comm`] - Higher level messaging types based on `core::pipes`
* [`std::sync`] - More exotic synchronization tools, including locks
-* [`std::arc`] - The ARC type, for safely sharing immutable data
+* [`std::arc`] - The ARC (atomic reference counted) type, for safely sharing
+ immutable data
* [`std::par`] - Some basic tools for implementing parallel algorithms
[`core::task`]: core/task.html
# Basics
-The programming interface for creating and managing tasks is contained
-in the `task` module of the `core` library, making it available to all
-Rust code by default. At it's simplest, creating a task is a matter of
-calling the `spawn` function, passing a closure to run in the new
-task.
+The programming interface for creating and managing tasks lives
+in the `task` module of the `core` library, and is thus available to all
+Rust code by default. At its simplest, creating a task is a matter of
+calling the `spawn` function with a closure argument. `spawn` executes the
+closure in the new task.
~~~~
# use io::println;
}
~~~~
-In Rust, there is nothing special about creating tasks - the language
-itself doesn't know what a 'task' is. Instead, Rust provides in the
-type system all the tools necessary to implement safe concurrency,
-_owned types_ in particular, and leaves the dirty work up to the
-core library.
+In Rust, there is nothing special about creating tasks: a task is not a
+concept that appears in the language semantics. Instead, Rust's type system
+provides all the tools necessary to implement safe concurrency: particularly,
+_owned types_. The language leaves the implementation details to the core
+library.
The `spawn` function has a very simple type signature: `fn spawn(f:
~fn())`. Because it accepts only owned closures, and owned closures
-contained only owned data, `spawn` can safely move the entire closure
+contain only owned data, `spawn` can safely move the entire closure
and all its associated state into an entirely different task for
-execution. Like any closure, the function passed to spawn may capture
+execution. Like any closure, the function passed to `spawn` may capture
an environment that it carries across tasks.
~~~
}
~~~
-By default tasks will be multiplexed across the available cores, running
-in parallel, thus on a multicore machine, running the following code
+By default, the scheduler multiplexes tasks across the available cores, running
+in parallel. Thus, on a multicore machine, running the following code
should interleave the output in vaguely random order.
~~~
state, so one task may not manipulate variables owned by another task.
Instead we use *pipes*.
-Pipes are simply a pair of endpoints, with one for sending messages
-and another for receiving messages. Pipes are low-level communication
-building-blocks and so come in a variety of forms, appropriate for
-different use cases, but there are just a few varieties that are most
-commonly used, which we will cover presently.
+A pipe is simply a pair of endpoints: one for sending messages and another for
+receiving messages. Pipes are low-level communication building-blocks and so
+come in a variety of forms, each one appropriate for a different use case. In
+what follows, we cover the most commonly used varieties.
The simplest way to create a pipe is to use the `pipes::stream`
-function to create a `(Chan, Port)` pair. In Rust parlance a 'channel'
-is a sending endpoint of a pipe, and a 'port' is the receiving
-endpoint. Consider the following example of performing two calculations
-concurrently.
+function to create a `(Chan, Port)` pair. In Rust parlance, a *channel*
+is a sending endpoint of a pipe, and a *port* is the receiving
+endpoint. Consider the following example of calculating two results
+concurrently:
~~~~
use task::spawn;
let (chan, port): (Chan<int>, Port<int>) = stream();
-do spawn {
+do spawn |move chan| {
let result = some_expensive_computation();
chan.send(result);
}
# fn some_other_expensive_computation() {}
~~~~
-Let's examine this example in detail. The `let` statement first creates a
-stream for sending and receiving integers (recall that `let` can be
-used for destructuring patterns, in this case separating a tuple into
-its component parts).
+Let's examine this example in detail. First, the `let` statement creates a
+stream for sending and receiving integers (the left-hand side of the `let`,
+`(chan, port)`, is an example of a *destructuring let*: the pattern separates
+a tuple into its component parts).
~~~~
# use pipes::{stream, Chan, Port};
let (chan, port): (Chan<int>, Port<int>) = stream();
~~~~
-The channel will be used by the child task to send data to the parent task,
+The child task will use the channel to send data to the parent task,
which will wait to receive the data on the port. The next statement
spawns the child task.
# use pipes::{stream, Port, Chan};
# fn some_expensive_computation() -> int { 42 }
# let (chan, port) = stream();
-do spawn {
+do spawn |move chan| {
let result = some_expensive_computation();
chan.send(result);
}
~~~~
-Notice that `chan` was transferred to the child task implicitly by
-capturing it in the task closure. Both `Chan` and `Port` are sendable
-types and may be captured into tasks or otherwise transferred between
-them. In the example, the child task performs an expensive computation
-then sends the result over the captured channel.
+Notice that the creation of the task closure transfers `chan` to the child
+task implicitly: the closure captures `chan` in its environment. Both `Chan`
+and `Port` are sendable types and may be captured into tasks or otherwise
+transferred between them. In the example, the child task runs an expensive
+computation, then sends the result over the captured channel.
-Finally, the parent continues by performing some other expensive
-computation and then waiting for the child's result to arrive on the
+Finally, the parent continues with some other expensive
+computation, then waits for the child's result to arrive on the
port:
~~~~
let result = port.recv();
~~~~
-The `Port` and `Chan` pair created by `stream` enable efficient
-communication between a single sender and a single receiver, but
-multiple senders cannot use a single `Chan`, nor can multiple
-receivers use a single `Port`. What if our example needed to perform
-multiple computations across a number of tasks? The following cannot
-be written:
+The `Port` and `Chan` pair created by `stream` enables efficient communication
+between a single sender and a single receiver, but multiple senders cannot use
+a single `Chan`, and multiple receivers cannot use a single `Port`. What if our
+example needed to compute multiple results across a number of tasks? The
+following program is ill-typed:
~~~ {.xfail-test}
# use task::{spawn};
# fn some_expensive_computation() -> int { 42 }
let (chan, port) = stream();
-do spawn {
+do spawn |move chan| {
chan.send(some_expensive_computation());
}
for uint::range(0, 3) |init_val| {
// Create a new channel handle to distribute to the child task
let child_chan = chan.clone();
- do spawn {
+ do spawn |move child_chan| {
child_chan.send(some_expensive_computation(init_val));
}
}
# fn some_expensive_computation(_i: uint) -> int { 42 }
~~~
-Here we transfer ownership of the channel into a new `SharedChan`
-value. Like `Chan`, `SharedChan` is a non-copyable, owned type
-(sometimes also referred to as an 'affine' or 'linear' type). Unlike
-`Chan` though, `SharedChan` may be duplicated with the `clone()`
-method. A cloned `SharedChan` produces a new handle to the same
-channel, allowing multiple tasks to send data to a single port.
-Between `spawn`, `stream` and `SharedChan` we have enough tools
-to implement many useful concurrency patterns.
+Here we transfer ownership of the channel into a new `SharedChan` value. Like
+`Chan`, `SharedChan` is a non-copyable, owned type (sometimes also referred to
+as an *affine* or *linear* type). Unlike with `Chan`, though, the programmer
+may duplicate a `SharedChan`, with the `clone()` method. A cloned
+`SharedChan` produces a new handle to the same channel, allowing multiple
+tasks to send data to a single port. Between `spawn`, `stream` and
+`SharedChan`, we have enough tools to implement many useful concurrency
+patterns.
Note that the above `SharedChan` example is somewhat contrived since
you could also simply use three `stream` pairs, but it serves to
-illustrate the point. For reference, written with multiple streams it
+illustrate the point. For reference, written with multiple streams, it
might look like the example below.
~~~
// Create a vector of ports, one for each child task
let ports = do vec::from_fn(3) |init_val| {
let (chan, port) = stream();
- do spawn {
+ do spawn |move chan| {
chan.send(some_expensive_computation(init_val));
}
- port
+ move port
};
// Wait on each port, accumulating the results
# fn some_expensive_computation(_i: uint) -> int { 42 }
~~~
-# TODO
-
# Handling task failure
-Rust has a built-in mechanism for raising exceptions, written `fail`
-(or `fail ~"reason"`, or sometimes `assert expr`), and it causes the
-task to unwind its stack, running destructors and freeing memory along
-the way, and then exit itself. Unlike C++, exceptions in Rust are
-unrecoverable within a single task - once a task fails there is no way
-to "catch" the exception.
+Rust has a built-in mechanism for raising exceptions. The `fail` construct
+(which can also be written with an error string as an argument: `fail
~"reason"`) and the `assert` construct (which effectively calls `fail` if a
+boolean expression is false) are both ways to raise exceptions. When a task
+raises an exception, the task unwinds its stack---running destructors and
+freeing memory along the way---and then exits. Unlike exceptions in C++,
+exceptions in Rust are unrecoverable within a single task: once a task fails,
+there is no way to "catch" the exception.
-All tasks are, by default, _linked_ to each other, meaning their fate
-is intertwined, and if one fails so do all of them.
+All tasks are, by default, _linked_ to each other. That means that the fates
+of all tasks are intertwined: if one fails, so do all the others.
~~~
# use task::spawn;
# };
~~~
-While it isn't possible for a task to recover from failure,
-tasks may be notified when _other_ tasks fail. The simplest way
-of handling task failure is with the `try` function, which is
-similar to spawn, but immediately blocks waiting for the child
-task to finish.
+While it isn't possible for a task to recover from failure, tasks may notify
+each other of failure. The simplest way of handling task failure is with the
+`try` function, which is similar to `spawn`, but immediately blocks waiting
+for the child task to finish. `try` returns a value of type `Result<int,
+()>`. `Result` is an `enum` type with two variants: `Ok` and `Err`. In this
+case, because the type arguments to `Result` are `int` and `()`, callers can
+pattern-match on a result to check whether it's an `Ok` result with an `int`
+field (representing a successful result) or an `Err` result (representing
+termination with an error).
~~~
# fn some_condition() -> bool { false }
[`Result`]: core/result.html
> ***Note:*** A failed task does not currently produce a useful error
-> value (all error results from `try` are equal to `Err(())`). In the
-> future it may be possible for tasks to intercept the value passed to
+> value (`try` always returns `Err(())`). In the
+> future, it may be possible for tasks to intercept the value passed to
> `fail`.
TODO: Need discussion of `future_result` in order to make failure
might want to contain the failure at a certain boundary (perhaps a
small piece of input from the outside world, which you happen to be
processing in parallel, is malformed and its processing task can't
-proceed). Hence the need for different _linked failure modes_.
+proceed). Hence, you will need different _linked failure modes_.
## Failure modes
-By default, task failure is _bidirectionally linked_, which means if
+By default, task failure is _bidirectionally linked_, which means that if
either task dies, it kills the other one.
~~~
# };
~~~
-If you want parent tasks to kill their children, but not for a child
-task's failure to kill the parent, you can call
+If you want parent tasks to be able to kill their children, but do not want a
+parent to die automatically if one of its child tasks dies, you can call
`task::spawn_supervised` for _unidirectionally linked_ failure. The
function `task::try`, which we saw previously, uses `spawn_supervised`
internally, with additional logic to wait for the child task to finish
# fn sleep_forever() { loop { task::yield() } }
# do task::try {
let (sender, receiver): (Chan<int>, Port<int>) = stream();
-do spawn { // Bidirectionally linked
+do spawn |move receiver| { // Bidirectionally linked
// Wait for the supervised child task to exist.
let message = receiver.recv();
// Kill both it and the parent task.
assert message != 42;
}
-do try { // Unidirectionally linked
+do try |move sender| { // Unidirectionally linked
sender.send(42);
sleep_forever(); // Will get woken up by force
}
Supervised failure is useful in any situation where one task manages
multiple fallible child tasks, and the parent task can recover
-if any child files. On the other hand, if the _parent_ (supervisor) fails
+if any child fails. On the other hand, if the _parent_ (supervisor) fails,
then there is nothing the children can do to recover, so they should
also fail.
# };
~~~
-
-# Unfinished notes
-
-## Actor patterns
-
-## Linearity, option dancing, owned closures
-
## Creating a task with a bi-directional communication path
A very common thing to do is to spawn a child task where the parent
and child both need to exchange messages with each other. The
function `std::comm::DuplexStream()` supports this pattern. We'll
-look briefly at how it is used.
+look briefly at how to use it.
To see how `spawn_conversation()` works, we will create a child task
-that receives `uint` messages, converts them to a string, and sends
-the string in response. The child terminates when `0` is received.
+that repeatedly receives a `uint` message, converts it to a string, and sends
+the string in response. The child terminates when it receives `0`.
Here is the function that implements the child task:
~~~~
let mut value: uint;
loop {
value = channel.recv();
- channel.send(uint::to_str(value, 10u));
- if value == 0u { break; }
+ channel.send(uint::to_str(value, 10));
+ if value == 0 { break; }
}
}
~~~~
send strings (the first type parameter) and receive `uint` messages
(the second type parameter). The body itself simply loops, reading
from the channel and then sending its response back. The actual
-response itself is simply the strified version of the received value,
+response itself is simply the stringified version of the received value,
`uint::to_str(value)`.
Here is the code for the parent task:
let (from_child, to_child) = DuplexStream();
-do spawn || {
+do spawn |move to_child| {
stringifier(&to_child);
};
-from_child.send(22u);
+from_child.send(22);
assert from_child.recv() == ~"22";
-from_child.send(23u);
-from_child.send(0u);
+from_child.send(23);
+from_child.send(0);
assert from_child.recv() == ~"23";
assert from_child.recv() == ~"0";
# }
~~~~
-The parent task first calls `DuplexStream` to create a pair of bidirectional endpoints. It then uses `task::spawn` to create the child task, which captures one end of the communication channel. As a result, both parent
-and child can send and receive data to and from the other.
+The parent task first calls `DuplexStream` to create a pair of bidirectional
+endpoints. It then uses `task::spawn` to create the child task, which captures
+one end of the communication channel. As a result, both parent and child can
+send and receive data to and from the other.
-% Rust Language Tutorial
+% The Rust Language Tutorial
# Introduction
Rust is a programming language with a focus on type safety, memory
safety, concurrency and performance. It is intended for writing
-large-scale, high-performance software while preventing several
+large-scale, high-performance software that is free from several
classes of common errors. Rust has a sophisticated memory model that
encourages efficient data structures and safe concurrency patterns,
forbidding invalid memory accesses that would otherwise cause
procedural, functional and object-oriented styles. Some of its
pleasant high-level features include:
-* **Pattern matching and algebraic data types (enums).** As
- popularized by functional languages, pattern matching on ADTs
- provides a compact and expressive way to encode program logic.
-* **Type inference.** Type annotations on local variable
- declarations are optional.
-* **Task-based concurrency.** Rust uses lightweight tasks that do
- not share memory.
-* **Higher-order functions.** Rust's efficient and flexible closures
- are heavily relied on to provide iteration and other control
- structures
-* **Parametric polymorphism (generics).** Functions and types can be
- parameterized over type variables with optional trait-based type
- constraints.
-* **Trait polymorphism.** Rust's type system features a unique
- combination of type classes and object-oriented interfaces.
+* **Type inference.** Type annotations on local variable declarations
+ are optional.
+* **Safe task-based concurrency.** Rust's lightweight tasks do not share
+ memory, instead communicating through messages.
+* **Higher-order functions.** Efficient and flexible closures provide
+  iteration and other control structures.
+* **Pattern matching and algebraic data types.** Pattern matching on
+ Rust's enumeration types (a more powerful version of C's enums,
+ similar to algebraic data types in functional languages) is a
+ compact and expressive way to encode program logic.
+* **Polymorphism.** Rust has type-parametric functions and
+ types, type classes and OO-style interfaces.
## Scope
This is an introductory tutorial for the Rust programming language. It
covers the fundamentals of the language, including the syntax, the
-type system and memory model, and generics. [Additional
+type system and memory model, generics, and modules. [Additional
tutorials](#what-next) cover specific language features in greater
depth.
-It assumes the reader is familiar with the basic concepts of
+This tutorial assumes that the reader is familiar with the basic concepts of
programming, and has programmed in one or more other languages
-before. It will often make comparisons to other languages,
+before. We will often compare Rust to other languages,
particularly those in the C family.
## Conventions
-Throughout the tutorial, words that indicate language keywords or
-identifiers defined in example code are displayed in `code font`.
+Throughout the tutorial, language keywords and identifiers defined in
+example code are displayed in `code font`.
Code snippets are indented, and also shown in a monospaced font. Not
all snippets constitute whole programs. For brevity, we'll often show
fragments of programs that don't compile on their own. To try them
out, you might have to wrap them in `fn main() { ... }`, and make sure
-they don't contain references to things that aren't actually defined.
+they don't contain references to names that aren't actually defined.
-> ***Warning:*** Rust is a language under heavy development. Notes
+> ***Warning:*** Rust is a language under ongoing development. Notes
> about potential changes to the language, implementation
> deficiencies, and other caveats appear offset in blockquotes.
* Windows (7, Server 2008 R2), x86 only
* Linux (various distributions), x86 and x86-64
-* OSX 10.6 ("Snow Leopard") or 10.7 ("Lion"), x86 and x86-64
+* OSX 10.6 ("Snow Leopard") or greater, x86 and x86-64
You may find that other platforms work, but these are our "tier 1"
supported build environments that are most likely to work.
> ***Note:*** Windows users should read the detailed
-> [getting started][wiki-start] notes on the wiki. Even when using
-> the binary installer the Windows build requires a MinGW installation,
-> the precise details of which are not discussed in this tutorial.
+> "[getting started][wiki-start]" notes on the wiki. Even when using
+> the binary installer, the Windows build requires a MinGW installation,
+> the precise details of which are not discussed here. Finally, `rustc` may
+> need to be [referred to as `rustc.exe`][bug-3319]. It's a bummer, I
+> know.
+
+[bug-3319]: https://github.com/mozilla/rust/issues/3319
+[wiki-start]: https://github.com/mozilla/rust/wiki/Note-getting-started-developing-Rust
To build from source you will also need the following prerequisite
packages:
* gnu make 3.81 or later
* curl
-Assuming you're on a relatively modern *nix system and have met the
-prerequisites, something along these lines should work.
+If you've fulfilled those prerequisites, something along these lines
+should work.
~~~~ {.notrust}
$ wget http://dl.rust-lang.org/dist/rust-0.4.tar.gz
You may need to use `sudo make install` if you do not normally have
permission to modify the destination directory. The install locations
can be adjusted by passing a `--prefix` argument to
-`configure`. Various other options are also supported, pass `--help`
+`configure`. Various other options are also supported: pass `--help`
for more information on them.
When complete, `make install` will place several programs into
If the Rust compiler was installed successfully, running `rustc
hello.rs` will produce an executable called `hello` (or `hello.exe` on
-Windows) which, upon running, will likely do exactly what you expect
-(unless you are on Windows, in which case what it does is subject
-to local weather conditions).
-
-> ***Note:*** That may or may not be hyperbole, but there are some
-> 'gotchas' to be aware of on Windows. First, the MinGW environment
-> must be set up perfectly. Please read [the
-> wiki][wiki-started]. Second, `rustc` may need to be [referred to as
-> `rustc.exe`][bug-3319]. It's a bummer, I know, and I am so very
-> sorry.
-
-[bug-3319]: https://github.com/mozilla/rust/issues/3319
-[wiki-started]: https://github.com/mozilla/rust/wiki/Note-getting-started-developing-Rust
+Windows) which, upon running, will likely do exactly what you expect.
-The Rust compiler tries to provide useful information when it runs
-into an error. If you modify the program to make it invalid (for
-example, by changing `io::println` to some nonexistent function), and
-then compile it, you'll see an error message like this:
+The Rust compiler tries to provide useful information when it encounters an
+error. If you introduce an error into the program (for example, by changing
+`io::println` to some nonexistent function), and then compile it, you'll see
+an error message like this:
~~~~ {.notrust}
hello.rs:2:4: 2:16 error: unresolved name: io::print_with_unicorns
In its simplest form, a Rust program is a `.rs` file with some types
and functions defined in it. If it has a `main` function, it can be
compiled to an executable. Rust does not allow code that's not a
-declaration to appear at the top level of the file—all statements must
+declaration to appear at the top level of the file: all statements must
live inside a function. Rust programs can also be compiled as
libraries, and included in other programs.
under `src/etc/kate`.
There is ctags support via `src/etc/ctags.rust`, but many other
-tools and editors are not provided for yet. If you end up writing a Rust
+tools and editors are not yet supported. If you end up writing a Rust
mode for your favorite editor, let us know so that we can link to it.
[sublime]: http://github.com/dbp/sublime-rust
[sublime-pkg]: http://wbond.net/sublime_packages/package_control
-# Syntax Basics
+# Syntax basics
Assuming you've programmed in any C-family language (C++, Java,
JavaScript, C#, or PHP), Rust will feel familiar. Code is arranged
The main surface difference to be aware of is that the condition at
the head of control structures like `if` and `while` do not require
-paretheses, while their bodies *must* be wrapped in
-brackets. Single-statement, bracket-less bodies are not allowed.
+parentheses, while their bodies *must* be wrapped in
+braces. Single-statement, unbraced bodies are not allowed.
~~~~
# fn recalibrate_universe() -> bool { true }
}
~~~~
-The `let` keyword introduces a local variable. Variables are immutable
-by default, so `let mut` can be used to introduce a local variable
-that can be reassigned.
+The `let` keyword introduces a local variable. Variables are immutable by
+default. To introduce a local variable that you can re-assign later, use `let
+mut` instead.
~~~~
let hi = "hi";
Although Rust can almost always infer the types of local variables, you
can specify a variable's type by following it with a colon, then the type
-name.
+name.
~~~~
let monster_size: float = 57.8;
let monster_size: int = 50;
~~~~
-Local variables may shadow earlier declarations, as in the previous
-example in which `monster_size` is first declared as a `float`
-then a second `monster_size` is declared as an int. If you were to actually
-compile this example though, the compiler will see that the second
-`monster_size` is unused, assume that you have made a mistake, and issue
-a warning. For occasions where unused variables are intentional, their
-name may be prefixed with an underscore to silence the warning, like
-`let _monster_size = 50;`.
+Local variables may shadow earlier declarations, as in the previous example:
+`monster_size` was first declared as a `float`, and then a second
+`monster_size` was declared as an int. If you were to actually compile this
+example, though, the compiler will determine that the second `monster_size` is
+unused and issue a warning (because this situation is likely to indicate a
+programmer error). For occasions where unused variables are intentional, their
+name may be prefixed with an underscore to silence the warning, like `let
+_monster_size = 50;`.
Rust identifiers follow the same rules as C; they start with an alphabetic
character or an underscore, and after that may contain any sequence of
};
~~~~
-Both pieces of code are exactly equivalent—they assign a value to
+Both pieces of code are exactly equivalent: they assign a value to
`price` depending on the condition that holds. Note that there
-are not semicolons in the blocks of the second snippet. This is
-important; the lack of a semicolon after the last statement in a
+are no semicolons in the blocks of the second snippet. This is
+important: the lack of a semicolon after the last statement in a
braced block gives the whole block the value of that last expression.
Put another way, the semicolon in Rust *ignores the value of an expression*.
branch has a different value, and `price` gets the value of the branch that
was taken.
-In short, everything that's not a declaration (`let` for variables,
-`fn` for functions, et cetera) is an expression, including function bodies.
+In short, everything that's not a declaration (declarations are `let` for
+variables, `fn` for functions, and any top-level named items such as
+[traits](#traits), [enum types](#enums), and [constants](#constants)) is an
+expression, including function bodies.
~~~~
fn is_four(x: int) -> bool {
# fn foo() -> bool { true }
# fn bar() -> bool { true }
# fn baz() -> bool { true }
-// `let` is not an expression, so it is semi-colon terminated;
+// `let` is not an expression, so it is semicolon-terminated;
let x = foo();
// When used in statement position, bracy expressions do not
The basic types include the usual boolean, integral, and floating-point types.
------------------------- -----------------------------------------------
-`()` Nil, the type that has only a single value
+`()` Unit, the type that has only a single value
`bool` Boolean type, with values `true` and `false`
`int`, `uint` Machine-pointer-sized signed and unsigned integers
`i8`, `i16`, `i32`, `i64` Signed integers with a specific size (in bits)
`[mut T]` Mutable vector with unknown size
------------------------- -----------------------------------------------
+> ***Note***: In the future, mutability for vectors may be defined by
+> the slot that contains the vector, not the type of the vector itself,
+> deprecating the `[mut T]` syntax.
+
In function types, the return type is specified with an arrow, as in
the type `fn() -> bool` or the function declaration `fn foo() -> bool
{ }`. For functions that do not return a meaningful value, you can
This will provide a synonym, `MonsterSize`, for unsigned integers. It will not
actually create a new, incompatible type—`MonsterSize` and `uint` can be used
interchangeably, and using one where the other is expected is not a type
-error.
+error. In that sense, types declared with `type` are *structural*: their
+meaning follows from their structure, and their names are irrelevant in the
+type system.
-To create data types which are not synonyms, `struct` and `enum`
-can be used. They're described in more detail below, but they look like this:
+Sometimes, you want your data types to be *nominal* instead of structural: you
+want their name to be part of their meaning, so that types with the same
+structure but different names are not interchangeable. Rust has two ways to
+create nominal data types: `struct` and `enum`. They're described in more
+detail below, but they look like this:
~~~~
enum HidingPlaces {
## Literals
-Integers can be written in decimal (`144`), hexadecimal (`0x90`), and
+Integers can be written in decimal (`144`), hexadecimal (`0x90`), or
binary (`0b10010000`) base. Each integral type has a corresponding literal
suffix that can be used to indicate the type of a literal: `i` for `int`,
`u` for `uint`, and `i8` for the `i8` type, etc.
-In the absense of an integer literal suffix, Rust will infer the
+In the absence of an integer literal suffix, Rust will infer the
integer type based on type annotations and function signatures in the
surrounding program. In the absence of any type information at all,
Rust will assume that an unsuffixed integer literal has type
(32-bit) and `f64` (64-bit) can be used to create literals of a
specific type.
-The nil literal is written just like the type: `()`. The keywords
+The unit literal is written just like the type: `()`. The keywords
`true` and `false` produce the boolean literals.
-Character literals are written between single quotes, as in `'x'`. Just as in
+Character literals are written between single quotes, as in `'x'`. Just like
C, Rust understands a number of character escapes, using the backslash
character, such as `\n`, `\r`, and `\t`. String literals,
written between double quotes, allow the same escape sequences. Rust strings
## Constants
-Compile-time constants are declared with `const`. All scalar types,
-like integers and floats, may be declared `const`, as well as fixed
-length vectors, static strings (more on this later), and structs.
-Constants may be declared in any scope and may refer to other
-constants. Constant declarations are not type inferred, so must always
-have a type annotation. By convention they are written in all capital
-letters.
+Compile-time constants are declared with `const`. A constant may have any
+scalar type (for example, integer or float). Other allowable constant types
+are fixed-length vectors, static strings (more on this later), and
+structs. Constants may be declared in any scope and may refer to other
+constants. The compiler does not infer types for constants, so constants must
+always be declared with a type annotation. By convention, they are written in
+all capital letters.
~~~
// Scalars can be constants
Rust's set of operators contains very few surprises. Arithmetic is done with
`*`, `/`, `%`, `+`, and `-` (multiply, divide, remainder, plus, minus). `-` is
-also a unary prefix operator that does negation. As in C, the bit operators
+also a unary prefix operator that negates numbers. As in C, the bit operators
`>>`, `<<`, `&`, `|`, and `^` are also supported.
Note that, if applied to an integer value, `!` flips all the bits (like `~` in
~~~~
The main difference with C is that `++` and `--` are missing, and that
-the logical bitwise operators have higher precedence — in C, `x & 2 > 0`
+the logical bitwise operators have higher precedence—in C, `x & 2 > 0`
means `x & (2 > 0)`, but in Rust, it means `(x & 2) > 0`, which is
-more likely what a novice expects.
+more likely to be what a novice expects.
## Syntax extensions
*Syntax extensions* are special forms that are not built into the language,
but are instead provided by the libraries. To make it clear to the reader when
-a syntax extension is being used, the names of all syntax extensions end with
-`!`. The standard library defines a few syntax extensions, the most useful of
-which is `fmt!`, a `sprintf`-style text formatter that is expanded at compile
-time.
+a name refers to a syntax extension, the names of all syntax extensions end
+with `!`. The standard library defines a few syntax extensions, the most
+useful of which is `fmt!`, a `sprintf`-style text formatter that an early
+compiler phase expands statically.
-`fmt!` supports most of the directives that [printf][pf] supports, but
-will give you a compile-time error when the types of the directives
+`fmt!` supports most of the directives that [printf][pf] supports, but unlike
+printf, will give you a compile-time error when the types of the directives
don't match the types of the arguments.
~~~~
[pf]: http://en.cppreference.com/w/cpp/io/c/fprintf
-You can define your own syntax extensions with the macro system, which is out
-of scope of this tutorial.
+You can define your own syntax extensions with the macro system. For details, see the [macro tutorial][macros].
+
+[macros]: tutorial-macros.html
# Control structures
## Conditionals
-We've seen `if` pass by a few times already. To recap, braces are
-compulsory, an optional `else` clause can be appended, and multiple
+We've seen `if` expressions a few times already. To recap, braces are
+compulsory, an `if` can have an optional `else` clause, and multiple
`if`/`else` constructs can be chained together:
~~~~
}
~~~~
-The condition given to an `if` construct *must* be of type boolean (no
-implicit conversion happens). If the arms return a value, this value
-must be of the same type for every arm in which control reaches the
-end of the block:
+The condition given to an `if` construct *must* be of type `bool` (no
+implicit conversion happens). If the arms are blocks that have a
+value, this value must be of the same type for every arm in which
+control reaches the end of the block:
~~~~
fn signum(x: int) -> int {
## Pattern matching
Rust's `match` construct is a generalized, cleaned-up version of C's
-`switch` construct. You provide it with a value and a number of *arms*,
-each labelled with a pattern, and the code will attempt to match each pattern
-in order. For the first one that matches, the arm is executed.
+`switch` construct. You provide it with a value and a number of
+*arms*, each labelled with a pattern, and the code compares the value
+against each pattern in order until one matches. The matching pattern
+executes its corresponding arm.
~~~~
# let my_number = 1;
}
~~~~
-There is no 'falling through' between arms, as in C—only one arm is
-executed, and it doesn't have to explicitly `break` out of the
+Unlike in C, there is no 'falling through' between arms: only one arm
+executes, and it doesn't have to explicitly `break` out of the
construct when it is finished.
-The part to the left of the arrow `=>` is called the *pattern*. Literals are
-valid patterns and will match only their own value. The pipe operator
-(`|`) can be used to assign multiple patterns to a single arm. Ranges
-of numeric literal patterns can be expressed with two dots, as in `M..N`. The
-underscore (`_`) is a wildcard pattern that matches everything.
+A `match` arm consists of a *pattern*, then an arrow `=>`, followed by
+an *action* (expression). Literals are valid patterns and match only
+their own value. A single arm may match multiple different patterns by
+combining them with the pipe operator (`|`), so long as every pattern
+binds the same set of variables. Ranges of numeric literal patterns
+can be expressed with two dots, as in `M..N`. The underscore (`_`) is
+a wildcard pattern that matches any single value. The asterisk (`*`)
+is a different wildcard that can match one or more fields in an `enum`
+variant.
The patterns in a match arm are followed by a fat arrow, `=>`, then an
expression to evaluate. Each case is separated by commas. It's often
}
~~~
-`match` constructs must be *exhaustive*: they must have an arm covering every
-possible case. For example, if the arm with the wildcard pattern was left off
-in the above example, the typechecker would reject it.
+`match` constructs must be *exhaustive*: they must have an arm
+covering every possible case. For example, the typechecker would
+reject the previous example if the arm with the wildcard pattern was
+omitted.
-A powerful application of pattern matching is *destructuring*, where
-you use the matching to get at the contents of data types. Remember
-that `(float, float)` is a tuple of two floats:
+A powerful application of pattern matching is *destructuring*:
+matching in order to bind names to the contents of data
+types. Remember that `(float, float)` is a tuple of two floats:
~~~~
fn angle(vector: (float, float)) -> float {
}
~~~~
-A variable name in a pattern matches everything, *and* binds that name
-to the value of the matched thing inside of the arm block. Thus, `(0f,
+A variable name in a pattern matches any value, *and* binds that name
+to the matched value inside of the arm's action. Thus, `(0f,
y)` matches any tuple whose first element is zero, and binds `y` to
the second element. `(x, y)` matches any tuple, and binds both
-elements to a variable.
+elements to variables.
-Any `match` arm can have a guard clause (written `if EXPR`), which is
-an expression of type `bool` that determines, after the pattern is
-found to match, whether the arm is taken or not. The variables bound
-by the pattern are available in this guard expression.
+Any `match` arm can have a guard clause (written `if EXPR`), called a
+*pattern guard*, which is an expression of type `bool` that
+determines, after the pattern is found to match, whether the arm is
+taken or not. The variables bound by the pattern are in scope in this
+guard expression. The first arm in the `angle` example shows an
+example of a pattern guard.
You've already seen simple `let` bindings, but `let` is a little
-fancier than you've been led to believe. It too supports destructuring
-patterns. For example, you can say this to extract the fields from a
-tuple, introducing two variables, `a` and `b`.
+fancier than you've been led to believe. It, too, supports destructuring
+patterns. For example, you can write this to extract the fields from a
+tuple, introducing two variables at once: `a` and `b`.
~~~~
# fn get_tuple_of_two_ints() -> (int, int) { (1, 1) }
let (a, b) = get_tuple_of_two_ints();
~~~~
-Let bindings only work with _irrefutable_ patterns, that is, patterns
+Let bindings only work with _irrefutable_ patterns: that is, patterns
that can never fail to match. This excludes `let` from matching
-literals and most enum variants.
+literals and most `enum` variants.
## Loops
-`while` produces a loop that runs as long as its given condition
-(which must have type `bool`) evaluates to true. Inside a loop, the
-keyword `break` can be used to abort the loop, and `loop` can be used
-to abort the current iteration and continue with the next.
+`while` denotes a loop that iterates as long as its given condition
+(which must have type `bool`) evaluates to `true`. Inside a loop, the
+keyword `break` aborts the loop, and `loop` aborts the current
+iteration and continues with the next.
~~~~
let mut cake_amount = 8;
}
~~~~
-`loop` is the preferred way of writing `while true`:
+`loop` denotes an infinite loop, and is the preferred way of writing `while true`:
~~~~
let mut x = 5;
This code prints out a weird sequence of numbers and stops as soon as
it finds one that can be divided by five.
-For more involved iteration, such as going over the elements of a
-collection, Rust uses higher-order functions. We'll come back to those
-in a moment.
+For more involved iteration, such as enumerating the elements of a
+collection, Rust uses [higher-order functions](#closures).
-# Basic datatypes
-
-The core datatypes of Rust are structs, enums (tagged unions, algebraic data
-types), and tuples. They are immutable by default.
-
-~~~~
-struct Point { x: float, y: float }
-
-enum Shape {
- Circle(Point, float),
- Rectangle(Point, Point)
-}
-~~~~
+# Data structures
## Structs
Rust struct types must be declared before they are used using the `struct`
syntax: `struct Name { field1: T1, field2: T2 [, ...] }`, where `T1`, `T2`,
... denote types. To construct a struct, use the same syntax, but leave off
-the `struct`; for example: `Point { x: 1.0, y: 2.0 }`.
+the `struct`; for example: `Point { x: 1.0, y: 2.0 }`.
Structs are quite similar to C structs and are even laid out the same way in
-memory (so you can read from a Rust struct in C, and vice-versa). The dot
-operator is used to access struct fields (`mypoint.x`).
+memory (so you can read from a Rust struct in C, and vice-versa). Use the dot
+operator to access struct fields, as in `mypoint.x`.
Fields that you want to mutate must be explicitly marked `mut`.
With a value of such a type, you can do `mystack.head += 1`. If `mut` were
omitted from the type, such an assignment would result in a type error.
-Structs can be destructured in `match` patterns. The basic syntax is
+`match` patterns destructure structs. The basic syntax is
`Name {fieldname: pattern, ...}`:
~~~~
}
~~~
-Structs are the only type in Rust that may have user-defined destructors,
-using `drop` blocks, inside of which the struct's value may be referred
-to with the name `self`.
+Structs are the only type in Rust that may have user-defined
+destructors, defined with `drop` blocks. Inside a `drop`, the name
+`self` refers to the struct's value.
~~~
struct TimeBomb {
`Point` struct and a float, or a `Rectangle`, in which case it contains
two `Point` structs. The run-time representation of such a value
includes an identifier of the actual form that it holds, much like the
-'tagged union' pattern in C, but with better ergonomics.
+'tagged union' pattern in C, but with better static guarantees.
-The above declaration will define a type `Shape` that can be used to
-refer to such shapes, and two functions, `Circle` and `Rectangle`,
-which can be used to construct values of the type (taking arguments of
-the specified types). So `Circle(Point {x: 0f, y: 0f}, 10f)` is the way to
+The above declaration will define a type `Shape` that can refer to
+such shapes, and two functions, `Circle` and `Rectangle`, which can be
+used to construct values of the type (taking arguments of the
+specified types). So `Circle(Point {x: 0f, y: 0f}, 10f)` is the way to
create a new circle.
-Enum variants need not have type parameters. This, for example, is
-equivalent to a C enum:
+Enum variants need not have type parameters. This `enum` declaration,
+for example, is equivalent to a C enum:
~~~~
enum Direction {
}
~~~~
-This will define `North`, `East`, `South`, and `West` as constants,
+This declaration defines `North`, `East`, `South`, and `West` as constants,
all of which have type `Direction`.
-When an enum is C-like, that is, when none of the variants have
-parameters, it is possible to explicitly set the discriminator values
-to an integer value:
+When an enum is C-like (that is, when none of the variants have
+parameters), it is possible to explicitly set the discriminator values
+to a constant value:
~~~~
enum Color {
If an explicit discriminator is not specified for a variant, the value
defaults to the value of the previous variant plus one. If the first
variant does not have a discriminator, it defaults to 0. For example,
-the value of `North` is 0, `East` is 1, etc.
+the value of `North` is 0, `East` is 1, `South` is 2, and `West` is 3.
-When an enum is C-like the `as` cast operator can be used to get the
-discriminator's value.
+When an enum is C-like, you can apply the `as` cast operator to
+convert it to its discriminator value as an int.
<a name="single_variant_enum"></a>
-There is a special case for enums with a single variant. These are
-used to define new types in such a way that the new name is not just a
-synonym for an existing type, but its own distinct type. If you say:
+There is a special case for enums with a single variant, which are
+sometimes called "newtype-style enums" (after Haskell's "newtype"
+feature). These are used to define new types in such a way that the
+new name is not just a synonym for an existing type, but its own
+distinct type: `type` creates a structural synonym, while this form of
+`enum` creates a nominal synonym. If you say:
~~~~
enum GizmoId = int;
enum GizmoId { GizmoId(int) }
~~~~
-Enum types like this can have their content extracted with the
+You can extract the contents of such an enum type with the
dereference (`*`) unary operator:
~~~~
let id_int: int = *my_gizmo_id;
~~~~
+Types like this can be useful to differentiate between data that have
+the same type but must be used in different ways.
+
+~~~~
+enum Inches = int;
+enum Centimeters = int;
+~~~~
+
+The above definitions give programs a simple way to avoid
+confusing numbers that correspond to different units.
+
For enum types with multiple variants, destructuring is the only way to
get at their contents. All variant constructors can be used as
patterns, as in this definition of `area`:
~~~~
-# type Point = {x: float, y: float};
+# struct Point {x: float, y: float}
# enum Shape { Circle(Point, float), Rectangle(Point, Point) }
fn area(sh: Shape) -> float {
match sh {
Circle(_, size) => float::consts::pi * size * size,
- Rectangle({x, y}, {x: x2, y: y2}) => (x2 - x) * (y2 - y)
+ Rectangle(Point {x, y}, Point {x: x2, y: y2}) => (x2 - x) * (y2 - y)
}
}
~~~~
-Like other patterns, a lone underscore ignores individual fields.
-Ignoring all fields of a variant can be written `Circle(*)`. As in
-their introductory form, nullary enum patterns are written without
+You can write a lone `_` to ignore an individual field, and can
+ignore all fields of a variant with `Circle(*)`. As in their
+introductory form, nullary enum patterns are written without
parentheses.
~~~~
-# type Point = {x: float, y: float};
+# struct Point {x: float, y: float}
# enum Direction { North, East, South, West }
fn point_from_direction(dir: Direction) -> Point {
match dir {
- North => {x: 0f, y: 1f},
- East => {x: 1f, y: 0f},
- South => {x: 0f, y: -1f},
- West => {x: -1f, y: 0f}
+ North => Point {x: 0f, y: 1f},
+ East => Point {x: 1f, y: 0f},
+ South => Point {x: 0f, y: -1f},
+ West => Point {x: -1f, y: 0f}
}
}
~~~~
## Tuples
Tuples in Rust behave exactly like structs, except that their fields
-do not have names (and can thus not be accessed with dot notation).
+do not have names. Thus, you cannot access their fields with dot notation.
Tuples can have any arity except for 0 or 1 (though you may consider
-nil, `()`, as the empty tuple if you like).
+unit, `()`, as the empty tuple if you like).
~~~~
let mytup: (int, int, float) = (10, 20, 30.0);
We've already seen several function definitions. Like all other static
declarations, such as `type`, functions can be declared both at the
-top level and inside other functions (or modules, which we'll come
-back to [later](#modules-and-crates)). They are introduced with the
-`fn` keyword, the type of arguments are specified following colons and
-the return type follows the arrow.
+top level and inside other functions (or in modules, which we'll come
+back to [later](#modules-and-crates)). The `fn` keyword introduces a
+function. A function has an argument list, which is a parenthesized
+list of `expr: type` pairs separated by commas. An arrow `->`
+separates the argument list and the function's return type.
~~~~
fn line(a: int, b: int, x: int) -> int {
}
~~~~
-Functions that do not return a value are said to return nil, `()`,
-and both the return type and the return value may be omitted from
-the definition. The following two functions are equivalent.
+It's better Rust style to write a return value this way instead of
+writing an explicit `return`. The utility of `return` comes in when
+returning early from a function. Functions that do not return a value
+are said to return nil, `()`, and both the return type and the return
+value may be omitted from the definition. The following two functions
+are equivalent.
~~~~
fn do_nothing_the_hard_way() -> () { return (); }
assert () == oops(5, 3, 1);
~~~~
-Methods are like functions, except that they are defined for a specific
-'self' type (like 'this' in C++). Calling a method is done with
-dot notation, as in `my_vec.len()`. Methods may be defined on most
-Rust types with the `impl` keyword. As an example, lets define a draw
+Methods are like functions, except that they have an implicit argument
+called `self`, which has the type that the method's receiver has. The
+`self` argument is like 'this' in C++. An expression with dot
+notation, as in `my_vec.len()`, denotes a method
+call. Implementations, written with the `impl` keyword, can define
+methods on most Rust types. As an example, let's define a `draw`
method on our `Shape` enum.
~~~
~~~
This defines an _implementation_ for `Shape` containing a single
-method, `draw`. In most most respects the `draw` method is defined
-like any other function, with the exception of the name `self`. `self`
-is a special value that is automatically defined in each method,
+method, `draw`. In most respects the `draw` method is defined
+like any other function, except for the name `self`. `self`
+is a special value that is automatically in scope inside each method,
referring to the value being operated on. If we wanted we could add
additional methods to the same impl, or multiple impls for the same
type. We'll discuss methods more in the context of [traits and
generics](#generics).
-> ***Note:*** The method definition syntax will change to require
-> declaring the self type explicitly, as the first argument.
+> ***Note:*** In the future, the method definition syntax will change to
+> require declaring the `self` type explicitly, as the first argument.
# The Rust memory model
-At this junction let's take a detour to explain the concepts involved
+At this junction, let's take a detour to explain the concepts involved
in Rust's memory model. We've seen some of Rust's pointer sigils (`@`,
`~`, and `&`) float by in a few examples, and we aren't going to get
much further without explaining them. Rust has a very particular
approach to memory management that plays a significant role in shaping
-the "feel" of the language. Understanding the memory landscape will
-illuminate several of Rust's unique features as we encounter them.
+the subjective experience of programming in the
+language. Understanding the memory landscape will illuminate several
+of Rust's unique features as we encounter them.
Rust has three competing goals that inform its view of memory:
-* Memory safety: memory that is managed by and is accessible to the
- Rust language must be guaranteed to be valid; under normal
- circumstances it must be impossible for Rust to trigger a
- segmentation fault or leak memory
-* Performance: high-performance low-level code must be able to employ
- a number of allocation strategies; low-performance high-level code
- must be able to employ a single, garbage-collection-based, heap
- allocation strategy
-* Concurrency: Rust must maintain memory safety guarantees, even for
- code running in parallel
+* Memory safety: Memory that the Rust language can observe must be
+ guaranteed to be valid. Under normal circumstances, it must be
+ impossible for Rust to trigger a segmentation fault or leak memory.
+* Performance: High-performance low-level code must be able to use
+ a number of different allocation strategies. Tracing garbage collection must be
+ optional and, if it is not desired, memory safety must not be compromised.
+ Less performance-critical, high-level code should be able to employ a single,
+ garbage-collection-based, heap allocation strategy.
+* Concurrency: Rust code must be free of in-memory data races. (Note that other
+ types of races are still possible.)
## How performance considerations influence the memory model
-Most languages that offer strong memory safety guarantees rely upon a
+Most languages that offer strong memory safety guarantees rely on a
garbage-collected heap to manage all of the objects. This approach is
straightforward both in concept and in implementation, but has
significant costs. Languages that follow this path tend to
by the garbage collector.
By comparison, languages like C++ offer very precise control over
-where objects are allocated. In particular, it is common to put them
+where objects are allocated. In particular, it is common to allocate them
directly on the stack, avoiding expensive heap allocation. In Rust
-this is possible as well, and the compiler will use a clever _pointer
-lifetime analysis_ to ensure that no variable can refer to stack
+this is possible as well, and the compiler uses a [clever _pointer
+lifetime analysis_][borrow] to ensure that no variable can refer to stack
objects after they are destroyed.
+[borrow]: tutorial-borrowed-ptr.html
+
## How concurrency considerations influence the memory model
Memory safety in a concurrent environment involves avoiding race
conditions between two threads of execution accessing the same
-memory. Even high-level languages often require programmers to
-correctly employ locking to ensure that a program is free of races.
+memory. Even high-level languages often require programmers to make
+correct use of locking to ensure that a program is free of races.
Rust starts from the position that memory cannot be shared between
tasks. Experience in other languages has proven that isolating each
additional benefit that garbage collection must only be done
per-heap. Rust never "stops the world" to reclaim memory.
-Complete isolation of heaps between tasks would, however, mean that any data
-transferred between tasks must be copied. While this is a fine and
-useful way to implement communication between tasks, it is also very
-inefficient for large data structures. Because of this, Rust also
-employs a global _exchange heap_. Objects allocated in the exchange
-heap have _ownership semantics_, meaning that there is only a single
-variable that refers to them. For this reason, they are referred to as
-_owned boxes_. All tasks may allocate objects on the exchange heap,
-then transfer ownership of those objects to other tasks, avoiding
-expensive copies.
+Complete isolation of heaps between tasks would, however, mean that
+any data transferred between tasks must be copied. While this is a
+fine and useful way to implement communication between tasks, it is
+also very inefficient for large data structures. To reduce the amount
+of copying, Rust also uses a global _exchange heap_. Objects allocated
+in the exchange heap have _ownership semantics_, meaning that there is
+only a single variable that refers to them. For this reason, they are
+referred to as _owned boxes_. All tasks may allocate objects on the
+exchange heap, then transfer ownership of those objects to other
+tasks, avoiding expensive copies.
# Boxes and pointers
-In contrast to a lot of modern languages, aggregate types like structs
-and enums are _not_ represented as pointers to allocated memory in
-Rust. They are, as in C and C++, represented directly. This means that
-if you `let x = Point {x: 1f, y: 1f};`, you are creating a struct on the
-stack. If you then copy it into a data structure, the whole struct is
-copied, not just a pointer.
+Many modern languages have a so-called "uniform representation" for
+aggregate types like structs and enums, so as to represent these types
+as pointers to heap memory by default. In contrast, Rust, like C and
+C++, represents such types directly. Another way to say this is that
+aggregate data in Rust are *unboxed*. This means that if you `let x =
+Point {x: 1f, y: 1f};`, you are creating a struct on the stack. If you
+then copy it into a data structure, you copy the entire struct, not
+just a pointer.
For small structs like `Point`, this is usually more efficient than
-allocating memory and going through a pointer. But for big structs, or
+allocating memory and indirecting through a pointer. But for big structs, or
those with mutable fields, it can be useful to have a single copy on
-the heap, and refer to that through a pointer.
+the stack or on the heap, and refer to that through a pointer.
Rust supports several types of pointers. The safe pointer types are
`@T` for managed boxes allocated on the local heap, `~T`, for
> ***Note***: You may also hear managed boxes referred to as 'shared
> boxes' or 'shared pointers', and owned boxes as 'unique boxes/pointers'.
> Borrowed pointers are sometimes called 'region pointers'. The preferred
-> terminology is as presented here.
+> terminology is what we present here.
## Managed boxes
-Managed boxes are pointers to heap-allocated, garbage collected memory.
-Creating a managed box is done by simply applying the unary `@`
-operator to an expression. The result of the expression will be boxed,
-resulting in a box of the right type. Copying a shared box, as happens
-during assignment, only copies a pointer, never the contents of the
-box.
+Managed boxes are pointers to heap-allocated, garbage collected
+memory. Applying the unary `@` operator to an expression creates a
+managed box. The resulting box contains the result of the
+expression. Copying a shared box, as happens during assignment, only
+copies a pointer, never the contents of the box.
~~~~
let x: @int = @10; // New box
// then the allocation will be freed.
~~~~
-Any type that contains managed boxes or other managed types is
-considered _managed_. Managed types are the only types that can
-construct cyclic data structures in Rust, such as doubly-linked lists.
+A _managed_ type is either of the form `@T` for some type `T`, or any
+type that contains managed boxes or other managed types.
~~~
// A linked list node
Managed boxes never cross task boundaries.
-> ***Note:*** managed boxes are currently reclaimed through reference
-> counting and cycle collection, but we will switch to a tracing
-> garbage collector eventually.
+> ***Note:*** Currently, the Rust compiler generates code to reclaim
+> managed boxes through reference counting and a cycle collector, but
+> we will switch to a tracing garbage collector eventually.
## Owned boxes
-In contrast to managed boxes, owned boxes have a single owning memory
-slot and thus two owned boxes may not refer to the same memory. All
-owned boxes across all tasks are allocated on a single _exchange
-heap_, where their uniquely owned nature allows them to be passed
-between tasks efficiently.
+In contrast with managed boxes, owned boxes have a single owning
+memory slot and thus two owned boxes may not refer to the same
+memory. All owned boxes across all tasks are allocated on a single
+_exchange heap_, where their uniquely owned nature allows tasks to
+exchange them efficiently.
-Because owned boxes are uniquely owned, copying them involves allocating
+Because owned boxes are uniquely owned, copying them requires allocating
a new owned box and duplicating the contents. Copying owned boxes
is expensive so the compiler will complain if you do so without writing
the word `copy`.
assert z == 20;
~~~~
-This is where the 'move' operator comes in. It is similar to
-`copy`, but it de-initializes its source. Thus, the owned box can move
-from `x` to `y`, without violating the constraint that it only has a
-single owner (if you used assignment instead of the move operator, the
-box would, in principle, be copied).
+This is where the 'move' operator comes in. It is similar to `copy`,
+but it de-initializes its source. Thus, the owned box can move from
+`x` to `y`, without violating the constraint that it only has a single
+owner (using assignment instead of the move operator would, in
+principle, copy the box).
~~~~ {.xfail-test}
let x = ~10;
and won't be able to access it afterwards. The receiving task will
become the sole owner of the box.
-> ***Note:*** this discussion of copying vs moving does not account
+> ***Note:*** This discussion of copying vs. moving does not account
> for the "last use" rules that automatically promote copy operations
-> to moves. Last use is expected to be removed from the language in
+> to moves. We plan to remove last use from the language in
> favor of explicit moves.
## Borrowed pointers
Rust borrowed pointers are a general purpose reference/pointer type,
similar to the C++ reference type, but guaranteed to point to valid
-memory. In contrast to owned pointers, where the holder of a unique
+memory. In contrast with owned pointers, where the holder of a unique
pointer is the owner of the pointed-to memory, borrowed pointers never
imply ownership. Pointers may be borrowed from any type, in which case
the pointer is guaranteed not to outlive the value it points to.
}
~~~~
-We can use this simple definition to allocate points in many ways. For
-example, in this code, each of these three local variables contains a
-point, but allocated in a different place:
+We can use this simple definition to allocate points in many different
+ways. For example, in this code, each of these three local variables
+contains a point, but allocated in a different location:
~~~
# struct Point { x: float, y: float }
~~~
Dereferenced mutable pointers may appear on the left hand side of
-assignments, in which case the value they point to is modified.
+assignments. Such an assignment modifies the value that the pointer
+points to.
~~~
let managed = @mut 10;
~~~
Pointers have high operator precedence, but lower precedence than the
-dot operator used for field and method access. This can lead to some
-awkward code filled with parenthesis.
+dot operator used for field and method access. This precedence order
+can sometimes make code awkward and parenthesis-filled.
~~~
# struct Point { x: float, y: float }
let area = (*rect).area();
~~~
-To combat this ugliness the dot operator performs _automatic pointer
-dereferencing_ on the receiver (the value on the left hand side of the
-dot), so in most cases dereferencing the receiver is not necessary.
+To combat this ugliness the dot operator applies _automatic pointer
+dereferencing_ to the receiver (the value on the left hand side of the
+dot), so in most cases, explicitly dereferencing the receiver is not necessary.
~~~
# struct Point { x: float, y: float }
let area = rect.area();
~~~
-Auto-dereferencing is performed through any number of pointers. If you
-felt inclined you could write something silly like
+You can write an expression that dereferences any number of pointers
+automatically. For example, if you felt inclined, you could write
+something silly like
~~~
# struct Point { x: float, y: float }
io::println(fmt!("%f", point.x));
~~~
-The indexing operator (`[]`) is also auto-dereferencing.
+The indexing operator (`[]`) also auto-dereferences.
# Vectors and strings
-Vectors are a contiguous section of memory containing zero or more
+A vector is a contiguous section of memory containing zero or more
values of the same type. Like other types in Rust, vectors can be
stored on the stack, the local heap, or the exchange heap. Borrowed
pointers to vectors are also called 'slices'.
~~~
-enum Crayon {
- Almond, AntiqueBrass, Apricot,
- Aquamarine, Asparagus, AtomicTangerine,
- BananaMania, Beaver, Bittersweet,
- Black, BlizzardBlue, Blue
-}
-
+# enum Crayon {
+# Almond, AntiqueBrass, Apricot,
+# Aquamarine, Asparagus, AtomicTangerine,
+# BananaMania, Beaver, Bittersweet,
+# Black, BlizzardBlue, Blue
+# }
// A fixed-size stack vector
let stack_crayons: [Crayon * 3] = [Almond, AntiqueBrass, Apricot];
// Add two vectors to create a new one
let our_crayons = my_crayons + your_crayons;
-// += will append to a vector, provided it leves
-// in a mutable slot
+// += will append to a vector, provided it lives in a mutable slot
let mut my_crayons = move my_crayons;
my_crayons += your_crayons;
~~~~
> ***Note:*** The above examples of vector addition use owned
> vectors. Some operations on slices and stack vectors are
-> not well supported yet, owned vectors are often the most
+> not yet well-supported. Owned vectors are often the most
> usable.
-Indexing into vectors is done with square brackets:
+Square brackets denote indexing into a vector:
~~~~
# enum Crayon { Almond, AntiqueBrass, Apricot,
~~~~
The elements of a vector _inherit the mutability of the vector_,
-and as such individual elements may not be reassigned when the
+and as such, individual elements may not be reassigned when the
vector lives in an immutable slot.
~~~ {.xfail-test}
This is a simple example of Rust's _dual-mode data structures_, also
referred to as _freezing and thawing_.
-Strings are implemented with vectors of `u8`, though they have a distinct
-type. They support most of the same allocation options as
-vectors, though the string literal without a storage sigil, e.g.
-`"foo"` is treated differently than a comparable vector (`[foo]`).
-Whereas plain vectors are stack-allocated fixed-length vectors,
-plain strings are region pointers to read-only memory. Strings
-are always immutable.
+Strings are implemented with vectors of `u8`, though they have a
+distinct type. They support most of the same allocation options as
+vectors, though the string literal without a storage sigil (for
+example, `"foo"`) is treated differently than a comparable vector
+(`[foo]`). Whereas plain vectors are stack-allocated fixed-length
+vectors, plain strings are region pointers to read-only
+memory. All strings are immutable.
~~~
// A plain string is a slice to read-only (static) memory
# fn unwrap_crayon(c: Crayon) -> int { 0 }
# fn eat_crayon_wax(i: int) { }
# fn store_crayon_in_nasal_cavity(i: uint, c: Crayon) { }
-# fn crayon_to_str(c: Crayon) -> ~str { ~"" }
+# fn crayon_to_str(c: Crayon) -> &str { "" }
-let crayons = &[Almond, AntiqueBrass, Apricot];
+let crayons = [Almond, AntiqueBrass, Apricot];
// Check the length of the vector
assert crayons.len() == 3;
# Closures
Named functions, like those we've seen so far, may not refer to local
-variables declared outside the function - they do not "close over
-their environment". For example, you couldn't write the following:
+variables declared outside the function: they do not close over their
+environment (closing over the environment is sometimes referred to as
+"capturing" variables). For example, you couldn't write the following:
~~~~ {.ignore}
let foo = 10;
call_closure_with_ten(closure);
~~~~
-Closures begin with the argument list between bars and are followed by
+Closures begin with the argument list between vertical bars and are followed by
a single expression. The types of the arguments are generally omitted,
as is the return type, because the compiler can almost always infer
-them. In the rare case where the compiler needs assistance though, the
+them. In the rare case where the compiler needs assistance, though, the
arguments and return types may be annotated.
~~~~
~~~~
There are several forms of closure, each with its own role. The most
-common, called a _stack closure_, has type `fn&` and can directly
+common, called a _stack closure_, has type `&fn` and can directly
access local variables in the enclosing scope.
~~~~
Stack closures are very efficient because their environment is
allocated on the call stack and refers by pointer to captured
locals. To ensure that stack closures never outlive the local
-variables to which they refer, they can only be used in argument
-position and cannot be stored in structures nor returned from
-functions. Despite the limitations stack closures are used
+variables to which they refer, stack closures are not
+first-class. That is, they can only be used in argument position; they
+cannot be stored in data structures or returned from
+functions. Despite these limitations, stack closures are used
pervasively in Rust code.
## Managed closures
When you need to store a closure in a data structure, a stack closure
will not do, since the compiler will refuse to let you store it. For
this purpose, Rust provides a type of closure that has an arbitrary
-lifetime, written `fn@` (boxed closure, analogous to the `@` pointer
-type described earlier).
+lifetime, written `@fn` (boxed closure, analogous to the `@` pointer
+type described earlier). This type of closure *is* first-class.
A managed closure does not directly access its environment, but merely
copies out the values that it closes over into a private data
structure. This means that it can not assign to these variables, and
-will not 'see' updates to them.
+cannot observe updates to them.
This code creates a closure that adds a given string to its argument,
returns it from a function, and then calls it:
~~~~
# extern mod std;
-fn mk_appender(suffix: ~str) -> fn@(~str) -> ~str {
- return fn@(s: ~str) -> ~str { s + suffix };
+fn mk_appender(suffix: ~str) -> @fn(~str) -> ~str {
+ // The compiler knows that we intend this closure to be of type @fn
+ return |s| s + suffix;
}
fn main() {
}
~~~~
-This example uses the long closure syntax, `fn@(s: ~str) ...`,
-making the fact that we are declaring a box closure explicit. In
-practice boxed closures are usually defined with the short closure
-syntax introduced earlier, in which case the compiler will infer
-the type of closure. Thus our managed closure example could also
-be written:
-
-~~~~
-fn mk_appender(suffix: ~str) -> fn@(~str) -> ~str {
- return |s| s + suffix;
-}
-~~~~
-
## Owned closures
-Owned closures, written `fn~` in analogy to the `~` pointer type,
+Owned closures, written `~fn` in analogy to the `~` pointer type,
hold on to things that can safely be sent between
processes. They copy the values they close over, much like managed
-closures, but they also 'own' them—meaning no other code can access
+closures, but they also own them: that is, no other code can access
them. Owned closures are used in concurrent code, particularly
-for spawning [tasks](#tasks).
+for spawning [tasks][tasks].
+
+[tasks]: tutorial-tasks.html
## Closure compatibility
-A nice property of Rust closures is that you can pass any kind of
+Rust closures have a convenient subtyping property: you can pass any kind of
closure (as long as the arguments and return types match) to functions
that expect a `fn()`. Thus, when writing a higher-order function that
-wants to do nothing with its function argument beyond calling it, you
-should almost always specify the type of that argument as `fn()`, so
-that callers have the flexibility to pass whatever they want.
+only calls its function argument, and does nothing else with it, you
+should almost always declare the type of that argument as `fn()`. That way,
+callers may pass any kind of closure.
~~~~
fn call_twice(f: fn()) { f(); f(); }
-call_twice(|| { ~"I am an inferred stack closure"; } );
-call_twice(fn&() { ~"I am also a stack closure"; } );
-call_twice(fn@() { ~"I am a managed closure"; });
-call_twice(fn~() { ~"I am an owned closure"; });
-fn bare_function() { ~"I am a plain function"; }
-call_twice(bare_function);
+let closure = || { "I'm a closure, and it doesn't matter what type I am"; };
+fn function() { "I'm a normal function"; }
+call_twice(closure);
+call_twice(function);
~~~~
> ***Note:*** Both the syntax and the semantics will be changing
-> in small ways. At the moment they can be unsound in multiple
+> in small ways. At the moment they can be unsound in some
> scenarios, particularly with non-copyable types.
## Do syntax
-The `do` expression is syntactic sugar for use with functions which
-take a closure as a final argument, because closures in Rust
-are so frequently used in combination with higher-order
-functions.
+The `do` expression provides a way to treat higher-order functions
+(functions that take closures as arguments) as control structures.
-Consider this function which iterates over a vector of
+Consider this function that iterates over a vector of
integers, passing in a pointer to each integer in the vector:
~~~~
}
~~~~
-The reason we pass in a *pointer* to an integer rather than the
-integer itself is that this is how the actual `each()` function for
-vectors works. Using a pointer means that the function can be used
-for vectors of any type, even large structs that would be impractical
-to copy out of the vector on each iteration. As a caller, if we use a
-closure to provide the final operator argument, we can write it in a
-way that has a pleasant, block-like structure.
+As an aside, the reason we pass in a *pointer* to an integer rather
+than the integer itself is that this is how the actual `each()`
+function for vectors works. `vec::each`, though, is a
+[generic](#generics) function, so it must be efficient to use for all
+types. Passing the elements by pointer avoids copying potentially
+large objects.
+
+As a caller, if we use a closure to provide the final operator
+argument, we can write it in a way that has a pleasant, block-like
+structure.
~~~~
# fn each(v: &[int], op: fn(v: &int)) { }
-# fn do_some_work(i: int) { }
-each(&[1, 2, 3], |n| {
- debug!("%i", *n);
- do_some_work(*n);
+# fn do_some_work(i: &int) { }
+each([1, 2, 3], |n| {
+ do_some_work(n);
});
~~~~
~~~~
# fn each(v: &[int], op: fn(v: &int)) { }
-# fn do_some_work(i: int) { }
-do each(&[1, 2, 3]) |n| {
- debug!("%i", *n);
- do_some_work(*n);
+# fn do_some_work(i: &int) { }
+do each([1, 2, 3]) |n| {
+ do_some_work(n);
}
~~~~
The call is prefixed with the keyword `do` and, instead of writing the
-final closure inside the argument list it is moved outside of the
-parenthesis where it looks visually more like a typical block of
+final closure inside the argument list, it appears outside of the
+parentheses, where it looks more like a typical block of
code.
-`do` is often used for task spawning.
+`do` is a convenient way to create tasks with the `task::spawn`
+function. `spawn` has the signature `spawn(fn: ~fn())`. In other
+words, it is a function that accepts an owned closure taking no
+arguments.
~~~~
use task::spawn;
}
~~~~
-That's nice, but look at all those bars and parentheses - that's two empty
-argument lists back to back. Wouldn't it be great if they weren't
-there?
+Look at all those bars and parentheses - that's two empty argument
+lists back to back. Since that is so unsightly, empty argument lists
+may be omitted from `do` expressions.
~~~~
# use task::spawn;
}
~~~~
-Empty argument lists can be omitted from `do` expressions.
-
## For loops
-Most iteration in Rust is done with `for` loops. Like `do`,
-`for` is a nice syntax for doing control flow with closures.
-Additionally, within a `for` loop, `break`, `loop`, and `return`
-work just as they do with `while` and `loop`.
+The most common way to express iteration in Rust is with a `for`
+loop. Like `do`, `for` is a nice syntax for describing control flow
+with closures. Additionally, within a `for` loop, `break`, `loop`,
+and `return` work just as they do with `while` and `loop`.
Consider again our `each` function, this time improved to
break early when the iteratee returns `false`:
~~~~
# use each = vec::each;
# use println = io::println;
-each(&[2, 4, 8, 5, 16], |n| {
+each([2, 4, 8, 5, 16], |n| {
if *n % 2 != 0 {
- println(~"found odd number!");
+ println("found odd number!");
false
} else { true }
});
~~~~
With `for`, functions like `each` can be treated more
-like builtin looping structures. When calling `each`
+like built-in looping structures. When calling `each`
in a `for` loop, instead of returning `false` to break
out of the loop, you just write `break`. To skip ahead
to the next iteration, write `loop`.
~~~~
# use each = vec::each;
# use println = io::println;
-for each(&[2, 4, 8, 5, 16]) |n| {
+for each([2, 4, 8, 5, 16]) |n| {
if *n % 2 != 0 {
- println(~"found odd number!");
+ println("found odd number!");
break;
}
}
As an added bonus, you can use the `return` keyword, which is not
normally allowed in closures, in a block that appears as the body of a
-`for` loop — this will cause a return to happen from the outer
-function, not just the loop body.
+`for` loop: the meaning of `return` in such a block is to return from
+the enclosing function, not just the loop body.
~~~~
# use each = vec::each;
# Generics
-Throughout this tutorial, we've been defining functions that act only on
-specific data types. With type parameters we can also define functions whose
-arguments represent generic types, and which can be invoked with a variety
-of types. Consider a generic `map` function.
+Throughout this tutorial, we've been defining functions that act only
+on specific data types. With type parameters we can also define
+functions whose arguments have generic types, and which can be invoked
+with a variety of types. Consider a generic `map` function, which
+takes a function `function` and a vector `vector` and returns a new
+vector consisting of the result of applying `function` to each element
+of `vector`:
~~~~
fn map<T, U>(vector: &[T], function: fn(v: &T) -> U) -> ~[U] {
for vec::each(vector) |element| {
accumulator.push(function(element));
}
- return accumulator;
+ return (move accumulator);
}
~~~~
When defined with type parameters, as denoted by `<T, U>`, this
function can be applied to any type of vector, as long as the type of
-`function`'s argument and the type of the vector's content agree with
+`function`'s argument and the type of the vector's contents agree with
each other.
Inside a generic function, the names of the type parameters
-(capitalized by convention) stand for opaque types. You can't look
-inside them, but you can pass them around. Note that instances of
-generic types are often passed by pointer. For example, the
-parameter `function()` is supplied with a pointer to a value of type
-`T` and not a value of type `T` itself. This ensures that the
-function works with the broadest set of types possible, since some
-types are expensive or illegal to copy and pass by value.
+(capitalized by convention) stand for opaque types. All you can do
+with instances of these types is pass them around: you can't apply any
+operations to them or pattern-match on them. Note that instances of
+generic types are often passed by pointer. For example, the parameter
+`function()` is supplied with a pointer to a value of type `T` and not
+a value of type `T` itself. This ensures that the function works with
+the broadest set of types possible, since some types are expensive or
+illegal to copy and pass by value.
Generic `type`, `struct`, and `enum` declarations follow the same pattern:
}
~~~~
-These declarations produce valid types like `Set<int>`, `Stack<int>`
-and `Maybe<int>`.
+These declarations can be instantiated to valid types like `Set<int>`,
+`Stack<int>` and `Maybe<int>`.
-Generic functions in Rust are compiled to very efficient runtime code
-through a process called _monomorphisation_. This is a fancy way of
-saying that, for each generic function you call, the compiler
-generates a specialized version that is optimized specifically for the
-argument types. In this respect Rust's generics have similar
-performance characteristics to C++ templates.
+The Rust compiler compiles generic functions very efficiently by
+*monomorphizing* them. *Monomorphization* is a fancy name for a simple
+idea: generate a separate copy of each generic function at each call
+site, a copy that is specialized to the argument
+types and can thus be optimized specifically for them. In this
+respect, Rust's generics have similar performance characteristics to
+C++ templates.
## Traits
types it is operating on, it can't safely modify or query their
values. This is where _traits_ come into play. Traits are Rust's most
powerful tool for writing polymorphic code. Java developers will see
-in them aspects of Java interfaces, and Haskellers will notice their
-similarities to type classes.
-
-As motivation, let us consider copying in Rust. Perhaps surprisingly,
-the copy operation is not defined for all Rust types. In
-particular, types with user-defined destructors cannot be copied,
-either implicitly or explicitly, and neither can types that own other
-types containing destructors (the actual mechanism for defining
-destructors will be discussed elsewhere).
+them as similar to Java interfaces, and Haskellers will notice their
+similarities to type classes. Rust's traits are a form of *bounded
+polymorphism*: a trait is a way of limiting the set of possible types
+that a type parameter could refer to.
+
+As motivation, let us consider copying in Rust. The `copy` operation
+is not defined for all Rust types. One reason is user-defined
+destructors: copying a type that has a destructor could result in the
+destructor running multiple times. Therefore, types with user-defined
+destructors cannot be copied, either implicitly or explicitly, and
+neither can types that own other types containing destructors (see the
+section on [structs](#structs) for the actual mechanism for defining
+destructors).
This complicates handling of generic functions. If you have a type
parameter `T`, can you copy values of that type? In Rust, you can't,
}
~~~~
-We can tell the compiler though that the `head` function is only for
-copyable types with the `Copy` trait.
+However, we can tell the compiler that the `head` function is only for
+copyable types: that is, those that have the `Copy` trait.
~~~~
// This does
This says that we can call `head` on any type `T` as long as that type
implements the `Copy` trait. When instantiating a generic function,
you can only instantiate it with types that implement the correct
-trait, so you could not apply `head` to a type with a destructor.
+trait, so you could not apply `head` to a type with a
+destructor. (`Copy` is a special trait that is built in to the
+compiler, making it possible for the compiler to enforce this
+restriction.)
While most traits can be defined and implemented by user code, three
traits are automatically derived and implemented for all applicable
types by the compiler, and may not be overridden:
-* `Copy` - Types that can be copied, either implicitly, or using the
- `copy` expression. All types are copyable unless they are classes
+* `Copy` - Types that can be copied: either implicitly, or explicitly with the
+ `copy` operator. All types are copyable unless they are classes
with destructors or otherwise contain classes with destructors.
* `Send` - Sendable (owned) types. All types are sendable unless they
~~~~
Traits may be implemented for specific types with [impls]. An impl
-that implements a trait includes the name of the trait at the start of
+that implements a trait includes the name of the trait at the start of
the definition, as in the following impls of `Printable` for `int`
and `~str`.
fn print() { io::println(fmt!("%d", self)) }
}
-impl ~str: Printable {
+impl &str: Printable {
fn print() { io::println(self) }
}
# 1.print();
-# (~"foo").print();
+# ("foo").print();
~~~~
-Methods defined in an implementation of a trait may be called just as
+Methods defined in an implementation of a trait may be called just like
any other method, using dot notation, as in `1.print()`. Traits may
themselves contain type parameters. A trait for generalized sequence
types might look like the following:
The implementation has to explicitly declare the type parameter that
it binds, `T`, before using it to specify its trait type. Rust
requires this declaration because the `impl` could also, for example,
-specify an implementation of `Seq<int>`. The trait type -- appearing
-after the colon in the `impl` -- *refers* to a type, rather than
+specify an implementation of `Seq<int>`. The trait type (appearing
+after the colon in the `impl`) *refers* to a type, rather than
defining one.
The type parameters bound by a trait are in scope in each of the
method declarations. So, re-declaring the type parameter
-`T` as an explicit type parameter for `len` -- in either the trait or
-the impl -- would be a compile-time error.
+`T` as an explicit type parameter for `len`, in either the trait or
+the impl, would be a compile-time error.
Within a trait definition, `self` is a special type that you can think
of as a type parameter. An implementation of the trait for any given
}
~~~~
-Notice that in the trait definition, `equals` takes a `self` type
-argument, whereas, in the impl, `equals` takes an `int` type argument,
-and uses `self` as the name of the receiver (analogous to the `this` pointer
-in C++).
+Notice that in the trait definition, `equals` takes a parameter of
+type `self`. In contrast, in the `impl`, `equals` takes a parameter of
+type `int`, and uses `self` as the name of the receiver (analogous to
+the `this` pointer in C++).
## Bounded type parameters and static method dispatch
-Traits give us a language for talking about the abstract capabilities
-of types, and we can use this to place _bounds_ on type parameters,
-so that we can then operate on generic types.
+Traits give us a language for defining predicates on types, or
+abstract properties that types can have. We can use this language to
+define _bounds_ on type parameters, so that we can then operate on
+generic types.
~~~~
# trait Printable { fn print(); }
}
~~~~
-By declaring `T` as conforming to the `Printable` trait (as we earlier
-did with `Copy`), it becomes possible to call methods from that trait
-on values of that type inside the function. It will also cause a
+Declaring `T` as conforming to the `Printable` trait (as we earlier
+did with `Copy`) makes it possible to call methods from that trait
+on values of type `T` inside the function. It will also cause a
compile-time error when anyone tries to call `print_all` on an array
whose element type does not have a `Printable` implementation.
Type parameters can have multiple bounds by separating them with spaces,
-as in this version of `print_all` that makes copies of elements.
+as in this version of `print_all` that copies elements.
~~~
# trait Printable { fn print(); }
# type Circle = int; type Rectangle = int;
# impl int: Drawable { fn draw() {} }
# fn new_circle() -> int { 1 }
-
trait Drawable { fn draw(); }
fn draw_all<T: Drawable>(shapes: ~[T]) {
for shapes.each |shape| { shape.draw(); }
}
-
# let c: Circle = new_circle();
# draw_all(~[c]);
~~~~
~~~~
# trait Drawable { fn draw(); }
-fn draw_all(shapes: ~[@Drawable]) {
+fn draw_all(shapes: &[@Drawable]) {
for shapes.each |shape| { shape.draw(); }
}
~~~~
-In this example there is no type parameter. Instead, the `@Drawable`
-type is used to refer to any managed box value that implements the
-`Drawable` trait. To construct such a value, you use the `as` operator
-to cast a value to a trait type:
+In this example, there is no type parameter. Instead, the `@Drawable`
+type denotes any managed box value that implements the `Drawable`
+trait. To construct such a value, you use the `as` operator to cast a
+value to a trait type:
~~~~
# type Circle = int; type Rectangle = bool;
# trait Drawable { fn draw(); }
# fn new_circle() -> Circle { 1 }
# fn new_rectangle() -> Rectangle { true }
-# fn draw_all(shapes: ~[Drawable]) {}
+# fn draw_all(shapes: &[@Drawable]) {}
impl @Circle: Drawable { fn draw() { ... } }
let c: @Circle = @new_circle();
let r: @Rectangle = @new_rectangle();
-draw_all(~[c as @Drawable, r as @Drawable]);
+draw_all([c as @Drawable, r as @Drawable]);
~~~~
-Note that, like strings and vectors, trait types have dynamic size
-and may only be used via one of the pointer types. In turn, the
-`impl` is defined for `@Circle` and `@Rectangle` instead of for
-just `Circle` and `Rectangle`. Other pointer types work as well.
+We omit the code for `new_circle` and `new_rectangle`; imagine that
+these just return `Circle`s and `Rectangle`s with a default size. Note
+that, like strings and vectors, trait types have dynamic size and may
+only be referred to via one of the pointer types. That's why the `impl` is
+defined for `@Circle` and `@Rectangle` instead of for just `Circle`
+and `Rectangle`. Other pointer types work as well.
~~~{.xfail-test}
# type Circle = int; type Rectangle = int;
let stacky: &Drawable = &new_circle() as &Drawable;
~~~
-> ***Note:*** Other pointer types actually _do not_ work here. This is
+> ***Note:*** Other pointer types actually _do not_ work here yet. This is
> an evolving corner of the language.
Method calls to trait types are _dynamically dispatched_. Since the
compiler doesn't know specifically which functions to call at compile
-time it uses a lookup table (vtable) to decide at runtime which
-method to call.
+time, it uses a lookup table (also known as a vtable or dictionary) to
+select the method to call at runtime.
This usage of traits is similar to Java interfaces.
# Modules and crates
-The Rust namespace is divided into modules. Each source file starts
-with its own module.
-
-## Local modules
-
-The `mod` keyword can be used to open a new, local module. In the
-example below, `chicken` lives in the module `farm`, so, unless you
-explicitly import it, you must refer to it by its long name,
-`farm::chicken`.
+The Rust namespace is arranged in a hierarchy of modules. Each source
+(.rs) file represents a single module and may in turn contain
+additional modules.
~~~~
-#[legacy_exports]
mod farm {
- fn chicken() -> ~str { ~"cluck cluck" }
- fn cow() -> ~str { ~"mooo" }
+ pub fn chicken() -> &str { "cluck cluck" }
+ pub fn cow() -> &str { "mooo" }
}
+
fn main() {
io::println(farm::chicken());
}
~~~~
-Modules can be nested to arbitrary depth.
+The contents of modules can be imported into the current scope
+with the `use` keyword, optionally giving it an alias. `use`
+may appear at the beginning of crates, `mod`s, `fn`s, and other
+blocks.
+
+~~~
+# mod farm { pub fn chicken() { } }
+# fn main() {
+// Bring `chicken` into scope
+use farm::chicken;
+
+fn chicken_farmer() {
+ // The same, but name it `my_chicken`
+ use my_chicken = farm::chicken;
+ ...
+}
+# }
+~~~
+
+These farm animal functions have a new keyword, `pub`, attached to
+them. The `pub` keyword modifies an item's visibility, making it
+visible outside its containing module. An expression with `::`, like
+`farm::chicken`, can name an item outside of its containing
+module. Items, such as those declared with `fn`, `struct`, `enum`,
+`type`, or `const`, are module-private by default.
+
+Visibility restrictions in Rust exist only at module boundaries. This
+is quite different from most object-oriented languages that also
+enforce restrictions on objects themselves. That's not to say that
+Rust doesn't support encapsulation: both struct fields and methods can
+be private. But this encapsulation is at the module level, not the
+struct level. Note that fields and methods are _public_ by default.
+
+~~~
+mod farm {
+# pub fn make_me_a_farm() -> farm::Farm { farm::Farm { chickens: ~[], cows: ~[], farmer: Human(0) } }
+ pub struct Farm {
+ priv mut chickens: ~[Chicken],
+ priv mut cows: ~[Cow],
+ farmer: Human
+ }
+
+ // Note - visibility modifiers on impls currently have no effect
+ impl Farm {
+ priv fn feed_chickens() { ... }
+ priv fn feed_cows() { ... }
+ fn add_chicken(c: Chicken) { ... }
+ }
+
+ pub fn feed_animals(farm: &Farm) {
+ farm.feed_chickens();
+ farm.feed_cows();
+ }
+}
+
+fn main() {
+ let f = make_me_a_farm();
+ f.add_chicken(make_me_a_chicken());
+ farm::feed_animals(&f);
+ f.farmer.rest();
+}
+# type Chicken = int;
+# type Cow = int;
+# enum Human = int;
+# fn make_me_a_farm() -> farm::Farm { farm::make_me_a_farm() }
+# fn make_me_a_chicken() -> Chicken { 0 }
+# impl Human { fn rest() { } }
+~~~
## Crates
-The unit of independent compilation in Rust is the crate. Libraries
-tend to be packaged as crates, and your own programs may consist of
-one or more crates.
+The unit of independent compilation in Rust is the crate: rustc
+compiles a single crate at a time, from which it produces either a
+library or executable.
When compiling a single `.rs` file, the file acts as the whole crate.
You can compile it with the `--lib` compiler switch to create a shared
library, or without, provided that your file contains a `fn main`
somewhere, to create an executable.
-It is also possible to include multiple files in a crate. For this
-purpose, you create a `.rc` crate file, which references any number of
-`.rs` code files. A crate file could look like this:
+Larger crates typically span multiple files and are compiled from
+a crate (.rc) file. Crate files contain their own syntax for loading
+modules from .rs files and typically include metadata about the crate.
-~~~~ {.ignore}
+~~~~ { .xfail-test }
#[link(name = "farm", vers = "2.5", author = "mjh")];
#[crate_type = "lib"];
+
mod cow;
mod chicken;
mod horse;
~~~~
Compiling this file will cause `rustc` to look for files named
-`cow.rs`, `chicken.rs`, `horse.rs` in the same directory as the `.rc`
-file, compile them all together, and, depending on the presence of the
-`crate_type = "lib"` attribute, output a shared library or an executable.
-(If the line `#[crate_type = "lib"];` was omitted, `rustc` would create an
-executable.)
+`cow.rs`, `chicken.rs`, and `horse.rs` in the same directory as the
+`.rc` file, compile them all together, and, based on the presence of
+the `crate_type = "lib"` attribute, output a shared library or an
+executable. (If the line `#[crate_type = "lib"];` was omitted,
+`rustc` would create an executable.)
-The `#[link(...)]` part provides meta information about the module,
-which other crates can use to load the right module. More about that
-later.
+The `#[link(...)]` attribute provides meta information about the
+module, which other crates can use to load the right module. More
+about that later.
To have a nested directory structure for your source files, you can
nest mods in your `.rc` file:
and `poultry::turkey`. You can also provide a `poultry.rs` to add
content to the `poultry` module itself.
-The compiler then builds the crate as a platform-specific shared library or
-executable which can be distributed.
+When compiling .rc files, if rustc finds a .rs file with the same
+name, then that .rs file provides the top-level content of the crate.
-## Using other crates
+~~~ {.xfail-test}
+// foo.rc
+#[link(name = "foo", vers="1.0")];
-Having compiled a crate that contains the `#[crate_type = "lib"]`
-attribute, you can use it in another crate with a `use`
-directive. We've already seen `extern mod std` in several of the
-examples, which loads in the [standard library][std].
+mod bar;
+~~~
-[std]: http://doc.rust-lang.org/doc/std/index/General.html
+~~~ {.xfail-test}
+// foo.rs
+fn main() { bar::baz(); }
+~~~
-`use` directives can appear in a crate file, or at the top level of a
-single-file `.rs` crate. They will cause the compiler to search its
-library search path (which you can extend with `-L` switch) for a Rust
-crate library with the right name.
+> ***Note***: The way rustc looks for .rs files to pair with .rc
+> files is a major source of confusion and will change. It's likely
+> that the crate and source file grammars will merge.
-It is possible to provide more specific information when using an
-external crate.
+> ***Note***: The way that directory modules are handled will also
+> change. The code for directory modules currently lives in a .rs
+> file with the same name as the directory, _next to_ the directory.
+> A new scheme will make that file live _inside_ the directory.
-~~~~ {.ignore}
-extern mod myfarm (name = "farm", vers = "2.7");
-~~~~
+## Using other crates
+
+The `extern mod` directive lets you use a crate (once it's been
+compiled into a library) from inside another crate. `extern mod` can
+appear at the top of a crate file or at the top of modules. It will
+cause the compiler to look in the library search path (which you can
+extend with the `-L` switch) for a compiled Rust library with the
+right name, then add a module with that crate's name into the local
+scope.
+
+For example, `extern mod std` links the [standard library].
+
+[standard library]: std/index.html
-When a comma-separated list of name/value pairs is given after `use`,
-these are matched against the attributes provided in the `link`
-attribute of the crate file, and a crate is only used when the two
-match. A `name` value can be given to override the name used to search
-for the crate. So the above would import the `farm` crate under the
-local name `myfarm`.
+When a comma-separated list of name/value pairs appears after `extern
+mod`, the compiler front-end matches these pairs against the
+attributes provided in the `link` attribute of the crate file. The
+front-end will only select this crate for use if the actual pairs
+match the declared attributes. You can provide a `name` value to
+override the name used to search for the crate.
Our example crate declared this set of `link` attributes:
-~~~~ {.ignore}
+~~~~ {.xfail-test}
#[link(name = "farm", vers = "2.5", author = "mjh")];
~~~~
-The version does not match the one provided in the `use` directive, so
-unless the compiler can find another crate with the right version
-somewhere, it will complain that no matching crate was found.
-
-## The core library
-
-A set of basic library routines, mostly related to built-in datatypes
-and the task system, are always implicitly linked and included in any
-Rust program.
+Which you can then link with any (or all) of the following:
-This library is documented [here][core].
+~~~~ {.xfail-test}
+extern mod farm;
+extern mod my_farm (name = "farm", vers = "2.5");
+extern mod my_auxiliary_farm (name = "farm", author = "mjh");
+~~~~
-[core]: core/index.html
+If any of the requested metadata do not match, then the crate
+will not be compiled successfully.
## A minimal example
~~~~
// world.rs
#[link(name = "world", vers = "1.0")];
-fn explore() -> ~str { ~"world" }
+pub fn explore() -> &str { "world" }
~~~~
-~~~~ {.ignore}
+~~~~ {.xfail-test}
// main.rs
extern mod world;
-fn main() { io::println(~"hello " + world::explore()); }
+fn main() { io::println("hello " + world::explore()); }
~~~~
Now compile and run like this (adjust to your platform if necessary):
"hello world"
~~~~
-## Importing
-
-When using identifiers from other modules, it can get tiresome to
-qualify them with the full module path every time (especially when
-that path is several modules deep). Rust allows you to import
-identifiers at the top of a file, module, or block.
+Notice that the library produced contains the version in the filename
+as well as an inscrutable string of alphanumerics. These are both
+part of Rust's library versioning scheme. The alphanumerics are
+a hash representing the crate metadata.
-~~~~
-extern mod std;
-use io::println;
-fn main() {
- println(~"that was easy");
-}
-~~~~
-
-
-It is also possible to import just the name of a module (`use
-std::list;`, then use `list::find`), to import all identifiers exported
-by a given module (`use io::*`), or to import a specific set
-of identifiers (`use math::{min, max, pi}`).
-
-Rust uses different namespaces for modules, types, and values. You
-can also rename an identifier when importing using the `=` operator:
-
-~~~~
-use prnt = io::println;
-~~~~
-
-## Exporting
-
-By default, a module exports everything that it defines. This can be
-restricted with `export` directives at the top of the module or file.
-
-~~~~
-mod enc {
- export encrypt, decrypt;
- const SUPER_SECRET_NUMBER: int = 10;
- fn encrypt(n: int) -> int { n + SUPER_SECRET_NUMBER }
- fn decrypt(n: int) -> int { n - SUPER_SECRET_NUMBER }
-}
-~~~~
-
-This defines a rock-solid encryption algorithm. Code outside of the
-module can refer to the `enc::encrypt` and `enc::decrypt` identifiers
-just fine, but it does not have access to `enc::super_secret_number`.
-
-## Resolution
-
-The resolution process in Rust simply goes up the chain of contexts,
-looking for the name in each context. Nested functions and modules
-create new contexts inside their parent function or module. A file
-that's part of a bigger crate will have that crate's context as its
-parent context.
-
-Identifiers can shadow each other. In this program, `x` is of type
-`int`:
-
-~~~~
-type MyType = ~str;
-fn main() {
- type MyType = int;
- let x: MyType = 17;
-}
-~~~~
-
-An `use` directive will only import into the namespaces for which
-identifiers are actually found. Consider this example:
-
-~~~~
-mod foo {
- fn bar() {}
-}
-
-fn main() {
- let bar = 10;
-
- {
- use foo::bar;
- let quux = bar;
- assert quux == 10;
- }
-}
-~~~~
+## The core library
-When resolving the type name `bar` in the `quux` definition, the
-resolver will first look at local block context for `baz`. This has an
-import named `bar`, but that's function, not a value, So it continues
-to the `baz` function context and finds a value named `bar` defined
-there.
+The Rust [core] library is the language runtime and contains
+required memory management and task scheduling code as well as a
+number of modules necessary for effective usage of the primitive
+types. Methods on [vectors] and [strings], implementations of most
+comparison and math operators, and pervasive types like [`Option`]
+and [`Result`] live in core.
-Normally, multiple definitions of the same identifier in a scope are
-disallowed. Local variables defined with `let` are an exception to
-this—multiple `let` directives can redefine the same variable in a
-single scope. When resolving the name of such a variable, the most
-recent definition is used.
+All Rust programs link to the core library and import its contents,
+as if the following were written at the top of the crate.
-~~~~
-fn main() {
- let x = 10;
- let x = x + 10;
- assert x == 20;
-}
-~~~~
+~~~~ {.xfail-test}
+extern mod core;
+use core::*;
+~~~~
-This makes it possible to rebind a variable without actually mutating
-it, which is mostly useful for destructuring (which can rebind, but
-not assign).
+[core]: core/index.html
+[vectors]: core/vec.html
+[strings]: core/str.html
+[`Option`]: core/option.html
+[`Result`]: core/result.html
# What next?
-.TH RUSTC "1" "July 2012" "rustc 0.3" "User Commands"
+.TH RUSTC "1" "October 2012" "rustc 0.4" "User Commands"
.SH NAME
rustc \- rust compiler
.SH SYNOPSIS
\fB\-\-ls\fR
List the symbols defined by a compiled library crate
.TP
+\fB\-\-jit\fR
+Execute using JIT (experimental)
+.TP
\fB\-\-no\-trans\fR
Run all passes except translation; no output
.TP
.TP
\fB\-\-pretty\fR [type]
Pretty\-print the input instead of compiling;
-valid types are: \fBnormal\fR (un\-annotated source),
-\fBexpanded\fR (crates expanded), \fBtyped\fR (crates expanded,
-with type annotations), or \fBidentified\fR (fully
+valid types are: normal (un\-annotated source),
+expanded (crates expanded), typed (crates expanded,
+with type annotations), or identified (fully
parenthesized, AST nodes and blocks with IDs)
.TP
\fB\-S\fR
in addition to normal output
.TP
\fB\-\-static\fR
-Use or produce static libraries or binaries (experimental)
+Use or produce static libraries or binaries
+(experimental)
.TP
\fB\-\-sysroot\fR <path>
Override the system root
Build a test harness
.TP
\fB\-\-target\fR <triple>
-Target cpu\-manufacturer\-kernel[\-os] to compile for (default: host triple)
-(see <\fBhttp://sources.redhat.com/autobook/autobook/autobook_17.html\fR> for
-detail)
-.TP
-\fB\-W\fR <foo>
-enable warning <foo>
-.TP
-\fB\-W\fR no\-<foo>
-disable warning <foo>
-.TP
-\fB\-W\fR err\-<foo>
-enable warning <foo> as an error
+Target cpu\-manufacturer\-kernel[\-os] to compile for
+(default: host triple)
+(see http://sources.redhat.com/autobook/autobook/
+autobook_17.html for detail)
.TP
-\fB\-W\fR help
-Print available warnings and default settings
+\fB\-W help\fR
+Print 'lint' options and default settings
.TP
-\fB\-Z\fR help
-list internal options for debugging rustc
+\fB\-Z help\fR
+Print internal options for debugging rustc
.TP
\fB\-v\fR \fB\-\-version\fR
Print version info and exit
$ rustc hello.rc
.SH "BUGS"
-See <\fBhttps://github.com/mozilla/rust/issues\fR> for a list of known bugs.
+See <\fBhttps://github.com/mozilla/rust/issues\fR> for issues.
.SH "AUTHOR"
See \fBAUTHORS.txt\fR in the rust source distribution. Graydon Hoare
<\fIgraydon@mozilla.com\fR> is the project leader.
.SH "COPYRIGHT"
-See \fBLICENSE.txt\fR in the rust source distribution.
+This work is licensed under MIT-like terms. See \fBLICENSE.txt\fR
+in the rust source distribution.
# embedded into the executable, so use a no-op command.
CFG_DSYMUTIL := true
+# Add a dSYM glob for all platforms, even though it will do nothing on
+# non-Darwin platforms; omitting it causes a full -R copy of lib/
+CFG_LIB_DSYM_GLOB=lib$(1)-*.dylib.dSYM
+
ifneq ($(findstring freebsd,$(CFG_OSTYPE)),)
CFG_LIB_NAME=lib$(1).so
CFG_LIB_GLOB=lib$(1)-*.so
ifneq ($(findstring darwin,$(CFG_OSTYPE)),)
CFG_LIB_NAME=lib$(1).dylib
CFG_LIB_GLOB=lib$(1)-*.dylib
- CFG_LIB_DSYM_GLOB=lib$(1)-*.dylib.dSYM
CFG_UNIXY := 1
CFG_LDENV := DYLD_LIBRARY_PATH
CFG_GCCISH_LINK_FLAGS += -dynamiclib -lpthread -framework CoreServices -Wl,-no_compact_unwind
// End:
#[link(name = "cargo",
- vers = "0.4",
+ vers = "0.5",
uuid = "9ff87a04-8fed-4295-9ff8-f99bb802650b",
url = "https://github.com/mozilla/rust/tree/master/src/cargo")];
#[allow(deprecated_mode)];
#[allow(deprecated_pattern)];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
-extern mod rustc(vers = "0.4");
-extern mod syntax(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
+extern mod rustc(vers = "0.5");
+extern mod syntax(vers = "0.5");
use core::*;
~" or package manager to get it to work correctly");
}
- c
+ move c
}
fn for_each_package(c: &Cargo, b: fn(s: @Source, p: &Package)) {
}
match (src.key, src.keyfp) {
(Some(_), Some(f)) => {
- let r = pgp::verify(&c.root, &pkgfile, &sigfile, f);
+ let r = pgp::verify(&c.root, &pkgfile, &sigfile);
if !r {
- error(fmt!("signature verification failed for source %s",
- name));
+ error(fmt!("signature verification failed for source %s with \
+ key %s", name, f));
return false;
}
if has_src_file {
- let e = pgp::verify(&c.root, &srcfile, &srcsigfile, f);
+ let e = pgp::verify(&c.root, &srcfile, &srcsigfile);
if !e {
- error(fmt!("signature verification failed for source %s",
- name));
+ error(fmt!("signature verification failed for source %s \
+ with key %s", name, f));
return false;
}
}
}
match (src.key, src.keyfp) {
(Some(_), Some(f)) => {
- let r = pgp::verify(&c.root, &pkgfile, &sigfile, f);
+ let r = pgp::verify(&c.root, &pkgfile, &sigfile);
if !r {
- error(fmt!("signature verification failed for source %s",
- name));
+ error(fmt!("signature verification failed for source %s with \
+ key %s", name, f));
rollback(name, dir, false);
return false;
}
if has_src_file {
- let e = pgp::verify(&c.root, &srcfile, &srcsigfile, f);
+ let e = pgp::verify(&c.root, &srcfile, &srcsigfile);
if !e {
- error(fmt!("signature verification failed for source %s",
- name));
+ error(fmt!("signature verification failed for source %s \
+ with key %s", name, f));
rollback(name, dir, false);
return false;
}
return false;
}
- let r = pgp::verify(&c.root, &pkgfile, &sigfile, f);
+ let r = pgp::verify(&c.root, &pkgfile, &sigfile);
if !r {
- error(fmt!("signature verification failed for source %s",
- name));
+ error(fmt!("signature verification failed for source %s with \
+ key %s", name, f));
return false;
}
return false;
}
- let e = pgp::verify(&c.root, &srcfile, &srcsigfile, f);
+ let e = pgp::verify(&c.root, &srcfile, &srcsigfile);
if !e {
error(~"signature verification failed for " +
- ~"source " + name);
+ ~"source " + name + ~" with key " + f);
return false;
}
}
return;
}
- let r = pgp::verify(&c.root, &srcfile, &sigfile,
- pgp::signing_key_fp());
+ let r = pgp::verify(&c.root, &srcfile, &sigfile);
if !r {
error(fmt!("signature verification failed for '%s'",
srcfile.to_str()));
_ => ()
}
- hash.insert(copy k, json::Object(chash));
+ hash.insert(copy k, json::Object(move chash));
}
- json::to_writer(writer, &json::Object(hash))
+ json::to_writer(writer, &json::Object(move hash))
}
result::Err(e) => {
error(fmt!("could not dump sources: %s", e));
-fn gpg(args: ~[~str]) -> { status: int, out: ~str, err: ~str } {
- return run::program_output(~"gpg", args);
+fn gpgv(args: ~[~str]) -> { status: int, out: ~str, err: ~str } {
+ return run::program_output(~"gpgv", args);
}
fn signing_key() -> ~str {
}
fn supported() -> bool {
- let r = gpg(~[~"--version"]);
+ let r = gpgv(~[~"--version"]);
r.status == 0
}
}
}
-fn verify(root: &Path, data: &Path, sig: &Path, keyfp: ~str) -> bool {
+fn verify(root: &Path, data: &Path, sig: &Path) -> bool {
let path = root.push("gpg");
- let p = gpg(~[~"--homedir", path.to_str(),
- ~"--with-fingerprint",
- ~"--verify", sig.to_str(),
- data.to_str()]);
- let res = ~"Primary key fingerprint: " + keyfp;
- for str::split_char_each(p.err, '\n') |line| {
- if line == res { return true; }
+ let res = gpgv(~[~"--homedir", path.to_str(),
+ ~"--keyring", ~"pubring.gpg",
+ ~"--verbose",
+ sig.to_str(), data.to_str()]);
+ if res.status != 0 {
+ return false;
}
- return false;
+ return true;
}
#[allow(deprecated_mode)];
#[allow(deprecated_pattern)];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
use core::*;
tests.push(make_test(config, file))
}
}
- return tests;
+ move tests
}
fn is_test(config: config, testfile: &Path) -> bool {
writeclose(pipe_in.out, input);
let p = pipes::PortSet();
let ch = p.chan();
- do task::spawn_sched(task::SingleThreaded) {
+ do task::spawn_sched(task::SingleThreaded) |move ch| {
let errput = readclose(pipe_err.in);
ch.send((2, errput));
}
let ch = p.chan();
- do task::spawn_sched(task::SingleThreaded) {
+ do task::spawn_sched(task::SingleThreaded) |move ch| {
let output = readclose(pipe_out.in);
ch.send((1, output));
}
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE language SYSTEM "language.dtd">
-<language name="Rust" version="0.3.1" kateversion="2.4" section="Sources" extensions="*.rs;*.rc" mimetype="text/x-rust" priority="15">
+<language name="Rust" version="0.4.0" kateversion="2.4" section="Sources" extensions="*.rs;*.rc" mimetype="text/x-rust" priority="15">
<highlighting>
<list name="fn">
<item> fn </item>
<item> type </item>
</list>
<list name="keywords">
- <item> alt </item>
- <item> again </item>
<item> as </item>
<item> assert </item>
<item> break </item>
- <item> check </item>
- <item> claim </item>
<item> const </item>
<item> copy </item>
<item> do </item>
<item> drop </item>
<item> else </item>
+ <item> enum </item>
<item> export </item>
<item> extern </item>
- <item> f16 </item>
- <item> f80 </item>
- <item> f128 </item>
<item> fail </item>
<item> for </item>
<item> if </item>
<item> impl </item>
- <item> import </item>
- <item> in </item>
<item> let </item>
<item> log </item>
<item> loop </item>
- <item> m32 </item>
- <item> m64 </item>
- <item> m128 </item>
<item> match </item>
<item> mod </item>
- <item> module </item>
<item> move </item>
<item> mut </item>
- <item> new </item>
- <item> of </item>
- <item> owned </item>
<item> priv </item>
<item> pub </item>
<item> pure </item>
- <item> ret </item>
+ <item> ref </item>
<item> return </item>
- <item> to </item>
- <item> unchecked </item>
+ <item> static </item>
+ <item> struct </item>
+ <item> trait </item>
<item> unsafe </item>
<item> use </item>
<item> while </item>
- <item> with </item>
- <item> mod </item>
- <item> trait </item>
- <item> class </item>
- <item> struct </item>
- <item> enum </item>
</list>
<list name="types">
<item> bool </item>
<item> float </item>
<item> char </item>
<item> str </item>
- <item> option </item>
- <item> either </item>
+ <item> Either </item>
+ <item> Option </item>
+ <item> Result </item>
</list>
<list name="ctypes">
<item> c_float </item>
<list name="constants">
<item> true </item>
<item> false </item>
- <item> some </item>
- <item> none </item>
- <item> left </item>
- <item> right </item>
- <item> ok </item>
- <item> err </item>
- <item> success </item>
- <item> failure </item>
- <item> cons </item>
- <item> nil </item>
+ <item> Some </item>
+ <item> None </item>
+ <item> Left </item>
+ <item> Right </item>
+ <item> Ok </item>
+ <item> Err </item>
+ <item> Success </item>
+ <item> Failure </item>
+ <item> Cons </item>
+ <item> Nil </item>
</list>
<list name="cconstants">
<item> EXIT_FAILURE </item>
match = re.match(r"([ST]) (\d{4}-\d{2}-\d{2}) ([a-fA-F\d]+)\s*$", line);
if (not match):
- raise Exception("%s:%d:E syntax error" % (snapshotfile, n))
+ raise Exception("%s:%d:E syntax error: " % (snapshotfile, n))
return {"type": "snapshot",
"date": match.group(2),
"rev": match.group(3)}
#[allow(deprecated_mode)];
#[allow(deprecated_pattern)];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
-extern mod syntax(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
+extern mod syntax(vers = "0.5");
use core::*;
}
}
-fn safe_to_steal_ty(t: @ast::ty, tm: test_mode) -> bool {
+fn safe_to_steal_ty(t: @ast::Ty, tm: test_mode) -> bool {
// Restrictions happen to be the same.
safe_to_replace_ty(t.node, tm)
}
} else {/* now my indices are wrong :( */ }
}
-fn stash_ty_if(c: fn@(@ast::ty, test_mode)->bool,
- es: @mut ~[ast::ty],
- e: @ast::ty,
+fn stash_ty_if(c: fn@(@ast::Ty, test_mode)->bool,
+ es: @mut ~[ast::Ty],
+ e: @ast::Ty,
tm: test_mode) {
if c(e, tm) {
es.push(*e);
} else {/* now my indices are wrong :( */ }
}
-type stolen_stuff = {exprs: ~[ast::expr], tys: ~[ast::ty]};
+type stolen_stuff = {exprs: ~[ast::expr], tys: ~[ast::Ty]};
fn steal(crate: ast::crate, tm: test_mode) -> stolen_stuff {
let exprs = @mut ~[];
// Replace the |i|th ty (in fold order) of |crate| with |newty|.
-fn replace_ty_in_crate(crate: ast::crate, i: uint, newty: ast::ty,
+fn replace_ty_in_crate(crate: ast::crate, i: uint, newty: ast::Ty,
tm: test_mode) -> ast::crate {
let j: @mut uint = @mut 0u;
fn fold_ty_rep(j_: @mut uint, i_: uint, newty_: ast::ty_,
io::with_str_writer(f)
}
-fn check_variants_of_ast(crate: ast::crate, codemap: codemap::codemap,
+fn check_variants_of_ast(crate: ast::crate, codemap: codemap::CodeMap,
filename: &Path, cx: context) {
let stolen = steal(crate, cx.mode);
let extra_exprs = vec::filter(common_exprs(),
fn check_variants_T<T: Copy>(
crate: ast::crate,
- codemap: codemap::codemap,
+ codemap: codemap::CodeMap,
filename: &Path,
thing_label: ~str,
things: ~[T],
}
fn last_part(filename: ~str) -> ~str {
- let ix = option::get(&str::rfind_char(filename, '/'));
+ let ix = option::get(str::rfind_char(filename, '/'));
str::slice(filename, ix + 1u, str::len(filename) - 3u)
}
fn has_raw_pointers(c: ast::crate) -> bool {
let has_rp = @mut false;
- fn visit_ty(flag: @mut bool, t: @ast::ty) {
+ fn visit_ty(flag: @mut bool, t: @ast::Ty) {
match t.node {
ast::ty_ptr(_) => { *flag = true; }
_ => { }
pub fn test_transmute() {
unsafe {
let x = @1;
- let x: *int = transmute(x);
+ let x: *int = transmute(move x);
assert *x == 1;
- let _x: @int = transmute(x);
+ let _x: @int = transmute(move x);
}
}
assert (*box).header.prev == null();
debug!("freeing box: %x", box as uint);
- rt_free(transmute(box));
+ rt_free(transmute(move box));
}
}
#[link_name = "m"]
#[abi = "cdecl"]
-pub extern mod c_double {
+pub extern mod c_double_utils {
// Alpabetically sorted by link_name
#[link_name = "m"]
#[abi = "cdecl"]
-pub extern mod c_float {
+pub extern mod c_float_utils {
// Alpabetically sorted by link_name
--- /dev/null
+// helper for transmutation, shown below.
+type RustClosure = (int,int);
+
+struct Condition<T, U:Copy> {
+ key: task::local_data::LocalDataKey<Handler<T,U>>
+}
+
+struct Handler<T, U:Copy> {
+ handle: RustClosure
+}
+
+
+struct ProtectBlock<T, U:Copy> {
+ cond: &Condition<T, U>,
+ inner: RustClosure
+}
+
+struct Guard<T, U:Copy> {
+ cond: &Condition<T,U>,
+ prev: Option<@Handler<T, U>>,
+ drop {
+ match self.prev {
+ None => (),
+ Some(p) =>
+ unsafe {
+ debug!("Guard: popping handler from TLS");
+ task::local_data::local_data_set(self.cond.key, p)
+ }
+ }
+ }
+}
+
+struct HandleBlock<T, U:Copy> {
+ pb: &ProtectBlock<T,U>,
+ prev: Option<@Handler<T,U>>,
+ handler: @Handler<T,U>,
+ drop {
+ unsafe {
+ debug!("HandleBlock: pushing handler to TLS");
+ let _g = Guard { cond: self.pb.cond,
+ prev: self.prev };
+ task::local_data::local_data_set(self.pb.cond.key,
+ self.handler);
+ // transmutation to avoid copying non-copyable, should
+ // be fixable by tracking closure pointees in regionck.
+ let f : &fn() = ::cast::transmute(self.pb.inner);
+ debug!("HandleBlock: invoking protected code");
+ f();
+ debug!("HandleBlock: returned from protected code");
+ }
+ }
+}
+
+struct Trap<T, U:Copy> {
+ cond: &Condition<T,U>,
+ handler: @Handler<T, U>
+}
+
+impl<T, U: Copy> ProtectBlock<T,U> {
+ fn handle(&self, h: &self/fn(&T) ->U) -> HandleBlock/&self<T,U> {
+ unsafe {
+ debug!("ProtectBlock.handle: setting up handler block");
+ let p : *RustClosure = ::cast::transmute(&h);
+ let prev = task::local_data::local_data_get(self.cond.key);
+ HandleBlock { pb: self,
+ prev: prev,
+ handler: @Handler{handle: *p} }
+ }
+ }
+}
+
+
+
+impl<T, U: Copy> Trap<T,U> {
+ fn in<V: Copy>(&self, inner: &self/fn() -> V) -> V {
+ unsafe {
+ let prev = task::local_data::local_data_get(self.cond.key);
+ let _g = Guard { cond: self.cond,
+ prev: prev };
+ debug!("Trap: pushing handler to TLS");
+ task::local_data::local_data_set(self.cond.key, self.handler);
+ inner()
+ }
+ }
+}
+
+impl<T, U: Copy> Condition<T,U> {
+
+ fn guard(&self, h: &self/fn(&T) ->U) -> Guard/&self<T,U> {
+ unsafe {
+ let prev = task::local_data::local_data_get(self.key);
+ let g = Guard { cond: self, prev: prev };
+ debug!("Guard: pushing handler to TLS");
+ let p : *RustClosure = ::cast::transmute(&h);
+ let h = @Handler{handle: *p};
+ task::local_data::local_data_set(self.key, h);
+ move g
+ }
+ }
+
+ fn trap(&self, h: &self/fn(&T) ->U) -> Trap/&self<T,U> {
+ unsafe {
+ let p : *RustClosure = ::cast::transmute(&h);
+ let h = @Handler{handle: *p};
+ move Trap { cond: self, handler: h }
+ }
+ }
+
+ fn protect(&self, inner: &self/fn()) -> ProtectBlock/&self<T,U> {
+ unsafe {
+ // transmutation to avoid copying non-copyable, should
+ // be fixable by tracking closure pointees in regionck.
+ debug!("Condition.protect: setting up protected block");
+ let p : *RustClosure = ::cast::transmute(&inner);
+ ProtectBlock { cond: self,
+ inner: *p }
+ }
+ }
+
+ fn raise(t:&T) -> U {
+ unsafe {
+ match task::local_data::local_data_get(self.key) {
+ None => {
+ debug!("Condition.raise: found no handler");
+ fail
+ }
+
+ Some(handler) => {
+ debug!("Condition.raise: found handler");
+ let f : &fn(&T) -> U = ::cast::transmute(handler.handle);
+ f(t)
+ }
+ }
+ }
+ }
+}
+
+
+#[cfg(test)]
+fn sadness_key(_x: @Handler<int,int>) { }
+
+#[cfg(test)]
+fn trouble(i: int) {
+    // Condition should work as a const, but consts currently have limitations.
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+ debug!("trouble: raising conition");
+ let j = sadness_condition.raise(&i);
+ debug!("trouble: handler recovered with %d", j);
+}
+
+#[test]
+fn test1() {
+
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut i = 10;
+
+ let b = do sadness_condition.protect {
+ debug!("test1: in protected block");
+ trouble(1);
+ trouble(2);
+ trouble(3);
+ };
+
+ do b.handle |j| {
+ debug!("test1: in handler");
+ i += *j;
+ i
+ };
+
+ assert i == 16;
+}
+#[cfg(test)]
+fn nested_test_inner() {
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut inner_trapped = false;
+
+ let b = do sadness_condition.protect {
+ debug!("nested_test_inner: in protected block");
+ trouble(1);
+ };
+
+ do b.handle |_j| {
+ debug!("nested_test_inner: in handler");
+ inner_trapped = true;
+ 0
+ };
+
+ assert inner_trapped;
+}
+
+#[test]
+fn nested_test_outer() {
+
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut outer_trapped = false;
+
+ let b = do sadness_condition.protect {
+ debug!("nested_test_outer: in protected block");
+ nested_test_inner();
+ trouble(1);
+ };
+
+ do b.handle |_j| {
+ debug!("nested_test_outer: in handler");
+ outer_trapped = true;
+ 0
+ };
+
+ assert outer_trapped;
+}
+
+
+#[cfg(test)]
+fn nested_guard_test_inner() {
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut inner_trapped = false;
+
+ let _g = do sadness_condition.guard |_j| {
+ debug!("nested_guard_test_inner: in handler");
+ inner_trapped = true;
+ 0
+ };
+
+ debug!("nested_guard_test_inner: in protected block");
+ trouble(1);
+
+ assert inner_trapped;
+}
+
+#[test]
+fn nested_guard_test_outer() {
+
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut outer_trapped = false;
+
+ let _g = do sadness_condition.guard |_j| {
+ debug!("nested_guard_test_outer: in handler");
+ outer_trapped = true;
+ 0
+ };
+
+ debug!("nested_guard_test_outer: in protected block");
+ nested_guard_test_inner();
+ trouble(1);
+
+ assert outer_trapped;
+}
+
+
+
+#[cfg(test)]
+fn nested_trap_test_inner() {
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut inner_trapped = false;
+
+ do sadness_condition.trap(|_j| {
+ debug!("nested_trap_test_inner: in handler");
+ inner_trapped = true;
+ 0
+ }).in {
+ debug!("nested_trap_test_inner: in protected block");
+ trouble(1);
+ }
+
+ assert inner_trapped;
+}
+
+#[test]
+fn nested_trap_test_outer() {
+
+ let sadness_condition : Condition<int,int> =
+ Condition { key: sadness_key };
+
+ let mut outer_trapped = false;
+
+ do sadness_condition.trap(|_j| {
+ debug!("nested_trap_test_outer: in handler");
+ outer_trapped = true; 0
+ }).in {
+ debug!("nested_guard_test_outer: in protected block");
+ nested_trap_test_inner();
+ trouble(1);
+ }
+
+
+ assert outer_trapped;
+}
*/
#[link(name = "core",
- vers = "0.4",
+ vers = "0.5",
uuid = "c70c24a7-5551-4f73-8e37-380b11d80be8",
url = "https://github.com/mozilla/rust/tree/master/src/libcore")];
pub mod repr;
pub mod cleanup;
pub mod reflect;
+pub mod condition;
// Modules supporting compiler-generated code
// Exported but not part of the public interface
pub use WindowsPath = path::WindowsPath;
pub use PosixPath = path::PosixPath;
-pub use tuple::{TupleOps, ExtendedTupleOps};
-pub use str::{StrSlice, UniqueStr};
+pub use tuple::{CopyableTuple, ImmutableTuple, ExtendedTupleOps};
+pub use str::{StrSlice, Trimmable};
pub use vec::{ConstVector, CopyableVector, ImmutableVector};
pub use vec::{ImmutableEqVector, ImmutableCopyableVector};
pub use vec::{MutableVector, MutableCopyableVector};
pub use ops::{Shl, Shr, Index};
#[cfg(test)]
-extern mod coreops(name = "core", vers = "0.4");
+extern mod coreops(name = "core", vers = "0.5");
#[cfg(test)]
pub use coreops::ops::{Const, Copy, Send, Owned};
// Similar to above. Some magic to make core testable.
#[cfg(test)]
mod std {
- extern mod std(vers = "0.4");
+ extern mod std(vers = "0.5");
pub use std::test;
}
fn push_head_n(data: T) -> DListNode<T> {
let mut nobe = self.new_link(move data);
self.add_head(nobe);
- option::get(&nobe)
+ option::get(nobe)
}
/// Add data to the tail of the list. O(1).
fn push(data: T) {
fn push_n(data: T) -> DListNode<T> {
let mut nobe = self.new_link(move data);
self.add_tail(nobe);
- option::get(&nobe)
+ option::get(nobe)
}
/**
* Insert data into the middle of the list, left of the given node.
fn insert_before_n(data: T, neighbour: DListNode<T>) -> DListNode<T> {
let mut nobe = self.new_link(move data);
self.insert_left(nobe, neighbour);
- option::get(&nobe)
+ option::get(nobe)
}
/**
* Insert data into the middle of the list, right of the given node.
fn insert_after_n(data: T, neighbour: DListNode<T>) -> DListNode<T> {
let mut nobe = self.new_link(move data);
self.insert_right(neighbour, nobe);
- option::get(&nobe)
+ option::get(nobe)
}
/// Remove a node from the head of the list. O(1).
let mut link = self.peek_n();
let mut rabbit = link;
while option::is_some(&link) {
- let nobe = option::get(&link);
+ let nobe = option::get(link);
assert nobe.linked;
// check cycle
if option::is_some(&rabbit) {
- rabbit = option::get(&rabbit).next;
+ rabbit = option::get(rabbit).next;
}
if option::is_some(&rabbit) {
- rabbit = option::get(&rabbit).next;
+ rabbit = option::get(rabbit).next;
}
if option::is_some(&rabbit) {
- assert !box::ptr_eq(*option::get(&rabbit), *nobe);
+ assert !box::ptr_eq(*option::get(rabbit), *nobe);
}
// advance
link = nobe.next_link();
link = self.peek_tail_n();
rabbit = link;
while option::is_some(&link) {
- let nobe = option::get(&link);
+ let nobe = option::get(link);
assert nobe.linked;
// check cycle
if option::is_some(&rabbit) {
- rabbit = option::get(&rabbit).prev;
+ rabbit = option::get(rabbit).prev;
}
if option::is_some(&rabbit) {
- rabbit = option::get(&rabbit).prev;
+ rabbit = option::get(rabbit).prev;
}
if option::is_some(&rabbit) {
- assert !box::ptr_eq(*option::get(&rabbit), *nobe);
+ assert !box::ptr_eq(*option::get(rabbit), *nobe);
}
// advance
link = nobe.prev_link();
}
/// Creates a new, empty dvec
-pub fn DVec<A>() -> DVec<A> {
+pub pure fn DVec<A>() -> DVec<A> {
DVec_({mut data: ~[]})
}
unsafe { str::unshift_char(&mut s, ' ') };
}
}
- return unsafe { pad(cv, s, PadSigned) };
+ return unsafe { pad(cv, move s, PadSigned) };
}
pub pure fn conv_uint(cv: Conv, u: uint) -> ~str {
let prec = get_int_precision(cv);
TyBits => uint_to_str_prec(u, 2u, prec),
TyOctal => uint_to_str_prec(u, 8u, prec)
};
- return unsafe { pad(cv, rs, PadUnsigned) };
+ return unsafe { pad(cv, move rs, PadUnsigned) };
}
pub pure fn conv_bool(cv: Conv, b: bool) -> ~str {
let s = if b { ~"true" } else { ~"false" };
}
pub pure fn conv_char(cv: Conv, c: char) -> ~str {
let mut s = str::from_char(c);
- return unsafe { pad(cv, s, PadNozero) };
+ return unsafe { pad(cv, move s, PadNozero) };
}
pub pure fn conv_str(cv: Conv, s: &str) -> ~str {
// For strings, precision is the maximum characters
// displayed
let mut unpadded = match cv.precision {
- CountImplied => s.to_unique(),
+ CountImplied => s.to_owned(),
CountIs(max) => if max as uint < str::char_len(s) {
str::substr(s, 0u, max as uint)
} else {
- s.to_unique()
+ s.to_owned()
}
};
- return unsafe { pad(cv, unpadded, PadNozero) };
+ return unsafe { pad(cv, move unpadded, PadNozero) };
}
pub pure fn conv_float(cv: Conv, f: float) -> ~str {
let (to_str, digits) = match cv.precision {
s = ~" " + s;
}
}
- return unsafe { pad(cv, s, PadFloat) };
+ return unsafe { pad(cv, move s, PadFloat) };
}
pub pure fn conv_poly<T>(cv: Conv, v: &T) -> ~str {
let s = sys::log_str(v);
pub fn pad(cv: Conv, s: ~str, mode: PadMode) -> ~str {
let mut s = move s; // sadtimes
let uwidth : uint = match cv.width {
- CountImplied => return s,
+ CountImplied => return (move s),
CountIs(width) => {
// FIXME: width should probably be uint (see Issue #1996)
width as uint
}
};
let strlen = str::char_len(s);
- if uwidth <= strlen { return s; }
+ if uwidth <= strlen { return (move s); }
let mut padchar = ' ';
let diff = uwidth - strlen;
if have_flag(cv.flags, flag_left_justify) {
//! Operations and constants for `f32`
-pub use cmath::c_float::*;
+pub use cmath::c_float_utils::*;
pub use cmath::c_float_targ_consts::*;
// These are not defined inside consts:: for consistency with
//! Operations and constants for `f64`
-pub use cmath::c_double::*;
+pub use cmath::c_double_utils::*;
pub use cmath::c_double_targ_consts::*;
// FIXME (#1433): obtain these in a different way
pub pure fn gt(x: f64, y: f64) -> bool { return x > y; }
pub pure fn sqrt(x: f64) -> f64 {
- cmath::c_double::sqrt(x as libc::c_double) as f64
+ cmath::c_double_utils::sqrt(x as libc::c_double) as f64
}
/// Returns true if `x` is a positive number, including +0.0f640 and +Infinity
ptr::addr_of(&outsz),
lz_norm);
assert res as int != 0;
- let out = vec::raw::from_buf(res as *u8,
+ let out = vec::raw::from_buf_raw(res as *u8,
outsz as uint);
libc::free(res);
move out
ptr::addr_of(&outsz),
0);
assert res as int != 0;
- let out = vec::raw::from_buf(res as *u8,
+ let out = vec::raw::from_buf_raw(res as *u8,
outsz as uint);
libc::free(res);
move out
* * digits - The number of significant digits
* * exact - Whether to enforce the exact number of significant digits
*/
-pub fn to_str_common(num: float, digits: uint, exact: bool) -> ~str {
+pub pure fn to_str_common(num: float, digits: uint, exact: bool) -> ~str {
if is_NaN(num) { return ~"NaN"; }
if num == infinity { return ~"inf"; }
if num == neg_infinity { return ~"-inf"; }
// store the next digit
frac *= 10.0;
let digit = frac as uint;
- fractionalParts.push(digit);
+ // Bleh: not really unsafe.
+ unsafe { fractionalParts.push(digit); }
// calculate the next frac
frac -= digit as float;
// turn digits into string
// using stack of digits
while fractionalParts.is_not_empty() {
- let mut adjusted_digit = carry + fractionalParts.pop();
+ // Bleh; shouldn't need to be unsafe
+ let mut adjusted_digit = carry + unsafe { fractionalParts.pop() };
if adjusted_digit == 10 {
carry = 1;
* * num - The float value
* * digits - The number of significant digits
*/
-pub fn to_str(num: float, digits: uint) -> ~str {
+pub pure fn to_str(num: float, digits: uint) -> ~str {
to_str_common(num, digits, false)
}
*
* `NaN` if both `x` and `pow` are `0u`, otherwise `x^pow`
*/
-pub fn pow_with_uint(base: uint, pow: uint) -> float {
+pub pure fn pow_with_uint(base: uint, pow: uint) -> float {
if base == 0u {
if pow == 0u {
return NaN as float;
#[test]
pub fn test_from_port() {
let (po, ch) = future_pipe::init();
- future_pipe::server::completed(ch, ~"whale");
- let f = from_port(po);
+ future_pipe::server::completed(move ch, ~"whale");
+ let f = from_port(move po);
assert get(&f) == ~"whale";
}
pub fn test_sendable_future() {
let expected = ~"schlorf";
let f = do spawn |copy expected| { copy expected };
- do task::spawn {
+ do task::spawn |move f, move expected| {
let actual = get(&f);
assert actual == expected;
}
mut v1: u64,
mut v2: u64,
mut v3: u64,
- tail: [mut u8]/8, // unprocessed bytes
+ tail: [mut u8 * 8], // unprocessed bytes
mut ntail: uint, // how many bytes in tail are valid
}
#[test]
pub fn test_siphash() {
- let vecs : [[u8]/8]/64 = [
- [ 0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72, ]/_,
- [ 0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74, ]/_,
- [ 0x5a, 0x4f, 0xa9, 0xd9, 0x09, 0x80, 0x6c, 0x0d, ]/_,
- [ 0x2d, 0x7e, 0xfb, 0xd7, 0x96, 0x66, 0x67, 0x85, ]/_,
- [ 0xb7, 0x87, 0x71, 0x27, 0xe0, 0x94, 0x27, 0xcf, ]/_,
- [ 0x8d, 0xa6, 0x99, 0xcd, 0x64, 0x55, 0x76, 0x18, ]/_,
- [ 0xce, 0xe3, 0xfe, 0x58, 0x6e, 0x46, 0xc9, 0xcb, ]/_,
- [ 0x37, 0xd1, 0x01, 0x8b, 0xf5, 0x00, 0x02, 0xab, ]/_,
- [ 0x62, 0x24, 0x93, 0x9a, 0x79, 0xf5, 0xf5, 0x93, ]/_,
- [ 0xb0, 0xe4, 0xa9, 0x0b, 0xdf, 0x82, 0x00, 0x9e, ]/_,
- [ 0xf3, 0xb9, 0xdd, 0x94, 0xc5, 0xbb, 0x5d, 0x7a, ]/_,
- [ 0xa7, 0xad, 0x6b, 0x22, 0x46, 0x2f, 0xb3, 0xf4, ]/_,
- [ 0xfb, 0xe5, 0x0e, 0x86, 0xbc, 0x8f, 0x1e, 0x75, ]/_,
- [ 0x90, 0x3d, 0x84, 0xc0, 0x27, 0x56, 0xea, 0x14, ]/_,
- [ 0xee, 0xf2, 0x7a, 0x8e, 0x90, 0xca, 0x23, 0xf7, ]/_,
- [ 0xe5, 0x45, 0xbe, 0x49, 0x61, 0xca, 0x29, 0xa1, ]/_,
- [ 0xdb, 0x9b, 0xc2, 0x57, 0x7f, 0xcc, 0x2a, 0x3f, ]/_,
- [ 0x94, 0x47, 0xbe, 0x2c, 0xf5, 0xe9, 0x9a, 0x69, ]/_,
- [ 0x9c, 0xd3, 0x8d, 0x96, 0xf0, 0xb3, 0xc1, 0x4b, ]/_,
- [ 0xbd, 0x61, 0x79, 0xa7, 0x1d, 0xc9, 0x6d, 0xbb, ]/_,
- [ 0x98, 0xee, 0xa2, 0x1a, 0xf2, 0x5c, 0xd6, 0xbe, ]/_,
- [ 0xc7, 0x67, 0x3b, 0x2e, 0xb0, 0xcb, 0xf2, 0xd0, ]/_,
- [ 0x88, 0x3e, 0xa3, 0xe3, 0x95, 0x67, 0x53, 0x93, ]/_,
- [ 0xc8, 0xce, 0x5c, 0xcd, 0x8c, 0x03, 0x0c, 0xa8, ]/_,
- [ 0x94, 0xaf, 0x49, 0xf6, 0xc6, 0x50, 0xad, 0xb8, ]/_,
- [ 0xea, 0xb8, 0x85, 0x8a, 0xde, 0x92, 0xe1, 0xbc, ]/_,
- [ 0xf3, 0x15, 0xbb, 0x5b, 0xb8, 0x35, 0xd8, 0x17, ]/_,
- [ 0xad, 0xcf, 0x6b, 0x07, 0x63, 0x61, 0x2e, 0x2f, ]/_,
- [ 0xa5, 0xc9, 0x1d, 0xa7, 0xac, 0xaa, 0x4d, 0xde, ]/_,
- [ 0x71, 0x65, 0x95, 0x87, 0x66, 0x50, 0xa2, 0xa6, ]/_,
- [ 0x28, 0xef, 0x49, 0x5c, 0x53, 0xa3, 0x87, 0xad, ]/_,
- [ 0x42, 0xc3, 0x41, 0xd8, 0xfa, 0x92, 0xd8, 0x32, ]/_,
- [ 0xce, 0x7c, 0xf2, 0x72, 0x2f, 0x51, 0x27, 0x71, ]/_,
- [ 0xe3, 0x78, 0x59, 0xf9, 0x46, 0x23, 0xf3, 0xa7, ]/_,
- [ 0x38, 0x12, 0x05, 0xbb, 0x1a, 0xb0, 0xe0, 0x12, ]/_,
- [ 0xae, 0x97, 0xa1, 0x0f, 0xd4, 0x34, 0xe0, 0x15, ]/_,
- [ 0xb4, 0xa3, 0x15, 0x08, 0xbe, 0xff, 0x4d, 0x31, ]/_,
- [ 0x81, 0x39, 0x62, 0x29, 0xf0, 0x90, 0x79, 0x02, ]/_,
- [ 0x4d, 0x0c, 0xf4, 0x9e, 0xe5, 0xd4, 0xdc, 0xca, ]/_,
- [ 0x5c, 0x73, 0x33, 0x6a, 0x76, 0xd8, 0xbf, 0x9a, ]/_,
- [ 0xd0, 0xa7, 0x04, 0x53, 0x6b, 0xa9, 0x3e, 0x0e, ]/_,
- [ 0x92, 0x59, 0x58, 0xfc, 0xd6, 0x42, 0x0c, 0xad, ]/_,
- [ 0xa9, 0x15, 0xc2, 0x9b, 0xc8, 0x06, 0x73, 0x18, ]/_,
- [ 0x95, 0x2b, 0x79, 0xf3, 0xbc, 0x0a, 0xa6, 0xd4, ]/_,
- [ 0xf2, 0x1d, 0xf2, 0xe4, 0x1d, 0x45, 0x35, 0xf9, ]/_,
- [ 0x87, 0x57, 0x75, 0x19, 0x04, 0x8f, 0x53, 0xa9, ]/_,
- [ 0x10, 0xa5, 0x6c, 0xf5, 0xdf, 0xcd, 0x9a, 0xdb, ]/_,
- [ 0xeb, 0x75, 0x09, 0x5c, 0xcd, 0x98, 0x6c, 0xd0, ]/_,
- [ 0x51, 0xa9, 0xcb, 0x9e, 0xcb, 0xa3, 0x12, 0xe6, ]/_,
- [ 0x96, 0xaf, 0xad, 0xfc, 0x2c, 0xe6, 0x66, 0xc7, ]/_,
- [ 0x72, 0xfe, 0x52, 0x97, 0x5a, 0x43, 0x64, 0xee, ]/_,
- [ 0x5a, 0x16, 0x45, 0xb2, 0x76, 0xd5, 0x92, 0xa1, ]/_,
- [ 0xb2, 0x74, 0xcb, 0x8e, 0xbf, 0x87, 0x87, 0x0a, ]/_,
- [ 0x6f, 0x9b, 0xb4, 0x20, 0x3d, 0xe7, 0xb3, 0x81, ]/_,
- [ 0xea, 0xec, 0xb2, 0xa3, 0x0b, 0x22, 0xa8, 0x7f, ]/_,
- [ 0x99, 0x24, 0xa4, 0x3c, 0xc1, 0x31, 0x57, 0x24, ]/_,
- [ 0xbd, 0x83, 0x8d, 0x3a, 0xaf, 0xbf, 0x8d, 0xb7, ]/_,
- [ 0x0b, 0x1a, 0x2a, 0x32, 0x65, 0xd5, 0x1a, 0xea, ]/_,
- [ 0x13, 0x50, 0x79, 0xa3, 0x23, 0x1c, 0xe6, 0x60, ]/_,
- [ 0x93, 0x2b, 0x28, 0x46, 0xe4, 0xd7, 0x06, 0x66, ]/_,
- [ 0xe1, 0x91, 0x5f, 0x5c, 0xb1, 0xec, 0xa4, 0x6c, ]/_,
- [ 0xf3, 0x25, 0x96, 0x5c, 0xa1, 0x6d, 0x62, 0x9f, ]/_,
- [ 0x57, 0x5f, 0xf2, 0x8e, 0x60, 0x38, 0x1b, 0xe5, ]/_,
- [ 0x72, 0x45, 0x06, 0xeb, 0x4c, 0x32, 0x8a, 0x95, ]/_
- ]/_;
+ let vecs : [[u8 * 8] * 64] = [
+ [ 0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72, ],
+ [ 0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74, ],
+ [ 0x5a, 0x4f, 0xa9, 0xd9, 0x09, 0x80, 0x6c, 0x0d, ],
+ [ 0x2d, 0x7e, 0xfb, 0xd7, 0x96, 0x66, 0x67, 0x85, ],
+ [ 0xb7, 0x87, 0x71, 0x27, 0xe0, 0x94, 0x27, 0xcf, ],
+ [ 0x8d, 0xa6, 0x99, 0xcd, 0x64, 0x55, 0x76, 0x18, ],
+ [ 0xce, 0xe3, 0xfe, 0x58, 0x6e, 0x46, 0xc9, 0xcb, ],
+ [ 0x37, 0xd1, 0x01, 0x8b, 0xf5, 0x00, 0x02, 0xab, ],
+ [ 0x62, 0x24, 0x93, 0x9a, 0x79, 0xf5, 0xf5, 0x93, ],
+ [ 0xb0, 0xe4, 0xa9, 0x0b, 0xdf, 0x82, 0x00, 0x9e, ],
+ [ 0xf3, 0xb9, 0xdd, 0x94, 0xc5, 0xbb, 0x5d, 0x7a, ],
+ [ 0xa7, 0xad, 0x6b, 0x22, 0x46, 0x2f, 0xb3, 0xf4, ],
+ [ 0xfb, 0xe5, 0x0e, 0x86, 0xbc, 0x8f, 0x1e, 0x75, ],
+ [ 0x90, 0x3d, 0x84, 0xc0, 0x27, 0x56, 0xea, 0x14, ],
+ [ 0xee, 0xf2, 0x7a, 0x8e, 0x90, 0xca, 0x23, 0xf7, ],
+ [ 0xe5, 0x45, 0xbe, 0x49, 0x61, 0xca, 0x29, 0xa1, ],
+ [ 0xdb, 0x9b, 0xc2, 0x57, 0x7f, 0xcc, 0x2a, 0x3f, ],
+ [ 0x94, 0x47, 0xbe, 0x2c, 0xf5, 0xe9, 0x9a, 0x69, ],
+ [ 0x9c, 0xd3, 0x8d, 0x96, 0xf0, 0xb3, 0xc1, 0x4b, ],
+ [ 0xbd, 0x61, 0x79, 0xa7, 0x1d, 0xc9, 0x6d, 0xbb, ],
+ [ 0x98, 0xee, 0xa2, 0x1a, 0xf2, 0x5c, 0xd6, 0xbe, ],
+ [ 0xc7, 0x67, 0x3b, 0x2e, 0xb0, 0xcb, 0xf2, 0xd0, ],
+ [ 0x88, 0x3e, 0xa3, 0xe3, 0x95, 0x67, 0x53, 0x93, ],
+ [ 0xc8, 0xce, 0x5c, 0xcd, 0x8c, 0x03, 0x0c, 0xa8, ],
+ [ 0x94, 0xaf, 0x49, 0xf6, 0xc6, 0x50, 0xad, 0xb8, ],
+ [ 0xea, 0xb8, 0x85, 0x8a, 0xde, 0x92, 0xe1, 0xbc, ],
+ [ 0xf3, 0x15, 0xbb, 0x5b, 0xb8, 0x35, 0xd8, 0x17, ],
+ [ 0xad, 0xcf, 0x6b, 0x07, 0x63, 0x61, 0x2e, 0x2f, ],
+ [ 0xa5, 0xc9, 0x1d, 0xa7, 0xac, 0xaa, 0x4d, 0xde, ],
+ [ 0x71, 0x65, 0x95, 0x87, 0x66, 0x50, 0xa2, 0xa6, ],
+ [ 0x28, 0xef, 0x49, 0x5c, 0x53, 0xa3, 0x87, 0xad, ],
+ [ 0x42, 0xc3, 0x41, 0xd8, 0xfa, 0x92, 0xd8, 0x32, ],
+ [ 0xce, 0x7c, 0xf2, 0x72, 0x2f, 0x51, 0x27, 0x71, ],
+ [ 0xe3, 0x78, 0x59, 0xf9, 0x46, 0x23, 0xf3, 0xa7, ],
+ [ 0x38, 0x12, 0x05, 0xbb, 0x1a, 0xb0, 0xe0, 0x12, ],
+ [ 0xae, 0x97, 0xa1, 0x0f, 0xd4, 0x34, 0xe0, 0x15, ],
+ [ 0xb4, 0xa3, 0x15, 0x08, 0xbe, 0xff, 0x4d, 0x31, ],
+ [ 0x81, 0x39, 0x62, 0x29, 0xf0, 0x90, 0x79, 0x02, ],
+ [ 0x4d, 0x0c, 0xf4, 0x9e, 0xe5, 0xd4, 0xdc, 0xca, ],
+ [ 0x5c, 0x73, 0x33, 0x6a, 0x76, 0xd8, 0xbf, 0x9a, ],
+ [ 0xd0, 0xa7, 0x04, 0x53, 0x6b, 0xa9, 0x3e, 0x0e, ],
+ [ 0x92, 0x59, 0x58, 0xfc, 0xd6, 0x42, 0x0c, 0xad, ],
+ [ 0xa9, 0x15, 0xc2, 0x9b, 0xc8, 0x06, 0x73, 0x18, ],
+ [ 0x95, 0x2b, 0x79, 0xf3, 0xbc, 0x0a, 0xa6, 0xd4, ],
+ [ 0xf2, 0x1d, 0xf2, 0xe4, 0x1d, 0x45, 0x35, 0xf9, ],
+ [ 0x87, 0x57, 0x75, 0x19, 0x04, 0x8f, 0x53, 0xa9, ],
+ [ 0x10, 0xa5, 0x6c, 0xf5, 0xdf, 0xcd, 0x9a, 0xdb, ],
+ [ 0xeb, 0x75, 0x09, 0x5c, 0xcd, 0x98, 0x6c, 0xd0, ],
+ [ 0x51, 0xa9, 0xcb, 0x9e, 0xcb, 0xa3, 0x12, 0xe6, ],
+ [ 0x96, 0xaf, 0xad, 0xfc, 0x2c, 0xe6, 0x66, 0xc7, ],
+ [ 0x72, 0xfe, 0x52, 0x97, 0x5a, 0x43, 0x64, 0xee, ],
+ [ 0x5a, 0x16, 0x45, 0xb2, 0x76, 0xd5, 0x92, 0xa1, ],
+ [ 0xb2, 0x74, 0xcb, 0x8e, 0xbf, 0x87, 0x87, 0x0a, ],
+ [ 0x6f, 0x9b, 0xb4, 0x20, 0x3d, 0xe7, 0xb3, 0x81, ],
+ [ 0xea, 0xec, 0xb2, 0xa3, 0x0b, 0x22, 0xa8, 0x7f, ],
+ [ 0x99, 0x24, 0xa4, 0x3c, 0xc1, 0x31, 0x57, 0x24, ],
+ [ 0xbd, 0x83, 0x8d, 0x3a, 0xaf, 0xbf, 0x8d, 0xb7, ],
+ [ 0x0b, 0x1a, 0x2a, 0x32, 0x65, 0xd5, 0x1a, 0xea, ],
+ [ 0x13, 0x50, 0x79, 0xa3, 0x23, 0x1c, 0xe6, 0x60, ],
+ [ 0x93, 0x2b, 0x28, 0x46, 0xe4, 0xd7, 0x06, 0x66, ],
+ [ 0xe1, 0x91, 0x5f, 0x5c, 0xb1, 0xec, 0xa4, 0x6c, ],
+ [ 0xf3, 0x25, 0x96, 0x5c, 0xa1, 0x6d, 0x62, 0x9f, ],
+ [ 0x57, 0x5f, 0xf2, 0x8e, 0x60, 0x38, 0x1b, 0xe5, ],
+ [ 0x72, 0x45, 0x06, 0xeb, 0x4c, 0x32, 0x8a, 0x95, ]
+ ];
let k0 = 0x_07_06_05_04_03_02_01_00_u64;
let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08_u64;
let stream_inc = &State(k0,k1);
let stream_full = &State(k0,k1);
- fn to_hex_str(r: &[u8]/8) -> ~str {
+ fn to_hex_str(r: &[u8 * 8]) -> ~str {
let mut s = ~"";
for vec::each(*r) |b| {
s += uint::to_str(*b as uint, 16u);
}
- return s;
+ move s
}
while t < 64 {
}
/// Computes the absolute value
-// FIXME: abs should return an unsigned int (#2353)
pub pure fn abs(i: T) -> T {
if is_negative(i) { -i } else { i }
}
}
/// Convert to a string in a given base
-pub fn to_str(n: T, radix: uint) -> ~str {
+pub pure fn to_str(n: T, radix: uint) -> ~str {
do to_str_bytes(n, radix) |slice| {
do vec::as_imm_buf(slice) |p, len| {
unsafe { str::raw::from_buf_len(p, len) }
}
}
-pub fn to_str_bytes<U>(n: T, radix: uint, f: fn(v: &[u8]) -> U) -> U {
+pub pure fn to_str_bytes<U>(n: T, radix: uint, f: fn(v: &[u8]) -> U) -> U {
if n < 0 as T {
uint::to_str_bytes(true, -n as uint, radix, f)
} else {
}
/// Convert to a string
-pub fn str(i: T) -> ~str { return to_str(i, 10u); }
+pub pure fn str(i: T) -> ~str { return to_str(i, 10u); }
// FIXME: Has alignment issues on windows and 32-bit linux (#2609)
#[test]
// FIXME (#2004): Seekable really should be orthogonal.
// FIXME (#2982): This should probably return an error.
- fn read(buf: &[mut u8], len: uint) -> uint;
+ fn read(bytes: &[mut u8], len: uint) -> uint;
fn read_byte() -> int;
fn unread_byte(int);
fn eof() -> bool;
impl<T: Reader> T : ReaderUtil {
fn read_bytes(len: uint) -> ~[u8] {
- let mut buf = vec::with_capacity(len);
- unsafe { vec::raw::set_len(&mut buf, len); }
+ let mut bytes = vec::with_capacity(len);
+ unsafe { vec::raw::set_len(&mut bytes, len); }
- let count = self.read(buf, len);
+ let count = self.read(bytes, len);
- unsafe { vec::raw::set_len(&mut buf, count); }
- move buf
+ unsafe { vec::raw::set_len(&mut bytes, count); }
+ move bytes
}
fn read_line() -> ~str {
- let mut buf = ~[];
+ let mut bytes = ~[];
loop {
let ch = self.read_byte();
if ch == -1 || ch == 10 { break; }
- buf.push(ch as u8);
+ bytes.push(ch as u8);
}
- str::from_bytes(buf)
+ str::from_bytes(bytes)
}
fn read_chars(n: uint) -> ~[char] {
// returns the (consumed offset, n_req), appends characters to &chars
- fn chars_from_bytes<T: Reader>(buf: &~[u8], chars: &mut ~[char])
+ fn chars_from_bytes<T: Reader>(bytes: &~[u8], chars: &mut ~[char])
-> (uint, uint) {
let mut i = 0;
- let buf_len = buf.len();
- while i < buf_len {
- let b0 = buf[i];
+ let bytes_len = bytes.len();
+ while i < bytes_len {
+ let b0 = bytes[i];
let w = str::utf8_char_width(b0);
let end = i + w;
i += 1;
loop;
}
// can't satisfy this char with the existing data
- if end > buf_len {
- return (i - 1, end - buf_len);
+ if end > bytes_len {
+ return (i - 1, end - bytes_len);
}
let mut val = 0;
while i < end {
- let next = buf[i] as int;
+ let next = bytes[i] as int;
i += 1;
assert (next > -1);
assert (next & 192 == 128);
}
return (i, 0);
}
- let mut buf: ~[u8] = ~[];
- let mut chars: ~[char] = ~[];
+ let mut bytes = ~[];
+ let mut chars = ~[];
// might need more bytes, but reading n will never over-read
let mut nbread = n;
while nbread > 0 {
// we're split in a unicode char?
break;
}
- buf.push_all(data);
- let (offset, nbreq) = chars_from_bytes::<T>(&buf, &mut chars);
+ bytes.push_all(data);
+ let (offset, nbreq) = chars_from_bytes::<T>(&bytes, &mut chars);
let ncreq = n - chars.len();
// again we either know we need a certain number of bytes
// to complete a character, or we make sure we don't
// over-read by reading 1-byte per char needed
nbread = if ncreq > nbreq { ncreq } else { nbreq };
if nbread > 0 {
- buf = vec::slice(buf, offset, buf.len());
+ bytes = vec::slice(bytes, offset, bytes.len());
}
}
move chars
}
fn read_c_str() -> ~str {
- let mut buf: ~[u8] = ~[];
+ let mut bytes: ~[u8] = ~[];
loop {
let ch = self.read_byte();
- if ch < 1 { break; } else { buf.push(ch as u8); }
+ if ch < 1 { break; } else { bytes.push(ch as u8); }
}
- str::from_bytes(buf)
+ str::from_bytes(bytes)
}
// FIXME deal with eof? // #2004
}
fn read_whole_stream() -> ~[u8] {
- let mut buf: ~[u8] = ~[];
- while !self.eof() { buf.push_all(self.read_bytes(2048u)); }
- move buf
+ let mut bytes: ~[u8] = ~[];
+ while !self.eof() { bytes.push_all(self.read_bytes(2048u)); }
+ move bytes
}
fn each_byte(it: fn(int) -> bool) {
}
impl *libc::FILE: Reader {
- fn read(buf: &[mut u8], len: uint) -> uint {
- do vec::as_mut_buf(buf) |buf_p, buf_len| {
+ fn read(bytes: &[mut u8], len: uint) -> uint {
+ do vec::as_mut_buf(bytes) |buf_p, buf_len| {
assert buf_len <= len;
let count = libc::fread(buf_p as *mut c_void, 1u as size_t,
// duration of its lifetime.
// FIXME there really should be a better way to do this // #2004
impl<T: Reader, C> {base: T, cleanup: C}: Reader {
- fn read(buf: &[mut u8], len: uint) -> uint { self.base.read(buf, len) }
+ fn read(bytes: &[mut u8], len: uint) -> uint {
+ self.base.read(bytes, len)
+ }
fn read_byte() -> int { self.base.read_byte() }
fn unread_byte(byte: int) { self.base.unread_byte(byte); }
fn eof() -> bool { self.base.eof() }
}
-// Byte buffer readers
-
-pub type ByteBuf = {buf: &[const u8], mut pos: uint};
+// Byte readers
+pub struct BytesReader {
+ bytes: &[u8],
+ mut pos: uint
+}
-impl ByteBuf: Reader {
- fn read(buf: &[mut u8], len: uint) -> uint {
- let count = uint::min(len, self.buf.len() - self.pos);
+impl BytesReader: Reader {
+ fn read(bytes: &[mut u8], len: uint) -> uint {
+ let count = uint::min(len, self.bytes.len() - self.pos);
- let view = vec::const_view(self.buf, self.pos, self.buf.len());
- vec::bytes::memcpy(buf, view, count);
+ let view = vec::view(self.bytes, self.pos, self.bytes.len());
+ vec::bytes::memcpy(bytes, view, count);
self.pos += count;
count
}
fn read_byte() -> int {
- if self.pos == self.buf.len() { return -1; }
- let b = self.buf[self.pos];
+ if self.pos == self.bytes.len() { return -1; }
+ let b = self.bytes[self.pos];
self.pos += 1u;
return b as int;
}
// FIXME (#2738): implement this
fn unread_byte(_byte: int) { error!("Unimplemented: unread_byte"); fail; }
- fn eof() -> bool { self.pos == self.buf.len() }
+ fn eof() -> bool { self.pos == self.bytes.len() }
fn seek(offset: int, whence: SeekStyle) {
let pos = self.pos;
- self.pos = seek_in_buf(offset, pos, self.buf.len(), whence);
+ self.pos = seek_in_buf(offset, pos, self.bytes.len(), whence);
}
fn tell() -> uint { self.pos }
}
-pub fn with_bytes_reader<t>(bytes: &[u8], f: fn(Reader) -> t) -> t {
- f({buf: bytes, mut pos: 0u} as Reader)
+pub pure fn with_bytes_reader<t>(bytes: &[u8], f: fn(Reader) -> t) -> t {
+ f(BytesReader { bytes: bytes, pos: 0u } as Reader)
}
pub fn with_str_reader<T>(s: &str, f: fn(Reader) -> T) -> T {
self.write_str(&"\n");
}
fn write_int(n: int) {
- int::to_str_bytes(n, 10u, |buf| self.write(buf))
+ int::to_str_bytes(n, 10u, |bytes| self.write(bytes))
}
fn write_uint(n: uint) {
- uint::to_str_bytes(false, n, 10u, |buf| self.write(buf))
+ uint::to_str_bytes(false, n, 10u, |bytes| self.write(bytes))
}
fn write_le_uint(n: uint) {
u64_to_le_bytes(n as u64, uint::bytes, |v| self.write(v))
pub fn println(s: &str) { stdout().write_line(s); }
pub struct BytesWriter {
- buf: DVec<u8>,
+ bytes: DVec<u8>,
mut pos: uint,
}
impl BytesWriter: Writer {
fn write(v: &[const u8]) {
- do self.buf.swap |buf| {
- let mut buf <- buf;
+ do self.bytes.swap |bytes| {
+ let mut bytes <- bytes;
let v_len = v.len();
- let buf_len = buf.len();
+ let bytes_len = bytes.len();
- let count = uint::max(buf_len, self.pos + v_len);
- vec::reserve(&mut buf, count);
- unsafe { vec::raw::set_len(&mut buf, count); }
+ let count = uint::max(bytes_len, self.pos + v_len);
+ vec::reserve(&mut bytes, count);
+ unsafe { vec::raw::set_len(&mut bytes, count); }
{
- let view = vec::mut_view(buf, self.pos, count);
+ let view = vec::mut_view(bytes, self.pos, count);
vec::bytes::memcpy(view, v, v_len);
}
self.pos += v_len;
- move buf
+ move bytes
}
}
fn seek(offset: int, whence: SeekStyle) {
let pos = self.pos;
- let len = self.buf.len();
+ let len = self.bytes.len();
self.pos = seek_in_buf(offset, pos, len, whence);
}
fn tell() -> uint { self.pos }
fn get_type() -> WriterType { (*self).get_type() }
}
-pub fn BytesWriter() -> BytesWriter {
- BytesWriter { buf: DVec(), mut pos: 0u }
+pub pure fn BytesWriter() -> BytesWriter {
+ BytesWriter { bytes: DVec(), mut pos: 0u }
}
-pub fn with_bytes_writer(f: fn(Writer)) -> ~[u8] {
+pub pure fn with_bytes_writer(f: fn(Writer)) -> ~[u8] {
let wr = @BytesWriter();
f(wr as Writer);
- wr.buf.check_out(|buf| buf)
+ // FIXME (#3758): This should not be needed.
+ unsafe { wr.bytes.check_out(|bytes| move bytes) }
}
-pub fn with_str_writer(f: fn(Writer)) -> ~str {
+pub pure fn with_str_writer(f: fn(Writer)) -> ~str {
let mut v = with_bytes_writer(f);
- // Make sure the vector has a trailing null and is proper utf8.
- v.push(0);
+ // FIXME (#3758): This should not be needed.
+ unsafe {
+ // Make sure the vector has a trailing null and is proper utf8.
+ v.push(0);
+ }
assert str::is_utf8(v);
- unsafe { move ::cast::transmute(v) }
+ unsafe { move ::cast::transmute(move v) }
}
// Utility functions
fn bytes_buffer_overwrite() {
let wr = BytesWriter();
wr.write(~[0u8, 1u8, 2u8, 3u8]);
- assert wr.buf.borrow(|buf| buf == ~[0u8, 1u8, 2u8, 3u8]);
+ assert wr.bytes.borrow(|bytes| bytes == ~[0u8, 1u8, 2u8, 3u8]);
wr.seek(-2, SeekCur);
wr.write(~[4u8, 5u8, 6u8, 7u8]);
- assert wr.buf.borrow(|buf| buf == ~[0u8, 1u8, 4u8, 5u8, 6u8, 7u8]);
+ assert wr.bytes.borrow(|bytes| bytes ==
+ ~[0u8, 1u8, 4u8, 5u8, 6u8, 7u8]);
wr.seek(-2, SeekEnd);
wr.write(~[8u8]);
wr.seek(1, SeekSet);
wr.write(~[9u8]);
- assert wr.buf.borrow(|buf| buf == ~[0u8, 9u8, 4u8, 5u8, 8u8, 7u8]);
+ assert wr.bytes.borrow(|bytes| bytes ==
+ ~[0u8, 9u8, 4u8, 5u8, 8u8, 7u8]);
}
}
pub pure fn EACH<A>(self: &IMPL_T<A>, f: fn(v: &A) -> bool) {
let mut link = self.peek_n();
while option::is_some(&link) {
- let nobe = option::get(&link);
+ let nobe = option::get(link);
assert nobe.linked;
if !f(&nobe.data) { break; }
// Check (weakly) that the user didn't do a remove.
// Initial glob-exports mean that all the contents of all the modules
// wind up exported, if you're interested in writing platform-specific code.
-// FIXME (#2006): change these to glob-exports when sufficiently supported.
-
pub use types::common::c95::*;
pub use types::common::c99::*;
pub use types::common::posix88::*;
pub use size_t;
pub use c_float, c_double, c_void, FILE, fpos_t;
-pub use DIR, dirent;
+pub use DIR, dirent_t;
pub use c_char, c_schar, c_uchar;
pub use c_short, c_ushort, c_int, c_uint, c_long, c_ulong;
pub use size_t, ptrdiff_t, clock_t, time_t;
}
pub mod posix88 {
pub enum DIR {}
- pub enum dirent {}
+ pub enum dirent_t {}
}
pub mod posix01 {}
pub mod posix08 {}
pub extern mod dirent {
fn opendir(dirname: *c_char) -> *DIR;
fn closedir(dirp: *DIR) -> c_int;
- fn readdir(dirp: *DIR) -> *dirent;
+ fn readdir(dirp: *DIR) -> *dirent_t;
fn rewinddir(dirp: *DIR);
fn seekdir(dirp: *DIR, loc: c_long);
fn telldir(dirp: *DIR) -> c_long;
#[cfg(notest)]
#[lang="log_type"]
pub fn log_type<T>(level: u32, object: &T) {
- let bytes = do io::with_bytes_writer() |writer| {
+ let bytes = do io::with_bytes_writer |writer| {
repr::write_repr(writer, object);
};
unsafe {
pub type Mut<T> = Data<T>;
pub fn Mut<T>(t: T) -> Mut<T> {
- Data {value: t, mode: ReadOnly}
+ Data {value: move t, mode: ReadOnly}
}
pub fn unwrap<T>(m: Mut<T>) -> T {
// is in use, as that would be a move from a borrowed value.
assert (m.mode as uint) == (ReadOnly as uint);
let Data {value: move value, mode: _} = move m;
- return value;
+ move value
}
impl<T> Data<T> {
Some(T),
}
-pub pure fn get<T: Copy>(opt: &Option<T>) -> T {
+pub pure fn get<T: Copy>(opt: Option<T>) -> T {
/*!
Gets the value out of an option
case explicitly.
*/
- match *opt {
+ match opt {
Some(copy x) => return x,
None => fail ~"option::get none"
}
}
}
-pub pure fn expect<T: Copy>(opt: &Option<T>, reason: ~str) -> T {
+pub pure fn expect<T: Copy>(opt: Option<T>, reason: ~str) -> T {
/*!
* Gets the value out of an option, printing a specified message on
* failure
*
* Fails if the value equals `none`
*/
- match *opt { Some(copy x) => x, None => fail reason }
+ match opt { Some(copy x) => x, None => fail reason }
}
pub pure fn map<T, U>(opt: &Option<T>, f: fn(x: &T) -> U) -> Option<U> {
* function that returns an option.
*/
- // XXX write with move match
- if opt.is_some() {
- f(unwrap(opt))
- } else {
- None
+ match move opt {
+ Some(move t) => f(move t),
+ None => None
}
}
!is_none(opt)
}
-pub pure fn get_default<T: Copy>(opt: &Option<T>, def: T) -> T {
+pub pure fn get_default<T: Copy>(opt: Option<T>, def: T) -> T {
//! Returns the contained value or a default
- match *opt { Some(copy x) => x, None => def }
+ match opt { Some(copy x) => x, None => def }
}
pub pure fn map_default<T, U>(opt: &Option<T>, def: U,
pub pure fn unwrap_expect<T>(opt: Option<T>, reason: &str) -> T {
//! As unwrap, but with a specified failure message.
- if opt.is_none() { fail reason.to_unique(); }
+ if opt.is_none() { fail reason.to_owned(); }
unwrap(move opt)
}
Instead, prefer to use pattern matching and handle the `None`
case explicitly.
*/
- pure fn get() -> T { get(&self) }
- pure fn get_default(def: T) -> T { get_default(&self, def) }
+ pure fn get() -> T { get(self) }
+ pure fn get_default(def: T) -> T { get_default(self, def) }
/**
* Gets the value out of an option, printing a specified message on
* failure
*
* Fails if the value equals `none`
*/
- pure fn expect(reason: ~str) -> T { expect(&self, reason) }
+ pure fn expect(reason: ~str) -> T { expect(self, move reason) }
/// Applies a function zero or more times until the result is none.
pure fn while_some(blk: fn(v: T) -> Option<T>) { while_some(self, blk) }
}
fn test_unwrap_ptr() {
let x = ~0;
let addr_x = ptr::addr_of(&(*x));
- let opt = Some(x);
- let y = unwrap(opt);
+ let opt = Some(move x);
+ let y = unwrap(move opt);
let addr_y = ptr::addr_of(&(*y));
assert addr_x == addr_y;
}
let i = @mut 0;
{
let x = R(i);
- let opt = Some(x);
- let _y = unwrap(opt);
+ let opt = Some(move x);
+ let _y = unwrap(move opt);
}
assert *i == 1;
}
#[cfg(unix)]
#[allow(non_implicitly_copyable_typarams)]
fn lookup() -> Path {
- option::get_default(&getenv_nonempty("TMPDIR"),
+ option::get_default(getenv_nonempty("TMPDIR"),
Path("/tmp"))
}
#[allow(non_implicitly_copyable_typarams)]
fn lookup() -> Path {
option::get_default(
- &option::or(getenv_nonempty("TMP"),
+ option::or(getenv_nonempty("TMP"),
option::or(getenv_nonempty("TEMP"),
option::or(getenv_nonempty("USERPROFILE"),
getenv_nonempty("WINDIR")))),
for uint::range(0, argc as uint) |i| {
vec::push(&mut args, str::raw::from_c_str(*argv.offset(i)));
}
- return args;
+ move args
}
/**
let rng: rand::Rng = rand::Rng();
let n = ~"TEST" + rng.gen_str(10u);
assert getenv(n).is_none();
- n
+ move n
}
#[test]
let n = make_rand_name();
setenv(n, s);
log(debug, s);
- assert getenv(n) == option::Some(s);
+ assert getenv(n) == option::Some(move s);
}
#[test]
// MingW seems to set some funky environment variables like
// "=C:=C:\MinGW\msys\1.0\bin" and "!::=::\" that are returned
// from env() but not visible from getenv().
- assert v2.is_none() || v2 == option::Some(v);
+ assert v2.is_none() || v2 == option::Some(move v);
}
}
assert !vec::contains(e, &(copy n, ~"VALUE"));
e = env();
- assert vec::contains(e, &(n, ~"VALUE"));
+ assert vec::contains(e, &(move n, ~"VALUE"));
}
#[test]
}
impl PosixPath : ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
let mut s = ~"";
if self.is_absolute {
s += "/";
let mut components = str::split_nonempty(s, |c| c == '/');
let is_absolute = (s.len() != 0 && s[0] == '/' as u8);
return PosixPath { is_absolute: is_absolute,
- components: components }
+ components: move components }
}
pure fn dirname() -> ~str {
Some(ref f) => ~[copy *f]
};
return PosixPath { is_absolute: false,
- components: cs }
+ components: move cs }
}
pure fn push_rel(other: &PosixPath) -> PosixPath {
|c| windows::is_sep(c as u8));
unsafe { v.push_all_move(move ss); }
}
- PosixPath { components: move v, ..self }
+ PosixPath { is_absolute: self.is_absolute,
+ components: move v }
}
pure fn push(s: &str) -> PosixPath {
if cs.len() != 0 {
unsafe { cs.pop(); }
}
- return PosixPath { components: move cs, ..self }
+ return PosixPath {
+ is_absolute: self.is_absolute,
+ components: move cs
+ }
+ //..self }
}
pure fn normalize() -> PosixPath {
return PosixPath {
- components: normalize(self.components),
- ..self
+ is_absolute: self.is_absolute,
+ components: normalize(self.components)
+ // ..self
}
}
}
impl WindowsPath : ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
let mut s = ~"";
match self.host {
Some(ref h) => { s += "\\\\"; s += *h; }
let mut components =
str::split_nonempty(rest, |c| windows::is_sep(c as u8));
let is_absolute = (rest.len() != 0 && windows::is_sep(rest[0]));
- return WindowsPath { host: host,
- device: device,
+ return WindowsPath { host: move host,
+ device: move device,
is_absolute: is_absolute,
- components: components }
+ components: move components }
}
pure fn dirname() -> ~str {
return WindowsPath { host: None,
device: None,
is_absolute: false,
- components: cs }
+ components: move cs }
}
pure fn push_rel(other: &WindowsPath) -> WindowsPath {
|c| windows::is_sep(c as u8));
unsafe { v.push_all_move(move ss); }
}
- return WindowsPath { components: move v, ..self }
+ // tedious, but as-is, we can't use ..self
+ return WindowsPath {
+ host: copy self.host,
+ device: copy self.device,
+ is_absolute: self.is_absolute,
+ components: move v
+ }
}
pure fn push(s: &str) -> WindowsPath {
if cs.len() != 0 {
unsafe { cs.pop(); }
}
- return WindowsPath { components: move cs, ..self }
+ return WindowsPath {
+ host: copy self.host,
+ device: copy self.device,
+ is_absolute: self.is_absolute,
+ components: move cs
+ }
}
pure fn normalize() -> WindowsPath {
return WindowsPath {
- components: normalize(self.components),
- ..self
+ host: copy self.host,
+ device: copy self.device,
+ is_absolute: self.is_absolute,
+ components: normalize(self.components)
}
}
}
atomic_add_acq(&mut b.header.ref_count, 1);
BufferResource {
- buffer: b
+ // tjc: ????
+ buffer: move b
}
}
let this = rustrt::rust_get_task();
rustrt::task_clear_event_reject(this);
rustrt::rust_task_ref(this);
+ debug!("blocked = %x this = %x", p.header.blocked_task as uint,
+ this as uint);
let old_task = swap_task(&mut p.header.blocked_task, this);
+ debug!("blocked = %x this = %x old_task = %x",
+ p.header.blocked_task as uint,
+ this as uint, old_task as uint);
assert old_task.is_null();
let mut first = true;
let mut count = SPIN_COUNT;
-> Either<(Option<A>, RecvPacketBuffered<B, Bb>),
(RecvPacketBuffered<A, Ab>, Option<B>)>
{
- let i = wait_many([a.header(), b.header()]/_);
+ let i = wait_many([a.header(), b.header()]);
match i {
0 => Left((try_recv(move a), move b)),
/// Returns 0 or 1 depending on which endpoint is ready to receive
pub fn select2i<A: Selectable, B: Selectable>(a: &A, b: &B) ->
Either<(), ()> {
- match wait_many([a.header(), b.header()]/_) {
+ match wait_many([a.header(), b.header()]) {
0 => Left(()),
1 => Right(()),
_ => fail ~"wait returned unexpected index"
c1.send(~"abc");
- match (p1, p2).select() {
+ match (move p1, move p2).select() {
Right(_) => fail,
_ => ()
}
pub fn test_oneshot() {
let (c, p) = oneshot::init();
- oneshot::client::send(c, ());
+ oneshot::client::send(move c, ());
- recv_one(p)
+ recv_one(move p)
}
}
rc.data = ptr::null();
// Step 1 - drop our own reference.
let new_count = rustrt::rust_atomic_decrement(&mut ptr.count);
- assert new_count >= 0;
+ // assert new_count >= 0;
if new_count == 0 {
// We were the last owner. Can unwrap immediately.
// Also we have to free the server endpoints.
pub fn exclusive<T:Send >(user_data: T) -> Exclusive<T> {
let data = ExData {
- lock: LittleLock(), mut failed: false, mut data: user_data
+ lock: LittleLock(), mut failed: false, mut data: move user_data
};
Exclusive { x: unsafe { shared_mutable_state(move data) } }
}
}
}
-// FIXME(#2585) make this a by-move method on the exclusive
+// FIXME(#3724) make this a by-move method on the exclusive
pub fn unwrap_exclusive<T: Send>(arc: Exclusive<T>) -> T {
let Exclusive { x: x } <- arc;
let inner = unsafe { unwrap_shared_mutable_state(move x) };
pub fn exclusive_arc() {
let mut futures = ~[];
- let num_tasks = 10u;
- let count = 10u;
+ let num_tasks = 10;
+ let count = 10;
- let total = exclusive(~mut 0u);
+ let total = exclusive(~mut 0);
- for uint::range(0u, num_tasks) |_i| {
+ for uint::range(0, num_tasks) |_i| {
let total = total.clone();
- futures.push(future::spawn(|| {
- for uint::range(0u, count) |_i| {
+ futures.push(future::spawn(|move total| {
+ for uint::range(0, count) |_i| {
do total.with |count| {
- **count += 1u;
+ **count += 1;
}
}
}));
// accesses will also fail.
let x = exclusive(1);
let x2 = x.clone();
- do task::try {
+ do task::try |move x2| {
do x2.with |one| {
assert *one == 2;
}
#[test]
pub fn exclusive_unwrap_basic() {
let x = exclusive(~~"hello");
- assert unwrap_exclusive(x) == ~~"hello";
+ assert unwrap_exclusive(move x) == ~~"hello";
}
#[test]
pub fn exclusive_unwrap_contended() {
let x = exclusive(~~"hello");
let x2 = ~mut Some(x.clone());
- do task::spawn {
+ do task::spawn |move x2| {
let x2 = option::swap_unwrap(x2);
do x2.with |_hello| { }
task::yield();
}
- assert unwrap_exclusive(x) == ~~"hello";
+ assert unwrap_exclusive(move x) == ~~"hello";
// Now try the same thing, but with the child task blocking.
let x = exclusive(~~"hello");
let x2 = ~mut Some(x.clone());
let mut res = None;
- do task::task().future_result(|+r| res = Some(r)).spawn {
+ do task::task().future_result(|+r| res = Some(move r)).spawn
+ |move x2| {
let x2 = option::swap_unwrap(x2);
- assert unwrap_exclusive(x2) == ~~"hello";
+ assert unwrap_exclusive(move x2) == ~~"hello";
}
// Have to get rid of our reference before blocking.
{ let _x = move x; } // FIXME(#3161) util::ignore doesn't work here
let x = exclusive(~~"hello");
let x2 = ~mut Some(x.clone());
let mut res = None;
- do task::task().future_result(|+r| res = Some(r)).spawn {
+ do task::task().future_result(|+r| res = Some(move r)).spawn
+ |move x2| {
let x2 = option::swap_unwrap(x2);
- assert unwrap_exclusive(x2) == ~~"hello";
+ assert unwrap_exclusive(move x2) == ~~"hello";
}
- assert unwrap_exclusive(x) == ~~"hello";
+ assert unwrap_exclusive(move x) == ~~"hello";
let res = option::swap_unwrap(&mut res);
future::get(&res);
}
for 10.times { task::yield(); } // try to let the unwrapper go
fail; // punt it awake from its deadlock
}
- let _z = unwrap_exclusive(x);
+ let _z = unwrap_exclusive(move x);
do x2.with |_hello| { }
};
assert result.is_err();
}
fn visit_unboxed_vec(mtbl: uint, inner: *TyDesc) -> bool {
- self.align_to::<vec::raw::UnboxedVecRepr>();
+ self.align_to::<vec::UnboxedVecRepr>();
if ! self.inner.visit_vec(mtbl, inner) { return false; }
true
}
use cast::transmute;
use intrinsic::{TyDesc, TyVisitor, visit_tydesc};
use reflect::{MovePtr, MovePtrAdaptor};
-use vec::raw::{VecRepr, UnboxedVecRepr, SliceRepr};
+use vec::UnboxedVecRepr;
+use vec::raw::{VecRepr, SliceRepr};
pub use box::raw::BoxRepr;
use box::raw::BoxHeaderRepr;
fn visit_ptr_inner(ptr: *c_void, inner: *TyDesc) -> bool {
let mut u = ReprVisitor(ptr, self.writer);
let v = reflect::MovePtrAdaptor(move u);
- visit_tydesc(inner, v as @TyVisitor);
+ visit_tydesc(inner, (move v) as @TyVisitor);
true
}
fn visit_unboxed_vec(mtbl: uint, inner: *TyDesc) -> bool {
- do self.get::<vec::raw::UnboxedVecRepr> |b| {
+ do self.get::<vec::UnboxedVecRepr> |b| {
self.write_unboxed_vec_repr(mtbl, b, inner);
}
}
let tydesc = intrinsic::get_tydesc::<T>();
let mut u = ReprVisitor(ptr, writer);
let v = reflect::MovePtrAdaptor(move u);
- visit_tydesc(tydesc, v as @TyVisitor)
+ visit_tydesc(tydesc, (move v) as @TyVisitor)
}
#[test]
unsafe {
self.align(sys::min_align_of::<T>());
let value_addr: &T = transmute(copy self.ptr);
- (*value_addr).write_repr(self.writer);
+ value_addr.write_repr(self.writer);
self.bump(sys::size_of::<T>());
true
}
unsafe {
let ptr = ptr::to_unsafe_ptr(object) as *c_void;
let tydesc = sys::get_type_desc::<T>();
- let tydesc = cast::transmute(tydesc);
+ let tydesc = cast::transmute(move tydesc);
let repr_printer = @ReprPrinter {
ptr: ptr,
*/
pub fn chain<T, U: Copy, V: Copy>(res: Result<T, V>, op: fn(t: T)
-> Result<U, V>) -> Result<U, V> {
- // XXX: Should be writable with move + match
- if res.is_ok() {
- op(unwrap(res))
- } else {
- Err(unwrap_err(res))
+ match move res {
+ Ok(move t) => op(move t),
+ Err(move e) => Err(e)
}
}
fn ProgRes(r: ProgRepr) -> ProgRes {
ProgRes {
- r: r
+ r: move r
}
}
}
fn read_all(rd: io::Reader) -> ~str {
- let mut buf = ~"";
- while !rd.eof() {
- let bytes = rd.read_bytes(4096u);
- buf += str::from_bytes(bytes);
- }
- move buf
+ let buf = io::with_bytes_writer(|wr| {
+ let mut bytes = [mut 0, ..4096];
+ while !rd.eof() {
+ let nread = rd.read(bytes, bytes.len());
+ wr.write(bytes.view(0, nread));
+ }
+ });
+ str::from_bytes(buf)
}
/**
let stream = comm::recv(p);
match stream {
(1, copy s) => {
- outs = s;
+ outs = move s;
}
(2, copy s) => {
- errs = s;
+ errs = move s;
}
(n, _) => {
fail(fmt!("program_output received an unexpected file \
fn readclose(fd: c_int) -> ~str {
let file = os::fdopen(fd);
let reader = io::FILE_reader(file, false);
- let mut buf = ~"";
- while !reader.eof() {
- let bytes = reader.read_bytes(4096u);
- buf += str::from_bytes(bytes);
- }
+ let buf = io::with_bytes_writer(|writer| {
+ let mut bytes = [mut 0, ..4096];
+ while !reader.eof() {
+ let nread = reader.read(bytes, bytes.len());
+ writer.write(bytes.view(0, nread));
+ }
+ });
os::fclose(file);
- move buf
+ str::from_bytes(buf)
}
/// Waits for a process to exit and returns the exit code
fn insert(&mut self, k: K, +v: V) -> bool;
fn remove(&mut self, k: &K) -> bool;
+ fn pop(&mut self, k: &K) -> Option<V>;
+ fn swap(&mut self, k: K, +v: V) -> Option<V>;
+ fn consume(&mut self, f: fn(K, V));
fn clear(&mut self);
pure fn len(&const self) -> uint;
pure fn is_empty(&const self) -> bool;
debug!("insert fresh (%?->%?) at idx %?, hash %?",
k, v, idx, hash);
self.buckets[idx] = Some(Bucket {hash: hash,
- key: k,
- value: v});
+ key: move k,
+ value: move v});
self.size += 1;
true
}
debug!("insert overwrite (%?->%?) at idx %?, hash %?",
k, v, idx, hash);
self.buckets[idx] = Some(Bucket {hash: hash,
- key: k,
- value: v});
+ key: move k,
+ value: move v});
false
}
}
}
+ fn pop_internal(&mut self, hash: uint, k: &K) -> Option<V> {
+ // Removing from an open-addressed hashtable
+ // is, well, painful. The problem is that
+ // the entry may lie on the probe path for other
+ // entries, so removing it would make you think that
+ // those probe paths are empty.
+ //
+ // To address this we basically have to keep walking,
+ // re-inserting entries we find until we reach an empty
+ // bucket. We know we will eventually reach one because
+ // we insert one ourselves at the beginning (the removed
+ // entry).
+ //
+ // I found this explanation elucidating:
+ // http://www.maths.lse.ac.uk/Courses/MA407/del-hash.pdf
+ let mut idx = match self.bucket_for_key_with_hash(self.buckets,
+ hash, k) {
+ TableFull | FoundHole(_) => return None,
+ FoundEntry(idx) => idx
+ };
+
+ let len_buckets = self.buckets.len();
+ let mut bucket = None;
+ self.buckets[idx] <-> bucket;
+
+ let value = match move bucket {
+ None => None,
+ Some(move bucket) => {
+ let Bucket { value: move value, _ } = move bucket;
+ Some(move value)
+ },
+ };
+
+ idx = self.next_bucket(idx, len_buckets);
+ while self.buckets[idx].is_some() {
+ let mut bucket = None;
+ bucket <-> self.buckets[idx];
+ self.insert_opt_bucket(move bucket);
+ idx = self.next_bucket(idx, len_buckets);
+ }
+ self.size -= 1;
+
+ move value
+
+ }
+
fn search(&self,
hash: uint,
op: fn(x: &Option<Bucket<K,V>>) -> bool) {
}
fn remove(&mut self, k: &K) -> bool {
- // Removing from an open-addressed hashtable
- // is, well, painful. The problem is that
- // the entry may lie on the probe path for other
- // entries, so removing it would make you think that
- // those probe paths are empty.
- //
- // To address this we basically have to keep walking,
- // re-inserting entries we find until we reach an empty
- // bucket. We know we will eventually reach one because
- // we insert one ourselves at the beginning (the removed
- // entry).
- //
- // I found this explanation elucidating:
- // http://www.maths.lse.ac.uk/Courses/MA407/del-hash.pdf
+ match self.pop(k) {
+ Some(_) => true,
+ None => false,
+ }
+ }
- let mut idx = match self.bucket_for_key(self.buckets, k) {
- TableFull | FoundHole(_) => return false,
- FoundEntry(idx) => idx
- };
+ fn pop(&mut self, k: &K) -> Option<V> {
+ let hash = k.hash_keyed(self.k0, self.k1) as uint;
+ self.pop_internal(hash, k)
+ }
- let len_buckets = self.buckets.len();
- self.buckets[idx] = None;
- idx = self.next_bucket(idx, len_buckets);
- while self.buckets[idx].is_some() {
- let mut bucket = None;
- bucket <-> self.buckets[idx];
- self.insert_opt_bucket(move bucket);
- idx = self.next_bucket(idx, len_buckets);
+ fn swap(&mut self, k: K, v: V) -> Option<V> {
+ // this could be faster.
+ let hash = k.hash_keyed(self.k0, self.k1) as uint;
+ let old_value = self.pop_internal(hash, &k);
+
+ if self.size >= self.resize_at {
+ // n.b.: We could also do this after searching, so
+ // that we do not resize if this call to insert is
+ // simply going to update a key in place. My sense
+ // though is that it's worse to have to search through
+ // buckets to find the right spot twice than to just
+ // resize in this corner case.
+ self.expand();
+ }
+
+ self.insert_internal(hash, move k, move v);
+
+ move old_value
+ }
+
+ fn consume(&mut self, f: fn(K, V)) {
+ let mut buckets = ~[];
+ self.buckets <-> buckets;
+ self.size = 0;
+
+ do vec::consume(move buckets) |_i, bucket| {
+ match move bucket {
+ None => { },
+ Some(move bucket) => {
+ let Bucket {
+ key: move key,
+ value: move value,
+ _
+ } = move bucket;
+ f(move key, move value)
+ }
+ }
}
- self.size -= 1;
- return true;
}
fn clear(&mut self) {
}
option::unwrap(move value)
}
-
}
}
assert m.is_empty();
}
+ #[test]
+ pub fn pops() {
+ let mut m = ~LinearMap();
+ m.insert(1, 2);
+ assert m.pop(&1) == Some(2);
+ assert m.pop(&1) == None;
+ }
+
+ #[test]
+ pub fn swaps() {
+ let mut m = ~LinearMap();
+ assert m.swap(1, 2) == None;
+ assert m.swap(1, 3) == Some(2);
+ assert m.swap(1, 4) == Some(3);
+ }
+
+ #[test]
+ pub fn consumes() {
+ let mut m = ~LinearMap();
+ assert m.insert(1, 2);
+ assert m.insert(2, 3);
+ let mut m2 = ~LinearMap();
+ do m.consume |k, v| {
+ m2.insert(k, v);
+ }
+ assert m.len() == 0;
+ assert m2.len() == 2;
+ assert m2.find(&1) == Some(2);
+ assert m2.find(&2) == Some(3);
+ }
+
#[test]
pub fn iterate() {
let mut m = linear::linear_map_with_capacity(4);
}
/// Appends a character at the end of a string
-pub fn push_char(s: &const ~str, ch: char) {
+pub fn push_char(s: &mut ~str, ch: char) {
unsafe {
let code = ch as uint;
let nb = if code < max_one_b { 1u }
/// Appends a string slice to the back of a string, without overallocating
#[inline(always)]
-pub fn push_str_no_overallocate(lhs: &const ~str, rhs: &str) {
+pub fn push_str_no_overallocate(lhs: &mut ~str, rhs: &str) {
unsafe {
let llen = lhs.len();
let rlen = rhs.len();
}
/// Appends a string slice to the back of a string
#[inline(always)]
-pub fn push_str(lhs: &const ~str, rhs: &str) {
+pub fn push_str(lhs: &mut ~str, rhs: &str) {
unsafe {
let llen = lhs.len();
let rlen = rhs.len();
move s
}
+/// Given a string, make a new string with repeated copies of it
+pub fn repeat(ss: &str, nn: uint) -> ~str {
+ let mut acc = ~"";
+ for nn.times { acc += ss; }
+ move acc
+}
+
/*
Section: Adding to and removing from a string
*/
*
* If the string does not contain any characters
*/
-pub fn pop_char(s: &const ~str) -> char {
+pub fn pop_char(s: &mut ~str) -> char {
let end = len(*s);
assert end > 0u;
let {ch, prev} = char_range_at_reverse(*s, end);
split_nonempty(s, |c| char::is_whitespace(c))
}
+/** Split a string into a vector of substrings,
+ * each of which is less than a limit
+ */
+pub fn split_within(ss: &str, lim: uint) -> ~[~str] {
+ let words = str::words(ss);
+
+ // empty?
+ if words == ~[] { return ~[]; }
+
+ let mut rows : ~[~str] = ~[];
+ let mut row : ~str = ~"";
+
+ for words.each |wptr| {
+ let word = copy *wptr;
+
+ // if adding this word to the row would go over the limit,
+ // then start a new row
+ if row.len() + word.len() + 1 > lim {
+ rows.push(copy row); // save previous row
+ row = move word; // start a new one
+ } else {
+ if row.len() > 0 { row += ~" " } // separate words
+ row += word; // append to this row
+ }
+ }
+
+ // save the last row
+ if row != ~"" { rows.push(move row); }
+
+ move rows
+}
+
+
+
/// Convert a string to lowercase. ASCII only
pub pure fn to_lower(s: &str) -> ~str {
map(s,
*
* Alphanumeric characters are determined by `char::is_alphanumeric`
*/
-fn is_alphanumeric(s: &str) -> bool {
+pure fn is_alphanumeric(s: &str) -> bool {
return all(s, char::is_alphanumeric);
}
move buf
}
+pub pure fn with_capacity(capacity: uint) -> ~str {
+ let mut buf = ~"";
+ unsafe { reserve(&mut buf, capacity); }
+ move buf
+}
/**
* As char_len but for a slice of a string
* * s - A string
* * n - The number of bytes to reserve space for
*/
-pub fn reserve(s: &const ~str, n: uint) {
+pub fn reserve(s: &mut ~str, n: uint) {
unsafe {
- let v: *mut ~[u8] = cast::transmute(copy s);
+ let v: *mut ~[u8] = cast::transmute(s);
vec::reserve(&mut *v, n + 1);
}
}
* * s - A string
* * n - The number of bytes to reserve space for
*/
-pub fn reserve_at_least(s: &const ~str, n: uint) {
+pub fn reserve_at_least(s: &mut ~str, n: uint) {
reserve(s, uint::next_power_of_two(n + 1u) - 1u)
}
}
/// Converts a vector of bytes to a string.
- pub pub unsafe fn from_bytes(v: &[const u8]) -> ~str {
+ pub unsafe fn from_bytes(v: &[const u8]) -> ~str {
do vec::as_const_buf(v) |buf, len| {
from_buf_len(buf, len)
}
}
/// Appends a byte to a string. (Not UTF-8 safe).
- pub unsafe fn push_byte(s: &const ~str, b: u8) {
+ pub unsafe fn push_byte(s: &mut ~str, b: u8) {
reserve_at_least(s, s.len() + 1);
do as_buf(*s) |buf, len| {
let buf: *mut u8 = ::cast::reinterpret_cast(&buf);
}
/// Appends a vector of bytes to a string. (Not UTF-8 safe).
- unsafe fn push_bytes(s: &const ~str, bytes: &[u8]) {
+ unsafe fn push_bytes(s: &mut ~str, bytes: &[u8]) {
reserve_at_least(s, s.len() + bytes.len());
for vec::each(bytes) |byte| { push_byte(s, *byte); }
}
/// Removes the last byte from a string and returns it. (Not UTF-8 safe).
- pub unsafe fn pop_byte(s: &const ~str) -> u8 {
+ pub unsafe fn pop_byte(s: &mut ~str) -> u8 {
let len = len(*s);
assert (len > 0u);
let b = s[len - 1u];
}
/// Sets the length of the string and adds the null terminator
- pub unsafe fn set_len(v: &const ~str, new_len: uint) {
+ pub unsafe fn set_len(v: &mut ~str, new_len: uint) {
let v: **vec::raw::VecRepr = cast::transmute(copy v);
let repr: *vec::raw::VecRepr = *v;
(*repr).unboxed.fill = new_len + 1u;
}
-pub trait UniqueStr {
- fn trim() -> self;
- fn trim_left() -> self;
- fn trim_right() -> self;
+pub trait Trimmable {
+ pure fn trim() -> self;
+ pure fn trim_left() -> self;
+ pure fn trim_right() -> self;
}
/// Extension methods for strings
-impl ~str: UniqueStr {
+impl ~str: Trimmable {
/// Returns a string with leading and trailing whitespace removed
#[inline]
- fn trim() -> ~str { trim(self) }
+ pure fn trim() -> ~str { trim(self) }
/// Returns a string with leading whitespace removed
#[inline]
- fn trim_left() -> ~str { trim_left(self) }
+ pure fn trim_left() -> ~str { trim_left(self) }
/// Returns a string with trailing whitespace removed
#[inline]
- fn trim_right() -> ~str { trim_right(self) }
+ pure fn trim_right() -> ~str { trim_right(self) }
}
#[cfg(notest)]
pub mod traits {}
pub trait StrSlice {
- fn all(it: fn(char) -> bool) -> bool;
- fn any(it: fn(char) -> bool) -> bool;
- fn contains(needle: &a/str) -> bool;
- fn contains_char(needle: char) -> bool;
- fn each(it: fn(u8) -> bool);
- fn eachi(it: fn(uint, u8) -> bool);
- fn each_char(it: fn(char) -> bool);
- fn each_chari(it: fn(uint, char) -> bool);
- fn ends_with(needle: &str) -> bool;
- fn is_empty() -> bool;
- fn is_not_empty() -> bool;
- fn is_whitespace() -> bool;
- fn is_alphanumeric() -> bool;
+ pure fn all(it: fn(char) -> bool) -> bool;
+ pure fn any(it: fn(char) -> bool) -> bool;
+ pure fn contains(needle: &a/str) -> bool;
+ pure fn contains_char(needle: char) -> bool;
+ pure fn each(it: fn(u8) -> bool);
+ pure fn eachi(it: fn(uint, u8) -> bool);
+ pure fn each_char(it: fn(char) -> bool);
+ pure fn each_chari(it: fn(uint, char) -> bool);
+ pure fn ends_with(needle: &str) -> bool;
+ pure fn is_empty() -> bool;
+ pure fn is_not_empty() -> bool;
+ pure fn is_whitespace() -> bool;
+ pure fn is_alphanumeric() -> bool;
pure fn len() -> uint;
pure fn slice(begin: uint, end: uint) -> ~str;
- fn split(sepfn: fn(char) -> bool) -> ~[~str];
- fn split_char(sep: char) -> ~[~str];
- fn split_str(sep: &a/str) -> ~[~str];
- fn starts_with(needle: &a/str) -> bool;
- fn substr(begin: uint, n: uint) -> ~str;
+ pure fn split(sepfn: fn(char) -> bool) -> ~[~str];
+ pure fn split_char(sep: char) -> ~[~str];
+ pure fn split_str(sep: &a/str) -> ~[~str];
+ pure fn starts_with(needle: &a/str) -> bool;
+ pure fn substr(begin: uint, n: uint) -> ~str;
pure fn to_lower() -> ~str;
pure fn to_upper() -> ~str;
- fn escape_default() -> ~str;
- fn escape_unicode() -> ~str;
- pure fn to_unique() -> ~str;
+ pure fn escape_default() -> ~str;
+ pure fn escape_unicode() -> ~str;
+ pure fn trim() -> ~str;
+ pure fn trim_left() -> ~str;
+ pure fn trim_right() -> ~str;
+ pure fn to_owned() -> ~str;
+ pure fn to_managed() -> @str;
pure fn char_at(i: uint) -> char;
}
* contains no characters
*/
#[inline]
- fn all(it: fn(char) -> bool) -> bool { all(self, it) }
+ pure fn all(it: fn(char) -> bool) -> bool { all(self, it) }
/**
* Return true if a predicate matches any character (and false if it
* matches none or there are no characters)
*/
#[inline]
- fn any(it: fn(char) -> bool) -> bool { any(self, it) }
+ pure fn any(it: fn(char) -> bool) -> bool { any(self, it) }
/// Returns true if one string contains another
#[inline]
- fn contains(needle: &a/str) -> bool { contains(self, needle) }
+ pure fn contains(needle: &a/str) -> bool { contains(self, needle) }
/// Returns true if a string contains a char
#[inline]
- fn contains_char(needle: char) -> bool { contains_char(self, needle) }
+ pure fn contains_char(needle: char) -> bool {
+ contains_char(self, needle)
+ }
/// Iterate over the bytes in a string
#[inline]
- fn each(it: fn(u8) -> bool) { each(self, it) }
+ pure fn each(it: fn(u8) -> bool) { each(self, it) }
/// Iterate over the bytes in a string, with indices
#[inline]
- fn eachi(it: fn(uint, u8) -> bool) { eachi(self, it) }
+ pure fn eachi(it: fn(uint, u8) -> bool) { eachi(self, it) }
/// Iterate over the chars in a string
#[inline]
- fn each_char(it: fn(char) -> bool) { each_char(self, it) }
+ pure fn each_char(it: fn(char) -> bool) { each_char(self, it) }
/// Iterate over the chars in a string, with indices
#[inline]
- fn each_chari(it: fn(uint, char) -> bool) { each_chari(self, it) }
+ pure fn each_chari(it: fn(uint, char) -> bool) { each_chari(self, it) }
/// Returns true if one string ends with another
#[inline]
- fn ends_with(needle: &str) -> bool { ends_with(self, needle) }
+ pure fn ends_with(needle: &str) -> bool { ends_with(self, needle) }
/// Returns true if the string has length 0
#[inline]
- fn is_empty() -> bool { is_empty(self) }
+ pure fn is_empty() -> bool { is_empty(self) }
/// Returns true if the string has length greater than 0
#[inline]
- fn is_not_empty() -> bool { is_not_empty(self) }
+ pure fn is_not_empty() -> bool { is_not_empty(self) }
/**
* Returns true if the string contains only whitespace
*
* Whitespace characters are determined by `char::is_whitespace`
*/
#[inline]
- fn is_whitespace() -> bool { is_whitespace(self) }
+ pure fn is_whitespace() -> bool { is_whitespace(self) }
/**
* Returns true if the string contains only alphanumerics
*
* Alphanumeric characters are determined by `char::is_alphanumeric`
*/
#[inline]
- fn is_alphanumeric() -> bool { is_alphanumeric(self) }
+ pure fn is_alphanumeric() -> bool { is_alphanumeric(self) }
#[inline]
/// Returns the size in bytes not counting the null terminator
pure fn len() -> uint { len(self) }
pure fn slice(begin: uint, end: uint) -> ~str { slice(self, begin, end) }
/// Splits a string into substrings using a character function
#[inline]
- fn split(sepfn: fn(char) -> bool) -> ~[~str] { split(self, sepfn) }
+ pure fn split(sepfn: fn(char) -> bool) -> ~[~str] { split(self, sepfn) }
/**
* Splits a string into substrings at each occurrence of a given character
*/
#[inline]
- fn split_char(sep: char) -> ~[~str] { split_char(self, sep) }
+ pure fn split_char(sep: char) -> ~[~str] { split_char(self, sep) }
/**
* Splits a string into a vector of the substrings separated by a given
* string
*/
#[inline]
- fn split_str(sep: &a/str) -> ~[~str] { split_str(self, sep) }
+ pure fn split_str(sep: &a/str) -> ~[~str] { split_str(self, sep) }
/// Returns true if one string starts with another
#[inline]
- fn starts_with(needle: &a/str) -> bool { starts_with(self, needle) }
+ pure fn starts_with(needle: &a/str) -> bool { starts_with(self, needle) }
/**
* Take a substring of another.
*
* `begin`.
*/
#[inline]
- fn substr(begin: uint, n: uint) -> ~str { substr(self, begin, n) }
+ pure fn substr(begin: uint, n: uint) -> ~str { substr(self, begin, n) }
/// Convert a string to lowercase
#[inline]
pure fn to_lower() -> ~str { to_lower(self) }
pure fn to_upper() -> ~str { to_upper(self) }
/// Escape each char in `s` with char::escape_default.
#[inline]
- fn escape_default() -> ~str { escape_default(self) }
+ pure fn escape_default() -> ~str { escape_default(self) }
/// Escape each char in `s` with char::escape_unicode.
#[inline]
- fn escape_unicode() -> ~str { escape_unicode(self) }
+ pure fn escape_unicode() -> ~str { escape_unicode(self) }
+ /// Returns a string with leading and trailing whitespace removed
+ #[inline]
+ pure fn trim() -> ~str { trim(self) }
+ /// Returns a string with leading whitespace removed
#[inline]
- pure fn to_unique() -> ~str { self.slice(0, self.len()) }
+ pure fn trim_left() -> ~str { trim_left(self) }
+ /// Returns a string with trailing whitespace removed
+ #[inline]
+ pure fn trim_right() -> ~str { trim_right(self) }
+
+ #[inline]
+ pure fn to_owned() -> ~str { self.slice(0, self.len()) }
+
+ #[inline]
+ pure fn to_managed() -> @str {
+ let v = at_vec::from_fn(self.len() + 1, |i| {
+ if i == self.len() { 0 } else { self[i] }
+ });
+ unsafe { ::cast::transmute(v) }
+ }
#[inline]
pure fn char_at(i: uint) -> char { char_at(self, i) }
assert ~[] == words(~"");
}
+ #[test]
+ fn test_split_within() {
+ assert split_within(~"", 0) == ~[];
+ assert split_within(~"", 15) == ~[];
+ assert split_within(~"hello", 15) == ~[~"hello"];
+
+ let data = ~"\nMary had a little lamb\nLittle lamb\n";
+ error!("~~~~ %?", split_within(data, 15));
+ assert split_within(data, 15) == ~[~"Mary had a",
+ ~"little lamb",
+ ~"Little lamb"];
+ }
+
#[test]
fn test_find_str() {
// byte positions
t(~[~"hi"], ~" ", ~"hi");
}
+ #[test]
+ fn test_repeat() {
+ assert repeat(~"x", 4) == ~"xxxx";
+ assert repeat(~"hi", 4) == ~"hihihihi";
+ assert repeat(~"ไท华", 3) == ~"ไท华ไท华ไท华";
+ assert repeat(~"", 4) == ~"";
+ assert repeat(~"hi", 0) == ~"";
+ }
+
#[test]
fn test_to_upper() {
// libc::toupper, and hence str::to_upper
#[test]
fn test_unsafe_slice() {
unsafe {
- assert ~"ab" == raw::slice_bytes(~"abc", 0u, 2u);
- assert ~"bc" == raw::slice_bytes(~"abc", 1u, 3u);
- assert ~"" == raw::slice_bytes(~"abc", 1u, 1u);
+ assert ~"ab" == raw::slice_bytes(~"abc", 0, 2);
+ assert ~"bc" == raw::slice_bytes(~"abc", 1, 3);
+ assert ~"" == raw::slice_bytes(~"abc", 1, 1);
fn a_million_letter_a() -> ~str {
let mut i = 0;
let mut rs = ~"";
while i < 100000 { push_str(&mut rs, ~"aaaaaaaaaa"); i += 1; }
- return rs;
+ move rs
}
fn half_a_million_letter_a() -> ~str {
let mut i = 0;
let mut rs = ~"";
while i < 100000 { push_str(&mut rs, ~"aaaaa"); i += 1; }
- return rs;
+ move rs
}
assert half_a_million_letter_a() ==
- raw::slice_bytes(a_million_letter_a(), 0u, 500000u);
+ raw::slice_bytes(a_million_letter_a(), 0u, 500000);
}
}
#[test]
fn test_slice() {
- assert ~"ab" == slice(~"abc", 0u, 2u);
- assert ~"bc" == slice(~"abc", 1u, 3u);
- assert ~"" == slice(~"abc", 1u, 1u);
- assert ~"\u65e5" == slice(~"\u65e5\u672c", 0u, 3u);
+ assert ~"ab" == slice(~"abc", 0, 2);
+ assert ~"bc" == slice(~"abc", 1, 3);
+ assert ~"" == slice(~"abc", 1, 1);
+ assert ~"\u65e5" == slice(~"\u65e5\u672c", 0, 3);
let data = ~"ประเทศไทย中华";
- assert ~"ป" == slice(data, 0u, 3u);
- assert ~"ร" == slice(data, 3u, 6u);
- assert ~"" == slice(data, 3u, 3u);
- assert ~"华" == slice(data, 30u, 33u);
+ assert ~"ป" == slice(data, 0, 3);
+ assert ~"ร" == slice(data, 3, 6);
+ assert ~"" == slice(data, 3, 3);
+ assert ~"华" == slice(data, 30, 33);
fn a_million_letter_X() -> ~str {
let mut i = 0;
push_str(&mut rs, ~"华华华华华华华华华华");
i += 1;
}
- return rs;
+ move rs
}
fn half_a_million_letter_X() -> ~str {
let mut i = 0;
let mut rs = ~"";
while i < 100000 { push_str(&mut rs, ~"华华华华华"); i += 1; }
- return rs;
+ move rs
}
assert half_a_million_letter_X() ==
slice(a_million_letter_X(), 0u, 3u * 500000u);
assert escape_default(~"\U0001d4ea\r") == ~"\\U0001d4ea\\r";
}
+ #[test]
+ fn test_to_managed() {
+ assert (~"abc").to_managed() == @"abc";
+ assert view("abcdef", 1, 5).to_managed() == @"bcde";
+ }
+
}
assert f(20) == 30;
- let original_closure: Closure = cast::transmute(f);
+ let original_closure: Closure = cast::transmute(move f);
let actual_function_pointer = original_closure.code;
let environment = original_closure.env;
env: environment
};
- let new_f: fn(int) -> int = cast::transmute(new_closure);
+ let new_f: fn(int) -> int = cast::transmute(move new_closure);
assert new_f(20) == 30;
}
}
// sidestep that whole issue by making builders uncopyable and making
// the run function move them in.
-// FIXME (#2585): Replace the 'consumed' bit with move mode on self
+// FIXME (#3724): Replace the 'consumed' bit with move mode on self
pub enum TaskBuilder = {
opts: TaskOpts,
gen_body: fn@(v: fn~()) -> fn~(),
* Fails if a future_result was already set for this task.
*/
fn future_result(blk: fn(v: future::Future<TaskResult>)) -> TaskBuilder {
- // FIXME (#1087, #1857): Once linked failure and notification are
+ // FIXME (#3725): Once linked failure and notification are
// handled in the library, I can imagine implementing this by just
// registering an arbitrary number of task::on_exit handlers and
// sending out messages.
mut notify_chan: move notify_chan,
sched: self.opts.sched
},
- gen_body: |body| { wrapper(prev_gen_body(move body)) },
+ // tjc: I think this is the line that gets miscompiled
+ // w/ last-use off, if we leave out the move prev_gen_body?
+ // that makes no sense, though...
+ gen_body: |move prev_gen_body,
+ body| { wrapper(prev_gen_body(move body)) },
can_not_copy: None,
.. *self.consume()
})
fn test_cant_dup_task_builder() {
let b = task().unlinked();
do b.spawn { }
- // FIXME(#2585): For now, this is a -runtime- failure, because we haven't
- // got modes on self. When 2585 is fixed, this test should fail to compile
- // instead, and should go in tests/compile-fail.
+ // FIXME(#3724): For now, this is a -runtime- failure, because we haven't
+ // got move mode on self. When 3724 is fixed, this test should fail to
+ // compile instead, and should go in tests/compile-fail.
do b.spawn { } // b should have been consumed by the previous call
}
let ch = comm::Chan(&po);
let b0 = task();
let b1 = do b0.add_wrapper |body| {
- fn~() {
+ fn~(move body) {
body();
comm::send(ch, ());
}
#[ignore(cfg(windows))]
fn test_future_result() {
let mut result = None;
- do task().future_result(|+r| { result = Some(r); }).spawn { }
- assert future::get(&option::unwrap(result)) == Success;
+ do task().future_result(|+r| { result = Some(move r); }).spawn { }
+ assert future::get(&option::unwrap(move result)) == Success;
result = None;
- do task().future_result(|+r| { result = Some(r); }).unlinked().spawn {
+ do task().future_result(|+r|
+ { result = Some(move r); }).unlinked().spawn {
fail;
}
- assert future::get(&option::unwrap(result)) == Failure;
+ assert future::get(&option::unwrap(move result)) == Failure;
}
#[test] #[should_fail] #[ignore(cfg(windows))]
let (recv_str, send_int) = do spawn_conversation |recv_int, send_str| {
let input = comm::recv(recv_int);
let output = int::str(input);
- comm::send(send_str, output);
+ comm::send(send_str, move output);
};
comm::send(send_int, 1);
assert comm::recv(recv_str) == ~"1";
let x = ~1;
let x_in_parent = ptr::addr_of(&(*x)) as uint;
- do spawnfn {
+ do spawnfn |move x| {
let x_in_child = ptr::addr_of(&(*x)) as uint;
comm::send(ch, x_in_child);
}
#[test]
fn test_avoid_copying_the_body_task_spawn() {
do avoid_copying_the_body |f| {
- do task().spawn {
+ do task().spawn |move f| {
f();
}
}
#[test]
fn test_avoid_copying_the_body_try() {
do avoid_copying_the_body |f| {
- do try {
+ do try |move f| {
f()
};
}
#[test]
fn test_avoid_copying_the_body_unlinked() {
do avoid_copying_the_body |f| {
- do spawn_unlinked {
+ do spawn_unlinked |move f| {
f();
}
}
// We want to do this after failing
do spawn_unlinked {
- for iter::repeat(10u) { yield() }
+ for iter::repeat(10) { yield() }
ch.send(());
}
unsafe {
do unkillable {
let p = ~0;
- let pp: *uint = cast::transmute(p);
+ let pp: *uint = cast::transmute(move p);
// If we are killed here then the box will leak
po.recv();
- let _p: ~int = cast::transmute(pp);
+ let _p: ~int = cast::transmute(move pp);
}
}
let (ch, po) = pipes::stream();
// We want to do this after failing
- do spawn_unlinked {
- for iter::repeat(10u) { yield() }
+ do spawn_unlinked |move ch| {
+ for iter::repeat(10) { yield() }
ch.send(());
}
do unkillable {
do unkillable {} // Here's the difference from the previous test.
let p = ~0;
- let pp: *uint = cast::transmute(p);
+ let pp: *uint = cast::transmute(move p);
// If we are killed here then the box will leak
po.recv();
- let _p: ~int = cast::transmute(pp);
+ let _p: ~int = cast::transmute(move pp);
}
}
fn test_sched_thread_per_core() {
let (chan, port) = pipes::stream();
- do spawn_sched(ThreadPerCore) {
+ do spawn_sched(ThreadPerCore) |move chan| {
let cores = rt::rust_num_threads();
let reported_threads = rt::rust_sched_threads();
assert(cores as uint == reported_threads as uint);
fn test_spawn_thread_on_demand() {
let (chan, port) = pipes::stream();
- do spawn_sched(ManualThreads(2)) {
+ do spawn_sched(ManualThreads(2)) |move chan| {
let max_threads = rt::rust_sched_threads();
assert(max_threads as int == 2);
let running_threads = rt::rust_sched_current_nonlazy_threads();
let (chan2, port2) = pipes::stream();
- do spawn() {
+ do spawn() |move chan2| {
chan2.send(());
}
TCB {
me: me,
- tasks: tasks,
- ancestors: ancestors,
+ tasks: move tasks,
+ ancestors: move ancestors,
is_main: is_main,
notifier: move notifier
}
fn AutoNotify(chan: Chan<Notification>) -> AutoNotify {
AutoNotify {
- notify_chan: chan,
+ notify_chan: move chan,
failed: true // Un-set above when taskgroup successfully made.
}
}
mut notify_chan: None,
.. default_task_opts()
};
- do spawn_raw(opts) {
+ do spawn_raw(move opts) {
fail;
}
}
notify_chan: Some(move notify_ch),
.. default_task_opts()
};
- do spawn_raw(opts) |move task_ch| {
+ do spawn_raw(move opts) |move task_ch| {
task_ch.send(get_task());
}
let task_ = task_po.recv();
let opts = {
linked: false,
- notify_chan: Some(notify_ch),
+ notify_chan: Some(move notify_ch),
.. default_task_opts()
};
- do spawn_raw(opts) {
+ do spawn_raw(move opts) |move task_ch| {
task_ch.send(get_task());
fail;
}
#[forbid(deprecated_mode)];
#[forbid(deprecated_pattern)];
-pub trait ToStr { fn to_str() -> ~str; }
+pub trait ToStr { pure fn to_str() -> ~str; }
impl int: ToStr {
- fn to_str() -> ~str { int::str(self) }
+ pure fn to_str() -> ~str { int::str(self) }
}
impl i8: ToStr {
- fn to_str() -> ~str { i8::str(self) }
+ pure fn to_str() -> ~str { i8::str(self) }
}
impl i16: ToStr {
- fn to_str() -> ~str { i16::str(self) }
+ pure fn to_str() -> ~str { i16::str(self) }
}
impl i32: ToStr {
- fn to_str() -> ~str { i32::str(self) }
+ pure fn to_str() -> ~str { i32::str(self) }
}
impl i64: ToStr {
- fn to_str() -> ~str { i64::str(self) }
+ pure fn to_str() -> ~str { i64::str(self) }
}
impl uint: ToStr {
- fn to_str() -> ~str { uint::str(self) }
+ pure fn to_str() -> ~str { uint::str(self) }
}
impl u8: ToStr {
- fn to_str() -> ~str { u8::str(self) }
+ pure fn to_str() -> ~str { u8::str(self) }
}
impl u16: ToStr {
- fn to_str() -> ~str { u16::str(self) }
+ pure fn to_str() -> ~str { u16::str(self) }
}
impl u32: ToStr {
- fn to_str() -> ~str { u32::str(self) }
+ pure fn to_str() -> ~str { u32::str(self) }
}
impl u64: ToStr {
- fn to_str() -> ~str { u64::str(self) }
+ pure fn to_str() -> ~str { u64::str(self) }
}
impl float: ToStr {
- fn to_str() -> ~str { float::to_str(self, 4u) }
+ pure fn to_str() -> ~str { float::to_str(self, 4u) }
}
impl f32: ToStr {
- fn to_str() -> ~str { float::to_str(self as float, 4u) }
+ pure fn to_str() -> ~str { float::to_str(self as float, 4u) }
}
impl f64: ToStr {
- fn to_str() -> ~str { float::to_str(self as float, 4u) }
+ pure fn to_str() -> ~str { float::to_str(self as float, 4u) }
}
impl bool: ToStr {
- fn to_str() -> ~str { bool::to_str(self) }
+ pure fn to_str() -> ~str { bool::to_str(self) }
}
impl (): ToStr {
- fn to_str() -> ~str { ~"()" }
+ pure fn to_str() -> ~str { ~"()" }
}
impl ~str: ToStr {
- fn to_str() -> ~str { copy self }
+ pure fn to_str() -> ~str { copy self }
}
impl &str: ToStr {
- fn to_str() -> ~str { str::from_slice(self) }
+ pure fn to_str() -> ~str { str::from_slice(self) }
}
impl @str: ToStr {
- fn to_str() -> ~str { str::from_slice(self) }
+ pure fn to_str() -> ~str { str::from_slice(self) }
}
impl<A: ToStr Copy, B: ToStr Copy> (A, B): ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
let (a, b) = self;
~"(" + a.to_str() + ~", " + b.to_str() + ~")"
}
}
impl<A: ToStr Copy, B: ToStr Copy, C: ToStr Copy> (A, B, C): ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
let (a, b, c) = self;
~"(" + a.to_str() + ~", " + b.to_str() + ~", " + c.to_str() + ~")"
}
}
impl<A: ToStr> ~[A]: ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str unsafe {
+ // Bleh -- not really unsafe
+ // push_str and push_char
let mut acc = ~"[", first = true;
- for vec::each(self) |elt| {
+ for vec::each(self) |elt| unsafe {
if first { first = false; }
else { str::push_str(&mut acc, ~", "); }
str::push_str(&mut acc, elt.to_str());
}
impl<A: ToStr> @A: ToStr {
- fn to_str() -> ~str { ~"@" + (*self).to_str() }
+ pure fn to_str() -> ~str { ~"@" + (*self).to_str() }
}
impl<A: ToStr> ~A: ToStr {
- fn to_str() -> ~str { ~"~" + (*self).to_str() }
+ pure fn to_str() -> ~str { ~"~" + (*self).to_str() }
}
#[cfg(test)]
use cmp::{Eq, Ord};
-pub trait TupleOps<T,U> {
+pub trait CopyableTuple<T, U> {
pure fn first() -> T;
pure fn second() -> U;
pure fn swap() -> (U, T);
}
-impl<T: Copy, U: Copy> (T, U): TupleOps<T,U> {
+impl<T: Copy, U: Copy> (T, U): CopyableTuple<T, U> {
/// Return the first element of self
pure fn first() -> T {
}
+pub trait ImmutableTuple<T, U> {
+ pure fn first_ref(&self) -> &self/T;
+ pure fn second_ref(&self) -> &self/U;
+}
+
+impl<T, U> (T, U): ImmutableTuple<T, U> {
+ pure fn first_ref(&self) -> &self/T {
+ match *self {
+ (ref t, _) => t,
+ }
+ }
+ pure fn second_ref(&self) -> &self/U {
+ match *self {
+ (_, ref u) => u,
+ }
+ }
+}
+
pub trait ExtendedTupleOps<A,B> {
fn zip(&self) -> ~[(A, B)];
fn map<C>(&self, f: &fn(a: &A, b: &B) -> C) -> ~[C];
pure fn gt(other: &(A, B, C)) -> bool { (*other).lt(&self) }
}
+#[test]
+fn test_tuple_ref() {
+ let x = (~"foo", ~"bar");
+ assert x.first_ref() == &~"foo";
+ assert x.second_ref() == &~"bar";
+}
+
#[test]
#[allow(non_implicitly_copyable_typarams)]
fn test_tuple() {
// Enough room to hold any number in any radix.
// Worst case: 64-bit number, binary-radix, with
// a leading negative sign = 65 bytes.
- let buf : [mut u8]/65 =
- [mut
- 0u8,0u8,0u8,0u8,0u8, 0u8,0u8,0u8,0u8,0u8,
- 0u8,0u8,0u8,0u8,0u8, 0u8,0u8,0u8,0u8,0u8,
-
- 0u8,0u8,0u8,0u8,0u8, 0u8,0u8,0u8,0u8,0u8,
- 0u8,0u8,0u8,0u8,0u8, 0u8,0u8,0u8,0u8,0u8,
-
- 0u8,0u8,0u8,0u8,0u8, 0u8,0u8,0u8,0u8,0u8,
- 0u8,0u8,0u8,0u8,0u8, 0u8,0u8,0u8,0u8,0u8,
-
- 0u8,0u8,0u8,0u8,0u8
- ]/65;
+ let buf : [mut u8 * 65] = [mut 0u8, ..65];
// FIXME (#2649): post-snapshot, you can do this without the raw
// pointers and unsafe bits, and the codegen will prove it's all
}
/// Convert to a string
-pub fn str(i: T) -> ~str { return to_str(i, 10u); }
+pub pure fn str(i: T) -> ~str { return to_str(i, 10u); }
#[test]
pub fn test_to_str() {
let x = ~[(5, false)];
//FIXME #3387 assert x.eq(id(copy x));
let y = copy x;
- assert x.eq(&id(y));
+ assert x.eq(&id(move y));
}
#[test]
fn test_swap() {
*/
pub fn rsplit<T: Copy>(v: &[T], f: fn(t: &T) -> bool) -> ~[~[T]] {
let ln = len(v);
- if (ln == 0u) { return ~[] }
+ if (ln == 0) { return ~[] }
let mut end = ln;
let mut result = ~[];
- while end > 0u {
- match rposition_between(v, 0u, end, f) {
+ while end > 0 {
+ match rposition_between(v, 0, end, f) {
None => break,
Some(i) => {
- result.push(slice(v, i + 1u, end));
+ result.push(slice(v, i + 1, end));
end = i;
}
}
pub fn unshift<T>(v: &mut ~[T], x: T) {
let mut vv = ~[move x];
*v <-> vv;
- v.push_all_move(vv);
+ v.push_all_move(move vv);
}
pub fn consume<T>(v: ~[T], f: fn(uint, v: T)) unsafe {
}
pub fn consume_mut<T>(v: ~[mut T], f: fn(uint, v: T)) {
- consume(vec::from_mut(v), f)
+ consume(vec::from_mut(move v), f)
}
/// Remove the last element from a vector and return it
#[inline(always)]
pure fn append_mut<T: Copy>(lhs: ~[mut T], rhs: &[const T]) -> ~[mut T] {
- to_mut(append(from_mut(lhs), rhs))
+ to_mut(append(from_mut(move lhs), rhs))
}
/**
}
fn unshift(&mut self, x: T) {
- unshift(self, x)
+ unshift(self, move x)
}
fn swap_remove(&mut self, index: uint) -> T {
}
}
+
+/**
+* Constructs a vector from an unsafe pointer to a buffer
+*
+* # Arguments
+*
+* * ptr - An unsafe pointer to a buffer of `T`
+* * elts - The number of elements in the buffer
+*/
+// Wrapper for fn in raw: needs to be called by net_tcp::on_tcp_read_cb
+pub unsafe fn from_buf<T>(ptr: *T, elts: uint) -> ~[T] {
+ raw::from_buf_raw(ptr, elts)
+}
+
+/// The internal 'unboxed' representation of a vector
+pub struct UnboxedVecRepr {
+ mut fill: uint,
+ mut alloc: uint,
+ data: u8
+}
+
/// Unsafe operations
-pub mod raw {
- // FIXME: This should have crate visibility (#1893 blocks that)
+mod raw {
/// The internal representation of a (boxed) vector
pub struct VecRepr {
unboxed: UnboxedVecRepr
}
- /// The internal 'unboxed' representation of a vector
- pub struct UnboxedVecRepr {
- mut fill: uint,
- mut alloc: uint,
- data: u8
- }
-
pub type SliceRepr = {
mut data: *u8,
mut len: uint
};
- /**
- * Constructs a vector from an unsafe pointer to a buffer
- *
- * # Arguments
- *
- * * ptr - An unsafe pointer to a buffer of `T`
- * * elts - The number of elements in the buffer
- */
- #[inline(always)]
- pub unsafe fn from_buf<T>(ptr: *T, elts: uint) -> ~[T] {
- let mut dst = with_capacity(elts);
- set_len(&mut dst, elts);
- as_mut_buf(dst, |p_dst, _len_dst| ptr::memcpy(p_dst, ptr, elts));
- move dst
- }
-
/**
* Sets the length of a vector
*
}
}
+ /**
+ * Constructs a vector from an unsafe pointer to a buffer
+ *
+ * # Arguments
+ *
+ * * ptr - An unsafe pointer to a buffer of `T`
+ * * elts - The number of elements in the buffer
+ */
+ // Was in raw, but needs to be called by net_tcp::on_tcp_read_cb
+ #[inline(always)]
+ pub unsafe fn from_buf_raw<T>(ptr: *T, elts: uint) -> ~[T] {
+ let mut dst = with_capacity(elts);
+ set_len(&mut dst, elts);
+ as_mut_buf(dst, |p_dst, _len_dst| ptr::memcpy(p_dst, ptr, elts));
+ move dst
+ }
+
/**
* Copies data from one vector to another.
*
// Test on-stack copy-from-buf.
let a = ~[1, 2, 3];
let mut ptr = raw::to_ptr(a);
- let b = raw::from_buf(ptr, 3u);
+ let b = from_buf(ptr, 3u);
assert (len(b) == 3u);
assert (b[0] == 1);
assert (b[1] == 2);
// Test on-heap copy-from-buf.
let c = ~[1, 2, 3, 4, 5];
ptr = raw::to_ptr(c);
- let d = raw::from_buf(ptr, 5u);
+ let d = from_buf(ptr, 5u);
assert (len(d) == 5u);
assert (d[0] == 1);
assert (d[1] == 2);
#[test]
fn test_dedup() {
fn case(a: ~[uint], b: ~[uint]) {
- let mut v = a;
+ let mut v = move a;
v.dedup();
assert(v == b);
}
let v1 = ~[1, 2, 3];
let v2 = ~[4, 5, 6];
- let z1 = zip(v1, v2);
+ let z1 = zip(move v1, move v2);
assert ((1, 4) == z1[0]);
assert ((2, 5) == z1[1]);
assert ((3, 6) == z1[2]);
- let (left, right) = unzip(z1);
+ let (left, right) = unzip(move z1);
assert ((1, 4) == (left[0], right[0]));
assert ((2, 5) == (left[1], right[1]));
unsafe {
let x = ~[1, 2, 3];
let addr = raw::to_ptr(x);
- let x_mut = to_mut(x);
+ let x_mut = to_mut(move x);
let addr_mut = raw::to_ptr(x_mut);
assert addr == addr_mut;
}
unsafe {
let x = ~[mut 1, 2, 3];
let addr = raw::to_ptr(x);
- let x_imm = from_mut(x);
+ let x_imm = from_mut(move x);
let addr_imm = raw::to_ptr(x_imm);
assert addr == addr_imm;
}
fn test_consume_fail() {
let v = ~[(~0, @0), (~0, @0), (~0, @0), (~0, @0)];
let mut i = 0;
- do consume(v) |_i, _elt| {
+ do consume(move v) |_i, _elt| {
if i == 2 {
fail
}
fn test_consume_mut_fail() {
let v = ~[mut (~0, @0), (~0, @0), (~0, @0), (~0, @0)];
let mut i = 0;
- do consume_mut(v) |_i, _elt| {
+ do consume_mut(move v) |_i, _elt| {
if i == 2 {
fail
}
fn test_map_consume_fail() {
let v = ~[(~0, @0), (~0, @0), (~0, @0), (~0, @0)];
let mut i = 0;
- do map_consume(v) |_elt| {
+ do map_consume(move v) |_elt| {
if i == 2 {
fail
}
num_condvars: uint) -> MutexARC<T> {
let data =
MutexARCInner { lock: mutex_with_condvars(num_condvars),
- failed: false, data: user_data };
+ failed: false, data: move user_data };
MutexARC { x: unsafe { shared_mutable_state(move data) } }
}
*
* Will additionally fail if another task has failed while accessing the arc.
*/
-// FIXME(#2585) make this a by-move method on the arc
+// FIXME(#3724) make this a by-move method on the arc
pub fn unwrap_mutex_arc<T: Send>(arc: MutexARC<T>) -> T {
let MutexARC { x: x } <- arc;
let inner = unsafe { unwrap_shared_mutable_state(move x) };
num_condvars: uint) -> RWARC<T> {
let data =
RWARCInner { lock: rwlock_with_condvars(num_condvars),
- failed: false, data: user_data };
+ failed: false, data: move user_data };
RWARC { x: unsafe { shared_mutable_state(move data) }, cant_nest: () }
}
* Will additionally fail if another task has failed while accessing the arc
* in write mode.
*/
-// FIXME(#2585) make this a by-move method on the arc
+// FIXME(#3724) make this a by-move method on the arc
pub fn unwrap_rw_arc<T: Const Send>(arc: RWARC<T>) -> T {
let RWARC { x: x, _ } <- arc;
let inner = unsafe { unwrap_shared_mutable_state(move x) };
let (c, p) = pipes::stream();
- do task::spawn() {
+ do task::spawn() |move c| {
let p = pipes::PortSet();
c.send(p.chan());
let arc = ~MutexARC(false);
let arc2 = ~arc.clone();
let (c,p) = pipes::oneshot();
- let (c,p) = (~mut Some(c), ~mut Some(p));
- do task::spawn {
+ let (c,p) = (~mut Some(move c), ~mut Some(move p));
+ do task::spawn |move arc2, move p| {
// wait until parent gets in
pipes::recv_one(option::swap_unwrap(p));
do arc2.access_cond |state, cond| {
let arc2 = ~arc.clone();
let (c,p) = pipes::stream();
- do task::spawn_unlinked {
+ do task::spawn_unlinked |move arc2, move p| {
let _ = p.recv();
do arc2.access_cond |one, cond| {
cond.signal();
fn test_mutex_arc_poison() {
let arc = ~MutexARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.access |one| {
assert *one == 2;
}
let arc = MutexARC(1);
let arc2 = ~(&arc).clone();
let (c,p) = pipes::stream();
- do task::spawn {
+ do task::spawn |move c, move arc2| {
do arc2.access |one| {
c.send(());
assert *one == 2;
}
}
let _ = p.recv();
- let one = unwrap_mutex_arc(arc);
+ let one = unwrap_mutex_arc(move arc);
assert one == 1;
}
#[test] #[should_fail] #[ignore(cfg(windows))]
fn test_rw_arc_poison_wr() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.write |one| {
assert *one == 2;
}
fn test_rw_arc_poison_ww() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.write |one| {
assert *one == 2;
}
fn test_rw_arc_poison_dw() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.write_downgrade |write_mode| {
do (&write_mode).write |one| {
assert *one == 2;
fn test_rw_arc_no_poison_rr() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.read |one| {
assert *one == 2;
}
fn test_rw_arc_no_poison_rw() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.read |one| {
assert *one == 2;
}
fn test_rw_arc_no_poison_dr() {
let arc = ~RWARC(1);
let arc2 = ~arc.clone();
- do task::try {
+ do task::try |move arc2| {
do arc2.write_downgrade |write_mode| {
- let read_mode = arc2.downgrade(write_mode);
+ let read_mode = arc2.downgrade(move write_mode);
do (&read_mode).read |one| {
assert *one == 2;
}
let arc2 = ~arc.clone();
let (c,p) = pipes::stream();
- do task::spawn {
+ do task::spawn |move arc2, move c| {
do arc2.write |num| {
for 10.times {
let tmp = *num;
let mut children = ~[];
for 5.times {
let arc3 = ~arc.clone();
- do task::task().future_result(|+r| children.push(r)).spawn {
+ do task::task().future_result(|+r| children.push(move r)).spawn
+ |move arc3| {
do arc3.read |num| {
assert *num >= 0;
}
let mut reader_convos = ~[];
for 10.times {
let ((rc1,rp1),(rc2,rp2)) = (pipes::stream(),pipes::stream());
- reader_convos.push((rc1,rp2));
+ reader_convos.push((move rc1, move rp2));
let arcn = ~arc.clone();
- do task::spawn {
+ do task::spawn |move rp1, move rc2, move arcn| {
rp1.recv(); // wait for downgrader to give go-ahead
do arcn.read |state| {
assert *state == 31337;
// Writer task
let arc2 = ~arc.clone();
let ((wc1,wp1),(wc2,wp2)) = (pipes::stream(),pipes::stream());
- do task::spawn {
+ do task::spawn |move arc2, move wc2, move wp1| {
wp1.recv();
do arc2.write_cond |state, cond| {
assert *state == 0;
}
}
}
- let read_mode = arc.downgrade(write_mode);
+ let read_mode = arc.downgrade(move write_mode);
do (&read_mode).read |state| {
// complete handshake with other readers
for vec::each(reader_convos) |x| {
do arena.alloc { @i };
// Allocate something with funny size and alignment, to keep
// things interesting.
- do arena.alloc { [0u8, 1u8, 2u8]/3 };
+ do arena.alloc { [0u8, 1u8, 2u8] };
}
}
do arena.alloc { @i };
// Allocate something with funny size and alignment, to keep
// things interesting.
- do arena.alloc { [0u8, 1u8, 2u8]/3 };
+ do arena.alloc { [0u8, 1u8, 2u8] };
}
// Now, fail while allocating
do arena.alloc::<@int> {
}
fn BigBitv(storage: ~[mut uint]) -> BigBitv {
- BigBitv {storage: storage}
+ BigBitv {storage: move storage}
}
/**
let s = to_mut(from_elem(nelems, elem));
Big(~BigBitv(move s))
};
- Bitv {rep: rep, nbits: nbits}
+ Bitv {rep: move rep, nbits: nbits}
}
priv impl Bitv {
let st = to_mut(from_elem(self.nbits / uint_bits + 1, 0));
let len = st.len();
for uint::range(0, len) |i| { st[i] = b.storage[i]; };
- Bitv{nbits: self.nbits, rep: Big(~BigBitv{storage: st})}
+ Bitv{nbits: self.nbits, rep: Big(~BigBitv{storage: move st})}
}
}
}
let value = value_cell.take();
assert value == ~10;
assert value_cell.is_empty();
- value_cell.put_back(value);
+ value_cell.put_back(move value);
assert !value_cell.is_empty();
}
let (c2, p1) = pipes::stream();
let (c1, p2) = pipes::stream();
(DuplexStream {
- chan: c1,
- port: p1
+ chan: move c1,
+ port: move p1
},
DuplexStream {
- chan: c2,
- port: p2
+ chan: move c2,
+ port: move p2
})
}
#[forbid(deprecated_mode)];
+use serialization;
+
// Simple Extensible Binary Markup Language (ebml) reader and writer on a
// cursor model. See the specification here:
// http://www.matroska.org/technical/specs/rfc/index.html
-use core::Option;
-use option::{Some, None};
-type EbmlTag = {id: uint, size: uint};
+struct EbmlTag {
+ id: uint,
+ size: uint,
+}
-type EbmlState = {ebml_tag: EbmlTag, tag_pos: uint, data_pos: uint};
+struct EbmlState {
+ ebml_tag: EbmlTag,
+ tag_pos: uint,
+ data_pos: uint,
+}
// FIXME (#2739): When we have module renaming, make "reader" and "writer"
// separate modules within this file.
// ebml reading
-pub type Doc = {data: @~[u8], start: uint, end: uint};
+struct Doc {
+ data: @~[u8],
+ start: uint,
+ end: uint,
+}
-type TaggedDoc = {tag: uint, doc: Doc};
+struct TaggedDoc {
+ tag: uint,
+ doc: Doc,
+}
impl Doc: ops::Index<uint,Doc> {
pure fn index(tag: uint) -> Doc {
}
pub fn Doc(data: @~[u8]) -> Doc {
- return {data: data, start: 0u, end: vec::len::<u8>(*data)};
+ Doc { data: data, start: 0u, end: vec::len::<u8>(*data) }
}
pub fn doc_at(data: @~[u8], start: uint) -> TaggedDoc {
let elt_tag = vuint_at(*data, start);
let elt_size = vuint_at(*data, elt_tag.next);
let end = elt_size.next + elt_size.val;
- return {tag: elt_tag.val,
- doc: {data: data, start: elt_size.next, end: end}};
+ TaggedDoc {
+ tag: elt_tag.val,
+ doc: Doc { data: data, start: elt_size.next, end: end }
+ }
}
pub fn maybe_get_doc(d: Doc, tg: uint) -> Option<Doc> {
let elt_size = vuint_at(*d.data, elt_tag.next);
pos = elt_size.next + elt_size.val;
if elt_tag.val == tg {
- return Some::<Doc>({
- data: d.data,
- start: elt_size.next,
- end: pos
- });
+ return Some(Doc { data: d.data, start: elt_size.next, end: pos });
}
}
- return None::<Doc>;
+ None
}
pub fn get_doc(d: Doc, tg: uint) -> Doc {
match maybe_get_doc(d, tg) {
- Some(d) => return d,
+ Some(d) => d,
None => {
error!("failed to find block with tag %u", tg);
fail;
let elt_tag = vuint_at(*d.data, pos);
let elt_size = vuint_at(*d.data, elt_tag.next);
pos = elt_size.next + elt_size.val;
- if !it(elt_tag.val, {data: d.data, start: elt_size.next, end: pos}) {
+ let doc = Doc { data: d.data, start: elt_size.next, end: pos };
+ if !it(elt_tag.val, doc) {
break;
}
}
let elt_size = vuint_at(*d.data, elt_tag.next);
pos = elt_size.next + elt_size.val;
if elt_tag.val == tg {
- if !it({data: d.data, start: elt_size.next, end: pos}) {
+ let doc = Doc { data: d.data, start: elt_size.next, end: pos };
+ if !it(doc) {
break;
}
}
pub fn doc_data(d: Doc) -> ~[u8] { vec::slice::<u8>(*d.data, d.start, d.end) }
pub fn with_doc_data<T>(d: Doc, f: fn(x: &[u8]) -> T) -> T {
- return f(vec::view(*d.data, d.start, d.end));
+ f(vec::view(*d.data, d.start, d.end))
}
-pub fn doc_as_str(d: Doc) -> ~str { return str::from_bytes(doc_data(d)); }
+pub fn doc_as_str(d: Doc) -> ~str { str::from_bytes(doc_data(d)) }
pub fn doc_as_u8(d: Doc) -> u8 {
assert d.end == d.start + 1u;
- return (*d.data)[d.start];
+ (*d.data)[d.start]
}
pub fn doc_as_u16(d: Doc) -> u16 {
assert d.end == d.start + 2u;
- return io::u64_from_be_bytes(*d.data, d.start, 2u) as u16;
+ io::u64_from_be_bytes(*d.data, d.start, 2u) as u16
}
pub fn doc_as_u32(d: Doc) -> u32 {
assert d.end == d.start + 4u;
- return io::u64_from_be_bytes(*d.data, d.start, 4u) as u32;
+ io::u64_from_be_bytes(*d.data, d.start, 4u) as u32
}
pub fn doc_as_u64(d: Doc) -> u64 {
assert d.end == d.start + 8u;
- return io::u64_from_be_bytes(*d.data, d.start, 8u);
+ io::u64_from_be_bytes(*d.data, d.start, 8u)
}
pub fn doc_as_i8(d: Doc) -> i8 { doc_as_u8(d) as i8 }
pub fn doc_as_i64(d: Doc) -> i64 { doc_as_u64(d) as i64 }
// ebml writing
-type Writer_ = {writer: io::Writer, mut size_positions: ~[uint]};
-
-pub enum Writer {
- Writer_(Writer_)
+struct Serializer {
+ writer: io::Writer,
+ priv mut size_positions: ~[uint],
}
fn write_sized_vuint(w: io::Writer, n: uint, size: uint) {
fail fmt!("vint to write too big: %?", n);
}
-pub fn Writer(w: io::Writer) -> Writer {
+pub fn Serializer(w: io::Writer) -> Serializer {
let size_positions: ~[uint] = ~[];
- return Writer_({writer: w, mut size_positions: size_positions});
+ Serializer { writer: w, mut size_positions: size_positions }
}
// FIXME (#2741): Provide a function to write the standard ebml header.
-impl Writer {
+impl Serializer {
fn start_tag(tag_id: uint) {
debug!("Start tag %u", tag_id);
EsLabel // Used only when debugging
}
-trait SerializerPriv {
- fn _emit_tagged_uint(t: EbmlSerializerTag, v: uint);
- fn _emit_label(label: &str);
-}
-
-impl ebml::Writer: SerializerPriv {
+priv impl Serializer {
// used internally to emit things like the vector length and so on
fn _emit_tagged_uint(t: EbmlSerializerTag, v: uint) {
assert v <= 0xFFFF_FFFF_u;
}
}
-impl ebml::Writer {
- fn emit_opaque(f: fn()) {
+impl Serializer {
+ fn emit_opaque(&self, f: fn()) {
do self.wr_tag(EsOpaque as uint) {
f()
}
}
}
-impl ebml::Writer: serialization::Serializer {
- fn emit_nil() {}
+impl Serializer: serialization::Serializer {
+ fn emit_nil(&self) {}
- fn emit_uint(v: uint) { self.wr_tagged_u64(EsUint as uint, v as u64); }
- fn emit_u64(v: u64) { self.wr_tagged_u64(EsU64 as uint, v); }
- fn emit_u32(v: u32) { self.wr_tagged_u32(EsU32 as uint, v); }
- fn emit_u16(v: u16) { self.wr_tagged_u16(EsU16 as uint, v); }
- fn emit_u8(v: u8) { self.wr_tagged_u8 (EsU8 as uint, v); }
+ fn emit_uint(&self, v: uint) {
+ self.wr_tagged_u64(EsUint as uint, v as u64);
+ }
+ fn emit_u64(&self, v: u64) { self.wr_tagged_u64(EsU64 as uint, v); }
+ fn emit_u32(&self, v: u32) { self.wr_tagged_u32(EsU32 as uint, v); }
+ fn emit_u16(&self, v: u16) { self.wr_tagged_u16(EsU16 as uint, v); }
+ fn emit_u8(&self, v: u8) { self.wr_tagged_u8 (EsU8 as uint, v); }
- fn emit_int(v: int) { self.wr_tagged_i64(EsInt as uint, v as i64); }
- fn emit_i64(v: i64) { self.wr_tagged_i64(EsI64 as uint, v); }
- fn emit_i32(v: i32) { self.wr_tagged_i32(EsI32 as uint, v); }
- fn emit_i16(v: i16) { self.wr_tagged_i16(EsI16 as uint, v); }
- fn emit_i8(v: i8) { self.wr_tagged_i8 (EsI8 as uint, v); }
+ fn emit_int(&self, v: int) {
+ self.wr_tagged_i64(EsInt as uint, v as i64);
+ }
+ fn emit_i64(&self, v: i64) { self.wr_tagged_i64(EsI64 as uint, v); }
+ fn emit_i32(&self, v: i32) { self.wr_tagged_i32(EsI32 as uint, v); }
+ fn emit_i16(&self, v: i16) { self.wr_tagged_i16(EsI16 as uint, v); }
+ fn emit_i8(&self, v: i8) { self.wr_tagged_i8 (EsI8 as uint, v); }
- fn emit_bool(v: bool) { self.wr_tagged_u8(EsBool as uint, v as u8) }
+ fn emit_bool(&self, v: bool) {
+ self.wr_tagged_u8(EsBool as uint, v as u8)
+ }
// FIXME (#2742): implement these
- fn emit_f64(_v: f64) { fail ~"Unimplemented: serializing an f64"; }
- fn emit_f32(_v: f32) { fail ~"Unimplemented: serializing an f32"; }
- fn emit_float(_v: float) { fail ~"Unimplemented: serializing a float"; }
+ fn emit_f64(&self, _v: f64) { fail ~"Unimplemented: serializing an f64"; }
+ fn emit_f32(&self, _v: f32) { fail ~"Unimplemented: serializing an f32"; }
+ fn emit_float(&self, _v: float) {
+ fail ~"Unimplemented: serializing a float";
+ }
+
+ fn emit_char(&self, _v: char) {
+ fail ~"Unimplemented: serializing a char";
+ }
- fn emit_str(v: &str) { self.wr_tagged_str(EsStr as uint, v) }
+ fn emit_borrowed_str(&self, v: &str) {
+ self.wr_tagged_str(EsStr as uint, v)
+ }
- fn emit_enum(name: &str, f: fn()) {
+ fn emit_owned_str(&self, v: &str) {
+ self.emit_borrowed_str(v)
+ }
+
+ fn emit_managed_str(&self, v: &str) {
+ self.emit_borrowed_str(v)
+ }
+
+ fn emit_borrowed(&self, f: fn()) { f() }
+ fn emit_owned(&self, f: fn()) { f() }
+ fn emit_managed(&self, f: fn()) { f() }
+
+ fn emit_enum(&self, name: &str, f: fn()) {
self._emit_label(name);
self.wr_tag(EsEnum as uint, f)
}
- fn emit_enum_variant(_v_name: &str, v_id: uint, _cnt: uint, f: fn()) {
+ fn emit_enum_variant(&self, _v_name: &str, v_id: uint, _cnt: uint,
+ f: fn()) {
self._emit_tagged_uint(EsEnumVid, v_id);
self.wr_tag(EsEnumBody as uint, f)
}
- fn emit_enum_variant_arg(_idx: uint, f: fn()) { f() }
+ fn emit_enum_variant_arg(&self, _idx: uint, f: fn()) { f() }
- fn emit_vec(len: uint, f: fn()) {
+ fn emit_borrowed_vec(&self, len: uint, f: fn()) {
do self.wr_tag(EsVec as uint) {
self._emit_tagged_uint(EsVecLen, len);
f()
}
}
- fn emit_vec_elt(_idx: uint, f: fn()) {
+ fn emit_owned_vec(&self, len: uint, f: fn()) {
+ self.emit_borrowed_vec(len, f)
+ }
+
+ fn emit_managed_vec(&self, len: uint, f: fn()) {
+ self.emit_borrowed_vec(len, f)
+ }
+
+ fn emit_vec_elt(&self, _idx: uint, f: fn()) {
self.wr_tag(EsVecElt as uint, f)
}
- fn emit_box(f: fn()) { f() }
- fn emit_uniq(f: fn()) { f() }
- fn emit_rec(f: fn()) { f() }
- fn emit_rec_field(f_name: &str, _f_idx: uint, f: fn()) {
- self._emit_label(f_name);
+ fn emit_rec(&self, f: fn()) { f() }
+ fn emit_struct(&self, _name: &str, f: fn()) { f() }
+ fn emit_field(&self, name: &str, _idx: uint, f: fn()) {
+ self._emit_label(name);
f()
}
- fn emit_tup(_sz: uint, f: fn()) { f() }
- fn emit_tup_elt(_idx: uint, f: fn()) { f() }
-}
-type EbmlDeserializer_ = {mut parent: ebml::Doc,
- mut pos: uint};
+ fn emit_tup(&self, _len: uint, f: fn()) { f() }
+ fn emit_tup_elt(&self, _idx: uint, f: fn()) { f() }
+}
-pub enum EbmlDeserializer {
- EbmlDeserializer_(EbmlDeserializer_)
+struct Deserializer {
+ priv mut parent: Doc,
+ priv mut pos: uint,
}
-pub fn ebml_deserializer(d: ebml::Doc) -> EbmlDeserializer {
- EbmlDeserializer_({mut parent: d, mut pos: d.start})
+pub fn Deserializer(d: Doc) -> Deserializer {
+ Deserializer { mut parent: d, mut pos: d.start }
}
-priv impl EbmlDeserializer {
+priv impl Deserializer {
fn _check_label(lbl: &str) {
if self.pos < self.parent.end {
- let {tag: r_tag, doc: r_doc} =
- ebml::doc_at(self.parent.data, self.pos);
+ let TaggedDoc { tag: r_tag, doc: r_doc } =
+ doc_at(self.parent.data, self.pos);
+
if r_tag == (EsLabel as uint) {
self.pos = r_doc.end;
- let str = ebml::doc_as_str(r_doc);
+ let str = doc_as_str(r_doc);
if lbl != str {
fail fmt!("Expected label %s but found %s", lbl, str);
}
}
}
- fn next_doc(exp_tag: EbmlSerializerTag) -> ebml::Doc {
+ fn next_doc(exp_tag: EbmlSerializerTag) -> Doc {
debug!(". next_doc(exp_tag=%?)", exp_tag);
if self.pos >= self.parent.end {
fail ~"no more documents in current node!";
}
- let {tag: r_tag, doc: r_doc} =
- ebml::doc_at(self.parent.data, self.pos);
+ let TaggedDoc { tag: r_tag, doc: r_doc } =
+ doc_at(self.parent.data, self.pos);
debug!("self.parent=%?-%? self.pos=%? r_tag=%? r_doc=%?-%?",
copy self.parent.start, copy self.parent.end,
copy self.pos, r_tag, r_doc.start, r_doc.end);
r_doc.end, self.parent.end);
}
self.pos = r_doc.end;
- return r_doc;
+ r_doc
}
- fn push_doc<T>(d: ebml::Doc, f: fn() -> T) -> T{
+ fn push_doc<T>(d: Doc, f: fn() -> T) -> T{
let old_parent = self.parent;
let old_pos = self.pos;
self.parent = d;
}
fn _next_uint(exp_tag: EbmlSerializerTag) -> uint {
- let r = ebml::doc_as_u32(self.next_doc(exp_tag));
+ let r = doc_as_u32(self.next_doc(exp_tag));
debug!("_next_uint exp_tag=%? result=%?", exp_tag, r);
- return r as uint;
+ r as uint
}
}
-impl EbmlDeserializer {
- fn read_opaque<R>(op: fn(ebml::Doc) -> R) -> R {
+impl Deserializer {
+ fn read_opaque<R>(&self, op: fn(Doc) -> R) -> R {
do self.push_doc(self.next_doc(EsOpaque)) {
op(copy self.parent)
}
}
}
-impl EbmlDeserializer: serialization::Deserializer {
- fn read_nil() -> () { () }
+impl Deserializer: serialization::Deserializer {
+ fn read_nil(&self) -> () { () }
- fn read_u64() -> u64 { ebml::doc_as_u64(self.next_doc(EsU64)) }
- fn read_u32() -> u32 { ebml::doc_as_u32(self.next_doc(EsU32)) }
- fn read_u16() -> u16 { ebml::doc_as_u16(self.next_doc(EsU16)) }
- fn read_u8 () -> u8 { ebml::doc_as_u8 (self.next_doc(EsU8 )) }
- fn read_uint() -> uint {
- let v = ebml::doc_as_u64(self.next_doc(EsUint));
+ fn read_u64(&self) -> u64 { doc_as_u64(self.next_doc(EsU64)) }
+ fn read_u32(&self) -> u32 { doc_as_u32(self.next_doc(EsU32)) }
+ fn read_u16(&self) -> u16 { doc_as_u16(self.next_doc(EsU16)) }
+ fn read_u8 (&self) -> u8 { doc_as_u8 (self.next_doc(EsU8 )) }
+ fn read_uint(&self) -> uint {
+ let v = doc_as_u64(self.next_doc(EsUint));
if v > (core::uint::max_value as u64) {
fail fmt!("uint %? too large for this architecture", v);
}
- return v as uint;
+ v as uint
}
- fn read_i64() -> i64 { ebml::doc_as_u64(self.next_doc(EsI64)) as i64 }
- fn read_i32() -> i32 { ebml::doc_as_u32(self.next_doc(EsI32)) as i32 }
- fn read_i16() -> i16 { ebml::doc_as_u16(self.next_doc(EsI16)) as i16 }
- fn read_i8 () -> i8 { ebml::doc_as_u8 (self.next_doc(EsI8 )) as i8 }
- fn read_int() -> int {
- let v = ebml::doc_as_u64(self.next_doc(EsInt)) as i64;
+ fn read_i64(&self) -> i64 { doc_as_u64(self.next_doc(EsI64)) as i64 }
+ fn read_i32(&self) -> i32 { doc_as_u32(self.next_doc(EsI32)) as i32 }
+ fn read_i16(&self) -> i16 { doc_as_u16(self.next_doc(EsI16)) as i16 }
+ fn read_i8 (&self) -> i8 { doc_as_u8 (self.next_doc(EsI8 )) as i8 }
+ fn read_int(&self) -> int {
+ let v = doc_as_u64(self.next_doc(EsInt)) as i64;
if v > (int::max_value as i64) || v < (int::min_value as i64) {
fail fmt!("int %? out of range for this architecture", v);
}
- return v as int;
+ v as int
}
- fn read_bool() -> bool { ebml::doc_as_u8(self.next_doc(EsBool)) as bool }
+ fn read_bool(&self) -> bool { doc_as_u8(self.next_doc(EsBool)) as bool }
+
+ fn read_f64(&self) -> f64 { fail ~"read_f64()"; }
+ fn read_f32(&self) -> f32 { fail ~"read_f32()"; }
+ fn read_float(&self) -> float { fail ~"read_float()"; }
- fn read_f64() -> f64 { fail ~"read_f64()"; }
- fn read_f32() -> f32 { fail ~"read_f32()"; }
- fn read_float() -> float { fail ~"read_float()"; }
+ fn read_char(&self) -> char { fail ~"read_char()"; }
- fn read_str() -> ~str { ebml::doc_as_str(self.next_doc(EsStr)) }
+ fn read_owned_str(&self) -> ~str { doc_as_str(self.next_doc(EsStr)) }
+ fn read_managed_str(&self) -> @str { fail ~"read_managed_str()"; }
// Compound types:
- fn read_enum<T>(name: &str, f: fn() -> T) -> T {
+ fn read_owned<T>(&self, f: fn() -> T) -> T {
+ debug!("read_owned()");
+ f()
+ }
+
+ fn read_managed<T>(&self, f: fn() -> T) -> T {
+ debug!("read_managed()");
+ f()
+ }
+
+ fn read_enum<T>(&self, name: &str, f: fn() -> T) -> T {
debug!("read_enum(%s)", name);
self._check_label(name);
self.push_doc(self.next_doc(EsEnum), f)
}
- fn read_enum_variant<T>(f: fn(uint) -> T) -> T {
+ fn read_enum_variant<T>(&self, f: fn(uint) -> T) -> T {
debug!("read_enum_variant()");
let idx = self._next_uint(EsEnumVid);
debug!(" idx=%u", idx);
}
}
- fn read_enum_variant_arg<T>(idx: uint, f: fn() -> T) -> T {
+ fn read_enum_variant_arg<T>(&self, idx: uint, f: fn() -> T) -> T {
debug!("read_enum_variant_arg(idx=%u)", idx);
f()
}
- fn read_vec<T>(f: fn(uint) -> T) -> T {
- debug!("read_vec()");
+ fn read_owned_vec<T>(&self, f: fn(uint) -> T) -> T {
+ debug!("read_owned_vec()");
do self.push_doc(self.next_doc(EsVec)) {
let len = self._next_uint(EsVecLen);
debug!(" len=%u", len);
}
}
- fn read_vec_elt<T>(idx: uint, f: fn() -> T) -> T {
- debug!("read_vec_elt(idx=%u)", idx);
- self.push_doc(self.next_doc(EsVecElt), f)
+ fn read_managed_vec<T>(&self, f: fn(uint) -> T) -> T {
+ debug!("read_managed_vec()");
+ do self.push_doc(self.next_doc(EsVec)) {
+ let len = self._next_uint(EsVecLen);
+ debug!(" len=%u", len);
+ f(len)
+ }
}
- fn read_box<T>(f: fn() -> T) -> T {
- debug!("read_box()");
- f()
+ fn read_vec_elt<T>(&self, idx: uint, f: fn() -> T) -> T {
+ debug!("read_vec_elt(idx=%u)", idx);
+ self.push_doc(self.next_doc(EsVecElt), f)
}
- fn read_uniq<T>(f: fn() -> T) -> T {
- debug!("read_uniq()");
+ fn read_rec<T>(&self, f: fn() -> T) -> T {
+ debug!("read_rec()");
f()
}
- fn read_rec<T>(f: fn() -> T) -> T {
- debug!("read_rec()");
+ fn read_struct<T>(&self, name: &str, f: fn() -> T) -> T {
+ debug!("read_struct(name=%s)", name);
f()
}
- fn read_rec_field<T>(f_name: &str, f_idx: uint, f: fn() -> T) -> T {
- debug!("read_rec_field(%s, idx=%u)", f_name, f_idx);
- self._check_label(f_name);
+ fn read_field<T>(&self, name: &str, idx: uint, f: fn() -> T) -> T {
+ debug!("read_field(name=%s, idx=%u)", name, idx);
+ self._check_label(name);
f()
}
- fn read_tup<T>(sz: uint, f: fn() -> T) -> T {
- debug!("read_tup(sz=%u)", sz);
+ fn read_tup<T>(&self, len: uint, f: fn() -> T) -> T {
+ debug!("read_tup(len=%u)", len);
f()
}
- fn read_tup_elt<T>(idx: uint, f: fn() -> T) -> T {
+ fn read_tup_elt<T>(&self, idx: uint, f: fn() -> T) -> T {
debug!("read_tup_elt(idx=%u)", idx);
f()
}
}
-
// ___________________________________________________________________________
// Testing
-#[test]
-fn test_option_int() {
- fn serialize_1<S: serialization::Serializer>(s: &S, v: int) {
- s.emit_i64(v as i64);
- }
-
- fn serialize_0<S: serialization::Serializer>(s: &S, v: Option<int>) {
- do s.emit_enum(~"core::option::t") {
- match v {
- None => s.emit_enum_variant(
- ~"core::option::None", 0u, 0u, || { } ),
- Some(v0) => {
- do s.emit_enum_variant(~"core::option::some", 1u, 1u) {
- s.emit_enum_variant_arg(0u, || serialize_1(s, v0));
- }
- }
- }
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn test_option_int() {
+ fn test_v(v: Option<int>) {
+ debug!("v == %?", v);
+ let bytes = do io::with_bytes_writer |wr| {
+ let ebml_w = Serializer(wr);
+ v.serialize(&ebml_w)
+ };
+ let ebml_doc = Doc(@bytes);
+ let deser = Deserializer(ebml_doc);
+ let v1 = serialization::deserialize(&deser);
+ debug!("v1 == %?", v1);
+ assert v == v1;
}
- }
- fn deserialize_1<S: serialization::Deserializer>(s: &S) -> int {
- s.read_i64() as int
- }
-
- fn deserialize_0<S: serialization::Deserializer>(s: &S) -> Option<int> {
- do s.read_enum(~"core::option::t") {
- do s.read_enum_variant |i| {
- match i {
- 0 => None,
- 1 => {
- let v0 = do s.read_enum_variant_arg(0u) {
- deserialize_1(s)
- };
- Some(v0)
- }
- _ => {
- fail #fmt("deserialize_0: unexpected variant %u", i);
- }
- }
- }
- }
+ test_v(Some(22));
+ test_v(None);
+ test_v(Some(3));
}
-
- fn test_v(v: Option<int>) {
- debug!("v == %?", v);
- let bytes = do io::with_bytes_writer |wr| {
- let ebml_w = ebml::Writer(wr);
- serialize_0(&ebml_w, v);
- };
- let ebml_doc = ebml::Doc(@bytes);
- let deser = ebml_deserializer(ebml_doc);
- let v1 = deserialize_0(&deser);
- debug!("v1 == %?", v1);
- assert v == v1;
- }
-
- test_v(Some(22));
- test_v(None);
- test_v(Some(3));
}
+++ /dev/null
-#[forbid(deprecated_mode)];
-use serialization2;
-
-// Simple Extensible Binary Markup Language (ebml) reader and writer on a
-// cursor model. See the specification here:
-// http://www.matroska.org/technical/specs/rfc/index.html
-
-struct EbmlTag {
- id: uint,
- size: uint,
-}
-
-struct EbmlState {
- ebml_tag: EbmlTag,
- tag_pos: uint,
- data_pos: uint,
-}
-
-// FIXME (#2739): When we have module renaming, make "reader" and "writer"
-// separate modules within this file.
-
-// ebml reading
-struct Doc {
- data: @~[u8],
- start: uint,
- end: uint,
-}
-
-struct TaggedDoc {
- tag: uint,
- doc: Doc,
-}
-
-impl Doc: ops::Index<uint,Doc> {
- pure fn index(tag: uint) -> Doc {
- unsafe {
- get_doc(self, tag)
- }
- }
-}
-
-fn vuint_at(data: &[u8], start: uint) -> {val: uint, next: uint} {
- let a = data[start];
- if a & 0x80u8 != 0u8 {
- return {val: (a & 0x7fu8) as uint, next: start + 1u};
- }
- if a & 0x40u8 != 0u8 {
- return {val: ((a & 0x3fu8) as uint) << 8u |
- (data[start + 1u] as uint),
- next: start + 2u};
- } else if a & 0x20u8 != 0u8 {
- return {val: ((a & 0x1fu8) as uint) << 16u |
- (data[start + 1u] as uint) << 8u |
- (data[start + 2u] as uint),
- next: start + 3u};
- } else if a & 0x10u8 != 0u8 {
- return {val: ((a & 0x0fu8) as uint) << 24u |
- (data[start + 1u] as uint) << 16u |
- (data[start + 2u] as uint) << 8u |
- (data[start + 3u] as uint),
- next: start + 4u};
- } else { error!("vint too big"); fail; }
-}
-
-pub fn Doc(data: @~[u8]) -> Doc {
- Doc { data: data, start: 0u, end: vec::len::<u8>(*data) }
-}
-
-pub fn doc_at(data: @~[u8], start: uint) -> TaggedDoc {
- let elt_tag = vuint_at(*data, start);
- let elt_size = vuint_at(*data, elt_tag.next);
- let end = elt_size.next + elt_size.val;
- TaggedDoc {
- tag: elt_tag.val,
- doc: Doc { data: data, start: elt_size.next, end: end }
- }
-}
-
-pub fn maybe_get_doc(d: Doc, tg: uint) -> Option<Doc> {
- let mut pos = d.start;
- while pos < d.end {
- let elt_tag = vuint_at(*d.data, pos);
- let elt_size = vuint_at(*d.data, elt_tag.next);
- pos = elt_size.next + elt_size.val;
- if elt_tag.val == tg {
- return Some(Doc { data: d.data, start: elt_size.next, end: pos });
- }
- }
- None
-}
-
-pub fn get_doc(d: Doc, tg: uint) -> Doc {
- match maybe_get_doc(d, tg) {
- Some(d) => d,
- None => {
- error!("failed to find block with tag %u", tg);
- fail;
- }
- }
-}
-
-pub fn docs(d: Doc, it: fn(uint, Doc) -> bool) {
- let mut pos = d.start;
- while pos < d.end {
- let elt_tag = vuint_at(*d.data, pos);
- let elt_size = vuint_at(*d.data, elt_tag.next);
- pos = elt_size.next + elt_size.val;
- let doc = Doc { data: d.data, start: elt_size.next, end: pos };
- if !it(elt_tag.val, doc) {
- break;
- }
- }
-}
-
-pub fn tagged_docs(d: Doc, tg: uint, it: fn(Doc) -> bool) {
- let mut pos = d.start;
- while pos < d.end {
- let elt_tag = vuint_at(*d.data, pos);
- let elt_size = vuint_at(*d.data, elt_tag.next);
- pos = elt_size.next + elt_size.val;
- if elt_tag.val == tg {
- let doc = Doc { data: d.data, start: elt_size.next, end: pos };
- if !it(doc) {
- break;
- }
- }
- }
-}
-
-pub fn doc_data(d: Doc) -> ~[u8] { vec::slice::<u8>(*d.data, d.start, d.end) }
-
-pub fn with_doc_data<T>(d: Doc, f: fn(x: &[u8]) -> T) -> T {
- f(vec::view(*d.data, d.start, d.end))
-}
-
-pub fn doc_as_str(d: Doc) -> ~str { str::from_bytes(doc_data(d)) }
-
-pub fn doc_as_u8(d: Doc) -> u8 {
- assert d.end == d.start + 1u;
- (*d.data)[d.start]
-}
-
-pub fn doc_as_u16(d: Doc) -> u16 {
- assert d.end == d.start + 2u;
- io::u64_from_be_bytes(*d.data, d.start, 2u) as u16
-}
-
-pub fn doc_as_u32(d: Doc) -> u32 {
- assert d.end == d.start + 4u;
- io::u64_from_be_bytes(*d.data, d.start, 4u) as u32
-}
-
-pub fn doc_as_u64(d: Doc) -> u64 {
- assert d.end == d.start + 8u;
- io::u64_from_be_bytes(*d.data, d.start, 8u)
-}
-
-pub fn doc_as_i8(d: Doc) -> i8 { doc_as_u8(d) as i8 }
-pub fn doc_as_i16(d: Doc) -> i16 { doc_as_u16(d) as i16 }
-pub fn doc_as_i32(d: Doc) -> i32 { doc_as_u32(d) as i32 }
-pub fn doc_as_i64(d: Doc) -> i64 { doc_as_u64(d) as i64 }
-
-// ebml writing
-struct Serializer {
- writer: io::Writer,
- priv mut size_positions: ~[uint],
-}
-
-fn write_sized_vuint(w: io::Writer, n: uint, size: uint) {
- match size {
- 1u => w.write(&[0x80u8 | (n as u8)]),
- 2u => w.write(&[0x40u8 | ((n >> 8_u) as u8), n as u8]),
- 3u => w.write(&[0x20u8 | ((n >> 16_u) as u8), (n >> 8_u) as u8,
- n as u8]),
- 4u => w.write(&[0x10u8 | ((n >> 24_u) as u8), (n >> 16_u) as u8,
- (n >> 8_u) as u8, n as u8]),
- _ => fail fmt!("vint to write too big: %?", n)
- };
-}
-
-fn write_vuint(w: io::Writer, n: uint) {
- if n < 0x7f_u { write_sized_vuint(w, n, 1u); return; }
- if n < 0x4000_u { write_sized_vuint(w, n, 2u); return; }
- if n < 0x200000_u { write_sized_vuint(w, n, 3u); return; }
- if n < 0x10000000_u { write_sized_vuint(w, n, 4u); return; }
- fail fmt!("vint to write too big: %?", n);
-}
-
-pub fn Serializer(w: io::Writer) -> Serializer {
- let size_positions: ~[uint] = ~[];
- Serializer { writer: w, mut size_positions: size_positions }
-}
-
-// FIXME (#2741): Provide a function to write the standard ebml header.
-impl Serializer {
- fn start_tag(tag_id: uint) {
- debug!("Start tag %u", tag_id);
-
- // Write the enum ID:
- write_vuint(self.writer, tag_id);
-
- // Write a placeholder four-byte size.
- self.size_positions.push(self.writer.tell());
- let zeroes: &[u8] = &[0u8, 0u8, 0u8, 0u8];
- self.writer.write(zeroes);
- }
-
- fn end_tag() {
- let last_size_pos = self.size_positions.pop();
- let cur_pos = self.writer.tell();
- self.writer.seek(last_size_pos as int, io::SeekSet);
- let size = (cur_pos - last_size_pos - 4u);
- write_sized_vuint(self.writer, size, 4u);
- self.writer.seek(cur_pos as int, io::SeekSet);
-
- debug!("End tag (size = %u)", size);
- }
-
- fn wr_tag(tag_id: uint, blk: fn()) {
- self.start_tag(tag_id);
- blk();
- self.end_tag();
- }
-
- fn wr_tagged_bytes(tag_id: uint, b: &[u8]) {
- write_vuint(self.writer, tag_id);
- write_vuint(self.writer, vec::len(b));
- self.writer.write(b);
- }
-
- fn wr_tagged_u64(tag_id: uint, v: u64) {
- do io::u64_to_be_bytes(v, 8u) |v| {
- self.wr_tagged_bytes(tag_id, v);
- }
- }
-
- fn wr_tagged_u32(tag_id: uint, v: u32) {
- do io::u64_to_be_bytes(v as u64, 4u) |v| {
- self.wr_tagged_bytes(tag_id, v);
- }
- }
-
- fn wr_tagged_u16(tag_id: uint, v: u16) {
- do io::u64_to_be_bytes(v as u64, 2u) |v| {
- self.wr_tagged_bytes(tag_id, v);
- }
- }
-
- fn wr_tagged_u8(tag_id: uint, v: u8) {
- self.wr_tagged_bytes(tag_id, &[v]);
- }
-
- fn wr_tagged_i64(tag_id: uint, v: i64) {
- do io::u64_to_be_bytes(v as u64, 8u) |v| {
- self.wr_tagged_bytes(tag_id, v);
- }
- }
-
- fn wr_tagged_i32(tag_id: uint, v: i32) {
- do io::u64_to_be_bytes(v as u64, 4u) |v| {
- self.wr_tagged_bytes(tag_id, v);
- }
- }
-
- fn wr_tagged_i16(tag_id: uint, v: i16) {
- do io::u64_to_be_bytes(v as u64, 2u) |v| {
- self.wr_tagged_bytes(tag_id, v);
- }
- }
-
- fn wr_tagged_i8(tag_id: uint, v: i8) {
- self.wr_tagged_bytes(tag_id, &[v as u8]);
- }
-
- fn wr_tagged_str(tag_id: uint, v: &str) {
- str::byte_slice(v, |b| self.wr_tagged_bytes(tag_id, b));
- }
-
- fn wr_bytes(b: &[u8]) {
- debug!("Write %u bytes", vec::len(b));
- self.writer.write(b);
- }
-
- fn wr_str(s: &str) {
- debug!("Write str: %?", s);
- self.writer.write(str::to_bytes(s));
- }
-}
-
-// FIXME (#2743): optionally perform "relaxations" on end_tag to more
-// efficiently encode sizes; this is a fixed point iteration
-
-// Set to true to generate more debugging in EBML serialization.
-// Totally lame approach.
-const debug: bool = false;
-
-enum EbmlSerializerTag {
- EsUint, EsU64, EsU32, EsU16, EsU8,
- EsInt, EsI64, EsI32, EsI16, EsI8,
- EsBool,
- EsStr,
- EsF64, EsF32, EsFloat,
- EsEnum, EsEnumVid, EsEnumBody,
- EsVec, EsVecLen, EsVecElt,
-
- EsOpaque,
-
- EsLabel // Used only when debugging
-}
-
-priv impl Serializer {
- // used internally to emit things like the vector length and so on
- fn _emit_tagged_uint(t: EbmlSerializerTag, v: uint) {
- assert v <= 0xFFFF_FFFF_u;
- self.wr_tagged_u32(t as uint, v as u32);
- }
-
- fn _emit_label(label: &str) {
- // There are various strings that we have access to, such as
- // the name of a record field, which do not actually appear in
- // the serialized EBML (normally). This is just for
- // efficiency. When debugging, though, we can emit such
- // labels and then they will be checked by deserializer to
- // try and check failures more quickly.
- if debug { self.wr_tagged_str(EsLabel as uint, label) }
- }
-}
-
-impl Serializer {
- fn emit_opaque(&self, f: fn()) {
- do self.wr_tag(EsOpaque as uint) {
- f()
- }
- }
-}
-
-impl Serializer: serialization2::Serializer {
- fn emit_nil(&self) {}
-
- fn emit_uint(&self, v: uint) {
- self.wr_tagged_u64(EsUint as uint, v as u64);
- }
- fn emit_u64(&self, v: u64) { self.wr_tagged_u64(EsU64 as uint, v); }
- fn emit_u32(&self, v: u32) { self.wr_tagged_u32(EsU32 as uint, v); }
- fn emit_u16(&self, v: u16) { self.wr_tagged_u16(EsU16 as uint, v); }
- fn emit_u8(&self, v: u8) { self.wr_tagged_u8 (EsU8 as uint, v); }
-
- fn emit_int(&self, v: int) {
- self.wr_tagged_i64(EsInt as uint, v as i64);
- }
- fn emit_i64(&self, v: i64) { self.wr_tagged_i64(EsI64 as uint, v); }
- fn emit_i32(&self, v: i32) { self.wr_tagged_i32(EsI32 as uint, v); }
- fn emit_i16(&self, v: i16) { self.wr_tagged_i16(EsI16 as uint, v); }
- fn emit_i8(&self, v: i8) { self.wr_tagged_i8 (EsI8 as uint, v); }
-
- fn emit_bool(&self, v: bool) {
- self.wr_tagged_u8(EsBool as uint, v as u8)
- }
-
- // FIXME (#2742): implement these
- fn emit_f64(&self, _v: f64) { fail ~"Unimplemented: serializing an f64"; }
- fn emit_f32(&self, _v: f32) { fail ~"Unimplemented: serializing an f32"; }
- fn emit_float(&self, _v: float) {
- fail ~"Unimplemented: serializing a float";
- }
-
- fn emit_char(&self, _v: char) {
- fail ~"Unimplemented: serializing a char";
- }
-
- fn emit_borrowed_str(&self, v: &str) {
- self.wr_tagged_str(EsStr as uint, v)
- }
-
- fn emit_owned_str(&self, v: &str) {
- self.emit_borrowed_str(v)
- }
-
- fn emit_managed_str(&self, v: &str) {
- self.emit_borrowed_str(v)
- }
-
- fn emit_borrowed(&self, f: fn()) { f() }
- fn emit_owned(&self, f: fn()) { f() }
- fn emit_managed(&self, f: fn()) { f() }
-
- fn emit_enum(&self, name: &str, f: fn()) {
- self._emit_label(name);
- self.wr_tag(EsEnum as uint, f)
- }
- fn emit_enum_variant(&self, _v_name: &str, v_id: uint, _cnt: uint,
- f: fn()) {
- self._emit_tagged_uint(EsEnumVid, v_id);
- self.wr_tag(EsEnumBody as uint, f)
- }
- fn emit_enum_variant_arg(&self, _idx: uint, f: fn()) { f() }
-
- fn emit_borrowed_vec(&self, len: uint, f: fn()) {
- do self.wr_tag(EsVec as uint) {
- self._emit_tagged_uint(EsVecLen, len);
- f()
- }
- }
-
- fn emit_owned_vec(&self, len: uint, f: fn()) {
- self.emit_borrowed_vec(len, f)
- }
-
- fn emit_managed_vec(&self, len: uint, f: fn()) {
- self.emit_borrowed_vec(len, f)
- }
-
- fn emit_vec_elt(&self, _idx: uint, f: fn()) {
- self.wr_tag(EsVecElt as uint, f)
- }
-
- fn emit_rec(&self, f: fn()) { f() }
- fn emit_struct(&self, _name: &str, f: fn()) { f() }
- fn emit_field(&self, name: &str, _idx: uint, f: fn()) {
- self._emit_label(name);
- f()
- }
-
- fn emit_tup(&self, _len: uint, f: fn()) { f() }
- fn emit_tup_elt(&self, _idx: uint, f: fn()) { f() }
-}
-
-struct Deserializer {
- priv mut parent: Doc,
- priv mut pos: uint,
-}
-
-pub fn Deserializer(d: Doc) -> Deserializer {
- Deserializer { mut parent: d, mut pos: d.start }
-}
-
-priv impl Deserializer {
- fn _check_label(lbl: &str) {
- if self.pos < self.parent.end {
- let TaggedDoc { tag: r_tag, doc: r_doc } =
- doc_at(self.parent.data, self.pos);
-
- if r_tag == (EsLabel as uint) {
- self.pos = r_doc.end;
- let str = doc_as_str(r_doc);
- if lbl != str {
- fail fmt!("Expected label %s but found %s", lbl, str);
- }
- }
- }
- }
-
- fn next_doc(exp_tag: EbmlSerializerTag) -> Doc {
- debug!(". next_doc(exp_tag=%?)", exp_tag);
- if self.pos >= self.parent.end {
- fail ~"no more documents in current node!";
- }
- let TaggedDoc { tag: r_tag, doc: r_doc } =
- doc_at(self.parent.data, self.pos);
- debug!("self.parent=%?-%? self.pos=%? r_tag=%? r_doc=%?-%?",
- copy self.parent.start, copy self.parent.end,
- copy self.pos, r_tag, r_doc.start, r_doc.end);
- if r_tag != (exp_tag as uint) {
- fail fmt!("expected EMBL doc with tag %? but found tag %?",
- exp_tag, r_tag);
- }
- if r_doc.end > self.parent.end {
- fail fmt!("invalid EBML, child extends to 0x%x, parent to 0x%x",
- r_doc.end, self.parent.end);
- }
- self.pos = r_doc.end;
- r_doc
- }
-
- fn push_doc<T>(d: Doc, f: fn() -> T) -> T{
- let old_parent = self.parent;
- let old_pos = self.pos;
- self.parent = d;
- self.pos = d.start;
- let r = f();
- self.parent = old_parent;
- self.pos = old_pos;
- move r
- }
-
- fn _next_uint(exp_tag: EbmlSerializerTag) -> uint {
- let r = doc_as_u32(self.next_doc(exp_tag));
- debug!("_next_uint exp_tag=%? result=%?", exp_tag, r);
- r as uint
- }
-}
-
-impl Deserializer {
- fn read_opaque<R>(&self, op: fn(Doc) -> R) -> R {
- do self.push_doc(self.next_doc(EsOpaque)) {
- op(copy self.parent)
- }
- }
-}
-
-impl Deserializer: serialization2::Deserializer {
- fn read_nil(&self) -> () { () }
-
- fn read_u64(&self) -> u64 { doc_as_u64(self.next_doc(EsU64)) }
- fn read_u32(&self) -> u32 { doc_as_u32(self.next_doc(EsU32)) }
- fn read_u16(&self) -> u16 { doc_as_u16(self.next_doc(EsU16)) }
- fn read_u8 (&self) -> u8 { doc_as_u8 (self.next_doc(EsU8 )) }
- fn read_uint(&self) -> uint {
- let v = doc_as_u64(self.next_doc(EsUint));
- if v > (core::uint::max_value as u64) {
- fail fmt!("uint %? too large for this architecture", v);
- }
- v as uint
- }
-
- fn read_i64(&self) -> i64 { doc_as_u64(self.next_doc(EsI64)) as i64 }
- fn read_i32(&self) -> i32 { doc_as_u32(self.next_doc(EsI32)) as i32 }
- fn read_i16(&self) -> i16 { doc_as_u16(self.next_doc(EsI16)) as i16 }
- fn read_i8 (&self) -> i8 { doc_as_u8 (self.next_doc(EsI8 )) as i8 }
- fn read_int(&self) -> int {
- let v = doc_as_u64(self.next_doc(EsInt)) as i64;
- if v > (int::max_value as i64) || v < (int::min_value as i64) {
- fail fmt!("int %? out of range for this architecture", v);
- }
- v as int
- }
-
- fn read_bool(&self) -> bool { doc_as_u8(self.next_doc(EsBool)) as bool }
-
- fn read_f64(&self) -> f64 { fail ~"read_f64()"; }
- fn read_f32(&self) -> f32 { fail ~"read_f32()"; }
- fn read_float(&self) -> float { fail ~"read_float()"; }
-
- fn read_char(&self) -> char { fail ~"read_char()"; }
-
- fn read_owned_str(&self) -> ~str { doc_as_str(self.next_doc(EsStr)) }
- fn read_managed_str(&self) -> @str { fail ~"read_managed_str()"; }
-
- // Compound types:
- fn read_owned<T>(&self, f: fn() -> T) -> T {
- debug!("read_owned()");
- f()
- }
-
- fn read_managed<T>(&self, f: fn() -> T) -> T {
- debug!("read_managed()");
- f()
- }
-
- fn read_enum<T>(&self, name: &str, f: fn() -> T) -> T {
- debug!("read_enum(%s)", name);
- self._check_label(name);
- self.push_doc(self.next_doc(EsEnum), f)
- }
-
- fn read_enum_variant<T>(&self, f: fn(uint) -> T) -> T {
- debug!("read_enum_variant()");
- let idx = self._next_uint(EsEnumVid);
- debug!(" idx=%u", idx);
- do self.push_doc(self.next_doc(EsEnumBody)) {
- f(idx)
- }
- }
-
- fn read_enum_variant_arg<T>(&self, idx: uint, f: fn() -> T) -> T {
- debug!("read_enum_variant_arg(idx=%u)", idx);
- f()
- }
-
- fn read_owned_vec<T>(&self, f: fn(uint) -> T) -> T {
- debug!("read_owned_vec()");
- do self.push_doc(self.next_doc(EsVec)) {
- let len = self._next_uint(EsVecLen);
- debug!(" len=%u", len);
- f(len)
- }
- }
-
- fn read_managed_vec<T>(&self, f: fn(uint) -> T) -> T {
- debug!("read_managed_vec()");
- do self.push_doc(self.next_doc(EsVec)) {
- let len = self._next_uint(EsVecLen);
- debug!(" len=%u", len);
- f(len)
- }
- }
-
- fn read_vec_elt<T>(&self, idx: uint, f: fn() -> T) -> T {
- debug!("read_vec_elt(idx=%u)", idx);
- self.push_doc(self.next_doc(EsVecElt), f)
- }
-
- fn read_rec<T>(&self, f: fn() -> T) -> T {
- debug!("read_rec()");
- f()
- }
-
- fn read_struct<T>(&self, name: &str, f: fn() -> T) -> T {
- debug!("read_struct(name=%s)", name);
- f()
- }
-
- fn read_field<T>(&self, name: &str, idx: uint, f: fn() -> T) -> T {
- debug!("read_field(name=%s, idx=%u)", name, idx);
- self._check_label(name);
- f()
- }
-
- fn read_tup<T>(&self, len: uint, f: fn() -> T) -> T {
- debug!("read_tup(len=%u)", len);
- f()
- }
-
- fn read_tup_elt<T>(&self, idx: uint, f: fn() -> T) -> T {
- debug!("read_tup_elt(idx=%u)", idx);
- f()
- }
-}
-
-// ___________________________________________________________________________
-// Testing
-
-#[cfg(test)]
-mod tests {
- #[test]
- fn test_option_int() {
- fn test_v(v: Option<int>) {
- debug!("v == %?", v);
- let bytes = do io::with_bytes_writer |wr| {
- let ebml_w = Serializer(wr);
- v.serialize(&ebml_w)
- };
- let ebml_doc = Doc(@bytes);
- let deser = Deserializer(ebml_doc);
- let v1 = serialization2::deserialize(&deser);
- debug!("v1 == %?", v1);
- assert v == v1;
- }
-
- test_v(Some(22));
- test_v(None);
- test_v(Some(3));
- }
-}
fn mkname(nm: &str) -> Name {
let unm = str::from_slice(nm);
- return if str::len(nm) == 1u {
+ return if nm.len() == 1u {
Short(str::char_at(unm, 0u))
} else { Long(unm) };
}
pure fn ne(other: &Occur) -> bool { !self.eq(other) }
}
+impl HasArg : Eq {
+ pure fn eq(other: &HasArg) -> bool {
+ (self as uint) == ((*other) as uint)
+ }
+ pure fn ne(other: &HasArg) -> bool { !self.eq(other) }
+}
+
+impl Opt : Eq {
+ pure fn eq(other: &Opt) -> bool {
+ self.name == (*other).name &&
+ self.hasarg == (*other).hasarg &&
+ self.occur == (*other).occur
+ }
+ pure fn ne(other: &Opt) -> bool { !self.eq(other) }
+}
+
/// Create an option that is required and takes an argument
pub fn reqopt(name: &str) -> Opt {
return {name: mkname(name), hasarg: Yes, occur: Req};
*/
pub type Matches = {opts: ~[Opt], vals: ~[~[Optval]], free: ~[~str]};
+impl Optval : Eq {
+ pure fn eq(other: &Optval) -> bool {
+ match self {
+ Val(ref s) => match *other { Val (ref os) => s == os,
+ Given => false },
+ Given => match *other { Val(_) => false,
+ Given => true }
+ }
+ }
+ pure fn ne(other: &Optval) -> bool { !self.eq(other) }
+}
+
+impl Matches : Eq {
+ pure fn eq(other: &Matches) -> bool {
+ self.opts == (*other).opts &&
+ self.vals == (*other).vals &&
+ self.free == (*other).free
+ }
+ pure fn ne(other: &Matches) -> bool { !self.eq(other) }
+}
+
fn is_arg(arg: &str) -> bool {
- return str::len(arg) > 1u && arg[0] == '-' as u8;
+ return arg.len() > 1u && arg[0] == '-' as u8;
}
fn name_str(nm: &Name) -> ~str {
UnexpectedArgument(~str),
}
+impl Fail_ : Eq {
+ // this whole thing should be easy to infer...
+ pure fn eq(other: &Fail_) -> bool {
+ match self {
+ ArgumentMissing(ref s) => {
+ match *other { ArgumentMissing(ref so) => s == so,
+ _ => false }
+ }
+ UnrecognizedOption(ref s) => {
+ match *other { UnrecognizedOption(ref so) => s == so,
+ _ => false }
+ }
+ OptionMissing(ref s) => {
+ match *other { OptionMissing(ref so) => s == so,
+ _ => false }
+ }
+ OptionDuplicated(ref s) => {
+ match *other { OptionDuplicated(ref so) => s == so,
+ _ => false }
+ }
+ UnexpectedArgument(ref s) => {
+ match *other { UnexpectedArgument(ref so) => s == so,
+ _ => false }
+ }
+ }
+ }
+ pure fn ne(other: &Fail_) -> bool { !self.eq(other) }
+}
+
/// Convert a `fail_` enum into an error string
pub fn fail_str(f: Fail_) -> ~str {
return match f {
let mut i = 0u;
while i < l {
let cur = args[i];
- let curlen = str::len(cur);
+ let curlen = cur.len();
if !is_arg(cur) {
free.push(cur);
} else if cur == ~"--" {
pure fn ne(other: &FailType) -> bool { !self.eq(other) }
}
+/** A module which provides a way to specify descriptions and
+ * groups of short and long option names, together.
+ */
+pub mod groups {
+
+ /** one group of options, e.g., both -h and --help, along with
+ * their shared description and properties
+ */
+ pub type OptGroup = {
+ short_name: ~str,
+ long_name: ~str,
+ hint: ~str,
+ desc: ~str,
+ hasarg: HasArg,
+ occur: Occur
+ };
+
+ impl OptGroup : Eq {
+ pure fn eq(other: &OptGroup) -> bool {
+ self.short_name == (*other).short_name &&
+ self.long_name == (*other).long_name &&
+ self.hint == (*other).hint &&
+ self.desc == (*other).desc &&
+ self.hasarg == (*other).hasarg &&
+ self.occur == (*other).occur
+ }
+ pure fn ne(other: &OptGroup) -> bool { !self.eq(other) }
+ }
+
+ /// Create a long option that is required and takes an argument
+ pub fn reqopt(short_name: &str, long_name: &str,
+ desc: &str, hint: &str) -> OptGroup {
+ let len = short_name.len();
+ assert len == 1 || len == 0;
+ return {short_name: str::from_slice(short_name),
+ long_name: str::from_slice(long_name),
+ hint: str::from_slice(hint),
+ desc: str::from_slice(desc),
+ hasarg: Yes,
+ occur: Req};
+ }
+
+ /// Create a long option that is optional and takes an argument
+ pub fn optopt(short_name: &str, long_name: &str,
+ desc: &str, hint: &str) -> OptGroup {
+ let len = short_name.len();
+ assert len == 1 || len == 0;
+ return {short_name: str::from_slice(short_name),
+ long_name: str::from_slice(long_name),
+ hint: str::from_slice(hint),
+ desc: str::from_slice(desc),
+ hasarg: Yes,
+ occur: Optional};
+ }
+
+ /// Create a long option that is optional and does not take an argument
+ pub fn optflag(short_name: &str, long_name: &str,
+ desc: &str) -> OptGroup {
+ let len = short_name.len();
+ assert len == 1 || len == 0;
+ return {short_name: str::from_slice(short_name),
+ long_name: str::from_slice(long_name),
+ hint: ~"",
+ desc: str::from_slice(desc),
+ hasarg: No,
+ occur: Optional};
+ }
+
+ /// Create a long option that is optional and takes an optional argument
+ pub fn optflagopt(short_name: &str, long_name: &str,
+ desc: &str, hint: &str) -> OptGroup {
+ let len = short_name.len();
+ assert len == 1 || len == 0;
+ return {short_name: str::from_slice(short_name),
+ long_name: str::from_slice(long_name),
+ hint: str::from_slice(hint),
+ desc: str::from_slice(desc),
+ hasarg: Maybe,
+ occur: Optional};
+ }
+
+ /**
+ * Create a long option that is optional, takes an argument, and may occur
+ * multiple times
+ */
+ pub fn optmulti(short_name: &str, long_name: &str,
+ desc: &str, hint: &str) -> OptGroup {
+ let len = short_name.len();
+ assert len == 1 || len == 0;
+ return {short_name: str::from_slice(short_name),
+ long_name: str::from_slice(long_name),
+ hint: str::from_slice(hint),
+ desc: str::from_slice(desc),
+ hasarg: Yes,
+ occur: Multi};
+ }
+
+ // translate OptGroup into Opt
+ // (both short and long names correspond to different Opts)
+ pub fn long_to_short(lopt: &OptGroup) -> ~[Opt] {
+ match ((*lopt).short_name.len(),
+ (*lopt).long_name.len()) {
+
+ (0,0) => fail ~"this long-format option was given no name",
+
+ (0,_) => ~[{name: Long(((*lopt).long_name)),
+ hasarg: (*lopt).hasarg,
+ occur: (*lopt).occur}],
+
+ (1,0) => ~[{name: Short(str::char_at((*lopt).short_name, 0)),
+ hasarg: (*lopt).hasarg,
+ occur: (*lopt).occur}],
+
+ (1,_) => ~[{name: Short(str::char_at((*lopt).short_name, 0)),
+ hasarg: (*lopt).hasarg,
+ occur: (*lopt).occur},
+ {name: Long(((*lopt).long_name)),
+ hasarg: (*lopt).hasarg,
+ occur: (*lopt).occur}],
+
+ (_,_) => fail ~"something is wrong with the long-form opt"
+ }
+ }
+
+ /*
+ * Parse command line args with the provided long format options
+ */
+ pub fn getopts(args: &[~str], opts: &[OptGroup]) -> Result {
+ ::getopts::getopts(args, vec::flat_map(opts, long_to_short))
+ }
+
+ /**
+ * Derive a usage message from a set of long options
+ */
+ pub fn usage(brief: &str, opts: &[OptGroup]) -> ~str {
+
+ let desc_sep = ~"\n" + str::repeat(~" ", 24);
+
+ let rows = vec::map(opts, |optref| {
+ let short_name = (*optref).short_name;
+ let long_name = (*optref).long_name;
+ let hint = (*optref).hint;
+ let desc = (*optref).desc;
+ let hasarg = (*optref).hasarg;
+
+ let mut row = str::repeat(~" ", 4);
+
+ // short option
+ row += match short_name.len() {
+ 0 => ~"",
+ 1 => ~"-" + short_name + " ",
+ _ => fail ~"the short name should only be 1 char long",
+ };
+
+ // long option
+ row += match long_name.len() {
+ 0 => ~"",
+ _ => ~"--" + long_name + " ",
+ };
+
+ // arg
+ row += match hasarg {
+ No => ~"",
+ Yes => hint,
+ Maybe => ~"[" + hint + ~"]",
+ };
+
+ // here we just need to indent the start of the description
+ let rowlen = row.len();
+ row += if rowlen < 24 {
+ str::repeat(~" ", 24 - rowlen)
+ } else {
+ desc_sep
+ };
+
+ // wrapped description
+ row += str::connect(str::split_within(desc, 54), desc_sep);
+
+ row
+ });
+
+ return str::from_slice(brief) +
+ ~"\n\nOptions:\n" +
+ str::connect(rows, ~"\n") +
+ ~"\n\n";
+ }
+} // end groups module
+
#[cfg(test)]
mod tests {
#[legacy_exports];
assert opts_present(matches, ~[~"L"]);
assert opts_str(matches, ~[~"L"]) == ~"foo";
}
+
+ #[test]
+ fn test_groups_reqopt() {
+ let opt = groups::reqopt(~"b", ~"banana", ~"some bananas", ~"VAL");
+ assert opt == { short_name: ~"b",
+ long_name: ~"banana",
+ hint: ~"VAL",
+ desc: ~"some bananas",
+ hasarg: Yes,
+ occur: Req }
+ }
+
+ #[test]
+ fn test_groups_optopt() {
+ let opt = groups::optopt(~"a", ~"apple", ~"some apples", ~"VAL");
+ assert opt == { short_name: ~"a",
+ long_name: ~"apple",
+ hint: ~"VAL",
+ desc: ~"some apples",
+ hasarg: Yes,
+ occur: Optional }
+ }
+
+ #[test]
+ fn test_groups_optflag() {
+ let opt = groups::optflag(~"k", ~"kiwi", ~"some kiwis");
+ assert opt == { short_name: ~"k",
+ long_name: ~"kiwi",
+ hint: ~"",
+ desc: ~"some kiwis",
+ hasarg: No,
+ occur: Optional }
+ }
+
+ #[test]
+ fn test_groups_optflagopt() {
+ let opt = groups::optflagopt(~"p", ~"pineapple",
+ ~"some pineapples", ~"VAL");
+ assert opt == { short_name: ~"p",
+ long_name: ~"pineapple",
+ hint: ~"VAL",
+ desc: ~"some pineapples",
+ hasarg: Maybe,
+ occur: Optional }
+ }
+
+ #[test]
+ fn test_groups_optmulti() {
+ let opt = groups::optmulti(~"l", ~"lime",
+ ~"some limes", ~"VAL");
+ assert opt == { short_name: ~"l",
+ long_name: ~"lime",
+ hint: ~"VAL",
+ desc: ~"some limes",
+ hasarg: Yes,
+ occur: Multi }
+ }
+
+ #[test]
+ fn test_groups_long_to_short() {
+ let short = ~[reqopt(~"b"), reqopt(~"banana")];
+ let verbose = groups::reqopt(~"b", ~"banana",
+ ~"some bananas", ~"VAL");
+
+ assert groups::long_to_short(&verbose) == short;
+ }
+
+ #[test]
+ fn test_groups_getopts() {
+ let short = ~[
+ reqopt(~"b"), reqopt(~"banana"),
+ optopt(~"a"), optopt(~"apple"),
+ optflag(~"k"), optflagopt(~"kiwi"),
+ optflagopt(~"p"),
+ optmulti(~"l")
+ ];
+
+ let verbose = ~[
+ groups::reqopt(~"b", ~"banana", ~"Desc", ~"VAL"),
+ groups::optopt(~"a", ~"apple", ~"Desc", ~"VAL"),
+ groups::optflag(~"k", ~"kiwi", ~"Desc"),
+ groups::optflagopt(~"p", ~"", ~"Desc", ~"VAL"),
+ groups::optmulti(~"l", ~"", ~"Desc", ~"VAL"),
+ ];
+
+ let sample_args = ~[~"-k", ~"15", ~"--apple", ~"1", ~"k",
+ ~"-p", ~"16", ~"l", ~"35"];
+
+ // NOTE: we should sort before comparing
+ assert getopts(sample_args, short)
+ == groups::getopts(sample_args, verbose);
+ }
+
+ #[test]
+ fn test_groups_usage() {
+ let optgroups = ~[
+ groups::reqopt(~"b", ~"banana", ~"Desc", ~"VAL"),
+ groups::optopt(~"a", ~"012345678901234567890123456789",
+ ~"Desc", ~"VAL"),
+ groups::optflag(~"k", ~"kiwi", ~"Desc"),
+ groups::optflagopt(~"p", ~"", ~"Desc", ~"VAL"),
+ groups::optmulti(~"l", ~"", ~"Desc", ~"VAL"),
+ ];
+
+ let expected =
+~"Usage: fruits
+
+Options:
+ -b --banana VAL Desc
+ -a --012345678901234567890123456789 VAL
+ Desc
+ -k --kiwi Desc
+ -p [VAL] Desc
+ -l VAL Desc
+
+";
+
+ let generated_usage = groups::usage(~"Usage: fruits", optgroups);
+
+ debug!("expected: <<%s>>", expected);
+ debug!("generated: <<%s>>", generated_usage);
+ assert generated_usage == expected;
+ }
+
+ #[test]
+ fn test_groups_usage_description_wrapping() {
+ // indentation should be 24 spaces
+ // lines wrap after 78: or rather descriptions wrap after 54
+
+ let optgroups = ~[
+ groups::optflag(~"k", ~"kiwi",
+ ~"This is a long description which won't be wrapped..+.."), // 54
+ groups::optflag(~"a", ~"apple",
+ ~"This is a long description which _will_ be wrapped..+.."), // 55
+ ];
+
+ let expected =
+~"Usage: fruits
+
+Options:
+ -k --kiwi This is a long description which won't be wrapped..+..
+ -a --apple This is a long description which _will_ be
+ wrapped..+..
+
+";
+
+ let usage = groups::usage(~"Usage: fruits", optgroups);
+
+ debug!("expected: <<%s>>", expected);
+ debug!("generated: <<%s>>", usage);
+ assert usage == expected
+ }
}
// Local Variables:
fn spaces(n: uint) -> ~str {
let mut ss = ~"";
- for n.times { str::push_str(&ss, " "); }
+ for n.times { str::push_str(&mut ss, " "); }
return ss;
}
Serializer { wr: wr }
}
-pub impl Serializer: serialization2::Serializer {
+pub impl Serializer: serialization::Serializer {
fn emit_nil(&self) { self.wr.write_str("null") }
fn emit_uint(&self, v: uint) { self.emit_float(v as float); }
PrettySerializer { wr: wr, indent: 0 }
}
-pub impl PrettySerializer: serialization2::Serializer {
+pub impl PrettySerializer: serialization::Serializer {
fn emit_nil(&self) { self.wr.write_str("null") }
fn emit_uint(&self, v: uint) { self.emit_float(v as float); }
}
}
-pub impl Json: serialization2::Serializable {
- fn serialize<S: serialization2::Serializer>(&self, s: &S) {
+#[cfg(stage0)]
+pub impl Json: serialization::Serializable {
+ fn serialize<S: serialization::Serializer>(&self, s: &S) {
+ match *self {
+ Number(v) => v.serialize(s),
+ String(ref v) => v.serialize(s),
+ Boolean(v) => v.serialize(s),
+ List(v) => v.serialize(s),
+ Object(ref v) => {
+ do s.emit_rec || {
+ let mut idx = 0;
+ for v.each |key, value| {
+ do s.emit_field(*key, idx) {
+ value.serialize(s);
+ }
+ idx += 1;
+ }
+ }
+ },
+ Null => s.emit_nil(),
+ }
+ }
+}
+
+#[cfg(stage1)]
+#[cfg(stage2)]
+pub impl<
+ S: serialization::Serializer
+> Json: serialization::Serializable<S> {
+ fn serialize(&self, s: &S) {
match *self {
Number(v) => v.serialize(s),
String(ref v) => v.serialize(s),
}
/// Serializes a json value into a string
-pub fn to_str(json: &Json) -> ~str {
+pub pure fn to_str(json: &Json) -> ~str unsafe {
+ // ugh, should be safe
io::with_str_writer(|wr| to_writer(wr, json))
}
Parser {
rdr: rdr,
ch: rdr.read_char(),
- line: 1u,
- col: 1u,
+ line: 1,
+ col: 1,
}
}
self.parse_whitespace();
// Make sure there is no trailing characters.
if self.eof() {
- Ok(value)
+ Ok(move value)
} else {
self.error(~"trailing characters")
}
if (escape) {
match self.ch {
- '"' => str::push_char(&res, '"'),
- '\\' => str::push_char(&res, '\\'),
- '/' => str::push_char(&res, '/'),
- 'b' => str::push_char(&res, '\x08'),
- 'f' => str::push_char(&res, '\x0c'),
- 'n' => str::push_char(&res, '\n'),
- 'r' => str::push_char(&res, '\r'),
- 't' => str::push_char(&res, '\t'),
+ '"' => str::push_char(&mut res, '"'),
+ '\\' => str::push_char(&mut res, '\\'),
+ '/' => str::push_char(&mut res, '/'),
+ 'b' => str::push_char(&mut res, '\x08'),
+ 'f' => str::push_char(&mut res, '\x0c'),
+ 'n' => str::push_char(&mut res, '\n'),
+ 'r' => str::push_char(&mut res, '\r'),
+ 't' => str::push_char(&mut res, '\t'),
'u' => {
// Parse \u1234.
let mut i = 0u;
~"invalid \\u escape (not four digits)");
}
- str::push_char(&res, n as char);
+ str::push_char(&mut res, n as char);
}
_ => return self.error(~"invalid escape")
}
self.bump();
return Ok(res);
}
- str::push_char(&res, self.ch);
+ str::push_char(&mut res, self.ch);
}
}
if self.ch == ']' {
self.bump();
- return Ok(List(values));
+ return Ok(List(move values));
}
loop {
match move self.parse_value() {
- Ok(move v) => values.push(v),
+ Ok(move v) => values.push(move v),
Err(move e) => return Err(e)
}
match self.ch {
',' => self.bump(),
- ']' => { self.bump(); return Ok(List(values)); }
+ ']' => { self.bump(); return Ok(List(move values)); }
_ => return self.error(~"expected `,` or `]`")
}
};
if self.ch == '}' {
self.bump();
- return Ok(Object(values));
+ return Ok(Object(move values));
}
while !self.eof() {
self.bump();
match move self.parse_value() {
- Ok(move value) => { values.insert(key, value); }
+ Ok(move value) => { values.insert(key, move value); }
Err(move e) => return Err(e)
}
self.parse_whitespace();
match self.ch {
',' => self.bump(),
- '}' => { self.bump(); return Ok(Object(values)); }
+ '}' => { self.bump(); return Ok(Object(move values)); }
_ => {
if self.eof() { break; }
return self.error(~"expected `,` or `}`");
pub fn Deserializer(rdr: io::Reader) -> Result<Deserializer, Error> {
match move from_reader(rdr) {
Ok(move json) => {
- let des = Deserializer { json: json, stack: ~[] };
+ let des = Deserializer { json: move json, stack: ~[] };
Ok(move des)
}
Err(move e) => Err(e)
}
}
-pub impl Deserializer: serialization2::Deserializer {
+pub impl Deserializer: serialization::Deserializer {
fn read_nil(&self) -> () {
debug!("read_nil");
match *self.pop() {
};
let res = f(len);
self.pop();
- res
+ move res
}
fn read_managed_vec<T>(&self, f: fn(uint) -> T) -> T {
};
let res = f(len);
self.pop();
- res
+ move res
}
fn read_vec_elt<T>(&self, idx: uint, f: fn() -> T) -> T {
debug!("read_rec()");
let value = f();
self.pop();
- value
+ move value
}
fn read_struct<T>(&self, _name: &str, f: fn() -> T) -> T {
debug!("read_struct()");
let value = f();
self.pop();
- value
+ move value
}
fn read_field<T>(&self, name: &str, idx: uint, f: fn() -> T) -> T {
// FIXME(#3148) This hint should not be necessary.
let obj: &self/~Object = obj;
- match obj.find_ref(&name.to_unique()) {
+ match obj.find_ref(&name.to_owned()) {
None => fail fmt!("no such field: %s", name),
Some(json) => {
self.stack.push(json);
debug!("read_tup(len=%u)", len);
let value = f();
self.pop();
- value
+ move value
}
fn read_tup_elt<T>(&self, idx: uint, f: fn() -> T) -> T {
}
impl Json: to_str::ToStr {
- fn to_str() -> ~str { to_str(&self) }
+ pure fn to_str() -> ~str { to_str(&self) }
}
impl Error: to_str::ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
fmt!("%u:%u: %s", self.line, self.col, *self.msg)
}
}
for items.each |item| {
match *item {
- (copy key, copy value) => { d.insert(key, value); },
+ (copy key, copy value) => { d.insert(key, move value); },
}
};
- Object(d)
+ Object(move d)
}
#[test]
wr.write_str(~" }");
}
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str unsafe {
+ // Meh -- this should be safe
do io::with_str_writer |wr| { self.to_writer(wr) }
}
}
let map = map::HashMap::<~str, ~str>();
assert (option::is_none(&map.find(key)));
map.insert(key, ~"val");
- assert (option::get(&map.find(key)) == ~"val");
+ assert (option::get(map.find(key)) == ~"val");
}
#[test]
use uv_getaddrinfo_t = uv::ll::uv_getaddrinfo_t;
use uv_ip4_addr = uv::ll::ip4_addr;
use uv_ip4_name = uv::ll::ip4_name;
+use uv_ip4_port = uv::ll::ip4_port;
use uv_ip6_addr = uv::ll::ip6_addr;
use uv_ip6_name = uv::ll::ip6_name;
+use uv_ip6_port = uv::ll::ip6_port;
use uv_getaddrinfo = uv::ll::getaddrinfo;
use uv_freeaddrinfo = uv::ll::freeaddrinfo;
use create_uv_getaddrinfo_t = uv::ll::getaddrinfo_t;
};
/**
- * Convert a `ip_addr` to a str
+ * Convert a `IpAddr` to a str
*
* # Arguments
*
- * * ip - a `std::net::ip::ip_addr`
+ * * ip - a `std::net::ip::IpAddr`
*/
pub fn format_addr(ip: &IpAddr) -> ~str {
match *ip {
}
}
+/**
+ * Get the associated port
+ *
+ * # Arguments
+ * * ip - a `std::net::ip::IpAddr`
+ */
+pub fn get_port(ip: &IpAddr) -> uint {
+ match *ip {
+ Ipv4(ref addr) => unsafe {
+ uv_ip4_port(addr)
+ },
+ Ipv6(ref addr) => unsafe {
+ uv_ip6_port(addr)
+ }
+ }
+}
+
/// Represents errors returned from `net::ip::get_addr()`
enum IpGetAddrErr {
GetAddrUnknownError
*/
pub fn parse_addr(ip: &str) -> IpAddr {
match try_parse_addr(ip) {
- result::Ok(copy addr) => addr,
+ result::Ok(move addr) => move addr,
result::Err(ref err_data) => fail err_data.err_msg
}
}
*/
pub fn parse_addr(ip: &str) -> IpAddr {
match try_parse_addr(ip) {
- result::Ok(copy addr) => addr,
+ result::Ok(move addr) => move addr,
result::Err(copy err_data) => fail err_data.err_msg
}
}
}
// note really sure how to realiably test/assert
// this.. mostly just wanting to see it work, atm.
- let results = result::unwrap(ga_result);
+ let results = result::unwrap(move ga_result);
log(debug, fmt!("test_get_addr: Number of results for %s: %?",
localhost_name, vec::len(results)));
for vec::each(results) |r| {
}
// at least one result.. this is going to vary from system
// to system, based on stuff like the contents of /etc/hosts
- assert vec::len(results) > 0;
+ assert !results.is_empty();
}
#[test]
#[ignore(reason = "valgrind says it's leaky")]
use uv::iotask;
use uv::iotask::IoTask;
use future_spawn = future::spawn;
-// FIXME #1935
-// should be able to, but can't atm, replace w/ result::{result, extensions};
-use result::*;
+use result::{Result};
use libc::size_t;
use io::{Reader, ReaderUtil, Writer};
use comm = core::comm;
stream_handle_ptr: stream_handle_ptr,
connect_req: uv::ll::connect_t(),
write_req: uv::ll::write_t(),
+ ipv6: match input_ip {
+ ip::Ipv4(_) => { false }
+ ip::Ipv6(_) => { true }
+ },
iotask: iotask
};
let socket_data_ptr = ptr::addr_of(&(*socket_data));
stream_handle_ptr : stream_handle_ptr,
connect_req : uv::ll::connect_t(),
write_req : uv::ll::write_t(),
+ ipv6: (*server_data_ptr).ipv6,
iotask : iotask
};
let client_socket_data_ptr = ptr::addr_of(&(*client_socket_data));
new_connect_cb: fn~(TcpNewConnection,
comm::Chan<Option<TcpErrData>>))
-> result::Result<(), TcpListenErrData> unsafe {
- do listen_common(move host_ip, port, backlog, iotask, on_establish_cb)
+ do listen_common(move host_ip, port, backlog, iotask,
+ move on_establish_cb)
// on_connect_cb
|move new_connect_cb, handle| unsafe {
let server_data_ptr = uv::ll::get_data_for_uv_handle(handle)
kill_ch: kill_ch,
on_connect_cb: move on_connect_cb,
iotask: iotask,
+ ipv6: match host_ip {
+ ip::Ipv4(_) => { false }
+ ip::Ipv6(_) => { true }
+ },
mut active: true
};
let server_data_ptr = ptr::addr_of(&server_data);
-> future::Future<result::Result<(), TcpErrData>> {
write_future(&self, raw_write_data)
}
+ pub fn get_peer_addr() -> ip::IpAddr {
+ unsafe {
+ if self.socket_data.ipv6 {
+ let addr = uv::ll::ip6_addr("", 0);
+ uv::ll::tcp_getpeername6(self.socket_data.stream_handle_ptr,
+ ptr::addr_of(&addr));
+ ip::Ipv6(move addr)
+ } else {
+ let addr = uv::ll::ip4_addr("", 0);
+ uv::ll::tcp_getpeername(self.socket_data.stream_handle_ptr,
+ ptr::addr_of(&addr));
+ ip::Ipv4(move addr)
+ }
+ }
+ }
}
/// Implementation of `io::reader` trait for a buffered `net::tcp::tcp_socket`
kill_ch: comm::Chan<Option<TcpErrData>>,
on_connect_cb: fn~(*uv::ll::uv_tcp_t),
iotask: IoTask,
+ ipv6: bool,
mut active: bool
};
log(debug, fmt!("tcp on_read_cb nread: %d", nread as int));
let reader_ch = (*socket_data_ptr).reader_ch;
let buf_base = uv::ll::get_base_from_buf(buf);
- let new_bytes = vec::raw::from_buf(buf_base, nread as uint);
+ let new_bytes = vec::from_buf(buf_base, nread as uint);
core::comm::send(reader_ch, result::Ok(new_bytes));
}
}
stream_handle_ptr: *uv::ll::uv_tcp_t,
connect_req: uv::ll::uv_connect_t,
write_req: uv::ll::uv_write_t,
+ ipv6: bool,
iotask: IoTask
};
impl_gl_tcp_ipv4_server_and_client();
}
#[test]
+ fn test_gl_tcp_get_peer_addr() unsafe {
+ impl_gl_tcp_ipv4_get_peer_addr();
+ }
+ #[test]
fn test_gl_tcp_ipv4_client_error_connection_refused() unsafe {
impl_gl_tcp_ipv4_client_error_connection_refused();
}
}
#[test]
#[ignore(cfg(target_os = "linux"))]
+ fn test_gl_tcp_get_peer_addr() unsafe {
+ impl_gl_tcp_ipv4_get_peer_addr();
+ }
+ #[test]
+ #[ignore(cfg(target_os = "linux"))]
fn test_gl_tcp_ipv4_client_error_connection_refused() unsafe {
impl_gl_tcp_ipv4_client_error_connection_refused();
}
assert str::contains(actual_req, expected_req);
assert str::contains(actual_resp, expected_resp);
}
+ fn impl_gl_tcp_ipv4_get_peer_addr() {
+ let hl_loop = uv::global_loop::get();
+ let server_ip = ~"127.0.0.1";
+ let server_port = 8887u;
+ let expected_resp = ~"pong";
+
+ let server_result_po = core::comm::Port::<~str>();
+ let server_result_ch = core::comm::Chan(&server_result_po);
+
+ let cont_po = core::comm::Port::<()>();
+ let cont_ch = core::comm::Chan(&cont_po);
+ // server
+ do task::spawn_sched(task::ManualThreads(1u)) {
+ let actual_req = do comm::listen |server_ch| {
+ run_tcp_test_server(
+ server_ip,
+ server_port,
+ expected_resp,
+ server_ch,
+ cont_ch,
+ hl_loop)
+ };
+ server_result_ch.send(actual_req);
+ };
+ core::comm::recv(cont_po);
+ // client
+ log(debug, ~"server started, firing up client..");
+ do core::comm::listen |client_ch| {
+ let server_ip_addr = ip::v4::parse_addr(server_ip);
+ let iotask = uv::global_loop::get();
+ let connect_result = connect(move server_ip_addr, server_port,
+ iotask);
+
+ let sock = result::unwrap(move connect_result);
+
+ // This is what we are actually testing!
+ assert net::ip::format_addr(&sock.get_peer_addr()) ==
+ ~"127.0.0.1";
+ assert net::ip::get_port(&sock.get_peer_addr()) == 8887;
+
+ // Fulfill the protocol the test server expects
+ let resp_bytes = str::to_bytes(~"ping");
+ tcp_write_single(&sock, resp_bytes);
+ let read_result = sock.read(0u);
+ client_ch.send(str::from_bytes(read_result.get()));
+ };
+ }
fn impl_gl_tcp_ipv4_client_error_connection_refused() {
let hl_loop = uv::global_loop::get();
let server_ip = ~"127.0.0.1";
~"SERVER/WORKER: send on cont ch");
cont_ch.send(());
let sock = result::unwrap(move accept_result);
+ let peer_addr = sock.get_peer_addr();
log(debug, ~"SERVER: successfully accepted"+
- ~"connection!");
+ fmt!(" connection from %s:%u",
+ ip::format_addr(&peer_addr),
+ ip::get_port(&peer_addr)));
let received_req_bytes = read(&sock, 0u);
match move received_req_bytes {
result::Ok(move data) => {
str::push_char(&mut out, ch);
}
- _ => out += #fmt("%%%X", ch as uint)
+ _ => out += fmt!("%%%X", ch as uint)
}
} else {
- out += #fmt("%%%X", ch as uint);
+ out += fmt!("%%%X", ch as uint);
}
}
}
*
* This function is compliant with RFC 3986.
*/
+
pub fn encode_component(s: &str) -> ~str {
encode_inner(s, false)
}
str::push_char(&mut out, ch);
}
' ' => str::push_char(&mut out, '+'),
- _ => out += #fmt("%%%X", ch as uint)
+ _ => out += fmt!("%%%X", ch as uint)
}
}
first = false;
}
- out += #fmt("%s=%s", key, encode_plus(**value));
+ out += fmt!("%s=%s", key, encode_plus(**value));
}
}
return UserInfo(user, pass);
}
-fn userinfo_to_str(userinfo: UserInfo) -> ~str {
+pure fn userinfo_to_str(userinfo: UserInfo) -> ~str {
if option::is_some(&userinfo.pass) {
return str::concat(~[copy userinfo.user, ~":",
option::unwrap(copy userinfo.pass),
return query;
}
-pub fn query_to_str(query: Query) -> ~str {
+pub pure fn query_to_str(query: Query) -> ~str {
let mut strvec = ~[];
for query.each |kv| {
let (k, v) = copy *kv;
- strvec += ~[#fmt("%s=%s", encode_component(k), encode_component(v))];
+ // This is really safe...
+ unsafe {
+ strvec += ~[fmt!("%s=%s",
+ encode_component(k), encode_component(v))];
+ }
};
return str::connect(strvec, ~"&");
}
* result in just "http://somehost.com".
*
*/
-pub fn to_str(url: Url) -> ~str {
+pub pure fn to_str(url: Url) -> ~str {
let user = if url.user.is_some() {
userinfo_to_str(option::unwrap(copy url.user))
} else {
} else {
str::concat(~[~"?", query_to_str(url.query)])
};
- let fragment = if url.fragment.is_some() {
+ // ugh, this really is safe
+ let fragment = if url.fragment.is_some() unsafe {
str::concat(~[~"#", encode_component(
option::unwrap(copy url.fragment))])
} else {
}
impl Url: to_str::ToStr {
- pub fn to_str() -> ~str {
+ pub pure fn to_str() -> ~str {
to_str(self)
}
}
fn test_url_parse_host_slash() {
let urlstr = ~"http://0.42.42.42/";
let url = from_str(urlstr).get();
- #debug("url: %?", url);
+ debug!("url: %?", url);
assert url.host == ~"0.42.42.42";
assert url.path == ~"/";
}
fn test_url_with_underscores() {
let urlstr = ~"http://dotcom.com/file_name.html";
let url = from_str(urlstr).get();
- #debug("url: %?", url);
+ debug!("url: %?", url);
assert url.path == ~"/file_name.html";
}
fn test_url_with_dashes() {
let urlstr = ~"http://dotcom.com/file-name.html";
let url = from_str(urlstr).get();
- #debug("url: %?", url);
+ debug!("url: %?", url);
assert url.path == ~"/file-name.html";
}
use io::Writer;
use io::WriterUtil;
-use serialization::Serializer;
+use serialization;
-impl Writer: Serializer {
- fn emit_nil() {
- self.write_str(~"()")
+pub struct Serializer {
+ wr: io::Writer,
+}
+
+pub fn Serializer(wr: io::Writer) -> Serializer {
+ Serializer { wr: wr }
+}
+
+pub impl Serializer: serialization::Serializer {
+ fn emit_nil(&self) {
+ self.wr.write_str(~"()")
+ }
+
+ fn emit_uint(&self, v: uint) {
+ self.wr.write_str(fmt!("%?u", v));
+ }
+
+ fn emit_u64(&self, v: u64) {
+ self.wr.write_str(fmt!("%?_u64", v));
+ }
+
+ fn emit_u32(&self, v: u32) {
+ self.wr.write_str(fmt!("%?_u32", v));
+ }
+
+ fn emit_u16(&self, v: u16) {
+ self.wr.write_str(fmt!("%?_u16", v));
+ }
+
+ fn emit_u8(&self, v: u8) {
+ self.wr.write_str(fmt!("%?_u8", v));
}
- fn emit_uint(v: uint) {
- self.write_str(fmt!("%?u", v));
+ fn emit_int(&self, v: int) {
+ self.wr.write_str(fmt!("%?", v));
}
- fn emit_u64(v: u64) {
- self.write_str(fmt!("%?_u64", v));
+ fn emit_i64(&self, v: i64) {
+ self.wr.write_str(fmt!("%?_i64", v));
}
- fn emit_u32(v: u32) {
- self.write_str(fmt!("%?_u32", v));
+ fn emit_i32(&self, v: i32) {
+ self.wr.write_str(fmt!("%?_i32", v));
}
- fn emit_u16(v: u16) {
- self.write_str(fmt!("%?_u16", v));
+ fn emit_i16(&self, v: i16) {
+ self.wr.write_str(fmt!("%?_i16", v));
}
- fn emit_u8(v: u8) {
- self.write_str(fmt!("%?_u8", v));
+ fn emit_i8(&self, v: i8) {
+ self.wr.write_str(fmt!("%?_i8", v));
}
- fn emit_int(v: int) {
- self.write_str(fmt!("%?", v));
+ fn emit_bool(&self, v: bool) {
+ self.wr.write_str(fmt!("%b", v));
}
- fn emit_i64(v: i64) {
- self.write_str(fmt!("%?_i64", v));
+ fn emit_float(&self, v: float) {
+ self.wr.write_str(fmt!("%?_f", v));
}
- fn emit_i32(v: i32) {
- self.write_str(fmt!("%?_i32", v));
+ fn emit_f64(&self, v: f64) {
+ self.wr.write_str(fmt!("%?_f64", v));
}
- fn emit_i16(v: i16) {
- self.write_str(fmt!("%?_i16", v));
+ fn emit_f32(&self, v: f32) {
+ self.wr.write_str(fmt!("%?_f32", v));
}
- fn emit_i8(v: i8) {
- self.write_str(fmt!("%?_i8", v));
+ fn emit_char(&self, v: char) {
+ self.wr.write_str(fmt!("%?", v));
}
- fn emit_bool(v: bool) {
- self.write_str(fmt!("%b", v));
+ fn emit_borrowed_str(&self, v: &str) {
+ self.wr.write_str(fmt!("&%?", v));
}
- fn emit_float(v: float) {
- self.write_str(fmt!("%?_f", v));
+ fn emit_owned_str(&self, v: &str) {
+ self.wr.write_str(fmt!("~%?", v));
}
- fn emit_f64(v: f64) {
- self.write_str(fmt!("%?_f64", v));
+ fn emit_managed_str(&self, v: &str) {
+ self.wr.write_str(fmt!("@%?", v));
}
- fn emit_f32(v: f32) {
- self.write_str(fmt!("%?_f32", v));
+ fn emit_borrowed(&self, f: fn()) {
+ self.wr.write_str(~"&");
+ f();
}
- fn emit_str(v: &str) {
- self.write_str(fmt!("%?", v));
+ fn emit_owned(&self, f: fn()) {
+ self.wr.write_str(~"~");
+ f();
+ }
+
+ fn emit_managed(&self, f: fn()) {
+ self.wr.write_str(~"@");
+ f();
+ }
+
+ fn emit_enum(&self, _name: &str, f: fn()) {
+ f();
}
- fn emit_enum(_name: &str, f: fn()) {
+ fn emit_enum_variant(&self, v_name: &str, _v_id: uint, sz: uint,
+ f: fn()) {
+ self.wr.write_str(v_name);
+ if sz > 0u { self.wr.write_str(~"("); }
f();
+ if sz > 0u { self.wr.write_str(~")"); }
}
- fn emit_enum_variant(v_name: &str, _v_id: uint, sz: uint, f: fn()) {
- self.write_str(v_name);
- if sz > 0u { self.write_str(~"("); }
+ fn emit_enum_variant_arg(&self, idx: uint, f: fn()) {
+ if idx > 0u { self.wr.write_str(~", "); }
f();
- if sz > 0u { self.write_str(~")"); }
}
- fn emit_enum_variant_arg(idx: uint, f: fn()) {
- if idx > 0u { self.write_str(~", "); }
+ fn emit_borrowed_vec(&self, _len: uint, f: fn()) {
+ self.wr.write_str(~"&[");
f();
+ self.wr.write_str(~"]");
}
- fn emit_vec(_len: uint, f: fn()) {
- self.write_str(~"[");
+ fn emit_owned_vec(&self, _len: uint, f: fn()) {
+ self.wr.write_str(~"~[");
f();
- self.write_str(~"]");
+ self.wr.write_str(~"]");
}
- fn emit_vec_elt(idx: uint, f: fn()) {
- if idx > 0u { self.write_str(~", "); }
+ fn emit_managed_vec(&self, _len: uint, f: fn()) {
+ self.wr.write_str(~"@[");
f();
+ self.wr.write_str(~"]");
}
- fn emit_box(f: fn()) {
- self.write_str(~"@");
+ fn emit_vec_elt(&self, idx: uint, f: fn()) {
+ if idx > 0u { self.wr.write_str(~", "); }
f();
}
- fn emit_uniq(f: fn()) {
- self.write_str(~"~");
+ fn emit_rec(&self, f: fn()) {
+ self.wr.write_str(~"{");
f();
+ self.wr.write_str(~"}");
}
- fn emit_rec(f: fn()) {
- self.write_str(~"{");
+ fn emit_struct(&self, name: &str, f: fn()) {
+ self.wr.write_str(fmt!("%s {", name));
f();
- self.write_str(~"}");
+ self.wr.write_str(~"}");
}
- fn emit_rec_field(f_name: &str, f_idx: uint, f: fn()) {
- if f_idx > 0u { self.write_str(~", "); }
- self.write_str(f_name);
- self.write_str(~": ");
+ fn emit_field(&self, name: &str, idx: uint, f: fn()) {
+ if idx > 0u { self.wr.write_str(~", "); }
+ self.wr.write_str(name);
+ self.wr.write_str(~": ");
f();
}
- fn emit_tup(_sz: uint, f: fn()) {
- self.write_str(~"(");
+ fn emit_tup(&self, _len: uint, f: fn()) {
+ self.wr.write_str(~"(");
f();
- self.write_str(~")");
+ self.wr.write_str(~")");
}
- fn emit_tup_elt(idx: uint, f: fn()) {
- if idx > 0u { self.write_str(~", "); }
+ fn emit_tup_elt(&self, idx: uint, f: fn()) {
+ if idx > 0u { self.wr.write_str(~", "); }
f();
}
}
+++ /dev/null
-#[forbid(deprecated_mode)];
-
-use io::Writer;
-use io::WriterUtil;
-use serialization2;
-
-pub struct Serializer {
- wr: io::Writer,
-}
-
-pub fn Serializer(wr: io::Writer) -> Serializer {
- Serializer { wr: wr }
-}
-
-pub impl Serializer: serialization2::Serializer {
- fn emit_nil(&self) {
- self.wr.write_str(~"()")
- }
-
- fn emit_uint(&self, v: uint) {
- self.wr.write_str(fmt!("%?u", v));
- }
-
- fn emit_u64(&self, v: u64) {
- self.wr.write_str(fmt!("%?_u64", v));
- }
-
- fn emit_u32(&self, v: u32) {
- self.wr.write_str(fmt!("%?_u32", v));
- }
-
- fn emit_u16(&self, v: u16) {
- self.wr.write_str(fmt!("%?_u16", v));
- }
-
- fn emit_u8(&self, v: u8) {
- self.wr.write_str(fmt!("%?_u8", v));
- }
-
- fn emit_int(&self, v: int) {
- self.wr.write_str(fmt!("%?", v));
- }
-
- fn emit_i64(&self, v: i64) {
- self.wr.write_str(fmt!("%?_i64", v));
- }
-
- fn emit_i32(&self, v: i32) {
- self.wr.write_str(fmt!("%?_i32", v));
- }
-
- fn emit_i16(&self, v: i16) {
- self.wr.write_str(fmt!("%?_i16", v));
- }
-
- fn emit_i8(&self, v: i8) {
- self.wr.write_str(fmt!("%?_i8", v));
- }
-
- fn emit_bool(&self, v: bool) {
- self.wr.write_str(fmt!("%b", v));
- }
-
- fn emit_float(&self, v: float) {
- self.wr.write_str(fmt!("%?_f", v));
- }
-
- fn emit_f64(&self, v: f64) {
- self.wr.write_str(fmt!("%?_f64", v));
- }
-
- fn emit_f32(&self, v: f32) {
- self.wr.write_str(fmt!("%?_f32", v));
- }
-
- fn emit_char(&self, v: char) {
- self.wr.write_str(fmt!("%?", v));
- }
-
- fn emit_borrowed_str(&self, v: &str) {
- self.wr.write_str(fmt!("&%?", v));
- }
-
- fn emit_owned_str(&self, v: &str) {
- self.wr.write_str(fmt!("~%?", v));
- }
-
- fn emit_managed_str(&self, v: &str) {
- self.wr.write_str(fmt!("@%?", v));
- }
-
- fn emit_borrowed(&self, f: fn()) {
- self.wr.write_str(~"&");
- f();
- }
-
- fn emit_owned(&self, f: fn()) {
- self.wr.write_str(~"~");
- f();
- }
-
- fn emit_managed(&self, f: fn()) {
- self.wr.write_str(~"@");
- f();
- }
-
- fn emit_enum(&self, _name: &str, f: fn()) {
- f();
- }
-
- fn emit_enum_variant(&self, v_name: &str, _v_id: uint, sz: uint,
- f: fn()) {
- self.wr.write_str(v_name);
- if sz > 0u { self.wr.write_str(~"("); }
- f();
- if sz > 0u { self.wr.write_str(~")"); }
- }
-
- fn emit_enum_variant_arg(&self, idx: uint, f: fn()) {
- if idx > 0u { self.wr.write_str(~", "); }
- f();
- }
-
- fn emit_borrowed_vec(&self, _len: uint, f: fn()) {
- self.wr.write_str(~"&[");
- f();
- self.wr.write_str(~"]");
- }
-
- fn emit_owned_vec(&self, _len: uint, f: fn()) {
- self.wr.write_str(~"~[");
- f();
- self.wr.write_str(~"]");
- }
-
- fn emit_managed_vec(&self, _len: uint, f: fn()) {
- self.wr.write_str(~"@[");
- f();
- self.wr.write_str(~"]");
- }
-
- fn emit_vec_elt(&self, idx: uint, f: fn()) {
- if idx > 0u { self.wr.write_str(~", "); }
- f();
- }
-
- fn emit_rec(&self, f: fn()) {
- self.wr.write_str(~"{");
- f();
- self.wr.write_str(~"}");
- }
-
- fn emit_struct(&self, name: &str, f: fn()) {
- self.wr.write_str(fmt!("%s {", name));
- f();
- self.wr.write_str(~"}");
- }
-
- fn emit_field(&self, name: &str, idx: uint, f: fn()) {
- if idx > 0u { self.wr.write_str(~", "); }
- self.wr.write_str(name);
- self.wr.write_str(~": ");
- f();
- }
-
- fn emit_tup(&self, _len: uint, f: fn()) {
- self.wr.write_str(~"(");
- f();
- self.wr.write_str(~")");
- }
-
- fn emit_tup_elt(&self, idx: uint, f: fn()) {
- if idx > 0u { self.wr.write_str(~", "); }
- f();
- }
-}
//! Support code for serialization.
-#[allow(deprecated_mode)];
-
/*
Core serialization interfaces.
*/
+#[forbid(deprecated_mode)];
+#[forbid(non_camel_case_types)];
+
pub trait Serializer {
// Primitive types:
- fn emit_nil();
- fn emit_uint(v: uint);
- fn emit_u64(v: u64);
- fn emit_u32(v: u32);
- fn emit_u16(v: u16);
- fn emit_u8(v: u8);
- fn emit_int(v: int);
- fn emit_i64(v: i64);
- fn emit_i32(v: i32);
- fn emit_i16(v: i16);
- fn emit_i8(v: i8);
- fn emit_bool(v: bool);
- fn emit_float(v: float);
- fn emit_f64(v: f64);
- fn emit_f32(v: f32);
- fn emit_str(v: &str);
+ fn emit_nil(&self);
+ fn emit_uint(&self, v: uint);
+ fn emit_u64(&self, v: u64);
+ fn emit_u32(&self, v: u32);
+ fn emit_u16(&self, v: u16);
+ fn emit_u8(&self, v: u8);
+ fn emit_int(&self, v: int);
+ fn emit_i64(&self, v: i64);
+ fn emit_i32(&self, v: i32);
+ fn emit_i16(&self, v: i16);
+ fn emit_i8(&self, v: i8);
+ fn emit_bool(&self, v: bool);
+ fn emit_float(&self, v: float);
+ fn emit_f64(&self, v: f64);
+ fn emit_f32(&self, v: f32);
+ fn emit_char(&self, v: char);
+ fn emit_borrowed_str(&self, v: &str);
+ fn emit_owned_str(&self, v: &str);
+ fn emit_managed_str(&self, v: &str);
// Compound types:
- fn emit_enum(name: &str, f: fn());
- fn emit_enum_variant(v_name: &str, v_id: uint, sz: uint, f: fn());
- fn emit_enum_variant_arg(idx: uint, f: fn());
- fn emit_vec(len: uint, f: fn());
- fn emit_vec_elt(idx: uint, f: fn());
- fn emit_box(f: fn());
- fn emit_uniq(f: fn());
- fn emit_rec(f: fn());
- fn emit_rec_field(f_name: &str, f_idx: uint, f: fn());
- fn emit_tup(sz: uint, f: fn());
- fn emit_tup_elt(idx: uint, f: fn());
+ fn emit_borrowed(&self, f: fn());
+ fn emit_owned(&self, f: fn());
+ fn emit_managed(&self, f: fn());
+
+ fn emit_enum(&self, name: &str, f: fn());
+ fn emit_enum_variant(&self, v_name: &str, v_id: uint, sz: uint, f: fn());
+ fn emit_enum_variant_arg(&self, idx: uint, f: fn());
+
+ fn emit_borrowed_vec(&self, len: uint, f: fn());
+ fn emit_owned_vec(&self, len: uint, f: fn());
+ fn emit_managed_vec(&self, len: uint, f: fn());
+ fn emit_vec_elt(&self, idx: uint, f: fn());
+
+ fn emit_rec(&self, f: fn());
+ fn emit_struct(&self, name: &str, f: fn());
+ fn emit_field(&self, f_name: &str, f_idx: uint, f: fn());
+
+ fn emit_tup(&self, len: uint, f: fn());
+ fn emit_tup_elt(&self, idx: uint, f: fn());
}
pub trait Deserializer {
// Primitive types:
- fn read_nil() -> ();
+ fn read_nil(&self) -> ();
+ fn read_uint(&self) -> uint;
+ fn read_u64(&self) -> u64;
+ fn read_u32(&self) -> u32;
+ fn read_u16(&self) -> u16;
+ fn read_u8(&self) -> u8;
+ fn read_int(&self) -> int;
+ fn read_i64(&self) -> i64;
+ fn read_i32(&self) -> i32;
+ fn read_i16(&self) -> i16;
+ fn read_i8(&self) -> i8;
+ fn read_bool(&self) -> bool;
+ fn read_f64(&self) -> f64;
+ fn read_f32(&self) -> f32;
+ fn read_float(&self) -> float;
+ fn read_char(&self) -> char;
+ fn read_owned_str(&self) -> ~str;
+ fn read_managed_str(&self) -> @str;
- fn read_uint() -> uint;
- fn read_u64() -> u64;
- fn read_u32() -> u32;
- fn read_u16() -> u16;
- fn read_u8() -> u8;
+ // Compound types:
+ fn read_enum<T>(&self, name: &str, f: fn() -> T) -> T;
+ fn read_enum_variant<T>(&self, f: fn(uint) -> T) -> T;
+ fn read_enum_variant_arg<T>(&self, idx: uint, f: fn() -> T) -> T;
- fn read_int() -> int;
- fn read_i64() -> i64;
- fn read_i32() -> i32;
- fn read_i16() -> i16;
- fn read_i8() -> i8;
+ fn read_owned<T>(&self, f: fn() -> T) -> T;
+ fn read_managed<T>(&self, f: fn() -> T) -> T;
+ fn read_owned_vec<T>(&self, f: fn(uint) -> T) -> T;
+ fn read_managed_vec<T>(&self, f: fn(uint) -> T) -> T;
+ fn read_vec_elt<T>(&self, idx: uint, f: fn() -> T) -> T;
- fn read_bool() -> bool;
+ fn read_rec<T>(&self, f: fn() -> T) -> T;
+ fn read_struct<T>(&self, name: &str, f: fn() -> T) -> T;
+ fn read_field<T>(&self, name: &str, idx: uint, f: fn() -> T) -> T;
- fn read_str() -> ~str;
+ fn read_tup<T>(&self, sz: uint, f: fn() -> T) -> T;
+ fn read_tup_elt<T>(&self, idx: uint, f: fn() -> T) -> T;
+}
- fn read_f64() -> f64;
- fn read_f32() -> f32;
- fn read_float() -> float;
+#[cfg(stage0)]
+pub mod traits {
+pub trait Serializable {
+ fn serialize<S: Serializer>(&self, s: &S);
+}
- // Compound types:
- fn read_enum<T>(name: &str, f: fn() -> T) -> T;
- fn read_enum_variant<T>(f: fn(uint) -> T) -> T;
- fn read_enum_variant_arg<T>(idx: uint, f: fn() -> T) -> T;
- fn read_vec<T>(f: fn(uint) -> T) -> T;
- fn read_vec_elt<T>(idx: uint, f: fn() -> T) -> T;
- fn read_box<T>(f: fn() -> T) -> T;
- fn read_uniq<T>(f: fn() -> T) -> T;
- fn read_rec<T>(f: fn() -> T) -> T;
- fn read_rec_field<T>(f_name: &str, f_idx: uint, f: fn() -> T) -> T;
- fn read_tup<T>(sz: uint, f: fn() -> T) -> T;
- fn read_tup_elt<T>(idx: uint, f: fn() -> T) -> T;
+pub trait Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> self;
+}
+
+pub impl uint: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_uint(*self) }
+}
+
+pub impl uint: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> uint {
+ d.read_uint()
+ }
+}
+
+pub impl u8: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_u8(*self) }
+}
+
+pub impl u8: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> u8 {
+ d.read_u8()
+ }
+}
+
+pub impl u16: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_u16(*self) }
+}
+
+pub impl u16: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> u16 {
+ d.read_u16()
+ }
+}
+
+pub impl u32: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_u32(*self) }
+}
+
+pub impl u32: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> u32 {
+ d.read_u32()
+ }
+}
+
+pub impl u64: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_u64(*self) }
+}
+
+pub impl u64: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> u64 {
+ d.read_u64()
+ }
+}
+
+pub impl int: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_int(*self) }
+}
+
+pub impl int: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> int {
+ d.read_int()
+ }
+}
+
+pub impl i8: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_i8(*self) }
+}
+
+pub impl i8: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> i8 {
+ d.read_i8()
+ }
+}
+
+pub impl i16: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_i16(*self) }
+}
+
+pub impl i16: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> i16 {
+ d.read_i16()
+ }
+}
+
+pub impl i32: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_i32(*self) }
+}
+
+pub impl i32: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> i32 {
+ d.read_i32()
+ }
+}
+
+pub impl i64: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_i64(*self) }
+}
+
+pub impl i64: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> i64 {
+ d.read_i64()
+ }
+}
+
+pub impl &str: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_borrowed_str(*self) }
+}
+
+pub impl ~str: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_owned_str(*self) }
+}
+
+pub impl ~str: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> ~str {
+ d.read_owned_str()
+ }
+}
+
+pub impl @str: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_managed_str(*self) }
+}
+
+pub impl @str: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> @str {
+ d.read_managed_str()
+ }
+}
+
+pub impl float: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_float(*self) }
+}
+
+pub impl float: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> float {
+ d.read_float()
+ }
+}
+
+pub impl f32: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_f32(*self) }
+}
+
+pub impl f32: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> f32 {
+ d.read_f32() }
+}
+
+pub impl f64: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_f64(*self) }
+}
+
+pub impl f64: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> f64 {
+ d.read_f64()
+ }
+}
+
+pub impl bool: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_bool(*self) }
+}
+
+pub impl bool: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> bool {
+ d.read_bool()
+ }
+}
+
+pub impl (): Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) { s.emit_nil() }
+}
+
+pub impl (): Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> () {
+ d.read_nil()
+ }
+}
+
+pub impl<T: Serializable> &T: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ s.emit_borrowed(|| (**self).serialize(s))
+ }
+}
+
+pub impl<T: Serializable> ~T: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ s.emit_owned(|| (**self).serialize(s))
+ }
+}
+
+pub impl<T: Deserializable> ~T: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> ~T {
+ d.read_owned(|| ~deserialize(d))
+ }
+}
+
+pub impl<T: Serializable> @T: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ s.emit_managed(|| (**self).serialize(s))
+ }
+}
+
+pub impl<T: Deserializable> @T: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> @T {
+ d.read_managed(|| @deserialize(d))
+ }
+}
+
+pub impl<T: Serializable> &[T]: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ do s.emit_borrowed_vec(self.len()) {
+ for self.eachi |i, e| {
+ s.emit_vec_elt(i, || e.serialize(s))
+ }
+ }
+ }
+}
+
+pub impl<T: Serializable> ~[T]: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ do s.emit_owned_vec(self.len()) {
+ for self.eachi |i, e| {
+ s.emit_vec_elt(i, || e.serialize(s))
+ }
+ }
+ }
+}
+
+pub impl<T: Deserializable> ~[T]: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> ~[T] {
+ do d.read_owned_vec |len| {
+ do vec::from_fn(len) |i| {
+ d.read_vec_elt(i, || deserialize(d))
+ }
+ }
+ }
+}
+
+pub impl<T: Serializable> @[T]: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ do s.emit_managed_vec(self.len()) {
+ for self.eachi |i, e| {
+ s.emit_vec_elt(i, || e.serialize(s))
+ }
+ }
+ }
+}
+
+pub impl<T: Deserializable> @[T]: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> @[T] {
+ do d.read_managed_vec |len| {
+ do at_vec::from_fn(len) |i| {
+ d.read_vec_elt(i, || deserialize(d))
+ }
+ }
+ }
+}
+
+pub impl<T: Serializable> Option<T>: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ do s.emit_enum(~"option") {
+ match *self {
+ None => do s.emit_enum_variant(~"none", 0u, 0u) {
+ },
+
+ Some(ref v) => do s.emit_enum_variant(~"some", 1u, 1u) {
+ s.emit_enum_variant_arg(0u, || v.serialize(s))
+ }
+ }
+ }
+ }
+}
+
+pub impl<T: Deserializable> Option<T>: Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> Option<T> {
+ do d.read_enum(~"option") {
+ do d.read_enum_variant |i| {
+ match i {
+ 0 => None,
+ 1 => Some(d.read_enum_variant_arg(0u, || deserialize(d))),
+ _ => fail(fmt!("Bad variant for option: %u", i))
+ }
+ }
+ }
+ }
+}
+
+pub impl<
+ T0: Serializable,
+ T1: Serializable
+> (T0, T1): Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1) => {
+ do s.emit_tup(2) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ }
+ }
+ }
+ }
+}
+
+pub impl<
+ T0: Deserializable,
+ T1: Deserializable
+> (T0, T1): Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> (T0, T1) {
+ do d.read_tup(2) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d))
+ )
+ }
+ }
+}
+
+pub impl<
+ T0: Serializable,
+ T1: Serializable,
+ T2: Serializable
+> (T0, T1, T2): Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1, ref t2) => {
+ do s.emit_tup(3) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ s.emit_tup_elt(2, || t2.serialize(s));
+ }
+ }
+ }
+ }
+}
+
+pub impl<
+ T0: Deserializable,
+ T1: Deserializable,
+ T2: Deserializable
+> (T0, T1, T2): Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> (T0, T1, T2) {
+ do d.read_tup(3) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d)),
+ d.read_tup_elt(2, || deserialize(d))
+ )
+ }
+ }
+}
+
+pub impl<
+ T0: Serializable,
+ T1: Serializable,
+ T2: Serializable,
+ T3: Serializable
+> (T0, T1, T2, T3): Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1, ref t2, ref t3) => {
+ do s.emit_tup(4) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ s.emit_tup_elt(2, || t2.serialize(s));
+ s.emit_tup_elt(3, || t3.serialize(s));
+ }
+ }
+ }
+ }
+}
+
+pub impl<
+ T0: Deserializable,
+ T1: Deserializable,
+ T2: Deserializable,
+ T3: Deserializable
+> (T0, T1, T2, T3): Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D) -> (T0, T1, T2, T3) {
+ do d.read_tup(4) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d)),
+ d.read_tup_elt(2, || deserialize(d)),
+ d.read_tup_elt(3, || deserialize(d))
+ )
+ }
+ }
+}
+
+pub impl<
+ T0: Serializable,
+ T1: Serializable,
+ T2: Serializable,
+ T3: Serializable,
+ T4: Serializable
+> (T0, T1, T2, T3, T4): Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1, ref t2, ref t3, ref t4) => {
+ do s.emit_tup(5) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ s.emit_tup_elt(2, || t2.serialize(s));
+ s.emit_tup_elt(3, || t3.serialize(s));
+ s.emit_tup_elt(4, || t4.serialize(s));
+ }
+ }
+ }
+ }
+}
+
+pub impl<
+ T0: Deserializable,
+ T1: Deserializable,
+ T2: Deserializable,
+ T3: Deserializable,
+ T4: Deserializable
+> (T0, T1, T2, T3, T4): Deserializable {
+ static fn deserialize<D: Deserializer>(&self, d: &D)
+ -> (T0, T1, T2, T3, T4) {
+ do d.read_tup(5) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d)),
+ d.read_tup_elt(2, || deserialize(d)),
+ d.read_tup_elt(3, || deserialize(d)),
+ d.read_tup_elt(4, || deserialize(d))
+ )
+ }
+ }
}
// ___________________________________________________________________________
//
// In some cases, these should eventually be coded as traits.
-pub fn emit_from_vec<S: Serializer, T>(&&s: S, &&v: ~[T], f: fn(&&x: T)) {
- do s.emit_vec(vec::len(v)) {
- for vec::eachi(v) |i,e| {
- do s.emit_vec_elt(i) {
- f(*e)
+pub trait SerializerHelpers {
+ fn emit_from_vec<T>(&self, v: &[T], f: fn(&T));
+}
+
+pub impl<S: Serializer> S: SerializerHelpers {
+ fn emit_from_vec<T>(&self, v: &[T], f: fn(&T)) {
+ do self.emit_owned_vec(v.len()) {
+ for v.eachi |i, e| {
+ do self.emit_vec_elt(i) {
+ f(e)
+ }
}
}
}
}
-pub fn read_to_vec<D: Deserializer, T: Copy>(&&d: D, f: fn() -> T) -> ~[T] {
- do d.read_vec |len| {
- do vec::from_fn(len) |i| {
- d.read_vec_elt(i, || f())
+pub trait DeserializerHelpers {
+ fn read_to_vec<T>(&self, f: fn() -> T) -> ~[T];
+}
+
+pub impl<D: Deserializer> D: DeserializerHelpers {
+ fn read_to_vec<T>(&self, f: fn() -> T) -> ~[T] {
+ do self.read_owned_vec |len| {
+ do vec::from_fn(len) |i| {
+ self.read_vec_elt(i, || f())
+ }
}
}
}
+}
-pub trait SerializerHelpers {
- fn emit_from_vec<T>(&&v: ~[T], f: fn(&&x: T));
+#[cfg(stage1)]
+#[cfg(stage2)]
+pub mod traits {
+pub trait Serializable<S: Serializer> {
+ fn serialize(&self, s: &S);
+}
+
+pub trait Deserializable<D: Deserializer> {
+ static fn deserialize(&self, d: &D) -> self;
+}
+
+pub impl<S: Serializer> uint: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_uint(*self) }
}
-impl<S: Serializer> S: SerializerHelpers {
- fn emit_from_vec<T>(&&v: ~[T], f: fn(&&x: T)) {
- emit_from_vec(self, v, f)
+pub impl<D: Deserializer> uint: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> uint {
+ d.read_uint()
}
}
-pub trait DeserializerHelpers {
- fn read_to_vec<T: Copy>(f: fn() -> T) -> ~[T];
+pub impl<S: Serializer> u8: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_u8(*self) }
+}
+
+pub impl<D: Deserializer> u8: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> u8 {
+ d.read_u8()
+ }
+}
+
+pub impl<S: Serializer> u16: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_u16(*self) }
+}
+
+pub impl<D: Deserializer> u16: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> u16 {
+ d.read_u16()
+ }
+}
+
+pub impl<S: Serializer> u32: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_u32(*self) }
+}
+
+pub impl<D: Deserializer> u32: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> u32 {
+ d.read_u32()
+ }
+}
+
+pub impl<S: Serializer> u64: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_u64(*self) }
+}
+
+pub impl<D: Deserializer> u64: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> u64 {
+ d.read_u64()
+ }
+}
+
+pub impl<S: Serializer> int: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_int(*self) }
+}
+
+pub impl<D: Deserializer> int: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> int {
+ d.read_int()
+ }
+}
+
+pub impl<S: Serializer> i8: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_i8(*self) }
+}
+
+pub impl<D: Deserializer> i8: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> i8 {
+ d.read_i8()
+ }
+}
+
+pub impl<S: Serializer> i16: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_i16(*self) }
+}
+
+pub impl<D: Deserializer> i16: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> i16 {
+ d.read_i16()
+ }
+}
+
+pub impl<S: Serializer> i32: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_i32(*self) }
+}
+
+pub impl<D: Deserializer> i32: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> i32 {
+ d.read_i32()
+ }
+}
+
+pub impl<S: Serializer> i64: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_i64(*self) }
}
-impl<D: Deserializer> D: DeserializerHelpers {
- fn read_to_vec<T: Copy>(f: fn() -> T) -> ~[T] {
- read_to_vec(self, f)
+pub impl<D: Deserializer> i64: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> i64 {
+ d.read_i64()
}
}
-pub fn serialize_uint<S: Serializer>(&&s: S, v: uint) {
- s.emit_uint(v);
+pub impl<S: Serializer> &str: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_borrowed_str(*self) }
+}
+
+pub impl<S: Serializer> ~str: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_owned_str(*self) }
}
-pub fn deserialize_uint<D: Deserializer>(&&d: D) -> uint {
- d.read_uint()
+pub impl<D: Deserializer> ~str: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> ~str {
+ d.read_owned_str()
+ }
}
-pub fn serialize_u8<S: Serializer>(&&s: S, v: u8) {
- s.emit_u8(v);
+pub impl<S: Serializer> @str: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_managed_str(*self) }
}
-pub fn deserialize_u8<D: Deserializer>(&&d: D) -> u8 {
- d.read_u8()
+pub impl<D: Deserializer> @str: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> @str {
+ d.read_managed_str()
+ }
}
-pub fn serialize_u16<S: Serializer>(&&s: S, v: u16) {
- s.emit_u16(v);
+pub impl<S: Serializer> float: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_float(*self) }
}
-pub fn deserialize_u16<D: Deserializer>(&&d: D) -> u16 {
- d.read_u16()
+pub impl<D: Deserializer> float: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> float {
+ d.read_float()
+ }
}
-pub fn serialize_u32<S: Serializer>(&&s: S, v: u32) {
- s.emit_u32(v);
+pub impl<S: Serializer> f32: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_f32(*self) }
}
-pub fn deserialize_u32<D: Deserializer>(&&d: D) -> u32 {
- d.read_u32()
+pub impl<D: Deserializer> f32: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> f32 {
+ d.read_f32() }
}
-pub fn serialize_u64<S: Serializer>(&&s: S, v: u64) {
- s.emit_u64(v);
+pub impl<S: Serializer> f64: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_f64(*self) }
}
-pub fn deserialize_u64<D: Deserializer>(&&d: D) -> u64 {
- d.read_u64()
+pub impl<D: Deserializer> f64: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> f64 {
+ d.read_f64()
+ }
}
-pub fn serialize_int<S: Serializer>(&&s: S, v: int) {
- s.emit_int(v);
+pub impl<S: Serializer> bool: Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_bool(*self) }
}
-pub fn deserialize_int<D: Deserializer>(&&d: D) -> int {
- d.read_int()
+pub impl<D: Deserializer> bool: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> bool {
+ d.read_bool()
+ }
}
-pub fn serialize_i8<S: Serializer>(&&s: S, v: i8) {
- s.emit_i8(v);
+pub impl<S: Serializer> (): Serializable<S> {
+ fn serialize(&self, s: &S) { s.emit_nil() }
}
-pub fn deserialize_i8<D: Deserializer>(&&d: D) -> i8 {
- d.read_i8()
+pub impl<D: Deserializer> (): Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> () {
+ d.read_nil()
+ }
}
-pub fn serialize_i16<S: Serializer>(&&s: S, v: i16) {
- s.emit_i16(v);
+pub impl<S: Serializer, T: Serializable<S>> &T: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ s.emit_borrowed(|| (**self).serialize(s))
+ }
}
-pub fn deserialize_i16<D: Deserializer>(&&d: D) -> i16 {
- d.read_i16()
+pub impl<S: Serializer, T: Serializable<S>> ~T: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ s.emit_owned(|| (**self).serialize(s))
+ }
}
-pub fn serialize_i32<S: Serializer>(&&s: S, v: i32) {
- s.emit_i32(v);
+pub impl<D: Deserializer, T: Deserializable<D>> ~T: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> ~T {
+ d.read_owned(|| ~deserialize(d))
+ }
}
-pub fn deserialize_i32<D: Deserializer>(&&d: D) -> i32 {
- d.read_i32()
+pub impl<S: Serializer, T: Serializable<S>> @T: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ s.emit_managed(|| (**self).serialize(s))
+ }
}
-pub fn serialize_i64<S: Serializer>(&&s: S, v: i64) {
- s.emit_i64(v);
+pub impl<D: Deserializer, T: Deserializable<D>> @T: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> @T {
+ d.read_managed(|| @deserialize(d))
+ }
}
-pub fn deserialize_i64<D: Deserializer>(&&d: D) -> i64 {
- d.read_i64()
+pub impl<S: Serializer, T: Serializable<S>> &[T]: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ do s.emit_borrowed_vec(self.len()) {
+ for self.eachi |i, e| {
+ s.emit_vec_elt(i, || e.serialize(s))
+ }
+ }
+ }
+}
+
+pub impl<S: Serializer, T: Serializable<S>> ~[T]: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ do s.emit_owned_vec(self.len()) {
+ for self.eachi |i, e| {
+ s.emit_vec_elt(i, || e.serialize(s))
+ }
+ }
+ }
+}
+
+pub impl<D: Deserializer, T: Deserializable<D>> ~[T]: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> ~[T] {
+ do d.read_owned_vec |len| {
+ do vec::from_fn(len) |i| {
+ d.read_vec_elt(i, || deserialize(d))
+ }
+ }
+ }
+}
+
+pub impl<S: Serializer, T: Serializable<S>> @[T]: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ do s.emit_managed_vec(self.len()) {
+ for self.eachi |i, e| {
+ s.emit_vec_elt(i, || e.serialize(s))
+ }
+ }
+ }
+}
+
+pub impl<D: Deserializer, T: Deserializable<D>> @[T]: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> @[T] {
+ do d.read_managed_vec |len| {
+ do at_vec::from_fn(len) |i| {
+ d.read_vec_elt(i, || deserialize(d))
+ }
+ }
+ }
}
-pub fn serialize_str<S: Serializer>(&&s: S, v: &str) {
- s.emit_str(v);
+pub impl<S: Serializer, T: Serializable<S>> Option<T>: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ do s.emit_enum(~"option") {
+ match *self {
+ None => do s.emit_enum_variant(~"none", 0u, 0u) {
+ },
+
+ Some(ref v) => do s.emit_enum_variant(~"some", 1u, 1u) {
+ s.emit_enum_variant_arg(0u, || v.serialize(s))
+ }
+ }
+ }
+ }
}
-pub fn deserialize_str<D: Deserializer>(&&d: D) -> ~str {
- d.read_str()
+pub impl<D: Deserializer, T: Deserializable<D>> Option<T>: Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> Option<T> {
+ do d.read_enum(~"option") {
+ do d.read_enum_variant |i| {
+ match i {
+ 0 => None,
+ 1 => Some(d.read_enum_variant_arg(0u, || deserialize(d))),
+ _ => fail(#fmt("Bad variant for option: %u", i))
+ }
+ }
+ }
+ }
}
-pub fn serialize_float<S: Serializer>(&&s: S, v: float) {
- s.emit_float(v);
+pub impl<
+ S: Serializer,
+ T0: Serializable<S>,
+ T1: Serializable<S>
+> (T0, T1): Serializable<S> {
+ fn serialize(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1) => {
+ do s.emit_tup(2) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ }
+ }
+ }
+ }
}
-pub fn deserialize_float<D: Deserializer>(&&d: D) -> float {
- d.read_float()
+pub impl<
+ D: Deserializer,
+ T0: Deserializable<D>,
+ T1: Deserializable<D>
+> (T0, T1): Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> (T0, T1) {
+ do d.read_tup(2) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d))
+ )
+ }
+ }
}
-pub fn serialize_f32<S: Serializer>(&&s: S, v: f32) {
- s.emit_f32(v);
+pub impl<
+ S: Serializer,
+ T0: Serializable<S>,
+ T1: Serializable<S>,
+ T2: Serializable<S>
+> (T0, T1, T2): Serializable<S> {
+ fn serialize(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1, ref t2) => {
+ do s.emit_tup(3) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ s.emit_tup_elt(2, || t2.serialize(s));
+ }
+ }
+ }
+ }
}
-pub fn deserialize_f32<D: Deserializer>(&&d: D) -> f32 {
- d.read_f32()
+pub impl<
+ D: Deserializer,
+ T0: Deserializable<D>,
+ T1: Deserializable<D>,
+ T2: Deserializable<D>
+> (T0, T1, T2): Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> (T0, T1, T2) {
+ do d.read_tup(3) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d)),
+ d.read_tup_elt(2, || deserialize(d))
+ )
+ }
+ }
}
-pub fn serialize_f64<S: Serializer>(&&s: S, v: f64) {
- s.emit_f64(v);
+pub impl<
+ S: Serializer,
+ T0: Serializable<S>,
+ T1: Serializable<S>,
+ T2: Serializable<S>,
+ T3: Serializable<S>
+> (T0, T1, T2, T3): Serializable<S> {
+ fn serialize(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1, ref t2, ref t3) => {
+ do s.emit_tup(4) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ s.emit_tup_elt(2, || t2.serialize(s));
+ s.emit_tup_elt(3, || t3.serialize(s));
+ }
+ }
+ }
+ }
}
-pub fn deserialize_f64<D: Deserializer>(&&d: D) -> f64 {
- d.read_f64()
+pub impl<
+ D: Deserializer,
+ T0: Deserializable<D>,
+ T1: Deserializable<D>,
+ T2: Deserializable<D>,
+ T3: Deserializable<D>
+> (T0, T1, T2, T3): Deserializable<D> {
+ static fn deserialize(&self, d: &D) -> (T0, T1, T2, T3) {
+ do d.read_tup(4) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d)),
+ d.read_tup_elt(2, || deserialize(d)),
+ d.read_tup_elt(3, || deserialize(d))
+ )
+ }
+ }
}
-pub fn serialize_bool<S: Serializer>(&&s: S, v: bool) {
- s.emit_bool(v);
+pub impl<
+ S: Serializer,
+ T0: Serializable<S>,
+ T1: Serializable<S>,
+ T2: Serializable<S>,
+ T3: Serializable<S>,
+ T4: Serializable<S>
+> (T0, T1, T2, T3, T4): Serializable<S> {
+ fn serialize(&self, s: &S) {
+ match *self {
+ (ref t0, ref t1, ref t2, ref t3, ref t4) => {
+ do s.emit_tup(5) {
+ s.emit_tup_elt(0, || t0.serialize(s));
+ s.emit_tup_elt(1, || t1.serialize(s));
+ s.emit_tup_elt(2, || t2.serialize(s));
+ s.emit_tup_elt(3, || t3.serialize(s));
+ s.emit_tup_elt(4, || t4.serialize(s));
+ }
+ }
+ }
+ }
}
-pub fn deserialize_bool<D: Deserializer>(&&d: D) -> bool {
- d.read_bool()
+pub impl<
+ D: Deserializer,
+ T0: Deserializable<D>,
+ T1: Deserializable<D>,
+ T2: Deserializable<D>,
+ T3: Deserializable<D>,
+ T4: Deserializable<D>
+> (T0, T1, T2, T3, T4): Deserializable<D> {
+ static fn deserialize(&self, d: &D)
+ -> (T0, T1, T2, T3, T4) {
+ do d.read_tup(5) {
+ (
+ d.read_tup_elt(0, || deserialize(d)),
+ d.read_tup_elt(1, || deserialize(d)),
+ d.read_tup_elt(2, || deserialize(d)),
+ d.read_tup_elt(3, || deserialize(d)),
+ d.read_tup_elt(4, || deserialize(d))
+ )
+ }
+ }
}
-pub fn serialize_Option<S: Serializer,T>(&&s: S, &&v: Option<T>,
- st: fn(&&x: T)) {
- do s.emit_enum(~"option") {
- match v {
- None => do s.emit_enum_variant(~"none", 0u, 0u) {
- },
+// ___________________________________________________________________________
+// Helper routines
+//
+// In some cases, these should eventually be coded as traits.
- Some(ref v) => do s.emit_enum_variant(~"some", 1u, 1u) {
- do s.emit_enum_variant_arg(0u) {
- st(*v)
+pub trait SerializerHelpers {
+ fn emit_from_vec<T>(&self, v: ~[T], f: fn(v: &T));
+}
+
+pub impl<S: Serializer> S: SerializerHelpers {
+ fn emit_from_vec<T>(&self, v: ~[T], f: fn(v: &T)) {
+ do self.emit_owned_vec(v.len()) {
+ for v.eachi |i, e| {
+ do self.emit_vec_elt(i) {
+ f(e)
+ }
}
- }
}
}
}
-pub fn deserialize_Option<D: Deserializer,T: Copy>(&&d: D, st: fn() -> T)
- -> Option<T> {
- do d.read_enum(~"option") {
- do d.read_enum_variant |i| {
- match i {
- 0 => None,
- 1 => Some(d.read_enum_variant_arg(0u, || st() )),
- _ => fail(#fmt("Bad variant for option: %u", i))
+pub trait DeserializerHelpers {
+ fn read_to_vec<T>(&self, f: fn() -> T) -> ~[T];
+}
+
+pub impl<D: Deserializer> D: DeserializerHelpers {
+ fn read_to_vec<T>(&self, f: fn() -> T) -> ~[T] {
+ do self.read_owned_vec |len| {
+ do vec::from_fn(len) |i| {
+ self.read_vec_elt(i, || f())
}
}
}
}
+}
+
+pub use traits::*;
+++ /dev/null
-//! Support code for serialization.
-
-/*
-Core serialization interfaces.
-*/
-
-#[forbid(deprecated_mode)];
-#[forbid(non_camel_case_types)];
-
-pub trait Serializer {
- // Primitive types:
- fn emit_nil(&self);
- fn emit_uint(&self, v: uint);
- fn emit_u64(&self, v: u64);
- fn emit_u32(&self, v: u32);
- fn emit_u16(&self, v: u16);
- fn emit_u8(&self, v: u8);
- fn emit_int(&self, v: int);
- fn emit_i64(&self, v: i64);
- fn emit_i32(&self, v: i32);
- fn emit_i16(&self, v: i16);
- fn emit_i8(&self, v: i8);
- fn emit_bool(&self, v: bool);
- fn emit_float(&self, v: float);
- fn emit_f64(&self, v: f64);
- fn emit_f32(&self, v: f32);
- fn emit_char(&self, v: char);
- fn emit_borrowed_str(&self, v: &str);
- fn emit_owned_str(&self, v: &str);
- fn emit_managed_str(&self, v: &str);
-
- // Compound types:
- fn emit_borrowed(&self, f: fn());
- fn emit_owned(&self, f: fn());
- fn emit_managed(&self, f: fn());
-
- fn emit_enum(&self, name: &str, f: fn());
- fn emit_enum_variant(&self, v_name: &str, v_id: uint, sz: uint, f: fn());
- fn emit_enum_variant_arg(&self, idx: uint, f: fn());
-
- fn emit_borrowed_vec(&self, len: uint, f: fn());
- fn emit_owned_vec(&self, len: uint, f: fn());
- fn emit_managed_vec(&self, len: uint, f: fn());
- fn emit_vec_elt(&self, idx: uint, f: fn());
-
- fn emit_rec(&self, f: fn());
- fn emit_struct(&self, name: &str, f: fn());
- fn emit_field(&self, f_name: &str, f_idx: uint, f: fn());
-
- fn emit_tup(&self, len: uint, f: fn());
- fn emit_tup_elt(&self, idx: uint, f: fn());
-}
-
-pub trait Deserializer {
- // Primitive types:
- fn read_nil(&self) -> ();
- fn read_uint(&self) -> uint;
- fn read_u64(&self) -> u64;
- fn read_u32(&self) -> u32;
- fn read_u16(&self) -> u16;
- fn read_u8(&self) -> u8;
- fn read_int(&self) -> int;
- fn read_i64(&self) -> i64;
- fn read_i32(&self) -> i32;
- fn read_i16(&self) -> i16;
- fn read_i8(&self) -> i8;
- fn read_bool(&self) -> bool;
- fn read_f64(&self) -> f64;
- fn read_f32(&self) -> f32;
- fn read_float(&self) -> float;
- fn read_char(&self) -> char;
- fn read_owned_str(&self) -> ~str;
- fn read_managed_str(&self) -> @str;
-
- // Compound types:
- fn read_enum<T>(&self, name: &str, f: fn() -> T) -> T;
- fn read_enum_variant<T>(&self, f: fn(uint) -> T) -> T;
- fn read_enum_variant_arg<T>(&self, idx: uint, f: fn() -> T) -> T;
-
- fn read_owned<T>(&self, f: fn() -> T) -> T;
- fn read_managed<T>(&self, f: fn() -> T) -> T;
-
- fn read_owned_vec<T>(&self, f: fn(uint) -> T) -> T;
- fn read_managed_vec<T>(&self, f: fn(uint) -> T) -> T;
- fn read_vec_elt<T>(&self, idx: uint, f: fn() -> T) -> T;
-
- fn read_rec<T>(&self, f: fn() -> T) -> T;
- fn read_struct<T>(&self, name: &str, f: fn() -> T) -> T;
- fn read_field<T>(&self, name: &str, idx: uint, f: fn() -> T) -> T;
-
- fn read_tup<T>(&self, sz: uint, f: fn() -> T) -> T;
- fn read_tup_elt<T>(&self, idx: uint, f: fn() -> T) -> T;
-}
-
-pub trait Serializable {
- fn serialize<S: Serializer>(&self, s: &S);
-}
-
-pub trait Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> self;
-}
-
-pub impl uint: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_uint(*self) }
-}
-
-pub impl uint: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> uint {
- d.read_uint()
- }
-}
-
-pub impl u8: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_u8(*self) }
-}
-
-pub impl u8: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> u8 {
- d.read_u8()
- }
-}
-
-pub impl u16: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_u16(*self) }
-}
-
-pub impl u16: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> u16 {
- d.read_u16()
- }
-}
-
-pub impl u32: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_u32(*self) }
-}
-
-pub impl u32: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> u32 {
- d.read_u32()
- }
-}
-
-pub impl u64: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_u64(*self) }
-}
-
-pub impl u64: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> u64 {
- d.read_u64()
- }
-}
-
-pub impl int: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_int(*self) }
-}
-
-pub impl int: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> int {
- d.read_int()
- }
-}
-
-pub impl i8: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_i8(*self) }
-}
-
-pub impl i8: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> i8 {
- d.read_i8()
- }
-}
-
-pub impl i16: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_i16(*self) }
-}
-
-pub impl i16: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> i16 {
- d.read_i16()
- }
-}
-
-pub impl i32: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_i32(*self) }
-}
-
-pub impl i32: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> i32 {
- d.read_i32()
- }
-}
-
-pub impl i64: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_i64(*self) }
-}
-
-pub impl i64: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> i64 {
- d.read_i64()
- }
-}
-
-pub impl &str: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_borrowed_str(*self) }
-}
-
-pub impl ~str: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_owned_str(*self) }
-}
-
-pub impl ~str: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> ~str {
- d.read_owned_str()
- }
-}
-
-pub impl @str: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_managed_str(*self) }
-}
-
-pub impl @str: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> @str {
- d.read_managed_str()
- }
-}
-
-pub impl float: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_float(*self) }
-}
-
-pub impl float: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> float {
- d.read_float()
- }
-}
-
-pub impl f32: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_f32(*self) }
-}
-
-pub impl f32: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> f32 {
- d.read_f32() }
-}
-
-pub impl f64: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_f64(*self) }
-}
-
-pub impl f64: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> f64 {
- d.read_f64()
- }
-}
-
-pub impl bool: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_bool(*self) }
-}
-
-pub impl bool: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> bool {
- d.read_bool()
- }
-}
-
-pub impl (): Serializable {
- fn serialize<S: Serializer>(&self, s: &S) { s.emit_nil() }
-}
-
-pub impl (): Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> () {
- d.read_nil()
- }
-}
-
-pub impl<T: Serializable> &T: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- s.emit_borrowed(|| (**self).serialize(s))
- }
-}
-
-pub impl<T: Serializable> ~T: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- s.emit_owned(|| (**self).serialize(s))
- }
-}
-
-pub impl<T: Deserializable> ~T: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> ~T {
- d.read_owned(|| ~deserialize(d))
- }
-}
-
-pub impl<T: Serializable> @T: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- s.emit_managed(|| (**self).serialize(s))
- }
-}
-
-pub impl<T: Deserializable> @T: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> @T {
- d.read_managed(|| @deserialize(d))
- }
-}
-
-pub impl<T: Serializable> &[T]: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- do s.emit_borrowed_vec(self.len()) {
- for self.eachi |i, e| {
- s.emit_vec_elt(i, || e.serialize(s))
- }
- }
- }
-}
-
-pub impl<T: Serializable> ~[T]: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- do s.emit_owned_vec(self.len()) {
- for self.eachi |i, e| {
- s.emit_vec_elt(i, || e.serialize(s))
- }
- }
- }
-}
-
-pub impl<T: Deserializable> ~[T]: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> ~[T] {
- do d.read_owned_vec |len| {
- do vec::from_fn(len) |i| {
- d.read_vec_elt(i, || deserialize(d))
- }
- }
- }
-}
-
-pub impl<T: Serializable> @[T]: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- do s.emit_managed_vec(self.len()) {
- for self.eachi |i, e| {
- s.emit_vec_elt(i, || e.serialize(s))
- }
- }
- }
-}
-
-pub impl<T: Deserializable> @[T]: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> @[T] {
- do d.read_managed_vec |len| {
- do at_vec::from_fn(len) |i| {
- d.read_vec_elt(i, || deserialize(d))
- }
- }
- }
-}
-
-pub impl<T: Serializable> Option<T>: Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- do s.emit_enum(~"option") {
- match *self {
- None => do s.emit_enum_variant(~"none", 0u, 0u) {
- },
-
- Some(ref v) => do s.emit_enum_variant(~"some", 1u, 1u) {
- s.emit_enum_variant_arg(0u, || v.serialize(s))
- }
- }
- }
- }
-}
-
-pub impl<T: Deserializable> Option<T>: Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> Option<T> {
- do d.read_enum(~"option") {
- do d.read_enum_variant |i| {
- match i {
- 0 => None,
- 1 => Some(d.read_enum_variant_arg(0u, || deserialize(d))),
- _ => fail(#fmt("Bad variant for option: %u", i))
- }
- }
- }
- }
-}
-
-pub impl<
- T0: Serializable,
- T1: Serializable
-> (T0, T1): Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- match *self {
- (ref t0, ref t1) => {
- do s.emit_tup(2) {
- s.emit_tup_elt(0, || t0.serialize(s));
- s.emit_tup_elt(1, || t1.serialize(s));
- }
- }
- }
- }
-}
-
-pub impl<
- T0: Deserializable,
- T1: Deserializable
-> (T0, T1): Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> (T0, T1) {
- do d.read_tup(2) {
- (
- d.read_tup_elt(0, || deserialize(d)),
- d.read_tup_elt(1, || deserialize(d))
- )
- }
- }
-}
-
-pub impl<
- T0: Serializable,
- T1: Serializable,
- T2: Serializable
-> (T0, T1, T2): Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- match *self {
- (ref t0, ref t1, ref t2) => {
- do s.emit_tup(3) {
- s.emit_tup_elt(0, || t0.serialize(s));
- s.emit_tup_elt(1, || t1.serialize(s));
- s.emit_tup_elt(2, || t2.serialize(s));
- }
- }
- }
- }
-}
-
-pub impl<
- T0: Deserializable,
- T1: Deserializable,
- T2: Deserializable
-> (T0, T1, T2): Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> (T0, T1, T2) {
- do d.read_tup(3) {
- (
- d.read_tup_elt(0, || deserialize(d)),
- d.read_tup_elt(1, || deserialize(d)),
- d.read_tup_elt(2, || deserialize(d))
- )
- }
- }
-}
-
-pub impl<
- T0: Serializable,
- T1: Serializable,
- T2: Serializable,
- T3: Serializable
-> (T0, T1, T2, T3): Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- match *self {
- (ref t0, ref t1, ref t2, ref t3) => {
- do s.emit_tup(4) {
- s.emit_tup_elt(0, || t0.serialize(s));
- s.emit_tup_elt(1, || t1.serialize(s));
- s.emit_tup_elt(2, || t2.serialize(s));
- s.emit_tup_elt(3, || t3.serialize(s));
- }
- }
- }
- }
-}
-
-pub impl<
- T0: Deserializable,
- T1: Deserializable,
- T2: Deserializable,
- T3: Deserializable
-> (T0, T1, T2, T3): Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D) -> (T0, T1, T2, T3) {
- do d.read_tup(4) {
- (
- d.read_tup_elt(0, || deserialize(d)),
- d.read_tup_elt(1, || deserialize(d)),
- d.read_tup_elt(2, || deserialize(d)),
- d.read_tup_elt(3, || deserialize(d))
- )
- }
- }
-}
-
-pub impl<
- T0: Serializable,
- T1: Serializable,
- T2: Serializable,
- T3: Serializable,
- T4: Serializable
-> (T0, T1, T2, T3, T4): Serializable {
- fn serialize<S: Serializer>(&self, s: &S) {
- match *self {
- (ref t0, ref t1, ref t2, ref t3, ref t4) => {
- do s.emit_tup(5) {
- s.emit_tup_elt(0, || t0.serialize(s));
- s.emit_tup_elt(1, || t1.serialize(s));
- s.emit_tup_elt(2, || t2.serialize(s));
- s.emit_tup_elt(3, || t3.serialize(s));
- s.emit_tup_elt(4, || t4.serialize(s));
- }
- }
- }
- }
-}
-
-pub impl<
- T0: Deserializable,
- T1: Deserializable,
- T2: Deserializable,
- T3: Deserializable,
- T4: Deserializable
-> (T0, T1, T2, T3, T4): Deserializable {
- static fn deserialize<D: Deserializer>(&self, d: &D)
- -> (T0, T1, T2, T3, T4) {
- do d.read_tup(5) {
- (
- d.read_tup_elt(0, || deserialize(d)),
- d.read_tup_elt(1, || deserialize(d)),
- d.read_tup_elt(2, || deserialize(d)),
- d.read_tup_elt(3, || deserialize(d)),
- d.read_tup_elt(4, || deserialize(d))
- )
- }
- }
-}
-
-// ___________________________________________________________________________
-// Helper routines
-//
-// In some cases, these should eventually be coded as traits.
-
-pub trait SerializerHelpers {
- fn emit_from_vec<T>(&self, v: &[T], f: fn(&T));
-}
-
-pub impl<S: Serializer> S: SerializerHelpers {
- fn emit_from_vec<T>(&self, v: &[T], f: fn(&T)) {
- do self.emit_owned_vec(v.len()) {
- for v.eachi |i, e| {
- do self.emit_vec_elt(i) {
- f(e)
- }
- }
- }
- }
-}
-
-pub trait DeserializerHelpers {
- fn read_to_vec<T>(&self, f: fn() -> T) -> ~[T];
-}
-
-pub impl<D: Deserializer> D: DeserializerHelpers {
- fn read_to_vec<T>(&self, f: fn() -> T) -> ~[T] {
- do self.read_owned_vec |len| {
- do vec::from_fn(len) |i| {
- self.read_vec_elt(i, || f())
- }
- }
- }
-}
do sort::quick_sort(names) |x, y| { int::le(*x, *y) };
- let immut_names = vec::from_mut(names);
+ let immut_names = vec::from_mut(move names);
let pairs = vec::zip(expected, immut_names);
for vec::each(pairs) |p| {
*/
#[link(name = "std",
- vers = "0.4",
+ vers = "0.5",
uuid = "122bed0b-c19b-4b82-b0b7-7ae8aead7297",
url = "https://github.com/mozilla/rust/tree/master/src/libstd")];
#[allow(deprecated_mode)];
#[forbid(deprecated_pattern)];
-extern mod core(vers = "0.4");
+extern mod core(vers = "0.5");
use core::*;
// General io and system-services modules
// And ... other stuff
pub mod ebml;
-pub mod ebml2;
pub mod dbg;
pub mod getopts;
pub mod json;
pub mod term;
pub mod time;
pub mod prettyprint;
-pub mod prettyprint2;
pub mod arena;
pub mod par;
pub mod cmp;
pub mod test;
pub mod serialization;
-pub mod serialization2;
// Local Variables:
// mode: rust;
fn new_waitqueue() -> Waitqueue {
let (block_tail, block_head) = pipes::stream();
- Waitqueue { head: block_head, tail: block_tail }
+ Waitqueue { head: move block_head, tail: move block_tail }
}
// Signals one live task from the queue.
#[doc(hidden)]
fn new_sem<Q: Send>(count: int, q: Q) -> Sem<Q> {
Sem(exclusive(SemInner {
- mut count: count, waiters: new_waitqueue(), blocked: q }))
+ mut count: count, waiters: new_waitqueue(), blocked: move q }))
}
#[doc(hidden)]
fn new_sem_and_signal(count: int, num_condvars: uint)
}
}
-// FIXME(#3136) should go inside of access()
+// FIXME(#3588) should go inside of access()
#[doc(hidden)]
struct SemRelease {
sem: &Sem<()>,
}
}
-// FIXME(#3136) should go inside of read()
+// FIXME(#3588) should go inside of read()
#[doc(hidden)]
struct RWlockReleaseRead {
lock: &RWlock,
}
}
-// FIXME(#3136) should go inside of downgrade()
+// FIXME(#3588) should go inside of downgrade()
#[doc(hidden)]
struct RWlockReleaseDowngrade {
lock: &RWlock,
fn test_sem_as_mutex() {
let s = ~semaphore(1);
let s2 = ~s.clone();
- do task::spawn {
+ do task::spawn |move s2| {
do s2.access {
for 5.times { task::yield(); }
}
let (c,p) = pipes::stream();
let s = ~semaphore(0);
let s2 = ~s.clone();
- do task::spawn {
+ do task::spawn |move s2, move c| {
s2.acquire();
c.send(());
}
let (c,p) = pipes::stream();
let s = ~semaphore(0);
let s2 = ~s.clone();
- do task::spawn {
+ do task::spawn |move s2, move p| {
for 5.times { task::yield(); }
s2.release();
let _ = p.recv();
let s2 = ~s.clone();
let (c1,p1) = pipes::stream();
let (c2,p2) = pipes::stream();
- do task::spawn {
+ do task::spawn |move s2, move c1, move p2| {
do s2.access {
let _ = p2.recv();
c1.send(());
let s = ~semaphore(1);
let s2 = ~s.clone();
let (c,p) = pipes::stream();
- let child_data = ~mut Some((s2,c));
+ let child_data = ~mut Some((move s2, move c));
do s.access {
let (s2,c) = option::swap_unwrap(child_data);
- do task::spawn {
+ do task::spawn |move c, move s2| {
c.send(());
do s2.access { }
c.send(());
let m2 = ~m.clone();
let mut sharedstate = ~0;
let ptr = ptr::addr_of(&(*sharedstate));
- do task::spawn {
+ do task::spawn |move m2, move c| {
let sharedstate: &mut int =
unsafe { cast::reinterpret_cast(&ptr) };
access_shared(sharedstate, m2, 10);
// Child wakes up parent
do m.lock_cond |cond| {
let m2 = ~m.clone();
- do task::spawn {
+ do task::spawn |move m2| {
do m2.lock_cond |cond| {
let woken = cond.signal();
assert woken;
// Parent wakes up child
let (chan,port) = pipes::stream();
let m3 = ~m.clone();
- do task::spawn {
+ do task::spawn |move chan, move m3| {
do m3.lock_cond |cond| {
chan.send(());
cond.wait();
for num_waiters.times {
let mi = ~m.clone();
let (chan, port) = pipes::stream();
- ports.push(port);
- do task::spawn {
+ ports.push(move port);
+ do task::spawn |move chan, move mi| {
do mi.lock_cond |cond| {
chan.send(());
cond.wait();
fn test_mutex_cond_no_waiter() {
let m = ~Mutex();
let m2 = ~m.clone();
- do task::try {
+ do task::try |move m| {
do m.lock_cond |_x| { }
};
do m2.lock_cond |cond| {
let m = ~Mutex();
let m2 = ~m.clone();
- let result: result::Result<(),()> = do task::try {
+ let result: result::Result<(),()> = do task::try |move m2| {
do m2.lock {
fail;
}
let m = ~Mutex();
let m2 = ~m.clone();
- let result: result::Result<(),()> = do task::try {
+ let result: result::Result<(),()> = do task::try |move m2| {
let (c,p) = pipes::stream();
- do task::spawn { // linked
+ do task::spawn |move p| { // linked
let _ = p.recv(); // wait for sibling to get in the mutex
task::yield();
fail;
let m2 = ~m.clone();
let (c,p) = pipes::stream();
- let result: result::Result<(),()> = do task::try {
+ let result: result::Result<(),()> = do task::try |move c, move m2| {
let mut sibling_convos = ~[];
for 2.times {
let (c,p) = pipes::stream();
- let c = ~mut Some(c);
- sibling_convos.push(p);
+ let c = ~mut Some(move c);
+ sibling_convos.push(move p);
let mi = ~m2.clone();
// spawn sibling task
- do task::spawn { // linked
+ do task::spawn |move mi, move c| { // linked
do mi.lock_cond |cond| {
let c = option::swap_unwrap(c);
c.send(()); // tell sibling to go ahead
- let _z = SendOnFailure(c);
+ let _z = SendOnFailure(move c);
cond.wait(); // block forever
}
}
let _ = p.recv(); // wait for sibling to get in the mutex
}
do m2.lock { }
- c.send(sibling_convos); // let parent wait on all children
+ c.send(move sibling_convos); // let parent wait on all children
fail;
};
assert result.is_err();
fn SendOnFailure(c: pipes::Chan<()>) -> SendOnFailure {
SendOnFailure {
- c: c
+ c: move c
}
}
}
let m = ~Mutex();
do m.lock_cond |cond| {
let m2 = ~m.clone();
- do task::spawn {
+ do task::spawn |move m2| {
do m2.lock_cond |cond| {
cond.signal_on(0);
}
let m = ~mutex_with_condvars(2);
let m2 = ~m.clone();
let (c,p) = pipes::stream();
- do task::spawn {
+ do task::spawn |move m2, move c| {
do m2.lock_cond |cond| {
c.send(());
cond.wait_on(1);
},
DowngradeRead =>
do x.write_downgrade |mode| {
- let mode = x.downgrade(mode);
+ let mode = x.downgrade(move mode);
(&mode).read(blk);
},
}
let x2 = ~x.clone();
let mut sharedstate = ~0;
let ptr = ptr::addr_of(&(*sharedstate));
- do task::spawn {
+ do task::spawn |move c, move x2| {
let sharedstate: &mut int =
unsafe { cast::reinterpret_cast(&ptr) };
access_shared(sharedstate, x2, mode1, 10);
let x2 = ~x.clone();
let (c1,p1) = pipes::stream();
let (c2,p2) = pipes::stream();
- do task::spawn {
+ do task::spawn |move c1, move x2, move p2| {
if !make_mode2_go_first {
let _ = p2.recv(); // parent sends to us once it locks, or ...
}
// Tests that downgrade can unlock the lock in both modes
let x = ~RWlock();
do lock_rwlock_in_mode(x, Downgrade) { }
- test_rwlock_handshake(x, Read, Read, false);
+ test_rwlock_handshake(move x, Read, Read, false);
let y = ~RWlock();
do lock_rwlock_in_mode(y, DowngradeRead) { }
- test_rwlock_exclusion(y, Write, Write);
+ test_rwlock_exclusion(move y, Write, Write);
}
#[test]
fn test_rwlock_read_recursive() {
// Child wakes up parent
do x.write_cond |cond| {
let x2 = ~x.clone();
- do task::spawn {
+ do task::spawn |move x2| {
do x2.write_cond |cond| {
let woken = cond.signal();
assert woken;
// Parent wakes up child
let (chan,port) = pipes::stream();
let x3 = ~x.clone();
- do task::spawn {
+ do task::spawn |move x3, move chan| {
do x3.write_cond |cond| {
chan.send(());
cond.wait();
for num_waiters.times {
let xi = ~x.clone();
let (chan, port) = pipes::stream();
- ports.push(port);
- do task::spawn {
+ ports.push(move port);
+ do task::spawn |move chan, move xi| {
do lock_cond(xi, dg1) |cond| {
chan.send(());
cond.wait();
let x = ~RWlock();
let x2 = ~x.clone();
- let result: result::Result<(),()> = do task::try {
+ let result: result::Result<(),()> = do task::try |move x2| {
do lock_rwlock_in_mode(x2, mode1) {
fail;
}
let x = ~RWlock();
let y = ~RWlock();
do x.write_downgrade |xwrite| {
- let mut xopt = Some(xwrite);
+ let mut xopt = Some(move xwrite);
do y.write_downgrade |_ywrite| {
y.downgrade(option::swap_unwrap(&mut xopt));
error!("oops, y.downgrade(x) should have failed!");
st.failed += 1u;
write_failed(st.out, st.use_color);
st.out.write_line(~"");
- st.failures.push(test);
+ st.failures.push(move test);
}
TrIgnored => {
st.ignored += 1u;
mut passed: 0u,
mut failed: 0u,
mut ignored: 0u,
- mut failures: ~[test_b, test_a]};
+ mut failures: ~[move test_b, move test_a]};
print_failures(st);
};
for vec::each(names) |name| {
let test = {name: *name, testfn: copy testfn, ignore: false,
should_fail: false};
- tests.push(test);
+ tests.push(move test);
}
- tests
+ move tests
};
let filtered = filter_tests(&opts, tests);
~"test::parse_ignored_flag",
~"test::sort_tests"];
- let pairs = vec::zip(expected, filtered);
+ let pairs = vec::zip(expected, move filtered);
for vec::each(pairs) |p| {
match *p {
fn strftime(format: &str, tm: Tm) -> ~str {
fn parse_type(ch: char, tm: &Tm) -> ~str {
//FIXME (#2350): Implement missing types.
- let die = || #fmt("strftime: can't understand this format %c ",
- ch);
+ let die = || fmt!("strftime: can't understand this format %c ", ch);
match ch {
'A' => match tm.tm_wday as int {
0 => ~"Sunday",
* * ch - a channel of type T to send a `val` on
* * val - a value of type T to send over the provided `ch`
*/
-pub fn delayed_send<T: Copy Send>(iotask: IoTask,
+pub fn delayed_send<T: Send>(iotask: IoTask,
msecs: uint, ch: comm::Chan<T>, val: T) {
unsafe {
let timer_done_po = core::comm::Port::<()>();
// delayed_send_cb has been processed by libuv
core::comm::recv(timer_done_po);
// notify the caller immediately
- core::comm::send(ch, copy(val));
+ core::comm::send(ch, move(val));
// uv_close for this timer has been processed
core::comm::recv(timer_done_po);
};
use core::option::{Some, None};
use Option = core::Option;
-pub type TreeMap<K, V> = @mut TreeEdge<K, V>;
+pub type TreeMap<K: Copy Eq Ord, V: Copy> = @mut TreeEdge<K, V>;
-type TreeEdge<K, V> = Option<@TreeNode<K, V>>;
+type TreeEdge<K: Copy Eq Ord, V: Copy> = Option<@TreeNode<K, V>>;
-enum TreeNode<K, V> = {
+struct TreeNode<K: Copy Eq Ord, V: Copy> {
key: K,
mut value: V,
mut left: TreeEdge<K, V>,
mut right: TreeEdge<K, V>
-};
+}
/// Create a treemap
-pub fn TreeMap<K, V>() -> TreeMap<K, V> { @mut None }
+pub fn TreeMap<K: Copy Eq Ord, V: Copy>() -> TreeMap<K, V> { @mut None }
/// Insert a value into the map
pub fn insert<K: Copy Eq Ord, V: Copy>(m: &mut TreeEdge<K, V>, k: K, v: V) {
match copy *m {
None => {
- *m = Some(@TreeNode({key: k,
- mut value: v,
- mut left: None,
- mut right: None}));
+ *m = Some(@TreeNode {key: k,
+ mut value: v,
+ mut left: None,
+ mut right: None});
return;
}
Some(node) => {
}
/// Visit all pairs in the map in order.
-pub fn traverse<K, V: Copy>(m: &const TreeEdge<K, V>, f: fn((&K), (&V))) {
+pub fn traverse<K: Copy Eq Ord, V: Copy>(m: &const TreeEdge<K, V>,
+ f: fn((&K), (&V))) {
match copy *m {
None => (),
Some(node) => {
}
}
+/// Compare two treemaps and return true iff
+/// they contain same keys and values
+pub fn equals<K: Copy Eq Ord, V: Copy Eq>(t1: &const TreeEdge<K, V>,
+ t2: &const TreeEdge<K, V>)
+ -> bool {
+ let mut v1 = ~[];
+ let mut v2 = ~[];
+ traverse(t1, |k,v| { v1.push((copy *k, copy *v)) });
+ traverse(t2, |k,v| { v2.push((copy *k, copy *v)) });
+ return v1 == v2;
+}
+
+
#[cfg(test)]
mod tests {
#[legacy_exports];
traverse(m, |x,y| t(n, *x, *y));
}
+ #[test]
+ fn equality() {
+ let m1 = TreeMap();
+ insert(m1, 3, ());
+ insert(m1, 0, ());
+ insert(m1, 4, ());
+ insert(m1, 2, ());
+ insert(m1, 1, ());
+ let m2 = TreeMap();
+ insert(m2, 2, ());
+ insert(m2, 1, ());
+ insert(m2, 3, ());
+ insert(m2, 0, ());
+ insert(m2, 4, ());
+
+ assert equals(m1, m2);
+
+ let m3 = TreeMap();
+ assert !equals(m1,m3);
+
+ }
+
#[test]
fn u8_map() {
let m = TreeMap();
-> libc::c_int;
fn rust_uv_ip6_name(src: *sockaddr_in6, dst: *u8, size: libc::size_t)
-> libc::c_int;
+ fn rust_uv_ip4_port(src: *sockaddr_in) -> libc::c_uint;
+ fn rust_uv_ip6_port(src: *sockaddr_in6) -> libc::c_uint;
// FIXME ref #2064
fn rust_uv_tcp_connect(connect_ptr: *uv_connect_t,
tcp_handle_ptr: *uv_tcp_t,
// FIXME ref #2064
fn rust_uv_tcp_bind6(tcp_server: *uv_tcp_t,
++addr: *sockaddr_in6) -> libc::c_int;
+ fn rust_uv_tcp_getpeername(tcp_handle_ptr: *uv_tcp_t,
+ ++name: *sockaddr_in) -> libc::c_int;
+ fn rust_uv_tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t,
+ ++name: *sockaddr_in6) ->libc::c_int;
fn rust_uv_listen(stream: *libc::c_void, backlog: libc::c_int,
cb: *u8) -> libc::c_int;
fn rust_uv_accept(server: *libc::c_void, client: *libc::c_void)
addr_ptr);
}
+pub unsafe fn tcp_getpeername(tcp_handle_ptr: *uv_tcp_t,
+ name: *sockaddr_in) -> libc::c_int {
+ return rustrt::rust_uv_tcp_getpeername(tcp_handle_ptr, name);
+}
+
+pub unsafe fn tcp_getpeername6(tcp_handle_ptr: *uv_tcp_t,
+ name: *sockaddr_in6) ->libc::c_int {
+ return rustrt::rust_uv_tcp_getpeername6(tcp_handle_ptr, name);
+}
+
pub unsafe fn listen<T>(stream: *T, backlog: libc::c_int,
cb: *u8) -> libc::c_int {
return rustrt::rust_uv_listen(stream as *libc::c_void, backlog, cb);
}
}
}
+pub unsafe fn ip4_port(src: &sockaddr_in) -> uint {
+ rustrt::rust_uv_ip4_port(to_unsafe_ptr(src)) as uint
+}
+pub unsafe fn ip6_port(src: &sockaddr_in6) -> uint {
+ rustrt::rust_uv_ip6_port(to_unsafe_ptr(src)) as uint
+}
pub unsafe fn timer_init(loop_ptr: *libc::c_void,
timer_ptr: *uv_timer_t) -> libc::c_int {
as *request_wrapper;
let buf_base = get_base_from_buf(buf);
let buf_len = get_len_from_buf(buf);
- let bytes = vec::raw::from_buf(buf_base, buf_len as uint);
+ let bytes = vec::from_buf(buf_base, buf_len as uint);
let read_chan = *((*client_data).read_chan);
let msg_from_server = str::from_bytes(bytes);
core::comm::send(read_chan, msg_from_server);
buf_base as uint,
buf_len as uint,
nread));
- let bytes = vec::raw::from_buf(buf_base, buf_len);
+ let bytes = vec::from_buf(buf_base, buf_len);
let request_str = str::from_bytes(bytes);
let client_data = get_data_for_uv_handle(
fn impl_uv_tcp_server_and_request() unsafe {
let bind_ip = ~"0.0.0.0";
let request_ip = ~"127.0.0.1";
- let port = 8887;
+ let port = 8886;
let kill_server_msg = ~"does a dog have buddha nature?";
let server_resp_msg = ~"mu!";
let client_port = core::comm::Port::<~str>();
// The Rust abstract syntax tree.
+use std::serialization::{Serializable,
+ Deserializable,
+ Serializer,
+ Deserializer};
use codemap::{span, filename};
-use std::serialization::{Serializer,
- Deserializer,
- serialize_Option,
- deserialize_Option,
- serialize_uint,
- deserialize_uint,
- serialize_int,
- deserialize_int,
- serialize_i64,
- deserialize_i64,
- serialize_u64,
- deserialize_u64,
- serialize_str,
- deserialize_str,
- serialize_bool,
- deserialize_bool};
use parse::token;
-/* Note #1972 -- spans are serialized but not deserialized */
-fn serialize_span<S>(_s: S, _v: span) {
-}
-
-fn deserialize_span<D>(_d: D) -> span {
- ast_util::dummy_sp()
-}
-
#[auto_serialize]
+#[auto_deserialize]
type spanned<T> = {node: T, span: span};
// implemented.
struct ident { repr: uint }
-fn serialize_ident<S: Serializer>(s: S, i: ident) {
- let intr = match unsafe{
- task::local_data::local_data_get(interner_key!())
- } {
- None => fail ~"serialization: TLS interner not set up",
- Some(intr) => intr
- };
+#[cfg(stage0)]
+impl ident: Serializable {
+ fn serialize<S: Serializer>(&self, s: &S) {
+ let intr = match unsafe {
+ task::local_data::local_data_get(interner_key!())
+ } {
+ None => fail ~"serialization: TLS interner not set up",
+ Some(intr) => intr
+ };
+
+ s.emit_owned_str(*(*intr).get(*self));
+ }
+}
+
+#[cfg(stage0)]
+impl ident: Deserializable {
+ static fn deserialize<D: Deserializer>(d: &D) -> ident {
+ let intr = match unsafe {
+ task::local_data::local_data_get(interner_key!())
+ } {
+ None => fail ~"deserialization: TLS interner not set up",
+ Some(intr) => intr
+ };
- s.emit_str(*(*intr).get(i));
+ (*intr).intern(@d.read_owned_str())
+ }
}
-fn deserialize_ident<D: Deserializer>(d: D) -> ident {
- let intr = match unsafe{
- task::local_data::local_data_get(interner_key!())
- } {
- None => fail ~"deserialization: TLS interner not set up",
- Some(intr) => intr
- };
- (*intr).intern(@d.read_str())
+#[cfg(stage1)]
+#[cfg(stage2)]
+impl<S: Serializer> ident: Serializable<S> {
+ fn serialize(&self, s: &S) {
+ let intr = match unsafe {
+ task::local_data::local_data_get(interner_key!())
+ } {
+ None => fail ~"serialization: TLS interner not set up",
+ Some(intr) => intr
+ };
+
+ s.emit_owned_str(*(*intr).get(*self));
+ }
+}
+
+#[cfg(stage1)]
+#[cfg(stage2)]
+impl<D: Deserializer> ident: Deserializable<D> {
+ static fn deserialize(d: &D) -> ident {
+ let intr = match unsafe {
+ task::local_data::local_data_get(interner_key!())
+ } {
+ None => fail ~"deserialization: TLS interner not set up",
+ Some(intr) => intr
+ };
+
+ (*intr).intern(@d.read_owned_str())
+ }
}
impl ident: cmp::Eq {
}
// Functions may or may not have names.
-#[auto_serialize]
type fn_ident = Option<ident>;
#[auto_serialize]
+#[auto_deserialize]
type path = {span: span,
global: bool,
idents: ~[ident],
rp: Option<@region>,
- types: ~[@ty]};
+ types: ~[@Ty]};
-#[auto_serialize]
type crate_num = int;
-#[auto_serialize]
type node_id = int;
#[auto_serialize]
+#[auto_deserialize]
type def_id = {crate: crate_num, node: node_id};
impl def_id : cmp::Eq {
const crate_node_id: node_id = 0;
#[auto_serialize]
-enum ty_param_bound {
- bound_copy,
- bound_send,
- bound_const,
- bound_owned,
- bound_trait(@ty),
-}
+#[auto_deserialize]
+// The AST represents all type param bounds as types.
+// typeck::collect::compute_bounds matches these against
+// the "special" built-in traits (see middle::lang_items) and
+// detects Copy, Send, Owned, and Const.
+enum ty_param_bound = @Ty;
#[auto_serialize]
+#[auto_deserialize]
type ty_param = {ident: ident, id: node_id, bounds: @~[ty_param_bound]};
#[auto_serialize]
+#[auto_deserialize]
enum def {
def_fn(def_id, purity),
- def_static_method(def_id, purity),
+ def_static_method(/* method */ def_id,
+ /* trait */ Option<def_id>,
+ purity),
def_self(node_id),
def_mod(def_id),
def_foreign_mod(def_id),
@def, // closed over def
node_id, // expr node that creates the closure
node_id), // id for the block/body of the closure expr
- def_class(def_id, bool /* has constructor */),
+ def_class(def_id),
def_typaram_binder(node_id), /* class, impl or trait that has ty params */
def_region(node_id),
def_label(node_id)
_ => false
}
}
- def_static_method(e0a, e1a) => {
+ def_static_method(e0a, e1a, e2a) => {
match (*other) {
- def_static_method(e0b, e1b) => e0a == e0b && e1a == e1b,
+ def_static_method(e0b, e1b, e2b) =>
+ e0a == e0b && e1a == e1b && e2a == e2b,
_ => false
}
}
_ => false
}
}
- def_class(e0a, e1a) => {
+ def_class(e0a) => {
match (*other) {
- def_class(e0b, e1b) => e0a == e0b && e1a == e1b,
+ def_class(e0b) => e0a == e0b,
_ => false
}
}
type crate_directive = spanned<crate_directive_>;
-#[auto_serialize]
type meta_item = spanned<meta_item_>;
#[auto_serialize]
+#[auto_deserialize]
enum meta_item_ {
meta_word(~str),
meta_list(~str, ~[@meta_item]),
meta_name_value(~str, lit),
}
-#[auto_serialize]
type blk = spanned<blk_>;
#[auto_serialize]
+#[auto_deserialize]
type blk_ = {view_items: ~[@view_item],
stmts: ~[@stmt],
expr: Option<@expr>,
rules: blk_check_mode};
#[auto_serialize]
+#[auto_deserialize]
type pat = {id: node_id, node: pat_, span: span};
#[auto_serialize]
+#[auto_deserialize]
type field_pat = {ident: ident, pat: @pat};
#[auto_serialize]
+#[auto_deserialize]
enum binding_mode {
bind_by_value,
bind_by_move,
}
#[auto_serialize]
+#[auto_deserialize]
enum pat_ {
pat_wild,
// A pat_ident may either be a new bound variable,
}
#[auto_serialize]
+#[auto_deserialize]
enum mutability { m_mutbl, m_imm, m_const, }
impl mutability : to_bytes::IterBytes {
}
#[auto_serialize]
+#[auto_deserialize]
enum proto {
proto_bare, // foreign fn
proto_uniq, // fn~
}
#[auto_serialize]
+#[auto_deserialize]
enum vstore {
- // FIXME (#2112): Change uint to @expr (actually only constant exprs)
- vstore_fixed(Option<uint>), // [1,2,3,4]/_ or 4
+ // FIXME (#3469): Change uint to @expr (actually only constant exprs)
+ vstore_fixed(Option<uint>), // [1,2,3,4]
vstore_uniq, // ~[1,2,3,4]
vstore_box, // @[1,2,3,4]
vstore_slice(@region) // &[1,2,3,4](foo)?
}
#[auto_serialize]
+#[auto_deserialize]
enum expr_vstore {
- // FIXME (#2112): Change uint to @expr (actually only constant exprs)
- expr_vstore_fixed(Option<uint>), // [1,2,3,4]/_ or 4
+ // FIXME (#3469): Change uint to @expr (actually only constant exprs)
+ expr_vstore_fixed(Option<uint>), // [1,2,3,4]
expr_vstore_uniq, // ~[1,2,3,4]
expr_vstore_box, // @[1,2,3,4]
expr_vstore_slice // &[1,2,3,4]
}
#[auto_serialize]
+#[auto_deserialize]
enum binop {
add,
subtract,
}
#[auto_serialize]
+#[auto_deserialize]
enum unop {
box(mutability),
uniq(mutability),
// Generally, after typeck you can get the inferred value
// using ty::resolved_T(...).
#[auto_serialize]
+#[auto_deserialize]
enum inferable<T> {
expl(T),
infer(node_id)
// "resolved" mode: the real modes.
#[auto_serialize]
+#[auto_deserialize]
enum rmode { by_ref, by_val, by_move, by_copy }
impl rmode : to_bytes::IterBytes {
}
// inferable mode.
-#[auto_serialize]
type mode = inferable<rmode>;
-#[auto_serialize]
type stmt = spanned<stmt_>;
#[auto_serialize]
+#[auto_deserialize]
enum stmt_ {
stmt_decl(@decl, node_id),
}
#[auto_serialize]
+#[auto_deserialize]
enum init_op { init_assign, init_move, }
impl init_op : cmp::Eq {
}
#[auto_serialize]
+#[auto_deserialize]
type initializer = {op: init_op, expr: @expr};
// FIXME (pending discussion of #1697, #2178...): local should really be
// a refinement on pat.
#[auto_serialize]
-type local_ = {is_mutbl: bool, ty: @ty, pat: @pat,
+#[auto_deserialize]
+type local_ = {is_mutbl: bool, ty: @Ty, pat: @pat,
init: Option<initializer>, id: node_id};
-#[auto_serialize]
type local = spanned<local_>;
-#[auto_serialize]
type decl = spanned<decl_>;
#[auto_serialize]
+#[auto_deserialize]
enum decl_ { decl_local(~[@local]), decl_item(@item), }
#[auto_serialize]
+#[auto_deserialize]
type arm = {pats: ~[@pat], guard: Option<@expr>, body: blk};
#[auto_serialize]
+#[auto_deserialize]
type field_ = {mutbl: mutability, ident: ident, expr: @expr};
-#[auto_serialize]
type field = spanned<field_>;
#[auto_serialize]
+#[auto_deserialize]
enum blk_check_mode { default_blk, unsafe_blk, }
impl blk_check_mode : cmp::Eq {
}
#[auto_serialize]
+#[auto_deserialize]
type expr = {id: node_id, callee_id: node_id, node: expr_, span: span};
// Extra node ID is only used for index, assign_op, unary, binary
#[auto_serialize]
+#[auto_deserialize]
enum log_level { error, debug, other }
// 0 = error, 1 = debug, 2 = other
#[auto_serialize]
-enum alt_mode { alt_check, alt_exhaustive, }
-
-#[auto_serialize]
+#[auto_deserialize]
enum expr_ {
expr_vstore(@expr, expr_vstore),
expr_vec(~[@expr], mutability),
expr_binary(binop, @expr, @expr),
expr_unary(unop, @expr),
expr_lit(@lit),
- expr_cast(@expr, @ty),
+ expr_cast(@expr, @Ty),
expr_if(@expr, blk, Option<@expr>),
expr_while(@expr, blk),
/* Conditionless loop (can be exited with break, cont, ret, or fail)
expr_assign(@expr, @expr),
expr_swap(@expr, @expr),
expr_assign_op(binop, @expr, @expr),
- expr_field(@expr, ident, ~[@ty]),
+ expr_field(@expr, ident, ~[@Ty]),
expr_index(@expr, @expr),
expr_path(@path),
expr_addr_of(mutability, @expr),
}
#[auto_serialize]
-type capture_item = @{
+#[auto_deserialize]
+type capture_item_ = {
id: int,
is_move: bool,
name: ident, // Currently, can only capture a local var.
span: span
};
-#[auto_serialize]
+type capture_item = @capture_item_;
+
type capture_clause = @~[capture_item];
//
// error.
//
#[auto_serialize]
+#[auto_deserialize]
#[doc="For macro invocations; parsing is delegated to the macro"]
enum token_tree {
- tt_tok(span, token::token),
+ tt_tok(span, token::Token),
tt_delim(~[token_tree]),
// These only make sense for right-hand-sides of MBE macros
- tt_seq(span, ~[token_tree], Option<token::token>, bool),
+ tt_seq(span, ~[token_tree], Option<token::Token>, bool),
tt_nonterminal(span, ident)
}
// If you understand that, you have closed to loop and understand the whole
// macro system. Congratulations.
//
-#[auto_serialize]
type matcher = spanned<matcher_>;
#[auto_serialize]
+#[auto_deserialize]
enum matcher_ {
// match one token
- match_tok(token::token),
+ match_tok(token::Token),
// match repetitions of a sequence: body, separator, zero ok?,
// lo, hi position-in-match-array used:
- match_seq(~[matcher], Option<token::token>, bool, uint, uint),
+ match_seq(~[matcher], Option<token::Token>, bool, uint, uint),
// parse a Rust NT: name to bind, name of NT, position in match array:
match_nonterminal(ident, ident, uint)
}
-#[auto_serialize]
type mac = spanned<mac_>;
-#[auto_serialize]
type mac_arg = Option<@expr>;
#[auto_serialize]
+#[auto_deserialize]
type mac_body_ = {span: span};
-#[auto_serialize]
type mac_body = Option<mac_body_>;
#[auto_serialize]
+#[auto_deserialize]
enum mac_ {
mac_invoc(@path, mac_arg, mac_body), // old macro-invocation
mac_invoc_tt(@path,~[token_tree]), // new macro-invocation
mac_var(uint)
}
-#[auto_serialize]
type lit = spanned<lit_>;
#[auto_serialize]
+#[auto_deserialize]
enum lit_ {
lit_str(@~str),
lit_int(i64, int_ty),
// NB: If you change this, you'll probably want to change the corresponding
// type structure in middle/ty.rs as well.
#[auto_serialize]
-type mt = {ty: @ty, mutbl: mutability};
+#[auto_deserialize]
+type mt = {ty: @Ty, mutbl: mutability};
#[auto_serialize]
+#[auto_deserialize]
type ty_field_ = {ident: ident, mt: mt};
-#[auto_serialize]
type ty_field = spanned<ty_field_>;
#[auto_serialize]
+#[auto_deserialize]
type ty_method = {ident: ident, attrs: ~[attribute], purity: purity,
decl: fn_decl, tps: ~[ty_param], self_ty: self_ty,
id: node_id, span: span};
#[auto_serialize]
+#[auto_deserialize]
// A trait method is either required (meaning it doesn't have an
// implementation, just a signature) or provided (meaning it has a default
// implementation).
}
#[auto_serialize]
+#[auto_deserialize]
enum int_ty { ty_i, ty_char, ty_i8, ty_i16, ty_i32, ty_i64, }
impl int_ty : to_bytes::IterBytes {
}
#[auto_serialize]
+#[auto_deserialize]
enum uint_ty { ty_u, ty_u8, ty_u16, ty_u32, ty_u64, }
impl uint_ty : to_bytes::IterBytes {
}
#[auto_serialize]
+#[auto_deserialize]
enum float_ty { ty_f, ty_f32, ty_f64, }
impl float_ty : to_bytes::IterBytes {
}
#[auto_serialize]
-type ty = {id: node_id, node: ty_, span: span};
+#[auto_deserialize]
+type Ty = {id: node_id, node: ty_, span: span};
// Not represented directly in the AST, referred to by name through a ty_path.
#[auto_serialize]
+#[auto_deserialize]
enum prim_ty {
ty_int(int_ty),
ty_uint(uint_ty),
}
#[auto_serialize]
+#[auto_deserialize]
type region = {id: node_id, node: region_};
#[auto_serialize]
+#[auto_deserialize]
enum region_ {
re_anon,
re_static,
}
#[auto_serialize]
+#[auto_deserialize]
enum ty_ {
ty_nil,
ty_bot, /* bottom type */
ty_rptr(@region, mt),
ty_rec(~[ty_field]),
ty_fn(proto, purity, @~[ty_param_bound], fn_decl),
- ty_tup(~[@ty]),
+ ty_tup(~[@Ty]),
ty_path(@path, node_id),
- ty_fixed_length(@ty, Option<uint>),
+ ty_fixed_length(@Ty, Option<uint>),
ty_mac(mac),
// ty_infer means the type should be inferred instead of it having been
// specified. This should only appear at the "top level" of a type and not
// Equality and byte-iter (hashing) can be quite approximate for AST types.
// since we only care about this for normalizing them to "real" types.
-impl ty : cmp::Eq {
- pure fn eq(other: &ty) -> bool {
+impl Ty : cmp::Eq {
+ pure fn eq(other: &Ty) -> bool {
ptr::addr_of(&self) == ptr::addr_of(&(*other))
}
- pure fn ne(other: &ty) -> bool {
+ pure fn ne(other: &Ty) -> bool {
ptr::addr_of(&self) != ptr::addr_of(&(*other))
}
}
-impl ty : to_bytes::IterBytes {
+impl Ty : to_bytes::IterBytes {
pure fn iter_bytes(+lsb0: bool, f: to_bytes::Cb) {
to_bytes::iter_bytes_2(&self.span.lo, &self.span.hi, lsb0, f);
}
#[auto_serialize]
-type arg = {mode: mode, ty: @ty, ident: ident, id: node_id};
+#[auto_deserialize]
+type arg = {mode: mode, ty: @Ty, ident: ident, id: node_id};
#[auto_serialize]
+#[auto_deserialize]
type fn_decl =
{inputs: ~[arg],
- output: @ty,
+ output: @Ty,
cf: ret_style};
#[auto_serialize]
+#[auto_deserialize]
enum purity {
pure_fn, // declared with "pure fn"
unsafe_fn, // declared with "unsafe fn"
}
#[auto_serialize]
+#[auto_deserialize]
enum ret_style {
noreturn, // functions with return type _|_ that always
// raise an error or exit (i.e. never return to the caller)
}
#[auto_serialize]
+#[auto_deserialize]
enum self_ty_ {
sty_static, // no self: static method
sty_by_ref, // old by-reference self: ``
pure fn ne(other: &self_ty_) -> bool { !self.eq(other) }
}
-#[auto_serialize]
type self_ty = spanned<self_ty_>;
#[auto_serialize]
+#[auto_deserialize]
type method = {ident: ident, attrs: ~[attribute],
tps: ~[ty_param], self_ty: self_ty,
purity: purity, decl: fn_decl, body: blk,
vis: visibility};
#[auto_serialize]
+#[auto_deserialize]
type _mod = {view_items: ~[@view_item], items: ~[@item]};
#[auto_serialize]
+#[auto_deserialize]
enum foreign_abi {
foreign_abi_rust_intrinsic,
foreign_abi_cdecl,
// Foreign mods can be named or anonymous
#[auto_serialize]
+#[auto_deserialize]
enum foreign_mod_sort { named, anonymous }
impl foreign_mod_sort : cmp::Eq {
}
#[auto_serialize]
+#[auto_deserialize]
type foreign_mod =
{sort: foreign_mod_sort,
view_items: ~[@view_item],
items: ~[@foreign_item]};
#[auto_serialize]
-type variant_arg = {ty: @ty, id: node_id};
+#[auto_deserialize]
+type variant_arg = {ty: @Ty, id: node_id};
#[auto_serialize]
+#[auto_deserialize]
enum variant_kind {
tuple_variant_kind(~[variant_arg]),
struct_variant_kind(@struct_def),
}
#[auto_serialize]
-enum enum_def = { variants: ~[variant], common: Option<@struct_def> };
+#[auto_deserialize]
+type enum_def_ = { variants: ~[variant], common: Option<@struct_def> };
+
+#[auto_serialize]
+#[auto_deserialize]
+enum enum_def = enum_def_;
#[auto_serialize]
+#[auto_deserialize]
type variant_ = {name: ident, attrs: ~[attribute], kind: variant_kind,
id: node_id, disr_expr: Option<@expr>, vis: visibility};
-#[auto_serialize]
type variant = spanned<variant_>;
#[auto_serialize]
+#[auto_deserialize]
type path_list_ident_ = {name: ident, id: node_id};
-#[auto_serialize]
type path_list_ident = spanned<path_list_ident_>;
#[auto_serialize]
+#[auto_deserialize]
enum namespace { module_ns, type_value_ns }
impl namespace : cmp::Eq {
pure fn ne(other: &namespace) -> bool { !self.eq(other) }
}
-#[auto_serialize]
type view_path = spanned<view_path_>;
#[auto_serialize]
+#[auto_deserialize]
enum view_path_ {
// quux = foo::bar::baz
}
#[auto_serialize]
+#[auto_deserialize]
type view_item = {node: view_item_, attrs: ~[attribute],
vis: visibility, span: span};
#[auto_serialize]
+#[auto_deserialize]
enum view_item_ {
view_item_use(ident, ~[@meta_item], node_id),
view_item_import(~[@view_path]),
}
// Meta-data associated with an item
-#[auto_serialize]
type attribute = spanned<attribute_>;
// Distinguishes between attributes that decorate items and attributes that
// are contained as statements within items. These two cases need to be
// distinguished for pretty-printing.
#[auto_serialize]
+#[auto_deserialize]
enum attr_style { attr_outer, attr_inner, }
impl attr_style : cmp::Eq {
// doc-comments are promoted to attributes that have is_sugared_doc = true
#[auto_serialize]
+#[auto_deserialize]
type attribute_ = {style: attr_style, value: meta_item, is_sugared_doc: bool};
/*
trait)
*/
#[auto_serialize]
+#[auto_deserialize]
type trait_ref = {path: @path, ref_id: node_id, impl_id: node_id};
#[auto_serialize]
+#[auto_deserialize]
enum visibility { public, private, inherited }
impl visibility : cmp::Eq {
}
#[auto_serialize]
+#[auto_deserialize]
type struct_field_ = {
kind: struct_field_kind,
id: node_id,
- ty: @ty
+ ty: @Ty
};
-#[auto_serialize]
type struct_field = spanned<struct_field_>;
#[auto_serialize]
+#[auto_deserialize]
enum struct_field_kind {
named_field(ident, class_mutability, visibility),
unnamed_field // element of a tuple-like struct
}
#[auto_serialize]
+#[auto_deserialize]
type struct_def = {
traits: ~[@trait_ref], /* traits this struct implements */
fields: ~[@struct_field], /* fields */
methods: ~[@method], /* methods */
/* (not including ctor or dtor) */
- /* ctor is optional, and will soon go away */
- ctor: Option<class_ctor>,
/* dtor is optional */
dtor: Option<class_dtor>
};
we just use dummy names for anon items.
*/
#[auto_serialize]
+#[auto_deserialize]
type item = {ident: ident, attrs: ~[attribute],
id: node_id, node: item_,
vis: visibility, span: span};
#[auto_serialize]
+#[auto_deserialize]
enum item_ {
- item_const(@ty, @expr),
+ item_const(@Ty, @expr),
item_fn(fn_decl, purity, ~[ty_param], blk),
item_mod(_mod),
item_foreign_mod(foreign_mod),
- item_ty(@ty, ~[ty_param]),
+ item_ty(@Ty, ~[ty_param]),
item_enum(enum_def, ~[ty_param]),
item_class(@struct_def, ~[ty_param]),
item_trait(~[ty_param], ~[@trait_ref], ~[trait_method]),
item_impl(~[ty_param],
Option<@trait_ref>, /* (optional) trait this impl implements */
- @ty, /* self */
+ @Ty, /* self */
~[@method]),
item_mac(mac),
}
#[auto_serialize]
+#[auto_deserialize]
enum class_mutability { class_mutable, class_immutable }
impl class_mutability : to_bytes::IterBytes {
pure fn ne(other: &class_mutability) -> bool { !self.eq(other) }
}
-#[auto_serialize]
type class_ctor = spanned<class_ctor_>;
#[auto_serialize]
+#[auto_deserialize]
type class_ctor_ = {id: node_id,
attrs: ~[attribute],
self_id: node_id,
dec: fn_decl,
body: blk};
-#[auto_serialize]
type class_dtor = spanned<class_dtor_>;
#[auto_serialize]
+#[auto_deserialize]
type class_dtor_ = {id: node_id,
attrs: ~[attribute],
self_id: node_id,
body: blk};
#[auto_serialize]
+#[auto_deserialize]
type foreign_item =
{ident: ident,
attrs: ~[attribute],
vis: visibility};
#[auto_serialize]
+#[auto_deserialize]
enum foreign_item_ {
foreign_item_fn(fn_decl, purity, ~[ty_param]),
- foreign_item_const(@ty)
+ foreign_item_const(@Ty)
}
// The data we save and restore about an inlined item or method. This is not
// part of the AST that we parse from a file, but it becomes part of the tree
// that we trans.
#[auto_serialize]
+#[auto_deserialize]
enum inlined_item {
ii_item(@item),
ii_method(def_id /* impl id */, @method),
ii_foreign(@foreign_item),
- ii_ctor(class_ctor, ident, ~[ty_param], def_id /* parent id */),
ii_dtor(class_dtor, ident, ~[ty_param], def_id /* parent id */)
}
// order they are introduced.
node_arg(arg, uint),
node_local(uint),
- // Constructor for a class
- // def_id is parent id
- node_ctor(ident, ~[ty_param], @class_ctor, def_id, @path),
// Destructor for a class
node_dtor(~[ty_param], @class_dtor, def_id, @path),
node_block(blk),
// don't decode and instantiate the impl, but just the method, we have to
// add it to the table now:
match ii {
- ii_item(*) | ii_ctor(*) | ii_dtor(*) => { /* fallthrough */ }
+ ii_item(*) | ii_dtor(*) => { /* fallthrough */ }
ii_foreign(i) => {
cx.map.insert(i.id, node_foreign_item(i, foreign_abi_rust_intrinsic,
@path));
cx.local_id += 1u;
}
match fk {
- visit::fk_ctor(nm, attrs, tps, self_id, parent_id) => {
- let ct = @{node: {id: id,
- attrs: attrs,
- self_id: self_id,
- dec: /* FIXME (#2543) */ copy decl,
- body: /* FIXME (#2543) */ copy body},
- span: sp};
- cx.map.insert(id, node_ctor(/* FIXME (#2543) */ copy nm,
- /* FIXME (#2543) */ copy tps,
- ct, parent_id,
- @/* FIXME (#2543) */ copy cx.path));
- }
visit::fk_dtor(tps, attrs, self_id, parent_id) => {
let dt = @{node: {id: id, attrs: attrs, self_id: self_id,
body: /* FIXME (#2543) */ copy body}, span: sp};
Some(node_local(_)) => { // add more info here
fmt!("local (id=%?)", id)
}
- Some(node_ctor(*)) => { // add more info here
- fmt!("node_ctor (id=%?)", id)
- }
Some(node_dtor(*)) => { // add more info here
fmt!("node_dtor (id=%?)", id)
}
pure fn def_id_of_def(d: def) -> def_id {
match d {
- def_fn(id, _) | def_static_method(id, _) | def_mod(id) |
+ def_fn(id, _) | def_static_method(id, _, _) | def_mod(id) |
def_foreign_mod(id) | def_const(id) |
def_variant(_, id) | def_ty(id) | def_ty_param(id, _) |
- def_use(id) | def_class(id, _) => {
+ def_use(id) | def_class(id) => {
id
}
def_arg(id, _) | def_local(id, _) | def_self(id) |
}
}
- // FIXME: glob-exports aren't supported yet. (#2006)
_ => ()
}
}
ii_item(i) => /* FIXME (#2543) */ copy i.ident,
ii_foreign(i) => /* FIXME (#2543) */ copy i.ident,
ii_method(_, m) => /* FIXME (#2543) */ copy m.ident,
- ii_ctor(_, nm, _, _) => /* FIXME (#2543) */ copy nm,
ii_dtor(_, nm, _, _) => /* FIXME (#2543) */ copy nm
}
}
ii_item(i) => i.id,
ii_foreign(i) => i.id,
ii_method(_, m) => m.id,
- ii_ctor(ctor, _, _, _) => ctor.node.id,
ii_dtor(dtor, _, _, _) => dtor.node.id
}
}
ii_item(i) => v.visit_item(i, e, v),
ii_foreign(i) => v.visit_foreign_item(i, e, v),
ii_method(_, m) => visit::visit_method_helper(m, e, v),
- ii_ctor(ctor, nm, tps, parent_id) => {
- visit::visit_class_ctor_helper(ctor, nm, tps, parent_id, e, v);
- }
ii_dtor(dtor, _, tps, parent_id) => {
visit::visit_class_dtor_helper(dtor, tps, parent_id, e, v);
}
// Enumerating the IDs which appear in an AST
#[auto_serialize]
+#[auto_deserialize]
type id_range = {min: node_id, max: node_id};
fn empty(range: id_range) -> bool {
visit_expr_post: fn@(_e: @expr) {
},
- visit_ty: fn@(t: @ty) {
+ visit_ty: fn@(t: @Ty) {
match t.node {
ty_path(_, id) => vfn(id),
_ => { /* fall through */ }
vfn(id);
match fk {
- visit::fk_ctor(_, _, tps, self_id, parent_id) => {
- for vec::each(tps) |tp| { vfn(tp.id); }
- vfn(id);
- vfn(self_id);
- vfn(parent_id.node);
- }
visit::fk_dtor(tps, _, self_id, parent_id) => {
for vec::each(tps) |tp| { vfn(tp.id); }
vfn(id);
// Get the meta_items from inside a vector of attributes
fn attr_metas(attrs: ~[ast::attribute]) -> ~[@ast::meta_item] {
- let mut mitems = ~[];
- for attrs.each |a| { mitems.push(attr_meta(*a)); }
- return mitems;
+ do attrs.map |a| { attr_meta(*a) }
}
fn desugar_doc_attr(attr: &ast::attribute) -> ast::attribute {
use dvec::DVec;
+use std::serialization::{Serializable,
+ Deserializable,
+ Serializer,
+ Deserializer};
export filename;
export filemap;
export fss_none;
export fss_internal;
export fss_external;
-export codemap;
+export CodeMap;
export expn_info;
export expn_info_;
export expanded_from;
@{name: filename, substr: file_substr, src: @~str,
start_pos: file_pos, mut lines: ~[file_pos]};
-type codemap = @{files: DVec<filemap>};
+type CodeMap = @{files: DVec<filemap>};
type loc = {file: filemap, line: uint, col: uint};
-fn new_codemap() -> codemap { @{files: DVec()} }
+fn new_codemap() -> CodeMap { @{files: DVec()} }
fn new_filemap_w_substr(+filename: filename, +substr: file_substr,
src: @~str,
start_pos_ch, start_pos_byte);
}
-fn mk_substr_filename(cm: codemap, sp: span) -> ~str
+fn mk_substr_filename(cm: CodeMap, sp: span) -> ~str
{
let pos = lookup_char_pos(cm, sp.lo);
return fmt!("<%s:%u:%u>", pos.file.name, pos.line, pos.col);
type lookup_fn = pure fn(file_pos) -> uint;
-fn lookup_line(map: codemap, pos: uint, lookup: lookup_fn)
+fn lookup_line(map: CodeMap, pos: uint, lookup: lookup_fn)
-> {fm: filemap, line: uint}
{
let len = map.files.len();
return {fm: f, line: a};
}
-fn lookup_pos(map: codemap, pos: uint, lookup: lookup_fn) -> loc {
+fn lookup_pos(map: CodeMap, pos: uint, lookup: lookup_fn) -> loc {
let {fm: f, line: a} = lookup_line(map, pos, lookup);
return {file: f, line: a + 1u, col: pos - lookup(f.lines[a])};
}
-fn lookup_char_pos(map: codemap, pos: uint) -> loc {
+fn lookup_char_pos(map: CodeMap, pos: uint) -> loc {
pure fn lookup(pos: file_pos) -> uint { return pos.ch; }
return lookup_pos(map, pos, lookup);
}
-fn lookup_byte_pos(map: codemap, pos: uint) -> loc {
+fn lookup_byte_pos(map: CodeMap, pos: uint) -> loc {
pure fn lookup(pos: file_pos) -> uint { return pos.byte; }
return lookup_pos(map, pos, lookup);
}
-fn lookup_char_pos_adj(map: codemap, pos: uint)
+fn lookup_char_pos_adj(map: CodeMap, pos: uint)
-> {filename: ~str, line: uint, col: uint, file: Option<filemap>}
{
let loc = lookup_char_pos(map, pos);
}
}
-fn adjust_span(map: codemap, sp: span) -> span {
+fn adjust_span(map: CodeMap, sp: span) -> span {
pure fn lookup(pos: file_pos) -> uint { return pos.ch; }
let line = lookup_line(map, sp.lo, lookup);
match (line.fm.substr) {
pure fn ne(other: &span) -> bool { !self.eq(other) }
}
-fn span_to_str_no_adj(sp: span, cm: codemap) -> ~str {
+#[cfg(stage0)]
+impl span: Serializable {
+ /* Note #1972 -- spans are serialized but not deserialized */
+ fn serialize<S: Serializer>(&self, _s: &S) { }
+}
+
+#[cfg(stage0)]
+impl span: Deserializable {
+ static fn deserialize<D: Deserializer>(_d: &D) -> span {
+ ast_util::dummy_sp()
+ }
+}
+
+#[cfg(stage1)]
+#[cfg(stage2)]
+impl<S: Serializer> span: Serializable<S> {
+ /* Note #1972 -- spans are serialized but not deserialized */
+ fn serialize(&self, _s: &S) { }
+}
+
+#[cfg(stage1)]
+#[cfg(stage2)]
+impl<D: Deserializer> span: Deserializable<D> {
+ static fn deserialize(_d: &D) -> span {
+ ast_util::dummy_sp()
+ }
+}
+
+fn span_to_str_no_adj(sp: span, cm: CodeMap) -> ~str {
let lo = lookup_char_pos(cm, sp.lo);
let hi = lookup_char_pos(cm, sp.hi);
return fmt!("%s:%u:%u: %u:%u", lo.file.name,
lo.line, lo.col, hi.line, hi.col)
}
-fn span_to_str(sp: span, cm: codemap) -> ~str {
+fn span_to_str(sp: span, cm: CodeMap) -> ~str {
let lo = lookup_char_pos_adj(cm, sp.lo);
let hi = lookup_char_pos_adj(cm, sp.hi);
return fmt!("%s:%u:%u: %u:%u", lo.filename,
type file_lines = {file: filemap, lines: ~[uint]};
-fn span_to_filename(sp: span, cm: codemap::codemap) -> filename {
+fn span_to_filename(sp: span, cm: codemap::CodeMap) -> filename {
let lo = lookup_char_pos(cm, sp.lo);
return /* FIXME (#2543) */ copy lo.file.name;
}
-fn span_to_lines(sp: span, cm: codemap::codemap) -> @file_lines {
+fn span_to_lines(sp: span, cm: codemap::CodeMap) -> @file_lines {
let lo = lookup_char_pos(cm, sp.lo);
let hi = lookup_char_pos(cm, sp.hi);
let mut lines = ~[];
str::slice(*fm.src, begin, end)
}
-fn lookup_byte_offset(cm: codemap::codemap, chpos: uint)
+fn lookup_byte_offset(cm: codemap::CodeMap, chpos: uint)
-> {fm: filemap, pos: uint} {
pure fn lookup(pos: file_pos) -> uint { return pos.ch; }
let {fm, line} = lookup_line(cm, chpos, lookup);
{fm: fm, pos: line_offset + col_offset}
}
-fn span_to_snippet(sp: span, cm: codemap::codemap) -> ~str {
+fn span_to_snippet(sp: span, cm: codemap::CodeMap) -> ~str {
let begin = lookup_byte_offset(cm, sp.lo);
let end = lookup_byte_offset(cm, sp.hi);
assert begin.fm.start_pos == end.fm.start_pos;
return str::slice(*begin.fm.src, begin.pos, end.pos);
}
-fn get_snippet(cm: codemap::codemap, fidx: uint, lo: uint, hi: uint) -> ~str
+fn get_snippet(cm: codemap::CodeMap, fidx: uint, lo: uint, hi: uint) -> ~str
{
let fm = cm.files[fidx];
return str::slice(*fm.src, lo, hi)
}
-fn get_filemap(cm: codemap, filename: ~str) -> filemap {
+fn get_filemap(cm: CodeMap, filename: ~str) -> filemap {
for cm.files.each |fm| { if fm.name == filename { return *fm; } }
//XXjdm the following triggers a mismatched type bug
// (or expected function, found _|_)
export ice_msg;
export expect;
-type emitter = fn@(cmsp: Option<(codemap::codemap, span)>,
+type emitter = fn@(cmsp: Option<(codemap::CodeMap, span)>,
msg: &str, lvl: level);
fn note(msg: &str);
fn bug(msg: &str) -> !;
fn unimpl(msg: &str) -> !;
- fn emit(cmsp: Option<(codemap::codemap, span)>, msg: &str, lvl: level);
+ fn emit(cmsp: Option<(codemap::CodeMap, span)>, msg: &str, lvl: level);
}
type handler_t = @{
type codemap_t = @{
handler: handler,
- cm: codemap::codemap
+ cm: codemap::CodeMap
};
impl codemap_t: span_handler {
self.fatal(ice_msg(msg));
}
fn unimpl(msg: &str) -> ! { self.bug(~"unimplemented " + msg); }
- fn emit(cmsp: Option<(codemap::codemap, span)>, msg: &str, lvl: level) {
+ fn emit(cmsp: Option<(codemap::CodeMap, span)>, msg: &str, lvl: level) {
self.emit(cmsp, msg, lvl);
}
}
fmt!("internal compiler error: %s", msg)
}
-fn mk_span_handler(handler: handler, cm: codemap::codemap) -> span_handler {
+fn mk_span_handler(handler: handler, cm: codemap::CodeMap) -> span_handler {
@{ handler: handler, cm: cm } as span_handler
}
let emit = match emitter {
Some(e) => e,
None => {
- let f = fn@(cmsp: Option<(codemap::codemap, span)>,
+ let f = fn@(cmsp: Option<(codemap::CodeMap, span)>,
msg: &str, t: level) {
emit(cmsp, msg, t);
};
io::stderr().write_str(fmt!(" %s\n", msg));
}
-fn emit(cmsp: Option<(codemap::codemap, span)>,
- msg: &str, lvl: level) {
+fn emit(cmsp: Option<(codemap::CodeMap, span)>, msg: &str, lvl: level) {
match cmsp {
Some((cm, sp)) => {
let sp = codemap::adjust_span(cm,sp);
}
}
-fn highlight_lines(cm: codemap::codemap, sp: span,
+fn highlight_lines(cm: codemap::CodeMap, sp: span,
lines: @codemap::file_lines) {
let fm = lines.file;
}
}
-fn print_macro_backtrace(cm: codemap::codemap, sp: span) {
+fn print_macro_backtrace(cm: codemap::CodeMap, sp: span) {
do option::iter(&sp.expn_info) |ei| {
let ss = option::map_default(&ei.callie.span, @~"",
|span| @codemap::span_to_str(*span, cm));
print_diagnostic(*ss, note,
- fmt!("in expansion of #%s", ei.callie.name));
+ fmt!("in expansion of %s!", ei.callie.name));
let ss = codemap::span_to_str(ei.call_site, cm);
print_diagnostic(ss, note, ~"expansion site");
print_macro_backtrace(cm, ei.call_site);
/*
-The compiler code necessary to implement the #[auto_serialize]
-extension. The idea here is that type-defining items may be tagged
-with #[auto_serialize], which will cause us to generate a little
-companion module with the same name as the item.
+The compiler code necessary to implement the #[auto_serialize] and
+#[auto_deserialize] extension. The idea here is that type-defining items may
+be tagged with #[auto_serialize] and #[auto_deserialize], which will cause
+us to generate a little companion module with the same name as the item.
For example, a type like:
- type node_id = uint;
+ #[auto_serialize]
+ #[auto_deserialize]
+ struct Node {id: uint}
-would generate two functions like:
+would generate two implementations like:
- fn serialize_node_id<S: serializer>(s: S, v: node_id) {
- s.emit_uint(v);
+ impl<S: Serializer> node_id: Serializable<S> {
+ fn serialize(s: &S) {
+ do s.emit_struct("Node") {
+ s.emit_field("id", 0, || s.emit_uint(self))
+ }
+ }
}
- fn deserialize_node_id<D: deserializer>(d: D) -> node_id {
- d.read_uint()
+
+ impl<D: Deserializer> node_id: Deserializable {
+ static fn deserialize(d: &D) -> Node {
+ do d.read_struct("Node") {
+ Node {
+ id: d.read_field(~"x", 0, || deserialize(d))
+ }
+ }
+ }
}
Other interesting scenarios are whe the item has type parameters or
references other non-built-in types. A type definition like:
+ #[auto_serialize]
+ #[auto_deserialize]
type spanned<T> = {node: T, span: span};
would yield functions like:
- fn serialize_spanned<S: serializer,T>(s: S, v: spanned<T>, t: fn(T)) {
- s.emit_rec(2u) {||
- s.emit_rec_field("node", 0u) {||
- t(s.node);
- };
- s.emit_rec_field("span", 1u) {||
- serialize_span(s, s.span);
- };
- }
- }
- fn deserialize_spanned<D: deserializer>(d: D, t: fn() -> T) -> node_id {
- d.read_rec(2u) {||
- {node: d.read_rec_field("node", 0u, t),
- span: d.read_rec_field("span", 1u) {||deserialize_span(d)}}
- }
+ impl<
+ S: Serializer,
+ T: Serializable<S>
+ > spanned<T>: Serializable<S> {
+ fn serialize<S: Serializer>(s: &S) {
+ do s.emit_rec {
+ s.emit_field("node", 0, || self.node.serialize(s));
+ s.emit_field("span", 1, || self.span.serialize(s));
+ }
+ }
}
-In general, the code to serialize an instance `v` of a non-built-in
-type a::b::c<T0,...,Tn> looks like:
-
- a::b::serialize_c(s, {|v| c_T0}, ..., {|v| c_Tn}, v)
-
-where `c_Ti` is the code to serialize an instance `v` of the type
-`Ti`.
-
-Similarly, the code to deserialize an instance of a non-built-in type
-`a::b::c<T0,...,Tn>` using the deserializer `d` looks like:
-
- a::b::deserialize_c(d, {|| c_T0}, ..., {|| c_Tn})
-
-where `c_Ti` is the code to deserialize an instance of `Ti` using the
-deserializer `d`.
+ impl<
+ D: Deserializer,
+ T: Deserializable<D>
+ > spanned<T>: Deserializable<D> {
+ static fn deserialize(d: &D) -> spanned<T> {
+ do d.read_rec {
+ {
+ node: d.read_field(~"node", 0, || deserialize(d)),
+ span: d.read_field(~"span", 1, || deserialize(d)),
+ }
+ }
+ }
+ }
FIXME (#2810)--Hygiene. Search for "__" strings. We also assume "std" is the
standard library.
node twice.
*/
+
use base::*;
use codemap::span;
use std::map;
use std::map::HashMap;
-export expand;
+export expand_auto_serialize;
+export expand_auto_deserialize;
// Transitional reexports so qquote can find the paths it is looking for
mod syntax {
- #[legacy_exports];
pub use ext;
pub use parse;
}
-type ser_tps_map = map::HashMap<ast::ident, fn@(@ast::expr) -> ~[@ast::stmt]>;
-type deser_tps_map = map::HashMap<ast::ident, fn@() -> @ast::expr>;
-
-fn expand(cx: ext_ctxt,
- span: span,
- _mitem: ast::meta_item,
- in_items: ~[@ast::item]) -> ~[@ast::item] {
- fn not_auto_serialize(a: &ast::attribute) -> bool {
- attr::get_attr_name(*a) != ~"auto_serialize"
+fn expand_auto_serialize(
+ cx: ext_ctxt,
+ span: span,
+ _mitem: ast::meta_item,
+ in_items: ~[@ast::item]
+) -> ~[@ast::item] {
+ fn is_auto_serialize(a: &ast::attribute) -> bool {
+ attr::get_attr_name(*a) == ~"auto_serialize"
}
fn filter_attrs(item: @ast::item) -> @ast::item {
- @{attrs: vec::filter(item.attrs, not_auto_serialize),
+ @{attrs: vec::filter(item.attrs, |a| !is_auto_serialize(a)),
.. *item}
}
- do vec::flat_map(in_items) |in_item| {
- match in_item.node {
- ast::item_ty(ty, tps) => {
- vec::append(~[filter_attrs(*in_item)],
- ty_fns(cx, in_item.ident, ty, tps))
- }
-
- ast::item_enum(enum_definition, tps) => {
- vec::append(~[filter_attrs(*in_item)],
- enum_fns(cx, in_item.ident,
- in_item.span, enum_definition.variants, tps))
- }
-
- _ => {
- cx.span_err(span, ~"#[auto_serialize] can only be \
- applied to type and enum \
- definitions");
- ~[*in_item]
- }
+ do vec::flat_map(in_items) |item| {
+ if item.attrs.any(is_auto_serialize) {
+ match item.node {
+ ast::item_ty(@{node: ast::ty_rec(fields), _}, tps) => {
+ let ser_impl = mk_rec_ser_impl(
+ cx,
+ item.span,
+ item.ident,
+ fields,
+ tps
+ );
+
+ ~[filter_attrs(*item), ser_impl]
+ },
+ ast::item_class(@{ fields, _}, tps) => {
+ let ser_impl = mk_struct_ser_impl(
+ cx,
+ item.span,
+ item.ident,
+ fields,
+ tps
+ );
+
+ ~[filter_attrs(*item), ser_impl]
+ },
+ ast::item_enum(enum_def, tps) => {
+ let ser_impl = mk_enum_ser_impl(
+ cx,
+ item.span,
+ item.ident,
+ enum_def,
+ tps
+ );
+
+ ~[filter_attrs(*item), ser_impl]
+ },
+ _ => {
+ cx.span_err(span, ~"#[auto_serialize] can only be \
+ applied to structs, record types, \
+ and enum definitions");
+ ~[*item]
+ }
+ }
+ } else {
+ ~[*item]
}
}
}
-trait ext_ctxt_helpers {
- fn helper_path(base_path: @ast::path, helper_name: ~str) -> @ast::path;
- fn path(span: span, strs: ~[ast::ident]) -> @ast::path;
- fn path_tps(span: span, strs: ~[ast::ident],
- tps: ~[@ast::ty]) -> @ast::path;
- fn ty_path(span: span, strs: ~[ast::ident], tps: ~[@ast::ty]) -> @ast::ty;
- fn ty_fn(span: span,
- -input_tys: ~[@ast::ty],
- -output: @ast::ty) -> @ast::ty;
- fn ty_nil(span: span) -> @ast::ty;
- fn expr(span: span, node: ast::expr_) -> @ast::expr;
- fn var_ref(span: span, name: ast::ident) -> @ast::expr;
- fn blk(span: span, stmts: ~[@ast::stmt]) -> ast::blk;
- fn expr_blk(expr: @ast::expr) -> ast::blk;
- fn binder_pat(span: span, nm: ast::ident) -> @ast::pat;
- fn stmt(expr: @ast::expr) -> @ast::stmt;
- fn alt_stmt(arms: ~[ast::arm], span: span, -v: @ast::expr) -> @ast::stmt;
- fn lit_str(span: span, s: @~str) -> @ast::expr;
- fn lit_uint(span: span, i: uint) -> @ast::expr;
- fn lambda(blk: ast::blk) -> @ast::expr;
- fn clone_folder() -> fold::ast_fold;
- fn clone(v: @ast::expr) -> @ast::expr;
- fn clone_ty(v: @ast::ty) -> @ast::ty;
- fn clone_ty_param(v: ast::ty_param) -> ast::ty_param;
- fn at(span: span, expr: @ast::expr) -> @ast::expr;
-}
-
-impl ext_ctxt: ext_ctxt_helpers {
- fn helper_path(base_path: @ast::path,
- helper_name: ~str) -> @ast::path {
- let head = vec::init(base_path.idents);
- let tail = vec::last(base_path.idents);
- self.path(base_path.span,
- vec::append(head,
- ~[self.parse_sess().interner.
- intern(@(helper_name + ~"_" +
- *self.parse_sess().interner.get(
- tail)))]))
+fn expand_auto_deserialize(
+ cx: ext_ctxt,
+ span: span,
+ _mitem: ast::meta_item,
+ in_items: ~[@ast::item]
+) -> ~[@ast::item] {
+ fn is_auto_deserialize(a: &ast::attribute) -> bool {
+ attr::get_attr_name(*a) == ~"auto_deserialize"
}
- fn path(span: span, strs: ~[ast::ident]) -> @ast::path {
- @{span: span, global: false, idents: strs, rp: None, types: ~[]}
- }
-
- fn path_tps(span: span, strs: ~[ast::ident],
- tps: ~[@ast::ty]) -> @ast::path {
- @{span: span, global: false, idents: strs, rp: None, types: tps}
+ fn filter_attrs(item: @ast::item) -> @ast::item {
+ @{attrs: vec::filter(item.attrs, |a| !is_auto_deserialize(a)),
+ .. *item}
}
- fn ty_path(span: span, strs: ~[ast::ident],
- tps: ~[@ast::ty]) -> @ast::ty {
- @{id: self.next_id(),
- node: ast::ty_path(self.path_tps(span, strs, tps), self.next_id()),
- span: span}
+ do vec::flat_map(in_items) |item| {
+ if item.attrs.any(is_auto_deserialize) {
+ match item.node {
+ ast::item_ty(@{node: ast::ty_rec(fields), _}, tps) => {
+ let deser_impl = mk_rec_deser_impl(
+ cx,
+ item.span,
+ item.ident,
+ fields,
+ tps
+ );
+
+ ~[filter_attrs(*item), deser_impl]
+ },
+ ast::item_class(@{ fields, _}, tps) => {
+ let deser_impl = mk_struct_deser_impl(
+ cx,
+ item.span,
+ item.ident,
+ fields,
+ tps
+ );
+
+ ~[filter_attrs(*item), deser_impl]
+ },
+ ast::item_enum(enum_def, tps) => {
+ let deser_impl = mk_enum_deser_impl(
+ cx,
+ item.span,
+ item.ident,
+ enum_def,
+ tps
+ );
+
+ ~[filter_attrs(*item), deser_impl]
+ },
+ _ => {
+ cx.span_err(span, ~"#[auto_deserialize] can only be \
+ applied to structs, record types, \
+ and enum definitions");
+ ~[*item]
+ }
+ }
+ } else {
+ ~[*item]
+ }
}
+}
- fn ty_fn(span: span,
- -input_tys: ~[@ast::ty],
- -output: @ast::ty) -> @ast::ty {
- let args = do vec::map(input_tys) |ty| {
- {mode: ast::expl(ast::by_ref),
- ty: *ty,
- ident: parse::token::special_idents::invalid,
- id: self.next_id()}
- };
-
- @{id: self.next_id(),
- node: ast::ty_fn(ast::proto_block,
- ast::impure_fn,
- @~[],
- {inputs: args,
- output: output,
- cf: ast::return_val}),
- span: span}
- }
+priv impl ext_ctxt {
+ fn bind_path(
+ span: span,
+ ident: ast::ident,
+ path: @ast::path,
+ bounds: @~[ast::ty_param_bound]
+ ) -> ast::ty_param {
+ let bound = ast::ty_param_bound(@{
+ id: self.next_id(),
+ node: ast::ty_path(path, self.next_id()),
+ span: span,
+ });
- fn ty_nil(span: span) -> @ast::ty {
- @{id: self.next_id(), node: ast::ty_nil, span: span}
+ {
+ ident: ident,
+ id: self.next_id(),
+ bounds: @vec::append(~[bound], *bounds)
+ }
}
fn expr(span: span, node: ast::expr_) -> @ast::expr {
node: node, span: span}
}
- fn var_ref(span: span, name: ast::ident) -> @ast::expr {
- self.expr(span, ast::expr_path(self.path(span, ~[name])))
+ fn path(span: span, strs: ~[ast::ident]) -> @ast::path {
+ @{span: span, global: false, idents: strs, rp: None, types: ~[]}
}
- fn blk(span: span, stmts: ~[@ast::stmt]) -> ast::blk {
- {node: {view_items: ~[],
- stmts: stmts,
- expr: None,
- id: self.next_id(),
- rules: ast::default_blk},
- span: span}
+ fn path_tps(span: span, strs: ~[ast::ident],
+ tps: ~[@ast::Ty]) -> @ast::path {
+ @{span: span, global: false, idents: strs, rp: None, types: tps}
}
- fn expr_blk(expr: @ast::expr) -> ast::blk {
- {node: {view_items: ~[],
- stmts: ~[],
- expr: Some(expr),
- id: self.next_id(),
- rules: ast::default_blk},
- span: expr.span}
+ fn ty_path(span: span, strs: ~[ast::ident],
+ tps: ~[@ast::Ty]) -> @ast::Ty {
+ @{id: self.next_id(),
+ node: ast::ty_path(self.path_tps(span, strs, tps), self.next_id()),
+ span: span}
}
fn binder_pat(span: span, nm: ast::ident) -> @ast::pat {
span: expr.span}
}
- fn alt_stmt(arms: ~[ast::arm],
- span: span, -v: @ast::expr) -> @ast::stmt {
- self.stmt(
- self.expr(
- span,
- ast::expr_match(v, arms)))
- }
-
fn lit_str(span: span, s: @~str) -> @ast::expr {
self.expr(
span,
#ast{ || $(blk_e) }
}
- fn clone_folder() -> fold::ast_fold {
- fold::make_fold(@{
- new_id: |_id| self.next_id(),
- .. *fold::default_ast_fold()
- })
+ fn blk(span: span, stmts: ~[@ast::stmt]) -> ast::blk {
+ {node: {view_items: ~[],
+ stmts: stmts,
+ expr: None,
+ id: self.next_id(),
+ rules: ast::default_blk},
+ span: span}
}
- fn clone(v: @ast::expr) -> @ast::expr {
- let fld = self.clone_folder();
- fld.fold_expr(v)
+ fn expr_blk(expr: @ast::expr) -> ast::blk {
+ {node: {view_items: ~[],
+ stmts: ~[],
+ expr: Some(expr),
+ id: self.next_id(),
+ rules: ast::default_blk},
+ span: expr.span}
}
- fn clone_ty(v: @ast::ty) -> @ast::ty {
- let fld = self.clone_folder();
- fld.fold_ty(v)
+ fn expr_path(span: span, strs: ~[ast::ident]) -> @ast::expr {
+ self.expr(span, ast::expr_path(self.path(span, strs)))
}
- fn clone_ty_param(v: ast::ty_param) -> ast::ty_param {
- let fld = self.clone_folder();
- fold::fold_ty_param(v, fld)
+ fn expr_var(span: span, var: ~str) -> @ast::expr {
+ self.expr_path(span, ~[self.ident_of(var)])
}
- fn at(span: span, expr: @ast::expr) -> @ast::expr {
- fn repl_sp(old_span: span, repl_span: span, with_span: span) -> span {
- if old_span == repl_span {
- with_span
- } else {
- old_span
- }
- }
-
- let fld = fold::make_fold(@{
- new_span: |a| repl_sp(a, ast_util::dummy_sp(), span),
- .. *fold::default_ast_fold()
- });
+ fn expr_field(
+ span: span,
+ expr: @ast::expr,
+ ident: ast::ident
+ ) -> @ast::expr {
+ self.expr(span, ast::expr_field(expr, ident, ~[]))
+ }
- fld.fold_expr(expr)
+ fn expr_call(
+ span: span,
+ expr: @ast::expr,
+ args: ~[@ast::expr]
+ ) -> @ast::expr {
+ self.expr(span, ast::expr_call(expr, args, false))
}
-}
-fn ser_path(cx: ext_ctxt, tps: ser_tps_map, path: @ast::path,
- -s: @ast::expr, -v: @ast::expr)
- -> ~[@ast::stmt] {
- let ext_cx = cx; // required for #ast{}
-
- // We want to take a path like a::b::c<...> and generate a call
- // like a::b::c::serialize(s, ...), as described above.
-
- let callee =
- cx.expr(
- path.span,
- ast::expr_path(
- cx.helper_path(path, ~"serialize")));
-
- let ty_args = do vec::map(path.types) |ty| {
- let sv_stmts = ser_ty(cx, tps, *ty, cx.clone(s), #ast{ __v });
- let sv = cx.expr(path.span,
- ast::expr_block(cx.blk(path.span, sv_stmts)));
- cx.at(ty.span, #ast{ |__v| $(sv) })
- };
+ fn lambda_expr(expr: @ast::expr) -> @ast::expr {
+ self.lambda(self.expr_blk(expr))
+ }
- ~[cx.stmt(
- cx.expr(
- path.span,
- ast::expr_call(callee, vec::append(~[s, v], ty_args), false)))]
+ fn lambda_stmts(span: span, stmts: ~[@ast::stmt]) -> @ast::expr {
+ self.lambda(self.blk(span, stmts))
+ }
}
-fn ser_variant(cx: ext_ctxt,
- tps: ser_tps_map,
- tys: ~[@ast::ty],
- span: span,
- -s: @ast::expr,
- pfn: fn(~[@ast::pat]) -> ast::pat_,
- bodyfn: fn(-v: @ast::expr, ast::blk) -> @ast::expr,
- argfn: fn(-v: @ast::expr, uint, ast::blk) -> @ast::expr)
- -> ast::arm {
- let vnames = do vec::from_fn(vec::len(tys)) |i| {
- cx.parse_sess().interner.intern(@fmt!("__v%u", i))
- };
- let pats = do vec::from_fn(vec::len(tys)) |i| {
- cx.binder_pat(tys[i].span, vnames[i])
- };
- let pat: @ast::pat = @{id: cx.next_id(), node: pfn(pats), span: span};
- let stmts = do vec::from_fn(vec::len(tys)) |i| {
- let v = cx.var_ref(span, vnames[i]);
- let arg_blk =
- cx.blk(
- span,
- ser_ty(cx, tps, tys[i], cx.clone(s), move v));
- cx.stmt(argfn(cx.clone(s), i, arg_blk))
- };
-
- let body_blk = cx.blk(span, stmts);
- let body = cx.blk(span, ~[cx.stmt(bodyfn(move s, body_blk))]);
+fn mk_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ ty_param: ast::ty_param,
+ path: @ast::path,
+ tps: ~[ast::ty_param],
+ f: fn(@ast::Ty) -> @ast::method
+) -> @ast::item {
+ // All the type parameters need to bound to the trait.
+ let mut trait_tps = vec::append(
+ ~[ty_param],
+ do tps.map |tp| {
+ let t_bound = ast::ty_param_bound(@{
+ id: cx.next_id(),
+ node: ast::ty_path(path, cx.next_id()),
+ span: span,
+ });
- {pats: ~[pat], guard: None, body: body}
+ {
+ ident: tp.ident,
+ id: cx.next_id(),
+ bounds: @vec::append(~[t_bound], *tp.bounds)
+ }
+ }
+ );
+
+ let opt_trait = Some(@{
+ path: path,
+ ref_id: cx.next_id(),
+ impl_id: cx.next_id(),
+ });
+
+ let ty = cx.ty_path(
+ span,
+ ~[ident],
+ tps.map(|tp| cx.ty_path(span, ~[tp.ident], ~[]))
+ );
+
+ @{
+ // This is a new-style impl declaration.
+ // XXX: clownshoes
+ ident: ast::token::special_idents::clownshoes_extensions,
+ attrs: ~[],
+ id: cx.next_id(),
+ node: ast::item_impl(trait_tps, opt_trait, ty, ~[f(ty)]),
+ vis: ast::public,
+ span: span,
+ }
}
-fn ser_lambda(cx: ext_ctxt, tps: ser_tps_map, ty: @ast::ty,
- -s: @ast::expr, -v: @ast::expr) -> @ast::expr {
- cx.lambda(cx.blk(ty.span, ser_ty(cx, tps, ty, move s, move v)))
+fn mk_ser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ tps: ~[ast::ty_param],
+ body: @ast::expr
+) -> @ast::item {
+ // Make a path to the std::serialization::Serializable typaram.
+ let ty_param = cx.bind_path(
+ span,
+ cx.ident_of(~"__S"),
+ cx.path(
+ span,
+ ~[
+ cx.ident_of(~"std"),
+ cx.ident_of(~"serialization"),
+ cx.ident_of(~"Serializer"),
+ ]
+ ),
+ @~[]
+ );
+
+ // Make a path to the std::serialization::Serializable trait.
+ let path = cx.path_tps(
+ span,
+ ~[
+ cx.ident_of(~"std"),
+ cx.ident_of(~"serialization"),
+ cx.ident_of(~"Serializable"),
+ ],
+ ~[cx.ty_path(span, ~[cx.ident_of(~"__S")], ~[])]
+ );
+
+ mk_impl(
+ cx,
+ span,
+ ident,
+ ty_param,
+ path,
+ tps,
+ |_ty| mk_ser_method(cx, span, cx.expr_blk(body))
+ )
}
-fn is_vec_or_str(ty: @ast::ty) -> bool {
- match ty.node {
- ast::ty_vec(_) => true,
- // This may be wrong if the user has shadowed (!) str
- ast::ty_path(@{span: _, global: _, idents: ids,
- rp: None, types: _}, _)
- if ids == ~[parse::token::special_idents::str] => true,
- _ => false
- }
+fn mk_deser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ tps: ~[ast::ty_param],
+ body: @ast::expr
+) -> @ast::item {
+ // Make a path to the std::serialization::Deserializable typaram.
+ let ty_param = cx.bind_path(
+ span,
+ cx.ident_of(~"__D"),
+ cx.path(
+ span,
+ ~[
+ cx.ident_of(~"std"),
+ cx.ident_of(~"serialization"),
+ cx.ident_of(~"Deserializer"),
+ ]
+ ),
+ @~[]
+ );
+
+ // Make a path to the std::serialization::Deserializable trait.
+ let path = cx.path_tps(
+ span,
+ ~[
+ cx.ident_of(~"std"),
+ cx.ident_of(~"serialization"),
+ cx.ident_of(~"Deserializable"),
+ ],
+ ~[cx.ty_path(span, ~[cx.ident_of(~"__D")], ~[])]
+ );
+
+ mk_impl(
+ cx,
+ span,
+ ident,
+ ty_param,
+ path,
+ tps,
+ |ty| mk_deser_method(cx, span, ty, cx.expr_blk(body))
+ )
}
-fn ser_ty(cx: ext_ctxt, tps: ser_tps_map,
- ty: @ast::ty, -s: @ast::expr, -v: @ast::expr)
- -> ~[@ast::stmt] {
-
- let ext_cx = cx; // required for #ast{}
-
- match ty.node {
- ast::ty_nil => {
- ~[#ast[stmt]{$(s).emit_nil()}]
- }
-
- ast::ty_bot => {
- cx.span_err(
- ty.span, fmt!("Cannot serialize bottom type"));
- ~[]
- }
-
- ast::ty_box(mt) => {
- let l = ser_lambda(cx, tps, mt.ty, cx.clone(s), #ast{ *$(v) });
- ~[#ast[stmt]{$(s).emit_box($(l));}]
- }
-
- // For unique evecs/estrs, just pass through to underlying vec or str
- ast::ty_uniq(mt) if is_vec_or_str(mt.ty) => {
- ser_ty(cx, tps, mt.ty, move s, move v)
- }
-
- ast::ty_uniq(mt) => {
- let l = ser_lambda(cx, tps, mt.ty, cx.clone(s), #ast{ *$(v) });
- ~[#ast[stmt]{$(s).emit_uniq($(l));}]
- }
-
- ast::ty_ptr(_) | ast::ty_rptr(_, _) => {
- cx.span_err(ty.span, ~"cannot serialize pointer types");
- ~[]
- }
-
- ast::ty_rec(flds) => {
- let fld_stmts = do vec::from_fn(vec::len(flds)) |fidx| {
- let fld = flds[fidx];
- let vf = cx.expr(fld.span,
- ast::expr_field(cx.clone(v),
- fld.node.ident,
- ~[]));
- let s = cx.clone(s);
- let f = cx.lit_str(fld.span, cx.parse_sess().interner.get(
- fld.node.ident));
- let i = cx.lit_uint(fld.span, fidx);
- let l = ser_lambda(cx, tps, fld.node.mt.ty, cx.clone(s), move vf);
- #ast[stmt]{$(s).emit_rec_field($(f), $(i), $(l));}
- };
- let fld_lambda = cx.lambda(cx.blk(ty.span, fld_stmts));
- ~[#ast[stmt]{$(s).emit_rec($(fld_lambda));}]
- }
-
- ast::ty_fn(*) => {
- cx.span_err(ty.span, ~"cannot serialize function types");
- ~[]
- }
-
- ast::ty_tup(tys) => {
- // Generate code like
- //
- // match v {
- // (v1, v2, v3) {
- // .. serialize v1, v2, v3 ..
- // }
- // };
-
- let arms = ~[
- ser_variant(
-
- cx, tps, tys, ty.span, move s,
-
- // Generate pattern (v1, v2, v3)
- |pats| ast::pat_tup(pats),
-
- // Generate body s.emit_tup(3, {|| blk })
- |-s, blk| {
- let sz = cx.lit_uint(ty.span, vec::len(tys));
- let body = cx.lambda(blk);
- #ast{ $(s).emit_tup($(sz), $(body)) }
- },
-
- // Generate s.emit_tup_elt(i, {|| blk })
- |-s, i, blk| {
- let idx = cx.lit_uint(ty.span, i);
- let body = cx.lambda(blk);
- #ast{ $(s).emit_tup_elt($(idx), $(body)) }
- })
- ];
- ~[cx.alt_stmt(arms, ty.span, move v)]
- }
-
- ast::ty_path(path, _) => {
- if path.idents.len() == 1 && path.types.is_empty() {
- let ident = path.idents[0];
-
- match tps.find(ident) {
- Some(f) => f(v),
- None => ser_path(cx, tps, path, move s, move v)
+fn mk_ser_method(
+ cx: ext_ctxt,
+ span: span,
+ ser_body: ast::blk
+) -> @ast::method {
+ let ty_s = @{
+ id: cx.next_id(),
+ node: ast::ty_rptr(
+ @{
+ id: cx.next_id(),
+ node: ast::re_anon,
+ },
+ {
+ ty: cx.ty_path(span, ~[cx.ident_of(~"__S")], ~[]),
+ mutbl: ast::m_imm
}
- } else {
- ser_path(cx, tps, path, move s, move v)
- }
- }
+ ),
+ span: span,
+ };
- ast::ty_mac(_) => {
- cx.span_err(ty.span, ~"cannot serialize macro types");
- ~[]
- }
+ let ser_inputs = ~[{
+ mode: ast::infer(cx.next_id()),
+ ty: ty_s,
+ ident: cx.ident_of(~"__s"),
+ id: cx.next_id(),
+ }];
+
+ let ser_output = @{
+ id: cx.next_id(),
+ node: ast::ty_nil,
+ span: span,
+ };
- ast::ty_infer => {
- cx.span_err(ty.span, ~"cannot serialize inferred types");
- ~[]
- }
+ let ser_decl = {
+ inputs: ser_inputs,
+ output: ser_output,
+ cf: ast::return_val,
+ };
- ast::ty_vec(mt) => {
- let ser_e =
- cx.expr(
- ty.span,
- ast::expr_block(
- cx.blk(
- ty.span,
- ser_ty(
- cx, tps, mt.ty,
- cx.clone(s),
- cx.at(ty.span, #ast{ __e })))));
-
- ~[#ast[stmt]{
- std::serialization::emit_from_vec($(s), $(v), |__e| $(ser_e))
- }]
- }
-
- ast::ty_fixed_length(_, _) => {
- cx.span_unimpl(ty.span, ~"serialization for fixed length types");
- }
+ @{
+ ident: cx.ident_of(~"serialize"),
+ attrs: ~[],
+ tps: ~[],
+ self_ty: { node: ast::sty_region(ast::m_imm), span: span },
+ purity: ast::impure_fn,
+ decl: ser_decl,
+ body: ser_body,
+ id: cx.next_id(),
+ span: span,
+ self_id: cx.next_id(),
+ vis: ast::public,
}
}
-fn mk_ser_fn(cx: ext_ctxt, span: span, name: ast::ident,
- tps: ~[ast::ty_param],
- f: fn(ext_ctxt, ser_tps_map,
- -v: @ast::expr, -v: @ast::expr) -> ~[@ast::stmt])
- -> @ast::item {
- let ext_cx = cx; // required for #ast
-
- let tp_types = vec::map(tps, |tp| cx.ty_path(span, ~[tp.ident], ~[]));
- let v_ty = cx.ty_path(span, ~[name], tp_types);
-
- let tp_inputs =
- vec::map(tps, |tp|
- {mode: ast::expl(ast::by_ref),
- ty: cx.ty_fn(span,
- ~[cx.ty_path(span, ~[tp.ident], ~[])],
- cx.ty_nil(span)),
- ident: cx.ident_of(~"__s" + cx.str_of(tp.ident)),
- id: cx.next_id()});
-
- debug!("tp_inputs = %?", tp_inputs);
-
-
- let ser_inputs: ~[ast::arg] =
- vec::append(~[{mode: ast::expl(ast::by_ref),
- ty: cx.ty_path(span, ~[cx.ident_of(~"__S")], ~[]),
- ident: cx.ident_of(~"__s"),
- id: cx.next_id()},
- {mode: ast::expl(ast::by_ref),
- ty: v_ty,
- ident: cx.ident_of(~"__v"),
- id: cx.next_id()}],
- tp_inputs);
-
- let tps_map = map::HashMap();
- for vec::each2(tps, tp_inputs) |tp, arg| {
- let arg_ident = arg.ident;
- tps_map.insert(
- tp.ident,
- fn@(v: @ast::expr) -> ~[@ast::stmt] {
- let f = cx.var_ref(span, arg_ident);
- debug!("serializing type arg %s", cx.str_of(arg_ident));
- ~[#ast[stmt]{$(f)($(v));}]
- });
- }
+fn mk_deser_method(
+ cx: ext_ctxt,
+ span: span,
+ ty: @ast::Ty,
+ deser_body: ast::blk
+) -> @ast::method {
+ let ty_d = @{
+ id: cx.next_id(),
+ node: ast::ty_rptr(
+ @{
+ id: cx.next_id(),
+ node: ast::re_anon,
+ },
+ {
+ ty: cx.ty_path(span, ~[cx.ident_of(~"__D")], ~[]),
+ mutbl: ast::m_imm
+ }
+ ),
+ span: span,
+ };
- let ser_bnds = @~[
- ast::bound_trait(cx.ty_path(span,
- ~[cx.ident_of(~"std"),
- cx.ident_of(~"serialization"),
- cx.ident_of(~"Serializer")],
- ~[]))];
-
- let ser_tps: ~[ast::ty_param] =
- vec::append(~[{ident: cx.ident_of(~"__S"),
- id: cx.next_id(),
- bounds: ser_bnds}],
- vec::map(tps, |tp| cx.clone_ty_param(*tp)));
-
- let ser_output: @ast::ty = @{id: cx.next_id(),
- node: ast::ty_nil,
- span: span};
-
- let ser_blk = cx.blk(span,
- f(cx, tps_map, #ast{ __s }, #ast{ __v }));
-
- @{ident: cx.ident_of(~"serialize_" + cx.str_of(name)),
- attrs: ~[],
- id: cx.next_id(),
- node: ast::item_fn({inputs: ser_inputs,
- output: ser_output,
- cf: ast::return_val},
- ast::impure_fn,
- ser_tps,
- ser_blk),
- vis: ast::public,
- span: span}
+ let deser_inputs = ~[{
+ mode: ast::infer(cx.next_id()),
+ ty: ty_d,
+ ident: cx.ident_of(~"__d"),
+ id: cx.next_id(),
+ }];
+
+ let deser_decl = {
+ inputs: deser_inputs,
+ output: ty,
+ cf: ast::return_val,
+ };
+
+ @{
+ ident: cx.ident_of(~"deserialize"),
+ attrs: ~[],
+ tps: ~[],
+ self_ty: { node: ast::sty_static, span: span },
+ purity: ast::impure_fn,
+ decl: deser_decl,
+ body: deser_body,
+ id: cx.next_id(),
+ span: span,
+ self_id: cx.next_id(),
+ vis: ast::public,
+ }
}
-// ______________________________________________________________________
+fn mk_rec_ser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ fields: ~[ast::ty_field],
+ tps: ~[ast::ty_param]
+) -> @ast::item {
+ let fields = mk_ser_fields(cx, span, mk_rec_fields(fields));
+
+ // ast for `__s.emit_rec(|| $(fields))`
+ let body = cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__s"),
+ cx.ident_of(~"emit_rec")
+ ),
+ ~[cx.lambda_stmts(span, fields)]
+ );
-fn deser_path(cx: ext_ctxt, tps: deser_tps_map, path: @ast::path,
- -d: @ast::expr) -> @ast::expr {
- // We want to take a path like a::b::c<...> and generate a call
- // like a::b::c::deserialize(d, ...), as described above.
+ mk_ser_impl(cx, span, ident, tps, body)
+}
- let callee =
- cx.expr(
- path.span,
- ast::expr_path(
- cx.helper_path(path, ~"deserialize")));
+fn mk_rec_deser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ fields: ~[ast::ty_field],
+ tps: ~[ast::ty_param]
+) -> @ast::item {
+ let fields = mk_deser_fields(cx, span, mk_rec_fields(fields));
+
+ // ast for `read_rec(|| $(fields))`
+ let body = cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__d"),
+ cx.ident_of(~"read_rec")
+ ),
+ ~[
+ cx.lambda_expr(
+ cx.expr(
+ span,
+ ast::expr_rec(fields, None)
+ )
+ )
+ ]
+ );
- let ty_args = do vec::map(path.types) |ty| {
- let dv_expr = deser_ty(cx, tps, *ty, cx.clone(d));
- cx.lambda(cx.expr_blk(dv_expr))
- };
+ mk_deser_impl(cx, span, ident, tps, body)
+}
- cx.expr(path.span, ast::expr_call(callee, vec::append(~[d], ty_args),
- false))
+fn mk_struct_ser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ fields: ~[@ast::struct_field],
+ tps: ~[ast::ty_param]
+) -> @ast::item {
+ let fields = mk_ser_fields(cx, span, mk_struct_fields(fields));
+
+ // ast for `__s.emit_struct($(name), || $(fields))`
+ let ser_body = cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__s"),
+ cx.ident_of(~"emit_struct")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(ident)),
+ cx.lambda_stmts(span, fields),
+ ]
+ );
+
+ mk_ser_impl(cx, span, ident, tps, ser_body)
}
-fn deser_lambda(cx: ext_ctxt, tps: deser_tps_map, ty: @ast::ty,
- -d: @ast::expr) -> @ast::expr {
- cx.lambda(cx.expr_blk(deser_ty(cx, tps, ty, move d)))
+fn mk_struct_deser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ fields: ~[@ast::struct_field],
+ tps: ~[ast::ty_param]
+) -> @ast::item {
+ let fields = mk_deser_fields(cx, span, mk_struct_fields(fields));
+
+ // ast for `read_struct($(name), || $(fields))`
+ let body = cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__d"),
+ cx.ident_of(~"read_struct")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(ident)),
+ cx.lambda_expr(
+ cx.expr(
+ span,
+ ast::expr_struct(
+ cx.path(span, ~[ident]),
+ fields,
+ None
+ )
+ )
+ ),
+ ]
+ );
+
+ mk_deser_impl(cx, span, ident, tps, body)
}
-fn deser_ty(cx: ext_ctxt, tps: deser_tps_map,
- ty: @ast::ty, -d: @ast::expr) -> @ast::expr {
-
- let ext_cx = cx; // required for #ast{}
-
- match ty.node {
- ast::ty_nil => {
- #ast{ $(d).read_nil() }
- }
-
- ast::ty_bot => {
- #ast{ fail }
- }
-
- ast::ty_box(mt) => {
- let l = deser_lambda(cx, tps, mt.ty, cx.clone(d));
- #ast{ @$(d).read_box($(l)) }
- }
-
- // For unique evecs/estrs, just pass through to underlying vec or str
- ast::ty_uniq(mt) if is_vec_or_str(mt.ty) => {
- deser_ty(cx, tps, mt.ty, move d)
- }
-
- ast::ty_uniq(mt) => {
- let l = deser_lambda(cx, tps, mt.ty, cx.clone(d));
- #ast{ ~$(d).read_uniq($(l)) }
- }
-
- ast::ty_ptr(_) | ast::ty_rptr(_, _) => {
- #ast{ fail }
- }
-
- ast::ty_rec(flds) => {
- let fields = do vec::from_fn(vec::len(flds)) |fidx| {
- let fld = flds[fidx];
- let d = cx.clone(d);
- let f = cx.lit_str(fld.span, @cx.str_of(fld.node.ident));
- let i = cx.lit_uint(fld.span, fidx);
- let l = deser_lambda(cx, tps, fld.node.mt.ty, cx.clone(d));
- {node: {mutbl: fld.node.mt.mutbl,
- ident: fld.node.ident,
- expr: #ast{ $(d).read_rec_field($(f), $(i), $(l))} },
- span: fld.span}
- };
- let fld_expr = cx.expr(ty.span, ast::expr_rec(fields, None));
- let fld_lambda = cx.lambda(cx.expr_blk(fld_expr));
- #ast{ $(d).read_rec($(fld_lambda)) }
- }
-
- ast::ty_fn(*) => {
- #ast{ fail }
- }
-
- ast::ty_tup(tys) => {
- // Generate code like
- //
- // d.read_tup(3u) {||
- // (d.read_tup_elt(0u, {||...}),
- // d.read_tup_elt(1u, {||...}),
- // d.read_tup_elt(2u, {||...}))
- // }
-
- let arg_exprs = do vec::from_fn(vec::len(tys)) |i| {
- let idx = cx.lit_uint(ty.span, i);
- let body = deser_lambda(cx, tps, tys[i], cx.clone(d));
- #ast{ $(d).read_tup_elt($(idx), $(body)) }
- };
- let body =
- cx.lambda(cx.expr_blk(
- cx.expr(ty.span, ast::expr_tup(arg_exprs))));
- let sz = cx.lit_uint(ty.span, vec::len(tys));
- #ast{ $(d).read_tup($(sz), $(body)) }
- }
-
- ast::ty_path(path, _) => {
- if vec::len(path.idents) == 1u &&
- vec::is_empty(path.types) {
- let ident = path.idents[0];
-
- match tps.find(ident) {
- Some(f) => f(),
- None => deser_path(cx, tps, path, move d)
- }
- } else {
- deser_path(cx, tps, path, move d)
+// Records and structs don't have the same fields types, but they share enough
+// that if we extract the right subfields out we can share the serialization
+// generator code.
+type field = { span: span, ident: ast::ident, mutbl: ast::mutability };
+
+fn mk_rec_fields(fields: ~[ast::ty_field]) -> ~[field] {
+ do fields.map |field| {
+ {
+ span: field.span,
+ ident: field.node.ident,
+ mutbl: field.node.mt.mutbl,
}
- }
-
- ast::ty_mac(_) => {
- #ast{ fail }
- }
-
- ast::ty_infer => {
- #ast{ fail }
- }
+ }
+}
- ast::ty_vec(mt) => {
- let l = deser_lambda(cx, tps, mt.ty, cx.clone(d));
- #ast{ std::serialization::read_to_vec($(d), $(l)) }
- }
+fn mk_struct_fields(fields: ~[@ast::struct_field]) -> ~[field] {
+ do fields.map |field| {
+ let (ident, mutbl) = match field.node.kind {
+ ast::named_field(ident, mutbl, _) => (ident, mutbl),
+ _ => fail ~"[auto_serialize] does not support \
+ unnamed fields",
+ };
- ast::ty_fixed_length(_, _) => {
- cx.span_unimpl(ty.span, ~"deserialization for fixed length types");
- }
+ {
+ span: field.span,
+ ident: ident,
+ mutbl: match mutbl {
+ ast::class_mutable => ast::m_mutbl,
+ ast::class_immutable => ast::m_imm,
+ },
+ }
}
}
-fn mk_deser_fn(cx: ext_ctxt, span: span,
- name: ast::ident, tps: ~[ast::ty_param],
- f: fn(ext_ctxt, deser_tps_map, -v: @ast::expr) -> @ast::expr)
- -> @ast::item {
- let ext_cx = cx; // required for #ast
-
- let tp_types = vec::map(tps, |tp| cx.ty_path(span, ~[tp.ident], ~[]));
- let v_ty = cx.ty_path(span, ~[name], tp_types);
-
- let tp_inputs =
- vec::map(tps, |tp|
- {mode: ast::expl(ast::by_ref),
- ty: cx.ty_fn(span,
- ~[],
- cx.ty_path(span, ~[tp.ident], ~[])),
- ident: cx.ident_of(~"__d" + cx.str_of(tp.ident)),
- id: cx.next_id()});
-
- debug!("tp_inputs = %?", tp_inputs);
-
- let deser_inputs: ~[ast::arg] =
- vec::append(~[{mode: ast::expl(ast::by_ref),
- ty: cx.ty_path(span, ~[cx.ident_of(~"__D")], ~[]),
- ident: cx.ident_of(~"__d"),
- id: cx.next_id()}],
- tp_inputs);
-
- let tps_map = map::HashMap();
- for vec::each2(tps, tp_inputs) |tp, arg| {
- let arg_ident = arg.ident;
- tps_map.insert(
- tp.ident,
- fn@() -> @ast::expr {
- let f = cx.var_ref(span, arg_ident);
- #ast{ $(f)() }
- });
+fn mk_ser_fields(
+ cx: ext_ctxt,
+ span: span,
+ fields: ~[field]
+) -> ~[@ast::stmt] {
+ do fields.mapi |idx, field| {
+ // ast for `|| self.$(name).serialize(__s)`
+ let expr_lambda = cx.lambda_expr(
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"self"),
+ field.ident
+ ),
+ cx.ident_of(~"serialize")
+ ),
+ ~[cx.expr_var(span, ~"__s")]
+ )
+ );
+
+ // ast for `__s.emit_field($(name), $(idx), $(expr_lambda))`
+ cx.stmt(
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__s"),
+ cx.ident_of(~"emit_field")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(field.ident)),
+ cx.lit_uint(span, idx),
+ expr_lambda,
+ ]
+ )
+ )
}
+}
- let deser_bnds = @~[
- ast::bound_trait(cx.ty_path(
+fn mk_deser_fields(
+ cx: ext_ctxt,
+ span: span,
+ fields: ~[{ span: span, ident: ast::ident, mutbl: ast::mutability }]
+) -> ~[ast::field] {
+ do fields.mapi |idx, field| {
+ // ast for `|| std::serialization::deserialize(__d)`
+ let expr_lambda = cx.lambda(
+ cx.expr_blk(
+ cx.expr_call(
+ span,
+ cx.expr_path(span, ~[
+ cx.ident_of(~"std"),
+ cx.ident_of(~"serialization"),
+ cx.ident_of(~"deserialize"),
+ ]),
+ ~[cx.expr_var(span, ~"__d")]
+ )
+ )
+ );
+
+ // ast for `__d.read_field($(name), $(idx), $(expr_lambda))`
+ let expr: @ast::expr = cx.expr_call(
span,
- ~[cx.ident_of(~"std"), cx.ident_of(~"serialization"),
- cx.ident_of(~"Deserializer")],
- ~[]))];
-
- let deser_tps: ~[ast::ty_param] =
- vec::append(~[{ident: cx.ident_of(~"__D"),
- id: cx.next_id(),
- bounds: deser_bnds}],
- vec::map(tps, |tp| {
- let cloned = cx.clone_ty_param(*tp);
- {bounds: @(vec::append(*cloned.bounds,
- ~[ast::bound_copy])),
- .. cloned}
- }));
-
- let deser_blk = cx.expr_blk(f(cx, tps_map, #ast[expr]{__d}));
-
- @{ident: cx.ident_of(~"deserialize_" + cx.str_of(name)),
- attrs: ~[],
- id: cx.next_id(),
- node: ast::item_fn({inputs: deser_inputs,
- output: v_ty,
- cf: ast::return_val},
- ast::impure_fn,
- deser_tps,
- deser_blk),
- vis: ast::public,
- span: span}
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__d"),
+ cx.ident_of(~"read_field")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(field.ident)),
+ cx.lit_uint(span, idx),
+ expr_lambda,
+ ]
+ );
+
+ {
+ node: { mutbl: field.mutbl, ident: field.ident, expr: expr },
+ span: span,
+ }
+ }
}
-fn ty_fns(cx: ext_ctxt, name: ast::ident,
- ty: @ast::ty, tps: ~[ast::ty_param])
- -> ~[@ast::item] {
+fn mk_enum_ser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ enum_def: ast::enum_def,
+ tps: ~[ast::ty_param]
+) -> @ast::item {
+ let body = mk_enum_ser_body(
+ cx,
+ span,
+ ident,
+ enum_def.variants
+ );
+
+ mk_ser_impl(cx, span, ident, tps, body)
+}
- let span = ty.span;
- ~[
- mk_ser_fn(cx, span, name, tps, |a,b,c,d| ser_ty(a, b, ty, move c,
- move d)),
- mk_deser_fn(cx, span, name, tps, |a,b,c| deser_ty(a, b, ty, move c))
- ]
+fn mk_enum_deser_impl(
+ cx: ext_ctxt,
+ span: span,
+ ident: ast::ident,
+ enum_def: ast::enum_def,
+ tps: ~[ast::ty_param]
+) -> @ast::item {
+ let body = mk_enum_deser_body(
+ cx,
+ span,
+ ident,
+ enum_def.variants
+ );
+
+ mk_deser_impl(cx, span, ident, tps, body)
}
-fn ser_enum(cx: ext_ctxt, tps: ser_tps_map, e_name: ast::ident,
- e_span: span, variants: ~[ast::variant],
- -s: @ast::expr, -v: @ast::expr) -> ~[@ast::stmt] {
- let ext_cx = cx;
- let arms = do vec::from_fn(vec::len(variants)) |vidx| {
- let variant = variants[vidx];
- let v_span = variant.span;
- let v_name = variant.node.name;
+fn ser_variant(
+ cx: ext_ctxt,
+ span: span,
+ v_name: ast::ident,
+ v_idx: uint,
+ args: ~[ast::variant_arg]
+) -> ast::arm {
+ // Name the variant arguments.
+ let names = args.mapi(|i, _arg| cx.ident_of(fmt!("__v%u", i)));
+
+ // Bind the names to the variant argument type.
+ let pats = args.mapi(|i, arg| cx.binder_pat(arg.ty.span, names[i]));
+
+ let pat_node = if pats.is_empty() {
+ ast::pat_ident(
+ ast::bind_by_implicit_ref,
+ cx.path(span, ~[v_name]),
+ None
+ )
+ } else {
+ ast::pat_enum(
+ cx.path(span, ~[v_name]),
+ Some(pats)
+ )
+ };
- match variant.node.kind {
- ast::tuple_variant_kind(args) => {
- let variant_tys = vec::map(args, |a| a.ty);
-
- ser_variant(
- cx, tps, variant_tys, v_span, cx.clone(s),
-
- // Generate pattern var(v1, v2, v3)
- |pats| {
- if vec::is_empty(pats) {
- ast::pat_ident(ast::bind_by_implicit_ref,
- cx.path(v_span, ~[v_name]),
- None)
- } else {
- ast::pat_enum(cx.path(v_span, ~[v_name]),
- Some(pats))
- }
- },
+ let pat = @{
+ id: cx.next_id(),
+ node: pat_node,
+ span: span,
+ };
- // Generate body s.emit_enum_variant("foo", 0u,
- // 3u, {|| blk })
- |-s, blk| {
- let v_name = cx.lit_str(v_span, @cx.str_of(v_name));
- let v_id = cx.lit_uint(v_span, vidx);
- let sz = cx.lit_uint(v_span, vec::len(variant_tys));
- let body = cx.lambda(blk);
- #ast[expr]{
- $(s).emit_enum_variant($(v_name), $(v_id),
- $(sz), $(body))
- }
- },
+ let stmts = do args.mapi |a_idx, _arg| {
+ // ast for `__s.emit_enum_variant_arg`
+ let expr_emit = cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__s"),
+ cx.ident_of(~"emit_enum_variant_arg")
+ );
- // Generate s.emit_enum_variant_arg(i, {|| blk })
- |-s, i, blk| {
- let idx = cx.lit_uint(v_span, i);
- let body = cx.lambda(blk);
- #ast[expr]{
- $(s).emit_enum_variant_arg($(idx), $(body))
- }
- })
- }
- _ =>
- fail ~"struct variants unimplemented for auto serialize"
- }
+ // ast for `|| $(v).serialize(__s)`
+ let expr_serialize = cx.lambda_expr(
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_path(span, ~[names[a_idx]]),
+ cx.ident_of(~"serialize")
+ ),
+ ~[cx.expr_var(span, ~"__s")]
+ )
+ );
+
+ // ast for `$(expr_emit)($(a_idx), $(expr_serialize))`
+ cx.stmt(
+ cx.expr_call(
+ span,
+ expr_emit,
+ ~[cx.lit_uint(span, a_idx), expr_serialize]
+ )
+ )
};
- let lam = cx.lambda(cx.blk(e_span, ~[cx.alt_stmt(arms, e_span, move v)]));
- let e_name = cx.lit_str(e_span, @cx.str_of(e_name));
- ~[#ast[stmt]{ $(s).emit_enum($(e_name), $(lam)) }]
-}
-fn deser_enum(cx: ext_ctxt, tps: deser_tps_map, e_name: ast::ident,
- e_span: span, variants: ~[ast::variant],
- -d: @ast::expr) -> @ast::expr {
- let ext_cx = cx;
- let mut arms: ~[ast::arm] = do vec::from_fn(vec::len(variants)) |vidx| {
- let variant = variants[vidx];
- let v_span = variant.span;
- let v_name = variant.node.name;
+ // ast for `__s.emit_enum_variant($(name), $(idx), $(sz), $(lambda))`
+ let body = cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__s"),
+ cx.ident_of(~"emit_enum_variant")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(v_name)),
+ cx.lit_uint(span, v_idx),
+ cx.lit_uint(span, stmts.len()),
+ cx.lambda_stmts(span, stmts),
+ ]
+ );
+
+ { pats: ~[pat], guard: None, body: cx.expr_blk(body) }
+}
- let body;
+fn mk_enum_ser_body(
+ cx: ext_ctxt,
+ span: span,
+ name: ast::ident,
+ variants: ~[ast::variant]
+) -> @ast::expr {
+ let arms = do variants.mapi |v_idx, variant| {
match variant.node.kind {
- ast::tuple_variant_kind(args) => {
- let tys = vec::map(args, |a| a.ty);
-
- let arg_exprs = do vec::from_fn(vec::len(tys)) |i| {
- let idx = cx.lit_uint(v_span, i);
- let body = deser_lambda(cx, tps, tys[i], cx.clone(d));
- #ast{ $(d).read_enum_variant_arg($(idx), $(body)) }
- };
-
- body = {
- if vec::is_empty(tys) {
- // for a nullary variant v, do "v"
- cx.var_ref(v_span, v_name)
- } else {
- // for an n-ary variant v, do "v(a_1, ..., a_n)"
- cx.expr(v_span, ast::expr_call(
- cx.var_ref(v_span, v_name), arg_exprs, false))
- }
- };
- }
+ ast::tuple_variant_kind(args) =>
+ ser_variant(cx, span, variant.node.name, v_idx, args),
ast::struct_variant_kind(*) =>
fail ~"struct variants unimplemented",
ast::enum_variant_kind(*) =>
- fail ~"enum variants unimplemented"
+ fail ~"enum variants unimplemented",
}
+ };
- {pats: ~[@{id: cx.next_id(),
- node: ast::pat_lit(cx.lit_uint(v_span, vidx)),
- span: v_span}],
- guard: None,
- body: cx.expr_blk(body)}
+ // ast for `match *self { $(arms) }`
+ let match_expr = cx.expr(
+ span,
+ ast::expr_match(
+ cx.expr(
+ span,
+ ast::expr_unary(ast::deref, cx.expr_var(span, ~"self"))
+ ),
+ arms
+ )
+ );
+
+ // ast for `__s.emit_enum($(name), || $(match_expr))`
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__s"),
+ cx.ident_of(~"emit_enum")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(name)),
+ cx.lambda_expr(match_expr),
+ ]
+ )
+}
+
+fn mk_enum_deser_variant_nary(
+ cx: ext_ctxt,
+ span: span,
+ name: ast::ident,
+ args: ~[ast::variant_arg]
+) -> @ast::expr {
+ let args = do args.mapi |idx, _arg| {
+ // ast for `|| std::serialization::deserialize(__d)`
+ let expr_lambda = cx.lambda_expr(
+ cx.expr_call(
+ span,
+ cx.expr_path(span, ~[
+ cx.ident_of(~"std"),
+ cx.ident_of(~"serialization"),
+ cx.ident_of(~"deserialize"),
+ ]),
+ ~[cx.expr_var(span, ~"__d")]
+ )
+ );
+
+ // ast for `__d.read_enum_variant_arg($(a_idx), $(expr_lambda))`
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__d"),
+ cx.ident_of(~"read_enum_variant_arg")
+ ),
+ ~[cx.lit_uint(span, idx), expr_lambda]
+ )
};
- let impossible_case = {pats: ~[@{id: cx.next_id(),
- node: ast::pat_wild,
- span: e_span}],
- guard: None,
- // FIXME #3198: proper error message
- body: cx.expr_blk(cx.expr(e_span,
- ast::expr_fail(None)))};
- arms += ~[impossible_case];
-
- // Generate code like:
- let e_name = cx.lit_str(e_span, @cx.str_of(e_name));
- let alt_expr = cx.expr(e_span,
- ast::expr_match(#ast{__i}, arms));
- let var_lambda = #ast{ |__i| $(alt_expr) };
- let read_var = #ast{ $(cx.clone(d)).read_enum_variant($(var_lambda)) };
- let read_lambda = cx.lambda(cx.expr_blk(read_var));
- #ast{ $(d).read_enum($(e_name), $(read_lambda)) }
+ // ast for `$(name)($(args))`
+ cx.expr_call(span, cx.expr_path(span, ~[name]), args)
}
-fn enum_fns(cx: ext_ctxt, e_name: ast::ident, e_span: span,
- variants: ~[ast::variant], tps: ~[ast::ty_param])
- -> ~[@ast::item] {
- ~[
- mk_ser_fn(cx, e_span, e_name, tps,
- |a,b,c,d| ser_enum(a, b, e_name, e_span, variants, move c,
- move d)),
- mk_deser_fn(cx, e_span, e_name, tps,
- |a,b,c| deser_enum(a, b, e_name, e_span, variants, move c))
- ]
+fn mk_enum_deser_body(
+ cx: ext_ctxt,
+ span: span,
+ name: ast::ident,
+ variants: ~[ast::variant]
+) -> @ast::expr {
+ let mut arms = do variants.mapi |v_idx, variant| {
+ let body = match variant.node.kind {
+ ast::tuple_variant_kind(args) => {
+ if args.is_empty() {
+ // for a nullary variant v, do "v"
+ cx.expr_path(span, ~[variant.node.name])
+ } else {
+ // for an n-ary variant v, do "v(a_1, ..., a_n)"
+ mk_enum_deser_variant_nary(
+ cx,
+ span,
+ variant.node.name,
+ args
+ )
+ }
+ },
+ ast::struct_variant_kind(*) =>
+ fail ~"struct variants unimplemented",
+ ast::enum_variant_kind(*) =>
+ fail ~"enum variants unimplemented",
+ };
+
+ let pat = @{
+ id: cx.next_id(),
+ node: ast::pat_lit(cx.lit_uint(span, v_idx)),
+ span: span,
+ };
+
+ {
+ pats: ~[pat],
+ guard: None,
+ body: cx.expr_blk(body),
+ }
+ };
+
+ let impossible_case = {
+ pats: ~[@{ id: cx.next_id(), node: ast::pat_wild, span: span}],
+ guard: None,
+
+ // FIXME(#3198): proper error message
+ body: cx.expr_blk(cx.expr(span, ast::expr_fail(None))),
+ };
+
+ arms.push(impossible_case);
+
+ // ast for `|i| { match i { $(arms) } }`
+ let expr_lambda = cx.expr(
+ span,
+ ast::expr_fn_block(
+ {
+ inputs: ~[{
+ mode: ast::infer(cx.next_id()),
+ ty: @{
+ id: cx.next_id(),
+ node: ast::ty_infer,
+ span: span
+ },
+ ident: cx.ident_of(~"i"),
+ id: cx.next_id(),
+ }],
+ output: @{
+ id: cx.next_id(),
+ node: ast::ty_infer,
+ span: span,
+ },
+ cf: ast::return_val,
+ },
+ cx.expr_blk(
+ cx.expr(
+ span,
+ ast::expr_match(cx.expr_var(span, ~"i"), arms)
+ )
+ ),
+ @~[]
+ )
+ );
+
+ // ast for `__d.read_enum_variant($(expr_lambda))`
+ let expr_lambda = cx.lambda_expr(
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__d"),
+ cx.ident_of(~"read_enum_variant")
+ ),
+ ~[expr_lambda]
+ )
+ );
+
+ // ast for `__d.read_enum($(e_name), $(expr_lambda))`
+ cx.expr_call(
+ span,
+ cx.expr_field(
+ span,
+ cx.expr_var(span, ~"__d"),
+ cx.ident_of(~"read_enum")
+ ),
+ ~[
+ cx.lit_str(span, @cx.str_of(name)),
+ expr_lambda
+ ]
+ )
}
+++ /dev/null
-/*
-
-The compiler code necessary to implement the #[auto_serialize2] and
-#[auto_deserialize2] extension. The idea here is that type-defining items may
-be tagged with #[auto_serialize2] and #[auto_deserialize2], which will cause
-us to generate a little companion module with the same name as the item.
-
-For example, a type like:
-
- #[auto_serialize2]
- #[auto_deserialize2]
- struct Node {id: uint}
-
-would generate two implementations like:
-
- impl Node: Serializable {
- fn serialize<S: Serializer>(s: &S) {
- do s.emit_struct("Node") {
- s.emit_field("id", 0, || s.emit_uint(self))
- }
- }
- }
-
- impl node_id: Deserializable {
- static fn deserialize<D: Deserializer>(d: &D) -> Node {
- do d.read_struct("Node") {
- Node {
- id: d.read_field(~"x", 0, || deserialize(d))
- }
- }
- }
- }
-
-Other interesting scenarios are whe the item has type parameters or
-references other non-built-in types. A type definition like:
-
- #[auto_serialize2]
- #[auto_deserialize2]
- type spanned<T> = {node: T, span: span};
-
-would yield functions like:
-
- impl<T: Serializable> spanned<T>: Serializable {
- fn serialize<S: Serializer>(s: &S) {
- do s.emit_rec {
- s.emit_field("node", 0, || self.node.serialize(s));
- s.emit_field("span", 1, || self.span.serialize(s));
- }
- }
- }
-
- impl<T: Deserializable> spanned<T>: Deserializable {
- static fn deserialize<D: Deserializer>(d: &D) -> spanned<T> {
- do d.read_rec {
- {
- node: d.read_field(~"node", 0, || deserialize(d)),
- span: d.read_field(~"span", 1, || deserialize(d)),
- }
- }
- }
- }
-
-FIXME (#2810)--Hygiene. Search for "__" strings. We also assume "std" is the
-standard library.
-
-Misc notes:
------------
-
-I use move mode arguments for ast nodes that will get inserted as is
-into the tree. This is intended to prevent us from inserting the same
-node twice.
-
-*/
-
-use base::*;
-use codemap::span;
-use std::map;
-use std::map::HashMap;
-
-export expand_auto_serialize;
-export expand_auto_deserialize;
-
-// Transitional reexports so qquote can find the paths it is looking for
-mod syntax {
- pub use ext;
- pub use parse;
-}
-
-fn expand_auto_serialize(
- cx: ext_ctxt,
- span: span,
- _mitem: ast::meta_item,
- in_items: ~[@ast::item]
-) -> ~[@ast::item] {
- fn is_auto_serialize2(a: &ast::attribute) -> bool {
- attr::get_attr_name(*a) == ~"auto_serialize2"
- }
-
- fn filter_attrs(item: @ast::item) -> @ast::item {
- @{attrs: vec::filter(item.attrs, |a| !is_auto_serialize2(a)),
- .. *item}
- }
-
- do vec::flat_map(in_items) |item| {
- if item.attrs.any(is_auto_serialize2) {
- match item.node {
- ast::item_ty(@{node: ast::ty_rec(fields), _}, tps) => {
- let ser_impl = mk_rec_ser_impl(
- cx,
- item.span,
- item.ident,
- fields,
- tps
- );
-
- ~[filter_attrs(*item), ser_impl]
- },
- ast::item_class(@{ fields, _}, tps) => {
- let ser_impl = mk_struct_ser_impl(
- cx,
- item.span,
- item.ident,
- fields,
- tps
- );
-
- ~[filter_attrs(*item), ser_impl]
- },
- ast::item_enum(enum_def, tps) => {
- let ser_impl = mk_enum_ser_impl(
- cx,
- item.span,
- item.ident,
- enum_def,
- tps
- );
-
- ~[filter_attrs(*item), ser_impl]
- },
- _ => {
- cx.span_err(span, ~"#[auto_serialize2] can only be \
- applied to structs, record types, \
- and enum definitions");
- ~[*item]
- }
- }
- } else {
- ~[*item]
- }
- }
-}
-
-fn expand_auto_deserialize(
- cx: ext_ctxt,
- span: span,
- _mitem: ast::meta_item,
- in_items: ~[@ast::item]
-) -> ~[@ast::item] {
- fn is_auto_deserialize2(a: &ast::attribute) -> bool {
- attr::get_attr_name(*a) == ~"auto_deserialize2"
- }
-
- fn filter_attrs(item: @ast::item) -> @ast::item {
- @{attrs: vec::filter(item.attrs, |a| !is_auto_deserialize2(a)),
- .. *item}
- }
-
- do vec::flat_map(in_items) |item| {
- if item.attrs.any(is_auto_deserialize2) {
- match item.node {
- ast::item_ty(@{node: ast::ty_rec(fields), _}, tps) => {
- let deser_impl = mk_rec_deser_impl(
- cx,
- item.span,
- item.ident,
- fields,
- tps
- );
-
- ~[filter_attrs(*item), deser_impl]
- },
- ast::item_class(@{ fields, _}, tps) => {
- let deser_impl = mk_struct_deser_impl(
- cx,
- item.span,
- item.ident,
- fields,
- tps
- );
-
- ~[filter_attrs(*item), deser_impl]
- },
- ast::item_enum(enum_def, tps) => {
- let deser_impl = mk_enum_deser_impl(
- cx,
- item.span,
- item.ident,
- enum_def,
- tps
- );
-
- ~[filter_attrs(*item), deser_impl]
- },
- _ => {
- cx.span_err(span, ~"#[auto_deserialize2] can only be \
- applied to structs, record types, \
- and enum definitions");
- ~[*item]
- }
- }
- } else {
- ~[*item]
- }
- }
-}
-
-priv impl ext_ctxt {
- fn expr_path(span: span, strs: ~[ast::ident]) -> @ast::expr {
- self.expr(span, ast::expr_path(self.path(span, strs)))
- }
-
- fn expr_var(span: span, var: ~str) -> @ast::expr {
- self.expr_path(span, ~[self.ident_of(var)])
- }
-
- fn expr_field(
- span: span,
- expr: @ast::expr,
- ident: ast::ident
- ) -> @ast::expr {
- self.expr(span, ast::expr_field(expr, ident, ~[]))
- }
-
- fn expr_call(
- span: span,
- expr: @ast::expr,
- args: ~[@ast::expr]
- ) -> @ast::expr {
- self.expr(span, ast::expr_call(expr, args, false))
- }
-
- fn lambda_expr(expr: @ast::expr) -> @ast::expr {
- self.lambda(self.expr_blk(expr))
- }
-
- fn lambda_stmts(span: span, stmts: ~[@ast::stmt]) -> @ast::expr {
- self.lambda(self.blk(span, stmts))
- }
-}
-
-fn mk_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- path: @ast::path,
- tps: ~[ast::ty_param],
- f: fn(@ast::ty) -> @ast::method
-) -> @ast::item {
- // All the type parameters need to bound to the trait.
- let trait_tps = do tps.map |tp| {
- let t_bound = ast::bound_trait(@{
- id: cx.next_id(),
- node: ast::ty_path(path, cx.next_id()),
- span: span,
- });
-
- {
- ident: tp.ident,
- id: cx.next_id(),
- bounds: @vec::append(~[t_bound], *tp.bounds)
- }
- };
-
- let opt_trait = Some(@{
- path: path,
- ref_id: cx.next_id(),
- impl_id: cx.next_id(),
- });
-
- let ty = cx.ty_path(
- span,
- ~[ident],
- tps.map(|tp| cx.ty_path(span, ~[tp.ident], ~[]))
- );
-
- @{
- // This is a new-style impl declaration.
- // XXX: clownshoes
- ident: ast::token::special_idents::clownshoes_extensions,
- attrs: ~[],
- id: cx.next_id(),
- node: ast::item_impl(trait_tps, opt_trait, ty, ~[f(ty)]),
- vis: ast::public,
- span: span,
- }
-}
-
-fn mk_ser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- tps: ~[ast::ty_param],
- body: @ast::expr
-) -> @ast::item {
- // Make a path to the std::serialization2::Serializable trait.
- let path = cx.path(
- span,
- ~[
- cx.ident_of(~"std"),
- cx.ident_of(~"serialization2"),
- cx.ident_of(~"Serializable"),
- ]
- );
-
- mk_impl(
- cx,
- span,
- ident,
- path,
- tps,
- |_ty| mk_ser_method(cx, span, cx.expr_blk(body))
- )
-}
-
-fn mk_deser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- tps: ~[ast::ty_param],
- body: @ast::expr
-) -> @ast::item {
- // Make a path to the std::serialization2::Deserializable trait.
- let path = cx.path(
- span,
- ~[
- cx.ident_of(~"std"),
- cx.ident_of(~"serialization2"),
- cx.ident_of(~"Deserializable"),
- ]
- );
-
- mk_impl(
- cx,
- span,
- ident,
- path,
- tps,
- |ty| mk_deser_method(cx, span, ty, cx.expr_blk(body))
- )
-}
-
-fn mk_ser_method(
- cx: ext_ctxt,
- span: span,
- ser_body: ast::blk
-) -> @ast::method {
- let ser_bound = cx.ty_path(
- span,
- ~[
- cx.ident_of(~"std"),
- cx.ident_of(~"serialization2"),
- cx.ident_of(~"Serializer"),
- ],
- ~[]
- );
-
- let ser_tps = ~[{
- ident: cx.ident_of(~"__S"),
- id: cx.next_id(),
- bounds: @~[ast::bound_trait(ser_bound)],
- }];
-
- let ty_s = @{
- id: cx.next_id(),
- node: ast::ty_rptr(
- @{
- id: cx.next_id(),
- node: ast::re_anon,
- },
- {
- ty: cx.ty_path(span, ~[cx.ident_of(~"__S")], ~[]),
- mutbl: ast::m_imm
- }
- ),
- span: span,
- };
-
- let ser_inputs = ~[{
- mode: ast::infer(cx.next_id()),
- ty: ty_s,
- ident: cx.ident_of(~"__s"),
- id: cx.next_id(),
- }];
-
- let ser_output = @{
- id: cx.next_id(),
- node: ast::ty_nil,
- span: span,
- };
-
- let ser_decl = {
- inputs: ser_inputs,
- output: ser_output,
- cf: ast::return_val,
- };
-
- @{
- ident: cx.ident_of(~"serialize"),
- attrs: ~[],
- tps: ser_tps,
- self_ty: { node: ast::sty_region(ast::m_imm), span: span },
- purity: ast::impure_fn,
- decl: ser_decl,
- body: ser_body,
- id: cx.next_id(),
- span: span,
- self_id: cx.next_id(),
- vis: ast::public,
- }
-}
-
-fn mk_deser_method(
- cx: ext_ctxt,
- span: span,
- ty: @ast::ty,
- deser_body: ast::blk
-) -> @ast::method {
- let deser_bound = cx.ty_path(
- span,
- ~[
- cx.ident_of(~"std"),
- cx.ident_of(~"serialization2"),
- cx.ident_of(~"Deserializer"),
- ],
- ~[]
- );
-
- let deser_tps = ~[{
- ident: cx.ident_of(~"__D"),
- id: cx.next_id(),
- bounds: @~[ast::bound_trait(deser_bound)],
- }];
-
- let ty_d = @{
- id: cx.next_id(),
- node: ast::ty_rptr(
- @{
- id: cx.next_id(),
- node: ast::re_anon,
- },
- {
- ty: cx.ty_path(span, ~[cx.ident_of(~"__D")], ~[]),
- mutbl: ast::m_imm
- }
- ),
- span: span,
- };
-
- let deser_inputs = ~[{
- mode: ast::infer(cx.next_id()),
- ty: ty_d,
- ident: cx.ident_of(~"__d"),
- id: cx.next_id(),
- }];
-
- let deser_decl = {
- inputs: deser_inputs,
- output: ty,
- cf: ast::return_val,
- };
-
- @{
- ident: cx.ident_of(~"deserialize"),
- attrs: ~[],
- tps: deser_tps,
- self_ty: { node: ast::sty_static, span: span },
- purity: ast::impure_fn,
- decl: deser_decl,
- body: deser_body,
- id: cx.next_id(),
- span: span,
- self_id: cx.next_id(),
- vis: ast::public,
- }
-}
-
-fn mk_rec_ser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- fields: ~[ast::ty_field],
- tps: ~[ast::ty_param]
-) -> @ast::item {
- let fields = mk_ser_fields(cx, span, mk_rec_fields(fields));
-
- // ast for `__s.emit_rec(|| $(fields))`
- let body = cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__s"),
- cx.ident_of(~"emit_rec")
- ),
- ~[cx.lambda_stmts(span, fields)]
- );
-
- mk_ser_impl(cx, span, ident, tps, body)
-}
-
-fn mk_rec_deser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- fields: ~[ast::ty_field],
- tps: ~[ast::ty_param]
-) -> @ast::item {
- let fields = mk_deser_fields(cx, span, mk_rec_fields(fields));
-
- // ast for `read_rec(|| $(fields))`
- let body = cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__d"),
- cx.ident_of(~"read_rec")
- ),
- ~[
- cx.lambda_expr(
- cx.expr(
- span,
- ast::expr_rec(fields, None)
- )
- )
- ]
- );
-
- mk_deser_impl(cx, span, ident, tps, body)
-}
-
-fn mk_struct_ser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- fields: ~[@ast::struct_field],
- tps: ~[ast::ty_param]
-) -> @ast::item {
- let fields = mk_ser_fields(cx, span, mk_struct_fields(fields));
-
- // ast for `__s.emit_struct($(name), || $(fields))`
- let ser_body = cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__s"),
- cx.ident_of(~"emit_struct")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(ident)),
- cx.lambda_stmts(span, fields),
- ]
- );
-
- mk_ser_impl(cx, span, ident, tps, ser_body)
-}
-
-fn mk_struct_deser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- fields: ~[@ast::struct_field],
- tps: ~[ast::ty_param]
-) -> @ast::item {
- let fields = mk_deser_fields(cx, span, mk_struct_fields(fields));
-
- // ast for `read_struct($(name), || $(fields))`
- let body = cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__d"),
- cx.ident_of(~"read_struct")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(ident)),
- cx.lambda_expr(
- cx.expr(
- span,
- ast::expr_struct(
- cx.path(span, ~[ident]),
- fields,
- None
- )
- )
- ),
- ]
- );
-
- mk_deser_impl(cx, span, ident, tps, body)
-}
-
-// Records and structs don't have the same fields types, but they share enough
-// that if we extract the right subfields out we can share the serialization
-// generator code.
-type field = { span: span, ident: ast::ident, mutbl: ast::mutability };
-
-fn mk_rec_fields(fields: ~[ast::ty_field]) -> ~[field] {
- do fields.map |field| {
- {
- span: field.span,
- ident: field.node.ident,
- mutbl: field.node.mt.mutbl,
- }
- }
-}
-
-fn mk_struct_fields(fields: ~[@ast::struct_field]) -> ~[field] {
- do fields.map |field| {
- let (ident, mutbl) = match field.node.kind {
- ast::named_field(ident, mutbl, _) => (ident, mutbl),
- _ => fail ~"[auto_serialize2] does not support \
- unnamed fields",
- };
-
- {
- span: field.span,
- ident: ident,
- mutbl: match mutbl {
- ast::class_mutable => ast::m_mutbl,
- ast::class_immutable => ast::m_imm,
- },
- }
- }
-}
-
-fn mk_ser_fields(
- cx: ext_ctxt,
- span: span,
- fields: ~[field]
-) -> ~[@ast::stmt] {
- do fields.mapi |idx, field| {
- // ast for `|| self.$(name).serialize(__s)`
- let expr_lambda = cx.lambda_expr(
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"self"),
- field.ident
- ),
- cx.ident_of(~"serialize")
- ),
- ~[cx.expr_var(span, ~"__s")]
- )
- );
-
- // ast for `__s.emit_field($(name), $(idx), $(expr_lambda))`
- cx.stmt(
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__s"),
- cx.ident_of(~"emit_field")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(field.ident)),
- cx.lit_uint(span, idx),
- expr_lambda,
- ]
- )
- )
- }
-}
-
-fn mk_deser_fields(
- cx: ext_ctxt,
- span: span,
- fields: ~[{ span: span, ident: ast::ident, mutbl: ast::mutability }]
-) -> ~[ast::field] {
- do fields.mapi |idx, field| {
- // ast for `|| std::serialization2::deserialize(__d)`
- let expr_lambda = cx.lambda(
- cx.expr_blk(
- cx.expr_call(
- span,
- cx.expr_path(span, ~[
- cx.ident_of(~"std"),
- cx.ident_of(~"serialization2"),
- cx.ident_of(~"deserialize"),
- ]),
- ~[cx.expr_var(span, ~"__d")]
- )
- )
- );
-
- // ast for `__d.read_field($(name), $(idx), $(expr_lambda))`
- let expr: @ast::expr = cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__d"),
- cx.ident_of(~"read_field")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(field.ident)),
- cx.lit_uint(span, idx),
- expr_lambda,
- ]
- );
-
- {
- node: { mutbl: field.mutbl, ident: field.ident, expr: expr },
- span: span,
- }
- }
-}
-
-fn mk_enum_ser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- enum_def: ast::enum_def,
- tps: ~[ast::ty_param]
-) -> @ast::item {
- let body = mk_enum_ser_body(
- cx,
- span,
- ident,
- enum_def.variants
- );
-
- mk_ser_impl(cx, span, ident, tps, body)
-}
-
-fn mk_enum_deser_impl(
- cx: ext_ctxt,
- span: span,
- ident: ast::ident,
- enum_def: ast::enum_def,
- tps: ~[ast::ty_param]
-) -> @ast::item {
- let body = mk_enum_deser_body(
- cx,
- span,
- ident,
- enum_def.variants
- );
-
- mk_deser_impl(cx, span, ident, tps, body)
-}
-
-fn ser_variant(
- cx: ext_ctxt,
- span: span,
- v_name: ast::ident,
- v_idx: uint,
- args: ~[ast::variant_arg]
-) -> ast::arm {
- // Name the variant arguments.
- let names = args.mapi(|i, _arg| cx.ident_of(fmt!("__v%u", i)));
-
- // Bind the names to the variant argument type.
- let pats = args.mapi(|i, arg| cx.binder_pat(arg.ty.span, names[i]));
-
- let pat_node = if pats.is_empty() {
- ast::pat_ident(
- ast::bind_by_implicit_ref,
- cx.path(span, ~[v_name]),
- None
- )
- } else {
- ast::pat_enum(
- cx.path(span, ~[v_name]),
- Some(pats)
- )
- };
-
- let pat = @{
- id: cx.next_id(),
- node: pat_node,
- span: span,
- };
-
- let stmts = do args.mapi |a_idx, _arg| {
- // ast for `__s.emit_enum_variant_arg`
- let expr_emit = cx.expr_field(
- span,
- cx.expr_var(span, ~"__s"),
- cx.ident_of(~"emit_enum_variant_arg")
- );
-
- // ast for `|| $(v).serialize(__s)`
- let expr_serialize = cx.lambda_expr(
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_path(span, ~[names[a_idx]]),
- cx.ident_of(~"serialize")
- ),
- ~[cx.expr_var(span, ~"__s")]
- )
- );
-
- // ast for `$(expr_emit)($(a_idx), $(expr_serialize))`
- cx.stmt(
- cx.expr_call(
- span,
- expr_emit,
- ~[cx.lit_uint(span, a_idx), expr_serialize]
- )
- )
- };
-
- // ast for `__s.emit_enum_variant($(name), $(idx), $(sz), $(lambda))`
- let body = cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__s"),
- cx.ident_of(~"emit_enum_variant")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(v_name)),
- cx.lit_uint(span, v_idx),
- cx.lit_uint(span, stmts.len()),
- cx.lambda_stmts(span, stmts),
- ]
- );
-
- { pats: ~[pat], guard: None, body: cx.expr_blk(body) }
-}
-
-fn mk_enum_ser_body(
- cx: ext_ctxt,
- span: span,
- name: ast::ident,
- variants: ~[ast::variant]
-) -> @ast::expr {
- let arms = do variants.mapi |v_idx, variant| {
- match variant.node.kind {
- ast::tuple_variant_kind(args) =>
- ser_variant(cx, span, variant.node.name, v_idx, args),
- ast::struct_variant_kind(*) =>
- fail ~"struct variants unimplemented",
- ast::enum_variant_kind(*) =>
- fail ~"enum variants unimplemented",
- }
- };
-
- // ast for `match *self { $(arms) }`
- let match_expr = cx.expr(
- span,
- ast::expr_match(
- cx.expr(
- span,
- ast::expr_unary(ast::deref, cx.expr_var(span, ~"self"))
- ),
- arms
- )
- );
-
- // ast for `__s.emit_enum($(name), || $(match_expr))`
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__s"),
- cx.ident_of(~"emit_enum")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(name)),
- cx.lambda_expr(match_expr),
- ]
- )
-}
-
-fn mk_enum_deser_variant_nary(
- cx: ext_ctxt,
- span: span,
- name: ast::ident,
- args: ~[ast::variant_arg]
-) -> @ast::expr {
- let args = do args.mapi |idx, _arg| {
- // ast for `|| std::serialization2::deserialize(__d)`
- let expr_lambda = cx.lambda_expr(
- cx.expr_call(
- span,
- cx.expr_path(span, ~[
- cx.ident_of(~"std"),
- cx.ident_of(~"serialization2"),
- cx.ident_of(~"deserialize"),
- ]),
- ~[cx.expr_var(span, ~"__d")]
- )
- );
-
- // ast for `__d.read_enum_variant_arg($(a_idx), $(expr_lambda))`
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__d"),
- cx.ident_of(~"read_enum_variant_arg")
- ),
- ~[cx.lit_uint(span, idx), expr_lambda]
- )
- };
-
- // ast for `$(name)($(args))`
- cx.expr_call(span, cx.expr_path(span, ~[name]), args)
-}
-
-fn mk_enum_deser_body(
- cx: ext_ctxt,
- span: span,
- name: ast::ident,
- variants: ~[ast::variant]
-) -> @ast::expr {
- let mut arms = do variants.mapi |v_idx, variant| {
- let body = match variant.node.kind {
- ast::tuple_variant_kind(args) => {
- if args.is_empty() {
- // for a nullary variant v, do "v"
- cx.expr_path(span, ~[variant.node.name])
- } else {
- // for an n-ary variant v, do "v(a_1, ..., a_n)"
- mk_enum_deser_variant_nary(
- cx,
- span,
- variant.node.name,
- args
- )
- }
- },
- ast::struct_variant_kind(*) =>
- fail ~"struct variants unimplemented",
- ast::enum_variant_kind(*) =>
- fail ~"enum variants unimplemented",
- };
-
- let pat = @{
- id: cx.next_id(),
- node: ast::pat_lit(cx.lit_uint(span, v_idx)),
- span: span,
- };
-
- {
- pats: ~[pat],
- guard: None,
- body: cx.expr_blk(body),
- }
- };
-
- let impossible_case = {
- pats: ~[@{ id: cx.next_id(), node: ast::pat_wild, span: span}],
- guard: None,
-
- // FIXME(#3198): proper error message
- body: cx.expr_blk(cx.expr(span, ast::expr_fail(None))),
- };
-
- arms.push(impossible_case);
-
- // ast for `|i| { match i { $(arms) } }`
- let expr_lambda = cx.expr(
- span,
- ast::expr_fn_block(
- {
- inputs: ~[{
- mode: ast::infer(cx.next_id()),
- ty: @{
- id: cx.next_id(),
- node: ast::ty_infer,
- span: span
- },
- ident: cx.ident_of(~"i"),
- id: cx.next_id(),
- }],
- output: @{
- id: cx.next_id(),
- node: ast::ty_infer,
- span: span,
- },
- cf: ast::return_val,
- },
- cx.expr_blk(
- cx.expr(
- span,
- ast::expr_match(cx.expr_var(span, ~"i"), arms)
- )
- ),
- @~[]
- )
- );
-
- // ast for `__d.read_enum_variant($(expr_lambda))`
- let expr_lambda = cx.lambda_expr(
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__d"),
- cx.ident_of(~"read_enum_variant")
- ),
- ~[expr_lambda]
- )
- );
-
- // ast for `__d.read_enum($(e_name), $(expr_lambda))`
- cx.expr_call(
- span,
- cx.expr_field(
- span,
- cx.expr_var(span, ~"__d"),
- cx.ident_of(~"read_enum")
- ),
- ~[
- cx.lit_str(span, @cx.str_of(name)),
- expr_lambda
- ]
- )
-}
use std::map::HashMap;
use parse::parser;
use diagnostic::span_handler;
-use codemap::{codemap, span, expn_info, expanded_from};
+use codemap::{CodeMap, span, expn_info, expanded_from};
// obsolete old-style #macro code:
//
builtin_item_tt(
ext::tt::macro_rules::add_new_extension));
syntax_expanders.insert(~"fmt", builtin(ext::fmt::expand_syntax_ext));
- syntax_expanders.insert(~"auto_serialize",
- item_decorator(ext::auto_serialize::expand));
syntax_expanders.insert(
- ~"auto_serialize2",
- item_decorator(ext::auto_serialize2::expand_auto_serialize));
+ ~"auto_serialize",
+ item_decorator(ext::auto_serialize::expand_auto_serialize));
syntax_expanders.insert(
- ~"auto_deserialize2",
- item_decorator(ext::auto_serialize2::expand_auto_deserialize));
+ ~"auto_deserialize",
+ item_decorator(ext::auto_serialize::expand_auto_deserialize));
syntax_expanders.insert(~"env", builtin(ext::env::expand_syntax_ext));
syntax_expanders.insert(~"concat_idents",
builtin(ext::concat_idents::expand_syntax_ext));
return syntax_expanders;
}
-
// One of these is made during expansion and incrementally updated as we go;
// when a macro expansion occurs, the resulting nodes have the backtrace()
// -> expn_info of their expansion context stored into their span.
trait ext_ctxt {
- fn codemap() -> codemap;
+ fn codemap() -> CodeMap;
fn parse_sess() -> parse::parse_sess;
fn cfg() -> ast::crate_cfg;
fn print_backtrace();
mut mod_path: ~[ast::ident],
mut trace_mac: bool};
impl ctxt_repr: ext_ctxt {
- fn codemap() -> codemap { self.parse_sess.cm }
+ fn codemap() -> CodeMap { self.parse_sess.cm }
fn parse_sess() -> parse::parse_sess { self.parse_sess }
fn cfg() -> ast::crate_cfg { self.cfg }
fn print_backtrace() { }
mut mod_path: ~[],
mut trace_mac: false
};
- move (imp as ext_ctxt)
+ move ((move imp) as ext_ctxt)
}
fn expr_to_str(cx: ext_ctxt, expr: @ast::expr, error: ~str) -> ~str {
match max {
Some(max) if ! (min <= elts_len && elts_len <= max) => {
cx.span_fatal(sp,
- fmt!("#%s takes between %u and %u arguments.",
+ fmt!("%s! takes between %u and %u arguments.",
name, min, max));
}
None if ! (min <= elts_len) => {
- cx.span_fatal(sp, fmt!("#%s needs at least %u arguments.",
+ cx.span_fatal(sp, fmt!("%s! needs at least %u arguments.",
name, min));
}
_ => return elts /* we are good */
}
}
_ => {
- cx.span_fatal(sp, fmt!("#%s: malformed invocation", name))
+ cx.span_fatal(sp, fmt!("%s!: malformed invocation", name))
}
},
- None => cx.span_fatal(sp, fmt!("#%s: missing arguments", name))
+ None => cx.span_fatal(sp, fmt!("%s!: missing arguments", name))
}
}
/*
- * The compiler code necessary to support the #env extension. Eventually this
+ * The compiler code necessary to support the env! extension. Eventually this
* should all get sucked into either the compiler syntax extension plugin
* interface.
*/
// FIXME (#2248): if this was more thorough it would manufacture an
// Option<str> rather than just an maybe-empty string.
- let var = expr_to_str(cx, args[0], ~"#env requires a string");
+ let var = expr_to_str(cx, args[0], ~"env! requires a string");
match os::getenv(var) {
option::None => return mk_uniq_str(cx, sp, ~""),
option::Some(s) => return mk_uniq_str(cx, sp, s)
/*
- * The compiler code necessary to support the #fmt extension. Eventually this
+ * The compiler code necessary to support the fmt! extension. Eventually this
* should all get sucked into either the standard library extfmt module or the
* compiler syntax extension plugin interface.
*/
let args = get_mac_args_no_max(cx, sp, arg, 1u, ~"fmt");
let fmt =
expr_to_str(cx, args[0],
- ~"first argument to #fmt must be a string literal.");
+ ~"first argument to fmt! must be a string literal.");
let fmtspan = args[0].span;
debug!("Format string:");
log(debug, fmt);
let count_is_args = ~[count_lit];
return mk_call(cx, sp, count_is_path, count_is_args);
}
- _ => cx.span_unimpl(sp, ~"unimplemented #fmt conversion")
+ _ => cx.span_unimpl(sp, ~"unimplemented fmt! conversion")
}
}
fn make_ty(cx: ext_ctxt, sp: span, t: Ty) -> @ast::expr {
_ => return false
}
}
- let unsupported = ~"conversion not supported in #fmt string";
+ let unsupported = ~"conversion not supported in fmt! string";
match cnv.param {
option::None => (),
_ => cx.span_unimpl(sp, unsupported)
if !is_signed_type(cnv) {
cx.span_fatal(sp,
~"+ flag only valid in " +
- ~"signed #fmt conversion");
+ ~"signed fmt! conversion");
}
}
FlagSpaceForSign => {
if !is_signed_type(cnv) {
cx.span_fatal(sp,
~"space flag only valid in " +
- ~"signed #fmt conversions");
+ ~"signed fmt! conversions");
}
}
FlagLeftZeroPad => (),
n += 1u;
if n >= nargs {
cx.span_fatal(sp,
- ~"not enough arguments to #fmt " +
+ ~"not enough arguments to fmt! " +
~"for the given format string");
}
debug!("Building conversion:");
if expected_nargs < nargs {
cx.span_fatal
- (sp, fmt!("too many arguments to #fmt. found %u, expected %u",
+ (sp, fmt!("too many arguments to fmt!. found %u, expected %u",
nargs, expected_nargs));
}
use ext::base::ext_ctxt;
use ast::tt_delim;
use parse::lexer::{new_tt_reader, reader};
-use parse::parser::{parser, SOURCE_FILE};
+use parse::parser::{Parser, SOURCE_FILE};
use parse::common::parser_common;
use pipes::parse_proto::proto_parser;
let tt_rdr = new_tt_reader(cx.parse_sess().span_diagnostic,
cx.parse_sess().interner, None, tt);
let rdr = tt_rdr as reader;
- let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
+ let rust_parser = Parser(sess, cfg, rdr.dup(), SOURCE_FILE);
let proto = rust_parser.parse_proto(cx.str_of(id));
}
trait append_types {
- fn add_ty(ty: @ast::ty) -> @ast::path;
- fn add_tys(+tys: ~[@ast::ty]) -> @ast::path;
+ fn add_ty(ty: @ast::Ty) -> @ast::path;
+ fn add_tys(+tys: ~[@ast::Ty]) -> @ast::path;
}
impl @ast::path: append_types {
- fn add_ty(ty: @ast::ty) -> @ast::path {
+ fn add_ty(ty: @ast::Ty) -> @ast::path {
@{types: vec::append_one(self.types, ty),
.. *self}
}
- fn add_tys(+tys: ~[@ast::ty]) -> @ast::path {
+ fn add_tys(+tys: ~[@ast::Ty]) -> @ast::path {
@{types: vec::append(self.types, tys),
.. *self}
}
trait ext_ctxt_ast_builder {
fn ty_param(id: ast::ident, +bounds: ~[ast::ty_param_bound])
-> ast::ty_param;
- fn arg(name: ident, ty: @ast::ty) -> ast::arg;
+ fn arg(name: ident, ty: @ast::Ty) -> ast::arg;
fn expr_block(e: @ast::expr) -> ast::blk;
- fn fn_decl(+inputs: ~[ast::arg], output: @ast::ty) -> ast::fn_decl;
+ fn fn_decl(+inputs: ~[ast::arg], output: @ast::Ty) -> ast::fn_decl;
fn item(name: ident, span: span, +node: ast::item_) -> @ast::item;
fn item_fn_poly(name: ident,
+inputs: ~[ast::arg],
- output: @ast::ty,
+ output: @ast::Ty,
+ty_params: ~[ast::ty_param],
+body: ast::blk) -> @ast::item;
fn item_fn(name: ident,
+inputs: ~[ast::arg],
- output: @ast::ty,
+ output: @ast::Ty,
+body: ast::blk) -> @ast::item;
fn item_enum_poly(name: ident,
span: span,
+ty_params: ~[ast::ty_param]) -> @ast::item;
fn item_enum(name: ident, span: span,
+enum_definition: ast::enum_def) -> @ast::item;
- fn variant(name: ident, span: span, +tys: ~[@ast::ty]) -> ast::variant;
+ fn variant(name: ident, span: span, +tys: ~[@ast::Ty]) -> ast::variant;
fn item_mod(name: ident, span: span, +items: ~[@ast::item]) -> @ast::item;
- fn ty_path_ast_builder(path: @ast::path) -> @ast::ty;
+ fn ty_path_ast_builder(path: @ast::path) -> @ast::Ty;
fn item_ty_poly(name: ident,
span: span,
- ty: @ast::ty,
+ ty: @ast::Ty,
+params: ~[ast::ty_param]) -> @ast::item;
- fn item_ty(name: ident, span: span, ty: @ast::ty) -> @ast::item;
- fn ty_vars(+ty_params: ~[ast::ty_param]) -> ~[@ast::ty];
- fn ty_field_imm(name: ident, ty: @ast::ty) -> ast::ty_field;
- fn ty_rec(+v: ~[ast::ty_field]) -> @ast::ty;
+ fn item_ty(name: ident, span: span, ty: @ast::Ty) -> @ast::item;
+ fn ty_vars(+ty_params: ~[ast::ty_param]) -> ~[@ast::Ty];
+ fn ty_field_imm(name: ident, ty: @ast::Ty) -> ast::ty_field;
+ fn ty_rec(+v: ~[ast::ty_field]) -> @ast::Ty;
fn field_imm(name: ident, e: @ast::expr) -> ast::field;
fn rec(+v: ~[ast::field]) -> @ast::expr;
fn block(+stmts: ~[@ast::stmt], e: @ast::expr) -> ast::blk;
fn stmt_expr(e: @ast::expr) -> @ast::stmt;
fn block_expr(b: ast::blk) -> @ast::expr;
fn empty_span() -> span;
- fn ty_option(ty: @ast::ty) -> @ast::ty;
+ fn ty_option(ty: @ast::Ty) -> @ast::Ty;
}
impl ext_ctxt: ext_ctxt_ast_builder {
- fn ty_option(ty: @ast::ty) -> @ast::ty {
+ fn ty_option(ty: @ast::Ty) -> @ast::Ty {
self.ty_path_ast_builder(path(~[self.ident_of(~"Option")],
self.empty_span())
.add_ty(ty))
span: self.empty_span()}
}
- fn ty_field_imm(name: ident, ty: @ast::ty) -> ast::ty_field {
+ fn ty_field_imm(name: ident, ty: @ast::Ty) -> ast::ty_field {
{node: {ident: name, mt: { ty: ty, mutbl: ast::m_imm } },
span: self.empty_span()}
}
- fn ty_rec(+fields: ~[ast::ty_field]) -> @ast::ty {
+ fn ty_rec(+fields: ~[ast::ty_field]) -> @ast::Ty {
@{id: self.next_id(),
node: ast::ty_rec(fields),
span: self.empty_span()}
}
- fn ty_infer() -> @ast::ty {
+ fn ty_infer() -> @ast::Ty {
@{id: self.next_id(),
node: ast::ty_infer,
span: self.empty_span()}
{ident: id, id: self.next_id(), bounds: @bounds}
}
- fn arg(name: ident, ty: @ast::ty) -> ast::arg {
+ fn arg(name: ident, ty: @ast::Ty) -> ast::arg {
{mode: ast::infer(self.next_id()),
ty: ty,
ident: name,
}
fn fn_decl(+inputs: ~[ast::arg],
- output: @ast::ty) -> ast::fn_decl {
+ output: @ast::Ty) -> ast::fn_decl {
{inputs: inputs,
output: output,
cf: ast::return_val}
fn item_fn_poly(name: ident,
+inputs: ~[ast::arg],
- output: @ast::ty,
+ output: @ast::Ty,
+ty_params: ~[ast::ty_param],
+body: ast::blk) -> @ast::item {
self.item(name,
fn item_fn(name: ident,
+inputs: ~[ast::arg],
- output: @ast::ty,
+ output: @ast::Ty,
+body: ast::blk) -> @ast::item {
self.item_fn_poly(name, inputs, output, ~[], body)
}
fn variant(name: ident,
span: span,
- +tys: ~[@ast::ty]) -> ast::variant {
+ +tys: ~[@ast::Ty]) -> ast::variant {
let args = tys.map(|ty| {ty: *ty, id: self.next_id()});
{node: {name: name,
items: items}))
}
- fn ty_path_ast_builder(path: @ast::path) -> @ast::ty {
+ fn ty_path_ast_builder(path: @ast::path) -> @ast::Ty {
@{id: self.next_id(),
node: ast::ty_path(path, self.next_id()),
span: path.span}
}
- fn ty_nil_ast_builder() -> @ast::ty {
+ fn ty_nil_ast_builder() -> @ast::Ty {
@{id: self.next_id(),
node: ast::ty_nil,
span: self.empty_span()}
fn item_ty_poly(name: ident,
span: span,
- ty: @ast::ty,
+ ty: @ast::Ty,
+params: ~[ast::ty_param]) -> @ast::item {
self.item(name, span, ast::item_ty(ty, params))
}
- fn item_ty(name: ident, span: span, ty: @ast::ty) -> @ast::item {
+ fn item_ty(name: ident, span: span, ty: @ast::Ty) -> @ast::item {
self.item_ty_poly(name, span, ty, ~[])
}
- fn ty_vars(+ty_params: ~[ast::ty_param]) -> ~[@ast::ty] {
+ fn ty_vars(+ty_params: ~[ast::ty_param]) -> ~[@ast::Ty] {
ty_params.map(|p| self.ty_path_ast_builder(
path(~[p.ident], self.empty_span())))
}
}
}
- fn visit_message(name: ~str, _span: span, _tys: &[@ast::ty],
+ fn visit_message(name: ~str, _span: span, _tys: &[@ast::Ty],
this: state, next: next_state) {
match next {
Some({state: next, tys: next_tys}) => {
None => ()
}
}
-}
\ No newline at end of file
+}
fn parse_state(proto: protocol);
}
-impl parser: proto_parser {
+impl parser::Parser: proto_parser {
fn parse_proto(id: ~str) -> protocol {
let proto = protocol(id, self.span);
}
}
- fn to_ty(cx: ext_ctxt) -> @ast::ty {
+ fn to_ty(cx: ext_ctxt) -> @ast::Ty {
cx.ty_path_ast_builder(path(~[cx.ident_of(self.name())], self.span())
.add_tys(cx.ty_vars(self.get_params())))
}
}}
}
- fn buffer_ty_path(cx: ext_ctxt) -> @ast::ty {
+ fn buffer_ty_path(cx: ext_ctxt) -> @ast::Ty {
let mut params: ~[ast::ty_param] = ~[];
for (copy self.states).each |s| {
for s.ty_params.each |tp| {
}
}
-impl @ast::ty: to_source {
+impl @ast::Ty: to_source {
fn to_source(cx: ext_ctxt) -> ~str {
ty_to_str(self, cx.parse_sess().interner)
}
}
-impl ~[@ast::ty]: to_source {
+impl ~[@ast::Ty]: to_source {
fn to_source(cx: ext_ctxt) -> ~str {
str::connect(self.map(|i| i.to_source(cx)), ~", ")
}
}
impl direction: ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
match self {
send => ~"Send",
recv => ~"Recv"
}
}
-type next_state = Option<{state: ~str, tys: ~[@ast::ty]}>;
+type next_state = Option<{state: ~str, tys: ~[@ast::Ty]}>;
enum message {
// name, span, data, current state, next state
- message(~str, span, ~[@ast::ty], state, next_state)
+ message(~str, span, ~[@ast::Ty], state, next_state)
}
impl message {
impl state {
fn add_message(name: ~str, span: span,
- +data: ~[@ast::ty], next: next_state) {
+ +data: ~[@ast::Ty], next: next_state) {
self.messages.push(message(name, span, data, self,
next));
}
}
/// Returns the type that is used for the messages.
- fn to_ty(cx: ext_ctxt) -> @ast::ty {
+ fn to_ty(cx: ext_ctxt) -> @ast::Ty {
cx.ty_path_ast_builder
(path(~[cx.ident_of(self.name)],self.span).add_tys(
cx.ty_vars(self.ty_params)))
trait visitor<Tproto, Tstate, Tmessage> {
fn visit_proto(proto: protocol, st: &[Tstate]) -> Tproto;
fn visit_state(state: state, m: &[Tmessage]) -> Tstate;
- fn visit_message(name: ~str, spane: span, tys: &[@ast::ty],
+ fn visit_message(name: ~str, spane: span, tys: &[@ast::Ty],
this: state, next: next_state) -> Tmessage;
}
use ast::{crate, expr_, mac_invoc,
mac_aq, mac_var};
use parse::parser;
-use parse::parser::parse_from_source_str;
+use parse::parser::{Parser, parse_from_source_str};
use dvec::DVec;
use parse::token::ident_interner;
type aq_ctxt = @{lo: uint, gather: DVec<gather_item>};
enum fragment {
from_expr(@ast::expr),
- from_ty(@ast::ty)
+ from_ty(@ast::Ty)
}
fn ids_ext(cx: ext_ctxt, strs: ~[~str]) -> ~[ast::ident] {
}
fn get_fold_fn() -> ~str {~"fold_expr"}
}
-impl @ast::ty: qq_helper {
+impl @ast::Ty: qq_helper {
fn span() -> span {self.span}
fn visit(cx: aq_ctxt, v: vt<aq_ctxt>) {visit_ty(self, cx, v);}
fn extract_mac() -> Option<ast::mac_> {
};
}
-fn parse_crate(p: parser) -> @ast::crate { p.parse_crate_mod(~[]) }
-fn parse_ty(p: parser) -> @ast::ty { p.parse_ty(false) }
-fn parse_stmt(p: parser) -> @ast::stmt { p.parse_stmt(~[]) }
-fn parse_expr(p: parser) -> @ast::expr { p.parse_expr() }
-fn parse_pat(p: parser) -> @ast::pat { p.parse_pat(true) }
+fn parse_crate(p: Parser) -> @ast::crate { p.parse_crate_mod(~[]) }
+fn parse_ty(p: Parser) -> @ast::Ty { p.parse_ty(false) }
+fn parse_stmt(p: Parser) -> @ast::stmt { p.parse_stmt(~[]) }
+fn parse_expr(p: Parser) -> @ast::expr { p.parse_expr() }
+fn parse_pat(p: Parser) -> @ast::pat { p.parse_pat(true) }
-fn parse_item(p: parser) -> @ast::item {
+fn parse_item(p: Parser) -> @ast::item {
match p.parse_item(~[]) {
Some(item) => item,
None => fail ~"parse_item: parsing an item failed"
}
fn finish<T: qq_helper>
- (ecx: ext_ctxt, body: ast::mac_body_, f: fn (p: parser) -> T)
+ (ecx: ext_ctxt, body: ast::mac_body_, f: fn (p: Parser) -> T)
-> @ast::expr
{
let cm = ecx.codemap();
@f.fold_crate(*n)
}
fn fold_expr(f: ast_fold, &&n: @ast::expr) -> @ast::expr {f.fold_expr(n)}
-fn fold_ty(f: ast_fold, &&n: @ast::ty) -> @ast::ty {f.fold_ty(n)}
+fn fold_ty(f: ast_fold, &&n: @ast::Ty) -> @ast::Ty {f.fold_ty(n)}
fn fold_item(f: ast_fold, &&n: @ast::item) -> @ast::item {
f.fold_item(n).get() //HACK: we know we don't drop items
}
use fold::*;
use ast_util::respan;
-use ast::{ident, path, ty, blk_, expr, expr_path,
+use ast::{ident, path, Ty, blk_, expr, expr_path,
expr_vec, expr_mac, mac_invoc, node_id, expr_index};
export add_new_extension;
match_expr(@expr),
match_path(@path),
match_ident(ast::spanned<ident>),
- match_ty(@ty),
+ match_ty(@Ty),
match_block(ast::blk),
match_exact, /* don't bind anything, just verify the AST traversal */
}
use ext::base::ext_ctxt;
use ast::tt_delim;
use parse::lexer::{new_tt_reader, reader};
-use parse::parser::{parser, SOURCE_FILE};
+use parse::parser::{Parser, SOURCE_FILE};
use parse::common::parser_common;
fn expand_trace_macros(cx: ext_ctxt, sp: span,
let tt_rdr = new_tt_reader(cx.parse_sess().span_diagnostic,
cx.parse_sess().interner, None, tt);
let rdr = tt_rdr as reader;
- let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
+ let rust_parser = Parser(sess, cfg, rdr.dup(), SOURCE_FILE);
let arg = cx.str_of(rust_parser.parse_ident());
match arg {
~"false" => cx.set_trace_macros(false),
_ => cx.span_fatal(sp, ~"trace_macros! only accepts `true` or `false`")
}
- let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
+ let rust_parser = Parser(sess, cfg, rdr.dup(), SOURCE_FILE);
let result = rust_parser.parse_expr();
base::mr_expr(result)
}
// Earley-like parser for macros.
use parse::token;
-use parse::token::{token, EOF, to_str, nonterminal};
+use parse::token::{Token, EOF, to_str, nonterminal};
use parse::lexer::*; //resolve bug?
//import parse::lexer::{reader, tt_reader, tt_reader_as_reader};
-use parse::parser::{parser,SOURCE_FILE};
+use parse::parser::{Parser, SOURCE_FILE};
//import parse::common::parser_common;
use parse::common::*; //resolve bug?
use parse::parse_sess;
type matcher_pos = ~{
elts: ~[ast::matcher], // maybe should be /&? Need to understand regions.
- sep: Option<token>,
+ sep: Option<Token>,
mut idx: uint,
mut up: matcher_pos_up, // mutable for swapping only
matches: ~[DVec<@named_match>],
}
#[allow(non_implicitly_copyable_typarams)]
-fn initial_matcher_pos(ms: ~[matcher], sep: Option<token>, lo: uint)
+fn initial_matcher_pos(ms: ~[matcher], sep: Option<Token>, lo: uint)
-> matcher_pos {
let mut match_idx_hi = 0u;
for ms.each() |elt| {
built-in NTs %s or %u other options.",
nts, next_eis.len()));
} else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
- return failure(sp, ~"No rules expected the token "
+ return failure(sp, ~"No rules expected the token: "
+ to_str(rdr.interner(), tok));
} else if (next_eis.len() > 0u) {
/* Now process the next token */
}
rdr.next_token();
} else /* bb_eis.len() == 1 */ {
- let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
+ let rust_parser = Parser(sess, cfg, rdr.dup(), SOURCE_FILE);
let ei = bb_eis.pop();
match ei.elts[ei.idx].node {
}
}
-fn parse_nt(p: parser, name: ~str) -> nonterminal {
+fn parse_nt(p: Parser, name: ~str) -> nonterminal {
match name {
~"item" => match p.parse_item(~[]) {
Some(i) => token::nt_item(i),
match_nonterminal, match_seq, tt_delim};
use parse::lexer::{new_tt_reader, reader};
use parse::token::{FAT_ARROW, SEMI, LBRACE, RBRACE, nt_matchers, nt_tt};
-use parse::parser::{parser, SOURCE_FILE};
+use parse::parser::{Parser, SOURCE_FILE};
use macro_parser::{parse, parse_or_else, success, failure, named_match,
matched_seq, matched_nonterminal, error};
use std::map::HashMap;
// rhs has holes ( `$id` and `$(...)` that need filled)
let trncbr = new_tt_reader(s_d, itr, Some(named_matches),
~[rhs]);
- let p = parser(cx.parse_sess(), cx.cfg(),
+ let p = Parser(cx.parse_sess(), cx.cfg(),
trncbr as reader, SOURCE_FILE);
let e = p.parse_expr();
return mr_expr(e);
name: *cx.parse_sess().interner.get(name),
ext: expr_tt({expander: exp, span: Some(sp)})
});
-}
\ No newline at end of file
+}
use ast::{token_tree, tt_delim, tt_tok, tt_seq, tt_nonterminal,ident};
use macro_parser::{named_match, matched_seq, matched_nonterminal};
use codemap::span;
-use parse::token::{EOF, INTERPOLATED, IDENT, token, nt_ident,
- ident_interner};
+use parse::token::{EOF, INTERPOLATED, IDENT, Token, nt_ident, ident_interner};
use std::map::HashMap;
export tt_reader, new_tt_reader, dup_tt_reader, tt_next_token;
readme: ~[ast::token_tree],
mut idx: uint,
dotdotdoted: bool,
- sep: Option<token>,
+ sep: Option<Token>,
up: tt_frame_up,
};
mut repeat_idx: ~[uint],
mut repeat_len: ~[uint],
/* cached: */
- mut cur_tok: token,
+ mut cur_tok: Token,
mut cur_span: span
};
}
-fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
+fn tt_next_token(&&r: tt_reader) -> {tok: Token, sp: span} {
let ret_val = { tok: r.cur_tok, sp: r.cur_span };
while r.cur.idx >= r.cur.readme.len() {
/* done with this set; pop or repeat? */
fn fold_pat(&&v: @pat) -> @pat;
fn fold_decl(&&v: @decl) -> @decl;
fn fold_expr(&&v: @expr) -> @expr;
- fn fold_ty(&&v: @ty) -> @ty;
+ fn fold_ty(&&v: @Ty) -> @Ty;
fn fold_mod(_mod) -> _mod;
fn fold_foreign_mod(foreign_mod) -> foreign_mod;
fn fold_variant(variant) -> variant;
}
fn fold_ty_param_bound(tpb: ty_param_bound, fld: ast_fold) -> ty_param_bound {
- match tpb {
- bound_copy | bound_send | bound_const | bound_owned => tpb,
- bound_trait(ty) => bound_trait(fld.fold_ty(ty))
- }
+ ty_param_bound(fld.fold_ty(*tpb))
}
fn fold_ty_param(tp: ty_param, fld: ast_fold) -> ty_param {
fn fold_struct_def(struct_def: @ast::struct_def, fld: ast_fold)
-> @ast::struct_def {
- let resulting_optional_constructor;
- match struct_def.ctor {
- None => {
- resulting_optional_constructor = None;
- }
- Some(constructor) => {
- resulting_optional_constructor = Some({
- node: {
- body: fld.fold_block(constructor.node.body),
- dec: fold_fn_decl(constructor.node.dec, fld),
- id: fld.new_id(constructor.node.id),
- .. constructor.node
- },
- .. constructor
- });
- }
- }
let dtor = do option::map(&struct_def.dtor) |dtor| {
let dtor_body = fld.fold_block(dtor.node.body);
let dtor_id = fld.new_id(dtor.node.id);
traits: vec::map(struct_def.traits, |p| fold_trait_ref(*p, fld)),
fields: vec::map(struct_def.fields, |f| fold_struct_field(*f, fld)),
methods: vec::map(struct_def.methods, |m| fld.fold_method(*m)),
- ctor: resulting_optional_constructor,
dtor: dtor
};
}
|f| fld.fold_struct_field(*f)),
methods: vec::map(struct_def.methods,
|m| fld.fold_method(*m)),
- ctor: None,
dtor: dtor
})
}
node: n,
span: self.new_span(s)};
}
- fn fold_ty(&&x: @ty) -> @ty {
+ fn fold_ty(&&x: @Ty) -> @Ty {
let (n, s) = self.fold_ty(x.node, x.span, self as ast_fold);
return @{id: self.new_id(x.id), node: n, span: self.new_span(s)};
}
export parse_stmt_from_source_str;
export parse_from_source_str;
-use parser::parser;
+use parser::Parser;
use attr::parser_attr;
use common::parser_common;
use ast::node_id;
use parse::token::{ident_interner, mk_ident_interner};
type parse_sess = @{
- cm: codemap::codemap,
+ cm: codemap::CodeMap,
mut next_id: node_id,
span_diagnostic: span_handler,
interner: @ident_interner,
mut chpos: 0u, mut byte_pos: 0u};
}
-fn new_parse_sess_special_handler(sh: span_handler, cm: codemap::codemap)
+fn new_parse_sess_special_handler(sh: span_handler, cm: codemap::CodeMap)
-> parse_sess {
return @{cm: cm,
mut next_id: 1,
return r;
}
-fn parse_from_source_str<T>(f: fn (p: parser) -> T,
+fn parse_from_source_str<T>(f: fn (p: Parser) -> T,
name: ~str, ss: codemap::file_substr,
source: @~str, cfg: ast::crate_cfg,
sess: parse_sess)
fn new_parser_etc_from_source_str(sess: parse_sess, cfg: ast::crate_cfg,
+name: ~str, +ss: codemap::file_substr,
- source: @~str) -> (parser, string_reader) {
+ source: @~str) -> (Parser, string_reader) {
let ftype = parser::SOURCE_FILE;
let filemap = codemap::new_filemap_w_substr
(name, ss, source, sess.chpos, sess.byte_pos);
sess.cm.files.push(filemap);
let srdr = lexer::new_string_reader(sess.span_diagnostic, filemap,
sess.interner);
- return (parser(sess, cfg, srdr as reader, ftype), srdr);
+ return (Parser(sess, cfg, srdr as reader, ftype), srdr);
}
fn new_parser_from_source_str(sess: parse_sess, cfg: ast::crate_cfg,
+name: ~str, +ss: codemap::file_substr,
- source: @~str) -> parser {
+ source: @~str) -> Parser {
let (p, _) = new_parser_etc_from_source_str(sess, cfg, name, ss, source);
move p
}
fn new_parser_etc_from_file(sess: parse_sess, cfg: ast::crate_cfg,
path: &Path, ftype: parser::file_type) ->
- (parser, string_reader) {
+ (Parser, string_reader) {
let res = io::read_whole_file_str(path);
match res {
result::Ok(_) => { /* Continue. */ }
sess.cm.files.push(filemap);
let srdr = lexer::new_string_reader(sess.span_diagnostic, filemap,
sess.interner);
- return (parser(sess, cfg, srdr as reader, ftype), srdr);
+ return (Parser(sess, cfg, srdr as reader, ftype), srdr);
}
fn new_parser_from_file(sess: parse_sess, cfg: ast::crate_cfg, path: &Path,
- ftype: parser::file_type) -> parser {
+ ftype: parser::file_type) -> Parser {
let (p, _) = new_parser_etc_from_file(sess, cfg, path, ftype);
move p
}
fn new_parser_from_tt(sess: parse_sess, cfg: ast::crate_cfg,
- tt: ~[ast::token_tree]) -> parser {
+ tt: ~[ast::token_tree]) -> Parser {
let trdr = lexer::new_tt_reader(sess.span_diagnostic, sess.interner,
None, tt);
- return parser(sess, cfg, trdr as reader, parser::SOURCE_FILE)
+ return Parser(sess, cfg, trdr as reader, parser::SOURCE_FILE)
}
fn parse_optional_meta() -> ~[@ast::meta_item];
}
-impl parser: parser_attr {
+impl Parser: parser_attr {
fn parse_outer_attrs_or_ext(first_item_attrs: ~[ast::attribute])
-> attr_or_ext
use std::map::{HashMap};
use ast_util::spanned;
-use parser::parser;
+use parser::Parser;
use lexer::reader;
type seq_sep = {
- sep: Option<token::token>,
+ sep: Option<token::Token>,
trailing_sep_allowed: bool
};
-fn seq_sep_trailing_disallowed(t: token::token) -> seq_sep {
+fn seq_sep_trailing_disallowed(t: token::Token) -> seq_sep {
return {sep: option::Some(t), trailing_sep_allowed: false};
}
-fn seq_sep_trailing_allowed(t: token::token) -> seq_sep {
+fn seq_sep_trailing_allowed(t: token::Token) -> seq_sep {
return {sep: option::Some(t), trailing_sep_allowed: true};
}
fn seq_sep_none() -> seq_sep {
return {sep: option::None, trailing_sep_allowed: false};
}
-fn token_to_str(reader: reader, ++token: token::token) -> ~str {
+fn token_to_str(reader: reader, ++token: token::Token) -> ~str {
token::to_str(reader.interner(), token)
}
trait parser_common {
- fn unexpected_last(t: token::token) -> !;
+ fn unexpected_last(t: token::Token) -> !;
fn unexpected() -> !;
- fn expect(t: token::token);
+ fn expect(t: token::Token);
fn parse_ident() -> ast::ident;
fn parse_path_list_ident() -> ast::path_list_ident;
fn parse_value_ident() -> ast::ident;
- fn eat(tok: token::token) -> bool;
+ fn eat(tok: token::Token) -> bool;
// A sanity check that the word we are asking for is a known keyword
fn require_keyword(word: ~str);
- fn token_is_keyword(word: ~str, ++tok: token::token) -> bool;
+ fn token_is_keyword(word: ~str, ++tok: token::Token) -> bool;
fn is_keyword(word: ~str) -> bool;
- fn is_any_keyword(tok: token::token) -> bool;
+ fn is_any_keyword(tok: token::Token) -> bool;
fn eat_keyword(word: ~str) -> bool;
fn expect_keyword(word: ~str);
fn expect_gt();
- fn parse_seq_to_before_gt<T: Copy>(sep: Option<token::token>,
- f: fn(parser) -> T) -> ~[T];
- fn parse_seq_to_gt<T: Copy>(sep: Option<token::token>,
- f: fn(parser) -> T) -> ~[T];
- fn parse_seq_lt_gt<T: Copy>(sep: Option<token::token>,
- f: fn(parser) -> T) -> spanned<~[T]>;
- fn parse_seq_to_end<T: Copy>(ket: token::token, sep: seq_sep,
- f: fn(parser) -> T) -> ~[T];
- fn parse_seq_to_before_end<T: Copy>(ket: token::token, sep: seq_sep,
- f: fn(parser) -> T) -> ~[T];
- fn parse_unspanned_seq<T: Copy>(bra: token::token,
- ket: token::token,
+ fn parse_seq_to_before_gt<T: Copy>(sep: Option<token::Token>,
+ f: fn(Parser) -> T) -> ~[T];
+ fn parse_seq_to_gt<T: Copy>(sep: Option<token::Token>,
+ f: fn(Parser) -> T) -> ~[T];
+ fn parse_seq_lt_gt<T: Copy>(sep: Option<token::Token>,
+ f: fn(Parser) -> T) -> spanned<~[T]>;
+ fn parse_seq_to_end<T: Copy>(ket: token::Token, sep: seq_sep,
+ f: fn(Parser) -> T) -> ~[T];
+ fn parse_seq_to_before_end<T: Copy>(ket: token::Token, sep: seq_sep,
+ f: fn(Parser) -> T) -> ~[T];
+ fn parse_unspanned_seq<T: Copy>(bra: token::Token,
+ ket: token::Token,
sep: seq_sep,
- f: fn(parser) -> T) -> ~[T];
- fn parse_seq<T: Copy>(bra: token::token, ket: token::token, sep: seq_sep,
- f: fn(parser) -> T) -> spanned<~[T]>;
+ f: fn(Parser) -> T) -> ~[T];
+ fn parse_seq<T: Copy>(bra: token::Token, ket: token::Token, sep: seq_sep,
+ f: fn(Parser) -> T) -> spanned<~[T]>;
}
-impl parser: parser_common {
- fn unexpected_last(t: token::token) -> ! {
+impl Parser: parser_common {
+ fn unexpected_last(t: token::Token) -> ! {
self.span_fatal(
copy self.last_span,
~"unexpected token: `" + token_to_str(self.reader, t) + ~"`");
+ token_to_str(self.reader, self.token) + ~"`");
}
- fn expect(t: token::token) {
+ fn expect(t: token::Token) {
if self.token == t {
self.bump();
} else {
return self.parse_ident();
}
- fn eat(tok: token::token) -> bool {
+ fn eat(tok: token::Token) -> bool {
return if self.token == tok { self.bump(); true } else { false };
}
}
}
- fn token_is_word(word: ~str, ++tok: token::token) -> bool {
+ fn token_is_word(word: ~str, ++tok: token::Token) -> bool {
match tok {
token::IDENT(sid, false) => { *self.id_to_str(sid) == word }
_ => { false }
}
}
- fn token_is_keyword(word: ~str, ++tok: token::token) -> bool {
+ fn token_is_keyword(word: ~str, ++tok: token::Token) -> bool {
self.require_keyword(word);
self.token_is_word(word, tok)
}
self.token_is_keyword(word, self.token)
}
- fn is_any_keyword(tok: token::token) -> bool {
+ fn is_any_keyword(tok: token::Token) -> bool {
match tok {
token::IDENT(sid, false) => {
self.keywords.contains_key_ref(self.id_to_str(sid))
}
}
- fn parse_seq_to_before_gt<T: Copy>(sep: Option<token::token>,
- f: fn(parser) -> T) -> ~[T] {
+ fn parse_seq_to_before_gt<T: Copy>(sep: Option<token::Token>,
+ f: fn(Parser) -> T) -> ~[T] {
let mut first = true;
let mut v = ~[];
while self.token != token::GT
return v;
}
- fn parse_seq_to_gt<T: Copy>(sep: Option<token::token>,
- f: fn(parser) -> T) -> ~[T] {
+ fn parse_seq_to_gt<T: Copy>(sep: Option<token::Token>,
+ f: fn(Parser) -> T) -> ~[T] {
let v = self.parse_seq_to_before_gt(sep, f);
self.expect_gt();
return v;
}
- fn parse_seq_lt_gt<T: Copy>(sep: Option<token::token>,
- f: fn(parser) -> T) -> spanned<~[T]> {
+ fn parse_seq_lt_gt<T: Copy>(sep: Option<token::Token>,
+ f: fn(Parser) -> T) -> spanned<~[T]> {
let lo = self.span.lo;
self.expect(token::LT);
let result = self.parse_seq_to_before_gt::<T>(sep, f);
return spanned(lo, hi, result);
}
- fn parse_seq_to_end<T: Copy>(ket: token::token, sep: seq_sep,
- f: fn(parser) -> T) -> ~[T] {
+ fn parse_seq_to_end<T: Copy>(ket: token::Token, sep: seq_sep,
+ f: fn(Parser) -> T) -> ~[T] {
let val = self.parse_seq_to_before_end(ket, sep, f);
self.bump();
return val;
}
- fn parse_seq_to_before_end<T: Copy>(ket: token::token, sep: seq_sep,
- f: fn(parser) -> T) -> ~[T] {
+ fn parse_seq_to_before_end<T: Copy>(ket: token::Token, sep: seq_sep,
+ f: fn(Parser) -> T) -> ~[T] {
let mut first: bool = true;
let mut v: ~[T] = ~[];
while self.token != ket {
return v;
}
- fn parse_unspanned_seq<T: Copy>(bra: token::token,
- ket: token::token,
+ fn parse_unspanned_seq<T: Copy>(bra: token::Token,
+ ket: token::Token,
sep: seq_sep,
- f: fn(parser) -> T) -> ~[T] {
+ f: fn(Parser) -> T) -> ~[T] {
self.expect(bra);
let result = self.parse_seq_to_before_end::<T>(ket, sep, f);
self.bump();
// NB: Do not use this function unless you actually plan to place the
// spanned list in the AST.
- fn parse_seq<T: Copy>(bra: token::token, ket: token::token, sep: seq_sep,
- f: fn(parser) -> T) -> spanned<~[T]> {
+ fn parse_seq<T: Copy>(bra: token::Token, ket: token::Token, sep: seq_sep,
+ f: fn(Parser) -> T) -> spanned<~[T]> {
let lo = self.span.lo;
self.expect(bra);
let result = self.parse_seq_to_before_end::<T>(ket, sep, f);
-use parser::{parser, SOURCE_FILE};
+use parser::{Parser, SOURCE_FILE};
use attr::parser_attr;
export eval_crate_directives_to_mod;
trait reader {
fn is_eof() -> bool;
- fn next_token() -> {tok: token::token, sp: span};
+ fn next_token() -> {tok: token::Token, sp: span};
fn fatal(~str) -> !;
fn span_diag() -> span_handler;
pure fn interner() -> @token::ident_interner;
- fn peek() -> {tok: token::token, sp: span};
+ fn peek() -> {tok: token::Token, sp: span};
fn dup() -> reader;
}
filemap: codemap::filemap,
interner: @token::ident_interner,
/* cached: */
- mut peek_tok: token::token,
+ mut peek_tok: token::Token,
mut peek_span: span
};
impl string_reader: reader {
fn is_eof() -> bool { is_eof(self) }
- fn next_token() -> {tok: token::token, sp: span} {
+ fn next_token() -> {tok: token::Token, sp: span} {
let ret_val = {tok: self.peek_tok, sp: self.peek_span};
string_advance_token(self);
return ret_val;
}
fn span_diag() -> span_handler { self.span_diagnostic }
pure fn interner() -> @token::ident_interner { self.interner }
- fn peek() -> {tok: token::token, sp: span} {
+ fn peek() -> {tok: token::Token, sp: span} {
{tok: self.peek_tok, sp: self.peek_span}
}
fn dup() -> reader { dup_string_reader(self) as reader }
impl tt_reader: reader {
fn is_eof() -> bool { self.cur_tok == token::EOF }
- fn next_token() -> {tok: token::token, sp: span} {
+ fn next_token() -> {tok: token::Token, sp: span} {
/* weird resolve bug: if the following `if`, or any of its
statements are removed, we get resolution errors */
if false {
}
fn span_diag() -> span_handler { self.sp_diag }
pure fn interner() -> @token::ident_interner { self.interner }
- fn peek() -> {tok: token::token, sp: span} {
+ fn peek() -> {tok: token::Token, sp: span} {
{ tok: self.cur_tok, sp: self.cur_span }
}
fn dup() -> reader { dup_tt_reader(self) as reader }
// might return a sugared-doc-attr
fn consume_whitespace_and_comments(rdr: string_reader)
- -> Option<{tok: token::token, sp: span}> {
+ -> Option<{tok: token::Token, sp: span}> {
while is_whitespace(rdr.curr) { bump(rdr); }
return consume_any_line_comment(rdr);
}
// might return a sugared-doc-attr
fn consume_any_line_comment(rdr: string_reader)
- -> Option<{tok: token::token, sp: span}> {
+ -> Option<{tok: token::Token, sp: span}> {
if rdr.curr == '/' {
match nextch(rdr) {
'/' => {
// might return a sugared-doc-attr
fn consume_block_comment(rdr: string_reader)
- -> Option<{tok: token::token, sp: span}> {
+ -> Option<{tok: token::Token, sp: span}> {
// block comments starting with "/**" or "/*!" are doc-comments
if rdr.curr == '*' || rdr.curr == '!' {
};
}
-fn scan_number(c: char, rdr: string_reader) -> token::token {
+fn scan_number(c: char, rdr: string_reader) -> token::Token {
let mut num_str, base = 10u, c = c, n = nextch(rdr);
if c == '0' && n == 'x' {
bump(rdr);
return accum_int as char;
}
-fn next_token_inner(rdr: string_reader) -> token::token {
+fn next_token_inner(rdr: string_reader) -> token::Token {
let mut accum_str = ~"";
let mut c = rdr.curr;
if (c >= 'a' && c <= 'z')
if is_dec_digit(c) {
return scan_number(c, rdr);
}
- fn binop(rdr: string_reader, op: token::binop) -> token::token {
+ fn binop(rdr: string_reader, op: token::binop) -> token::Token {
bump(rdr);
if rdr.curr == '=' {
bump(rdr);
use codemap::span;
use ast::{expr, expr_lit, lit_nil};
use ast_util::{respan};
-use token::token;
+use token::Token;
/// The specific types of unsupported syntax
pub enum ObsoleteSyntax {
ObsoleteClassTraits,
ObsoletePrivSection,
ObsoleteModeInFnType,
- ObsoleteByMutRefMode
+ ObsoleteByMutRefMode,
+ ObsoleteFixedLengthVec,
}
impl ObsoleteSyntax : cmp::Eq {
fn obsolete_expr(sp: span, kind: ObsoleteSyntax) -> @expr;
}
-impl parser : ObsoleteReporter {
+impl Parser : ObsoleteReporter {
/// Reports an obsolete syntax non-fatal error.
fn obsolete(sp: span, kind: ObsoleteSyntax) {
let (kind_str, desc) = match kind {
"by-mutable-reference mode",
"Declare an argument of type &mut T instead"
),
+ ObsoleteFixedLengthVec => (
+ "fixed-length vector",
+ "Fixed-length types are now written `[T * N]`, and instances \
+ are type-inferred"
+ )
};
self.report(sp, kind, kind_str, desc);
}
}
- fn token_is_obsolete_ident(ident: &str, token: token) -> bool {
+ fn token_is_obsolete_ident(ident: &str, token: Token) -> bool {
match token {
token::IDENT(copy sid, _) => {
str::eq_slice(*self.id_to_str(sid), ident)
false
}
}
+
+ fn try_parse_obsolete_fixed_vstore() -> Option<Option<uint>> {
+ if self.token == token::BINOP(token::SLASH) {
+ self.bump();
+ match copy self.token {
+ token::UNDERSCORE => {
+ self.obsolete(copy self.last_span,
+ ObsoleteFixedLengthVec);
+ self.bump(); Some(None)
+ }
+ token::LIT_INT_UNSUFFIXED(i) if i >= 0i64 => {
+ self.obsolete(copy self.last_span,
+ ObsoleteFixedLengthVec);
+ self.bump(); Some(Some(i as uint))
+ }
+ _ => None
+ }
+ } else {
+ None
+ }
+ }
+
+ fn try_convert_ty_to_obsolete_fixed_length_vstore(sp: span, t: ast::ty_)
+ -> ast::ty_ {
+ match self.try_parse_obsolete_fixed_vstore() {
+ // Consider a fixed length vstore suffix (/N or /_)
+ None => t,
+ Some(v) => {
+ ast::ty_fixed_length(
+ @{id: self.get_id(), node: t, span: sp}, v)
+ }
+ }
+ }
+
+ fn try_convert_expr_to_obsolete_fixed_length_vstore(
+ lo: uint, hi: uint, ex: ast::expr_
+ ) -> (uint, ast::expr_) {
+
+ let mut hi = hi;
+ let mut ex = ex;
+
+ // Vstore is legal following expr_lit(lit_str(...)) and expr_vec(...)
+ // only.
+ match ex {
+ ast::expr_lit(@{node: ast::lit_str(_), span: _}) |
+ ast::expr_vec(_, _) => {
+ match self.try_parse_obsolete_fixed_vstore() {
+ None => (),
+ Some(v) => {
+ hi = self.span.hi;
+ ex = ast::expr_vstore(self.mk_expr(lo, hi, ex),
+ ast::expr_vstore_fixed(v));
+ }
+ }
+ }
+ _ => ()
+ }
+
+ return (hi, ex);
+ }
+
}
use token::{can_begin_expr, is_ident, is_ident_or_path, is_plain_ident,
INTERPOLATED, special_idents};
use codemap::{span,fss_none};
-use util::interner::interner;
+use util::interner::Interner;
use ast_util::{spanned, respan, mk_sp, ident_to_path, operator_prec};
use lexer::reader;
use prec::{as_prec, token_to_binop};
ObsoleteWith, ObsoleteClassMethod, ObsoleteClassTraits,
ObsoleteModeInFnType, ObsoleteByMutRefMode
};
-use ast::{_mod, add, alt_check, alt_exhaustive, arg, arm, attribute,
+use ast::{_mod, add, arg, arm, attribute,
bind_by_ref, bind_by_implicit_ref, bind_by_value, bind_by_move,
- bitand, bitor, bitxor, blk, blk_check_mode, bound_const,
- bound_copy, bound_send, bound_trait, bound_owned, box, by_copy,
+ bitand, bitor, bitxor, blk, blk_check_mode, box, by_copy,
by_move, by_ref, by_val, capture_clause,
capture_item, cdir_dir_mod, cdir_src_mod, cdir_view_item,
class_immutable, class_mutable,
stmt_semi, struct_def, struct_field, struct_variant_kind,
subtract, sty_box, sty_by_ref, sty_region, sty_static, sty_uniq,
sty_value, token_tree, trait_method, trait_ref, tt_delim, tt_seq,
- tt_tok, tt_nonterminal, tuple_variant_kind, ty, ty_, ty_bot,
+ tt_tok, tt_nonterminal, tuple_variant_kind, Ty, ty_, ty_bot,
ty_box, ty_field, ty_fn, ty_infer, ty_mac, ty_method, ty_nil,
ty_param, ty_param_bound, ty_path, ty_ptr, ty_rec, ty_rptr,
ty_tup, ty_u32, ty_uniq, ty_vec, ty_fixed_length, type_value_ns,
expr_vstore_uniq};
export file_type;
-export parser;
+export Parser;
export CRATE_FILE;
export SOURCE_FILE;
-// FIXME (#1893): #ast expects to find this here but it's actually
+// FIXME (#3726): #ast expects to find this here but it's actually
// defined in `parse` Fixing this will be easier when we have export
// decls on individual items -- then parse can export this publicly, and
// everything else crate-visibly.
So that we can distinguish a class ctor or dtor
from other class members
*/
-enum class_contents { ctor_decl(fn_decl, ~[attribute], blk, codemap::span),
- dtor_decl(blk, ~[attribute], codemap::span),
+enum class_contents { dtor_decl(blk, ~[attribute], codemap::span),
members(~[@class_member]) }
type arg_or_capture_item = Either<arg, capture_item>;
enum item_or_view_item {
iovi_none,
iovi_item(@item),
+ iovi_foreign_item(@foreign_item),
iovi_view_item(@view_item)
}
enum view_item_parse_mode {
VIEW_ITEMS_AND_ITEMS_ALLOWED,
- VIEW_ITEMS_ALLOWED,
+ VIEW_ITEMS_AND_FOREIGN_ITEMS_ALLOWED,
IMPORTS_AND_ITEMS_ALLOWED
}
/* ident is handled by common.rs */
-fn parser(sess: parse_sess, cfg: ast::crate_cfg,
- +rdr: reader, ftype: file_type) -> parser {
+fn Parser(sess: parse_sess, cfg: ast::crate_cfg,
+ +rdr: reader, ftype: file_type) -> Parser {
let tok0 = rdr.next_token();
let span0 = tok0.sp;
let interner = rdr.interner();
- parser {
+ Parser {
reader: move rdr,
interner: move interner,
sess: sess,
token: tok0.tok,
span: span0,
last_span: span0,
- buffer: [mut
- {tok: tok0.tok, sp: span0},
- {tok: tok0.tok, sp: span0},
- {tok: tok0.tok, sp: span0},
- {tok: tok0.tok, sp: span0}
- ]/4,
+ buffer: [mut {tok: tok0.tok, sp: span0}, ..4],
buffer_start: 0,
buffer_end: 0,
restriction: UNRESTRICTED,
}
}
-struct parser {
+struct Parser {
sess: parse_sess,
cfg: crate_cfg,
file_type: file_type,
- mut token: token::token,
+ mut token: token::Token,
mut span: span,
mut last_span: span,
- mut buffer: [mut {tok: token::token, sp: span}]/4,
+ mut buffer: [mut {tok: token::Token, sp: span} * 4],
mut buffer_start: int,
mut buffer_end: int,
mut restriction: restriction,
drop {} /* do not copy the parser; its state is tied to outside state */
}
-impl parser {
+impl Parser {
fn bump() {
self.last_span = self.span;
let next = if self.buffer_start == self.buffer_end {
self.token = next.tok;
self.span = next.sp;
}
- fn swap(next: token::token, lo: uint, hi: uint) {
+ fn swap(next: token::Token, lo: uint, hi: uint) {
self.token = next;
self.span = mk_sp(lo, hi);
}
}
return (4 - self.buffer_start) + self.buffer_end;
}
- fn look_ahead(distance: uint) -> token::token {
+ fn look_ahead(distance: uint) -> token::Token {
let dist = distance as int;
while self.buffer_length() < dist {
self.buffer[self.buffer_end] = self.reader.next_token();
});
}
- fn parse_ret_ty() -> (ret_style, @ty) {
+ fn parse_ret_ty() -> (ret_style, @Ty) {
return if self.eat(token::RARROW) {
let lo = self.span.lo;
if self.eat(token::NOT) {
self.region_from_name(name)
}
- fn parse_ty(colons_before_params: bool) -> @ty {
+ fn parse_ty(colons_before_params: bool) -> @Ty {
maybe_whole!(self, nt_ty);
let lo = self.span.lo;
} else { self.fatal(~"expected type"); };
let sp = mk_sp(lo, self.last_span.hi);
- return @{id: self.get_id(),
- node: match self.maybe_parse_fixed_vstore() {
- // Consider a fixed vstore suffix (/N or /_)
- None => t,
- Some(v) => {
- ty_fixed_length(@{id: self.get_id(), node:t, span: sp}, v)
- } },
+ return {
+ let node =
+ self.try_convert_ty_to_obsolete_fixed_length_vstore(sp, t);
+ @{id: self.get_id(),
+ node: node,
span: sp}
+ };
}
fn parse_arg_mode() -> mode {
}
}
- fn parse_capture_item_or(parse_arg_fn: fn(parser) -> arg_or_capture_item)
+ fn parse_capture_item_or(parse_arg_fn: fn(Parser) -> arg_or_capture_item)
-> arg_or_capture_item {
- fn parse_capture_item(p:parser, is_move: bool) -> capture_item {
+ fn parse_capture_item(p:Parser, is_move: bool) -> capture_item {
let sp = mk_sp(p.span.lo, p.span.hi);
let ident = p.parse_ident();
@{id: p.get_id(), is_move: is_move, name: ident, span: sp}
}
}
- fn maybe_parse_fixed_vstore() -> Option<Option<uint>> {
- if self.token == token::BINOP(token::SLASH) {
- self.bump();
- match copy self.token {
- token::UNDERSCORE => {
- self.bump(); Some(None)
- }
- token::LIT_INT_UNSUFFIXED(i) if i >= 0i64 => {
- self.bump(); Some(Some(i as uint))
- }
- _ => None
- }
- } else {
- None
- }
- }
-
fn maybe_parse_fixed_vstore_with_star() -> Option<Option<uint>> {
if self.eat(token::BINOP(token::STAR)) {
match copy self.token {
}
}
- fn lit_from_token(tok: token::token) -> lit_ {
+ fn lit_from_token(tok: token::Token) -> lit_ {
match tok {
token::LIT_INT(i, it) => lit_int(i, it),
token::LIT_UINT(u, ut) => lit_uint(u, ut),
}
fn parse_path_without_tps_(
- parse_ident: fn(parser) -> ident,
- parse_last_ident: fn(parser) -> ident) -> @path {
+ parse_ident: fn(Parser) -> ident,
+ parse_last_ident: fn(Parser) -> ident) -> @path {
maybe_whole!(self, nt_path);
let lo = self.span.lo;
}
}
- fn parse_field(sep: token::token) -> field {
+ fn parse_field(sep: token::Token) -> field {
let lo = self.span.lo;
let m = self.parse_mutability();
let i = self.parse_ident();
ex = expr_lit(@lit);
}
- // Vstore is legal following expr_lit(lit_str(...)) and expr_vec(...)
- // only.
- match ex {
- expr_lit(@{node: lit_str(_), span: _}) |
- expr_vec(_, _) => match self.maybe_parse_fixed_vstore() {
- None => (),
- Some(v) => {
- hi = self.span.hi;
- ex = expr_vstore(self.mk_expr(lo, hi, ex),
- expr_vstore_fixed(v));
- }
- },
- _ => ()
- }
+ let (hi, ex) =
+ self.try_convert_expr_to_obsolete_fixed_length_vstore(lo, hi, ex);
return self.mk_pexpr(lo, hi, ex);
}
return e;
}
- fn parse_sep_and_zerok() -> (Option<token::token>, bool) {
+ fn parse_sep_and_zerok() -> (Option<token::Token>, bool) {
if self.token == token::BINOP(token::STAR)
|| self.token == token::BINOP(token::PLUS) {
let zerok = self.token == token::BINOP(token::STAR);
fn parse_token_tree() -> token_tree {
maybe_whole!(deref self, nt_tt);
- fn parse_tt_tok(p: parser, delim_ok: bool) -> token_tree {
+ fn parse_tt_tok(p: Parser, delim_ok: bool) -> token_tree {
match p.token {
token::RPAREN | token::RBRACE | token::RBRACKET
if !delim_ok => {
// This goofy function is necessary to correctly match parens in matchers.
// Otherwise, `$( ( )` would be a valid matcher, and `$( () )` would be
// invalid. It's similar to common::parse_seq.
- fn parse_matcher_subseq(name_idx: @mut uint, bra: token::token,
- ket: token::token) -> ~[matcher] {
+ fn parse_matcher_subseq(name_idx: @mut uint, bra: token::Token,
+ ket: token::Token) -> ~[matcher] {
let mut ret_val = ~[];
let mut lparens = 0u;
fn parse_stmt(+first_item_attrs: ~[attribute]) -> @stmt {
maybe_whole!(self, nt_stmt);
- fn check_expected_item(p: parser, current_attrs: ~[attribute]) {
+ fn check_expected_item(p: Parser, current_attrs: ~[attribute]) {
// If we have attributes then we should have an item
if vec::is_not_empty(current_attrs) {
p.fatal(~"expected item");
let item_attrs = vec::append(first_item_attrs, item_attrs);
- match self.parse_item_or_view_item(item_attrs, true) {
+ match self.parse_item_or_view_item(item_attrs, true, false) {
iovi_item(i) => {
let mut hi = i.span.hi;
let decl = @spanned(lo, hi, decl_item(i));
self.span_fatal(vi.span, ~"view items must be declared at \
the top of the block");
}
+ iovi_foreign_item(_) => {
+ self.fatal(~"foreign items are not allowed here");
+ }
iovi_none() => { /* fallthrough */ }
}
maybe_whole!(pair_empty self, nt_block);
- fn maybe_parse_inner_attrs_and_next(p: parser, parse_attrs: bool) ->
+ fn maybe_parse_inner_attrs_and_next(p: Parser, parse_attrs: bool) ->
{inner: ~[attribute], next: ~[attribute]} {
if parse_attrs {
p.parse_inner_attrs_and_next()
let mut stmts = ~[];
let mut expr = None;
- let {attrs_remaining, view_items, items: items} =
+ let {attrs_remaining, view_items, items: items, _} =
self.parse_items_and_view_items(first_item_attrs,
IMPORTS_AND_ITEMS_ALLOWED);
return spanned(lo, hi, bloc);
}
+ fn mk_ty_path(i: ident) -> @Ty {
+ @{id: self.get_id(), node: ty_path(
+ ident_to_path(copy self.last_span, i),
+ self.get_id()), span: self.last_span}
+ }
+
fn parse_optional_ty_param_bounds() -> @~[ty_param_bound] {
let mut bounds = ~[];
if self.eat(token::COLON) {
while is_ident(self.token) {
if is_ident(self.token) {
- // XXX: temporary until kinds become traits
let maybe_bound = match self.token {
token::IDENT(copy sid, _) => {
match *self.id_to_str(sid) {
- ~"Send" => Some(bound_send),
- ~"Copy" => Some(bound_copy),
- ~"Const" => Some(bound_const),
- ~"Owned" => Some(bound_owned),
~"send"
| ~"copy"
ObsoleteLowerCaseKindBounds);
// Bogus value, but doesn't matter, since
// is an error
- Some(bound_send)
+ Some(ty_param_bound(self.mk_ty_path(sid)))
}
_ => None
bounds.push(bound);
}
None => {
- bounds.push(bound_trait(self.parse_ty(false)));
+ bounds.push(ty_param_bound(self.parse_ty(false)));
}
}
} else {
- bounds.push(bound_trait(self.parse_ty(false)));
+ bounds.push(ty_param_bound(self.parse_ty(false)));
}
}
}
} else { ~[] }
}
- fn parse_fn_decl(parse_arg_fn: fn(parser) -> arg_or_capture_item)
+ fn parse_fn_decl(parse_arg_fn: fn(Parser) -> arg_or_capture_item)
-> (fn_decl, capture_clause) {
let args_or_capture_items: ~[arg_or_capture_item] =
fn expect_self_ident() {
if !self.is_self_ident() {
- self.fatal(#fmt("expected `self` but found `%s`",
+ self.fatal(fmt!("expected `self` but found `%s`",
token_to_str(self.reader, self.token)));
}
self.bump();
}
fn parse_fn_decl_with_self(parse_arg_fn:
- fn(parser) -> arg_or_capture_item)
+ fn(Parser) -> arg_or_capture_item)
-> (self_ty, fn_decl, capture_clause) {
fn maybe_parse_self_ty(cnstr: fn(+v: mutability) -> ast::self_ty_,
- p: parser) -> ast::self_ty_ {
+ p: Parser) -> ast::self_ty_ {
// We need to make sure it isn't a mode or a type
if p.token_is_keyword(~"self", p.look_ahead(1)) ||
((p.token_is_keyword(~"const", p.look_ahead(1)) ||
// Parses four variants (with the region/type params always optional):
// impl<T> ~[T] : to_str { ... }
fn parse_item_impl() -> item_info {
- fn wrap_path(p: parser, pt: @path) -> @ty {
+ fn wrap_path(p: Parser, pt: @path) -> @Ty {
@{id: p.get_id(), node: ty_path(pt, p.get_id()), span: pt.span}
}
ref_id: self.get_id(), impl_id: self.get_id()}
}
- fn parse_trait_ref_list(ket: token::token) -> ~[@trait_ref] {
+ fn parse_trait_ref_list(ket: token::Token) -> ~[@trait_ref] {
self.parse_seq_to_before_end(
ket, seq_sep_trailing_disallowed(token::COMMA),
|p| p.parse_trait_ref())
let mut fields: ~[@struct_field];
let mut methods: ~[@method] = ~[];
- let mut the_ctor: Option<(fn_decl, ~[attribute], blk, codemap::span)>
- = None;
let mut the_dtor: Option<(blk, ~[attribute], codemap::span)> = None;
- let ctor_id = self.get_id();
if self.eat(token::LBRACE) {
// It's a record-like struct.
fields = ~[];
while self.token != token::RBRACE {
match self.parse_class_item() {
- ctor_decl(a_fn_decl, attrs, blk, s) => {
- match the_ctor {
- Some((_, _, _, s_first)) => {
- self.span_note(s, #fmt("Duplicate constructor \
- declaration for class %s",
- *self.interner.get(class_name)));
- self.span_fatal(copy s_first, ~"First constructor \
- declared here");
- }
- None => {
- the_ctor = Some((a_fn_decl, attrs, blk, s));
- }
- }
- }
dtor_decl(blk, attrs, s) => {
match the_dtor {
Some((_, _, s_first)) => {
- self.span_note(s, #fmt("Duplicate destructor \
+ self.span_note(s, fmt!("Duplicate destructor \
declaration for class %s",
*self.interner.get(class_name)));
self.span_fatal(copy s_first, ~"First destructor \
self_id: self.get_id(),
body: d_body},
span: d_s}};
- match the_ctor {
- Some((ct_d, ct_attrs, ct_b, ct_s)) => {
- (class_name,
- item_class(@{
- traits: traits,
- fields: move fields,
- methods: move methods,
- ctor: Some({
- node: {id: ctor_id,
- attrs: ct_attrs,
- self_id: self.get_id(),
- dec: ct_d,
- body: ct_b},
- span: ct_s}),
- dtor: actual_dtor
- }, ty_params),
- None)
- }
- None => {
- (class_name,
- item_class(@{
- traits: traits,
- fields: move fields,
- methods: move methods,
- ctor: None,
- dtor: actual_dtor
- }, ty_params),
- None)
- }
- }
+ (class_name,
+ item_class(@{
+ traits: traits,
+ fields: move fields,
+ methods: move methods,
+ dtor: actual_dtor
+ }, ty_params),
+ None)
}
- fn token_is_pound_or_doc_comment(++tok: token::token) -> bool {
+ fn token_is_pound_or_doc_comment(++tok: token::Token) -> bool {
match tok {
token::POUND | token::DOC_COMMENT(_) => true,
_ => false
self.eat_keyword(~"static")
}
- fn parse_mod_items(term: token::token,
+ fn parse_mod_items(term: token::Token,
+first_item_attrs: ~[attribute]) -> _mod {
// Shouldn't be any view items since we've already parsed an item attr
- let {attrs_remaining, view_items, items: starting_items} =
+ let {attrs_remaining, view_items, items: starting_items, _} =
self.parse_items_and_view_items(first_item_attrs,
VIEW_ITEMS_AND_ITEMS_ALLOWED);
let mut items: ~[@item] = move starting_items;
}
debug!("parse_mod_items: parse_item_or_view_item(attrs=%?)",
attrs);
- match self.parse_item_or_view_item(attrs, true) {
+ match self.parse_item_or_view_item(attrs, true, false) {
iovi_item(item) => items.push(item),
iovi_view_item(view_item) => {
self.span_fatal(view_item.span, ~"view items must be \
+first_item_attrs: ~[attribute]) ->
foreign_mod {
// Shouldn't be any view items since we've already parsed an item attr
- let {attrs_remaining, view_items, items: _} =
+ let {attrs_remaining, view_items, items: _, foreign_items} =
self.parse_items_and_view_items(first_item_attrs,
- VIEW_ITEMS_ALLOWED);
+ VIEW_ITEMS_AND_FOREIGN_ITEMS_ALLOWED);
- let mut items: ~[@foreign_item] = ~[];
+ let mut items: ~[@foreign_item] = move foreign_items;
let mut initial_attrs = attrs_remaining;
while self.token != token::RBRACE {
let attrs = vec::append(initial_attrs,
items.push(self.parse_foreign_item(attrs));
}
return {sort: sort, view_items: view_items,
- items: items};
+ items: items};
}
fn parse_item_foreign_mod(lo: uint,
let mut methods: ~[@method] = ~[];
while self.token != token::RBRACE {
match self.parse_class_item() {
- ctor_decl(*) => {
- self.span_fatal(copy self.span,
- ~"deprecated explicit \
- constructors are not allowed \
- here");
- }
dtor_decl(blk, attrs, s) => {
match the_dtor {
Some((_, _, s_first)) => {
traits: ~[],
fields: move fields,
methods: move methods,
- ctor: None,
dtor: actual_dtor
};
}
}
}
- fn fn_expr_lookahead(tok: token::token) -> bool {
+ fn fn_expr_lookahead(tok: token::Token) -> bool {
match tok {
token::LPAREN | token::AT | token::TILDE | token::BINOP(_) => true,
_ => false
}
}
- fn parse_item_or_view_item(+attrs: ~[attribute], items_allowed: bool)
+ fn parse_item_or_view_item(+attrs: ~[attribute], items_allowed: bool,
+ foreign_items_allowed: bool)
-> item_or_view_item {
+ assert items_allowed != foreign_items_allowed;
+
maybe_whole!(iovi self,nt_item);
let lo = self.span.lo;
return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
visibility,
maybe_append(attrs, extra_attrs)));
+ } else if foreign_items_allowed && self.is_keyword(~"const") {
+ let item = self.parse_item_foreign_const(visibility, attrs);
+ return iovi_foreign_item(item);
} else if items_allowed &&
self.is_keyword(~"fn") &&
!self.fn_expr_lookahead(self.look_ahead(1u)) {
return iovi_item(self.mk_item(lo, self.last_span.hi, ident, item_,
visibility,
maybe_append(attrs, extra_attrs)));
+ } else if foreign_items_allowed &&
+ (self.is_keyword(~"fn") || self.is_keyword(~"pure")) {
+ let item = self.parse_item_foreign_fn(visibility, attrs);
+ return iovi_foreign_item(item);
} else if items_allowed && self.is_keyword(~"unsafe")
&& self.look_ahead(1u) != token::LBRACE {
self.bump();
return iovi_item(self.mk_item(lo, self.last_span.hi, id, item_,
visibility, attrs));
} else {
+ if visibility != inherited {
+ let mut s = ~"unmatched visibility `";
+ s += if visibility == public { ~"pub" } else { ~"priv" };
+ s += ~"`";
+ self.span_fatal(copy self.last_span, s);
+ }
return iovi_none;
};
}
fn parse_item(+attrs: ~[attribute]) -> Option<@ast::item> {
- match self.parse_item_or_view_item(attrs, true) {
+ match self.parse_item_or_view_item(attrs, true, false) {
iovi_none =>
None,
iovi_view_item(_) =>
self.fatal(~"view items are not allowed here"),
+ iovi_foreign_item(_) =>
+ self.fatal(~"foreign items are not allowed here"),
iovi_item(item) =>
Some(item)
}
mode: view_item_parse_mode)
-> {attrs_remaining: ~[attribute],
view_items: ~[@view_item],
- items: ~[@item]} {
+ items: ~[@item],
+ foreign_items: ~[@foreign_item]} {
let mut attrs = vec::append(first_item_attrs,
self.parse_outer_attributes());
- let items_allowed;
- match mode {
- VIEW_ITEMS_AND_ITEMS_ALLOWED | IMPORTS_AND_ITEMS_ALLOWED =>
- items_allowed = true,
- VIEW_ITEMS_ALLOWED =>
- items_allowed = false
- }
+ let items_allowed = match mode {
+ VIEW_ITEMS_AND_ITEMS_ALLOWED | IMPORTS_AND_ITEMS_ALLOWED => true,
+ VIEW_ITEMS_AND_FOREIGN_ITEMS_ALLOWED => false
+ };
- let (view_items, items) = (DVec(), DVec());
+ let restricted_to_imports = match mode {
+ IMPORTS_AND_ITEMS_ALLOWED => true,
+ VIEW_ITEMS_AND_ITEMS_ALLOWED |
+ VIEW_ITEMS_AND_FOREIGN_ITEMS_ALLOWED => false
+ };
+
+ let foreign_items_allowed = match mode {
+ VIEW_ITEMS_AND_FOREIGN_ITEMS_ALLOWED => true,
+ VIEW_ITEMS_AND_ITEMS_ALLOWED | IMPORTS_AND_ITEMS_ALLOWED => false
+ };
+
+ let (view_items, items, foreign_items) = (DVec(), DVec(), DVec());
loop {
- match self.parse_item_or_view_item(attrs, items_allowed) {
+ match self.parse_item_or_view_item(attrs, items_allowed,
+ foreign_items_allowed) {
iovi_none =>
break,
iovi_view_item(view_item) => {
- match mode {
- VIEW_ITEMS_AND_ITEMS_ALLOWED |
- VIEW_ITEMS_ALLOWED => {}
- IMPORTS_AND_ITEMS_ALLOWED =>
+ if restricted_to_imports {
match view_item.node {
view_item_import(_) => {}
view_item_export(_) | view_item_use(*) =>
assert items_allowed;
items.push(item)
}
+ iovi_foreign_item(foreign_item) => {
+ assert foreign_items_allowed;
+ foreign_items.push(foreign_item);
+ }
}
attrs = self.parse_outer_attributes();
}
{attrs_remaining: attrs,
view_items: dvec::unwrap(move view_items),
- items: dvec::unwrap(move items)}
+ items: dvec::unwrap(move items),
+ foreign_items: dvec::unwrap(move foreign_items)}
}
// Parses a source module as a crate
return self.fatal(~"expected crate directive");
}
- fn parse_crate_directives(term: token::token,
+ fn parse_crate_directives(term: token::Token,
first_outer_attr: ~[attribute]) ->
~[@crate_directive] {
export token_to_binop;
use token::*;
-use token::token;
+use token::Token;
use ast::*;
/// Unary operators have higher precedence than binary
* Maps a token to a record specifying the corresponding binary
* operator and its precedence
*/
-fn token_to_binop(tok: token) -> Option<ast::binop> {
+fn token_to_binop(tok: Token) -> Option<ast::binop> {
match tok {
BINOP(STAR) => Some(mul),
BINOP(SLASH) => Some(div),
use util::interner;
-use util::interner::interner;
+use util::interner::Interner;
use std::map::HashMap;
-use std::serialization::{Serializer,
- Deserializer,
- serialize_uint,
- deserialize_uint,
- serialize_i64,
- deserialize_i64,
- serialize_u64,
- deserialize_u64,
- serialize_bool,
- deserialize_bool};
#[auto_serialize]
+#[auto_deserialize]
enum binop {
PLUS,
MINUS,
}
#[auto_serialize]
-enum token {
+#[auto_deserialize]
+enum Token {
/* Expression-operator symbols. */
EQ,
LT,
}
#[auto_serialize]
+#[auto_deserialize]
/// For interpolation during macro expansion.
enum nonterminal {
nt_item(@ast::item),
nt_stmt(@ast::stmt),
nt_pat( @ast::pat),
nt_expr(@ast::expr),
- nt_ty( @ast::ty),
+ nt_ty( @ast::Ty),
nt_ident(ast::ident, bool),
nt_path(@ast::path),
nt_tt( @ast::token_tree), //needs @ed to break a circularity
}
}
-fn to_str(in: @ident_interner, t: token) -> ~str {
+fn to_str(in: @ident_interner, t: Token) -> ~str {
match t {
EQ => ~"=",
LT => ~"<",
}
}
-pure fn can_begin_expr(t: token) -> bool {
+pure fn can_begin_expr(t: Token) -> bool {
match t {
LPAREN => true,
LBRACE => true,
}
/// what's the opposite delimiter?
-fn flip_delimiter(t: token::token) -> token::token {
+fn flip_delimiter(t: token::Token) -> token::Token {
match t {
token::LPAREN => token::RPAREN,
token::LBRACE => token::RBRACE,
-fn is_lit(t: token) -> bool {
+fn is_lit(t: Token) -> bool {
match t {
LIT_INT(_, _) => true,
LIT_UINT(_, _) => true,
}
}
-pure fn is_ident(t: token) -> bool {
+pure fn is_ident(t: Token) -> bool {
match t { IDENT(_, _) => true, _ => false }
}
-pure fn is_ident_or_path(t: token) -> bool {
+pure fn is_ident_or_path(t: Token) -> bool {
match t {
IDENT(_, _) | INTERPOLATED(nt_path(*)) => true,
_ => false
}
}
-pure fn is_plain_ident(t: token) -> bool {
+pure fn is_plain_ident(t: Token) -> bool {
match t { IDENT(_, false) => true, _ => false }
}
-pure fn is_bar(t: token) -> bool {
+pure fn is_bar(t: Token) -> bool {
match t { BINOP(OR) | OROR => true, _ => false }
}
}
struct ident_interner {
- priv interner: util::interner::interner<@~str>,
+ priv interner: util::interner::Interner<@~str>,
}
impl ident_interner {
)
fn mk_ident_interner() -> @ident_interner {
- /* the indices here must correspond to the numbers in special_idents */
- let init_vec = ~[@~"_", @~"anon", @~"drop", @~"", @~"unary", @~"!",
- @~"[]", @~"unary-", @~"__extensions__", @~"self",
- @~"item", @~"block", @~"stmt", @~"pat", @~"expr",
- @~"ty", @~"ident", @~"path", @~"tt", @~"matchers",
- @~"str", @~"TyVisitor", @~"arg", @~"descrim",
- @~"__rust_abi", @~"__rust_stack_shim", @~"TyDesc",
- @~"dtor", @~"main", @~"<opaque>", @~"blk", @~"static",
- @~"intrinsic", @~"__foreign_mod__"];
-
- let rv = @ident_interner {
- interner: interner::mk_prefill::<@~str>(init_vec)
- };
-
- /* having multiple interners will just confuse the serializer */
unsafe {
- assert task::local_data::local_data_get(interner_key!()).is_none()
- };
- unsafe {
- task::local_data::local_data_set(interner_key!(), @rv)
- };
- rv
+ match task::local_data::local_data_get(interner_key!()) {
+ Some(interner) => *interner,
+ None => {
+ // the indices here must correspond to the numbers in
+ // special_idents.
+ let init_vec = ~[
+ @~"_", @~"anon", @~"drop", @~"", @~"unary", @~"!",
+ @~"[]", @~"unary-", @~"__extensions__", @~"self",
+ @~"item", @~"block", @~"stmt", @~"pat", @~"expr",
+ @~"ty", @~"ident", @~"path", @~"tt", @~"matchers",
+ @~"str", @~"TyVisitor", @~"arg", @~"descrim",
+ @~"__rust_abi", @~"__rust_stack_shim", @~"TyDesc",
+ @~"dtor", @~"main", @~"<opaque>", @~"blk", @~"static",
+ @~"intrinsic", @~"__foreign_mod__"
+ ];
+
+ let rv = @ident_interner {
+ interner: interner::mk_prefill(init_vec)
+ };
+
+ task::local_data::local_data_set(interner_key!(), @rv);
+
+ rv
+ }
+ }
+ }
}
/* for when we don't care about the contents; doesn't interact with TLD or
pure fn ne(other: &binop) -> bool { !self.eq(other) }
}
-impl token : cmp::Eq {
- pure fn eq(other: &token) -> bool {
+impl Token : cmp::Eq {
+ pure fn eq(other: &Token) -> bool {
match self {
EQ => {
match (*other) {
}
}
}
- pure fn ne(other: &token) -> bool { !self.eq(other) }
+ pure fn ne(other: &Token) -> bool { !self.eq(other) }
}
// Local Variables:
use parse::{comments, lexer, token};
-use codemap::codemap;
+use codemap::CodeMap;
use pp::{break_offset, word, printer, space, zerobreak, hardbreak, breaks};
use pp::{consistent, inconsistent, eof};
use ast::{required, provided};
type ps =
@{s: pp::printer,
- cm: Option<codemap>,
+ cm: Option<CodeMap>,
intr: @token::ident_interner,
comments: Option<~[comments::cmnt]>,
literals: Option<~[comments::lit]>,
fn rust_printer(writer: io::Writer, intr: @ident_interner) -> ps {
return @{s: pp::mk_printer(writer, default_columns),
- cm: None::<codemap>,
+ cm: None::<CodeMap>,
intr: intr,
comments: None::<~[comments::cmnt]>,
literals: None::<~[comments::lit]>,
// Requires you to pass an input filename and reader so that
// it can scan the input text for comments and literals to
// copy forward.
-fn print_crate(cm: codemap, intr: @ident_interner,
+fn print_crate(cm: CodeMap, intr: @ident_interner,
span_diagnostic: diagnostic::span_handler,
crate: @ast::crate, filename: ~str, in: io::Reader,
out: io::Writer, ann: pp_ann, is_expanded: bool) {
eof(s.s);
}
-fn ty_to_str(ty: @ast::ty, intr: @ident_interner) -> ~str {
+fn ty_to_str(ty: @ast::Ty, intr: @ident_interner) -> ~str {
to_str(ty, print_type, intr)
}
word(s.s, sep);
}
-fn print_type(s: ps, &&ty: @ast::ty) {
+fn print_type(s: ps, &&ty: @ast::Ty) {
print_type_ex(s, ty, false);
}
-fn print_type_ex(s: ps, &&ty: @ast::ty, print_colons: bool) {
+fn print_type_ex(s: ps, &&ty: @ast::Ty, print_colons: bool) {
maybe_print_comment(s, ty.span.lo);
ibox(s, 0u);
match ty.node {
}
ast::ty_path(path, _) => print_path(s, path, print_colons),
ast::ty_fixed_length(t, v) => {
- print_type(s, t);
- word(s.s, ~"/");
+ word(s.s, ~"[");
+ match t.node {
+ ast::ty_vec(mt) => {
+ match mt.mutbl {
+ ast::m_mutbl => word_space(s, ~"mut"),
+ ast::m_const => word_space(s, ~"const"),
+ ast::m_imm => ()
+ }
+ print_type(s, mt.ty);
+ }
+ _ => fail ~"ty_fixed_length can only contain ty_vec as type"
+ }
+ word(s.s, ~" * ");
print_vstore(s, ast::vstore_fixed(v));
+ word(s.s, ~"]");
}
ast::ty_mac(_) => {
fail ~"print_type doesn't know how to print a ty_mac";
print_type(s, t);
word(s.s, ~";");
end(s); // end the head-ibox
+ end(s); // end the outer cbox
}
}
}
print_outer_attributes(s, item.attrs);
let ann_node = node_item(s, item);
s.ann.pre(ann_node);
- print_visibility(s, item.vis);
match item.node {
ast::item_const(ty, expr) => {
head(s, visibility_qualified(item.vis, ~"const"));
ast::named => {
word_nbsp(s, ~"mod");
print_ident(s, item.ident);
+ nbsp(s);
}
ast::anonymous => {}
}
- nbsp(s);
bopen(s);
print_foreign_mod(s, nmod, item.attrs);
bclose(s, item.span);
ast::item_ty(ty, params) => {
ibox(s, indent_unit);
ibox(s, 0u);
- word_nbsp(s, ~"type");
+ word_nbsp(s, visibility_qualified(item.vis, ~"type"));
print_ident(s, item.ident);
print_type_params(s, params);
end(s); // end the inner ibox
end(s); // end the outer ibox
}
ast::item_enum(enum_definition, params) => {
- print_enum_def(s, enum_definition, params, item.ident, item.span);
+ print_enum_def(s, enum_definition, params, item.ident,
+ item.span, item.vis);
}
ast::item_class(struct_def, tps) => {
- head(s, ~"struct");
+ head(s, visibility_qualified(item.vis, ~"struct"));
print_struct(s, struct_def, tps, item.ident, item.span);
}
ast::item_impl(tps, opt_trait, ty, methods) => {
- head(s, ~"impl");
+ head(s, visibility_qualified(item.vis, ~"impl"));
if tps.is_not_empty() {
print_type_params(s, tps);
space(s.s);
bclose(s, item.span);
}
ast::item_trait(tps, traits, methods) => {
- head(s, ~"trait");
+ head(s, visibility_qualified(item.vis, ~"trait"));
print_ident(s, item.ident);
print_type_params(s, tps);
if vec::len(traits) != 0u {
bclose(s, item.span);
}
ast::item_mac({node: ast::mac_invoc_tt(pth, tts), _}) => {
+ print_visibility(s, item.vis);
print_path(s, pth, false);
word(s.s, ~"! ");
print_ident(s, item.ident);
fn print_enum_def(s: ps, enum_definition: ast::enum_def,
params: ~[ast::ty_param], ident: ast::ident,
- span: ast::span) {
+ span: ast::span, visibility: ast::visibility) {
let mut newtype =
vec::len(enum_definition.variants) == 1u &&
ident == enum_definition.variants[0].node.name;
}
if newtype {
ibox(s, indent_unit);
- word_space(s, ~"enum");
+ word_space(s, visibility_qualified(visibility, ~"enum"));
} else {
- head(s, ~"enum");
+ head(s, visibility_qualified(visibility, ~"enum"));
}
print_ident(s, ident);
}
bopen(s);
hardbreak_if_not_bol(s);
- do struct_def.ctor.iter |ctor| {
- maybe_print_comment(s, ctor.span.lo);
- print_outer_attributes(s, ctor.node.attrs);
- // Doesn't call head because there shouldn't be a space after new.
- cbox(s, indent_unit);
- ibox(s, 4);
- word(s.s, ~"new(");
- print_fn_args(s, ctor.node.dec, ~[], None);
- word(s.s, ~")");
- space(s.s);
- print_block(s, ctor.node.body);
- }
do struct_def.dtor.iter |dtor| {
hardbreak_if_not_bol(s);
maybe_print_comment(s, dtor.span.lo);
indented: uint, attrs: ~[ast::attribute],
close_box: bool) {
match blk.node.rules {
- ast::unsafe_blk => word(s.s, ~"unsafe"),
+ ast::unsafe_blk => word_space(s, ~"unsafe"),
ast::default_blk => ()
}
maybe_print_comment(s, blk.span.lo);
ast::expr_loop(blk, opt_ident) => {
head(s, ~"loop");
space(s.s);
- opt_ident.iter(|ident| {print_ident(s, *ident); space(s.s)});
+ opt_ident.iter(|ident| {
+ print_ident(s, *ident);
+ word_space(s, ~":");
+ });
print_block(s, blk);
}
ast::expr_match(expr, arms) => {
}
fn print_bounds(s: ps, bounds: @~[ast::ty_param_bound]) {
- if vec::len(*bounds) > 0u {
+ if bounds.is_not_empty() {
word(s.s, ~":");
for vec::each(*bounds) |bound| {
nbsp(s);
- match *bound {
- ast::bound_copy => word(s.s, ~"Copy"),
- ast::bound_send => word(s.s, ~"Send"),
- ast::bound_const => word(s.s, ~"Const"),
- ast::bound_owned => word(s.s, ~"Owned"),
- ast::bound_trait(t) => print_type(s, t)
- }
+ print_type(s, **bound);
}
}
}
#[link(name = "syntax",
- vers = "0.4",
+ vers = "0.5",
uuid = "9311401b-d6ea-4cd9-a1d9-61f89499c645")];
#[allow(deprecated_mode)];
#[allow(deprecated_pattern)];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
use core::*;
#[legacy_exports]
mod auto_serialize;
#[legacy_exports]
- mod auto_serialize2;
- #[legacy_exports]
mod source_util;
mod pipes {
{map: HashMap<T, uint>,
vect: DVec<T>};
-fn mk<T:Eq IterBytes Hash Const Copy>() -> interner<T> {
+fn mk<T:Eq IterBytes Hash Const Copy>() -> Interner<T> {
let m = map::HashMap::<T, uint>();
let hi: hash_interner<T> =
{map: m, vect: DVec()};
- move (hi as interner::<T>)
+ move ((move hi) as Interner::<T>)
}
-fn mk_prefill<T:Eq IterBytes Hash Const Copy>(init: ~[T]) -> interner<T> {
+fn mk_prefill<T:Eq IterBytes Hash Const Copy>(init: ~[T]) -> Interner<T> {
let rv = mk();
for init.each() |v| { rv.intern(*v); }
return rv;
/* when traits can extend traits, we should extend index<uint,T> to get [] */
-trait interner<T:Eq IterBytes Hash Const Copy> {
+trait Interner<T:Eq IterBytes Hash Const Copy> {
fn intern(T) -> uint;
fn gensym(T) -> uint;
pure fn get(uint) -> T;
fn len() -> uint;
}
-impl <T:Eq IterBytes Hash Const Copy> hash_interner<T>: interner<T> {
+impl <T:Eq IterBytes Hash Const Copy> hash_interner<T>: Interner<T> {
fn intern(val: T) -> uint {
match self.map.find(val) {
Some(idx) => return idx,
fk_method(ident, ~[ty_param], @method),
fk_anon(proto, capture_clause), //< an anonymous function like fn@(...)
fk_fn_block(capture_clause), //< a block {||...}
- fk_ctor(ident, ~[attribute], ~[ty_param], node_id /* self id */,
- def_id /* parent class id */), // class constructor
fk_dtor(~[ty_param], ~[attribute], node_id /* self id */,
def_id /* parent class id */) // class destructor
fn name_of_fn(fk: fn_kind) -> ident {
match fk {
- fk_item_fn(name, _, _) | fk_method(name, _, _)
- | fk_ctor(name, _, _, _, _) => /* FIXME (#2543) */ copy name,
+ fk_item_fn(name, _, _) | fk_method(name, _, _) => {
+ /* FIXME (#2543) */ copy name
+ }
fk_anon(*) | fk_fn_block(*) => parse::token::special_idents::anon,
fk_dtor(*) => parse::token::special_idents::dtor
}
fn tps_of_fn(fk: fn_kind) -> ~[ty_param] {
match fk {
- fk_item_fn(_, tps, _) | fk_method(_, tps, _)
- | fk_ctor(_, _, tps, _, _) | fk_dtor(tps, _, _, _) => {
- /* FIXME (#2543) */ copy tps
- }
- fk_anon(*) | fk_fn_block(*) => ~[]
+ fk_item_fn(_, tps, _) | fk_method(_, tps, _) |
+ fk_dtor(tps, _, _, _) => {
+ /* FIXME (#2543) */ copy tps
+ }
+ fk_anon(*) | fk_fn_block(*) => ~[]
}
}
visit_decl: fn@(@decl, E, vt<E>),
visit_expr: fn@(@expr, E, vt<E>),
visit_expr_post: fn@(@expr, E, vt<E>),
- visit_ty: fn@(@ty, E, vt<E>),
+ visit_ty: fn@(@Ty, E, vt<E>),
visit_ty_params: fn@(~[ty_param], E, vt<E>),
visit_fn: fn@(fn_kind, fn_decl, blk, span, node_id, E, vt<E>),
visit_ty_method: fn@(ty_method, E, vt<E>),
visit_enum_def(enum_definition, tps, e, v);
}
}
+ // Visit the disr expr if it exists
+ vr.node.disr_expr.iter(|ex| v.visit_expr(*ex, e, v));
}
}
-fn skip_ty<E>(_t: @ty, _e: E, _v: vt<E>) {}
+fn skip_ty<E>(_t: @Ty, _e: E, _v: vt<E>) {}
-fn visit_ty<E>(t: @ty, e: E, v: vt<E>) {
+fn visit_ty<E>(t: @Ty, e: E, v: vt<E>) {
match t.node {
ty_box(mt) | ty_uniq(mt) |
ty_vec(mt) | ty_ptr(mt) | ty_rptr(_, mt) => {
fn visit_ty_param_bounds<E>(bounds: @~[ty_param_bound], e: E, v: vt<E>) {
for vec::each(*bounds) |bound| {
- match *bound {
- bound_trait(t) => v.visit_ty(t, e, v),
- bound_copy | bound_send | bound_const | bound_owned => ()
- }
+ v.visit_ty(**bound, e, v)
}
}
m.decl, m.body, m.span, m.id, e, v);
}
-// Similar logic to the comment on visit_method_helper - Tim
-fn visit_class_ctor_helper<E>(ctor: class_ctor, nm: ident, tps: ~[ty_param],
- parent_id: def_id, e: E, v: vt<E>) {
- v.visit_fn(fk_ctor(/* FIXME (#2543) */ copy nm,
- ctor.node.attrs,
- /* FIXME (#2543) */ copy tps,
- ctor.node.self_id, parent_id),
- ctor.node.dec, ctor.node.body, ctor.span, ctor.node.id, e, v)
-
-}
-
fn visit_class_dtor_helper<E>(dtor: class_dtor, tps: ~[ty_param],
parent_id: def_id, e: E, v: vt<E>) {
v.visit_fn(fk_dtor(/* FIXME (#2543) */ copy tps, dtor.node.attrs,
}
}
-fn visit_struct_def<E>(sd: @struct_def, nm: ast::ident, tps: ~[ty_param],
+fn visit_struct_def<E>(sd: @struct_def, _nm: ast::ident, tps: ~[ty_param],
id: node_id, e: E, v: vt<E>) {
for sd.fields.each |f| {
v.visit_struct_field(*f, e, v);
for sd.traits.each |p| {
visit_path(p.path, e, v);
}
- do option::iter(&sd.ctor) |ctor| {
- visit_class_ctor_helper(*ctor, nm, tps, ast_util::local_def(id), e, v);
- };
do option::iter(&sd.dtor) |dtor| {
visit_class_dtor_helper(*dtor, tps, ast_util::local_def(id), e, v)
};
visit_decl: fn@(@decl),
visit_expr: fn@(@expr),
visit_expr_post: fn@(@expr),
- visit_ty: fn@(@ty),
+ visit_ty: fn@(@Ty),
visit_ty_params: fn@(~[ty_param]),
visit_fn: fn@(fn_kind, fn_decl, blk, span, node_id),
visit_ty_method: fn@(ty_method),
visit_struct_field: fn@(@struct_field),
visit_struct_method: fn@(@method)};
-fn simple_ignore_ty(_t: @ty) {}
+fn simple_ignore_ty(_t: @Ty) {}
fn default_simple_visitor() -> simple_visitor {
return @{visit_mod: fn@(_m: _mod, _sp: span, _id: node_id) { },
fn v_expr_post(f: fn@(@expr), ex: @expr, &&_e: (), _v: vt<()>) {
f(ex);
}
- fn v_ty(f: fn@(@ty), ty: @ty, &&e: (), v: vt<()>) {
+ fn v_ty(f: fn@(@Ty), ty: @Ty, &&e: (), v: vt<()>) {
f(ty);
visit_ty(ty, e, v);
}
return uv_tcp_bind6(tcp_server, addr);
}
+extern "C" int
+rust_uv_tcp_getpeername
+(uv_tcp_t* handle, sockaddr_in* name) {
+ int namelen = sizeof(sockaddr_in);
+ return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen);
+}
+
+extern "C" int
+rust_uv_tcp_getpeername6
+(uv_tcp_t* handle, sockaddr_in6* name) {
+ int namelen = sizeof(sockaddr_in6);
+ return uv_tcp_getpeername(handle, (sockaddr*)name, &namelen);
+}
+
extern "C" int
rust_uv_listen(uv_stream_t* stream, int backlog,
uv_connection_cb cb) {
int result = uv_ip6_name(src, dst, size);
return result;
}
+extern "C" unsigned int
+rust_uv_ip4_port(struct sockaddr_in* src) {
+ return ntohs(src->sin_port);
+}
+extern "C" unsigned int
+rust_uv_ip6_port(struct sockaddr_in6* src) {
+ return ntohs(src->sin6_port);
+}
extern "C" uintptr_t*
rust_uv_get_kernel_global_chan_ptr() {
tinfl_decompress_mem_to_heap
rust_annihilate_box
rust_gc_metadata
+rust_uv_ip4_port
+rust_uv_ip6_port
+rust_uv_tcp_getpeername
+rust_uv_tcp_getpeername6
\ No newline at end of file
use libc::{c_int, c_uint, c_char};
use driver::session;
-use session::session;
+use session::Session;
use lib::llvm::llvm;
use syntax::attr;
use middle::ty;
pure fn ne(other: &output_type) -> bool { !self.eq(other) }
}
-fn llvm_err(sess: session, msg: ~str) -> ! unsafe {
+fn llvm_err(sess: Session, msg: ~str) -> ! unsafe {
let cstr = llvm::LLVMRustGetLastError();
if cstr == ptr::null() {
sess.fatal(msg);
} else { sess.fatal(msg + ~": " + str::raw::from_c_str(cstr)); }
}
-fn WriteOutputFile(sess:session,
+fn WriteOutputFile(sess: Session,
PM: lib::llvm::PassManagerRef, M: ModuleRef,
Triple: *c_char,
// FIXME: When #2334 is fixed, change
env: *(),
}
- fn exec(sess: session,
+ fn exec(sess: Session,
pm: PassManagerRef,
m: ModuleRef,
opt: c_int,
return false;
}
- fn run_passes(sess: session, llmod: ModuleRef, output: &Path) {
+ fn run_passes(sess: Session, llmod: ModuleRef, output: &Path) {
let opts = sess.opts;
if sess.time_llvm_passes() { llvm::LLVMRustEnableTimePasses(); }
let mut pm = mk_pass_manager();
*
*/
-fn build_link_meta(sess: session, c: ast::crate, output: &Path,
+fn build_link_meta(sess: Session, c: ast::crate, output: &Path,
symbol_hasher: &hash::State) -> link_meta {
type provided_metas =
vers: Option<~str>,
cmh_items: ~[@ast::meta_item]};
- fn provided_link_metas(sess: session, c: ast::crate) ->
+ fn provided_link_metas(sess: Session, c: ast::crate) ->
provided_metas {
let mut name: Option<~str> = None;
let mut vers: Option<~str> = None;
return truncated_hash_result(symbol_hasher);
}
- fn warn_missing(sess: session, name: ~str, default: ~str) {
+ fn warn_missing(sess: Session, name: ~str, default: ~str) {
if !sess.building_library { return; }
sess.warn(fmt!("missing crate link meta `%s`, using `%s` as default",
name, default));
}
- fn crate_meta_name(sess: session, _crate: ast::crate,
+ fn crate_meta_name(sess: Session, _crate: ast::crate,
output: &Path, metas: provided_metas) -> ~str {
return match metas.name {
Some(v) => v,
};
}
- fn crate_meta_vers(sess: session, _crate: ast::crate,
+ fn crate_meta_vers(sess: Session, _crate: ast::crate,
metas: provided_metas) -> ~str {
return match metas.vers {
Some(v) => v,
return result;
}
-fn mangle(sess: session, ss: path) -> ~str {
+fn mangle(sess: Session, ss: path) -> ~str {
// Follow C++ namespace-mangling style
let mut n = ~"_ZN"; // Begin name-sequence.
n
}
-fn exported_name(sess: session, path: path, hash: ~str, vers: ~str) -> ~str {
+fn exported_name(sess: Session, path: path, hash: ~str, vers: ~str) -> ~str {
return mangle(sess,
vec::append_one(
vec::append_one(path, path_name(sess.ident_of(hash))),
// If the user wants an exe generated we need to invoke
// cc to link the object file with some libs
-fn link_binary(sess: session,
+fn link_binary(sess: Session,
obj_filename: &Path,
out_filename: &Path,
lm: link_meta) {
}
}
- if !sess.debugging_opt(session::no_rt) {
- // Always want the runtime linked in
- cc_args.push(~"-lrustrt");
- }
+ // Always want the runtime linked in
+ cc_args.push(~"-lrustrt");
// On linux librt and libdl are an indirect dependencies via rustrt,
// and binutils 2.22+ won't add them automatically
}
}
-fn get_rpath_flags(sess: session::session, out_filename: &Path) -> ~[~str] {
+fn get_rpath_flags(sess: session::Session, out_filename: &Path) -> ~[~str] {
let os = sess.targ_cfg.os;
// No rpath on windows
rpaths_to_flags(rpaths)
}
-fn get_sysroot_absolute_rt_lib(sess: session::session) -> Path {
+fn get_sysroot_absolute_rt_lib(sess: session::Session) -> Path {
let r = filesearch::relative_target_lib_path(sess.opts.target_triple);
sess.filesearch.sysroot().push_rel(&r).push(os::dll_filename("rustrt"))
}
fn decl(llmod: ModuleRef, prefix: ~str, name: ~str,
tys: ~[TypeRef], rv: TypeRef) ->
ValueRef {
- let mut arg_tys: ~[TypeRef] = ~[];
- for tys.each |t| { arg_tys.push(*t); }
+ let arg_tys = tys.map(|t| *t);
let fn_ty = T_fn(arg_tys, rv);
return base::decl_cdecl_fn(llmod, prefix + name, fn_ty);
}
// -*- rust -*-
use metadata::{creader, cstore, filesearch};
-use session::{session, session_, OptLevel, No, Less, Default, Aggressive};
+use session::{Session, Session_, OptLevel, No, Less, Default, Aggressive};
use syntax::parse;
use syntax::{ast, codemap};
use syntax::attr;
use back::link;
use result::{Ok, Err};
use std::getopts;
+use std::getopts::{opt_present};
+use std::getopts::groups;
+use std::getopts::groups::{optopt, optmulti, optflag, optflagopt, getopts};
use io::WriterUtil;
-use getopts::{optopt, optmulti, optflag, optflagopt, opt_present};
use back::{x86, x86_64};
use std::map::HashMap;
use lib::llvm::llvm;
}
}
-fn default_configuration(sess: session, argv0: ~str, input: input) ->
+fn default_configuration(sess: Session, argv0: ~str, input: input) ->
ast::crate_cfg {
let libc = match sess.targ_cfg.os {
session::os_win32 => ~"msvcrt.dll",
}
}
-fn build_configuration(sess: session, argv0: ~str, input: input) ->
+fn build_configuration(sess: Session, argv0: ~str, input: input) ->
ast::crate_cfg {
// Combine the configuration requested by the session (command line) with
// some default and generated configuration items
str_input(~str)
}
-fn parse_input(sess: session, cfg: ast::crate_cfg, input: input)
+fn parse_input(sess: Session, cfg: ast::crate_cfg, input: input)
-> @ast::crate {
match input {
file_input(file) => {
pure fn ne(other: &compile_upto) -> bool { !self.eq(other) }
}
-fn compile_upto(sess: session, cfg: ast::crate_cfg,
+fn compile_upto(sess: Session, cfg: ast::crate_cfg,
input: input, upto: compile_upto,
outputs: Option<output_filenames>)
-> {crate: @ast::crate, tcx: Option<ty::ctxt>} {
return {crate: crate, tcx: Some(ty_cx)};
}
-fn compile_input(sess: session, cfg: ast::crate_cfg, input: input,
+fn compile_input(sess: Session, cfg: ast::crate_cfg, input: input,
outdir: &Option<Path>, output: &Option<Path>) {
let upto = if sess.opts.parse_only { cu_parse }
compile_upto(sess, cfg, input, upto, Some(outputs));
}
-fn pretty_print_input(sess: session, cfg: ast::crate_cfg, input: input,
+fn pretty_print_input(sess: Session, cfg: ast::crate_cfg, input: input,
ppm: pp_mode) {
fn ann_paren_for_expr(node: pprust::ann_node) {
match node {
}
fn build_session(sopts: @session::options,
- demitter: diagnostic::emitter) -> session {
+ demitter: diagnostic::emitter) -> Session {
let codemap = codemap::new_codemap();
let diagnostic_handler =
diagnostic::mk_handler(Some(demitter));
}
fn build_session_(sopts: @session::options,
- cm: codemap::codemap,
+ cm: codemap::CodeMap,
demitter: diagnostic::emitter,
span_diagnostic_handler: diagnostic::span_handler)
- -> session {
-
+ -> Session {
let target_cfg = build_target_config(sopts, demitter);
let p_s = parse::new_parse_sess_special_handler(span_diagnostic_handler,
cm);
sopts.target_triple,
sopts.addl_lib_search_paths);
let lint_settings = lint::mk_lint_settings();
- session_(@{targ_cfg: target_cfg,
+ Session_(@{targ_cfg: target_cfg,
opts: sopts,
cstore: cstore,
parse_sess: p_s,
lint_settings: lint_settings})
}
-fn parse_pretty(sess: session, &&name: ~str) -> pp_mode {
+fn parse_pretty(sess: Session, &&name: ~str) -> pp_mode {
match name {
~"normal" => ppm_normal,
~"expanded" => ppm_expanded,
}
}
-fn opts() -> ~[getopts::Opt] {
- return ~[optflag(~"h"), optflag(~"help"),
- optflag(~"v"), optflag(~"version"),
- optflag(~"emit-llvm"), optflagopt(~"pretty"),
- optflag(~"ls"), optflag(~"parse-only"), optflag(~"no-trans"),
- optflag(~"O"), optopt(~"opt-level"), optmulti(~"L"), optflag(~"S"),
- optopt(~"o"), optopt(~"out-dir"), optflag(~"xg"),
- optflag(~"c"), optflag(~"g"), optflag(~"save-temps"),
- optopt(~"sysroot"), optopt(~"target"),
- optflag(~"jit"),
-
- optmulti(~"W"), optmulti(~"warn"),
- optmulti(~"A"), optmulti(~"allow"),
- optmulti(~"D"), optmulti(~"deny"),
- optmulti(~"F"), optmulti(~"forbid"),
-
- optmulti(~"Z"),
-
- optmulti(~"cfg"), optflag(~"test"),
- optflag(~"lib"), optflag(~"bin"),
- optflag(~"static"), optflag(~"gc")];
+// rustc command line options
+fn optgroups() -> ~[getopts::groups::OptGroup] {
+ ~[
+ optflag(~"", ~"bin", ~"Compile an executable crate (default)"),
+ optflag(~"c", ~"", ~"Compile and assemble, but do not link"),
+ optmulti(~"", ~"cfg", ~"Configure the compilation
+ environment", ~"SPEC"),
+ optflag(~"", ~"emit-llvm",
+ ~"Produce an LLVM bitcode file"),
+ optflag(~"g", ~"", ~"Produce debug info (experimental)"),
+ optflag(~"", ~"gc", ~"Garbage collect shared data (experimental)"),
+ optflag(~"h", ~"help",~"Display this message"),
+ optmulti(~"L", ~"", ~"Add a directory to the library search path",
+ ~"PATH"),
+ optflag(~"", ~"lib", ~"Compile a library crate"),
+ optflag(~"", ~"ls", ~"List the symbols defined by a library crate"),
+ optflag(~"", ~"jit", ~"Execute using JIT (experimental)"),
+ optflag(~"", ~"no-trans",
+ ~"Run all passes except translation; no output"),
+ optflag(~"O", ~"", ~"Equivalent to --opt-level=2"),
+ optopt(~"o", ~"", ~"Write output to <filename>", ~"FILENAME"),
+ optopt(~"", ~"opt-level",
+ ~"Optimize with possible levels 0-3", ~"LEVEL"),
+ optopt( ~"", ~"out-dir",
+ ~"Write output to compiler-chosen filename
+ in <dir>", ~"DIR"),
+ optflag(~"", ~"parse-only",
+ ~"Parse only; do not compile, assemble, or link"),
+ optflagopt(~"", ~"pretty",
+ ~"Pretty-print the input instead of compiling;
+ valid types are: normal (un-annotated source),
+ expanded (crates expanded),
+ typed (crates expanded, with type annotations),
+ or identified (fully parenthesized,
+ AST nodes and blocks with IDs)", ~"TYPE"),
+ optflag(~"S", ~"", ~"Compile only; do not assemble or link"),
+ optflag(~"", ~"xg", ~"Extra debugging info (experimental)"),
+ optflag(~"", ~"save-temps",
+ ~"Write intermediate files (.bc, .opt.bc, .o)
+ in addition to normal output"),
+ optflag(~"", ~"static",
+ ~"Use or produce static libraries or binaries
+ (experimental)"),
+ optopt(~"", ~"sysroot",
+ ~"Override the system root", ~"PATH"),
+ optflag(~"", ~"test", ~"Build a test harness"),
+ optopt(~"", ~"target",
+ ~"Target triple cpu-manufacturer-kernel[-os]
+ to compile for (see
+ http://sources.redhat.com/autobook/autobook/autobook_17.html
+ for detail)", ~"TRIPLE"),
+ optmulti(~"W", ~"warn",
+ ~"Set lint warnings", ~"OPT"),
+ optmulti(~"A", ~"allow",
+ ~"Set lint allowed", ~"OPT"),
+ optmulti(~"D", ~"deny",
+ ~"Set lint denied", ~"OPT"),
+ optmulti(~"F", ~"forbid",
+ ~"Set lint forbidden", ~"OPT"),
+        optmulti(~"Z", ~"", ~"Set internal debugging options", ~"FLAG"),
+ optflag( ~"v", ~"version",
+ ~"Print version info and exit"),
+ ]
}
type output_filenames = @{out_filename:Path, obj_filename:Path};
fn build_output_filenames(input: input,
odir: &Option<Path>,
ofile: &Option<Path>,
- sess: session)
+ sess: Session)
-> output_filenames {
let obj_path;
let out_path;
fail;
}
-fn list_metadata(sess: session, path: &Path, out: io::Writer) {
+fn list_metadata(sess: Session, path: &Path, out: io::Writer) {
metadata::loader::list_file_metadata(
sess.parse_sess.interner,
session::sess_os_to_meta_os(sess.targ_cfg.os), path, out);
#[test]
fn test_switch_implies_cfg_test() {
let matches =
- match getopts::getopts(~[~"--test"], opts()) {
+ match getopts(~[~"--test"], optgroups()) {
Ok(m) => m,
Err(f) => fail ~"test_switch_implies_cfg_test: " +
getopts::fail_str(f)
#[test]
fn test_switch_implies_cfg_test_unless_cfg_test() {
let matches =
- match getopts::getopts(~[~"--test", ~"--cfg=test"], opts()) {
+ match getopts(~[~"--test", ~"--cfg=test"], optgroups()) {
Ok(m) => m,
Err(f) => {
fail ~"test_switch_implies_cfg_test_unless_cfg_test: " +
#[allow(non_camel_case_types)];
#[legacy_modes];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
-extern mod rustc(vers = "0.4");
-extern mod syntax(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
+extern mod rustc(vers = "0.5");
+extern mod syntax(vers = "0.5");
use core::*;
use std::getopts;
use std::map::HashMap;
use getopts::{opt_present};
+use getopts::groups;
use rustc::driver::driver::*;
use syntax::codemap;
use syntax::diagnostic;
}
fn usage(argv0: &str) {
- io::println(fmt!("Usage: %s [options] <input>\n", argv0) +
- ~"
-Options:
-
- --bin Compile an executable crate (default)
- -c Compile and assemble, but do not link
- --cfg <cfgspec> Configure the compilation environment
- --emit-llvm Produce an LLVM bitcode file
- -g Produce debug info (experimental)
- --gc Garbage collect shared data (experimental/temporary)
- -h --help Display this message
- -L <path> Add a directory to the library search path
- --lib Compile a library crate
- --ls List the symbols defined by a compiled library crate
- --jit Execute using JIT (experimental)
- --no-trans Run all passes except translation; no output
- -O Equivalent to --opt-level=2
- -o <filename> Write output to <filename>
- --opt-level <lvl> Optimize with possible levels 0-3
- --out-dir <dir> Write output to compiler-chosen filename in <dir>
- --parse-only Parse only; do not compile, assemble, or link
- --pretty [type] Pretty-print the input instead of compiling;
- valid types are: normal (un-annotated source),
- expanded (crates expanded), typed (crates expanded,
- with type annotations), or identified (fully
- parenthesized, AST nodes and blocks with IDs)
- -S Compile only; do not assemble or link
- --save-temps Write intermediate files (.bc, .opt.bc, .o)
- in addition to normal output
- --static Use or produce static libraries or binaries
- (experimental)
- --sysroot <path> Override the system root
- --test Build a test harness
- --target <triple> Target cpu-manufacturer-kernel[-os] to compile for
- (default: host triple)
- (see http://sources.redhat.com/autobook/autobook/
- autobook_17.html for detail)
-
- -(W|A|D|F) help Print available 'lint' checks and default settings
-
- -W <foo> warn about <foo> by default
- -A <foo> allow <foo> by default
- -D <foo> deny <foo> by default
- -F <foo> forbid <foo> (deny, and deny all overrides)
-
- -Z help list internal options for debugging rustc
-
- -v --version Print version info and exit
+ let message = fmt!("Usage: %s [OPTIONS] INPUT", argv0);
+ io::println(groups::usage(message, optgroups()) +
+ ~"Additional help:
+ -W help Print 'lint' options and default settings
+ -Z help Print internal options for debugging rustc
");
}
fn describe_warnings() {
+ io::println(fmt!("
+Available lint options:
+ -W <foo> Warn about <foo>
+ -A <foo> Allow <foo>
+ -D <foo> Deny <foo>
+ -F <foo> Forbid <foo> (deny, and deny all overrides)
+"));
+
let lint_dict = lint::get_lint_dict();
let mut max_key = 0;
for lint_dict.each_key |k| { max_key = uint::max(k.len(), max_key); }
io::println(fmt!("\nAvailable debug options:\n"));
for session::debugging_opts_map().each |pair| {
let (name, desc, _) = *pair;
- io::println(fmt!(" -Z%-20s -- %s", name, desc));
+ io::println(fmt!(" -Z %-20s -- %s", name, desc));
}
}
if args.is_empty() { usage(binary); return; }
let matches =
- match getopts::getopts(args, opts()) {
+ match getopts::groups::getopts(args, optgroups()) {
Ok(m) => m,
Err(f) => {
early_error(demitter, getopts::fail_str(f))
// The 'diagnostics emitter'. Every error, warning, etc. should
// go through this function.
- let demitter = fn@(cmsp: Option<(codemap::codemap, codemap::span)>,
+ let demitter = fn@(cmsp: Option<(codemap::CodeMap, codemap::span)>,
msg: &str, lvl: diagnostic::level) {
if lvl == diagnostic::fatal {
comm::send(ch, fatal);
~"try running with RUST_LOG=rustc=0,::rt::backtrace \
to get further details and report the results \
to github.com/mozilla/rust/issues"
- ]/_.each |note| {
+ ].each |note| {
diagnostic::emit(None, *note, diagnostic::note)
}
}
const no_asm_comments: uint = 1 << 5;
const no_verify: uint = 1 << 6;
const trace: uint = 1 << 7;
-// FIXME (#2377): This exists to transition to a Rust crate runtime
-// It should be removed
-const no_rt: uint = 1 << 8;
-const coherence: uint = 1 << 9;
-const borrowck_stats: uint = 1 << 10;
-const borrowck_note_pure: uint = 1 << 11;
-const borrowck_note_loan: uint = 1 << 12;
-const no_landing_pads: uint = 1 << 13;
-const debug_llvm: uint = 1 << 14;
-const count_type_sizes: uint = 1 << 15;
-const meta_stats: uint = 1 << 16;
-const no_opt: uint = 1 << 17;
+const coherence: uint = 1 << 8;
+const borrowck_stats: uint = 1 << 9;
+const borrowck_note_pure: uint = 1 << 10;
+const borrowck_note_loan: uint = 1 << 11;
+const no_landing_pads: uint = 1 << 12;
+const debug_llvm: uint = 1 << 13;
+const count_type_sizes: uint = 1 << 14;
+const meta_stats: uint = 1 << 15;
+const no_opt: uint = 1 << 16;
fn debugging_opts_map() -> ~[(~str, ~str, uint)] {
~[(~"verbose", ~"in general, enable more debug printouts", verbose),
(~"no-asm-comments", ~"omit comments when using -S", no_asm_comments),
(~"no-verify", ~"skip LLVM verification", no_verify),
(~"trace", ~"emit trace logs", trace),
- (~"no-rt", ~"do not link to the runtime", no_rt),
(~"coherence", ~"perform coherence checking", coherence),
(~"borrowck-stats", ~"gather borrowck statistics", borrowck_stats),
(~"borrowck-note-pure", ~"note where purity is req'd",
type crate_metadata = {name: ~str, data: ~[u8]};
-type session_ = {targ_cfg: @config,
+type Session_ = {targ_cfg: @config,
opts: @options,
- cstore: metadata::cstore::cstore,
+ cstore: metadata::cstore::CStore,
parse_sess: parse_sess,
- codemap: codemap::codemap,
+ codemap: codemap::CodeMap,
// For a library crate, this is always none
mut main_fn: Option<(node_id, codemap::span)>,
span_diagnostic: diagnostic::span_handler,
- filesearch: filesearch::filesearch,
+ filesearch: filesearch::FileSearch,
mut building_library: bool,
working_dir: Path,
lint_settings: lint::lint_settings};
+// Nominal wrapper around the @Session_ record so methods can be hung off
+// it (see the `impl Session` block below).
-enum session {
-    session_(@session_)
+enum Session {
+    Session_(@Session_)
}
-impl session {
+impl Session {
fn span_fatal(sp: span, msg: ~str) -> ! {
self.span_diagnostic.span_fatal(sp, msg)
}
// This exists to help with refactoring to eliminate impossible
// cases later on
fn impossible_case(sp: span, msg: &str) -> ! {
- self.span_bug(sp, #fmt("Impossible case reached: %s", msg));
+ self.span_bug(sp, fmt!("Impossible case reached: %s", msg));
}
fn verbose() -> bool { self.debugging_opt(verbose) }
fn time_passes() -> bool { self.debugging_opt(time_passes) }
}
// Seems out of place, but it uses session, so I'm putting it here
-fn expect<T: Copy>(sess: session, opt: Option<T>, msg: fn() -> ~str) -> T {
+// Unwraps `opt`, routing a missing value through the session's diagnostic
+// handler with the lazily-built message `msg`.
+fn expect<T: Copy>(sess: Session, opt: Option<T>, msg: fn() -> ~str) -> T {
    diagnostic::expect(sess.diagnostic(), opt, msg)
}
-use driver::session::session;
+use driver::session::Session;
use syntax::codemap;
use syntax::ast;
use syntax::ast_util::*;
export maybe_inject_libcore_ref;
-fn maybe_inject_libcore_ref(sess: session,
+fn maybe_inject_libcore_ref(sess: Session,
crate: @ast::crate) -> @ast::crate {
if use_core(crate) {
inject_libcore_ref(sess, crate)
!attr::attrs_contains_name(crate.node.attrs, ~"no_core")
}
-fn inject_libcore_ref(sess: session,
+fn inject_libcore_ref(sess: Session,
crate: @ast::crate) -> @ast::crate {
fn spanned<T: Copy>(x: T) -> @ast::spanned<T> {
pub use rusti::visit_tydesc;
- // FIXME (#2712): remove this when the interface has settled and the
+ // FIXME (#3727): remove this when the interface has settled and the
// version in sys is no longer present.
pub fn get_tydesc<T>() -> *TyDesc {
rusti::get_tydesc::<T>() as *TyDesc
-use driver::session::session;
+use driver::session::Session;
use syntax::parse;
use syntax::ast;
export inject_intrinsic;
-fn inject_intrinsic(sess: session,
+fn inject_intrinsic(sess: Session,
crate: @ast::crate) -> @ast::crate {
let intrinsic_module = @include_str!("intrinsic.rs");
use syntax::print::pprust;
use syntax::codemap::span;
use driver::session;
-use session::session;
+use session::Session;
use syntax::attr;
use dvec::DVec;
ignore: bool, should_fail: bool};
type test_ctxt =
- @{sess: session::session,
+ @{sess: session::Session,
crate: @ast::crate,
mut path: ~[ast::ident],
testfns: DVec<test>};
// Traverse the crate, collecting all the test functions, eliding any
// existing main functions, and synthesizing a main test harness
-fn modify_for_testing(sess: session::session,
+fn modify_for_testing(sess: session::Session,
crate: @ast::crate) -> @ast::crate {
if sess.opts.test {
}
}
-fn generate_test_harness(sess: session::session,
+fn generate_test_harness(sess: session::Session,
crate: @ast::crate) -> @ast::crate {
let cx: test_ctxt =
@{sess: sess,
else { vec::append(~[cx.sess.ident_of(~"std")], path) }
}
-// The ast::ty of ~[std::test::test_desc]
-fn mk_test_desc_vec_ty(cx: test_ctxt) -> @ast::ty {
+// The ast::Ty of ~[std::test::test_desc]
+fn mk_test_desc_vec_ty(cx: test_ctxt) -> @ast::Ty {
let test_desc_ty_path =
path_node(mk_path(cx, ~[cx.sess.ident_of(~"test"),
cx.sess.ident_of(~"TestDesc")]));
- let test_desc_ty: ast::ty =
+ let test_desc_ty: ast::Ty =
{id: cx.sess.next_node_id(),
node: ast::ty_path(test_desc_ty_path, cx.sess.next_node_id()),
span: dummy_sp()};
tag_table_legacy_boxed_trait = 0x63
}
+const tag_item_trait_method_sort: uint = 0x70;
+
+const tag_item_impl_type_basename: uint = 0x71;
+
type link_meta = {name: ~str, vers: ~str, extras_hash: ~str};
use syntax::codemap::span;
use std::map::HashMap;
use syntax::print::pprust;
-use filesearch::filesearch;
+use filesearch::FileSearch;
use common::*;
use dvec::DVec;
use syntax::parse::token::ident_interner;
// Traverses an AST, reading all the information about use'd crates and extern
// libraries necessary for later resolving, typechecking, linking, etc.
fn read_crates(diag: span_handler, crate: ast::crate,
- cstore: cstore::cstore, filesearch: filesearch,
+ cstore: cstore::CStore, filesearch: FileSearch,
os: loader::os, static: bool, intr: @ident_interner) {
let e = @{diag: diag,
filesearch: filesearch,
}
type env = @{diag: span_handler,
- filesearch: filesearch,
- cstore: cstore::cstore,
+ filesearch: FileSearch,
+ cstore: cstore::CStore,
os: loader::os,
static: bool,
crate_cache: DVec<cache_entry>,
// Searching for information from the cstore
-use std::{ebml};
+use std::ebml;
use syntax::ast;
use syntax::ast_util;
use syntax::ast_map;
export get_enum_variants;
export get_impls_for_mod;
export get_trait_methods;
+export get_provided_trait_methods;
export get_method_names_if_trait;
+export get_type_name_if_impl;
+export get_static_methods_if_impl;
export get_item_attrs;
export each_path;
export get_type;
export get_impl_method;
export get_item_path;
export maybe_get_item_ast, found_ast, found, found_parent, not_found;
+export ProvidedTraitMethodInfo;
+export StaticMethodInfo;
-fn get_symbol(cstore: cstore::cstore, def: ast::def_id) -> ~str {
+// Describes a trait method that carries a provided (default) body, as read
+// back from an external crate's metadata by
+// decoder::get_provided_trait_methods.
+struct ProvidedTraitMethodInfo {
+    // Full ty::method record (name, bounds, fn type, self type, ...).
+    ty: ty::method,
+    // def id of the method item in its defining crate.
+    def_id: ast::def_id
+}
+
+// Describes a static (no-self) method of an impl, as read back from an
+// external crate's metadata by decoder::get_static_methods_if_impl.
+struct StaticMethodInfo {
+    // Method name.
+    ident: ast::ident,
+    // def id of the method item in its defining crate.
+    def_id: ast::def_id,
+    // impure_fn, unsafe_fn or pure_fn, derived from the item family.
+    purity: ast::purity
+}
+
+// Fetches the linker symbol recorded for `def` in its crate's metadata.
+fn get_symbol(cstore: cstore::CStore, def: ast::def_id) -> ~str {
    let cdata = cstore::get_crate_data(cstore, def.crate).data;
    return decoder::get_symbol(cdata, def.node);
}
-fn get_type_param_count(cstore: cstore::cstore, def: ast::def_id) -> uint {
+// Number of type parameters recorded for item `def`.
+fn get_type_param_count(cstore: cstore::CStore, def: ast::def_id) -> uint {
    let cdata = cstore::get_crate_data(cstore, def.crate).data;
    return decoder::get_type_param_count(cdata, def.node);
}
/// Iterates over all the paths in the given crate.
-fn each_path(cstore: cstore::cstore, cnum: ast::crate_num,
+fn each_path(cstore: cstore::CStore, cnum: ast::crate_num,
f: fn(decoder::path_entry) -> bool) {
let crate_data = cstore::get_crate_data(cstore, cnum);
decoder::each_path(cstore.intr, crate_data, f);
return decoder::get_enum_variants(cstore.intr, cdata, def.node, tcx)
}
-fn get_impls_for_mod(cstore: cstore::cstore, def: ast::def_id,
+fn get_impls_for_mod(cstore: cstore::CStore, def: ast::def_id,
name: Option<ast::ident>)
-> @~[@decoder::_impl] {
let cdata = cstore::get_crate_data(cstore, def.crate);
decoder::get_trait_methods(cstore.intr, cdata, def.node, tcx)
}
-fn get_method_names_if_trait(cstore: cstore::cstore, def: ast::def_id)
+// Fetches the provided (default) methods of trait `def` from the defining
+// crate's metadata.
+fn get_provided_trait_methods(tcx: ty::ctxt, def: ast::def_id) ->
+                              ~[ProvidedTraitMethodInfo] {
+    let cstore = tcx.cstore;
+    let cdata = cstore::get_crate_data(cstore, def.crate);
+    decoder::get_provided_trait_methods(cstore.intr, cdata, def.node, tcx)
+}
+
+// If `def` is a trait, returns the (name, self-type) pairs of its methods;
+// None otherwise.
+fn get_method_names_if_trait(cstore: cstore::CStore, def: ast::def_id)
    -> Option<@DVec<(ast::ident, ast::self_ty_)>> {
    let cdata = cstore::get_crate_data(cstore, def.crate);
    return decoder::get_method_names_if_trait(cstore.intr, cdata, def.node);
}
-fn get_item_attrs(cstore: cstore::cstore,
+// If `def` is an impl, returns its self type's base name as encoded under
+// tag_item_impl_type_basename; None otherwise.
+fn get_type_name_if_impl(cstore: cstore::CStore, def: ast::def_id) ->
+                         Option<ast::ident> {
+    let cdata = cstore::get_crate_data(cstore, def.crate);
+    decoder::get_type_name_if_impl(cstore.intr, cdata, def.node)
+}
+
+// If `def` is an impl without a trait ref, returns info for its static
+// methods; None for non-impls and trait impls.
+fn get_static_methods_if_impl(cstore: cstore::CStore, def: ast::def_id) ->
+                              Option<~[StaticMethodInfo]> {
+    let cdata = cstore::get_crate_data(cstore, def.crate);
+    decoder::get_static_methods_if_impl(cstore.intr, cdata, def.node)
+}
+
+fn get_item_attrs(cstore: cstore::CStore,
def_id: ast::def_id,
f: fn(~[@ast::meta_item])) {
decoder::get_type(cdata, def.node, tcx)
}
-fn get_region_param(cstore: metadata::cstore::cstore,
+fn get_region_param(cstore: metadata::cstore::CStore,
def: ast::def_id) -> Option<ty::region_variance> {
let cdata = cstore::get_crate_data(cstore, def.crate);
return decoder::get_region_param(cdata, def.node);
decoder::get_impl_traits(cdata, def.node, tcx)
}
-fn get_impl_method(cstore: cstore::cstore,
+fn get_impl_method(cstore: cstore::CStore,
def: ast::def_id, mname: ast::ident)
-> ast::def_id {
let cdata = cstore::get_crate_data(cstore, def.crate);
for their methods (so that get_trait_methods can be reused to get
class methods), classes require a slightly different version of
get_impl_method. Sigh. */
-fn get_class_method(cstore: cstore::cstore,
+fn get_class_method(cstore: cstore::CStore,
def: ast::def_id, mname: ast::ident)
-> ast::def_id {
let cdata = cstore::get_crate_data(cstore, def.crate);
}
/* If def names a class with a dtor, return it. Otherwise, return none. */
-fn class_dtor(cstore: cstore::cstore, def: ast::def_id)
+fn class_dtor(cstore: cstore::CStore, def: ast::def_id)
-> Option<ast::def_id> {
let cdata = cstore::get_crate_data(cstore, def.crate);
decoder::class_dtor(cdata, def.node)
use syntax::{ast, attr};
use syntax::parse::token::ident_interner;
-export cstore;
+export CStore;
export cnum_map;
export crate_metadata;
export mk_cstore;
// other modules to access the cstore's private data. This could also be
// achieved with an obj, but at the expense of a vtable. Not sure if this is a
// good pattern or not.
-enum cstore { private(cstore_private), }
+enum CStore { private(cstore_private), }
type cstore_private =
@{metas: map::HashMap<ast::crate_num, crate_metadata>,
type use_crate_map = map::HashMap<ast::node_id, ast::crate_num>;
// Internal method to retrieve the data from the cstore
-pure fn p(cstore: cstore) -> cstore_private {
+// Unwraps the CStore enum's single `private` variant.
+pure fn p(cstore: CStore) -> cstore_private {
    match cstore { private(p) => p }
}
-fn mk_cstore(intr: @ident_interner) -> cstore {
+fn mk_cstore(intr: @ident_interner) -> CStore {
let meta_cache = map::HashMap();
let crate_map = map::HashMap();
let mod_path_map = HashMap();
intr: intr});
}
-fn get_crate_data(cstore: cstore, cnum: ast::crate_num) -> crate_metadata {
+// Looks up the metadata record registered for crate number `cnum`.
+fn get_crate_data(cstore: CStore, cnum: ast::crate_num) -> crate_metadata {
    return p(cstore).metas.get(cnum);
}
-fn get_crate_hash(cstore: cstore, cnum: ast::crate_num) -> ~str {
+// Crate hash, decoded on demand from the crate's metadata blob.
+fn get_crate_hash(cstore: CStore, cnum: ast::crate_num) -> ~str {
    let cdata = get_crate_data(cstore, cnum);
    return decoder::get_crate_hash(cdata.data);
}
-fn get_crate_vers(cstore: cstore, cnum: ast::crate_num) -> ~str {
+// Crate version string, decoded on demand from the metadata blob.
+fn get_crate_vers(cstore: CStore, cnum: ast::crate_num) -> ~str {
    let cdata = get_crate_data(cstore, cnum);
    return decoder::get_crate_vers(cdata.data);
}
-fn set_crate_data(cstore: cstore, cnum: ast::crate_num,
+fn set_crate_data(cstore: CStore, cnum: ast::crate_num,
data: crate_metadata) {
p(cstore).metas.insert(cnum, data);
for vec::each(decoder::get_crate_module_paths(cstore.intr, data)) |dp| {
}
}
-fn have_crate_data(cstore: cstore, cnum: ast::crate_num) -> bool {
+// True if metadata for crate `cnum` has already been registered.
+fn have_crate_data(cstore: CStore, cnum: ast::crate_num) -> bool {
    return p(cstore).metas.contains_key(cnum);
}
-fn iter_crate_data(cstore: cstore, i: fn(ast::crate_num, crate_metadata)) {
+// Invokes `i` for every (crate number, metadata) pair in the store.
+fn iter_crate_data(cstore: CStore, i: fn(ast::crate_num, crate_metadata)) {
    for p(cstore).metas.each |k,v| { i(k, v);};
}
-fn add_used_crate_file(cstore: cstore, lib: &Path) {
+// Records a crate file used by this compilation, de-duplicating paths.
+fn add_used_crate_file(cstore: CStore, lib: &Path) {
    if !vec::contains(p(cstore).used_crate_files, lib) {
        p(cstore).used_crate_files.push(copy *lib);
    }
}
-fn get_used_crate_files(cstore: cstore) -> ~[Path] {
+fn get_used_crate_files(cstore: CStore) -> ~[Path] {
    return p(cstore).used_crate_files;
}
-fn add_used_library(cstore: cstore, lib: ~str) -> bool {
+fn add_used_library(cstore: CStore, lib: ~str) -> bool {
assert lib != ~"";
if vec::contains(p(cstore).used_libraries, &lib) { return false; }
return true;
}
-fn get_used_libraries(cstore: cstore) -> ~[~str] {
+fn get_used_libraries(cstore: CStore) -> ~[~str] {
    return p(cstore).used_libraries;
}
-fn add_used_link_args(cstore: cstore, args: ~str) {
+// Splits `args` on spaces and appends each token to the stored link args.
+fn add_used_link_args(cstore: CStore, args: ~str) {
    p(cstore).used_link_args.push_all(str::split_char(args, ' '));
}
-fn get_used_link_args(cstore: cstore) -> ~[~str] {
+fn get_used_link_args(cstore: CStore) -> ~[~str] {
    return p(cstore).used_link_args;
}
-fn add_use_stmt_cnum(cstore: cstore, use_id: ast::node_id,
+// Associates a `use` statement's node id with the crate number it resolved
+// to.
+fn add_use_stmt_cnum(cstore: CStore, use_id: ast::node_id,
                     cnum: ast::crate_num) {
    p(cstore).use_crate_map.insert(use_id, cnum);
}
-fn find_use_stmt_cnum(cstore: cstore,
+// Lookup for the mapping recorded by add_use_stmt_cnum.
+fn find_use_stmt_cnum(cstore: CStore,
                      use_id: ast::node_id) -> Option<ast::crate_num> {
    p(cstore).use_crate_map.find(use_id)
}
// returns hashes of crates directly used by this crate. Hashes are
// sorted by crate name.
-fn get_dep_hashes(cstore: cstore) -> ~[~str] {
+fn get_dep_hashes(cstore: CStore) -> ~[~str] {
type crate_hash = {name: ~str, hash: ~str};
let mut result = ~[];
return vec::map(sorted, mapper);
}
-fn get_path(cstore: cstore, d: ast::def_id) -> ~[~str] {
+// Module path segments for `d`, split from the "::"-joined string stored in
+// mod_path_map; empty when the def id is unknown.
+fn get_path(cstore: CStore, d: ast::def_id) -> ~[~str] {
    option::map_default(&p(cstore).mod_path_map.find(d), ~[],
                        |ds| str::split_str(**ds, ~"::"))
}
// Decoding metadata from a single crate's metadata
-use std::{ebml, map};
+use std::ebml;
+use std::map;
use std::map::HashMap;
+use std::serialization::deserialize;
use io::WriterUtil;
use dvec::DVec;
use syntax::{ast, ast_util};
use common::*;
use syntax::parse::token::ident_interner;
use hash::{Hash, HashUtil};
+use csearch::{ProvidedTraitMethodInfo, StaticMethodInfo};
export class_dtor;
export get_class_fields;
export get_impl_traits;
export get_class_method;
export get_impl_method;
+export get_static_methods_if_impl;
export lookup_def;
export resolve_path;
export get_crate_attributes;
export get_crate_vers;
export get_impls_for_mod;
export get_trait_methods;
+export get_provided_trait_methods;
export get_method_names_if_trait;
+export get_type_name_if_impl;
export get_item_attrs;
export get_crate_module_paths;
export def_like;
let table = ebml::get_doc(index, tag_index_table);
let hash_pos = table.start + hash % 256u * 4u;
let pos = io::u64_from_be_bytes(*d.data, hash_pos, 4u) as uint;
- let {tag:_, doc:bucket} = ebml::doc_at(d.data, pos);
+ let tagged_doc = ebml::doc_at(d.data, pos);
let belt = tag_index_buckets_bucket_elt;
- for ebml::tagged_docs(bucket, belt) |elt| {
+ for ebml::tagged_docs(tagged_doc.doc, belt) |elt| {
let pos = io::u64_from_be_bytes(*elt.data, elt.start, 4u) as uint;
if eq_fn(vec::view(*elt.data, elt.start + 4u, elt.end)) {
return Some(ebml::doc_at(d.data, pos).doc);
Variant, // v
Impl, // i
Trait, // I
- Class, // C
Struct, // S
PublicField, // g
PrivateField, // j
'v' => Variant,
'i' => Impl,
'I' => Trait,
- 'C' => Class,
'S' => Struct,
'g' => PublicField,
'j' => PrivateField,
'N' => InheritedField,
- c => fail (#fmt("unexpected family char: %c", c))
+ c => fail (fmt!("unexpected family char: %c", c))
}
}
+// Reads a trait method item's "sort" character from metadata: 'p' appears
+// to mark provided (defaulted) methods -- see get_provided_trait_methods --
+// and 'r' required ones. A missing tag defaults to 'r'.
+fn item_method_sort(item: ebml::Doc) -> char {
+    for ebml::tagged_docs(item, tag_item_trait_method_sort) |doc| {
+        // First tagged doc wins; its first byte is the sort character.
+        return str::from_bytes(ebml::doc_data(doc))[0] as char;
+    }
+    return 'r';
+}
+
fn item_symbol(item: ebml::Doc) -> ~str {
let sym = ebml::get_doc(item, tag_items_data_item_symbol);
return str::from_bytes(ebml::doc_data(sym));
None
}
+// Translates an item's optional parent-item def id into crate `cnum`'s id
+// space (the encoded id is relative to the crate that wrote the metadata).
+fn translated_parent_item_opt(cnum: ast::crate_num, d: ebml::Doc) ->
+                              Option<ast::def_id> {
+    let trait_did_opt = item_parent_item(d);
+    trait_did_opt.map(|trait_did| {crate: cnum, node: trait_did.node})
+}
+
+// As translated_parent_item_opt, but the parent is required: fails with
+// "item without parent" when it is missing.
+fn item_reqd_and_translated_parent_item(cnum: ast::crate_num,
+                                        d: ebml::Doc) -> ast::def_id {
+    let trait_did = item_parent_item(d).expect(~"item without parent");
+    {crate: cnum, node: trait_did.node}
+}
+
fn item_def_id(d: ebml::Doc, cdata: cmd) -> ast::def_id {
let tagdoc = ebml::get_doc(d, tag_def_id);
return translate_def_id(cdata, ebml::with_doc_data(tagdoc,
fn item_ty_region_param(item: ebml::Doc) -> Option<ty::region_variance> {
ebml::maybe_get_doc(item, tag_region_param).map(|doc| {
- let d = ebml::ebml_deserializer(*doc);
- ty::deserialize_region_variance(d)
+ deserialize(&ebml::Deserializer(*doc))
})
}
}
fn item_to_def_like(item: ebml::Doc, did: ast::def_id, cnum: ast::crate_num)
-    -> def_like {
+    -> def_like
+{
    let fam = item_family(item);
    match fam {
-      Const => dl_def(ast::def_const(did)),
-      Class => dl_def(ast::def_class(did, true)),
-      Struct => dl_def(ast::def_class(did, false)),
-      UnsafeFn => dl_def(ast::def_fn(did, ast::unsafe_fn)),
-      Fn => dl_def(ast::def_fn(did, ast::impure_fn)),
-      PureFn => dl_def(ast::def_fn(did, ast::pure_fn)),
-      ForeignFn => dl_def(ast::def_fn(did, ast::extern_fn)),
-      UnsafeStaticMethod => dl_def(ast::def_static_method(did,
-                                                          ast::unsafe_fn)),
-      StaticMethod => dl_def(ast::def_static_method(did, ast::impure_fn)),
-      PureStaticMethod => dl_def(ast::def_static_method(did, ast::pure_fn)),
-      Type | ForeignType => dl_def(ast::def_ty(did)),
-      Mod => dl_def(ast::def_mod(did)),
-      ForeignMod => dl_def(ast::def_foreign_mod(did)),
-      Variant => {
-        match item_parent_item(item) {
-          Some(t) => {
-            let tid = {crate: cnum, node: t.node};
-            dl_def(ast::def_variant(tid, did))
-          }
-          None => fail ~"item_to_def_like: enum item has no parent"
-        }
-      }
-      Trait | Enum => dl_def(ast::def_ty(did)),
-      Impl => dl_impl(did),
-      PublicField | PrivateField | InheritedField => dl_field,
+        Const => dl_def(ast::def_const(did)),
+        Struct => dl_def(ast::def_class(did)),
+        UnsafeFn => dl_def(ast::def_fn(did, ast::unsafe_fn)),
+        Fn => dl_def(ast::def_fn(did, ast::impure_fn)),
+        PureFn => dl_def(ast::def_fn(did, ast::pure_fn)),
+        ForeignFn => dl_def(ast::def_fn(did, ast::extern_fn)),
+        // Static methods now carry their parent trait's def id (if any),
+        // translated into this crate's id space.
+        UnsafeStaticMethod => {
+            let trait_did_opt = translated_parent_item_opt(cnum, item);
+            dl_def(ast::def_static_method(did, trait_did_opt, ast::unsafe_fn))
+        }
+        StaticMethod => {
+            let trait_did_opt = translated_parent_item_opt(cnum, item);
+            dl_def(ast::def_static_method(did, trait_did_opt, ast::impure_fn))
+        }
+        PureStaticMethod => {
+            let trait_did_opt = translated_parent_item_opt(cnum, item);
+            dl_def(ast::def_static_method(did, trait_did_opt, ast::pure_fn))
+        }
+        Type | ForeignType => dl_def(ast::def_ty(did)),
+        Mod => dl_def(ast::def_mod(did)),
+        ForeignMod => dl_def(ast::def_foreign_mod(did)),
+        // A variant's def pairs its parent enum's (translated) def id with
+        // its own; the parent is mandatory here.
+        Variant => {
+            let enum_did = item_reqd_and_translated_parent_item(cnum, item);
+            dl_def(ast::def_variant(enum_did, did))
+        }
+        Trait | Enum => dl_def(ast::def_ty(did)),
+        Impl => dl_impl(did),
+        PublicField | PrivateField | InheritedField => dl_field,
    }
}
let ctor_ty = item_type({crate: cdata.cnum, node: id}, item,
tcx, cdata);
let name = item_name(intr, item);
- let mut arg_tys: ~[ty::t] = ~[];
- match ty::get(ctor_ty).sty {
- ty::ty_fn(f) => {
- for f.sig.inputs.each |a| { arg_tys.push(a.ty); }
- }
- _ => { /* Nullary enum variant. */ }
- }
+ let arg_tys = match ty::get(ctor_ty).sty {
+ ty::ty_fn(f) => f.sig.inputs.map(|a| a.ty),
+
+ // Nullary enum variant.
+ _ => ~[],
+ };
match variant_disr_val(item) {
Some(val) => { disr_val = val; }
_ => { /* empty */ }
let mth_item = lookup_item(m_did.node, cdata.data);
let self_ty = get_self_ty(mth_item);
rslt.push(@{did: translate_def_id(cdata, m_did),
- /* FIXME (maybe #2323) tjc: take a look at this. */
n_tps: item_ty_param_count(mth_item) - base_tps,
ident: item_name(intr, mth_item),
self_type: self_ty});
let bounds = item_ty_param_bounds(mth, tcx, cdata);
let name = item_name(intr, mth);
let ty = doc_type(mth, tcx, cdata);
+ let def_id = item_def_id(mth, cdata);
let fty = match ty::get(ty).sty {
ty::ty_fn(f) => f,
_ => {
~"get_trait_methods: id has non-function type");
} };
let self_ty = get_self_ty(mth);
- result.push({ident: name, tps: bounds, fty: fty,
- self_ty: self_ty,
- vis: ast::public});
+ result.push({ident: name, tps: bounds, fty: fty, self_ty: self_ty,
+ vis: ast::public, def_id: def_id});
}
- #debug("get_trait_methods: }");
+ debug!("get_trait_methods: }");
@result
}
+// Collects a ProvidedTraitMethodInfo for every method of trait `id` whose
+// metadata sort tag marks it as provided (see item_method_sort).
+fn get_provided_trait_methods(intr: @ident_interner, cdata: cmd,
+                              id: ast::node_id, tcx: ty::ctxt) ->
+                              ~[ProvidedTraitMethodInfo] {
+    let data = cdata.data;
+    let item = lookup_item(id, data);
+    let mut result = ~[];
+
+    for ebml::tagged_docs(item, tag_item_trait_method) |mth| {
+        // 'loop' is the old keyword for 'continue': skip required methods.
+        if item_method_sort(mth) != 'p' { loop; }
+
+        let did = item_def_id(mth, cdata);
+
+        let bounds = item_ty_param_bounds(mth, tcx, cdata);
+        let name = item_name(intr, mth);
+        let ty = doc_type(mth, tcx, cdata);
+
+        // A trait method must decode to a function type; anything else is
+        // a metadata bug.
+        let fty;
+        match ty::get(ty).sty {
+            ty::ty_fn(f) => fty = f,
+            _ => {
+                tcx.diag.handler().bug(~"get_provided_trait_methods(): id \
+                                         has non-function type");
+            }
+        }
+
+        let self_ty = get_self_ty(mth);
+        let ty_method = {ident: name, tps: bounds, fty: fty, self_ty: self_ty,
+                         vis: ast::public, def_id: did};
+        let provided_trait_method_info = ProvidedTraitMethodInfo {
+            ty: ty_method,
+            def_id: did
+        };
+
+        vec::push(&mut result, move provided_trait_method_info);
+    }
+
+    return move result;
+}
+
// If the item in question is a trait, returns its set of methods and
// their self types. Otherwise, returns none. This overlaps in an
// annoying way with get_trait_methods.
return Some(resulting_methods);
}
+// If item `node_id` is an impl, returns its self type's base name as
+// recorded under tag_item_impl_type_basename; None for non-impls or when
+// no basename was encoded.
+fn get_type_name_if_impl(intr: @ident_interner,
+                         cdata: cmd,
+                         node_id: ast::node_id) -> Option<ast::ident> {
+    let item = lookup_item(node_id, cdata.data);
+    if item_family(item) != Impl {
+        return None;
+    }
+
+    // At most one basename is expected; return the first one found.
+    for ebml::tagged_docs(item, tag_item_impl_type_basename) |doc| {
+        return Some(intr.intern(@str::from_bytes(ebml::doc_data(doc))));
+    }
+
+    return None;
+}
+
+// If item `node_id` is an impl with no trait ref, returns a
+// StaticMethodInfo for each of its methods whose encoded family is one of
+// the static-method variants; None for non-impls and trait impls.
+fn get_static_methods_if_impl(intr: @ident_interner,
+                              cdata: cmd,
+                              node_id: ast::node_id) ->
+                              Option<~[StaticMethodInfo]> {
+    let item = lookup_item(node_id, cdata.data);
+    if item_family(item) != Impl {
+        return None;
+    }
+
+    // If this impl has a trait ref, don't consider it.
+    for ebml::tagged_docs(item, tag_impl_trait) |_doc| {
+        return None;
+    }
+
+    // Gather the def ids of all methods in the impl...
+    let impl_method_ids = DVec();
+    for ebml::tagged_docs(item, tag_item_impl_method) |impl_method_doc| {
+        impl_method_ids.push(parse_def_id(ebml::doc_data(impl_method_doc)));
+    }
+
+    // ...then keep only those whose family is a static-method kind,
+    // mapping the family to the corresponding purity.
+    let static_impl_methods = DVec();
+    for impl_method_ids.each |impl_method_id| {
+        let impl_method_doc = lookup_item(impl_method_id.node, cdata.data);
+        let family = item_family(impl_method_doc);
+        match family {
+            StaticMethod | UnsafeStaticMethod | PureStaticMethod => {
+                let purity;
+                match item_family(impl_method_doc) {
+                    StaticMethod => purity = ast::impure_fn,
+                    UnsafeStaticMethod => purity = ast::unsafe_fn,
+                    PureStaticMethod => purity = ast::pure_fn,
+                    _ => fail
+                }
+
+                static_impl_methods.push(StaticMethodInfo {
+                    ident: item_name(intr, impl_method_doc),
+                    def_id: item_def_id(impl_method_doc, cdata),
+                    purity: purity
+                });
+            }
+            _ => {}
+        }
+    }
+
+    return Some(dvec::unwrap(move static_impl_methods));
+}
+
fn get_item_attrs(cdata: cmd,
node_id: ast::node_id,
f: fn(~[@ast::meta_item])) {
Variant => ~"variant",
Impl => ~"impl",
Trait => ~"trait",
- Class => ~"class",
Struct => ~"struct",
PublicField => ~"public field",
PrivateField => ~"private field",
use std::{ebml, map};
use std::map::HashMap;
use io::WriterUtil;
-use ebml::Writer;
+use ebml::Serializer;
use syntax::ast::*;
use syntax::print::pprust;
use syntax::{ast_util, visit};
type abbrev_map = map::HashMap<ty::t, tyencode::ty_abbrev>;
type encode_inlined_item = fn@(ecx: @encode_ctxt,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
path: ast_map::path,
ii: ast::inlined_item);
item_symbols: HashMap<ast::node_id, ~str>,
discrim_symbols: HashMap<ast::node_id, ~str>,
link_meta: link_meta,
- cstore: cstore::cstore,
+ cstore: cstore::CStore,
encode_inlined_item: encode_inlined_item
};
item_symbols: HashMap<ast::node_id, ~str>,
discrim_symbols: HashMap<ast::node_id, ~str>,
link_meta: link_meta,
- cstore: cstore::cstore,
+ cstore: cstore::CStore,
encode_inlined_item: encode_inlined_item,
type_abbrevs: abbrev_map
};
ecx.reachable.contains_key(id)
}
-fn encode_name(ecx: @encode_ctxt, ebml_w: ebml::Writer, name: ident) {
+// Writes an item's name under tag_paths_data_name.
+fn encode_name(ecx: @encode_ctxt, ebml_w: ebml::Serializer, name: ident) {
    ebml_w.wr_tagged_str(tag_paths_data_name, ecx.tcx.sess.str_of(name));
}
-fn encode_def_id(ebml_w: ebml::Writer, id: def_id) {
+// Records the base name of an impl's self type; read back by
+// decoder::get_type_name_if_impl.
+fn encode_impl_type_basename(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+                             name: ident) {
+    ebml_w.wr_tagged_str(tag_item_impl_type_basename,
+                         ecx.tcx.sess.str_of(name));
+}
+
+// Writes a def id (as "crate:node") under tag_def_id.
+fn encode_def_id(ebml_w: ebml::Serializer, id: def_id) {
    ebml_w.wr_tagged_str(tag_def_id, def_to_str(id));
}
-fn encode_region_param(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+// Serializes the item's region-parameter variance, if one is recorded.
+fn encode_region_param(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
                       it: @ast::item) {
    let opt_rp = ecx.tcx.region_paramd_items.find(it.id);
    for opt_rp.each |rp| {
        do ebml_w.wr_tag(tag_region_param) {
-            ty::serialize_region_variance(ebml_w, *rp);
+            (*rp).serialize(&ebml_w);
        }
    }
}
-fn encode_mutability(ebml_w: ebml::Writer, mt: class_mutability) {
+fn encode_mutability(ebml_w: ebml::Serializer, mt: class_mutability) {
do ebml_w.wr_tag(tag_class_mut) {
let val = match mt {
class_immutable => 'a',
type entry<T> = {val: T, pos: uint};
-fn add_to_index(ecx: @encode_ctxt, ebml_w: ebml::Writer, path: &[ident],
+fn add_to_index(ecx: @encode_ctxt, ebml_w: ebml::Serializer, path: &[ident],
index: &mut ~[entry<~str>], name: ident) {
let mut full_path = ~[];
full_path.push_all(path);
pos: ebml_w.writer.tell()});
}
-fn encode_trait_ref(ebml_w: ebml::Writer, ecx: @encode_ctxt, t: @trait_ref) {
+fn encode_trait_ref(ebml_w: ebml::Serializer, ecx: @encode_ctxt,
+ t: @trait_ref) {
ebml_w.start_tag(tag_impl_trait);
encode_type(ecx, ebml_w, node_id_to_type(ecx.tcx, t.ref_id));
ebml_w.end_tag();
// Item info table encoding
-fn encode_family(ebml_w: ebml::Writer, c: char) {
+fn encode_family(ebml_w: ebml::Serializer, c: char) {
ebml_w.start_tag(tag_items_data_item_family);
ebml_w.writer.write(&[c as u8]);
ebml_w.end_tag();
fn def_to_str(did: def_id) -> ~str { fmt!("%d:%d", did.crate, did.node) }
-fn encode_ty_type_param_bounds(ebml_w: ebml::Writer, ecx: @encode_ctxt,
+fn encode_ty_type_param_bounds(ebml_w: ebml::Serializer, ecx: @encode_ctxt,
params: @~[ty::param_bounds]) {
let ty_str_ctxt = @{diag: ecx.diag,
ds: def_to_str,
}
}
-fn encode_type_param_bounds(ebml_w: ebml::Writer, ecx: @encode_ctxt,
+fn encode_type_param_bounds(ebml_w: ebml::Serializer, ecx: @encode_ctxt,
params: ~[ty_param]) {
let ty_param_bounds =
@params.map(|param| ecx.tcx.ty_param_bounds.get(param.id));
}
-fn encode_variant_id(ebml_w: ebml::Writer, vid: def_id) {
+fn encode_variant_id(ebml_w: ebml::Serializer, vid: def_id) {
ebml_w.start_tag(tag_items_data_item_variant);
ebml_w.writer.write(str::to_bytes(def_to_str(vid)));
ebml_w.end_tag();
}
-fn write_type(ecx: @encode_ctxt, ebml_w: ebml::Writer, typ: ty::t) {
+fn write_type(ecx: @encode_ctxt, ebml_w: ebml::Serializer, typ: ty::t) {
let ty_str_ctxt =
@{diag: ecx.diag,
ds: def_to_str,
tyencode::enc_ty(ebml_w.writer, ty_str_ctxt, typ);
}
-fn write_vstore(ecx: @encode_ctxt, ebml_w: ebml::Writer, vstore: ty::vstore) {
+fn write_vstore(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+ vstore: ty::vstore) {
let ty_str_ctxt =
@{diag: ecx.diag,
ds: def_to_str,
tyencode::enc_vstore(ebml_w.writer, ty_str_ctxt, vstore);
}
-fn encode_type(ecx: @encode_ctxt, ebml_w: ebml::Writer, typ: ty::t) {
+fn encode_type(ecx: @encode_ctxt, ebml_w: ebml::Serializer, typ: ty::t) {
ebml_w.start_tag(tag_items_data_item_type);
write_type(ecx, ebml_w, typ);
ebml_w.end_tag();
}
-fn encode_symbol(ecx: @encode_ctxt, ebml_w: ebml::Writer, id: node_id) {
+fn encode_symbol(ecx: @encode_ctxt, ebml_w: ebml::Serializer, id: node_id) {
ebml_w.start_tag(tag_items_data_item_symbol);
let sym = match ecx.item_symbols.find(id) {
Some(x) => x,
ebml_w.end_tag();
}
-fn encode_discriminant(ecx: @encode_ctxt, ebml_w: ebml::Writer, id: node_id) {
+fn encode_discriminant(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+ id: node_id) {
ebml_w.start_tag(tag_items_data_item_symbol);
ebml_w.writer.write(str::to_bytes(ecx.discrim_symbols.get(id)));
ebml_w.end_tag();
}
-fn encode_disr_val(_ecx: @encode_ctxt, ebml_w: ebml::Writer, disr_val: int) {
+fn encode_disr_val(_ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+ disr_val: int) {
ebml_w.start_tag(tag_disr_val);
ebml_w.writer.write(str::to_bytes(int::to_str(disr_val,10u)));
ebml_w.end_tag();
}
-fn encode_parent_item(ebml_w: ebml::Writer, id: def_id) {
+fn encode_parent_item(ebml_w: ebml::Serializer, id: def_id) {
ebml_w.start_tag(tag_items_data_parent_item);
ebml_w.writer.write(str::to_bytes(def_to_str(id)));
ebml_w.end_tag();
}
-fn encode_enum_variant_info(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_enum_variant_info(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
id: node_id, variants: ~[variant],
path: ast_map::path, index: @mut ~[entry<int>],
ty_params: ~[ty_param]) {
}
}
-fn encode_path(ecx: @encode_ctxt, ebml_w: ebml::Writer, path: ast_map::path,
- name: ast_map::path_elt) {
- fn encode_path_elt(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_path(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+ path: ast_map::path, name: ast_map::path_elt) {
+ fn encode_path_elt(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
elt: ast_map::path_elt) {
let (tag, name) = match elt {
ast_map::path_mod(name) => (tag_path_elt_mod, name),
}
}
-fn encode_info_for_mod(ecx: @encode_ctxt, ebml_w: ebml::Writer, md: _mod,
+fn encode_info_for_mod(ecx: @encode_ctxt, ebml_w: ebml::Serializer, md: _mod,
id: node_id, path: ast_map::path, name: ident) {
ebml_w.start_tag(tag_items_data_item);
encode_def_id(ebml_w, local_def(id));
ebml_w.end_tag();
}
-fn encode_visibility(ebml_w: ebml::Writer, visibility: visibility) {
+fn encode_visibility(ebml_w: ebml::Serializer, visibility: visibility) {
encode_family(ebml_w, match visibility {
public => 'g',
private => 'j',
});
}
-fn encode_self_type(ebml_w: ebml::Writer, self_type: ast::self_ty_) {
+fn encode_self_type(ebml_w: ebml::Serializer, self_type: ast::self_ty_) {
ebml_w.start_tag(tag_item_trait_method_self_ty);
// Encode the base self type.
ebml_w.end_tag();
}
+fn encode_method_sort(ebml_w: ebml::Serializer, sort: char) {
+ ebml_w.start_tag(tag_item_trait_method_sort);
+ ebml_w.writer.write(&[ sort as u8 ]);
+ ebml_w.end_tag();
+}
+
/* Returns an index of items in this class */
-fn encode_info_for_class(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_info_for_class(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
id: node_id, path: ast_map::path,
class_tps: ~[ty_param],
fields: ~[@struct_field],
}
// This is for encoding info for ctors and dtors
-fn encode_info_for_ctor(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_info_for_ctor(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
id: node_id, ident: ident, path: ast_map::path,
item: Option<inlined_item>, tps: ~[ty_param]) {
ebml_w.start_tag(tag_items_data_item);
ebml_w.end_tag();
}
-fn encode_info_for_method(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_info_for_method(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
impl_path: ast_map::path, should_inline: bool,
parent_id: node_id,
m: @method, all_tps: ~[ty_param]) {
ecx.tcx.sess.str_of(m.ident), all_tps.len());
ebml_w.start_tag(tag_items_data_item);
encode_def_id(ebml_w, local_def(m.id));
- encode_family(ebml_w, purity_fn_family(m.purity));
+ match m.self_ty.node {
+ ast::sty_static => {
+ encode_family(ebml_w, purity_static_method_family(m.purity));
+ }
+ _ => encode_family(ebml_w, purity_fn_family(m.purity))
+ }
encode_type_param_bounds(ebml_w, ecx, all_tps);
encode_type(ecx, ebml_w, node_id_to_type(ecx.tcx, m.id));
encode_name(ecx, ebml_w, m.ident);
}
-fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::Writer, item: @item,
- index: @mut ~[entry<int>], path: ast_map::path) {
+fn encode_info_for_item(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+ item: @item, index: @mut ~[entry<int>],
+ path: ast_map::path) {
let tcx = ecx.tcx;
let must_write =
};
if !must_write && !reachable(ecx, item.id) { return; }
- fn add_to_index_(item: @item, ebml_w: ebml::Writer,
+ fn add_to_index_(item: @item, ebml_w: ebml::Serializer,
index: @mut ~[entry<int>]) {
index.push({val: item.id, pos: ebml_w.writer.tell()});
}
/* Now, make an item for the class itself */
ebml_w.start_tag(tag_items_data_item);
encode_def_id(ebml_w, local_def(item.id));
-
- match struct_def.ctor {
- None => encode_family(ebml_w, 'S'),
- Some(_) => encode_family(ebml_w, 'C')
- }
-
+ encode_family(ebml_w, 'S');
encode_type_param_bounds(ebml_w, ecx, tps);
encode_type(ecx, ebml_w, node_id_to_type(tcx, item.id));
encode_name(ecx, ebml_w, item.ident);
let bkts = create_index(idx);
encode_index(ebml_w, bkts, write_int);
ebml_w.end_tag();
-
- /* Encode the constructor */
- for struct_def.ctor.each |ctor| {
- debug!("encoding info for ctor %s %d",
- ecx.tcx.sess.str_of(item.ident), ctor.node.id);
- index.push({
- val: ctor.node.id,
- pos: ebml_w.writer.tell()
- });
- encode_info_for_ctor(ecx, ebml_w, ctor.node.id, item.ident,
- path, if tps.len() > 0u {
- Some(ii_ctor(*ctor, item.ident, tps,
- local_def(item.id))) }
- else { None }, tps);
- }
}
- item_impl(tps, opt_trait, _, methods) => {
+ item_impl(tps, opt_trait, ty, methods) => {
add_to_index();
ebml_w.start_tag(tag_items_data_item);
encode_def_id(ebml_w, local_def(item.id));
encode_type(ecx, ebml_w, node_id_to_type(tcx, item.id));
encode_name(ecx, ebml_w, item.ident);
encode_attributes(ebml_w, item.attrs);
+ match ty.node {
+ ast::ty_path(path, _) if path.idents.len() == 1 => {
+ encode_impl_type_basename(ecx, ebml_w,
+ ast_util::path_to_ident(path));
+ }
+ _ => {}
+ }
for methods.each |m| {
ebml_w.start_tag(tag_item_impl_method);
ebml_w.writer.write(str::to_bytes(def_to_str(local_def(m.id))));
}
}
item_trait(tps, traits, ms) => {
+ let provided_methods = dvec::DVec();
+
add_to_index();
ebml_w.start_tag(tag_items_data_item);
encode_def_id(ebml_w, local_def(item.id));
encode_type(ecx, ebml_w, ty::mk_fn(tcx, mty.fty));
encode_family(ebml_w, purity_fn_family(mty.fty.meta.purity));
encode_self_type(ebml_w, mty.self_ty);
+ encode_method_sort(ebml_w, 'r');
ebml_w.end_tag();
}
provided(m) => {
- encode_info_for_method(ecx, ebml_w, path,
- should_inline(m.attrs), item.id,
- m, m.tps);
+ provided_methods.push(m);
+
+ ebml_w.start_tag(tag_item_trait_method);
+ encode_def_id(ebml_w, local_def(m.id));
+ encode_name(ecx, ebml_w, mty.ident);
+ encode_type_param_bounds(ebml_w, ecx, m.tps);
+ encode_type(ecx, ebml_w, ty::mk_fn(tcx, mty.fty));
+ encode_family(ebml_w, purity_fn_family(mty.fty.meta.purity));
+ encode_self_type(ebml_w, mty.self_ty);
+ encode_method_sort(ebml_w, 'p');
+ ebml_w.end_tag();
}
}
i += 1u;
ebml_w.start_tag(tag_items_data_item);
encode_def_id(ebml_w, local_def(ty_m.id));
+ encode_parent_item(ebml_w, local_def(item.id));
encode_name(ecx, ebml_w, ty_m.ident);
encode_family(ebml_w,
purity_static_method_family(ty_m.purity));
ebml_w.end_tag();
}
-
+ // Finally, output all the provided methods as items.
+ for provided_methods.each |m| {
+ index.push({val: m.id, pos: ebml_w.writer.tell()});
+ encode_info_for_method(ecx, ebml_w, path, true, item.id, *m,
+ m.tps);
+ }
}
item_mac(*) => fail ~"item macros unimplemented"
}
}
-fn encode_info_for_foreign_item(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_info_for_foreign_item(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
nitem: @foreign_item,
index: @mut ~[entry<int>],
path: ast_map::path, abi: foreign_abi) {
ebml_w.end_tag();
}
-fn encode_info_for_items(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_info_for_items(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
crate: @crate) -> ~[entry<int>] {
let index = @mut ~[];
ebml_w.start_tag(tag_items_data);
return buckets_frozen;
}
-fn encode_index<T>(ebml_w: ebml::Writer, buckets: ~[@~[entry<T>]],
+fn encode_index<T>(ebml_w: ebml::Serializer, buckets: ~[@~[entry<T>]],
write_fn: fn(io::Writer, T)) {
let writer = ebml_w.writer;
ebml_w.start_tag(tag_index);
writer.write_be_u32(n as u32);
}
-fn encode_meta_item(ebml_w: ebml::Writer, mi: meta_item) {
+fn encode_meta_item(ebml_w: ebml::Serializer, mi: meta_item) {
match mi.node {
meta_word(name) => {
ebml_w.start_tag(tag_meta_item_word);
}
}
-fn encode_attributes(ebml_w: ebml::Writer, attrs: ~[attribute]) {
+fn encode_attributes(ebml_w: ebml::Serializer, attrs: ~[attribute]) {
ebml_w.start_tag(tag_attributes);
for attrs.each |attr| {
ebml_w.start_tag(tag_attribute);
return attrs;
}
-fn encode_crate_deps(ecx: @encode_ctxt, ebml_w: ebml::Writer,
- cstore: cstore::cstore) {
+fn encode_crate_deps(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
+ cstore: cstore::CStore) {
- fn get_ordered_deps(ecx: @encode_ctxt, cstore: cstore::cstore)
+ fn get_ordered_deps(ecx: @encode_ctxt, cstore: cstore::CStore)
-> ~[decoder::crate_dep] {
type hashkv = @{key: crate_num, val: cstore::crate_metadata};
ebml_w.end_tag();
}
-fn encode_crate_dep(ecx: @encode_ctxt, ebml_w: ebml::Writer,
+fn encode_crate_dep(ecx: @encode_ctxt, ebml_w: ebml::Serializer,
dep: decoder::crate_dep) {
ebml_w.start_tag(tag_crate_dep);
ebml_w.start_tag(tag_crate_dep_name);
ebml_w.end_tag();
}
-fn encode_hash(ebml_w: ebml::Writer, hash: ~str) {
+fn encode_hash(ebml_w: ebml::Serializer, hash: ~str) {
ebml_w.start_tag(tag_crate_hash);
ebml_w.writer.write(str::to_bytes(hash));
ebml_w.end_tag();
type_abbrevs: ty::new_ty_hash()
});
- let ebml_w = ebml::Writer(wr as io::Writer);
+ let ebml_w = ebml::Serializer(wr as io::Writer);
encode_hash(ebml_w, ecx.link_meta.extras_hash);
if (parms.tcx.sess.meta_stats()) {
- do wr.buf.borrow |v| {
+ do wr.bytes.borrow |v| {
do v.each |e| {
if *e == 0 {
ecx.stats.zero_bytes += 1;
(do str::as_bytes(&~"rust\x00\x00\x00\x01") |bytes| {
vec::slice(*bytes, 0, 8)
- }) + flate::deflate_bytes(wr.buf.check_out(|buf| buf))
+ }) + flate::deflate_bytes(wr.bytes.check_out(|buf| buf))
}
// Get the encoded string for a type
// probably just be folded into cstore.
use result::Result;
-export filesearch;
+export FileSearch;
export mk_filesearch;
export pick;
export pick_file;
else { option::None }
}
-trait filesearch {
+trait FileSearch {
fn sysroot() -> Path;
fn lib_search_paths() -> ~[Path];
fn get_target_lib_path() -> Path;
fn mk_filesearch(maybe_sysroot: Option<Path>,
target_triple: &str,
- addl_lib_search_paths: ~[Path]) -> filesearch {
+ addl_lib_search_paths: ~[Path]) -> FileSearch {
type filesearch_impl = {sysroot: Path,
addl_lib_search_paths: ~[Path],
target_triple: ~str};
- impl filesearch_impl: filesearch {
+ impl filesearch_impl: FileSearch {
fn sysroot() -> Path { self.sysroot }
fn lib_search_paths() -> ~[Path] {
let mut paths = self.addl_lib_search_paths;
debug!("using sysroot = %s", sysroot.to_str());
{sysroot: sysroot,
addl_lib_search_paths: addl_lib_search_paths,
- target_triple: str::from_slice(target_triple)} as filesearch
+ target_triple: str::from_slice(target_triple)} as FileSearch
}
-fn search<T: Copy>(filesearch: filesearch, pick: pick<T>) -> Option<T> {
+fn search<T: Copy>(filesearch: FileSearch, pick: pick<T>) -> Option<T> {
let mut rslt = None;
for filesearch.lib_search_paths().each |lib_search_path| {
debug!("searching %s", lib_search_path.to_str());
use syntax::print::pprust;
use syntax::codemap::span;
use lib::llvm::{False, llvm, mk_object_file, mk_section_iter};
-use filesearch::filesearch;
+use filesearch::FileSearch;
use io::WriterUtil;
use syntax::parse::token::ident_interner;
type ctxt = {
diag: span_handler,
- filesearch: filesearch,
+ filesearch: FileSearch,
span: span,
ident: ast::ident,
metas: ~[@ast::meta_item],
fn find_library_crate_aux(cx: ctxt,
nn: {prefix: ~str, suffix: ~str},
- filesearch: filesearch::filesearch) ->
+ filesearch: filesearch::FileSearch) ->
Option<{ident: ~str, data: @~[u8]}> {
let crate_name = crate_name_from_metas(cx.metas);
let prefix: ~str = nn.prefix + crate_name + ~"-";
}
}
-fn parse_region(st: @pstate) -> ty::region {
+fn parse_region(st: @pstate) -> ty::Region {
match next(st) {
'b' => {
ty::re_bound(parse_bound_region(st))
w.write_char(']');
}
-fn enc_region(w: io::Writer, cx: @ctxt, r: ty::region) {
+fn enc_region(w: io::Writer, cx: @ctxt, r: ty::Region) {
match r {
ty::re_bound(br) => {
w.write_char('b');
use syntax::ast_util;
use syntax::codemap::span;
use std::ebml;
-use std::ebml::Writer;
-use std::ebml::get_doc;
+use std::ebml::{Serializer, get_doc};
use std::map::HashMap;
-use std::serialization::Serializer;
-use std::serialization::Deserializer;
-use std::serialization::SerializerHelpers;
-use std::serialization::DeserializerHelpers;
-use std::prettyprint::Serializer;
+use std::serialization;
+use std::serialization::{Serializable,
+ SerializerHelpers,
+ DeserializerHelpers,
+ deserialize};
use middle::{ty, typeck};
use middle::typeck::{method_origin, method_map_entry,
vtable_res,
vtable_origin};
-use driver::session::session;
-use middle::freevars::{freevar_entry,
- serialize_freevar_entry,
- deserialize_freevar_entry};
+use driver::session::Session;
+use middle::freevars::freevar_entry;
use c = metadata::common;
use e = metadata::encoder;
use cstore = metadata::cstore;
// Top-level methods.
fn encode_inlined_item(ecx: @e::encode_ctxt,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
path: ast_map::path,
ii: ast::inlined_item,
maps: maps) {
let id_range = ast_util::compute_id_range_for_inlined_item(ii);
do ebml_w.wr_tag(c::tag_ast as uint) {
- ast_util::serialize_id_range(ebml_w, id_range);
+ id_range.serialize(&ebml_w);
encode_ast(ebml_w, simplify_ast(ii));
encode_side_tables_for_ii(ecx, maps, ebml_w, ii);
}
Some(ast_doc) => {
debug!("> Decoding inlined fn: %s::?",
ast_map::path_to_str(path, tcx.sess.parse_sess.interner));
- let ast_dsr = ebml::ebml_deserializer(ast_doc);
- let from_id_range = ast_util::deserialize_id_range(ast_dsr);
+ let ast_dsr = &ebml::Deserializer(ast_doc);
+ let from_id_range = deserialize(ast_dsr);
let to_id_range = reserve_id_range(dcx.tcx.sess, from_id_range);
let xcx = extended_decode_ctxt_(@{dcx: dcx,
from_id_range: from_id_range,
// ______________________________________________________________________
// Enumerating the IDs which appear in an AST
-fn reserve_id_range(sess: session,
+fn reserve_id_range(sess: Session,
from_id_range: ast_util::id_range) -> ast_util::id_range {
// Handle the case of an empty range:
if ast_util::empty(from_id_range) { return from_id_range; }
fn emit_def_id(did: ast::def_id);
}
-impl<S: Serializer> S: def_id_serializer_helpers {
+impl<S: serialization::Serializer> S: def_id_serializer_helpers {
fn emit_def_id(did: ast::def_id) {
- ast::serialize_def_id(self, did)
+ did.serialize(&self)
}
}
fn read_def_id(xcx: extended_decode_ctxt) -> ast::def_id;
}
-impl<D: Deserializer> D: def_id_deserializer_helpers {
+impl<D: serialization::Deserializer> D: def_id_deserializer_helpers {
fn read_def_id(xcx: extended_decode_ctxt) -> ast::def_id {
- let did = ast::deserialize_def_id(self);
+ let did: ast::def_id = deserialize(&self);
did.tr(xcx)
}
}
// We also have to adjust the spans: for now we just insert a dummy span,
// but eventually we should add entries to the local codemap as required.
-fn encode_ast(ebml_w: ebml::Writer, item: ast::inlined_item) {
+fn encode_ast(ebml_w: ebml::Serializer, item: ast::inlined_item) {
do ebml_w.wr_tag(c::tag_tree as uint) {
- ast::serialize_inlined_item(ebml_w, item)
+ item.serialize(&ebml_w)
}
}
ast::ii_foreign(i) => {
ast::ii_foreign(fld.fold_foreign_item(i))
}
- ast::ii_ctor(ctor, nm, tps, parent_id) => {
- let ctor_body = fld.fold_block(ctor.node.body);
- let ctor_decl = fold::fold_fn_decl(ctor.node.dec, fld);
- ast::ii_ctor({node: {body: ctor_body, dec: ctor_decl,
- .. ctor.node},
- .. ctor}, nm, tps, parent_id)
- }
ast::ii_dtor(dtor, nm, tps, parent_id) => {
let dtor_body = fld.fold_block(dtor.node.body);
ast::ii_dtor({node: {body: dtor_body,
fn decode_ast(par_doc: ebml::Doc) -> ast::inlined_item {
let chi_doc = par_doc[c::tag_tree as uint];
- let d = ebml::ebml_deserializer(chi_doc);
- ast::deserialize_inlined_item(d)
+ let d = &ebml::Deserializer(chi_doc);
+ deserialize(d)
}
fn renumber_ast(xcx: extended_decode_ctxt, ii: ast::inlined_item)
ast::ii_foreign(i) => {
ast::ii_foreign(fld.fold_foreign_item(i))
}
- ast::ii_ctor(ctor, nm, tps, parent_id) => {
- let ctor_body = fld.fold_block(ctor.node.body);
- let ctor_attrs = fld.fold_attributes(ctor.node.attrs);
- let ctor_decl = fold::fold_fn_decl(ctor.node.dec, fld);
- let new_params = fold::fold_ty_params(tps, fld);
- let ctor_id = fld.new_id(ctor.node.id);
- let new_parent = xcx.tr_def_id(parent_id);
- ast::ii_ctor({node: {body: ctor_body, attrs: ctor_attrs,
- dec: ctor_decl, id: ctor_id,
- .. ctor.node},
- .. ctor}, nm, new_params, new_parent)
- }
ast::ii_dtor(dtor, nm, tps, parent_id) => {
let dtor_body = fld.fold_block(dtor.node.body);
let dtor_attrs = fld.fold_attributes(dtor.node.attrs);
// ______________________________________________________________________
// Encoding and decoding of ast::def
-fn encode_def(ebml_w: ebml::Writer, def: ast::def) {
- ast::serialize_def(ebml_w, def)
+fn encode_def(ebml_w: ebml::Serializer, def: ast::def) {
+ def.serialize(&ebml_w)
}
fn decode_def(xcx: extended_decode_ctxt, doc: ebml::Doc) -> ast::def {
- let dsr = ebml::ebml_deserializer(doc);
- let def = ast::deserialize_def(dsr);
+ let dsr = &ebml::Deserializer(doc);
+ let def: ast::def = deserialize(dsr);
def.tr(xcx)
}
fn tr(xcx: extended_decode_ctxt) -> ast::def {
match self {
ast::def_fn(did, p) => { ast::def_fn(did.tr(xcx), p) }
- ast::def_static_method(did, p) => {
- ast::def_static_method(did.tr(xcx), p)
+ ast::def_static_method(did, did2_opt, p) => {
+ ast::def_static_method(did.tr(xcx),
+ did2_opt.map(|did2| did2.tr(xcx)),
+ p)
}
ast::def_self(nid) => { ast::def_self(xcx.tr_id(nid)) }
ast::def_mod(did) => { ast::def_mod(did.tr(xcx)) }
xcx.tr_id(nid2),
xcx.tr_id(nid3))
}
- ast::def_class(did, has_constructor) => {
- ast::def_class(did.tr(xcx), has_constructor)
+ ast::def_class(did) => {
+ ast::def_class(did.tr(xcx))
}
ast::def_region(nid) => ast::def_region(xcx.tr_id(nid)),
ast::def_typaram_binder(nid) => {
}
}
-impl ty::region: tr {
- fn tr(xcx: extended_decode_ctxt) -> ty::region {
+impl ty::Region: tr {
+ fn tr(xcx: extended_decode_ctxt) -> ty::Region {
match self {
ty::re_bound(br) => ty::re_bound(br.tr(xcx)),
ty::re_free(id, br) => ty::re_free(xcx.tr_id(id), br.tr(xcx)),
// ______________________________________________________________________
// Encoding and decoding of freevar information
-fn encode_freevar_entry(ebml_w: ebml::Writer, fv: freevar_entry) {
- serialize_freevar_entry(ebml_w, fv)
+fn encode_freevar_entry(ebml_w: ebml::Serializer, fv: @freevar_entry) {
+ (*fv).serialize(&ebml_w)
}
trait ebml_deserializer_helper {
fn read_freevar_entry(xcx: extended_decode_ctxt) -> freevar_entry;
}
-impl ebml::EbmlDeserializer: ebml_deserializer_helper {
+impl ebml::Deserializer: ebml_deserializer_helper {
fn read_freevar_entry(xcx: extended_decode_ctxt) -> freevar_entry {
- let fv = deserialize_freevar_entry(self);
+ let fv: freevar_entry = deserialize(&self);
fv.tr(xcx)
}
}
}
fn serialize_method_map_entry(ecx: @e::encode_ctxt,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
mme: method_map_entry) {
do ebml_w.emit_rec {
- do ebml_w.emit_rec_field(~"self_arg", 0u) {
+ do ebml_w.emit_field(~"self_arg", 0u) {
ebml_w.emit_arg(ecx, mme.self_arg);
}
- do ebml_w.emit_rec_field(~"origin", 1u) {
- typeck::serialize_method_origin(ebml_w, mme.origin);
+ do ebml_w.emit_field(~"origin", 1u) {
+ mme.origin.serialize(&ebml_w);
}
}
}
-impl ebml::EbmlDeserializer: read_method_map_entry_helper {
+impl ebml::Deserializer: read_method_map_entry_helper {
fn read_method_map_entry(xcx: extended_decode_ctxt) -> method_map_entry {
do self.read_rec {
{self_arg:
- self.read_rec_field(~"self_arg", 0u, || {
+ self.read_field(~"self_arg", 0u, || {
self.read_arg(xcx)
}),
origin:
- self.read_rec_field(~"origin", 1u, || {
- typeck::deserialize_method_origin(self).tr(xcx)
+ self.read_field(~"origin", 1u, || {
+ let method_origin: method_origin = deserialize(&self);
+ method_origin.tr(xcx)
})}
}
}
// Encoding and decoding vtable_res
fn encode_vtable_res(ecx: @e::encode_ctxt,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
dr: typeck::vtable_res) {
// can't autogenerate this code because automatic serialization of
// ty::t doesn't work, and there is no way (atm) to have
// hand-written serialization routines combine with auto-generated
// ones. perhaps we should fix this.
do ebml_w.emit_from_vec(*dr) |vtable_origin| {
- encode_vtable_origin(ecx, ebml_w, vtable_origin)
+ encode_vtable_origin(ecx, ebml_w, *vtable_origin)
}
}
fn encode_vtable_origin(ecx: @e::encode_ctxt,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
vtable_origin: typeck::vtable_origin) {
do ebml_w.emit_enum(~"vtable_origin") {
match vtable_origin {
fn read_vtable_origin(xcx: extended_decode_ctxt) -> typeck::vtable_origin;
}
-impl ebml::EbmlDeserializer: vtable_deserialization_helpers {
+impl ebml::Deserializer: vtable_deserialization_helpers {
fn read_vtable_res(xcx: extended_decode_ctxt) -> typeck::vtable_res {
@self.read_to_vec(|| self.read_vtable_origin(xcx) )
}
fn emit_tpbt(ecx: @e::encode_ctxt, tpbt: ty::ty_param_bounds_and_ty);
}
-impl ebml::Writer: ebml_writer_helpers {
+impl ebml::Serializer: ebml_writer_helpers {
fn emit_ty(ecx: @e::encode_ctxt, ty: ty::t) {
do self.emit_opaque {
e::write_type(ecx, self, ty)
fn emit_tys(ecx: @e::encode_ctxt, tys: ~[ty::t]) {
do self.emit_from_vec(tys) |ty| {
- self.emit_ty(ecx, ty)
+ self.emit_ty(ecx, *ty)
}
}
fn emit_tpbt(ecx: @e::encode_ctxt, tpbt: ty::ty_param_bounds_and_ty) {
do self.emit_rec {
- do self.emit_rec_field(~"bounds", 0u) {
+ do self.emit_field(~"bounds", 0u) {
do self.emit_from_vec(*tpbt.bounds) |bs| {
- self.emit_bounds(ecx, bs);
+ self.emit_bounds(ecx, *bs);
}
}
- do self.emit_rec_field(~"region_param", 1u) {
- ty::serialize_opt_region_variance(
- self,
- tpbt.region_param);
+ do self.emit_field(~"region_param", 1u) {
+ tpbt.region_param.serialize(&self);
}
- do self.emit_rec_field(~"ty", 2u) {
+ do self.emit_field(~"ty", 2u) {
self.emit_ty(ecx, tpbt.ty);
}
}
fn id(id: ast::node_id);
}
-impl ebml::Writer: write_tag_and_id {
+impl ebml::Serializer: write_tag_and_id {
fn tag(tag_id: c::astencode_tag, f: fn()) {
do self.wr_tag(tag_id as uint) { f() }
}
fn encode_side_tables_for_ii(ecx: @e::encode_ctxt,
maps: maps,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
ii: ast::inlined_item) {
do ebml_w.wr_tag(c::tag_table as uint) {
ast_util::visit_ids_for_inlined_item(
fn encode_side_tables_for_id(ecx: @e::encode_ctxt,
maps: maps,
- ebml_w: ebml::Writer,
+ ebml_w: ebml::Serializer,
id: ast::node_id) {
let tcx = ecx.tcx;
do ebml_w.tag(c::tag_table_def) {
ebml_w.id(id);
do ebml_w.tag(c::tag_table_val) {
- ast::serialize_def(ebml_w, *def)
+ (*def).serialize(&ebml_w)
}
}
}
ebml_w.id(id);
do ebml_w.tag(c::tag_table_val) {
do ebml_w.emit_from_vec((*m).get()) |id| {
- ebml_w.emit_int(id);
+ id.serialize(&ebml_w);
}
}
}
do ebml_w.tag(c::tag_table_adjustments) {
ebml_w.id(id);
do ebml_w.tag(c::tag_table_val) {
- ty::serialize_AutoAdjustment(ebml_w, **adj)
+ (**adj).serialize(&ebml_w)
}
}
}
-> ty::ty_param_bounds_and_ty;
}
-impl ebml::EbmlDeserializer: ebml_deserializer_decoder_helpers {
+impl ebml::Deserializer: ebml_deserializer_decoder_helpers {
fn read_arg(xcx: extended_decode_ctxt) -> ty::arg {
do self.read_opaque |doc| {
{
do self.read_rec {
{
- bounds: self.read_rec_field(~"bounds", 0u, || {
+ bounds: self.read_field(~"bounds", 0u, || {
@self.read_to_vec(|| self.read_bounds(xcx) )
}),
- region_param: self.read_rec_field(~"region_param", 1u, || {
- ty::deserialize_opt_region_variance(self)
+ region_param: self.read_field(~"region_param", 1u, || {
+ deserialize(&self)
}),
- ty: self.read_rec_field(~"ty", 2u, || {
+ ty: self.read_field(~"ty", 2u, || {
self.read_ty(xcx)
})
}
dcx.tcx.legacy_boxed_traits.insert(id, ());
} else {
let val_doc = entry_doc[c::tag_table_val as uint];
- let val_dsr = ebml::ebml_deserializer(val_doc);
+ let val_dsr = &ebml::Deserializer(val_doc);
if tag == (c::tag_table_def as uint) {
let def = decode_def(xcx, val_doc);
dcx.tcx.def_map.insert(id, def);
dcx.maps.vtable_map.insert(id,
val_dsr.read_vtable_res(xcx));
} else if tag == (c::tag_table_adjustments as uint) {
- let adj = @ty::deserialize_AutoAdjustment(val_dsr).tr(xcx);
+ let adj: @ty::AutoAdjustment = @deserialize(val_dsr);
+ adj.tr(xcx);
dcx.tcx.adjustments.insert(id, adj);
} else {
xcx.dcx.tcx.sess.bug(
// Testing of astencode_gen
#[cfg(test)]
-fn encode_item_ast(ebml_w: ebml::Writer, item: @ast::item) {
+fn encode_item_ast(ebml_w: ebml::Serializer, item: @ast::item) {
do ebml_w.wr_tag(c::tag_tree as uint) {
- ast::serialize_item(ebml_w, *item);
+ (*item).serialize(&ebml_w)
}
}
#[cfg(test)]
fn decode_item_ast(par_doc: ebml::Doc) -> @ast::item {
let chi_doc = par_doc[c::tag_tree as uint];
- let d = ebml::ebml_deserializer(chi_doc);
- @ast::deserialize_item(d)
+ let d = &ebml::Deserializer(chi_doc);
+ @deserialize(d)
}
#[cfg(test)]
#[cfg(test)]
fn roundtrip(in_item: @ast::item) {
let bytes = do io::with_bytes_writer |wr| {
- let ebml_w = ebml::Writer(wr);
+ let ebml_w = ebml::Serializer(wr);
encode_item_ast(ebml_w, in_item);
};
let ebml_doc = ebml::Doc(@bytes);
let out_item = decode_item_ast(ebml_doc);
- let exp_str =
- io::with_str_writer(|w| ast::serialize_item(w, *in_item) );
- let out_str =
- io::with_str_writer(|w| ast::serialize_item(w, *out_item) );
+ let exp_str = do io::with_str_writer |w| {
+ in_item.serialize(&std::prettyprint::Serializer(w))
+ };
+ let out_str = do io::with_str_writer |w| {
+ out_item.serialize(&std::prettyprint::Serializer(w))
+ };
debug!("expected string: %s", exp_str);
debug!("actual string : %s", out_str);
use syntax::print::pprust;
use util::common::indenter;
use ty::to_str;
-use driver::session::session;
use dvec::DVec;
use mem_categorization::*;
err_mut_variant,
err_root_not_permitted,
err_mutbl(ast::mutability),
- err_out_of_root_scope(ty::region, ty::region), // superscope, subscope
- err_out_of_scope(ty::region, ty::region) // superscope, subscope
+ err_out_of_root_scope(ty::Region, ty::Region), // superscope, subscope
+ err_out_of_scope(ty::Region, ty::Region) // superscope, subscope
}
impl bckerr_code : cmp::Eq {
type bckres<T> = Result<T, bckerr>;
/// a complete record of a loan that was granted
-type loan = {lp: @loan_path, cmt: cmt, mutbl: ast::mutability};
+struct Loan {lp: @loan_path, cmt: cmt, mutbl: ast::mutability}
/// maps computed by `gather_loans` that are then used by `check_loans`
///
/// - `pure_map`: map from block/expr that must be pure to the error message
/// that should be reported if they are not pure
type req_maps = {
- req_loan_map: HashMap<ast::node_id, @DVec<@DVec<loan>>>,
+ req_loan_map: HashMap<ast::node_id, @DVec<Loan>>,
pure_map: HashMap<ast::node_id, bckerr>
};
// Misc
impl borrowck_ctxt {
- fn is_subregion_of(r_sub: ty::region, r_sup: ty::region) -> bool {
+ fn is_subregion_of(r_sub: ty::Region, r_sup: ty::Region) -> bool {
region::is_subregion_of(self.tcx.region_map, r_sub, r_sup)
}
method_map: self.method_map};
mc.mut_to_str(mutbl)
}
+
+ fn loan_to_repr(loan: &Loan) -> ~str {
+ fmt!("Loan(lp=%?, cmt=%s, mutbl=%?)",
+ loan.lp, self.cmt_to_repr(loan.cmt), loan.mutbl)
+ }
}
// The inherent mutability of a component is its default mutability
reported: HashMap<ast::node_id, ()>,
- // Keep track of whether we're inside a ctor, so as to
- // allow mutating immutable fields in the same class if
- // we are in a ctor, we track the self id
- mut in_ctor: bool,
mut declared_purity: ast::purity,
mut fn_args: @~[ast::node_id]
};
let clcx = check_loan_ctxt(@{bccx: bccx,
req_maps: req_maps,
reported: HashMap(),
- mut in_ctor: false,
mut declared_purity: ast::impure_fn,
mut fn_args: @~[]});
let vt = visit::mk_vt(@{visit_expr: check_loans_in_expr,
}
}
- fn walk_loans(scope_id: ast::node_id,
- f: fn(v: &loan) -> bool) {
+ fn walk_loans(scope_id: ast::node_id, f: fn(v: &Loan) -> bool) {
let mut scope_id = scope_id;
let region_map = self.tcx().region_map;
let req_loan_map = self.req_maps.req_loan_map;
loop {
- for req_loan_map.find(scope_id).each |loanss| {
- for loanss.each |loans| {
- for loans.each |loan| {
- if !f(loan) { return; }
- }
+ for req_loan_map.find(scope_id).each |loans| {
+ for loans.each |loan| {
+ if !f(loan) { return; }
}
}
fn walk_loans_of(scope_id: ast::node_id,
lp: @loan_path,
- f: fn(v: &loan) -> bool) {
+ f: fn(v: &Loan) -> bool) {
for self.walk_loans(scope_id) |loan| {
if loan.lp == lp {
if !f(loan) { return; }
}
fn check_for_conflicting_loans(scope_id: ast::node_id) {
- let new_loanss = match self.req_maps.req_loan_map.find(scope_id) {
+ debug!("check_for_conflicting_loans(scope_id=%?)", scope_id);
+
+ let new_loans = match self.req_maps.req_loan_map.find(scope_id) {
None => return,
- Some(loanss) => loanss
+ Some(loans) => loans
};
+ debug!("new_loans has length %?", new_loans.len());
+
let par_scope_id = self.tcx().region_map.get(scope_id);
for self.walk_loans(par_scope_id) |old_loan| {
- for new_loanss.each |new_loans| {
- for new_loans.each |new_loan| {
- if old_loan.lp != new_loan.lp { loop; }
- match (old_loan.mutbl, new_loan.mutbl) {
- (m_const, _) | (_, m_const) |
- (m_mutbl, m_mutbl) | (m_imm, m_imm) => {
- /*ok*/
- }
-
- (m_mutbl, m_imm) | (m_imm, m_mutbl) => {
- self.bccx.span_err(
- new_loan.cmt.span,
- fmt!("loan of %s as %s \
- conflicts with prior loan",
- self.bccx.cmt_to_str(new_loan.cmt),
- self.bccx.mut_to_str(new_loan.mutbl)));
- self.bccx.span_note(
- old_loan.cmt.span,
- fmt!("prior loan as %s granted here",
- self.bccx.mut_to_str(old_loan.mutbl)));
- }
- }
- }
+ debug!("old_loan=%?", self.bccx.loan_to_repr(old_loan));
+
+ for new_loans.each |new_loan| {
+ self.report_error_if_loans_conflict(old_loan, new_loan);
+ }
+ }
+
+ let len = new_loans.len();
+ for uint::range(0, len) |i| {
+ let loan_i = new_loans[i];
+ for uint::range(i+1, len) |j| {
+ let loan_j = new_loans[j];
+ self.report_error_if_loans_conflict(&loan_i, &loan_j);
+ }
+ }
+ }
+
+ fn report_error_if_loans_conflict(&self,
+ old_loan: &Loan,
+ new_loan: &Loan) {
+ if old_loan.lp != new_loan.lp {
+ return;
+ }
+
+ match (old_loan.mutbl, new_loan.mutbl) {
+ (m_const, _) | (_, m_const) |
+ (m_mutbl, m_mutbl) | (m_imm, m_imm) => {
+ /*ok*/
+ }
+
+ (m_mutbl, m_imm) | (m_imm, m_mutbl) => {
+ self.bccx.span_err(
+ new_loan.cmt.span,
+ fmt!("loan of %s as %s \
+ conflicts with prior loan",
+ self.bccx.cmt_to_str(new_loan.cmt),
+ self.bccx.mut_to_str(new_loan.mutbl)));
+ self.bccx.span_note(
+ old_loan.cmt.span,
+ fmt!("prior loan as %s granted here",
+ self.bccx.mut_to_str(old_loan.mutbl)));
}
}
}
debug!("check_assignment(cmt=%s)",
self.bccx.cmt_to_repr(cmt));
- if self.in_ctor && self.is_self_field(cmt)
- && at.checked_by_liveness() {
- // assigning to self.foo in a ctor is always allowed.
- } else if self.is_local_variable(cmt) && at.checked_by_liveness() {
+ if self.is_local_variable(cmt) && at.checked_by_liveness() {
// liveness guarantees that immutable local variables
// are only assigned once
} else {
visitor: visit::vt<check_loan_ctxt>) {
debug!("purity on entry=%?", copy self.declared_purity);
- do save_and_restore(&mut(self.in_ctor)) {
- do save_and_restore(&mut(self.declared_purity)) {
- do save_and_restore(&mut(self.fn_args)) {
- let is_stack_closure = self.is_stack_closure(id);
- let fty = ty::node_id_to_type(self.tcx(), id);
- self.declared_purity = ty::determine_inherited_purity(
- copy self.declared_purity,
- ty::ty_fn_purity(fty),
- ty::ty_fn_proto(fty));
-
- // In principle, we could consider fk_anon(*) or
- // fk_fn_block(*) to be in a ctor, I suppose, but the
- // purpose of the in_ctor flag is to allow modifications
- // of otherwise immutable fields and typestate wouldn't be
- // able to "see" into those functions anyway, so it
- // wouldn't be very helpful.
- match fk {
- visit::fk_ctor(*) => {
- self.in_ctor = true;
- self.fn_args = @decl.inputs.map(|i| i.id );
- }
- visit::fk_anon(*) |
- visit::fk_fn_block(*) if is_stack_closure => {
- self.in_ctor = false;
+ do save_and_restore(&mut(self.declared_purity)) {
+ do save_and_restore(&mut(self.fn_args)) {
+ let is_stack_closure = self.is_stack_closure(id);
+ let fty = ty::node_id_to_type(self.tcx(), id);
+ self.declared_purity = ty::determine_inherited_purity(
+ copy self.declared_purity,
+ ty::ty_fn_purity(fty),
+ ty::ty_fn_proto(fty));
+
+ match fk {
+ visit::fk_anon(*) |
+ visit::fk_fn_block(*) if is_stack_closure => {
// inherits the fn_args from enclosing ctxt
- }
- visit::fk_anon(*) | visit::fk_fn_block(*) |
- visit::fk_method(*) | visit::fk_item_fn(*) |
- visit::fk_dtor(*) => {
- self.in_ctor = false;
+ }
+ visit::fk_anon(*) | visit::fk_fn_block(*) |
+ visit::fk_method(*) | visit::fk_item_fn(*) |
+ visit::fk_dtor(*) => {
self.fn_args = @decl.inputs.map(|i| i.id );
- }
}
-
- visit::visit_fn(fk, decl, body, sp, id, self, visitor);
}
+
+ visit::visit_fn(fk, decl, body, sp, id, self, visitor);
}
}
debug!("purity on exit=%?", copy self.declared_purity);
self.root_ub = body.node.id;
match fk {
- visit::fk_anon(*) | visit::fk_fn_block(*) => {}
- visit::fk_item_fn(*) | visit::fk_method(*) |
- visit::fk_ctor(*) | visit::fk_dtor(*) => {
- self.item_ub = body.node.id;
- }
+ visit::fk_anon(*) | visit::fk_fn_block(*) => {}
+ visit::fk_item_fn(*) | visit::fk_method(*) |
+ visit::fk_dtor(*) => {
+ self.item_ub = body.node.id;
+ }
}
visit::visit_fn(fk, decl, body, sp, id, self, v);
let arg_cmt = self.bccx.cat_expr(*arg);
self.guarantee_valid(arg_cmt, m_imm, scope_r);
}
- ast::by_val => {
- // FIXME (#2493): safety checks would be required here,
- // but the correct set is really hard to get right,
- // and modes are going away anyhow.
- }
- ast::by_move | ast::by_copy => {}
+ ast::by_val | ast::by_move | ast::by_copy => {}
}
}
visit::visit_expr(ex, self, vt);
}
impl gather_loan_ctxt {
- fn tcx() -> ty::ctxt { self.bccx.tcx }
+ fn tcx(&self) -> ty::ctxt { self.bccx.tcx }
- fn guarantee_adjustments(expr: @ast::expr,
+ fn guarantee_adjustments(&self,
+ expr: @ast::expr,
adjustment: &ty::AutoAdjustment) {
debug!("guarantee_adjustments(expr=%s, adjustment=%?)",
expr_repr(self.tcx(), expr), adjustment);
// out loans, which will be added to the `req_loan_map`. This can
// also entail "rooting" GC'd pointers, which means ensuring
// dynamically that they are not freed.
- fn guarantee_valid(cmt: cmt,
+ fn guarantee_valid(&self,
+ cmt: cmt,
req_mutbl: ast::mutability,
- scope_r: ty::region) {
+ scope_r: ty::Region) {
self.bccx.guaranteed_paths += 1;
// it within that scope, the loan will be detected and an
// error will be reported.
Some(_) => {
- match self.bccx.loan(cmt, scope_r, req_mutbl) {
- Err(e) => { self.bccx.report(e); }
- Ok(loans) if loans.len() == 0 => {}
- Ok(loans) => {
- match scope_r {
- ty::re_scope(scope_id) => {
- self.add_loans(scope_id, loans);
-
- if req_mutbl == m_imm && cmt.mutbl != m_imm {
- self.bccx.loaned_paths_imm += 1;
-
- if self.tcx().sess.borrowck_note_loan() {
- self.bccx.span_note(
- cmt.span,
- fmt!("immutable loan required"));
- }
- } else {
- self.bccx.loaned_paths_same += 1;
- }
- }
- _ => {
- self.bccx.tcx.sess.span_bug(
- cmt.span,
- #fmt["loans required but scope is scope_region is %s",
- region_to_str(self.tcx(), scope_r)]);
+ match self.bccx.loan(cmt, scope_r, req_mutbl) {
+ Err(e) => { self.bccx.report(e); }
+ Ok(move loans) => {
+ self.add_loans(cmt, req_mutbl, scope_r, move loans);
}
- }
}
- }
}
// The path is not loanable: in that case, we must try and
// has type `@mut{f:int}`, this check might fail because `&x.f`
// reqires an immutable pointer, but `f` lives in (aliased)
// mutable memory.
- fn check_mutbl(req_mutbl: ast::mutability,
+ fn check_mutbl(&self,
+ req_mutbl: ast::mutability,
cmt: cmt) -> bckres<preserve_condition> {
debug!("check_mutbl(req_mutbl=%?, cmt.mutbl=%?)",
req_mutbl, cmt.mutbl);
}
}
- fn add_loans(scope_id: ast::node_id, loans: @DVec<loan>) {
+ fn add_loans(&self,
+ cmt: cmt,
+ req_mutbl: ast::mutability,
+ scope_r: ty::Region,
+ +loans: ~[Loan]) {
+ if loans.len() == 0 {
+ return;
+ }
+
+ let scope_id = match scope_r {
+ ty::re_scope(scope_id) => scope_id,
+ _ => {
+ self.bccx.tcx.sess.span_bug(
+ cmt.span,
+ fmt!("loans required but scope is scope_region is %s",
+ region_to_str(self.tcx(), scope_r)));
+ }
+ };
+
+ self.add_loans_to_scope_id(scope_id, move loans);
+
+ if req_mutbl == m_imm && cmt.mutbl != m_imm {
+ self.bccx.loaned_paths_imm += 1;
+
+ if self.tcx().sess.borrowck_note_loan() {
+ self.bccx.span_note(
+ cmt.span,
+ fmt!("immutable loan required"));
+ }
+ } else {
+ self.bccx.loaned_paths_same += 1;
+ }
+ }
+
+ fn add_loans_to_scope_id(&self, scope_id: ast::node_id, +loans: ~[Loan]) {
debug!("adding %u loans to scope_id %?", loans.len(), scope_id);
match self.req_maps.req_loan_map.find(scope_id) {
- Some(l) => {
- l.push(loans);
+ Some(req_loans) => {
+ req_loans.push_all(loans);
}
None => {
- self.req_maps.req_loan_map.insert(
- scope_id, @dvec::from_vec(~[loans]));
+ let dvec = @dvec::from_vec(move loans);
+ self.req_maps.req_loan_map.insert(scope_id, dvec);
}
}
}
- fn gather_pat(discr_cmt: cmt, root_pat: @ast::pat,
- arm_id: ast::node_id, alt_id: ast::node_id) {
+ fn gather_pat(&self,
+ discr_cmt: cmt,
+ root_pat: @ast::pat,
+ arm_id: ast::node_id,
+ alt_id: ast::node_id) {
do self.bccx.cat_pattern(discr_cmt, root_pat) |cmt, pat| {
match pat.node {
ast::pat_ident(bm, _, _) if !self.pat_is_variant(pat) => {
}
}
- fn pat_is_variant(pat: @ast::pat) -> bool {
+ fn pat_is_variant(&self, pat: @ast::pat) -> bool {
pat_util::pat_is_variant(self.bccx.tcx.def_map, pat)
}
}
impl borrowck_ctxt {
fn loan(cmt: cmt,
- scope_region: ty::region,
- mutbl: ast::mutability) -> bckres<@DVec<loan>> {
- let lc = loan_ctxt_(@{bccx: self,
- scope_region: scope_region,
- loans: @DVec()});
+ scope_region: ty::Region,
+ mutbl: ast::mutability) -> bckres<~[Loan]> {
+ let lc = LoanContext {
+ bccx: self,
+ scope_region: scope_region,
+ loans: ~[]
+ };
match lc.loan(cmt, mutbl) {
- Ok(()) => {Ok(lc.loans)}
- Err(e) => {Err(e)}
+ Err(e) => Err(e),
+ Ok(()) => {
+ let LoanContext {loans, _} = move lc;
+ Ok(loans)
+ }
}
}
}
-type loan_ctxt_ = {
+struct LoanContext {
bccx: borrowck_ctxt,
// the region scope for which we must preserve the memory
- scope_region: ty::region,
+ scope_region: ty::Region,
// accumulated list of loans that will be required
- loans: @DVec<loan>
-};
-
-enum loan_ctxt {
- loan_ctxt_(@loan_ctxt_)
+ mut loans: ~[Loan]
}
-impl loan_ctxt {
- fn tcx() -> ty::ctxt { self.bccx.tcx }
+impl LoanContext {
+ fn tcx(&self) -> ty::ctxt { self.bccx.tcx }
- fn issue_loan(cmt: cmt,
- scope_ub: ty::region,
+ fn issue_loan(&self,
+ cmt: cmt,
+ scope_ub: ty::Region,
req_mutbl: ast::mutability) -> bckres<()> {
if self.bccx.is_subregion_of(self.scope_region, scope_ub) {
match req_mutbl {
}
}
- (*self.loans).push({
+ self.loans.push(Loan {
// Note: cmt.lp must be Some(_) because otherwise this
// loan process does not apply at all.
lp: cmt.lp.get(),
cmt: cmt,
- mutbl: req_mutbl});
+ mutbl: req_mutbl
+ });
return Ok(());
} else {
// The loan being requested lives longer than the data
}
}
- fn loan(cmt: cmt, req_mutbl: ast::mutability) -> bckres<()> {
+ fn loan(&self, cmt: cmt, req_mutbl: ast::mutability) -> bckres<()> {
debug!("loan(%s, %s)",
self.bccx.cmt_to_repr(cmt),
self.bccx.mut_to_str(req_mutbl));
cat_discr(base, _) => {
self.loan(base, req_mutbl)
}
- cat_comp(cmt_base, comp_field(*)) |
- cat_comp(cmt_base, comp_index(*)) |
- cat_comp(cmt_base, comp_tuple) => {
+ cat_comp(cmt_base, comp_field(_, m)) |
+ cat_comp(cmt_base, comp_index(_, m)) => {
// For most components, the type of the embedded data is
// stable. Therefore, the base structure need only be
// const---unless the component must be immutable. In
// that case, it must also be embedded in an immutable
// location, or else the whole structure could be
// overwritten and the component along with it.
- self.loan_stable_comp(cmt, cmt_base, req_mutbl)
+ self.loan_stable_comp(cmt, cmt_base, req_mutbl, m)
+ }
+ cat_comp(cmt_base, comp_tuple) => {
+ // As above.
+ self.loan_stable_comp(cmt, cmt_base, req_mutbl, m_imm)
}
cat_comp(cmt_base, comp_variant(enum_did)) => {
// For enums, the memory is unstable if there are multiple
// variants, because if the enum value is overwritten then
// the memory changes type.
if ty::enum_is_univariant(self.bccx.tcx, enum_did) {
- self.loan_stable_comp(cmt, cmt_base, req_mutbl)
+ self.loan_stable_comp(cmt, cmt_base, req_mutbl, m_imm)
} else {
self.loan_unstable_deref(cmt, cmt_base, req_mutbl)
}
// A "stable component" is one where assigning the base of the
// component cannot cause the component itself to change types.
// Example: record fields.
- fn loan_stable_comp(cmt: cmt,
+ fn loan_stable_comp(&self,
+ cmt: cmt,
cmt_base: cmt,
- req_mutbl: ast::mutability) -> bckres<()> {
- let base_mutbl = match req_mutbl {
- m_imm => m_imm,
- m_const | m_mutbl => m_const
+ req_mutbl: ast::mutability,
+ comp_mutbl: ast::mutability) -> bckres<()> {
+ // Determine the mutability that the base component must have,
+ // given the required mutability of the pointer (`req_mutbl`)
+ // and the declared mutability of the component (`comp_mutbl`).
+ // This is surprisingly subtle.
+ //
+ // Note that the *declared* mutability of the component is not
+ // necessarily the same as cmt.mutbl, since a component
+ // declared as immutable but embedded in a mutable context
+ // becomes mutable. It's best to think of comp_mutbl as being
+ // either MUTABLE or DEFAULT, not MUTABLE or IMMUTABLE. We
+ // should really patch up the AST to reflect this distinction.
+ //
+ // Let's consider the cases below:
+ //
+ // 1. mut required, mut declared: In this case, the base
+ // component must merely be const. The reason is that it
+ // does not matter if the base component is borrowed as
+ // mutable or immutable, as the mutability of the base
+ // component is overridden in the field declaration itself
+ // (see `compile-fail/borrowck-mut-field-imm-base.rs`)
+ //
+ // 2. mut required, imm declared: This would only be legal if
+ // the component is embedded in a mutable context. However,
+ // we detect mismatches between the mutability of the value
+ // as a whole and the required mutability in `issue_loan()`
+ // above. In any case, presuming that the component IS
+ // embedded in a mutable context, both the component and
+ // the base must be loaned as MUTABLE. This is to ensure
+ // that there is no loan of the base as IMMUTABLE, which
+ // would imply that the component must be IMMUTABLE too
+ // (see `compile-fail/borrowck-imm-field-imm-base.rs`).
+ //
+ // 3. mut required, const declared: this shouldn't really be
+ // possible, since I don't think you can declare a const
+ // field, but I guess if we DID permit such a declaration
+ // it would be equivalent to the case above?
+ //
+ // 4. imm required, * declared: In this case, the base must be
+ // immutable. This is true regardless of what was declared
+ // for this subcomponent; thus if the base is mutable, the
+ // subcomponent must be mutable.
+ // (see `compile-fail/borrowck-imm-field-mut-base.rs`).
+ //
+ // 5. const required, * declared: In this case, the base need
+ // only be const, since we don't ultimately care whether
+ // the subcomponent is mutable or not.
+ let base_mutbl = match (req_mutbl, comp_mutbl) {
+ (m_mutbl, m_mutbl) => m_const, // (1)
+ (m_mutbl, _) => m_mutbl, // (2, 3)
+ (m_imm, _) => m_imm, // (4)
+ (m_const, _) => m_const // (5)
};
do self.loan(cmt_base, base_mutbl).chain |_ok| {
// An "unstable deref" means a deref of a ptr/comp where, if the
// base of the deref is assigned to, pointers into the result of the
// deref would be invalidated. Examples: interior of variants, uniques.
- fn loan_unstable_deref(cmt: cmt,
+ fn loan_unstable_deref(&self,
+ cmt: cmt,
cmt_base: cmt,
req_mutbl: ast::mutability) -> bckres<()> {
// Variant components: the base must be immutable, because
impl borrowck_ctxt {
fn preserve(cmt: cmt,
- scope_region: ty::region,
+ scope_region: ty::Region,
item_ub: ast::node_id,
root_ub: ast::node_id)
-> bckres<preserve_condition> {
bccx: borrowck_ctxt,
// the region scope for which we must preserve the memory
- scope_region: ty::region,
+ scope_region: ty::Region,
// the scope for the body of the enclosing fn/method item
item_ub: ast::node_id,
/// Checks that the scope for which the value must be preserved
/// is a subscope of `scope_ub`; if so, success.
fn compare_scope(cmt: cmt,
- scope_ub: ty::region) -> bckres<preserve_condition> {
+ scope_ub: ty::Region) -> bckres<preserve_condition> {
if self.bccx.is_subregion_of(self.scope_region, scope_ub) {
Ok(pc_ok)
} else {
// we can only root values if the desired region is some concrete
// scope within the fn body
ty::re_scope(scope_id) => {
- #debug["Considering root map entry for %s: \
+ debug!("Considering root map entry for %s: \
node %d:%u -> scope_id %?, root_ub %?",
self.bccx.cmt_to_repr(cmt), base.id,
- derefs, scope_id, self.root_ub];
+ derefs, scope_id, self.root_ub);
if self.bccx.is_subregion_of(self.scope_region, root_region) {
- #debug["Elected to root"];
+ debug!("Elected to root");
let rk = {id: base.id, derefs: derefs};
self.bccx.root_map.insert(rk, scope_id);
return Ok(pc_ok);
} else {
- #debug["Unable to root"];
+ debug!("Unable to root");
return Err({cmt:cmt,
code:err_out_of_root_scope(root_region,
self.scope_region)});
use syntax::{ast, ast_util};
-use driver::session::session;
use syntax::codemap::span;
use std::map;
use std::map::HashMap;
use util::ppaux::ty_to_str;
use pat_util::*;
use syntax::visit;
-use driver::session::session;
use middle::ty;
use middle::ty::*;
use std::map::HashMap;
if arms.is_empty() {
if !type_is_empty(tcx, pat_ty) {
// We know the type is inhabited, so this must be wrong
- tcx.sess.span_err(ex.span, #fmt("non-exhaustive patterns: \
+ tcx.sess.span_err(ex.span, fmt!("non-exhaustive patterns: \
type %s is non-empty", ty_to_str(tcx, pat_ty)));
}
// If the type *is* empty, it's vacuously exhaustive
use syntax::ast::*;
use syntax::{visit, ast_util, ast_map};
-use driver::session::session;
+use driver::session::Session;
use std::map::HashMap;
use dvec::DVec;
-fn check_crate(sess: session, crate: @crate, ast_map: ast_map::map,
+fn check_crate(sess: Session, crate: @crate, ast_map: ast_map::map,
def_map: resolve::DefMap,
method_map: typeck::method_map, tcx: ty::ctxt) {
visit::visit_crate(*crate, false, visit::mk_vt(@{
sess.abort_if_errors();
}
-fn check_item(sess: session, ast_map: ast_map::map,
+fn check_item(sess: Session, ast_map: ast_map::map,
def_map: resolve::DefMap,
it: @item, &&_is_const: bool, v: visit::vt<bool>) {
match it.node {
}
}
-fn check_expr(sess: session, def_map: resolve::DefMap,
+fn check_expr(sess: Session, def_map: resolve::DefMap,
method_map: typeck::method_map, tcx: ty::ctxt,
e: @expr, &&is_const: bool, v: visit::vt<bool>) {
if is_const {
// Make sure a const item doesn't recursively refer to itself
// FIXME: Should use the dependency graph when it's available (#1356)
-fn check_item_recursion(sess: session, ast_map: ast_map::map,
+fn check_item_recursion(sess: Session, ast_map: ast_map::map,
def_map: resolve::DefMap, it: @item) {
type env = {
root_it: @item,
- sess: session,
+ sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
idstack: @DVec<node_id>,
use syntax::ast::*;
use syntax::visit;
-use driver::session::session;
type ctx = {in_loop: bool, can_ret: bool};
-use syntax::{ast,ast_util,visit};
+use syntax::{ast,ast_map,ast_util,visit};
use ast::*;
//
// target uses". This _includes_ integer-constants, plus the following
// constructors:
//
-// fixed-size vectors and strings: []/_ and ""/_
+// fixed-size vectors and strings: [] and ""/_
// vector and string slices: &[] and &""
// tuples: (,)
// records: {...}
classify(base, def_map, tcx)
}
- // FIXME: #1272, we can probably do something CCI-ish
+ // FIXME: (#3728) we can probably do something CCI-ish
// surrounding nonlocal constants. But we don't yet.
ast::expr_path(_) => {
- match def_map.find(e.id) {
- Some(ast::def_const(def_id)) => {
- if ast_util::is_local(def_id) {
- let ty = ty::expr_ty(tcx, e);
- if ty::type_is_integral(ty) {
- integral_const
- } else {
- general_const
- }
- } else {
- non_const
- }
- }
- Some(_) => {
- non_const
- }
- None => {
- tcx.sess.span_bug(e.span,
- ~"unknown path when \
- classifying constants");
- }
- }
+ lookup_constness(tcx, e)
}
_ => non_const
}
}
+fn lookup_const(tcx: ty::ctxt, e: @expr) -> Option<@expr> {
+ match tcx.def_map.find(e.id) {
+ Some(ast::def_const(def_id)) => {
+ if ast_util::is_local(def_id) {
+ match tcx.items.find(def_id.node) {
+ None => None,
+ Some(ast_map::node_item(it, _)) => match it.node {
+ item_const(_, const_expr) => Some(const_expr),
+ _ => None
+ },
+ Some(_) => None
+ }
+ }
+ else { None }
+ }
+ Some(_) => None,
+ None => None
+ }
+}
+
+fn lookup_constness(tcx: ty::ctxt, e: @expr) -> constness {
+ match lookup_const(tcx, e) {
+ Some(rhs) => {
+ let ty = ty::expr_ty(tcx, rhs);
+ if ty::type_is_integral(ty) {
+ integral_const
+ } else {
+ general_const
+ }
+ }
+ None => non_const
+ }
+}
+
fn process_crate(crate: @ast::crate,
def_map: resolve::DefMap,
tcx: ty::ctxt) {
pure fn ne(other: &const_val) -> bool { !self.eq(other) }
}
-// FIXME: issue #1417
fn eval_const_expr(tcx: middle::ty::ctxt, e: @expr) -> const_val {
+ match eval_const_expr_partial(tcx, e) {
+ Ok(r) => r,
+ Err(s) => fail s
+ }
+}
+
+fn eval_const_expr_partial(tcx: middle::ty::ctxt, e: @expr)
+ -> Result<const_val, ~str> {
use middle::ty;
- fn fromb(b: bool) -> const_val { const_int(b as i64) }
+ fn fromb(b: bool) -> Result<const_val, ~str> { Ok(const_int(b as i64)) }
match e.node {
expr_unary(neg, inner) => {
- match eval_const_expr(tcx, inner) {
- const_float(f) => const_float(-f),
- const_int(i) => const_int(-i),
- const_uint(i) => const_uint(-i),
- const_str(_) => fail ~"Negate on string",
- const_bool(_) => fail ~"Negate on boolean"
+ match eval_const_expr_partial(tcx, inner) {
+ Ok(const_float(f)) => Ok(const_float(-f)),
+ Ok(const_int(i)) => Ok(const_int(-i)),
+ Ok(const_uint(i)) => Ok(const_uint(-i)),
+ Ok(const_str(_)) => Err(~"Negate on string"),
+ Ok(const_bool(_)) => Err(~"Negate on boolean"),
+ err => err
}
}
expr_unary(not, inner) => {
- match eval_const_expr(tcx, inner) {
- const_int(i) => const_int(!i),
- const_uint(i) => const_uint(!i),
- const_bool(b) => const_bool(!b),
- _ => fail ~"Not on float or string"
+ match eval_const_expr_partial(tcx, inner) {
+ Ok(const_int(i)) => Ok(const_int(!i)),
+ Ok(const_uint(i)) => Ok(const_uint(!i)),
+ Ok(const_bool(b)) => Ok(const_bool(!b)),
+ _ => Err(~"Not on float or string")
}
}
expr_binary(op, a, b) => {
- match (eval_const_expr(tcx, a), eval_const_expr(tcx, b)) {
- (const_float(a), const_float(b)) => {
+ match (eval_const_expr_partial(tcx, a),
+ eval_const_expr_partial(tcx, b)) {
+ (Ok(const_float(a)), Ok(const_float(b))) => {
match op {
- add => const_float(a + b),
- subtract => const_float(a - b),
- mul => const_float(a * b),
- div => const_float(a / b),
- rem => const_float(a % b),
+ add => Ok(const_float(a + b)),
+ subtract => Ok(const_float(a - b)),
+ mul => Ok(const_float(a * b)),
+ div => Ok(const_float(a / b)),
+ rem => Ok(const_float(a % b)),
eq => fromb(a == b),
lt => fromb(a < b),
le => fromb(a <= b),
ne => fromb(a != b),
ge => fromb(a >= b),
gt => fromb(a > b),
- _ => fail ~"Can't do this op on floats"
+ _ => Err(~"Can't do this op on floats")
}
}
- (const_int(a), const_int(b)) => {
+ (Ok(const_int(a)), Ok(const_int(b))) => {
match op {
- add => const_int(a + b),
- subtract => const_int(a - b),
- mul => const_int(a * b),
- div => const_int(a / b),
- rem => const_int(a % b),
- and | bitand => const_int(a & b),
- or | bitor => const_int(a | b),
- bitxor => const_int(a ^ b),
- shl => const_int(a << b),
- shr => const_int(a >> b),
+ add => Ok(const_int(a + b)),
+ subtract => Ok(const_int(a - b)),
+ mul => Ok(const_int(a * b)),
+ div => Ok(const_int(a / b)),
+ rem => Ok(const_int(a % b)),
+ and | bitand => Ok(const_int(a & b)),
+ or | bitor => Ok(const_int(a | b)),
+ bitxor => Ok(const_int(a ^ b)),
+ shl => Ok(const_int(a << b)),
+ shr => Ok(const_int(a >> b)),
eq => fromb(a == b),
lt => fromb(a < b),
le => fromb(a <= b),
gt => fromb(a > b)
}
}
- (const_uint(a), const_uint(b)) => {
+ (Ok(const_uint(a)), Ok(const_uint(b))) => {
match op {
- add => const_uint(a + b),
- subtract => const_uint(a - b),
- mul => const_uint(a * b),
- div => const_uint(a / b),
- rem => const_uint(a % b),
- and | bitand => const_uint(a & b),
- or | bitor => const_uint(a | b),
- bitxor => const_uint(a ^ b),
- shl => const_uint(a << b),
- shr => const_uint(a >> b),
+ add => Ok(const_uint(a + b)),
+ subtract => Ok(const_uint(a - b)),
+ mul => Ok(const_uint(a * b)),
+ div => Ok(const_uint(a / b)),
+ rem => Ok(const_uint(a % b)),
+ and | bitand => Ok(const_uint(a & b)),
+ or | bitor => Ok(const_uint(a | b)),
+ bitxor => Ok(const_uint(a ^ b)),
+ shl => Ok(const_uint(a << b)),
+ shr => Ok(const_uint(a >> b)),
eq => fromb(a == b),
lt => fromb(a < b),
le => fromb(a <= b),
}
}
// shifts can have any integral type as their rhs
- (const_int(a), const_uint(b)) => {
+ (Ok(const_int(a)), Ok(const_uint(b))) => {
match op {
- shl => const_int(a << b),
- shr => const_int(a >> b),
- _ => fail ~"Can't do this op on an int and uint"
+ shl => Ok(const_int(a << b)),
+ shr => Ok(const_int(a >> b)),
+ _ => Err(~"Can't do this op on an int and uint")
}
}
- (const_uint(a), const_int(b)) => {
+ (Ok(const_uint(a)), Ok(const_int(b))) => {
match op {
- shl => const_uint(a << b),
- shr => const_uint(a >> b),
- _ => fail ~"Can't do this op on a uint and int"
+ shl => Ok(const_uint(a << b)),
+ shr => Ok(const_uint(a >> b)),
+ _ => Err(~"Can't do this op on a uint and int")
}
}
- (const_bool(a), const_bool(b)) => {
- const_bool(match op {
+ (Ok(const_bool(a)), Ok(const_bool(b))) => {
+ Ok(const_bool(match op {
and => a && b,
or => a || b,
bitxor => a ^ b,
bitor => a | b,
eq => a == b,
ne => a != b,
- _ => fail ~"Can't do this op on bools"
- })
+ _ => return Err(~"Can't do this op on bools")
+ }))
}
- _ => fail ~"Bad operands for binary"
+ _ => Err(~"Bad operands for binary")
}
}
expr_cast(base, _) => {
let ety = ty::expr_ty(tcx, e);
- let base = eval_const_expr(tcx, base);
+ let base = eval_const_expr_partial(tcx, base);
match ty::get(ety).sty {
ty::ty_float(_) => {
match base {
- const_uint(u) => const_float(u as f64),
- const_int(i) => const_float(i as f64),
- const_float(_) => base,
- _ => fail ~"Can't cast float to str"
+ Ok(const_uint(u)) => Ok(const_float(u as f64)),
+ Ok(const_int(i)) => Ok(const_float(i as f64)),
+ Ok(const_float(_)) => base,
+ _ => Err(~"Can't cast float to str")
}
}
ty::ty_uint(_) => {
match base {
- const_uint(_) => base,
- const_int(i) => const_uint(i as u64),
- const_float(f) => const_uint(f as u64),
- _ => fail ~"Can't cast str to uint"
+ Ok(const_uint(_)) => base,
+ Ok(const_int(i)) => Ok(const_uint(i as u64)),
+ Ok(const_float(f)) => Ok(const_uint(f as u64)),
+ _ => Err(~"Can't cast str to uint")
}
}
ty::ty_int(_) | ty::ty_bool => {
match base {
- const_uint(u) => const_int(u as i64),
- const_int(_) => base,
- const_float(f) => const_int(f as i64),
- _ => fail ~"Can't cast str to int"
+ Ok(const_uint(u)) => Ok(const_int(u as i64)),
+ Ok(const_int(_)) => base,
+ Ok(const_float(f)) => Ok(const_int(f as i64)),
+ _ => Err(~"Can't cast str to int")
}
}
- _ => fail ~"Can't cast this type"
+ _ => Err(~"Can't cast this type")
}
}
- expr_lit(lit) => lit_to_const(lit),
+ expr_path(_) => {
+ match lookup_const(tcx, e) {
+ Some(actual_e) => eval_const_expr_partial(tcx, actual_e),
+ None => Err(~"Non-constant path in constant expr")
+ }
+ }
+ expr_lit(lit) => Ok(lit_to_const(lit)),
// If we have a vstore, just keep going; it has to be a string
- expr_vstore(e, _) => eval_const_expr(tcx, e),
- _ => fail ~"Unsupported constant expr"
+ expr_vstore(e, _) => eval_const_expr_partial(tcx, e),
+ _ => Err(~"Unsupported constant expr")
}
}
use std::map::*;
use option::*;
use syntax::{ast, ast_util, visit};
-use syntax::ast::{serialize_span, deserialize_span};
use syntax::codemap::span;
export annotate_freevars;
export freevar_map;
export freevar_info;
-export freevar_entry, serialize_freevar_entry, deserialize_freevar_entry;
+export freevar_entry;
export get_freevars;
export has_freevars;
// A vector of defs representing the free variables referred to in a function.
// (The def_upvar will already have been stripped).
#[auto_serialize]
+#[auto_deserialize]
type freevar_entry = {
def: ast::def, //< The variable being accessed free.
span: span //< First span where it is accessed (there can be multiple)
use syntax::{visit, ast_util};
use syntax::ast::*;
use syntax::codemap::span;
-use ty::{kind, kind_copyable, kind_noncopyable, kind_const};
-use driver::session::session;
+use middle::ty::{Kind, kind_copyable, kind_noncopyable, kind_const};
use std::map::HashMap;
use util::ppaux::{ty_to_str, tys_to_str};
use syntax::print::pprust::expr_to_str;
// primitives in the stdlib are explicitly annotated to only take sendable
// types.
-fn kind_to_str(k: kind) -> ~str {
+const try_adding: &str = "Try adding a move";
+
+fn kind_to_str(k: Kind) -> ~str {
let mut kinds = ~[];
if ty::kind_lteq(kind_const(), k) {
tcx.sess.abort_if_errors();
}
+// bool flag is only used for checking closures,
+// where it refers to whether a var is 'move' in the
+// capture clause
type check_fn = fn@(ctx, node_id, Option<@freevar_entry>,
- bool, ty::t, sp: span);
+ bool, ty::t, sp: span);
// Yields the appropriate function to check the kind of closed over
// variables. `id` is the node_id for some expression that creates the
"to copy values into a ~fn closure, use a \
capture clause: `fn~(copy x)` or `|copy x|`")));
}
-
// check that only immutable variables are implicitly copied in
for fv.each |fv| {
check_imm_free_var(cx, fv.def, fv.span);
"to copy values into a @fn closure, use a \
capture clause: `fn~(copy x)` or `|copy x|`")));
}
-
// check that only immutable variables are implicitly copied in
for fv.each |fv| {
check_imm_free_var(cx, fv.def, fv.span);
}
fn check_for_bare(cx: ctx, _id: node_id, _fv: Option<@freevar_entry>,
- _is_move: bool,_var_t: ty::t, sp: span) {
+ _is_move: bool, _var_t: ty::t, sp: span) {
cx.tcx.sess.span_err(sp, ~"attempted dynamic environment capture");
}
// variables. This list is used below to avoid checking and reporting
// on a given variable twice.
let cap_clause = match fk {
- visit::fk_anon(_, cc) | visit::fk_fn_block(cc) => cc,
- visit::fk_item_fn(*) | visit::fk_method(*) |
- visit::fk_ctor(*) | visit::fk_dtor(*) => @~[]
+ visit::fk_anon(_, cc) | visit::fk_fn_block(cc) => cc,
+ visit::fk_item_fn(*) | visit::fk_method(*) |
+ visit::fk_dtor(*) => @~[]
};
let captured_vars = do (*cap_clause).map |cap_item| {
let cap_def = cx.tcx.def_map.get(cap_item.id);
let cap_def_id = ast_util::def_id_of_def(cap_def).node;
let ty = ty::node_id_to_type(cx.tcx, cap_def_id);
+ // Here's where is_move isn't always false...
chk(cx, fn_id, None, cap_item.is_move, ty, cap_item.span);
cap_def_id
};
// skip over free variables that appear in the cap clause
if captured_vars.contains(&id) { loop; }
- // if this is the last use of the variable, then it will be
- // a move and not a copy
- let is_move = {
- match cx.last_use_map.find(fn_id) {
- Some(vars) => (*vars).contains(&id),
- None => false
- }
- };
-
let ty = ty::node_id_to_type(cx.tcx, id);
- chk(cx, fn_id, Some(*fv), is_move, ty, fv.span);
+ // is_move is always false here. See the let captured_vars...
+ // code above for where it's not always false.
+ chk(cx, fn_id, Some(*fv), false, ty, fv.span);
}
}
fn check_block(b: blk, cx: ctx, v: visit::vt<ctx>) {
match b.node.expr {
- Some(ex) => maybe_copy(cx, ex, None),
+ Some(ex) => maybe_copy(cx, ex,
+ Some(("Tail expressions in blocks must be copyable",
+ try_adding))),
_ => ()
}
visit::visit_block(b, cx, v);
expr_assign(_, ex) |
expr_unary(box(_), ex) | expr_unary(uniq(_), ex) |
expr_ret(Some(ex)) => {
- maybe_copy(cx, ex, None);
+ maybe_copy(cx, ex, Some(("Returned values must be copyable",
+ try_adding)));
}
expr_cast(source, _) => {
- maybe_copy(cx, source, None);
+ maybe_copy(cx, source, Some(("Casted values must be copyable",
+ try_adding)));
check_cast_for_escaping_regions(cx, source, e);
}
- expr_copy(expr) => check_copy_ex(cx, expr, false, None),
+ expr_copy(expr) => check_copy_ex(cx, expr, false,
+ Some(("Explicit copy requires a copyable argument", ""))),
// Vector add copies, but not "implicitly"
- expr_assign_op(_, _, ex) => check_copy_ex(cx, ex, false, None),
+ expr_assign_op(_, _, ex) => check_copy_ex(cx, ex, false,
+ Some(("Assignment with operation requires \
+ a copyable argument", ""))),
expr_binary(add, ls, rs) => {
- check_copy_ex(cx, ls, false, None);
- check_copy_ex(cx, rs, false, None);
+ let reason = Some(("Binary operators require copyable arguments",
+ ""));
+ check_copy_ex(cx, ls, false, reason);
+ check_copy_ex(cx, rs, false, reason);
}
- expr_rec(fields, def) => {
- for fields.each |field| { maybe_copy(cx, field.node.expr, None); }
+ expr_rec(fields, def) | expr_struct(_, fields, def) => {
+ for fields.each |field| { maybe_copy(cx, field.node.expr,
+ Some(("Record or struct fields require \
+ copyable arguments", ""))); }
match def {
Some(ex) => {
// All noncopyable fields must be overridden
let t = ty::expr_ty(cx.tcx, ex);
let ty_fields = match ty::get(t).sty {
ty::ty_rec(f) => f,
- _ => cx.tcx.sess.span_bug(ex.span, ~"bad expr type in record")
+ ty::ty_class(did, substs) =>
+ ty::class_items_as_fields(cx.tcx, did, &substs),
+ _ => cx.tcx.sess.span_bug(ex.span,
+ ~"bad base expr type in record")
};
for ty_fields.each |tf| {
if !vec::any(fields, |f| f.node.ident == tf.ident ) &&
!ty::kind_can_be_copied(ty::type_kind(cx.tcx, tf.mt.ty)) {
- cx.tcx.sess.span_err(ex.span,
+ cx.tcx.sess.span_err(e.span,
~"copying a noncopyable value");
}
}
}
}
expr_tup(exprs) | expr_vec(exprs, _) => {
- for exprs.each |expr| { maybe_copy(cx, *expr, None); }
+ for exprs.each |expr| { maybe_copy(cx, *expr,
+ Some(("Tuple or vec elements must be copyable", ""))); }
}
expr_call(f, args, _) => {
- let mut i = 0u;
- for ty::ty_fn_args(ty::expr_ty(cx.tcx, f)).each |arg_t| {
+ for ty::ty_fn_args(ty::expr_ty(cx.tcx, f)).eachi |i, arg_t| {
match ty::arg_mode(cx.tcx, *arg_t) {
- by_copy => maybe_copy(cx, args[i], None),
+ by_copy => maybe_copy(cx, args[i],
+ Some(("Callee takes its argument by copy", ""))),
by_ref | by_val | by_move => ()
}
- i += 1u;
}
}
expr_field(lhs, _, _) => {
match cx.method_map.find(e.id) {
Some(ref mme) => {
match ty::arg_mode(cx.tcx, mme.self_arg) {
- by_copy => maybe_copy(cx, lhs, None),
+ by_copy => maybe_copy(cx, lhs,
+ Some(("Method call takes its self argument by copy",
+ ""))),
by_ref | by_val | by_move => ()
}
}
expr_repeat(element, count_expr, _) => {
let count = ty::eval_repeat_count(cx.tcx, count_expr, e.span);
if count == 1 {
- maybe_copy(cx, element, None);
+ maybe_copy(cx, element, Some(("Trivial repeat takes its element \
+ by copy", "")));
} else {
let element_ty = ty::expr_ty(cx.tcx, element);
- check_copy(cx, element.id, element_ty, element.span, true, None);
+ check_copy(cx, element.id, element_ty, element.span, true,
+ Some(("Repeat takes its elements by copy", "")));
}
}
_ => { }
stmt_decl(@{node: decl_local(locals), _}, _) => {
for locals.each |local| {
match local.node.init {
- Some({op: init_assign, expr}) => maybe_copy(cx, expr, None),
+ Some({op: init_assign, expr}) =>
+ maybe_copy(cx, expr, Some(("Initializer statement \
+ takes its right-hand side by copy", ""))),
_ => {}
}
}
visit::visit_stmt(stmt, cx, v);
}
-fn check_ty(aty: @ty, cx: ctx, v: visit::vt<ctx>) {
+fn check_ty(aty: @Ty, cx: ctx, v: visit::vt<ctx>) {
match aty.node {
ty_path(_, id) => {
do option::iter(&cx.tcx.node_type_substs.find(id)) |ts| {
why: Option<(&str,&str)>) {
if ty::expr_is_lval(cx.tcx, cx.method_map, ex) &&
- // this is a move
- !cx.last_use_map.contains_key(ex.id) &&
-
// a reference to a constant like `none`... no need to warn
// about *this* even if the type is Option<~int>
!is_nullary_variant(cx, ex) &&
//
// * Functions called by the compiler itself.
-use driver::session::session;
+use driver::session::Session;
use metadata::csearch::{each_path, get_item_attrs};
use metadata::cstore::{iter_crate_data};
use metadata::decoder::{dl_def, dl_field, dl_impl};
mut log_type_fn: Option<def_id>
}
-mod LanguageItems {
+mod language_items {
#[legacy_exports];
fn make() -> LanguageItems {
LanguageItems {
}
}
-fn LanguageItemCollector(crate: @crate, session: session,
+fn LanguageItemCollector(crate: @crate, session: Session,
items: &r/LanguageItems)
-> LanguageItemCollector/&r {
items: &LanguageItems,
crate: @crate,
- session: session,
+ session: Session,
item_refs: HashMap<~str,&mut Option<def_id>>,
}
}
}
-fn collect_language_items(crate: @crate, session: session) -> LanguageItems {
- let items = LanguageItems::make();
+fn collect_language_items(crate: @crate, session: Session) -> LanguageItems {
+ let items = language_items::make();
let collector = LanguageItemCollector(crate, session, &items);
collector.collect();
copy items
use driver::session;
-use driver::session::session;
+use driver::session::Session;
use middle::ty;
use syntax::{ast, ast_util, visit};
use syntax::attr;
type ctxt_ = {dict: lint_dict,
curr: lint_modes,
is_default: bool,
- sess: session};
+ sess: Session};
enum ctxt {
ctxt_(ctxt_)
}
}
-fn build_settings_crate(sess: session::session, crate: @ast::crate) {
+fn build_settings_crate(sess: session::Session, crate: @ast::crate) {
let cx = ctxt_({dict: get_lint_dict(),
curr: std::smallintmap::mk(),
* Any use of the variable where the variable is dead afterwards is a
* last use.
*
- * # Extension to handle constructors
- *
- * Each field is assigned an index just as with local variables. A use of
- * `self` is considered a use of all fields. A use of `self.f` is just a use
- * of `f`.
- *
* # Implementation details
*
* The actual implementation contains two (nested) walks over the AST.
* - `no_ret_var`: a synthetic variable that is only 'read' from, the
* fallthrough node. This allows us to detect functions where we fail
* to return explicitly.
- *
- * - `self_var`: a variable representing 'self'
*/
use dvec::DVec;
use std::map::HashMap;
use syntax::{visit, ast_util};
-use syntax::print::pprust::{expr_to_str};
+use syntax::print::pprust::{expr_to_str, block_to_str};
use visit::vt;
-use syntax::codemap::span;
+use syntax::codemap::{span, span_to_str};
use syntax::ast::*;
-use driver::session::session;
use io::WriterUtil;
use capture::{cap_move, cap_drop, cap_copy, cap_ref};
pure fn ne(other: &LiveNodeKind) -> bool { !self.eq(other) }
}
+fn live_node_kind_to_str(lnk: LiveNodeKind, cx: ty::ctxt) -> ~str {
+ let cm = cx.sess.codemap;
+ match lnk {
+ FreeVarNode(s) => fmt!("Free var node [%s]", span_to_str(s, cm)),
+ ExprNode(s) => fmt!("Expr node [%s]", span_to_str(s, cm)),
+ VarDefNode(s) => fmt!("Var def node [%s]", span_to_str(s, cm)),
+ ExitNode => ~"Exit node"
+ }
+}
+
fn check_crate(tcx: ty::ctxt,
method_map: typeck::method_map,
crate: @crate) -> last_use_map {
}
impl LiveNode: to_str::ToStr {
- fn to_str() -> ~str { fmt!("ln(%u)", *self) }
+ pure fn to_str() -> ~str { fmt!("ln(%u)", *self) }
}
impl Variable: to_str::ToStr {
- fn to_str() -> ~str { fmt!("v(%u)", *self) }
+ pure fn to_str() -> ~str { fmt!("v(%u)", *self) }
}
// ______________________________________________________________________
fn invalid_node() -> LiveNode { LiveNode(uint::max_value) }
-enum RelevantDef { RelevantVar(node_id), RelevantSelf }
-
-type CaptureInfo = {ln: LiveNode, is_move: bool, rv: RelevantDef};
+struct CaptureInfo {
+ ln: LiveNode,
+ is_move: bool,
+ var_nid: node_id
+}
enum LocalKind {
FromMatch(binding_mode),
enum VarKind {
Arg(node_id, ident, rmode),
Local(LocalInfo),
- Field(ident),
Self,
ImplicitRet
}
-fn relevant_def(def: def) -> Option<RelevantDef> {
+fn relevant_def(def: def) -> Option<node_id> {
match def {
- def_self(_) => Some(RelevantSelf),
-
def_binding(nid, _) |
def_arg(nid, _) |
- def_local(nid, _) => Some(RelevantVar(nid)),
+ def_local(nid, _) => Some(nid),
_ => None
}
mut num_vars: uint,
live_node_map: HashMap<node_id, LiveNode>,
variable_map: HashMap<node_id, Variable>,
- field_map: HashMap<ident, Variable>,
capture_map: HashMap<node_id, @~[CaptureInfo]>,
mut var_kinds: ~[VarKind],
mut lnks: ~[LiveNodeKind],
tcx: tcx,
method_map: method_map,
last_use_map: last_use_map,
- num_live_nodes: 0u,
- num_vars: 0u,
+ num_live_nodes: 0,
+ num_vars: 0,
live_node_map: HashMap(),
variable_map: HashMap(),
capture_map: HashMap(),
- field_map: HashMap(),
var_kinds: ~[],
lnks: ~[]
}
fn add_live_node(lnk: LiveNodeKind) -> LiveNode {
let ln = LiveNode(self.num_live_nodes);
self.lnks.push(lnk);
- self.num_live_nodes += 1u;
+ self.num_live_nodes += 1;
- debug!("%s is of kind %?", ln.to_str(), lnk);
+ debug!("%s is of kind %s", ln.to_str(),
+ live_node_kind_to_str(lnk, self.tcx));
ln
}
fn add_variable(vk: VarKind) -> Variable {
let v = Variable(self.num_vars);
self.var_kinds.push(vk);
- self.num_vars += 1u;
+ self.num_vars += 1;
match vk {
- Local(LocalInfo {id:node_id, _}) |
- Arg(node_id, _, _) => {
- self.variable_map.insert(node_id, v);
- }
- Field(name) => {
- self.field_map.insert(name, v);
- }
- Self | ImplicitRet => {
- }
+ Local(LocalInfo {id:node_id, _}) |
+ Arg(node_id, _, _) => {
+ self.variable_map.insert(node_id, v);
+ }
+ Self | ImplicitRet => {
+ }
}
debug!("%s is %?", v.to_str(), vk);
fn variable_name(var: Variable) -> ~str {
match copy self.var_kinds[*var] {
- Local(LocalInfo {ident: nm, _}) |
- Arg(_, nm, _) => self.tcx.sess.str_of(nm),
- Field(nm) => ~"self." + self.tcx.sess.str_of(nm),
- Self => ~"self",
- ImplicitRet => ~"<implicit-ret>"
+ Local(LocalInfo {ident: nm, _}) |
+ Arg(_, nm, _) => self.tcx.sess.str_of(nm),
+ Self => ~"self",
+ ImplicitRet => ~"<implicit-ret>"
}
}
(*v).push(id);
}
Arg(_, _, by_ref) |
- Arg(_, _, by_val) | Self | Field(_) | ImplicitRet |
+ Arg(_, _, by_val) | Self | ImplicitRet |
Local(LocalInfo {kind: FromMatch(bind_by_implicit_ref), _}) => {
debug!("--but it is not owned");
}
// and so forth:
visit::visit_fn(fk, decl, body, sp, id, fn_maps, v);
- match fk {
- visit::fk_ctor(_, _, _, _, class_did) => {
- add_class_fields(fn_maps, class_did);
- }
- _ => {}
- }
-
// Special nodes and variables:
// - exit_ln represents the end of the fn, either by return or fail
// - implicit_ret_var is a pseudo-variable that represents
let specials = {
exit_ln: (*fn_maps).add_live_node(ExitNode),
fallthrough_ln: (*fn_maps).add_live_node(ExitNode),
- no_ret_var: (*fn_maps).add_variable(ImplicitRet),
- self_var: (*fn_maps).add_variable(Self)
+ no_ret_var: (*fn_maps).add_variable(ImplicitRet)
};
// compute liveness
});
check_vt.visit_block(body, lsets, check_vt);
lsets.check_ret(id, sp, fk, entry_ln);
- lsets.check_fields(sp, entry_ln);
lsets.warn_about_unused_args(sp, decl, entry_ln);
}
-fn add_class_fields(self: @IrMaps, did: def_id) {
- for ty::lookup_class_fields(self.tcx, did).each |field_ty| {
- assert field_ty.id.crate == local_crate;
- let var = self.add_variable(Field(field_ty.ident));
- self.field_map.insert(field_ty.ident, var);
- }
-}
-
fn visit_local(local: @local, &&self: @IrMaps, vt: vt<@IrMaps>) {
let def_map = self.tcx.def_map;
do pat_util::pat_bindings(def_map, local.node.pat) |_bm, p_id, sp, path| {
}
expr_fn(_, _, _, cap_clause) |
expr_fn_block(_, _, cap_clause) => {
+ // Interesting control flow (for loops can contain labeled
+ // breaks or continues)
+ self.add_live_node_for_node(expr.id, ExprNode(expr.span));
+
// Make a live_node for each captured variable, with the span
// being the location that the variable is used. This results
// in better error messages than just pointing at the closure
cap_move | cap_drop => true, // var must be dead afterwards
cap_copy | cap_ref => false // var can still be used
};
- call_caps.push({ln: cv_ln, is_move: is_move, rv: rv});
+ call_caps.push(CaptureInfo {ln: cv_ln,
+ is_move: is_move,
+ var_nid: rv});
}
None => {}
}
type Specials = {
exit_ln: LiveNode,
fallthrough_ln: LiveNode,
- no_ret_var: Variable,
- self_var: Variable
+ no_ret_var: Variable
};
const ACC_READ: uint = 1u;
const ACC_WRITE: uint = 2u;
const ACC_USE: uint = 4u;
+type LiveNodeMap = HashMap<node_id, LiveNode>;
+
struct Liveness {
tcx: ty::ctxt,
ir: @IrMaps,
s: Specials,
successors: ~[mut LiveNode],
users: ~[mut users],
- mut break_ln: LiveNode,
- mut cont_ln: LiveNode,
+ // The list of node IDs for the nested loop scopes
+ // we're in.
+ loop_scope: DVec<node_id>,
+ // mappings from loop node ID to LiveNode
+ // ("break" label should map to loop node ID,
+ // it probably doesn't now)
+ break_ln: LiveNodeMap,
+ cont_ln: LiveNodeMap
}
fn Liveness(ir: @IrMaps, specials: Specials) -> Liveness {
vec::to_mut(
vec::from_elem(ir.num_live_nodes * ir.num_vars,
invalid_users())),
- break_ln: invalid_node(),
- cont_ln: invalid_node()
+ loop_scope: DVec(),
+ break_ln: HashMap(),
+ cont_ln: HashMap()
}
}
}
}
- fn variable_from_rdef(rv: RelevantDef, span: span) -> Variable {
- match rv {
- RelevantSelf => self.s.self_var,
- RelevantVar(nid) => self.variable(nid, span)
- }
- }
-
fn variable_from_path(expr: @expr) -> Option<Variable> {
match expr.node {
expr_path(_) => {
let def = self.tcx.def_map.get(expr.id);
relevant_def(def).map(
- |rdef| self.variable_from_rdef(*rdef, expr.span)
+ |rdef| self.variable(*rdef, expr.span)
)
}
_ => None
match self.tcx.def_map.find(node_id) {
Some(def) => {
relevant_def(def).map(
- |rdef| self.variable_from_rdef(*rdef, span)
+ |rdef| self.variable(*rdef, span)
)
}
None => {
if reader.is_valid() {Some((*self.ir).lnk(reader))} else {None}
}
+ /*
+ Is this variable live on entry to any of its successor nodes?
+ */
fn live_on_exit(ln: LiveNode, var: Variable)
-> Option<LiveNodeKind> {
}
fn indices(ln: LiveNode, op: fn(uint)) {
- let node_base_idx = self.idx(ln, Variable(0u));
- for uint::range(0u, self.ir.num_vars) |var_idx| {
+ let node_base_idx = self.idx(ln, Variable(0));
+ for uint::range(0, self.ir.num_vars) |var_idx| {
op(node_base_idx + var_idx)
}
}
fn write_vars(wr: io::Writer,
ln: LiveNode,
test: fn(uint) -> LiveNode) {
- let node_base_idx = self.idx(ln, Variable(0u));
- for uint::range(0u, self.ir.num_vars) |var_idx| {
+ let node_base_idx = self.idx(ln, Variable(0));
+ for uint::range(0, self.ir.num_vars) |var_idx| {
let idx = node_base_idx + var_idx;
if test(idx).is_valid() {
wr.write_str(~" ");
}
}
+ fn find_loop_scope(opt_label: Option<ident>, id: node_id, sp: span)
+ -> node_id {
+ match opt_label {
+ Some(_) => // Refers to a labeled loop. Use the results of resolve
+ // to find with one
+ match self.tcx.def_map.find(id) {
+ Some(def_label(loop_id)) => loop_id,
+ _ => self.tcx.sess.span_bug(sp, ~"Label on break/loop \
+ doesn't refer to a loop")
+ },
+ None =>
+ // Vanilla 'break' or 'loop', so use the enclosing
+ // loop scope
+ if self.loop_scope.len() == 0 {
+ self.tcx.sess.span_bug(sp, ~"break outside loop");
+ }
+ else {
+ self.loop_scope.last()
+ }
+ }
+ }
+
fn ln_str(ln: LiveNode) -> ~str {
do io::with_str_writer |wr| {
wr.write_str(~"[ln(");
let idx = self.idx(ln, var);
let user = &mut self.users[idx];
- if (acc & ACC_WRITE) != 0u {
+ if (acc & ACC_WRITE) != 0 {
user.reader = invalid_node();
user.writer = ln;
}
// Important: if we both read/write, must do read second
// or else the write will override.
- if (acc & ACC_READ) != 0u {
+ if (acc & ACC_READ) != 0 {
user.reader = ln;
}
- if (acc & ACC_USE) != 0u {
+ if (acc & ACC_USE) != 0 {
self.users[idx].used = true;
}
// if there is a `break` or `again` at the top level, then it's
// effectively a return---this only occurs in `for` loops,
// where the body is really a closure.
+
+ debug!("compute: using id for block, %s", block_to_str(body,
+ self.tcx.sess.intr()));
+
let entry_ln: LiveNode =
- self.with_loop_nodes(self.s.exit_ln, self.s.exit_ln, || {
- self.propagate_through_fn_block(decl, body)
- });
+ self.with_loop_nodes(body.node.id, self.s.exit_ln, self.s.exit_ln,
+ || { self.propagate_through_fn_block(decl, body) });
- // hack to skip the loop unless #debug is enabled:
+ // hack to skip the loop unless debug! is enabled:
debug!("^^ liveness computation results for body %d (entry=%s)",
{
for uint::range(0u, self.ir.num_live_nodes) |ln_idx| {
- #debug["%s", self.ln_str(LiveNode(ln_idx))];
+ debug!("%s", self.ln_str(LiveNode(ln_idx)));
}
body.node.id
},
}
}
- // as above, the "self" variable is a non-owned variable
- self.acc(self.s.exit_ln, self.s.self_var, ACC_READ);
-
- // in a ctor, there is an implicit use of self.f for all fields f:
- for self.ir.field_map.each_value |var| {
- self.acc(self.s.exit_ln, var, ACC_READ|ACC_USE);
- }
-
// the fallthrough exit is only for those cases where we do not
// explicitly return:
self.init_from_succ(self.s.fallthrough_ln, self.s.exit_ln);
}
fn propagate_through_expr(expr: @expr, succ: LiveNode) -> LiveNode {
+ debug!("propagate_through_expr: %s",
+ expr_to_str(expr, self.tcx.sess.intr()));
+
match expr.node {
// Interesting cases with control flow or which gen/kill
expr_path(_) => {
- self.access_path(expr, succ, ACC_READ | ACC_USE)
+ self.access_path(expr, succ, ACC_READ | ACC_USE)
}
- expr_field(e, nm, _) => {
- // If this is a reference to `self.f` inside of a ctor,
- // then we treat it as a read of that variable.
- // Otherwise, we ignore it and just propagate down to
- // process `e`.
- match self.as_self_field(e, nm) {
- Some((ln, var)) => {
- self.init_from_succ(ln, succ);
- self.acc(ln, var, ACC_READ | ACC_USE);
- ln
- }
- None => {
- self.propagate_through_expr(e, succ)
- }
- }
+ expr_field(e, _, _) => {
+ self.propagate_through_expr(e, succ)
}
- expr_fn(*) | expr_fn_block(*) => {
- // the construction of a closure itself is not important,
- // but we have to consider the closed over variables.
- let caps = (*self.ir).captures(expr);
- do (*caps).foldr(succ) |cap, succ| {
- self.init_from_succ(cap.ln, succ);
- let var = self.variable_from_rdef(cap.rv, expr.span);
- self.acc(cap.ln, var, ACC_READ | ACC_USE);
- cap.ln
- }
+ expr_fn(_, _, blk, _) | expr_fn_block(_, blk, _) => {
+ debug!("%s is an expr_fn or expr_fn_block",
+ expr_to_str(expr, self.tcx.sess.intr()));
+
+ /*
+ The next-node for a break is the successor of the entire
+ loop. The next-node for a continue is the top of this loop.
+ */
+ self.with_loop_nodes(blk.node.id, succ,
+ self.live_node(expr.id, expr.span), || {
+
+ // the construction of a closure itself is not important,
+ // but we have to consider the closed over variables.
+ let caps = (*self.ir).captures(expr);
+ do (*caps).foldr(succ) |cap, succ| {
+ self.init_from_succ(cap.ln, succ);
+ let var = self.variable(cap.var_nid, expr.span);
+ self.acc(cap.ln, var, ACC_READ | ACC_USE);
+ cap.ln
+ }
+ })
}
expr_if(cond, then, els) => {
self.propagate_through_loop(expr, Some(cond), blk, succ)
}
+ // Note that labels have been resolved, so we don't need to look
+ // at the label ident
expr_loop(blk, _) => {
self.propagate_through_loop(expr, None, blk, succ)
}
}
expr_break(opt_label) => {
- if !self.break_ln.is_valid() {
- self.tcx.sess.span_bug(
- expr.span, ~"break with invalid break_ln");
- }
+ // Find which label this break jumps to
+ let sc = self.find_loop_scope(opt_label, expr.id, expr.span);
- if opt_label.is_some() {
- self.tcx.sess.span_unimpl(expr.span, ~"labeled break");
- }
+ // Now that we know the label we're going to,
+ // look it up in the break loop nodes table
- self.break_ln
+ match self.break_ln.find(sc) {
+ Some(b) => b,
+ None => self.tcx.sess.span_bug(expr.span,
+ ~"Break to unknown label")
+ }
}
expr_again(opt_label) => {
- if !self.cont_ln.is_valid() {
- self.tcx.sess.span_bug(
- expr.span, ~"cont with invalid cont_ln");
- }
+ // Find which label this expr continues to to
+ let sc = self.find_loop_scope(opt_label, expr.id, expr.span);
- if opt_label.is_some() {
- self.tcx.sess.span_unimpl(expr.span, ~"labeled again");
- }
+ // Now that we know the label we're going to,
+ // look it up in the continue loop nodes table
- self.cont_ln
+ match self.cont_ln.find(sc) {
+ Some(b) => b,
+ None => self.tcx.sess.span_bug(expr.span,
+ ~"Loop to unknown label")
+ }
}
expr_move(l, r) | expr_assign(l, r) => {
// In general, the full flow graph structure for an
// assignment/move/etc can be handled in one of two ways,
// depending on whether what is being assigned is a "tracked
- // value" or not. A tracked value is basically a local variable
- // or argument, or a self-field (`self.f`) in a ctor.
+ // value" or not. A tracked value is basically a local
+ // variable or argument.
//
// The two kinds of graphs are:
//
//
// # Tracked lvalues
//
- // A tracked lvalue is either a local variable/argument `x` or
- // else it is a self-field `self.f` in a constructor. In
+ // A tracked lvalue is a local variable/argument `x`. In
// these cases, the link_node where the write occurs is linked
- // to node id of `x` or `self`, respectively. The
- // `write_lvalue()` routine generates the contents of this
- // node. There are no subcomponents to consider.
+ // to node id of `x`. The `write_lvalue()` routine generates
+ // the contents of this node. There are no subcomponents to
+ // consider.
//
// # Non-tracked lvalues
//
// just ignore such cases and treat them as reads.
match expr.node {
- expr_path(_) => succ,
- expr_field(e, nm, _) => match self.as_self_field(e, nm) {
- Some(_) => succ,
- None => self.propagate_through_expr(e, succ)
- },
- _ => self.propagate_through_expr(expr, succ)
+ expr_path(_) => succ,
+ expr_field(e, _, _) => self.propagate_through_expr(e, succ),
+ _ => self.propagate_through_expr(expr, succ)
}
}
acc: uint) -> LiveNode {
match expr.node {
expr_path(_) => self.access_path(expr, succ, acc),
- expr_field(e, nm, _) => match self.as_self_field(e, nm) {
- Some((ln, var)) => {
- self.init_from_succ(ln, succ);
- self.acc(ln, var, acc);
- ln
- }
- None => succ
- },
// We do not track other lvalues, so just propagate through
// to their subcomponents. Also, it may happen that
fn access_path(expr: @expr, succ: LiveNode, acc: uint) -> LiveNode {
let def = self.tcx.def_map.get(expr.id);
match relevant_def(def) {
- Some(RelevantSelf) => {
- // Accessing `self` is like accessing every field of
- // the current object. This allows something like
- // `self = ...;` (it will be considered a write to
- // every field, sensibly enough), though the borrowck
- // pass will reject it later on.
- //
- // Also, note that, within a ctor at least, an
- // expression like `self.f` is "shortcircuiting"
- // before it reaches this point by the code for
- // expr_field.
- let ln = self.live_node(expr.id, expr.span);
- if acc != 0u {
- self.init_from_succ(ln, succ);
- for self.ir.field_map.each_value |var| {
- self.acc(ln, var, acc);
- }
- }
- ln
- }
- Some(RelevantVar(nid)) => {
+ Some(nid) => {
let ln = self.live_node(expr.id, expr.span);
if acc != 0u {
self.init_from_succ(ln, succ);
}
}
- fn as_self_field(expr: @expr,
- fld: ident) -> Option<(LiveNode,Variable)> {
- // If we checking a constructor, then we treat self.f as a
- // variable. we use the live_node id that will be assigned to
- // the reference to self but the variable id for `f`.
- match expr.node {
- expr_path(_) => {
- let def = self.tcx.def_map.get(expr.id);
- match def {
- def_self(_) => {
- // Note: the field_map is empty unless we are in a ctor
- return self.ir.field_map.find(fld).map(|var| {
- let ln = self.live_node(expr.id, expr.span);
- (ln, *var)
- });
- }
- _ => return None
- }
- }
- _ => return None
- }
- }
-
fn propagate_through_loop(expr: @expr,
cond: Option<@expr>,
body: blk,
*/
+
// first iteration:
let mut first_merge = true;
let ln = self.live_node(expr.id, expr.span);
self.merge_from_succ(ln, succ, first_merge);
first_merge = false;
}
+ debug!("propagate_through_loop: using id for loop body %d %s",
+ expr.id, block_to_str(body, self.tcx.sess.intr()));
+
let cond_ln = self.propagate_through_opt_expr(cond, ln);
- let body_ln = self.with_loop_nodes(succ, ln, || {
+ let body_ln = self.with_loop_nodes(expr.id, succ, ln, || {
self.propagate_through_block(body, cond_ln)
});
while self.merge_from_succ(ln, body_ln, first_merge) {
first_merge = false;
assert cond_ln == self.propagate_through_opt_expr(cond, ln);
- assert body_ln == self.with_loop_nodes(succ, ln, || {
+ assert body_ln == self.with_loop_nodes(expr.id, succ, ln,
+ || {
self.propagate_through_block(body, cond_ln)
});
}
cond_ln
}
- fn with_loop_nodes<R>(break_ln: LiveNode,
+ fn with_loop_nodes<R>(loop_node_id: node_id,
+ break_ln: LiveNode,
cont_ln: LiveNode,
f: fn() -> R) -> R {
- let bl = self.break_ln, cl = self.cont_ln;
- self.break_ln = break_ln;
- self.cont_ln = cont_ln;
- let r <- f();
- self.break_ln = bl;
- self.cont_ln = cl;
+ debug!("with_loop_nodes: %d %u", loop_node_id, *break_ln);
+ self.loop_scope.push(loop_node_id);
+ self.break_ln.insert(loop_node_id, break_ln);
+ self.cont_ln.insert(loop_node_id, cont_ln);
+ let r = f();
+ self.loop_scope.pop();
move r
}
}
expr_fn(*) | expr_fn_block(*) => {
let caps = (*self.ir).captures(expr);
for (*caps).each |cap| {
- let var = self.variable_from_rdef(cap.rv, expr.span);
+ let var = self.variable(cap.var_nid, expr.span);
self.consider_last_use(expr, cap.ln, var);
if cap.is_move {
self.check_move_from_var(expr.span, cap.ln, var);
match ty::resolved_mode(self.tcx, arg_ty.mode) {
by_val | by_copy | by_ref => {}
by_move => {
- self.check_move_from_expr(*arg_expr, vt);
+ if ty::expr_is_lval(self.tcx, self.ir.method_map,
+ *arg_expr) {
+ // Probably a bad error message (what's an rvalue?)
+ // but I can't think of anything better
+ self.tcx.sess.span_err(arg_expr.span,
+ #fmt("Move mode argument must be an rvalue: try \
+ (move %s) instead", expr_to_str(*arg_expr,
+ self.tcx.sess.intr())));
+ }
}
}
}
}
impl @Liveness {
- fn check_fields(sp: span, entry_ln: LiveNode) {
- for self.ir.field_map.each |nm, var| {
- match self.live_on_entry(entry_ln, var) {
- None => { /* ok */ }
- Some(ExitNode) => {
- self.tcx.sess.span_err(
- sp, fmt!("field `self.%s` is never initialized",
- self.tcx.sess.str_of(nm)));
- }
- Some(lnk) => {
- self.report_illegal_read(
- sp, lnk, var, PossiblyUninitializedField);
- }
- }
- }
- }
-
- fn check_ret(id: node_id, sp: span, fk: visit::fn_kind,
+ fn check_ret(id: node_id, sp: span, _fk: visit::fn_kind,
entry_ln: LiveNode) {
if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() {
// if no_ret_var is live, then we fall off the end of the
self.tcx.sess.span_err(
sp, ~"some control paths may return");
} else {
- match fk {
- visit::fk_ctor(*) => {
- // ctors are written as though they are unit.
- }
- _ => {
- self.tcx.sess.span_err(
- sp, ~"not all control paths return a value");
- }
- }
+ self.tcx.sess.span_err(
+ sp, ~"not all control paths return a value");
}
}
}
+ /*
+ Checks whether <var> is live on entry to any of the successors of <ln>.
+ If it is, report an error.
+ */
fn check_move_from_var(span: span, ln: LiveNode, var: Variable) {
debug!("check_move_from_var(%s, %s)",
ln.to_str(), var.to_str());
}
def => {
match relevant_def(def) {
- Some(RelevantVar(nid)) => {
+ Some(nid) => {
let ln = self.live_node(expr.id, expr.span);
let var = self.variable(nid, expr.span);
self.warn_about_dead_assign(expr.span, ln, var);
}
- Some(RelevantSelf) => {}
None => {}
}
}
copy or move mode", self.tcx.sess.str_of(name)));
return;
}
- Field(name) => {
- self.tcx.sess.span_err(
- move_span,
- fmt!("illegal move from field `%s`",
- self.tcx.sess.str_of(name)));
- return;
- }
Self => {
self.tcx.sess.span_err(
move_span,
enum ptr_kind {
uniq_ptr,
gc_ptr,
- region_ptr(ty::region),
+ region_ptr(ty::Region),
unsafe_ptr
}
}
}
- fn region_to_str(r: ty::region) -> ~str {
+ fn region_to_str(r: ty::Region) -> ~str {
region_to_str(self.tcx, r)
}
}
*/
-use driver::session::session;
+use driver::session::Session;
use middle::ty;
use syntax::{ast, visit};
use syntax::codemap::span;
type region_map = HashMap<ast::node_id, ast::node_id>;
struct ctxt {
- sess: session,
+ sess: Session,
def_map: resolve::DefMap,
// Generated maps:
/// intended to run *after inference* and sadly the logic is somewhat
/// duplicated with the code in infer.rs.
fn is_subregion_of(region_map: region_map,
- sub_region: ty::region,
- super_region: ty::region) -> bool {
+ sub_region: ty::Region,
+ super_region: ty::Region) -> bool {
sub_region == super_region ||
match (sub_region, super_region) {
(_, ty::re_static) => {
visitor: visit::vt<ctxt>) {
let fn_cx = match fk {
- visit::fk_item_fn(*) | visit::fk_method(*) |
- visit::fk_ctor(*) | visit::fk_dtor(*) => {
- // Top-level functions are a root scope.
- ctxt {parent: Some(id),.. cx}
- }
+ visit::fk_item_fn(*) | visit::fk_method(*) |
+ visit::fk_dtor(*) => {
+ // Top-level functions are a root scope.
+ ctxt {parent: Some(id),.. cx}
+ }
- visit::fk_anon(*) | visit::fk_fn_block(*) => {
- // Closures continue with the inherited scope.
- cx
- }
+ visit::fk_anon(*) | visit::fk_fn_block(*) => {
+ // Closures continue with the inherited scope.
+ cx
+ }
};
debug!("visiting fn with body %d. cx.parent: %? \
visit::visit_fn(fk, decl, body, sp, id, fn_cx, visitor);
}
-fn resolve_crate(sess: session, def_map: resolve::DefMap,
+fn resolve_crate(sess: Session, def_map: resolve::DefMap,
crate: @ast::crate) -> region_map {
let cx: ctxt = ctxt {sess: sess,
def_map: def_map,
}
type determine_rp_ctxt_ = {
- sess: session,
+ sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
region_paramd_items: region_paramd_items,
}
}
-fn determine_rp_in_ty(ty: @ast::ty,
+fn determine_rp_in_ty(ty: @ast::Ty,
&&cx: determine_rp_ctxt,
visitor: visit::vt<determine_rp_ctxt>) {
// that as a direct dependency.
match ty.node {
ast::ty_path(path, id) => {
- match cx.def_map.get(id) {
- ast::def_ty(did) | ast::def_class(did, _) => {
+ match cx.def_map.find(id) {
+ Some(ast::def_ty(did)) | Some(ast::def_class(did)) => {
if did.crate == ast::local_crate {
if cx.opt_region_is_relevant(path.rp) {
cx.add_dep(did.node);
}
}
-fn determine_rp_in_crate(sess: session,
+fn determine_rp_in_crate(sess: Session,
ast_map: ast_map::map,
def_map: resolve::DefMap,
crate: @ast::crate) -> region_paramd_items {
-use driver::session::session;
+use driver::session::Session;
use metadata::csearch::{each_path, get_method_names_if_trait};
+use metadata::csearch::{get_static_methods_if_impl, get_type_name_if_impl};
use metadata::cstore::find_use_stmt_cnum;
use metadata::decoder::{def_like, dl_def, dl_field, dl_impl};
use middle::lang_items::LanguageItems;
use syntax::ast::{_mod, add, arm};
use syntax::ast::{bind_by_ref, bind_by_implicit_ref, bind_by_value};
use syntax::ast::{bitand, bitor, bitxor};
-use syntax::ast::{blk, bound_const, bound_copy, bound_owned, bound_send};
-use syntax::ast::{bound_trait, binding_mode,
+use syntax::ast::{binding_mode, blk,
capture_clause, class_ctor, class_dtor};
use syntax::ast::{crate, crate_num, decl_item};
use syntax::ast::{def, def_arg, def_binding, def_class, def_const, def_fn};
use syntax::ast::{pat_tup, pat_uniq, pat_wild, private, provided, public};
use syntax::ast::{required, rem, self_ty_, shl, shr, stmt_decl};
use syntax::ast::{struct_field, struct_variant_kind, sty_static, subtract};
-use syntax::ast::{trait_ref, tuple_variant_kind, ty, ty_bool, ty_char};
+use syntax::ast::{trait_ref, tuple_variant_kind, Ty, ty_bool, ty_char};
use syntax::ast::{ty_f, ty_f32, ty_f64, ty_float, ty_i, ty_i16, ty_i32};
use syntax::ast::{ty_i64, ty_i8, ty_int, ty_param, ty_path, ty_str, ty_u};
use syntax::ast::{ty_u16, ty_u32, ty_u64, ty_u8, ty_uint, type_value_ns};
+use syntax::ast::{ty_param_bound};
use syntax::ast::{variant, view_item, view_item_export, view_item_import};
use syntax::ast::{view_item_use, view_path_glob, view_path_list};
use syntax::ast::{view_path_simple, visibility, anonymous, named};
enum Namespace {
- ModuleNS,
TypeNS,
ValueNS
}
type ResolveVisitor = vt<()>;
-enum ModuleDef {
- NoModuleDef, // Does not define a module.
- ModuleDef(Privacy, @Module), // Defines a module.
-}
-
-impl ModuleDef {
- pure fn is_none() -> bool {
- match self { NoModuleDef => true, _ => false }
- }
-}
-
enum ImportDirectiveNS {
- ModuleNSOnly,
+ TypeNSOnly,
AnyNS
}
MethodRibKind(node_id, MethodSort),
// We passed through a function *item* scope. Disallow upvars.
- OpaqueFunctionRibKind
+ OpaqueFunctionRibKind,
+
+ // We're in a constant item. Can't refer to dynamic stuff.
+ ConstantItemRibKind
}
// Methods can be required or provided. Required methods only occur in traits.
EnumVariantOrConstNotFound
}
+// Specifies how duplicates should be handled when adding a child item if
+// another item exists with the same name in some namespace.
+enum DuplicateCheckingMode {
+ ForbidDuplicateModules,
+ ForbidDuplicateTypes,
+ ForbidDuplicateValues,
+ ForbidDuplicateTypesAndValues,
+ OverwriteDuplicates
+}
+
+impl DuplicateCheckingMode : cmp::Eq {
+ pure fn eq(other: &DuplicateCheckingMode) -> bool {
+ (self as uint) == (*other as uint)
+ }
+ pure fn ne(other: &DuplicateCheckingMode) -> bool { !self.eq(other) }
+}
+
+// Returns the namespace associated with the given duplicate checking mode,
+// or fails for OverwriteDuplicates. This is used for error messages.
+fn namespace_for_duplicate_checking_mode(mode: DuplicateCheckingMode) ->
+ Namespace {
+ match mode {
+ ForbidDuplicateModules | ForbidDuplicateTypes |
+ ForbidDuplicateTypesAndValues => TypeNS,
+ ForbidDuplicateValues => ValueNS,
+ OverwriteDuplicates => fail ~"OverwriteDuplicates has no namespace"
+ }
+}
+
/// One local scope.
struct Rib {
bindings: HashMap<ident,def_like>,
mut outstanding_references: uint,
- mut module_target: Option<Target>,
mut value_target: Option<Target>,
mut type_target: Option<Target>,
privacy: privacy,
span: span,
outstanding_references: 0u,
- module_target: None,
value_target: None,
type_target: None,
used: false
impl ImportResolution {
fn target_for_namespace(namespace: Namespace) -> Option<Target> {
match namespace {
- ModuleNS => return copy self.module_target,
TypeNS => return copy self.type_target,
ValueNS => return copy self.value_target
}
}
}
-fn unused_import_lint_level(session: session) -> level {
+fn unused_import_lint_level(session: Session) -> level {
for session.opts.lint_opts.each |lint_option_pair| {
let (lint_type, lint_level) = *lint_option_pair;
if lint_type == unused_imports {
pure fn ne(other: &Privacy) -> bool { !self.eq(other) }
}
-// Records a possibly-private definition.
-struct Definition {
+// Records a possibly-private type definition.
+struct TypeNsDef {
+ mut privacy: Privacy,
+ mut module_def: Option<@Module>,
+ mut type_def: Option<def>
+}
+
+// Records a possibly-private value definition.
+struct ValueNsDef {
privacy: Privacy,
def: def,
}
// Records the definitions (at most one for each namespace) that a name is
// bound to.
struct NameBindings {
- mut module_def: ModuleDef, //< Meaning in module namespace.
- mut type_def: Option<Definition>, //< Meaning in type namespace.
- mut value_def: Option<Definition>, //< Meaning in value namespace.
+ mut type_def: Option<TypeNsDef>, //< Meaning in type namespace.
+ mut value_def: Option<ValueNsDef>, //< Meaning in value namespace.
// For error reporting
- // XXX: Merge me into Definition.
- mut module_span: Option<span>,
+ // FIXME (#3783): Merge me into TypeNsDef and ValueNsDef.
mut type_span: Option<span>,
mut value_span: Option<span>,
}
def_id: Option<def_id>,
legacy_exports: bool,
sp: span) {
- if self.module_def.is_none() {
- let module_ = @Module(parent_link, def_id, legacy_exports);
- self.module_def = ModuleDef(privacy, module_);
- self.module_span = Some(sp);
+ // Merges the module with the existing type def or creates a new one.
+ let module_ = @Module(parent_link, def_id, legacy_exports);
+ match self.type_def {
+ None => {
+ self.type_def = Some(TypeNsDef {
+ privacy: privacy,
+ module_def: Some(module_),
+ type_def: None
+ });
+ }
+ Some(copy type_def) => {
+ self.type_def = Some(TypeNsDef {
+ privacy: privacy,
+ module_def: Some(module_),
+ .. type_def
+ });
+ }
}
+ self.type_span = Some(sp);
}
/// Records a type definition.
fn define_type(privacy: Privacy, def: def, sp: span) {
- self.type_def = Some(Definition { privacy: privacy, def: def });
+ // Merges the type with the existing type def or creates a new one.
+ match self.type_def {
+ None => {
+ self.type_def = Some(TypeNsDef {
+ privacy: privacy,
+ module_def: None,
+ type_def: Some(def)
+ });
+ }
+ Some(copy type_def) => {
+ self.type_def = Some(TypeNsDef {
+ privacy: privacy,
+ type_def: Some(def),
+ .. type_def
+ });
+ }
+ }
self.type_span = Some(sp);
}
/// Records a value definition.
fn define_value(privacy: Privacy, def: def, sp: span) {
- self.value_def = Some(Definition { privacy: privacy, def: def });
+ self.value_def = Some(ValueNsDef { privacy: privacy, def: def });
self.value_span = Some(sp);
}
/// Returns the module node if applicable.
fn get_module_if_available() -> Option<@Module> {
- match self.module_def {
- NoModuleDef => return None,
- ModuleDef(_privacy, module_) => return Some(module_)
+ match self.type_def {
+ Some(type_def) => type_def.module_def,
+ None => None
}
}
* definition.
*/
fn get_module() -> @Module {
- match self.module_def {
- NoModuleDef => {
- fail
- ~"get_module called on a node with no module definition!";
- }
- ModuleDef(_, module_) => {
- return module_;
+ match self.get_module_if_available() {
+ None => {
+ fail ~"get_module called on a node with no module \
+ definition!"
}
+ Some(module_def) => module_def
}
}
fn defined_in_namespace(namespace: Namespace) -> bool {
match namespace {
- ModuleNS => {
- match self.module_def {
- NoModuleDef => false,
- _ => true
- }
- }
TypeNS => return self.type_def.is_some(),
ValueNS => return self.value_def.is_some()
}
}
- fn def_for_namespace(namespace: Namespace) -> Option<Definition> {
+ fn def_for_namespace(namespace: Namespace) -> Option<def> {
match namespace {
- TypeNS => return self.type_def,
- ValueNS => return self.value_def,
- ModuleNS => match self.module_def {
- NoModuleDef => return None,
- ModuleDef(privacy, module_) =>
- match module_.def_id {
- None => return None,
- Some(def_id) => {
- return Some(Definition {
- privacy: privacy,
- def: def_mod(def_id)
- });
+ TypeNS => {
+ match self.type_def {
+ None => None,
+ Some(type_def) => {
+ // FIXME (#3784): This is reallllly questionable.
+ // Perhaps the right thing to do is to merge def_mod
+ // and def_ty.
+ match type_def.type_def {
+ Some(type_def) => Some(type_def),
+ None => {
+ match type_def.module_def {
+ Some(module_def) => {
+ module_def.def_id.map(|def_id|
+ def_mod(*def_id))
+ }
+ None => None
+ }
+ }
+ }
}
}
- }
+ }
+ ValueNS => {
+ match self.value_def {
+ None => None,
+ Some(value_def) => Some(value_def.def)
+ }
+ }
+ }
+ }
+
+ fn privacy_for_namespace(namespace: Namespace) -> Option<Privacy> {
+ match namespace {
+ TypeNS => {
+ match self.type_def {
+ None => None,
+ Some(type_def) => Some(type_def.privacy)
+ }
+ }
+ ValueNS => {
+ match self.value_def {
+ None => None,
+ Some(value_def) => Some(value_def.privacy)
+ }
+ }
}
}
fn span_for_namespace(namespace: Namespace) -> Option<span> {
- match self.def_for_namespace(namespace) {
- Some(_) => {
+ if self.defined_in_namespace(namespace) {
match namespace {
- TypeNS => self.type_span,
- ValueNS => self.value_span,
- ModuleNS => self.module_span
+ TypeNS => self.type_span,
+ ValueNS => self.value_span,
}
- }
- None => None
+ } else {
+ None
}
}
}
fn NameBindings() -> NameBindings {
NameBindings {
- module_def: NoModuleDef,
type_def: None,
value_def: None,
- module_span: None,
type_span: None,
value_span: None
}
fn namespace_to_str(ns: Namespace) -> ~str {
match ns {
- TypeNS => ~"type",
- ValueNS => ~"value",
- ModuleNS => ~"module"
+ TypeNS => ~"type",
+ ValueNS => ~"value",
}
}
return false;
}
-fn Resolver(session: session, lang_items: LanguageItems,
+fn Resolver(session: Session, lang_items: LanguageItems,
crate: @crate) -> Resolver {
-
let graph_root = @NameBindings();
(*graph_root).define_module(Public,
primitive_type_table: @PrimitiveTypeTable(session.
parse_sess.interner),
- namespaces: ~[ ModuleNS, TypeNS, ValueNS ],
+ namespaces: ~[ TypeNS, ValueNS ],
def_map: HashMap(),
export_map2: HashMap(),
/// The main resolver class.
struct Resolver {
- session: session,
+ session: Session,
lang_items: LanguageItems,
crate: @crate,
unused_import_lint_level: level,
trait_info: HashMap<def_id,@HashMap<ident,()>>,
- structs: HashMap<def_id,bool>,
+ structs: HashMap<def_id,()>,
// The number of imports that are currently unresolved.
mut unresolved_imports: uint,
*/
fn add_child(name: ident,
reduced_graph_parent: ReducedGraphParent,
- // Pass in the namespaces for the child item so that we can
- // check for duplicate items in the same namespace
- ns: ~[Namespace],
+ duplicate_checking_mode: DuplicateCheckingMode,
// For printing errors
sp: span)
-> (@NameBindings, ReducedGraphParent) {
let new_parent = ModuleReducedGraphParent(module_);
match module_.children.find(name) {
None => {
- let child = @NameBindings();
- module_.children.insert(name, child);
- return (child, new_parent);
+ let child = @NameBindings();
+ module_.children.insert(name, child);
+ return (child, new_parent);
}
Some(child) => {
- // We don't want to complain if the multiple definitions
- // are in different namespaces.
- match ns.find(|n| child.defined_in_namespace(n)) {
- Some(ns) => {
- self.session.span_err(sp,
- #fmt("Duplicate definition of %s %s",
- namespace_to_str(ns),
- self.session.str_of(name)));
- do child.span_for_namespace(ns).iter() |sp| {
- self.session.span_note(*sp,
- #fmt("First definition of %s %s here:",
- namespace_to_str(ns),
- self.session.str_of(name)));
- }
+ // Enforce the duplicate checking mode. If we're requesting
+ // duplicate module checking, check that there isn't a module
+ // in the module with the same name. If we're requesting
+ // duplicate type checking, check that there isn't a type in
+ // the module with the same name. If we're requesting
+ // duplicate value checking, check that there isn't a value in
+ // the module with the same name. If we're requesting
+ // duplicate type checking and duplicate value checking, check
+ // that there isn't a duplicate type and a duplicate value
+ // with the same name. If no duplicate checking was requested
+ // at all, do nothing.
+
+ let mut is_duplicate = false;
+ match duplicate_checking_mode {
+ ForbidDuplicateModules => {
+ is_duplicate =
+ child.get_module_if_available().is_some();
+ }
+ ForbidDuplicateTypes => {
+ match child.def_for_namespace(TypeNS) {
+ Some(def_mod(_)) | None => {}
+ Some(_) => is_duplicate = true
+ }
+ }
+ ForbidDuplicateValues => {
+ is_duplicate = child.defined_in_namespace(ValueNS);
+ }
+ ForbidDuplicateTypesAndValues => {
+ match child.def_for_namespace(TypeNS) {
+ Some(def_mod(_)) | None => {}
+ Some(_) => is_duplicate = true
+ };
+ if child.defined_in_namespace(ValueNS) {
+ is_duplicate = true;
+ }
+ }
+ OverwriteDuplicates => {}
+ }
+ if duplicate_checking_mode != OverwriteDuplicates &&
+ is_duplicate {
+ // Return an error here by looking up the namespace that
+ // had the duplicate.
+ let ns = namespace_for_duplicate_checking_mode(
+ duplicate_checking_mode);
+ self.session.span_err(sp,
+ fmt!("duplicate definition of %s %s",
+ namespace_to_str(ns),
+ self.session.str_of(name)));
+ do child.span_for_namespace(ns).iter() |sp| {
+ self.session.span_note(*sp,
+ fmt!("first definition of %s %s here:",
+ namespace_to_str(ns),
+ self.session.str_of(name)));
+ }
}
- _ => {}
- }
- return (child, new_parent);
+ return (child, new_parent);
}
}
}
fn build_reduced_graph_for_item(item: @item,
parent: ReducedGraphParent,
&&visitor: vt<ReducedGraphParent>) {
-
let ident = item.ident;
let sp = item.span;
let legacy = match parent {
match item.node {
item_mod(module_) => {
- let legacy = has_legacy_export_attr(item.attrs);
- let (name_bindings, new_parent) = self.add_child(ident, parent,
- ~[ModuleNS], sp);
+ let legacy = has_legacy_export_attr(item.attrs);
+ let (name_bindings, new_parent) =
+ self.add_child(ident, parent, ForbidDuplicateModules, sp);
let parent_link = self.get_parent_link(new_parent, ident);
let def_id = { crate: 0, node: item.id };
- (*name_bindings).define_module(privacy, parent_link,
- Some(def_id), legacy, sp);
+ (*name_bindings).define_module(privacy, parent_link,
+ Some(def_id), legacy, sp);
let new_parent =
ModuleReducedGraphParent((*name_bindings).get_module());
visit_mod(module_, sp, item.id, new_parent, visitor);
}
+
item_foreign_mod(fm) => {
- let legacy = has_legacy_export_attr(item.attrs);
- let new_parent = match fm.sort {
- named => {
- let (name_bindings, new_parent) = self.add_child(ident,
- parent, ~[ModuleNS], sp);
+ let legacy = has_legacy_export_attr(item.attrs);
+ let new_parent = match fm.sort {
+ named => {
+ let (name_bindings, new_parent) =
+ self.add_child(ident, parent,
+ ForbidDuplicateModules, sp);
- let parent_link = self.get_parent_link(new_parent, ident);
- let def_id = { crate: 0, node: item.id };
- (*name_bindings).define_module(privacy, parent_link,
- Some(def_id), legacy, sp);
+ let parent_link = self.get_parent_link(new_parent,
+ ident);
+ let def_id = { crate: 0, node: item.id };
+ (*name_bindings).define_module(privacy,
+ parent_link,
+ Some(def_id),
+ legacy,
+ sp);
+
+ ModuleReducedGraphParent(name_bindings.get_module())
+ }
- ModuleReducedGraphParent((*name_bindings).get_module())
- }
- // For anon foreign mods, the contents just go in the
- // current scope
- anonymous => parent
- };
+ // For anon foreign mods, the contents just go in the
+ // current scope
+ anonymous => parent
+ };
- visit_item(item, new_parent, visitor);
+ visit_item(item, new_parent, visitor);
}
// These items live in the value namespace.
item_const(*) => {
- let (name_bindings, _) = self.add_child(ident, parent,
- ~[ValueNS], sp);
+ let (name_bindings, _) =
+ self.add_child(ident, parent, ForbidDuplicateValues, sp);
(*name_bindings).define_value
(privacy, def_const(local_def(item.id)), sp);
}
item_fn(_, purity, _, _) => {
- let (name_bindings, new_parent) = self.add_child(ident, parent,
- ~[ValueNS], sp);
+ let (name_bindings, new_parent) =
+ self.add_child(ident, parent, ForbidDuplicateValues, sp);
let def = def_fn(local_def(item.id), purity);
(*name_bindings).define_value(privacy, def, sp);
// These items live in the type namespace.
item_ty(*) => {
- let (name_bindings, _) = self.add_child(ident, parent,
- ~[TypeNS], sp);
+ let (name_bindings, _) =
+ self.add_child(ident, parent, ForbidDuplicateTypes, sp);
(*name_bindings).define_type
(privacy, def_ty(local_def(item.id)), sp);
}
item_enum(enum_definition, _) => {
-
- let (name_bindings, new_parent) = self.add_child(ident, parent,
- ~[TypeNS], sp);
+ let (name_bindings, new_parent) =
+ self.add_child(ident, parent, ForbidDuplicateTypes, sp);
(*name_bindings).define_type
(privacy, def_ty(local_def(item.id)), sp);
}
// These items live in both the type and value namespaces.
- item_class(struct_definition, _) => {
- let new_parent =
- match struct_definition.ctor {
- None => {
- let (name_bindings, new_parent) =
- self.add_child(ident, parent, ~[TypeNS], sp);
-
- (*name_bindings).define_type
- (privacy, def_ty(local_def(item.id)), sp);
- new_parent
- }
- Some(ctor) => {
- let (name_bindings, new_parent) =
- self.add_child(ident, parent, ~[ValueNS, TypeNS],
- sp);
-
- (*name_bindings).define_type
- (privacy, def_ty(local_def(item.id)), sp);
+ item_class(*) => {
+ let (name_bindings, new_parent) =
+ self.add_child(ident, parent, ForbidDuplicateTypes, sp);
- let purity = impure_fn;
- let ctor_def = def_fn(local_def(ctor.node.id),
- purity);
- (*name_bindings).define_value(privacy, ctor_def, sp);
- new_parent
- }
- };
+ (*name_bindings).define_type
+ (privacy, def_ty(local_def(item.id)), sp);
// Record the def ID of this struct.
- self.structs.insert(local_def(item.id),
- struct_definition.ctor.is_some());
+ self.structs.insert(local_def(item.id), ());
visit_item(item, new_parent, visitor);
}
- item_impl(*) => {
+ item_impl(_, trait_ref_opt, ty, methods) => {
+ // If this implements an anonymous trait and it has static
+ // methods, then add all the static methods within to a new
+ // module, if the type was defined within this module.
+ //
+ // FIXME (#3785): This is quite unsatisfactory. Perhaps we
+ // should modify anonymous traits to only be implementable in
+ // the same module that declared the type.
+
+ // Bail out early if there are no static methods.
+ let mut has_static_methods = false;
+ for methods.each |method| {
+ match method.self_ty.node {
+ sty_static => has_static_methods = true,
+ _ => {}
+ }
+ }
+
+ // If there are static methods, then create the module
+ // and add them.
+ match (trait_ref_opt, ty) {
+ (None, @{ id: _, node: ty_path(path, _), span: _ }) if
+ has_static_methods && path.idents.len() == 1 => {
+ // Create the module.
+ let name = path_to_ident(path);
+ let (name_bindings, new_parent) =
+ self.add_child(name,
+ parent,
+ ForbidDuplicateModules,
+ sp);
+
+ let parent_link = self.get_parent_link(new_parent,
+ ident);
+ let def_id = local_def(item.id);
+ name_bindings.define_module(privacy, parent_link,
+ Some(def_id), false, sp);
+
+ let new_parent = ModuleReducedGraphParent(
+ name_bindings.get_module());
+
+ // For each static method...
+ for methods.each |method| {
+ match method.self_ty.node {
+ sty_static => {
+ // Add the static method to the module.
+ let ident = method.ident;
+ let (method_name_bindings, _) =
+ self.add_child(ident,
+ new_parent,
+ ForbidDuplicateValues,
+ method.span);
+ let def = def_fn(local_def(method.id),
+ method.purity);
+ method_name_bindings.define_value(
+ Public, def, method.span);
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+
visit_item(item, parent, visitor);
}
item_trait(_, _, methods) => {
- let (name_bindings, new_parent) = self.add_child(ident, parent,
- ~[TypeNS], sp);
+ let (name_bindings, new_parent) =
+ self.add_child(ident, parent, ForbidDuplicateTypes, sp);
// Add the names of all the methods to the trait info.
let method_names = @HashMap();
sty_static => {
// which parent to use??
let (method_name_bindings, _) =
- self.add_child(ident, new_parent, ~[ValueNS],
- ty_m.span);
+ self.add_child(ident, new_parent,
+ ForbidDuplicateValues, ty_m.span);
let def = def_static_method(local_def(ty_m.id),
+ Some(local_def(item.id)),
ty_m.purity);
(*method_name_bindings).define_value
(Public, def, ty_m.span);
&&visitor: vt<ReducedGraphParent>) {
let ident = variant.node.name;
- let (child, _) = self.add_child(ident, parent, ~[ValueNS],
+ let (child, _) = self.add_child(ident, parent, ForbidDuplicateValues,
variant.span);
let privacy;
def_variant(item_id,
local_def(variant.node.id)),
variant.span);
- self.structs.insert(local_def(variant.node.id), false);
+ self.structs.insert(local_def(variant.node.id), ());
}
enum_variant_kind(enum_definition) => {
(*child).define_type(privacy,
match view_path.node {
view_path_simple(binding, full_path, ns, _) => {
let ns = match ns {
- module_ns => ModuleNSOnly,
+ module_ns => TypeNSOnly,
type_value_ns => AnyNS
};
match find_use_stmt_cnum(self.session.cstore, node_id) {
Some(crate_id) => {
let (child_name_bindings, new_parent) =
- // should this be in ModuleNS? --tjc
- self.add_child(name, parent, ~[ModuleNS],
+ self.add_child(name, parent, ForbidDuplicateTypes,
view_item.span);
let def_id = { crate: crate_id, node: 0 };
let name = foreign_item.ident;
let (name_bindings, new_parent) =
- self.add_child(name, parent, ~[ValueNS], foreign_item.span);
+ self.add_child(name, parent, ForbidDuplicateValues,
+ foreign_item.span);
match foreign_item.node {
foreign_item_fn(_, purity, type_parameters) => {
ident: ident, new_parent: ReducedGraphParent) {
match def {
def_mod(def_id) | def_foreign_mod(def_id) => {
- match copy child_name_bindings.module_def {
- NoModuleDef => {
+ match copy child_name_bindings.type_def {
+ Some(TypeNsDef { module_def: Some(copy module_def), _ }) => {
+ debug!("(building reduced graph for external crate) \
+ already created module");
+ module_def.def_id = Some(def_id);
+ modules.insert(def_id, module_def);
+ }
+ Some(_) | None => {
debug!("(building reduced graph for \
external crate) building module \
%s", final_ident);
fail ~"can't happen";
}
ModuleParentLink(parent_module, ident) => {
-
let name_bindings = parent_module.children.get(ident);
-
- resolution.module_target =
+ resolution.type_target =
Some(Target(parent_module, name_bindings));
}
}
}
}
}
- ModuleDef(_priv, module_) => {
- debug!("(building reduced graph for \
- external crate) already created \
- module");
- module_.def_id = Some(def_id);
- modules.insert(def_id, module_);
- }
}
}
def_fn(*) | def_static_method(*) | def_const(*) |
// If this is a trait, add all the method names
// to the trait info.
- match get_method_names_if_trait(self.session.cstore,
- def_id) {
+ match get_method_names_if_trait(self.session.cstore, def_id) {
None => {
// Nothing to do.
}
child_name_bindings.define_type(Public, def, dummy_sp());
}
- def_class(def_id, has_constructor) => {
+ def_class(def_id) => {
debug!("(building reduced graph for external \
- crate) building type %s (value? %d)",
- final_ident,
- if has_constructor { 1 } else { 0 });
+ crate) building type %s",
+ final_ident);
child_name_bindings.define_type(Public, def, dummy_sp());
-
- if has_constructor {
- child_name_bindings.define_value(Public, def, dummy_sp());
- }
-
- self.structs.insert(def_id, has_constructor);
+ self.structs.insert(def_id, ());
}
def_self(*) | def_arg(*) | def_local(*) |
def_prim_ty(*) | def_ty_param(*) | def_binding(*) |
let (child_name_bindings, new_parent) =
self.add_child(ident,
ModuleReducedGraphParent(current_module),
- // May want a better span
- ~[], dummy_sp());
+ OverwriteDuplicates,
+ dummy_sp());
// Define or reuse the module node.
- match child_name_bindings.module_def {
- NoModuleDef => {
+ match child_name_bindings.type_def {
+ None => {
debug!("(building reduced graph for external crate) \
autovivifying %s", *ident_str);
let parent_link = self.get_parent_link(new_parent,
None, false,
dummy_sp());
}
- ModuleDef(*) => { /* Fall through. */ }
+ Some(_) => { /* Fall through. */ }
}
current_module = (*child_name_bindings).get_module();
}
- // Add the new child item.
- let (child_name_bindings, new_parent) =
- self.add_child(final_ident,
- ModuleReducedGraphParent(current_module),
- ~[], dummy_sp());
-
match path_entry.def_like {
dl_def(def) => {
+ // Add the new child item.
+ let (child_name_bindings, new_parent) =
+ self.add_child(final_ident,
+ ModuleReducedGraphParent(
+ current_module),
+ OverwriteDuplicates,
+ dummy_sp());
+
self.handle_external_def(def, modules,
child_name_bindings,
self.session.str_of(final_ident),
final_ident, new_parent);
}
- dl_impl(_) => {
- // Because of the infelicitous way the metadata is
- // written, we can't process this impl now. We'll get it
- // later.
-
+ dl_impl(def) => {
+ // We only process static methods of impls here.
debug!("(building reduced graph for external crate) \
- ignoring impl %s", final_ident_str);
+ processing impl %s", final_ident_str);
+
+ match get_type_name_if_impl(self.session.cstore, def) {
+ None => {}
+ Some(final_ident) => {
+ let static_methods_opt =
+ get_static_methods_if_impl(
+ self.session.cstore, def);
+ match static_methods_opt {
+ Some(static_methods) if
+ static_methods.len() >= 1 => {
+ debug!("(building reduced graph for \
+ external crate) processing \
+ static methods for type name %s",
+ self.session.str_of(final_ident));
+
+ let (child_name_bindings, new_parent) =
+ self.add_child(final_ident,
+ ModuleReducedGraphParent(
+ current_module),
+ OverwriteDuplicates,
+ dummy_sp());
+
+ // Process the static methods. First,
+ // create the module.
+ let type_module;
+ match copy child_name_bindings.type_def {
+ Some(TypeNsDef {
+ module_def: Some(copy module_def),
+ _
+ }) => {
+ // We already have a module. This
+ // is OK.
+ type_module = module_def;
+ }
+ Some(_) | None => {
+ let parent_link =
+ self.get_parent_link(
+ new_parent, final_ident);
+ child_name_bindings.define_module(
+ Public,
+ parent_link,
+ Some(def),
+ false,
+ dummy_sp());
+ type_module =
+ child_name_bindings.
+ get_module();
+ }
+ }
+
+ // Add each static method to the module.
+ let new_parent = ModuleReducedGraphParent(
+ type_module);
+ for static_methods.each
+ |static_method_info| {
+ let ident = static_method_info.ident;
+ debug!("(building reduced graph for \
+ external crate) creating \
+ static method '%s'",
+ self.session.str_of(ident));
+
+ let (method_name_bindings, _) =
+ self.add_child(
+ ident,
+ new_parent,
+ OverwriteDuplicates,
+ dummy_sp());
+ let def = def_fn(
+ static_method_info.def_id,
+ static_method_info.purity);
+ method_name_bindings.define_value(
+ Public, def, dummy_sp());
+ }
+ }
+
+ // Otherwise, do nothing.
+ Some(_) | None => {}
+ }
+ }
+ }
}
dl_field => {
debug!("(building reduced graph for external crate) \
module_path: @DVec<ident>,
subclass: @ImportDirectiveSubclass,
span: span) {
-
let directive = @ImportDirective(privacy, module_path,
subclass, span);
module_.imports.push(directive);
target,
source);
}
- SingleImport(target, source, ModuleNSOnly) => {
+ SingleImport(target, source, TypeNSOnly) => {
resolution_result =
self.resolve_single_module_import
(module_, containing_module, target,
return Failed;
}
- // We need to resolve all four namespaces for this to succeed.
+ // We need to resolve both namespaces for this to succeed.
//
// XXX: See if there's some way of handling namespaces in a more
- // generic way. We have four of them; it seems worth doing...
+ // generic way. We have two of them; it seems worth doing...
- let mut module_result = UnknownResult;
let mut value_result = UnknownResult;
let mut type_result = UnknownResult;
// Continue.
}
Some(child_name_bindings) => {
- if (*child_name_bindings).defined_in_namespace(ModuleNS) {
- module_result = BoundResult(containing_module,
- child_name_bindings);
- }
if (*child_name_bindings).defined_in_namespace(ValueNS) {
value_result = BoundResult(containing_module,
child_name_bindings);
}
}
- // Unless we managed to find a result in all four namespaces
- // (exceedingly unlikely), search imports as well.
-
- match (module_result, value_result, type_result) {
- (BoundResult(*), BoundResult(*), BoundResult(*)) => {
+ // Unless we managed to find a result in both namespaces (unlikely),
+ // search imports as well.
+ match (value_result, type_result) {
+ (BoundResult(*), BoundResult(*)) => {
// Continue.
}
_ => {
// therefore accurately report that the names are
// unbound.
- if module_result.is_unknown() {
- module_result = UnboundResult;
- }
if value_result.is_unknown() {
value_result = UnboundResult;
}
// The name is an import which has been fully
// resolved. We can, therefore, just follow it.
-
- if module_result.is_unknown() {
- module_result = get_binding(import_resolution,
- ModuleNS);
- }
if value_result.is_unknown() {
value_result = get_binding(import_resolution,
ValueNS);
assert module_.import_resolutions.contains_key(target);
let import_resolution = module_.import_resolutions.get(target);
- match module_result {
- BoundResult(target_module, name_bindings) => {
- debug!("(resolving single import) found module binding");
- import_resolution.module_target =
- Some(Target(target_module, name_bindings));
- }
- UnboundResult => {
- debug!("(resolving single import) didn't find module \
- binding");
- }
- UnknownResult => {
- fail ~"module result should be known at this point";
- }
- }
match value_result {
BoundResult(target_module, name_bindings) => {
import_resolution.value_target =
}
let i = import_resolution;
- match (i.module_target, i.value_target, i.type_target) {
- /*
- If this name wasn't found in any of the four namespaces, it's
- definitely unresolved
- */
- (None, None, None) => { return Failed; }
+ match (i.value_target, i.type_target) {
+ // If this name wasn't found in either namespace, it's definitely
+ // unresolved.
+ (None, None) => { return Failed; }
_ => {}
}
// Continue.
}
Some(child_name_bindings) => {
- if (*child_name_bindings).defined_in_namespace(ModuleNS) {
+ if (*child_name_bindings).defined_in_namespace(TypeNS) {
module_result = BoundResult(containing_module,
child_name_bindings);
}
// resolved. We can, therefore, just follow it.
if module_result.is_unknown() {
- match (*import_resolution).
- target_for_namespace(ModuleNS) {
+ match (*import_resolution).target_for_namespace(
+ TypeNS) {
None => {
module_result = UnboundResult;
}
match module_result {
BoundResult(target_module, name_bindings) => {
debug!("(resolving single import) found module binding");
- import_resolution.module_target =
+ import_resolution.type_target =
Some(Target(target_module, name_bindings));
}
UnboundResult => {
}
let i = import_resolution;
- if i.module_target.is_none() {
- // If this name wasn't found in the module namespace, it's
+ if i.type_target.is_none() {
+ // If this name wasn't found in the type namespace, it's
// definitely unresolved.
return Failed;
}
debug!("(resolving glob import) writing module resolution \
%? into `%s`",
- is_none(target_import_resolution.module_target),
+ is_none(target_import_resolution.type_target),
self.module_to_str(module_));
// Here we merge two import resolutions.
let new_import_resolution =
@ImportResolution(privacy,
target_import_resolution.span);
- new_import_resolution.module_target =
- copy target_import_resolution.module_target;
new_import_resolution.value_target =
copy target_import_resolution.value_target;
new_import_resolution.type_target =
// Merge the two import resolutions at a finer-grained
// level.
- match copy target_import_resolution.module_target {
- None => {
- // Continue.
- }
- Some(module_target) => {
- dest_import_resolution.module_target =
- Some(copy module_target);
- }
- }
match copy target_import_resolution.value_target {
None => {
// Continue.
self.module_to_str(module_));
// Merge the child item into the import resolution.
- if (*name_bindings).defined_in_namespace(ModuleNS) {
- debug!("(resolving glob import) ... for module target");
- dest_import_resolution.module_target =
- Some(Target(containing_module, name_bindings));
- }
if (*name_bindings).defined_in_namespace(ValueNS) {
debug!("(resolving glob import) ... for value target");
dest_import_resolution.value_target =
xray: XrayFlag,
span: span)
-> ResolveResult<@Module> {
-
let mut search_module = module_;
let mut index = index;
let module_path_len = (*module_path).len();
while index < module_path_len {
let name = (*module_path).get_elt(index);
- match self.resolve_name_in_module(search_module, name, ModuleNS,
- xray) {
-
+ match self.resolve_name_in_module(search_module, name, TypeNS,
+ xray) {
Failed => {
self.session.span_err(span, ~"unresolved name");
return Failed;
return Indeterminate;
}
Success(target) => {
- match target.bindings.module_def {
- NoModuleDef => {
- // Not a module.
+ // Check to see whether there are type bindings, and, if
+ // so, whether there is a module within.
+ match target.bindings.type_def {
+ Some(copy type_def) => {
+ match type_def.module_def {
+ None => {
+ // Not a module.
+ self.session.span_err(span,
+ fmt!("not a \
+ module: %s",
+ self.session.
+ str_of(
+ name)));
+ return Failed;
+ }
+ Some(copy module_def) => {
+ search_module = module_def;
+ }
+ }
+ }
+ None => {
+ // There are no type bindings at all.
self.session.span_err(span,
fmt!("not a module: %s",
- self.session.
- str_of(name)));
+ self.session.str_of(
+ name)));
return Failed;
}
- ModuleDef(_, copy module_) => {
- search_module = module_;
- }
}
}
}
match module_.children.find(name) {
Some(name_bindings)
if (*name_bindings).defined_in_namespace(namespace) => {
-
return Success(Target(module_, name_bindings));
}
Some(_) | None => { /* Not found; continue. */ }
fn resolve_module_in_lexical_scope(module_: @Module, name: ident)
-> ResolveResult<@Module> {
-
- match self.resolve_item_in_lexical_scope(module_, name, ModuleNS) {
+ match self.resolve_item_in_lexical_scope(module_, name, TypeNS) {
Success(target) => {
- match target.bindings.module_def {
- NoModuleDef => {
+ match target.bindings.type_def {
+ Some(type_def) => {
+ match type_def.module_def {
+ None => {
+ error!("!!! (resolving module in lexical \
+ scope) module wasn't actually a \
+ module!");
+ return Failed;
+ }
+ Some(module_def) => {
+ return Success(module_def);
+ }
+ }
+ }
+ None => {
error!("!!! (resolving module in lexical scope) module
wasn't actually a module!");
return Failed;
}
- ModuleDef(_, module_) => {
- return Success(module_);
- }
}
}
Indeterminate => {
debug!("(resolving one-level naming result) searching for module");
match self.resolve_item_in_lexical_scope(module_,
source_name,
- ModuleNS) {
-
+ TypeNS) {
Failed => {
debug!("(resolving one-level renaming import) didn't find \
module result");
let mut value_result;
let mut type_result;
- if allowable_namespaces == ModuleNSOnly {
+ if allowable_namespaces == TypeNSOnly {
value_result = None;
type_result = None;
} else {
self.session.str_of(target_name),
self.module_to_str(module_));
- import_resolution.module_target = module_result;
import_resolution.value_target = value_result;
import_resolution.type_target = type_result;
ident: ident,
namebindings: @NameBindings,
reexport: bool) {
- for [ModuleNS, TypeNS, ValueNS].each |ns| {
- match namebindings.def_for_namespace(*ns) {
- Some(d) if d.privacy == Public => {
+ for [ TypeNS, ValueNS ].each |ns| {
+ match (namebindings.def_for_namespace(*ns),
+ namebindings.privacy_for_namespace(*ns)) {
+ (Some(d), Some(Public)) => {
debug!("(computing exports) YES: %s '%s' \
=> %?",
if reexport { ~"reexport" } else { ~"export"},
self.session.str_of(ident),
- def_id_of_def(d.def));
+ def_id_of_def(d));
exports2.push(Export2 {
reexport: reexport,
name: self.session.str_of(ident),
- def_id: def_id_of_def(d.def)
+ def_id: def_id_of_def(d)
});
}
_ => ()
}
for module_.import_resolutions.each_ref |ident, importresolution| {
- for [ModuleNS, TypeNS, ValueNS].each |ns| {
+ for [ TypeNS, ValueNS ].each |ns| {
match importresolution.target_for_namespace(*ns) {
Some(target) => {
debug!("(computing exports) maybe reexport '%s'",
self.session.str_of(*ident));
- self.add_exports_of_namebindings(exports2, *ident,
+ self.add_exports_of_namebindings(exports2,
+ *ident,
target.bindings,
true)
}
return None;
}
+ ConstantItemRibKind => {
+ // Still doesn't deal with upvars
+ self.session.span_err(span,
+ ~"attempt to use a non-constant \
+ value in a constant");
+
+ }
}
- rib_index += 1u;
+ rib_index += 1;
}
return Some(dl_def(def));
// XXX: Try caching?
let mut i = (*ribs).len();
- while i != 0u {
- i -= 1u;
+ while i != 0 {
+ i -= 1;
let rib = (*ribs).get_elt(i);
match rib.bindings.find(name) {
Some(def_like) => {
}
match item.node {
- item_enum(_, type_parameters) |
+
+ // enum item: resolve all the variants' discrs,
+ // then resolve the ty params
+ item_enum(enum_def, type_parameters) => {
+
+ for enum_def.variants.each() |variant| {
+ do variant.node.disr_expr.iter() |dis_expr| {
+ // resolve the discriminator expr
+ // as a constant
+ self.with_constant_rib(|| {
+ self.resolve_expr(*dis_expr, visitor);
+ });
+ }
+ }
+
+                // n.b. the discr expr gets visited twice.
+ // but maybe it's okay since the first time will signal an
+ // error if there is one? -- tjc
+ do self.with_type_parameter_rib
+ (HasTypeParameters(&type_parameters, item.id, 0,
+ NormalRibKind))
+ || {
+
+ visit_item(item, (), visitor);
+ }
+ }
+
item_ty(_, type_parameters) => {
do self.with_type_parameter_rib
(HasTypeParameters(&type_parameters, item.id, 0u,
struct_def.traits,
struct_def.fields,
struct_def.methods,
- struct_def.ctor,
struct_def.dtor,
visitor);
}
}
item_const(*) => {
- visit_item(item, (), visitor);
+ self.with_constant_rib(|| {
+ visit_item(item, (), visitor);
+ });
}
item_mac(*) => {
f();
(*self.label_ribs).pop();
}
+ fn with_constant_rib(f: fn()) {
+ (*self.value_ribs).push(@Rib(ConstantItemRibKind));
+ f();
+ (*self.value_ribs).pop();
+ }
+
fn resolve_function(rib_kind: RibKind,
optional_declaration: Option<@fn_decl>,
self_binding: SelfBinding,
capture_clause: CaptureClause,
visitor: ResolveVisitor) {
-
// Check each element of the capture clause.
match capture_clause {
NoCaptureClause => {
fn resolve_type_parameters(type_parameters: ~[ty_param],
visitor: ResolveVisitor) {
-
for type_parameters.each |type_parameter| {
for type_parameter.bounds.each |bound| {
- match *bound {
- bound_copy | bound_send | bound_const | bound_owned => {
- // Nothing to do.
- }
- bound_trait(trait_type) => {
- self.resolve_type(trait_type, visitor);
- }
- }
+ self.resolve_type(**bound, visitor);
}
}
}
traits: ~[@trait_ref],
fields: ~[@struct_field],
methods: ~[@method],
- optional_constructor: Option<class_ctor>,
optional_destructor: Option<class_dtor>,
visitor: ResolveVisitor) {
-
// If applicable, create a rib for the type parameters.
let outer_type_parameter_count = (*type_parameters).len();
let borrowed_type_parameters: &~[ty_param] = &*type_parameters;
do self.with_type_parameter_rib(HasTypeParameters
- (borrowed_type_parameters, id, 0u,
- NormalRibKind)) {
+ (borrowed_type_parameters, id, 0,
+ OpaqueFunctionRibKind)) {
// Resolve the type parameters.
self.resolve_type_parameters(*type_parameters, visitor);
self.resolve_type(field.node.ty, visitor);
}
- // Resolve the constructor, if applicable.
- match optional_constructor {
- None => {
- // Nothing to do.
- }
- Some(constructor) => {
- self.resolve_function(NormalRibKind,
- Some(@constructor.node.dec),
- NoTypeParameters,
- constructor.node.body,
- HasSelfBinding(constructor.node.
- self_id),
- NoCaptureClause,
- visitor);
- }
- }
-
// Resolve the destructor, if applicable.
match optional_destructor {
None => {
span: span,
type_parameters: ~[ty_param],
opt_trait_reference: Option<@trait_ref>,
- self_type: @ty,
+ self_type: @Ty,
methods: ~[@method],
visitor: ResolveVisitor) {
-
// If applicable, create a rib for the type parameters.
let outer_type_parameter_count = type_parameters.len();
let borrowed_type_parameters: &~[ty_param] = &type_parameters;
do self.with_type_parameter_rib(HasTypeParameters
(borrowed_type_parameters, id, 0u,
NormalRibKind)) {
-
// Resolve the type parameters.
self.resolve_type_parameters(type_parameters, visitor);
// Resolve the trait reference, if necessary.
let original_trait_refs = self.current_trait_refs;
match opt_trait_reference {
- Some(trait_reference) => {
- let new_trait_refs = @DVec();
- match self.resolve_path(
+ Some(trait_reference) => {
+ let new_trait_refs = @DVec();
+ match self.resolve_path(
trait_reference.path, TypeNS, true, visitor) {
None => {
self.session.span_err(span,
(*new_trait_refs).push(def_id_of_def(def));
}
}
- // Record the current set of trait references.
- self.current_trait_refs = Some(new_trait_refs);
- }
- None => ()
+ // Record the current set of trait references.
+ self.current_trait_refs = Some(new_trait_refs);
+ }
+ None => ()
}
// Resolve the self type.
debug!("(resolving block) leaving block");
}
- fn resolve_type(ty: @ty, visitor: ResolveVisitor) {
+ fn resolve_type(ty: @Ty, visitor: ResolveVisitor) {
match ty.node {
// Like path expressions, the interpretation of path types depends
// on whether the path has multiple elements in it or not.
ty_path(path, path_id) => {
// This is a path in the type namespace. Walk through scopes
// scopes looking for it.
+ let mut result_def = None;
- let mut result_def;
- match self.resolve_path(path, TypeNS, true, visitor) {
- Some(def) => {
- debug!("(resolving type) resolved `%s` to type",
- self.session.str_of(path.idents.last()));
- result_def = Some(def);
- }
- None => {
- result_def = None;
+ // First, check to see whether the name is a primitive type.
+ if path.idents.len() == 1 {
+ let name = path.idents.last();
+
+ match self.primitive_type_table
+ .primitive_types
+ .find(name) {
+
+ Some(primitive_type) => {
+ result_def =
+ Some(def_prim_ty(primitive_type));
+ }
+ None => {
+ // Continue.
+ }
}
}
match result_def {
- Some(_) => {
- // Continue.
- }
None => {
- // Check to see whether the name is a primitive type.
- if path.idents.len() == 1u {
- let name = path.idents.last();
-
- match self.primitive_type_table
- .primitive_types
- .find(name) {
-
- Some(primitive_type) => {
- result_def =
- Some(def_prim_ty(primitive_type));
- }
- None => {
- // Continue.
- }
+ match self.resolve_path(path, TypeNS, true, visitor) {
+ Some(def) => {
+ debug!("(resolving type) resolved `%s` to \
+ type %?",
+ self.session.str_of(
+ path.idents.last()),
+ def);
+ result_def = Some(def);
+ }
+ None => {
+ result_def = None;
}
}
}
+ Some(_) => {
+ // Continue.
+ }
}
match copy result_def {
match self.resolve_path(path, TypeNS, false, visitor) {
Some(def_ty(class_id))
if self.structs.contains_key(class_id) => {
- let has_constructor = self.structs.get(class_id);
- let class_def = def_class(class_id,
- has_constructor);
+ let class_def = def_class(class_id);
self.record_def(pattern.id, class_def);
}
Some(definition @ def_variant(_, variant_id))
namespace);
}
- if path.idents.len() > 1u {
+ if path.idents.len() > 1 {
return self.resolve_module_relative_path(path,
self.xray_context,
namespace);
// First, search children.
match containing_module.children.find(name) {
Some(child_name_bindings) => {
- match (*child_name_bindings).def_for_namespace(namespace) {
- Some(def) if def.privacy == Public || xray == Xray => {
+ match (child_name_bindings.def_for_namespace(namespace),
+ child_name_bindings.privacy_for_namespace(namespace)) {
+ (Some(def), Some(Public)) => {
+ // Found it. Stop the search here.
+ return ChildNameDefinition(def);
+ }
+ (Some(def), _) if xray == Xray => {
// Found it. Stop the search here.
- return ChildNameDefinition(def.def);
+ return ChildNameDefinition(def);
}
- Some(_) | None => {
+ (Some(_), _) | (None, _) => {
// Continue.
}
}
xray == Xray => {
match (*import_resolution).target_for_namespace(namespace) {
Some(target) => {
- match (*target.bindings)
- .def_for_namespace(namespace) {
- Some(def) if def.privacy == Public => {
+ match (target.bindings.def_for_namespace(namespace),
+ target.bindings.privacy_for_namespace(
+ namespace)) {
+ (Some(def), Some(Public)) => {
// Found it.
import_resolution.used = true;
- return ImportNameDefinition(def.def);
+ return ImportNameDefinition(def);
}
- Some(_) | None => {
+ (Some(_), _) | (None, _) => {
// This can happen with external impls, due to
// the imperfect way we read the metadata.
search_result = self.search_ribs(self.type_ribs, ident, span,
AllowCapturingSelf);
}
- ModuleNS => {
- fail ~"module namespaces do not have local ribs";
- }
}
match copy search_result {
fn resolve_item_by_identifier_in_lexical_scope(ident: ident,
namespace: Namespace)
-> Option<def> {
-
// Check the items.
match self.resolve_item_in_lexical_scope(self.current_module,
ident,
namespace) {
-
Success(target) => {
match (*target.bindings).def_for_namespace(namespace) {
None => {
- fail ~"resolved name in a namespace to a set of name \
- bindings with no def for that namespace?!";
+ // This can happen if we were looking for a type and
+ // found a module instead. Modules don't have defs.
+ return None;
}
Some(def) => {
debug!("(resolving item path in lexical scope) \
resolved `%s` to item",
self.session.str_of(ident));
- return Some(def.def);
+ return Some(def);
}
}
}
// let bar = Bar { ... } // no type parameters
match self.resolve_path(path, TypeNS, false, visitor) {
- Some(def_ty(class_id)) | Some(def_class(class_id, _))
+ Some(def_ty(class_id)) | Some(def_class(class_id))
if self.structs.contains_key(class_id) => {
- let has_constructor = self.structs.get(class_id);
- let class_def = def_class(class_id, has_constructor);
+ let class_def = def_class(class_id);
self.record_def(expr.id, class_def);
}
Some(definition @ def_variant(_, class_id))
}
fn search_for_traits_containing_method(name: ident) -> @DVec<def_id> {
+ debug!("(searching for traits containing method) looking for '%s'",
+ self.session.str_of(name));
+
let found_traits = @DVec();
let mut search_module = self.current_module;
loop {
match copy self.current_trait_refs {
Some(trait_def_ids) => {
for trait_def_ids.each |trait_def_id| {
- self.add_trait_info_if_containing_method
- (found_traits, *trait_def_id, name);
+ self.add_trait_info_if_containing_method(
+ found_traits, *trait_def_id, name);
}
}
None => {
for search_module.children.each |_name, child_name_bindings| {
match child_name_bindings.def_for_namespace(TypeNS) {
Some(def) => {
- match def.def {
+ match def {
def_ty(trait_def_id) => {
- self.add_trait_info_if_containing_method
- (found_traits, trait_def_id, name);
+ self.add_trait_info_if_containing_method(
+ found_traits, trait_def_id, name);
}
_ => {
// Continue.
Some(target) => {
match target.bindings.def_for_namespace(TypeNS) {
Some(def) => {
- match def.def {
+ match def {
def_ty(trait_def_id) => {
self.
- add_trait_info_if_containing_method
- (found_traits, trait_def_id, name);
+ add_trait_info_if_containing_method(
+ found_traits, trait_def_id, name);
}
_ => {
// Continue.
trait_def_id: def_id,
name: ident) {
+ debug!("(adding trait info if containing method) trying trait %d:%d \
+ for method '%s'",
+ trait_def_id.crate,
+ trait_def_id.node,
+ self.session.str_of(name));
+
match self.trait_info.find(trait_def_id) {
Some(trait_info) if trait_info.contains_key(name) => {
debug!("(adding trait info if containing method) found trait \
debug!("Import resolutions:");
for module_.import_resolutions.each |name, import_resolution| {
- let mut module_repr;
- match (*import_resolution).target_for_namespace(ModuleNS) {
- None => { module_repr = ~""; }
- Some(_) => {
- module_repr = ~" module:?";
- // XXX
- }
- }
-
let mut value_repr;
match (*import_resolution).target_for_namespace(ValueNS) {
None => { value_repr = ~""; }
}
}
- debug!("* %s:%s%s%s",
- self.session.str_of(name),
- module_repr, value_repr, type_repr);
+ debug!("* %s:%s%s", self.session.str_of(name),
+ value_repr, type_repr);
}
}
}
/// Entry point to crate resolution.
-fn resolve_crate(session: session, lang_items: LanguageItems, crate: @crate)
+fn resolve_crate(session: Session, lang_items: LanguageItems, crate: @crate)
-> { def_map: DefMap,
exp_map2: ExportMap2,
trait_map: TraitMap } {
*
*/
-use driver::session::session;
use lib::llvm::llvm;
use lib::llvm::{ValueRef, BasicBlockRef};
use pat_util::*;
match p.node {
ast::pat_enum(_, subpats) => {
if opt_eq(tcx, &variant_opt(tcx, p.id), opt) {
- Some(option::get_default(&subpats,
+ Some(option::get_default(subpats,
vec::from_elem(variant_size,
dummy)))
} else {
arm_cxs.push(bcx);
}
- return controlflow::join_blocks(scope_cx, dvec::unwrap(arm_cxs));
+ return controlflow::join_blocks(scope_cx, dvec::unwrap(move arm_cxs));
fn mk_fail(bcx: block, sp: span, msg: ~str,
done: @mut Option<BasicBlockRef>) -> BasicBlockRef {
use std::{map, time, list};
use std::map::HashMap;
use driver::session;
-use session::session;
+use session::Session;
use syntax::attr;
use back::{link, abi, upcall};
use syntax::{ast, ast_util, codemap, ast_map};
assert ix < variant.args.len();
let arg_lltys = vec::map(variant.args, |aty| {
- type_of(ccx, ty::subst_tps(ccx.tcx, ty_substs, *aty))
+ type_of(ccx, ty::subst_tps(ccx.tcx, ty_substs, None, *aty))
});
let typed_blobptr = PointerCast(bcx, llblobptr,
T_ptr(T_struct(arg_lltys)));
fn non_gc_box_cast(bcx: block, val: ValueRef) -> ValueRef {
debug!("non_gc_box_cast");
add_comment(bcx, ~"non_gc_box_cast");
- assert(llvm::LLVMGetPointerAddressSpace(val_ty(val)) == gc_box_addrspace);
+ assert(llvm::LLVMGetPointerAddressSpace(val_ty(val)) == gc_box_addrspace
+ || bcx.unreachable);
let non_gc_t = T_ptr(llvm::LLVMGetElementType(val_ty(val)));
PointerCast(bcx, val, non_gc_t)
}
let _icx = ccx.insn_ctxt("trans_res_dtor");
if (substs.is_not_empty()) {
let did = if did.crate != ast::local_crate {
- inline::maybe_instantiate_inline(ccx, did)
+ inline::maybe_instantiate_inline(ccx, did, true)
} else { did };
assert did.crate == ast::local_crate;
- monomorphize::monomorphic_fn(ccx, did, substs, None, None).val
+ monomorphize::monomorphic_fn(ccx, did, substs, None, None, None).val
} else if did.crate == ast::local_crate {
get_item_val(ccx, did.node)
} else {
let tcx = ccx.tcx;
let name = csearch::get_symbol(ccx.sess.cstore, did);
- let class_ty = ty::subst_tps(tcx, substs,
+ let class_ty = ty::subst_tps(tcx, substs, None,
ty::lookup_item_type(tcx, parent_id).ty);
let llty = type_of_dtor(ccx, class_ty);
get_extern_fn(ccx.externs, ccx.llmod, name, lib::llvm::CCallConv,
let v_id = variant.id;
for vec::each(fn_ty.sig.inputs) |a| {
let llfldp_a = GEP_enum(cx, a_tup, tid, v_id, tps, j);
- let ty_subst = ty::subst_tps(ccx.tcx, tps, a.ty);
+ // XXX: Is "None" right here?
+ let ty_subst = ty::subst_tps(ccx.tcx, tps, None, a.ty);
cx = f(cx, llfldp_a, ty_subst);
j += 1u;
}
if lhs_sz < rhs_sz {
trunc(rhs, lhs_llty)
} else if lhs_sz > rhs_sz {
- // FIXME (See discussion at #1570): If shifting by negative
+        // FIXME (#1877): If shifting by negative
// values becomes not undefined then this is wrong.
zext(rhs, lhs_llty)
} else {
}
fn simple_block_scope() -> block_kind {
- block_scope({loop_break: None, mut cleanups: ~[],
+ block_scope({loop_break: None, loop_label: None, mut cleanups: ~[],
mut cleanup_paths: ~[], mut landing_pad: None})
}
n, opt_node_info);
}
-fn loop_scope_block(bcx: block, loop_break: block, n: ~str,
- opt_node_info: Option<node_info>) -> block {
+fn loop_scope_block(bcx: block, loop_break: block, loop_label: Option<ident>,
+ n: ~str, opt_node_info: Option<node_info>) -> block {
return new_block(bcx.fcx, Some(bcx), block_scope({
loop_break: Some(loop_break),
+ loop_label: loop_label,
mut cleanups: ~[],
mut cleanup_paths: ~[],
mut landing_pad: None
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
-fn new_fn_ctxt_w_id(ccx: @crate_ctxt, path: path,
- llfndecl: ValueRef, id: ast::node_id,
+fn new_fn_ctxt_w_id(ccx: @crate_ctxt,
+ path: path,
+ llfndecl: ValueRef,
+ id: ast::node_id,
+ impl_id: Option<ast::def_id>,
param_substs: Option<param_substs>,
sp: Option<span>) -> fn_ctxt {
let llbbs = mk_standard_basic_blocks(llfndecl);
lllocals: HashMap(),
llupvars: HashMap(),
id: id,
+ impl_id: impl_id,
param_substs: param_substs,
span: sp,
path: path,
fn new_fn_ctxt(ccx: @crate_ctxt, path: path, llfndecl: ValueRef,
sp: Option<span>) -> fn_ctxt {
- return new_fn_ctxt_w_id(ccx, path, llfndecl, -1, None, sp);
+ return new_fn_ctxt_w_id(ccx, path, llfndecl, -1, None, None, sp);
}
// NB: must keep 4 fns in sync:
ty_self: self_arg,
param_substs: Option<param_substs>,
id: ast::node_id,
+ impl_id: Option<ast::def_id>,
maybe_load_env: fn(fn_ctxt),
finish: fn(block)) {
ccx.stats.n_closures += 1;
set_uwtable(llfndecl);
// Set up arguments to the function.
- let fcx = new_fn_ctxt_w_id(ccx, path, llfndecl, id, param_substs,
+ let fcx = new_fn_ctxt_w_id(ccx, path, llfndecl, id, impl_id, param_substs,
Some(body.span));
let raw_llargs = create_llargs_for_fn_args(fcx, ty_self, decl.inputs);
// translation calls that don't have a return value (trans_crate,
// trans_mod, trans_item, et cetera) and those that do
// (trans_block, trans_expr, et cetera).
-
- if !ccx.class_ctors.contains_key(id) // hack --
- /* avoids the need for special cases to assign a type to
- the constructor body (since it has no explicit return) */
- &&
- (body.node.expr.is_none() ||
- ty::type_is_bot(block_ty) ||
- ty::type_is_nil(block_ty)) {
+ if body.node.expr.is_none() || ty::type_is_bot(block_ty) ||
+ ty::type_is_nil(block_ty)
+ {
bcx = controlflow::trans_block(bcx, body, expr::Ignore);
} else {
bcx = controlflow::trans_block(bcx, body, expr::SaveIn(fcx.llretptr));
}
+
finish(bcx);
cleanup_and_Br(bcx, bcx_top, fcx.llreturn);
llfndecl: ValueRef,
ty_self: self_arg,
param_substs: Option<param_substs>,
- id: ast::node_id) {
+ id: ast::node_id,
+ impl_id: Option<ast::def_id>) {
let do_time = ccx.sess.trans_stats();
let start = if do_time { time::get_time() }
else { {sec: 0i64, nsec: 0i32} };
let _icx = ccx.insn_ctxt("trans_fn");
ccx.stats.n_fns += 1;
trans_closure(ccx, path, decl, body, llfndecl, ty_self,
- param_substs, id,
+ param_substs, id, impl_id,
|fcx| {
if ccx.sess.opts.extra_debuginfo {
debuginfo::create_function(fcx);
ty: varg.ty,
ident: special_idents::arg,
id: varg.id});
- let fcx = new_fn_ctxt_w_id(ccx, ~[], llfndecl, variant.node.id,
+ let fcx = new_fn_ctxt_w_id(ccx, ~[], llfndecl, variant.node.id, None,
param_substs, None);
let raw_llargs = create_llargs_for_fn_args(fcx, no_self, fn_args);
let ty_param_substs = match param_substs {
finish_fn(fcx, lltop);
}
-fn trans_class_ctor(ccx: @crate_ctxt, path: path, decl: ast::fn_decl,
- body: ast::blk, llctor_decl: ValueRef,
- psubsts: param_substs, ctor_id: ast::node_id,
- parent_id: ast::def_id, sp: span) {
- // Add ctor to the ctor map
- ccx.class_ctors.insert(ctor_id, parent_id);
-
- // Translate the ctor
-
- // Set up the type for the result of the ctor
- // kludgy -- this wouldn't be necessary if the typechecker
- // special-cased constructors, then we could just look up
- // the ctor's return type.
- let rslt_ty = ty::mk_class(ccx.tcx, parent_id,
- dummy_substs(psubsts.tys));
-
- // Make the fn context
- let fcx = new_fn_ctxt_w_id(ccx, path, llctor_decl, ctor_id,
- Some(psubsts), Some(sp));
- let raw_llargs = create_llargs_for_fn_args(fcx, no_self, decl.inputs);
- let mut bcx_top = top_scope_block(fcx, body.info());
- let lltop = bcx_top.llbb;
- let arg_tys = ty::ty_fn_args(node_id_type(bcx_top, ctor_id));
- bcx_top = copy_args_to_allocas(fcx, bcx_top, decl.inputs,
- raw_llargs, arg_tys);
-
- // Create a temporary for `self` that we will return at the end
- let selfdatum = datum::scratch_datum(bcx_top, rslt_ty, true);
-
- // Initialize dtor flag (if any) to 1
- if ty::ty_dtor(bcx_top.tcx(), parent_id).is_some() {
- let flag = GEPi(bcx_top, selfdatum.val, [0, 1]);
- Store(bcx_top, C_u8(1), flag);
- }
-
- // initialize fields to zero
- let mut bcx = bcx_top;
-
- // note we don't want to take *or* drop self.
- fcx.llself = Some(ValSelfData {v: selfdatum.val,
- t: rslt_ty,
- is_owned: false});
-
- // Translate the body of the ctor
- bcx = controlflow::trans_block(bcx, body, expr::Ignore);
-
- // Generate the return expression
- bcx = selfdatum.move_to(bcx, datum::INIT, fcx.llretptr);
-
- cleanup_and_leave(bcx, None, Some(fcx.llreturn));
- Unreachable(bcx);
- finish_fn(fcx, lltop);
-}
-
fn trans_class_dtor(ccx: @crate_ctxt, path: path,
body: ast::blk, dtor_id: ast::node_id,
psubsts: Option<param_substs>,
let mut class_ty = ty::lookup_item_type(tcx, parent_id).ty;
/* Substitute in the class type if necessary */
do option::iter(&psubsts) |ss| {
- class_ty = ty::subst_tps(tcx, ss.tys, class_ty);
+ class_ty = ty::subst_tps(tcx, ss.tys, ss.self_ty, class_ty);
}
/* The dtor takes a (null) output pointer, and a self argument,
}
/* Translate the dtor body */
trans_fn(ccx, path, ast_util::dtor_dec(),
- body, lldecl, impl_self(class_ty), psubsts, dtor_id);
+ body, lldecl, impl_self(class_ty), psubsts, dtor_id, None);
lldecl
}
let llfndecl = get_item_val(ccx, item.id);
trans_fn(ccx,
vec::append(*path, ~[path_name(item.ident)]),
- decl, body, llfndecl, no_self, None, item.id);
+ decl, body, llfndecl, no_self, None, item.id, None);
} else {
for vec::each(body.node.stmts) |stmt| {
match stmt.node {
}
}
}
- ast::item_impl(tps, trait_refs, self_ast_ty, ms) => {
- meth::trans_impl(ccx, *path, item.ident, ms, tps, None);
-
- // Translate any methods that have provided implementations.
- for trait_refs.each |trait_ref_ptr| {
- let trait_def = ccx.tcx.def_map.get(trait_ref_ptr.ref_id);
-
- // XXX: Cross-crate default methods.
- let trait_id = def_id_of_def(trait_def);
- if trait_id.crate != ast::local_crate {
- loop;
- }
-
- // Get the self type.
- let self_ty;
- match ccx.tcx.ast_ty_to_ty_cache.get(self_ast_ty) {
- ty::atttce_resolved(self_type) => self_ty = self_type,
- ty::atttce_unresolved => {
- ccx.tcx.sess.impossible_case(item.span,
- ~"didn't cache self ast ty");
- }
- }
-
- match ccx.tcx.items.get(trait_id.node) {
- ast_map::node_item(trait_item, _) => {
- match trait_item.node {
- ast::item_trait(tps, _, trait_methods) => {
- trans_trait(ccx, tps, trait_methods, path,
- item.ident, self_ty);
- }
- _ => {
- ccx.tcx.sess.impossible_case(item.span,
- ~"trait item not a \
- trait");
- }
- }
- }
- _ => {
- ccx.tcx.sess.impossible_case(item.span, ~"no trait item");
- }
- }
- }
+ ast::item_impl(tps, _, _, ms) => {
+ meth::trans_impl(ccx, *path, item.ident, ms, tps, None, item.id);
}
ast::item_mod(m) => {
trans_mod(ccx, m);
tps: ~[ast::ty_param], path: @ast_map::path,
ident: ast::ident, id: ast::node_id) {
if tps.len() == 0u {
- let psubsts = {tys: ty::ty_params_to_tys(ccx.tcx, tps),
- vtables: None,
- bounds: @~[]};
- do option::iter(&struct_def.ctor) |ctor| {
- trans_class_ctor(ccx, *path, ctor.node.dec, ctor.node.body,
- get_item_val(ccx, ctor.node.id), psubsts,
- ctor.node.id, local_def(id), ctor.span);
- }
do option::iter(&struct_def.dtor) |dtor| {
trans_class_dtor(ccx, *path, dtor.node.body,
dtor.node.id, None, None, local_def(id));
// If there are ty params, the ctor will get monomorphized
// Translate methods
- meth::trans_impl(ccx, *path, ident, struct_def.methods, tps, None);
-}
-
-fn trans_trait(ccx: @crate_ctxt, tps: ~[ast::ty_param],
- trait_methods: ~[ast::trait_method],
- path: @ast_map::path, ident: ast::ident,
- self_ty: ty::t) {
- // Translate any methods that have provided implementations
- let (_, provided_methods) = ast_util::split_trait_methods(trait_methods);
- meth::trans_impl(ccx, *path, ident, provided_methods, tps, Some(self_ty));
+ meth::trans_impl(ccx, *path, ident, struct_def.methods, tps, None, id);
}
// Translate a module. Doing this amounts to translating the items in the
// this to item_symbols
match substs {
Some(ss) => {
- let mono_ty = ty::subst_tps(ccx.tcx, ss.tys, t);
+ let mono_ty = ty::subst_tps(ccx.tcx, ss.tys, ss.self_ty, t);
mangle_exported_name(
ccx,
vec::append(path,
}
}
}
- ast_map::node_ctor(nm, _, ctor, _, pt) => {
- let my_path = vec::append(*pt, ~[path_name(nm)]);
- register_fn(ccx, ctor.span, my_path, ctor.node.id)
- }
ast_map::node_dtor(_, dt, parent_id, pt) => {
/*
Don't just call register_fn, since we don't want to add
}
-fn decl_crate_map(sess: session::session, mapmeta: link_meta,
+fn decl_crate_map(sess: session::Session, mapmeta: link_meta,
llmod: ModuleRef) -> ValueRef {
let targ_cfg = sess.targ_cfg;
let int_type = T_int(targ_cfg);
false);
}
-fn trans_crate(sess: session::session,
+fn trans_crate(sess: session::Session,
crate: @ast::crate,
tcx: ty::ctxt,
output: &Path,
crate_map: crate_map,
mut uses_gc: false,
dbg_cx: dbg_cx,
- class_ctors: HashMap(),
mut do_not_commit_warning_issued: false};
use lib::llvm::{ValueRef, TypeRef, BasicBlockRef, BuilderRef, ModuleRef};
use lib::llvm::{Opcode, IntPredicate, RealPredicate, True, False,
CallConv, TypeKind, AtomicBinOp, AtomicOrdering};
-use driver::session::session;
use common::*;
fn B(cx: block) -> BuilderRef {
//
// XXX: Use a small-vector optimization to avoid allocations here.
fn GEPi(cx: block, base: ValueRef, ixs: &[uint]) -> ValueRef {
- let mut v: ~[ValueRef] = ~[];
- for vec::each(ixs) |i| { v.push(C_i32(*i as i32)); }
+ let v = do vec::map(ixs) |i| { C_i32(*i as i32) };
count_insn(cx, "gepi");
return InBoundsGEP(cx, base, v);
}
fn trans_def(bcx: block, def: ast::def, ref_expr: @ast::expr) -> Callee {
match def {
- ast::def_fn(did, _) => {
+ ast::def_fn(did, _) | ast::def_static_method(did, None, _) => {
fn_callee(bcx, trans_fn_ref(bcx, did, ref_expr.id))
}
- ast::def_static_method(did, _) => {
- fn_callee(bcx, meth::trans_static_method_callee(bcx, did,
+ ast::def_static_method(impl_did, Some(trait_did), _) => {
+ fn_callee(bcx, meth::trans_static_method_callee(bcx, impl_did,
+ trait_did,
ref_expr.id))
}
ast::def_variant(tid, vid) => {
// Polytype of the function item (may have type params)
let fn_tpt = ty::lookup_item_type(tcx, def_id);
+ // Modify the def_id if this is a default method; we want to be
+ // monomorphizing the trait's code.
+ let (def_id, opt_impl_did) =
+ match tcx.provided_method_sources.find(def_id) {
+ None => (def_id, None),
+ Some(source) => (source.method_id, Some(source.impl_id))
+ };
+
// Check whether this fn has an inlined copy and, if so, redirect
// def_id to the local id of the inlined copy.
let def_id = {
if def_id.crate != ast::local_crate {
- inline::maybe_instantiate_inline(ccx, def_id)
+ let may_translate = opt_impl_did.is_none();
+ inline::maybe_instantiate_inline(ccx, def_id, may_translate)
} else {
def_id
}
};
- // We must monomorphise if the fn has type parameters or is a rust
- // intrinsic. In particular, if we see an intrinsic that is
- // inlined from a different crate, we want to reemit the intrinsic
- // instead of trying to call it in the other crate.
- let must_monomorphise = type_params.len() > 0 || {
+ // We must monomorphise if the fn has type parameters, is a rust
+ // intrinsic, or is a default method. In particular, if we see an
+ // intrinsic that is inlined from a different crate, we want to reemit the
+ // intrinsic instead of trying to call it in the other crate.
+ let must_monomorphise = type_params.len() > 0 ||
+ opt_impl_did.is_some() || {
if def_id.crate == ast::local_crate {
let map_node = session::expect(
ccx.sess,
let mut {val, must_cast} =
monomorphize::monomorphic_fn(ccx, def_id, type_params,
- vtables, Some(ref_id));
+ vtables, opt_impl_did, Some(ref_id));
if must_cast && ref_id != 0 {
// Monotype of the REFERENCE to the function (type params
// are subst'd)
match callee.data {
Fn(fn_data) => {
let substituted = ty::subst_tps(callee.bcx.tcx(),
- type_params, fty);
+ type_params,
+ None,
+ fty);
let mut llfnty = type_of::type_of(callee.bcx.ccx(),
substituted);
llfnty = T_ptr(struct_elt(llfnty, 0));
fn build_closure(bcx0: block,
cap_vars: ~[capture::capture_var],
ck: ty::closure_kind,
- id: ast::node_id,
include_ret_handle: Option<ValueRef>) -> closure_result {
let _icx = bcx0.insn_ctxt("closure::build_closure");
// If we need to, package up the iterator body to call
let mut env_vals = ~[];
for vec::each(cap_vars) |cap_var| {
debug!("Building closure: captured variable %?", *cap_var);
- let datum = expr::trans_local_var(bcx, id, cap_var.def);
+ let datum = expr::trans_local_var(bcx, cap_var.def);
match cap_var.mode {
capture::cap_ref => {
assert ck == ty::ck_block;
let cap_vars = capture::compute_capture_vars(ccx.tcx, id, proto,
cap_clause);
let ret_handle = match is_loop_body { Some(x) => x, None => None };
- let {llbox, cdata_ty, bcx} = build_closure(bcx, cap_vars, ck, id,
+ let {llbox, cdata_ty, bcx} = build_closure(bcx, cap_vars, ck,
ret_handle);
trans_closure(ccx, sub_path, decl, body, llfn, no_self,
- bcx.fcx.param_substs, id, |fcx| {
+ bcx.fcx.param_substs, id, None, |fcx| {
load_environment(fcx, cdata_ty, cap_vars,
ret_handle.is_some(), ck);
}, |bcx| {
}
ty::proto_bare => {
trans_closure(ccx, sub_path, decl, body, llfn, no_self, None,
- id, |_fcx| { }, |_bcx| { });
+ id, None, |_fcx| { }, |_bcx| { });
rslt(bcx, C_null(T_opaque_box_ptr(ccx)))
}
ty::proto_vstore(ty::vstore_fixed(_)) => {
use std::map::{HashMap,Set};
use syntax::{ast, ast_map};
use driver::session;
-use session::session;
+use session::Session;
use middle::ty;
use back::{link, abi, upcall};
use syntax::codemap::span;
// Crate context. Every crate we compile has one of these.
type crate_ctxt = {
- sess: session::session,
+ sess: session::Session,
llmod: ModuleRef,
td: target_data,
tn: type_names,
// is not emitted by LLVM's GC pass when no functions use GC.
mut uses_gc: bool,
dbg_cx: Option<debuginfo::debug_ctxt>,
- // Mapping from class constructors to parent class --
- // used in base::trans_closure
- // parent_class must be a def_id because ctors can be
- // inlined, so the parent may be in a different crate
- class_ctors: HashMap<ast::node_id, ast::def_id>,
mut do_not_commit_warning_issued: bool};
// Types used for llself.
enum local_val { local_mem(ValueRef), local_imm(ValueRef), }
+// Here `self_ty` is the real type of the self parameter to this method. It
+// will only be set in the case of default methods.
type param_substs = {tys: ~[ty::t],
vtables: Option<typeck::vtable_res>,
- bounds: @~[ty::param_bounds]};
+ bounds: @~[ty::param_bounds],
+ self_ty: Option<ty::t>};
fn param_substs_to_str(tcx: ty::ctxt, substs: ¶m_substs) -> ~str {
fmt!("param_substs {tys:%?, vtables:%?, bounds:%?}",
mut llreturn: BasicBlockRef,
// The 'self' value currently in use in this function, if there
// is one.
+ //
+ // NB: This is the type of the self *variable*, not the self *type*. The
+ // self type is set only for default methods, while the self variable is
+ // set for all methods.
mut llself: Option<ValSelfData>,
// The a value alloca'd for calls to upcalls.rust_personality. Used when
// outputting the resume instruction.
// a user-defined function.
id: ast::node_id,
+ // The def_id of the impl we're inside, or None if we aren't inside one.
+ impl_id: Option<ast::def_id>,
+
// If this function is being monomorphized, this contains the type
// substitutions used.
param_substs: Option<param_substs>,
type scope_info = {
loop_break: Option<block>,
+ loop_label: Option<ident>,
// A list of functions that must be run at when leaving this
// block, cleaning up any variables that were introduced in the
// block.
terminated: false,
unreachable: false,
parent: parent,
- kind: kind,
+ kind: move kind,
is_lpad: is_lpad,
node_info: node_info,
fcx: fcx
impl block {
pure fn ccx() -> @crate_ctxt { self.fcx.ccx }
pure fn tcx() -> ty::ctxt { self.fcx.ccx.tcx }
- pure fn sess() -> session { self.fcx.ccx.sess }
+ pure fn sess() -> Session { self.fcx.ccx.sess }
fn node_id_to_str(id: ast::node_id) -> ~str {
ast_map::node_id_to_str(self.tcx().items, id, self.sess().intr())
datum::DatumMode),
}
-type mono_id_ = {def: ast::def_id, params: ~[mono_param_id]};
+type mono_id_ = {
+ def: ast::def_id,
+ params: ~[mono_param_id],
+ impl_did_opt: Option<ast::def_id>
+};
type mono_id = @mono_id_;
return build::And(cx, bumped, build::Not(cx, mask));
}
-fn path_str(sess: session::session, p: path) -> ~str {
+fn path_str(sess: session::Session, p: path) -> ~str {
let mut r = ~"", first = true;
for vec::each(p) |e| {
match *e {
fn monomorphize_type(bcx: block, t: ty::t) -> ty::t {
match bcx.fcx.param_substs {
- Some(substs) => ty::subst_tps(bcx.tcx(), substs.tys, t),
+ Some(substs) => {
+ ty::subst_tps(bcx.tcx(), substs.tys, substs.self_ty, t)
+ }
_ => { assert !ty::type_has_params(t); t }
}
}
let params = ty::node_id_to_type_params(tcx, id);
match bcx.fcx.param_substs {
Some(substs) => {
- vec::map(params, |t| ty::subst_tps(tcx, substs.tys, *t))
+ do vec::map(params) |t| {
+ ty::subst_tps(tcx, substs.tys, substs.self_ty, *t)
+ }
}
_ => params
}
typeck::vtable_static(trait_id, tys, sub) => {
let tys = match fcx.param_substs {
Some(substs) => {
- vec::map(tys, |t| ty::subst_tps(tcx, substs.tys, *t))
+ do vec::map(tys) |t| {
+ ty::subst_tps(tcx, substs.tys, substs.self_ty, *t)
+ }
}
_ => tys
};
tps: tps}
}
-fn struct_field(index: uint) -> [uint]/3 {
+fn struct_field(index: uint) -> [uint * 3] {
//! The GEPi sequence to access a field of a record/struct.
[0, 0, index]
}
-fn struct_dtor() -> [uint]/2 {
+fn struct_dtor() -> [uint * 2] {
//! The GEPi sequence to access the dtor of a struct.
[0, 1]
// | body_bcx_out --+
// next_bcx
- let loop_bcx = loop_scope_block(bcx, next_bcx, ~"`while`", body.info());
+ let loop_bcx = loop_scope_block(bcx, next_bcx, None, ~"`while`",
+ body.info());
let cond_bcx_in = scope_block(loop_bcx, cond.info(), ~"while loop cond");
let body_bcx_in = scope_block(loop_bcx, body.info(), ~"while loop body");
Br(bcx, loop_bcx.llbb);
return next_bcx;
}
-fn trans_loop(bcx:block, body: ast::blk) -> block {
+fn trans_loop(bcx:block, body: ast::blk, opt_label: Option<ident>) -> block {
let _icx = bcx.insn_ctxt("trans_loop");
let next_bcx = sub_block(bcx, ~"next");
- let body_bcx_in = loop_scope_block(bcx, next_bcx, ~"`loop`", body.info());
+ let body_bcx_in = loop_scope_block(bcx, next_bcx, opt_label, ~"`loop`",
+ body.info());
Br(bcx, body_bcx_in.llbb);
let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore);
cleanup_and_Br(body_bcx_out, body_bcx_in, body_bcx_in.llbb);
}
}
-fn trans_break_cont(bcx: block, to_end: bool)
+fn trans_break_cont(bcx: block, opt_label: Option<ident>, to_end: bool)
-> block {
let _icx = bcx.insn_ctxt("trans_break_cont");
// Locate closest loop block, outputting cleanup as we go.
let mut target;
loop {
match unwind.kind {
- block_scope({loop_break: Some(brk), _}) => {
- target = if to_end {
- brk
- } else {
- unwind
- };
- break;
+ block_scope({loop_break: Some(brk), loop_label: l, _}) => {
+ // If we're looking for a labeled loop, check the label...
+ target = if to_end {
+ brk
+ } else {
+ unwind
+ };
+ match opt_label {
+ Some(desired) => match l {
+ Some(actual) if actual == desired => break,
+ // If it doesn't match the one we want,
+ // don't break
+ _ => ()
+ },
+ None => break
+ }
}
_ => ()
}
return bcx;
}
-fn trans_break(bcx: block) -> block {
- return trans_break_cont(bcx, true);
+fn trans_break(bcx: block, label_opt: Option<ident>) -> block {
+ return trans_break_cont(bcx, label_opt, true);
}
-fn trans_cont(bcx: block) -> block {
- return trans_break_cont(bcx, false);
+fn trans_cont(bcx: block, label_opt: Option<ident>) -> block {
+ return trans_break_cont(bcx, label_opt, false);
}
fn trans_ret(bcx: block, e: Option<@ast::expr>) -> block {
// This works like copy_val, except that it deinitializes the source.
// Since it needs to zero out the source, src also needs to be an lval.
//
- // FIXME (#839): We always zero out the source. Ideally we would
- // detect the case where a variable is always deinitialized by
- // block exit and thus doesn't need to be dropped.
fn move_to(bcx: block, action: CopyAction, dst: ValueRef) -> block {
let _icx = bcx.insn_ctxt("move_to");
let mut bcx = bcx;
use syntax::{ast, codemap, ast_util, ast_map};
use syntax::parse::token::ident_interner;
use codemap::span;
-use ast::ty;
+use ast::Ty;
use pat_util::*;
use util::ppaux::ty_to_str;
use driver::session::session;
return mdval;
}
-fn line_from_span(cm: codemap::codemap, sp: span) -> uint {
+fn line_from_span(cm: codemap::CodeMap, sp: span) -> uint {
codemap::lookup_char_pos(cm, sp.lo).line
}
}
fn create_vec(cx: @crate_ctxt, vec_t: ty::t, elem_t: ty::t,
- vec_ty_span: codemap::span, elem_ty: @ast::ty)
+ vec_ty_span: codemap::span, elem_ty: @ast::Ty)
-> @metadata<tydesc_md> {
let fname = filename_from_span(cx, vec_ty_span);
let file_node = create_file(cx, fname);
return @{node: llnode, data: {hash: ty::type_id(vec_t)}};
}
-fn create_ty(_cx: @crate_ctxt, _t: ty::t, _ty: @ast::ty)
+fn create_ty(_cx: @crate_ctxt, _t: ty::t, _ty: @ast::Ty)
-> @metadata<tydesc_md> {
/*let cache = get_cache(cx);
match cached_metadata::<@metadata<tydesc_md>>(
ast_map::node_method(method, _, _) => {
(method.ident, method.decl.output, method.id)
}
- ast_map::node_ctor(nm, _, ctor, _, _) => {
- // FIXME: output type may be wrong (#2194)
- (nm, ctor.node.dec.output, ctor.node.id)
- }
ast_map::node_expr(expr) => {
match expr.node {
ast::expr_fn(_, decl, _, _) => {
match expr.node {
ast::expr_break(label_opt) => {
- if label_opt.is_some() {
- bcx.tcx().sess.span_unimpl(expr.span, ~"labeled break");
- }
- return controlflow::trans_break(bcx);
+ return controlflow::trans_break(bcx, label_opt);
}
ast::expr_again(label_opt) => {
- if label_opt.is_some() {
- bcx.tcx().sess.span_unimpl(expr.span, ~"labeled again");
- }
- return controlflow::trans_cont(bcx);
+ return controlflow::trans_cont(bcx, label_opt);
}
ast::expr_ret(ex) => {
return controlflow::trans_ret(bcx, ex);
ast::expr_while(cond, body) => {
return controlflow::trans_while(bcx, cond, body);
}
- ast::expr_loop(body, _) => {
- return controlflow::trans_loop(bcx, body);
+ ast::expr_loop(body, opt_label) => {
+ return controlflow::trans_loop(bcx, body, opt_label);
}
ast::expr_assign(dst, src) => {
let src_datum = unpack_datum!(bcx, trans_to_datum(bcx, src));
};
match def {
- ast::def_fn(did, _) => {
+ ast::def_fn(did, _) | ast::def_static_method(did, None, _) => {
let fn_data = callee::trans_fn_ref(bcx, did, ref_expr.id);
return fn_data_to_datum(bcx, did, fn_data, lldest);
}
- ast::def_static_method(did, _) => {
- let fn_data = meth::trans_static_method_callee(bcx, did,
+ ast::def_static_method(impl_did, Some(trait_did), _) => {
+ let fn_data = meth::trans_static_method_callee(bcx, impl_did,
+ trait_did,
ref_expr.id);
- return fn_data_to_datum(bcx, did, fn_data, lldest);
+ return fn_data_to_datum(bcx, impl_did, fn_data, lldest);
}
ast::def_variant(tid, vid) => {
if ty::enum_variant_with_id(ccx.tcx, tid, vid).args.len() > 0u {
_ => {
DatumBlock {
bcx: bcx,
- datum: trans_local_var(bcx, ref_expr.id, def)
+ datum: trans_local_var(bcx, def)
}
}
}
}
-fn trans_local_var(bcx: block, ref_id: ast::node_id, def: ast::def) -> Datum {
+fn trans_local_var(bcx: block, def: ast::def) -> Datum {
let _icx = bcx.insn_ctxt("trans_local_var");
return match def {
}
}
ast::def_arg(nid, _) => {
- take_local(bcx, ref_id, bcx.fcx.llargs, nid)
+ take_local(bcx, bcx.fcx.llargs, nid)
}
ast::def_local(nid, _) | ast::def_binding(nid, _) => {
- take_local(bcx, ref_id, bcx.fcx.lllocals, nid)
+ take_local(bcx, bcx.fcx.lllocals, nid)
}
ast::def_self(nid) => {
let self_info: ValSelfData = match bcx.fcx.llself {
// This cast should not be necessary. We should cast self *once*,
// but right now this conflicts with default methods.
- let llselfty = T_ptr(type_of::type_of(bcx.ccx(), self_info.t));
+ let real_self_ty = monomorphize_type(bcx, self_info.t);
+ let llselfty = T_ptr(type_of::type_of(bcx.ccx(), real_self_ty));
+
let casted_val = PointerCast(bcx, self_info.v, llselfty);
Datum {
val: casted_val,
};
fn take_local(bcx: block,
- ref_id: ast::node_id,
table: HashMap<ast::node_id, local_val>,
nid: ast::node_id) -> Datum {
- let is_last_use = match bcx.ccx().maps.last_use_map.find(ref_id) {
- None => false,
- Some(vars) => (*vars).contains(&nid)
- };
-
- let source = if is_last_use {FromLastUseLvalue} else {FromLvalue};
let (v, mode) = match table.find(nid) {
Some(local_mem(v)) => (v, ByRef),
};
let ty = node_id_type(bcx, nid);
- debug!("take_local(nid=%?, last_use=%b, v=%s, mode=%?, ty=%s)",
- nid, is_last_use, bcx.val_str(v), mode, bcx.ty_to_str(ty));
+ debug!("take_local(nid=%?, v=%s, mode=%?, ty=%s)",
+ nid, bcx.val_str(v), mode, bcx.ty_to_str(ty));
- Datum { val: v, ty: ty, mode: mode, source: source }
+ Datum { val: v, ty: ty, mode: mode, source: FromLvalue }
}
}
// A user-defined operator method
if bcx.ccx().maps.method_map.find(expr.id).is_some() {
- // FIXME(#2582) evaluates the receiver twice!!
+ // FIXME(#2528) evaluates the receiver twice!!
let scratch = scratch_datum(bcx, dst_datum.ty, false);
let bcx = trans_overloaded_op(bcx, expr, dst, ~[src],
SaveIn(scratch.val), DoAutorefArg);
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
-use driver::session::{session, arch_x86_64};
+use driver::session::arch_x86_64;
use syntax::codemap::span;
use libc::c_uint;
use syntax::{attr, ast_map};
Float => 4,
Double => 8,
Struct => {
- do vec::foldl(0, struct_tys(ty)) |s, t| {
- s + ty_size(*t)
- }
+ let size = do vec::foldl(0, struct_tys(ty)) |s, t| {
+ align(s, *t) + ty_size(*t)
+ };
+ align(size, ty)
}
Array => {
let len = llvm::LLVMGetArrayLength(ty) as uint;
{
debug!("trans_intrinsic(item.ident=%s)", ccx.sess.str_of(item.ident));
- let fcx = new_fn_ctxt_w_id(ccx, path, decl, item.id,
+ let fcx = new_fn_ctxt_w_id(ccx, path, decl, item.id, None,
Some(substs), Some(item.span));
let mut bcx = top_scope_block(fcx, None), lltop = bcx.llbb;
match ccx.sess.str_of(item.ident) {
let static_ti = get_tydesc(ccx, tp_ty);
glue::lazily_emit_all_tydesc_glue(ccx, static_ti);
- // FIXME (#2712): change this to T_ptr(ccx.tydesc_ty) when the
+ // FIXME (#3727): change this to T_ptr(ccx.tydesc_ty) when the
// core::sys copy of the get_tydesc interface dies off.
let td = PointerCast(bcx, static_ti.tydesc, T_ptr(T_nil()));
Store(bcx, td, fcx.llretptr);
)));
let llty = type_of_fn_from_ty(ccx, t);
let llfndecl = decl_internal_cdecl_fn(ccx.llmod, ps, llty);
- trans_fn(ccx, path, decl, body, llfndecl, no_self, None, id);
+ trans_fn(ccx, path, decl, body, llfndecl, no_self, None, id, None);
return llfndecl;
}
// Index tydesc by addrspace.
if ti.addrspace > gc_box_addrspace {
let llty = T_ptr(ccx.tydesc_type);
- let addrspace_name = #fmt("_gc_addrspace_metadata_%u",
+ let addrspace_name = fmt!("_gc_addrspace_metadata_%u",
ti.addrspace as uint);
let addrspace_gvar = str::as_c_str(addrspace_name, |buf| {
llvm::LLVMAddGlobal(ccx.llmod, llty, buf)
use base::{trans_item, get_item_val, self_arg, trans_fn,
impl_self, get_insn_ctxt};
-fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id)
- -> ast::def_id
-{
+// `translate` will be true if this function is allowed to translate the
+// item and false otherwise. Currently, this parameter is set to false when
+// translating default methods.
+fn maybe_instantiate_inline(ccx: @crate_ctxt, fn_id: ast::def_id,
+ translate: bool)
+ -> ast::def_id {
let _icx = ccx.insn_ctxt("maybe_instantiate_inline");
match ccx.external.find(fn_id) {
Some(Some(node_id)) => {
csearch::found(ast::ii_item(item)) => {
ccx.external.insert(fn_id, Some(item.id));
ccx.stats.n_inlines += 1;
- trans_item(ccx, *item);
+ if translate { trans_item(ccx, *item); }
local_def(item.id)
}
- csearch::found(ast::ii_ctor(ctor, _, _, _)) => {
- ccx.external.insert(fn_id, Some(ctor.node.id));
- local_def(ctor.node.id)
- }
csearch::found(ast::ii_foreign(item)) => {
ccx.external.insert(fn_id, Some(item.id));
local_def(item.id)
_ => ccx.sess.bug(~"maybe_instantiate_inline: item has a \
non-enum parent")
}
- trans_item(ccx, *item);
+ if translate { trans_item(ccx, *item); }
local_def(my_id)
}
csearch::found_parent(_, _) => {
ccx.external.insert(fn_id, Some(mth.id));
let {bounds: impl_bnds, region_param: _, ty: impl_ty} =
ty::lookup_item_type(ccx.tcx, impl_did);
- if (*impl_bnds).len() + mth.tps.len() == 0u {
+ if translate && (*impl_bnds).len() + mth.tps.len() == 0u {
let llfn = get_item_val(ccx, mth.id);
let path = vec::append(
ty::item_path(ccx.tcx, impl_did),
~[path_name(mth.ident)]);
trans_fn(ccx, path, mth.decl, mth.body,
- llfn, impl_self(impl_ty), None, mth.id);
+ llfn, impl_self(impl_ty), None, mth.id,
+ Some(impl_did));
}
local_def(mth.id)
}
}
}
}
+
*/
fn trans_impl(ccx: @crate_ctxt, path: path, name: ast::ident,
methods: ~[@ast::method], tps: ~[ast::ty_param],
- self_ty: Option<ty::t>) {
+ self_ty: Option<ty::t>, id: ast::node_id) {
let _icx = ccx.insn_ctxt("impl::trans_impl");
if tps.len() > 0u { return; }
let sub_path = vec::append_one(path, path_name(name));
if method.tps.len() == 0u {
let llfn = get_item_val(ccx, method.id);
let path = vec::append_one(sub_path, path_name(method.ident));
- trans_method(ccx, path, *method, None, self_ty, llfn);
+
+ let param_substs_opt;
+ match self_ty {
+ None => param_substs_opt = None,
+ Some(self_ty) => {
+ param_substs_opt = Some({
+ tys: ~[],
+ vtables: None,
+ bounds: @~[],
+ self_ty: Some(self_ty)
+ });
+ }
+ }
+
+ trans_method(ccx, path, *method, param_substs_opt, self_ty, llfn,
+ ast_util::local_def(id));
}
}
}
will be none if this is not a default method and must always be present
if this is a default method.
- `llfn`: the LLVM ValueRef for the method
+- `impl_id`: the node ID of the impl this method is inside
*/
fn trans_method(ccx: @crate_ctxt,
path: path,
method: &ast::method,
param_substs: Option<param_substs>,
base_self_ty: Option<ty::t>,
- llfn: ValueRef) {
+ llfn: ValueRef,
+ impl_id: ast::def_id) {
// figure out how self is being passed
let self_arg = match method.self_ty.node {
Some(provided_self_ty) => self_ty = provided_self_ty
}
let self_ty = match param_substs {
- None => self_ty,
- Some({tys: ref tys, _}) => ty::subst_tps(ccx.tcx, *tys, self_ty)
+ None => self_ty,
+ Some({tys: ref tys, _}) => {
+ ty::subst_tps(ccx.tcx, *tys, None, self_ty)
+ }
};
match method.self_ty.node {
ast::sty_value => {
llfn,
self_arg,
param_substs,
- method.id);
+ method.id,
+ Some(impl_id));
}
-fn trans_self_arg(bcx: block, base: @ast::expr,
+fn trans_self_arg(bcx: block,
+ base: @ast::expr,
mentry: typeck::method_map_entry) -> Result {
let _icx = bcx.insn_ctxt("impl::trans_self_arg");
let mut temp_cleanups = ~[];
+
+ // Compute the mode and type of self.
let self_arg = {mode: mentry.self_arg.mode,
ty: monomorphize_type(bcx, mentry.self_arg.ty)};
+
let result = trans_arg_expr(bcx, self_arg, base,
&mut temp_cleanups, None, DontAutorefArg);
}
fn trans_method_callee(bcx: block, callee_id: ast::node_id,
- self: @ast::expr, mentry: typeck::method_map_entry)
- -> Callee
-{
+ self: @ast::expr, mentry: typeck::method_map_entry) ->
+ Callee {
let _icx = bcx.insn_ctxt("impl::trans_method_callee");
- match mentry.origin {
+
+ // Replace method_self with method_static here.
+ let mut origin = mentry.origin;
+ match origin {
+ typeck::method_self(copy trait_id, copy method_index) => {
+ // Get the ID of the impl we're inside.
+ let impl_def_id = bcx.fcx.impl_id.get();
+
+ debug!("impl_def_id is %?", impl_def_id);
+
+ // Get the ID of the method we're calling.
+ let method_name =
+ ty::trait_methods(bcx.tcx(), trait_id)[method_index].ident;
+ let method_id = method_with_name(bcx.ccx(), impl_def_id,
+ method_name);
+ origin = typeck::method_static(method_id);
+ }
+ typeck::method_static(*) | typeck::method_param(*) |
+ typeck::method_trait(*) => {}
+ }
+
+ match origin {
typeck::method_static(did) => {
let callee_fn = callee::trans_fn_ref(bcx, did, callee_id);
let Result {bcx, val} = trans_self_arg(bcx, self, mentry);
trans_trait_callee(bcx, callee_id, off, self, vstore)
}
typeck::method_self(*) => {
- bcx.tcx().sess.span_bug(self.span, ~"self method call");
+ fail ~"method_self should have been handled above"
}
}
}
fn trans_static_method_callee(bcx: block,
method_id: ast::def_id,
+ trait_id: ast::def_id,
callee_id: ast::node_id) -> FnData
{
let _icx = bcx.insn_ctxt("impl::trans_static_method_callee");
let ccx = bcx.ccx();
+ debug!("trans_static_method_callee(method_id=%?, trait_id=%s, \
+ callee_id=%?)",
+ method_id,
+ ty::item_path_str(bcx.tcx(), trait_id),
+ callee_id);
+ let _indenter = indenter();
+
+ // When we translate a static fn defined in a trait like:
+ //
+ // trait Trait<T1...Tn> {
+ // static fn foo<M1...Mn>(...) {...}
+ // }
+ //
+ // this winds up being translated as something like:
+ //
+ // fn foo<T1...Tn,self: Trait<T1...Tn>,M1...Mn>(...) {...}
+ //
+ // So when we see a call to this function foo, we have to figure
+ // out which impl the `Trait<T1...Tn>` bound on the type `self` was
+ // bound to. Due to the fact that we use a flattened list of
+ // impls, one per bound, this means we have to total up the bounds
+ // found on the type parameters T1...Tn to find the index of the
+ // one we are interested in.
+ let bound_index = {
+ let trait_polyty = ty::lookup_item_type(bcx.tcx(), trait_id);
+ let mut index = 0;
+ for trait_polyty.bounds.each |param_bounds| {
+ for param_bounds.each |param_bound| {
+ match *param_bound {
+ ty::bound_trait(_) => { index += 1; }
+ ty::bound_copy | ty::bound_owned |
+ ty::bound_send | ty::bound_const => {}
+ }
+ }
+ }
+ index
+ };
+
let mname = if method_id.crate == ast::local_crate {
match bcx.tcx().items.get(method_id.node) {
ast_map::node_trait_method(trait_method, _, _) => {
let vtbls = resolve_vtables_in_fn_ctxt(
bcx.fcx, ccx.maps.vtable_map.get(callee_id));
- // FIXME(#3446) -- I am pretty sure index 0 is not the right one,
- // if the static method is implemented on a generic type. (NDM)
- match vtbls[0] {
+ match vtbls[bound_index] {
typeck::vtable_static(impl_did, rcvr_substs, rcvr_origins) => {
let mth_id = method_with_name(bcx.ccx(), impl_did, mname);
fn method_from_methods(ms: ~[@ast::method], name: ast::ident)
-> ast::def_id {
- local_def(option::get(&vec::find(ms, |m| m.ident == name)).id)
+ local_def(option::get(vec::find(ms, |m| m.ident == name)).id)
}
fn method_with_name(ccx: @crate_ctxt, impl_id: ast::def_id,
match origin {
typeck::vtable_static(impl_id, substs, sub_vtables) => {
monomorphize::make_mono_id(
- ccx, impl_id, substs,
- if (*sub_vtables).len() == 0u { None }
- else { Some(sub_vtables) }, None)
+ ccx,
+ impl_id,
+ substs,
+ if (*sub_vtables).len() == 0u {
+ None
+ } else {
+ Some(sub_vtables)
+ },
+ None,
+ None)
}
typeck::vtable_trait(trait_id, substs) => {
@{def: trait_id,
- params: vec::map(substs, |t| mono_precise(*t, None))}
+ params: vec::map(substs, |t| mono_precise(*t, None)),
+ impl_did_opt: None}
}
// can't this be checked at the callee?
_ => fail ~"vtable_id"
let has_tps = (*ty::lookup_item_type(ccx.tcx, impl_id).bounds).len() > 0u;
make_vtable(ccx, vec::map(*ty::trait_methods(tcx, trt_id), |im| {
- let fty = ty::subst_tps(tcx, substs, ty::mk_fn(tcx, im.fty));
+ let fty = ty::subst_tps(tcx, substs, None, ty::mk_fn(tcx, im.fty));
if (*im.tps).len() > 0u || ty::type_has_self(fty) {
C_null(T_ptr(T_nil()))
} else {
// If the method is in another crate, need to make an inlined
// copy first
if m_id.crate != ast::local_crate {
- m_id = inline::maybe_instantiate_inline(ccx, m_id);
+ // XXX: Set impl ID here?
+ m_id = inline::maybe_instantiate_inline(ccx, m_id, true);
}
monomorphize::monomorphic_fn(ccx, m_id, substs,
- Some(vtables), None).val
+ Some(vtables), None, None).val
} else if m_id.crate == ast::local_crate {
get_item_val(ccx, m_id.node)
} else {
use base::{trans_item, get_item_val, no_self, self_arg, trans_fn,
impl_self, decl_internal_cdecl_fn,
set_inline_hint_if_appr, set_inline_hint,
- trans_enum_variant, trans_class_ctor, trans_class_dtor,
+ trans_enum_variant, trans_class_dtor,
get_insn_ctxt};
use syntax::parse::token::special_idents;
use type_of::type_of_fn_from_ty;
fn_id: ast::def_id,
real_substs: ~[ty::t],
vtables: Option<typeck::vtable_res>,
- ref_id: Option<ast::node_id>)
- -> {val: ValueRef, must_cast: bool}
-{
+ impl_did_opt: Option<ast::def_id>,
+ ref_id: Option<ast::node_id>) ->
+ {val: ValueRef, must_cast: bool} {
let _icx = ccx.insn_ctxt("monomorphic_fn");
let mut must_cast = false;
let substs = vec::map(real_substs, |t| {
for real_substs.each() |s| { assert !ty::type_has_params(*s); }
for substs.each() |s| { assert !ty::type_has_params(*s); }
let param_uses = type_use::type_uses_for(ccx, fn_id, substs.len());
- let hash_id = make_mono_id(ccx, fn_id, substs, vtables, Some(param_uses));
+ let hash_id = make_mono_id(ccx, fn_id, substs, vtables, impl_did_opt,
+ Some(param_uses));
if vec::any(hash_id.params,
|p| match *p { mono_precise(_, _) => false, _ => true }) {
must_cast = true;
}
- #debug["monomorphic_fn(fn_id=%? (%s), real_substs=%?, substs=%?, \
+ debug!("monomorphic_fn(fn_id=%? (%s), real_substs=%?, substs=%?, \
hash_id = %?",
fn_id, ty::item_path_str(ccx.tcx, fn_id),
real_substs.map(|s| ty_to_str(ccx.tcx, *s)),
- substs.map(|s| ty_to_str(ccx.tcx, *s)), hash_id];
+ substs.map(|s| ty_to_str(ccx.tcx, *s)), hash_id);
match ccx.monomorphized.find(hash_id) {
Some(val) => {
return {val: get_item_val(ccx, fn_id.node),
must_cast: true};
}
- ast_map::node_ctor(nm, _, ct, _, pt) => (pt, nm, ct.span),
ast_map::node_dtor(_, dtor, _, pt) =>
(pt, special_idents::dtor, dtor.span),
- ast_map::node_trait_method(*) => {
- ccx.tcx.sess.bug(~"Can't monomorphize a trait method")
+ ast_map::node_trait_method(@ast::provided(m), _, pt) => {
+ (pt, m.ident, m.span)
+ }
+ ast_map::node_trait_method(@ast::required(_), _, _) => {
+ ccx.tcx.sess.bug(~"Can't monomorphize a required trait method")
}
ast_map::node_expr(*) => {
ccx.tcx.sess.bug(~"Can't monomorphize an expr")
ccx.tcx.sess.bug(~"Can't monomorphize a local")
}
};
- let mono_ty = ty::subst_tps(ccx.tcx, substs, llitem_ty);
+
+ // Look up the impl type if we're translating a default method.
+ // XXX: Generics.
+ let impl_ty_opt;
+ match impl_did_opt {
+ None => impl_ty_opt = None,
+ Some(impl_did) => {
+ impl_ty_opt = Some(ty::lookup_item_type(ccx.tcx, impl_did).ty);
+ }
+ }
+
+ let mono_ty = ty::subst_tps(ccx.tcx, substs, impl_ty_opt, llitem_ty);
let llfty = type_of_fn_from_ty(ccx, mono_ty);
ccx.stats.n_monos += 1;
- let depth = option::get_default(&ccx.monomorphizing.find(fn_id), 0u);
+ let depth = option::get_default(ccx.monomorphizing.find(fn_id), 0u);
// Random cut-off -- code that needs to instantiate the same function
// recursively more than ten times can probably safely be assumed to be
// causing an infinite expansion.
lldecl
};
- let psubsts = Some({tys: substs, vtables: vtables, bounds: tpt.bounds});
+ let psubsts = Some({
+ tys: substs,
+ vtables: vtables,
+ bounds: tpt.bounds,
+ self_ty: impl_ty_opt
+ });
+
let lldecl = match map_node {
ast_map::node_item(i@@{node: ast::item_fn(decl, _, _, body), _}, _) => {
let d = mk_lldecl();
set_inline_hint_if_appr(i.attrs, d);
- trans_fn(ccx, pt, decl, body, d, no_self, psubsts, fn_id.node);
+ trans_fn(ccx, pt, decl, body, d, no_self, psubsts, fn_id.node, None);
d
}
ast_map::node_item(*) => {
}
ast_map::node_variant(v, enum_item, _) => {
let tvs = ty::enum_variants(ccx.tcx, local_def(enum_item.id));
- let this_tv = option::get(&vec::find(*tvs, |tv| {
+ let this_tv = option::get(vec::find(*tvs, |tv| {
tv.id.node == fn_id.node}));
let d = mk_lldecl();
set_inline_hint(d);
}
d
}
- ast_map::node_method(mth, _, _) => {
+ ast_map::node_method(mth, supplied_impl_did, _) => {
// XXX: What should the self type be here?
let d = mk_lldecl();
set_inline_hint_if_appr(mth.attrs, d);
- meth::trans_method(ccx, pt, mth, psubsts, None, d);
- d
- }
- ast_map::node_ctor(_, tps, ctor, parent_id, _) => {
- // ctors don't have attrs, at least not right now
- let d = mk_lldecl();
- let tp_tys = ty::ty_params_to_tys(ccx.tcx, tps);
- trans_class_ctor(ccx, pt, ctor.node.dec, ctor.node.body, d,
- option::get_default(&psubsts,
- {tys:tp_tys, vtables: None, bounds: @~[]}),
- fn_id.node, parent_id, ctor.span);
+
+ // Override the impl def ID if necessary.
+ let impl_did;
+ match impl_did_opt {
+ None => impl_did = supplied_impl_did,
+ Some(override_impl_did) => impl_did = override_impl_did
+ }
+
+ meth::trans_method(ccx, pt, mth, psubsts, None, d, impl_did);
d
}
ast_map::node_dtor(_, dtor, _, pt) => {
trans_class_dtor(ccx, *pt, dtor.node.body,
dtor.node.id, psubsts, Some(hash_id), parent_id)
}
+ ast_map::node_trait_method(@ast::provided(mth), _, pt) => {
+ let d = mk_lldecl();
+ set_inline_hint_if_appr(mth.attrs, d);
+ debug!("monomorphic_fn impl_did_opt is %?", impl_did_opt);
+ meth::trans_method(ccx, *pt, mth, psubsts, None, d,
+ impl_did_opt.get());
+ d
+ }
+
// Ugh -- but this ensures any new variants won't be forgotten
ast_map::node_expr(*) |
ast_map::node_stmt(*) |
fn make_mono_id(ccx: @crate_ctxt, item: ast::def_id, substs: ~[ty::t],
vtables: Option<typeck::vtable_res>,
+ impl_did_opt: Option<ast::def_id>,
param_uses: Option<~[type_use::type_uses]>) -> mono_id {
let precise_param_ids = match vtables {
Some(vts) => {
})
}
};
- @{def: item, params: param_ids}
+ @{def: item, params: param_ids, impl_did_opt: impl_did_opt}
}
cx.rmap.insert(item.id, ());
}
ast_map::node_variant(v, _, _) => { cx.rmap.insert(v.node.id, ()); }
- // If it's a ctor, consider the parent reachable
- ast_map::node_ctor(_, _, _, parent_id, _) => {
- traverse_def_id(cx, parent_id);
- }
_ => ()
}
}
}
}
item_class(struct_def, tps) => {
- do option::iter(&struct_def.ctor) |ctor| {
- cx.rmap.insert(ctor.node.id, ());
- if tps.len() > 0u || attr::find_inline_attr(ctor.node.attrs)
- != attr::ia_none {
- traverse_inline_body(cx, ctor.node.body);
- }
- }
do option::iter(&struct_def.dtor) |dtor| {
cx.rmap.insert(dtor.node.id, ());
if tps.len() > 0u || attr::find_inline_attr(dtor.node.attrs)
visit::mk_vt(@{visit_ty: traverse_ty, ..*visit::default_visitor()})
}
-fn traverse_ty(ty: @ty, cx: ctx, v: visit::vt<ctx>) {
+fn traverse_ty(ty: @Ty, cx: ctx, v: visit::vt<ctx>) {
if cx.rmap.contains_key(ty.id) { return; }
cx.rmap.insert(ty.id, ());
use std::map::HashMap;
-use driver::session::session;
use lib::llvm::{TypeRef, ValueRef};
use syntax::ast;
use back::abi;
fn visit(ty_name: ~str, args: ~[ValueRef]) {
let tcx = self.bcx.tcx();
- let mth_idx = option::get(&ty::method_idx(
+ let mth_idx = option::get(ty::method_idx(
tcx.sess.ident_of(~"visit_" + ty_name),
*self.visitor_methods));
let mth_ty = ty::mk_fn(tcx, self.visitor_methods[mth_idx].fty);
use syntax::ast;
-use driver::session::session;
use lib::llvm::{ValueRef, TypeRef};
use back::abi;
use syntax::codemap::span;
{
//!
//
- // [...]/_ allocates a fixed-size array and moves it around "by value".
+ // [...] allocates a fixed-size array and moves it around "by value".
// In this case, it means that the caller has already given us a location
// to store the array of the suitable size, so all we have to do is
// generate the content.
let unit_ty = ty::sequence_element_type(bcx.tcx(), vec_ty);
// Calculate the last pointer address we want to handle.
- // FIXME (#2536): Optimize this when the size of the unit type is
+ // FIXME (#3729): Optimize this when the size of the unit type is
// statically known to not use pointer casts, which tend to confuse
// LLVM.
let data_end_ptr = pointer_add(bcx, data_ptr, fill);
use std::map::HashMap;
use std::list;
use std::list::{List, Cons, Nil};
-use driver::session::session;
use metadata::csearch;
use syntax::ast::*, syntax::ast_util, syntax::visit;
use syntax::ast_map;
Some(uses) => return uses,
None => ()
}
- let fn_id_loc = if fn_id.crate == local_crate { fn_id }
- else { inline::maybe_instantiate_inline(ccx, fn_id) };
+
+ let fn_id_loc = if fn_id.crate == local_crate {
+ fn_id
+ } else {
+ inline::maybe_instantiate_inline(ccx, fn_id, true)
+ };
+
// Conservatively assume full use for recursive loops
ccx.type_use_cache.insert(fn_id, vec::from_elem(n_tps, 3u));
for uint::range(0u, n_tps) |n| { cx.uses[n] |= flags;}
}
}
- ast_map::node_ctor(_, _, ctor, _, _) => {
- handle_body(cx, ctor.node.body);
- }
ast_map::node_dtor(_, dtor, _, _) => {
handle_body(cx, dtor.node.body);
}
use result::Result;
use std::map::HashMap;
use driver::session;
-use session::session;
+use session::Session;
use syntax::{ast, ast_map};
use syntax::ast_util;
use syntax::ast_util::{is_local, local_def};
use syntax::ast::*;
use syntax::print::pprust::*;
use util::ppaux::{ty_to_str, proto_ty_to_str, tys_to_str};
-use std::serialization::{serialize_Option,
- deserialize_Option,
- serialize_uint,
- deserialize_uint};
+export ProvidedMethodSource;
export TyVid, IntVid, FnVid, RegionVid, vid;
export br_hashmap;
export is_instantiable;
export InferTy, TyVar, IntVar;
export ty_self, mk_self, type_has_self;
export ty_class;
-export region, bound_region, encl_region;
+export Region, bound_region, encl_region;
export re_bound, re_free, re_scope, re_static, re_var;
export br_self, br_anon, br_named, br_cap_avoid;
export get, type_has_params, type_needs_infer, type_has_regions;
export ty_to_def_id;
export ty_fn_args;
export ty_region;
-export kind, kind_implicitly_copyable, kind_send_copy, kind_copyable;
+export Kind, kind_implicitly_copyable, kind_send_copy, kind_copyable;
export kind_noncopyable, kind_const;
export kind_can_be_copied, kind_can_be_sent, kind_can_be_implicitly_copied;
export kind_is_safe_for_default_mode;
export ty_sort_str;
export normalize_ty;
export to_str;
-export borrow, serialize_borrow, deserialize_borrow;
export bound_const;
export terr_no_integral_type, terr_ty_param_size, terr_self_substs;
export terr_in_field, terr_record_fields, terr_vstores_differ, terr_arg_count;
export method_call_bounds;
export hash_region;
export region_variance, rv_covariant, rv_invariant, rv_contravariant;
-export serialize_region_variance, deserialize_region_variance;
export opt_region_variance;
-export serialize_opt_region_variance, deserialize_opt_region_variance;
export determine_inherited_purity;
export provided_trait_methods;
-export AutoAdjustment, serialize_AutoAdjustment, deserialize_AutoAdjustment;
+export AutoAdjustment;
export AutoRef, AutoRefKind, AutoSlice, AutoPtr;
// Data types
tps: @~[param_bounds],
fty: FnTy,
self_ty: ast::self_ty_,
- vis: ast::visibility};
+ vis: ast::visibility,
+ def_id: ast::def_id};
type mt = {ty: t, mutbl: ast::mutability};
#[auto_serialize]
+#[auto_deserialize]
enum vstore {
vstore_fixed(uint),
vstore_uniq,
vstore_box,
- vstore_slice(region)
+ vstore_slice(Region)
}
type field_ty = {
atttce_resolved(t) /* resolved to a type, irrespective of region */
}
-#[auto_serialize]
type opt_region_variance = Option<region_variance>;
#[auto_serialize]
+#[auto_deserialize]
enum region_variance { rv_covariant, rv_invariant, rv_contravariant }
impl region_variance : cmp::Eq {
}
#[auto_serialize]
+#[auto_deserialize]
type AutoAdjustment = {
autoderefs: uint,
autoref: Option<AutoRef>
};
#[auto_serialize]
+#[auto_deserialize]
type AutoRef = {
kind: AutoRefKind,
- region: region,
+ region: Region,
mutbl: ast::mutability
};
#[auto_serialize]
+#[auto_deserialize]
enum AutoRefKind {
/// Convert from @[]/~[] to &[] (or str)
AutoSlice,
AutoPtr
}
+struct ProvidedMethodSource {
+ method_id: ast::def_id,
+ impl_id: ast::def_id
+}
+
type ctxt =
@{diag: syntax::diagnostic::span_handler,
interner: HashMap<intern_key, t_box>,
mut next_id: uint,
vecs_implicitly_copyable: bool,
legacy_modes: bool,
- cstore: metadata::cstore::cstore,
- sess: session::session,
+ cstore: metadata::cstore::CStore,
+ sess: session::Session,
def_map: resolve::DefMap,
region_map: middle::region::region_map,
short_names_cache: HashMap<t, @~str>,
needs_drop_cache: HashMap<t, bool>,
needs_unwind_cleanup_cache: HashMap<t, bool>,
- kind_cache: HashMap<t, kind>,
- ast_ty_to_ty_cache: HashMap<@ast::ty, ast_ty_to_ty_cache_entry>,
+ kind_cache: HashMap<t, Kind>,
+ ast_ty_to_ty_cache: HashMap<@ast::Ty, ast_ty_to_ty_cache_entry>,
enum_var_cache: HashMap<def_id, @~[variant_info]>,
trait_method_cache: HashMap<def_id, @~[method]>,
ty_param_bounds: HashMap<ast::node_id, param_bounds>,
adjustments: HashMap<ast::node_id, @AutoAdjustment>,
normalized_cache: HashMap<t, t>,
lang_items: middle::lang_items::LanguageItems,
- legacy_boxed_traits: HashMap<node_id, ()>};
+ legacy_boxed_traits: HashMap<node_id, ()>,
+ provided_method_sources: HashMap<ast::def_id, ProvidedMethodSource>};
enum tbox_flag {
has_params = 1,
/// Representation of regions:
#[auto_serialize]
-enum region {
+#[auto_deserialize]
+enum Region {
/// Bound regions are found (primarily) in function types. They indicate
/// region parameters that have yet to be replaced with actual regions
/// (analogous to type parameters, except that due to the monomorphic
}
#[auto_serialize]
+#[auto_deserialize]
enum bound_region {
/// The self region for classes, impls (&T in a type defn or &self/T)
br_self,
br_cap_avoid(ast::node_id, @bound_region),
}
-type opt_region = Option<region>;
+type opt_region = Option<Region>;
/**
* The type substs represents the kinds of things that can be substituted to
ty_uniq(mt),
ty_evec(mt, vstore),
ty_ptr(mt),
- ty_rptr(region, mt),
+ ty_rptr(Region, mt),
ty_rec(~[field]),
ty_fn(FnTy),
ty_trait(def_id, substs, vstore),
terr_record_fields(expected_found<ident>),
terr_arg_count,
terr_mode_mismatch(expected_found<mode>),
- terr_regions_does_not_outlive(region, region),
- terr_regions_not_same(region, region),
- terr_regions_no_overlap(region, region),
+ terr_regions_does_not_outlive(Region, Region),
+ terr_regions_not_same(Region, Region),
+ terr_regions_no_overlap(Region, Region),
terr_vstores_differ(terr_vstore_kind, expected_found<vstore>),
terr_in_field(@type_err, ast::ident),
terr_sorts(expected_found<t>),
enum IntVid = uint;
enum FnVid = uint;
#[auto_serialize]
+#[auto_deserialize]
enum RegionVid = uint;
enum InferTy {
}
}
-fn param_bounds_to_kind(bounds: param_bounds) -> kind {
+fn param_bounds_to_kind(bounds: param_bounds) -> Kind {
let mut kind = kind_noncopyable();
for vec::each(*bounds) |bound| {
match *bound {
map::HashMap()
}
-fn mk_ctxt(s: session::session,
+fn mk_ctxt(s: session::Session,
dm: resolve::DefMap,
amap: ast_map::map,
freevars: freevars::freevar_map,
adjustments: HashMap(),
normalized_cache: new_ty_hash(),
lang_items: move lang_items,
- legacy_boxed_traits: HashMap()}
+ legacy_boxed_traits: HashMap(),
+ provided_method_sources: HashMap()}
}
_ => ()
}
let mut flags = 0u;
- fn rflags(r: region) -> uint {
+ fn rflags(r: Region) -> uint {
(has_regions as uint) | {
match r {
ty::re_var(_) => needs_infer as uint,
fn mk_ptr(cx: ctxt, tm: mt) -> t { mk_t(cx, ty_ptr(tm)) }
-fn mk_rptr(cx: ctxt, r: region, tm: mt) -> t { mk_t(cx, ty_rptr(r, tm)) }
+fn mk_rptr(cx: ctxt, r: Region, tm: mt) -> t { mk_t(cx, ty_rptr(r, tm)) }
-fn mk_mut_rptr(cx: ctxt, r: region, ty: t) -> t {
+fn mk_mut_rptr(cx: ctxt, r: Region, ty: t) -> t {
mk_rptr(cx, r, {ty: ty, mutbl: ast::m_mutbl})
}
-fn mk_imm_rptr(cx: ctxt, r: region, ty: t) -> t {
+fn mk_imm_rptr(cx: ctxt, r: Region, ty: t) -> t {
mk_rptr(cx, r, {ty: ty, mutbl: ast::m_imm})
}
// Returns the narrowest lifetime enclosing the evaluation of the expression
// with id `id`.
-fn encl_region(cx: ctxt, id: ast::node_id) -> ty::region {
+fn encl_region(cx: ctxt, id: ast::node_id) -> ty::Region {
match cx.region_map.find(id) {
Some(encl_scope) => ty::re_scope(encl_scope),
None => ty::re_static
fn walk_regions_and_ty(
cx: ctxt,
ty: t,
- walkr: fn(r: region),
+ walkr: fn(r: Region),
walkt: fn(t: t) -> bool) {
if (walkt(ty)) {
fn fold_regions_and_ty(
cx: ctxt,
ty: t,
- fldr: fn(r: region) -> region,
+ fldr: fn(r: Region) -> Region,
fldfnt: fn(t: t) -> t,
fldt: fn(t: t) -> t) -> t {
fn fold_substs(
substs: &substs,
- fldr: fn(r: region) -> region,
+ fldr: fn(r: Region) -> Region,
fldt: fn(t: t) -> t) -> substs {
{self_r: substs.self_r.map(|r| fldr(*r)),
fn fold_regions(
cx: ctxt,
ty: t,
- fldr: fn(r: region, in_fn: bool) -> region) -> t {
+ fldr: fn(r: Region, in_fn: bool) -> Region) -> t {
fn do_fold(cx: ctxt, ty: t, in_fn: bool,
- fldr: fn(region, bool) -> region) -> t {
+ fldr: fn(Region, bool) -> Region) -> t {
if !type_has_regions(ty) { return ty; }
fold_regions_and_ty(
cx, ty,
do_fold(cx, ty, false, fldr)
}
-fn fold_region(cx: ctxt, t0: t, fldop: fn(region, bool) -> region) -> t {
+fn fold_region(cx: ctxt, t0: t, fldop: fn(Region, bool) -> Region) -> t {
fn do_fold(cx: ctxt, t0: t, under_r: bool,
- fldop: fn(region, bool) -> region) -> t {
+ fldop: fn(Region, bool) -> Region) -> t {
let tb = get(t0);
if !tbox_has_flag(tb, has_regions) { return t0; }
match tb.sty {
}
// Substitute *only* type parameters. Used in trans where regions are erased.
-fn subst_tps(cx: ctxt, tps: &[t], typ: t) -> t {
- if tps.len() == 0u { return typ; }
+fn subst_tps(cx: ctxt, tps: &[t], self_ty_opt: Option<t>, typ: t) -> t {
+ if tps.len() == 0u && self_ty_opt.is_none() { return typ; }
let tb = ty::get(typ);
- if !tbox_has_flag(tb, has_params) { return typ; }
+ if self_ty_opt.is_none() && !tbox_has_flag(tb, has_params) { return typ; }
match tb.sty {
- ty_param(p) => tps[p.idx],
- ref sty => fold_sty_to_ty(cx, sty, |t| subst_tps(cx, tps, t))
+ ty_param(p) => tps[p.idx],
+ ty_self => {
+ match self_ty_opt {
+ None => cx.sess.bug(~"ty_self unexpected here"),
+ Some(self_ty) => {
+ subst_tps(cx, tps, self_ty_opt, self_ty)
+ }
+ }
+ }
+ ref sty => {
+ fold_sty_to_ty(cx, sty, |t| subst_tps(cx, tps, self_ty_opt, t))
+ }
}
}
fold_regions_and_ty(
cx, typ,
|r| match r {
- re_bound(br_self) => substs.self_r.get(),
+ re_bound(br_self) => substs.self_r.expect(
+ #fmt("ty::subst: \
+ Reference to self region when given substs with no \
+ self region, ty = %s", ty_to_str(cx, typ))),
_ => r
},
|t| do_subst(cx, substs, t),
return needs_unwind_cleanup;
}
-enum kind { kind_(u32) }
+enum Kind { kind_(u32) }
/// can be copied (implicitly or explicitly)
const KIND_MASK_COPY : u32 = 0b000000000000000000000000001_u32;
/// safe for default mode (subset of KIND_MASK_IMPLICIT)
const KIND_MASK_DEFAULT_MODE : u32 = 0b000000000000000000000100000_u32;
-fn kind_noncopyable() -> kind {
+fn kind_noncopyable() -> Kind {
kind_(0u32)
}
-fn kind_copyable() -> kind {
+fn kind_copyable() -> Kind {
kind_(KIND_MASK_COPY)
}
-fn kind_implicitly_copyable() -> kind {
+fn kind_implicitly_copyable() -> Kind {
kind_(KIND_MASK_IMPLICIT | KIND_MASK_COPY)
}
-fn kind_safe_for_default_mode() -> kind {
+fn kind_safe_for_default_mode() -> Kind {
// similar to implicit copy, but always includes vectors and strings
kind_(KIND_MASK_DEFAULT_MODE | KIND_MASK_IMPLICIT | KIND_MASK_COPY)
}
-fn kind_implicitly_sendable() -> kind {
+fn kind_implicitly_sendable() -> Kind {
kind_(KIND_MASK_IMPLICIT | KIND_MASK_COPY | KIND_MASK_SEND)
}
-fn kind_safe_for_default_mode_send() -> kind {
+fn kind_safe_for_default_mode_send() -> Kind {
// similar to implicit copy, but always includes vectors and strings
kind_(KIND_MASK_DEFAULT_MODE | KIND_MASK_IMPLICIT |
KIND_MASK_COPY | KIND_MASK_SEND)
}
-fn kind_send_copy() -> kind {
+fn kind_send_copy() -> Kind {
kind_(KIND_MASK_COPY | KIND_MASK_SEND)
}
-fn kind_send_only() -> kind {
+fn kind_send_only() -> Kind {
kind_(KIND_MASK_SEND)
}
-fn kind_const() -> kind {
+fn kind_const() -> Kind {
kind_(KIND_MASK_CONST)
}
-fn kind_owned() -> kind {
+fn kind_owned() -> Kind {
kind_(KIND_MASK_OWNED)
}
-fn kind_top() -> kind {
+fn kind_top() -> Kind {
kind_(0xffffffffu32)
}
-fn remove_const(k: kind) -> kind {
+fn remove_const(k: Kind) -> Kind {
k - kind_const()
}
-fn remove_implicit(k: kind) -> kind {
+fn remove_implicit(k: Kind) -> Kind {
k - kind_(KIND_MASK_IMPLICIT | KIND_MASK_DEFAULT_MODE)
}
-fn remove_send(k: kind) -> kind {
+fn remove_send(k: Kind) -> Kind {
k - kind_(KIND_MASK_SEND)
}
-fn remove_owned_send(k: kind) -> kind {
+fn remove_owned_send(k: Kind) -> Kind {
k - kind_(KIND_MASK_OWNED) - kind_(KIND_MASK_SEND)
}
-fn remove_copyable(k: kind) -> kind {
+fn remove_copyable(k: Kind) -> Kind {
k - kind_(KIND_MASK_COPY | KIND_MASK_DEFAULT_MODE)
}
-impl kind : ops::BitAnd<kind,kind> {
- pure fn bitand(other: &kind) -> kind {
+impl Kind : ops::BitAnd<Kind,Kind> {
+ pure fn bitand(other: &Kind) -> Kind {
unsafe {
lower_kind(self, (*other))
}
}
}
-impl kind : ops::BitOr<kind,kind> {
- pure fn bitor(other: &kind) -> kind {
+impl Kind : ops::BitOr<Kind,Kind> {
+ pure fn bitor(other: &Kind) -> Kind {
unsafe {
raise_kind(self, (*other))
}
}
}
-impl kind : ops::Sub<kind,kind> {
- pure fn sub(other: &kind) -> kind {
+impl Kind : ops::Sub<Kind,Kind> {
+ pure fn sub(other: &Kind) -> Kind {
unsafe {
kind_(*self & !*(*other))
}
// Using these query functions is preferable to direct comparison or matching
// against the kind constants, as we may modify the kind hierarchy in the
// future.
-pure fn kind_can_be_implicitly_copied(k: kind) -> bool {
+pure fn kind_can_be_implicitly_copied(k: Kind) -> bool {
*k & KIND_MASK_IMPLICIT == KIND_MASK_IMPLICIT
}
-pure fn kind_is_safe_for_default_mode(k: kind) -> bool {
+pure fn kind_is_safe_for_default_mode(k: Kind) -> bool {
*k & KIND_MASK_DEFAULT_MODE == KIND_MASK_DEFAULT_MODE
}
-pure fn kind_can_be_copied(k: kind) -> bool {
+pure fn kind_can_be_copied(k: Kind) -> bool {
*k & KIND_MASK_COPY == KIND_MASK_COPY
}
-pure fn kind_can_be_sent(k: kind) -> bool {
+pure fn kind_can_be_sent(k: Kind) -> bool {
*k & KIND_MASK_SEND == KIND_MASK_SEND
}
-pure fn kind_is_owned(k: kind) -> bool {
+pure fn kind_is_owned(k: Kind) -> bool {
*k & KIND_MASK_OWNED == KIND_MASK_OWNED
}
-fn meta_kind(p: FnMeta) -> kind {
+fn meta_kind(p: FnMeta) -> Kind {
match p.proto { // XXX consider the kind bounds!
proto_vstore(vstore_slice(_)) =>
kind_noncopyable() | kind_(KIND_MASK_DEFAULT_MODE),
}
}
-fn kind_lteq(a: kind, b: kind) -> bool {
+fn kind_lteq(a: Kind, b: Kind) -> bool {
*a & *b == *a
}
-fn lower_kind(a: kind, b: kind) -> kind {
+fn lower_kind(a: Kind, b: Kind) -> Kind {
kind_(*a & *b)
}
-fn raise_kind(a: kind, b: kind) -> kind {
+fn raise_kind(a: Kind, b: Kind) -> Kind {
kind_(*a | *b)
}
// with the given mutability can have.
// This is used to prevent objects containing mutable state from being
// implicitly copied and to compute whether things have const kind.
-fn mutability_kind(m: mutability) -> kind {
+fn mutability_kind(m: mutability) -> Kind {
match (m) {
m_mutbl => remove_const(remove_implicit(kind_top())),
m_const => remove_implicit(kind_top()),
}
}
-fn mutable_type_kind(cx: ctxt, ty: mt) -> kind {
+fn mutable_type_kind(cx: ctxt, ty: mt) -> Kind {
lower_kind(mutability_kind(ty.mutbl), type_kind(cx, ty.ty))
}
-fn type_kind(cx: ctxt, ty: t) -> kind {
+fn type_kind(cx: ctxt, ty: t) -> Kind {
match cx.kind_cache.find(ty) {
Some(result) => return result,
None => {/* fall through */ }
}
}
-impl region : to_bytes::IterBytes {
+impl Region : to_bytes::IterBytes {
pure fn iter_bytes(+lsb0: bool, f: to_bytes::Cb) {
match self {
re_bound(ref br) =>
}
}
-fn ty_region(ty: t) -> region {
+fn ty_region(ty: t) -> Region {
match get(ty).sty {
ty_rptr(r, _) => r,
s => fail fmt!("ty_region() invoked on non-rptr: %?", s)
match vec::find(get_fields(rec_ty), |f| f.ident == id) {
Some(f) => f,
// Do we only call this when we know the field is legit?
- None => fail (#fmt("get_field: ty doesn't have a field %s",
+ None => fail (fmt!("get_field: ty doesn't have a field %s",
tcx.sess.str_of(id)))
}
}
fn def_has_ty_params(def: ast::def) -> bool {
match def {
- ast::def_fn(_, _) | ast::def_variant(_, _) | ast::def_class(_, _)
+ ast::def_fn(_, _) | ast::def_variant(_, _) | ast::def_class(_)
=> true,
_ => false
}
cx.trait_method_cache.insert(ast_util::local_def(id), ms);
}
-fn provided_trait_methods(cx: ctxt, id: ast::def_id) -> ~[@ast::method] {
+fn provided_trait_methods(cx: ctxt, id: ast::def_id) -> ~[ast::ident] {
if is_local(id) {
match cx.items.find(id.node) {
Some(ast_map::node_item(@{node: item_trait(_, _, ms),_}, _)) =>
match ast_util::split_trait_methods(ms) {
- (_, p) => p
+ (_, p) => p.map(|method| method.ident)
},
- _ => cx.sess.bug(#fmt("provided_trait_methods: %? is not a trait",
+ _ => cx.sess.bug(fmt!("provided_trait_methods: %? is not a trait",
id))
}
- }
- else {
- // FIXME #2794: default methods for traits don't work cross-crate
- ~[]
+ } else {
+ csearch::get_provided_trait_methods(cx, id).map(|info| info.ty.ident)
}
}
ast_map::path_name(variant.node.name))
}
- ast_map::node_ctor(nm, _, _, _, path) => {
- vec::append_one(*path, ast_map::path_name(nm))
- }
ast_map::node_dtor(_, _, _, path) => {
vec::append_one(*path, ast_map::path_name(
syntax::parse::token::special_idents::literally_dtor))
};
match variant.node.disr_expr {
Some (ex) => {
- // FIXME: issue #1417
disr_val = match const_eval::eval_const_expr(cx,
ex) {
const_eval::const_int(val) => val as int,
// the type cache. Returns the type parameters and type.
fn lookup_item_type(cx: ctxt, did: ast::def_id) -> ty_param_bounds_and_ty {
match cx.tcache.find(did) {
- Some(tpt) => return tpt,
- None => {
+ Some(tpt) => {
// The item is in this crate. The caller should have added it to the
// type cache already
+ return tpt;
+ }
+ None => {
assert did.crate != ast::local_crate;
let tyt = csearch::get_type(cx, did);
cx.tcache.insert(did, tyt);
ty_enum(did, r) =>
match r.self_r {
Some(_) =>
- // This enum has a self region. Get rid of it
+ // Use re_static since trans doesn't care about regions
mk_enum(cx, did,
- {self_r: None, self_ty: None, tps: r.tps}),
+ {self_r: Some(ty::re_static),
+ self_ty: None,
+ tps: r.tps}),
None =>
t
},
match r.self_r {
Some(_) =>
// Ditto.
- mk_class(cx, did, {self_r: None, self_ty: None, tps: r.tps}),
+ mk_class(cx, did, {self_r: Some(ty::re_static), self_ty: None,
+ tps: r.tps}),
None =>
t
},
t
};
- // FIXME #2187: This also reduced int types to their compatible machine
- // types, which isn't necessary after #2187
- let t = mk_t(cx, mach_sty(cx.sess.targ_cfg, t));
-
let sty = fold_sty(&get(t).sty, |t| { normalize_ty(cx, t) });
let t_norm = mk_t(cx, sty);
cx.normalized_cache.insert(t, t_norm);
pure fn ne(other: &RegionVid) -> bool { *self != *(*other) }
}
-impl region : cmp::Eq {
- pure fn eq(other: ®ion) -> bool {
+impl Region : cmp::Eq {
+ pure fn eq(other: &Region) -> bool {
match self {
re_bound(e0a) => {
match (*other) {
}
}
}
- pure fn ne(other: ®ion) -> bool { !self.eq(other) }
+ pure fn ne(other: &Region) -> bool { !self.eq(other) }
}
impl bound_region : cmp::Eq {
pure fn ne(other: ¶m_bound) -> bool { !self.eq(other) }
}
-impl kind : cmp::Eq {
- pure fn eq(other: &kind) -> bool { *self == *(*other) }
- pure fn ne(other: &kind) -> bool { *self != *(*other) }
+impl Kind : cmp::Eq {
+ pure fn eq(other: &Kind) -> bool { *self == *(*other) }
+ pure fn ne(other: &Kind) -> bool { *self != *(*other) }
}
use syntax::ast_util::{local_def, respan, split_trait_methods};
use syntax::visit;
use metadata::csearch;
-use driver::session::session;
-use util::common::may_break;
+use util::common::{block_query, loop_query};
use syntax::codemap::span;
use pat_util::{pat_is_variant, pat_id_map, PatIdMap};
use middle::ty;
use std::smallintmap;
use std::map;
use std::map::HashMap;
-use std::serialization::{serialize_uint, deserialize_uint};
use syntax::print::pprust::*;
use util::ppaux::{ty_to_str, tys_to_str, region_to_str,
bound_region_to_str, vstore_to_str, expr_repr};
use util::common::{indent, indenter};
use std::list;
use list::{List, Nil, Cons};
+use dvec::DVec;
export check_crate;
export infer;
export method_map;
-export method_origin, serialize_method_origin, deserialize_method_origin;
-export method_map_entry, serialize_method_map_entry;
-export deserialize_method_map_entry;
+export method_origin;
+export method_map_entry;
export vtable_map;
export vtable_res;
export vtable_origin;
export provided_methods_map;
#[auto_serialize]
+#[auto_deserialize]
enum method_origin {
// fully statically resolved method
method_static(ast::def_id),
// details for a method invoked with a receiver whose type is a type parameter
// with a bounded trait.
#[auto_serialize]
+#[auto_deserialize]
type method_param = {
// the trait containing the method to be invoked
trait_id: ast::def_id,
type vtable_map = HashMap<ast::node_id, vtable_res>;
-// Stores information about provided methods, aka "default methods" in traits.
-// Maps from a trait's def_id to a MethodInfo about
-// that method in that trait.
-type provided_methods_map = HashMap<ast::node_id,
- ~[@resolve::MethodInfo]>;
-
type ty_param_substs_and_ty = {substs: ty::substs, ty: ty::t};
type crate_ctxt_ = {// A mapping from method call sites to traits that have
method_map: method_map,
vtable_map: vtable_map,
coherence_info: @coherence::CoherenceInfo,
- provided_methods_map: provided_methods_map,
tcx: ty::ctxt};
enum crate_ctxt {
method_map: std::map::HashMap(),
vtable_map: std::map::HashMap(),
coherence_info: @coherence::CoherenceInfo(),
- provided_methods_map: std::map::HashMap(),
tcx: tcx});
collect::collect_item_types(ccx, crate);
coherence::check_coherence(ccx, crate);
fn get_region_reporting_err(tcx: ty::ctxt,
span: span,
- res: Result<ty::region, ~str>) -> ty::region {
+ res: Result<ty::Region, ~str>) -> ty::Region {
match res {
result::Ok(r) => r,
}
fn ast_region_to_region<AC: ast_conv, RS: region_scope Copy Owned>(
- self: AC, rscope: RS, span: span, a_r: @ast::region) -> ty::region {
+ self: AC, rscope: RS, span: span, a_r: @ast::region) -> ty::Region {
let res = match a_r.node {
ast::re_static => Ok(ty::re_static),
return {substs: substs, ty: ty};
}
-const NO_REGIONS: uint = 1u;
-const NO_TPS: uint = 2u;
+const NO_REGIONS: uint = 1;
+const NO_TPS: uint = 2;
// Parses the programmer's textual representation of a type into our
// internal notion of a type. `getter` is a function that returns the type
// corresponding to a definition ID:
fn ast_ty_to_ty<AC: ast_conv, RS: region_scope Copy Owned>(
- self: AC, rscope: RS, &&ast_ty: @ast::ty) -> ty::t {
+ self: AC, rscope: RS, &&ast_ty: @ast::Ty) -> ty::t {
fn ast_mt_to_mt<AC: ast_conv, RS: region_scope Copy Owned>(
self: AC, rscope: RS, mt: ast::mt) -> ty::mt {
Some(d) => d
};
match a_def {
- ast::def_ty(did) | ast::def_class(did, _) => {
+ ast::def_ty(did) | ast::def_class(did) => {
ast_path_to_ty(self, rscope, did, path, id).ty
}
ast::def_prim_ty(nty) => {
}
// a list of mapping from in-scope-region-names ("isr") to the
-// corresponding ty::region
-type isr_alist = @List<(ty::bound_region, ty::region)>;
+// corresponding ty::Region
+type isr_alist = @List<(ty::bound_region, ty::Region)>;
trait get_and_find_region {
- fn get(br: ty::bound_region) -> ty::region;
- fn find(br: ty::bound_region) -> Option<ty::region>;
+ fn get(br: ty::bound_region) -> ty::Region;
+ fn find(br: ty::bound_region) -> Option<ty::Region>;
}
impl isr_alist: get_and_find_region {
- fn get(br: ty::bound_region) -> ty::region {
+ fn get(br: ty::bound_region) -> ty::Region {
self.find(br).get()
}
- fn find(br: ty::bound_region) -> Option<ty::region> {
+ fn find(br: ty::bound_region) -> Option<ty::Region> {
for list::each(self) |isr| {
let (isr_br, isr_r) = *isr;
if isr_br == br { return Some(isr_r); }
let tcx = ccx.tcx;
let self_ty = ty::node_id_to_type(tcx, id);
- do option::iter(&struct_def.ctor) |ctor| {
- let class_t = {self_ty: self_ty,
- self_id: ctor.node.self_id,
- def_id: local_def(id),
- explicit_self: {node: ast::sty_by_ref,
- span: ast_util::dummy_sp()}};
- // typecheck the ctor
- check_bare_fn(ccx, ctor.node.dec,
- ctor.node.body, ctor.node.id,
- Some(class_t));
- }
-
do option::iter(&struct_def.dtor) |dtor| {
let class_t = {self_ty: self_ty,
self_id: dtor.node.self_id,
impl @fn_ctxt {
fn search_in_scope_regions(br: ty::bound_region)
- -> Result<ty::region, ~str>
+ -> Result<ty::Region, ~str>
{
match self.in_scope_regions.find(br) {
Some(r) => result::Ok(r),
}
impl @fn_ctxt: region_scope {
- fn anon_region(span: span) -> Result<ty::region, ~str> {
+ fn anon_region(span: span) -> Result<ty::Region, ~str> {
result::Ok(self.infcx().next_region_var_nb(span))
}
- fn self_region(_span: span) -> Result<ty::region, ~str> {
+ fn self_region(_span: span) -> Result<ty::Region, ~str> {
self.search_in_scope_regions(ty::br_self)
}
- fn named_region(_span: span, id: ast::ident) -> Result<ty::region, ~str> {
+ fn named_region(_span: span, id: ast::ident) -> Result<ty::Region, ~str> {
self.search_in_scope_regions(ty::br_named(id))
}
}
pprust::expr_to_str(expr, self.tcx().sess.intr()))
}
- fn block_region() -> ty::region {
+ fn block_region() -> ty::Region {
ty::re_scope(self.region_lb)
}
self.write_ty(node_id, ty::mk_bot(self.tcx()));
}
- fn to_ty(ast_t: @ast::ty) -> ty::t {
+ fn to_ty(ast_t: @ast::Ty) -> ty::t {
ast_ty_to_ty(self, self, ast_t)
}
}
fn mk_subr(a_is_expected: bool, span: span,
- sub: ty::region, sup: ty::region) -> Result<(), ty::type_err> {
+ sub: ty::Region, sup: ty::Region) -> Result<(), ty::type_err> {
infer::mk_subr(self.infcx(), a_is_expected, span, sub, sup)
}
fn region_var_if_parameterized(rp: Option<ty::region_variance>,
span: span,
- lower_bound: ty::region)
- -> Option<ty::region>
+ lower_bound: ty::Region)
+ -> Option<ty::Region>
{
rp.map(
|_rp| self.infcx().next_region_var_with_lb(span, lower_bound))
// functions. This is so that we have more information about the types
// of arguments when we typecheck the functions. This isn't really the
// right way to do this.
- for [false, true]/_.each |check_blocks| {
+ for [false, true].each |check_blocks| {
let check_blocks = *check_blocks;
debug!("check_blocks=%b", check_blocks);
// Check field access expressions
fn check_field(fcx: @fn_ctxt, expr: @ast::expr, is_callee: bool,
- base: @ast::expr, field: ast::ident, tys: ~[@ast::ty])
+ base: @ast::expr, field: ast::ident, tys: ~[@ast::Ty])
-> bool
{
let tcx = fcx.ccx.tcx;
ast::expr_loop(body, _) => {
check_block_no_value(fcx, body);
fcx.write_ty(id, ty::mk_nil(tcx));
- bot = !may_break(body);
+ bot = !may_break(tcx, expr.id, body);
}
ast::expr_match(discrim, arms) => {
bot = alt::check_alt(fcx, expr, discrim, arms);
// Resolve the path.
let class_id;
match tcx.def_map.find(id) {
- Some(ast::def_class(type_def_id, _)) => {
+ Some(ast::def_class(type_def_id)) => {
class_id = type_def_id;
}
_ => {
fn check_const(ccx: @crate_ctxt, _sp: span, e: @ast::expr, id: ast::node_id) {
let rty = ty::node_id_to_type(ccx.tcx, id);
let fcx = blank_fn_ctxt(ccx, rty, e.id);
+ let declty = fcx.ccx.tcx.tcache.get(local_def(id)).ty;
+ check_const_with_ty(fcx, _sp, e, declty);
+}
+
+fn check_const_with_ty(fcx: @fn_ctxt, _sp: span, e: @ast::expr,
+ declty: ty::t) {
check_expr(fcx, e, None);
let cty = fcx.expr_ty(e);
- let declty = fcx.ccx.tcx.tcache.get(local_def(id)).ty;
demand::suptype(fcx, e.span, declty, cty);
regionck::regionck_expr(fcx, e);
writeback::resolve_type_vars_in_expr(fcx, e);
variants: &mut ~[ty::variant_info]) {
let rty = ty::node_id_to_type(ccx.tcx, id);
for vs.each |v| {
- match v.node.disr_expr {
- Some(e) => {
- let fcx = blank_fn_ctxt(ccx, rty, e.id);
- check_expr(fcx, e, None);
- let cty = fcx.expr_ty(e);
+ do v.node.disr_expr.iter |e_ref| {
+ let e = *e_ref;
+ debug!("disr expr, checking %s",
+ expr_to_str(e, ccx.tcx.sess.intr()));
let declty = ty::mk_int(ccx.tcx);
- demand::suptype(fcx, e.span, declty, cty);
- // FIXME: issue #1417
- // Also, check_expr (from check_const pass) doesn't guarantee
+ let fcx = blank_fn_ctxt(ccx, rty, e.id);
+ check_const_with_ty(fcx, e.span, e, declty);
+ // check_expr (from check_const pass) doesn't guarantee
// that the expression is in an form that eval_const_expr can
// handle, so we may still get an internal compiler error
- match const_eval::eval_const_expr(ccx.tcx, e) {
- const_eval::const_int(val) => {
+
+ match const_eval::eval_const_expr_partial(ccx.tcx, e) {
+ Ok(const_eval::const_int(val)) => {
*disr_val = val as int;
}
- _ => {
+ Ok(_) => {
ccx.tcx.sess.span_err(e.span, ~"expected signed integer \
constant");
}
+ Err(err) => {
+ ccx.tcx.sess.span_err(e.span,
+ #fmt("expected constant: %s", err));
+
+ }
}
- }
- _ => ()
}
if vec::contains(*disr_vals, &*disr_val) {
ccx.tcx.sess.span_err(v.span,
}
ast::def_fn(id, ast::unsafe_fn) |
- ast::def_static_method(id, ast::unsafe_fn) => {
+ ast::def_static_method(id, _, ast::unsafe_fn) => {
// Unsafe functions can only be touched in an unsafe context
fcx.require_unsafe(sp, ~"access to unsafe function");
return ty::lookup_item_type(fcx.ccx.tcx, id);
}
- ast::def_fn(id, _) | ast::def_static_method(id, _) |
+ ast::def_fn(id, _) | ast::def_static_method(id, _, _) |
ast::def_const(id) | ast::def_variant(_, id) |
- ast::def_class(id, _) => {
+ ast::def_class(id) => {
return ty::lookup_item_type(fcx.ccx.tcx, id);
}
ast::def_upvar(_, inner, _, _) => {
tpt: ty_param_bounds_and_ty,
span: span,
node_id: ast::node_id,
- region_lb: ty::region) {
+ region_lb: ty::Region) {
let ty_param_count = vec::len(*tpt.bounds);
let ty_substs_len = vec::len(pth.types);
}
}
+// Returns true if b contains a break that can exit from b
+fn may_break(cx: ty::ctxt, id: ast::node_id, b: ast::blk) -> bool {
+ // First: is there an unlabeled break immediately
+ // inside the loop?
+ (loop_query(b, |e| {
+ match e {
+ ast::expr_break(_) => true,
+ _ => false
+ }
+ })) ||
+ // Second: is there a labeled break with label
+ // <id> nested anywhere inside the loop?
+ (block_query(b, |e| {
+ match e.node {
+ ast::expr_break(Some(_)) =>
+ match cx.def_map.find(e.id) {
+ Some(ast::def_label(loop_id)) if id == loop_id => true,
+ _ => false,
+ },
+ _ => false
+ }}))
+}
+
fn check_bounds_are_used(ccx: @crate_ctxt,
span: span,
tps: ~[ast::ty_param],
}
~"get_tydesc" => {
- // FIXME (#2712): return *intrinsic::tydesc, not *()
+ // FIXME (#3730): return *intrinsic::tydesc, not *()
(1u, ~[], ty::mk_nil_ptr(tcx))
}
~"visit_tydesc" => {
type pat_ctxt = {
fcx: @fn_ctxt,
map: PatIdMap,
- alt_region: ty::region, // Region for the alt as a whole
- block_region: ty::region, // Region for the block of the arm
+ alt_region: ty::Region, // Region for the alt as a whole
+ block_region: ty::Region, // Region for the block of the arm
};
fn check_pat_variant(pcx: pat_ctxt, pat: @ast::pat, path: @ast::path,
// Check to ensure that the struct is the one specified.
match tcx.def_map.get(pat.id) {
- ast::def_class(supplied_def_id, _)
+ ast::def_class(supplied_def_id)
if supplied_def_id == class_id => {
// OK.
}
*/
-use coherence::get_base_type_def_id;
+use coherence::{ProvidedMethodInfo, get_base_type_def_id};
use middle::resolve::{Impl, MethodInfo};
use middle::ty::*;
use syntax::ast::{def_id, sty_by_ref, sty_value, sty_region, sty_box,
// Prepare the list of candidates
self.push_inherent_candidates(self_ty);
- self.push_extension_candidates();
+ self.push_extension_candidates(self_ty);
let enum_dids = DVec();
let mut self_ty = self_ty;
}
}
- fn push_extension_candidates(&self) {
+ fn push_extension_candidates(&self, self_ty: ty::t) {
// If the method being called is associated with a trait, then
// find all the impls of that trait. Each of those are
// candidates.
for opt_applicable_traits.each |applicable_traits| {
for applicable_traits.each |trait_did| {
let coherence_info = self.fcx.ccx.coherence_info;
+
+ // Look for explicit implementations.
let opt_impl_infos =
coherence_info.extension_methods.find(*trait_did);
for opt_impl_infos.each |impl_infos| {
&self.extension_candidates, *impl_info);
}
}
+
+ // Look for default methods.
+ match coherence_info.provided_methods.find(*trait_did) {
+ Some(methods) => {
+ self.push_candidates_from_provided_methods(
+ &self.extension_candidates, self_ty, *trait_did,
+ methods);
+ }
+ None => {}
+ }
}
}
}
- fn push_inherent_candidates_from_param(&self, param_ty: param_ty)
- {
+ fn push_inherent_candidates_from_param(&self, param_ty: param_ty) {
debug!("push_inherent_candidates_from_param(param_ty=%?)",
param_ty);
let _indenter = indenter();
self_ty: ty::t,
did: def_id,
substs: &ty::substs,
- vstore: ty::vstore)
- {
+ vstore: ty::vstore) {
debug!("push_inherent_candidates_from_trait(did=%s, substs=%s)",
self.did_to_str(did),
substs_to_str(self.tcx(), substs));
};
let method = &ms[index];
- /* FIXME(#3468) we should transform the vstore in accordance
+ /* FIXME(#3157) we should transform the vstore in accordance
with the self type
match method.self_type {
});
}
- fn push_inherent_impl_candidates_for_type(did: def_id)
- {
+ fn push_inherent_impl_candidates_for_type(did: def_id) {
let opt_impl_infos =
self.fcx.ccx.coherence_info.inherent_methods.find(did);
for opt_impl_infos.each |impl_infos| {
}
fn push_candidates_from_impl(&self, candidates: &DVec<Candidate>,
- impl_info: &resolve::Impl)
- {
+ impl_info: &resolve::Impl) {
if !self.impl_dups.insert(impl_info.did, ()) {
return; // already visited
}
});
}
+ fn push_candidates_from_provided_methods(
+ &self,
+ candidates: &DVec<Candidate>,
+ self_ty: ty::t,
+ trait_def_id: def_id,
+ methods: @DVec<@ProvidedMethodInfo>) {
+ debug!("(pushing candidates from provided methods) considering trait \
+ id %d:%d",
+ trait_def_id.crate,
+ trait_def_id.node);
+
+ for methods.each |provided_method_info| {
+ if provided_method_info.method_info.ident != self.m_name { loop; }
+
+ debug!("(pushing candidates from provided methods) adding \
+ candidate");
+
+ // XXX: Needs to support generics.
+ let dummy_substs = { self_r: None, self_ty: None, tps: ~[] };
+ let (impl_ty, impl_substs) =
+ self.create_rcvr_ty_and_substs_for_method(
+ provided_method_info.method_info.self_type,
+ self_ty,
+ dummy_substs);
+
+ candidates.push(Candidate {
+ rcvr_ty: impl_ty,
+ rcvr_substs: move impl_substs,
+ num_method_tps: provided_method_info.method_info.n_tps,
+ self_mode: get_mode_from_self_type(
+ provided_method_info.method_info.self_type),
+ origin: method_static(provided_method_info.method_info.did)
+ });
+ }
+ }
+
fn create_rcvr_ty_and_substs_for_method(&self,
self_decl: ast::self_ty_,
self_ty: ty::t,
+self_substs: ty::substs)
- -> (ty::t, ty::substs)
- {
+ -> (ty::t, ty::substs) {
// If the self type includes a region (like &self), we need to
// ensure that the receiver substitutions have a self region.
// If the receiver type does not itself contain borrowed
kind: AutoRefKind,
autoderefs: uint,
mutbls: &[ast::mutability],
- mk_autoref_ty: &fn(ast::mutability, ty::region) -> ty::t)
+ mk_autoref_ty: &fn(ast::mutability, ty::Region) -> ty::t)
-> Option<method_map_entry>
{
// This is hokey. We should have mutability inference as a
fn confirm_candidate(&self,
self_ty: ty::t,
candidate: &Candidate)
- -> method_map_entry
- {
+ -> method_map_entry {
let tcx = self.tcx();
let fty = self.fn_ty_from_origin(&candidate.origin);
}
fn transform_self_type_for_method(tcx: ty::ctxt,
- self_region: Option<ty::region>,
+ self_region: Option<ty::Region>,
impl_ty: ty::t,
self_type: ast::self_ty_)
-> ty::t
enum rcx { rcx_({fcx: @fn_ctxt, mut errors_reported: uint}) }
type rvt = visit::vt<@rcx>;
-fn encl_region_of_def(fcx: @fn_ctxt, def: ast::def) -> ty::region {
+fn encl_region_of_def(fcx: @fn_ctxt, def: ast::def) -> ty::Region {
let tcx = fcx.tcx();
match def {
def_local(node_id, _) | def_arg(node_id, _) | def_self(node_id) |
fn constrain_free_variables(
rcx: @rcx,
- region: ty::region,
+ region: ty::Region,
expr: @ast::expr)
{
/*!
fn constrain_regions_in_type_of_node(
rcx: @rcx,
id: ast::node_id,
- encl_region: ty::region,
+ encl_region: ty::Region,
span: span) -> bool
{
let tcx = rcx.fcx.tcx();
fn constrain_regions_in_type(
rcx: @rcx,
- encl_region: ty::region,
+ encl_region: ty::Region,
span: span,
ty: ty::t) -> bool
{
return (e == rcx.errors_reported);
fn constrain_region(rcx: @rcx,
- encl_region: ty::region,
+ encl_region: ty::Region,
span: span,
- region: ty::region) {
+ region: ty::Region) {
let tcx = rcx.fcx.ccx.tcx;
debug!("constrain_region(encl_region=%?, region=%?)",
isr: isr_alist,
self_info: Option<self_info>,
fn_ty: &ty::FnTy,
- mapf: fn(ty::bound_region) -> ty::region) ->
+ mapf: fn(ty::bound_region) -> ty::Region) ->
{isr: isr_alist, self_info: Option<self_info>, fn_ty: ty::FnTy} {
// Take self_info apart; the self_ty part is the only one we want
tcx: ty::ctxt,
isr: isr_alist,
tys: ~[ty::t],
- to_r: fn(ty::bound_region) -> ty::region) -> isr_alist {
+ to_r: fn(ty::bound_region) -> ty::Region) -> isr_alist {
// Takes `isr` (described above), `to_r` (described above),
// and `r`, a region. If `r` is anything other than a bound
// updated isr_alist that now contains a mapping from `r` to
// the result of calling `to_r` on it.
fn append_isr(isr: isr_alist,
- to_r: fn(ty::bound_region) -> ty::region,
- r: ty::region) -> isr_alist {
+ to_r: fn(ty::bound_region) -> ty::Region,
+ r: ty::Region) -> isr_alist {
match r {
ty::re_free(_, _) | ty::re_static | ty::re_scope(_) |
ty::re_var(_) => {
// XXX: This should work for multiple traits.
let ity = ty::impl_traits(tcx, impl_did, vstore)[0];
- let trait_ty = ty::subst_tps(tcx, impl_tys, ity);
+ let trait_ty = ty::subst_tps(tcx, impl_tys, None, ity);
debug!("(connect trait tps) trait type is %?, impl did is %?",
ty::get(trait_ty).sty, impl_did);
match ty::get(trait_ty).sty {
// has at most one implementation for each type. Then we build a mapping from
// each trait in the system to its implementations.
-use metadata::csearch::{each_path, get_impl_traits, get_impls_for_mod};
-use metadata::cstore::{cstore, iter_crate_data};
+use metadata::csearch::{ProvidedTraitMethodInfo, each_path, get_impl_traits};
+use metadata::csearch::{get_impls_for_mod};
+use metadata::cstore::{CStore, iter_crate_data};
use metadata::decoder::{dl_def, dl_field, dl_impl};
use middle::resolve::{Impl, MethodInfo};
-use middle::ty::{get, lookup_item_type, subst, t, ty_box};
-use middle::ty::{ty_uniq, ty_ptr, ty_rptr, ty_enum};
+use middle::ty::{ProvidedMethodSource, get, lookup_item_type, subst, t};
+use middle::ty::{ty_box, ty_uniq, ty_ptr, ty_rptr, ty_enum};
use middle::ty::{ty_class, ty_nil, ty_bot, ty_bool, ty_int, ty_uint};
use middle::ty::{ty_float, ty_estr, ty_evec, ty_rec};
use middle::ty::{ty_fn, ty_trait, ty_tup, ty_infer};
use middle::ty::{ty_opaque_closure_ptr, ty_unboxed_vec, type_is_ty_var};
use middle::typeck::infer::{infer_ctxt, can_mk_subty};
use middle::typeck::infer::{new_infer_ctxt, resolve_ivar, resolve_type};
-use syntax::ast::{crate, def_id, def_mod};
+use syntax::ast::{crate, def_id, def_mod, def_ty};
use syntax::ast::{item, item_class, item_const, item_enum, item_fn};
use syntax::ast::{item_foreign_mod, item_impl, item_mac, item_mod};
use syntax::ast::{item_trait, item_ty, local_crate, method, node_id};
}
}
+// Stores the method info and definition ID of the associated trait method for
+// each instantiation of each provided method.
+struct ProvidedMethodInfo {
+ // Metadata (synthesized def ID, type-parameter count, name, self type)
+ // describing this instantiation of the provided method.
+ method_info: @MethodInfo,
+ // Def ID of the method as originally declared in the trait.
+ trait_method_def_id: def_id
+}
+
+// Stores information about provided methods (a.k.a. default methods) in
+// implementations.
+//
+// This is a map from ID of each implementation to the method info and trait
+// method ID of each of the default methods belonging to the trait that that
+// implementation implements.
+type ProvidedMethodsMap = HashMap<def_id,@DVec<@ProvidedMethodInfo>>;
+
struct CoherenceInfo {
// Contains implementations of methods that are inherent to a type.
// Methods in these implementations don't need to be exported.
extension_methods: HashMap<def_id,@DVec<@Impl>>,
// A mapping from a supertrait to its subtraits.
- supertrait_to_subtraits: HashMap<def_id,@DVec<def_id>>
+ supertrait_to_subtraits: HashMap<def_id,@DVec<def_id>>,
+
+ // A mapping from an implementation ID to the method info and trait method
+ // ID of the provided (a.k.a. default) methods in the traits that that
+ // implementation implements.
+ provided_methods: ProvidedMethodsMap,
}
fn CoherenceInfo() -> CoherenceInfo {
CoherenceInfo {
inherent_methods: HashMap(),
extension_methods: HashMap(),
- supertrait_to_subtraits: HashMap()
+ supertrait_to_subtraits: HashMap(),
+ provided_methods: HashMap(),
}
}
}
impl CoherenceChecker {
- // Create a mapping containing a MethodInfo for every provided
- // method in every trait.
- fn build_provided_methods_map(crate: @crate) {
- let sess = self.crate_context.tcx.sess;
-
- let pmm = self.crate_context.provided_methods_map;
-
- visit_crate(*crate, (), mk_simple_visitor(@{
- visit_item: |item| {
- match item.node {
- item_trait(_, _, trait_methods) => {
- for trait_methods.each |trait_method| {
- debug!("(building provided methods map) checking \
- trait `%s` with id %d",
- sess.str_of(item.ident), item.id);
-
- match *trait_method {
- required(_) => { /* fall through */}
- provided(m) => {
- // For every provided method in the
- // trait, store a MethodInfo.
- let mi = method_to_MethodInfo(m);
-
- match pmm.find(item.id) {
- Some(mis) => {
- // If the trait already has an
- // entry in the
- // provided_methods_map, we just
- // need to add this method to
- // that entry.
- debug!("(building provided \
- methods map) adding \
- method `%s` to entry for \
- existing trait",
- sess.str_of(mi.ident));
- let mut method_infos = mis;
- method_infos.push(mi);
- pmm.insert(item.id, method_infos);
- }
- None => {
- // If the trait doesn't have an
- // entry yet, create one.
- debug!("(building provided \
- methods map) creating new \
- entry for method `%s`",
- sess.str_of(mi.ident));
- pmm.insert(item.id, ~[mi]);
- }
- }
- }
- }
- }
- }
- _ => {
- // Nothing to do.
- }
- };
- },
- .. *default_simple_visitor()
- }));
- }
-
fn check_coherence(crate: @crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
self.crate_context.tcx.sess.parse_sess.interner),
self.crate_context.tcx.sess.str_of(item.ident));
+ self.instantiate_default_methods(item.id, trait_did);
let implementation = self.create_impl_from_item(item);
self.add_trait_method(trait_did, implementation);
}
// Nothing to do.
}
Some(base_type_def_id) => {
+ // XXX: Gather up default methods?
let implementation = self.create_impl_from_item(item);
self.add_inherent_method(base_type_def_id, implementation);
}
}
+ // Creates default method IDs and performs type substitutions for an impl
+ // and trait pair. Then, for each provided method in the trait, inserts a
+ // `ProvidedMethodInfo` instance into the `provided_method_sources` map.
+ // It also records each provided method in the coherence info's
+ // `provided_methods` map, keyed by the implementation's def ID.
+ fn instantiate_default_methods(impl_id: ast::node_id,
+ trait_did: ast::def_id) {
+ for self.each_provided_trait_method(trait_did) |trait_method| {
+ // Synthesize an ID.
+ let tcx = self.crate_context.tcx;
+ let new_id = syntax::parse::next_node_id(tcx.sess.parse_sess);
+ let new_did = local_def(new_id);
+
+ // XXX: Perform substitutions.
+ // Record the (currently unsubstituted) type of the synthesized
+ // method under its new def ID so later type lookups succeed.
+ let new_polytype = ty::lookup_item_type(tcx, trait_method.def_id);
+ tcx.tcache.insert(new_did, new_polytype);
+
+ // Pair the new synthesized ID up with the
+ // ID of the method.
+ let source = ProvidedMethodSource {
+ method_id: trait_method.def_id,
+ impl_id: local_def(impl_id)
+ };
+
+ self.crate_context.tcx.provided_method_sources.insert(new_did,
+ source);
+
+ let provided_method_info =
+ @ProvidedMethodInfo {
+ method_info: @{
+ did: new_did,
+ n_tps: trait_method.tps.len(),
+ ident: trait_method.ident,
+ self_type: trait_method.self_ty
+ },
+ trait_method_def_id: trait_method.def_id
+ };
+
+ // Append to the impl's entry in the provided-methods map,
+ // creating the entry on first use.
+ let pmm = self.crate_context.coherence_info.provided_methods;
+ match pmm.find(local_def(impl_id)) {
+ Some(mis) => {
+ // If the trait already has an entry in the
+ // provided_methods_map, we just need to add this
+ // method to that entry.
+ debug!("(checking implementation) adding method `%s` \
+ to entry for existing trait",
+ self.crate_context.tcx.sess.str_of(
+ provided_method_info.method_info.ident));
+ mis.push(provided_method_info);
+ }
+ None => {
+ // If the trait doesn't have an entry yet, create one.
+ debug!("(checking implementation) creating new entry \
+ for method `%s`",
+ self.crate_context.tcx.sess.str_of(
+ provided_method_info.method_info.ident));
+ let method_infos = @DVec();
+ method_infos.push(provided_method_info);
+ pmm.insert(local_def(impl_id), method_infos);
+ }
+ }
+ }
+ }
+
fn register_inherited_trait(item: @item, supertraits: ~[@trait_ref]) {
// XXX: This is wrong. We need to support substitutions; e.g.
// trait Foo : Bar<int>.
fn add_inherent_method(base_def_id: def_id, implementation: @Impl) {
let implementation_list;
match self.crate_context.coherence_info.inherent_methods
- .find(base_def_id) {
-
+ .find(base_def_id) {
None => {
implementation_list = @DVec();
self.crate_context.coherence_info.inherent_methods
fn add_trait_method(trait_id: def_id, implementation: @Impl) {
let implementation_list;
match self.crate_context.coherence_info.extension_methods
- .find(trait_id) {
-
+ .find(trait_id) {
None => {
implementation_list = @DVec();
self.crate_context.coherence_info.extension_methods
}
}
+ // Invokes `f` once for each provided (a.k.a. default) method of the
+ // trait `trait_did`; iteration stops early when `f` returns false.
+ // Note that matching is by method name only.
+ fn each_provided_trait_method(
+ trait_did: ast::def_id,
+ f: &fn(x: &ty::method) -> bool) {
+ // Make a list of all the names of the provided methods.
+ // XXX: This is horrible.
+ let provided_method_idents = HashMap();
+ let tcx = self.crate_context.tcx;
+ for ty::provided_trait_methods(tcx, trait_did).each |ident| {
+ provided_method_idents.insert(*ident, ());
+ }
+
+ // Walk all of the trait's methods, yielding only those whose name
+ // appears in the provided-method set built above.
+ for ty::trait_methods(tcx, trait_did).each |method| {
+ if provided_method_idents.contains_key(method.ident) {
+ if !f(method) {
+ break;
+ }
+ }
+ }
+ }
+
fn polytypes_unify(polytype_a: ty_param_bounds_and_ty,
polytype_b: ty_param_bounds_and_ty)
-> bool {
fn get_self_type_for_implementation(implementation: @Impl)
-> ty_param_bounds_and_ty {
-
return self.crate_context.tcx.tcache.get(implementation.did);
}
// Converts an implementation in the AST to an Impl structure.
fn create_impl_from_item(item: @item) -> @Impl {
-
- fn add_provided_methods(inherent_methods: ~[@MethodInfo],
- all_provided_methods: ~[@MethodInfo],
- sess: driver::session::session)
- -> ~[@MethodInfo] {
-
- let mut methods = inherent_methods;
-
- // If there's no inherent method with the same name as a
- // provided method, add that provided method to `methods`.
+ fn add_provided_methods(all_methods: &mut ~[@MethodInfo],
+ all_provided_methods: ~[@ProvidedMethodInfo],
+ sess: driver::session::Session) {
for all_provided_methods.each |provided_method| {
- let mut method_inherent_to_impl = false;
- for inherent_methods.each |inherent_method| {
- if provided_method.ident == inherent_method.ident {
- method_inherent_to_impl = true;
- }
- }
-
- if !method_inherent_to_impl {
- debug!(
- "(creating impl) adding provided method `%s` to impl",
- sess.str_of(provided_method.ident));
- methods.push(*provided_method);
- }
+ debug!(
+ "(creating impl) adding provided method `%s` to impl",
+ sess.str_of(provided_method.method_info.ident));
+ vec::push(all_methods, provided_method.method_info);
}
-
- return methods;
}
match item.node {
let trait_did =
self.trait_ref_to_trait_def_id(*trait_ref);
- match self.crate_context.provided_methods_map
- .find(trait_did.node) {
+ match self.crate_context
+ .coherence_info
+ .provided_methods
+ .find(local_def(item.id)) {
None => {
debug!("(creating impl) trait with node_id `%d` \
has no provided methods", trait_did.node);
/* fall through */
}
- Some(all_provided)
- => {
+ Some(all_provided) => {
debug!("(creating impl) trait with node_id `%d` \
has provided methods", trait_did.node);
- // Selectively add only those provided
- // methods that aren't inherent to the
- // trait.
-
- // XXX: could probably be doing this with filter.
- methods = add_provided_methods(
- methods, all_provided,
+ // Add all provided methods.
+ add_provided_methods(
+ &mut methods,
+ all_provided.get(),
self.crate_context.tcx.sess);
}
}
// External crate handling
fn add_impls_for_module(impls_seen: HashMap<def_id,()>,
- crate_store: cstore,
+ crate_store: CStore,
module_def_id: def_id) {
let implementations = get_impls_for_mod(crate_store,
}
}
+    // Loads the provided (a.k.a. default) methods of an external trait into
+    // the coherence info's provided-methods map, synthesizing a fresh local
+    // def ID for each method. Does nothing if the trait has already been
+    // processed.
+    fn add_default_methods_for_external_trait(trait_def_id: ast::def_id) {
+        let tcx = self.crate_context.tcx;
+        let pmm = self.crate_context.coherence_info.provided_methods;
+
+        if pmm.contains_key(trait_def_id) { return; }
+
+        debug!("(adding default methods for trait) processing trait");
+
+        // Accumulate every provided method into a single list before
+        // inserting it into the map. Creating and inserting a fresh list on
+        // each iteration (as before) discarded all but the trait's last
+        // provided method; see instantiate_default_methods, which
+        // accumulates correctly.
+        let method_infos = @DVec();
+        for csearch::get_provided_trait_methods(tcx,
+                                                trait_def_id).each |info| {
+            debug!("(adding default methods for trait) found default method");
+
+            // Create a new def ID for this provided method.
+            let parse_sess = &self.crate_context.tcx.sess.parse_sess;
+            let new_did = local_def(syntax::parse::next_node_id(*parse_sess));
+
+            let provided_method_info =
+                @ProvidedMethodInfo {
+                    method_info: @{
+                        did: new_did,
+                        n_tps: info.ty.tps.len(),
+                        ident: info.ty.ident,
+                        self_type: info.ty.self_ty
+                    },
+                    trait_method_def_id: info.def_id
+                };
+
+            method_infos.push(provided_method_info);
+        }
+
+        // Match the original behavior of adding no map entry at all for
+        // traits that have no provided methods.
+        if method_infos.len() > 0 {
+            pmm.insert(trait_def_id, method_infos);
+        }
+    }
+
+ // Adds implementations and traits from external crates to the coherence
+ // info.
fn add_external_crates() {
let impls_seen = HashMap();
{ crate: crate_number, node: 0 });
for each_path(crate_store, crate_number) |path_entry| {
- let module_def_id;
match path_entry.def_like {
dl_def(def_mod(def_id)) => {
- module_def_id = def_id;
+ self.add_impls_for_module(impls_seen,
+ crate_store,
+ def_id);
+ }
+ dl_def(def_ty(def_id)) => {
+ let tcx = self.crate_context.tcx;
+ let polytype = csearch::get_type(tcx, def_id);
+ match ty::get(polytype.ty).sty {
+ ty::ty_trait(*) => {
+ self.add_default_methods_for_external_trait(
+ def_id);
+ }
+ _ => {}
+ }
}
dl_def(_) | dl_impl(_) | dl_field => {
// Skip this.
loop;
}
}
-
- self.add_impls_for_module(impls_seen,
- crate_store,
- module_def_id);
}
}
}
fn check_coherence(crate_context: @crate_ctxt, crate: @crate) {
let coherence_checker = @CoherenceChecker(crate_context);
- (*coherence_checker).build_provided_methods_map(crate);
(*coherence_checker).check_coherence(crate);
}
impl @crate_ctxt {
fn to_ty<RS: region_scope Copy Owned>(
- rs: RS, ast_ty: @ast::ty) -> ty::t {
+ rs: RS, ast_ty: @ast::Ty) -> ty::t {
ast_ty_to_ty(self, rs, ast_ty)
}
match tcx.items.get(id) {
ast_map::node_item(@{node: ast::item_trait(params, _, ms), _}, _) => {
store_methods::<ast::trait_method>(ccx, id, ms, |m| {
+ let def_id;
+ match *m {
+ ast::required(ty_method) => def_id = local_def(ty_method.id),
+ ast::provided(method) => def_id = local_def(method.id)
+ }
+
let trait_bounds = ty_param_bounds(ccx, params);
let ty_m = trait_method_to_ty_method(*m);
- let method_ty = ty_of_ty_method(ccx, ty_m, region_paramd);
+ let method_ty = ty_of_ty_method(ccx, ty_m, region_paramd, def_id);
if ty_m.self_ty.node == ast::sty_static {
make_static_method_ty(ccx, ty_m, region_paramd,
method_ty, trait_ty, trait_bounds);
}
if impl_m.tps.len() != trait_m.tps.len() {
- tcx.sess.span_err(sp, #fmt("method `%s` \
+ tcx.sess.span_err(sp, fmt!("method `%s` \
has %u type %s, but its trait declaration has %u type %s",
tcx.sess.str_of(trait_m.ident), impl_m.tps.len(),
pluralize(impl_m.tps.len(), ~"parameter"),
// Would be nice to use the ty param names in the error message,
// but we don't have easy access to them here
if impl_param_bounds.len() != trait_param_bounds.len() {
- tcx.sess.span_err(sp, #fmt("in method `%s`, \
+ tcx.sess.span_err(sp, fmt!("in method `%s`, \
type parameter %u has %u %s, but the same type \
parameter in its trait declaration has %u %s",
tcx.sess.str_of(trait_m.ident),
// Replaces bound references to the self region with `with_r`.
fn replace_bound_self(tcx: ty::ctxt, ty: ty::t,
- with_r: ty::region) -> ty::t {
+ with_r: ty::Region) -> ty::t {
do ty::fold_regions(tcx, ty) |r, _in_fn| {
if r == ty::re_bound(ty::br_self) {with_r} else {r}
}
let provided_methods = ty::provided_trait_methods(tcx, did);
match vec::find(provided_methods, |provided_method|
- provided_method.ident == trait_m.ident) {
+ *provided_method == trait_m.ident) {
Some(_) => {
// If there's a provided method with the name we
// want, then we're fine; nothing else to do.
tpt: ty::ty_param_bounds_and_ty,
id: ast::node_id) {
let tcx = ccx.tcx;
- do option::iter(&struct_def.ctor) |ctor| {
- // Write the ctor type
- let t_args = ctor.node.dec.inputs.map(
- |a| ty_of_arg(ccx, type_rscope(rp), *a, None) );
- let t_res = ty::mk_class(
- tcx, local_def(id),
- {self_r: rscope::bound_self_region(rp),
- self_ty: None,
- tps: ty::ty_params_to_tys(tcx, tps)});
- let proto = ty::proto_vstore(ty::vstore_slice(ty::re_static));
- let t_ctor = ty::mk_fn(tcx, FnTyBase {
- meta: FnMeta {purity: ast::impure_fn,
- proto: proto,
- bounds: @~[],
- ret_style: ast::return_val},
- sig: FnSig {inputs: t_args,
- output: t_res}
- });
- write_ty_to_tcx(tcx, ctor.node.id, t_ctor);
- tcx.tcache.insert(local_def(ctor.node.id),
- {bounds: tpt.bounds,
- region_param: rp,
- ty: t_ctor});
- }
do option::iter(&struct_def.dtor) |dtor| {
// Write the dtor type
m.purity, @~[],
m.decl, None, m.span),
self_ty: m.self_ty.node,
- vis: m.vis}
+ vis: m.vis,
+ def_id: local_def(m.id)}
}
fn ty_of_ty_method(self: @crate_ctxt,
m: ast::ty_method,
- rp: Option<ty::region_variance>) -> ty::method {
+ rp: Option<ty::region_variance>,
+ id: ast::def_id) -> ty::method {
{ident: m.ident,
tps: ty_param_bounds(self, m.tps),
fty: ty_of_fn_decl(self, type_rscope(rp), ast::proto_bare, m.purity,
@~[], m.decl, None, m.span),
// assume public, because this is only invoked on trait methods
self_ty: m.self_ty.node,
- vis: ast::public}
+ vis: ast::public,
+ def_id: id}
}
/*
}
}
+// Translate the AST's notion of ty param bounds (which are just newtyped Tys)
+// to ty's notion of ty param bounds, which can either be user-defined traits,
+// or one of the four built-in traits (formerly known as kinds): Const, Copy,
+// Owned, and Send.
fn compute_bounds(ccx: @crate_ctxt,
ast_bounds: @~[ast::ty_param_bound]) -> ty::param_bounds {
@do vec::flat_map(*ast_bounds) |b| {
- match *b {
- ast::bound_send => ~[ty::bound_send],
- ast::bound_copy => ~[ty::bound_copy],
- ast::bound_const => ~[ty::bound_const],
- ast::bound_owned => ~[ty::bound_owned],
- ast::bound_trait(t) => {
- let ity = ast_ty_to_ty(ccx, empty_rscope, t);
- match ty::get(ity).sty {
- ty::ty_trait(*) => {
- ~[ty::bound_trait(ity)]
- }
- _ => {
+ let li = &ccx.tcx.lang_items;
+ let ity = ast_ty_to_ty(ccx, empty_rscope, **b);
+ match ty::get(ity).sty {
+ ty::ty_trait(did, _, _) => {
+ let d = Some(did);
+ if d == li.send_trait {
+ ~[ty::bound_send]
+ }
+ else if d == li.copy_trait {
+ ~[ty::bound_copy]
+ }
+ else if d == li.const_trait {
+ ~[ty::bound_const]
+ }
+ else if d == li.owned_trait {
+ ~[ty::bound_owned]
+ }
+ else {
+ // Must be a user-defined trait
+ ~[ty::bound_trait(ity)]
+ }
+ }
+ _ => {
ccx.tcx.sess.span_err(
- t.span, ~"type parameter bounds must be \
- trait types");
+ (*b).span, ~"type parameter bounds must be \
+ trait types");
~[]
- }
}
- }
}
}
}
use result::{Result, Ok, Err, map_vec, map_vec2, iter_vec2};
use ty::{mk_fn, type_is_bot};
use check::regionmanip::{replace_bound_regions_in_fn_ty};
-use driver::session::session;
use util::common::{indent, indenter};
use ast::{unsafe_fn, impure_fn, pure_fn, extern_fn};
use ast::{m_const, m_imm, m_mutbl};
use integral::{int_ty_set, int_ty_set_all};
use combine::{combine_fields, eq_tys};
use assignment::Assign;
-use to_str::to_str;
+use to_str::ToStr;
use sub::Sub;
use lub::Lub;
}
fn mk_subr(cx: infer_ctxt, a_is_expected: bool, span: span,
- a: ty::region, b: ty::region) -> ures {
+ a: ty::Region, b: ty::Region) -> ures {
debug!("mk_subr(%s <: %s)", a.to_str(cx), b.to_str(cx));
do indent {
do cx.commit {
resolver(cx, modes).resolve_type_chk(a)
}
-fn resolve_region(cx: infer_ctxt, r: ty::region, modes: uint)
- -> fres<ty::region> {
+fn resolve_region(cx: infer_ctxt, r: ty::Region, modes: uint)
+ -> fres<ty::Region> {
resolver(cx, modes).resolve_region_chk(r)
}
do indent {
let r <- self.try(f);
- // FIXME (#2814)---could use a vec::clear() that ran
- // destructors but kept the vec at its currently allocated
- // length
- self.ty_var_bindings.bindings = ~[];
- self.int_var_bindings.bindings = ~[];
+ self.ty_var_bindings.bindings.truncate(0);
+ self.int_var_bindings.bindings.truncate(0);
self.region_vars.commit();
move r
}
ty::mk_int_var(self.tcx, self.next_int_var_id())
}
- fn next_region_var_nb(span: span) -> ty::region {
+ fn next_region_var_nb(span: span) -> ty::Region {
ty::re_var(self.region_vars.new_region_var(span))
}
fn next_region_var_with_lb(span: span,
- lb_region: ty::region) -> ty::region {
+ lb_region: ty::Region) -> ty::Region {
let region_var = self.next_region_var_nb(span);
// add lb_region as a lower bound on the newly built variable
return region_var;
}
- fn next_region_var(span: span, scope_id: ast::node_id) -> ty::region {
+ fn next_region_var(span: span, scope_id: ast::node_id) -> ty::Region {
self.next_region_var_with_lb(span, ty::re_scope(scope_id))
}
// A. But this upper-bound might be stricter than what is truly
// needed.
-use to_str::to_str;
+use to_str::ToStr;
use combine::combine_fields;
fn to_ares(+c: cres<ty::t>) -> ares {
a: ty::t,
nr_b: ty::t,
m: ast::mutability,
- r_b: ty::region) -> ares {
+ r_b: ty::Region) -> ares {
debug!("try_assign(a=%s, nr_b=%s, m=%?, r_b=%s)",
a.to_str(self.infcx),
// terms of error reporting, although we do not do that properly right
// now.
-use to_str::to_str;
+use to_str::ToStr;
use ty::{FnTyBase, FnMeta, FnSig};
trait combine {
fn protos(p1: ty::fn_proto, p2: ty::fn_proto) -> cres<ty::fn_proto>;
fn ret_styles(r1: ret_style, r2: ret_style) -> cres<ret_style>;
fn purities(a: purity, b: purity) -> cres<purity>;
- fn contraregions(a: ty::region, b: ty::region) -> cres<ty::region>;
- fn regions(a: ty::region, b: ty::region) -> cres<ty::region>;
+ fn contraregions(a: ty::Region, b: ty::Region) -> cres<ty::Region>;
+ fn regions(a: ty::Region, b: ty::Region) -> cres<ty::Region>;
fn vstores(vk: ty::terr_vstore_kind,
a: ty::vstore, b: ty::vstore) -> cres<ty::vstore>;
}
self: &C, +a: T, +b: T) -> ty::expected_found<T> {
if self.a_is_expected() {
- ty::expected_found {expected: a, found: b}
+ ty::expected_found {expected: move a, found: move b}
} else {
- ty::expected_found {expected: b, found: a}
+ ty::expected_found {expected: move b, found: move a}
}
}
}
}
-fn eq_regions<C: combine>(self: &C, a: ty::region, b: ty::region) -> ures {
+fn eq_regions<C: combine>(self: &C, a: ty::Region, b: ty::Region) -> ures {
debug!("eq_regions(%s, %s)",
a.to_str(self.infcx()),
b.to_str(self.infcx()));
fn eq_opt_regions<C:combine>(
self: &C,
- a: Option<ty::region>,
- b: Option<ty::region>) -> cres<Option<ty::region>> {
+ a: Option<ty::Region>,
+ b: Option<ty::Region>) -> cres<Option<ty::Region>> {
match (a, b) {
(None, None) => {
fn relate_region_param<C:combine>(
self: &C,
did: ast::def_id,
- a: Option<ty::region>,
- b: Option<ty::region>)
- -> cres<Option<ty::region>>
+ a: Option<ty::Region>,
+ b: Option<ty::Region>)
+ -> cres<Option<ty::Region>>
{
let polyty = ty::lookup_item_type(self.infcx().tcx, did);
match (polyty.region_param, a, b) {
use combine::*;
use lattice::*;
-use to_str::to_str;
+use to_str::ToStr;
enum Glb = combine_fields; // "greatest lower bound" (common subtype)
}
}
- fn regions(a: ty::region, b: ty::region) -> cres<ty::region> {
+ fn regions(a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("%s.regions(%?, %?)",
self.tag(),
a.to_str(self.infcx),
}
}
- fn contraregions(a: ty::region, b: ty::region) -> cres<ty::region> {
+ fn contraregions(a: ty::Region, b: ty::Region) -> cres<ty::Region> {
Lub(*self).regions(a, b)
}
*/
-use to_str::to_str;
+use to_str::ToStr;
// Bitvector to represent sets of integral types
enum int_ty_set = uint;
use combine::*;
use unify::*;
-use to_str::to_str;
+use to_str::ToStr;
// ______________________________________________________________________
// Lattice operations on variables
use combine::*;
use lattice::*;
-use to_str::to_str;
+use to_str::ToStr;
enum Lub = combine_fields; // "subtype", "subregion" etc
}
}
- fn contraregions(a: ty::region, b: ty::region) -> cres<ty::region> {
+ fn contraregions(a: ty::Region, b: ty::Region) -> cres<ty::Region> {
return Glb(*self).regions(a, b);
}
- fn regions(a: ty::region, b: ty::region) -> cres<ty::region> {
+ fn regions(a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("%s.regions(%?, %?)",
self.tag(),
a.to_str(self.infcx),
use std::cell::{Cell, empty_cell};
use std::list::{List, Nil, Cons};
-use ty::{region, RegionVid};
use region::is_subregion_of;
+use ty::{Region, RegionVid};
use syntax::codemap;
-use to_str::to_str;
+use to_str::ToStr;
use util::ppaux::note_and_explain_region;
export RegionVarBindings;
enum Constraint {
ConstrainVarSubVar(RegionVid, RegionVid),
- ConstrainRegSubVar(region, RegionVid),
- ConstrainVarSubReg(RegionVid, region)
+ ConstrainRegSubVar(Region, RegionVid),
+ ConstrainVarSubReg(RegionVid, Region)
}
impl Constraint : cmp::Eq {
}
struct TwoRegions {
- a: region,
- b: region,
+ a: Region,
+ b: Region,
}
impl TwoRegions : cmp::Eq {
struct RegionVarBindings {
tcx: ty::ctxt,
var_spans: DVec<span>,
- values: Cell<~[ty::region]>,
+ values: Cell<~[ty::Region]>,
constraints: HashMap<Constraint, span>,
lubs: CombineMap,
glbs: CombineMap,
}
}
- fn make_subregion(span: span, sub: region, sup: region) -> cres<()> {
+ fn make_subregion(span: span, sub: Region, sup: Region) -> cres<()> {
// cannot add constraints once regions are resolved
assert self.values.is_empty();
}
}
- fn lub_regions(span: span, a: region, b: region) -> cres<region> {
+ fn lub_regions(span: span, a: Region, b: Region) -> cres<Region> {
// cannot add constraints once regions are resolved
assert self.values.is_empty();
}
}
- fn glb_regions(span: span, a: region, b: region) -> cres<region> {
+ fn glb_regions(span: span, a: Region, b: Region) -> cres<Region> {
// cannot add constraints once regions are resolved
assert self.values.is_empty();
}
}
- fn resolve_var(rid: RegionVid) -> ty::region {
+ fn resolve_var(rid: RegionVid) -> ty::Region {
debug!("RegionVarBindings: resolve_var(%?=%u)", rid, *rid);
if self.values.is_empty() {
self.tcx.sess.span_bug(
self.values.with_ref(|values| values[*rid])
}
- fn combine_vars(combines: CombineMap, a: region, b: region, span: span,
- relate: fn(old_r: region, new_r: region) -> cres<()>)
- -> cres<region> {
+ fn combine_vars(combines: CombineMap, a: Region, b: Region, span: span,
+ relate: fn(old_r: Region, new_r: Region) -> cres<()>)
+ -> cres<Region> {
let vars = TwoRegions { a: a, b: b };
match combines.find(vars) {
}
priv impl RegionVarBindings {
- fn is_subregion_of(sub: region, sup: region) -> bool {
+ fn is_subregion_of(sub: Region, sup: Region) -> bool {
is_subregion_of(self.tcx.region_map, sub, sup)
}
- fn lub_concrete_regions(+a: region, +b: region) -> region {
+ fn lub_concrete_regions(+a: Region, +b: Region) -> Region {
match (a, b) {
(ty::re_static, _) | (_, ty::re_static) => {
ty::re_static // nothing lives longer than static
}
}
- fn glb_concrete_regions(+a: region, +b: region) -> cres<region> {
+ fn glb_concrete_regions(+a: Region, +b: Region) -> cres<Region> {
match (a, b) {
(ty::re_static, r) | (r, ty::re_static) => {
// static lives longer than everything else
pure fn ne(other: &Classification) -> bool { !self.eq(other) }
}
-enum GraphNodeValue { NoValue, Value(region), ErrorValue }
+enum GraphNodeValue { NoValue, Value(Region), ErrorValue }
struct GraphNode {
span: span,
}
struct SpannedRegion {
- region: region,
+ region: Region,
span: span,
}
}
impl RegionVarBindings {
- fn infer_variable_values() -> ~[region] {
+ fn infer_variable_values() -> ~[Region] {
let graph = self.construct_graph();
self.expansion(&graph);
self.contraction(&graph);
}
}
- fn expand_node(a_region: region,
+ fn expand_node(a_region: Region,
b_vid: RegionVid,
b_node: &GraphNode) -> bool {
debug!("expand_node(%?, %? == %?)",
fn contract_node(a_vid: RegionVid,
a_node: &GraphNode,
- b_region: region) -> bool {
+ b_region: Region) -> bool {
debug!("contract_node(%? == %?/%?, %?)",
a_vid, a_node.value, a_node.classification, b_region);
fn check_node(self: &RegionVarBindings,
a_vid: RegionVid,
a_node: &GraphNode,
- a_region: region,
- b_region: region) -> bool {
+ a_region: Region,
+ b_region: Region) -> bool {
if !self.is_subregion_of(a_region, b_region) {
debug!("Setting %? to ErrorValue: %? not subregion of %?",
a_vid, a_region, b_region);
fn adjust_node(self: &RegionVarBindings,
a_vid: RegionVid,
a_node: &GraphNode,
- a_region: region,
- b_region: region) -> bool {
+ a_region: Region,
+ b_region: Region) -> bool {
match self.glb_concrete_regions(a_region, b_region) {
Ok(glb) => {
if glb == a_region {
debug!("---- %s Complete after %u iteration(s)", tag, iteration);
}
- fn extract_regions_and_report_errors(graph: &Graph) -> ~[region] {
+ fn extract_regions_and_report_errors(graph: &Graph) -> ~[Region] {
let dup_map = TwoRegionsMap();
graph.nodes.mapi(|idx, node| {
match node.value {
// Used to suppress reporting the same basic error over and over
fn is_reported(dup_map: TwoRegionsMap,
- r_a: region,
- r_b: region) -> bool {
+ r_a: Region,
+ r_b: Region) -> bool {
let key = TwoRegions { a: r_a, b: r_b };
!dup_map.insert(key, ())
}
// probably better off writing `resolve_all - resolve_ivar`.
use integral::*;
-use to_str::to_str;
+use to_str::ToStr;
const resolve_nested_tvar: uint = 0b00000001;
const resolve_rvar: uint = 0b00000010;
}
}
- fn resolve_region_chk(orig: ty::region) -> fres<ty::region> {
+ fn resolve_region_chk(orig: ty::Region) -> fres<ty::Region> {
self.err = None;
let resolved = indent(|| self.resolve_region(orig) );
match self.err {
}
}
- fn resolve_region(orig: ty::region) -> ty::region {
+ fn resolve_region(orig: ty::Region) -> ty::Region {
debug!("Resolve_region(%s)", orig.to_str(self.infcx));
match orig {
ty::re_var(rid) => self.resolve_region_var(rid),
}
}
- fn resolve_region_var(rid: RegionVid) -> ty::region {
+ fn resolve_region_var(rid: RegionVid) -> ty::Region {
if !self.should(resolve_rvar) {
return ty::re_var(rid)
}
self.infcx.region_vars.resolve_var(rid)
}
- fn assert_not_rvar(rid: RegionVid, r: ty::region) {
+ fn assert_not_rvar(rid: RegionVid, r: ty::Region) {
match r {
ty::re_var(rid2) => {
self.err = Some(region_var_bound_by_region_var(rid, rid2));
use combine::*;
use unify::*;
-use to_str::to_str;
+use to_str::ToStr;
enum Sub = combine_fields; // "subtype", "subregion" etc
Sub(opp).tys(b, a)
}
- fn contraregions(a: ty::region, b: ty::region) -> cres<ty::region> {
+ fn contraregions(a: ty::Region, b: ty::Region) -> cres<ty::Region> {
let opp = combine_fields {
a_is_expected: !self.a_is_expected,.. *self
};
Sub(opp).regions(b, a)
}
- fn regions(a: ty::region, b: ty::region) -> cres<ty::region> {
+ fn regions(a: ty::Region, b: ty::Region) -> cres<ty::Region> {
debug!("%s.regions(%s, %s)",
self.tag(),
a.to_str(self.infcx),
use integral::{int_ty_set};
use unify::{var_value, redirect, root};
-trait to_str {
+trait ToStr {
fn to_str(cx: infer_ctxt) -> ~str;
}
-impl ty::t: to_str {
+impl ty::t: ToStr {
fn to_str(cx: infer_ctxt) -> ~str {
ty_to_str(cx.tcx, self)
}
}
-impl ty::mt: to_str {
+impl ty::mt: ToStr {
fn to_str(cx: infer_ctxt) -> ~str {
mt_to_str(cx.tcx, self)
}
}
-impl ty::region: to_str {
+impl ty::Region: ToStr {
fn to_str(cx: infer_ctxt) -> ~str {
util::ppaux::region_to_str(cx.tcx, self)
}
}
-impl<V:Copy to_str> bound<V>: to_str {
+impl<V:Copy ToStr> bound<V>: ToStr {
fn to_str(cx: infer_ctxt) -> ~str {
match self {
Some(v) => v.to_str(cx),
}
}
-impl<T:Copy to_str> bounds<T>: to_str {
+impl<T:Copy ToStr> bounds<T>: ToStr {
fn to_str(cx: infer_ctxt) -> ~str {
fmt!("{%s <: %s}",
self.lb.to_str(cx),
}
}
-impl int_ty_set: to_str {
+impl int_ty_set: ToStr {
fn to_str(_cx: infer_ctxt) -> ~str {
match self {
int_ty_set(v) => uint::to_str(v, 10u)
}
}
-impl<V:Copy vid, T:Copy to_str> var_value<V, T>: to_str {
+impl<V:Copy vid, T:Copy ToStr> var_value<V, T>: ToStr {
fn to_str(cx: infer_ctxt) -> ~str {
match self {
redirect(vid) => fmt!("redirect(%s)", vid.to_str()),
use combine::combine;
use integral::*;
-use to_str::to_str;
+use to_str::ToStr;
use std::smallintmap::SmallIntMap;
enum var_value<V:Copy, T:Copy> {
}
}
- fn set<V:Copy vid, T:Copy to_str>(
+ fn set<V:Copy vid, T:Copy ToStr>(
vb: &vals_and_bindings<V, T>, vid: V,
+new_v: var_value<V, T>) {
use syntax::parse::token::special_idents;
trait region_scope {
- fn anon_region(span: span) -> Result<ty::region, ~str>;
- fn self_region(span: span) -> Result<ty::region, ~str>;
- fn named_region(span: span, id: ast::ident) -> Result<ty::region, ~str>;
+ fn anon_region(span: span) -> Result<ty::Region, ~str>;
+ fn self_region(span: span) -> Result<ty::Region, ~str>;
+ fn named_region(span: span, id: ast::ident) -> Result<ty::Region, ~str>;
}
enum empty_rscope { empty_rscope }
impl empty_rscope: region_scope {
- fn anon_region(_span: span) -> Result<ty::region, ~str> {
+ fn anon_region(_span: span) -> Result<ty::Region, ~str> {
result::Ok(ty::re_static)
}
- fn self_region(_span: span) -> Result<ty::region, ~str> {
+ fn self_region(_span: span) -> Result<ty::Region, ~str> {
result::Err(~"only the static region is allowed here")
}
fn named_region(_span: span, _id: ast::ident)
- -> Result<ty::region, ~str>
+ -> Result<ty::Region, ~str>
{
result::Err(~"only the static region is allowed here")
}
enum type_rscope = Option<ty::region_variance>;
impl type_rscope: region_scope {
- fn anon_region(_span: span) -> Result<ty::region, ~str> {
+ fn anon_region(_span: span) -> Result<ty::Region, ~str> {
match *self {
Some(_) => result::Ok(ty::re_bound(ty::br_self)),
None => result::Err(~"to use region types here, the containing \
type must be declared with a region bound")
}
}
- fn self_region(span: span) -> Result<ty::region, ~str> {
+ fn self_region(span: span) -> Result<ty::Region, ~str> {
self.anon_region(span)
}
- fn named_region(span: span, id: ast::ident) -> Result<ty::region, ~str> {
+ fn named_region(span: span, id: ast::ident) -> Result<ty::Region, ~str> {
do empty_rscope.named_region(span, id).chain_err |_e| {
result::Err(~"named regions other than `self` are not \
allowed as part of a type declaration")
}
}
-fn bound_self_region(rp: Option<ty::region_variance>) -> Option<ty::region> {
+fn bound_self_region(rp: Option<ty::region_variance>) -> Option<ty::Region> {
match rp {
Some(_) => Some(ty::re_bound(ty::br_self)),
None => None
}
}
-enum anon_rscope = {anon: ty::region, base: region_scope};
-fn in_anon_rscope<RS: region_scope Copy Owned>(self: RS, r: ty::region)
+enum anon_rscope = {anon: ty::Region, base: region_scope};
+fn in_anon_rscope<RS: region_scope Copy Owned>(self: RS, r: ty::Region)
-> @anon_rscope {
@anon_rscope({anon: r, base: self as region_scope})
}
impl @anon_rscope: region_scope {
- fn anon_region(_span: span) -> Result<ty::region, ~str> {
+ fn anon_region(_span: span) -> Result<ty::Region, ~str> {
result::Ok(self.anon)
}
- fn self_region(span: span) -> Result<ty::region, ~str> {
+ fn self_region(span: span) -> Result<ty::Region, ~str> {
self.base.self_region(span)
}
- fn named_region(span: span, id: ast::ident) -> Result<ty::region, ~str> {
+ fn named_region(span: span, id: ast::ident) -> Result<ty::Region, ~str> {
self.base.named_region(span, id)
}
}
@binding_rscope { base: base, anon_bindings: 0 }
}
impl @binding_rscope: region_scope {
- fn anon_region(_span: span) -> Result<ty::region, ~str> {
+ fn anon_region(_span: span) -> Result<ty::Region, ~str> {
let idx = self.anon_bindings;
self.anon_bindings += 1;
result::Ok(ty::re_bound(ty::br_anon(idx)))
}
- fn self_region(span: span) -> Result<ty::region, ~str> {
+ fn self_region(span: span) -> Result<ty::Region, ~str> {
self.base.self_region(span)
}
- fn named_region(span: span, id: ast::ident) -> Result<ty::region, ~str> {
+ fn named_region(span: span, id: ast::ident) -> Result<ty::Region, ~str> {
do self.base.named_region(span, id).chain_err |_e| {
result::Ok(ty::re_bound(ty::br_named(id)))
}
// -*- rust -*-
#[link(name = "rustc",
- vers = "0.4",
+ vers = "0.5",
uuid = "0ce89b41-2f92-459e-bbc1-8f5fe32f16cf",
url = "https://github.com/mozilla/rust/tree/master/src/rustc")];
#[allow(deprecated_mode)];
#[allow(deprecated_pattern)];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
-extern mod syntax(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
+extern mod syntax(vers = "0.5");
use core::*;
use std::map::HashMap;
use syntax::ast;
-use ast::{ty, pat};
use syntax::codemap::{span};
use syntax::visit;
use syntax::print;
fn field_expr(f: ast::field) -> @ast::expr { return f.node.expr; }
fn field_exprs(fields: ~[ast::field]) -> ~[@ast::expr] {
- let mut es = ~[];
- for fields.each |f| { es.push(f.node.expr); }
- return es;
+ fields.map(|f| f.node.expr)
}
// Takes a predicate p, returns true iff p is true for any subexpressions
return *rs;
}
-fn has_nonlocal_exits(b: ast::blk) -> bool {
- do loop_query(b) |e| {
- match e {
- ast::expr_break(_) | ast::expr_again(_) => true,
- _ => false
- }
- }
-}
-
-fn may_break(b: ast::blk) -> bool {
- do loop_query(b) |e| {
- match e {
- ast::expr_break(_) => true,
- _ => false
- }
- }
+// Takes a predicate p, returns true iff p is true for any subexpressions
+// of b -- skipping any inner loops (loop, while, loop_body)
+fn block_query(b: ast::blk, p: fn@(@ast::expr) -> bool) -> bool {
+ let rs = @mut false;
+ let visit_expr =
+ |e: @ast::expr, &&flag: @mut bool, v: visit::vt<@mut bool>| {
+ *flag |= p(e);
+ visit::visit_expr(e, flag, v)
+ };
+ let v = visit::mk_vt(@{visit_expr: visit_expr
+ ,.. *visit::default_visitor()});
+ visit::visit_block(b, rs, v);
+ return *rs;
}
fn local_rhs_span(l: @ast::local, def: span) -> span {
}
fn is_main_name(path: syntax::ast_map::path) -> bool {
- // FIXME (#34): path should be a constrained type, so we know
- // the call to last doesn't fail.
vec::last(path) == syntax::ast_map::path_name(
syntax::parse::token::special_idents::main
)
use middle::ty::{bound_region, br_anon, br_named, br_self, br_cap_avoid};
use middle::ty::{ck_block, ck_box, ck_uniq, ctxt, field, method};
use middle::ty::{mt, t, param_bound};
-use middle::ty::{re_bound, re_free, re_scope, re_var, re_static, region};
+use middle::ty::{re_bound, re_free, re_scope, re_var, re_static, Region};
use middle::ty::{ty_bool, ty_bot, ty_box, ty_class, ty_enum};
use middle::ty::{ty_estr, ty_evec, ty_float, ty_fn, ty_trait, ty_int};
use middle::ty::{ty_nil, ty_opaque_box, ty_opaque_closure_ptr, ty_param};
mode_to_str, purity_to_str};
use syntax::{ast, ast_util};
use syntax::ast_map;
-use driver::session::session;
fn note_and_explain_region(cx: ctxt,
prefix: ~str,
- region: ty::region,
+ region: ty::Region,
suffix: ~str) {
match explain_region_and_span(cx, region) {
(str, Some(span)) => {
/// Returns a string like "the block at 27:31" that attempts to explain a
/// lifetime in a way it might plausibly be understood.
-fn explain_region(cx: ctxt, region: ty::region) -> ~str {
+fn explain_region(cx: ctxt, region: ty::Region) -> ~str {
let (res, _) = explain_region_and_span(cx, region);
return res;
}
-fn explain_region_and_span(cx: ctxt, region: ty::region)
+fn explain_region_and_span(cx: ctxt, region: ty::Region)
-> (~str, Option<span>)
{
return match region {
if cx.sess.verbose() {fmt!("&%u", idx)} else {~"&"}
}
- // FIXME(#3011) -- even if this arm is removed, exhaustiveness checking
- // does not fail
br_cap_avoid(id, br) => {
if cx.sess.verbose() {
fmt!("br_cap_avoid(%?, %s)", id, bound_region_to_str(cx, *br))
// In general, if you are giving a region error message,
// you should use `explain_region()` or, better yet,
// `note_and_explain_region()`
-fn region_to_str(cx: ctxt, region: region) -> ~str {
+fn region_to_str(cx: ctxt, region: Region) -> ~str {
if cx.sess.verbose() {
return fmt!("&%?", region);
}
_ => { }
}
s += ~"(";
- let mut strs = ~[];
- for inputs.each |a| { strs.push(fn_input_to_str(cx, *a)); }
+ let strs = inputs.map(|a| fn_input_to_str(cx, *a));
s += str::connect(strs, ~", ");
s += ~")";
if ty::get(output).sty != ty_nil {
ty_unboxed_vec(tm) => { ~"unboxed_vec<" + mt_to_str(cx, tm) + ~">" }
ty_type => ~"type",
ty_rec(elems) => {
- let mut strs: ~[~str] = ~[];
- for elems.each |fld| { strs.push(field_to_str(cx, *fld)); }
+ let strs = elems.map(|fld| field_to_str(cx, *fld));
~"{" + str::connect(strs, ~",") + ~"}"
}
ty_tup(elems) => {
- let mut strs = ~[];
- for elems.each |elem| { strs.push(ty_to_str(cx, *elem)); }
+ let strs = elems.map(|elem| ty_to_str(cx, *elem));
~"(" + str::connect(strs, ~",") + ~")"
}
ty_fn(ref f) => {
fn parameterized(cx: ctxt,
base: ~str,
- self_r: Option<ty::region>,
+ self_r: Option<ty::Region>,
tps: ~[ty::t]) -> ~str {
let r_str = match self_r {
use std::map::HashMap;
use rustc::driver::session;
use session::{basic_options, options};
-use session::session;
+use session::Session;
use rustc::driver::driver;
use syntax::diagnostic;
use syntax::diagnostic::handler;
type SrvOwner<T> = fn(srv: Srv) -> T;
type CtxtHandler<T> = fn~(ctxt: Ctxt) -> T;
-type Parser = fn~(session, ~str) -> @ast::crate;
+type Parser = fn~(Session, ~str) -> @ast::crate;
enum Msg {
HandleRequest(fn~(Ctxt)),
fn run<T>(owner: SrvOwner<T>, source: ~str, +parse: Parser) -> T {
let srv_ = Srv({
- ch: do task::spawn_listener |po| {
+ ch: do task::spawn_listener |move parse, po| {
act(po, source, parse);
}
});
let res = owner(srv_);
comm::send(srv_.ch, Exit);
- return res;
+ move res
}
fn act(po: comm::Port<Msg>, source: ~str, parse: Parser) {
let msg = HandleRequest(fn~(move f, ctxt: Ctxt) {
comm::send(ch, f(ctxt))
});
- comm::send(srv.ch, msg);
+ comm::send(srv.ch, move msg);
comm::recv(po)
}
-fn build_ctxt(sess: session,
+fn build_ctxt(sess: Session,
ast: @ast::crate) -> Ctxt {
use rustc::front::config;
}
}
-fn build_session() -> session {
+fn build_session() -> Session {
let sopts: @options = basic_options();
let codemap = codemap::new_codemap();
let error_handlers = build_error_handlers(codemap);
// Build a custom error handler that will allow us to ignore non-fatal
// errors
fn build_error_handlers(
- codemap: codemap::codemap
+ codemap: codemap::CodeMap
) -> ErrorHandlers {
type DiagnosticHandler = {
fn note(msg: &str) { self.inner.note(msg) }
fn bug(msg: &str) -> ! { self.inner.bug(msg) }
fn unimpl(msg: &str) -> ! { self.inner.unimpl(msg) }
- fn emit(cmsp: Option<(codemap::codemap, codemap::span)>,
+ fn emit(cmsp: Option<(codemap::CodeMap, codemap::span)>,
msg: &str, lvl: diagnostic::level) {
self.inner.emit(cmsp, msg, lvl)
}
}
- let emitter = fn@(cmsp: Option<(codemap::codemap, codemap::span)>,
+ let emitter = fn@(cmsp: Option<(codemap::CodeMap, codemap::span)>,
msg: &str, lvl: diagnostic::level) {
diagnostic::emit(cmsp, msg, lvl);
};
}
}
-fn doc_meta(
- attrs: ~[ast::attribute]
-) -> Option<@ast::meta_item> {
-
- /*!
- * Given a vec of attributes, extract the meta_items contained in the \
- * doc attribute
- */
-
- let doc_metas = doc_metas(attrs);
- if vec::is_not_empty(doc_metas) {
- if vec::len(doc_metas) != 1u {
- warn!("ignoring %u doc attributes", vec::len(doc_metas) - 1u);
- }
- Some(doc_metas[0])
- } else {
- None
- }
-
-}
-
fn doc_metas(
attrs: ~[ast::attribute]
) -> ~[@ast::meta_item] {
}
fn parse_desc(attrs: ~[ast::attribute]) -> Option<~str> {
- match doc_meta(attrs) {
- Some(meta) => {
- attr::get_meta_item_value_str(meta)
- }
- None => None
+ let doc_strs = do doc_metas(attrs).filter_map |meta| {
+ attr::get_meta_item_value_str(*meta)
+ };
+ if doc_strs.is_empty() {
+ None
+ } else {
+ Some(str::connect(doc_strs, "\n"))
}
}
let attrs = test::parse_attributes(source);
assert parse_hidden(attrs) == false;
}
+
+#[test]
+fn should_concatenate_multiple_doc_comments() {
+ let source = ~"/// foo\n/// bar";
+ let desc = parse_desc(test::parse_attributes(source));
+ assert desc == Some(~"foo\nbar");
+}
+
+
{
topmod: doc::ModDoc_({
item: {
- name: option::get_default(&attrs.name, doc.topmod.name()),
+ name: option::get_default(attrs.name, doc.topmod.name()),
.. doc.topmod.item
},
.. *doc.topmod
srv: astsrv::Srv,
id: doc::AstId,
+parse_attrs: fn~(~[ast::attribute]) -> T) -> T {
- do astsrv::exec(srv) |ctxt| {
+ do astsrv::exec(srv) |move parse_attrs, ctxt| {
let attrs = match ctxt.ast_map.get(id) {
ast_map::node_item(item, _) => item.attrs,
ast_map::node_foreign_item(item, _, _) => item.attrs,
node: ast::item_enum(enum_definition, _), _
}, _) => {
let ast_variant = option::get(
- &vec::find(enum_definition.variants, |v| {
+ vec::find(enum_definition.variants, |v| {
to_str(v.node.name) == variant.name
}));
attr_parser::parse_desc(ast_variant.node.attrs)
}
- _ => fail #fmt("Enum variant %s has id that's not bound \
+ _ => fail fmt!("Enum variant %s has id that's not bound \
to an enum item", variant.name)
}
};
 * for testing purposes. It doesn't serve any functional
* purpose. This here, for instance, is just some filler text.
*
- * FIXME (#1654): It would be nice if we could run some automated
+ * FIXME (#3731): It would be nice if we could run some automated
* tests on this file
*/
impl Doc {
fn CrateDoc() -> CrateDoc {
- option::get(&vec::foldl(None, self.pages, |_m, page| {
+ option::get(vec::foldl(None, self.pages, |_m, page| {
match *page {
doc::CratePage(doc) => Some(doc),
_ => None
) -> Fold<T> {
Fold({
ctxt: ctxt,
- fold_doc: fold_doc,
- fold_crate: fold_crate,
- fold_item: fold_item,
- fold_mod: fold_mod,
- fold_nmod: fold_nmod,
- fold_fn: fold_fn,
- fold_const: fold_const,
- fold_enum: fold_enum,
- fold_trait: fold_trait,
- fold_impl: fold_impl,
- fold_type: fold_type,
- fold_struct: fold_struct,
+ fold_doc: move fold_doc,
+ fold_crate: move fold_crate,
+ fold_item: move fold_item,
+ fold_mod: move fold_mod,
+ fold_nmod: move fold_nmod,
+ fold_fn: move fold_fn,
+ fold_const: move fold_const,
+ fold_enum: move fold_enum,
+ fold_trait: move fold_trait,
+ fold_impl: move fold_impl,
+ fold_type: move fold_type,
+ fold_struct: move fold_struct
})
}
export header_kind, header_name, header_text;
fn mk_pass(+writer_factory: WriterFactory) -> Pass {
- let f = fn~(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
+ let f = fn~(move writer_factory,
+ srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
run(srv, doc, copy writer_factory)
};
{
name: ~"markdown",
- f: f
+ f: move f
}
}
~"mods last", mods_last
).f(srv, doc);
- write_markdown(sorted_doc, writer_factory);
+ write_markdown(sorted_doc, move writer_factory);
return doc;
}
doc: doc::Doc,
+writer_factory: WriterFactory
) {
- // FIXME #2484: There is easy parallelism to be had here but
- // we don't want to spawn too many pandoc processes
+ // There is easy parallelism to be had here, but
+ // we don't want to spawn too many pandoc processes.
+ // (See #2484, which is closed.)
do doc.pages.map |page| {
let ctxt = {
w: writer_factory(*page)
let (srv, doc) = test::create_doc_srv(~"mod a { }");
// Split the document up into pages
let doc = page_pass::mk_pass(config::DocPerMod).f(srv, doc);
- write_markdown(doc, writer_factory);
+ write_markdown(doc, move writer_factory);
// We expect two pages to have been written
- for iter::repeat(2u) {
+ for iter::repeat(2) {
comm::recv(po);
}
}
let (srv, doc) = test::create_doc_srv(
~"#[link(name = \"core\")]; mod a { }");
let doc = page_pass::mk_pass(config::DocPerMod).f(srv, doc);
- write_markdown(doc, writer_factory);
- for iter::repeat(2u) {
+ write_markdown(doc, move writer_factory);
+ for iter::repeat(2) {
let (page, markdown) = comm::recv(po);
match page {
doc::CratePage(_) => {
doc: doc::Doc
) -> ~str {
let (writer_factory, po) = markdown_writer::future_writer_factory();
- write_markdown(doc, writer_factory);
+ write_markdown(doc, move writer_factory);
return comm::recv(po).second();
}
doc: doc::Doc
) -> ~str {
let (writer_factory, po) = markdown_writer::future_writer_factory();
- let pass = mk_pass(writer_factory);
+ let pass = mk_pass(move writer_factory);
pass.f(srv, doc);
return comm::recv(po).second();
}
// Copied from run::program_output
let file = os::fdopen(fd);
let reader = io::FILE_reader(file, false);
- let mut buf = ~"";
- while !reader.eof() {
- let bytes = reader.read_bytes(4096u);
- buf += str::from_bytes(bytes);
- }
+ let buf = io::with_bytes_writer(|writer| {
+ let mut bytes = [mut 0, ..4096];
+ while !reader.eof() {
+ let nread = reader.read(bytes, bytes.len());
+ writer.write(bytes.view(0, nread));
+ }
+ });
os::fclose(file);
- return buf;
+ str::from_bytes(buf)
}
fn generic_writer(+process: fn~(markdown: ~str)) -> Writer {
- let ch = do task::spawn_listener |po: comm::Port<WriteInstr>| {
+ let ch = do task::spawn_listener
+ |move process, po: comm::Port<WriteInstr>| {
let mut markdown = ~"";
let mut keep_going = true;
while keep_going {
Done => keep_going = false
}
}
- process(markdown);
+ process(move markdown);
};
fn~(+instr: WriteInstr) {
let writer_ch = comm::Chan(&writer_po);
do task::spawn {
let (writer, future) = future_writer();
- comm::send(writer_ch, writer);
+ comm::send(writer_ch, move writer);
let s = future::get(&future);
comm::send(markdown_ch, (page, s));
}
comm::recv(writer_po)
};
- (writer_factory, markdown_po)
+ (move writer_factory, markdown_po)
}
fn future_writer() -> (Writer, future::Future<~str>) {
let (chan, port) = pipes::stream();
- let writer = fn~(+instr: WriteInstr) {
+ let writer = fn~(move chan, +instr: WriteInstr) {
chan.send(copy instr);
};
- let future = do future::from_fn {
+ let future = do future::from_fn |move port| {
let mut res = ~"";
loop {
match port.recv() {
}
res
};
- (writer, future)
+ (move writer, move future)
}
loop {
let val = comm::recv(page_port);
if val.is_some() {
- pages += ~[option::unwrap(val)];
+ pages += ~[option::unwrap(move val)];
} else {
break;
}
~"-", @source, ~[], parse::new_parse_sess(None))
}
-fn from_file_sess(sess: session::session, file: &Path) -> @ast::crate {
+fn from_file_sess(sess: session::Session, file: &Path) -> @ast::crate {
parse::parse_crate_from_file(
file, cfg(sess, file_input(*file)), sess.parse_sess)
}
-fn from_str_sess(sess: session::session, source: ~str) -> @ast::crate {
+fn from_str_sess(sess: session::Session, source: ~str) -> @ast::crate {
parse::parse_crate_from_source_str(
~"-", @source, cfg(sess, str_input(source)), sess.parse_sess)
}
-fn cfg(sess: session::session, input: driver::input) -> ast::crate_cfg {
+fn cfg(sess: session::Session, input: driver::input) -> ast::crate_cfg {
driver::default_configuration(sess, ~"rustdoc", input)
}
//! Rustdoc - The Rust documentation generator
#[link(name = "rustdoc",
- vers = "0.4",
+ vers = "0.5",
uuid = "f8abd014-b281-484d-a0c3-26e3de8e2412",
url = "https://github.com/mozilla/rust/tree/master/src/rustdoc")];
#[allow(deprecated_mode)];
#[allow(deprecated_pattern)];
-extern mod core(vers = "0.4");
-extern mod std(vers = "0.4");
-extern mod rustc(vers = "0.4");
-extern mod syntax(vers = "0.4");
+extern mod core(vers = "0.5");
+extern mod std(vers = "0.5");
+extern mod rustc(vers = "0.5");
+extern mod syntax(vers = "0.5");
use core::*;
use std::par;
let rv = f();
let end = std::time::precise_time_s();
info!("time: %3.3f s %s", end - start, what);
- return rv;
+ move rv
}
fn mk_pass(name: ~str, +lteq: ItemLtEq) -> Pass {
{
name: name,
- f: fn~(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
+ f: fn~(move lteq, srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
run(srv, doc, lteq)
}
}
fn mk_pass(name: ~str, +op: fn~(~str) -> ~str) -> Pass {
{
name: name,
- f: fn~(srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
+ f: fn~(move op, srv: astsrv::Srv, doc: doc::Doc) -> doc::Doc {
run(srv, doc, op)
}
}
}
extern "C" void LLVMSetDebug(int Enabled) {
+#ifndef NDEBUG
DebugFlag = Enabled;
+#endif
}
+S 2012-10-09 cd6f24f
+ macos-i386 7f2f2857eac33ff0792e4ea7a3ff91a09304fcab
+ macos-x86_64 bb3d191e2e31cb754223ab162281fd9727e63ea9
+ freebsd-x86_64 a2b5e9dddfa8f21cc8a068b77a47ba5425bfdcc6
+ linux-i386 7c13c04ed6593dc77db6b3b56f057213f567a32b
+ linux-x86_64 7860cdd4023e9d6bec892dc5a7144b286a7fd38e
+ winnt-i386 9e917c2f3d72f72042d5e9b60b45790740676e82
+
+S 2012-10-08 a477c5a
+ macos-i386 c059c3d5bd113f7edec48a4c2128f4b6138c3db8
+ macos-x86_64 f121f4e2d831434f7825f72f3d328c11b13a522f
+ freebsd-x86_64 7f0de8feefc13267cbdebd299adc6b06f832cb9f
+ linux-i386 5975e794e5939034516fe888b70b532d34327bb2
+ linux-x86_64 6dd88754f170f85d268e9430f7728efe43522383
+ winnt-i386 76655d202b59c9b61cae860110ad5d0ca6e12cbf
+
+S 2012-10-07 d301dd3
+ macos-i386 c9dfce9f231f22969b7e7995c1f39fcf86f81b2b
+ macos-x86_64 3b1f6fd43fe03d7af334eeb111bc384428c4cd3d
+ freebsd-x86_64 784ac161fee0351281e3edfefc81a0c1b5d068b5
+ linux-i386 4d945d7e0de4e4544928ed5aa111d1508522c697
+ linux-x86_64 9bea5436042dd4bb7e682d3a10d0d51c3590b531
+ winnt-i386 62de5eea3eba70a2f4a2b4d42c72aa2fa75f999a
+
S 2012-10-05 937f8f4
macos-i386 8b5ddc78b3004e539c6fbe224e492e4a6a1bc867
macos-x86_64 03793e0136512c644edfb5f13cc5bb7d67fb24e5
--- /dev/null
+pub struct Foo {
+ x: int
+}
+
+pub impl Foo {
+ static fn new() -> Foo {
+ Foo { x: 3 }
+ }
+}
+
cat {
meows: in_x,
how_hungry: in_y,
- info: in_info
+ info: move in_info
}
}
}
impl cat : ToStr {
- fn to_str() -> ~str { self.name }
+ pure fn to_str() -> ~str { self.name }
}
priv impl cat {
--- /dev/null
+#[crate_type = "lib"];
+
+pub mod issue_3136_a;
--- /dev/null
+trait x {
+ fn use_x<T>();
+}
+enum y = ();
+impl y:x {
+ fn use_x<T>() {
+ struct foo { //~ ERROR quux
+ i: ()
+ }
+ fn new_foo<T>(i: ()) -> foo {
+ foo { i: i }
+ }
+ }
+}
+
use libc::size_t;
-export port::{};
export port;
export recv;
/// Receive on a raw port pointer
fn recv_<T: Send>(p: *rust_port) -> T {
- let yield = 0u;
+ let yield = 0;
let yieldp = ptr::addr_of(&yield);
let mut res;
res = rusti::init::<T>();
rustrt::port_recv(ptr::addr_of(&res) as *uint, p, yieldp);
- if yield != 0u {
+ if yield != 0 {
// Data isn't available yet, so res has not been initialized.
task::yield();
} else {
// this is a good place to yield
task::yield();
}
- return res;
+ move res
}
Q.add_back(key);
marks[key] = key;
- while Q.size() > 0u {
+ while Q.size() > 0 {
let t = Q.pop_front();
do graph[t].each() |k| {
};
}
- vec::from_mut(marks)
+ vec::from_mut(move marks)
}
/**
}
}
- let mut i = 0u;
+ let mut i = 0;
while vec::any(colors, is_gray) {
// Do the BFS.
log(info, fmt!("PBFS iteration %?", i));
- i += 1u;
+ i += 1;
colors = do colors.mapi() |i, c| {
let c : color = *c;
match c {
i += 1;
let old_len = colors.len();
- let color = arc::ARC(colors);
+ let color = arc::ARC(move colors);
let color_vec = arc::get(&color); // FIXME #3387 requires this temp
colors = do par::mapi_factory(*color_vec) {
let colors = arc::clone(&color);
let graph = arc::clone(&graph);
- fn~(+i: uint, +c: color) -> color {
+ fn~(move graph, move colors, +i: uint, +c: color) -> color {
let c : color = c;
let colors = arc::get(&colors);
let graph = arc::get(&graph);
let args = os::args();
let args = if os::getenv(~"RUST_BENCH").is_some() {
~[~"", ~"15", ~"48"]
- } else if args.len() <= 1u {
+ } else if args.len() <= 1 {
~[~"", ~"10", ~"16"]
} else {
args
let do_sequential = true;
let start = time::precise_time_s();
- let edges = make_edges(scale, 16u);
+ let edges = make_edges(scale, 16);
let stop = time::precise_time_s();
io::stdout().write_line(fmt!("Generated %? edges in %? seconds.",
vec::len(edges), stop - start));
let start = time::precise_time_s();
- let graph = make_graph(1u << scale, edges);
+ let graph = make_graph(1 << scale, edges);
let stop = time::precise_time_s();
- let mut total_edges = 0u;
+ let mut total_edges = 0;
vec::each(graph, |edges| { total_edges += edges.len(); true });
io::stdout().write_line(fmt!("Generated graph with %? edges in %? seconds.",
- total_edges / 2u,
+ total_edges / 2,
stop - start));
let mut total_seq = 0.0;
use pipes::{Port, Chan, SharedChan};
macro_rules! move_out (
- { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); y } }
+ { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); move y } }
)
enum request {
let (to_parent, from_child) = pipes::stream();
let (to_child, from_parent) = pipes::stream();
- let to_child = SharedChan(to_child);
+ let to_child = SharedChan(move to_child);
let size = uint::from_str(args[1]).get();
let workers = uint::from_str(args[2]).get();
let num_bytes = 100;
let start = std::time::precise_time_s();
let mut worker_results = ~[];
- for uint::range(0u, workers) |i| {
+ for uint::range(0, workers) |_i| {
let to_child = to_child.clone();
do task::task().future_result(|+r| {
- worker_results.push(r);
- }).spawn {
- for uint::range(0u, size / workers) |_i| {
+ worker_results.push(move r);
+ }).spawn |move to_child| {
+ for uint::range(0, size / workers) |_i| {
//error!("worker %?: sending %? bytes", i, num_bytes);
to_child.send(bytes(num_bytes));
}
//error!("worker %? exiting", i);
};
}
- do task::spawn {
+ do task::spawn |move from_parent, move to_parent| {
server(from_parent, to_parent);
}
use pipes::{Port, PortSet, Chan};
macro_rules! move_out (
- { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); y } }
+ { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); move y } }
)
enum request {
let (to_parent, from_child) = pipes::stream();
let (to_child, from_parent_) = pipes::stream();
let from_parent = PortSet();
- from_parent.add(from_parent_);
+ from_parent.add(move from_parent_);
let size = uint::from_str(args[1]).get();
let workers = uint::from_str(args[2]).get();
let num_bytes = 100;
let start = std::time::precise_time_s();
let mut worker_results = ~[];
- for uint::range(0u, workers) |i| {
+ for uint::range(0, workers) |_i| {
let (to_child, from_parent_) = pipes::stream();
- from_parent.add(from_parent_);
+ from_parent.add(move from_parent_);
do task::task().future_result(|+r| {
- worker_results.push(r);
- }).spawn {
- for uint::range(0u, size / workers) |_i| {
+ worker_results.push(move r);
+ }).spawn |move to_child| {
+ for uint::range(0, size / workers) |_i| {
//error!("worker %?: sending %? bytes", i, num_bytes);
to_child.send(bytes(num_bytes));
}
//error!("worker %? exiting", i);
};
}
- do task::spawn {
+ do task::spawn |move from_parent, move to_parent| {
server(from_parent, to_parent);
}
fn init() -> (pipe,pipe) {
let m = arc::MutexARC(~[]);
- ((&m).clone(), m)
+ ((&m).clone(), move m)
}
count: uint,
+num_chan: pipe,
+num_port: pipe) {
- let mut num_chan <- Some(num_chan);
- let mut num_port <- Some(num_port);
+ let mut num_chan <- Some(move num_chan);
+ let mut num_port <- Some(move num_port);
// Send/Receive lots of messages.
for uint::range(0u, count) |j| {
//error!("task %?, iter %?", i, j);
let mut num_chan2 = option::swap_unwrap(&mut num_chan);
let mut num_port2 = option::swap_unwrap(&mut num_port);
send(&num_chan2, i * j);
- num_chan = Some(num_chan2);
+ num_chan = Some(move num_chan2);
let _n = recv(&num_port2);
//log(error, _n);
- num_port = Some(num_port2);
+ num_port = Some(move num_port2);
};
}
let msg_per_task = uint::from_str(args[2]).get();
let (num_chan, num_port) = init();
- let mut num_chan = Some(num_chan);
+ let mut num_chan = Some(move num_chan);
let start = time::precise_time_s();
let (new_chan, num_port) = init();
let num_chan2 = ~mut None;
*num_chan2 <-> num_chan;
- let num_port = ~mut Some(num_port);
+ let num_port = ~mut Some(move num_port);
let new_future = future::spawn(|move num_chan2, move num_port| {
let mut num_chan = None;
num_chan <-> *num_chan2;
let mut num_port1 = None;
num_port1 <-> *num_port;
thread_ring(i, msg_per_task,
- option::unwrap(num_chan),
- option::unwrap(num_port1))
+ option::unwrap(move num_chan),
+ option::unwrap(move num_port1))
});
- futures.push(new_future);
- num_chan = Some(new_chan);
+ futures.push(move new_future);
+ num_chan = Some(move new_chan);
};
// do our iteration
- thread_ring(0u, msg_per_task, option::unwrap(num_chan), num_port);
+ thread_ring(0, msg_per_task, option::unwrap(move num_chan), move num_port);
// synchronize
for futures.each |f| { future::get(f) };
fn macros() {
#macro[
[#move_out[x],
- unsafe { let y <- *ptr::addr_of(&x); y }]
+ unsafe { let y <- *ptr::addr_of(&x); move y }]
];
}
count: uint,
+num_chan: ring::client::num,
+num_port: ring::server::num) {
- let mut num_chan <- Some(num_chan);
- let mut num_port <- Some(num_port);
+ let mut num_chan <- Some(move num_chan);
+ let mut num_port <- Some(move num_port);
// Send/Receive lots of messages.
- for uint::range(0u, count) |j| {
+ for uint::range(0, count) |j| {
//error!("task %?, iter %?", i, j);
let mut num_chan2 = None;
let mut num_port2 = None;
num_chan2 <-> num_chan;
num_port2 <-> num_port;
- num_chan = Some(ring::client::num(option::unwrap(num_chan2), i * j));
- let port = option::unwrap(num_port2);
- match recv(port) {
+ num_chan = Some(ring::client::num(option::unwrap(move num_chan2), i * j));
+ let port = option::unwrap(move num_port2);
+ match recv(move port) {
ring::num(_n, p) => {
//log(error, _n);
num_port = Some(move_out!(p));
let msg_per_task = uint::from_str(args[2]).get();
let (num_chan, num_port) = ring::init();
- let mut num_chan = Some(num_chan);
+ let mut num_chan = Some(move num_chan);
let start = time::precise_time_s();
let (new_chan, num_port) = ring::init();
let num_chan2 = ~mut None;
*num_chan2 <-> num_chan;
- let num_port = ~mut Some(num_port);
+ let num_port = ~mut Some(move num_port);
let new_future = do future::spawn
|move num_chan2, move num_port| {
let mut num_chan = None;
let mut num_port1 = None;
num_port1 <-> *num_port;
thread_ring(i, msg_per_task,
- option::unwrap(num_chan),
- option::unwrap(num_port1))
+ option::unwrap(move num_chan),
+ option::unwrap(move num_port1))
};
- futures.push(new_future);
- num_chan = Some(new_chan);
+ futures.push(move new_future);
+ num_chan = Some(move new_chan);
};
// do our iteration
- thread_ring(0u, msg_per_task, option::unwrap(num_chan), num_port);
+ thread_ring(0, msg_per_task, option::unwrap(move num_chan), move num_port);
// synchronize
for futures.each |f| { future::get(f) };
fn init() -> (pipe,pipe) {
let x = arc::RWARC(~[]);
- ((&x).clone(), x)
+ ((&x).clone(), move x)
}
count: uint,
+num_chan: pipe,
+num_port: pipe) {
- let mut num_chan <- Some(num_chan);
- let mut num_port <- Some(num_port);
+ let mut num_chan <- Some(move num_chan);
+ let mut num_port <- Some(move num_port);
// Send/Receive lots of messages.
for uint::range(0u, count) |j| {
//error!("task %?, iter %?", i, j);
let mut num_chan2 = option::swap_unwrap(&mut num_chan);
let mut num_port2 = option::swap_unwrap(&mut num_port);
send(&num_chan2, i * j);
- num_chan = Some(num_chan2);
+ num_chan = Some(move num_chan2);
let _n = recv(&num_port2);
//log(error, _n);
- num_port = Some(num_port2);
+ num_port = Some(move num_port2);
};
}
let msg_per_task = uint::from_str(args[2]).get();
let (num_chan, num_port) = init();
- let mut num_chan = Some(num_chan);
+ let mut num_chan = Some(move num_chan);
let start = time::precise_time_s();
let (new_chan, num_port) = init();
let num_chan2 = ~mut None;
*num_chan2 <-> num_chan;
- let num_port = ~mut Some(num_port);
+ let num_port = ~mut Some(move num_port);
let new_future = do future::spawn
|move num_chan2, move num_port| {
let mut num_chan = None;
let mut num_port1 = None;
num_port1 <-> *num_port;
thread_ring(i, msg_per_task,
- option::unwrap(num_chan),
- option::unwrap(num_port1))
+ option::unwrap(move num_chan),
+ option::unwrap(move num_port1))
};
- futures.push(new_future);
- num_chan = Some(new_chan);
+ futures.push(move new_future);
+ num_chan = Some(move new_chan);
};
// do our iteration
- thread_ring(0u, msg_per_task, option::unwrap(num_chan), num_port);
+ thread_ring(0, msg_per_task, option::unwrap(move num_chan), move num_port);
// synchronize
for futures.each |f| { future::get(f) };
get_chan_chan.send(Chan(&p));
thread_ring(i, msg_per_task, num_chan, p)
};
- futures.push(new_future);
+ futures.push(move new_future);
num_chan = get_chan.recv();
};
let workers = uint::from_str(args[2]).get();
let start = std::time::precise_time_s();
let mut worker_results = ~[];
- for uint::range(0u, workers) |_i| {
+ for uint::range(0, workers) |_i| {
do task::task().future_result(|+r| {
- worker_results.push(r);
+ worker_results.push(move r);
}).spawn {
- for uint::range(0u, size / workers) |_i| {
- comm::send(to_child, bytes(100u));
+ for uint::range(0, size / workers) |_i| {
+ comm::send(to_child, bytes(100));
}
};
}
// This stuff should go in libcore::pipes
macro_rules! move_it (
- { $x:expr } => { let t <- *ptr::addr_of(&($x)); t }
+ { $x:expr } => { let t <- *ptr::addr_of(&($x)); move t }
)
macro_rules! follow (
$($message:path($($x: ident),+) -> $next:ident $e:expr)+
} => (
|m| match move m {
- $(Some($message($($x,)* next)) => {
- // FIXME (#2329) use regular move here once move out of
- // enums is supported.
- let $next = unsafe { move_it!(next) };
- $e })+
+ $(Some($message($($x,)* move next)) => {
+ let $next = move next;
+ move $e })+
_ => { fail }
}
);
$($message:path -> $next:ident $e:expr)+
} => (
|m| match move m {
- $(Some($message(next)) => {
- // FIXME (#2329) use regular move here once move out of
- // enums is supported.
- let $next = unsafe { move_it!(next) };
- $e })+
+ $(Some($message(move next)) => {
+ let $next = move next;
+ move $e })+
_ => { fail }
}
)
fn switch<T: Send, Tb: Send, U>(+endp: pipes::RecvPacketBuffered<T, Tb>,
f: fn(+v: Option<T>) -> U) -> U {
- f(pipes::try_recv(endp))
+ f(pipes::try_recv(move endp))
}
// Here's the benchmark
let mut ch = do spawn_service(init) |ch| {
let mut count = count;
- let mut ch = ch;
+ let mut ch = move ch;
while count > 0 {
- ch = switch(ch, follow! (
- ping -> next { server::pong(next) }
+ ch = switch(move ch, follow! (
+ ping -> next { server::pong(move next) }
));
count -= 1;
let mut count = count;
while count > 0 {
- let ch_ = client::ping(ch);
+ let ch_ = client::ping(move ch);
- ch = switch(ch_, follow! (
- pong -> next { next }
+ ch = switch(move ch_, follow! (
+ pong -> next { move next }
));
count -= 1;
let mut ch = do spawn_service(init) |ch| {
let mut count = count;
- let mut ch = ch;
+ let mut ch = move ch;
while count > 0 {
- ch = switch(ch, follow! (
- ping -> next { server::pong(next) }
+ ch = switch(move ch, follow! (
+ ping -> next { server::pong(move next) }
));
count -= 1;
let mut count = count;
while count > 0 {
- let ch_ = client::ping(ch);
+ let ch_ = client::ping(move ch);
- ch = switch(ch_, follow! (
- pong -> next { next }
+ ch = switch(move ch_, follow! (
+ pong -> next { move next }
));
count -= 1;
};
//comm::send(to_parent, fmt!("yay{%u}", sz));
- to_parent.send(buffer);
+ to_parent.send(move buffer);
}
// given a FASTA file on stdin, process sequence THREE
// initialize each sequence sorter
- let sizes = ~[1u,2u,3u,4u,6u,12u,18u];
+ let sizes = ~[1,2,3,4,6,12,18];
let streams = vec::map(sizes, |_sz| Some(stream()));
- let streams = vec::to_mut(streams);
+ let streams = vec::to_mut(move streams);
let mut from_child = ~[];
let to_child = vec::mapi(sizes, |ii, sz| {
let sz = *sz;
let mut stream = None;
stream <-> streams[ii];
- let (to_parent_, from_child_) = option::unwrap(stream);
+ let (to_parent_, from_child_) = option::unwrap(move stream);
- from_child.push(from_child_);
+ from_child.push(move from_child_);
let (to_child, from_parent) = pipes::stream();
- do task::spawn_with(from_parent) |from_parent| {
+ do task::spawn_with(move from_parent) |move to_parent_, from_parent| {
make_sequence_processor(sz, from_parent, to_parent_);
};
- to_child
+ move to_child
});
} else {
let p = pipes::PortSet();
let ch = p.chan();
- task::spawn(|| pfib(ch, n - 1) );
+ task::spawn(|move ch| pfib(ch, n - 1) );
let ch = p.chan();
- task::spawn(|| pfib(ch, n - 2) );
+ task::spawn(|move ch| pfib(ch, n - 2) );
c.send(p.recv() + p.recv());
}
}
let (ch, p) = pipes::stream();
- let t = task::spawn(|| pfib(ch, n) );
+ let _t = task::spawn(|move ch| pfib(ch, n) );
p.recv()
}
let mut results = ~[];
for range(0, num_tasks) |i| {
do task::task().future_result(|+r| {
- results.push(r);
+ results.push(move r);
}).spawn {
stress_task(i);
}
let out = io::stdout();
for range(1, max + 1) |n| {
- for range(0, num_trials) |i| {
+ for range(0, num_trials) |_i| {
let start = time::precise_time_ns();
let fibn = fib(n);
let stop = time::precise_time_ns();
let args = os::args();
let grid = if vec::len(args) == 1u {
// FIXME create sudoku inline since nested vec consts dont work yet
- // (#571)
+ // (#3733)
let g = vec::from_fn(10u, |_i| {
vec::to_mut(vec::from_elem(10u, 0 as u8))
});
box: @Cons((), st.box),
unique: ~Cons((), @*st.unique),
fn_box: fn@() -> @nillist { @Cons((), fn_box()) },
- fn_unique: fn~() -> ~nillist { ~Cons((), @*fn_unique()) },
+ fn_unique: fn~(move fn_unique) -> ~nillist
+ { ~Cons((), @*fn_unique()) },
tuple: (@Cons((), st.tuple.first()),
~Cons((), @*st.tuple.second())),
vec: st.vec + ~[@Cons((), st.vec.last())],
}
};
- recurse_or_fail(depth, Some(st));
+ recurse_or_fail(depth, Some(move st));
}
}
// This used to be O(n^2) in the number of generations that ever existed.
// With this code, only as many generations are alive at a time as tasks
// alive at a time,
- let c = ~mut Some(c);
- do task::spawn_supervised {
+ let c = ~mut Some(move c);
+ do task::spawn_supervised |move c| {
let c = option::swap_unwrap(c);
if gens_left & 1 == 1 {
task::yield(); // shake things up a bit
}
if gens_left > 0 {
- child_generation(gens_left - 1, c); // recurse
+ child_generation(gens_left - 1, move c); // recurse
} else {
c.send(())
}
let args = os::args();
let args = if os::getenv(~"RUST_BENCH").is_some() {
~[~"", ~"100000"]
- } else if args.len() <= 1u {
+ } else if args.len() <= 1 {
~[~"", ~"100"]
} else {
copy args
};
let (c,p) = pipes::stream();
- child_generation(uint::from_str(args[1]).get(), c);
+ child_generation(uint::from_str(args[1]).get(), move c);
if p.try_recv().is_none() {
fail ~"it happened when we slumbered";
}
comm::recv(comm::Port::<()>()); // block forever
}
}
- #error["Grandchild group getting started"];
+ error!("Grandchild group getting started");
for num_tasks.times {
// Make sure all above children are fully spawned; i.e., enlisted in
// their ancestor groups.
comm::recv(po);
}
- #error["Grandchild group ready to go."];
+ error!("Grandchild group ready to go.");
// Master grandchild task exits early.
}
fn spawn_supervised_blocking(myname: &str, +f: fn~()) {
let mut res = None;
- task::task().future_result(|+r| res = Some(r)).supervised().spawn(f);
- #error["%s group waiting", myname];
- let x = future::get(&option::unwrap(res));
+ task::task().future_result(|+r| res = Some(move r)).supervised().spawn(move f);
+ error!("%s group waiting", myname);
+ let x = future::get(&option::unwrap(move res));
assert x == task::Success;
}
grandchild_group(num_tasks);
}
// When grandchild group is ready to go, make the middle group exit.
- #error["Middle group wakes up and exits"];
+ error!("Middle group wakes up and exits");
}
// Grandparent group waits for middle group to be gone, then fails
- #error["Grandparent group wakes up and fails"];
+ error!("Grandparent group wakes up and fails");
fail;
};
assert x.is_err();
use to_bytes::IterBytes;
macro_rules! move_out (
- { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); y } }
+ { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); move y } }
)
trait word_reader {
fn swap(f: fn(+v: T) -> T) {
let mut tmp = None;
self.contents <-> tmp;
- self.contents = Some(f(option::unwrap(tmp)));
+ self.contents = Some(f(option::unwrap(move tmp)));
}
fn unwrap() -> T {
let mut tmp = None;
self.contents <-> tmp;
- option::unwrap(tmp)
+ option::unwrap(move tmp)
}
}
fn box<T>(+x: T) -> box<T> {
box {
- contents: Some(x)
+ contents: Some(move x)
}
}
let mut tasks = ~[];
for inputs.each |i| {
let (ctrl, ctrl_server) = ctrl_proto::init();
- let ctrl = box(ctrl);
+ let ctrl = box(move ctrl);
let i = copy *i;
let m = copy *map;
- tasks.push(spawn_joinable(|move i| map_task(m, &ctrl, i)));
- ctrls.push(ctrl_server);
+ tasks.push(spawn_joinable(|move ctrl, move i| map_task(m, &ctrl, i)));
+ ctrls.push(move ctrl_server);
}
- return tasks;
+ move tasks
}
fn map_task<K1: Copy Send, K2: Hash IterBytes Eq Const Copy Send, V: Copy Send>(
Some(_c) => { c = Some(_c); }
None => {
do ctrl.swap |ctrl| {
- let ctrl = ctrl_proto::client::find_reducer(ctrl, *key);
- match pipes::recv(ctrl) {
+ let ctrl = ctrl_proto::client::find_reducer(move ctrl, *key);
+ match pipes::recv(move ctrl) {
ctrl_proto::reducer(c_, ctrl) => {
c = Some(c_);
move_out!(ctrl)
let mut num_mappers = vec::len(inputs) as int;
while num_mappers > 0 {
- let (_ready, message, ctrls) = pipes::select(ctrl);
- match option::unwrap(message) {
+ let (_ready, message, ctrls) = pipes::select(move ctrl);
+ match option::unwrap(move message) {
ctrl_proto::mapper_done => {
// error!("received mapper terminated.");
num_mappers -= 1;
- ctrl = ctrls;
+ ctrl = move ctrls;
}
ctrl_proto::find_reducer(k, cc) => {
let c;
let p = Port();
let ch = Chan(&p);
let r = reduce, kk = k;
- tasks.push(spawn_joinable(|| reduce_task(~r, kk, ch) ));
+ tasks.push(spawn_joinable(|move r| reduce_task(~r, kk, ch) ));
c = recv(p);
reducers.insert(k, c);
}
}
ctrl = vec::append_one(
- ctrls,
+ move ctrls,
ctrl_proto::server::reducer(move_out!(cc), c));
}
}
struct defer {
x: &[&str],
- drop { #error["%?", self.x]; }
+ drop { error!("%?", self.x); }
}
fn defer(x: &r/[&r/str]) -> defer/&r {
--- /dev/null
+struct Foo {
+ x: uint
+}
+
+struct Bar {
+ foo: Foo
+}
+
+fn main() {
+ let mut b = Bar { foo: Foo { x: 3 } };
+ let p = &b; //~ NOTE prior loan as immutable granted here
+ let q = &mut b.foo.x; //~ ERROR loan of mutable local variable as mutable conflicts with prior loan
+ let r = &p.foo.x;
+ io::println(fmt!("*r = %u", *r));
+ *q += 1;
+ io::println(fmt!("*r = %u", *r));
+}
\ No newline at end of file
--- /dev/null
+struct Foo {
+ mut x: uint
+}
+
+struct Bar {
+ foo: Foo
+}
+
+fn main() {
+ let mut b = Bar { foo: Foo { x: 3 } };
+ let p = &b.foo.x;
+ let q = &mut b.foo; //~ ERROR loan of mutable field as mutable conflicts with prior loan
+ //~^ ERROR loan of mutable local variable as mutable conflicts with prior loan
+ let r = &mut b; //~ ERROR loan of mutable local variable as mutable conflicts with prior loan
+ io::println(fmt!("*p = %u", *p));
+ q.x += 1;
+ r.foo.x += 1;
+ io::println(fmt!("*p = %u", *p));
+}
\ No newline at end of file
fn box_imm() {
let v = ~3;
let _w = &v; //~ NOTE loan of immutable local variable granted here
- take(v); //~ ERROR moving out of immutable local variable prohibited due to outstanding loan
+ take(move v); //~ ERROR moving out of immutable local variable prohibited due to outstanding loan
}
fn main() {
--- /dev/null
+use core::either::{Either, Left, Right};
+
+ fn f(x: &mut Either<int,float>, y: &Either<int,float>) -> int {
+ match *y {
+ Left(ref z) => {
+ *x = Right(1.0);
+ *z
+ }
+ _ => fail
+ }
+ }
+
+ fn g() {
+ let mut x: Either<int,float> = Left(3);
+ io::println(f(&mut x, &x).to_str()); //~ ERROR conflicts with prior loan
+ }
+
+ fn h() {
+ let mut x: Either<int,float> = Left(3);
+ let y: &Either<int, float> = &x;
+ let z: &mut Either<int, float> = &mut x; //~ ERROR conflicts with prior loan
+ *z = *y;
+ }
+
+ fn main() {}
--- /dev/null
+struct Foo {
+ mut x: uint
+}
+
+struct Bar {
+ foo: Foo
+}
+
+fn main() {
+ let mut b = Bar { foo: Foo { x: 3 } };
+ let p = &b;
+ let q = &mut b.foo.x;
+ let r = &p.foo.x; //~ ERROR illegal borrow unless pure
+ let s = &b.foo.x; //~ ERROR loan of mutable field as immutable conflicts with prior loan
+ io::println(fmt!("*r = %u", *r));
+ io::println(fmt!("*r = %u", *s));
+ *q += 1;
+ io::println(fmt!("*r = %u", *r));
+ io::println(fmt!("*r = %u", *s));
+}
\ No newline at end of file
struct noncopyable {
- i: (), drop { #error["dropped"]; }
+ i: (), drop { error!("dropped"); }
}
fn noncopyable() -> noncopyable {
fn main() {
let x1 = wrapper(noncopyable());
let _x2 = move *x1; //~ ERROR moving out of enum content
-}
\ No newline at end of file
+}
-// error-pattern:Unsatisfied precondition constraint
-
fn main() {
let x = 5;
- let _y = fn~(move x) { };
- let _z = x; //< error: Unsatisfied precondition constraint
+ let _y = fn~(move x) { }; //~ WARNING captured variable `x` not used in closure
+ let _z = x; //~ ERROR use of moved variable: `x`
}
//~^ WARNING implicitly copying a non-implicitly-copyable value
//~^^ NOTE to copy values into a @fn closure, use a capture clause
};
- (x,f)
+ (move x,f)
}
fn closure2(+x: util::NonCopyable) -> (util::NonCopyable,
//~^^ NOTE non-copyable value cannot be copied into a @fn closure
//~^^^ ERROR copying a noncopyable value
};
- (x,f)
+ (move x,f)
}
fn closure3(+x: util::NonCopyable) {
do task::spawn {
--- /dev/null
+// error-pattern:unmatched visibility `pub`
+extern {
+ pub pub fn foo();
+}
fn wants_box(x: @str) { }
fn wants_uniq(x: ~str) { }
-fn wants_three(x: str/3) { }
+fn wants_slice(x: &str) { }
fn has_box(x: @str) {
wants_box(x);
wants_uniq(x); //~ ERROR str storage differs: expected ~ but found @
- wants_three(x); //~ ERROR str storage differs: expected 3 but found @
+ wants_slice(x);
}
fn has_uniq(x: ~str) {
wants_box(x); //~ ERROR str storage differs: expected @ but found ~
wants_uniq(x);
- wants_three(x); //~ ERROR str storage differs: expected 3 but found ~
+ wants_slice(x);
}
-fn has_three(x: str/3) {
- wants_box(x); //~ ERROR str storage differs: expected @ but found 3
- wants_uniq(x); //~ ERROR str storage differs: expected ~ but found 3
- wants_three(x);
-}
-
-fn has_four(x: str/4) {
- wants_box(x); //~ ERROR str storage differs: expected @ but found 4
- wants_uniq(x); //~ ERROR str storage differs: expected ~ but found 4
- wants_three(x); //~ ERROR str storage differs: expected 3 but found 4
+fn has_slice(x: &str) {
+ wants_box(x); //~ ERROR str storage differs: expected @ but found &
+ wants_uniq(x); //~ ERROR str storage differs: expected ~ but found &
+ wants_slice(x);
}
fn main() {
fn wants_box(x: @[uint]) { }
fn wants_uniq(x: ~[uint]) { }
-fn wants_three(x: [uint]/3) { }
+fn wants_three(x: [uint * 3]) { }
fn has_box(x: @[uint]) {
wants_box(x);
wants_three(x); //~ ERROR [] storage differs: expected 3 but found ~
}
-fn has_three(x: [uint]/3) {
+fn has_three(x: [uint * 3]) {
wants_box(x); //~ ERROR [] storage differs: expected @ but found 3
wants_uniq(x); //~ ERROR [] storage differs: expected ~ but found 3
wants_three(x);
}
-fn has_four(x: [uint]/4) {
+fn has_four(x: [uint * 4]) {
wants_box(x); //~ ERROR [] storage differs: expected @ but found 4
wants_uniq(x); //~ ERROR [] storage differs: expected ~ but found 4
wants_three(x); //~ ERROR [] storage differs: expected 3 but found 4
-// error-pattern:#env takes between 1 and 1 arguments
+// error-pattern: env! takes between 1 and 1 arguments
fn main() { env!(); }
-// error-pattern:#env takes between 1 and 1 arguments
+// error-pattern: env! takes between 1 and 1 arguments
fn main() { env!("one", "two"); }
-// error-pattern:#fmt needs at least 1 arguments
+// error-pattern:fmt! needs at least 1 arguments
fn main() { fmt!(); }
// error-pattern: literal
fn main() {
- // #fmt's first argument must be a literal. Hopefully this
+ // fmt!'s first argument must be a literal. Hopefully this
// restriction can be eased eventually to just require a
// compile-time constant.
let x = fmt!("a" + "b");
// error-pattern: literal
fn main() {
- // #fmt's first argument must be a literal. Hopefully this
+ // fmt!'s first argument must be a literal. Hopefully this
// restriction can be eased eventually to just require a
// compile-time constant.
let x = fmt!(20);
-// error-pattern:only valid in signed #fmt conversion
+// error-pattern:only valid in signed fmt! conversion
fn main() {
// Can't use a sign on unsigned conversions
-// error-pattern:only valid in signed #fmt conversion
+// error-pattern:only valid in signed fmt! conversion
fn main() {
// Can't use a space on unsigned conversions
--- /dev/null
+struct Bar {
+ x: int,
+ drop { io::println("Goodbye, cruel world"); }
+}
+
+struct Foo {
+ x: int,
+ y: Bar
+}
+
+fn main() {
+ let a = Foo { x: 1, y: Bar { x: 5 } };
+ let c = Foo { x: 4, .. a}; //~ ERROR copying a noncopyable value
+ io::println(fmt!("%?", c));
+}
+
fn test(-x: uint) {}
fn main() {
- let i = 3u;
- for uint::range(0u, 10u) |_x| {test(i)}
+ let i = 3;
+ for uint::range(0, 10) |_x| {test(move i)}
}
--- /dev/null
+// xfail-test
+fn main() {
+ let one = fn@() -> uint {
+ enum r { a };
+ return a as uint;
+ };
+ let two = fn@() -> uint {
+ enum r { a };
+ return a as uint;
+ };
+ one(); two();
+}
let mut res = foo(x);
let mut v = ~[mut];
- v <- ~[mut res] + v; //~ ERROR instantiating a type parameter with an incompatible type (needs `copy`, got `owned`, missing `copy`)
+ v <- ~[mut (move res)] + v; //~ ERROR instantiating a type parameter with an incompatible type (needs `copy`, got `owned`, missing `copy`)
assert (v.len() == 2);
}
impl parser: parse {
fn parse() -> ~[int] {
- dvec::unwrap(move self.tokens) //~ ERROR illegal move from self
+ dvec::unwrap(move self.tokens) //~ ERROR moving out of immutable field
}
}
mod stream {
#[legacy_exports];
- enum stream<T: Send> { send(T, server::stream<T>), }
+ enum Stream<T: Send> { send(T, server::Stream<T>), }
mod server {
#[legacy_exports];
- impl<T: Send> stream<T> {
- fn recv() -> extern fn(+v: stream<T>) -> stream::stream<T> {
+ impl<T: Send> Stream<T> {
+ fn recv() -> extern fn(+v: Stream<T>) -> stream::Stream<T> {
// resolve really should report just one error here.
// Change the test case when it changes.
- fn recv(+pipe: stream<T>) -> stream::stream<T> { //~ ERROR attempt to use a type argument out of scope
+ fn recv(+pipe: Stream<T>) -> stream::Stream<T> { //~ ERROR attempt to use a type argument out of scope
//~^ ERROR use of undeclared type name
//~^^ ERROR attempt to use a type argument out of scope
//~^^^ ERROR use of undeclared type name
recv
}
}
- type stream<T: Send> = pipes::RecvPacket<stream::stream<T>>;
+ type Stream<T: Send> = pipes::RecvPacket<stream::Stream<T>>;
}
}
--- /dev/null
+struct C {
+ x: int,
+ drop {
+ #error("dropping: %?", self.x);
+ }
+}
+
+fn main() {
+ let c = C{ x: 2};
+ let d = copy c; //~ ERROR copying a noncopyable value
+ #error("%?", d.x);
+}
\ No newline at end of file
enum a { b, c }
-enum a { d, e } //~ ERROR Duplicate definition of type a
+enum a { d, e } //~ ERROR duplicate definition of type a
fn main() {}
mod a {}
#[legacy_exports]
-mod a {} //~ ERROR Duplicate definition of module a
+mod a {} //~ ERROR duplicate definition of type a
fn main() {}
fn a(x: ~str) -> ~str {
- #fmt("First function with %s", x)
+ fmt!("First function with %s", x)
}
-fn a(x: ~str, y: ~str) -> ~str { //~ ERROR Duplicate definition of value a
- #fmt("Second function with %s and %s", x, y)
+fn a(x: ~str, y: ~str) -> ~str { //~ ERROR duplicate definition of value a
+ fmt!("Second function with %s and %s", x, y)
}
fn main() {
- #info("Result: ");
+ info!("Result: ");
}
--- /dev/null
+fn foo<T>() {
+ struct foo {
+ mut x: T, //~ ERROR attempt to use a type argument out of scope
+ //~^ ERROR use of undeclared type name
+ drop { }
+ }
+}
+fn main() { }
--- /dev/null
+fn main() {
+ let foo = 100;
+
+ const y: int = foo + 1; //~ ERROR: attempt to use a non-constant value in a constant
+
+ log(error, y);
+}
--- /dev/null
+fn main() {
+ let foo = 100;
+
+ enum Stuff {
+ Bar = foo //~ ERROR attempt to use a non-constant value in a constant
+ }
+
+ log(error, Bar);
+}
--- /dev/null
+fn f(x:int) {
+ const child: int = x + 1; //~ ERROR attempt to use a non-constant value in a constant
+}
+
+fn main() {}
--- /dev/null
+struct P { child: Option<@mut P> }
+trait PTrait {
+ fn getChildOption() -> Option<@P>;
+}
+
+impl P: PTrait {
+ fn getChildOption() -> Option<@P> {
+ const childVal: @P = self.child.get(); //~ ERROR attempt to use a non-constant value in a constant
+ fail;
+ }
+}
+
+fn main() {}
fn take(-_x: int) { }
fn from_by_value_arg(++x: int) {
- take(x); //~ ERROR illegal move from argument `x`, which is not copy or move mode
+ take(move x); //~ ERROR illegal move from argument `x`, which is not copy or move mode
}
fn from_by_ref_arg(&&x: int) {
- take(x); //~ ERROR illegal move from argument `x`, which is not copy or move mode
+ take(move x); //~ ERROR illegal move from argument `x`, which is not copy or move mode
}
fn from_copy_arg(+x: int) {
- take(x);
+ take(move x);
}
fn from_move_arg(-x: int) {
- take(x);
+ take(move x);
}
fn main() {
let x: int = 25;
loop {
- take(x); //~ ERROR use of moved variable: `x`
+ take(move x); //~ ERROR use of moved variable: `x`
//~^ NOTE move of variable occurred here
}
}
}
fn main() {
let x = r { x: () };
- fn@() { copy x; }; //~ ERROR copying a noncopyable value
+ fn@(move x) { copy x; }; //~ ERROR copying a noncopyable value
}
// Tests that "log(debug, message);" is flagged as using
// message after the send deinitializes it
fn test00_start(ch: _chan<int>, message: int, _count: int) {
- send(ch, message); //~ NOTE move of variable occurred here
+ send(ch, move message); //~ NOTE move of variable occurred here
log(debug, message); //~ ERROR use of moved variable: `message`
}
//~^ ERROR obsolete syntax: with
}
+fn obsolete_fixed_length_vec() {
+ let foo: [int]/1;
+ //~^ ERROR obsolete syntax: fixed-length vector
+ foo = [1]/_;
+ //~^ ERROR obsolete syntax: fixed-length vector
+ let foo: [int]/1;
+ //~^ ERROR obsolete syntax: fixed-length vector
+ foo = [1]/1;
+ //~^ ERROR obsolete syntax: fixed-length vector
+}
+
fn main() { }
fn main() {
let i = 3;
do wants_static_fn {
- #debug("i=%d", i);
+ debug!("i=%d", i);
//~^ ERROR captured variable does not outlive the enclosing closure
}
}
fn set_desc(self, s: &str) -> Flag {
Flag { //~ ERROR cannot infer an appropriate lifetime
name: self.name,
- desc: s,
+ desc: s, //~ ERROR cannot infer an appropriate lifetime
max_count: self.max_count,
value: self.value
}
extern mod core;
-fn last<T: Copy>(v: ~[const T]) -> core::Option<T> {
+fn last<T>(v: ~[const &T]) -> core::Option<T> {
fail;
}
fn main() {
let i = ~@100;
- f(i); //~ ERROR missing `send`
+ f(move i); //~ ERROR missing `send`
}
let cat = ~"kitty";
let po = comm::Port(); //~ ERROR missing `send`
let ch = comm::Chan(&po); //~ ERROR missing `send`
- comm::send(ch, foo(42, @cat)); //~ ERROR missing `send`
+ comm::send(ch, foo(42, @(move cat))); //~ ERROR missing `send`
}
// pp-exact
-fn f() -> [int]/3 {
+fn f() -> [int * 3] {
let picard = 0;
let data = 1;
let worf = 2;
- let enterprise = [picard, data, worf]/_;
+ let enterprise = [picard, data, worf];
--- /dev/null
+//error-pattern:One
+fn main() {
+ fail ~"One";
+ fail ~"Two";
+}
\ No newline at end of file
send(oc, Chan(&p));
let x = recv(p);
- send(c, x);
+ send(c, move x);
}
fn main() { fail ~"meep"; }
fn main() unsafe {
let i1 = ~0;
let i1p = cast::reinterpret_cast(&i1);
- cast::forget(i1);
+ cast::forget(move i1);
let x = @r(i1p);
failfn();
log(error, x);
// error-pattern:fail
-fn f(-a: @int) {
+fn f(-_a: @int) {
fail;
}
fn main() {
let a = @0;
- f(a);
+ f(move a);
}
\ No newline at end of file
--- /dev/null
+struct Foo {
+ x: int
+}
+
+impl Foo {
+ static fn new() -> Foo {
+ Foo { x: 3 }
+ }
+}
+
+fn main() {
+ let x = Foo::new();
+ io::println(x.x.to_str());
+}
+
--- /dev/null
+// xfail-fast - check-fast doesn't understand aux-build
+// aux-build:anon_trait_static_method_lib.rs
+
+extern mod anon_trait_static_method_lib;
+use anon_trait_static_method_lib::Foo;
+
+fn main() {
+ let x = Foo::new();
+ io::println(x.x.to_str());
+}
+
+
+
fn main() {
let mut a = {mut x: 1}, b = 2, c = 3;
- assert (f1(a, &mut b, c) == 6);
+ assert (f1(a, &mut b, move c) == 6);
assert (a.x == 0);
assert (b == 10);
assert (f2(a.x, |x| a.x = 50 ) == 0);
assert length::<int, &[int]>(x) == vec::len(x);
// Now try it with a type that *needs* to be borrowed
- let z = [0,1,2,3]/_;
+ let z = [0,1,2,3];
// Call a method
for z.iterate() |y| { assert z[*y] == *y; }
// Call a parameterized function
impl<T> ~[T]: Pushable<T> {
fn push_val(&mut self, +t: T) {
- self.push(t);
+ self.push(move t);
}
}
-// xfail-fast
-#[legacy_modes];
-
extern mod std;
// These tests used to be separate files, but I wanted to refactor all
use cmp::Eq;
use std::ebml;
use io::Writer;
-use std::serialization::{serialize_uint, deserialize_uint};
-
-fn test_ser_and_deser<A:Eq>(a1: A,
- expected: ~str,
- ebml_ser_fn: fn(ebml::Writer, A),
- ebml_deser_fn: fn(ebml::EbmlDeserializer) -> A,
- io_ser_fn: fn(io::Writer, A)) {
-
- // check the pretty printer:
- io_ser_fn(io::stdout(), a1);
- let s = io::with_str_writer(|w| io_ser_fn(w, a1) );
+use std::serialization::{Serializable, Deserializable, deserialize};
+use std::prettyprint;
+
+fn test_prettyprint<A: Serializable<prettyprint::Serializer>>(
+ a: &A,
+ expected: &~str
+) {
+ let s = do io::with_str_writer |w| {
+ a.serialize(&prettyprint::Serializer(w))
+ };
debug!("s == %?", s);
- assert s == expected;
+ assert s == *expected;
+}
- // check the EBML serializer:
+fn test_ebml<A:
+ Eq
+ Serializable<ebml::Serializer>
+ Deserializable<ebml::Deserializer>
+>(a1: &A) {
let bytes = do io::with_bytes_writer |wr| {
- let w = ebml::Writer(wr);
- ebml_ser_fn(w, a1);
+ let ebml_w = &ebml::Serializer(wr);
+ a1.serialize(ebml_w)
};
let d = ebml::Doc(@bytes);
- let a2 = ebml_deser_fn(ebml::ebml_deserializer(d));
- io::print(~"\na1 = ");
- io_ser_fn(io::stdout(), a1);
- io::print(~"\na2 = ");
- io_ser_fn(io::stdout(), a2);
- io::print(~"\n");
- assert a1 == a2;
-
+ let a2: A = deserialize(&ebml::Deserializer(d));
+ assert *a1 == a2;
}
#[auto_serialize]
-enum expr {
- val(uint),
- plus(@expr, @expr),
- minus(@expr, @expr)
+#[auto_deserialize]
+enum Expr {
+ Val(uint),
+ Plus(@Expr, @Expr),
+ Minus(@Expr, @Expr)
}
-impl an_enum : cmp::Eq {
- pure fn eq(other: &an_enum) -> bool {
- self.v == (*other).v
- }
- pure fn ne(other: &an_enum) -> bool { !self.eq(other) }
-}
-
-impl point : cmp::Eq {
- pure fn eq(other: &point) -> bool {
- self.x == (*other).x && self.y == (*other).y
- }
- pure fn ne(other: &point) -> bool { !self.eq(other) }
-}
-
-impl<T:cmp::Eq> quark<T> : cmp::Eq {
- pure fn eq(other: &quark<T>) -> bool {
+impl Expr : cmp::Eq {
+ pure fn eq(other: &Expr) -> bool {
match self {
- top(ref q) => match (*other) {
- top(ref r) => q == r,
- bottom(_) => false
- },
- bottom(ref q) => match (*other) {
- top(_) => false,
- bottom(ref r) => q == r
- }
+ Val(e0a) => {
+ match *other {
+ Val(e0b) => e0a == e0b,
+ _ => false
+ }
+ }
+ Plus(e0a, e1a) => {
+ match *other {
+ Plus(e0b, e1b) => e0a == e0b && e1a == e1b,
+ _ => false
+ }
+ }
+ Minus(e0a, e1a) => {
+ match *other {
+ Minus(e0b, e1b) => e0a == e0b && e1a == e1b,
+ _ => false
+ }
+ }
}
}
- pure fn ne(other: &quark<T>) -> bool { !self.eq(other) }
+ pure fn ne(other: &Expr) -> bool { !self.eq(other) }
}
+impl AnEnum : cmp::Eq {
+ pure fn eq(other: &AnEnum) -> bool {
+ self.v == other.v
+ }
+ pure fn ne(other: &AnEnum) -> bool { !self.eq(other) }
+}
-impl c_like : cmp::Eq {
- pure fn eq(other: &c_like) -> bool {
- self as int == (*other) as int
+impl Point : cmp::Eq {
+ pure fn eq(other: &Point) -> bool {
+ self.x == other.x && self.y == other.y
}
- pure fn ne(other: &c_like) -> bool { !self.eq(other) }
+ pure fn ne(other: &Point) -> bool { !self.eq(other) }
}
-impl expr : cmp::Eq {
- pure fn eq(other: &expr) -> bool {
+impl<T:cmp::Eq> Quark<T> : cmp::Eq {
+ pure fn eq(other: &Quark<T>) -> bool {
match self {
- val(e0a) => {
- match (*other) {
- val(e0b) => e0a == e0b,
- _ => false
+ Top(ref q) => {
+ match *other {
+ Top(ref r) => q == r,
+ Bottom(_) => false
}
- }
- plus(e0a, e1a) => {
- match (*other) {
- plus(e0b, e1b) => e0a == e0b && e1a == e1b,
- _ => false
- }
- }
- minus(e0a, e1a) => {
- match (*other) {
- minus(e0b, e1b) => e0a == e0b && e1a == e1b,
- _ => false
+ },
+ Bottom(ref q) => {
+ match *other {
+ Top(_) => false,
+ Bottom(ref r) => q == r
}
- }
+ },
}
}
- pure fn ne(other: &expr) -> bool { !self.eq(other) }
+ pure fn ne(other: &Quark<T>) -> bool { !self.eq(other) }
}
-#[auto_serialize]
-type spanned<T> = {lo: uint, hi: uint, node: T};
-
-impl<T:cmp::Eq> spanned<T> : cmp::Eq {
- pure fn eq(other: &spanned<T>) -> bool {
- self.lo == (*other).lo && self.hi == (*other).hi &&
- self.node.eq(&(*other).node)
+impl CLike : cmp::Eq {
+ pure fn eq(other: &CLike) -> bool {
+ self as int == *other as int
}
- pure fn ne(other: &spanned<T>) -> bool { !self.eq(other) }
+ pure fn ne(other: &CLike) -> bool { !self.eq(other) }
}
#[auto_serialize]
-type spanned_uint = spanned<uint>;
+#[auto_deserialize]
+type Spanned<T> = {lo: uint, hi: uint, node: T};
-#[auto_serialize]
-type some_rec = {v: uint_vec};
+impl<T:cmp::Eq> Spanned<T> : cmp::Eq {
+ pure fn eq(other: &Spanned<T>) -> bool {
+ self.lo == other.lo && self.hi == other.hi && self.node == other.node
+ }
+ pure fn ne(other: &Spanned<T>) -> bool { !self.eq(other) }
+}
#[auto_serialize]
-enum an_enum = some_rec;
+#[auto_deserialize]
+type SomeRec = {v: ~[uint]};
#[auto_serialize]
-type uint_vec = ~[uint];
+#[auto_deserialize]
+enum AnEnum = SomeRec;
#[auto_serialize]
-type point = {x: uint, y: uint};
+#[auto_deserialize]
+struct Point {x: uint, y: uint}
#[auto_serialize]
-enum quark<T> {
- top(T),
- bottom(T)
+#[auto_deserialize]
+enum Quark<T> {
+ Top(T),
+ Bottom(T)
}
#[auto_serialize]
-type uint_quark = quark<uint>;
-
-#[auto_serialize]
-enum c_like { a, b, c }
+#[auto_deserialize]
+enum CLike { A, B, C }
fn main() {
-
- test_ser_and_deser(plus(@minus(@val(3u), @val(10u)),
- @plus(@val(22u), @val(5u))),
- ~"plus(@minus(@val(3u), @val(10u)), \
- @plus(@val(22u), @val(5u)))",
- serialize_expr,
- deserialize_expr,
- serialize_expr);
-
- test_ser_and_deser({lo: 0u, hi: 5u, node: 22u},
- ~"{lo: 0u, hi: 5u, node: 22u}",
- serialize_spanned_uint,
- deserialize_spanned_uint,
- serialize_spanned_uint);
-
- test_ser_and_deser(an_enum({v: ~[1u, 2u, 3u]}),
- ~"an_enum({v: [1u, 2u, 3u]})",
- serialize_an_enum,
- deserialize_an_enum,
- serialize_an_enum);
-
- test_ser_and_deser({x: 3u, y: 5u},
- ~"{x: 3u, y: 5u}",
- serialize_point,
- deserialize_point,
- serialize_point);
-
- test_ser_and_deser(~[1u, 2u, 3u],
- ~"[1u, 2u, 3u]",
- serialize_uint_vec,
- deserialize_uint_vec,
- serialize_uint_vec);
-
- test_ser_and_deser(top(22u),
- ~"top(22u)",
- serialize_uint_quark,
- deserialize_uint_quark,
- serialize_uint_quark);
-
- test_ser_and_deser(bottom(222u),
- ~"bottom(222u)",
- serialize_uint_quark,
- deserialize_uint_quark,
- serialize_uint_quark);
-
- test_ser_and_deser(a,
- ~"a",
- serialize_c_like,
- deserialize_c_like,
- serialize_c_like);
-
- test_ser_and_deser(b,
- ~"b",
- serialize_c_like,
- deserialize_c_like,
- serialize_c_like);
+ let a = &Plus(@Minus(@Val(3u), @Val(10u)), @Plus(@Val(22u), @Val(5u)));
+ test_prettyprint(a, &~"Plus(@Minus(@Val(3u), @Val(10u)), \
+ @Plus(@Val(22u), @Val(5u)))");
+ test_ebml(a);
+
+ let a = &{lo: 0u, hi: 5u, node: 22u};
+ test_prettyprint(a, &~"{lo: 0u, hi: 5u, node: 22u}");
+ test_ebml(a);
+
+ let a = &AnEnum({v: ~[1u, 2u, 3u]});
+ test_prettyprint(a, &~"AnEnum({v: ~[1u, 2u, 3u]})");
+ test_ebml(a);
+
+ let a = &Point {x: 3u, y: 5u};
+ test_prettyprint(a, &~"Point {x: 3u, y: 5u}");
+ test_ebml(a);
+
+ let a = &@[1u, 2u, 3u];
+ test_prettyprint(a, &~"@[1u, 2u, 3u]");
+ test_ebml(a);
+
+ let a = &Top(22u);
+ test_prettyprint(a, &~"Top(22u)");
+ test_ebml(a);
+
+ let a = &Bottom(222u);
+ test_prettyprint(a, &~"Bottom(222u)");
+ test_ebml(a);
+
+ let a = &A;
+ test_prettyprint(a, &~"A");
+ test_ebml(a);
+
+ let a = &B;
+ test_prettyprint(a, &~"B");
+ test_ebml(a);
}
+++ /dev/null
-extern mod std;
-
-// These tests used to be separate files, but I wanted to refactor all
-// the common code.
-
-use cmp::Eq;
-use std::ebml2;
-use io::Writer;
-use std::serialization2::{Serializable, Deserializable, deserialize};
-use std::prettyprint2;
-
-fn test_ser_and_deser<A:Eq Serializable Deserializable>(
- a1: &A,
- +expected: ~str
-) {
- // check the pretty printer:
- let s = do io::with_str_writer |w| {
- a1.serialize(&prettyprint2::Serializer(w))
- };
- debug!("s == %?", s);
- assert s == expected;
-
- // check the EBML serializer:
- let bytes = do io::with_bytes_writer |wr| {
- let ebml_w = &ebml2::Serializer(wr);
- a1.serialize(ebml_w)
- };
- let d = ebml2::Doc(@bytes);
- let a2: A = deserialize(&ebml2::Deserializer(d));
- assert *a1 == a2;
-}
-
-#[auto_serialize2]
-#[auto_deserialize2]
-enum Expr {
- Val(uint),
- Plus(@Expr, @Expr),
- Minus(@Expr, @Expr)
-}
-
-impl Expr : cmp::Eq {
- pure fn eq(other: &Expr) -> bool {
- match self {
- Val(e0a) => {
- match *other {
- Val(e0b) => e0a == e0b,
- _ => false
- }
- }
- Plus(e0a, e1a) => {
- match *other {
- Plus(e0b, e1b) => e0a == e0b && e1a == e1b,
- _ => false
- }
- }
- Minus(e0a, e1a) => {
- match *other {
- Minus(e0b, e1b) => e0a == e0b && e1a == e1b,
- _ => false
- }
- }
- }
- }
- pure fn ne(other: &Expr) -> bool { !self.eq(other) }
-}
-
-impl AnEnum : cmp::Eq {
- pure fn eq(other: &AnEnum) -> bool {
- self.v == other.v
- }
- pure fn ne(other: &AnEnum) -> bool { !self.eq(other) }
-}
-
-impl Point : cmp::Eq {
- pure fn eq(other: &Point) -> bool {
- self.x == other.x && self.y == other.y
- }
- pure fn ne(other: &Point) -> bool { !self.eq(other) }
-}
-
-impl<T:cmp::Eq> Quark<T> : cmp::Eq {
- pure fn eq(other: &Quark<T>) -> bool {
- match self {
- Top(ref q) => {
- match *other {
- Top(ref r) => q == r,
- Bottom(_) => false
- }
- },
- Bottom(ref q) => {
- match *other {
- Top(_) => false,
- Bottom(ref r) => q == r
- }
- },
- }
- }
- pure fn ne(other: &Quark<T>) -> bool { !self.eq(other) }
-}
-
-impl CLike : cmp::Eq {
- pure fn eq(other: &CLike) -> bool {
- self as int == *other as int
- }
- pure fn ne(other: &CLike) -> bool { !self.eq(other) }
-}
-
-#[auto_serialize2]
-#[auto_deserialize2]
-type Spanned<T> = {lo: uint, hi: uint, node: T};
-
-impl<T:cmp::Eq> Spanned<T> : cmp::Eq {
- pure fn eq(other: &Spanned<T>) -> bool {
- self.lo == other.lo && self.hi == other.hi && self.node == other.node
- }
- pure fn ne(other: &Spanned<T>) -> bool { !self.eq(other) }
-}
-
-#[auto_serialize2]
-#[auto_deserialize2]
-type SomeRec = {v: ~[uint]};
-
-#[auto_serialize2]
-#[auto_deserialize2]
-enum AnEnum = SomeRec;
-
-#[auto_serialize2]
-#[auto_deserialize2]
-struct Point {x: uint, y: uint}
-
-#[auto_serialize2]
-#[auto_deserialize2]
-enum Quark<T> {
- Top(T),
- Bottom(T)
-}
-
-#[auto_serialize2]
-#[auto_deserialize2]
-enum CLike { A, B, C }
-
-fn main() {
- test_ser_and_deser(&Plus(@Minus(@Val(3u), @Val(10u)),
- @Plus(@Val(22u), @Val(5u))),
- ~"Plus(@Minus(@Val(3u), @Val(10u)), \
- @Plus(@Val(22u), @Val(5u)))");
-
- test_ser_and_deser(&{lo: 0u, hi: 5u, node: 22u},
- ~"{lo: 0u, hi: 5u, node: 22u}");
-
- test_ser_and_deser(&AnEnum({v: ~[1u, 2u, 3u]}),
- ~"AnEnum({v: ~[1u, 2u, 3u]})");
-
- test_ser_and_deser(&Point {x: 3u, y: 5u}, ~"Point {x: 3u, y: 5u}");
-
- test_ser_and_deser(&@[1u, 2u, 3u], ~"@[1u, 2u, 3u]");
-
- test_ser_and_deser(&Top(22u), ~"Top(22u)");
- test_ser_and_deser(&Bottom(222u), ~"Bottom(222u)");
-
- test_ser_and_deser(&A, ~"A");
- test_ser_and_deser(&B, ~"B");
-}
fn main() {
- let x = [22]/1;
+ let x = [22];
let y = &x[0];
assert *y == 22;
-}
\ No newline at end of file
+}
}
impl cat: ToStr {
- fn to_str() -> ~str { self.name }
+ pure fn to_str() -> ~str { self.name }
}
fn print_out<T: ToStr>(thing: T, expected: ~str) {
-const x : [int]/4 = [1,2,3,4];
+const x : [int * 4] = [1,2,3,4];
const p : int = x[2];
const y : &[int] = &[1,2,3,4];
const q : int = y[2];
assert p == 3;
assert q == 3;
assert t == 20;
-}
\ No newline at end of file
+}
-const x : [int]/4 = [1,2,3,4];
+const x : [int * 4] = [1,2,3,4];
const y : &[int] = &[1,2,3,4];
fn main() {
assert x[1] == 2;
assert x[3] == 4;
assert x[3] == y[3];
-}
\ No newline at end of file
+}
--- /dev/null
+trait Foo {
+ fn f() {
+ io::println("Hello!");
+ self.g();
+ }
+ fn g();
+}
+
+struct A {
+ x: int
+}
+
+impl A : Foo {
+ fn g() {
+ io::println("Goodbye!");
+ }
+}
+
+fn main() {
+ let a = A { x: 1 };
+ a.f();
+}
+
fn main() {
for iter::eachi(&(Some({a: 0}))) |i, a| {
- #debug["%u %d", i, a.a];
+ debug!("%u %d", i, a.a);
}
let _x: Option<float> = foo_func(0);
fn main() {
- let x : [@int]/5 = [@1,@2,@3,@4,@5]/5;
- let _y : [@int]/5 = [@1,@2,@3,@4,@5]/_;
- let mut z = [@1,@2,@3,@4,@5]/_;
+ let x : [@int * 5] = [@1,@2,@3,@4,@5];
+ let _y : [@int * 5] = [@1,@2,@3,@4,@5];
+ let mut z = [@1,@2,@3,@4,@5];
z = x;
assert *z[0] == 1;
assert *z[4] == 5;
// Doesn't work; needs a design decision.
fn main() {
- let x : [int]/5 = [1,2,3,4,5]/5;
- let _y : [int]/5 = [1,2,3,4,5]/_;
- let mut z = [1,2,3,4,5]/_;
+ let x : [int * 5] = [1,2,3,4,5];
+ let _y : [int * 5] = [1,2,3,4,5];
+ let mut z = [1,2,3,4,5];
z = x;
assert z[0] == 1;
assert z[4] == 5;
- let a : [int]/5 = [1,1,1,1,1]/_;
- let b : [int]/5 = [2,2,2,2,2]/_;
- let c : [int]/5 = [2,2,2,2,3]/_;
+ let a : [int * 5] = [1,1,1,1,1];
+ let b : [int * 5] = [2,2,2,2,2];
+ let c : [int * 5] = [2,2,2,2,3];
log(debug, a);
// are implicitly copyable
#[deny(implicit_copies)]
fn main() {
- let arr = [1,2,3]/3;
+ let arr = [1,2,3];
let arr2 = arr;
assert(arr[1] == 2);
assert(arr2[2] == 3);
fn main() {
- let arr = [1,2,3]/3;
+ let arr = [1,2,3];
let struc = {a: 13u8, b: arr, c: 42};
let s = sys::log_str(&struc);
assert(s == ~"{ a: 13, b: [ 1, 2, 3 ], c: 42 }");
// This is what the signature to spawn should look like with bare functions
fn spawn<T: Send>(val: T, f: extern fn(T)) {
- f(val);
+ f(move val);
}
fn f(+i: int) {
fn main() {
let a = Foo { x: 1, y: 2 };
- let c = Foo { x: 4, .. a };
+ let c = Foo { x: 4, .. a};
io::println(fmt!("%?", c));
}
fn main() {
let mut x = @1;
let mut y = @2;
- rusti::move_val(&mut y, x);
+ rusti::move_val(&mut y, move x);
assert *y == 1;
}
\ No newline at end of file
do self.iter |a| {
b <- blk(b, a);
}
- return b;
+ move b
}
fn range(lo: uint, hi: uint, it: fn(uint)) {
--- /dev/null
+fn main() {
+ let mut x = 0;
+
+ loop foo: {
+ loop bar: {
+ loop quux: {
+ if 1 == 2 {
+ break foo;
+ }
+ else {
+ break bar;
+ }
+ }
+ loop foo;
+ }
+ x = 42;
+ break;
+ }
+
+ error!("%?", x);
+ assert(x == 42);
+}
\ No newline at end of file
--- /dev/null
+trait Send {
+ fn f();
+}
+
+fn f<T: Send>(t: T) {
+ t.f();
+}
+
+fn main() {
+}
\ No newline at end of file
--- /dev/null
+fn main() {
+ let foo = 100;
+ const quux: int = 5;
+
+ enum Stuff {
+ Bar = quux
+ }
+
+ assert (Bar as int == quux);
+}
+// tjc: I don't know why
+// xfail-pretty
mod pipes {
#[legacy_exports];
use cast::{forget, transmute};
// We should consider moving this to core::unsafe, although I
// suspect graydon would want us to use void pointers instead.
unsafe fn uniquify<T>(+x: *T) -> ~T {
- unsafe { cast::transmute(x) }
+ unsafe { cast::transmute(move x) }
}
fn swap_state_acq(+dst: &mut state, src: state) -> state {
unsafe {
- transmute(rusti::atomic_xchg_acq(transmute(dst), src as int))
+ transmute(rusti::atomic_xchg_acq(transmute(move dst), src as int))
}
}
fn swap_state_rel(+dst: &mut state, src: state) -> state {
unsafe {
- transmute(rusti::atomic_xchg_rel(transmute(dst), src as int))
+ transmute(rusti::atomic_xchg_rel(transmute(move dst), src as int))
}
}
let p = p.unwrap();
let p = unsafe { uniquify(p) };
assert (*p).payload.is_none();
- (*p).payload <- Some(payload);
+ (*p).payload <- Some(move payload);
let old_state = swap_state_rel(&mut (*p).state, full);
match old_state {
empty => {
// Yay, fastpath.
// The receiver will eventually clean this up.
- unsafe { forget(p); }
+ unsafe { forget(move p); }
}
full => { fail ~"duplicate send" }
blocked => {
// The receiver will eventually clean this up.
- unsafe { forget(p); }
+ unsafe { forget(move p); }
}
terminated => {
// The receiver will never receive this. Rely on drop_glue
full => {
let mut payload = None;
payload <-> (*p).payload;
- return Some(option::unwrap(payload))
+ return Some(option::unwrap(move payload))
}
terminated => {
assert old_state == terminated;
match swap_state_rel(&mut (*p).state, terminated) {
empty | blocked => {
// The receiver will eventually clean up.
- unsafe { forget(p) }
+ unsafe { forget(move p) }
}
full => {
// This is impossible
match swap_state_rel(&mut (*p).state, terminated) {
empty => {
// the sender will clean up
- unsafe { forget(p) }
+ unsafe { forget(move p) }
}
blocked => {
// this shouldn't happen.
if self.p != None {
let mut p = None;
p <-> self.p;
- sender_terminate(option::unwrap(p))
+ sender_terminate(option::unwrap(move p))
}
}
}
fn unwrap() -> *packet<T> {
let mut p = None;
p <-> self.p;
- option::unwrap(p)
+ option::unwrap(move p)
}
}
if self.p != None {
let mut p = None;
p <-> self.p;
- receiver_terminate(option::unwrap(p))
+ receiver_terminate(option::unwrap(move p))
}
}
}
fn unwrap() -> *packet<T> {
let mut p = None;
p <-> self.p;
- option::unwrap(p)
+ option::unwrap(move p)
}
}
ping(x) => { cast::transmute(ptr::addr_of(&x)) }
};
let liberated_value <- *addr;
- cast::forget(p);
- liberated_value
+ cast::forget(move p);
+ move liberated_value
}
fn liberate_pong(-p: pong) -> pipes::send_packet<ping> unsafe {
pong(x) => { cast::transmute(ptr::addr_of(&x)) }
};
let liberated_value <- *addr;
- cast::forget(p);
- liberated_value
+ cast::forget(move p);
+ move liberated_value
}
fn init() -> (client::ping, server::ping) {
fn do_ping(-c: ping) -> pong {
let (sp, rp) = pipes::entangle();
- pipes::send(c, ping(sp));
- rp
+ pipes::send(move c, ping(move sp));
+ move rp
}
fn do_pong(-c: pong) -> (ping, ()) {
- let packet = pipes::recv(c);
+ let packet = pipes::recv(move c);
if packet.is_none() {
fail ~"sender closed the connection"
}
- (liberate_pong(option::unwrap(packet)), ())
+ (liberate_pong(option::unwrap(move packet)), ())
}
}
type pong = pipes::send_packet<pingpong::pong>;
fn do_ping(-c: ping) -> (pong, ()) {
- let packet = pipes::recv(c);
+ let packet = pipes::recv(move c);
if packet.is_none() {
fail ~"sender closed the connection"
}
- (liberate_ping(option::unwrap(packet)), ())
+ (liberate_ping(option::unwrap(move packet)), ())
}
fn do_pong(-c: pong) -> ping {
let (sp, rp) = pipes::entangle();
- pipes::send(c, pong(sp));
- rp
+ pipes::send(move c, pong(move sp));
+ move rp
}
}
}
fn client(-chan: pingpong::client::ping) {
- let chan = pingpong::client::do_ping(chan);
+ let chan = pingpong::client::do_ping(move chan);
log(error, ~"Sent ping");
- let (chan, _data) = pingpong::client::do_pong(chan);
+ let (_chan, _data) = pingpong::client::do_pong(move chan);
log(error, ~"Received pong");
}
fn server(-chan: pingpong::server::ping) {
- let (chan, _data) = pingpong::server::do_ping(chan);
+ let (chan, _data) = pingpong::server::do_ping(move chan);
log(error, ~"Received ping");
- let chan = pingpong::server::do_pong(chan);
+ let _chan = pingpong::server::do_pong(move chan);
log(error, ~"Sent pong");
}
fn rendezvous() {
let (c, s) = streamp::init();
- let streams: ~[streamp::client::open<int>] = ~[c];
+ let streams: ~[streamp::client::open<int>] = ~[move c];
error!("%?", streams[0]);
}
--- /dev/null
+use sys::size_of;
+extern mod std;
+
+struct Cat {
+ x: int
+}
+
+struct Kitty {
+ x: int,
+ drop {}
+}
+
+#[cfg(target_arch = "x86_64")]
+fn main() {
+ assert (size_of::<Cat>() == 8 as uint);
+ assert (size_of::<Kitty>() == 16 as uint);
+}
+
+#[cfg(target_arch = "x86")]
+fn main() {
+ assert (size_of::<Cat>() == 4 as uint);
+ assert (size_of::<Kitty>() == 8 as uint);
+}
}
impl square: to_str::ToStr {
- fn to_str() -> ~str {
+ pure fn to_str() -> ~str {
match self {
bot => { ~"R" }
wall => { ~"#" }
'.' => { earth }
' ' => { empty }
_ => {
- #error("invalid square: %?", c);
+ error!("invalid square: %?", c);
fail
}
}
}
fn read_board_grid<rdr: Owned io::Reader>(+in: rdr) -> ~[~[square]] {
- let in = in as io::Reader;
+ let in = (move in) as io::Reader;
let mut grid = ~[];
for in.each_line |line| {
let mut row = ~[];
proto! stream (
- stream:send<T:Send> {
- send(T) -> stream<T>
+ Stream:send<T:Send> {
+ send(T) -> Stream<T>
}
)
fn main() {
let (bc, _bp) = stream::init();
- stream::client::send(bc, ~"abc");
+ stream::client::send(move bc, ~"abc");
}
// x.f();
// y.f();
// (*z).f();
- #error["ok so far..."];
+ error!("ok so far...");
z.f(); //segfault
}
let bools2 = to_bools({storage: ~[0b01100100]});
for uint::range(0, 8) |i| {
- io::println(#fmt("%u => %u vs %u", i, bools[i] as uint, bools2[i] as uint));
+ io::println(fmt!("%u => %u vs %u", i, bools[i] as uint, bools2[i] as uint));
}
assert bools == bools2;
-}
\ No newline at end of file
+}
fn main() {
let (c,p) = pipes::stream();
- do task::try {
+ do task::try |move c| {
let (c2,p2) = pipes::stream();
- do task::spawn {
+ do task::spawn |move p2| {
p2.recv();
- #error["brother fails"];
+ error!("sibling fails");
fail;
}
let (c3,p3) = pipes::stream();
- c.send(c3);
+ c.send(move c3);
c2.send(());
- #error["child blocks"];
+ error!("child blocks");
p3.recv();
};
- #error["parent tries"];
+ error!("parent tries");
assert !p.recv().try_send(());
- #error("all done!");
+ error!("all done!");
}
fn main() {
let (c,p) = pipes::stream();
- do task::try {
+ do task::try |move c| {
let (c2,p2) = pipes::stream();
- do task::spawn {
+ do task::spawn |move p2| {
p2.recv();
- #error["brother fails"];
+ error!("sibling fails");
fail;
}
let (c3,p3) = pipes::stream();
- c.send(c3);
+ c.send(move c3);
c2.send(());
- #error["child blocks"];
+ error!("child blocks");
let (c, p) = pipes::stream();
- (p, p3).select();
+ (move p, move p3).select();
c.send(());
};
- #error["parent tries"];
+ error!("parent tries");
assert !p.recv().try_send(());
- #error("all done!");
+ error!("all done!");
}
--- /dev/null
+struct list<T> {
+ element: &self/T,
+ mut next: Option<@list<T>>
+}
+
+impl<T> list<T>{
+ fn addEnd(&self, element: &self/T) {
+ let newList = list {
+ element: element,
+ next: option::None
+ };
+
+ self.next = Some(@(move newList));
+ }
+}
+
+fn main() {
+ let s = @"str";
+ let ls = list {
+ element: &s,
+ next: option::None
+ };
+ io::println(*ls.element);
+}
--- /dev/null
+// xfail-fast Can't redeclare malloc with wrong signature because bugs
+// Issue #3656
+// Incorrect struct size computation in the FFI, because of not taking
+// the alignment of elements into account.
+
+use libc::*;
+
+struct KEYGEN {
+ hash_algorithm: [c_uint * 2],
+ count: uint32_t,
+ salt: *c_void,
+ salt_size: uint32_t,
+}
+
+extern {
+ // Bogus signature, just need to test if it compiles.
+ pub fn malloc(++data: KEYGEN);
+}
+
+fn main() {
+}
--- /dev/null
+// xfail-fast - check-fast doesn't understand aux-build
+// aux-build:issue_3136_a.rc
+
+extern mod issue_3136_a;
+fn main() {}
+
#[attr = "val"]
fn f() { }
- /* FIXME: Issue #493
#[attr = "val"]
mod mod1 {
#[legacy_exports];
extern mod rustrt {
#[legacy_exports];
}
- */
}
}
fn is_even(x: &uint) -> bool { (*x % 2) == 0 }
fn main() {
- assert ![1u, 2u]/_.all(is_even);
- assert [2u, 4u]/_.all(is_even);
- assert []/_.all(is_even);
+ assert ![1u, 2u].all(is_even);
+ assert [2u, 4u].all(is_even);
+ assert [].all(is_even);
assert !Some(1u).all(is_even);
assert Some(2u).all(is_even);
fn is_even(x: &uint) -> bool { (*x % 2) == 0 }
fn main() {
- assert ![1u, 3u]/_.any(is_even);
- assert [1u, 2u]/_.any(is_even);
- assert ![]/_.any(is_even);
+ assert ![1u, 3u].any(is_even);
+ assert [1u, 2u].any(is_even);
+ assert ![].any(is_even);
assert !Some(1).any(is_even);
assert Some(2).any(is_even);
fn main() {
- assert []/_.contains(&22u) == false;
- assert [1u, 3u]/_.contains(&22u) == false;
- assert [22u, 1u, 3u]/_.contains(&22u) == true;
- assert [1u, 22u, 3u]/_.contains(&22u) == true;
- assert [1u, 3u, 22u]/_.contains(&22u) == true;
+ assert [].contains(&22u) == false;
+ assert [1u, 3u].contains(&22u) == false;
+ assert [22u, 1u, 3u].contains(&22u) == true;
+ assert [1u, 22u, 3u].contains(&22u) == true;
+ assert [1u, 3u, 22u].contains(&22u) == true;
assert None.contains(&22u) == false;
assert Some(1u).contains(&22u) == false;
assert Some(22u).contains(&22u) == true;
fn main() {
- assert []/_.count(&22u) == 0u;
- assert [1u, 3u]/_.count(&22u) == 0u;
- assert [22u, 1u, 3u]/_.count(&22u) == 1u;
- assert [22u, 1u, 22u]/_.count(&22u) == 2u;
+ assert [].count(&22u) == 0u;
+ assert [1u, 3u].count(&22u) == 0u;
+ assert [22u, 1u, 3u].count(&22u) == 1u;
+ assert [22u, 1u, 22u].count(&22u) == 2u;
assert None.count(&22u) == 0u;
assert Some(1u).count(&22u) == 0u;
assert Some(22u).count(&22u) == 1u;
fn main() {
let mut c = 0u;
- for [1u, 2u, 3u, 4u, 5u]/_.eachi |i, v| {
+ for [1u, 2u, 3u, 4u, 5u].eachi |i, v| {
assert (i + 1u) == *v;
c += 1u;
}
fn is_even(+x: uint) -> bool { (x % 2) == 0 }
fn main() {
- assert [1, 3]/_.filter_to_vec(is_even) == ~[];
- assert [1, 2, 3]/_.filter_to_vec(is_even) == ~[2];
+ assert [1, 3].filter_to_vec(is_even) == ~[];
+ assert [1, 2, 3].filter_to_vec(is_even) == ~[2];
assert None.filter_to_vec(is_even) == ~[];
assert Some(1).filter_to_vec(is_even) == ~[];
assert Some(2).filter_to_vec(is_even) == ~[2];
fn add(x: &float, y: &uint) -> float { *x + ((*y) as float) }
fn main() {
- assert [1u, 3u]/_.foldl(20f, add) == 24f;
- assert []/_.foldl(20f, add) == 20f;
+ assert [1u, 3u].foldl(20f, add) == 24f;
+ assert [].foldl(20f, add) == 20f;
assert None.foldl(20f, add) == 20f;
assert Some(1u).foldl(20f, add) == 21f;
assert Some(2u).foldl(20f, add) == 22f;
fn inc(+x: uint) -> uint { x + 1 }
fn main() {
- assert [1, 3]/_.map_to_vec(inc) == ~[2, 4];
- assert [1, 2, 3]/_.map_to_vec(inc) == ~[2, 3, 4];
+ assert [1, 3].map_to_vec(inc) == ~[2, 4];
+ assert [1, 2, 3].map_to_vec(inc) == ~[2, 3, 4];
assert None.map_to_vec(inc) == ~[];
assert Some(1).map_to_vec(inc) == ~[2];
assert Some(2).map_to_vec(inc) == ~[3];
fn is_even(&&x: uint) -> bool { (x % 2u) == 0u }
fn main() {
- assert [1u, 3u]/_.min() == 1u;
- assert [3u, 1u]/_.min() == 1u;
+ assert [1u, 3u].min() == 1u;
+ assert [3u, 1u].min() == 1u;
assert Some(1u).min() == 1u;
- assert [1u, 3u]/_.max() == 3u;
- assert [3u, 1u]/_.max() == 3u;
+ assert [1u, 3u].max() == 3u;
+ assert [3u, 1u].max() == 3u;
assert Some(3u).max() == 3u;
}
fn main() {
- assert [1u, 3u]/_.to_vec() == ~[1u, 3u];
+ assert [1u, 3u].to_vec() == ~[1u, 3u];
let e: ~[uint] = ~[];
assert e.to_vec() == ~[];
assert None::<uint>.to_vec() == ~[];
// Make sure closing over can be a last use
let q = ~10;
let addr = ptr::addr_of(&(*q));
- let f = fn@() -> *int { ptr::addr_of(&(*q)) };
+ let f = fn@(move q) -> *int { ptr::addr_of(&(*q)) };
assert addr == f();
// But only when it really is the last use
fn lp<T>(s: ~str, f: fn(~str) -> T) -> T {
while false {
let r = f(s);
- return r;
+ return (move r);
}
fail;
}
fn apply<T>(s: ~str, f: fn(~str) -> T) -> T {
fn g<T>(s: ~str, f: fn(~str) -> T) -> T {f(s)}
- g(s, |v| { let r = f(v); r })
+ g(s, |v| { let r = f(v); move r })
}
fn main() {}
loop {
let x = 5;
if x > 3 {
- list += ~[take(x)];
+ list += ~[take(move x)];
} else {
break;
}
fn mk<T>() -> smallintmap<T> {
let v: ~[mut option<T>] = ~[mut];
- return @{mut v: v};
+ return @{mut v: move v};
}
fn f<T,U>() {
impl<A: Copy Serializable> F<A>: Serializable {
fn serialize<S: Serializer>(s: S) {
- self.a.serialize(s);
+ self.a.serialize(move s);
}
}
fn main() {
let x = ~~[10];
// Test forgetting a local by move-in
- test(x);
+ test(move x);
// Test forgetting a temporary by move-in.
test(~~[10]);
fn main() {
let x = @~[10];
// Test forgetting a local by move-in
- test(x);
+ test(move x);
// Test forgetting a temporary by move-in.
test(@~[10]);
fn test(-foo: int) { assert (foo == 10); }
-fn main() { let x = 10; test(x); }
+fn main() { let x = 10; test(move x); }
fn f2(-thing: fn@()) { }
fn f(-thing: fn@()) {
- f2(thing);
+ f2(move thing);
}
fn main() {
}
fn apply<T>(x: T, f: fn(T)) {
- f(x);
+ f(move x);
}
fn check_int(x: int) {
fn unwrap<T>(+o: Option<T>) -> T {
match move o {
- Some(move v) => v,
+ Some(move v) => move v,
None => fail
}
}
{
let b = Some(dtor { x:x });
- let c = unwrap(b);
+ let c = unwrap(move b);
}
assert *x == 0;
)
macro_rules! move_it (
- { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); y } }
+ { $x:expr } => { unsafe { let y <- *ptr::addr_of(&($x)); move y } }
)
fn switch<T: Send, U>(+endp: pipes::RecvPacket<T>,
f: fn(+v: Option<T>) -> U) -> U {
- f(pipes::try_recv(endp))
+ f(pipes::try_recv(move endp))
}
-fn move_it<T>(-x: T) -> T { x }
+fn move_it<T>(-x: T) -> T { move x }
macro_rules! follow (
{
fn client_follow(+bank: bank::client::login) {
use bank::*;
- let bank = client::login(bank, ~"theincredibleholk", ~"1234");
- let bank = switch(bank, follow! (
- ok -> connected { connected }
+ let bank = client::login(move bank, ~"theincredibleholk", ~"1234");
+ let bank = switch(move bank, follow! (
+ ok -> connected { move connected }
invalid -> _next { fail ~"bank closed the connected" }
));
- let bank = client::deposit(bank, 100.00);
- let bank = client::withdrawal(bank, 50.00);
- switch(bank, follow! (
+ let bank = client::deposit(move bank, 100.00);
+ let bank = client::withdrawal(move bank, 50.00);
+ switch(move bank, follow! (
money(m) -> _next {
io::println(~"Yay! I got money!");
}
fn bank_client(+bank: bank::client::login) {
use bank::*;
- let bank = client::login(bank, ~"theincredibleholk", ~"1234");
- let bank = match try_recv(bank) {
+ let bank = client::login(move bank, ~"theincredibleholk", ~"1234");
+ let bank = match try_recv(move bank) {
Some(ok(connected)) => {
move_it!(connected)
}
None => { fail ~"bank closed the connection" }
};
- let bank = client::deposit(bank, 100.00);
- let bank = client::withdrawal(bank, 50.00);
- match try_recv(bank) {
- Some(money(m, _)) => {
+ let bank = client::deposit(move bank, 100.00);
+ let bank = client::withdrawal(move bank, 50.00);
+ match try_recv(move bank) {
+ Some(money(*)) => {
io::println(~"Yay! I got money!");
}
Some(insufficient_funds(_)) => {
let iotask = uv::global_loop::get();
pipes::spawn_service(oneshot::init, |p| {
- match try_recv(p) {
+ match try_recv(move p) {
Some(*) => { fail }
None => { }
}
fn failtest() {
let (c, p) = oneshot::init();
- do task::spawn_with(c) |_c| {
+ do task::spawn_with(move c) |_c| {
fail;
}
- error!("%?", recv(p));
+ error!("%?", recv(move p));
// make sure we get killed if we missed it in the receive.
loop { task::yield() }
}
assert !pipes::peek(&p);
- oneshot::client::signal(c);
+ oneshot::client::signal(move c);
assert pipes::peek(&p);
}
pong: mk_packet::<pong>()
}
};
- do pipes::entangle_buffer(buffer) |buffer, data| {
+ do pipes::entangle_buffer(move buffer) |buffer, data| {
data.ping.set_buffer_(buffer);
data.pong.set_buffer_(buffer);
ptr::addr_of(&(data.ping))
let b = pipe.reuse_buffer();
let s = SendPacketBuffered(ptr::addr_of(&(b.buffer.data.pong)));
let c = RecvPacketBuffered(ptr::addr_of(&(b.buffer.data.pong)));
- let message = pingpong::ping(s);
- pipes::send(pipe, message);
- c
+ let message = pingpong::ping(move s);
+ pipes::send(move pipe, move message);
+ move c
}
}
type ping = pipes::SendPacketBuffered<pingpong::ping,
let b = pipe.reuse_buffer();
let s = SendPacketBuffered(ptr::addr_of(&(b.buffer.data.ping)));
let c = RecvPacketBuffered(ptr::addr_of(&(b.buffer.data.ping)));
- let message = pingpong::pong(s);
- pipes::send(pipe, message);
- c
+ let message = pingpong::pong(move s);
+ pipes::send(move pipe, move message);
+ move c
}
}
type pong = pipes::SendPacketBuffered<pingpong::pong,
fn client(-chan: pingpong::client::ping) {
use pingpong::client;
- let chan = client::ping(chan); return;
+ let chan = client::ping(move chan); return;
log(error, "Sent ping");
- let pong(_chan) = recv(chan);
+ let pong(_chan) = recv(move chan);
log(error, "Received pong");
}
fn server(-chan: pingpong::server::ping) {
use pingpong::server;
- let ping(chan) = recv(chan); return;
+ let ping(chan) = recv(move chan); return;
log(error, "Received ping");
- let _chan = server::pong(chan);
+ let _chan = server::pong(move chan);
log(error, "Sent pong");
}
}
fn main() {
let (client_, server_) = pingpong::init();
- let client_ = ~mut Some(client_);
- let server_ = ~mut Some(server_);
+ let client_ = ~mut Some(move client_);
+ let server_ = ~mut Some(move server_);
do task::spawn |move client_| {
let mut client__ = None;
*client_ <-> client__;
- test::client(option::unwrap(client__));
+ test::client(option::unwrap(move client__));
};
do task::spawn |move server_| {
let mut server_ˊ = None;
*server_ <-> server_ˊ;
- test::server(option::unwrap(server_ˊ));
+ test::server(option::unwrap(move server_ˊ));
};
}
fn client(-chan: pingpong::client::ping) {
use pingpong::client;
- let chan = client::ping(chan);
+ let chan = client::ping(move chan);
log(error, ~"Sent ping");
- let pong(_chan) = recv(chan);
+ let pong(_chan) = recv(move chan);
log(error, ~"Received pong");
}
fn server(-chan: pingpong::server::ping) {
use pingpong::server;
- let ping(chan) = recv(chan);
+ let ping(chan) = recv(move chan);
log(error, ~"Received ping");
- let _chan = server::pong(chan);
+ let _chan = server::pong(move chan);
log(error, ~"Sent pong");
}
}
fn main() {
let (client_, server_) = pingpong::init();
- let client_ = ~mut Some(client_);
- let server_ = ~mut Some(server_);
+ let client_ = ~mut Some(move client_);
+ let server_ = ~mut Some(move server_);
do task::spawn |move client_| {
let mut client__ = None;
*client_ <-> client__;
- test::client(option::unwrap(client__));
+ test::client(option::unwrap(move client__));
};
do task::spawn |move server_| {
let mut server_ˊ = None;
*server_ <-> server_ˊ;
- test::server(option::unwrap(server_ˊ));
+ test::server(option::unwrap(move server_ˊ));
};
}
], )*
} => {
if $index == $count {
- match move pipes::try_recv($port) {
+ match move pipes::try_recv(move $port) {
$(Some($message($($(move $x,)+)* move next)) => {
- let $next = next;
- $e
+ let $next = move next;
+ move $e
})+
_ => fail
}
-> $next:ident $e:expr),+
} )+
} => {
- let index = pipes::selecti([$(($port).header()),+]/_);
+ let index = pipes::selecti([$(($port).header()),+]);
select_if!(index, 0, $( $port => [
$($message$(($($x),+))dont_type_this* -> $next $e),+
], )+)
}
fn draw_frame(+channel: double_buffer::client::acquire) {
- let channel = request(channel);
+ let channel = request(move channel);
select! (
channel => {
give_buffer(buffer) -> channel {
render(&buffer);
- release(channel, move buffer)
+ release(move channel, move buffer)
}
}
);
}
fn draw_two_frames(+channel: double_buffer::client::acquire) {
- let channel = request(channel);
+ let channel = request(move channel);
let channel = select! (
channel => {
give_buffer(buffer) -> channel {
render(&buffer);
- release(channel, move buffer)
+ release(move channel, move buffer)
}
}
);
- let channel = request(channel);
+ let channel = request(move channel);
select! (
channel => {
give_buffer(buffer) -> channel {
render(&buffer);
- release(channel, move buffer)
+ release(move channel, move buffer)
}
}
);
+// tjc: un-xfail after snapshot
+// xfail-test
// xfail-pretty
// Protocols
},
do_baz(b) -> _next {
- if *b { debug!("true") } else { debug!("false") }
+ if b { debug!("true") } else { debug!("false") }
}
}
)
)
proto! stream (
- stream:send<T:Send> {
- send(T) -> stream<T>
+ Stream:send<T:Send> {
+ send(T) -> Stream<T>
}
)
let c = pipes::spawn_service(stream::init, |p| {
error!("waiting for pipes");
- let stream::send(x, p) = recv(p);
+ let stream::send(x, p) = recv(move p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
- = x;
+ = move x;
error!("selecting");
- let (i, _, _) = select(~[left, right]);
+ let (i, _, _) = select(~[move left, move right]);
error!("selected");
assert i == 0;
error!("waiting for pipes");
- let stream::send(x, _) = recv(p);
+ let stream::send(x, _) = recv(move p);
error!("got pipes");
let (left, right) : (oneshot::server::waiting,
oneshot::server::waiting)
- = x;
+ = move x;
error!("selecting");
- let (i, m, _) = select(~[left, right]);
+ let (i, m, _) = select(~[move left, move right]);
error!("selected %?", i);
if m.is_some() {
assert i == 1;
let (c1, p1) = oneshot::init();
let (_c2, p2) = oneshot::init();
- let c = send(c, (p1, p2));
+ let c = send(move c, (move p1, move p2));
sleep(iotask, 100);
- signal(c1);
+ signal(move c1);
let (_c1, p1) = oneshot::init();
let (c2, p2) = oneshot::init();
- send(c, (p1, p2));
+ send(move c, (move p1, move p2));
sleep(iotask, 100);
- signal(c2);
+ signal(move c2);
test_select2();
}
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
- stream::client::send(ac, 42);
+ stream::client::send(move ac, 42);
- match pipes::select2(ap, bp) {
+ match pipes::select2(move ap, move bp) {
either::Left(*) => { }
either::Right(*) => { fail }
}
- stream::client::send(bc, ~"abc");
+ stream::client::send(move bc, ~"abc");
error!("done with first select2");
let (ac, ap) = stream::init();
let (bc, bp) = stream::init();
- stream::client::send(bc, ~"abc");
+ stream::client::send(move bc, ~"abc");
- match pipes::select2(ap, bp) {
+ match pipes::select2(move ap, move bp) {
either::Left(*) => { fail }
either::Right(*) => { }
}
- stream::client::send(ac, 42);
+ stream::client::send(move ac, 42);
}
fn main() {
use oneshot::client::*;
- let c = pipes::spawn_service(oneshot::init, |p| { recv(p); });
+ let c = pipes::spawn_service(oneshot::init, |p| { recv(move p); });
let iotask = uv::global_loop::get();
sleep(iotask, 500);
- signal(c);
+ signal(move c);
}
\ No newline at end of file
-// Testing that calling #fmt (via #debug) doesn't complain about impure borrows
+// Testing that calling fmt! (via debug!) doesn't complain about impure borrows
pure fn foo() {
let a = {
}
fn main() {
-}
\ No newline at end of file
+}
use intrinsic::{TyDesc, get_tydesc, visit_tydesc, TyVisitor};
use libc::c_void;
-
-// FIXME: this is a near-duplicate of code in core::vec.
-type unboxed_vec_repr = {
- mut fill: uint,
- mut alloc: uint,
- data: u8
-};
+use vec::UnboxedVecRepr;
#[doc = "High-level interfaces to `intrinsic::visit_ty` reflection system."]
}
fn visit_unboxed_vec(mtbl: uint, inner: *TyDesc) -> bool {
- self.align_to::<unboxed_vec_repr>();
- // FIXME: Inner really has to move its own pointers on this one.
+ self.align_to::<UnboxedVecRepr>();
+ // FIXME (#3732): Inner really has to move its own pointers on this one.
// or else possibly we could have some weird interface wherein we
// read-off a word from inner's pointers, but the read-word has to
// always be the same in all sub-pointers? Dubious.
fn visit_enter_enum(_n_variants: uint,
_sz: uint, _align: uint) -> bool {
- // FIXME: this needs to rewind between enum variants, or something.
+ // FIXME (#3732): this needs to rewind between enum variants, or something.
true
}
fn visit_enter_enum_variant(_variant: uint,
-// FIXME: un-xfail after snapshot
// xfail-test
-
-use intrinsic::{tydesc, get_tydesc, visit_tydesc, ty_visitor};
+use intrinsic::{TyDesc, get_tydesc, visit_tydesc, TyVisitor};
enum my_visitor = @{ mut types: ~[str] };
-impl of ty_visitor for my_visitor {
+impl my_visitor: TyVisitor {
fn visit_bot() -> bool {
- self.types += ["bot"];
+ self.types += ~["bot"];
error!("visited bot type");
true
}
fn visit_nil() -> bool {
- self.types += ["nil"];
+ self.types += ~["nil"];
error!("visited nil type");
true
}
fn visit_bool() -> bool {
- self.types += ["bool"];
+ self.types += ~["bool"];
error!("visited bool type");
true
}
fn visit_int() -> bool {
- self.types += ["int"];
+ self.types += ~["int"];
error!("visited int type");
true
}
fn visit_i8() -> bool {
- self.types += ["i8"];
+ self.types += ~["i8"];
error!("visited i8 type");
true
}
fn visit_i16() -> bool {
- self.types += ["i16"];
+ self.types += ~["i16"];
error!("visited i16 type");
true
}
fn visit_estr_fixed(_sz: uint, _sz: uint,
_align: uint) -> bool { true }
- fn visit_box(_mtbl: uint, _inner: *tydesc) -> bool { true }
- fn visit_uniq(_mtbl: uint, _inner: *tydesc) -> bool { true }
- fn visit_ptr(_mtbl: uint, _inner: *tydesc) -> bool { true }
- fn visit_rptr(_mtbl: uint, _inner: *tydesc) -> bool { true }
+ fn visit_box(_mtbl: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_uniq(_mtbl: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_ptr(_mtbl: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_rptr(_mtbl: uint, _inner: *TyDesc) -> bool { true }
- fn visit_vec(_mtbl: uint, inner: *tydesc) -> bool {
- self.types += ["["];
- visit_tydesc(inner, my_visitor(*self) as ty_visitor);
- self.types += ["]"];
+ fn visit_vec(_mtbl: uint, inner: *TyDesc) -> bool {
+ self.types += ~["["];
+ visit_tydesc(inner, my_visitor(*self) as TyVisitor);
+ self.types += ~["]"];
true
}
- fn visit_unboxed_vec(_mtbl: uint, _inner: *tydesc) -> bool { true }
- fn visit_evec_box(_mtbl: uint, _inner: *tydesc) -> bool { true }
- fn visit_evec_uniq(_mtbl: uint, _inner: *tydesc) -> bool { true }
- fn visit_evec_slice(_mtbl: uint, _inner: *tydesc) -> bool { true }
+ fn visit_unboxed_vec(_mtbl: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_evec_box(_mtbl: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_evec_uniq(_mtbl: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_evec_slice(_mtbl: uint, _inner: *TyDesc) -> bool { true }
fn visit_evec_fixed(_n: uint, _sz: uint, _align: uint,
- _mtbl: uint, _inner: *tydesc) -> bool { true }
+ _mtbl: uint, _inner: *TyDesc) -> bool { true }
fn visit_enter_rec(_n_fields: uint,
_sz: uint, _align: uint) -> bool { true }
fn visit_rec_field(_i: uint, _name: &str,
- _mtbl: uint, _inner: *tydesc) -> bool { true }
+ _mtbl: uint, _inner: *TyDesc) -> bool { true }
fn visit_leave_rec(_n_fields: uint,
_sz: uint, _align: uint) -> bool { true }
fn visit_enter_class(_n_fields: uint,
_sz: uint, _align: uint) -> bool { true }
fn visit_class_field(_i: uint, _name: &str,
- _mtbl: uint, _inner: *tydesc) -> bool { true }
+ _mtbl: uint, _inner: *TyDesc) -> bool { true }
fn visit_leave_class(_n_fields: uint,
_sz: uint, _align: uint) -> bool { true }
fn visit_enter_tup(_n_fields: uint,
_sz: uint, _align: uint) -> bool { true }
- fn visit_tup_field(_i: uint, _inner: *tydesc) -> bool { true }
+ fn visit_tup_field(_i: uint, _inner: *TyDesc) -> bool { true }
fn visit_leave_tup(_n_fields: uint,
_sz: uint, _align: uint) -> bool { true }
_disr_val: int,
_n_fields: uint,
_name: &str) -> bool { true }
- fn visit_enum_variant_field(_i: uint, _inner: *tydesc) -> bool { true }
+ fn visit_enum_variant_field(_i: uint, _inner: *TyDesc) -> bool { true }
fn visit_leave_enum_variant(_variant: uint,
_disr_val: int,
_n_fields: uint,
fn visit_enter_fn(_purity: uint, _proto: uint,
_n_inputs: uint, _retstyle: uint) -> bool { true }
- fn visit_fn_input(_i: uint, _mode: uint, _inner: *tydesc) -> bool { true }
- fn visit_fn_output(_retstyle: uint, _inner: *tydesc) -> bool { true }
+ fn visit_fn_input(_i: uint, _mode: uint, _inner: *TyDesc) -> bool { true }
+ fn visit_fn_output(_retstyle: uint, _inner: *TyDesc) -> bool { true }
fn visit_leave_fn(_purity: uint, _proto: uint,
_n_inputs: uint, _retstyle: uint) -> bool { true }
fn visit_self() -> bool { true }
fn visit_type() -> bool { true }
fn visit_opaque_box() -> bool { true }
- fn visit_constr(_inner: *tydesc) -> bool { true }
+ fn visit_constr(_inner: *TyDesc) -> bool { true }
fn visit_closure_ptr(_ck: uint) -> bool { true }
}
-fn visit_ty<T>(v: ty_visitor) {
+fn visit_ty<T>(v: TyVisitor) {
visit_tydesc(get_tydesc::<T>(), v);
}
fn main() {
let v = my_visitor(@{mut types: ~[]});
- let vv = v as ty_visitor;
+ let vv = v as TyVisitor;
visit_ty::<bool>(vv);
visit_ty::<int>(vv);
}
fn main() {
- let p = [1,2,3,4,5]/_;
+ let p = [1,2,3,4,5];
assert foo(p) == 1;
}
cl: &fn(),
}
-fn box_it(x: &r/fn()) -> closure_box/&r {
- closure_box {cl: x}
+fn box_it(+x: &r/fn()) -> closure_box/&r {
+ closure_box {cl: move x}
}
fn main() {
cl: &fn(),
}
-fn box_it(x: &r/fn()) -> closure_box/&r {
- closure_box {cl: x}
+fn box_it(+x: &r/fn()) -> closure_box/&r {
+ closure_box {cl: move x}
}
fn call_static_closure(cl: closure_box/&static) {
fn main() {
let cl_box = box_it(|| debug!("Hello, world!"));
- call_static_closure(cl_box);
+ call_static_closure(move cl_box);
}
// Even though these look like copies, they are guaranteed not to be
{
let a = r(i);
- let b = (a, 10);
- let (c, _d) = b;
+ let b = (move a, 10);
+ let (c, _d) = move b;
log(debug, c);
}
assert *i == 1;
fn main() unsafe {
let i1 = ~0;
let i1p = cast::reinterpret_cast(&i1);
- cast::forget(i1);
+ cast::forget(move i1);
let i2 = ~0;
let i2p = cast::reinterpret_cast(&i2);
- cast::forget(i2);
+ cast::forget(move i2);
let x1 = @t({
mut next: None,
let rs = r(i1p);
debug!("r = %x",
cast::reinterpret_cast::<*r, uint>(&ptr::addr_of(&rs)));
- rs }
+ move rs }
});
debug!("x1 = %x, x1.r = %x",
let rs = r(i2p);
debug!("r2 = %x",
cast::reinterpret_cast::<*r, uint>(&ptr::addr_of(&rs)));
- rs
+ move rs
}
});
fn main() unsafe {
let i1 = ~0xA;
let i1p = cast::reinterpret_cast(&i1);
- cast::forget(i1);
+ cast::forget(move i1);
let i2 = ~0xA;
let i2p = cast::reinterpret_cast(&i2);
- cast::forget(i2);
+ cast::forget(move i2);
let u1 = {a: 0xB, b: 0xC, c: i1p};
let u2 = {a: 0xB, b: 0xC, c: i2p};
fn main() unsafe {
let i1 = ~0xA;
let i1p = cast::reinterpret_cast(&i1);
- cast::forget(i1);
+ cast::forget(move i1);
let i2 = ~0xA;
let i2p = cast::reinterpret_cast(&i2);
- cast::forget(i2);
+ cast::forget(move i2);
let u1 = {a: 0xB, b: 0xC, c: i1p};
let u2 = {a: 0xB, b: 0xC, c: i2p};
};
let fptr = cast::reinterpret_cast(&ptr::addr_of(&f));
rustrt::start_task(new_task_id, fptr);
- cast::forget(f);
+ cast::forget(move f);
comm::recv(po);
}
$count:expr,
$port:path => [
$(type_this $message:path$(($(x $x: ident),+))dont_type_this*
- -> $next:ident => { $e:expr }),+
+ -> $next:ident => { move $e:expr }),+
]
$(, $ports:path => [
$(type_this $messages:path$(($(x $xs: ident),+))dont_type_this*
- -> $nexts:ident => { $es:expr }),+
+ -> $nexts:ident => { move $es:expr }),+
] )*
} => {
if $index == $count {
match move pipes::try_recv($port) {
- $(Some($message($($(ref $x,)+)* ref next)) => {
- // FIXME (#2329) we really want move out of enum here.
- let $next = unsafe { let x <- *ptr::addr_of(&(*next)); x };
- $e
+ $(Some($message($($(move $x,)+)* move next)) => {
+ let $next = move next;
+ move $e
})+
_ => fail
}
$count + 1
$(, $ports => [
$(type_this $messages$(($(x $xs),+))dont_type_this*
- -> $nexts => { $es }),+
+ -> $nexts => { move $es }),+
])*
)
}
-> $next:ident $e:expr),+
} )+
} => {
- let index = pipes::selecti([$(($port).header()),+]/_);
+ let index = pipes::selecti([$(($port).header()),+]);
select_if!(index, 0 $(, $port => [
- $(type_this $message$(($(x $x),+))dont_type_this* -> $next => { $e }),+
+ $(type_this $message$(($(x $x),+))dont_type_this* -> $next => { move $e }),+
])+)
}
)
--- /dev/null
+trait Deserializer {
+ fn read_int() -> int;
+}
+
+trait Deserializable<D: Deserializer> {
+ static fn deserialize(d: &D) -> self;
+}
+
+impl<D: Deserializer> int: Deserializable<D> {
+ static fn deserialize(d: &D) -> int {
+ return d.read_int();
+ }
+}
+
+struct FromThinAir { dummy: () }
+
+impl FromThinAir: Deserializer {
+ fn read_int() -> int { 22 }
+}
+
+fn main() {
+ let d = FromThinAir { dummy: () };
+ let i: int = deserialize(&d);
+ assert i == 22;
+}
\ No newline at end of file
impl bool: bool_like {
static fn select<A>(&&b: bool, +x1: A, +x2: A) -> A {
- if b { x1 } else { x2 }
+ if b { move x1 } else { move x2 }
}
}
impl int: bool_like {
static fn select<A>(&&b: int, +x1: A, +x2: A) -> A {
- if b != 0 { x1 } else { x2 }
+ if b != 0 { move x1 } else { move x2 }
}
}
struct foo {
x: ~str,
- drop { #error["%s", self.x]; }
+ drop { error!("%s", self.x); }
}
fn main() {
fn test05() {
let (ch, po) = pipes::stream();
- task::spawn(|| test05_start(ch) );
+ task::spawn(|move ch| test05_start(ch) );
let mut value = po.recv();
log(error, value);
value = po.recv();
fn start(c: pipes::Chan<pipes::Chan<~str>>) {
let (ch, p) = pipes::stream();
- c.send(ch);
+ c.send(move ch);
let mut a;
let mut b;
log(error, a);
b = p.recv();
assert b == ~"B";
- log(error, b);
+ log(error, move b);
}
fn main() {
let (ch, p) = pipes::stream();
- let child = task::spawn(|| start(ch) );
+ let child = task::spawn(|move ch| start(ch) );
let c = p.recv();
c.send(~"A");
fn start(c: pipes::Chan<pipes::Chan<int>>) {
let (ch, p) = pipes::stream();
- c.send(ch);
+ c.send(move ch);
}
fn main() {
let (ch, p) = pipes::stream();
- let child = task::spawn(|| start(ch) );
+ let child = task::spawn(|move ch| start(ch) );
let c = p.recv();
}
fn test00() {
let i: int = 0;
let mut result = None;
- do task::task().future_result(|+r| { result = Some(r); }).spawn {
+ do task::task().future_result(|+r| { result = Some(move r); }).spawn {
start(i)
}
}
// Try joining tasks that have already finished.
- future::get(&option::unwrap(result));
+ future::get(&option::unwrap(move result));
debug!("Joined task.");
}
fn main() {
debug!("Check that we don't deadlock.");
let (ch, p) = pipes::stream();
- task::try(|| start(ch, 0, 10) );
+ task::try(|move ch| start(ch, 0, 10) );
debug!("Joined task");
}
while (i > 0) {
log(debug, i);
let (ch, p) = pipes::stream();
- po.add(p);
- task::spawn(|copy i| child(i, ch) );
+ po.add(move p);
+ task::spawn(|move ch, copy i| child(i, ch) );
i = i - 1;
}
// the child's point of view the receiver may die. We should
// drop messages on the floor in this case, and not crash!
let (ch, p) = pipes::stream();
- task::spawn(|| start(ch, 10));
+ task::spawn(|move ch| start(ch, 10));
p.recv();
}
fn test_chan() {
let (ch, po) = pipes::stream();
let (ch0, po0) = pipes::stream();
- ch.send(ch0);
+ ch.send(move ch0);
let ch1 = po.recv();
// Does the transmitted channel still work?
while i < number_of_tasks {
let ch = po.chan();
do task::task().future_result(|+r| {
- results.push(r);
- }).spawn |copy i| {
+ results.push(move r);
+ }).spawn |move ch, copy i| {
test00_start(ch, i, number_of_messages)
}
i = i + 1;
let number_of_messages: int = 10;
let c = p.chan();
- do task::spawn {
+ do task::spawn |move c| {
test00_start(c, number_of_messages * 0, number_of_messages);
}
let c = p.chan();
- do task::spawn {
+ do task::spawn |move c| {
test00_start(c, number_of_messages * 1, number_of_messages);
}
let c = p.chan();
- do task::spawn {
+ do task::spawn |move c| {
test00_start(c, number_of_messages * 2, number_of_messages);
}
let c = p.chan();
- do task::spawn {
+ do task::spawn |move c| {
test00_start(c, number_of_messages * 3, number_of_messages);
}
let ch = p.chan();
let mut result = None;
- do task::task().future_result(|+r| { result = Some(r); }).spawn {
+ do task::task().future_result(|+r| { result = Some(move r); }).spawn
+ |move ch| {
test00_start(ch, number_of_messages);
}
i += 1;
}
- future::get(&option::unwrap(result));
+ future::get(&option::unwrap(move result));
assert (sum == number_of_messages * (number_of_messages - 1) / 2);
}
while i < number_of_tasks {
i = i + 1;
do task::task().future_result(|+r| {
- results.push(r);
+ results.push(move r);
}).spawn |copy i| {
test00_start(ch, i, number_of_messages);
}
while i < number_of_tasks {
i = i + 1;
do task::task().future_result(|+r| {
- results.push(r);
+ results.push(move r);
}).spawn |copy i| {
test06_start(i);
};
+// xfail-test
/**
A test case for issue #577, which also exposes #588
*/
-// FIXME: This won't work until we can compare resources (#2601)
-// xfail-test
-
extern mod std;
-use task::join;
fn child() { }
+struct notify {
+ ch: comm::Chan<bool>, v: @mut bool,
+ drop {
+ error!("notify: task=%? v=%x unwinding=%b b=%b",
+ task::get_task(),
+ ptr::addr_of(&(*(self.v))) as uint,
+ task::failing(),
+ *(self.v));
+ let b = *(self.v);
+ comm::send(self.ch, b);
+ }
+}
+
+fn notify(ch: comm::Chan<bool>, v: @mut bool) -> notify {
+ notify {
+ ch: ch,
+ v: v
+ }
+}
+
+fn joinable(+f: fn~()) -> comm::Port<bool> {
+ fn wrapper(+c: comm::Chan<bool>, +f: fn()) {
+ let b = @mut false;
+ error!("wrapper: task=%? allocated v=%x",
+ task::get_task(),
+ ptr::addr_of(&(*b)) as uint);
+ let _r = notify(c, b);
+ f();
+ *b = true;
+ }
+ let p = comm::Port();
+ let c = comm::Chan(&p);
+ do task::spawn_unlinked { wrapper(c, copy f) };
+ p
+}
+
+fn join(port: comm::Port<bool>) -> bool {
+ comm::recv(port)
+}
+
fn main() {
// tasks
let t1;
let t2;
let c1 = child, c2 = child;
- t1 = task::spawn_joinable(c1);
- t2 = task::spawn_joinable(c2);
+ t1 = joinable(c1);
+ t2 = joinable(c2);
assert (t1 == t1);
assert (t1 != t2);
let p1;
let p2;
- p1 = comm::port::<int>();
- p2 = comm::port::<int>();
+ p1 = comm::Port::<int>();
+ p2 = comm::Port::<int>();
assert (p1 == p1);
assert (p1 != p2);
// channels
- let c1 = comm::chan(p1);
- let c2 = comm::chan(p2);
+ let c1 = comm::Chan(p1);
+ let c2 = comm::Chan(p2);
assert (c1 == c1);
assert (c1 != c2);
let tests = __test::tests();
let shouldignore = option::get(
- &vec::find(tests, |t| t.name == ~"shouldignore" ));
+ vec::find(tests, |t| t.name == ~"shouldignore" ));
assert shouldignore.ignore == true;
let shouldnotignore = option::get(
- &vec::find(tests, |t| t.name == ~"shouldnotignore" ));
+ vec::find(tests, |t| t.name == ~"shouldnotignore" ));
assert shouldnotignore.ignore == false;
}
\ No newline at end of file
assert indirect(~[10, 20]) == ~"[10, 20]!";
fn indirect2<T: to_str>(x: T) -> ~str {
- indirect(x)
+ indirect(move x)
}
assert indirect2(~[1]) == ~"[1]!";
}
fn main() {
let i = ~100;
- f(i);
+ f(move i);
}
\ No newline at end of file
#[no_core];
extern mod core;
extern mod zed(name = "core");
-extern mod bar(name = "core", vers = "0.4");
+extern mod bar(name = "core", vers = "0.5");
use core::str;
fn main() {
let mut result = None;
- task::task().future_result(|+r| { result = Some(r); }).spawn(child);
+ task::task().future_result(|+r| { result = Some(move r); }).spawn(child);
error!("1");
yield();
error!("2");
yield();
error!("3");
- future::get(&option::unwrap(result));
+ future::get(&option::unwrap(move result));
}
fn child() {
fn main() {
let mut result = None;
- task::task().future_result(|+r| { result = Some(r); }).spawn(child);
+ task::task().future_result(|+r| { result = Some(move r); }).spawn(child);
error!("1");
yield();
- future::get(&option::unwrap(result));
+ future::get(&option::unwrap(move result));
}
fn child() { error!("2"); }