}
pub enum AbiArchitecture {
- RustArch, // Not a real ABI (e.g., intrinsic)
- AllArch, // An ABI that specifies cross-platform defaults (e.g., "C")
- Archs(u32) // Multiple architectures (bitset)
+ /// Not a real ABI (e.g., intrinsic)
+ RustArch,
+ /// An ABI that specifies cross-platform defaults (e.g., "C")
+ AllArch,
+ /// Multiple architectures (bitset)
+ Archs(u32)
}
static AbiDatas: &'static [AbiData] = &[
AbiData {abi: RustIntrinsic, name: "rust-intrinsic", abi_arch: RustArch},
];
+/// Iterates through each of the defined ABIs.
fn each_abi(op: |abi: Abi| -> bool) -> bool {
- /*!
- *
- * Iterates through each of the defined ABIs.
- */
-
AbiDatas.iter().advance(|abi_data| op(abi_data.abi))
}
+/// Returns the ABI with the given name (if any).
pub fn lookup(name: &str) -> Option<Abi> {
- /*!
- *
- * Returns the ABI with the given name (if any).
- */
-
let mut res = None;
each_abi(|abi| {
use std::gc::{Gc, GC};
use serialize::{Encodable, Decodable, Encoder, Decoder};
-/// A pointer abstraction. FIXME(eddyb) #10676 use Rc<T> in the future.
+/// A pointer abstraction.
+// FIXME(eddyb) #10676 use Rc<T> in the future.
pub type P<T> = Gc<T>;
#[allow(non_snake_case_functions)]
// FIXME #6993: in librustc, uses of "ident" should be replaced
// by just "Name".
-// an identifier contains a Name (index into the interner
-// table) and a SyntaxContext to track renaming and
-// macro expansion per Flatt et al., "Macros
-// That Work Together"
+/// An identifier contains a Name (index into the interner
+/// table) and a SyntaxContext to track renaming and
+/// macro expansion per Flatt et al., "Macros
+/// That Work Together"
#[deriving(Clone, Hash, PartialOrd, Eq, Ord, Show)]
pub struct Ident {
pub name: Name,
pub name: Name
}
-// a "Path" is essentially Rust's notion of a name;
-// for instance: std::cmp::PartialEq . It's represented
-// as a sequence of identifiers, along with a bunch
-// of supporting information.
+/// A "Path" is essentially Rust's notion of a name; for instance:
+/// std::cmp::PartialEq. It's represented as a sequence of identifiers,
+/// along with a bunch of supporting information.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Path {
pub span: Span,
pub static LOCAL_CRATE: CrateNum = 0;
pub static CRATE_NODE_ID: NodeId = 0;
-// When parsing and doing expansions, we initially give all AST nodes this AST
-// node value. Then later, in the renumber pass, we renumber them to have
-// small, positive ids.
+/// When parsing and doing expansions, we initially give all AST nodes this AST
+/// node value. Then later, in the renumber pass, we renumber them to have
+/// small, positive ids.
pub static DUMMY_NODE_ID: NodeId = -1;
-// The AST represents all type param bounds as types.
-// typeck::collect::compute_bounds matches these against
-// the "special" built-in traits (see middle::lang_items) and
-// detects Copy, Send and Share.
+/// The AST represents all type param bounds as types.
+/// typeck::collect::compute_bounds matches these against
+/// the "special" built-in traits (see middle::lang_items) and
+/// detects Copy, Send and Share.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum TyParamBound {
TraitTyParamBound(TraitRef),
}
}
-// The set of MetaItems that define the compilation environment of the crate,
-// used to drive conditional compilation
-pub type CrateConfig = Vec<Gc<MetaItem>>;
+/// The set of MetaItems that define the compilation environment of the crate,
+/// used to drive conditional compilation
+pub type CrateConfig = Vec<Gc<MetaItem>>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Crate {
pub enum Pat_ {
PatWild,
PatWildMulti,
- // A PatIdent may either be a new bound variable,
- // or a nullary enum (in which case the third field
- // is None).
- // In the nullary enum case, the parser can't determine
- // which it is. The resolver determines this, and
- // records this pattern's NodeId in an auxiliary
- // set (of "PatIdents that refer to nullary enums")
+ /// A PatIdent may either be a new bound variable,
+ /// or a nullary enum (in which case the third field
+ /// is None).
+ /// In the nullary enum case, the parser can't determine
+ /// which it is. The resolver determines this, and
+ /// records this pattern's NodeId in an auxiliary
+ /// set (of "PatIdents that refer to nullary enums")
PatIdent(BindingMode, SpannedIdent, Option<Gc<Pat>>),
PatEnum(Path, Option<Vec<Gc<Pat>>>), /* "none" means a * pattern where
* we don't bind the fields to names */
PatRegion(Gc<Pat>), // reference pattern
PatLit(Gc<Expr>),
PatRange(Gc<Expr>, Gc<Expr>),
- // [a, b, ..i, y, z] is represented as
- // PatVec(~[a, b], Some(i), ~[y, z])
+ /// [a, b, ..i, y, z] is represented as:
+ /// PatVec(~[a, b], Some(i), ~[y, z])
PatVec(Vec<Gc<Pat>>, Option<Gc<Pat>>, Vec<Gc<Pat>>),
PatMac(Mac),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum ExprVstore {
- ExprVstoreUniq, // ~[1,2,3,4]
- ExprVstoreSlice, // &[1,2,3,4]
- ExprVstoreMutSlice, // &mut [1,2,3,4]
+ /// ~[1, 2, 3, 4]
+ ExprVstoreUniq,
+ /// &[1, 2, 3, 4]
+ ExprVstoreSlice,
+ /// &mut [1, 2, 3, 4]
+ ExprVstoreMutSlice,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Stmt_ {
- // could be an item or a local (let) binding:
+ /// Could be an item or a local (let) binding:
StmtDecl(Gc<Decl>, NodeId),
- // expr without trailing semi-colon (must have unit type):
+ /// Expr without trailing semi-colon (must have unit type):
StmtExpr(Gc<Expr>, NodeId),
- // expr with trailing semi-colon (may have any type):
+ /// Expr with trailing semi-colon (may have any type):
StmtSemi(Gc<Expr>, NodeId),
- // bool: is there a trailing sem-colon?
+ /// bool: is there a trailing semi-colon?
StmtMac(Mac, bool),
}
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Decl_ {
- // a local (let) binding:
+ /// A local (let) binding:
DeclLocal(Gc<Local>),
- // an item binding:
+ /// An item binding:
DeclItem(Gc<Item>),
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Expr_ {
ExprVstore(Gc<Expr>, ExprVstore),
- // First expr is the place; second expr is the value.
+ /// First expr is the place; second expr is the value.
ExprBox(Gc<Expr>, Gc<Expr>),
ExprVec(Vec<Gc<Expr>>),
ExprCall(Gc<Expr>, Vec<Gc<Expr>>),
ExprMac(Mac),
- // A struct literal expression.
+ /// A struct literal expression.
ExprStruct(Path, Vec<Field> , Option<Gc<Expr>> /* base */),
- // A vector literal constructed from one repeated element.
+ /// A vector literal constructed from one repeated element.
ExprRepeat(Gc<Expr> /* element */, Gc<Expr> /* count */),
- // No-op: used solely so we can pretty-print faithfully
+ /// No-op: used solely so we can pretty-print faithfully
ExprParen(Gc<Expr>)
}
-// When the main rust parser encounters a syntax-extension invocation, it
-// parses the arguments to the invocation as a token-tree. This is a very
-// loose structure, such that all sorts of different AST-fragments can
-// be passed to syntax extensions using a uniform type.
-//
-// If the syntax extension is an MBE macro, it will attempt to match its
-// LHS "matchers" against the provided token tree, and if it finds a
-// match, will transcribe the RHS token tree, splicing in any captured
-// macro_parser::matched_nonterminals into the TTNonterminals it finds.
-//
-// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
-// makes any real sense. You could write them elsewhere but nothing
-// else knows what to do with them, so you'll probably get a syntax
-// error.
-//
+/// When the main rust parser encounters a syntax-extension invocation, it
+/// parses the arguments to the invocation as a token-tree. This is a very
+/// loose structure, such that all sorts of different AST-fragments can
+/// be passed to syntax extensions using a uniform type.
+///
+/// If the syntax extension is an MBE macro, it will attempt to match its
+/// LHS "matchers" against the provided token tree, and if it finds a
+/// match, will transcribe the RHS token tree, splicing in any captured
+/// macro_parser::matched_nonterminals into the TTNonterminals it finds.
+///
+/// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
+/// makes any real sense. You could write them elsewhere but nothing
+/// else knows what to do with them, so you'll probably get a syntax
+/// error.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
#[doc="For macro invocations; parsing is delegated to the macro"]
pub enum TokenTree {
- // a single token
+ /// A single token
TTTok(Span, ::parse::token::Token),
- // a delimited sequence (the delimiters appear as the first
- // and last elements of the vector)
+ /// A delimited sequence (the delimiters appear as the first
+ /// and last elements of the vector)
// FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
TTDelim(Rc<Vec<TokenTree>>),
// These only make sense for right-hand-sides of MBE macros:
- // a kleene-style repetition sequence with a span, a TTForest,
- // an optional separator, and a boolean where true indicates
- // zero or more (..), and false indicates one or more (+).
+ /// A Kleene-style repetition sequence with a span, a TTForest,
+ /// an optional separator, and a boolean where true indicates
+ /// zero or more (..), and false indicates one or more (+).
// FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
TTSeq(Span, Rc<Vec<TokenTree>>, Option<::parse::token::Token>, bool),
- // a syntactic variable that will be filled in by macro expansion.
+ /// A syntactic variable that will be filled in by macro expansion.
TTNonterminal(Span, Ident)
}
-//
-// Matchers are nodes defined-by and recognized-by the main rust parser and
-// language, but they're only ever found inside syntax-extension invocations;
-// indeed, the only thing that ever _activates_ the rules in the rust parser
-// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
-// itself. Matchers represent a small sub-language for pattern-matching
-// token-trees, and are thus primarily used by the macro-defining extension
-// itself.
-//
-// MatchTok
-// --------
-//
-// A matcher that matches a single token, denoted by the token itself. So
-// long as there's no $ involved.
-//
-//
-// MatchSeq
-// --------
-//
-// A matcher that matches a sequence of sub-matchers, denoted various
-// possible ways:
-//
-// $(M)* zero or more Ms
-// $(M)+ one or more Ms
-// $(M),+ one or more comma-separated Ms
-// $(A B C);* zero or more semi-separated 'A B C' seqs
-//
-//
-// MatchNonterminal
-// -----------------
-//
-// A matcher that matches one of a few interesting named rust
-// nonterminals, such as types, expressions, items, or raw token-trees. A
-// black-box matcher on expr, for example, binds an expr to a given ident,
-// and that ident can re-occur as an interpolation in the RHS of a
-// macro-by-example rule. For example:
-//
-// $foo:expr => 1 + $foo // interpolate an expr
-// $foo:tt => $foo // interpolate a token-tree
-// $foo:tt => bar! $foo // only other valid interpolation
-// // is in arg position for another
-// // macro
-//
-// As a final, horrifying aside, note that macro-by-example's input is
-// also matched by one of these matchers. Holy self-referential! It is matched
-// by a MatchSeq, specifically this one:
-//
-// $( $lhs:matchers => $rhs:tt );+
-//
-// If you understand that, you have closed to loop and understand the whole
-// macro system. Congratulations.
-//
+/// Matchers are nodes defined-by and recognized-by the main rust parser and
+/// language, but they're only ever found inside syntax-extension invocations;
+/// indeed, the only thing that ever _activates_ the rules in the rust parser
+/// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
+/// itself. Matchers represent a small sub-language for pattern-matching
+/// token-trees, and are thus primarily used by the macro-defining extension
+/// itself.
+///
+/// MatchTok
+/// --------
+///
+/// A matcher that matches a single token, denoted by the token itself. So
+/// long as there's no $ involved.
+///
+///
+/// MatchSeq
+/// --------
+///
+/// A matcher that matches a sequence of sub-matchers, denoted various
+/// possible ways:
+///
+/// $(M)* zero or more Ms
+/// $(M)+ one or more Ms
+/// $(M),+ one or more comma-separated Ms
+/// $(A B C);* zero or more semi-separated 'A B C' seqs
+///
+///
+/// MatchNonterminal
+/// -----------------
+///
+/// A matcher that matches one of a few interesting named rust
+/// nonterminals, such as types, expressions, items, or raw token-trees. A
+/// black-box matcher on expr, for example, binds an expr to a given ident,
+/// and that ident can re-occur as an interpolation in the RHS of a
+/// macro-by-example rule. For example:
+///
+/// $foo:expr => 1 + $foo // interpolate an expr
+/// $foo:tt => $foo // interpolate a token-tree
+/// $foo:tt => bar! $foo // only other valid interpolation
+/// // is in arg position for another
+/// // macro
+///
+/// As a final, horrifying aside, note that macro-by-example's input is
+/// also matched by one of these matchers. Holy self-referential! It is matched
+/// by a MatchSeq, specifically this one:
+///
+/// $( $lhs:matchers => $rhs:tt );+
+///
+/// If you understand that, you have closed the loop and understand the whole
+/// macro system. Congratulations.
pub type Matcher = Spanned<Matcher_>;
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Matcher_ {
- // match one token
+ /// Match one token
MatchTok(::parse::token::Token),
- // match repetitions of a sequence: body, separator, zero ok?,
- // lo, hi position-in-match-array used:
+ /// Match repetitions of a sequence: body, separator, zero ok?,
+ /// lo, hi position-in-match-array used:
MatchSeq(Vec<Matcher> , Option<::parse::token::Token>, bool, uint, uint),
- // parse a Rust NT: name to bind, name of NT, position in match array:
+ /// Parse a Rust NT: name to bind, name of NT, position in match array:
MatchNonterminal(Ident, Ident, uint)
}
pub type Mac = Spanned<Mac_>;
-// represents a macro invocation. The Path indicates which macro
-// is being invoked, and the vector of token-trees contains the source
-// of the macro invocation.
-// There's only one flavor, now, so this could presumably be simplified.
+/// Represents a macro invocation. The Path indicates which macro
+/// is being invoked, and the vector of token-trees contains the source
+/// of the macro invocation.
+/// There's only one flavor, now, so this could presumably be simplified.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum Mac_ {
MacInvocTT(Path, Vec<TokenTree> , SyntaxContext), // new macro-invocation
pub vis: Visibility,
}
-/// Represents a method declaration in a trait declaration, possibly
-/// including a default implementation
-// A trait method is either required (meaning it doesn't have an
-// implementation, just a signature) or provided (meaning it has a default
-// implementation).
+/// Represents a method declaration in a trait declaration, possibly including
+/// a default implementation. A trait method is either required (meaning it
+/// doesn't have an implementation, just a signature) or provided (meaning it
+/// has a default implementation).
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum TraitMethod {
Required(TypeMethod),
pub span: Span,
}
-// Not represented directly in the AST, referred to by name through a ty_path.
+/// Not represented directly in the AST, referred to by name through a ty_path.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum PrimTy {
TyInt(IntTy),
pub fn_style: FnStyle,
pub onceness: Onceness,
pub decl: P<FnDecl>,
- // Optional optvec distinguishes between "fn()" and "fn:()" so we can
- // implement issue #7264. None means "fn()", which means infer a default
- // bound based on pointer sigil during typeck. Some(Empty) means "fn:()",
- // which means use no bounds (e.g., not even Owned on a ~fn()).
+ /// Optional optvec distinguishes between "fn()" and "fn:()" so we can
+ /// implement issue #7264. None means "fn()", which means infer a default
+ /// bound based on pointer sigil during typeck. Some(Empty) means "fn:()",
+ /// which means use no bounds (e.g., not even Owned on a ~fn()).
pub bounds: Option<OwnedSlice<TyParamBound>>,
}
TyUnboxedFn(Gc<UnboxedFnTy>),
TyTup(Vec<P<Ty>> ),
TyPath(Path, Option<OwnedSlice<TyParamBound>>, NodeId), // for #7264; see above
- // No-op; kept solely so that we can pretty-print faithfully
+ /// No-op; kept solely so that we can pretty-print faithfully
TyParen(P<Ty>),
TyTypeof(Gc<Expr>),
- // TyInfer means the type should be inferred instead of it having been
- // specified. This can appear anywhere in a type.
+ /// TyInfer means the type should be inferred instead of it having been
+ /// specified. This can appear anywhere in a type.
TyInfer,
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum FnStyle {
- UnsafeFn, // declared with "unsafe fn"
- NormalFn, // declared with "fn"
+ /// Declared with "unsafe fn"
+ UnsafeFn,
+ /// Declared with "fn"
+ NormalFn,
}
impl fmt::Show for FnStyle {
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum RetStyle {
- NoReturn, // functions with return type _|_ that always
- // raise an error or exit (i.e. never return to the caller)
- Return, // everything else
+ /// Functions with return type ! that always
+ /// raise an error or exit (i.e. never return to the caller)
+ NoReturn,
+ /// Everything else
+ Return,
}
/// Represents the kind of 'self' associated with a method
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum ExplicitSelf_ {
- SelfStatic, // no self
- SelfValue(Ident), // `self`
- SelfRegion(Option<Lifetime>, Mutability, Ident), // `&'lt self`, `&'lt mut self`
- SelfUniq(Ident), // `~self`
+ /// No self
+ SelfStatic,
+ /// `self`
+ SelfValue(Ident),
+ /// `&'lt self`, `&'lt mut self`
+ SelfRegion(Option<Lifetime>, Mutability, Ident),
+ /// `~self`
+ SelfUniq(Ident)
}
pub type ExplicitSelf = Spanned<ExplicitSelf_>;
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum ViewPath_ {
- // quux = foo::bar::baz
- //
- // or just
- //
- // foo::bar::baz (with 'baz =' implicitly on the left)
+ /// `quux = foo::bar::baz`
+ ///
+ /// or just
+ ///
+ /// `foo::bar::baz` (with 'baz =' implicitly on the left)
ViewPathSimple(Ident, Path, NodeId),
- // foo::bar::*
+ /// `foo::bar::*`
ViewPathGlob(Path, NodeId),
- // foo::bar::{a,b,c}
+ /// `foo::bar::{a,b,c}`
ViewPathList(Path, Vec<PathListIdent> , NodeId)
}
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum ViewItem_ {
- // ident: name used to refer to this crate in the code
- // optional (InternedString,StrStyle): if present, this is a location
- // (containing arbitrary characters) from which to fetch the crate sources
- // For example, extern crate whatever = "github.com/rust-lang/rust"
+ /// Ident: name used to refer to this crate in the code
+ /// optional (InternedString,StrStyle): if present, this is a location
+ /// (containing arbitrary characters) from which to fetch the crate sources
+ /// For example, extern crate whatever = "github.com/rust-lang/rust"
ViewItemExternCrate(Ident, Option<(InternedString,StrStyle)>, NodeId),
ViewItemUse(Gc<ViewPath>),
}
-// Meta-data associated with an item
+/// Meta-data associated with an item
pub type Attribute = Spanned<Attribute_>;
-// Distinguishes between Attributes that decorate items and Attributes that
-// are contained as statements within items. These two cases need to be
-// distinguished for pretty-printing.
+/// Distinguishes between Attributes that decorate items and Attributes that
+/// are contained as statements within items. These two cases need to be
+/// distinguished for pretty-printing.
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum AttrStyle {
AttrOuter,
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct AttrId(pub uint);
-// doc-comments are promoted to attributes that have is_sugared_doc = true
+/// Doc-comments are promoted to attributes that have is_sugared_doc = true
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct Attribute_ {
pub id: AttrId,
pub is_sugared_doc: bool,
}
-/*
- TraitRef's appear in impls.
- resolve maps each TraitRef's ref_id to its defining trait; that's all
- that the ref_id is for. The impl_id maps to the "self type" of this impl.
- If this impl is an ItemImpl, the impl_id is redundant (it could be the
- same as the impl's node id).
- */
+
+/// TraitRef's appear in impls.
+/// resolve maps each TraitRef's ref_id to its defining trait; that's all
+/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
+/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
+/// same as the impl's node id).
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct TraitRef {
pub path: Path,
#[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum StructFieldKind {
NamedField(Ident, Visibility),
- UnnamedField(Visibility), // element of a tuple-like struct
+ /// Element of a tuple-like struct
+ UnnamedField(Visibility),
}
impl StructFieldKind {
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub struct StructDef {
- pub fields: Vec<StructField>, /* fields, not including ctor */
- /* ID of the constructor. This is only used for tuple- or enum-like
- * structs. */
+ /// Fields, not including ctor
+ pub fields: Vec<StructField>,
+ /// ID of the constructor. This is only used for tuple- or enum-like
+ /// structs.
pub ctor_id: Option<NodeId>,
- pub super_struct: Option<P<Ty>>, // Super struct, if specified.
- pub is_virtual: bool, // True iff the struct may be inherited from.
+ /// Super struct, if specified.
+ pub super_struct: Option<P<Ty>>,
+ /// True iff the struct may be inherited from.
+ pub is_virtual: bool,
}
/*
Option<TraitRef>, // (optional) trait this impl implements
P<Ty>, // self
Vec<Gc<Method>>),
- // a macro invocation (which includes macro definition)
+ /// A macro invocation (which includes macro definition)
ItemMac(Mac),
}
ForeignItemStatic(P<Ty>, /* is_mutbl */ bool),
}
-// The data we save and restore about an inlined item or method. This is not
-// part of the AST that we parse from a file, but it becomes part of the tree
-// that we trans.
+/// The data we save and restore about an inlined item or method. This is not
+/// part of the AST that we parse from a file, but it becomes part of the tree
+/// that we trans.
#[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
pub enum InlinedItem {
IIItem(Gc<Item>),
NodeLifetime(Gc<Lifetime>),
}
-// The odd layout is to bring down the total size.
+/// The odd layout is to bring down the total size.
#[deriving(Clone)]
enum MapEntry {
- // Placeholder for holes in the map.
+ /// Placeholder for holes in the map.
NotPresent,
- // All the node types, with a parent ID.
+ /// All the node types, with a parent ID.
EntryItem(NodeId, Gc<Item>),
EntryForeignItem(NodeId, Gc<ForeignItem>),
EntryTraitMethod(NodeId, Gc<TraitMethod>),
EntryStructCtor(NodeId, Gc<StructDef>),
EntryLifetime(NodeId, Gc<Lifetime>),
- // Roots for node trees.
+ /// Roots for node trees.
RootCrate,
RootInlinedParent(P<InlinedParent>)
}
struct InlinedParent {
path: Vec<PathElem> ,
- // Required by NodeTraitMethod and NodeMethod.
+ /// Required by NodeTraitMethod and NodeMethod.
def_id: DefId
}
ItemForeignMod(ref nm) => Some(nm.abi),
_ => None
},
// Wrong but OK, because the only inlined foreign items are intrinsics.
Some(RootInlinedParent(_)) => Some(abi::RustIntrinsic),
_ => None
};
pub struct Ctx<'a, F> {
map: &'a Map,
- // The node in which we are currently mapping (an item or a method).
- // When equal to DUMMY_NODE_ID, the next mapped node becomes the parent.
+ /// The node in which we are currently mapping (an item or a method).
+ /// When equal to DUMMY_NODE_ID, the next mapped node becomes the parent.
parent: NodeId,
fold_ops: F
}
(krate, map)
}
-// Used for items loaded from external crate that are being inlined into this
-// crate. The `path` should be the path to the item but should not include
-// the item itself.
+/// Used for items loaded from external crate that are being inlined into this
+/// crate. The `path` should be the path to the item but should not include
+/// the item itself.
pub fn map_decoded_item<F: FoldOps>(map: &Map,
path: Vec<PathElem> ,
fold_ops: F,
return match e.node { ExprPath(_) => true, _ => false };
}
-// Get a string representation of a signed int type, with its value.
-// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
+/// Get a string representation of a signed int type, with its value.
+/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
let s = match t {
TyI if val.is_some() => "i",
}
}
-// Get a string representation of an unsigned int type, with its value.
-// We want to avoid "42uint" in favor of "42u"
+/// Get a string representation of an unsigned int type, with its value.
+/// We want to avoid "42uint" in favor of "42u"
pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
let s = match t {
TyU if val.is_some() => "u",
}).collect()
}
-// extract a TypeMethod from a TraitMethod. if the TraitMethod is
-// a default, pull out the useful fields to make a TypeMethod
+/// Extract a TypeMethod from a TraitMethod. If the TraitMethod is
+/// a default, pull out the useful fields to make a TypeMethod
pub fn trait_method_to_ty_method(method: &TraitMethod) -> TypeMethod {
match *method {
Required(ref m) => (*m).clone(),
}
}
-// Returns true if this literal is a string and false otherwise.
+/// Returns true if this literal is a string and false otherwise.
pub fn lit_is_str(lit: Gc<Lit>) -> bool {
match lit.node {
LitStr(..) => true,
/// #[foo="bar"] and #[foo(bar)]
fn name(&self) -> InternedString;
- /**
- * Gets the string value if self is a MetaNameValue variant
- * containing a string, otherwise None.
- */
+ /// Gets the string value if self is a MetaNameValue variant
+ /// containing a string, otherwise None.
fn value_str(&self) -> Option<InternedString>;
/// Gets a list of inner meta items from a list MetaItem type.
fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]>;
}
-/**
- * Fold this over attributes to parse #[repr(...)] forms.
- *
- * Valid repr contents: any of the primitive integral type names (see
- * `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
- * the same discriminant size that the corresponding C enum would. These are
- * not allowed on univariant or zero-variant enums, which have no discriminant.
- *
- * If a discriminant type is so specified, then the discriminant will be
- * present (before fields, if any) with that type; reprensentation
- * optimizations which would remove it will not be done.
- */
+/// Fold this over attributes to parse #[repr(...)] forms.
+///
+/// Valid repr contents: any of the primitive integral type names (see
+/// `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
+/// the same discriminant size that the corresponding C enum would. These are
+/// not allowed on univariant or zero-variant enums, which have no discriminant.
+///
+/// If a discriminant type is so specified, then the discriminant will be
+/// present (before fields, if any) with that type; representation
+/// optimizations which would remove it will not be done.
pub fn find_repr_attr(diagnostic: &SpanHandler, attr: &Attribute, acc: ReprAttr)
-> ReprAttr {
let mut acc = acc;
}
impl FileMap {
- // EFFECT: register a start-of-line offset in the
- // table of line-beginnings.
- // UNCHECKED INVARIANT: these offsets must be added in the right
- // order and must be in the right places; there is shared knowledge
- // about what ends a line between this file and parse.rs
- // WARNING: pos param here is the offset relative to start of CodeMap,
- // and CodeMap will append a newline when adding a filemap without a newline at the end,
- // so the safe way to call this is with value calculated as
- // filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
+ /// EFFECT: register a start-of-line offset in the
+ /// table of line-beginnings.
+ /// UNCHECKED INVARIANT: these offsets must be added in the right
+ /// order and must be in the right places; there is shared knowledge
+ /// about what ends a line between this file and parse.rs
+ /// WARNING: pos param here is the offset relative to start of CodeMap,
+ /// and CodeMap will append a newline when adding a filemap without a newline at the end,
+ /// so the safe way to call this is with value calculated as
+ /// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();;
lines.push(pos);
}
- // get a line from the list of pre-computed line-beginnings
+ /// Get a line from the list of pre-computed line-beginnings
pub fn get_line(&self, line: int) -> String {
let mut lines = self.lines.borrow_mut();
let begin: BytePos = *lines.get(line as uint) - self.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
- // Converts an absolute BytePos to a CharPos relative to the filemap and above.
+ /// Converts an absolute BytePos to a CharPos relative to the filemap and above.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
debug!("codemap: converting {:?} to char pos", bpos);
let idx = self.lookup_filemap_idx(bpos);
use term::WriterWrapper;
use term;
-// maximum number of lines we will print for each error; arbitrary.
+/// Maximum number of lines we will print for each error; arbitrary.
static MAX_LINES: uint = 6u;
#[deriving(Clone)]
/// or `.span_bug` rather than a failed assertion, etc.
pub struct ExplicitBug;
-// a span-handler is like a handler but also
-// accepts span information for source-location
-// reporting.
+/// A span-handler is like a handler but also
+/// accepts span information for source-location
+/// reporting.
pub struct SpanHandler {
pub handler: Handler,
pub cm: codemap::CodeMap,
}
}
-// a handler deals with errors; certain errors
-// (fatal, bug, unimpl) may cause immediate exit,
-// others log errors for later reporting.
+/// A handler deals with errors; certain errors
+/// (fatal, bug, unimpl) may cause immediate exit,
+/// others log errors for later reporting.
pub struct Handler {
err_count: Cell<uint>,
emit: RefCell<Box<Emitter + Send>>,
Ok(())
}
-// Here are the differences between this and the normal `highlight_lines`:
-// `custom_highlight_lines` will always put arrow on the last byte of the
-// span (instead of the first byte). Also, when the span is too long (more
-// than 6 lines), `custom_highlight_lines` will print the first line, then
-// dot dot dot, then last line, whereas `highlight_lines` prints the first
-// six lines.
+/// Here are the differences between this and the normal `highlight_lines`:
+/// `custom_highlight_lines` will always put arrow on the last byte of the
+/// span (instead of the first byte). Also, when the span is too long (more
+/// than 6 lines), `custom_highlight_lines` will print the first line, then
+/// dot dot dot, then last line, whereas `highlight_lines` prints the first
+/// six lines.
fn custom_highlight_lines(w: &mut EmitterWriter,
cm: &codemap::CodeMap,
sp: Span,
pub type NamedSyntaxExtension = (Name, SyntaxExtension);
pub struct BlockInfo {
- // should macros escape from this scope?
+ /// Should macros escape from this scope?
pub macros_escape: bool,
- // what are the pending renames?
+ /// What are the pending renames?
pub pending_renames: mtwt::RenameList,
}
}
}
-// The base map of methods for expanding syntax extension
-// AST nodes into full ASTs
+/// The base map of methods for expanding syntax extension
+/// AST nodes into full ASTs
pub fn syntax_expander_table() -> SyntaxEnv {
// utility function to simplify creating NormalTT syntax extensions
fn builtin_normal_expander(f: MacroExpanderFn) -> SyntaxExtension {
syntax_expanders
}
-// One of these is made during expansion and incrementally updated as we go;
-// when a macro expansion occurs, the resulting nodes have the backtrace()
-// -> expn_info of their expansion context stored into their span.
+/// One of these is made during expansion and incrementally updated as we go;
+/// when a macro expansion occurs, the resulting nodes have the backtrace()
+/// -> expn_info of their expansion context stored into their span.
pub struct ExtCtxt<'a> {
pub parse_sess: &'a parse::ParseSess,
pub cfg: ast::CrateConfig,
Some(es)
}
-// in order to have some notion of scoping for macros,
-// we want to implement the notion of a transformation
-// environment.
+/// In order to have some notion of scoping for macros,
+/// we want to implement the notion of a transformation
+/// environment.
-// This environment maps Names to SyntaxExtensions.
+/// This environment maps Names to SyntaxExtensions.
//impl question: how to implement it? Initially, the
// env will contain only macros, so it might be painful
map: HashMap<Name, SyntaxExtension>,
}
-// Only generic to make it easy to test
pub struct SyntaxEnv {
chain: Vec<MapChainFrame> ,
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/*!
-
-The compiler code necessary to implement the `#[deriving(Encodable)]`
-(and `Decodable`, in decodable.rs) extension. The idea here is that
-type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`.
-
-For example, a type like:
-
-```ignore
-#[deriving(Encodable, Decodable)]
-struct Node { id: uint }
-```
-
-would generate two implementations like:
-
-```ignore
-impl<S:serialize::Encoder> Encodable<S> for Node {
- fn encode(&self, s: &S) {
- s.emit_struct("Node", 1, || {
- s.emit_field("id", 0, || s.emit_uint(self.id))
- })
- }
-}
-
-impl<D:Decoder> Decodable for node_id {
- fn decode(d: &D) -> Node {
- d.read_struct("Node", 1, || {
- Node {
- id: d.read_field("x".to_string(), 0, || decode(d))
- }
- })
- }
-}
-```
-
-Other interesting scenarios are whe the item has type parameters or
-references other non-built-in types. A type definition like:
-
-```ignore
-#[deriving(Encodable, Decodable)]
-struct spanned<T> { node: T, span: Span }
-```
-
-would yield functions like:
-
-```ignore
- impl<
- S: Encoder,
- T: Encodable<S>
- > spanned<T>: Encodable<S> {
- fn encode<S:Encoder>(s: &S) {
- s.emit_rec(|| {
- s.emit_field("node", 0, || self.node.encode(s));
- s.emit_field("span", 1, || self.span.encode(s));
- })
- }
- }
-
- impl<
- D: Decoder,
- T: Decodable<D>
- > spanned<T>: Decodable<D> {
- fn decode(d: &D) -> spanned<T> {
- d.read_rec(|| {
- {
- node: d.read_field("node".to_string(), 0, || decode(d)),
- span: d.read_field("span".to_string(), 1, || decode(d)),
- }
- })
- }
- }
-```
-*/
+//! The compiler code necessary to implement the `#[deriving(Encodable)]`
+//! (and `Decodable`, in decodable.rs) extension. The idea here is that
+//! type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`.
+//!
+//! For example, a type like:
+//!
+//! ```ignore
+//! #[deriving(Encodable, Decodable)]
+//! struct Node { id: uint }
+//! ```
+//!
+//! would generate two implementations like:
+//!
+//! ```ignore
+//! impl<S:serialize::Encoder> Encodable<S> for Node {
+//! fn encode(&self, s: &S) {
+//! s.emit_struct("Node", 1, || {
+//! s.emit_field("id", 0, || s.emit_uint(self.id))
+//! })
+//! }
+//! }
+//!
+//! impl<D:Decoder> Decodable for node_id {
+//! fn decode(d: &D) -> Node {
+//! d.read_struct("Node", 1, || {
+//! Node {
+//! id: d.read_field("x".to_string(), 0, || decode(d))
+//! }
+//! })
+//! }
+//! }
+//! ```
+//!
+//! Other interesting scenarios are when the item has type parameters or
+//! references other non-built-in types. A type definition like:
+//!
+//! ```ignore
+//! #[deriving(Encodable, Decodable)]
+//! struct spanned<T> { node: T, span: Span }
+//! ```
+//!
+//! would yield functions like:
+//!
+//! ```ignore
+//! impl<
+//! S: Encoder,
+//! T: Encodable<S>
+//! > spanned<T>: Encodable<S> {
+//! fn encode<S:Encoder>(s: &S) {
+//! s.emit_rec(|| {
+//! s.emit_field("node", 0, || self.node.encode(s));
+//! s.emit_field("span", 1, || self.span.encode(s));
+//! })
+//! }
+//! }
+//!
+//! impl<
+//! D: Decoder,
+//! T: Decodable<D>
+//! > spanned<T>: Decodable<D> {
+//! fn decode(d: &D) -> spanned<T> {
+//! d.read_rec(|| {
+//! {
+//! node: d.read_field("node".to_string(), 0, || decode(d)),
+//! span: d.read_field("span".to_string(), 1, || decode(d)),
+//! }
+//! })
+//! }
+//! }
+//! ```
use ast::{MetaItem, Item, Expr, ExprRet, MutMutable, LitNil};
use codemap::Span;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/*!
-
-Some code that abstracts away much of the boilerplate of writing
-`deriving` instances for traits. Among other things it manages getting
-access to the fields of the 4 different sorts of structs and enum
-variants, as well as creating the method and impl ast instances.
-
-Supported features (fairly exhaustive):
-
-- Methods taking any number of parameters of any type, and returning
- any type, other than vectors, bottom and closures.
-- Generating `impl`s for types with type parameters and lifetimes
- (e.g. `Option<T>`), the parameters are automatically given the
- current trait as a bound. (This includes separate type parameters
- and lifetimes for methods.)
-- Additional bounds on the type parameters, e.g. the `Ord` instance
- requires an explicit `PartialEq` bound at the
- moment. (`TraitDef.additional_bounds`)
-
-Unsupported: FIXME #6257: calling methods on reference fields,
-e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`,
-because of how the auto-dereferencing happens.
-
-The most important thing for implementers is the `Substructure` and
-`SubstructureFields` objects. The latter groups 5 possibilities of the
-arguments:
-
-- `Struct`, when `Self` is a struct (including tuple structs, e.g
- `struct T(int, char)`).
-- `EnumMatching`, when `Self` is an enum and all the arguments are the
- same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`)
-- `EnumNonMatching` when `Self` is an enum and the arguments are not
- the same variant (e.g. `None`, `Some(1)` and `None`). If
- `const_nonmatching` is true, this will contain an empty list.
-- `StaticEnum` and `StaticStruct` for static methods, where the type
- being derived upon is either an enum or struct respectively. (Any
- argument with type Self is just grouped among the non-self
- arguments.)
-
-In the first two cases, the values from the corresponding fields in
-all the arguments are grouped together. In the `EnumNonMatching` case
-this isn't possible (different variants have different fields), so the
-fields are grouped by which argument they come from. There are no
-fields with values in the static cases, so these are treated entirely
-differently.
-
-The non-static cases have `Option<ident>` in several places associated
-with field `expr`s. This represents the name of the field it is
-associated with. It is only not `None` when the associated field has
-an identifier in the source code. For example, the `x`s in the
-following snippet
-
-```rust
-struct A { x : int }
-
-struct B(int);
-
-enum C {
- C0(int),
- C1 { x: int }
-}
-```
-
-The `int`s in `B` and `C0` don't have an identifier, so the
-`Option<ident>`s would be `None` for them.
-
-In the static cases, the structure is summarised, either into the just
-spans of the fields or a list of spans and the field idents (for tuple
-structs and record structs, respectively), or a list of these, for
-enums (one for each variant). For empty struct and empty enum
-variants, it is represented as a count of 0.
-
-# Examples
-
-The following simplified `PartialEq` is used for in-code examples:
-
-```rust
-trait PartialEq {
- fn eq(&self, other: &Self);
-}
-impl PartialEq for int {
- fn eq(&self, other: &int) -> bool {
- *self == *other
- }
-}
-```
-
-Some examples of the values of `SubstructureFields` follow, using the
-above `PartialEq`, `A`, `B` and `C`.
-
-## Structs
-
-When generating the `expr` for the `A` impl, the `SubstructureFields` is
-
-~~~text
-Struct(~[FieldInfo {
- span: <span of x>
- name: Some(<ident of x>),
- self_: <expr for &self.x>,
- other: ~[<expr for &other.x]
- }])
-~~~
-
-For the `B` impl, called with `B(a)` and `B(b)`,
-
-~~~text
-Struct(~[FieldInfo {
- span: <span of `int`>,
- name: None,
- <expr for &a>
- ~[<expr for &b>]
- }])
-~~~
-
-## Enums
-
-When generating the `expr` for a call with `self == C0(a)` and `other
-== C0(b)`, the SubstructureFields is
-
-~~~text
-EnumMatching(0, <ast::Variant for C0>,
- ~[FieldInfo {
- span: <span of int>
- name: None,
- self_: <expr for &a>,
- other: ~[<expr for &b>]
- }])
-~~~
-
-For `C1 {x}` and `C1 {x}`,
-
-~~~text
-EnumMatching(1, <ast::Variant for C1>,
- ~[FieldInfo {
- span: <span of x>
- name: Some(<ident of x>),
- self_: <expr for &self.x>,
- other: ~[<expr for &other.x>]
- }])
-~~~
-
-For `C0(a)` and `C1 {x}` ,
-
-~~~text
-EnumNonMatching(~[(0, <ast::Variant for B0>,
- ~[(<span of int>, None, <expr for &a>)]),
- (1, <ast::Variant for B1>,
- ~[(<span of x>, Some(<ident of x>),
- <expr for &other.x>)])])
-~~~
-
-(and vice versa, but with the order of the outermost list flipped.)
-
-## Static
-
-A static method on the above would result in,
-
-~~~text
-StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)]))
-
-StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of x>]))
-
-StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])),
- (<ident of C1>, <span of C1>,
- Named(~[(<ident of x>, <span of x>)]))])
-~~~
-
-*/
+//! Some code that abstracts away much of the boilerplate of writing
+//! `deriving` instances for traits. Among other things it manages getting
+//! access to the fields of the 4 different sorts of structs and enum
+//! variants, as well as creating the method and impl ast instances.
+//!
+//! Supported features (fairly exhaustive):
+//!
+//! - Methods taking any number of parameters of any type, and returning
+//! any type, other than vectors, bottom and closures.
+//! - Generating `impl`s for types with type parameters and lifetimes
+//! (e.g. `Option<T>`), the parameters are automatically given the
+//! current trait as a bound. (This includes separate type parameters
+//! and lifetimes for methods.)
+//! - Additional bounds on the type parameters, e.g. the `Ord` instance
+//! requires an explicit `PartialEq` bound at the
+//! moment. (`TraitDef.additional_bounds`)
+//!
+//! Unsupported: FIXME #6257: calling methods on reference fields,
+//! e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`,
+//! because of how the auto-dereferencing happens.
+//!
+//! The most important thing for implementers is the `Substructure` and
+//! `SubstructureFields` objects. The latter groups 5 possibilities of the
+//! arguments:
+//!
+//! - `Struct`, when `Self` is a struct (including tuple structs, e.g
+//! `struct T(int, char)`).
+//! - `EnumMatching`, when `Self` is an enum and all the arguments are the
+//! same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`)
+//! - `EnumNonMatching` when `Self` is an enum and the arguments are not
+//! the same variant (e.g. `None`, `Some(1)` and `None`). If
+//! `const_nonmatching` is true, this will contain an empty list.
+//! - `StaticEnum` and `StaticStruct` for static methods, where the type
+//! being derived upon is either an enum or struct respectively. (Any
+//! argument with type Self is just grouped among the non-self
+//! arguments.)
+//!
+//! In the first two cases, the values from the corresponding fields in
+//! all the arguments are grouped together. In the `EnumNonMatching` case
+//! this isn't possible (different variants have different fields), so the
+//! fields are grouped by which argument they come from. There are no
+//! fields with values in the static cases, so these are treated entirely
+//! differently.
+//!
+//! The non-static cases have `Option<ident>` in several places associated
+//! with field `expr`s. This represents the name of the field it is
+//! associated with. It is only not `None` when the associated field has
+//! an identifier in the source code. For example, the `x`s in the
+//! following snippet
+//!
+//! ```rust
+//! struct A { x : int }
+//!
+//! struct B(int);
+//!
+//! enum C {
+//! C0(int),
+//! C1 { x: int }
+//! }
+//! ```
+//!
+//! The `int`s in `B` and `C0` don't have an identifier, so the
+//! `Option<ident>`s would be `None` for them.
+//!
+//! In the static cases, the structure is summarised, either into the just
+//! spans of the fields or a list of spans and the field idents (for tuple
+//! structs and record structs, respectively), or a list of these, for
+//! enums (one for each variant). For empty struct and empty enum
+//! variants, it is represented as a count of 0.
+//!
+//! # Examples
+//!
+//! The following simplified `PartialEq` is used for in-code examples:
+//!
+//! ```rust
+//! trait PartialEq {
+//! fn eq(&self, other: &Self);
+//! }
+//! impl PartialEq for int {
+//! fn eq(&self, other: &int) -> bool {
+//! *self == *other
+//! }
+//! }
+//! ```
+//!
+//! Some examples of the values of `SubstructureFields` follow, using the
+//! above `PartialEq`, `A`, `B` and `C`.
+//!
+//! ## Structs
+//!
+//! When generating the `expr` for the `A` impl, the `SubstructureFields` is
+//!
+//! ~~~text
+//! Struct(~[FieldInfo {
+//! span: <span of x>
+//! name: Some(<ident of x>),
+//! self_: <expr for &self.x>,
+//! other: ~[<expr for &other.x>]
+//! }])
+//! ~~~
+//!
+//! For the `B` impl, called with `B(a)` and `B(b)`,
+//!
+//! ~~~text
+//! Struct(~[FieldInfo {
+//! span: <span of `int`>,
+//! name: None,
+//! <expr for &a>
+//! ~[<expr for &b>]
+//! }])
+//! ~~~
+//!
+//! ## Enums
+//!
+//! When generating the `expr` for a call with `self == C0(a)` and `other
+//! == C0(b)`, the SubstructureFields is
+//!
+//! ~~~text
+//! EnumMatching(0, <ast::Variant for C0>,
+//! ~[FieldInfo {
+//! span: <span of int>
+//! name: None,
+//! self_: <expr for &a>,
+//! other: ~[<expr for &b>]
+//! }])
+//! ~~~
+//!
+//! For `C1 {x}` and `C1 {x}`,
+//!
+//! ~~~text
+//! EnumMatching(1, <ast::Variant for C1>,
+//! ~[FieldInfo {
+//! span: <span of x>
+//! name: Some(<ident of x>),
+//! self_: <expr for &self.x>,
+//! other: ~[<expr for &other.x>]
+//! }])
+//! ~~~
+//!
+//! For `C0(a)` and `C1 {x}` ,
+//!
+//! ~~~text
+//! EnumNonMatching(~[(0, <ast::Variant for B0>,
+//! ~[(<span of int>, None, <expr for &a>)]),
+//! (1, <ast::Variant for B1>,
+//! ~[(<span of x>, Some(<ident of x>),
+//! <expr for &other.x>)])])
+//! ~~~
+//!
+//! (and vice versa, but with the order of the outermost list flipped.)
+//!
+//! ## Static
+//!
+//! A static method on the above would result in,
+//!
+//! ~~~text
+//! StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)]))
+//!
+//! StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of x>]))
+//!
+//! StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])),
+//! (<ident of C1>, <span of C1>,
+//! Named(~[(<ident of x>, <span of x>)]))])
+//! ~~~
use std::cell::RefCell;
use std::gc::{Gc, GC};
/// The types of pointers
pub enum PtrTy<'a> {
- Send, // ~
- Borrowed(Option<&'a str>, ast::Mutability), // &['lifetime] [mut]
+ /// ~
+ Send,
+ /// &['lifetime] [mut]
+ Borrowed(Option<&'a str>, ast::Mutability),
}
/// A path, e.g. `::std::option::Option::<int>` (global). Has support
/// A type. Supports pointers (except for *), Self, and literals
pub enum Ty<'a> {
Self,
- // &/Box/ Ty
+ /// &/Box/ Ty
Ptr(Box<Ty<'a>>, PtrTy<'a>),
- // mod::mod::Type<[lifetime], [Params...]>, including a plain type
- // parameter, and things like `int`
+ /// mod::mod::Type<[lifetime], [Params...]>, including a plain type
+ /// parameter, and things like `int`
Literal(Path<'a>),
- // includes nil
+ /// includes unit
Tuple(Vec<Ty<'a>> )
}
trait_def.expand(cx, mitem, item, push)
}
-// we construct a format string and then defer to std::fmt, since that
-// knows what's up with formatting at so on.
+/// We construct a format string and then defer to std::fmt, since that
+/// knows what's up with formatting and so on.
fn show_substructure(cx: &mut ExtCtxt, span: Span,
substr: &Substructure) -> Gc<Expr> {
// build `<name>`, `<name>({}, {}, ...)` or `<name> { <field>: {},
}
}
-// Rename loop label and expand its loop body
-//
-// The renaming procedure for loop is different in the sense that the loop
-// body is in a block enclosed by loop head so the renaming of loop label
-// must be propagated to the enclosed context.
+/// Rename loop label and expand its loop body
+///
+/// The renaming procedure for loop is different in the sense that the loop
+/// body is in a block enclosed by loop head so the renaming of loop label
+/// must be propagated to the enclosed context.
fn expand_loop_block(loop_block: P<Block>,
opt_ident: Option<Ident>,
fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) {
ecx: &'a mut ExtCtxt<'b>,
fmtsp: Span,
- // Parsed argument expressions and the types that we've found so far for
- // them.
+ /// Parsed argument expressions and the types that we've found so far for
+ /// them.
args: Vec<Gc<ast::Expr>>,
arg_types: Vec<Option<ArgumentType>>,
- // Parsed named expressions and the types that we've found for them so far.
- // Note that we keep a side-array of the ordering of the named arguments
- // found to be sure that we can translate them in the same order that they
- // were declared in.
+ /// Parsed named expressions and the types that we've found for them so far.
+ /// Note that we keep a side-array of the ordering of the named arguments
+ /// found to be sure that we can translate them in the same order that they
+ /// were declared in.
names: HashMap<String, Gc<ast::Expr>>,
name_types: HashMap<String, ArgumentType>,
name_ordering: Vec<String>,
- // Collection of the compiled `rt::Piece` structures
+ /// Collection of the compiled `rt::Piece` structures
pieces: Vec<Gc<ast::Expr>>,
name_positions: HashMap<String, uint>,
method_statics: Vec<Gc<ast::Item>>,
- // Updated as arguments are consumed or methods are entered
+ /// Updated as arguments are consumed or methods are entered
nest_level: uint,
next_arg: uint,
}
use std::rc::Rc;
use std::collections::HashMap;
-// the SCTable contains a table of SyntaxContext_'s. It
-// represents a flattened tree structure, to avoid having
-// managed pointers everywhere (that caused an ICE).
-// the mark_memo and rename_memo fields are side-tables
-// that ensure that adding the same mark to the same context
-// gives you back the same context as before. This shouldn't
-// change the semantics--everything here is immutable--but
-// it should cut down on memory use *a lot*; applying a mark
-// to a tree containing 50 identifiers would otherwise generate
-// 50 new contexts
+/// The SCTable contains a table of SyntaxContext_'s. It
+/// represents a flattened tree structure, to avoid having
+/// managed pointers everywhere (that caused an ICE).
+/// the mark_memo and rename_memo fields are side-tables
+/// that ensure that adding the same mark to the same context
+/// gives you back the same context as before. This shouldn't
+/// change the semantics--everything here is immutable--but
+/// it should cut down on memory use *a lot*; applying a mark
+/// to a tree containing 50 identifiers would otherwise generate
+/// 50 new contexts
pub struct SCTable {
table: RefCell<Vec<SyntaxContext_>>,
mark_memo: RefCell<HashMap<(SyntaxContext,Mrk),SyntaxContext>>,
pub enum SyntaxContext_ {
EmptyCtxt,
Mark (Mrk,SyntaxContext),
- // flattening the name and syntaxcontext into the rename...
- // HIDDEN INVARIANTS:
- // 1) the first name in a Rename node
- // can only be a programmer-supplied name.
- // 2) Every Rename node with a given Name in the
- // "to" slot must have the same name and context
- // in the "from" slot. In essence, they're all
- // pointers to a single "rename" event node.
+ /// flattening the name and syntaxcontext into the rename...
+ /// HIDDEN INVARIANTS:
+ /// 1) the first name in a Rename node
+ /// can only be a programmer-supplied name.
+ /// 2) Every Rename node with a given Name in the
+ /// "to" slot must have the same name and context
+ /// in the "from" slot. In essence, they're all
+ /// pointers to a single "rename" event node.
Rename (Ident,Name,SyntaxContext),
- // actually, IllegalCtxt may not be necessary.
+ /// actually, IllegalCtxt may not be necessary.
IllegalCtxt
}
with_sctable(|table| apply_mark_internal(m, ctxt, table))
}
-// Extend a syntax context with a given mark and sctable (explicit memoization)
+/// Extend a syntax context with a given mark and sctable (explicit memoization)
fn apply_mark_internal(m: Mrk, ctxt: SyntaxContext, table: &SCTable) -> SyntaxContext {
let key = (ctxt, m);
let new_ctxt = |_: &(SyntaxContext, Mrk)|
with_sctable(|table| apply_rename_internal(id, to, ctxt, table))
}
-// Extend a syntax context with a given rename and sctable (explicit memoization)
+/// Extend a syntax context with a given rename and sctable (explicit memoization)
fn apply_rename_internal(id: Ident,
to: Name,
ctxt: SyntaxContext,
with_resolve_table_mut(|table| *table = HashMap::new());
}
-// Add a value to the end of a vec, return its index
+/// Add a value to the end of a vec, return its index
fn idx_push<T>(vec: &mut Vec<T> , val: T) -> u32 {
vec.push(val);
(vec.len() - 1) as u32
}
}
-// Resolve a syntax object to a name, per MTWT.
-// adding memoization to resolve 500+ seconds in resolve for librustc (!)
+/// Resolve a syntax object to a name, per MTWT.
+/// adding memoization to resolve 500+ seconds in resolve for librustc (!)
fn resolve_internal(id: Ident,
table: &SCTable,
resolve_table: &mut ResolveTable) -> Name {
})
}
-// Push a name... unless it matches the one on top, in which
-// case pop and discard (so two of the same marks cancel)
+/// Push a name... unless it matches the one on top, in which
+/// case pop and discard (so two of the same marks cancel)
fn xor_push(marks: &mut Vec<Mrk>, mark: Mrk) {
if (marks.len() > 0) && (*marks.last().unwrap() == mark) {
marks.pop().unwrap();
// the column/row/filename of the expression, or they include
// a given file into the current one.
-/* line!(): expands to the current line number */
+/// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
base::check_zero_tts(cx, sp, tts, "line!");
base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint()))
}
-/* file!(): expands to the current filename */
-/* The filemap (`loc.file`) contains a bunch more information we could spit
- * out if we wanted. */
+/// file!(): expands to the current filename
+/// The filemap (`loc.file`) contains a bunch more information we could spit
+/// out if we wanted.
pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
base::check_zero_tts(cx, sp, tts, "file!");
token::intern_and_get_ident(string.as_slice())))
}
-// include! : parse the given file as an expr
-// This is generally a bad idea because it's going to behave
-// unhygienically.
+/// include! : parse the given file as an expr
+/// This is generally a bad idea because it's going to behave
+/// unhygienically.
pub fn expand_include(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
-> Box<base::MacResult> {
let file = match get_single_str_from_tts(cx, sp, tts, "include!") {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// Earley-like parser for macros.
+//! This is an Earley-like parser, without support for in-grammar nonterminals,
+//! only by calling out to the main rust parser for named nonterminals (which it
+//! commits to fully when it hits one in a grammar). This means that there are no
+//! completer or predictor rules, and therefore no need to store one column per
+//! token: instead, there's a set of current Earley items and a set of next
+//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
+//! pathological cases, is worse than traditional Earley parsing, but it's an
+//! easier fit for Macro-by-Example-style rules, and I think the overhead is
+//! lower. (In order to prevent the pathological case, we'd need to lazily
+//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
+//! and require more memory to keep around old items, but it would also save
+//! overhead)
+//!
+//! Quick intro to how the parser works:
+//!
+//! A 'position' is a dot in the middle of a matcher, usually represented as a
+//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
+//!
+//! The parser walks through the input a character at a time, maintaining a list
+//! of items consistent with the current position in the input string: `cur_eis`.
+//!
+//! As it processes them, it fills up `eof_eis` with items that would be valid if
+//! the macro invocation is now over, `bb_eis` with items that are waiting on
+//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
+//! on a particular token. Most of the logic concerns moving the · through the
+//! repetitions indicated by Kleene stars. It only advances or calls out to the
+//! real Rust parser when no `cur_eis` items remain
+//!
+//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
+//!
+//! Remaining input: `a a a a b`
+//! next_eis: [· a $( a )* a b]
+//!
+//! - - - Advance over an `a`. - - -
+//!
+//! Remaining input: `a a a b`
+//! cur: [a · $( a )* a b]
+//! Descend/Skip (first item).
+//! next: [a $( · a )* a b] [a $( a )* · a b].
+//!
+//! - - - Advance over an `a`. - - -
+//!
+//! Remaining input: `a a b`
+//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
+//! Finish/Repeat (first item)
+//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
+//!
+//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
+//!
+//! Remaining input: `a b`
+//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
+//! Finish/Repeat (first item)
+//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
+//!
+//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
+//!
+//! Remaining input: `b`
+//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
+//! Finish/Repeat (first item)
+//! next: [a $( a )* · a b] [a $( · a )* a b]
+//!
+//! - - - Advance over a `b`. - - -
+//!
+//! Remaining input: ``
+//! eof: [a $( a )* a b ·]
+
use ast;
use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident};
use std::gc::GC;
use std::collections::HashMap;
-/* This is an Earley-like parser, without support for in-grammar nonterminals,
-only by calling out to the main rust parser for named nonterminals (which it
-commits to fully when it hits one in a grammar). This means that there are no
-completer or predictor rules, and therefore no need to store one column per
-token: instead, there's a set of current Earley items and a set of next
-ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
-pathological cases, is worse than traditional Earley parsing, but it's an
-easier fit for Macro-by-Example-style rules, and I think the overhead is
-lower. (In order to prevent the pathological case, we'd need to lazily
-construct the resulting `NamedMatch`es at the very end. It'd be a pain,
-and require more memory to keep around old items, but it would also save
-overhead)*/
-
-/* Quick intro to how the parser works:
-
-A 'position' is a dot in the middle of a matcher, usually represented as a
-dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
-
-The parser walks through the input a character at a time, maintaining a list
-of items consistent with the current position in the input string: `cur_eis`.
-
-As it processes them, it fills up `eof_eis` with items that would be valid if
-the macro invocation is now over, `bb_eis` with items that are waiting on
-a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
-on the a particular token. Most of the logic concerns moving the · through the
-repetitions indicated by Kleene stars. It only advances or calls out to the
-real Rust parser when no `cur_eis` items remain
-
-Example: Start parsing `a a a a b` against [· a $( a )* a b].
-
-Remaining input: `a a a a b`
-next_eis: [· a $( a )* a b]
-
-- - - Advance over an `a`. - - -
-
-Remaining input: `a a a b`
-cur: [a · $( a )* a b]
-Descend/Skip (first item).
-next: [a $( · a )* a b] [a $( a )* · a b].
-
-- - - Advance over an `a`. - - -
-
-Remaining input: `a a b`
-cur: [a $( a · )* a b] next: [a $( a )* a · b]
-Finish/Repeat (first item)
-next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
-
-- - - Advance over an `a`. - - - (this looks exactly like the last step)
-
-Remaining input: `a b`
-cur: [a $( a · )* a b] next: [a $( a )* a · b]
-Finish/Repeat (first item)
-next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
-
-- - - Advance over an `a`. - - - (this looks exactly like the last step)
-
-Remaining input: `b`
-cur: [a $( a · )* a b] next: [a $( a )* a · b]
-Finish/Repeat (first item)
-next: [a $( a )* · a b] [a $( · a )* a b]
-
-- - - Advance over a `b`. - - -
-
-Remaining input: ``
-eof: [a $( a )* a b ·]
-
- */
-
-
/* to avoid costly uniqueness checks, we require that `MatchSeq` always has a
nonempty body. */
}
}
-// NamedMatch is a pattern-match result for a single ast::MatchNonterminal:
-// so it is associated with a single ident in a parse, and all
-// MatchedNonterminal's in the NamedMatch have the same nonterminal type
-// (expr, item, etc). All the leaves in a single NamedMatch correspond to a
-// single matcher_nonterminal in the ast::Matcher that produced it.
-//
-// It should probably be renamed, it has more or less exact correspondence to
-// ast::match nodes, and the in-memory structure of a particular NamedMatch
-// represents the match that occurred when a particular subset of an
-// ast::match -- those ast::Matcher nodes leading to a single
-// MatchNonterminal -- was applied to a particular token tree.
-//
-// The width of each MatchedSeq in the NamedMatch, and the identity of the
-// MatchedNonterminal's, will depend on the token tree it was applied to: each
-// MatchedSeq corresponds to a single MatchSeq in the originating
-// ast::Matcher. The depth of the NamedMatch structure will therefore depend
-// only on the nesting depth of ast::MatchSeq's in the originating
-// ast::Matcher it was derived from.
+/// NamedMatch is a pattern-match result for a single ast::MatchNonterminal:
+/// so it is associated with a single ident in a parse, and all
+/// MatchedNonterminal's in the NamedMatch have the same nonterminal type
+/// (expr, item, etc). All the leaves in a single NamedMatch correspond to a
+/// single matcher_nonterminal in the ast::Matcher that produced it.
+///
+/// It should probably be renamed, it has more or less exact correspondence to
+/// ast::match nodes, and the in-memory structure of a particular NamedMatch
+/// represents the match that occurred when a particular subset of an
+/// ast::match -- those ast::Matcher nodes leading to a single
+/// MatchNonterminal -- was applied to a particular token tree.
+///
+/// The width of each MatchedSeq in the NamedMatch, and the identity of the
+/// MatchedNonterminal's, will depend on the token tree it was applied to: each
+/// MatchedSeq corresponds to a single MatchSeq in the originating
+/// ast::Matcher. The depth of the NamedMatch structure will therefore depend
+/// only on the nesting depth of ast::MatchSeq's in the originating
+/// ast::Matcher it was derived from.
pub enum NamedMatch {
MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),
}
}
-// perform a token equality check, ignoring syntax context (that is, an unhygienic comparison)
+/// Perform a token equality check, ignoring syntax context (that is, an
+/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
match (t1,t2) {
(&token::IDENT(id1,_),&token::IDENT(id2,_))
}
}
-// Given `lhses` and `rhses`, this is the new macro we create
+/// Given `lhses` and `rhses`, this is the new macro we create
fn generic_extension(cx: &ExtCtxt,
sp: Span,
name: Ident,
cx.span_fatal(best_fail_spot, best_fail_msg.as_slice());
}
-// this procedure performs the expansion of the
-// macro_rules! macro. It parses the RHS and adds
-// an extension to the current context.
+/// This procedure performs the expansion of the
+/// macro_rules! macro. It parses the RHS and adds
+/// an extension to the current context.
pub fn add_new_extension(cx: &mut ExtCtxt,
sp: Span,
name: Ident,
#[deriving(Clone)]
pub struct TtReader<'a> {
pub sp_diag: &'a SpanHandler,
- // the unzipped tree:
+ /// the unzipped tree:
stack: Vec<TtFrame>,
/* for MBE-style macro transcription */
interpolations: HashMap<Ident, Rc<NamedMatch>>,
pub cur_span: Span,
}
-/** This can do Macro-By-Example transcription. On the other hand, if
- * `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and
- * should) be none. */
+/// This can do Macro-By-Example transcription. On the other hand, if
+/// `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and
+/// should) be none.
pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
interp: Option<HashMap<Ident, Rc<NamedMatch>>>,
src: Vec<ast::TokenTree> )
}
}
-// return the next token from the TtReader.
-// EFFECT: advances the reader's token field
+/// Return the next token from the TtReader.
+/// EFFECT: advances the reader's token field
pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
// FIXME(pcwalton): Bad copy?
let ret_val = TokenAndSpan {
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/*!
-
-The Rust parser and macro expander.
-
-# Note
-
-This API is completely unstable and subject to change.
-
-*/
+//! The Rust parser and macro expander.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
#![crate_id = "syntax#0.11.0"] // NOTE: remove after stage0
#![crate_name = "syntax"]
use std::gc::{Gc, GC};
-// a parser that can parse attributes.
+/// A parser that can parse attributes.
pub trait ParserAttr {
fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>;
fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute;
}
impl<'a> ParserAttr for Parser<'a> {
- // Parse attributes that appear before an item
+ /// Parse attributes that appear before an item
fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> {
let mut attrs: Vec<ast::Attribute> = Vec::new();
loop {
return attrs;
}
- // matches attribute = # ! [ meta_item ]
- //
- // if permit_inner is true, then a leading `!` indicates an inner
- // attribute
+ /// Matches `attribute = # ! [ meta_item ]`
+ ///
+ /// If permit_inner is true, then a leading `!` indicates an inner
+ /// attribute
fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute {
debug!("parse_attributes: permit_inner={:?} self.token={:?}",
permit_inner, self.token);
};
}
- // Parse attributes that appear after the opening of an item. These should
- // be preceded by an exclamation mark, but we accept and warn about one
- // terminated by a semicolon. In addition to a vector of inner attributes,
- // this function also returns a vector that may contain the first outer
- // attribute of the next item (since we can't know whether the attribute
- // is an inner attribute of the containing item or an outer attribute of
- // the first contained item until we see the semi).
-
- // matches inner_attrs* outer_attr?
- // you can make the 'next' field an Option, but the result is going to be
- // more useful as a vector.
+ /// Parse attributes that appear after the opening of an item. These should
+ /// be preceded by an exclamation mark, but we accept and warn about one
+ /// terminated by a semicolon. In addition to a vector of inner attributes,
+ /// this function also returns a vector that may contain the first outer
+ /// attribute of the next item (since we can't know whether the attribute
+ /// is an inner attribute of the containing item or an outer attribute of
+ /// the first contained item until we see the semi).
+ ///
+ /// matches inner_attrs* outer_attr?
+ /// you can make the 'next' field an Option, but the result is going to be
+ /// more useful as a vector.
fn parse_inner_attrs_and_next(&mut self)
-> (Vec<ast::Attribute> , Vec<ast::Attribute> ) {
let mut inner_attrs: Vec<ast::Attribute> = Vec::new();
(inner_attrs, next_outer_attrs)
}
- // matches meta_item = IDENT
- // | IDENT = lit
- // | IDENT meta_seq
+ /// matches meta_item = IDENT
+ /// | IDENT = lit
+ /// | IDENT meta_seq
fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> {
match self.token {
token::INTERPOLATED(token::NtMeta(e)) => {
}
}
- // matches meta_seq = ( COMMASEP(meta_item) )
+ /// matches meta_seq = ( COMMASEP(meta_item) )
fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> {
self.parse_seq(&token::LPAREN,
&token::RPAREN,
use ast;
use std::gc::Gc;
-// does this expression require a semicolon to be treated
-// as a statement? The negation of this: 'can this expression
-// be used as a statement without a semicolon' -- is used
-// as an early-bail-out in the parser so that, for instance,
-// 'if true {...} else {...}
-// |x| 5 '
-// isn't parsed as (if true {...} else {...} | x) | 5
+/// Does this expression require a semicolon to be treated
+/// as a statement? The negation of this: 'can this expression
+/// be used as a statement without a semicolon' -- is used
+/// as an early-bail-out in the parser so that, for instance,
+/// if true {...} else {...}
+/// |x| 5
+/// isn't parsed as (if true {...} else {...} | x) | 5
pub fn expr_requires_semi_to_be_stmt(e: Gc<ast::Expr>) -> bool {
match e.node {
ast::ExprIf(..)
}
}
-// this statement requires a semicolon after it.
-// note that in one case (stmt_semi), we've already
-// seen the semicolon, and thus don't need another.
+/// This statement requires a semicolon after it.
+/// Note that in one case (stmt_semi), we've already
+/// seen the semicolon, and thus don't need another.
pub fn stmt_ends_with_semi(stmt: &ast::Stmt) -> bool {
return match stmt.node {
ast::StmtDecl(d, _) => {
use parse::token;
-// SeqSep : a sequence separator (token)
-// and whether a trailing separator is allowed.
+/// SeqSep : a sequence separator (token)
+/// and whether a trailing separator is allowed.
pub struct SeqSep {
pub sep: Option<token::Token>,
pub trailing_sep_allowed: bool
#[deriving(Clone, PartialEq)]
pub enum CommentStyle {
- Isolated, // No code on either side of each line of the comment
- Trailing, // Code exists to the left of the comment
- Mixed, // Code before /* foo */ and after the comment
- BlankLine, // Just a manual blank line "\n\n", for layout
+ /// No code on either side of each line of the comment
+ Isolated,
+ /// Code exists to the left of the comment
+ Trailing,
+ /// Code before /* foo */ and after the comment
+ Mixed,
+ /// Just a manual blank line "\n\n", for layout
+ BlankLine,
}
#[deriving(Clone)]
}
}
-// Returns None if the first col chars of s contain a non-whitespace char.
-// Otherwise returns Some(k) where k is first char offset after that leading
-// whitespace. Note k may be outside bounds of s.
+/// Returns None if the first col chars of s contain a non-whitespace char.
+/// Otherwise returns Some(k) where k is first char offset after that leading
+/// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
let len = s.len();
let mut col = col.to_uint();
pub struct StringReader<'a> {
pub span_diagnostic: &'a SpanHandler,
- // The absolute offset within the codemap of the next character to read
+ /// The absolute offset within the codemap of the next character to read
pub pos: BytePos,
- // The absolute offset within the codemap of the last character read(curr)
+ /// The absolute offset within the codemap of the last character read(curr)
pub last_pos: BytePos,
- // The column of the next character to read
+ /// The column of the next character to read
pub col: CharPos,
- // The last character to be read
+ /// The last character to be read
pub curr: Option<char>,
pub filemap: Rc<codemap::FileMap>,
/* cached: */
impl<'a> Reader for StringReader<'a> {
fn is_eof(&self) -> bool { self.curr.is_none() }
- // return the next token. EFFECT: advances the string_reader.
+ /// Return the next token. EFFECT: advances the string_reader.
fn next_token(&mut self) -> TokenAndSpan {
let ret_val = TokenAndSpan {
tok: replace(&mut self.peek_tok, token::UNDERSCORE),
return self.consume_any_line_comment();
}
- // might return a sugared-doc-attr
+ /// Might return a sugared-doc-attr
fn consume_block_comment(&mut self) -> Option<TokenAndSpan> {
// block comments starting with "/**" or "/*!" are doc-comments
let is_doc_comment = self.curr_is('*') || self.curr_is('!');
//! The main parser interface
-
use ast;
use codemap::{Span, CodeMap, FileMap};
use diagnostic::{SpanHandler, mk_span_handler, default_handler, Auto};
pub mod classify;
pub mod obsolete;
-// info about a parsing session.
+/// Info about a parsing session.
pub struct ParseSess {
pub span_diagnostic: SpanHandler, // better be the same as the one in the reader!
/// Used to determine and report recursive mod inclusions
unreachable!()
}
-// given a session and a string, add the string to
-// the session's codemap and return the new filemap
+/// Given a session and a string, add the string to
+/// the session's codemap and return the new filemap
pub fn string_to_filemap(sess: &ParseSess, source: String, path: String)
-> Rc<FileMap> {
sess.span_diagnostic.cm.new_filemap(path, source)
}
-// given a filemap, produce a sequence of token-trees
+/// Given a filemap, produce a sequence of token-trees
pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>)
-> Vec<ast::TokenTree> {
// it appears to me that the cfg doesn't matter here... indeed,
p1.parse_all_token_trees()
}
-// given tts and cfg, produce a parser
+/// Given tts and cfg, produce a parser
pub fn tts_to_parser<'a>(sess: &'a ParseSess,
tts: Vec<ast::TokenTree>,
cfg: ast::CrateConfig) -> Parser<'a> {
Parser::new(sess, cfg, box trdr)
}
-// abort if necessary
+/// Abort if necessary
pub fn maybe_aborted<T>(result: T, mut p: Parser) -> T {
p.abort_if_errors();
result
pub trait ParserObsoleteMethods {
/// Reports an obsolete syntax non-fatal error.
fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax);
- // Reports an obsolete syntax non-fatal error, and returns
- // a placeholder expression
+ /// Reports an obsolete syntax non-fatal error, and returns
+ /// a placeholder expression
fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr>;
fn report(&mut self,
sp: Span,
self.report(sp, kind, kind_str, desc);
}
- // Reports an obsolete syntax non-fatal error, and returns
- // a placeholder expression
+ /// Reports an obsolete syntax non-fatal error, and returns
+ /// a placeholder expression
fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr> {
self.obsolete(sp, kind);
self.mk_expr(sp.lo, sp.hi, ExprLit(box(GC) respan(sp, LitNil)))
}
enum ItemOrViewItem {
- // Indicates a failure to parse any kind of item. The attributes are
- // returned.
+ /// Indicates a failure to parse any kind of item. The attributes are
+ /// returned.
IoviNone(Vec<Attribute>),
IoviItem(Gc<Item>),
IoviForeignItem(Gc<ForeignItem>),
}
-// Possibly accept an `INTERPOLATED` expression (a pre-parsed expression
-// dropped into the token stream, which happens while parsing the
-// result of macro expansion)
-/* Placement of these is not as complex as I feared it would be.
-The important thing is to make sure that lookahead doesn't balk
-at INTERPOLATED tokens */
+/// Possibly accept an `INTERPOLATED` expression (a pre-parsed expression
+/// dropped into the token stream, which happens while parsing the
+/// result of macro expansion)
+/// Placement of these is not as complex as I feared it would be.
+/// The important thing is to make sure that lookahead doesn't balk
+/// at INTERPOLATED tokens
macro_rules! maybe_whole_expr (
($p:expr) => (
{
)
)
-// As above, but for things other than expressions
+/// As maybe_whole_expr, but for things other than expressions
macro_rules! maybe_whole (
($p:expr, $constructor:ident) => (
{
pub struct Parser<'a> {
pub sess: &'a ParseSess,
- // the current token:
+ /// the current token:
pub token: token::Token,
- // the span of the current token:
+ /// the span of the current token:
pub span: Span,
- // the span of the prior token:
+ /// the span of the prior token:
pub last_span: Span,
pub cfg: CrateConfig,
- // the previous token or None (only stashed sometimes).
+ /// the previous token or None (only stashed sometimes).
pub last_token: Option<Box<token::Token>>,
pub buffer: [TokenAndSpan, ..4],
pub buffer_start: int,
root_module_name: None,
}
}
- // convert a token to a string using self's reader
+
+ /// Convert a token to a string using self's reader
pub fn token_to_string(token: &token::Token) -> String {
token::to_string(token)
}
- // convert the current token to a string using self's reader
+ /// Convert the current token to a string using self's reader
pub fn this_token_to_string(&mut self) -> String {
Parser::token_to_string(&self.token)
}
self.fatal(format!("unexpected token: `{}`", this_token).as_slice());
}
- // expect and consume the token t. Signal an error if
- // the next token is not t.
+ /// Expect and consume the token t. Signal an error if
+ /// the next token is not t.
pub fn expect(&mut self, t: &token::Token) {
if self.token == *t {
self.bump();
}
}
- // Expect next token to be edible or inedible token. If edible,
- // then consume it; if inedible, then return without consuming
- // anything. Signal a fatal error if next token is unexpected.
+ /// Expect next token to be edible or inedible token. If edible,
+ /// then consume it; if inedible, then return without consuming
+ /// anything. Signal a fatal error if next token is unexpected.
pub fn expect_one_of(&mut self,
edible: &[token::Token],
inedible: &[token::Token]) {
}
}
- // Check for erroneous `ident { }`; if matches, signal error and
- // recover (without consuming any expected input token). Returns
- // true if and only if input was consumed for recovery.
+ /// Check for erroneous `ident { }`; if matches, signal error and
+ /// recover (without consuming any expected input token). Returns
+ /// true if and only if input was consumed for recovery.
pub fn check_for_erroneous_unit_struct_expecting(&mut self, expected: &[token::Token]) -> bool {
if self.token == token::LBRACE
&& expected.iter().all(|t| *t != token::LBRACE)
}
}
- // Commit to parsing a complete expression `e` expected to be
- // followed by some token from the set edible + inedible. Recover
- // from anticipated input errors, discarding erroneous characters.
+ /// Commit to parsing a complete expression `e` expected to be
+ /// followed by some token from the set edible + inedible. Recover
+ /// from anticipated input errors, discarding erroneous characters.
pub fn commit_expr(&mut self, e: Gc<Expr>, edible: &[token::Token],
inedible: &[token::Token]) {
debug!("commit_expr {:?}", e);
self.commit_expr(e, &[edible], &[])
}
- // Commit to parsing a complete statement `s`, which expects to be
- // followed by some token from the set edible + inedible. Check
- // for recoverable input errors, discarding erroneous characters.
+ /// Commit to parsing a complete statement `s`, which expects to be
+ /// followed by some token from the set edible + inedible. Check
+ /// for recoverable input errors, discarding erroneous characters.
pub fn commit_stmt(&mut self, s: Gc<Stmt>, edible: &[token::Token],
inedible: &[token::Token]) {
debug!("commit_stmt {:?}", s);
id: ast::DUMMY_NODE_ID })
}
- // consume token 'tok' if it exists. Returns true if the given
- // token was present, false otherwise.
+ /// Consume token 'tok' if it exists. Returns true if the given
+ /// token was present, false otherwise.
pub fn eat(&mut self, tok: &token::Token) -> bool {
let is_present = self.token == *tok;
if is_present { self.bump() }
token::is_keyword(kw, &self.token)
}
- // if the next token is the given keyword, eat it and return
- // true. Otherwise, return false.
+ /// If the next token is the given keyword, eat it and return
+ /// true. Otherwise, return false.
pub fn eat_keyword(&mut self, kw: keywords::Keyword) -> bool {
match self.token {
token::IDENT(sid, false) if kw.to_name() == sid.name => {
}
}
- // if the given word is not a keyword, signal an error.
- // if the next token is not the given word, signal an error.
- // otherwise, eat it.
+ /// If the given word is not a keyword, signal an error.
+ /// If the next token is not the given word, signal an error.
+ /// Otherwise, eat it.
pub fn expect_keyword(&mut self, kw: keywords::Keyword) {
if !self.eat_keyword(kw) {
let id_interned_str = token::get_name(kw.to_name());
}
}
- // signal an error if the given string is a strict keyword
+ /// Signal an error if the given string is a strict keyword
pub fn check_strict_keywords(&mut self) {
if token::is_strict_keyword(&self.token) {
let token_str = self.this_token_to_string();
}
}
- // signal an error if the current token is a reserved keyword
+ /// Signal an error if the current token is a reserved keyword
pub fn check_reserved_keywords(&mut self) {
if token::is_reserved_keyword(&self.token) {
let token_str = self.this_token_to_string();
}
}
- // Expect and consume an `&`. If `&&` is seen, replace it with a single
- // `&` and continue. If an `&` is not seen, signal an error.
+ /// Expect and consume an `&`. If `&&` is seen, replace it with a single
+ /// `&` and continue. If an `&` is not seen, signal an error.
fn expect_and(&mut self) {
match self.token {
token::BINOP(token::AND) => self.bump(),
}
}
- // Expect and consume a `|`. If `||` is seen, replace it with a single
- // `|` and continue. If a `|` is not seen, signal an error.
+ /// Expect and consume a `|`. If `||` is seen, replace it with a single
+ /// `|` and continue. If a `|` is not seen, signal an error.
fn expect_or(&mut self) {
match self.token {
token::BINOP(token::OR) => self.bump(),
}
}
- // Attempt to consume a `<`. If `<<` is seen, replace it with a single
- // `<` and continue. If a `<` is not seen, return false.
- //
- // This is meant to be used when parsing generics on a path to get the
- // starting token. The `force` parameter is used to forcefully break up a
- // `<<` token. If `force` is false, then `<<` is only broken when a lifetime
- // shows up next. For example, consider the expression:
- //
- // foo as bar << test
- //
- // The parser needs to know if `bar <<` is the start of a generic path or if
- // it's a left-shift token. If `test` were a lifetime, then it's impossible
- // for the token to be a left-shift, but if it's not a lifetime, then it's
- // considered a left-shift.
- //
- // The reason for this is that the only current ambiguity with `<<` is when
- // parsing closure types:
- //
- // foo::<<'a> ||>();
- // impl Foo<<'a> ||>() { ... }
+ /// Attempt to consume a `<`. If `<<` is seen, replace it with a single
+ /// `<` and continue. If a `<` is not seen, return false.
+ ///
+ /// This is meant to be used when parsing generics on a path to get the
+ /// starting token. The `force` parameter is used to forcefully break up a
+ /// `<<` token. If `force` is false, then `<<` is only broken when a lifetime
+ /// shows up next. For example, consider the expression:
+ ///
+ /// foo as bar << test
+ ///
+ /// The parser needs to know if `bar <<` is the start of a generic path or if
+ /// it's a left-shift token. If `test` were a lifetime, then it's impossible
+ /// for the token to be a left-shift, but if it's not a lifetime, then it's
+ /// considered a left-shift.
+ ///
+ /// The reason for this is that the only current ambiguity with `<<` is when
+ /// parsing closure types:
+ ///
+ /// foo::<<'a> ||>();
+ /// impl Foo<<'a> ||>() { ... }
fn eat_lt(&mut self, force: bool) -> bool {
match self.token {
token::LT => { self.bump(); true }
}
}
- // Parse a sequence bracketed by `|` and `|`, stopping before the `|`.
+ /// Parse a sequence bracketed by `|` and `|`, stopping before the `|`.
fn parse_seq_to_before_or<T>(
&mut self,
sep: &token::Token,
vector
}
- // expect and consume a GT. if a >> is seen, replace it
- // with a single > and continue. If a GT is not seen,
- // signal an error.
+ /// Expect and consume a GT. If a >> is seen, replace it
+ /// with a single > and continue. If a GT is not seen,
+ /// signal an error.
pub fn expect_gt(&mut self) {
match self.token {
token::GT => self.bump(),
}
}
- // parse a sequence bracketed by '<' and '>', stopping
- // before the '>'.
+ /// Parse a sequence bracketed by '<' and '>', stopping
+ /// before the '>'.
pub fn parse_seq_to_before_gt<T>(
&mut self,
sep: Option<token::Token>,
return v;
}
- // parse a sequence, including the closing delimiter. The function
- // f must consume tokens until reaching the next separator or
- // closing bracket.
+ /// Parse a sequence, including the closing delimiter. The function
+ /// f must consume tokens until reaching the next separator or
+ /// closing bracket.
pub fn parse_seq_to_end<T>(
&mut self,
ket: &token::Token,
val
}
- // parse a sequence, not including the closing delimiter. The function
- // f must consume tokens until reaching the next separator or
- // closing bracket.
+ /// Parse a sequence, not including the closing delimiter. The function
+ /// f must consume tokens until reaching the next separator or
+ /// closing bracket.
pub fn parse_seq_to_before_end<T>(
&mut self,
ket: &token::Token,
return v;
}
- // parse a sequence, including the closing delimiter. The function
- // f must consume tokens until reaching the next separator or
- // closing bracket.
+ /// Parse a sequence, including the closing delimiter. The function
+ /// f must consume tokens until reaching the next separator or
+ /// closing bracket.
pub fn parse_unspanned_seq<T>(
&mut self,
bra: &token::Token,
result
}
- // parse a sequence parameter of enum variant. For consistency purposes,
- // these should not be empty.
+ /// Parse a sequence parameter of enum variant. For consistency purposes,
+ /// these should not be empty.
pub fn parse_enum_variant_seq<T>(
&mut self,
bra: &token::Token,
spanned(lo, hi, result)
}
- // advance the parser by one token
+ /// Advance the parser by one token
pub fn bump(&mut self) {
self.last_span = self.span;
// Stash token for error recovery (sometimes; clone is not necessarily cheap).
self.tokens_consumed += 1u;
}
- // Advance the parser by one token and return the bumped token.
+ /// Advance the parser by one token and return the bumped token.
pub fn bump_and_get(&mut self) -> token::Token {
let old_token = replace(&mut self.token, token::UNDERSCORE);
self.bump();
old_token
}
- // EFFECT: replace the current token and span with the given one
+ /// EFFECT: replace the current token and span with the given one
pub fn replace_token(&mut self,
next: token::Token,
lo: BytePos,
token::get_ident(id)
}
- // Is the current token one of the keywords that signals a bare function
- // type?
+ /// Is the current token one of the keywords that signals a bare function
+ /// type?
pub fn token_is_bare_fn_keyword(&mut self) -> bool {
if token::is_keyword(keywords::Fn, &self.token) {
return true
false
}
- // Is the current token one of the keywords that signals a closure type?
+ /// Is the current token one of the keywords that signals a closure type?
pub fn token_is_closure_keyword(&mut self) -> bool {
token::is_keyword(keywords::Unsafe, &self.token) ||
token::is_keyword(keywords::Once, &self.token)
}
- // Is the current token one of the keywords that signals an old-style
- // closure type (with explicit sigil)?
+ /// Is the current token one of the keywords that signals an old-style
+ /// closure type (with explicit sigil)?
pub fn token_is_old_style_closure_keyword(&mut self) -> bool {
token::is_keyword(keywords::Unsafe, &self.token) ||
token::is_keyword(keywords::Once, &self.token) ||
}
}
- // parse a TyBareFn type:
+ /// Parse a TyBareFn type:
pub fn parse_ty_bare_fn(&mut self) -> Ty_ {
/*
});
}
- // Parses a procedure type (`proc`). The initial `proc` keyword must
- // already have been parsed.
+ /// Parses a procedure type (`proc`). The initial `proc` keyword must
+ /// already have been parsed.
pub fn parse_proc_type(&mut self) -> Ty_ {
/*
})
}
- // parse a TyClosure type
+ /// Parse a TyClosure type
pub fn parse_ty_closure(&mut self) -> Ty_ {
/*
}
}
- // parse a function type (following the 'fn')
+ /// Parse a function type (following the 'fn')
pub fn parse_ty_fn_decl(&mut self, allow_variadic: bool)
-> (P<FnDecl>, Vec<ast::Lifetime>) {
/*
(decl, lifetimes)
}
- // parse the methods in a trait declaration
+ /// Parse the methods in a trait declaration
pub fn parse_trait_methods(&mut self) -> Vec<TraitMethod> {
self.parse_unspanned_seq(
&token::LBRACE,
})
}
- // parse a possibly mutable type
+ /// Parse a possibly mutable type
pub fn parse_mt(&mut self) -> MutTy {
let mutbl = self.parse_mutability();
let t = self.parse_ty(true);
MutTy { ty: t, mutbl: mutbl }
}
- // parse [mut/const/imm] ID : TY
- // now used only by obsolete record syntax parser...
+ /// Parse [mut/const/imm] ID : TY
+ /// now used only by obsolete record syntax parser...
pub fn parse_ty_field(&mut self) -> TypeField {
let lo = self.span.lo;
let mutbl = self.parse_mutability();
}
}
- // parse optional return type [ -> TY ] in function decl
+ /// Parse optional return type [ -> TY ] in function decl
pub fn parse_ret_ty(&mut self) -> (RetStyle, P<Ty>) {
return if self.eat(&token::RARROW) {
let lo = self.span.lo;
}
}
- // This version of parse arg doesn't necessarily require
- // identifier names.
+ /// This version of parse arg doesn't necessarily require
+ /// identifier names.
pub fn parse_arg_general(&mut self, require_name: bool) -> Arg {
let pat = if require_name || self.is_named_argument() {
debug!("parse_arg_general parse_pat (require_name:{:?})",
}
}
- // parse a single function argument
+ /// Parse a single function argument
pub fn parse_arg(&mut self) -> Arg {
self.parse_arg_general(true)
}
- // parse an argument in a lambda header e.g. |arg, arg|
+ /// Parse an argument in a lambda header e.g. |arg, arg|
pub fn parse_fn_block_arg(&mut self) -> Arg {
let pat = self.parse_pat();
let t = if self.eat(&token::COLON) {
}
}
- // matches token_lit = LIT_INT | ...
+ /// Matches token_lit = LIT_INT | ...
pub fn lit_from_token(&mut self, tok: &token::Token) -> Lit_ {
match *tok {
token::LIT_BYTE(i) => LitByte(i),
}
}
- // matches lit = true | false | token_lit
+ /// Matches lit = true | false | token_lit
pub fn parse_lit(&mut self) -> Lit {
let lo = self.span.lo;
let lit = if self.eat_keyword(keywords::True) {
codemap::Spanned { node: lit, span: mk_sp(lo, self.last_span.hi) }
}
- // matches '-' lit | lit
+ /// Matches '-' lit | lit
pub fn parse_literal_maybe_minus(&mut self) -> Gc<Expr> {
let minus_lo = self.span.lo;
let minus_present = self.eat(&token::BINOP(token::MINUS));
}
/// Parses a single lifetime
- // matches lifetime = LIFETIME
+ /// Matches lifetime = LIFETIME
pub fn parse_lifetime(&mut self) -> ast::Lifetime {
match self.token {
token::LIFETIME(i) => {
token::is_keyword(keywords::Const, tok)
}
- // parse mutability declaration (mut/const/imm)
+ /// Parse mutability declaration (mut/const/imm)
pub fn parse_mutability(&mut self) -> Mutability {
if self.eat_keyword(keywords::Mut) {
MutMutable
}
}
- // parse ident COLON expr
+ /// Parse ident COLON expr
pub fn parse_field(&mut self) -> Field {
let lo = self.span.lo;
let i = self.parse_ident();
}
}
- // at the bottom (top?) of the precedence hierarchy,
- // parse things like parenthesized exprs,
- // macros, return, etc.
+ /// At the bottom (top?) of the precedence hierarchy,
+ /// parse things like parenthesized exprs,
+ /// macros, return, etc.
pub fn parse_bottom_expr(&mut self) -> Gc<Expr> {
maybe_whole_expr!(self);
return self.mk_expr(lo, hi, ex);
}
- // parse a block or unsafe block
+ /// Parse a block or unsafe block
pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode)
-> Gc<Expr> {
self.expect(&token::LBRACE);
return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk));
}
- // parse a.b or a(13) or a[4] or just a
+ /// Parse a.b or a(13) or a[4] or just a
pub fn parse_dot_or_call_expr(&mut self) -> Gc<Expr> {
let b = self.parse_bottom_expr();
self.parse_dot_or_call_expr_with(b)
return e;
}
- // parse an optional separator followed by a kleene-style
- // repetition token (+ or *).
+ /// Parse an optional separator followed by a kleene-style
+ /// repetition token (+ or *).
pub fn parse_sep_and_zerok(&mut self) -> (Option<token::Token>, bool) {
fn parse_zerok(parser: &mut Parser) -> Option<bool> {
match parser.token {
}
}
- // parse a single token tree from the input.
+ /// Parse a single token tree from the input.
pub fn parse_token_tree(&mut self) -> TokenTree {
// FIXME #6994: currently, this is too eager. It
// parses token trees but also identifies TTSeq's
}
}
- // This goofy function is necessary to correctly match parens in Matcher's.
- // Otherwise, `$( ( )` would be a valid Matcher, and `$( () )` would be
- // invalid. It's similar to common::parse_seq.
+ /// This goofy function is necessary to correctly match parens in Matcher's.
+ /// Otherwise, `$( ( )` would be a valid Matcher, and `$( () )` would be
+ /// invalid. It's similar to common::parse_seq.
pub fn parse_matcher_subseq_upto(&mut self,
name_idx: &mut uint,
ket: &token::Token)
return spanned(lo, self.span.hi, m);
}
- // parse a prefix-operator expr
+ /// Parse a prefix-operator expr
pub fn parse_prefix_expr(&mut self) -> Gc<Expr> {
let lo = self.span.lo;
let hi;
return self.mk_expr(lo, hi, ex);
}
- // parse an expression of binops
+ /// Parse an expression of binops
pub fn parse_binops(&mut self) -> Gc<Expr> {
let prefix_expr = self.parse_prefix_expr();
self.parse_more_binops(prefix_expr, 0)
}
- // parse an expression of binops of at least min_prec precedence
+ /// Parse an expression of binops of at least min_prec precedence
pub fn parse_more_binops(&mut self, lhs: Gc<Expr>,
min_prec: uint) -> Gc<Expr> {
if self.expr_is_complete(lhs) { return lhs; }
}
}
- // parse an assignment expression....
- // actually, this seems to be the main entry point for
- // parsing an arbitrary expression.
+ /// Parse an assignment expression....
+ /// actually, this seems to be the main entry point for
+ /// parsing an arbitrary expression.
pub fn parse_assign_expr(&mut self) -> Gc<Expr> {
let lo = self.span.lo;
let lhs = self.parse_binops();
}
}
- // parse an 'if' expression ('if' token already eaten)
+ /// Parse an 'if' expression ('if' token already eaten)
pub fn parse_if_expr(&mut self) -> Gc<Expr> {
let lo = self.last_span.lo;
let cond = self.parse_expr_res(RESTRICT_NO_STRUCT_LITERAL);
self.mk_expr(lo, hi, ExprIf(cond, thn, els))
}
- // `|args| { ... }` or `{ ...}` like in `do` expressions
+ /// `|args| { ... }` or `{ ...}` like in `do` expressions
pub fn parse_lambda_block_expr(&mut self) -> Gc<Expr> {
self.parse_lambda_expr_(
|p| {
})
}
- // `|args| expr`
+ /// `|args| expr`
pub fn parse_lambda_expr(&mut self) -> Gc<Expr> {
self.parse_lambda_expr_(|p| p.parse_fn_block_decl(),
|p| p.parse_expr())
}
- // parse something of the form |args| expr
- // this is used both in parsing a lambda expr
- // and in parsing a block expr as e.g. in for...
+ /// Parse something of the form |args| expr
+ /// this is used both in parsing a lambda expr
+ /// and in parsing a block expr as e.g. in for...
pub fn parse_lambda_expr_(&mut self,
parse_decl: |&mut Parser| -> P<FnDecl>,
parse_body: |&mut Parser| -> Gc<Expr>)
}
}
- // parse a 'for' .. 'in' expression ('for' token already eaten)
+ /// Parse a 'for' .. 'in' expression ('for' token already eaten)
pub fn parse_for_expr(&mut self, opt_ident: Option<ast::Ident>) -> Gc<Expr> {
// Parse: `for <src_pat> in <src_expr> <src_loop_block>`
return self.mk_expr(lo, hi, ExprMatch(discriminant, arms));
}
- // parse an expression
+ /// Parse an expression
pub fn parse_expr(&mut self) -> Gc<Expr> {
return self.parse_expr_res(UNRESTRICTED);
}
- // parse an expression, subject to the given restriction
+ /// Parse an expression, subject to the given restriction
pub fn parse_expr_res(&mut self, r: restriction) -> Gc<Expr> {
let old = self.restriction;
self.restriction = r;
return e;
}
- // parse the RHS of a local variable declaration (e.g. '= 14;')
+ /// Parse the RHS of a local variable declaration (e.g. '= 14;')
fn parse_initializer(&mut self) -> Option<Gc<Expr>> {
if self.token == token::EQ {
self.bump();
}
}
- // parse patterns, separated by '|' s
+ /// Parse patterns, separated by '|' s
fn parse_pats(&mut self) -> Vec<Gc<Pat>> {
let mut pats = Vec::new();
loop {
(before, slice, after)
}
- // parse the fields of a struct-like pattern
+ /// Parse the fields of a struct-like pattern
fn parse_pat_fields(&mut self) -> (Vec<ast::FieldPat> , bool) {
let mut fields = Vec::new();
let mut etc = false;
return (fields, etc);
}
- // parse a pattern.
+ /// Parse a pattern.
pub fn parse_pat(&mut self) -> Gc<Pat> {
maybe_whole!(self, NtPat);
}
}
- // parse ident or ident @ pat
- // used by the copy foo and ref foo patterns to give a good
- // error message when parsing mistakes like ref foo(a,b)
+ /// Parse ident or ident @ pat
+ /// used by the copy foo and ref foo patterns to give a good
+ /// error message when parsing mistakes like ref foo(a,b)
fn parse_pat_ident(&mut self,
binding_mode: ast::BindingMode)
-> ast::Pat_ {
PatIdent(binding_mode, name, sub)
}
- // parse a local variable declaration
+ /// Parse a local variable declaration
fn parse_local(&mut self) -> Gc<Local> {
let lo = self.span.lo;
let pat = self.parse_pat();
}
}
- // parse a "let" stmt
+ /// Parse a "let" stmt
fn parse_let(&mut self) -> Gc<Decl> {
let lo = self.span.lo;
let local = self.parse_local();
box(GC) spanned(lo, self.last_span.hi, DeclLocal(local))
}
- // parse a structure field
+ /// Parse a structure field
fn parse_name_and_ty(&mut self, pr: Visibility,
attrs: Vec<Attribute> ) -> StructField {
let lo = self.span.lo;
})
}
- // parse a statement. may include decl.
- // precondition: any attributes are parsed already
+ /// Parse a statement. may include decl.
+ /// Precondition: any attributes are parsed already
pub fn parse_stmt(&mut self, item_attrs: Vec<Attribute>) -> Gc<Stmt> {
maybe_whole!(self, NtStmt);
}
}
- // is this expression a successfully-parsed statement?
+ /// Is this expression a successfully-parsed statement?
fn expr_is_complete(&mut self, e: Gc<Expr>) -> bool {
return self.restriction == RESTRICT_STMT_EXPR &&
!classify::expr_requires_semi_to_be_stmt(e);
}
- // parse a block. No inner attrs are allowed.
+ /// Parse a block. No inner attrs are allowed.
pub fn parse_block(&mut self) -> P<Block> {
maybe_whole!(no_clone self, NtBlock);
return self.parse_block_tail_(lo, DefaultBlock, Vec::new());
}
- // parse a block. Inner attrs are allowed.
+ /// Parse a block. Inner attrs are allowed.
fn parse_inner_attrs_and_block(&mut self)
-> (Vec<Attribute> , P<Block>) {
(inner, self.parse_block_tail_(lo, DefaultBlock, next))
}
- // Precondition: already parsed the '{' or '#{'
- // I guess that also means "already parsed the 'impure'" if
- // necessary, and this should take a qualifier.
- // some blocks start with "#{"...
+ /// Precondition: already parsed the '{' or '#{'
+ /// I guess that also means "already parsed the 'impure'" if
+ /// necessary, and this should take a qualifier.
+ /// Some blocks start with "#{"...
fn parse_block_tail(&mut self, lo: BytePos, s: BlockCheckMode) -> P<Block> {
self.parse_block_tail_(lo, s, Vec::new())
}
- // parse the rest of a block expression or function body
+ /// Parse the rest of a block expression or function body
fn parse_block_tail_(&mut self, lo: BytePos, s: BlockCheckMode,
first_item_attrs: Vec<Attribute> ) -> P<Block> {
let mut stmts = Vec::new();
}
}
- // matches bounds = ( boundseq )?
- // where boundseq = ( bound + boundseq ) | bound
- // and bound = 'static | ty
- // Returns "None" if there's no colon (e.g. "T");
- // Returns "Some(Empty)" if there's a colon but nothing after (e.g. "T:")
- // Returns "Some(stuff)" otherwise (e.g. "T:stuff").
- // NB: The None/Some distinction is important for issue #7264.
- //
- // Note that the `allow_any_lifetime` argument is a hack for now while the
- // AST doesn't support arbitrary lifetimes in bounds on type parameters. In
- // the future, this flag should be removed, and the return value of this
- // function should be Option<~[TyParamBound]>
+ /// Matches optbounds = ( ( : ( boundseq )? )? )
+ /// where boundseq = ( bound + boundseq ) | bound
+ /// and bound = 'static | ty
+ /// Returns "None" if there's no colon (e.g. "T");
+ /// Returns "Some(Empty)" if there's a colon but nothing after (e.g. "T:")
+ /// Returns "Some(stuff)" otherwise (e.g. "T:stuff").
+ /// NB: The None/Some distinction is important for issue #7264.
+ ///
+ /// Note that the `allow_any_lifetime` argument is a hack for now while the
+ /// AST doesn't support arbitrary lifetimes in bounds on type parameters. In
+ /// the future, this flag should be removed, and the return value of this
+ /// function should be Option<~[TyParamBound]>
fn parse_ty_param_bounds(&mut self, allow_any_lifetime: bool)
-> (Option<ast::Lifetime>,
OwnedSlice<TyParamBound>) {
}
}
- // matches typaram = (unbound`?`)? IDENT optbounds ( EQ ty )?
+ /// Matches typaram = (unbound`?`)? IDENT optbounds ( EQ ty )?
fn parse_ty_param(&mut self) -> TyParam {
// This is a bit hacky. Currently we are only interested in a single
// unbound, and it may only be `Sized`. To avoid backtracking and other
}
}
- // parse a set of optional generic type parameter declarations
- // matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
- // | ( < lifetimes , typaramseq ( , )? > )
- // where typaramseq = ( typaram ) | ( typaram , typaramseq )
+ /// Parse a set of optional generic type parameter declarations
+ /// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
+ /// | ( < lifetimes , typaramseq ( , )? > )
+ /// where typaramseq = ( typaram ) | ( typaram , typaramseq )
pub fn parse_generics(&mut self) -> ast::Generics {
if self.eat(&token::LT) {
let lifetimes = self.parse_lifetimes();
(args, variadic)
}
- // parse the argument list and result type of a function declaration
+ /// Parse the argument list and result type of a function declaration
pub fn parse_fn_decl(&mut self, allow_variadic: bool) -> P<FnDecl> {
let (args, variadic) = self.parse_fn_args(true, allow_variadic);
}
}
- // parse the argument list and result type of a function
- // that may have a self type.
+ /// Parse the argument list and result type of a function
+ /// that may have a self type.
fn parse_fn_decl_with_self(&mut self, parse_arg_fn: |&mut Parser| -> Arg)
-> (ExplicitSelf, P<FnDecl>) {
fn maybe_parse_borrowed_explicit_self(this: &mut Parser)
(spanned(lo, hi, explicit_self), fn_decl)
}
- // parse the |arg, arg| header on a lambda
+ /// Parse the |arg, arg| header on a lambda
fn parse_fn_block_decl(&mut self) -> P<FnDecl> {
let inputs_captures = {
if self.eat(&token::OROR) {
})
}
- // Parses the `(arg, arg) -> return_type` header on a procedure.
+ /// Parses the `(arg, arg) -> return_type` header on a procedure.
fn parse_proc_decl(&mut self) -> P<FnDecl> {
let inputs =
self.parse_unspanned_seq(&token::LPAREN,
})
}
- // parse the name and optional generic types of a function header.
+ /// Parse the name and optional generic types of a function header.
fn parse_fn_header(&mut self) -> (Ident, ast::Generics) {
let id = self.parse_ident();
let generics = self.parse_generics();
}
}
- // parse an item-position function declaration.
+ /// Parse an item-position function declaration.
fn parse_item_fn(&mut self, fn_style: FnStyle, abi: abi::Abi) -> ItemInfo {
let (ident, generics) = self.parse_fn_header();
let decl = self.parse_fn_decl(false);
(ident, ItemFn(decl, fn_style, abi, generics, body), Some(inner_attrs))
}
- // parse a method in a trait impl, starting with `attrs` attributes.
+ /// Parse a method in a trait impl, starting with `attrs` attributes.
fn parse_method(&mut self,
already_parsed_attrs: Option<Vec<Attribute>>) -> Gc<Method> {
let next_attrs = self.parse_outer_attributes();
}
}
- // parse trait Foo { ... }
+ /// Parse trait Foo { ... }
fn parse_item_trait(&mut self) -> ItemInfo {
let ident = self.parse_ident();
let tps = self.parse_generics();
(ident, ItemTrait(tps, sized, traits, meths), None)
}
- // Parses two variants (with the region/type params always optional):
- // impl<T> Foo { ... }
- // impl<T> ToString for ~[T] { ... }
+ /// Parses two variants (with the region/type params always optional):
+ /// impl<T> Foo { ... }
+ /// impl<T> ToString for ~[T] { ... }
fn parse_item_impl(&mut self) -> ItemInfo {
// First, parse type parameters if necessary.
let generics = self.parse_generics();
(ident, ItemImpl(generics, opt_trait, ty, meths), Some(inner_attrs))
}
- // parse a::B<String,int>
+ /// Parse a::B<String,int>
fn parse_trait_ref(&mut self) -> TraitRef {
ast::TraitRef {
path: self.parse_path(LifetimeAndTypesWithoutColons).path,
}
}
- // parse B + C<String,int> + D
+ /// Parse B + C<String,int> + D
fn parse_trait_ref_list(&mut self, ket: &token::Token) -> Vec<TraitRef> {
self.parse_seq_to_before_end(
ket,
)
}
- // parse struct Foo { ... }
+ /// Parse struct Foo { ... }
fn parse_item_struct(&mut self, is_virtual: bool) -> ItemInfo {
let class_name = self.parse_ident();
let generics = self.parse_generics();
None)
}
- // parse a structure field declaration
+ /// Parse a structure field declaration
pub fn parse_single_struct_field(&mut self,
vis: Visibility,
attrs: Vec<Attribute> )
a_var
}
- // parse an element of a struct definition
+ /// Parse an element of a struct definition
fn parse_struct_decl_field(&mut self) -> StructField {
let attrs = self.parse_outer_attributes();
return self.parse_single_struct_field(Inherited, attrs);
}
- // parse visiility: PUB, PRIV, or nothing
+ /// Parse visibility: PUB, PRIV, or nothing
fn parse_visibility(&mut self) -> Visibility {
if self.eat_keyword(keywords::Pub) { Public }
else { Inherited }
}
}
- // given a termination token and a vector of already-parsed
- // attributes (of length 0 or 1), parse all of the items in a module
+ /// Given a termination token and a vector of already-parsed
+ /// attributes (of length 0 or 1), parse all of the items in a module
fn parse_mod_items(&mut self,
term: token::Token,
first_item_attrs: Vec<Attribute>,
(id, ItemStatic(ty, m, e), None)
}
- // parse a `mod <foo> { ... }` or `mod <foo>;` item
+ /// Parse a `mod <foo> { ... }` or `mod <foo>;` item
fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> ItemInfo {
let id_span = self.span;
let id = self.parse_ident();
self.mod_path_stack.pop().unwrap();
}
- // read a module from a source file.
+ /// Read a module from a source file.
fn eval_src_mod(&mut self,
id: ast::Ident,
outer_attrs: &[ast::Attribute],
return (ast::ItemMod(m0), mod_attrs);
}
- // parse a function declaration from a foreign module
+ /// Parse a function declaration from a foreign module
fn parse_item_foreign_fn(&mut self, vis: ast::Visibility,
attrs: Vec<Attribute>) -> Gc<ForeignItem> {
let lo = self.span.lo;
vis: vis }
}
- // parse a static item from a foreign module
+ /// Parse a static item from a foreign module
fn parse_item_foreign_static(&mut self, vis: ast::Visibility,
attrs: Vec<Attribute> ) -> Gc<ForeignItem> {
let lo = self.span.lo;
}
}
- // parse safe/unsafe and fn
+ /// Parse safe/unsafe and fn
fn parse_fn_style(&mut self) -> FnStyle {
if self.eat_keyword(keywords::Fn) { NormalFn }
else if self.eat_keyword(keywords::Unsafe) {
}
- // at this point, this is essentially a wrapper for
- // parse_foreign_items.
+ /// At this point, this is essentially a wrapper for
+ /// parse_foreign_items.
fn parse_foreign_mod_items(&mut self,
abi: abi::Abi,
first_item_attrs: Vec<Attribute> )
return IoviItem(item);
}
- // parse type Foo = Bar;
+ /// Parse type Foo = Bar;
fn parse_item_type(&mut self) -> ItemInfo {
let ident = self.parse_ident();
let tps = self.parse_generics();
(ident, ItemTy(ty, tps), None)
}
- // parse a structure-like enum variant definition
- // this should probably be renamed or refactored...
+ /// Parse a structure-like enum variant definition
+ /// this should probably be renamed or refactored...
fn parse_struct_def(&mut self) -> Gc<StructDef> {
let mut fields: Vec<StructField> = Vec::new();
while self.token != token::RBRACE {
};
}
- // parse the part of an "enum" decl following the '{'
+ /// Parse the part of an "enum" decl following the '{'
fn parse_enum_def(&mut self, _generics: &ast::Generics) -> EnumDef {
let mut variants = Vec::new();
let mut all_nullary = true;
ast::EnumDef { variants: variants }
}
- // parse an "enum" declaration
+ /// Parse an "enum" declaration
fn parse_item_enum(&mut self) -> ItemInfo {
let id = self.parse_ident();
let generics = self.parse_generics();
}
}
- // Parses a string as an ABI spec on an extern type or module. Consumes
- // the `extern` keyword, if one is found.
+ /// Parses a string as an ABI spec on an extern type or module. Consumes
+ /// the `extern` keyword, if one is found.
fn parse_opt_abi(&mut self) -> Option<abi::Abi> {
match self.token {
token::LIT_STR(s) | token::LIT_STR_RAW(s, _) => {
}
}
- // parse one of the items or view items allowed by the
- // flags; on failure, return IoviNone.
- // NB: this function no longer parses the items inside an
- // extern crate.
+ /// Parse one of the items or view items allowed by the
+ /// flags; on failure, return IoviNone.
+ /// NB: this function no longer parses the items inside an
+ /// extern crate.
fn parse_item_or_view_item(&mut self,
attrs: Vec<Attribute> ,
macros_allowed: bool)
self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility)
}
- // parse a foreign item; on failure, return IoviNone.
+ /// Parse a foreign item; on failure, return IoviNone.
fn parse_foreign_item(&mut self,
attrs: Vec<Attribute> ,
macros_allowed: bool)
self.parse_macro_use_or_failure(attrs,macros_allowed,lo,visibility)
}
- // this is the fall-through for parsing items.
+ /// This is the fall-through for parsing items.
fn parse_macro_use_or_failure(
&mut self,
attrs: Vec<Attribute> ,
}
}
- // parse, e.g., "use a::b::{z,y}"
+ /// Parse, e.g., "use a::b::{z,y}"
fn parse_use(&mut self) -> ViewItem_ {
return ViewItemUse(self.parse_view_path());
}
- // matches view_path : MOD? IDENT EQ non_global_path
- // | MOD? non_global_path MOD_SEP LBRACE RBRACE
- // | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE
- // | MOD? non_global_path MOD_SEP STAR
- // | MOD? non_global_path
+ /// Matches view_path : MOD? IDENT EQ non_global_path
+ /// | MOD? non_global_path MOD_SEP LBRACE RBRACE
+ /// | MOD? non_global_path MOD_SEP LBRACE ident_seq RBRACE
+ /// | MOD? non_global_path MOD_SEP STAR
+ /// | MOD? non_global_path
fn parse_view_path(&mut self) -> Gc<ViewPath> {
let lo = self.span.lo;
ViewPathSimple(last, path, ast::DUMMY_NODE_ID));
}
- // Parses a sequence of items. Stops when it finds program
- // text that can't be parsed as an item
- // - mod_items uses extern_mod_allowed = true
- // - block_tail_ uses extern_mod_allowed = false
+ /// Parses a sequence of items. Stops when it finds program
+ /// text that can't be parsed as an item
+ /// - mod_items uses extern_mod_allowed = true
+ /// - block_tail_ uses extern_mod_allowed = false
fn parse_items_and_view_items(&mut self,
first_item_attrs: Vec<Attribute> ,
mut extern_mod_allowed: bool,
}
}
- // Parses a sequence of foreign items. Stops when it finds program
- // text that can't be parsed as an item
+ /// Parses a sequence of foreign items. Stops when it finds program
+ /// text that can't be parsed as an item
fn parse_foreign_items(&mut self, first_item_attrs: Vec<Attribute> ,
macros_allowed: bool)
-> ParsedItemsAndViewItems {
}
}
- // Parses a source module as a crate. This is the main
- // entry point for the parser.
+ /// Parses a source module as a crate. This is the main
+ /// entry point for the parser.
pub fn parse_crate_mod(&mut self) -> Crate {
let lo = self.span.lo;
// parse the crate's inner attrs, maybe (oops) one
LIT_BINARY_RAW(Rc<Vec<u8>>, uint), /* raw binary str delimited by n hash symbols */
/* Name components */
- // an identifier contains an "is_mod_name" boolean,
- // indicating whether :: follows this token with no
- // whitespace in between.
+ /// An identifier contains an "is_mod_name" boolean,
+ /// indicating whether :: follows this token with no
+ /// whitespace in between.
IDENT(ast::Ident, bool),
UNDERSCORE,
LIFETIME(ast::Ident),
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/*
- * This pretty-printer is a direct reimplementation of Philip Karlton's
- * Mesa pretty-printer, as described in appendix A of
- *
- * STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen.
- * Stanford Department of Computer Science, 1979.
- *
- * The algorithm's aim is to break a stream into as few lines as possible
- * while respecting the indentation-consistency requirements of the enclosing
- * block, and avoiding breaking at silly places on block boundaries, for
- * example, between "x" and ")" in "x)".
- *
- * I am implementing this algorithm because it comes with 20 pages of
- * documentation explaining its theory, and because it addresses the set of
- * concerns I've seen other pretty-printers fall down on. Weirdly. Even though
- * it's 32 years old. What can I say?
- *
- * Despite some redundancies and quirks in the way it's implemented in that
- * paper, I've opted to keep the implementation here as similar as I can,
- * changing only what was blatantly wrong, a typo, or sufficiently
- * non-idiomatic rust that it really stuck out.
- *
- * In particular you'll see a certain amount of churn related to INTEGER vs.
- * CARDINAL in the Mesa implementation. Mesa apparently interconverts the two
- * somewhat readily? In any case, I've used uint for indices-in-buffers and
- * ints for character-sizes-and-indentation-offsets. This respects the need
- * for ints to "go negative" while carrying a pending-calculation balance, and
- * helps differentiate all the numbers flying around internally (slightly).
- *
- * I also inverted the indentation arithmetic used in the print stack, since
- * the Mesa implementation (somewhat randomly) stores the offset on the print
- * stack in terms of margin-col rather than col itself. I store col.
- *
- * I also implemented a small change in the String token, in that I store an
- * explicit length for the string. For most tokens this is just the length of
- * the accompanying string. But it's necessary to permit it to differ, for
- * encoding things that are supposed to "go on their own line" -- certain
- * classes of comment and blank-line -- where relying on adjacent
- * hardbreak-like Break tokens with long blankness indication doesn't actually
- * work. To see why, consider when there is a "thing that should be on its own
- * line" between two long blocks, say functions. If you put a hardbreak after
- * each function (or before each) and the breaking algorithm decides to break
- * there anyways (because the functions themselves are long) you wind up with
- * extra blank lines. If you don't put hardbreaks you can wind up with the
- * "thing which should be on its own line" not getting its own line in the
- * rare case of "really small functions" or such. This re-occurs with comments
- * and explicit blank lines. So in those cases we use a string with a payload
- * we want isolated to a line and an explicit length that's huge, surrounded
- * by two zero-length breaks. The algorithm will try its best to fit it on a
- * line (which it can't) and so naturally place the content on its own line to
- * avoid combining it with other lines and making matters even worse.
- */
+//! This pretty-printer is a direct reimplementation of Philip Karlton's
+//! Mesa pretty-printer, as described in appendix A of
+//!
+//! STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen.
+//! Stanford Department of Computer Science, 1979.
+//!
+//! The algorithm's aim is to break a stream into as few lines as possible
+//! while respecting the indentation-consistency requirements of the enclosing
+//! block, and avoiding breaking at silly places on block boundaries, for
+//! example, between "x" and ")" in "x)".
+//!
+//! I am implementing this algorithm because it comes with 20 pages of
+//! documentation explaining its theory, and because it addresses the set of
+//! concerns I've seen other pretty-printers fall down on. Weirdly. Even though
+//! it's 32 years old. What can I say?
+//!
+//! Despite some redundancies and quirks in the way it's implemented in that
+//! paper, I've opted to keep the implementation here as similar as I can,
+//! changing only what was blatantly wrong, a typo, or sufficiently
+//! non-idiomatic rust that it really stuck out.
+//!
+//! In particular you'll see a certain amount of churn related to INTEGER vs.
+//! CARDINAL in the Mesa implementation. Mesa apparently interconverts the two
+//! somewhat readily? In any case, I've used uint for indices-in-buffers and
+//! ints for character-sizes-and-indentation-offsets. This respects the need
+//! for ints to "go negative" while carrying a pending-calculation balance, and
+//! helps differentiate all the numbers flying around internally (slightly).
+//!
+//! I also inverted the indentation arithmetic used in the print stack, since
+//! the Mesa implementation (somewhat randomly) stores the offset on the print
+//! stack in terms of margin-col rather than col itself. I store col.
+//!
+//! I also implemented a small change in the String token, in that I store an
+//! explicit length for the string. For most tokens this is just the length of
+//! the accompanying string. But it's necessary to permit it to differ, for
+//! encoding things that are supposed to "go on their own line" -- certain
+//! classes of comment and blank-line -- where relying on adjacent
+//! hardbreak-like Break tokens with long blankness indication doesn't actually
+//! work. To see why, consider when there is a "thing that should be on its own
+//! line" between two long blocks, say functions. If you put a hardbreak after
+//! each function (or before each) and the breaking algorithm decides to break
+//! there anyways (because the functions themselves are long) you wind up with
+//! extra blank lines. If you don't put hardbreaks you can wind up with the
+//! "thing which should be on its own line" not getting its own line in the
+//! rare case of "really small functions" or such. This re-occurs with comments
+//! and explicit blank lines. So in those cases we use a string with a payload
+//! we want isolated to a line and an explicit length that's huge, surrounded
+//! by two zero-length breaks. The algorithm will try its best to fit it on a
+//! line (which it can't) and so naturally place the content on its own line to
+//! avoid combining it with other lines and making matters even worse.
use std::io;
use std::string::String;
}
-/*
- * In case you do not have the paper, here is an explanation of what's going
- * on.
- *
- * There is a stream of input tokens flowing through this printer.
- *
- * The printer buffers up to 3N tokens inside itself, where N is linewidth.
- * Yes, linewidth is chars and tokens are multi-char, but in the worst
- * case every token worth buffering is 1 char long, so it's ok.
- *
- * Tokens are String, Break, and Begin/End to delimit blocks.
- *
- * Begin tokens can carry an offset, saying "how far to indent when you break
- * inside here", as well as a flag indicating "consistent" or "inconsistent"
- * breaking. Consistent breaking means that after the first break, no attempt
- * will be made to flow subsequent breaks together onto lines. Inconsistent
- * is the opposite. Inconsistent breaking example would be, say:
- *
- * foo(hello, there, good, friends)
- *
- * breaking inconsistently to become
- *
- * foo(hello, there
- * good, friends);
- *
- * whereas a consistent breaking would yield:
- *
- * foo(hello,
- * there
- * good,
- * friends);
- *
- * That is, in the consistent-break blocks we value vertical alignment
- * more than the ability to cram stuff onto a line. But in all cases if it
- * can make a block a one-liner, it'll do so.
- *
- * Carrying on with high-level logic:
- *
- * The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
- * 'right' indices denote the active portion of the ring buffer as well as
- * describing hypothetical points-in-the-infinite-stream at most 3N tokens
- * apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
- * between using 'left' and 'right' terms to denote the wrapepd-to-ring-buffer
- * and point-in-infinite-stream senses freely.
- *
- * There is a parallel ring buffer, 'size', that holds the calculated size of
- * each token. Why calculated? Because for Begin/End pairs, the "size"
- * includes everything between the pair. That is, the "size" of Begin is
- * actually the sum of the sizes of everything between Begin and the paired
- * End that follows. Since that is arbitrarily far in the future, 'size' is
- * being rewritten regularly while the printer runs; in fact most of the
- * machinery is here to work out 'size' entries on the fly (and give up when
- * they're so obviously over-long that "infinity" is a good enough
- * approximation for purposes of line breaking).
- *
- * The "input side" of the printer is managed as an abstract process called
- * SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
- * manage calculating 'size'. SCAN is, in other words, the process of
- * calculating 'size' entries.
- *
- * The "output side" of the printer is managed by an abstract process called
- * PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
- * do with each token/size pair it consumes as it goes. It's trying to consume
- * the entire buffered window, but can't output anything until the size is >=
- * 0 (sizes are set to negative while they're pending calculation).
- *
- * So SCAN takes input and buffers tokens and pending calculations, while
- * PRINT gobbles up completed calculations and tokens from the buffer. The
- * theory is that the two can never get more than 3N tokens apart, because
- * once there's "obviously" too much data to fit on a line, in a size
- * calculation, SCAN will write "infinity" to the size and let PRINT consume
- * it.
- *
- * In this implementation (following the paper, again) the SCAN process is
- * the method called 'pretty_print', and the 'PRINT' process is the method
- * called 'print'.
- */
+/// In case you do not have the paper, here is an explanation of what's going
+/// on.
+///
+/// There is a stream of input tokens flowing through this printer.
+///
+/// The printer buffers up to 3N tokens inside itself, where N is linewidth.
+/// Yes, linewidth is chars and tokens are multi-char, but in the worst
+/// case every token worth buffering is 1 char long, so it's ok.
+///
+/// Tokens are String, Break, and Begin/End to delimit blocks.
+///
+/// Begin tokens can carry an offset, saying "how far to indent when you break
+/// inside here", as well as a flag indicating "consistent" or "inconsistent"
+/// breaking. Consistent breaking means that after the first break, no attempt
+/// will be made to flow subsequent breaks together onto lines. Inconsistent
+/// is the opposite. Inconsistent breaking example would be, say:
+///
+/// foo(hello, there, good, friends)
+///
+/// breaking inconsistently to become
+///
+/// foo(hello, there
+/// good, friends);
+///
+/// whereas a consistent breaking would yield:
+///
+/// foo(hello,
+/// there
+/// good,
+/// friends);
+///
+/// That is, in the consistent-break blocks we value vertical alignment
+/// more than the ability to cram stuff onto a line. But in all cases if it
+/// can make a block a one-liner, it'll do so.
+///
+/// Carrying on with high-level logic:
+///
+/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
+/// 'right' indices denote the active portion of the ring buffer as well as
+/// describing hypothetical points-in-the-infinite-stream at most 3N tokens
+/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
+/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
+/// and point-in-infinite-stream senses freely.
+///
+/// There is a parallel ring buffer, 'size', that holds the calculated size of
+/// each token. Why calculated? Because for Begin/End pairs, the "size"
+/// includes everything between the pair. That is, the "size" of Begin is
+/// actually the sum of the sizes of everything between Begin and the paired
+/// End that follows. Since that is arbitrarily far in the future, 'size' is
+/// being rewritten regularly while the printer runs; in fact most of the
+/// machinery is here to work out 'size' entries on the fly (and give up when
+/// they're so obviously over-long that "infinity" is a good enough
+/// approximation for purposes of line breaking).
+///
+/// The "input side" of the printer is managed as an abstract process called
+/// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
+/// manage calculating 'size'. SCAN is, in other words, the process of
+/// calculating 'size' entries.
+///
+/// The "output side" of the printer is managed by an abstract process called
+/// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
+/// do with each token/size pair it consumes as it goes. It's trying to consume
+/// the entire buffered window, but can't output anything until the size is >=
+/// 0 (sizes are set to negative while they're pending calculation).
+///
+/// So SCAN takes input and buffers tokens and pending calculations, while
+/// PRINT gobbles up completed calculations and tokens from the buffer. The
+/// theory is that the two can never get more than 3N tokens apart, because
+/// once there's "obviously" too much data to fit on a line, in a size
+/// calculation, SCAN will write "infinity" to the size and let PRINT consume
+/// it.
+///
+/// In this implementation (following the paper, again) the SCAN process is
+/// the method called 'pretty_print', and the 'PRINT' process is the method
+/// called 'print'.
pub struct Printer {
pub out: Box<io::Writer>,
buf_len: uint,
- margin: int, // width of lines we're constrained to
- space: int, // number of spaces left on line
- left: uint, // index of left side of input stream
- right: uint, // index of right side of input stream
- token: Vec<Token> , // ring-buffr stream goes through
- size: Vec<int> , // ring-buffer of calculated sizes
- left_total: int, // running size of stream "...left"
- right_total: int, // running size of stream "...right"
- // pseudo-stack, really a ring too. Holds the
- // primary-ring-buffers index of the Begin that started the
- // current block, possibly with the most recent Break after that
- // Begin (if there is any) on top of it. Stuff is flushed off the
- // bottom as it becomes irrelevant due to the primary ring-buffer
- // advancing.
+ /// Width of lines we're constrained to
+ margin: int,
+ /// Number of spaces left on line
+ space: int,
+ /// Index of left side of input stream
+ left: uint,
+ /// Index of right side of input stream
+ right: uint,
+ /// Ring-buffer stream goes through
+ token: Vec<Token> ,
+ /// Ring-buffer of calculated sizes
+ size: Vec<int> ,
+ /// Running size of stream "...left"
+ left_total: int,
+ /// Running size of stream "...right"
+ right_total: int,
+ /// Pseudo-stack, really a ring too. Holds the
+ /// primary-ring-buffers index of the Begin that started the
+ /// current block, possibly with the most recent Break after that
+ /// Begin (if there is any) on top of it. Stuff is flushed off the
+ /// bottom as it becomes irrelevant due to the primary ring-buffer
+ /// advancing.
scan_stack: Vec<uint> ,
- scan_stack_empty: bool, // top==bottom disambiguator
- top: uint, // index of top of scan_stack
- bottom: uint, // index of bottom of scan_stack
- // stack of blocks-in-progress being flushed by print
+ /// Top==bottom disambiguator
+ scan_stack_empty: bool,
+ /// Index of top of scan_stack
+ top: uint,
+ /// Index of bottom of scan_stack
+ bottom: uint,
+ /// Stack of blocks-in-progress being flushed by print
print_stack: Vec<PrintStackElem> ,
- // buffered indentation to avoid writing trailing whitespace
+ /// Buffered indentation to avoid writing trailing whitespace
pending_indentation: int,
}
pub static default_columns: uint = 78u;
-// Requires you to pass an input filename and reader so that
-// it can scan the input text for comments and literals to
-// copy forward.
+/// Requires you to pass an input filename and reader so that
+/// it can scan the input text for comments and literals to
+/// copy forward.
pub fn print_crate<'a>(cm: &'a CodeMap,
span_diagnostic: &diagnostic::SpanHandler,
krate: &ast::Crate,
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-// An "interner" is a data structure that associates values with uint tags and
-// allows bidirectional lookup; i.e. given a value, one can easily find the
-// type, and vice versa.
+//! An "interner" is a data structure that associates values with uint tags and
+//! allows bidirectional lookup; i.e. given a value, one can easily find the
+//! type, and vice versa.
use ast::Name;
use std::gc::Gc;
-// map a string to tts, using a made-up filename:
+/// Map a string to tts, using a made-up filename.
pub fn string_to_tts(source_str: String) -> Vec<ast::TokenTree> {
let ps = new_parse_sess();
filemap_to_tts(&ps,
string_to_filemap(&ps, source_str, "bogofile".to_string()))
}
-// map string to parser (via tts)
+/// Map string to parser (via tts)
pub fn string_to_parser<'a>(ps: &'a ParseSess, source_str: String) -> Parser<'a> {
new_parser_from_source_str(ps,
Vec::new(),
x
}
-// parse a string, return a crate.
+/// Parse a string, return a crate.
pub fn string_to_crate (source_str : String) -> ast::Crate {
with_error_checking_parse(source_str, |p| {
p.parse_crate_mod()
})
}
-// parse a string, return an expr
+/// Parse a string, return an expr
pub fn string_to_expr (source_str : String) -> Gc<ast::Expr> {
with_error_checking_parse(source_str, |p| {
p.parse_expr()
})
}
-// parse a string, return an item
+/// Parse a string, return an item
pub fn string_to_item (source_str : String) -> Option<Gc<ast::Item>> {
with_error_checking_parse(source_str, |p| {
p.parse_item(Vec::new())
})
}
-// parse a string, return a stmt
+/// Parse a string, return a stmt
pub fn string_to_stmt(source_str : String) -> Gc<ast::Stmt> {
with_error_checking_parse(source_str, |p| {
p.parse_stmt(Vec::new())
})
}
-// parse a string, return a pat. Uses "irrefutable"... which doesn't
-// (currently) affect parsing.
+/// Parse a string, return a pat. Uses "irrefutable"... which doesn't
+/// (currently) affect parsing.
pub fn string_to_pat(source_str: String) -> Gc<ast::Pat> {
string_to_parser(&new_parse_sess(), source_str).parse_pat()
}
-// convert a vector of strings to a vector of ast::Ident's
+/// Convert a vector of strings to a vector of ast::Ident's
pub fn strs_to_idents(ids: Vec<&str> ) -> Vec<ast::Ident> {
ids.iter().map(|u| token::str_to_ident(*u)).collect()
}
-// does the given string match the pattern? whitespace in the first string
-// may be deleted or replaced with other whitespace to match the pattern.
-// this function is unicode-ignorant; fortunately, the careful design of
-// UTF-8 mitigates this ignorance. In particular, this function only collapses
-// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode
-// chars. Unsurprisingly, it doesn't do NKF-normalization(?).
+/// Does the given string match the pattern? Whitespace in the first string
+/// may be deleted or replaced with other whitespace to match the pattern.
+/// This function is unicode-ignorant; fortunately, the careful design of
+/// UTF-8 mitigates this ignorance. In particular, this function only collapses
+/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode
+/// chars. Unsurprisingly, it doesn't do NFKC-normalization(?).
pub fn matches_codepattern(a : &str, b : &str) -> bool {
let mut idx_a = 0;
let mut idx_b = 0;
}
}
-// given a string and an index, return the first uint >= idx
-// that is a non-ws-char or is outside of the legal range of
-// the string.
+/// Given a string and an index, return the first uint >= idx
+/// that is a non-ws-char or is outside of the legal range of
+/// the string.
fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint {
let mut i = idx;
let len = a.len();
i
}
-// copied from lexer.
+/// Copied from lexer.
pub fn is_whitespace(c: char) -> bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n';
}
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+//! Context-passing AST walker. Each overridden visit method has full control
+//! over what happens with its node, it can do its own traversal of the node's
+//! children (potentially passing in different contexts to each), call
+//! `visit::visit_*` to apply the default traversal algorithm (again, it can
+//! override the context), or prevent deeper traversal by doing nothing.
+//!
+//! Note: it is an important invariant that the default visitor walks the body
+//! of a function in "execution order" (more concretely, reverse post-order
+//! with respect to the CFG implied by the AST), meaning that if AST node A may
+//! execute before AST node B, then A is visited first. The borrow checker in
+//! particular relies on this property.
+//!
use abi::Abi;
use ast::*;
use ast;
use std::gc::Gc;
-// Context-passing AST walker. Each overridden visit method has full control
-// over what happens with its node, it can do its own traversal of the node's
-// children (potentially passing in different contexts to each), call
-// visit::visit_* to apply the default traversal algorithm (again, it can
-// override the context), or prevent deeper traversal by doing nothing.
-//
-// Note: it is an important invariant that the default visitor walks the body
-// of a function in "execution order" (more concretely, reverse post-order
-// with respect to the CFG implied by the AST), meaning that if AST node A may
-// execute before AST node B, then A is visited first. The borrow checker in
-// particular relies on this property.
-
pub enum FnKind<'a> {
- // fn foo() or extern "Abi" fn foo()
+ /// fn foo() or extern "Abi" fn foo()
FkItemFn(Ident, &'a Generics, FnStyle, Abi),
- // fn foo(&self)
+ /// fn foo(&self)
FkMethod(Ident, &'a Generics, &'a Method),
- // |x, y| ...
- // proc(x, y) ...
+ /// |x, y| ...
+ /// proc(x, y) ...
FkFnBlock,
}