#[auto_serialize]
enum mac_ {
mac_invoc(@path, mac_arg, mac_body),
- mac_invoc_tt(@path, token_tree), //will kill mac_invoc and steal its name
+ mac_invoc_tt(@path,~[token_tree]),//will kill mac_invoc and steal its name
mac_embed_type(@ty),
mac_embed_block(blk),
mac_ellipsis,
import codemap::{codemap, span, expn_info, expanded_from};
import std::map::str_hash;
+// second argument is the span to blame for general argument problems
type syntax_expander_ =
fn@(ext_ctxt, span, ast::mac_arg, ast::mac_body) -> @ast::expr;
+// second argument is the origin of the macro, if user-defined
type syntax_expander = {expander: syntax_expander_, span: option<span>};
type macro_def = {ident: ast::ident, ext: syntax_extension};
fn@(ext_ctxt, span, ast::meta_item, ~[@ast::item]) -> ~[@ast::item];
type syntax_expander_tt = {expander: syntax_expander_tt_, span: option<span>};
-type syntax_expander_tt_ = fn@(ext_ctxt, span, ast::token_tree) -> @ast::expr;
+type syntax_expander_tt_ = fn@(ext_ctxt, span, ~[ast::token_tree])
+ -> mac_result;
type syntax_expander_tt_item
= {expander: syntax_expander_tt_item_, span: option<span>};
type syntax_expander_tt_item_
- = fn@(ext_ctxt, span, ast::ident, ast::token_tree) -> mac_result;
+ = fn@(ext_ctxt, span, ast::ident, ~[ast::token_tree]) -> mac_result;
// Result of expanding a tt-style (token-tree) macro invocation. The
// invocation site decides which variants it will accept: expression
// position takes mr_expr, item position takes mr_item, and mr_def
// carries a freshly defined macro for the expander to register.
enum mac_result {
+ mr_expr(@ast::expr),
    mr_item(@ast::item),
    mr_def(macro_def)
}
macro_defining(macro_definer),
item_decorator(item_decorator),
- normal_tt(syntax_expander_tt),
+ expr_tt(syntax_expander_tt),
item_tt(syntax_expander_tt_item),
}
item_tt({expander: f, span: none})
}
let syntax_expanders = str_hash::<syntax_extension>();
+ syntax_expanders.insert("macro",
+ macro_defining(ext::simplext::add_new_extension));
+ syntax_expanders.insert("macro_rules",
+ builtin_item_tt(
+ ext::tt::macro_rules::add_new_extension));
syntax_expanders.insert("fmt", builtin(ext::fmt::expand_syntax_ext));
syntax_expanders.insert("auto_serialize",
item_decorator(ext::auto_serialize::expand));
syntax_expanders.insert("env", builtin(ext::env::expand_syntax_ext));
- syntax_expanders.insert("macro",
- macro_defining(ext::simplext::add_new_extension));
syntax_expanders.insert("concat_idents",
builtin(ext::concat_idents::expand_syntax_ext));
syntax_expanders.insert("ident_to_str",
#fmt["%s can only be used as a decorator", *extname]);
}
some(normal({expander: exp, span: exp_sp})) {
- let expanded = exp(cx, pth.span, args, body);
+ let expanded = exp(cx, mac.span, args, body);
cx.bt_push(expanded_from({call_site: s,
callie: {name: *extname, span: exp_sp}}));
(fully_expanded, s)
}
some(macro_defining(ext)) {
- let named_extension = ext(cx, pth.span, args, body);
+ let named_extension = ext(cx, mac.span, args, body);
exts.insert(*named_extension.ident, named_extension.ext);
(ast::expr_rec(~[], none), s)
}
- some(normal_tt(_)) {
+ some(expr_tt(_)) {
cx.span_fatal(pth.span,
#fmt["this tt-style macro should be \
invoked '%s!{...}'", *extname])
}
}
}
- mac_invoc_tt(pth, tt) {
- assert (vec::len(pth.idents) > 0u);
+ mac_invoc_tt(pth, tts) {
+ assert (vec::len(pth.idents) == 1u);
let extname = pth.idents[0];
alt exts.find(*extname) {
none {
cx.span_fatal(pth.span,
#fmt["macro undefined: '%s'", *extname])
}
- some(normal_tt({expander: exp, span: exp_sp})) {
- let expanded = exp(cx, pth.span, tt);
+ some(expr_tt({expander: exp, span: exp_sp})) {
+ let expanded = alt exp(cx, mac.span, tts) {
+ mr_expr(e) { e }
+ _ { cx.span_fatal(
+ pth.span, #fmt["non-expr macro in expr pos: %s",
+ *extname]) }
+ };
cx.bt_push(expanded_from({call_site: s,
callie: {name: *extname, span: exp_sp}}));
};
alt exts.find(*mname) {
none | some(normal(_)) | some(macro_defining(_))
- | some(normal_tt(_)) | some(item_tt(*)) {
+ | some(expr_tt(_)) | some(item_tt(*)) {
items
}
cx: ext_ctxt, &&it: @ast::item,
fld: ast_fold) -> option<@ast::item> {
alt it.node {
- item_mac({node: mac_invoc_tt(pth, tt), span}) {
+ item_mac({node: mac_invoc_tt(pth, tts), span}) {
let extname = pth.idents[0];
alt exts.find(*extname) {
none {
#fmt("macro undefined: '%s'", *extname))
}
some(item_tt(expand)) {
+ let expanded = expand.expander(cx, it.span, it.ident, tts);
cx.bt_push(expanded_from({call_site: it.span,
callie: {name: *extname,
span: expand.span}}));
- let maybe_it =
- alt expand.expander(cx, it.span, it.ident, tt) {
- mr_item(it) { fld.fold_item(it) }
- mr_def(mdef) {
- exts.insert(*mdef.ident, mdef.ext);
- none
- }
- };
+ let maybe_it = alt expanded {
+ mr_item(it) { fld.fold_item(it) }
+ mr_expr(e) { cx.span_fatal(pth.span,
+ "expr macro in item position: " +
+ *extname) }
+ mr_def(mdef) {
+ exts.insert(*mdef.ident, mdef.ext);
+ none
+ }
+ };
cx.bt_pop();
ret maybe_it
}
import pipes::pipec::methods;
-fn expand_proto(cx: ext_ctxt, _sp: span, id: ast::ident, tt: ast::token_tree)
- -> base::mac_result
+fn expand_proto(cx: ext_ctxt, _sp: span, id: ast::ident,
+ tt: ~[ast::token_tree]) -> base::mac_result
{
let sess = cx.parse_sess();
let cfg = cx.cfg();
- let body_core = alt tt { tt_delim(tts) { tts } _ {fail}};
let tt_rdr = new_tt_reader(cx.parse_sess().span_diagnostic,
- cx.parse_sess().interner,
- none,
- body_core);
+ cx.parse_sess().interner, none, tt);
let rdr = tt_rdr as reader;
let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
fn parse_proto(id: ident) -> protocol {
let proto = protocol(id);
- self.parse_unspanned_seq(token::LBRACE,
- token::RBRACE,
- {sep: none, trailing_sep_allowed: false},
- |self| self.parse_state(proto));
+ self.parse_seq_to_before_end(token::EOF,
+ {sep: none, trailing_sep_allowed: false},
+ |self| self.parse_state(proto));
ret proto;
}
type earley_item = matcher_pos;
-
-fn nameize(&&p_s: parse_sess, ms: ~[matcher], &&res: ~[@arb_depth])
+fn nameize(p_s: parse_sess, ms: ~[matcher], res: ~[@arb_depth])
-> hashmap<ident,@arb_depth> {
- fn n_rec(&&p_s: parse_sess, &&m: matcher, &&res: ~[@arb_depth],
- &&ret_val: hashmap<ident, @arb_depth>) {
+ fn n_rec(p_s: parse_sess, m: matcher, res: ~[@arb_depth],
+ ret_val: hashmap<ident, @arb_depth>) {
alt m {
{node: mtc_tok(_), span: _} { }
{node: mtc_rep(more_ms, _, _), span: _} {
// doing a lot of array work that will get thrown away
// most of the time.
for ei.matches.eachi() |idx, elt| {
+ let sub = elt.get();
+ // Some subtrees don't contain the name at all
+ if sub.len() == 0u { cont; }
new_pos.matches[idx]
- .push(@seq(elt.get(), mk_sp(ei.sp_lo,sp.hi)));
+ .push(@seq(sub, mk_sp(ei.sp_lo,sp.hi)));
}
new_pos.idx += 1u;
built-in NTs %s or %u other options.",
nts, next_eis.len()]);
} else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
- failure(sp, "No rules expected the token "
- + to_str(*rdr.interner(), tok));
+ ret failure(sp, "No rules expected the token "
+ + to_str(*rdr.interner(), tok));
} else if (next_eis.len() > 0u) {
/* Now process the next token */
while(next_eis.len() > 0u) {
/* this would fail if zero-length tokens existed */
while rdr.peek().sp.lo < rust_parser.span.lo {
rdr.next_token();
+ } /* except for EOF... */
+ while rust_parser.token == EOF && rdr.peek().tok != EOF {
+ rdr.next_token();
}
}
}
} }
"path" { token::w_path(p.parse_path_with_tps(false)) }
"tt" {
- p.quote_depth += 1u;
+ p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
let res = token::w_tt(@p.parse_token_tree());
p.quote_depth -= 1u;
res
--- /dev/null
+import base::{ext_ctxt, mac_result, mr_expr, mr_def, expr_tt};
+import codemap::span;
+import ast::{ident, matcher_, matcher, mtc_tok, mtc_bb, mtc_rep, tt_delim};
+import parse::lexer::{new_tt_reader, tt_reader_as_reader, reader};
+import parse::token::{FAT_ARROW, SEMI, LBRACE, RBRACE, w_mtcs, w_tt};
+import parse::parser::{parser, SOURCE_FILE};
+import earley_parser::{parse, success, failure, arb_depth, seq, leaf};
+import std::map::hashmap;
+
+
+
+// Handles a `macro_rules!`-style definition: parses the invocation body as
+// one or more `lhs => rhs` rules and returns an mr_def installing an
+// expression-position tt-macro that tries those rules in order.
+fn add_new_extension(cx: ext_ctxt, sp: span, name: ident,
+ arg: ~[ast::token_tree]) -> base::mac_result {
+ // these spans won't matter, anyway
+ fn ms(m: matcher_) -> matcher {
+ {node: m, span: {lo: 0u, hi: 0u, expn_info: none}}
+ }
+
+ // The grammar the definition body itself must match: a semicolon-
+ // separated repetition of `$lhs:mtcs => $rhs:tt` rules.
+ let argument_gram = ~[
+ ms(mtc_rep(~[
+ ms(mtc_bb(@"lhs",@"mtcs", 0u)),
+ ms(mtc_tok(FAT_ARROW)),
+ ms(mtc_bb(@"rhs",@"tt", 1u)),
+ ], some(SEMI), false))];
+
+ // Parse the definition body against the grammar above; a failure here
+ // is a malformed macro definition and is fatal.
+ let arg_reader = new_tt_reader(cx.parse_sess().span_diagnostic,
+ cx.parse_sess().interner, none, arg);
+ let arguments = alt parse(cx.parse_sess(), cx.cfg(),
+ arg_reader as reader, argument_gram) {
+ success(m) { m }
+ failure(sp, msg) { cx.span_fatal(sp, msg); }
+ };
+
+ // Extract the matched lhs/rhs sequences (one entry per rule); any other
+ // shape indicates a bug in the grammar/parser, not in user input.
+ let lhses = alt arguments.get(@"lhs") {
+ @seq(s, sp) { s }
+ _ { cx.span_bug(sp, "wrong-structured lhs") }
+ };
+ let rhses = alt arguments.get(@"rhs") {
+ @seq(s, sp) { s }
+ _ { cx.span_bug(sp, "wrong-structured rhs") }
+ };
+
+ // The expander installed for the new macro: try each lhs in order
+ // against the invocation's token trees; on the first match, transcribe
+ // the paired rhs with the matched bindings and parse it as an expr.
+ fn generic_extension(cx: ext_ctxt, sp: span, arg: ~[ast::token_tree],
+ lhses: ~[@arb_depth], rhses: ~[@arb_depth])
+ -> mac_result {
+ // Track the rightmost failure (largest sp.lo) so the error blames
+ // the rule that got furthest before mismatching.
+ let mut best_fail_spot = {lo: 0u, hi: 0u, expn_info: none};
+ let mut best_fail_msg = "internal error: ran no matchers";
+
+ let s_d = cx.parse_sess().span_diagnostic;
+ let itr = cx.parse_sess().interner;
+
+ for lhses.eachi() |i, lhs| {
+ alt lhs {
+ @leaf(w_mtcs(mtcs)) {
+ let arg_rdr = new_tt_reader(s_d, itr, none, arg) as reader;
+ alt parse(cx.parse_sess(), cx.cfg(), arg_rdr, mtcs) {
+ success(m) {
+ let rhs = alt rhses[i] {
+ @leaf(w_tt(@tt)) { tt }
+ _ { cx.span_bug(sp, "bad thing in rhs") }
+ };
+ // Transcribe the rhs under the matched bindings `m`,
+ // then reparse the result as an expression.
+ let trncbr = new_tt_reader(s_d, itr, some(m), ~[rhs]);
+ let p = parser(cx.parse_sess(), cx.cfg(),
+ trncbr as reader, SOURCE_FILE);
+ ret mr_expr(p.parse_expr());
+ }
+ failure(sp, msg) {
+ if sp.lo >= best_fail_spot.lo {
+ best_fail_spot = sp; best_fail_msg = msg;
+ }
+ }
+ }
+ }
+ _ { cx.bug("non-matcher found in parsed lhses"); }
+ }
+ }
+ // No rule matched: report the best (rightmost) failure.
+ cx.span_fatal(best_fail_spot, best_fail_msg);
+ }
+
+ // Close over the parsed rules so every invocation reuses them.
+ let exp = |cx, sp, arg| generic_extension(cx, sp, arg, lhses, rhses);
+
+ ret mr_def({ident: name, ext: expr_tt({expander: exp, span: some(sp)})});
+}
\ No newline at end of file
};
type tt_reader = @{
- span_diagnostic: span_handler,
+ sp_diag: span_handler,
interner: @interner<@str>,
mut cur: tt_frame,
/* for MBE-style macro transcription */
};
/** This can do Macro-By-Example transcription. On the other hand, if
- * `doc` contains no `tt_dotdotdot`s and `tt_interpolate`s, `interp` can (and
+ * `src` contains no `tt_dotdotdot`s and `tt_interpolate`s, `interp` can (and
* should) be none. */
-fn new_tt_reader(span_diagnostic: span_handler, itr: @interner<@str>,
+fn new_tt_reader(sp_diag: span_handler, itr: @interner<@str>,
interp: option<std::map::hashmap<ident,@arb_depth>>,
src: ~[ast::token_tree])
-> tt_reader {
- let r = @{span_diagnostic: span_diagnostic, interner: itr,
+ let r = @{sp_diag: sp_diag, interner: itr,
mut cur: @{readme: src, mut idx: 0u, dotdotdoted: false,
sep: none, up: tt_frame_up(option::none)},
interpolations: alt interp { /* just a convienience */
}
pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
- @{span_diagnostic: r.span_diagnostic, interner: r.interner,
+ @{sp_diag: r.sp_diag, interner: r.interner,
mut cur: dup_tt_frame(r.cur),
interpolations: r.interpolations,
mut repeat_idx: copy r.repeat_idx, mut repeat_len: copy r.repeat_len,
fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
let ret_val = { tok: r.cur_tok, sp: r.cur_span };
- if r.cur.idx >= vec::len(r.cur.readme) {
+ while r.cur.idx >= vec::len(r.cur.readme) {
/* done with this set; pop or repeat? */
if ! r.cur.dotdotdoted
|| r.repeat_idx.last() == r.repeat_len.last() - 1 {
- if r.cur.dotdotdoted {
- vec::pop(r.repeat_idx); vec::pop(r.repeat_len);
- }
+
alt r.cur.up {
tt_frame_up(none) {
r.cur_tok = EOF;
ret ret_val;
}
tt_frame_up(some(tt_f)) {
+ if r.cur.dotdotdoted {
+ vec::pop(r.repeat_idx); vec::pop(r.repeat_len);
+ }
+
r.cur = tt_f;
- /* the outermost `if` would need to be a `while` if we
- didn't know that the last thing in a `tt_delim` is always
- a `tt_flat`, and that a `tt_dotdotdot` is never empty */
r.cur.idx += 1u;
}
}
- } else {
+ } else { /* repeat */
r.cur.idx = 0u;
r.repeat_idx[r.repeat_idx.len() - 1u] += 1u;
alt r.cur.sep {
}
}
}
- /* if `tt_delim`s could be 0-length, we'd need to be able to switch
- between popping and pushing until we got to an actual `tt_flat` */
loop { /* because it's easiest, this handles `tt_delim` not starting
with a `tt_flat`, even though it won't happen */
alt r.cur.readme[r.cur.idx] {
tt_delim(tts) {
r.cur = @{readme: tts, mut idx: 0u, dotdotdoted: false,
sep: none, up: tt_frame_up(option::some(r.cur)) };
+ // if this could be 0-length, we'd need to potentially recur here
}
tt_flat(sp, tok) {
r.cur_span = sp; r.cur_tok = tok;
tt_dotdotdot(sp, tts, sep, zerok) {
alt lockstep_iter_size(tt_dotdotdot(sp, tts, sep, zerok), r) {
lis_unconstrained {
- r.span_diagnostic.span_fatal(
- copy r.cur_span, /* blame macro writer */
+ r.sp_diag.span_fatal(
+ sp, /* blame macro writer */
"attempted to repeat an expression containing no syntax \
variables matched as repeating at this depth");
}
- lis_contradiction(msg) { /* blame macro invoker */
- r.span_diagnostic.span_fatal(sp, msg);
+ lis_contradiction(msg) { /* TODO blame macro invoker instead*/
+ r.sp_diag.span_fatal(sp, msg);
}
lis_constraint(len, _) {
- if len == 0 && !zerok {
- r.span_diagnostic.span_fatal(sp, "this must repeat \
- at least once");
- }
vec::push(r.repeat_len, len);
vec::push(r.repeat_idx, 0u);
r.cur = @{readme: tts, mut idx: 0u, dotdotdoted: true,
sep: sep, up: tt_frame_up(option::some(r.cur)) };
+
+ if len == 0 {
+ if !zerok {
+ r.sp_diag.span_fatal(sp, /* TODO blame invoker */
+ "this must repeat at least \
+ once");
+ }
+ /* we need to pop before we proceed, so recur */
+ ret tt_next_token(r);
+ }
}
}
}
ret ret_val;
}
seq(*) {
- r.span_diagnostic.span_fatal(
+ r.sp_diag.span_fatal(
copy r.cur_span, /* blame the macro writer */
#fmt["variable '%s' is still repeating at this depth",
*ident]);
tt_next_token(self)
}
fn fatal(m: str) -> ! {
- self.span_diagnostic.span_fatal(copy self.cur_span, m);
+ self.sp_diag.span_fatal(copy self.cur_span, m);
}
- fn span_diag() -> span_handler { self.span_diagnostic }
+ fn span_diag() -> span_handler { self.sp_diag }
fn interner() -> @interner<@str> { self.interner }
fn peek() -> {tok: token::token, sp: span} {
{ tok: self.cur_tok, sp: self.cur_span }
/* `!`, as an operator, is prefix, so we know this isn't that */
if self.token == token::NOT {
self.bump();
- let m_body = self.parse_token_tree();
+ let tts = self.parse_unspanned_seq(
+ token::LBRACE, token::RBRACE, seq_sep_none(),
+ |p| p.parse_token_tree());
let hi = self.span.hi;
- ret pexpr(self.mk_mac_expr(lo, hi, mac_invoc_tt(pth,m_body)));
+
+ ret pexpr(self.mk_mac_expr(lo, hi, mac_invoc_tt(pth, tts)));
} else {
hi = pth.span.hi;
ex = expr_path(pth);
self.parse_item_class()
} else if !self.is_any_keyword(copy self.token)
&& self.look_ahead(1) == token::NOT
+ && is_plain_ident(self.look_ahead(2))
{
// item macro.
let pth = self.parse_path_without_tps();
#error("parsing invocation of %s", *pth.idents[0]);
self.expect(token::NOT);
let id = self.parse_ident();
- let tt = self.parse_token_tree();
- let m = ast::mac_invoc_tt(pth, tt);
+ let tts = self.parse_unspanned_seq(token::LBRACE, token::RBRACE,
+ seq_sep_none(),
+ |p| p.parse_token_tree());
+ let m = ast::mac_invoc_tt(pth, tts);
let m: ast::mac = {node: m,
span: {lo: self.span.lo,
hi: self.span.hi,
mod tt {
mod transcribe;
mod earley_parser;
+ mod macro_rules;
}
+
+ mod simplext;
mod fmt;
mod env;
- mod simplext;
mod concat_idents;
mod ident_to_str;
mod log_syntax;
-// This test is brittle!
+// This test is brittle!
// xfail-pretty - the pretty tests lose path information, breaking #include
mod m1 {
fn main() {
assert(#line[] == 11u);
- assert(#col[] == 12u);
+ assert(#col[] == 11u);
assert(#file[].ends_with("syntax-extension-source-utils.rs"));
assert(#stringify[(2*3) + 5] == "2 * 3 + 5");
assert(#include["syntax-extension-source-utils-files/includeme.fragment"]