Allow defining token tree macros. They should work now!
author    Paul Stansifer <paul.stansifer@gmail.com>
          Sat, 7 Jul 2012 01:04:28 +0000 (18:04 -0700)
committer Paul Stansifer <paul.stansifer@gmail.com>
          Tue, 10 Jul 2012 00:44:46 +0000 (17:44 -0700)
12 files changed:
src/libsyntax/ast.rs
src/libsyntax/ext/base.rs
src/libsyntax/ext/expand.rs
src/libsyntax/ext/pipes.rs
src/libsyntax/ext/pipes/parse_proto.rs
src/libsyntax/ext/tt/earley_parser.rs
src/libsyntax/ext/tt/macro_rules.rs [new file with mode: 0644]
src/libsyntax/ext/tt/transcribe.rs
src/libsyntax/parse/lexer.rs
src/libsyntax/parse/parser.rs
src/libsyntax/syntax.rc
src/test/run-pass/syntax-extension-source-utils.rs
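
Editorial note: this is the commit that introduces user-defined token-tree macros via `macro_rules!`. For orientation, here is a minimal example in modern syntax, a direct descendant of the form added here; this is a hedged sketch, since the 2012 dialect differed in surface detail (brace-delimited invocations like `four!{}`, semicolon-separated rules, per the grammar built in macro_rules.rs below):

```rust
// Minimal macro_rules! example (modern syntax; the 2012 dialect differed
// in surface details, e.g. invocations were brace-delimited: four!{}).
macro_rules! four {
    () => { 2 + 2 };
}

fn main() {
    assert_eq!(four!(), 4);
}
```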

index 1941d809d7b8740a89c5f9bbcc384fb0aa5ef272..3df4a2aefa5679b9f6778f4ce0325b0c625cf927 100644 (file)
@@ -413,7 +413,7 @@ enum matcher_ {
 #[auto_serialize]
 enum mac_ {
     mac_invoc(@path, mac_arg, mac_body),
-    mac_invoc_tt(@path, token_tree), //will kill mac_invoc and steal its name
+    mac_invoc_tt(@path,~[token_tree]),//will kill mac_invoc and steal its name
     mac_embed_type(@ty),
     mac_embed_block(blk),
     mac_ellipsis,
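
Editorial note: the AST change above means a tt-style invocation now carries the vector of token trees found between its delimiters (the parser strips the outer braces itself; see parser.rs below) rather than a single delimited tree. A modern-Rust sketch of the era's token_tree shape, reconstructed from the constructor names used in transcribe.rs below; the exact field layout is an assumption, not a quote of ast.rs:

```rust
#![allow(dead_code)]

struct Span;  // stand-in for codemap::span
struct Token; // stand-in for token::token

enum TokenTree {
    Flat(Span, Token),     // a single token
    Delim(Vec<TokenTree>), // a balanced (), [], {} group
    // A `...` repetition: body, optional separator, and the `zerok` flag
    // (may the body repeat zero times?).
    DotDotDot(Span, Vec<TokenTree>, Option<Token>, bool),
}
```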
index afc47d2ecfce9ba792a5afaadc5bd21b4a6f5624..7b08b18596e7dac3495e75a89aa3157f0bd42f87 100644 (file)
@@ -4,8 +4,10 @@
 import codemap::{codemap, span, expn_info, expanded_from};
 import std::map::str_hash;
 
+// second argument is the span to blame for general argument problems
 type syntax_expander_ =
     fn@(ext_ctxt, span, ast::mac_arg, ast::mac_body) -> @ast::expr;
+// second argument is the origin of the macro, if user-defined
 type syntax_expander = {expander: syntax_expander_, span: option<span>};
 
 type macro_def = {ident: ast::ident, ext: syntax_extension};
 type item_decorator =
     fn@(ext_ctxt, span, ast::meta_item, ~[@ast::item]) -> ~[@ast::item];
 
 type syntax_expander_tt = {expander: syntax_expander_tt_, span: option<span>};
-type syntax_expander_tt_ = fn@(ext_ctxt, span, ast::token_tree) -> @ast::expr;
+type syntax_expander_tt_ = fn@(ext_ctxt, span, ~[ast::token_tree])
+    -> mac_result;
 
 type syntax_expander_tt_item
     = {expander: syntax_expander_tt_item_, span: option<span>};
 type syntax_expander_tt_item_
-    = fn@(ext_ctxt, span, ast::ident, ast::token_tree) -> mac_result;
+    = fn@(ext_ctxt, span, ast::ident, ~[ast::token_tree]) -> mac_result;
 
 enum mac_result {
+    mr_expr(@ast::expr),
     mr_item(@ast::item),
     mr_def(macro_def)
 }
@@ -32,7 +36,7 @@ enum syntax_extension {
     macro_defining(macro_definer),
     item_decorator(item_decorator),
 
-    normal_tt(syntax_expander_tt),
+    expr_tt(syntax_expander_tt),
     item_tt(syntax_expander_tt_item),
 }
 
@@ -45,12 +49,15 @@ fn builtin_item_tt(f: syntax_expander_tt_item_) -> syntax_extension {
         item_tt({expander: f, span: none})
     }
     let syntax_expanders = str_hash::<syntax_extension>();
+    syntax_expanders.insert("macro",
+                            macro_defining(ext::simplext::add_new_extension));
+    syntax_expanders.insert("macro_rules",
+                            builtin_item_tt(
+                                ext::tt::macro_rules::add_new_extension));
     syntax_expanders.insert("fmt", builtin(ext::fmt::expand_syntax_ext));
     syntax_expanders.insert("auto_serialize",
                             item_decorator(ext::auto_serialize::expand));
     syntax_expanders.insert("env", builtin(ext::env::expand_syntax_ext));
-    syntax_expanders.insert("macro",
-                            macro_defining(ext::simplext::add_new_extension));
     syntax_expanders.insert("concat_idents",
                             builtin(ext::concat_idents::expand_syntax_ext));
     syntax_expanders.insert("ident_to_str",
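
Editorial note: the key new type above is mac_result. A tt-expander no longer returns an expression directly; it declares what it produced, which is how macro_rules! bootstraps itself (its item_tt expander returns mr_def, a fresh macro to register). A hedged sketch with stand-in payload types:

```rust
// Stand-in payloads; only the three-way shape mirrors mac_result above.
enum MacResult {
    Expr(String),         // mr_expr: use in expression position
    Item(String),         // mr_item: use in item position
    Def { name: String }, // mr_def: register a newly defined macro
}

// expr_tt expanders return a MacResult, so each expansion site can decide
// which result kinds it accepts (see expand.rs below).
type ExprTtExpander = fn(tts: Vec<String>) -> MacResult;
```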
index c6110fbe1a7cb7e7c6d73c1b58240e099b8d3fbb..05c7e6f1c5af6e589b5f06328565ca0d1d0f2144 100644 (file)
@@ -32,7 +32,7 @@ fn expand_expr(exts: hashmap<str, syntax_extension>, cx: ext_ctxt,
                         #fmt["%s can only be used as a decorator", *extname]);
                   }
                   some(normal({expander: exp, span: exp_sp})) {
-                    let expanded = exp(cx, pth.span, args, body);
+                    let expanded = exp(cx, mac.span, args, body);
 
                     cx.bt_push(expanded_from({call_site: s,
                                 callie: {name: *extname, span: exp_sp}}));
@@ -43,11 +43,11 @@ fn expand_expr(exts: hashmap<str, syntax_extension>, cx: ext_ctxt,
                     (fully_expanded, s)
                   }
                   some(macro_defining(ext)) {
-                    let named_extension = ext(cx, pth.span, args, body);
+                    let named_extension = ext(cx, mac.span, args, body);
                     exts.insert(*named_extension.ident, named_extension.ext);
                     (ast::expr_rec(~[], none), s)
                   }
-                  some(normal_tt(_)) {
+                  some(expr_tt(_)) {
                     cx.span_fatal(pth.span,
                                   #fmt["this tt-style macro should be \
                                         invoked '%s!{...}'", *extname])
@@ -58,16 +58,21 @@ fn expand_expr(exts: hashmap<str, syntax_extension>, cx: ext_ctxt,
                   }
                 }
               }
-              mac_invoc_tt(pth, tt) {
-                assert (vec::len(pth.idents) > 0u);
+              mac_invoc_tt(pth, tts) {
+                assert (vec::len(pth.idents) == 1u);
                 let extname = pth.idents[0];
                 alt exts.find(*extname) {
                   none {
                     cx.span_fatal(pth.span,
                                   #fmt["macro undefined: '%s'", *extname])
                   }
-                  some(normal_tt({expander: exp, span: exp_sp})) {
-                    let expanded = exp(cx, pth.span, tt);
+                  some(expr_tt({expander: exp, span: exp_sp})) {
+                    let expanded = alt exp(cx, mac.span, tts) {
+                      mr_expr(e) { e }
+                      _ { cx.span_fatal(
+                          pth.span, #fmt["non-expr macro in expr pos: %s",
+                                         *extname]) }
+                    };
 
                     cx.bt_push(expanded_from({call_site: s,
                                 callie: {name: *extname, span: exp_sp}}));
@@ -113,7 +118,7 @@ fn expand_mod_items(exts: hashmap<str, syntax_extension>, cx: ext_ctxt,
             };
             alt exts.find(*mname) {
               none | some(normal(_)) | some(macro_defining(_))
-              | some(normal_tt(_)) | some(item_tt(*)) {
+              | some(expr_tt(_)) | some(item_tt(*)) {
                 items
               }
 
@@ -159,7 +164,7 @@ fn expand_item_mac(exts: hashmap<str, syntax_extension>,
                    cx: ext_ctxt, &&it: @ast::item,
                    fld: ast_fold) -> option<@ast::item> {
     alt it.node {
-      item_mac({node: mac_invoc_tt(pth, tt), span}) {
+      item_mac({node: mac_invoc_tt(pth, tts), span}) {
         let extname = pth.idents[0];
         alt exts.find(*extname) {
           none {
@@ -167,17 +172,20 @@ fn expand_item_mac(exts: hashmap<str, syntax_extension>,
                           #fmt("macro undefined: '%s'", *extname))
           }
           some(item_tt(expand)) {
+            let expanded = expand.expander(cx, it.span, it.ident, tts);
             cx.bt_push(expanded_from({call_site: it.span,
                                       callie: {name: *extname,
                                                span: expand.span}}));
-            let maybe_it =
-                alt expand.expander(cx, it.span, it.ident, tt) {
-                  mr_item(it) { fld.fold_item(it) }
-                  mr_def(mdef) {
-                    exts.insert(*mdef.ident, mdef.ext);
-                    none
-                  }
-                };
+            let maybe_it = alt expanded {
+              mr_item(it) { fld.fold_item(it) }
+              mr_expr(e) { cx.span_fatal(pth.span,
+                                         "expr macro in item position: " +
+                                         *extname) }
+              mr_def(mdef) {
+                exts.insert(*mdef.ident, mdef.ext);
+                none
+              }
+            };
             cx.bt_pop();
             ret maybe_it
           }
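
Editorial note: the two dispatch sites above enforce position. Expression position accepts only mr_expr; item position folds mr_item, registers mr_def, and rejects mr_expr. A sketch with stand-in types:

```rust
enum MacResult { Expr(String), Item(String), Def { name: String } }

fn in_expr_position(r: MacResult) -> Result<String, &'static str> {
    match r {
        MacResult::Expr(e) => Ok(e),
        _ => Err("non-expr macro in expr pos"),
    }
}

fn in_item_position(r: MacResult) -> Result<Option<String>, &'static str> {
    match r {
        MacResult::Item(item) => Ok(Some(item)),
        MacResult::Def { name } => {
            let _ = name; // would be inserted into the extension table
            Ok(None)      // a definition expands to no item
        }
        MacResult::Expr(_) => Err("expr macro in item position"),
    }
}
```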
index 61c154ab15eb95b0d020e608463a91fb13ecd312..3a6a2ff7a5239ba9bfcf00db72b792feb342d172 100644 (file)
 
 import pipes::pipec::methods;
 
-fn expand_proto(cx: ext_ctxt, _sp: span, id: ast::ident, tt: ast::token_tree)
-    -> base::mac_result
+fn expand_proto(cx: ext_ctxt, _sp: span, id: ast::ident,
+                tt: ~[ast::token_tree]) -> base::mac_result
 {
     let sess = cx.parse_sess();
     let cfg = cx.cfg();
-    let body_core = alt tt { tt_delim(tts) { tts } _ {fail}};
     let tt_rdr = new_tt_reader(cx.parse_sess().span_diagnostic,
-                               cx.parse_sess().interner,
-                               none,
-                               body_core);
+                               cx.parse_sess().interner, none, tt);
     let rdr = tt_rdr as reader;
     let rust_parser = parser(sess, cfg, rdr.dup(), SOURCE_FILE);
 
index abf19825fdd27736fc1158f95f61694dad7a69e4..420de449a5cbac9ab7a01808af848ebea29da3b8 100644 (file)
@@ -10,10 +10,9 @@ impl proto_parser for parser {
     fn parse_proto(id: ident) -> protocol {
         let proto = protocol(id);
 
-        self.parse_unspanned_seq(token::LBRACE,
-                                 token::RBRACE,
-                                 {sep: none, trailing_sep_allowed: false},
-                                 |self| self.parse_state(proto));
+        self.parse_seq_to_before_end(token::EOF,
+                                     {sep: none, trailing_sep_allowed: false},
+                                     |self| self.parse_state(proto));
 
         ret proto;
     }
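
Editorial note on why the delimiters changed: the invocation parser now consumes the outer braces itself and hands the expander a bare token stream, so the proto parser reads states until end-of-input instead of matching `{ ... }`. A generic sketch, with `None` standing in for token::EOF:

```rust
fn parse_states<T>(mut next_state: impl FnMut() -> Option<T>) -> Vec<T> {
    let mut states = Vec::new();
    while let Some(s) = next_state() {
        states.push(s); // parse_seq_to_before_end stops just before EOF
    }
    states
}
```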
index f890661e2390f3ab31c776cdae5e26501dfb77af..d26f5dc0e3c4108c6235c051d9d33a5607a9545c 100644 (file)
@@ -72,11 +72,10 @@ enum arb_depth { leaf(whole_nt), seq(~[@arb_depth], codemap::span) }
 
 type earley_item = matcher_pos;
 
-
-fn nameize(&&p_s: parse_sess, ms: ~[matcher], &&res: ~[@arb_depth])
+fn nameize(p_s: parse_sess, ms: ~[matcher], res: ~[@arb_depth])
     -> hashmap<ident,@arb_depth> {
-    fn n_rec(&&p_s: parse_sess, &&m: matcher, &&res: ~[@arb_depth],
-             &&ret_val: hashmap<ident, @arb_depth>) {
+    fn n_rec(p_s: parse_sess, m: matcher, res: ~[@arb_depth],
+             ret_val: hashmap<ident, @arb_depth>) {
         alt m {
           {node: mtc_tok(_), span: _} { }
           {node: mtc_rep(more_ms, _, _), span: _} {
@@ -142,8 +141,11 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
                         // doing a lot of array work that will get thrown away
                         // most of the time.
                         for ei.matches.eachi() |idx, elt| {
+                            let sub = elt.get();
+                            // Some subtrees don't contain the name at all
+                            if sub.len() == 0u { cont; }
                             new_pos.matches[idx]
-                                .push(@seq(elt.get(), mk_sp(ei.sp_lo,sp.hi)));
+                                .push(@seq(sub, mk_sp(ei.sp_lo,sp.hi)));
                         }
 
                         new_pos.idx += 1u;
@@ -221,8 +223,8 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
                      built-in NTs %s or %u other options.",
                     nts, next_eis.len()]);
             } else if (bb_eis.len() == 0u && next_eis.len() == 0u) {
-                failure(sp, "No rules expected the token "
-                        + to_str(*rdr.interner(), tok));
+                ret failure(sp, "No rules expected the token "
+                            + to_str(*rdr.interner(), tok));
             } else if (next_eis.len() > 0u) {
                 /* Now process the next token */
                 while(next_eis.len() > 0u) {
@@ -246,6 +248,9 @@ fn parse(sess: parse_sess, cfg: ast::crate_cfg, rdr: reader, ms: ~[matcher])
                 /* this would fail if zero-length tokens existed */
                 while rdr.peek().sp.lo < rust_parser.span.lo {
                     rdr.next_token();
+                } /* except for EOF... */
+                while rust_parser.token == EOF && rdr.peek().tok != EOF {
+                    rdr.next_token();
                 }
             }
         }
@@ -273,7 +278,7 @@ fn parse_nt(p: parser, name: str) -> whole_nt {
       } }
       "path" { token::w_path(p.parse_path_with_tps(false)) }
       "tt" {
-        p.quote_depth += 1u;
+        p.quote_depth += 1u; //but in theory, non-quoted tts might be useful
         let res = token::w_tt(@p.parse_token_tree());
         p.quote_depth -= 1u;
         res
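
Editorial note: the binding structure this matcher produces is the arb_depth enum at the top of the file. A plain nonterminal binds to a leaf, and each enclosing repetition wraps one more seq layer. A modern-Rust sketch with a stand-in payload:

```rust
enum ArbDepth<Nt> {
    Leaf(Nt),               // one matched nonterminal
    Seq(Vec<ArbDepth<Nt>>), // one layer per enclosing repetition
}

// Matching `$( $x:expr ),*` (modern notation) against `1, 2, 3` binds x to:
fn x_binding() -> ArbDepth<u32> {
    ArbDepth::Seq(vec![
        ArbDepth::Leaf(1),
        ArbDepth::Leaf(2),
        ArbDepth::Leaf(3),
    ])
}
```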
diff --git a/src/libsyntax/ext/tt/macro_rules.rs b/src/libsyntax/ext/tt/macro_rules.rs
new file mode 100644 (file)
index 0000000..822e8e0
--- /dev/null
@@ -0,0 +1,83 @@
+import base::{ext_ctxt, mac_result, mr_expr, mr_def, expr_tt};
+import codemap::span;
+import ast::{ident, matcher_, matcher, mtc_tok, mtc_bb, mtc_rep, tt_delim};
+import parse::lexer::{new_tt_reader, tt_reader_as_reader, reader};
+import parse::token::{FAT_ARROW, SEMI, LBRACE, RBRACE, w_mtcs, w_tt};
+import parse::parser::{parser, SOURCE_FILE};
+import earley_parser::{parse, success, failure, arb_depth, seq, leaf};
+import std::map::hashmap;
+
+
+
+fn add_new_extension(cx: ext_ctxt, sp: span, name: ident,
+                     arg: ~[ast::token_tree]) -> base::mac_result {
+    // these spans won't matter, anyways
+    fn ms(m: matcher_) -> matcher {
+        {node: m, span: {lo: 0u, hi: 0u, expn_info: none}}
+    }
+
+    let argument_gram = ~[
+        ms(mtc_rep(~[
+            ms(mtc_bb(@"lhs",@"mtcs", 0u)),
+            ms(mtc_tok(FAT_ARROW)),
+            ms(mtc_bb(@"rhs",@"tt", 1u)),
+        ], some(SEMI), false))];
+
+    let arg_reader = new_tt_reader(cx.parse_sess().span_diagnostic,
+                                   cx.parse_sess().interner, none, arg);
+    let arguments = alt parse(cx.parse_sess(), cx.cfg(),
+                              arg_reader as reader, argument_gram) {
+      success(m) { m }
+      failure(sp, msg) { cx.span_fatal(sp, msg); }
+    };
+
+    let lhses = alt arguments.get(@"lhs") {
+      @seq(s, sp) { s }
+      _ { cx.span_bug(sp, "wrong-structured lhs") }
+    };
+    let rhses = alt arguments.get(@"rhs") {
+      @seq(s, sp) { s }
+      _ { cx.span_bug(sp, "wrong-structured rhs") }
+    };
+
+    fn generic_extension(cx: ext_ctxt, sp: span, arg: ~[ast::token_tree],
+                         lhses: ~[@arb_depth], rhses: ~[@arb_depth])
+    -> mac_result {
+        let mut best_fail_spot = {lo: 0u, hi: 0u, expn_info: none};
+        let mut best_fail_msg = "internal error: ran no matchers";
+
+        let s_d = cx.parse_sess().span_diagnostic;
+        let itr = cx.parse_sess().interner;
+
+        for lhses.eachi() |i, lhs| {
+            alt lhs {
+              @leaf(w_mtcs(mtcs)) {
+                let arg_rdr = new_tt_reader(s_d, itr, none, arg) as reader;
+                alt parse(cx.parse_sess(), cx.cfg(), arg_rdr, mtcs) {
+                  success(m) {
+                    let rhs = alt rhses[i] {
+                      @leaf(w_tt(@tt)) { tt }
+                      _ { cx.span_bug(sp, "bad thing in rhs") }
+                    };
+                    let trncbr = new_tt_reader(s_d, itr, some(m), ~[rhs]);
+                    let p = parser(cx.parse_sess(), cx.cfg(),
+                                   trncbr as reader, SOURCE_FILE);
+                    ret mr_expr(p.parse_expr());
+                  }
+                  failure(sp, msg) {
+                    if sp.lo >= best_fail_spot.lo {
+                        best_fail_spot = sp; best_fail_msg = msg;
+                    }
+                  }
+                }
+              }
+              _ { cx.bug("non-matcher found in parsed lhses"); }
+            }
+        }
+        cx.span_fatal(best_fail_spot, best_fail_msg);
+    }
+
+    let exp = |cx, sp, arg| generic_extension(cx, sp, arg, lhses, rhses);
+
+    ret mr_def({ident: name, ext: expr_tt({expander: exp, span: some(sp)})});
+}
\ No newline at end of file
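
Editorial note: two things happen in this new file. First, the definition body is itself matched against argument_gram, which in modern notation is roughly `$( $lhs => $rhs );*`. Second, generic_extension tries each stored lhs in order against every invocation, keeping the failure that got furthest for the error message. A control-flow sketch with stand-in types:

```rust
struct Failure { pos: usize, msg: String }

// First matching rule wins; on total failure, report the message from the
// rule whose match progressed furthest (best_fail_spot / best_fail_msg).
fn expand(
    rules: &[(fn(&str) -> Result<String, Failure>, &'static str)],
    input: &str,
) -> Result<String, String> {
    let mut best = Failure { pos: 0, msg: "internal error: ran no matchers".into() };
    for (try_lhs, _rhs) in rules {
        match try_lhs(input) {
            Ok(expansion) => return Ok(expansion),
            Err(f) if f.pos >= best.pos => best = f,
            Err(_) => {}
        }
    }
    Err(best.msg)
}
```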
index e84bc5c5421ae1bef04aba058b8f5e5d3464e378..8924c5820a946c35824cf1b3062d61368e95b503 100644 (file)
@@ -23,7 +23,7 @@ enum tt_frame_up { /* to break a circularity */
 };
 
 type tt_reader = @{
-    span_diagnostic: span_handler,
+    sp_diag: span_handler,
     interner: @interner<@str>,
     mut cur: tt_frame,
     /* for MBE-style macro transcription */
@@ -36,13 +36,13 @@ enum tt_frame_up { /* to break a circularity */
 };
 
 /** This can do Macro-By-Example transcription. On the other hand, if
- *  `doc` contains no `tt_dotdotdot`s and `tt_interpolate`s, `interp` can (and
+ *  `src` contains no `tt_dotdotdot`s and `tt_interpolate`s, `interp` can (and
  *  should) be none. */
-fn new_tt_reader(span_diagnostic: span_handler, itr: @interner<@str>,
+fn new_tt_reader(sp_diag: span_handler, itr: @interner<@str>,
                  interp: option<std::map::hashmap<ident,@arb_depth>>,
                  src: ~[ast::token_tree])
     -> tt_reader {
-    let r = @{span_diagnostic: span_diagnostic, interner: itr,
+    let r = @{sp_diag: sp_diag, interner: itr,
               mut cur: @{readme: src, mut idx: 0u, dotdotdoted: false,
                          sep: none, up: tt_frame_up(option::none)},
              interpolations: alt interp { /* just a convenience */
@@ -70,7 +70,7 @@ fn new_tt_reader(span_diagnostic: span_handler, itr: @interner<@str>,
 }
 
 pure fn dup_tt_reader(&&r: tt_reader) -> tt_reader {
-    @{span_diagnostic: r.span_diagnostic, interner: r.interner,
+    @{sp_diag: r.sp_diag, interner: r.interner,
       mut cur: dup_tt_frame(r.cur),
       interpolations: r.interpolations,
       mut repeat_idx: copy r.repeat_idx, mut repeat_len: copy r.repeat_len,
@@ -132,28 +132,27 @@ fn lis_merge(lhs: lis, rhs: lis) -> lis {
 
 fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
     let ret_val = { tok: r.cur_tok, sp: r.cur_span };
-    if r.cur.idx >= vec::len(r.cur.readme) {
+    while r.cur.idx >= vec::len(r.cur.readme) {
         /* done with this set; pop or repeat? */
         if ! r.cur.dotdotdoted
             || r.repeat_idx.last() == r.repeat_len.last() - 1 {
-            if r.cur.dotdotdoted {
-                vec::pop(r.repeat_idx); vec::pop(r.repeat_len);
-            }
+
             alt r.cur.up {
               tt_frame_up(none) {
                 r.cur_tok = EOF;
                 ret ret_val;
               }
               tt_frame_up(some(tt_f)) {
+                if r.cur.dotdotdoted {
+                    vec::pop(r.repeat_idx); vec::pop(r.repeat_len);
+                }
+
                 r.cur = tt_f;
-                /* the outermost `if` would need to be a `while` if we
-                didn't know that the last thing in a `tt_delim` is always
-                a `tt_flat`, and that a `tt_dotdotdot` is never empty */
                 r.cur.idx += 1u;
               }
             }
 
-        } else {
+        } else { /* repeat */
             r.cur.idx = 0u;
             r.repeat_idx[r.repeat_idx.len() - 1u] += 1u;
             alt r.cur.sep {
@@ -165,14 +164,13 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
             }
         }
     }
-    /* if `tt_delim`s could be 0-length, we'd need to be able to switch
-    between popping and pushing until we got to an actual `tt_flat` */
     loop { /* because it's easiest, this handles `tt_delim` not starting
     with a `tt_flat`, even though it won't happen */
         alt r.cur.readme[r.cur.idx] {
           tt_delim(tts) {
             r.cur = @{readme: tts, mut idx: 0u, dotdotdoted: false,
                       sep: none, up: tt_frame_up(option::some(r.cur)) };
+            // if this could be 0-length, we'd need to potentially recur here
           }
           tt_flat(sp, tok) {
             r.cur_span = sp; r.cur_tok = tok;
@@ -182,23 +180,29 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
           tt_dotdotdot(sp, tts, sep, zerok) {
             alt lockstep_iter_size(tt_dotdotdot(sp, tts, sep, zerok), r) {
               lis_unconstrained {
-                r.span_diagnostic.span_fatal(
-                    copy r.cur_span, /* blame macro writer */
+                r.sp_diag.span_fatal(
+                    sp, /* blame macro writer */
                     "attempted to repeat an expression containing no syntax \
                      variables matched as repeating at this depth");
               }
-              lis_contradiction(msg) { /* blame macro invoker */
-                r.span_diagnostic.span_fatal(sp, msg);
+              lis_contradiction(msg) { /* TODO blame macro invoker instead*/
+                r.sp_diag.span_fatal(sp, msg);
               }
               lis_constraint(len, _) {
-                if len == 0 && !zerok {
-                    r.span_diagnostic.span_fatal(sp, "this must repeat \
-                                                      at least once");
-                }
                 vec::push(r.repeat_len, len);
                 vec::push(r.repeat_idx, 0u);
                 r.cur = @{readme: tts, mut idx: 0u, dotdotdoted: true,
                           sep: sep, up: tt_frame_up(option::some(r.cur)) };
+
+                if len == 0 {
+                    if !zerok {
+                        r.sp_diag.span_fatal(sp, /* TODO blame invoker */
+                                             "this must repeat at least \
+                                              once");
+                    }
+                    /* we need to pop before we proceed, so recur */
+                    ret tt_next_token(r);
+                }
               }
             }
           }
@@ -219,7 +223,7 @@ fn tt_next_token(&&r: tt_reader) -> {tok: token, sp: span} {
                 ret ret_val;
               }
               seq(*) {
-                r.span_diagnostic.span_fatal(
+                r.sp_diag.span_fatal(
                     copy r.cur_span, /* blame the macro writer */
                     #fmt["variable '%s' is still repeating at this depth",
                          *ident]);
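
Editorial note: the invariant behind lockstep_iter_size and lis_merge above is that every `$var` under one repetition must repeat the same number of times. An unconstrained side adopts the other's constraint, and two different counts are a contradiction. A sketch of the merge rule:

```rust
enum Lis {
    Unconstrained,
    Constraint(usize, String), // repeat count, and the variable that fixed it
    Contradiction(String),
}

fn lis_merge(lhs: Lis, rhs: Lis) -> Lis {
    match (lhs, rhs) {
        (Lis::Unconstrained, other) | (other, Lis::Unconstrained) => other,
        (c @ Lis::Contradiction(_), _) | (_, c @ Lis::Contradiction(_)) => c,
        (Lis::Constraint(l, ln), Lis::Constraint(r, rn)) => {
            if l == r {
                Lis::Constraint(l, ln)
            } else {
                Lis::Contradiction(format!(
                    "'{ln}' repeats {l} times, but '{rn}' repeats {r} times"
                ))
            }
        }
    }
}
```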
index 600aa9f4c42a47466cb58ca72e28f003b6312720..b3ba35dea2ef797e92aad87bcc498e1792512f6f 100644 (file)
@@ -98,9 +98,9 @@ fn next_token() -> {tok: token::token, sp: span} {
         tt_next_token(self)
     }
     fn fatal(m: str) -> ! {
-        self.span_diagnostic.span_fatal(copy self.cur_span, m);
+        self.sp_diag.span_fatal(copy self.cur_span, m);
     }
-    fn span_diag() -> span_handler { self.span_diagnostic }
+    fn span_diag() -> span_handler { self.sp_diag }
     fn interner() -> @interner<@str> { self.interner }
     fn peek() -> {tok: token::token, sp: span} {
         { tok: self.cur_tok, sp: self.cur_span }
index a2d0ad33afc8b5c022a791a3394bdc27e33fafd6..b800dc7fda3adaba0348be26c7c38884cc7a5ccd 100644 (file)
@@ -1022,9 +1022,12 @@ fn parse_bottom_expr() -> pexpr {
             /* `!`, as an operator, is prefix, so we know this isn't that */
             if self.token == token::NOT {
                 self.bump();
-                let m_body = self.parse_token_tree();
+                let tts = self.parse_unspanned_seq(
+                    token::LBRACE, token::RBRACE, seq_sep_none(),
+                    |p| p.parse_token_tree());
                 let hi = self.span.hi;
-                ret pexpr(self.mk_mac_expr(lo, hi, mac_invoc_tt(pth,m_body)));
+
+                ret pexpr(self.mk_mac_expr(lo, hi, mac_invoc_tt(pth, tts)));
             } else {
                 hi = pth.span.hi;
                 ex = expr_path(pth);
@@ -2642,14 +2645,17 @@ fn parse_item(+attrs: ~[attribute], vis: visibility)
             self.parse_item_class()
         } else if !self.is_any_keyword(copy self.token)
             && self.look_ahead(1) == token::NOT
+            && is_plain_ident(self.look_ahead(2))
         {
             // item macro.
             let pth = self.parse_path_without_tps();
             #error("parsing invocation of %s", *pth.idents[0]);
             self.expect(token::NOT);
             let id = self.parse_ident();
-            let tt = self.parse_token_tree();
-            let m = ast::mac_invoc_tt(pth, tt);
+            let tts = self.parse_unspanned_seq(token::LBRACE, token::RBRACE,
+                                               seq_sep_none(),
+                                               |p| p.parse_token_tree());
+            let m = ast::mac_invoc_tt(pth, tts);
             let m: ast::mac = {node: m,
                                span: {lo: self.span.lo,
                                       hi: self.span.hi,
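
Editorial note: both invocation sites now collect a sequence of token trees up to the closing brace (seq_sep_none: no separators between trees). A toy sketch of that collection, with characters standing in for tokens:

```rust
#[derive(Debug, PartialEq)]
enum Tt {
    Flat(char),     // a single token
    Delim(Vec<Tt>), // a balanced { ... } group
}

fn parse_tts(src: &mut std::iter::Peekable<std::str::Chars<'_>>) -> Vec<Tt> {
    let mut tts = Vec::new();
    while let Some(&c) = src.peek() {
        match c {
            '}' => break, // the caller eats the closing brace
            '{' => {
                src.next(); // eat '{'
                let inner = parse_tts(src);
                src.next(); // eat '}'
                tts.push(Tt::Delim(inner));
            }
            _ => {
                src.next();
                tts.push(Tt::Flat(c));
            }
        }
    }
    tts
}
// parse_tts over "a{b}c" yields [Flat('a'), Delim([Flat('b')]), Flat('c')].
```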
index 7c453cc96e45302772173df148a978aea3f1e5b9..b1a1677dfed7a821035433ef74596a2c37e045b2 100644 (file)
@@ -68,11 +68,13 @@ mod ext {
     mod tt {
         mod transcribe;
         mod earley_parser;
+        mod macro_rules;
     }
 
+
+    mod simplext;
     mod fmt;
     mod env;
-    mod simplext;
     mod concat_idents;
     mod ident_to_str;
     mod log_syntax;
index d784a25de73e6983d2855986e83cbc5916bc7680..29bd6e65f4f03f14aee8afb640c900c841be6b09 100644 (file)
@@ -1,4 +1,4 @@
-// This test is brittle! 
+// This test is brittle!
 // xfail-pretty - the pretty tests lose path information, breaking #include
 
 mod m1 {
@@ -9,7 +9,7 @@ fn where_am_i() -> str { #mod[] }
 
 fn main() {
     assert(#line[] == 11u);
-    assert(#col[] == 12u);
+    assert(#col[] == 11u);
     assert(#file[].ends_with("syntax-extension-source-utils.rs"));
     assert(#stringify[(2*3) + 5] == "2 * 3 + 5");
     assert(#include["syntax-extension-source-utils-files/includeme.fragment"]