$(findstring tidy,$(MAKECMDGOALS))),)
CFG_INFO := $(info cfg: including test rules)
include $(CFG_SRC_DIR)mk/tests.mk
+ include $(CFG_SRC_DIR)mk/grammar.mk
endif
# Performance and benchmarking
include $(CFG_SRC_DIR)mk/clean.mk
endif
-# Grammar tests
-
-ifneq ($(findstring lexer,$(MAKECMDGOALS)),)
- ifdef CFG_JAVAC
- ifdef CFG_ANTLR4
- ifdef CFG_GRUN
- CFG_INFO := $(info cfg: including grammar tests)
- include $(CFG_SRC_DIR)mk/grammar.mk
- endif
- endif
- endif
-endif
-
# CTAGS building
ifneq ($(strip $(findstring TAGS.emacs,$(MAKECMDGOALS)) \
$(findstring TAGS.vi,$(MAKECMDGOALS))),)
$(Q)$(RUSTC) -O --out-dir $(BG) -L $(L) $(SG)verify.rs
check-lexer: $(BG) $(BG)RustLexer.class $(BG)verify
+ifdef CFG_JAVAC
+ifdef CFG_ANTLR4
+ifdef CFG_GRUN
$(info Verifying libsyntax against the reference lexer ...)
- $(Q)find $(S) -iname '*.rs' -exec "$(SG)check.sh" {} "$(BG)" \
- "$(CFG_GRUN)" "$(BG)verify" "$(BG)RustLexer.tokens" "$(VERBOSE)" \;
+ $(Q)$(SG)check.sh $(S) "$(BG)" \
+ "$(CFG_GRUN)" "$(BG)verify" "$(BG)RustLexer.tokens"
+else
+$(info grun not available, skipping lexer test...)
+endif
+else
+$(info antlr4 not available, skipping lexer test...)
+endif
+else
+$(info javac not available, skipping lexer test...)
+endif
# Main test targets
######################################################################
-check: cleantmptestlogs cleantestlibs check-notidy tidy
+check: cleantmptestlogs cleantestlibs check-notidy tidy check-syntax
check-notidy: cleantmptestlogs cleantestlibs all check-stage2
$(Q)$(CFG_PYTHON) $(S)src/etc/check-summary.py tmp/*.log
# NOTE: Remove after reprogramming windows bots
check-fast: check-lite
+check-syntax: check-lexer
+
.PHONY: cleantmptestlogs cleantestlibs
cleantmptestlogs:
# Run the reference lexer against libsyntax and compare the tokens and spans.
# If "// ignore-lexer-test" is present in the file, it will be ignored.
-#
+
+
# Argument $1 is the file to check, $2 is the classpath to use, $3 is the path
# to the grun binary, $4 is the path to the verify binary, $5 is the path to
# RustLexer.tokens
-
if [ "${VERBOSE}" == "1" ]; then
set -x
fi
-grep -q "// ignore lexer-test" $1;
+check() {
+ grep --silent "// ignore-lexer-test" $1;
-if [ $? -eq 1 ]; then
- cd $2 # This `cd` is so java will pick up RustLexer.class. I couldn't
- # figure out how to wrangle the CLASSPATH, just adding build/grammr didn't
- # seem to have anny effect.
- $3 RustLexer tokens -tokens < $1 | $4 $1 $5
-fi
+ # if it's *not* found...
+ if [ $? -eq 1 ]; then
+ cd $2 # This `cd` is so java will pick up RustLexer.class. I couldn't
- # figure out how to wrangle the CLASSPATH, just adding build/grammar didn't
- # seem to have any effect.
+ if $3 RustLexer tokens -tokens < $1 | $4 $1 $5; then
+ echo "pass: $1"
+ else
+ echo "fail: $1"
+ fi
+ else
+ echo "skip: $1"
+ fi
+}
+
+for file in $(find $1 -iname '*.rs' ! -path '*/test/compile-fail/*' ); do
+ check $file $2 $3 $4 $5
+done
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
#![feature(globs, phase, macro_rules)]
extern crate syntax;
}
fn parse_antlr_token(s: &str, tokens: &HashMap<String, Token>) -> TokenAndSpan {
- let re = regex!(r"\[@(?P<seq>\d+),(?P<start>\d+):(?P<end>\d+)='(?P<content>.+?)',<(?P<toknum>-?\d+)>,\d+:\d+]");
+ let re = regex!(
+ r"\[@(?P<seq>\d+),(?P<start>\d+):(?P<end>\d+)='(?P<content>.+?)',<(?P<toknum>-?\d+)>,\d+:\d+]"
+ );
let m = re.captures(s).expect(format!("The regex didn't match {}", s).as_slice());
let start = m.name("start");
let toknum = m.name("toknum");
let content = m.name("content");
- let proto_tok = tokens.find_equiv(&toknum).expect(format!("didn't find token {} in the map", toknum).as_slice());
+ let proto_tok = tokens.find_equiv(&toknum).expect(format!("didn't find token {} in the map",
+ toknum).as_slice());
let nm = parse::token::intern(content);
let token_map = parse_token_list(token_file.read_to_string().unwrap().as_slice());
let mut stdin = std::io::stdin();
- let mut antlr_tokens = stdin.lines().map(|l| parse_antlr_token(l.unwrap().as_slice().trim(), &token_map));
+ let mut antlr_tokens = stdin.lines().map(|l| parse_antlr_token(l.unwrap().as_slice().trim(),
+ &token_map));
let code = File::open(&Path::new(args.get(1).as_slice())).unwrap().read_to_string().unwrap();
let options = config::basic_options();
continue
}
- assert!(rustc_tok.sp == antlr_tok.sp, "{} and {} have different spans", rustc_tok, antlr_tok);
+ assert!(rustc_tok.sp == antlr_tok.sp, "{} and {} have different spans", rustc_tok,
+ antlr_tok);
macro_rules! matches (
( $($x:pat),+ ) => (