From 31a7a3343b11e3f4f1ae2cd41c34f9c619973ff1 Mon Sep 17 00:00:00 2001
From: Paul Beckingham
Date: Sat, 28 Mar 2015 16:26:50 -0400
Subject: [PATCH] Tests: Converted ordinal tests

---
 test/lexer.t.cpp | 22 +++++-----------------
 1 file changed, 5 insertions(+), 17 deletions(-)

diff --git a/test/lexer.t.cpp b/test/lexer.t.cpp
index a39964f6f..f4e279536 100644
--- a/test/lexer.t.cpp
+++ b/test/lexer.t.cpp
@@ -36,7 +36,7 @@ Context context;
 ////////////////////////////////////////////////////////////////////////////////
 int main (int argc, char** argv)
 {
-  UnitTest t (640);
+  UnitTest t (647);
 
   std::vector <std::pair <std::string, Lexer::Type>> tokens;
   std::string token;
@@ -309,22 +309,6 @@ int main (int argc, char** argv)
   t.is (tokens[20].first, ")", "tokens[20] == ')'");
   t.is ((int) tokens[20].second, (int)Lexer::Type::op, "tokens[20] == Type::op"); // 170
 
-  // Test ordinal dates.
-  Lexer l8 ("9th 10th");
-  l8.ambiguity (false);
-  tokens.clear ();
-  while (l8.token (token, type))
-  {
-    std::cout << "# «" << token << "» " << Lexer::typeName (type) << "\n";
-    tokens.push_back (std::pair <std::string, Lexer::Type> (token, type));
-  }
-
-  t.is ((int)tokens.size (), 2, "2 tokens");
-  t.is (tokens[0].first, "9th", "tokens[0] == '9th'");
-  t.is ((int) tokens[0].second, (int) Lexer::Type::word, "tokens[0] == Type::word");
-  t.is (tokens[1].first, "10th", "tokens[1] == '10th'");
-  t.is ((int) tokens[1].second, (int) Lexer::Type::word, "tokens[1] == Type::word");
-
   // Test tag recognition.
   Lexer l9 ("+with -WITHOUT + 2");
   l9.ambiguity (false);
@@ -398,6 +382,10 @@ int main (int argc, char** argv)
     // Path
     { "/long/path/to/file.txt", { { "/long/path/to/file.txt", Lexer::Type::path }, NO, NO, NO, NO }, },
 
+    // Word
+    { "9th", { { "9th", Lexer::Type::word }, NO, NO, NO, NO }, },
+    { "10th", { { "10th", Lexer::Type::word }, NO, NO, NO, NO }, },
+
     // DOM
     { "foo", { { "foo", Lexer::Type::dom }, NO, NO, NO, NO }, },
     { "Çirçös", { { "Çirçös", Lexer::Type::dom }, NO, NO, NO, NO }, },
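
Note on the change: the deleted l8 block exercised "9th" and "10th" imperatively, one token at a time, while the two new rows express the same expectations as entries in the data-driven test table (the block that already holds the Path and DOM rows). The standalone sketch below shows that table-driven pattern in miniature; the TokenType enum, the lex () stub, and the TestCase struct are illustrative stand-ins for this note only, not the actual Taskwarrior Lexer API or its UnitTest harness.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for Lexer::Type; the real enum has many more members.
enum class TokenType { word, number, op };

// Hypothetical tokenizer stub: splits on spaces and calls every chunk a
// 'word', which is all this sketch needs to mirror the "9th"/"10th" rows.
static std::vector <std::pair <std::string, TokenType>> lex (const std::string& input)
{
  std::vector <std::pair <std::string, TokenType>> out;
  std::string current;
  for (char c : input)
  {
    if (c == ' ')
    {
      if (! current.empty ()) out.push_back ({current, TokenType::word});
      current.clear ();
    }
    else
      current += c;
  }
  if (! current.empty ()) out.push_back ({current, TokenType::word});
  return out;
}

int main ()
{
  // Each row pairs an input with the token stream it should produce, in the
  // spirit of the table rows added by the patch.
  struct TestCase
  {
    std::string input;
    std::vector <std::pair <std::string, TokenType>> expected;
  };

  std::vector <TestCase> cases =
  {
    { "9th",  { { "9th",  TokenType::word } } },
    { "10th", { { "10th", TokenType::word } } },
  };

  int failures = 0;
  for (const auto& c : cases)
    if (lex (c.input) != c.expected)
    {
      std::cout << "FAIL: " << c.input << "\n";
      ++failures;
    }

  std::cout << (failures ? "some tests failed" : "all tests passed") << "\n";
  return failures ? 1 : 0;
}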