mirror of
https://github.com/GothenburgBitFactory/timewarrior.git
synced 2025-06-26 10:54:28 +02:00
Tests: Added Lexer::Tokenize
This commit is contained in:
parent
774b59f37e
commit
1113406b7b
1 changed file with 11 additions and 1 deletion
|
@ -34,7 +34,7 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
int main (int, char**)
|
||||
{
|
||||
UnitTest t (344);
|
||||
UnitTest t (351);
|
||||
|
||||
std::vector <std::pair <std::string, Lexer::Type>> tokens;
|
||||
std::string token;
|
||||
|
@ -268,6 +268,16 @@ int main (int, char**)
|
|||
t.is (Lexer::trim (" \t xxx \t "), "\t xxx \t", "Lexer::trim ' \\t xxx \\t ' -> '\\t xxx \\t'");
|
||||
t.is (Lexer::trim (" \t xxx \t ", " \t"), "xxx", "Lexer::trim ' \\t xxx \\t ' -> 'xxx'");
|
||||
|
||||
// std::vector <std::tuple <std::string, Lexer::Type>> Lexer::tokenize (const std::string& input)
|
||||
auto tokenized = Lexer::tokenize (" one two three ");
|
||||
t.is ((int)tokenized.size (), 3, "Lexer::tokenize ' one two three ' --> 3");
|
||||
t.is (std::get <0> (tokenized[0]), "one", "Lexer::tokenize ' one two three ' [0] --> 'one'");
|
||||
t.ok (std::get <1> (tokenized[0]) == Lexer::Type::word, "Lexer::tokenize ' one two three ' [0] --> word");
|
||||
t.is (std::get <0> (tokenized[1]), "two", "Lexer::tokenize ' one two three ' [1] --> 'two'");
|
||||
t.ok (std::get <1> (tokenized[1]) == Lexer::Type::word, "Lexer::tokenize ' one two three ' [1] --> word");
|
||||
t.is (std::get <0> (tokenized[2]), "three", "Lexer::tokenize ' one two three ' [2] --> 'three'");
|
||||
t.ok (std::get <1> (tokenized[2]) == Lexer::Type::word, "Lexer::tokenize ' one two three ' [2] --> word");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue