Mirror of https://github.com/GothenburgBitFactory/timewarrior.git (synced 2025-06-26 10:54:28 +02:00)
Rules: Fixed bug where all values were tokenized, thereby introducing spaces
parent d7ad32b6d3
commit bd5121e2c8
1 changed file with 7 additions and 5 deletions
@@ -31,6 +31,7 @@
 #include <format.h>
 #include <sstream>
 #include <tuple>
+#include <cassert>
 #include <inttypes.h>
 
 ////////////////////////////////////////////////////////////////////////////////

@@ -267,12 +268,13 @@ void Rules::parse (const std::string& input, int nest /* = 1 */)
 else if (tokens.size () >= 3 &&
          std::get <0> (tokens[1]) == "=")
 {
-  // Extract the words from the 3rd - Nth tuple.
-  std::vector <std::string> words;
-  for (auto& token : std::vector <std::tuple <std::string, Lexer::Type>> (tokens.begin () + 2, tokens.end ()))
-    words.push_back (std::get <0> (token));
+  // If this line is an assignment, then tokenizing it is a mistake, so
+  // use the raw data from 'line'.
+  auto equals = line.find ('=');
+  assert (equals != std::string::npos);
 
-  set (firstWord, join (" ", words));
+  set (trim (line.substr (indent, equals - indent)),
+       trim (line.substr (equals + 1)));
 }
 
 // Top-level settings, with no value:
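
For context, here is a minimal standalone sketch (not part of the commit) of why tokenizing an assignment's value and re-joining the tokens with single spaces can corrupt it, compared with taking the raw text after '='. The toyTokenize and join helpers, the config key, and the path are all invented for illustration; the real Lexer and the helper functions used in Rules.cpp differ in detail.

// Standalone illustration; nothing here is taken from the timewarrior code base.
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

// Made-up stand-in for a lexer: splits on whitespace and emits punctuation
// as separate one-character tokens.
static std::vector <std::string> toyTokenize (const std::string& text)
{
  std::vector <std::string> tokens;
  std::string current;
  for (char c : text)
  {
    if (std::isspace (static_cast <unsigned char> (c)) ||
        std::ispunct (static_cast <unsigned char> (c)))
    {
      if (! current.empty ())
      {
        tokens.push_back (current);
        current.clear ();
      }
      if (std::ispunct (static_cast <unsigned char> (c)))
        tokens.push_back (std::string (1, c));
    }
    else
      current += c;
  }
  if (! current.empty ())
    tokens.push_back (current);
  return tokens;
}

// Join tokens with a separator, the way the removed code rebuilt the value.
static std::string join (const std::string& sep, const std::vector <std::string>& items)
{
  std::string out;
  for (size_t i = 0; i < items.size (); ++i)
  {
    if (i)
      out += sep;
    out += items[i];
  }
  return out;
}

int main ()
{
  // Hypothetical rule line, invented for this sketch.
  std::string line = "example.path = /home/user/.timewarrior/data";
  auto equals = line.find ('=');

  // Old approach: tokenize the value, then re-join with single spaces.
  std::string viaTokens = join (" ", toyTokenize (line.substr (equals + 1)));

  // New approach: take the raw text after '=', skipping leading spaces.
  std::string raw = line.substr (line.find_first_not_of (' ', equals + 1));

  std::cout << "via tokens: '" << viaTokens << "'\n"  // '/ home / user / . timewarrior / data'
            << "raw:        '" << raw       << "'\n"; // '/home/user/.timewarrior/data'
}

With this toy tokenizer, the tokenize-and-join path prints the value with spaces inserted around every punctuation character, while the raw-substring path preserves the value as written, which is the behavior the commit above restores.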