diff --git a/libnixf/src/Parse/Lexer.cpp b/libnixf/src/Parse/Lexer.cpp
index 971377bbd..7047937c5 100644
--- a/libnixf/src/Parse/Lexer.cpp
+++ b/libnixf/src/Parse/Lexer.cpp
@@ -409,9 +409,15 @@ Token Lexer::lexIndString() {
     return finishToken();
   }
   if (consumePrefix("''")) {
-    Tok = tok_quote2;
-    if (consumePrefix("$") || consumePrefix("\\") || consumePrefix("'"))
+    if (consumePrefix("$") || consumePrefix("'")) {
       Tok = tok_string_escape;
+    } else if (consumePrefix("\\")) {
+      // ''\ escapes any character
+      consume();
+      Tok = tok_string_escape;
+    } else {
+      Tok = tok_quote2;
+    }
     return finishToken();
   }
 
diff --git a/libnixf/test/Parse/Lexer.cpp b/libnixf/test/Parse/Lexer.cpp
index 5e638e3f2..0109038d5 100644
--- a/libnixf/test/Parse/Lexer.cpp
+++ b/libnixf/test/Parse/Lexer.cpp
@@ -197,4 +197,17 @@ TEST_F(LexerTest, lexURI) {
   ASSERT_EQ(Tokens.size(), sizeof(Match) / sizeof(TokenKind));
 }
 
+TEST_F(LexerTest, IndStringEscape) {
+  Lexer Lexer(R"(''\$${)", Diags);
+  const TokenKind Match[] = {
+      tok_string_escape, // ''\ escapes $
+      tok_dollar_curly,  // ${
+  };
+  auto Tokens = collect(Lexer, &Lexer::lexIndString);
+  for (size_t I = 0; I < Tokens.size(); I++) {
+    ASSERT_EQ(Tokens[I].kind(), Match[I]);
+  }
+  ASSERT_EQ(Tokens.size(), sizeof(Match) / sizeof(TokenKind));
+}
+
 } // namespace