//===-- ResourceScriptToken.cpp ---------------------------------*- C++-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file implements an interface defined in ResourceScriptToken.h.
// In particular, it defines an .rc script tokenizer.
//
//===---------------------------------------------------------------------===//

#include "ResourceScriptToken.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdlib>
#include <utility>

using namespace llvm;

using Kind = RCToken::Kind;

// Checks if Representation is a correct description of an RC integer.
// It should be a 32-bit unsigned integer, either decimal, octal (0[0-7]+),
// or hexadecimal (0x[0-9a-f]+). It might be followed by a single 'L'
// character (that is the difference between our representation and
// StringRef's one). If Representation is correct, 'true' is returned and
// the parsed value is stored in Num.
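// For example, "42", "042" (octal), "0x2a" (hex) and "42L" are all accepted.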
static bool rcGetAsInteger(StringRef Representation, uint32_t &Num) {
  size_t Length = Representation.size();
  if (Length == 0)
    return false;
  // Strip the last 'L' if unnecessary.
  if (std::toupper(Representation.back()) == 'L')
    Representation = Representation.drop_back(1);

  return !Representation.getAsInteger<uint32_t>(0, Num);
}

RCToken::RCToken(RCToken::Kind RCTokenKind, StringRef Value)
    : TokenKind(RCTokenKind), TokenValue(Value) {}

uint32_t RCToken::intValue() const {
  assert(TokenKind == Kind::Int);
  // We assume that the token already is a correct integer (checked by
  // rcGetAsInteger).
  uint32_t Result;
  bool IsSuccess = rcGetAsInteger(TokenValue, Result);
  assert(IsSuccess);
  (void)IsSuccess; // Silence the compiler warning when -DNDEBUG flag is on.
  return Result;
}

bool RCToken::isLongInt() const {
  return TokenKind == Kind::Int && std::toupper(TokenValue.back()) == 'L';
}

StringRef RCToken::value() const { return TokenValue; }

Kind RCToken::kind() const { return TokenKind; }

bool RCToken::isBinaryOp() const {
  switch (TokenKind) {
  case Kind::Plus:
  case Kind::Minus:
  case Kind::Pipe:
  case Kind::Amp:
    return true;
  default:
    return false;
  }
}

static Error getStringError(const Twine &message) {
  return make_error<StringError>("Error parsing file: " + message,
                                 inconvertibleErrorCode());
}

namespace {

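// The Tokenizer keeps a cursor (Pos) into the input Data and produces the
// whole token stream at once via run(), its only public method besides the
// constructor.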
class Tokenizer {
public:
  Tokenizer(StringRef Input) : Data(Input), DataLength(Input.size()), Pos(0) {}

  Expected<std::vector<RCToken>> run();

private:
  // All 'advancing' methods return boolean values; if they're equal to false,
  // the stream has ended or failed.
  bool advance(size_t Amount = 1);
  bool skipWhitespaces();

  // Consumes a token. If any problem occurred, a non-empty Error is returned.
  Error consumeToken(const Kind TokenKind);

  // Check if tokenizer is about to read FollowingChars.
  bool willNowRead(StringRef FollowingChars) const;

  // Check if tokenizer can start reading an identifier at current position.
  // The original tool did not specify the rules to determine what is a correct
  // identifier. We assume they should follow the C convention:
  // [a-zA-Z_][a-zA-Z0-9_]*.
  bool canStartIdentifier() const;
  // Check if tokenizer can continue reading an identifier.
  bool canContinueIdentifier() const;

  // Check if tokenizer can start reading an integer.
  // A correct integer always starts with a 0-9 digit,
  // can contain characters 0-9A-Fa-f (digits),
  // Ll (marking the integer is 32-bit), Xx (marking the representation
  // is hexadecimal). As some kind of separator should come after the
  // integer, we can consume the integer until a non-alphanumeric
  // character.
  bool canStartInt() const;
  bool canContinueInt() const;

  bool canStartString() const;

  // Check if tokenizer can start reading a single line comment (e.g. a comment
  // that begins with '//')
  bool canStartLineComment() const;

  // Check if tokenizer can start or finish reading a block comment (e.g. a
  // comment that begins with '/*' and ends with '*/')
  bool canStartBlockComment() const;

  // Throw away all remaining characters on the current line.
  void skipCurrentLine();

  bool streamEof() const;

  // Classify the token that is about to be read from the current position.
  Kind classifyCurrentToken() const;

  // Process the Kind::Identifier token - check if it is
  // an identifier describing a block start or end.
  void processIdentifier(RCToken &token) const;

  StringRef Data;
  size_t DataLength, Pos;
};

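// Find the first end-of-line character and then skip the whole CR/LF run, so
// Pos lands on the first character of the next non-blank line (or at the end
// of the input if no newline remains).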
void Tokenizer::skipCurrentLine() {
  Pos = Data.find_first_of("\r\n", Pos);
  Pos = Data.find_first_not_of("\r\n", Pos);

  if (Pos == StringRef::npos)
    Pos = DataLength;
}

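// Main tokenizer loop: skip whitespace, classify the token starting at the
// current position, consume it, and append it to the result. Comments are
// consumed but not emitted.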
Expected<std::vector<RCToken>> Tokenizer::run() {
  Pos = 0;
  std::vector<RCToken> Result;

  // Consume an optional UTF-8 Byte Order Mark.
  if (willNowRead("\xef\xbb\xbf"))
    advance(3);

  while (!streamEof()) {
    if (!skipWhitespaces())
      break;

    Kind TokenKind = classifyCurrentToken();
    if (TokenKind == Kind::Invalid)
      return getStringError("Invalid token found at position " + Twine(Pos));

    const size_t TokenStart = Pos;
    if (Error TokenError = consumeToken(TokenKind))
      return std::move(TokenError);

    // Comments are just deleted, don't bother saving them.
    if (TokenKind == Kind::LineComment || TokenKind == Kind::StartComment)
      continue;

    RCToken Token(TokenKind, Data.take_front(Pos).drop_front(TokenStart));
    if (TokenKind == Kind::Identifier) {
      processIdentifier(Token);
    } else if (TokenKind == Kind::Int) {
      uint32_t TokenInt;
      if (!rcGetAsInteger(Token.value(), TokenInt)) {
        // The integer has incorrect format or cannot be represented in
        // a 32-bit integer.
        return getStringError("Integer invalid or too large: " +
                              Token.value().str());
      }
    }

    Result.push_back(Token);
  }

  return Result;
}

bool Tokenizer::advance(size_t Amount) {
  Pos += Amount;
  return !streamEof();
}

bool Tokenizer::skipWhitespaces() {
  while (!streamEof() && isSpace(Data[Pos]))
    advance();
  return !streamEof();
}

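// The TOKEN/SHORT_TOKEN #define + #include pattern below expands every
// single-character entry of ResourceScriptTokenList.def into a 'case' label,
// so all of those tokens share one consumption path (classifyCurrentToken
// relies on the same trick for classification).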
Error Tokenizer::consumeToken(const Kind TokenKind) {
  switch (TokenKind) {
// One-character token consumption.
#define TOKEN(Name)
#define SHORT_TOKEN(Name, Ch) case Kind::Name:
#include "ResourceScriptTokenList.def"
    advance();
    return Error::success();

  case Kind::LineComment:
    advance(2);
    skipCurrentLine();
    return Error::success();

  case Kind::StartComment: {
    advance(2);
    auto EndPos = Data.find("*/", Pos);
    if (EndPos == StringRef::npos)
      return getStringError(
          "Unclosed multi-line comment beginning at position " + Twine(Pos));
    advance(EndPos - Pos);
    advance(2);
    return Error::success();
  }

  case Kind::Identifier:
    while (!streamEof() && canContinueIdentifier())
      advance();
    return Error::success();

  case Kind::Int:
    while (!streamEof() && canContinueInt())
      advance();
    return Error::success();

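  // Note: inside a string literal a doubled quote ("") stands for a literal
  // quote character, so e.g. "a ""quoted"" word" is consumed as one token.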
  case Kind::String:
    // Consume the preceding 'L', if there is any.
    if (std::toupper(Data[Pos]) == 'L')
      advance();
    // Consume the double-quote.
    advance();

    // Consume the characters until the end of the file, line or string.
    while (true) {
      if (streamEof()) {
        return getStringError("Unterminated string literal.");
      } else if (Data[Pos] == '"') {
        // Consume the ending double-quote.
        advance();
        // However, if another '"' follows this double-quote, the string didn't
        // end and we just included '"' into the string.
        if (!willNowRead("\""))
          return Error::success();
      } else if (Data[Pos] == '\n') {
        return getStringError("String literal not terminated in the line.");
      }

      advance();
    }

  case Kind::Invalid:
    assert(false && "Cannot consume an invalid token.");
  }

  llvm_unreachable("Unknown RCToken::Kind");
}

bool Tokenizer::willNowRead(StringRef FollowingChars) const {
  return Data.drop_front(Pos).startswith(FollowingChars);
}

bool Tokenizer::canStartIdentifier() const {
  assert(!streamEof());
  const char CurChar = Data[Pos];
  return std::isalpha(CurChar) || CurChar == '_' || CurChar == '.';
}

bool Tokenizer::canContinueIdentifier() const {
  assert(!streamEof());
  const char CurChar = Data[Pos];
  return std::isalnum(CurChar) || CurChar == '_' || CurChar == '.' ||
         CurChar == '/' || CurChar == '\\' || CurChar == '-';
}

bool Tokenizer::canStartInt() const {
  assert(!streamEof());
  return std::isdigit(Data[Pos]);
}

bool Tokenizer::canStartBlockComment() const {
  assert(!streamEof());
  return Data.drop_front(Pos).startswith("/*");
}

bool Tokenizer::canStartLineComment() const {
  assert(!streamEof());
  return Data.drop_front(Pos).startswith("//");
}

bool Tokenizer::canContinueInt() const {
  assert(!streamEof());
  return std::isalnum(Data[Pos]);
}

bool Tokenizer::canStartString() const {
  return willNowRead("\"") || willNowRead("L\"") || willNowRead("l\"");
}

bool Tokenizer::streamEof() const { return Pos == DataLength; }

Kind Tokenizer::classifyCurrentToken() const {
  if (canStartBlockComment())
    return Kind::StartComment;
  if (canStartLineComment())
    return Kind::LineComment;
  if (canStartInt())
    return Kind::Int;
  if (canStartString())
    return Kind::String;
  // BEGIN and END are at this point of lexing recognized as identifiers.
  if (canStartIdentifier())
    return Kind::Identifier;

  const char CurChar = Data[Pos];
  switch (CurChar) {
// One-character token classification.
#define TOKEN(Name)
#define SHORT_TOKEN(Name, Ch) \
  case Ch: \
    return Kind::Name;
#include "ResourceScriptTokenList.def"
  default:
    return Kind::Invalid;
  }
}

void Tokenizer::processIdentifier(RCToken &Token) const {
  assert(Token.kind() == Kind::Identifier);
  StringRef Name = Token.value();
  if (Name.equals_insensitive("begin"))
    Token = RCToken(Kind::BlockBegin, Name);
  else if (Name.equals_insensitive("end"))
    Token = RCToken(Kind::BlockEnd, Name);
}

} // anonymous namespace

namespace llvm {

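// Illustrative usage (not part of this file) for the entry point below:
//   Expected<std::vector<RCToken>> Tokens = tokenizeRC(Contents);
//   if (!Tokens)
//     return Tokens.takeError();
//   for (const RCToken &Token : *Tokens) { /* hand off to the parser */ }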
Expected<std::vector<RCToken>> tokenizeRC(StringRef Input) {
  return Tokenizer(Input).run();
}

} // namespace llvm